Dataset schema (column name, type, and observed size range):

  query            string    lengths 9 to 9.05k characters
  document         string    lengths 10 to 222k characters
  metadata         dict
  negatives        sequence  always 30 items
  negative_scores  sequence  always 30 items
  document_score   string    lengths 4 to 10 characters
  document_rank    string    2 distinct values
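The rows below are previews of individual examples. As a minimal sketch, assuming the Hugging Face `datasets` library and a placeholder dataset identifier (the real name is not shown in this preview), a dataset with this schema could be loaded and inspected like so:

    from datasets import load_dataset

    # "org/code-retrieval-triplets" is a hypothetical placeholder name.
    ds = load_dataset("org/code-retrieval-triplets", split="train")

    row = ds[0]
    print(row["query"])                 # natural-language description of the code
    print(row["document"])              # the positive code snippet
    print(len(row["negatives"]))        # always 30 hard-negative snippets
    print(len(row["negative_scores"]))  # one retrieval score per negative
    print(row["document_score"], row["document_rank"])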
Retrieve the ItopapiOrganization corresponding to this server
def find_organization(self):
    # Look up the ItopapiOrganization matching this server's org_id.
    if self.org_id is not None:
        return ItopapiPrototype.get_itop_class('Organization').find(self.org_id)
    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def organization(self):\n return self._tower.get_organization_by_id(self._data.get('organization'))", "def get_organization(self):\n return self.reference[REF_ORGANIZATION][REF_VALUE]", "def getOrganization(self):\n return _libsbml.ModelCreator_getOrganization(self)", "def get_organization(self):\n pos_or_org = self.position.to_object\n if pos_or_org is None:\n return None\n elif pos_or_org.portal_type == 'position':\n return pos_or_org.get_organization()\n elif pos_or_org.portal_type == 'organization':\n return pos_or_org", "def organization(self, organization_id):\r\n return organizations.Organization(self, organization_id)", "def GetOrganization(**argd):\n flag, ret = CGateway.core.GetOrganizationName(argd[\"session\"])\n xFlag = CGateway._HandleExceptionAndUnauthorized(flag, ret, argd[\"session\"])\n if xFlag is not None:\n return xFlag\n return CGateway._SuccessResponse({'return': ret})", "def organization(self) -> \"Organization\":\n return Organization(connection=self)", "def fetch_organization(organization):\n return fetch_json(organization_url, organization)", "def organizations(self):\n return self.get('{}/orgs'.format(ApiVersion.A1.value))", "def get_org(self):\n return Org.deserialize(self._get_single('org', {}, from_results=False))", "def organization(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"organization\")", "async def get_organization(request: Request, org: str):\n\n redis = request.app.state.redis\n organizations_obj = orjson.loads(await redis.get_key(\"influxdb_organizations\"))\n if org not in organizations_obj:\n logger.warning(\"Organization %s not found.\", org)\n raise HTTPException(\n status_code=404, detail=\"Organization {} not found.\".format(org))\n return {org: organizations_obj[org]}", "def get_organization(self, id: str) -> dict[str, Any]:\n params = {}\n\n return self.client.get(self._url(id), params=params)", "def get_organization(\n self, organization_id: Union[str, int], *, params: Optional[dict] = None\n ) -> \"resource_types.Organization\":\n\n return communicator.Organization(self.__requester).from_id(\n organization_id=organization_id, parameters=params\n )", "def organization(self):\r\n return Organization(self)", "def organization(self):\r\n return Organization(self)", "def organization(self):\r\n return Organization(self)", "def organization_id(self) -> str:\n return pulumi.get(self, \"organization_id\")", "def sub_organization(self) -> object:\n return self._sub_organization", "def organization_id(self):\n return self._organization_id", "def organization_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"organization_id\")", "def test_get_organization(self):\n pass", "def organizations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"organizations\")", "def organizations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"organizations\")", "def get_organizations(\n self, *, params: Optional[dict] = None\n ) -> \"resource_types.Organizations\":\n\n return communicator.Organizations(self.__requester).fetch(parameters=params)", "def get_org(self, retry_on_rate_exceed=False):\n return Org.deserialize(self._get_raw('org', {}, retry_on_rate_exceed))", "def organizations(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"organizations\")", "def test_get_organization_from_api_key(self):\n pass", "def organization_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"organization_id\")", "def 
organization_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"organization_id\")" ]
[ "0.7651209", "0.71187073", "0.7091516", "0.68542224", "0.6810734", "0.680332", "0.67780435", "0.6712279", "0.6706957", "0.66816705", "0.6667797", "0.6625002", "0.65964967", "0.6589382", "0.6561183", "0.6561183", "0.6561183", "0.65521574", "0.6543021", "0.64575344", "0.63788193", "0.63149315", "0.6290847", "0.6290847", "0.6279144", "0.6279081", "0.6276546", "0.6250836", "0.6137893", "0.6137893" ]
0.75703514
1
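The metadata above declares a triplet objective over (query, document, negatives). A minimal sketch of how one such triplet could feed a contrastive loss, assuming PyTorch and pre-computed embeddings (the encoder itself is not part of this dataset):

    import torch
    import torch.nn.functional as F

    def info_nce_loss(query_emb, doc_emb, neg_embs, temperature=0.05):
        # query_emb: (d,), doc_emb: (d,), neg_embs: (30, d)
        # Score the positive and all negatives against the query; the
        # positive sits at index 0 and is the cross-entropy target.
        candidates = torch.cat([doc_emb.unsqueeze(0), neg_embs], dim=0)
        scores = F.cosine_similarity(query_emb.unsqueeze(0), candidates)
        scores = scores / temperature
        return F.cross_entropy(scores.unsqueeze(0), torch.tensor([0]))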
Determine whether the next token is the logical successor of the current Roman token. This test applies at Roman levels 3 or 6: it checks that the next token is both a valid Roman numeral and exactly one greater than the current one. For instance, 'v' is a valid Roman numeral, but unless the current Roman token evaluates to 4, the 'v' must instead be a level-1 alpha marker.
def roman_surf_test(self, token, next_token):
    if not token:
        return False
    for each in [token, next_token]:
        if not roman_to_int(each):
            return False
    return roman_to_int(next_token) == roman_to_int(token) + 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_roman_numeral(s: str) -> bool:\n if not isinstance(s, str):\n raise TypeError(\"Only strings may be tested \")\n return bool(_romanNumeralPattern.match(s))", "def roman_numerals_decoder(roman):\n roman_numerals = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}\n result = 0\n for i, c in enumerate(roman):\n if (i + 1) == len(roman) or roman_numerals[c] >= roman_numerals[roman[i + 1]]:\n result += roman_numerals[c]\n else:\n result -= roman_numerals[c]\n return result", "def roman_to_arabic_previous(roman):\n result = [0]\n previous_number = 4000\n p_previous_number = 4001\n # we store 2 previous numbers in order to check is\n # this number still correct\n for i, char in enumerate(roman):\n if char in MAP_ROMAN:\n number = MAP_ROMAN[char]\n # Chars in Roman numbers should decrease if not 3 same chars in line\n if p_previous_number <= number and previous_number != number:\n raise ValueError('Wrong Roman Number (...1)')\n if number > previous_number:\n # minus previous number if current > previous\n # IV: 5 - 1, IX: 10 - 1, XC: 100 - 10\n if number % previous_number < 5:\n sign = -1\n else:\n raise ValueError('Wrong Roman number (...2)')\n else:\n sign = 1\n\n print_debug(i, roman, char, number, previous_number, sign)\n\n result[-1] *= sign\n result.append(number)\n p_previous_number = previous_number\n previous_number = number\n else:\n raise ValueError('Unknown char \"%s\" in input roman number' % char)\n counts = defaultdict(int)\n\n # test for same multiple Roman numbers\n for number in result:\n num = abs(number)\n counts[num] += 1\n if counts[num] > 3:\n raise ValueError('Wrong Roman number (...3)')\n\n return sum(result)", "def solution(roman):\n r = roman.upper()\n nums = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}\n total = 0\n for i, c in enumerate(r):\n if i < len(r) - 1:\n if nums[r[i]] < nums[r[i+1]]:\n total -= nums[c]\n else:\n total += nums[c]\n else:\n total += nums[c]\n return total", "def toRoman(n):\n pass", "def is_ror(val):\n return ror_regexp.match(val)", "def isoperator(token):\n\n # Token is an operator\n return token and token.lower() in Token.OPERATORS", "def fromRoman(s):\n if not s:\n raise InvalidRomanNumeralError, 'Input can not be blank'\n if not romanNumeralPattern.search(s):\n raise InvalidRomanNumeralError, 'Invalid Roman numeral: %s' % s\n\n result = 0\n index = 0\n for numeral, integer in romanNumeralMap:\n while s[index:index+len(numeral)] == numeral:\n result += integer\n index += len(numeral)\n return result", "def islogicseparator(token):\n\n # Token is a logic separator\n return token and token.lower() in Token.LOGIC_SEPARATORS", "def to_roman(an_arabic):\n result = \"\"\n\n for level, symbol in [(1000,\"M\"),\n (900,\"CM\"),\n (500,\"D\"),\n (400,\"CD\"),\n (100,\"C\"),\n (90,\"XC\"),\n (50,\"L\"),\n (40,\"XL\"),\n (10,\"X\"),\n (9,\"IX\"),\n (5,\"V\"),\n (4,\"IV\"),\n (1,\"I\")]:\n\n while an_arabic >= level:\n result += symbol\n an_arabic -= level\n \n return result", "def isBinaryOp(tokens):\n stop = SwiftSupport.getLastOpTokenIndex(tokens)\n if stop == -1:\n return False\n start = tokens.index\n prevToken = tokens.get(start - 1)\n nextToken = tokens.get(stop + 1)\n prevIsWS = SwiftSupport.isLeftOperatorWS(prevToken)\n nextIsWS = SwiftSupport.isRightOperatorWS(nextToken)\n result = prevIsWS and nextIsWS or (not prevIsWS and not nextIsWS)\n text = tokens.getText(start, stop)\n return result", "def romanize(digit, glyphs):\n if 1 <= digit <= 3:\n return digit*glyphs[0]\n elif digit == 4:\n return 
glyphs[0] + glyphs[1]\n elif digit >= 5 and digit <= 8:\n return glyphs[1] + ((digit - 5) * glyphs[0])\n elif digit == 9:\n return glyphs[0]+glyphs[2]\n else:\n return ''", "def fromRoman(s):\n pass", "def is_nine_pandigital(number):\n digits = str(number)\n return bool(len(digits) == len(ALL_NINE) and set(digits) == ALL_NINE)", "def check_polarity(vertex, lexicon):\n if vertex.lang != \"EN\":\n return False\n return vertex.w in lexicon", "def isRegexPossible(self):\n if self._lastToken is None:\n # No token has been produced yet: at the start of the input,\n # no division is possible, so a regex literal _is_ possible.\n return True\n\n if self._lastToken.type == ECMAScriptLexer.Identifier or \\\n self._lastToken.type == ECMAScriptLexer.NullLiteral or \\\n self._lastToken.type == ECMAScriptLexer.BooleanLiteral or \\\n self._lastToken.type == ECMAScriptLexer.This or \\\n self._lastToken.type == ECMAScriptLexer.CloseBracket or \\\n self._lastToken.type == ECMAScriptLexer.CloseParen or \\\n self._lastToken.type == ECMAScriptLexer.OctalIntegerLiteral or \\\n self._lastToken.type == ECMAScriptLexer.DecimalLiteral or \\\n self._lastToken.type == ECMAScriptLexer.HexIntegerLiteral or \\\n self._lastToken.type == ECMAScriptLexer.StringLiteral or \\\n self._lastToken.type == ECMAScriptLexer.PlusPlus or \\\n self._lastToken.type == ECMAScriptLexer.MinusMinus:\n # After any of the tokens above, no regex literal can follow.\n return False\n else:\n # In all other cases, a regex literal _is_ possible.\n return True", "def num2roman(num):\n roman = ''\n while num > 0:\n for i, r in ROMAN_MAP:\n while num >= i:\n roman += r\n num -= i\n return roman", "def check_sym(ikjl, nmo, sym):\n if sym == 1:\n return True\n else:\n i, k, j, l = ikjl\n if sym == 4:\n kilj = (k,i,l,j)\n jlik = (j,l,i,k)\n ljki = (l,j,k,i)\n if (ikjl > jlik) or (ikjl > kilj) or (ikjl > ljki):\n return False\n else:\n return True\n else:\n ik = i + k*nmo\n jl = j + l*nmo\n return (i >= k and j >= l) and ik >= jl", "def toRoman(dec):\t\t\n if dec <=0:\n\t raise ValueError, \"It must be a positive\"\n # to avoid MMMM\n\telif dec>=4000: \n\t raise ValueError, \"It must be lower than MMMM(4000)\"\n \n\treturn decToRoman(dec,\"\",decimalDens,romanDens)", "def roman_number(value):\n try:\n value = to_roman(value)\n except RomanError as e:\n raise TemplateSyntaxError(\"roman_number error: %s\" % str(e))\n return value", "def toRoman(n):\n result = \"\"\n for numeral, integer in romanNumeralMap:\n while n >= integer:\n result += numeral\n n -= integer\n return result", "def isRNANucleotide(letter):\n if letter == 'A' or letter == 'C' or letter == 'G' or letter == 'U':\n return True\n return False", "def match(self, token):\n try:\n if token == 'S' and is_symbol(self.the_input[self.index]) \\\n or self.the_input[self.index] == token:\n self.index += 1\n return True\n except IndexError:\n print 'Error on checking \\'' + token + \\\n '\\': the next token is empty'\n exit(1)\n print 'No' # there is improper grammar\n exit(1)", "def romanify(num):\n result = \"\"\n return result", "def formatRomanNumeral(rn, key):\n # Something of \"I\" and \"I\" of something\n if rn == \"I/I\":\n rn = \"I\"\n return rn", "def should_lex(cls, char):\n return char == '{' or char == '}'", "def has_room(r_l, out, char_bud):\n if r_l == \"R\":\n if len(out) + len(sentence[\"tokens\"][counter_r][0]) < char_bud:\n return True\n else:\n return False\n if r_l == \"L\":\n if len(out) + len(sentence[\"tokens\"][counter_l][0]) < char_bud:\n return True\n else:\n return False", 
"def check(i):\r\n return (has_palindrome(i, 2, 4) and\r\n has_palindrome(i+1, 1, 5) and\r\n has_palindrome(i+2, 1, 4) and\r\n has_palindrome(i+3, 0, 6))", "def isbimol(rxn_typ):\n return rxn_typ in BIMOL_REACTIONS", "def check(self, text):\n lt = s = n = 0\n result = False\n for g in text:\n if g in LETTERS and lt < self.letters:\n lt += 1\n if g in NUMBERS and n < self.numbers:\n n += 1\n if g in SYMBOLS and s < self.symbols:\n s += 1\n if n == self.numbers and s == self.symbols and lt == self.letters:\n result = True\n break\n return result" ]
[ "0.5724986", "0.55127585", "0.53830534", "0.52930975", "0.5107975", "0.5093595", "0.49485403", "0.49321708", "0.4916438", "0.49055704", "0.48494673", "0.48381567", "0.48101753", "0.47975814", "0.47125", "0.47098485", "0.46954077", "0.46896818", "0.46814248", "0.46712714", "0.46646854", "0.4634048", "0.46017638", "0.45813024", "0.4556283", "0.450957", "0.4505944", "0.44940138", "0.44937068", "0.448682" ]
0.6816487
0
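Across rows, document_rank appears to equal the number of negatives whose score exceeds document_score (the first row has one negative at 0.7651209 above its document's 0.75703514, hence rank 1; this row has none above 0.6816487, hence rank 0). That reading is inferred from the preview rather than documented anywhere, but it reproduces the values shown:

    def document_rank(document_score, negative_scores):
        # Count the negatives whose retrieval score beats the positive.
        doc = float(document_score)
        return sum(float(s) > doc for s in negative_scores)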
Fetch a token from the server using the provided user and password, so that subsequent web service requests for waveforms are authenticated and can potentially access restricted data.
def _retrieve_jwt_token(self, user, password):
    # force https so that we don't send around tokens insecurely
    url = 'https://{}/api/token'.format(urlparse(self.base_url).netloc)
    # paranoid: check again that we only send the token to https
    if urlparse(url).scheme != "https":
        msg = 'This should not happen, please file a bug report.'
        raise Exception(msg)
    # convert to json and encode
    data = json.dumps({"username": user, "password": password})
    data = bytes(data, "utf-8")
    headers = {"Content-Type": "application/json"}
    request = urllib_request.Request(url, data=data, headers=headers)
    result = urllib_request.urlopen(request).read().decode("utf-8")
    dic = json.loads(result)
    # store the access/refresh token pair
    self.jwt_access_token = dic['access']
    self.jwt_refresh_token = dic['refresh']
    if self.debug:
        print('Got temporary access/refresh: {}/{}'.format(
            self.jwt_access_token, self.jwt_refresh_token))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fetch_token(self, user_id, password):\n url = buildCommandUrl(self.server, \"/as/user/token\")\n result = json_request(\"POST\", url, {\n \"userId\": user_id,\n \"password\": password\n })\n return result[\"token\"]", "def _request_token(self):\n response = requests.post(\n \"%s/generateToken\" % self.root_uri.rstrip(\"/\"), {\n \"username\": self.username,\n \"password\": self.password,\n \"expiration\": '60',\n \"referer\": 'https://wsdot.maps.arcgis.com',\n \"f\": 'json'\n })\n\n token_info = response.json()\n if \"error\" in token_info:\n raise TokenError(token_info[\"error\"])\n self._token = token_info[\"token\"]\n self._expires = datetime.fromtimestamp(token_info[\"expires\"] / 1000)", "def get_token(user, password):\n url = urljoin(PivotalTrackerService.URI, \"me\")\n auth = (user, password)\n response = PivotalTrackerService.get_response(\"get\", url, auth=auth)\n\n try:\n response.raise_for_status()\n data = response.json()\n ret_val = data[\"api_token\"]\n except RequestException:\n ret_val = None\n\n return ret_val", "def get_token(self):\n client_auth = requests.auth.HTTPBasicAuth(self.client, self.secret)\n post_data = {'grant_type': 'password', 'username': self.user, 'password': self.password}\n headers = {'User-Agent': self.user_agent}\n response = requests.Session()\n response2 = response.post(self.token_url, auth=client_auth, data=post_data, headers=headers)\n self.token = response2.json()['access_token']\n self.t_type = response2.json()['token_type']", "def get_token(self):\n client_auth = requests.auth.HTTPBasicAuth(self.client, self.secret)\n post_data = {'grant_type': 'password', 'username': self.user, 'password': self.password}\n headers = {'User-Agent': self.user_agent}\n response = requests.Session()\n response2 = response.post(self.token_url, auth=client_auth, data=post_data, headers=headers)\n self.token = response2.json()['access_token']\n self.t_type = response2.json()['token_type']", "def get_auth_token(self, username, password):\n url = '/'.join([self.base_url, self.TOKEN_ENDPOINT])\n r = requests.get(url, auth=(username, password))\n if r.status_code == 200:\n return r.content\n return r", "def get_token():\n req = request.get_json()\n username = str(req['username'])\n password = str(req['password'])\n if User.username_password_match(username, password):\n expiration_date = datetime.datetime.utcnow() + \\\n datetime.timedelta(seconds=100)\n token = jwt.encode({'exp': expiration_date}, app.config['SECRET_KEY'], algorithm='HS256')\n return token\n return Response('', 401, mimetype='application/json')", "def getUser(self, authenticationToken):\r\n pass", "def get_token(client, email_or_username, password):\n\turl = 'account/token'\n\tbasic_auth = (email_or_username, password)\n\treturn client._request(url, Request.GET, basic_auth=basic_auth)", "def get_auth_token(username, password):\n url = get_auth_token_url()\n user_credentials = {\"username\": username, \"password\": password}\n r = requests.post(url, json=user_credentials)\n return r", "def get_token():\n json = request.get_json(force=True)\n\n user = User.query.filter_by(username=json['username']).first()\n if user is None:\n raise UserDoesNotExistException()\n\n if not user.passhash == json['password']:\n raise InvalidPasswordException()\n\n return jsonify(status='OK',\n token=user.get_token())", "def get_auth_token_student():\n\n token = g.user.generate_auth_token(600)\n return jsonify({'token': token.decode('ascii'), 'duration': 600})", "def get_api_token(self, app, user, pwd):\n authorization = 
('Basic ' + base64.b64encode(user + \":\" + pwd))\n api_token_resp = app.post('/v1/api_token', headers={'Authorization': authorization})\n if api_token_resp.status != '200 OK':\n raise ValueError(api_token_resp.status)\n api_token = json.loads(api_token_resp.data)['api_token']\n return api_token", "def get_token():\n if g.current_user.is_anonymous or g.token_used:\n return unauthorized('Invalid credentials')\n return jsonify({'token': g.current_user.generate_auth_token(\n expiration=3600), 'expiration': 3600})", "def get_token():\n if g.current_user.is_anonymous or g.token_used:\n return unauthorized('Invalid credentials')\n return jsonify({'token': g.current_user.generate_auth_token(\n expiration=3600), 'expiration': 3600})", "def get_token():\n if not request.is_json:\n return jsonify({\"msg\": \"Missing JSON in request\"}), 400\n username = request.json.get('username', None)\n password = request.json.get('password', None)\n\n if not username:\n abort(400, \"Invalid username or password\")\n if not password:\n abort(400, \"Invalid username or password\")\n users = app.data.driver.db[config.DOMAIN['user']['datasource']['source']]\n user = users.find_one({'email':username})\n # validate the user in the user's service\n if not user:\n abort(401, \"Invalid username or password\")\n if not check_password_hash(user.get('password'), password):\n abort(401, \"Invalid username or password\")\n role = user.get('role', 'user')\n user_id = str(user.get('_id'))\n user = User(user_id, username, role)\n access_token, refresh_token = create_token(user)\n return jsonify(\n token=access_token,\n type='bearer',\n roles=role,\n user=username,\n refreshToken=refresh_token), 200", "def get_token(request):\n request_json = request.get_json()\n # response = dict()\n if request.authorization and 'password' in request.authorization and 'username' in request.authorization:\n pwd = request.authorization.get('password')\n user = request.authorization.get('username')\n if pwd == 'password':\n token = jwt.encode({\"user\": user,\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=20)}, SECRET_KEY,\n algorithm=\"HS256\")\n\n return jsonify({'token': token})\n\n return make_response(\"basic login required\", 404, {\"www-authenticate\": \"basic login required\"})", "def skyserv_authenticator(self):\n \n header = {\n 'Content-Type': accept, \n 'X-Auth-Token': self.casjobtoken,\n 'Accept': accept\n }\n # this format is disgusting but required....\n authdata = {\n 'auth' :{\n 'identity': {\n 'password': {\n 'user': {\n 'name': username,\n 'password': password\n }\n }\n }\n }\n }\n payload = json.dumps(authdata).encode(encoding='utf-8')\n try:\n post = requests.post(self.loginurl, data=payload, headers=header)\n\n if post.status_code == 200:\n response = json.loads(post.text)\n token = response[self.tokenkey]\n return token\n else:\n print('Username and/or password are invalid.')\n post.raise_for_status()\n except Exception as e:\n raise(str(e))", "def get_token(self, tenant_name, user_name, password):\n _url = \"http://\" + self.host_ip + \":5000/v2.0/tokens\"\n _headers = {\"content-type\": \"application/json\"}\n _token_info = {\"auth\": {\"tenantName\": tenant_name,\n \"passwordCredentials\":\n {\"username\": user_name,\n \"password\": password}}\n }\n\n _body = json.dumps(_token_info)\n response = self.request(\"POST\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\"No response from Server while getting token for\"\n \" tenant: %s\" % tenant_name)\n return response\n if response.status not 
in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Request of token for %s tenant Failed with\"\n \" status %s \" % (tenant_name, response.status))\n return response.status\n output = json.loads(response.data)\n token_id = output['access']['token']['id']\n LOG_OBJ.debug(\"Token ID for tenant %s is %s\" % (tenant_name, token_id))\n\n return token_id", "def UserToken(self) -> object:", "def get_auth_token(self):\r\n\r\n self._authenticating = True\r\n\r\n auth_data = {\r\n \"auth\": {\r\n \"identity\": {\r\n \"methods\": [\r\n \"password\"\r\n ],\r\n \"password\": {\r\n \"user\": {\r\n \"domain\": {\r\n \"name\": self._config['user_domain'] if 'user_domain' in self._config else self._config[\r\n 'domain']\r\n },\r\n \"name\": self._config['user'],\r\n\r\n \"password\": self._config['password']\r\n }\r\n }\r\n },\r\n \"scope\": {\r\n \"project\": {\r\n \"domain\": {\r\n \"name\": self._config['domain']\r\n },\r\n \"name\": self._config['project'],\r\n }\r\n }\r\n }\r\n }\r\n\r\n # profile = prof,\r\n # user_agent = 'toil',\r\n # auth_url = self._config['auth_url'],\r\n # project_name = self._config['project'],\r\n # project_domain_name = self._config['domain'],\r\n # user_domain_name = self._config['domain'],\r\n # username = self._config['user'],\r\n # password = self._config['password']\r\n\r\n response = self.post(None, self.URL_AUTH_TOKEN, data=json.dumps(auth_data))\r\n\r\n self._authenticating = False\r\n\r\n json_response = response.json()\r\n self._token = json_response['token']\r\n self._token_x_subject = response.headers['x-subject-token']\r\n\r\n catalog = json_response['token']['catalog']\r\n\r\n for service in catalog:\r\n self._services[service['name']] = service", "def get_token(username, password):\n\t\ttoken = cf.get_token(username, password)\n\t\treturn token", "def get_auth_token_teacher():\n\n token = g.user.generate_auth_token(600)\n return jsonify({'token': token.decode('ascii'), 'duration': 600})", "def _get_token(self):\n return user.get_token()", "def get(self):\n # Login of authorized user stores in Flask g object\n user = User.query.filter_by(username=g.user.username).first()\n # Generate token\n token = user.generate_auth_token()\n # Send token in ASCII format\n return {'token': token.decode('ascii')}", "def get_token(self):\n self.session.headers.pop(\"Authorization\", None) # delete old token if was\n\n data = json.dumps({\"password\": self.password, \"username\": self.username})\n answer = self.server_request(self._authTokenPath, data=data)\n\n try:\n self.token = json.loads(answer)[\"token\"]\n self.session.headers.update({\"Authorization\": \"Token \" + self.token})\n except KeyError as err:\n print_unexpected_json_error_key(err, answer, self._authTokenPath)\n exit(1)", "def get_token():\n url = settings.GENERATE_TOKEN_URL\n headers = {\"Authorization\": \"Basic {}\".format(settings.MPESA_APP_AUTHTOKEN)}\n response = get(url, headers)\n return response.json()", "def get_auth_token():\n token = g.user.generate_auth_token(24*3600)\n return jsonify({'user_id': g.user.id, 'token': token.decode('ascii')})", "def _get_token(self, client):\n\n url = self._url('token')\n data = {'grant_type': 'password',\n 'username': self.user,\n 'password': self.password,\n 'scope': 'PRODUCTION'}\n client_data = self.clients[client]\n consumer_key = client_data['response']['consumerKey']\n consumer_secret = client_data['response']['consumerSecret']\n auth = requests.auth.HTTPBasicAuth(consumer_key, consumer_secret)\n return self.POST(url, data=data, auth=auth)", "def _get_token(self, 
username, password, user_domain, project_id=None):\n request = {\n \"auth\": {\n \"identity\": {\n \"methods\": [\n \"password\"\n ],\n \"password\": {\n \"user\": {\n \"domain\": {\n \"name\": user_domain\n },\n \"name\": username,\n \"password\": password\n }\n }\n }\n }\n }\n\n if project_id:\n request['auth']['scope'] = {\n \"project\": {\n \"id\": project_id\n }\n }\n response = self.client.post(TOKEN_PATH, data=json.dumps(request),\n headers=HEADERS)\n if response.status_code == 201:\n return response.headers['X-Subject-Token']\n else:\n msg = \"Failed to authenticate %s user. Status %s\" % (username,\n response.status_code)\n raise SystemExit(msg)" ]
[ "0.68086314", "0.6705352", "0.660126", "0.6583421", "0.6583421", "0.6573778", "0.645508", "0.6440794", "0.64196825", "0.64065206", "0.6396614", "0.63819987", "0.63676745", "0.63418996", "0.63418996", "0.6296961", "0.62778217", "0.62672204", "0.6247356", "0.6242239", "0.61941713", "0.6193632", "0.61840963", "0.61683995", "0.61664414", "0.6151358", "0.614308", "0.6142724", "0.6123825", "0.6081841" ]
0.6948478
0
Check whether the JWT token is valid.
def _validate_jwt_token(self):
    # force https so that we don't send around tokens insecurely
    url = 'https://{}/api/token/verify'.format(urlparse(self.base_url).netloc)
    # paranoid: check again that we only send the token to https
    if urlparse(url).scheme != "https":
        msg = 'This should not happen, please file a bug report.'
        raise Exception(msg)
    if not self.jwt_access_token:
        raise FDSNUnauthorizedException("Unauthorized, authentication "
                                        "required.")
    # convert to json and encode
    data = json.dumps({"token": self.jwt_access_token})
    data = bytes(data, "utf-8")
    headers = {"Content-Type": "application/json"}
    request = urllib_request.Request(url, data=data, headers=headers)
    try:
        result = urllib_request.urlopen(request).read().decode("utf-8")
        dic = json.loads(result)
        # an empty JSON response means the token is valid
        valid = not bool(dic)
        if self.debug:
            print('Valid token : {}'.format(valid))
        return valid
    except urllib_error.HTTPError:
        return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def isValid(token):\n try:\n decoded = jwt.decode(token, SECRET_KEY)\n return True\n except:\n return False", "def __token_is_valid(self):\n\n if not self.__login_token or len(self.__login_token) < 10:\n # Token is not set or totally invalid\n return False\n\n try:\n jwt.decode(self.__login_token, verify = False)\n return True\n except:\n # Most likely the token is expired as `exp` is in the past\n return False", "def check_if_token_is_valid(token):\n if token is None:\n return\n try:\n jwt.decode(\n token,\n key=current_app.config['JWT_KEY'],\n audience=current_app.config['AUTH0_BASE_URL'] + '/api/v2/',\n issuer=current_app.config['AUTH0_BASE_URL'] + '/')\n except (jwt.JWTError,\n jwk.JWKError,\n jwt.ExpiredSignatureError,\n jwt.JWTClaimsError,\n AttributeError,\n AssertionError,\n IndexError):\n return False\n else:\n return True", "def validate_token():\n try:\n token = validate_auth()\n except Unauthorized:\n return jsonify(valid=False, expires_in=0)\n expires = oidc.user_getfield('exp')\n delta = expires - datetime.now().timestamp()\n return jsonify(valid=True, expires_in=delta)", "def validate_token(user, tkn):\n try:\n decoded = jwt.decode(tkn, KEY)\n if decoded['user'] == user:\n stored_token = User.get(User.username == user).token\n if stored_token == tkn:\n return True\n return False\n except jwt.ExpiredSignatureError:\n return HTTPResponse(status=400, body={\"msg\":\"Validation error.\"})", "def test_validate_token_returns_false_for_invalid_token(self, demo_app):\n demo_app.config.get.return_value = self.jwt_key\n token = jwt.encode({}, self.jwt_key_2, algorithm='HS256')\n\n self.assertFalse(\n validate_token(token)[0],\n 'Failed to recognise invalidate token.'\n )", "async def validate_token(self, token):", "def validate(cls, token):\n if not cls.JWT_REGEX.match(token):\n raise ValueError('Invalid JWT token')\n\n return token", "def test_validate_token(self, demo_app):\n demo_app.config.get.return_value = self.jwt_key\n token = jwt.encode({}, self.jwt_key, algorithm='HS256')\n\n self.assertTrue(\n validate_token(token)[0],\n 'Failed to validate token.'\n )", "def verify_token(self, token):\n return False", "def validate(self, data):\n try:\n payload = jwt.decode(data['token'], settings.SECRET_KEY, algorithms=['HS256'])\n except ExpiredSignatureError:\n raise serializers.ValidationError(\"The token has expired.\")\n except JWTError:\n raise serializers.ValidationError(\"Error validating token. 
Ensure is the right token.\")\n\n self.context['payload'] = payload\n return data", "def is_token_valid(self):\n try:\n token_details = jwt.decode(self.__token, verify=False)\n self.__admin_id = token_details[\"id\"]\n self.__username = token_details[\"username\"]\n expiry = token_details[\"expiry\"]\n if time.time() > expiry:\n raise TokenExpiredException\n cursor = self.__connection.cursor()\n cursor.execute(\n \"select password from neutron_admin_credential where admin_id=%s and username=%s\",\n (self.__admin_id, self.__username)\n )\n result = cursor.fetchone()\n if result is None:\n self.__message = \"Invalid id details\"\n return False\n passsword = result[\"password\"]\n admin_secret = passsword + get_admin_credential()\n jwt.decode(self.__token, key=admin_secret, verify=True)\n return True\n except jwt.DecodeError:\n self.__message = \"Invalid Token\"\n return False\n except KeyError:\n self.__message = \"Insecure Token\"\n return False\n except ValueError:\n self.__message = \"Insecure Token\"", "def verify_jwt(token):\n return jwt.decode(token.encode(), SECRET_KEY)", "def _validar_token(self):\n\n\t\ttoken = request.headers.get(\"Authorization\").split(\" \")[1]\n\n\t\tres = self.autenticador.validarToken(token)\n\t\tif(not res):\n\t\t\treturn False\n\t\treturn True", "def validate_token(self, data):\n try:\n payload = jwt.decode(data, settings.SECRET_KEY, algorithms=['HS256'])\n except jwt.ExpiredSignatureError:\n raise serializers.ValidationError('Verification link has expired')\n except jwt.PyJWTError:\n raise serializers.ValidationError('Invalid token.')\n\n if payload['type'] != 'email_confirmation':\n raise serializers.ValidationError('Invalid token.')\n\n self.context['payload'] = payload\n return data", "def validate_token(self, data):\n try:\n payload = jwt.decode(data, settings.SECRET_KEY, algorithm=['HS256'])\n except jwt.ExpiredSignatureError:\n raise serializers.ValidationError('Verification link has expired')\n except jwt.exceptions.PyJWTError:\n raise serializers.ValidationError('Invalidad token')\n if payload['type'] != 'email_confirmation':\n raise serializers.ValidationError('Invalid token')\n self.context['payload'] = payload\n return data", "def validate_token(self):\n r = requests.get(urljoin(self._url, Client._token_resource),\n params={\"tokenid\": self._token_id})\n\n if r.status_code == requests.status_codes.codes.unauthorized:\n raise ClientUnauthorized()\n elif r.status_code != requests.status_codes.codes.ok:\n error_messages = self._parse_invalid_request(r.text)\n raise ClientException(r.status_code, error_messages)\n\n try:\n type_, value = r.text.split(\"=\")\n value = value.strip(\" \\r\\n\")\n except Exception, e:\n raise ClientException(r.status_code,\n \"Some error has ocurred getting the result value from %s\"\n % r.text)\n\n return value == \"true\"", "def check_token_validate(self, token):\n payload = {'key': self._lr_object._get_api_key(), 'secret': self._lr_object._get_api_secret(), 'access_token': token}\n url = SECURE_API_URL + \"api/v2/access_token/Validate/\"\n return self._lr_object._get_json(url, payload)", "def validate_token(self, payload, headers, request):\n token = headers.get(self.TOKEN_NAME, \"\")\n\n # no token\n if self.verify == VerificationMethod.NONE:\n # do nothing as no method was chosen\n pass\n\n # static token\n elif self.verify == VerificationMethod.TOKEN:\n if not compare_digest(token, self.token):\n raise PermissionDenied(self.MESSAGE_TOKEN_ERROR)\n\n # hmac token\n elif self.verify == VerificationMethod.HMAC:\n 
digest = hmac.new(self.secret.encode('utf-8'), request.body, hashlib.sha256).digest()\n computed_hmac = base64.b64encode(digest)\n if not hmac.compare_digest(computed_hmac, token.encode('utf-8')):\n raise PermissionDenied(self.MESSAGE_TOKEN_ERROR)\n\n return True", "def validate_token(self, data):\n try:\n payload = jwt.decode(data, settings.SECRET_KEY, algorithms=['HS256'])\n except jwt.ExpiredSignatureError:\n raise serializers.ValidationError('Verification link has expired.')\n except jwt.PyJWTError:\n raise serializers.ValidationError('Invalid token')\n if payload['type'] != 'email_confirmation':\n raise serializers.ValidationError('Invalid token')\n\n self.context['payload'] = payload\n return data", "def validate_token():\n global vault_token\n global vault_token_time\n\n if vault_token is None:\n return False\n\n return datetime.datetime.now() < vault_token_time", "def check_token_structure(data):\n assert \"token\" in data\n token_structure = data[\"token\"]\n\n assert \"access_token\" in token_structure\n assert \"token_type\" in token_structure\n assert \"expires_in\" in token_structure", "def _validate_exp(self):\n now = timegm(datetime.utcnow().utctimetuple())\n\n if self.authtype == 'jwt':\n if not hasattr(self, 'token'):\n # I pass here only one time, when I request a token\n self.token = None\n return True\n payload = jwt.decode(self.token, verify=False)\n try:\n exp = int(payload['exp'])\n except ValueError:\n raise jwt.DecodeError('Expiration Time claim (exp) must be an'\n ' integer.')\n\n if exp < now:\n # raise jwt.ExpiredSignatureError('Signature has expired')\n return False\n else:\n self.s.auth = JWTAuth(self.token)\n return True\n else:\n return True", "def validate(cls, token, user, service):\n expected = cls.generate(user, service)\n return token == expected", "def verify_auth_token(token):\n serializer = Serializer(SECRET_KEY)\n try:\n data = serializer.loads(token)\n except SignatureExpired:\n return None # valid token, but expired\n except BadSignature:\n return None # invalid token\n return data['token']", "def verify_jwt(self, token: str):\n try:\n unverified_token = jwt.decode(token, verify=False)\n except DecodeError:\n logger.warning(f\"Failed to decode JWT without verification: {token}\", exc_info=True)\n raise NonDecodableTokenError(token)\n\n try:\n issuer = unverified_token['iss']\n except KeyError:\n raise InvalidTokenError(token)\n\n if not self.is_valid_issuer(issuer):\n logger.warning(f\"Detected a JWT with UNKNOWN ISSUER. 
({issuer})\", exc_info=True)\n raise InvalidTokenError(token)\n\n public_keys = self.get_public_keys(issuer)\n token_header = jwt.get_unverified_header(token)\n\n try:\n public_key_id = token_header[\"kid\"]\n except KeyError:\n raise InvalidTokenError(token)\n\n public_key = public_keys[public_key_id]\n verification_options = dict(key=public_key,\n issuer=issuer,\n audience=config.access_token_audience_list,\n algorithms=('RS256',))\n\n try:\n return jwt.decode(token, **verification_options)\n except PyJWTError:\n logger.warning('Detected a JWT with INVALID SIGNATURE.', exc_info=True)\n raise InvalidTokenError(token)", "def check_token(self, token):\n decoded_token = manage_tokens.decode(token)\n if decoded_token is None:\n return {'error': 'Token is invalid'}\n\n if 'email' not in decoded_token or 'expires' not in decoded_token \\\n or 'token' not in decoded_token:\n return {'error': 'Token is invalid'}\n\n self.email = decoded_token['email']\n self.user_in_db = User.users_db.get(decoded_token['email'])\n\n if not self.user_in_db:\n # User does not exist\n return {'error': 'User does not exist'}\n\n if self.user_in_db['token'] != decoded_token['token']:\n return {'error': 'Token is invalid'}\n\n if decoded_token['expires'] < time.time():\n return {'error': 'Token is expired'}\n\n return decoded_token", "def validate(validator: JwtValidator, raw_jwt: _raw_jwt.RawJwt) -> None:\n if validator.has_fixed_now():\n now = validator.fixed_now()\n else:\n now = datetime.datetime.now(tz=datetime.timezone.utc)\n if (raw_jwt.has_expiration() and\n raw_jwt.expiration() <= now - validator.clock_skew()):\n raise _jwt_error.JwtInvalidError('token has expired since %s' %\n raw_jwt.expiration())\n if (raw_jwt.has_not_before() and\n raw_jwt.not_before() > now + validator.clock_skew()):\n raise _jwt_error.JwtInvalidError('token cannot be used before %s' %\n raw_jwt.not_before())\n if validator.has_issuer():\n if not raw_jwt.has_issuer():\n raise _jwt_error.JwtInvalidError(\n 'invalid JWT; missing expected issuer %s.' % validator.issuer())\n if validator.issuer() != raw_jwt.issuer():\n raise _jwt_error.JwtInvalidError(\n 'invalid JWT; expected issuer %s, but got %s' %\n (validator.issuer(), raw_jwt.issuer()))\n if validator.has_subject():\n if not raw_jwt.has_subject():\n raise _jwt_error.JwtInvalidError(\n 'invalid JWT; missing expected subject %s.' % validator.subject())\n if validator.subject() != raw_jwt.subject():\n raise _jwt_error.JwtInvalidError(\n 'invalid JWT; expected subject %s, but got %s' %\n (validator.subject(), raw_jwt.subject()))\n if validator.has_audience():\n if (not raw_jwt.has_audiences() or\n validator.audience() not in raw_jwt.audiences()):\n raise _jwt_error.JwtInvalidError(\n 'invalid JWT; missing expected audience %s.' % validator.audience())\n else:\n if raw_jwt.has_audiences():\n raise _jwt_error.JwtInvalidError(\n 'invalid JWT; token has audience set, but validator not.')", "def verify_token(token):\n if config.API_TOKEN is None:\n logger.error(\n 'API token is not configured, auth will fail!')\n return token == config.API_TOKEN", "def validateAgentJWTToken(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')" ]
[ "0.8520996", "0.8204527", "0.80866903", "0.7793878", "0.7776991", "0.7762956", "0.77588475", "0.7680332", "0.7579834", "0.7543709", "0.74756145", "0.74731255", "0.74194485", "0.7321448", "0.7219745", "0.71922845", "0.7190927", "0.71824664", "0.7138779", "0.7125586", "0.7120983", "0.7116968", "0.7099627", "0.70848614", "0.70518714", "0.70331436", "0.70223606", "0.70134187", "0.70128095", "0.70103943" ]
0.8363303
1
Helper method to fetch response information via get_stations() and attach it to each trace in the stream.
def _attach_responses(self, st):
    netids = {}
    for tr in st:
        if tr.id not in netids:
            netids[tr.id] = (tr.stats.starttime, tr.stats.endtime)
            continue
        netids[tr.id] = (
            min(tr.stats.starttime, netids[tr.id][0]),
            max(tr.stats.endtime, netids[tr.id][1]))
    inventories = []
    for key, value in netids.items():
        net, sta, loc, chan = key.split(".")
        starttime, endtime = value
        try:
            inventories.append(self.get_stations(
                network=net, station=sta, location=loc, channel=chan,
                starttime=starttime, endtime=endtime, level="response"))
        except Exception as e:
            warnings.warn(str(e))
    st.attach_response(inventories)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def get_stations(response: Response,\n source: StationSourceEnum = StationSourceEnum.UNSPECIFIED):\n try:\n logger.info('/stations/')\n\n weather_stations = await get_stations_as_geojson(source)\n response.headers[\"Cache-Control\"] = no_cache\n\n return WeatherStationsResponse(features=weather_stations)\n except Exception as exception:\n logger.critical(exception, exc_info=True)\n raise", "def stations():\n print(\"server received request for stations data...\")\n return jsonify(stations_data)", "async def get_detailed_stations(response: Response,\n toi: datetime = None,\n source: StationSourceEnum = StationSourceEnum.WILDFIRE_ONE,\n __=Depends(audit),\n _=Depends(authentication_required)):\n try:\n logger.info('/stations/details/')\n response.headers[\"Cache-Control\"] = no_cache\n if toi is None:\n # NOTE: Don't be tempted to move this into the function definition. It's not possible\n # to mock a function if it's part of the function definition, and will cause\n # tests to fail.\n toi = get_utc_now()\n else:\n toi = get_hour_20(toi)\n weather_stations = await fetch_detailed_stations_as_geojson(toi, source)\n return DetailedWeatherStationsResponse(features=weather_stations)\n\n except Exception as exception:\n logger.critical(exception, exc_info=True)\n raise", "def get_stations():\n response = requests.get('https://api.hh.ru/metro/160')\n todos = json.loads(response.text)\n colors = {'CD0505': 'red'}\n all_stations_one_line = []\n\n for i in todos['lines']:\n all_stations_one_line = []\n\n for j in i['stations']:\n one_station = station.station()\n one_station.set_name(j['name'])\n one_station.set_color(colors.get(i['hex_color']))\n one_station.set_lat(j['lat'])\n one_station.set_lng(j['lng'])\n all_stations_one_line.append(one_station)\n return all_stations_one_line", "async def _fetch_raw_stations(session: ClientSession, headers: dict, query_builder: BuildQuery) -> dict:\n # We don't know how many pages until our first call - so we assume one page to start with.\n total_pages = 1\n page_count = 0\n while page_count < total_pages:\n # Build up the request URL.\n url, params = query_builder.query(page_count)\n LOGGER.debug('loading station page %d...', page_count)\n async with session.get(url, headers=headers, params=params) as response:\n station_json = await response.json()\n LOGGER.debug('done loading station page %d.', page_count)\n # Update the total page count.\n total_pages = station_json['page']['totalPages']\n for station in station_json['_embedded']['stations']:\n yield station\n # Keep track of our page count.\n page_count = page_count + 1", "def stations(self):\n try:\n stations_api = requests.get(self._stations_url)\n stations = {}\n for station in stations_api.json():\n station_id = station['id']\n station_name = station['name']\n stations[station_id] = station_name\n\n return stations\n except (RequestException, KeyError) as exc:\n LOG.error('could not read from api: %s', exc)\n raise SlfError('could not read from api: %s' % exc) from None", "def stations():\n\n return station_list", "def lineup_xml() -> Response:\n watch = \"watch_direct\" if config.direct else \"watch\"\n xml = render_template('lineup.xml',\n stations=locast_service.get_stations(),\n url_base=host_and_port,\n watch=watch).encode(\"utf-8\")\n return Response(xml, mimetype='text/xml')", "def epg() -> Response:\n return jsonify(locast_service.get_stations())", "def prep_stations(url):\n stations = []\n _stations = requests.get(url).json()\n\n for _station in _stations['stationBeanList']:\n if 
_station['statusKey'] == 1:\n stations.append([_station['stationName'], _station['id'],\n _station['availableDocks'], _station['totalDocks'],\n _station['latitude'], _station['longitude']])\n\n return stations", "def view_station(request,station_id):\n station_url = settings.SODOR_ENDPOINT + 'station/' + str(int(station_id)) + '.json'\n context = {}\n try:\n station_data = client.load(station_url)\n except KeyError:\n return HttpResponseNotFound('Station not found')\n\n context['station'] = station_data.content\n\n # check children callsigns\n # do NOT assume flagship is (all) that we want - that is a bad assumption\n # e.g. WFSU has two children callsigns\n flagship_obj = station_data.related('flagship')\n flagship_callsign = flagship_obj.content.callsign\n children_callsigns = station_data.related('children')\n\n feeds = []\n callsigns = []\n context['callsign'] = flagship_callsign\n context['callsigns'] = []\n updated_callsigns = []\n\n for callsign_obj in children_callsigns.items():\n \"\"\"iterate thru callsigns\"\"\"\n if callsign_obj.content.callsign == flagship_callsign:\n callsign_obj.is_flagship = 'True'\n else:\n callsign_obj.is_flagship = None\n\n updated_callsigns.append(callsign_obj)\n callsigns.append(callsign_obj.content.callsign)\n\n children_feeds = callsign_obj.related('children')\n\n if children_feeds.self:\n for feed in children_feeds.items():\n feed_obj = {}\n # over the air channel\n # aka subchannel\n ota_channel = feed.related('summary').content\n feed_obj['ota_channel'] = ota_channel\n if callsign_obj.content.callsign == flagship_callsign:\n feed_obj['is_callsign'] = 'True'\n else:\n feed_obj['is_callsign'] = None\n feeds.append(feed_obj)\n\n feeds_by_flagship = sorted(feeds, key=itemgetter('is_callsign'),\n reverse=True)\n callsigns_by_flagship = sorted(updated_callsigns,\n key=attrgetter('is_flagship'), reverse=True)\n context['feeds'] = feeds_by_flagship\n context['callsigns'] = callsigns_by_flagship\n context = render_todays_listings(request, context, callsigns)\n\n return render_to_response(\n 'view_station.html',\n context,\n context_instance = RequestContext(request)\n )", "def get_events_stations(\n fname_all_geoNet_stats=None,\n loc_all_geoNet_stats=None,\n loc_Vol1=\"/\".join([os.getcwd(), \"Vol1\"]),\n save_stats=False,\n fname=None,\n loc=os.getcwd(),\n):\n\n all_geoNet_stats = read_statsll(loc_all_geoNet_stats, fname_all_geoNet_stats)\n\n event_stats_V1A = glob(\"/\".join([loc_Vol1, \"data\", \"*.V1A\"]))\n event_stats_V1A = [os.path.basename(_) for _ in event_stats_V1A]\n\n event_stats = {}\n for V1A_file in event_stats_V1A:\n # year, event_id, stat_code, V1A = V1A_file.split(\".\")[0].split(\"_\")\n split_file_name = V1A_file.split(\".\")[0].split(\"_\")\n year, event_id, stat_code = split_file_name[0:3]\n event_stats[stat_code] = (None, None)\n if all_geoNet_stats.has_key(stat_code):\n event_stats[stat_code] = all_geoNet_stats[stat_code]\n\n if save_stats == True:\n # assert fname is not None, \"Specify name of station file to save\"\n # assert loc is not None, \"Specify location for station file to save\"\n if fname is None:\n fname = \"_\".join([year, event_id, \"eventStats\", str(datetime.date.today())])\n fname += \".ll\"\n with open(\"/\".join([loc, fname]), \"w\") as f:\n for key, value in event_stats.items():\n if value[0] is None:\n print(\n \"{:10s} not found in all_geoNet_stats, add this manually to event_stats.ll\".format(\n key\n )\n )\n else:\n line = \"{:10.4f} {:10.4f} {:10s}\".format(\n value[0], value[1], key\n )\n f.write(line + 
\"\\n\")\n\n return event_stats, fname", "def collect_stations(self):\n # First, iterate provinces and build url's\n site = urllib.request.urlopen(self.base_url)\n\n # Check that the site is still valid or operating by collecting a list of provinces\n print(\"Collecting provinces\")\n provinces = [s[9:11] for s in re.findall('<a href=\"../\">../</a>', site.read())]\n\n # Iterate provinces and collect list of available times\n print(\"Collecting time periods and station ID's\")\n self.stations = defaultdict(dict)\n for prov in provinces:\n site = urllib.request.urlopen(self.build_url(prov))\n expression = '<a href=\"[hd][a-zA-Z]*/\">[hd][a-zA-Z]*/</a>'\n times = [s.split('>')[1].split('<')[0].replace('/', '') for s in re.findall(expression, site.read())]\n\n # Iterate times and collect the station ID's\n for time in times:\n site = urllib.request.urlopen(self.build_url(prov, time))\n expression = '<a href=\"{0}_[a-zA-Z0-9]*_{1}_hydrometric.csv\">{0}_[a-zA-Z0-9]*_{1}_hydrometric.csv</a>'\n expression = expression.format(prov.upper(), time.lower())\n stations = [s.split('_')[1] for s in re.findall(expression, site.read())]\n self.stations[prov][time] = stations", "def get_stations(self):\n return self.__request('stations')['stations']", "def __init__ (self, msname, inverse = False, useElementResponse = True,\n useArrayFactor = True, useChanFreq = False):\n self._response = _stationresponse.StationResponse(msname, inverse,\n useElementResponse, useArrayFactor, useChanFreq)", "def parse(self, response):\n theater_list = response.xpath('//div[@class=\"theater_info\"]//li/a')\n for theater_element in theater_list:\n curr_cinema_url = theater_element.xpath(\n './@href').extract_first()\n cinema_name = theater_element.xpath('./text()').extract_first()\n if not cinema_name:\n # partner theater element is different\n cinema_name = ''.join(theater_element.xpath(\n './/text()').extract())\n else:\n curr_cinema_url = response.urljoin(curr_cinema_url)\n data_proto = ShowingLoader(response=response)\n data_proto.add_cinema_name(cinema_name)\n cinema_name = data_proto.get_output_value('cinema_name')\n data_proto.add_cinema_site(curr_cinema_url, cinema_name)\n data_proto.add_value('source', self.name)\n if not self.is_cinema_crawl([cinema_name]):\n continue\n request = scrapy.Request(\n curr_cinema_url, callback=self.parse_cinema)\n request.meta[\"data_proto\"] = data_proto.load_item()\n yield request", "def stations(self):\n for stat in sorted(self.station_records):\n yield self.station_records[stat]", "def _get_stations_local() -> List[dict]:\n LOGGER.info('Using pre-generated json to retrieve station list')\n with open(weather_stations_file_path) as weather_stations_file:\n json_data = json.load(weather_stations_file)\n return json_data['weather_stations']", "def add_stations(stations, pool):\n\n for station in stations:\n\n print(add_station(pool=pool, name=station.get('name'), latitude=station.get('latitude'),\n longitude=station.get('longitude'), station_type=station.get('station_type'),\n description=station.get('description')))\n print(station.get('name'))", "async def stations():\n with open(\"/data/station_list.json\") as j:\n data = json.load(j)\n return data", "def get_stations(self, limit=250):\n\n endpoint = \"/station/getStations\"\n response = self._send(endpoint, \"POST\", {\"pageSize\": limit})\n stations = response.json()[\"stations\"]\n return stations", "async def _get_stations_remote() -> List[WeatherStation]:\n LOGGER.info('Using WFWX to retrieve station list')\n async with 
ClientSession() as session:\n # Get the authentication header\n header = await _get_auth_header(session)\n stations = []\n # Iterate through \"raw\" station data.\n async for raw_station in _fetch_raw_stations(session, header, BuildQueryAllStations()):\n # If the station is valid, add it to our list of stations.\n if _is_station_valid(raw_station):\n LOGGER.info('Processing raw_station %d',\n int(raw_station['stationCode']))\n stations.append(_parse_station(raw_station))\n LOGGER.debug('total stations: %d', len(stations))\n return stations", "def receiver():\n def generate(entities_to_proceed):\n \"\"\"Process list of entities populating them with altitude data\"\"\"\n yield \"[\"\n for index, entity in enumerate(entities_to_proceed):\n if logging.getLogger().isEnabledFor(logging.DEBUG):\n logging.debug(\"processing entity : %s\", entity)\n else:\n logging.info(\"processing entity : %s\", entity.get(GUID_STR))\n\n if index > 0:\n yield \",\"\n booking_guid = entity.get(GUID_STR)\n iata = entity.get(IATA_STR)\n api_key = resolve_api_key(API_KEYS, iata)\n\n if not isinstance(api_key, str):\n entity[PROP] = []\n yield json.dumps(entity)\n continue\n url = URL_TEMPLATE.render(entity) + booking_guid + \"?api_key=\" + api_key\n if METHOD == \"get\":\n entity[PROP] = requests.get(url, headers=HEADERS).json()\n else:\n entity[PROP] = requests.request(METHOD, url, data=entity.get(\"payload\"),\n headers=HEADERS).json()\n yield json.dumps(entity)\n yield \"]\"\n\n # get entities from request\n entities = request.get_json()\n\n # create the response\n logging.debug(\"Processing %i entities\", len(entities))\n return Response(generate(entities), mimetype='application/json')", "def trailers_received(self, event):\n super().trailers_received(event)\n\n stream_id = event.stream_id\n response_stream = self.receive_streams.get(stream_id)\n if response_stream is None:\n self.conn.reset_stream(stream_id, error_code=ErrorCodes.PROTOCOL_ERROR)\n return\n\n trailers = response_stream.trailers\n\n if int(trailers.get(\"grpc-status\", 0)) > 0:\n error = GrpcError.from_headers(trailers)\n response_stream.close(error)\n del self.receive_streams[stream_id]", "def _getGather(self):\r\n if self.gather is None:\r\n print ('loading traces')\r\n if DEBUG:\r\n start_time = time.time()\r\n\r\n nChannels = len(self.channelRange)\r\n print(self.channelRange)\r\n traceList = [None]*nChannels\r\n #\r\n if METHOD==1:\r\n #demean all traces\r\n self.st.detrend('constant')\r\n #detrend\r\n self.st.detrend('linear')\r\n #\r\n #taper all traces on both sides\r\n #self.st.taper(max_percentage=0.05, type='cosine')\r\n print ('original sample rate is', self.st[0].stats.sampling_rate)\r\n self.sampRate = self.st[0].stats.sampling_rate /self.dsfactor\r\n print ('new sample rate is ', self.sampRate)\r\n #self.st.decimate(self.dsfactor)\r\n #process traces in parallel\r\n \r\n with Parallel(n_jobs=12) as parallelPool:\r\n traceList = parallelPool(delayed(getSingleTrace)\r\n (self.st[channelNo], \r\n self.sampRate,\r\n self.isIntegrate)\r\n for channelNo in self.channelRange)\r\n\r\n self.traceList = traceList\r\n self.st = obspy.Stream(traceList) \r\n elif METHOD==2:\r\n #do simple filtering as in Ariel Lellouch paper\r\n #self.st = utils.medianSubtract(self.st)\r\n self.st.detrend('constant')\r\n self.st.detrend('linear')\r\n self.st.filter('bandpass',freqmin=10,freqmax=150)\r\n if self.dsfactor>1:\r\n self.sampRate = self.st[0].stats.sampling_rate /self.dsfactor\r\n self.st.decimate(self.dsfactor, no_filter=True)\r\n 
print(self.channelRange)\r\n self.traceList=[self.st[channelNo] for channelNo in self.channelRange]\r\n \r\n if DEBUG:\r\n print ('processing time is ', time.time()-start_time)", "def lineup_json() -> Response:\n watch = \"watch_direct\" if config.direct else \"watch\"\n\n return jsonify([{\n \"GuideNumber\": station.get('channel_remapped') or station['channel'],\n \"GuideName\": station['name'],\n \"URL\": f\"http://{host_and_port}/{watch}/{station['id']}\"\n } for station in locast_service.get_stations()])", "def stations():\n results = session.query(Station.station,Station.name).all()\n key=[results[i][0] for i in range(len(results))]\n values=[results[i][1] for i in range(len(results))]\n results=dict(zip(key,values))\n print(f\"Route /api/v1.0/stations is being visited\")\n return jsonify(results)", "async def get_stations() -> List[WeatherStation]:\n # Check if we're really using the api, or loading from pre-generated files.\n use_wfwx = config.get('USE_WFWX') == 'True'\n if use_wfwx:\n return await _get_stations_remote()\n return _get_stations_local()", "def stations(update, context):\n db_helper.insert_chat_id(update.effective_chat.id)\n message = processor.process_stations_chat(update, context)\n processor.send_message(update, context, message)", "def get_data(link):\n data = re.get(link)\n jsondata = data.json()\n for weatherstation in jsondata['weatherStations']:\n FetchandStore.sensordict.update({weatherstation[\"id\"]:weatherstation[\"sensorValues\"]})\n for sensorvalue in weatherstation[\"sensorValues\"]:\n FetchandStore.sensors.append({\"id\": sensorvalue[\"roadStationId\"], \"name\": sensorvalue[\"oldName\"],\n \"value\": sensorvalue[\"sensorValue\"], \"unit\": sensorvalue[\"sensorUnit\"],\n \"datetime\": sensorvalue[\"measuredTime\"]})\n return FetchandStore.sensors" ]
[ "0.6360263", "0.6021909", "0.59900856", "0.5956896", "0.5865758", "0.56059825", "0.5558574", "0.548664", "0.5460832", "0.54202706", "0.53476536", "0.5343625", "0.5334587", "0.5327473", "0.52983934", "0.52805454", "0.5267272", "0.525761", "0.52528024", "0.52523386", "0.5234326", "0.52224135", "0.5195955", "0.51932204", "0.5172995", "0.51529455", "0.5140023", "0.5137015", "0.51342463", "0.513179" ]
0.7019285
0
Get the full version information of the web service as a string.
def _get_webservice_versionstring(self, service):
    version = self.get_webservice_version(service)
    return ".".join(map(str, version))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_version(self):\n return self.http_call(\"get\", url=f\"{self.base_url}/version\").json()", "def GetVersion(self):\n return self._SendRequest(HTTP_GET, \"/version\", None, None)", "def version():\n version_info = pbr.version.VersionInfo('ardana-service')\n return version_info.version_string_with_vcs()", "def get_version(self):\n url = '{}/version'.format(self.url)\n try:\n r = requests.get(url)\n if r.status_code == 200:\n return r.json()['version']\n except Exception as e:\n pass\n return ''", "def get_version():\n return about.get_version()", "def get_version(self):\n url = '{}/v2/version'.format(self.url)\n try:\n r = requests.get(url)\n if r.status_code == 200:\n return r.json()['version']\n except Exception as e:\n pass\n return ''", "def get_version(self):\n return self.__make_api_call('get/version')", "def get_version(self):\n res = requests.get(self.base_url + '/version')\n\n return res", "def get_version(self):\n url = '{}/version'.format(self.url)\n try:\n r = requests.get(url)\n if r.status_code == 200:\n return r.json()['orionld version']\n except Exception as e:\n pass\n return ''", "def get_version(self) -> str:\n return versioning.get_version()", "def version(self):\n _, body = self.request('/', 'GET')\n return body.get('version', None)", "def getVersionInfo(cls):\n\n return __version__ + \"\\n\"", "def get_api_version(self):\n return self.connector.request('GET', '/app/webapiVersion')", "def get_version():\n return '%d.%d.%d' % version_info", "def version(self) -> str:\n data = \"none yet\"\n if self.STARTED:\n data = (\n self.about.get(\"Version\")\n or self.about.get(\"Installed Version\")\n or \"DEMO\"\n )\n data = data.replace(\"_\", \".\")\n return data", "def get_api_version(self):\n from webapi import VERSION\n return '.'.join(map(str, VERSION))", "def get_version():\n return '.'.join(map(str, VERSION))", "def version_string(self):\n return self.server_version", "def get_version_info(self):\n return self._jadeRpc('get_version_info')", "def get_version(self) -> Dict[str, str]:\n return self.http.get(self.config.paths.version)", "def get_version():\n return \"0.0.1 (prerelease prototype)\"", "def version(self):\n version = self.get_rpc().getnetworkinfo()[\"subversion\"]\n version = version.replace(\"/\", \"\").replace(\"Satoshi:\", \"v\")\n return version", "def get_version():\n vers = [\"%(major)i.%(minor)i\" % __version_info__, ]\n\n if __version_info__['micro']:\n vers.append(\".%(micro)i\" % __version_info__)\n if __version_info__['releaselevel'] != 'final':\n vers.append('%(releaselevel)s' % __version_info__)\n return ''.join(vers)", "def get_version() -> str:\n return __version__", "def version_get():\n try:\n return json_response.success({'version': version.local_version()})\n except version.Error as e:\n return json_response.error(str(e)), 200", "def getVersionString():\n return str(version_gen.major) + \".\" + str(version_gen.minor) + \".\" + str(version_gen.compilation)", "def get_version(self):\r\n if not self.endpoint_checker(self.endpointurl):\r\n raise Exception(\"Please use a valid ESRI REST url\")\r\n\r\n parsedurl = urlparse(self.endpointurl)\r\n print(f\"{parsedurl.scheme}://{parsedurl.netloc}/arcgis/rest/services/?f=pjson\")\r\n req = requests.get(\r\n f\"{parsedurl.scheme}://{parsedurl.netloc}/arcgis/rest/services/?f=pjson\"\r\n )\r\n\r\n if req.status_code == 200:\r\n try:\r\n return req.json()[\"currentVersion\"]\r\n except KeyError:\r\n try:\r\n req = requests.get(\r\n self.endpointurl.split(\"services/\")[0] + \"services/?f=pjson\"\r\n 
)\r\n return req.json()[\"currentVersion\"]\r\n except Exception as e:\r\n raise e\r\n raise Exception(\r\n f\"An Error occurred retrieving vital information, the response status {str(req.status_code)} associate with {req.json()['error']['message']}\"\r\n )", "def _get_version(self):", "async def version(self) -> str:\n response = await self._request(\"status\")\n return response[\"version\"]", "def get_api_version(self):\n major, minor, patch = self.client.config['api_version']\n return '%s.%s.%s' % (major, minor, patch)" ]
[ "0.75000274", "0.7449257", "0.73129904", "0.7277117", "0.7266704", "0.7241125", "0.72253454", "0.7220058", "0.7202378", "0.7146718", "0.7130265", "0.711573", "0.71138686", "0.71088123", "0.7105085", "0.7094832", "0.7075875", "0.69911677", "0.69908917", "0.6990632", "0.69789076", "0.6966781", "0.69550365", "0.694457", "0.6916093", "0.69060606", "0.6896615", "0.68218845", "0.6819743", "0.68087775" ]
0.7781521
0
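The record above joins a version tuple into a dotted string. As a quick illustration, here is a minimal standalone sketch of that join, assuming `get_webservice_version` returns a tuple of ints such as (1, 2, 3); the tuple value below is invented for the example.

    # Hypothetical standalone sketch of the join in _get_webservice_versionstring;
    # the (1, 2, 3) tuple is an assumed example value, not taken from the record.
    def versionstring(version):
        # ".".join only accepts strings, hence map(str, ...) over the int tuple.
        return ".".join(map(str, version))

    print(versionstring((1, 2, 3)))  # -> 1.2.3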
Attaches the actually used dataselect URL to each Trace.
def _attach_dataselect_url_to_stream(self, st): url = self._build_url("dataselect", "query") for tr in st: tr.stats._fdsnws_dataselect_url = url
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __traces_url(self):\n path = AGENT_TRACES_PATH % self.from_.pid\n return \"http://%s:%s/%s\" % (self.host, self.port, path)", "def getDataUrls(self):\n sub1 = self.id[0:3]\n sub2 = self.id[3:6]\n sub3 = self.id[6:9]\n self.xml = \"%s/static/model/%s/%s/%s/%s.xml\" % (serverString, sub1, sub2, sub3, self.id)\n self.image = \"%s/static/image/%s/%s/%s/%s_lrg.png\" % (serverString, sub1, sub2, sub3, self.id)\n self.thumb = \"%s/static/thumb/%s/%s/%s/%s.png\" % (serverString, sub1, sub2, sub3, self.id)", "def trailers(self, trailers):\n\n self._trailers = trailers", "def __data_url(self):\n path = AGENT_DATA_PATH % self.from_.pid\n return \"http://%s:%s/%s\" % (self.host, self.port, path)", "def urls(self, urls):\n\n self._urls = urls", "def set_dbgap_link(self):\n return self.DATASET_URL.format(self.source_study_version.full_accession, self.i_accession)", "def addHttp(self, d):\n self.__populateDict(self._http, d)", "def link_dihedra(self, verbose: bool = ...) -> None:\n ...", "def link(cls, traceparent: str, attributes: Optional[Attributes] = None) -> None:\n cls.link_from_headers({\"traceparent\": traceparent}, attributes)", "def __init__(self):\n self._urls = []", "def _set_url(self): \n self.url = self.geturl()", "def trace(self, trace=...):\n ...", "def save(self, *args, **kwargs):\n self.full_accession = self.set_full_accession()\n self.dbgap_link = self.set_dbgap_link()\n super(SourceDataset, self).save(*args, **kwargs)", "def data_url(self):\n raise NotImplementedError", "def data_url(self):\n raise NotImplementedError", "def add_site_url(self):\n #TODO handle ssl properly.\n #This is wrong. Want to send to .sslwhitetrash when the *request* is SSL.\n# if self.redirect_to_ssl:\n# return self._frame_response(\"{protocol}://{domain}.ssl{hostname}:{ssl_port}\" \\\n# \"/addentry?\".format(**self._formatdict()))\n# else:\n return self._frame_response(\"{protocol}://{hostname}/whitelist/addentry?url={url}&\" \\\n \"domain={domain}\".format(**self._formatdict()))", "def add_normal_urls(self, dm):\n for i in dm.links:\n if self.seen.is_new(i):\n data = (dm.depth + 1, i)\n self.normal_store.push(data, dm.depth + 1)\n else:\n self.seen.incr_n(i)", "def initiate(self):\n\n for item in config.WEATHER_PROVIDERS[self.title]:\n self.__setattr__(item, config.WEATHER_PROVIDERS[self.title][item])\n\n # RP5 and Sinoptik have same URLs for hourly and next day weather info\n if self.title in ('RP5', 'Sinoptik'):\n self.URL_hourly = self.URL\n self.URL_next_day = self.URL\n\n self.logger = self._get_logger(self.title, self.app.args.verbosity)", "def addLinks(self, data, package):\n self.db.addLinks(data, package, OWNER)\n self.evm.dispatchEvent(\"packageUpdated\", package)", "def hyperlinks(self, hyperlinks):\n\n self.container['hyperlinks'] = hyperlinks", "def configureTrace(self,traceString):\n configureTrace(traceString)", "def addAttachment(self,data,url,name,description):\n if self.PDFreactorConfiguration.in1[\"attachments\"] == None:\n self.PDFreactorConfiguration.in1[\"attachments\"] = []\n \n stylesArray = {'data':data, 'url':url, 'name':name, 'description':description}\n \n self.PDFreactorConfiguration.in1[\"attachments\"].append(stylesArray)", "def storeUrl(self):\n self.links_list = []\n for x in range(len(self.identificationParams)):\n self.links_list.append(str(self.identificationParams[x][0]) + '.' 
+ str(self.identificationParams[x][1]))\n print(\"Links created\")\n with open('shopee_urls.txt', 'w') as url_storage:\n for link in self.links_list:\n url_storage.write(link + '\\n')\n return 'shopee_urls.txt'", "def links(self, links):\n\n self.container['links'] = links", "def links(self, links):\n self._links = links", "def set_url(self, url):\n super(Cabling, self).set_url(url)", "def register_trace_listener(self, l):\n self.listeners['trace'].append(l)", "def add_reference(self, dataset=None):\n if not dataset:\n raise aspecd.exceptions.MissingDatasetError\n dataset_reference = aspecd.dataset.DatasetReference()\n dataset_reference.from_dataset(dataset=dataset)\n self.references.append(dataset_reference)", "def prepare_presigned_url_audit_log(protocol, indexed_file):\n resource_paths = indexed_file.index_document.get(\"authz\", [])\n if not resource_paths:\n # fall back on ACL\n resource_paths = indexed_file.index_document.get(\"acl\", [])\n if not protocol and indexed_file.indexed_file_locations:\n protocol = indexed_file.indexed_file_locations[0].protocol\n flask.g.audit_data[\"resource_paths\"] = resource_paths\n flask.g.audit_data[\"protocol\"] = protocol", "def datasets(self, datasets):\n\n self._datasets = datasets" ]
[ "0.58800995", "0.557478", "0.5461198", "0.5454662", "0.5379516", "0.5288198", "0.51027226", "0.50803655", "0.5057965", "0.50400174", "0.500458", "0.4987462", "0.49711695", "0.49326184", "0.49326184", "0.49260262", "0.49156776", "0.4909598", "0.4874118", "0.48735803", "0.48605755", "0.4851914", "0.48413795", "0.4827905", "0.4818676", "0.48158312", "0.48108846", "0.479753", "0.4790654", "0.4773607" ]
0.58549017
1
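A runnable sketch of the attach pattern from the record above, with plain `SimpleNamespace` objects standing in for ObsPy's Trace/Stats (an assumption made so the example needs no ObsPy install); the URL is a made-up placeholder.

    # SimpleNamespace stands in for ObsPy Trace/Stats; the URL is hypothetical.
    from types import SimpleNamespace

    def attach_dataselect_url(stream, url):
        # Same shape as the record: one shared URL written onto every trace's stats.
        for tr in stream:
            tr.stats._fdsnws_dataselect_url = url

    stream = [SimpleNamespace(stats=SimpleNamespace()) for _ in range(2)]
    attach_dataselect_url(stream, "http://service.example/fdsnws/dataselect/1/query")
    print(stream[0].stats._fdsnws_dataselect_url)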
Takes any value and converts it to a string compliant with the FDSN webservices. Will raise a TypeError if the value could not be converted. >>> print(convert_to_string("abcd")) abcd >>> print(convert_to_string(1)) 1 >>> print(convert_to_string(1.2)) 1.2 >>> print(convert_to_string( \
UTCDateTime(2012, 1, 2, 3, 4, 5, 666666)))
2012-01-02T03:04:05.666666
def convert_to_string(value): if isinstance(value, str): return value # Boolean test must come before integer check! elif isinstance(value, bool): return str(value).lower() elif isinstance(value, int): return str(value) elif isinstance(value, float): return str(value) elif isinstance(value, UTCDateTime): return str(value).replace("Z", "") else: raise TypeError("Unexpected type %s" % repr(value))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_to_string(value: Any) -> str:\n if isinstance(value, str):\n return value\n\n if isinstance(value, bytes):\n return value.decode(\"utf-8\")\n\n return str(value)", "def convert_to_str(input_string):\n\n if sys.version < '3':\n\n if isinstance(input_string, str) \\\n or isinstance(input_string, unicode): # pragma: no cover py3\n\n return input_string # pragma: no cover py3\n\n else:\n\n if isinstance(input_string, str): # pragma: no cover py3\n\n return input_string # pragma: no cover py3\n\n return str(input_string)", "def convert_to_str(value):\n\tif value is None:\n\t\treturn '-'\n\treturn str(value)", "def time_to_str(value: datetime) -> str:\n\n if value is None or not isinstance(value, datetime):\n raise ValueError(\"provided value is not a valid datetime object\")\n\n return value.strftime(\"%Y-%m-%dT%H:%M:%SZ\")", "def to_string(value: Any) -> str:\n return StringConverter.to_string_with_default(value, '')", "def make_string(value):\n if value:\n return str(value)\n return None", "def date_to_string(date_to_convert: date) -> str:\n return date.strftime(date_to_convert, \"%m/%d/%Y\")", "def to_string(value):\n if isinstance(value, encodingutils.binary_type):\n return encodingutils.bytes_to_string(value)\n else:\n return encodingutils.text_type(value)", "def convert_date_to_string(date_input):\n if isinstance(date_input, date):\n return date_input.strftime(\"%Y-%m-%d\")\n else:\n raise TypeError(\"Input {0} is not a date object\".format(type(date_input)))", "def date2str(datetime_object):\n if datetime_object is None:\n return 'None'\n return datetime_object.strftime('%Y-%m-%dT%H:%M:%S.%f')[0:-3]", "def convert_to_str(string):\n if type(string) is str:\n return string\n else:\n return bytes.decode(string)", "def to_basestring(value):\r\n if value is None:\r\n return 'None'\r\n if isinstance(value, _BASESTRING_TYPES):\r\n return value\r\n elif isinstance(value, unicode_type):\r\n return value.decode(\"utf-8\")\r\n else:\r\n return str(value)", "def test_to_String(self) -> None:\n assert to_String(1) == \"1\", to_String(1)\n assert to_String([1, 2, 3]) == str([1, 2, 3]), to_String([1, 2, 3])\n assert to_String(\"foo\") == \"foo\", to_String(\"foo\")\n assert to_String(None) == 'None'\n # test low level string converters too\n assert to_str(None) == 'None'\n assert to_bytes(None) == b'None'\n\n s1 = UserString('blah')\n assert to_String(s1) == s1, s1\n assert to_String(s1) == 'blah', s1\n\n class Derived(UserString):\n pass\n\n s2 = Derived('foo')\n assert to_String(s2) == s2, s2\n assert to_String(s2) == 'foo', s2", "def to_nullable_string(value: Any) -> Optional[str]:\n if value is None:\n return None\n if type(value) == datetime.date:\n return value.isoformat()\n if type(value) == datetime.datetime:\n if value.tzinfo is None:\n return value.isoformat() + \"Z\"\n else:\n return value.isoformat()\n\n if type(value) == list:\n builder = ''\n for element in value:\n if len(builder) > 0:\n builder = builder + \",\"\n builder = f'{builder}{element}'\n\n return builder\n return str(value)", "def to_str(val, default=None):\n str_val = default\n\n try:\n if not is_empty(val):\n str_val = str(val)\n except Exception as e:\n pass\n\n return str_val", "def myconverter(o: object):\n if isinstance(o, datetime.datetime):\n return o.__str__()", "def datetime_to_str(obj):\n if not obj:\n return False\n return obj.strftime(DEFAULT_SERVER_DATETIME_FORMAT)", "def datetime_to_str(dt: Union[str, datetime]) -> str:\n if isinstance(dt, datetime):\n dt = 
dt.replace(tzinfo=timezone.utc).isoformat(\" \").replace(\"+00:00\", \"Z\")\n return dt", "def _coerce_string_value(self, value):\n # coerce bool before int as python says a bool is an int\n if isinstance(value, bool):\n # coerce bool to str type\n self.log.warning(f'Coercing bool value ({value}) to a string (\"{str(value).lower()}\").')\n value = str(value).lower()\n\n # coerce int to str type\n if isinstance(value, (float, int)):\n self.log.warning(f'Coercing float/int value ({value}) to a string (\"{str(value)}\").')\n value = str(value)\n\n return value", "def to_str(value: bytes) -> str:\n if value is not None:\n out = value.decode(\"utf-8\")\n else:\n out = \"\"\n\n return out", "def dto_to_str(dto):\n\n try:\n return datetime.strftime(dto, DATE_FORMAT_1)\n except ValueError:\n raise", "def converter_callback(self, value):\n if isinstance(value, datetime.datetime):\n return value.__str__()", "def dateToString(dateToConvert):\n return \"\".join(\n str(i) for i in [\n dateToConvert.year,\n dateToConvert.month,\n dateToConvert.day\n ]\n )", "def _to_str(s, encoding=\"utf8\", errors=\"ignore\"):\n if isinstance(s, bytes):\n return s.decode(encoding=encoding, errors=errors)\n return str(s)", "def to_str(variable):\n try:\n int(variable)\n return str(variable)\n except ValueError:\n return variable", "def to_str(s, encoding='utf-8', strings_only=False, errors='strict'):\n\n if strings_only and isinstance(s, (types.NoneType, int)):\n return s\n\n if not isinstance(s, basestring):\n try:\n return str(s)\n except UnicodeEncodeError:\n if isinstance(s, Exception):\n # An Exception subclass containing non-ASCII data that doesn't\n # know how to print itself properly. We shouldn't raise a\n # further exception.\n return ' '.join([smart_str(arg, encoding, strings_only,\n errors) for arg in s])\n return unicode(s).encode(encoding, errors)\n elif isinstance(s, unicode):\n return s.encode(encoding, errors)\n elif s and encoding != 'utf-8':\n return s.decode('utf-8', errors).encode(encoding, errors)\n else:\n return s", "def to_str(value):\n if value is None:\n return \"\"\n if str(value) == value:\n return value\n try:\n return value.to_str()\n except AttributeError:\n try:\n return \"\\n\".join(to_str(v) for v in value)\n except TypeError:\n return str(value)", "def convert(cls, value: Any) -> Optional[str]:\n # Can be optional\n if value is None:\n return None\n\n cls.assert_value_ok(isinstance(value, str), value)\n\n return value", "def to_str(x) -> str:\n return str(x) if x else ''", "def datetime_to_str(date_time):\n\n\treturn (date_time.strftime(\"%Y-%m-%dT%H:%M:%S.%fZ\"))" ]
[ "0.6892246", "0.6521637", "0.64140946", "0.6409152", "0.6387546", "0.630994", "0.6246186", "0.62287396", "0.6162186", "0.61320966", "0.61231935", "0.6070894", "0.6000176", "0.5963317", "0.59516305", "0.5947143", "0.5946218", "0.589599", "0.58766305", "0.5855638", "0.5848334", "0.5824616", "0.5820086", "0.5819021", "0.5797546", "0.57966805", "0.57656014", "0.5753356", "0.5747344", "0.5743988" ]
0.7913156
0
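The record's inline comment flags the one real trap here: `bool` is a subclass of `int`, so the boolean branch must run first. Below is a dependency-free sketch of the same dispatch, with `datetime` standing in for ObsPy's UTCDateTime (an assumption; the real branch strips the trailing "Z").

    # datetime stands in for UTCDateTime (assumption); bool must precede int
    # because isinstance(True, int) is True in Python.
    from datetime import datetime

    def convert_to_string(value):
        if isinstance(value, str):
            return value
        elif isinstance(value, bool):
            return str(value).lower()
        elif isinstance(value, (int, float)):
            return str(value)
        elif isinstance(value, datetime):
            return value.isoformat()  # real code: str(value).replace("Z", "")
        raise TypeError("Unexpected type %s" % repr(value))

    print(convert_to_string(True))  # -> true (not "1", thanks to the ordering)
    print(convert_to_string(1))     # -> 1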
Test `construct_compose_dict` returns expected compose dict.
def test_construct_compose_dict(self): expected_examplescraper_compose_dict = { "version": "3", "services": { "scp1": { "container_name": "scp1", "environment": [ "TOR_PORT=9051", "TOR_PASSWORD=I-solemnly-swear-I-am-up-to-no-good", "PRIVOXY_PORT=8118", "PRIVOXY_HOST=127.0.0.1", "IPSTORE_PORT=5000", "IPSTORE_HOST=scp1", "URLBROKER_PORT=6000", "URLBROKER_HOST=scp1", "DATASTORE_PORT=7000", "DATASTORE_HOST=scp1", "HEALTHCHECK_PORT=8000", "HEALTHCHECK_HOST=scp1", "SCRAPER_PACKAGE=examplescraper", "DOCKER_HOST_IP=fake_docker_host_ip", "SCRAPER_CONFIG=tests.integration.fake_config", ], "hostname": "scp1", "image": "scp:latest", "volumes": ["/fake_curent_dir:/scp"], "build": { "context": "/fake_curent_dir", "dockerfile": "/fake_curent_dir/Dockerfile", }, "entrypoint": "/scp/scrapemeagain/dockerized/entrypoints/entrypoint.scp1.sh", }, "scp2": { "container_name": "scp2", "environment": [ "TOR_PORT=9051", "TOR_PASSWORD=I-solemnly-swear-I-am-up-to-no-good", "PRIVOXY_PORT=8118", "PRIVOXY_HOST=127.0.0.1", "IPSTORE_PORT=5000", "IPSTORE_HOST=scp1", "URLBROKER_PORT=6000", "URLBROKER_HOST=scp1", "DATASTORE_PORT=7000", "DATASTORE_HOST=scp1", "HEALTHCHECK_PORT=8000", "HEALTHCHECK_HOST=scp1", "SCRAPER_PACKAGE=examplescraper", "DOCKER_HOST_IP=fake_docker_host_ip", "SCRAPER_CONFIG=tests.integration.fake_config", ], "hostname": "scp2", "image": "scp:latest", "volumes": ["/fake_curent_dir:/scp"], "depends_on": ["scp1"], "entrypoint": "/scp/scrapemeagain/dockerized/entrypoints/entrypoint.scpx.sh", }, }, } self.assertEqual( expected_examplescraper_compose_dict, docker_compose.construct_compose_dict( "examplescraper", "tests.integration.fake_config" ), )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_construct_compose_dict_nonexisting_scraper(self):\n with self.assertRaises(ModuleNotFoundError):\n docker_compose.construct_compose_dict(\"nonexisting\")", "def test_build_exchange_dictionary(self):\n expected = {\n \"USD\": {\"USD\": Decimal(1.0), \"AUD\": Decimal(2.0), \"CAD\": Decimal(1.25)},\n \"AUD\": {\"USD\": Decimal(0.5), \"AUD\": Decimal(1.0), \"CAD\": Decimal(0.625)},\n \"CAD\": {\"USD\": Decimal(0.8), \"AUD\": Decimal(1.6), \"CAD\": Decimal(1.0)},\n }\n\n output = build_exchange_dictionary({\"USD\": 1, \"AUD\": 2, \"CAD\": 1.25})\n self.assertEqual(output, expected)", "def test_create_mimic_dict_1(self):\n result = self.module.create_mimic_dict(\"imdev.txt\")\n self.assertIsInstance(\n result, dict,\n \"The return value of create_mimic_dict() should be a dict.\"\n )", "def test_from_dict(self):\n restart_dict = {'composite_method': '',\n 'conformer_level': 'b97-d3/6-311+g(d,p)',\n 'fine': True,\n 'freq_level': 'wb97x-d3/6-311+g(d,p)',\n 'generate_conformers': True,\n 'initial_trsh': 'scf=(NDump=30)',\n 'model_chemistry': 'ccsd(t)-f12/cc-pvtz-f12',\n 'opt_level': 'wb97x-d3/6-311+g(d,p)',\n 'output': {},\n 'project': 'arc_test',\n 'rxn_list': [],\n 'scan_level': '',\n 'scan_rotors': False,\n 'sp_level': 'ccsdt-f12/cc-pvqz-f12',\n 'species': [{'bond_corrections': {'C-C': 1, 'C-H': 6},\n 'charge': 1,\n 'conformer_energies': [],\n 'conformers': [],\n 'external_symmetry': 1,\n 'generate_thermo': False,\n 'is_ts': False,\n 'label': 'testing_spc1',\n 'mol': '1 C u0 p0 c0 {2,S} {3,S} {4,S} {5,S}\\n2 C u0 p0 c0 {1,S} {6,S} {7,S} {8,S}\\n3 H u0 p0 c0 {1,S}\\n4 H u0 p0 c0 {1,S}\\n5 H u0 p0 c0 {1,S}\\n6 H u0 p0 c0 {2,S}\\n7 H u0 p0 c0 {2,S}\\n8 H u0 p0 c0 {2,S}\\n',\n 'multiplicity': 1,\n 'neg_freqs_trshed': [],\n 'number_of_rotors': 0,\n 'opt_level': '',\n 'optical_isomers': 1,\n 'rotors_dict': {},\n 'xyzs': []}],\n 'use_bac': True}\n arc1 = ARC(project='wrong')\n project = 'arc_project_for_testing_delete_after_usage1'\n project_directory = os.path.join(arc_path, 'Projects', project)\n arc1.from_dict(input_dict=restart_dict, project='testing_from_dict', project_directory=project_directory)\n self.assertEqual(arc1.project, 'testing_from_dict')\n self.assertTrue('arc_project_for_testing_delete_after_usage' in arc1.project_directory)\n self.assertTrue(arc1.job_types['fine'])\n self.assertTrue(arc1.job_types['1d_rotors'])\n self.assertEqual(arc1.sp_level, 'ccsdt-f12/cc-pvqz-f12')\n self.assertEqual(arc1.arc_species_list[0].label, 'testing_spc1')\n self.assertFalse(arc1.arc_species_list[0].is_ts)\n self.assertEqual(arc1.arc_species_list[0].charge, 1)", "def test_convert(self):\n for test in self.test_dict_data:\n self.assertEqual(dottedDict(test[0]).data, test[1])", "def test_from_dict(self):\n cd = ConfigDict.from_dict({\n 'x': 1,\n 'y': {\n 'z': 2,\n 'w': [1,2, {'v': 22}]\n }\n })\n\n self.assertEquals(cd.x, 1)\n self.assertEquals(cd['x'], 1)\n self.assertEquals(cd.y.z, 2)\n self.assertEquals(cd['y']['z'], 2)\n self.assertEquals(cd.y.w[2].v, 22)\n self.assertEquals(cd['y']['w'][2]['v'], 22)", "def test_make_ec_map():\n pass", "def testInitFromDict():\n conf = naiveConf.NaiveConf({})\n conf = naiveConf.NaiveConf({'a':5})\n assert conf.a == 5", "def test_issue588(self):\n c = ConfigDict()\n c.load_dict({'a': {'b': 'c'}}, make_namespaces=True)\n self.assertEqual('c', c['a.b'])\n self.assertEqual('c', c['a']['b'])\n self.assertEqual({'b': 'c'}, c['a'])", "def test_get_composition(self):\n pass", "def test_empty_dict_coerce():\n\n @type_checked\n def _run_test(thing:{}):\n assert 
isinstance(thing, dict)\n\n _run_test([(\"something\", \"is_true\")])", "def test_dict_to_dict(self):\n @converters.wrap\n def inner_test(param: dict):\n \"\"\"Make sure the parameter was converted correctly.\"\"\"\n self.assertEqual(param, {'foo': 1, 'bar': ['bat', 2]})\n inner_test(param={'foo': 1, 'bar': ['bat', 2]})", "def test_composition(self):", "def testCasDict(self):\n casDict = {\"Singular\":\"Singular\", \"Magma\":\"magma\", \"Maple\":\"maple\"}\n self.assertEqual(casDict, self.msTest.getCASDict(),\n \"The dictionary inside the MachineSettings was not validly initialized\")", "def test_elemental_composition_constraint():\n name = \"Property Band gap\"\n minimum = 1\n maximum = 2\n elements = [\"Ga\", \"N\"]\n\n c = ElementalCompositionConstraint(name=name,\n elements=elements,\n minimum=minimum,\n maximum=maximum)\n\n mapped_c = c.to_dict()\n\n assert mapped_c[\"type\"] is \"elementalCompositionConstraint\"\n assert mapped_c[\"name\"] is name\n assert mapped_c[\"options\"][\"min\"] is minimum\n assert mapped_c[\"options\"][\"max\"] is maximum\n assert mapped_c[\"options\"][\"elements\"] is elements", "def test_creates_dict(self):\n c = City()\n dic = c.to_dict()\n self.assertEqual(type(dic), dict)\n for att in c.__dict__:\n self.assertTrue(att in dic)\n self.assertTrue(\"__class__\" in dic)", "def test_composing_workflow(self):\n\n test_data_dir = os.path.join(os.path.dirname(__file__), 'testdata')\n sys.path.append(test_data_dir)\n import compose\n tmpdir = tempfile.mkdtemp()\n try:\n # First make sure the simple pipeline can be compiled.\n simple_package_path = os.path.join(tmpdir, 'simple.tar.gz')\n compiler.Compiler().compile(compose.save_most_frequent_word, simple_package_path)\n\n # Then make sure the composed pipeline can be compiled and also compare with golden.\n compose_package_path = os.path.join(tmpdir, 'compose.tar.gz')\n compiler.Compiler().compile(compose.download_save_most_frequent_word, compose_package_path)\n with open(os.path.join(test_data_dir, 'compose.yaml'), 'r') as f:\n golden = yaml.load(f)\n compiled = self._get_yaml_from_tar(compose_package_path)\n\n self.maxDiff = None\n # Comment next line for generating golden yaml.\n self.assertEqual(golden, compiled)\n finally:\n # Replace next line with commented line for gathering golden yaml.\n shutil.rmtree(tmpdir)\n # print(tmpdir)", "def test_create_mimic_dict_2(self):\n result = self.module.create_mimic_dict(\"imdev.txt\")\n self.assertIn(\n '', result,\n \"Mimic dict should have one key entry for empty string '' \"\n )", "def test_dict_serialization(self, molecule):\n serialized = molecule.to_dict()\n molecule_copy = Molecule.from_dict(serialized)\n assert molecule == molecule_copy\n assert molecule_copy.n_conformers == molecule.n_conformers\n assert np.allclose(molecule_copy.conformers[0], molecule.conformers[0])", "def test_dotwiz_plus_to_dict():\n dw = DotWizPlus(hello=[{\"Key\": \"value\", \"Another-KEY\": {\"a\": \"b\"}}],\n camelCased={r\"th@#$%is.is.!@#$%^&*()a{}\\:<?>/~`.T'e'\\\"st\": True})\n\n assert dw.to_dict() == {\n 'hello': [\n {\n 'Another-KEY': {'a': 'b'},\n 'Key': 'value',\n }\n ],\n 'camelCased': {\n 'th@#$%is.is.!@#$%^&*()a{}\\\\:<?>/~`.T\\'e\\'\\\\\"st': True\n },\n }", "def test_call(self):\n c = ConfigDict()\n self.assertEqual(c, c(a=1))\n self.assertTrue('a' in c)\n self.assertEqual(1, c.a)", "def test_build_compose_supports_layers(mocker):\n\n tmp_filename = '/var/folders/xw/yk2rrhks1w72y0zr_7t7b851qlt8b3/T/tmp52bd77s3'\n mock_writer = mocker.patch('juniper.actions.write_tmp_file', 
return_value=tmp_filename)\n\n processor_ctx = reader('./tests/manifests/layers.yml')\n actual_filename = actions.build_compose(logger, processor_ctx)\n\n expected = read_file('./tests/expectations/layers-compose.yml')\n\n assert tmp_filename == actual_filename\n assert yaml.safe_load(mock_writer.call_args[0][0]) == yaml.safe_load(expected)", "def testGetConfDict():\n\n conf = naiveConf.NaiveConf(exampleConfFname)\n confDict = conf.getConfDict()\n assert type(confDict) == dict\n assert confDict['x'] == conf.x\n assert confDict['y'] == conf.y\n assert confDict['L'] == conf.L", "def test_key_dict(self):\n key = Key({\"warning\": False, \"inCar\": True})\n\n dictionary = key.as_dict()\n assert isinstance(dictionary, dict)\n assert dictionary == {\"warning\": False, \"in_car\": True}", "def test_create_mimic_dict_3(self):\n result = self.module.create_mimic_dict(\"imdev.txt\")\n self.assertDictEqual(\n result, imdev,\n \"Mimic dict output for imdev.txt does match expected contents\"\n )", "def test_compose_params(self):\n filter = Bleach(**self.params)\n self.assertEquals(self.params, filter.bleach_params)", "def test_transform(self):\n t = Compose([Enumerate([2, \"asfa\", \"ipsi\"]), OneHotEncode(3)], \"categorical\")\n assert numpy.all(t.transform(2) == numpy.array((1.0, 0.0, 0.0)))\n assert numpy.all(t.transform(\"asfa\") == numpy.array((0.0, 1.0, 0.0)))\n assert numpy.all(t.transform(\"ipsi\") == numpy.array((0.0, 0.0, 1.0)))\n\n with pytest.raises(KeyError):\n t.transform(\"aafdasfa\")\n\n assert numpy.all(\n t.transform([[\"ipsi\", \"asfa\"], [2, \"ipsi\"]])\n == numpy.array(\n [[(0.0, 0.0, 1.0), (0.0, 1.0, 0.0)], [(1.0, 0.0, 0.0), (0.0, 0.0, 1.0)]]\n )\n )\n\n t = Compose([Enumerate([2, \"asfa\"]), OneHotEncode(2)], \"categorical\")\n assert t.transform(2) == 0.0\n assert t.transform(\"asfa\") == 1.0\n with pytest.raises(KeyError):\n t.transform(\"ipsi\")\n assert numpy.all(\n t.transform([[\"asfa\", \"asfa\"], [2, \"asfa\"]])\n == numpy.array([[1.0, 1.0], [0.0, 1.0]])\n )\n\n # for the crazy enough\n t = Compose([Enumerate([2]), OneHotEncode(1)], \"categorical\")\n assert t.transform(2) == 0.0\n with pytest.raises(KeyError):\n t.transform(\"ipsi\")\n assert numpy.all(t.transform([[2, 2], [2, 2]]) == [[0, 0], [0, 0]])", "def test_credential_get_dict(input_dict):\n creds = Credentials(input_dict).to_dict()\n assert creds[\"url\"] == \"http://example.com\"\n assert creds[\"token\"] == \"ABCDEFGH\"\n assert creds[\"org_key\"] == \"A1B2C3D4\"\n assert not creds[\"ssl_verify\"]\n assert not creds[\"ssl_verify_hostname\"]\n assert creds[\"ssl_cert_file\"] == \"foo.certs\"\n assert creds[\"ssl_force_tls_1_2\"]\n assert creds[\"proxy\"] == \"proxy.example\"\n assert creds[\"ignore_system_proxy\"]", "def test_dotwiz_plus_to_attr_dict():\n dw = DotWizPlus(hello=[{\"Key\": \"value\", \"Another-KEY\": {\"a\": \"b\"}}],\n camelCased={r\"th@#$%is.is.!@#$%^&*()a{}\\:<?>/~`.T'e'\\\"st\": True})\n\n assert dw.to_attr_dict() == {\n 'hello': [\n {\n 'another_key': {'a': 'b'},\n 'key': 'value',\n }\n ],\n 'camel_cased': {'th_is_is_a_t_e_st': True},\n }", "def test_create_results_dict_1(self):\n dict = find_domains.create_results_dict(self.rps_results)\n with self.subTest():\n self.assertEqual(len(dict.keys()), 4)\n with self.subTest():\n self.assertEqual(len(dict[\"ABCDE\"]), 2)\n with self.subTest():\n self.assertEqual(len(dict[\"FGHIJ\"]), 2)" ]
[ "0.611908", "0.59375006", "0.56059444", "0.5426353", "0.54253703", "0.54189974", "0.5415257", "0.5411503", "0.5383426", "0.537418", "0.52742", "0.52094966", "0.51827496", "0.5154304", "0.51462847", "0.51409686", "0.5090625", "0.5080768", "0.50691545", "0.5063073", "0.50582147", "0.50509274", "0.5030585", "0.50248265", "0.50217175", "0.49956766", "0.4992929", "0.4979142", "0.4967176", "0.4959236" ]
0.70209265
0
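The fixture in the record above is simply a docker-compose file expressed as a Python dict. For orientation, here is a small sketch of how such a dict serializes to YAML with PyYAML (assumes PyYAML is available; whether scrapemeagain writes the file this way is not shown in the record).

    # Assumes PyYAML; the single-service dict is a trimmed, hypothetical example.
    import yaml

    compose = {
        "version": "3",
        "services": {"scp1": {"container_name": "scp1", "image": "scp:latest"}},
    }
    print(yaml.safe_dump(compose, sort_keys=False))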
Test `construct_compose_dict` raises `ModuleNotFoundError` for a nonexisting scraper.
def test_construct_compose_dict_nonexisting_scraper(self): with self.assertRaises(ModuleNotFoundError): docker_compose.construct_compose_dict("nonexisting")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_construct_compose_dict(self):\n expected_examplescraper_compose_dict = {\n \"version\": \"3\",\n \"services\": {\n \"scp1\": {\n \"container_name\": \"scp1\",\n \"environment\": [\n \"TOR_PORT=9051\",\n \"TOR_PASSWORD=I-solemnly-swear-I-am-up-to-no-good\",\n \"PRIVOXY_PORT=8118\",\n \"PRIVOXY_HOST=127.0.0.1\",\n \"IPSTORE_PORT=5000\",\n \"IPSTORE_HOST=scp1\",\n \"URLBROKER_PORT=6000\",\n \"URLBROKER_HOST=scp1\",\n \"DATASTORE_PORT=7000\",\n \"DATASTORE_HOST=scp1\",\n \"HEALTHCHECK_PORT=8000\",\n \"HEALTHCHECK_HOST=scp1\",\n \"SCRAPER_PACKAGE=examplescraper\",\n \"DOCKER_HOST_IP=fake_docker_host_ip\",\n \"SCRAPER_CONFIG=tests.integration.fake_config\",\n ],\n \"hostname\": \"scp1\",\n \"image\": \"scp:latest\",\n \"volumes\": [\"/fake_curent_dir:/scp\"],\n \"build\": {\n \"context\": \"/fake_curent_dir\",\n \"dockerfile\": \"/fake_curent_dir/Dockerfile\",\n },\n \"entrypoint\": \"/scp/scrapemeagain/dockerized/entrypoints/entrypoint.scp1.sh\",\n },\n \"scp2\": {\n \"container_name\": \"scp2\",\n \"environment\": [\n \"TOR_PORT=9051\",\n \"TOR_PASSWORD=I-solemnly-swear-I-am-up-to-no-good\",\n \"PRIVOXY_PORT=8118\",\n \"PRIVOXY_HOST=127.0.0.1\",\n \"IPSTORE_PORT=5000\",\n \"IPSTORE_HOST=scp1\",\n \"URLBROKER_PORT=6000\",\n \"URLBROKER_HOST=scp1\",\n \"DATASTORE_PORT=7000\",\n \"DATASTORE_HOST=scp1\",\n \"HEALTHCHECK_PORT=8000\",\n \"HEALTHCHECK_HOST=scp1\",\n \"SCRAPER_PACKAGE=examplescraper\",\n \"DOCKER_HOST_IP=fake_docker_host_ip\",\n \"SCRAPER_CONFIG=tests.integration.fake_config\",\n ],\n \"hostname\": \"scp2\",\n \"image\": \"scp:latest\",\n \"volumes\": [\"/fake_curent_dir:/scp\"],\n \"depends_on\": [\"scp1\"],\n \"entrypoint\": \"/scp/scrapemeagain/dockerized/entrypoints/entrypoint.scpx.sh\",\n },\n },\n }\n\n self.assertEqual(\n expected_examplescraper_compose_dict,\n docker_compose.construct_compose_dict(\n \"examplescraper\", \"tests.integration.fake_config\"\n ),\n )", "def test_func_dict_not_imported_module():\n\n plot_toggles = {\"SMF\": True}\n module_name = \"not_a_module.funcs\"\n function_prefix = \"calc_\"\n\n with pytest.raises(KeyError):\n func_dict = generate_func_dict(plot_toggles, module_name, function_prefix)", "def test_get_composition(self):\n pass", "def test_instantiate_non_existent_module(self):\n # create test configs\n test_configs = [\n {\"_target_\": \"non_existent_module.some_class\"},\n {\"_target_\": \"another_non_existent_module.some_class\", \"a\": 1, \"b\": 2}\n ]\n\n # check that instantiate raises ModuleNotFoundError for each test config\n for test_conf in test_configs:\n self.assertRaises(ModuleNotFoundError, instantiate, test_conf)", "def test_issue588(self):\n c = ConfigDict()\n c.load_dict({'a': {'b': 'c'}}, make_namespaces=True)\n self.assertEqual('c', c['a.b'])\n self.assertEqual('c', c['a']['b'])\n self.assertEqual({'b': 'c'}, c['a'])", "def test_import_fails_with_no_modules(self):\n with self.assertRaises(ValueError):\n LazyImportTester([])", "def test_constructor_missing_config():\n with pytest.raises(TypeError):\n Unpacker()", "def test_construct_3_bad_bootsraps(self):\n with self.assertRaises(KeyError):\n configerus.new_config(bootstraps=[\"I do not exist\"])", "def composed_url2modules(baseurl):\n import compose\n\n c = compose.Compose(baseurl)\n cid = c.data_id()\n cstat = c.data_status()\n print('Mod Compose:', cid)\n print(' Status:', cstat)\n mdata = c.json_modules()\n m = compose.modules_from_compose(mdata)\n return compose.dedup_modules(m)", "def test_build_compose_supports_layers(mocker):\n\n tmp_filename = 
'/var/folders/xw/yk2rrhks1w72y0zr_7t7b851qlt8b3/T/tmp52bd77s3'\n mock_writer = mocker.patch('juniper.actions.write_tmp_file', return_value=tmp_filename)\n\n processor_ctx = reader('./tests/manifests/layers.yml')\n actual_filename = actions.build_compose(logger, processor_ctx)\n\n expected = read_file('./tests/expectations/layers-compose.yml')\n\n assert tmp_filename == actual_filename\n assert yaml.safe_load(mock_writer.call_args[0][0]) == yaml.safe_load(expected)", "def test_constructor_missing_logging():\n with pytest.raises(TypeError):\n config = {\n \"PAN_GALACTIC_GARGLE_BLASTER\": \"Yummy\"\n }\n Unpacker(config)", "def test_build_compose_section_supports_layers():\n\n custom_output_dir = './build_not_dist'\n manifest = {\n 'output_dir': custom_output_dir,\n 'layers': {\n 'first': {'requirements': 'requirements/first.txt'},\n 'second': {'requirements': 'requirements/second.txt'},\n }\n }\n\n result = actions._get_compose_template(manifest)\n yaml_result = yaml.safe_load(result)\n\n first_layer = yaml_result['services']['first-layer']\n assert any('requirements/first.txt' in volume for volume in first_layer['volumes'])\n assert 'build_layer.sh first' in first_layer['command']\n\n second_layer = yaml_result['services']['second-layer']\n assert any('requirements/second.txt' in volume for volume in second_layer['volumes'])\n assert 'build_layer.sh second' in second_layer['command']", "def __load_docker_compose(path):\n file_path = __get_docker_file_path(path)\n if file_path is None:\n msg = \"Could not find docker-compose file at {}\".format(path)\n return None, __standardize_result(False, msg, None, None)\n if not os.path.isfile(file_path):\n return (\n None,\n __standardize_result(\n False, \"Path {} is not present\".format(file_path), None, None\n ),\n )\n try:\n with salt.utils.files.fopen(file_path, \"r\") as fl:\n loaded = yaml.load(fl)\n except OSError:\n return (\n None,\n __standardize_result(\n False, \"Could not read {}\".format(file_path), None, None\n ),\n )\n except yaml.YAMLError as yerr:\n msg = \"Could not parse {} {}\".format(file_path, yerr)\n return None, __standardize_result(False, msg, None, None)\n if not loaded:\n msg = \"Got empty compose file at {}\".format(file_path)\n return None, __standardize_result(False, msg, None, None)\n if \"services\" not in loaded:\n loaded[\"services\"] = {}\n result = {\"compose_content\": loaded, \"file_name\": os.path.basename(file_path)}\n return result, None", "def _get_community_platform_details(community_platform_name: str) -> Dict[str, Any]:\n try:\n importlib.import_module(name=\"scrapli_community\")\n except ModuleNotFoundError as exc:\n title = \"Module not found!\"\n message = (\n \"Scrapli Community package is not installed!\\n\"\n \"To resolve this issue, install the transport plugin. You can do this in one of \"\n \"the following ways:\\n\"\n \"1: 'pip install -r requirements-community.txt'\\n\"\n \"2: 'pip install scrapli[community]'\"\n )\n warning = format_user_warning(title=title, message=message)\n raise ScrapliModuleNotFound(warning) from exc\n\n try:\n # replace any underscores in platform name with \".\"; should support any future platforms\n # that dont have \"child\" os types -- i.e. 
just \"cisco\" instead of \"cisco_iosxe\"\n scrapli_community_platform = importlib.import_module(\n name=f\"scrapli_community.{community_platform_name.replace('_', '.')}\"\n )\n except ModuleNotFoundError as exc:\n title = \"Module not found!\"\n message = (\n f\"Scrapli Community platform '{community_platform_name}` not found!\\n\"\n \"To resolve this issue, ensure you have the correct platform name, and that a scrapli \"\n \" community platform of that name exists!\"\n )\n warning = format_user_warning(title=title, message=message)\n raise ScrapliModuleNotFound(warning) from exc\n\n platform_details_original = getattr(scrapli_community_platform, \"SCRAPLI_PLATFORM\", {})\n if not platform_details_original:\n msg = \"Community platform missing required attribute `SCRAPLI_PLATFORM`\"\n raise ScrapliException(msg)\n platform_details: Dict[str, Any] = deepcopy(platform_details_original)\n return platform_details", "def test_get_factory_invalid(self):\n order_processor = OrderProcessor()\n self.assertRaises(KeyError,\n order_processor.get_factory('AppleRepublic'))", "def _iep(s):\n d = defaultdict(dict)\n for _ep in iter_entry_points(s):\n try:\n d[_ep.name] = _ep.load()\n except Exception as e:\n d[_ep.name] = functools.partial(_broken_ep, _ep, e)\n return d", "def test_1X_constructor(self):\n path_to_config = '/invalid_path.yml'\n with self.assertRaises(Exception):\n FeatureExtractor(path_to_config)", "def _get_community_platform_details(community_platform_name: str) -> Dict[str, Any]:\n try:\n importlib.import_module(name=\"scrapli_community\")\n except ModuleNotFoundError as exc:\n err = f\"Module '{exc.name}' not found!\"\n msg = f\"***** {err} {'*' * (80 - len(err))}\"\n fix = (\n \"To resolve this issue, ensure you have the scrapli community package installed.\"\n \" You can install this with pip: `pip install scrapli_community`.\"\n )\n warning = \"\\n\" + msg + \"\\n\" + fix + \"\\n\" + msg\n raise ModuleNotFoundError(warning) from exc\n\n try:\n # replace any underscores in platform name with \".\"; should support any future platforms\n # that dont have \"child\" os types -- i.e. 
just \"cisco\" instead of \"cisco_iosxe\"\n scrapli_community_platform = importlib.import_module(\n name=f\"scrapli_community.{community_platform_name.replace('_', '.')}\"\n )\n except ModuleNotFoundError as exc:\n err = f\"Platform '{community_platform_name}' not found!\"\n msg = f\"***** {err} {'*' * (80 - len(err))}\"\n fix = (\n \"To resolve this issue, ensure you have the correct platform name, and that a scrapli \"\n \" community platform of that name exists!\"\n )\n warning = \"\\n\" + msg + \"\\n\" + fix + \"\\n\" + msg\n raise ModuleNotFoundError(warning) from exc\n except Exception as exc:\n msg = \"Unknown error occurred\"\n raise ScrapliException(msg) from exc\n\n platform_details_original = getattr(scrapli_community_platform, \"SCRAPLI_PLATFORM\", {})\n if not platform_details_original:\n msg = \"Community platform missing required attribute `SCRAPLI_PLATFORM`\"\n raise ScrapliException(msg)\n platform_details: Dict[str, Any] = deepcopy(platform_details_original)\n return platform_details", "def test_list_compositions(self):\n pass", "def test_fold_dep_empty_dict() -> None:\n # Given\n global_dep = {}\n fold_module = Module(\"module\")\n\n # When\n global_dep = _fold_dep(global_dep, fold_module)\n\n # Then\n global_dep = {}", "def composed_url2pkgs(baseurl):\n import compose\n\n c = compose.Compose(baseurl)\n cid = c.data_id()\n cstat = c.data_status()\n pdata = c.json_rpms()\n p = compose.packages_from_compose(pdata)\n pb = compose.packages_bin_from_compose(pdata)\n return p, pb, cid, cstat", "def test_loads(self, game=\"SuperMarioKart-Snes\"):\n with self.assertRaises(NameError):\n retro.make(game=game)", "def test_composing_workflow(self):\n\n test_data_dir = os.path.join(os.path.dirname(__file__), 'testdata')\n sys.path.append(test_data_dir)\n import compose\n tmpdir = tempfile.mkdtemp()\n try:\n # First make sure the simple pipeline can be compiled.\n simple_package_path = os.path.join(tmpdir, 'simple.tar.gz')\n compiler.Compiler().compile(compose.save_most_frequent_word, simple_package_path)\n\n # Then make sure the composed pipeline can be compiled and also compare with golden.\n compose_package_path = os.path.join(tmpdir, 'compose.tar.gz')\n compiler.Compiler().compile(compose.download_save_most_frequent_word, compose_package_path)\n with open(os.path.join(test_data_dir, 'compose.yaml'), 'r') as f:\n golden = yaml.load(f)\n compiled = self._get_yaml_from_tar(compose_package_path)\n\n self.maxDiff = None\n # Comment next line for generating golden yaml.\n self.assertEqual(golden, compiled)\n finally:\n # Replace next line with commented line for gathering golden yaml.\n shutil.rmtree(tmpdir)\n # print(tmpdir)", "def test_absent_imports():\n module, HABEMUS_MODULE = optional_import(\"not_real_module\")\n\n assert not HABEMUS_MODULE\n assert module.__name__ == \"not_real_module\"\n with pytest.raises(ModuleNotFoundError):\n _ = module.layers", "def __load_compose_definitions(path, definition):\n compose_result, err = __load_docker_compose(path)\n if err:\n return None, None, err\n if isinstance(definition, dict):\n return compose_result, definition, None\n elif definition.strip().startswith(\"{\"):\n try:\n loaded_definition = json.deserialize(definition)\n except json.DeserializationError as jerr:\n msg = \"Could not parse {} {}\".format(definition, jerr)\n return None, None, __standardize_result(False, msg, None, None)\n else:\n try:\n loaded_definition = yaml.load(definition)\n except yaml.YAMLError as yerr:\n msg = \"Could not parse {} {}\".format(definition, yerr)\n 
return None, None, __standardize_result(False, msg, None, None)\n return compose_result, loaded_definition, None", "def test_instantiate_non_existent_class(self):\n # create test configs\n test_configs = [\n {\"_target_\": \"collections.NonExistentClass\"},\n {\"_target_\": \"collections.OtherNonExistentClass\", \"a\": 1, \"b\": 2}\n ]\n\n # check that instantiate raises AttributeError for each test config\n for test_conf in test_configs:\n self.assertRaises(AttributeError, instantiate, test_conf)", "def test_failure(t):\n objmap = ObjectMap({}, modname=\"py.module.name\", classname=\"ClassName\")\n ret = _create_object(objmap)\n t.assertIsNone(ret)", "def test_launch_composition(self):\n pass", "def _try_extractors(environ, extractors, start_response):\n for extractor_name in extractors:\n try:\n imported_module = __import__('tiddlyweb.web.extractors.%s' %\n extractor_name, {}, {}, ['Extractor'])\n except ImportError:\n try:\n imported_module = __import__(extractor_name, {}, {},\n ['Extractor'])\n except ImportError, exc:\n raise ImportError('could not load extractor %s: %s' %\n (extractor_name, exc))\n extractor = imported_module.Extractor()\n extracted_user = extractor.extract(environ, start_response)\n if extracted_user:\n logging.debug('UserExtract:%s found %s',\n extractor_name, extracted_user)\n return extracted_user\n return False", "def test_import_string_missing_class_or_attribute(self):\n valid_module = 'ttgn.pokedex'\n invalid_class = 'NonexistentClass'\n with pytest.raises(ImportError) as error:\n utils.import_string('{}.{}'.format(valid_module, invalid_class))\n assert 'Module {} has no class or attribute {}'.format(\n valid_module, invalid_class) == str(error.value)" ]
[ "0.6682241", "0.5706916", "0.5090552", "0.5024181", "0.4941659", "0.48633698", "0.48114425", "0.4799155", "0.47261176", "0.4679527", "0.46407944", "0.46372876", "0.46235257", "0.46136138", "0.45923778", "0.45748225", "0.45726362", "0.45706356", "0.4561969", "0.4499325", "0.44740507", "0.44727394", "0.4467899", "0.44651923", "0.44647354", "0.44615728", "0.44548607", "0.44382435", "0.4414671", "0.4406988" ]
0.862838
0
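The negatives above show several ways such import failures surface; the positive document itself reduces to this standard pattern, sketched standalone (the module name below is deliberately made up so the import fails).

    # "no_such_scraper_pkg" is intentionally nonexistent so the import fails.
    import importlib
    import unittest

    class NonexistingModuleTest(unittest.TestCase):
        def test_import_raises(self):
            with self.assertRaises(ModuleNotFoundError):
                importlib.import_module("no_such_scraper_pkg")

    unittest.main(argv=["prog"], exit=False)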
Test the rating limit
def _testRatingLimit(self): comment = models.Comment.objects.all()[0] type = models.RatingType.objects.all()[0] try: val = type.limit + 10 rating = models.Rating(comment=comment, type=type, value=val) rating.save() assert rating.value == type.limit finally: rating.delete()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_unsuccessful_rating_with_rate_value_more_than_five(self):\n response = self.client.post(\n reverse('articles:rate', kwargs={'slug': self.slug}),\n {'rating': 6},\n format=\"json\",\n **self.headers)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(str(response.data['errors']['rating'][0]),\n self.violate_max_value_error_message)", "def validate_rating(self, key, rating):\n if not rating:\n raise AssertionError('No rating provided')\n if rating < 0 or rating > 10:\n raise AssertionError('Rating not in between 0 and 10')\n return rating", "def bound_rating(self, rating):\n return 1.0 * max(0, min(int(rating + 0.5), 5))\n # return 1.0 * max(0, min(rating, 5))", "def validate_rating(self, key, value):\n assert value is None or value <= 10 and value >= 0\n return value", "def test_is_valid_fujita_rating_f_too_low(self):\n\n self.assertFalse(\n tornado_io._is_valid_fujita_rating(F_SCALE_RATING_TOO_LOW)\n )", "def test_get_dealer_ratings(self):\n pass", "def k(rating):\n if rating < 100: return 40\n if rating < 200: return 20\n return 10", "def test_show_rating(self):\n self.assertEqual(self.show.rating, None)", "def test_non_logged_in_users_cannot_rate(self):\n\n self.signup('[email protected]', 'user')\n self.login('[email protected]')\n csrf_token = self.get_csrf_token_from_response(\n self.testapp.get('/explore/%s' % self.EXP_ID))\n self.logout()\n\n ratings = self.get_json('/explorehandler/rating/%s' % self.EXP_ID)\n self.assertEqual(ratings['user_rating'], None)\n self.assertEqual(\n ratings['overall_ratings'],\n {'1': 0, '2': 0, '3': 0, '4': 0, '5': 0})\n self.put_json(\n '/explorehandler/rating/%s' % self.EXP_ID, {\n 'user_rating': 1\n }, csrf_token, expected_status_int=401, expect_errors=True\n )", "def user_rating(self, rating: int):\n if type(rating) == int:\n self._user_rating = rating\n if type(rating) != int:\n raise ValueError\n if rating < 0 or rating > 5:\n print('Rating is out of 5. Please enter an integer from 0 to 5.')\n self._user_rating = None", "def test_is_valid_fujita_rating_ef_too_low(self):\n\n self.assertFalse(\n tornado_io._is_valid_fujita_rating(EF_SCALE_RATING_TOO_LOW)\n )", "def test_show_rating(self):\n self.assertTrue(isinstance(self.show.rating, float))", "def test_is_valid_fujita_rating_f_too_high(self):\n\n self.assertFalse(\n tornado_io._is_valid_fujita_rating(F_SCALE_RATING_TOO_HIGH)\n )", "def is_rating_allowed(self):\n return self._is_action_allowed('rating')", "def add_rating(self, rating):\n if rating >= 0 and rating <= 4:\n self.ratings.append(rating)\n else:\n print(\"Invalid Rating\")", "def test_only_yield_when_rating_greater_than_two(self):\n # Sign up a user and have them create an exploration.\n user_a_id = self._sign_up_user(\n self.USER_A_EMAIL, self.USER_A_USERNAME)\n self._create_exploration(self.EXP_ID_1, user_a_id)\n\n # Give two ratings of 1.\n self._rate_exploration(self.EXP_ID_1, 2, 1)\n self._run_computation()\n user_stats_model = user_models.UserStatsModel.get(\n user_a_id, strict=False)\n self.assertIsNone(user_stats_model)\n ModifiedUserImpactAggregator.stop_computation(user_a_id)\n\n # Give two ratings of 2.\n self._rate_exploration(self.EXP_ID_1, 2, 2)\n self._run_computation()\n user_stats_model = user_models.UserStatsModel.get(\n user_a_id, strict=False)\n self.assertIsNone(user_stats_model)\n ModifiedUserImpactAggregator.stop_computation(user_a_id)\n\n # Give two ratings of 3. 
The impact score should now be nonzero.\n self._rate_exploration(self.EXP_ID_1, 2, 3)\n self._run_computation()\n user_stats_model = user_models.UserStatsModel.get(\n user_a_id, strict=False)\n self.assertIsNotNone(user_stats_model)\n self.assertGreater(user_stats_model.impact_score, 0)", "def test_is_valid_fujita_rating_ef_too_high(self):\n\n self.assertFalse(\n tornado_io._is_valid_fujita_rating(EF_SCALE_RATING_TOO_HIGH)\n )", "def _limit_fill():\n z = random.randint(0, 10)\n if z/10.0 < LIMIT_FILL_PROBABILITY:\n return True\n else:\n return False", "def test_invalid_rating_value(self):\n url = reverse('rate-game')\n negative_rating = {\n 'igdb': self.game.igdb,\n 'name': self.game.name,\n 'slug': self.game.slug,\n 'cover_id': self.game.cover_id,\n 'backdrop_id': self.game.backdrop_id,\n 'rating': -1\n }\n big_rating = negative_rating\n big_rating['rating'] = 6\n\n negative = self.client.post(url, negative_rating, format='json')\n big = self.client.post(url, big_rating, format='json')\n\n self.assertEqual(negative.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(big.status_code, status.HTTP_400_BAD_REQUEST)", "def test_rate_limited(self):\n response = self._mock_utility(get_kwargs=self._data(),\n error=fitbit_exceptions.HTTPConflict)\n self._check_response(response, 105)", "def test_assign_and_read_ratings(self):\n\n self.signup('[email protected]', 'user')\n self.login('[email protected]')\n csrf_token = self.get_csrf_token_from_response(\n self.testapp.get('/explore/%s' % self.EXP_ID))\n\n # User checks rating\n ratings = self.get_json('/explorehandler/rating/%s' % self.EXP_ID)\n self.assertEqual(ratings['user_rating'], None)\n self.assertEqual(\n ratings['overall_ratings'],\n {'1': 0, '2': 0, '3': 0, '4': 0, '5': 0})\n\n # User rates and checks rating\n self.put_json(\n '/explorehandler/rating/%s' % self.EXP_ID, {\n 'user_rating': 2\n }, csrf_token\n )\n ratings = self.get_json('/explorehandler/rating/%s' % self.EXP_ID)\n self.assertEqual(ratings['user_rating'], 2)\n self.assertEqual(\n ratings['overall_ratings'],\n {'1': 0, '2': 1, '3': 0, '4': 0, '5': 0})\n\n # User re-rates and checks rating\n self.login('[email protected]')\n self.put_json(\n '/explorehandler/rating/%s' % self.EXP_ID, {\n 'user_rating': 5\n }, csrf_token\n )\n ratings = self.get_json('/explorehandler/rating/%s' % self.EXP_ID)\n self.assertEqual(ratings['user_rating'], 5)\n self.assertEqual(\n ratings['overall_ratings'],\n {'1': 0, '2': 0, '3': 0, '4': 0, '5': 1})\n\n self.logout()", "def test_unsuccessful_rating_with_negative_rate_value(self):\n response = self.client.post(\n reverse('articles:rate', kwargs={'slug': self.slug}),\n {'rating': -4},\n format=\"json\",\n **self.headers)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(str(response.data['errors']['rating'][0]),\n self.violate_min_value_error_message)", "def test_is_valid_fujita_rating_ef_some_caps(self):\n\n self.assertTrue(\n tornado_io._is_valid_fujita_rating(EF_SCALE_RATING_SOME_CAPS)\n )", "def test_post_rating(self):\n\n rating_data = {\n 'user': self.user.pk,\n 'book': self.book.pk,\n 'rating': random.randint(1, 5)\n }\n\n self.client.post(self.list_url, data=rating_data)\n\n self.book.refresh_from_db()\n\n assert self.book.ratings.count() == 1\n self.assertAlmostEqual(self.book.average_rating, rating_data['rating'])", "def _check_rate_limit(self, res, amt, balance, meta, raven_vars, dispatch, t):\n # TODO distinct up/down rates\n # check limiting rate for resource flow in/out, if any\n if 
self._rate:\n request = {res: None}\n inputs = {'request': request,\n 'meta': meta,\n 'raven_vars': raven_vars,\n 'dispatch': dispatch,\n 't': t}\n max_rate = self._rate.evaluate(inputs, target_var=res)[0][res]\n delta = np.sign(amt) * min(max_rate, abs(amt))\n print('max_rate in _check_rate_limit',max_rate, 'delta (min of maxrate and abs(amt)',delta)\n return {res: delta}, meta\n return {res: amt}, meta", "def get_rating(mpg):\n if mpg < 14:\n return 1\n elif mpg < 15:\n return 2\n elif mpg < 17:\n return 3\n elif mpg < 20:\n return 4\n elif mpg < 24:\n return 5\n elif mpg < 27:\n return 6\n elif mpg < 31:\n return 7\n elif mpg < 37:\n return 8\n elif mpg < 45:\n return 9\n return 10", "def rating(self, rating):\n if (self.local_vars_configuration.client_side_validation and\n rating is not None and rating > 5): # noqa: E501\n raise ValueError(\"Invalid value for `rating`, must be a value less than or equal to `5`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n rating is not None and rating < 1): # noqa: E501\n raise ValueError(\"Invalid value for `rating`, must be a value greater than or equal to `1`\") # noqa: E501\n\n self._rating = rating", "def rate_limit(entity, limit, duration=60):\n\n return current_rate(entity, limit, duration) > limit", "def likely_to_be_offered(self):\n if self.score >= 5:\n return True\n return False", "def test_limit(self):\n\t\tfor lim in [1, '234', -100, '-200']:\n\t\t\tself.filter.set_limit(lim)\n\t\t\tself.assertEqual(int(lim), self.filter.get_limit(), \"Limit mismatch: %s!=%s\" % (lim, self.filter.get_limit()))\n\t\tself.filter.set_limit('test')\n\t\tself.assertEqual('test', self.filter.get_limit(), \"String set failed for Filter limit.\")" ]
[ "0.70283306", "0.696573", "0.68655944", "0.670783", "0.649507", "0.6459995", "0.6455193", "0.64220494", "0.63810766", "0.63534164", "0.63452977", "0.6326962", "0.6323487", "0.6303592", "0.6256071", "0.6230649", "0.62118495", "0.6192096", "0.61686665", "0.6168111", "0.6138926", "0.61305285", "0.60589045", "0.6026571", "0.6025035", "0.5968701", "0.59218264", "0.5892164", "0.5863082", "0.58534366" ]
0.7624382
0
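The test in the record implies that Rating.save() clamps the value to its type's limit. Since the Django models themselves are not shown, here is a framework-free sketch of that clamping behaviour (the classes below are stand-ins, not the real models).

    # Stand-in classes (not the real Django models); only the clamp is real,
    # inferred from `assert rating.value == type.limit` in the record.
    class RatingType:
        def __init__(self, limit):
            self.limit = limit

    class Rating:
        def __init__(self, type, value):
            self.type, self.value = type, value

        def save(self):
            self.value = min(self.value, self.type.limit)

    r = Rating(RatingType(limit=5), value=15)
    r.save()
    assert r.value == 5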
Test individual comment rating
def _testCommentRating(self): try: host = models.Host.objects.all()[0] comment = models.Comment(text='test', host=host) comment.save() types = models.RatingType.objects.all() items = [] for value, type in zip([3, 4, 5], types): tmp_obj = models.Rating(comment=comment, type=type, value=value) tmp_obj.save() items.append(tmp_obj) assert comment.rating() - 4.0 < .0001, comment.rating() for tmp_obj in items: tmp_obj.delete() items = [] for value, type in zip([3, 3], types): tmp_obj = models.Rating(comment=comment, type=type, value=value) tmp_obj.save() items.append(tmp_obj) assert comment.rating() == 3.0, comment.rating() finally: for tmp_obj in items: tmp_obj.delete() comment.delete()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_upvote_modifies_comment_score(self):\n comment = Comment.objects.get(body=\"987XYZ\")\n self.assertEqual(comment.score, DEFAULT_SCORE)\n vote = Vote.create(comment=comment, value=1, voter=self.user)\n comment = Comment.objects.get(body=\"987XYZ\")\n self.assertEqual(comment.score, DEFAULT_SCORE + 1)", "def update_comment_score(self, loginID, commentID, attrib_name):\n self.cursor.execute(\"SELECT rating FROM rates WHERE loginID = %s AND commentID = %s\", (loginID, commentID))\n old_rating = self.cursor.fetchall()\n if old_rating:\n # This user already rated this comment. Change the rating.\n if old_rating[0][0] == attrib_name:\n # Remove the rating, because the user already voted for this.\n self.cursor.execute(\"UPDATE comment SET \" + attrib_name + \"=\" + attrib_name + \"-1 WHERE commentID=%s\",\n (commentID,))\n self.cursor.execute(\"\"\"DELETE FROM rates WHERE loginID=%s AND commentID=%s\"\"\",\n (loginID, commentID))\n else:\n self.cursor.execute(\n \"UPDATE comment SET \" + old_rating[0][0] + \"=\" + old_rating[0][0] + \"-1, \" + attrib_name\n + \"=\" + attrib_name + \"+1 WHERE commentID=%s\"\"\", (commentID,))\n self.cursor.execute(\"\"\"UPDATE rates SET rating=%s WHERE loginID=%s AND commentID=%s\"\"\",\n (attrib_name, loginID, commentID))\n else:\n # New rating, just need to update one value and add a new rating tuple to rates\n self.cursor.execute(\"UPDATE comment SET \" + attrib_name + \"=\" + attrib_name + \"+1 WHERE commentID=%s\",\n (commentID,))\n self.cursor.execute(\"\"\"INSERT INTO rates VALUES (%s,%s,%s)\"\"\", (loginID, commentID, attrib_name))\n self.db.commit()\n self.update_comment_avg_score(commentID)", "def test_downvote_modifies_comment_score(self):\n comment = Comment.objects.get(body=\"987XYZ\")\n self.assertEqual(comment.score, DEFAULT_SCORE)\n vote = Vote.create(comment=comment, value=-1, voter=self.user)\n comment = Comment.objects.get(body=\"987XYZ\")\n self.assertEqual(comment.score, DEFAULT_SCORE - 1)", "def test_post_rating(self):\n\n rating_data = {\n 'user': self.user.pk,\n 'book': self.book.pk,\n 'rating': random.randint(1, 5)\n }\n\n self.client.post(self.list_url, data=rating_data)\n\n self.book.refresh_from_db()\n\n assert self.book.ratings.count() == 1\n self.assertAlmostEqual(self.book.average_rating, rating_data['rating'])", "def test_default_score_comment(self):\n comment = Comment.objects.get(body=\"987XYZ\")\n self.assertEqual(comment.score, DEFAULT_SCORE)", "def test_assign_and_read_ratings(self):\n\n self.signup('[email protected]', 'user')\n self.login('[email protected]')\n csrf_token = self.get_csrf_token_from_response(\n self.testapp.get('/explore/%s' % self.EXP_ID))\n\n # User checks rating\n ratings = self.get_json('/explorehandler/rating/%s' % self.EXP_ID)\n self.assertEqual(ratings['user_rating'], None)\n self.assertEqual(\n ratings['overall_ratings'],\n {'1': 0, '2': 0, '3': 0, '4': 0, '5': 0})\n\n # User rates and checks rating\n self.put_json(\n '/explorehandler/rating/%s' % self.EXP_ID, {\n 'user_rating': 2\n }, csrf_token\n )\n ratings = self.get_json('/explorehandler/rating/%s' % self.EXP_ID)\n self.assertEqual(ratings['user_rating'], 2)\n self.assertEqual(\n ratings['overall_ratings'],\n {'1': 0, '2': 1, '3': 0, '4': 0, '5': 0})\n\n # User re-rates and checks rating\n self.login('[email protected]')\n self.put_json(\n '/explorehandler/rating/%s' % self.EXP_ID, {\n 'user_rating': 5\n }, csrf_token\n )\n ratings = self.get_json('/explorehandler/rating/%s' % self.EXP_ID)\n self.assertEqual(ratings['user_rating'], 
5)\n self.assertEqual(\n ratings['overall_ratings'],\n {'1': 0, '2': 0, '3': 0, '4': 0, '5': 1})\n\n self.logout()", "def test_get_dealer_ratings(self):\n pass", "def test_ratings_by_different_users(self):\n\n self.signup('[email protected]', 'a')\n self.signup('[email protected]', 'b')\n\n self.login('[email protected]')\n csrf_token = self.get_csrf_token_from_response(\n self.testapp.get('/explore/%s' % self.EXP_ID))\n self.put_json(\n '/explorehandler/rating/%s' % self.EXP_ID, {\n 'user_rating': 4\n }, csrf_token\n )\n self.logout()\n\n self.login('[email protected]')\n csrf_token = self.get_csrf_token_from_response(\n self.testapp.get('/explore/%s' % self.EXP_ID))\n ratings = self.get_json('/explorehandler/rating/%s' % self.EXP_ID)\n self.assertEqual(ratings['user_rating'], None)\n self.put_json(\n '/explorehandler/rating/%s' % self.EXP_ID, {\n 'user_rating': 4\n }, csrf_token\n )\n ratings = self.get_json('/explorehandler/rating/%s' % self.EXP_ID)\n self.assertEqual(ratings['user_rating'], 4)\n self.assertEqual(\n ratings['overall_ratings'],\n {'1': 0, '2': 0, '3': 0, '4': 2, '5': 0})\n self.logout()", "def _testRatingLimit(self):\n\n comment = models.Comment.objects.all()[0]\n type = models.RatingType.objects.all()[0]\n try:\n val = type.limit + 10\n rating = models.Rating(comment=comment, type=type, value=val)\n rating.save()\n assert rating.value == type.limit\n finally:\n rating.delete()", "def test_upvote_then_downvote_same_user_leaves_comment_score_one_less(self):\n comment = Comment.objects.get(body=\"987XYZ\")\n # self.assertEqual(len(post_qs), 1)\n self.assertEqual(comment.score, DEFAULT_SCORE)\n comment = Comment.objects.get(body=\"987XYZ\")\n\n vote1 = Vote.create(comment=comment, value=1, voter=self.user)\n comment = Comment.objects.get(body=\"987XYZ\")\n self.assertEqual(comment.score, DEFAULT_SCORE + 1)\n\n vote2 = Vote.create(comment=comment, value=-1, voter=self.user)\n comment = Comment.objects.get(body=\"987XYZ\")\n self.assertEqual(comment.score, DEFAULT_SCORE - 1)", "def update_comment_usefulness(self):\n self.cursor.execute(\"\"\"UPDATE comment SET veryUseful=0, useful=0, useless=0, avg_usefulness=NULL\"\"\")\n self.db.commit()\n self.cursor.execute(\"\"\"SELECT * FROM rates\"\"\")\n for rating in self.cursor.fetchall():\n self.update_comment_score(rating[0], rating[1], rating[2])", "def test_show_rating(self):\n self.assertEqual(self.show.rating, None)", "def test_average_rating(self):\n self.new_project.save()\n\n review1 = Review.objects.create(project = self.new_project, user = self.new_user, design = 8, usability = 5, content = 9, comment = 'This is a nice website.')\n\n review2 = Review.objects.create(project = self.new_project, user = self.new_user, design = 6, usability = 5, content = 3, comment = 'This is a nice website.')\n\n self.assertEqual(self.new_project.average_rating, 6.0)", "def test_comments_are_moderated(self):\n self.assertFalse(Article().comments_are_moderated, \"comment should not be moderated yet\")\n self.assertTrue(\n Article(publication_date=datetime.min).comments_are_moderated,\n \"old comment should be moderated\",\n )\n\n # Test ranges\n days = appsettings.FLUENT_COMMENTS_MODERATE_AFTER_DAYS\n self.assertFalse(\n Article(publication_date=now() - timedelta(days=days - 1)).comments_are_moderated\n )\n self.assertTrue(\n Article(publication_date=now() - timedelta(days=days)).comments_are_moderated\n )\n self.assertTrue(\n Article(publication_date=now() - timedelta(days=days + 1)).comments_are_moderated\n )", "def 
test_downvote_then_upvote_same_user_leaves_comment_score_one_greater(self):\n comment = Comment.objects.get(body=\"987XYZ\")\n # self.assertEqual(len(post_qs), 1)\n self.assertEqual(comment.score, DEFAULT_SCORE)\n comment = Comment.objects.get(body=\"987XYZ\")\n\n vote1 = Vote.create(comment=comment, value=-1, voter=self.user)\n comment = Comment.objects.get(body=\"987XYZ\")\n self.assertEqual(comment.score, DEFAULT_SCORE - 1)\n\n vote2 = Vote.create(comment=comment, value=1, voter=self.user)\n comment = Comment.objects.get(body=\"987XYZ\")\n self.assertEqual(comment.score, DEFAULT_SCORE + 1)", "def _testRatingCategories(self):\n\n\n try:\n user = auth.User.objects.all()[0]\n category = models.Category.objects.all()[0]\n host = models.Host(user=user, category=category,\n url='http://blah.com')\n host.save()\n\n comment = models.Comment(text='test', host=host)\n comment.save()\n\n types = models.RatingType.objects.all()\n\n items = []\n for value, type in zip([3, 4, 5], types):\n tmp_obj = models.Rating(comment=comment, type=type, value=value)\n tmp_obj.save()\n items.append(tmp_obj)\n\n assert comment.rating() - 4.0 < .0001, comment.rating()\n\n comment2 = models.Comment(text='test', host=host)\n comment2.save()\n\n for value, type in zip([3, 3, 3], types):\n tmp_obj = models.Rating(comment=comment2, type=type, value=value)\n tmp_obj.save()\n items.append(tmp_obj)\n\n assert comment2.rating() - 3.0 < .0001, comment2.rating()\n\n assert host.rating() == 3.5, host.rating()\n\n ratings = host.ratings()\n assert ratings['Support'] == 3.5, ratings\n assert ratings['Features'] == 3.0\n assert ratings['Uptime'] == 4.0\n\n finally:\n try:\n for tmp_obj in items:\n tmp_obj.delete()\n \n comment.delete()\n comment2.delete()\n host.delete()\n except:\n pass", "def test_successful_article_rate(self):\n ratings_count = ArticleRating.objects.count()\n response = self.client.post(\n reverse('articles:rate', kwargs={'slug': self.slug}),\n {'rating': 4},\n format=\"json\",\n **self.headers)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(ArticleRating.objects.count(), ratings_count+1)", "def test_like_a_comment(self):\n self.base_test()", "def validate_rating(self, key, rating):\n if not rating:\n raise AssertionError('No rating provided')\n if rating < 0 or rating > 10:\n raise AssertionError('Rating not in between 0 and 10')\n return rating", "def test_show_rating(self):\n self.assertTrue(isinstance(self.show.rating, float))", "def test_non_logged_in_users_cannot_rate(self):\n\n self.signup('[email protected]', 'user')\n self.login('[email protected]')\n csrf_token = self.get_csrf_token_from_response(\n self.testapp.get('/explore/%s' % self.EXP_ID))\n self.logout()\n\n ratings = self.get_json('/explorehandler/rating/%s' % self.EXP_ID)\n self.assertEqual(ratings['user_rating'], None)\n self.assertEqual(\n ratings['overall_ratings'],\n {'1': 0, '2': 0, '3': 0, '4': 0, '5': 0})\n self.put_json(\n '/explorehandler/rating/%s' % self.EXP_ID, {\n 'user_rating': 1\n }, csrf_token, expected_status_int=401, expect_errors=True\n )", "def test_successful_article_rate_update(self):\n\n self.client.post(\n reverse('articles:rate', kwargs={'slug': self.slug}),\n {'rating': 4},\n format=\"json\",\n **self.headers)\n\n ratings_count = ArticleRating.objects.count()\n update_response = self.client.post(\n reverse('articles:rate', kwargs={'slug': self.slug}),\n {'rating': 5},\n format=\"json\",\n **self.headers)\n self.assertEqual(update_response.status_code, status.HTTP_201_CREATED)\n 
self.assertEqual(ArticleRating.objects.count(), ratings_count)", "def _testHostRating(self):\n\n try:\n user = auth.User.objects.all()[0]\n category = models.Category.objects.all()[0]\n host = models.Host(user=user, category=category,\n url='http://blah.com')\n host.save()\n\n comment = models.Comment(text='test', host=host)\n comment.save()\n\n types = models.RatingType.objects.all()\n\n items = []\n for value, type in zip([3, 4, 5], types):\n tmp_obj = models.Rating(comment=comment, type=type, value=value)\n tmp_obj.save()\n items.append(tmp_obj)\n\n assert comment.rating() - 4 < .0001, comment.rating()\n\n comment2 = models.Comment(text='test', host=host)\n comment2.save()\n\n for value, type in zip([3, 3, 3], types):\n tmp_obj = models.Rating(comment=comment2, type=type, value=value)\n tmp_obj.save()\n items.append(tmp_obj)\n\n assert comment2.rating() - 3.0 < .0001, comment2.rating()\n\n assert host.rating() == 3.5, host.rating()\n\n assert host.rating(100) == 70, host.rating(100)\n\n finally:\n\n try:\n for tmp_obj in items:\n tmp_obj.delete()\n\n comment.delete()\n comment2.delete()\n host.delete()\n except:\n pass", "def test_rate_game(self):\n url = reverse('rate-game')\n data = {\n 'igdb': self.game.igdb,\n 'name': self.game.name,\n 'slug': self.game.slug,\n 'cover_id': self.game.cover_id,\n 'backdrop_id': self.game.backdrop_id,\n 'rating': 4.5\n }\n response = self.client.post(url, data, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def update_comment_avg_score(self, commentID):\n self.cursor.execute(\"\"\"UPDATE comment SET avg_usefulness=(2*veryUseful+useful)/(veryUseful+useful+useless)\n WHERE commentID=%s\"\"\", (commentID,))\n self.db.commit()", "def with_rating(self):\n return self.annotate(\n rating=F('overall_posts_rating') * 10 + F('overall_comments_rating')\n )", "def test_update_review(self):\n\n user1 = User.objects.create_user('John')\n self.book.reviews.create(\n user=user1,\n rating=5,\n notes=\"It's so awesome\"\n )\n\n user2 = User.objects.create_user('Jane')\n review = self.book.reviews.create(\n user=user2,\n rating=4,\n notes=\"Love it\"\n )\n\n # update rating\n review.rating = 3\n review.save()\n\n # need to reload from database for updated rating value in book\n book = Book.objects.get(id=self.book.id)\n self.assertAlmostEqual(book.rating, 4)", "def vote(request, comment_id, vote):\n rating = {'up': 1, 'down': -1}.get(vote, False)\n if not rating:\n raise Http404, \"Invalid vote\"\n if request.user.is_anonymous():\n raise Http404, \"Anonymous users cannot vote\"\n try:\n comment = comments.get_object(pk=comment_id)\n except comments.CommentDoesNotExist:\n raise Http404, \"Invalid comment ID\"\n if comment.user_id == request.user.id:\n raise Http404, \"No voting for yourself\"\n karma.vote(request.user.id, comment_id, rating)\n # Reload comment to ensure we have up to date karma count\n comment = comments.get_object(pk=comment_id)\n return render_to_response('comments/karma_vote_accepted', {'comment': comment}, context_instance=DjangoContext(request))", "def test_dislike_a_comment(self):\n self.base_test()", "def rate_item(self, trans, user, item, rating, comment=''):\n item_rating = self.get_user_item_rating(trans.sa_session, user, item, webapp_model=trans.model)\n if not item_rating:\n # User has not yet rated item; create rating.\n item_rating_assoc_class = self._get_item_rating_assoc_class(item, webapp_model=trans.model)\n item_rating = item_rating_assoc_class()\n item_rating.user = trans.user\n 
item_rating.set_item(item)\n item_rating.rating = rating\n item_rating.comment = comment\n trans.sa_session.add(item_rating)\n trans.sa_session.flush()\n elif item_rating.rating != rating or item_rating.comment != comment:\n # User has previously rated item; update rating.\n item_rating.rating = rating\n item_rating.comment = comment\n trans.sa_session.add(item_rating)\n trans.sa_session.flush()\n return item_rating" ]
[ "0.70396376", "0.68224984", "0.6756108", "0.65874946", "0.6557403", "0.65549046", "0.64839643", "0.6482616", "0.64394224", "0.6431737", "0.63834304", "0.6381632", "0.63514405", "0.6258549", "0.62077975", "0.6185496", "0.613789", "0.61266005", "0.6110438", "0.61005175", "0.6096044", "0.60614306", "0.6047696", "0.6008323", "0.60050327", "0.59992003", "0.5955041", "0.59549636", "0.59541154", "0.5946679" ]
0.7833201
0
Test individual host rating
def _testHostRating(self):

    try:
        user = auth.User.objects.all()[0]
        category = models.Category.objects.all()[0]
        host = models.Host(user=user, category=category,
                           url='http://blah.com')
        host.save()

        comment = models.Comment(text='test', host=host)
        comment.save()

        types = models.RatingType.objects.all()

        items = []
        for value, type in zip([3, 4, 5], types):
            tmp_obj = models.Rating(comment=comment, type=type, value=value)
            tmp_obj.save()
            items.append(tmp_obj)

        assert comment.rating() - 4 < .0001, comment.rating()

        comment2 = models.Comment(text='test', host=host)
        comment2.save()

        for value, type in zip([3, 3, 3], types):
            tmp_obj = models.Rating(comment=comment2, type=type, value=value)
            tmp_obj.save()
            items.append(tmp_obj)

        assert comment2.rating() - 3.0 < .0001, comment2.rating()

        assert host.rating() == 3.5, host.rating()

        assert host.rating(100) == 70, host.rating(100)

    finally:
        try:
            for tmp_obj in items:
                tmp_obj.delete()

            comment.delete()
            comment2.delete()
            host.delete()
        except:
            pass
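The `host.rating(100) == 70` assertion above is consistent with the 5-point average being rescaled linearly to a caller-supplied maximum. A minimal sketch of that arithmetic, assuming a linear rescaling (the function below is an illustration, not the project's actual `Host.rating` implementation):

def rescaled_rating(average, scale=5):
    # Linearly map a 0-5 average onto a 0-`scale` range (illustrative assumption).
    return average / 5 * scale

# The two comments average 4.0 and 3.0, so the host averages 3.5 of 5,
# which rescales to 70 of 100 -- matching host.rating(100) == 70.
host_average = (4.0 + 3.0) / 2
assert rescaled_rating(host_average) == 3.5
assert rescaled_rating(host_average, 100) == 70.0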
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_connection_score(self, current_connection):\n orange_score = int(Setup.orange_score)\n red_score = int(Setup.red_score)\n if Setup.system_status == 'orange':\n if current_connection['score'] <= int(orange_score):\n self.block_ip_address(current_connection['ip_address'])\n elif Setup.system_status == 'red':\n if current_connection['score'] <= red_score:\n self.block_ip_address(current_connection['ip_address'])", "def test_get_dealer_ratings(self):\n pass", "def test_ratings_by_different_users(self):\n\n self.signup('[email protected]', 'a')\n self.signup('[email protected]', 'b')\n\n self.login('[email protected]')\n csrf_token = self.get_csrf_token_from_response(\n self.testapp.get('/explore/%s' % self.EXP_ID))\n self.put_json(\n '/explorehandler/rating/%s' % self.EXP_ID, {\n 'user_rating': 4\n }, csrf_token\n )\n self.logout()\n\n self.login('[email protected]')\n csrf_token = self.get_csrf_token_from_response(\n self.testapp.get('/explore/%s' % self.EXP_ID))\n ratings = self.get_json('/explorehandler/rating/%s' % self.EXP_ID)\n self.assertEqual(ratings['user_rating'], None)\n self.put_json(\n '/explorehandler/rating/%s' % self.EXP_ID, {\n 'user_rating': 4\n }, csrf_token\n )\n ratings = self.get_json('/explorehandler/rating/%s' % self.EXP_ID)\n self.assertEqual(ratings['user_rating'], 4)\n self.assertEqual(\n ratings['overall_ratings'],\n {'1': 0, '2': 0, '3': 0, '4': 2, '5': 0})\n self.logout()", "def do_score(self, hosts, vm, args):\n try:\n hostScores = []\n # use hosts IDs and VM ID to call the Rest API and make a decision\n for hostID in hosts:\n # Do work\n hostScores.append((hostID, 50))\n print(hostScores)\n except Exception as ex:\n print(ex, file=sys.stderr)", "def host_reputation(host):\n # Create the required data dictionary for Host/Reputation\n api_data = {\n 'host': host\n }\n response = http_request(endpoint=HOST_REPUTE_API, data=api_data)\n\n if response.get('errorNo') != 0:\n return_error('API Returned, {}:{}'.format(response.get('errorNo'), response.get('errorMsg')))\n\n return response", "def test(cls, hostname):\n pass", "def test_website_rating(properties, pos, neg, sum_pos, sum_neg, sum_total):\n site = models.Website(**properties)\n\n scores = site.scores\n assert pos == sum(scores['positive'].itervalues())\n assert neg == sum(scores['negative'].itervalues())\n\n ratings = site.ratings\n assert sum_pos == ratings.positive\n assert sum_neg == ratings.negative\n assert sum_total == ratings.total", "def usage(self, host):", "def goodmorning(host):", "def test_assign_and_read_ratings(self):\n\n self.signup('[email protected]', 'user')\n self.login('[email protected]')\n csrf_token = self.get_csrf_token_from_response(\n self.testapp.get('/explore/%s' % self.EXP_ID))\n\n # User checks rating\n ratings = self.get_json('/explorehandler/rating/%s' % self.EXP_ID)\n self.assertEqual(ratings['user_rating'], None)\n self.assertEqual(\n ratings['overall_ratings'],\n {'1': 0, '2': 0, '3': 0, '4': 0, '5': 0})\n\n # User rates and checks rating\n self.put_json(\n '/explorehandler/rating/%s' % self.EXP_ID, {\n 'user_rating': 2\n }, csrf_token\n )\n ratings = self.get_json('/explorehandler/rating/%s' % self.EXP_ID)\n self.assertEqual(ratings['user_rating'], 2)\n self.assertEqual(\n ratings['overall_ratings'],\n {'1': 0, '2': 1, '3': 0, '4': 0, '5': 0})\n\n # User re-rates and checks rating\n self.login('[email protected]')\n self.put_json(\n '/explorehandler/rating/%s' % self.EXP_ID, {\n 'user_rating': 5\n }, csrf_token\n )\n ratings = 
self.get_json('/explorehandler/rating/%s' % self.EXP_ID)\n self.assertEqual(ratings['user_rating'], 5)\n self.assertEqual(\n ratings['overall_ratings'],\n {'1': 0, '2': 0, '3': 0, '4': 0, '5': 1})\n\n self.logout()", "def test_alt_host_perfdata(self, aggregator):\n self.log_file = tempfile.NamedTemporaryFile()\n perfdata_file = tempfile.NamedTemporaryFile()\n\n # Get the config\n config, _ = get_config(\n \"host_perfdata_file={}\\n\"\n \"host_perfdata_file_template={}\".format(perfdata_file.name, NAGIOS_TEST_ALT_HOST_TEMPLATE),\n host_perf=True,\n )\n\n # Set up the check\n nagios = NagiosCheck(CHECK_NAME, {}, config['instances'])\n\n # Run the check once\n nagios.check(config['instances'][0])\n\n with open(NAGIOS_TEST_HOST, \"r\") as f:\n nagios_perf = ensure_bytes(f.read())\n\n perfdata_file.write(nagios_perf)\n perfdata_file.flush()\n\n nagios.check(config['instances'][0])\n\n # Test metrics\n expected_metrics = [\n {\n 'name': 'nagios.host.pl',\n 'timestamp': 1339511440,\n 'value': 0.0,\n 'hostname': 'localhost',\n 'tags': ['unit:%', 'warn:80', 'crit:100', 'min:0'],\n },\n {\n 'name': 'nagios.host.rta',\n 'timestamp': 1339511440,\n 'value': 0.048,\n 'hostname': 'localhost',\n 'tags': ['unit:ms', 'warn:3000.000000', 'crit:5000.000000', 'min:0.000000'],\n },\n ]\n\n for metric in expected_metrics:\n aggregator.assert_metric(metric['name'], metric['value'], tags=metric['tags'], hostname=metric['hostname'])\n\n aggregator.assert_all_metrics_covered()", "def test_rate_game(self):\n url = reverse('rate-game')\n data = {\n 'igdb': self.game.igdb,\n 'name': self.game.name,\n 'slug': self.game.slug,\n 'cover_id': self.game.cover_id,\n 'backdrop_id': self.game.backdrop_id,\n 'rating': 4.5\n }\n response = self.client.post(url, data, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_perform_host_action(self):\n pass", "def main():\n module = AnsibleModule(\n argument_spec=dict(\n host=dict(type='str', required=True),\n destination=dict(type='str', required=True),\n repeat_count=dict(type='int', default=5),\n vrf_name=dict(type='str'),\n min_success_rate=dict(type='int', default=100)\n ),\n supports_check_mode=True\n )\n\n if module.check_mode:\n module.exit_json(changed=False)\n\n try:\n retvals = ping(module.params['host'],\n module.params['destination'],\n module.params['repeat_count'],\n module.params['vrf_name'])\n except Exception as exc:\n module.fail_json(msg='Reachability validation failed ({})'.format(exc))\n\n retvals['changed'] = False\n\n if retvals['success_rate'] >= module.params['min_success_rate']:\n module.exit_json(**retvals)\n else:\n module.fail_json(msg=('Success rate lower than expected ({}<{})').\n format(retvals['success_rate'],\n module.params['min_success_rate']))", "def test_alt_host_perfdata(self, aggregator):\n self.log_file = tempfile.NamedTemporaryFile()\n perfdata_file = tempfile.NamedTemporaryFile()\n\n # Get the config\n config, _ = get_config(\n \"host_perfdata_file={}\\n\"\n \"host_perfdata_file_template={}\".format(perfdata_file.name, NAGIOS_TEST_ALT_HOST_TEMPLATE),\n host_perf=True\n )\n\n # Set up the check\n nagios = NagiosCheck(CHECK_NAME, {}, {}, config['instances'])\n nagios.get_topology = mocked_topology\n\n # Run the check once\n nagios.check(config['instances'][0])\n\n with open(NAGIOS_TEST_HOST, \"r\") as f:\n nagios_perf = ensure_string(f.read())\n\n perfdata_file.write(nagios_perf)\n perfdata_file.flush()\n\n nagios.check(config['instances'][0])\n\n # Test metrics\n expected_metrics = [\n {\n 'name': 
'nagios.host.pl',\n 'timestamp': 1339511440,\n 'value': 0.0,\n 'hostname': 'localhost',\n 'tags': ['unit:%', 'warn:80', 'crit:100', 'min:0'],\n },\n {\n 'name': 'nagios.host.rta',\n 'timestamp': 1339511440,\n 'value': 0.048,\n 'hostname': 'localhost',\n 'tags': ['unit:ms', 'warn:3000.000000', 'crit:5000.000000', 'min:0.000000'],\n },\n ]\n\n for metric in expected_metrics:\n aggregator.assert_metric(metric['name'], metric['value'], tags=metric['tags'], hostname=metric['hostname'])\n\n aggregator.assert_all_metrics_covered()", "def test_get_host_configuration_metrics(self):\n pass", "def test_check_cost():", "def test_connection(self):\n\n self.speed_test()\n if self.runs >= self.min_runs:\n self.lg.debug('Minimum number of speed tests performed.')\n self.check_performance()\n if self.bad_performance:\n self.lg.debug('Performance is below tolerance level.')\n self.notify_ISP()\n self.results_up.pop(0)\n self.results_down.pop(0)\n self.results_timestamp.pop(0)\n self.runs += 1", "def test_retire_rate_plan(self):\n pass", "def test_get_rating(self):\n url = reverse('rate-game')\n data = {'igdb': self.game.igdb}\n response = self.client.get(url, data, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def _testRatingCategories(self):\n\n\n try:\n user = auth.User.objects.all()[0]\n category = models.Category.objects.all()[0]\n host = models.Host(user=user, category=category,\n url='http://blah.com')\n host.save()\n\n comment = models.Comment(text='test', host=host)\n comment.save()\n\n types = models.RatingType.objects.all()\n\n items = []\n for value, type in zip([3, 4, 5], types):\n tmp_obj = models.Rating(comment=comment, type=type, value=value)\n tmp_obj.save()\n items.append(tmp_obj)\n\n assert comment.rating() - 4.0 < .0001, comment.rating()\n\n comment2 = models.Comment(text='test', host=host)\n comment2.save()\n\n for value, type in zip([3, 3, 3], types):\n tmp_obj = models.Rating(comment=comment2, type=type, value=value)\n tmp_obj.save()\n items.append(tmp_obj)\n\n assert comment2.rating() - 3.0 < .0001, comment2.rating()\n\n assert host.rating() == 3.5, host.rating()\n\n ratings = host.ratings()\n assert ratings['Support'] == 3.5, ratings\n assert ratings['Features'] == 3.0\n assert ratings['Uptime'] == 4.0\n\n finally:\n try:\n for tmp_obj in items:\n tmp_obj.delete()\n \n comment.delete()\n comment2.delete()\n host.delete()\n except:\n pass", "def test_unpopular(self):\n self.assertFalse(self.user3.is_popular())\n self.user3.receive_upvotes(randint(101, 10000))\n self.assertTrue(self.user3.is_popular())", "def check(self,hostNames,getMetricValueForHost):\r\n \r\n #get all hosts which contributed to the metric\r\n filteredHostNames=set()\r\n for metricHostName in self.metricHostNames:\r\n filteredHostNames=filteredHostNames.union(\r\n set(fnmatch.filter(hostNames,metricHostName)))\r\n \r\n #combine values form the contributing hosts\r\n if self.metricCombine==\"max\":\r\n combinedMetric=-1.0*sys.float_info.max\r\n elif self.metricCombine==\"min\":\r\n combinedMetric=sys.float_info.max\r\n elif self.metricCombine==\"sum\" or metricCombine==\"ave\":\r\n combinedMetric=0.0\r\n if self.metricCombine==\"ave\":\r\n count=0\r\n for hostName in filteredHostNames:\r\n value=getMetricValueForHost(self.metricName,hostName)\r\n print(\"hostName=\",hostName)\r\n print(\"self.metricName=\",self.metricName)\r\n print(\"value=\",value)\r\n if value!=None:\r\n if self.metricCombine==\"max\":\r\n if float(value)>combinedMetric:\r\n combinedMetric=float(value)\r\n elif 
self.metricCombine==\"min\":\r\n if float(value)<combinedMetric:\r\n combinedMetric=float(value)\r\n elif self.metricCombine==\"sum\" or metricCombine==\"ave\":\r\n combinedMetric=combinedMetric+float(value)\r\n if self.metricCombine==\"ave\":\r\n count+=1\r\n if self.metricCombine==\"ave\":\r\n combinedMetric=combinedMetric/float(count)\r\n \r\n #test for triggering an action\r\n print(\"checking \"+self.metricCombine+\" of metric \\\"\"+self.metricName\r\n +\"\\\"=\"+str(combinedMetric)+\" is \"+self.comparison+\" reference=\"\r\n +str(self.reference)+\" ...\")\r\n if self.comparison==\"lt\":\r\n if combinedMetric < self.reference:\r\n return True\r\n elif self.comparison==\"gt\":\r\n if combinedMetric > self.reference:\r\n return True\r\n elif self.comparison==\"eq\":\r\n if combinedMetric == self.reference:\r\n return True\r\n elif self.comparison==\"ne\":\r\n if combinedMetric != self.reference:\r\n return True\r\n elif self.comparison==\"le\":\r\n if combinedMetric <= self.reference:\r\n return True\r\n elif self.comparison==\"ge\":\r\n if combinedMetric >= self.reference:\r\n return True\r\n return False", "def test_average_rating(self):\n self.new_project.save()\n\n review1 = Review.objects.create(project = self.new_project, user = self.new_user, design = 8, usability = 5, content = 9, comment = 'This is a nice website.')\n\n review2 = Review.objects.create(project = self.new_project, user = self.new_user, design = 6, usability = 5, content = 3, comment = 'This is a nice website.')\n\n self.assertEqual(self.new_project.average_rating, 6.0)", "def test_ping_host1(self, chirouter_runner):\n chirouter_runner.start_mininet(\"3router.json\")\n mn = chirouter_runner.mininet\n\n ping = chirouter_runner.ping(\"host4\", \"10.1.0.42\", count=4)\n\n ping.validate_output_success(num_expected=4, expected_source=\"10.1.0.42\")", "def test_ping_host4(self, chirouter_runner):\n chirouter_runner.start_mininet(\"3router.json\")\n mn = chirouter_runner.mininet\n\n ping = chirouter_runner.ping(\"host1\", \"10.4.0.42\", count=4)\n\n ping.validate_output_success(num_expected=4, expected_source=\"10.4.0.42\")", "def test_mirror_ranking(self):\n updater = AptMirrorUpdater()\n # Make sure that multiple discovered mirrors are available.\n assert sum(m.is_available for m in updater.ranked_mirrors) > 10", "def test_get_host_configuration_metrics1(self):\n pass", "def rating_security(addon: AddonModel) -> int:\n rating = 5\n\n # AppArmor\n if addon.apparmor == SECURITY_DISABLE:\n rating += -1\n elif addon.apparmor == SECURITY_PROFILE:\n rating += 1\n\n # Home Assistant Login & Ingress\n if addon.with_ingress:\n rating += 2\n elif addon.access_auth_api:\n rating += 1\n\n # Signed\n if addon.signed:\n rating += 1\n\n # Privileged options\n if (\n any(\n privilege in addon.privileged\n for privilege in (\n Capabilities.BPF,\n Capabilities.DAC_READ_SEARCH,\n Capabilities.NET_ADMIN,\n Capabilities.NET_RAW,\n Capabilities.PERFMON,\n Capabilities.SYS_ADMIN,\n Capabilities.SYS_MODULE,\n Capabilities.SYS_PTRACE,\n Capabilities.SYS_RAWIO,\n )\n )\n or addon.with_kernel_modules\n ):\n rating += -1\n\n # API Supervisor role\n if addon.hassio_role == ROLE_MANAGER:\n rating += -1\n elif addon.hassio_role == ROLE_ADMIN:\n rating += -2\n\n # Not secure Networking\n if addon.host_network:\n rating += -1\n\n # Insecure PID namespace\n if addon.host_pid:\n rating += -2\n\n # UTS host namespace allows to set hostname only with SYS_ADMIN\n if addon.host_uts and Capabilities.SYS_ADMIN in addon.privileged:\n rating += -1\n\n # 
Docker Access & full Access\n if addon.access_docker_api or addon.with_full_access:\n rating = 1\n\n return max(min(8, rating), 1)", "def test_host_perfdata(self, aggregator):\n self.log_file = tempfile.NamedTemporaryFile()\n\n # Get the config\n config, _ = get_config(\n \"host_perfdata_file={}\\n\"\n \"host_perfdata_file_template={}\".format(self.log_file.name, NAGIOS_TEST_HOST_TEMPLATE),\n host_perf=True\n )\n\n # Set up the check\n nagios = NagiosCheck(CHECK_NAME, {}, {}, config['instances'])\n nagios.get_topology = mocked_topology\n\n # Run the check once\n nagios.check(config['instances'][0])\n\n # Write content to log file and run check\n self._write_log('\\t'.join(self.HOST_LOG_DATA))\n nagios.check(config['instances'][0])\n\n # Test metric\n for metric_data in self.HOST_LOG_SERVICEPERFDATA:\n name, info = metric_data.split(\"=\")\n metric_name = \"nagios.host.\" + name\n\n values = info.split(\";\")\n\n index = values[0].find(\"ms\") if values[0].find(\"ms\") != -1 else values[0].find(\"%\")\n index = len(values[0]) - index\n value = float(values[0][:-index])\n expected_tags = ['unit:' + values[0][-index:]]\n if len(values) == 4:\n expected_tags.append('warn:' + values[1])\n expected_tags.append('crit:' + values[2])\n expected_tags.append('min:' + values[3])\n\n aggregator.assert_metric(metric_name, value=value, tags=expected_tags, count=1)\n\n aggregator.assert_all_metrics_covered()" ]
[ "0.6140125", "0.60586387", "0.5988735", "0.5964071", "0.5828688", "0.58104444", "0.5805761", "0.5743598", "0.55654544", "0.55423003", "0.54669577", "0.5444215", "0.5438256", "0.5420251", "0.54023296", "0.5397459", "0.5385342", "0.5385126", "0.5348706", "0.5313948", "0.530791", "0.5289831", "0.5242377", "0.5234552", "0.52195823", "0.52176017", "0.51705843", "0.5170108", "0.51699704", "0.5164476" ]
0.72438645
0
Test the different rating categories
def _testRatingCategories(self):

    try:
        user = auth.User.objects.all()[0]
        category = models.Category.objects.all()[0]
        host = models.Host(user=user, category=category,
                           url='http://blah.com')
        host.save()

        comment = models.Comment(text='test', host=host)
        comment.save()

        types = models.RatingType.objects.all()

        items = []
        for value, type in zip([3, 4, 5], types):
            tmp_obj = models.Rating(comment=comment, type=type, value=value)
            tmp_obj.save()
            items.append(tmp_obj)

        assert comment.rating() - 4.0 < .0001, comment.rating()

        comment2 = models.Comment(text='test', host=host)
        comment2.save()

        for value, type in zip([3, 3, 3], types):
            tmp_obj = models.Rating(comment=comment2, type=type, value=value)
            tmp_obj.save()
            items.append(tmp_obj)

        assert comment2.rating() - 3.0 < .0001, comment2.rating()

        assert host.rating() == 3.5, host.rating()

        ratings = host.ratings()
        assert ratings['Support'] == 3.5, ratings
        assert ratings['Features'] == 3.0
        assert ratings['Uptime'] == 4.0

    finally:
        try:
            for tmp_obj in items:
                tmp_obj.delete()

            comment.delete()
            comment2.delete()
            host.delete()
        except:
            pass
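The expected per-category values in this test follow from averaging each rating type across the two comments. A short worked sketch of that arithmetic (the ordering Features, Support, Uptime is inferred from the assertions, not stated in the code; everything below is illustrative):

comment1 = [3, 4, 5]  # values zipped against the rating types, in order
comment2 = [3, 3, 3]
types = ['Features', 'Support', 'Uptime']  # order implied by the assertions

# Per-category rating: mean of the two comments' values for that type.
per_category = {t: (a + b) / 2 for t, a, b in zip(types, comment1, comment2)}
assert per_category == {'Features': 3.0, 'Support': 3.5, 'Uptime': 4.0}

# Overall host rating: mean of the two comment means, (4.0 + 3.0) / 2 = 3.5.
assert (sum(comment1) / 3 + sum(comment2) / 3) / 2 == 3.5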
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_cat_score(self):\n classes = ['blue skin', 'pointy ears']\n negated_classes = []\n categories = ['ear feature', 'skin feature']\n\n categorical_score = self.annot_scorer._get_categorical_score(\n classes, negated_classes, categories,\n self.negation_weight, self.mock_ic_values\n )\n\n assert categorical_score == 0.7002519289078384", "def test_category(self):\n # XXX identifiers would be groovy\n self.check_search(\n dict(category=u'36:self'), # trap\n [u'Ingrain'],\n 'simple category search, vs self',\n exact=True,\n )\n self.check_search(\n dict(category=u'14:target'), # protect\n [u'Conversion 2', u'False Swipe'],\n 'simple category search, vs target',\n exact=True,\n )\n\n # Multiple categories\n # sleep OR attack up\n self.check_search(\n dict(category=[u'29:self', u'15:target'], category_operator=u'any'),\n [u'Rest', u'Swagger'],\n 'multiple category search (OR)',\n exact=True,\n )\n\n # sleep AND heal self\n self.check_search(\n dict(category=[u'29:self', u'13:self'], category_operator=u'all'),\n [u'Rest'],\n 'multiple category search (AND)',\n exact=True,\n )", "def test_get_cat_score_w_negation(self):\n classes = ['blue skin', 'pointy ears']\n negated_classes = ['large ears', 'increased pigmentation']\n\n categories = ['ear feature', 'skin feature']\n\n categorical_score = self.annot_scorer._get_categorical_score(\n classes, negated_classes, categories,\n self.negation_weight, self.mock_ic_values\n )\n\n assert categorical_score == 0.7201759238096741", "def test_get_dealer_ratings(self):\n pass", "def test_ratings_by_different_users(self):\n\n self.signup('[email protected]', 'a')\n self.signup('[email protected]', 'b')\n\n self.login('[email protected]')\n csrf_token = self.get_csrf_token_from_response(\n self.testapp.get('/explore/%s' % self.EXP_ID))\n self.put_json(\n '/explorehandler/rating/%s' % self.EXP_ID, {\n 'user_rating': 4\n }, csrf_token\n )\n self.logout()\n\n self.login('[email protected]')\n csrf_token = self.get_csrf_token_from_response(\n self.testapp.get('/explore/%s' % self.EXP_ID))\n ratings = self.get_json('/explorehandler/rating/%s' % self.EXP_ID)\n self.assertEqual(ratings['user_rating'], None)\n self.put_json(\n '/explorehandler/rating/%s' % self.EXP_ID, {\n 'user_rating': 4\n }, csrf_token\n )\n ratings = self.get_json('/explorehandler/rating/%s' % self.EXP_ID)\n self.assertEqual(ratings['user_rating'], 4)\n self.assertEqual(\n ratings['overall_ratings'],\n {'1': 0, '2': 0, '3': 0, '4': 2, '5': 0})\n self.logout()", "def test_categories_in_porducts(self):\n response = self.client.post('api/v1/category/products',\n data=json.dumps(category_product[0]),\n content_type='application/json',\n headers=self.attendant_headers)\n response = self.client.get('api/v1/category/products',\n headers=self.attendant_headers)\n self.assertEqual(response.status_code, 200)\n self.assertIn('shirt', str(response.data))", "def test_success(self):\n disposable_under_min = Disposable.objects.create(name=self.DISPOSABLE_NAME + '_1')\n disposable_over_min = Disposable.objects.create(name=self.DISPOSABLE_NAME + '_2')\n category_1 = Category.objects.create(name=self.CATEGORY_NAME + '_1')\n category_2 = Category.objects.create(name=self.CATEGORY_NAME + '_2')\n votes = [\n (disposable_under_min, category_1, settings.MIN_NORMALIZE_COUNT/100),\n (disposable_under_min, category_2, settings.MIN_NORMALIZE_COUNT/50),\n (disposable_over_min, category_1, settings.MIN_NORMALIZE_COUNT),\n (disposable_over_min, category_2, settings.MIN_NORMALIZE_COUNT*3)\n ]\n 
self.make_votes(votes)\n\n # test when total votes is less than settings.MIN_NORMALIZE_COUNT\n votes_under = DisposableVote.objects.filter(disposable=disposable_under_min)\n tuples_under = votes_to_percentages(votes_under)\n expected_under = [(category_2.name, settings.MIN_NORMALIZE_COUNT/50),\n (category_1.name, settings.MIN_NORMALIZE_COUNT/100)]\n self.assertEqual(expected_under, tuples_under)\n # test when total votes is greater than settings.MIN_NORMALIZE_COUNT\n votes_over = DisposableVote.objects.filter(disposable=disposable_over_min)\n tuples_over = votes_to_percentages(votes_over)\n expected_over = [(category_2.name, 3/4*100), (category_1.name, 1/4*100)]\n self.assertEqual(expected_over, tuples_over)", "def test_show_rating(self):\n self.assertEqual(self.show.rating, None)", "def test_photo_classification_view_set_post_category_not_allowed(self):\n # Test data\n user = account_models.User.objects.get(email='[email protected]')\n\n # Simulate auth\n token = test_helpers.get_token_for_user(user)\n\n # Get data from endpoint\n client = APIClient()\n client.credentials(HTTP_AUTHORIZATION='Token ' + token)\n\n payload = {\n 'name': 'Night',\n 'classification_type': 'category'\n }\n\n request = client.post('/api/photo_classifications', data=payload, format='json')\n\n self.assertEquals(request.status_code, 400)\n\n # Query for entry as well\n classifications = photo_models.PhotoClassification.objects.all()\n\n self.assertEquals(len(classifications), 11)", "def test_getting_rating_label_to_search(self):\n\n rating = get_rating_label_to_search(\"Hunger Rating\")\n self.assertEqual(rating, Post.hunger)", "def processed_stars(test=False,\n categories=('books', 'dvd', 'electronics', 'kitchen')):\n\n if isinstance(categories, str):\n categories = [categories]\n\n # loop over each category and extract features and labels per line\n # append these to the final\n labeled_features = []\n for category in categories:\n # open the relevant file, either train or test\n file = f'./processed_stars/{category}/'\n if not test:\n file += 'train'\n elif test:\n file += 'test'\n with open(file, encoding='utf-8') as f:\n raw = f.read()\n # one document per line, so split into lines\n reviews = raw.split('\\n')\n # extract features and their counts for each line\n features = [{ftr[0].strip(): int(ftr[1])\n for ftr in re.findall(r'(.*?(?<!#label#)):(\\d)', line)}\n for line in reviews]\n # extract all labels\n labels = re.findall(r'#label#:(\\d+.\\d+)', raw)\n # zip the features list and labels into tuples and add to final list\n labeled_features += [(f_set, float(label))\n for f_set, label in zip(features, labels)]\n\n return labeled_features", "def lookup_relevant(score):\n category = \"\"\n if score > 2.0:\n category = \"RELEVANT\"\n elif score > 0.0:\n category = \"PARTIALLY RELEVANT\"\n else:\n category = \"NOT RELEVANT\"\n return category", "def test_extract_categories():\n pass", "def test_get_categories(self):\n pass", "def get_category_ratings(self):\n category_ratings = dict()\n for cat_rating in self.category_ratings.all():\n category_ratings[cat_rating.category.name] = cat_rating.rating\n return category_ratings", "def test_category(self):\n\n # Test empty categories\n self.assertFalse(self.colorspace.hasCategory('ocio'))\n self.assertEqual(len(self.colorspace.getCategories()), 0)\n with self.assertRaises(IndexError):\n self.colorspace.getCategories()[0]\n\n # Test with defined TEST_CATEGORIES.\n for i, y in enumerate(TEST_CATEGORIES):\n self.assertEqual(len(self.colorspace.getCategories()), i)\n 
self.colorspace.addCategory(y)\n self.assertTrue(self.colorspace.hasCategory(y))\n\n # Test the output list is equal to TEST_CATEGORIES.\n self.assertListEqual(\n list(self.colorspace.getCategories()), TEST_CATEGORIES)\n\n # Test the length of list is equal to the length of TEST_CATEGORIES.\n self.assertEqual(len(self.colorspace.getCategories()),\n len(TEST_CATEGORIES))\n\n iterator = self.colorspace.getCategories()\n for a in TEST_CATEGORIES:\n self.assertEqual(a, next(iterator))\n\n # Test the length of categories is zero after clearCategories()\n self.colorspace.clearCategories()\n self.assertEqual(len(self.colorspace.getCategories()), 0)\n\n # Testing individually adding and removing a category.\n self.colorspace.addCategory(TEST_CATEGORIES[0])\n self.assertEqual(len(self.colorspace.getCategories()), 1)\n self.colorspace.removeCategory(TEST_CATEGORIES[0])\n self.assertEqual(len(self.colorspace.getCategories()), 0)", "def test_isNumericCategory(self):\r\n obs = self.overview_map.isNumericCategory('Treatment')\r\n self.assertEqual(obs, False)\r\n\r\n obs = self.overview_map.isNumericCategory('DOB')\r\n self.assertEqual(obs, True)", "def test_isNumericCategory(self):\n obs = self.overview_map.isNumericCategory('Treatment')\n self.assertEqual(obs, False)\n\n obs = self.overview_map.isNumericCategory('DOB')\n self.assertEqual(obs, True)", "def k(rating):\n if rating < 100: return 40\n if rating < 200: return 20\n return 10", "def test_getCategoryValues(self):\r\n smpl_ids = ['PC.354', 'PC.355', 'PC.356', 'PC.481', 'PC.593', 'PC.607',\r\n 'PC.634', 'PC.635', 'PC.636']\r\n\r\n exp = [\r\n 'Control',\r\n 'Control',\r\n 'Control',\r\n 'Control',\r\n 'Control',\r\n 'Fast',\r\n 'Fast',\r\n 'Fast',\r\n 'Fast']\r\n obs = self.overview_map.getCategoryValues(smpl_ids, 'Treatment')\r\n self.assertEqual(obs, exp)", "def test_show_rating(self):\n self.assertTrue(isinstance(self.show.rating, float))", "def test_get_category_value_to_sample_ids(self):\r\n test_data = get_test_data()\r\n actual = get_category_value_to_sample_ids(\r\n test_data['map'],\r\n 'SampleType')\r\n expected = {'feces': ['f1', 'f2', 'f3', 'f4', 'f5', 'f6'],\r\n 'L_palm': ['p1', 'p2'],\r\n 'Tongue': ['t1', 't2'],\r\n 'Other': ['not16S.1']}\r\n self.assertEqual(actual, expected)\r\n\r\n actual = get_category_value_to_sample_ids(test_data['map'], 'year')\r\n expected = {'2008': ['f1', 'f2', 'f3', 'f4', 'f5', 'f6',\r\n 'p1', 'p2', 't1', 't2', 'not16S.1']}\r\n self.assertEqual(actual, expected)\r\n\r\n self.assertRaises(ValueError,\r\n get_category_value_to_sample_ids,\r\n test_data['map'],\r\n 'not.a.real.category')", "def categorize(seeds):\n global category_distances\n attr_by_category = {c: [] for c in seeds}\n \n distance = lambda a, c: min(distances[seed][a] for seed in seeds[c])\n for attr in attrs:\n for c in seeds:\n category_distances[attr][c] = distance(attr, c)\n\n (score, category) = min_argmin(\n lambda c: distance(attr, c), seeds)\n attr_by_category[category].append((attr, score))\n\n return {c: sorted(attr_by_category[c], key=itemgetter(1))\n for c in attr_by_category}", "def test_category_mixed(self):\n self.go200('minus_upload')\n self.formfile('minus_upload', 'file', AUDIO_FILE)\n self.fv('minus_upload', 'categories', 'onecat')\n self.fv('minus_upload', 'add_category', 'yuppie')\n self.submit200()\n minus = MinusRecord.objects.all()[0]\n self.assert_equal(minus.categories.count(), 2)\n self.assert_equal(minus.categories.all()[0].name, 'onecat')\n self.assert_equal(minus.categories.all()[1].name, 'yuppie')", "def 
_get_rating(snippet_html, category):\n attr = 'rating-container-{category}'.format(category=category)\n ratings_table_html = snippet_html.find('td', 'listingratings')\n category_html = ratings_table_html.find('div', attr)\n return int(list(list(category_html.children)[1])[0])", "def test_init_ratings():\n env = FixedRating(num_users=50,\n num_items=50,\n rating_frequency=1.0,\n num_init_ratings=100)\n env.seed(0)\n _, _, ratings = env.reset()\n assert len(ratings) == 100\n for (user_id, item_id), (rating, context) in ratings.items():\n assert context.shape == (0,)\n assert user_id < 50\n assert item_id < 50\n if rating == 5.0:\n assert item_id >= 25\n else:\n assert item_id < 25", "def test_assign_and_read_ratings(self):\n\n self.signup('[email protected]', 'user')\n self.login('[email protected]')\n csrf_token = self.get_csrf_token_from_response(\n self.testapp.get('/explore/%s' % self.EXP_ID))\n\n # User checks rating\n ratings = self.get_json('/explorehandler/rating/%s' % self.EXP_ID)\n self.assertEqual(ratings['user_rating'], None)\n self.assertEqual(\n ratings['overall_ratings'],\n {'1': 0, '2': 0, '3': 0, '4': 0, '5': 0})\n\n # User rates and checks rating\n self.put_json(\n '/explorehandler/rating/%s' % self.EXP_ID, {\n 'user_rating': 2\n }, csrf_token\n )\n ratings = self.get_json('/explorehandler/rating/%s' % self.EXP_ID)\n self.assertEqual(ratings['user_rating'], 2)\n self.assertEqual(\n ratings['overall_ratings'],\n {'1': 0, '2': 1, '3': 0, '4': 0, '5': 0})\n\n # User re-rates and checks rating\n self.login('[email protected]')\n self.put_json(\n '/explorehandler/rating/%s' % self.EXP_ID, {\n 'user_rating': 5\n }, csrf_token\n )\n ratings = self.get_json('/explorehandler/rating/%s' % self.EXP_ID)\n self.assertEqual(ratings['user_rating'], 5)\n self.assertEqual(\n ratings['overall_ratings'],\n {'1': 0, '2': 0, '3': 0, '4': 0, '5': 1})\n\n self.logout()", "def test_getCategoryValues(self):\n smpl_ids = ['PC.354', 'PC.355', 'PC.356', 'PC.481', 'PC.593', 'PC.607',\n 'PC.634', 'PC.635', 'PC.636']\n\n exp = ['Control','Control','Control','Control','Control','Fast'\n ,'Fast','Fast','Fast']\n obs = self.overview_map.getCategoryValues(smpl_ids, 'Treatment')\n self.assertEqual(obs, exp)", "def categorize_attributes():\n global attr_categories, seeds\n print \"Generating seeds...\"\n seeds = get_seeds()\n\n print \"Categorizing attributes...\"\n categorized = categorize(seeds)\n \n category_distances = {}\n attr_categories = {}\n for c in categorized:\n for (attr, score) in categorized[c]:\n attr_categories[attr] = c\n category_distances[attr] = score", "def test_compare_categories_categorical_variables(self):\r\n for method in self.cat_methods:\r\n compare_categories(self.dm1_fp, self.map1_fp, method,\r\n self.cat_categories, self.num_perms, self.test_dir)\r\n results_fp = join(self.test_dir, '%s_results.txt' % method)\r\n self.files_to_remove.append(results_fp)\r\n results_f = open(results_fp, 'U')\r\n results = results_f.readlines()\r\n results_f.close()\r\n\r\n # Make sure the files aren't empty.\r\n self.assertTrue(len(results) > 0)" ]
[ "0.67347944", "0.6602649", "0.6267001", "0.62622076", "0.588277", "0.5818572", "0.57982117", "0.5775886", "0.5757851", "0.57468426", "0.5741155", "0.57397753", "0.5724523", "0.57127404", "0.5693196", "0.5673478", "0.56529504", "0.56025386", "0.5571396", "0.5566037", "0.5560757", "0.5560623", "0.55343187", "0.5533232", "0.5533207", "0.55230886", "0.5516195", "0.5510721", "0.5494801", "0.5489116" ]
0.7431953
0
Hit a BJcard and append it. Then, find all possible sums and the current hand. The current hand is defined as the max of the possible sums. The current hand should be -1 if burst.
def hit(self, card):
    self.append(card)
    values = []
    values.append(card.value())
    if values[0] < 2:
        values.append(values[0] + 10)
    new_sums = set([v + s for v in values for s in self.possible_sums if v + s <= 21])
    new_sums = sorted(new_sums)
    if len(new_sums) == 0:
        self.hand = -1
    else:
        self.hand = new_sums[-1]
    self.possible_sums = new_sums
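Why a set of possible sums instead of a single running total: a card with value below 2 (an ace) may count as either 1 or 11, so each hit can branch the hand total, and totals over 21 are pruned. A self-contained sketch of the same bookkeeping (a standalone function with hypothetical names, not the class above):

def hit_sums(possible_sums, card_value):
    """Update the set of reachable hand totals after drawing a card.

    An ace (value 1) contributes either 1 or 11; totals above 21 are discarded.
    Returns the new sorted totals and the current hand (max total, or -1 on burst).
    """
    values = [card_value] + ([card_value + 10] if card_value < 2 else [])
    new_sums = sorted({v + s for v in values for s in possible_sums if v + s <= 21})
    hand = new_sums[-1] if new_sums else -1
    return new_sums, hand

sums = [0]                       # empty hand
sums, hand = hit_sums(sums, 1)   # draw an ace -> totals {1, 11}, hand = 11
assert (sums, hand) == ([1, 11], 11)
sums, hand = hit_sums(sums, 10)  # draw a ten -> totals {11, 21}, hand = 21
assert (sums, hand) == ([11, 21], 21)
sums, hand = hit_sums(sums, 5)   # draw a five -> 16 survives, 26 is pruned
assert (sums, hand) == ([16], 16)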
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sum_hand(self, cards):\n self.totalValue = 0\n for card in cards:\n self.totalValue += DeckOfCards.value(self, card)\n\n for card in cards:\n if self.totalValue > 21 and 'A' in card:\n self.totalValue -= 10\n \n if self.totalValue > 21:\n self.keepGoing = False\n print(f\"{self.name} busted!\")", "def DealerLogic(hand):\r\n inithand = [0,0,0,0,0]\r\n temphand = [0,0,0,0,0]\r\n for j in range(5):\r\n inithand[j] = hand[j] #just numericalvalues of hand\r\n temphand[j] = hand[j]\r\n possiblecards = []\r\n basesuits = CountSuits(inithand)\r\n\r\n for i in range(5):\r\n for j in range(5):\r\n temphand[j] = inithand[j] #resetting for correct value\r\n temphand[i] = 0 #generic trump value for hand\r\n temphand = sorted(temphand) #putting in ascending order again\r\n temp = CountSuits(temphand)\r\n if temp[4] < basesuits[4]: #if by replacing that card, number of suits decreases \r\n possiblecards.append(i) #save index of card \r\n\r\n if len(possiblecards) == 0: #if can't decrease number of suits, tries to make as close to less suited\r\n if basesuits[4] == 1: #can't make less suited as all one suit already\r\n return max(inithand) #smallest card possible discarded\r\n elif basesuits[4] == 2: #two suited already (2 of 1 suit, 3 of other), can't make less suited\r\n discardsuit = basesuits.index(2) #finds suit that has 2\r\n else: #three suited, can't make less (1 trump, 2 of one, 2 of other)\r\n for i in range(len(OFFSUITS)):\r\n for j in range(len(OFFSUITS[i])):\r\n if OFFSUITS[i][j] in inithand:\r\n return OFFSUITS[i][j] #returning minimum offsuit card\r\n if discardsuit == 1: #discard ss\r\n return inithand[1] \r\n elif discardsuit == 2: #discard os1\r\n if basesuits[1] != 0: #other option is ss\r\n return inithand[4]\r\n else: #other option is os2\r\n return inithand[1]\r\n else: #discard os2\r\n return inithand[4]\r\n elif len(possiblecards) == 1: #if only one card makes less suited\r\n return inithand[possiblecards[0]]\r\n else: #multiple choices on proper discard, discard lowest card\r\n for i in range(len(OFFSUITS)):\r\n for j in range(len(OFFSUITS[i])):\r\n if OFFSUITS[i][j] in inithand:\r\n return OFFSUITS[i][j] #returning minimum offsuit card\r", "def blackjack_result(cards):\n sum = 0\n a_cards = 0\n dictionary = {\n '2': 2,\n '3': 3,\n '4': 4,\n '5': 5,\n '6': 6,\n '7': 7,\n '8': 8,\n '9': 9,\n 'T': 10,\n 'J': 10,\n 'Q': 10,\n 'K': 10,\n }\n for card in cards.split():\n if card in dictionary:\n sum = sum + dictionary[card]\n elif card == 'A':\n a_cards = a_cards + 1\n\n if a_cards > 0:\n for i in range(a_cards):\n if a_cards > 1:\n sum = sum + 1\n a_cards = a_cards - 1\n else:\n if sum + 11 < 22:\n sum = sum + 11\n else:\n sum = sum + 1\n\n return sum", "def computeHand(i1, i2, i3, i4, i5):\n arr = [Card(i1), Card(i2), Card(i3), Card(i4), Card(i5)];\n\n flushCount = [0, 0, 0, 0];\n rankCount = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];\n\n hand = (1 << i1) + (1 << i2) + (1 << i3) + (1 << i4) + (1 << i5);\n\n cards = arr;\n\n for i in range(len(arr)):\n rankCount[arr[i].rank] += 1\n flushCount[arr[i].suit] += 1\n\n # find straight\n scount = 1 if rankCount[12] > 0 else 0; # for the wheel straight\n highestStraight = -1;\n for i in range(len(rankCount)) :\n if (rankCount[i] > 0) :\n scount += 1\n if (scount >= 5) :\n highestStraight = i\n else :\n scount = 0\n\n # find flush\n for i in range(len(flushCount)) :\n if (flushCount[i] >= 5) :\n if (highestStraight != -1) :\n # if its a flush and straight, must be a straight flush\n return Hand(STRAIGHT_FLUSH, [highestStraight], hand, 
cards)\n else :\n highest = 0\n kickers = []\n for j in range(len(rankCount)):\n if rankCount[j] > 0: \n highest = j\n kickers.append(j)\n return Hand(FLUSH, [highest], hand, cards, kickers[::-1]);\n\n # if its not a flush, then must be ordinary straight\n if highestStraight != -1 :\n return Hand(STRAIGHT, [highestStraight], hand, cards);\n\n # check quads, full house, 3 of a kind, two pair, pair\n kickers = [];\n strength = HIGH_CARD;\n rank = [-1];\n for i in range(len(rankCount)) :\n if rankCount[i] == 4 :\n strength = FOUR_OF_A_KIND\n rank = [i]\n elif rankCount[i] == 3 :\n if strength == PAIR :\n strength = FULL_HOUSE\n rank = [i, rank[0]]\n else :\n strength = THREE_OF_A_KIND\n rank = [i]\n elif rankCount[i] == 2 :\n if strength == THREE_OF_A_KIND :\n strength = FULL_HOUSE;\n rank = [rank[0], i];\n elif strength == PAIR :\n strength = TWO_PAIR\n rank = [i, rank[0]]\n else :\n strength = PAIR\n rank = [i]\n elif rankCount[i] == 1 :\n kickers.append(i)\n\n return Hand(strength, rank, hand, cards, kickers[::-1])", "def hand_points(hand):\n points = [[]]\n branch = 1\n for card in hand:\n if not card[\"is_hidden\"]:\n if card[\"value\"].isnumeric():\n for possibility in range(branch):\n points[possibility].append(int(card[\"value\"]))\n elif card[\"value\"] == \"A\":\n for possibility in range(branch):\n # Ace is 1 or 11. Creating the two possibility\n points.append(points[possibility] + [11]) \n points[possibility].append(1)\n branch += 1\n else:\n # Left are the face value of 10\n for possibility in range(branch):\n points[possibility].append(10)\n\n score = list(zip([sum(branch) for branch in points], points))\n score.sort(key=lambda x: x[0], reverse=True)\n\n for total, points in score:\n if total == 21 and len(hand) == 2:\n return total, \"BlackJack!\"\n if total <= 21:\n if 1 in points and 11 in points:\n return total, None\n if 1 in points: \n return total, \"Soft\"\n if 11 in points:\n return total, \"Hard\"\n else:\n return total, None\n\n # If you get there, you have lost or you had empty hand \n # or all card in hand was hiddien\n if score:\n return score[-1][0], None\n else:\n return 0, None", "def sum_(hand: list):\n vals = [card.rank for card in hand]\n intvals = []\n while len(vals) > 0:\n value = vals.pop()\n try:\n intvals.append(int(value))\n except ValueError:\n if value in ['K', 'Q', 'J']:\n intvals.append(10)\n elif value == 'A':\n intvals.append(1) # Keep it simple for the sake of example\n if intvals == [1, 10] or intvals == [10, 1]:\n print(\" Blackjack!\")\n return(21)\n else:\n points = sum(intvals)\n print(\" Current score: {}\".format(str(points)))\n return(points)", "def create_best_hand_smart(cards):\n cards = sorted([Card(c) for c in cards], reverse=True)\n \n # Get all flushes\n flushes = []\n for suit in Card.SUITS.values():\n suited = [str(c) for c in cards if c.suit == suit]\n if len(suited) >= 5:\n combos = unique_combinations(suited, 5)\n for combo in combos: flushes.append(Hand(combo))\n flushes = sorted(flushes, reverse=True)\n if (flushes and flushes[0].rank() >= Hand.STRAIGHT_FLUSH):\n # Straight flush! 
No need to check anything else\n yield flushes[0]\n \n #Get all sets\n merged = {}\n for c in cards:\n if c.value in merged:\n merged[c.value] = merged[c.value] + 1\n else:\n merged[c.value] = 1\n multiples = [m for m in sorted(merged.items(), key = operator.itemgetter(1), reverse = True) if m[1] > 1]\n quads = [c[0] for c in multiples if c[1] == 4]\n quads = [c for c in cards if c.value in quads]\n trips = [c[0] for c in multiples if c[1] == 3]\n trips = [c for c in cards if c.value in trips]\n pairs = [c[0] for c in multiples if c[1] == 2]\n pairs = [c for c in cards if c.value in pairs]\n remaining = [c for c in cards if c.value not in [m[0] for m in multiples]]\n \n if quads:\n h = quads[:4]\n remaining = [c for c in cards if c.value not in [cc.value for cc in h]][:1]\n for r in remaining: h.append(r)\n yield Hand([str(c) for c in h])\n if trips and pairs:\n # Get a full house together\n h = trips[:3]\n remaining = pairs[:2]\n for r in remaining: h.append(r)\n yield Hand([str(c) for c in h])\n if flushes:\n # We've already got a flush, return it!\n yield flushes[0]\n # Look for a straight!\n mvals = sorted(merged.keys(), reverse=True)\n for i in range(0, len(mvals) -4, 1):\n if (mvals[i] - mvals[i + 4]) == 4:\n # Regular straight\n h = [[c for c in cards if c.value == v][0] for v in mvals[i:i + 5]]\n yield Hand([str(c) for c in h])\n elif 14 in [c.value for c in cards] and mvals[i + 1] == 5 and mvals[i + 4] == 2:\n # Ace low straight\n h = [[c for c in cards if c.value == v][0] for v in mvals[i + 1:i + 5]]\n h.append([c for c in cards if c.value == 14][0])\n yield Hand([str(c) for c in h])\n \n if trips:\n h = trips[:3]\n remaining = [c for c in cards if c.value not in [cc.value for cc in h]][:2]\n for r in remaining: h.append(r)\n yield Hand([str(c) for c in h])\n if pairs:\n if len(pairs) > 2:\n h = pairs[:4]\n remaining = [c for c in cards if c.value not in [cc.value for cc in h]][:1]\n for r in remaining: h.append(r)\n yield Hand([str(c) for c in h])\n else:\n h = pairs\n remaining = [c for c in cards if c.value not in [cc.value for cc in h]][:3]\n for r in remaining: h.append(r)\n yield Hand([str(c) for c in h])\n \n # High card, send the top 5 reverse-sorted cards\n yield Hand([str(c) for c in cards[:5]])", "def hit(self, card):\n self.cards.hit(card)\n if self.cards.hand ==-1:\n self.state ='burst'", "def best_hand(hands):\r\n best_val = 0\r\n sum = 0\r\n hand = None\r\n for h in hands:\r\n for t in h:\r\n sum = sum + t[1]\r\n if sum > best_val:\r\n best_val = sum\r\n hand = h\r\n\r\n return hand", "def sum_cards(self, cards_list):\n sum = 0\n for num in cards_list:\n if num > 51: # means it's Joker\n sum += 0\n else:\n sum += self.num_to_card(num)\n\n return sum", "def averagefit(b, h):\n for key in b:\n count = 0\n backcount = 149\n while b[key].filled < b[key].capacity:\n if count > 149 or backcount < 0:\n break\n if h[count].output + h[backcount].output + \\\n b[key].filled < b[key].capacity:\n if h[count].pluggedin is not False:\n count += 1\n elif h[backcount].pluggedin is not False:\n backcount -= 1\n else:\n b[key].connected.append(h[count])\n b[key].connected.append(h[backcount])\n b[key].filled += h[count].output + h[backcount].output\n h[count].pluggedin = b[key]\n h[backcount].pluggedin = b[key]\n count += 1\n backcount -= 1\n else:\n count += 1\n for house in h:\n print(h[house].pluggedin)", "def evalHand(hand):\n # os.system(\"clear\")\n #print(\"dealer hand before evalHand is: \", hand.showHand())\n if (1 in hand.cards) and (21 - hand.handSum() >= 10):\n 
print(\"found a 1 value Ace in the hand\")\n hand.cards[hand.cards.index(1)] = 11 # Change the first ace from value 1\n # to value 11\n if (11 in hand.cards) and (hand.handSum() >= 22):\n print(\"found an 11 value Ace in the hand and sum > 21\")\n hand.cards[hand.cards.index(11)] = 1 # Change the first ace from value 1\n # to value 11", "def hit(self, hand):\n if hand == \"player\":\n self.player_hand.append(self.cards_list[self.top_card_int])\n self.calculate_value(\"player\")\n elif hand == \"dealer\":\n self.dealer_hand.append(self.cards_list[self.top_card_int])\n self.calculate_value(\"dealer\")\n self.top_card_int += 1\n self.update_card_positions()", "def best_hand(cards):\n\n\tvalues = [card[0:-1] for card in cards]\n\tsuits = [card[-1] for card in cards]\n\n\t# Dictionary for converting card strings to numbers\n\tcardNums = {\"A\":14, \"K\":13, \"Q\":12, \"J\":11, \"10\":10, \"9\":9, \"8\":8, \\\n\t\t\t\"7\":7, \"6\":6, \"5\":5, \"4\":4, \"3\":3, \"2\":2}\n\n\t# Convert card values to real numbers\n\tunsortedValues = [cardNums[value] for value in values]\n\t# unsorted values is necessary for retrieving card + suit\n\t# later\n\tvalues = unsortedValues [:] # make a copy of list\n\tvalues.sort() \t\t# sort values \n\tvalues.reverse()\t# largest # first \n\n\t### Check for possible hands\n\n\n\t# prepare variables for holding potential hands\n\tfourkind = []\n\tflush = [] \t# stores the suit of the flush\n\tstraight = [] \t# stores the highest number of straight \n\tthreekind = [] # stores the best possible 3-of-a-kind \n\tpairs = [] \t# stores one number for each pair\n\n\t# prepare counters for tracking possible hands\n\tstraightCounter = 1 # always have a straight of 1\n\t\n\t# Check for flush\n\tfor suit in suits:\n\t\tif suits.count(suit) >= 5:\n\t\t\tflush = suit\t\n\t\t\tbreak\n\n\t# check for straight, 4-kind, 3-kind, pairs\n\tfor i in range(6): # Don't process the last card\n\n\t\t# Check for straight if still possible\n\t\tif len(straight) == 0:\n\t\t\tprint \"values = \" + str(values)\n\t\t\tstraightSeq = [values.count(values[i]-j) >= 1 for j in range(1,5)]\t\n\t\t\tprint \"straightSeq = \" + str(straightSeq)\n\t\t\tif straightSeq.count(True) == 4:\n\t\t\t\tstraight.append(values[i])\t\n\n\t\t\t# check for 5-4-3-2-A straight\n\t\t\tif values[i] == 5:\n\t\t\t\t# check for 4-2-3 first\n\t\t\t\tstraightSeq = [values.count(values[i]-j) >= 1 for j in range(1,4)]\t\n\t\t\t\t# check for Ace\n\t\t\t\tif straightSeq.count(True) == 3 and \\\n\t\t\t\t\tvalues.count(cardNums[\"A\"]) >= 1:\n\t\t\t\t\tstraight.append(values[i])\t\n\n\t\t# Check for 4-kind\n\t\tif len(fourkind) == 0 and values.count(values[i]) == 4:\n\t\t\tfourkind = [values[i]]\n\t\t# Check for 3-kind but don't add same one twice \n\t\telif values.count(values[i]) == 3 and \\\n\t\t\tthreekind.count(values[i]) == 0:\t\n\t\t\tif len(threekind) == 0:\n\t\t\t\tthreekind.append(values[i])\n\t\t\telse: # add to pairs\n\t\t\t\tpairs.append(values[i])\n\t\t# Check for pairs, don't add same pair twice\n\t\telif values.count(values[i]) == 2 and \\\n\t\t\tpairs.count(values[i]) == 0: \n\t\t\tpairs.append(values[i])\n\n\t\n\n\t### Determine hand strength based on found hands\n\t# Since values are separated from suits, have to iterate\n\t# through unsorted values to find correct index of each card\n\n\tbesthand = []\n\n\t# Straight flush\n\tif len(straight) != 0 and len(flush) != 0:\n\t\tfor i in range(5): \n\t\t\t# check for 5-4-3-2-A straight\n\t\t\tif i == 4 and straight[0] == cardNums[\"5\"]:\n\t\t\t\tcardIndex = 
unsortedValues.index(cardNums[\"A\"])\n\t\t\telse:\n\t\t\t\tcardIndex = unsortedValues.index(straight[0] - i)\n\n\t\t\tcard = cards[cardIndex] \n\t\t\tif card[-1] == flush:\n\t\t\t\tbesthand.append(card)\n\t\t\telse:\n\t\t\t\tbreak\n\t\tif len(besthand) == 5:\n\t\t\treturn (besthand, Ranks.StraightFlush)\n\t\telse: # not a straight flush, so re-init besthand\n\t\t\tbesthand = []\n\n\t# Four of a kind\n\tif len(fourkind) != 0:\n\t\tcardValue = convNumToCard(fourkind[0])\n\t\t# insert the 4 out of 5 cards b/c suit is known\n\t\tbesthand = [cardValue + \"S\", cardValue + \"H\", cardValue + \"C\", cardValue + \"D\"]\n\t\t# add the highest value card that isn't 4-of-a-kind\n\t\tfor i in range(7):\n\t\t\t# search sorted list for high card\n\t\t\tif values[i] != fourkind[0]:\n\t\t\t\t# find card in original unsorted list\n\t\t\t\tcardIndex = unsortedValues.index(values[i])\n\t\t\t\tcard = cards[cardIndex] \n\t\t\t\tbesthand.append(card)\n\t\t\t\tbreak\n\t\treturn (besthand, Ranks.FourKind)\n\t# Full House\t\n\telif len(threekind) != 0 and len(pairs) != 0:\n\t\tfor i in range(7): # add 3-kind to besthand\n\t\t\tif unsortedValues[i] == threekind[0]:\n\t\t\t\tbesthand.append(cards[i])\n\t\t\t\tif len(besthand) == 3:\n\t\t\t\t\tbreak\n\t\t\n\t\tfor i in range(7): # add pair to besthand\n\t\t\tif unsortedValues[i] == pairs[0]:\n\t\t\t\tbesthand.append(cards[i])\n\t\t\t\tif len(besthand) == 5:\n\t\t\t\t\tbreak\n\t\treturn (besthand, Ranks.FullHouse)\n\t# Flush\n\telif len(flush) != 0:\n\t\t# iterate through sorted cards, add that card if its\n\t\t# suit matches the flush suit\n\t\tfor i in range(7):\n\t\t\t# find card in original unsorted list\n\t\t\tcardIndex = unsortedValues.index(values[i])\n\t\t\tcard = cards[cardIndex] \n\t\t\tif card[-1] == flush[0]:\n\t\t\t\tbesthand.append(card)\n\t\t\t\tif len(besthand) == 5:\n\t\t\t\t\tbreak\n\t\treturn (besthand, Ranks.Flush)\n\t# Straight\n\telif len(straight) != 0:\n\n\t\tfor i in range(5): \n\t\t\t# check for 5-4-3-2-A straight\n\t\t\tif i == 4 and straight[0] == cardNums[\"5\"]:\n\t\t\t\tcardIndex = unsortedValues.index(cardNums[\"A\"])\n\t\t\telse:\n\t\t\t\tcardIndex = unsortedValues.index(straight[0] - i)\n\t\t\tcard = cards[cardIndex] \n\t\t\tbesthand.append(card)\n\t\treturn (besthand, Ranks.Straight)\n\t# Three of a kind\n\telif len(threekind) != 0:\n\t\tfor i in range(7): # add 3-kind to besthand\n\t\t\tif unsortedValues[i] == threekind[0]:\n\t\t\t\tbesthand.append(cards[i])\n\t\t\t\tif len(besthand) == 3:\n\t\t\t\t\tbreak\n\t\tfor i in range(7): # add two high cards to best hand\n\t\t\t# search sorted list for high card\n\t\t\tif values[i] != threekind[0]:\n\t\t\t\t# find card in original unsorted list\n\t\t\t\tcardIndex = unsortedValues.index(values[i])\n\t\t\t\tcard = cards[cardIndex] \n\t\t\t\tbesthand.append(card)\n\t\t\t\tif len(besthand) == 5:\n\t\t\t\t\tbreak\n\t\treturn (besthand, Ranks.ThreeKind)\n\t# Two pair\n\telif len(pairs) == 2:\n\t\tfor i in range(7): # add 1st pair to besthand\n\t\t\tif unsortedValues[i] == pairs[0]:\n\t\t\t\tbesthand.append(cards[i])\n\t\t\t\tif len(besthand) == 2:\n\t\t\t\t\tbreak\n\t\tfor i in range(7): # add 2nd pair to besthand\n\t\t\tif unsortedValues[i] == pairs[1]:\n\t\t\t\tbesthand.append(cards[i])\n\t\t\t\tif len(besthand) == 4:\n\t\t\t\t\tbreak\n\t\tfor i in range(7): # add high card to best hand\n\t\t\t# search sorted list for high card\n\t\t\tif values[i] != pairs[0] and values[i] != pairs[1]:\n\t\t\t\t# find card in original unsorted list\n\t\t\t\tcardIndex = 
unsortedValues.index(values[i])\n\t\t\t\tcard = cards[cardIndex] \n\t\t\t\tbesthand.append(card)\n\t\t\t\tif len(besthand) == 5:\n\t\t\t\t\tbreak\n\t\treturn (besthand, Ranks.TwoPair)\n\t# Pair\n\telif len(pairs) == 1:\n\t\tfor i in range(7): # add pair to besthand\n\t\t\tif unsortedValues[i] == pairs[0]:\n\t\t\t\tbesthand.append(cards[i])\n\t\t\t\tif len(besthand) == 2:\n\t\t\t\t\tbreak\n\t\tfor i in range(7): # add high card to best hand\n\t\t\t# search sorted list for high card\n\t\t\tif values[i] != pairs[0]:\n\t\t\t\t# find card in original unsorted list\n\t\t\t\tcardIndex = unsortedValues.index(values[i])\n\t\t\t\tcard = cards[cardIndex] \n\t\t\t\tbesthand.append(card)\n\t\t\t\tif len(besthand) == 5:\n\t\t\t\t\tbreak\n\t\treturn (besthand, Ranks.Pair)\n\t# High card\n\telse:\n\t\tfor i in range(7):\n\t\t\tcardIndex = unsortedValues.index(values[i])\n\t\t\tcard = cards[cardIndex] \n\t\t\tbesthand.append(card)\n\t\t\tif len(besthand) == 5:\n\t\t\t\treturn (besthand, Ranks.HighCard)", "def test_value_hard_hand(self):\n hand = self._hand\n cards = [BjCard('spades', '6'), BjCard('hearts', 'A'), BjCard('clubs', 'K')]\n for card in cards:\n hand.add_card(card)\n self.assertEqual(hand.value, 17)", "def calculate_cards(cards):\n if 11 in cards and sum(cards) > 21:\n cards.remove(11)\n cards.append(1)\n print('Changing 11 --> 1')\n print(f'Your hand is now {cards} and your total is {sum(cards)}')\n elif sum(cards) > 21:\n print('Sorry. Looks like you busted!')\n restart_game()\n elif sum(cards) == 21 and len(cards) == 2:\n print('You win with a blackjack!')\n restart_game()", "def hit(self, deck):\n self.showOneCard = False\n while self.getPoints() < 17:\n self.cards.append(deck.deal())", "def hand_value_check(self, hand):\r\n hand_value = 0\r\n result = []\r\n a = 0\r\n for card in hand: # calculate value of a hand\r\n if card.value < 10:\r\n a = card.value\r\n elif card.value in range(10, 14):\r\n a = 10\r\n elif card.value == 14: # keep track of Aces that may be counted both as 11 and as 1\r\n a = 11\r\n hand_value += a\r\n\r\n result.append(hand_value)\r\n self.display_hand_val = result\r\n return result", "def _get_hand_value(self, hand, allow_soft_limit=True):\n hand_values = [0]\n for face, suit in hand:\n card_value = self._face_value(face)\n hand_values = [value + card_value for value in hand_values]\n if face == 'ace' and allow_soft_limit:\n hand_values_ace = [value - 10 for value in hand_values if value < 21]\n hand_values += hand_values_ace\n # Get the higehst value that's 21 or less. 
If none found, get the bust value\n hand_values.sort(reverse=True) # Highest number First\n for value in hand_values:\n hand_value = value\n if hand_value <= 21: # Found the highest number <= 21\n break\n return hand_value", "def hand_value_check(self, hand):\r\n hand_value = 0\r\n ace = 0\r\n result = []\r\n a = 0\r\n for card in hand: # calculate value of a hand\r\n if card.value < 10:\r\n a = card.value\r\n elif card.value in range(10, 14):\r\n a = 10\r\n elif card.value == 14: # keep track of Aces that may be counted both as 11 and as 1\r\n a = 11\r\n ace += 1\r\n hand_value += a\r\n\r\n if ace > 0: # if hand had aces, return all possible hand values\r\n for i in range(0, ace + 1):\r\n result.append(hand_value)\r\n hand_value -= 10\r\n self.display_hand_val = result\r\n return result\r\n else:\r\n result.append(hand_value)\r\n self.display_hand_val = result\r\n return result", "def get_small_joker_value(deck):\n \n return max(deck) - 1", "def calculate_score(cards):\n if sum(cards) == 21 and len(cards) == 2:\n return 0\n \n if 11 in cards and sum(cards) > 21:\n cards.remove 11\n cards.append 1\n return sum(cards)", "def hand_value(hand):\n val = 0 \n for card in hand:\n val += card.value\n\n return val", "def calculateHandlen(hand):\n # TO DO... <-- Remove this comment when you code this function\n sum = 0\n for value in hand.values():\n sum += value\n return sum", "def create_best_hand_bruteforce(cards):\n \n combos = unique_combinations(cards, 5)\n hands = [Hand(combo) for combo in combos]\n hands = sorted(hands, reverse=True)\n return hands[0]", "def hit_or_stand(self, final_sums):\n if final_sums is None:\n final_sums = self.sum_cards()\n if final_sums[0] > 21: # should not happen\n print(\"BUST!\")\n return \"S\"\n highest_smaller_21 = final_sums[0]\n index = 1\n while index < len(final_sums) and final_sums[index] <= 21 :\n highest_smaller_21 = final_sums[index]\n index += 1\n index -= 1 # index of chosen sum in final_sums\n if highest_smaller_21 < 17 or (highest_smaller_21 == 17 and index >= 1): # smaller than 17 or soft 17\n return \"H\"\n else:\n return \"S\"", "def checkDoubles(self,card): # need to check defenders handcount...\n multipleCards = [card]\n for i in range(4): # checking all other possible cards of same rank\n card_plus = card + 13 * i # checking higher values\n card_minus = card - 13 * i # checking lower values\n if card_plus in self.currentHand and card_plus < 51 and card_plus != card and card_plus not in multipleCards:\n print(\"Do you wish to add:\")\n cardManager.printHand([card_plus])\n prompt= input(\"to your attack? (y/n):\")\n while prompt != 'y' and prompt != 'n': # input checking\n print(\"Do you wish to add:\")\n cardManager.printHand([card_plus])\n prompt = input(\"to your attack? (y/n):\")\n if prompt == 'y':\n print(\"added\")\n multipleCards.append(card_plus)\n self.currentHand.remove(card_plus)\n else:\n print(\"Did not add\")\n if card_minus in self.currentHand and card_minus > 0 and card_plus != card and card_minus not in multipleCards:\n print(\"Do you wish to add:\")\n cardManager.printHand([card_minus])\n prompt = input(\"to your attack? (y/n):\")\n while prompt != 'y' and prompt != 'n': # input checking\n print(\"Do you wish to add:\")\n cardManager.printHand([card_minus])\n prompt = input(\"to your attack? 
(y/n):\")\n if prompt == 'y':\n print(\"added\")\n multipleCards.append(card_minus)\n self.currentHand.remove(card_minus)\n else:\n print(\"Did not add\")\n return multipleCards", "def sumHandReturnPoints(self, valueLs): # card is the element in valueLs\n #Ace is dealt with here, assume Ace to be 11 initially, decreasing by 10 per Ace if sum > 21\n rowSum = 0\n AceCount = 0\n for ele in valueLs:\n rank = ele.get_rank()\n if rank == 1:\n rank = 11\n AceCount += 1 # serve as flag\n rowSum += rank\n while(AceCount!=0):\n if rowSum > 21:\n rowSum -= 10\n AceCount -= 1\n points = self.countPoints(rowSum)\n return points", "def best_hand(cards):\n return max(generate_all_hands(cards))", "def check_for_bust_or_bj(hand=bj.player1.hand):\r\n phv = bj.player1.hand_value_check(hand) # check player hand value\r\n phv = [x for x in phv if x <= 21] # remove all hand values that exceed 21\r\n if len(phv) == 0: # if no values under 21 are available -> bust\r\n if hand == bj.player1.hand:\r\n bj.player1.final_hand_val = \"bust\"\r\n return\r\n else:\r\n bj.player1.final_hand2_val = \"bust\"\r\n return\r\n elif 21 in phv: # if 21 is among values -> blackjack\r\n if hand == bj.player1.hand:\r\n bj.player1.final_hand_val = 21\r\n return\r\n else:\r\n bj.player1.final_hand2_val = 21\r\n return" ]
[ "0.688753", "0.6112849", "0.6100922", "0.6085325", "0.5997567", "0.5966301", "0.5873897", "0.5858032", "0.58062357", "0.5799236", "0.5777938", "0.57645684", "0.5712703", "0.5693552", "0.5690028", "0.5632079", "0.5579677", "0.557833", "0.55535036", "0.5553048", "0.5537502", "0.5536025", "0.5525127", "0.55243355", "0.55238026", "0.5518778", "0.5514551", "0.5497926", "0.54667723", "0.5465311" ]
0.70891213
0
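Several of the negatives above re-implement the same ace-handling idea: count an ace as 11, then demote it to 1 while the hand would bust. A minimal self-contained sketch of that pattern, assuming integer ranks with aces passed in as 11 — the function name and interface are illustrative, not taken from any snippet above:

def hand_value(ranks):
    # Score a blackjack hand; aces arrive as 11 and are demoted to 1 on bust.
    total = sum(ranks)
    aces = ranks.count(11)
    while total > 21 and aces:
        total -= 10  # one ace switches from 11 to 1
        aces -= 1
    return total

assert hand_value([11, 10]) == 21      # natural blackjack
assert hand_value([11, 11, 9]) == 21   # one ace demoted to 1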
Are the current cards a blackjack?
def is_blackjack(self):
    if self.hand == 21 and len(list(self)) == 2:
        print '%s = Blackjack' % self
        return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hasBlackjack(self):\n return len(self.cards) == 2 and self.getPoints() == 21", "def is_blackjack(self) -> bool:\n if self.score == 21 and len(self.cards) == 2:\n return True\n else:\n return False", "def check_for_blackjack(self):\n if (self.dealer.hand.value + self.dealer.face_down.value) == 21:\n if self.player.hand.blackjack:\n return self.blackjack_push()\n else:\n return self.blackjack_dealer_win()\n\n if self.player.hand.blackjack():\n return self.blackjack_player_win()\n lost_insurance_bet(self.side_bet)\n return False", "def has_cards(self):\n return self.hand.len() > 0", "def test_for_blackjack(self):\n hand = self._hand\n cards = [BjCard('clubs', '10'), BjCard('diamonds', 'A')]\n for card in cards:\n hand.add_card(card)\n self.assertEqual(hand.is_blackjack, True)", "def test_for_non_blackjack(self):\n hand = self._hand\n cards = [BjCard('clubs', '8'), BjCard('diamonds', '8')]\n for card in cards:\n hand.add_card(card)\n self.assertEqual(hand.is_blackjack, False)", "def still_in_hand(self):\n return len(self.hand.cards)!=0", "def cardPlayable(self, card):\n return self.field[Suit.toInt(card.getSuit()) - 1] == card.getValue() - 1", "def get_card_holder(self):\n\t\tif self.card_holder is not None:\n\t\t\treturn True\n\t\treturn False", "def is_card_playable(self, card):\n color_index = COLOR.index(card[0])\n return len(self.firework[color_index]) == int(card[1]) - 1", "def is_match(self, card):\n\t\treturn self.suit == card.suit or self.value == card.value", "def cardDiscardable(self, card):\n if self.cardDead(card):\n return True\n\n cardAttr = \"\"\n if Suit.toString(card.getSuit()) == \"white\":\n cardAttr = \"w\"\n elif Suit.toString(card.getSuit()) == \"blue\":\n cardAttr = \"b\"\n elif Suit.toString(card.getSuit()) == \"red\":\n cardAttr = \"r\"\n elif Suit.toString(card.getSuit()) == \"green\":\n cardAttr = \"g\"\n elif Suit.toString(card.getSuit()) == \"yellow\":\n cardAttr = \"y\"\n\n if card.getValue() == 1:\n cardAttr += \"1\"\n elif card.getValue() == 2:\n cardAttr += \"2\"\n elif card.getValue() == 3:\n cardAttr += \"3\"\n elif card.getValue() == 4:\n cardAttr += \"4\"\n elif card.getValue() == 5:\n cardAttr += \"5\"\n\n if card.getValue() == 1:\n if self.discardedDict[cardAttr] < 2:\n self.discardedDict[cardAttr] += 1\n # print(3 - self.discardedDict[cardAttr], \"card remaining for \", cardAttr)\n return True\n elif card.getValue() == 2 or card.getValue() == 3 or card.getValue() == 4:\n if self.discardedDict[cardAttr] < 1:\n self.discardedDict[cardAttr] += 1\n # print(2 - self.discardedDict[cardAttr], \"card remaining for \", cardAttr)\n return True\n elif card.getValue() == 5:\n if self.discardedDict[cardAttr] < 0:\n self.discardedDict[cardAttr] += 1\n # print(1 - self.discardedDict[cardAttr], \"card remaining for \", cardAttr)\n return True\n # print(\"Useful card\")\n return False", "def does_player_have_card(self, player, card):\n return card in self.hands[player]", "def deck_has_cards(deck, cards):\n deck_dict = collections.defaultdict(int)\n for card in itertools.chain(deck.draw_pile, deck.discard_pile, deck.hand):\n deck_dict[card] += 1\n return deck_dict == cards", "def can_play(self, card):\n played_cards = map(lambda x: str(x).lower(), self.played_cards)\n if str(card).lower() in played_cards:\n return False\n if card.prebuild in played_cards:\n return True\n\n for res in card.cost", "def is_soft_hand(self):\n is_soft = False\n for i in self.cards:\n if i.value == 'ACE':\n is_soft = True\n\n return is_soft", "def cardExists(self, id):\n return id in 
self.cards", "def check_color_card(player, color):\n for card in player.cards:\n if card.suit == color:\n return True", "def can_be_played(cls, card, context={}):\n\t\treturn True", "def validate_cards(self, cards_list):\n return set(self.hand).issubset(set(cards_list))", "def has_won(self):\n coders_card = self.get_coders().get_amount()\n if coders_card > 3:\n return True\n else:\n return False", "def can_split(self) -> bool:\n if len(self.cards) == 2 and self.cards[0].value == self.cards[1].value:\n return True\n else:\n return False", "def can_afford_card(self,\n card: Card) -> bool:\n price_after_discount = card.price % self.discount()\n missing_gems = 0\n for gem_color in GemColor:\n if gem_color != GemColor.GOLD:\n missing_gems += max(price_after_discount.value(gem_color) - self.gems_possessed.value(gem_color),0)\n return self.gems_possessed.value(GemColor.GOLD) >= missing_gems", "def end_game(bank, card_deck) -> bool:\n if bank.balance < MINIMUM_BET():\n print(\"You're broke and you've run out of money! :(\\n\")\n elif not card_deck.cards:\n print(\"We've exhausted the deck of cards.\\n\")\n return bank.balance < MINIMUM_BET() or not card_deck.cards", "def has_theme(cards, theme):\n for card in cards:\n if card.CARDTYPE == theme:\n return True\n\n return False", "def is_bankrupted(self):\n return self.status == self.PLAYER_BANKRUPT", "def check_banned(deck_format, card_name):\n if card_name in consts.BANNINGS[deck_format]:\n return True\n return False", "def hitMe(hand, deck):\n if deck.cardsLeft == 0:\n return False\n hand.getCard(deck.drawCard())\n return True", "def check_valid(self, cards):\n\n if len(cards) == 1: # one card\n return True\n if len(cards) == 2: # two cards\n if ((self.num_to_card(int(cards[0])) == self.num_to_card(int(cards[1]))) or # two same cards\n (int(cards[0]) > 51) or # any card and a joker\n (int(cards[1])) > 51): # any card and a joker\n return True\n return False\n\n # 3 or more: all same number/ascending order\n # check how many jokers\n jokers = 0\n for card in cards:\n #print(int(card))\n #print(self.num_to_card(card))\n if int(card) > 51:\n jokers += 1\n #print(\"YESSSSSSSSSSIR\")\n #print(f'[THERE ARE {jokers} JOKERS]')\n\n # check if all same number\n sort = sorted(cards)\n #print(f'[THE SORTED CARDS: {sort}]')\n index = 0\n for card in sort:\n if self.num_to_card(int(card)) == self.num_to_card(int(sort[0])) or int(card) > 51:\n index += 1\n if index == len(cards):\n return True\n\n # check ascend order\n if not self.is_same_sign(cards):\n print('Here')\n return False\n\n #print(\"accend left\")\n return self.ascend(cards, jokers)", "def hand_empty(self):\n return len(self.cards) == 0" ]
[ "0.8288965", "0.81736314", "0.7500575", "0.72958475", "0.7241689", "0.7119909", "0.7114075", "0.69564337", "0.68808556", "0.6869995", "0.67837375", "0.6718618", "0.66275245", "0.6555914", "0.6503698", "0.6475533", "0.6465826", "0.64576113", "0.64328235", "0.642792", "0.6424301", "0.6349277", "0.6330889", "0.6317876", "0.624547", "0.6240841", "0.6235753", "0.62093586", "0.61963516", "0.6187936" ]
0.81807315
1
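The positive document for this query checks for exactly two cards totalling 21. A hedged usage sketch follows — the Hand class below, its running `hand` total, and the Python 3 print are assumptions used to make the check runnable, not code from the dataset:

class Hand(object):
    def __init__(self, cards):
        self.cards = cards            # e.g. [11, 10] for ace + ten
        self.hand = sum(cards)        # running total, as the document assumes
    def __iter__(self):
        return iter(self.cards)
    def is_blackjack(self):
        # same rule as the positive document: 21 from exactly two cards
        if self.hand == 21 and len(list(self)) == 2:
            print('%s = Blackjack' % list(self.cards))
            return True
        return False

assert Hand([11, 10]).is_blackjack()
assert not Hand([7, 7, 7]).is_blackjack()   # 21, but from three cards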
Restart another round. Check the remaining budget and leave the game if budget <= 0; otherwise create a new BJCards hand and reset the bet.
def restart(self):
    self.state = 'active'
    if self.budget <= 0:
        return self.leave()
    self.cards = BJCards()
    self.bet_amount = 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def restart(self):\r\n\r\n self.pot = 0\r\n self.actions = 0\r\n self.previous_bet = self.small_blind\r\n self.initiate_blind(self.small_blind + self.big_blind)\r\n\r\n for player in self.players:\r\n player.credits = self.starting_credits\r\n\r\n # Let the first player begin\r\n self.active_player = (self.active_player + 1) % len(self.players)\r\n self.players[self.active_player].active = True\r\n\r\n self.players[self.active_player - 1].flip_cards()\r\n self.community_cards.flip_cards()\r\n\r\n self.deck_model = DeckModel()\r\n\r\n for player in self.players:\r\n player.new_cards(self.deck_model)\r\n\r\n output_text = \"Starting game...\\n{} post the big blind [${}]\\n{} post the small blind [${}]\".format(\r\n self.players[(self.active_player + 1) % len(self.players)].name, self.big_blind,\r\n self.players[self.active_player].name, self.small_blind)\r\n\r\n message = \"Player {} won!\".format(self.players[1].name)\r\n self.game_message.emit(message)\r\n\r\n self.new_pot.emit()\r\n self.new_credits.emit()\r\n self.new_output.emit(output_text)", "def cal_kill_turn(deck): \n #Init board/game state\n goldfish_life = 20\n turn = 0 \n \n #lands in hand\n lands_in_hand = []\n #spell count in hand\n spells_in_hand = []\n #lands in play\n lands_in_play = []\n #creatures in play\n spells_in_play = []\n #creatures' in play power\n #creature_pwr = 1\n \n #shuffle and draw 7 cards, mulls if hand bad\n hand = None\n keep_hand = False\n hand_count = 8\n while keep_hand == False:\n hand_count = hand_count - 1\n deck.shuffle()\n hand = deck.peep(hand_count)\n keep_hand = Mull.keep_or_mull(hand)\n hand = deck.draw_hand(num = hand_count) \n \n #Init Hand state\n for card in hand:\n if card.is_land == True:\n lands_in_hand.append(card)\n else:\n spells_in_hand.append(card) \n \n #SIMULATE GOLDFISH KILL\n while(goldfish_life >= 0 and deck.size() > 0): \n if VERBOSE:\n print(\"+++++++++++++ Turn \" + str(turn) + \"++++++++++++++\") \n print(\" Goldfish life = \" + str(goldfish_life))\n \n print(\" Lands in play\")\n for card in lands_in_play:\n print(card)\n print(\" Spells in play\")\n for card in spells_in_play:\n print(card)\n print(\" Lands in hand\")\n for card in lands_in_hand:\n print(card)\n print(\" Creatures in hand\")\n for card in spells_in_hand:\n print(card) \n \n # Draw a card if not first turn\n if turn > 0:\n card_to_draw = deck.draw() \n if card_to_draw.is_land == True: \n lands_in_hand.append(copy.deepcopy(card_to_draw))\n else: \n spells_in_hand.append(copy.deepcopy(card_to_draw)) \n\n #MAIN PHASE 1 play land card if we have any\n if len(lands_in_hand) > 0: \n lowest_cost = None\n land_to_play = None\n #Play the land card that has the lowest cost creature in hand\n for land in lands_in_hand[:]:\n for creature in spells_in_hand[:]:\n if land.manaEachTurn == creature.manaCost:\n # this land card has a playable creature\n if land_to_play != None:\n temp_cost = creature.manaCost\n if temp_cost < lowest_cost:\n if len(np.where(temp_cost > 0)[0]) <= \\\n len(np.where(lowest_cost > 0)[0]): \n # play the land that corresponds to\n # the creatures that require the \n # least different types\n land_to_play = land\n lowest_cost = temp_cost\n else:\n #first land card, we store it to play\n land_to_play = land\n lowest_cost = creature.manaCost\n if land_to_play == None: #No spell cards in hand\n land_to_play = lands_in_hand[0] #play first land card\n \n lands_in_play.append(copy.deepcopy(land_to_play)) \n lands_in_hand.pop(lands_in_hand.index(land_to_play)) \n \n #ATTACK GOLDFISH \n for creature in 
spells_in_play:\n goldfish_life = goldfish_life - creature.damageEachTurn\n if goldfish_life <= 0:\n if VERBOSE:\n print('Goldfish killed on turn ' + str(turn))\n return turn\n \n #MAIN PHASE 2 play spells\n if len(spells_in_hand) > 0 and len(lands_in_play) > 0: \n #Spells in hand and mana available --> play a creature\n #GOLDFISH LOGIC\n if p_goldfish:\n if goldfish_interactions > 0:\n pass\n if q_goldfish:\n if r.random(1) < q_goldfish_prob:\n if goldfish_interactions > 0:\n pass\n #Check mana pool\n mana_pool = np.array([0] * Mana.MANA_TYPES)\n for card in lands_in_play:\n mana_pool += card.manaEachTurn\n \n for creature in spells_in_hand:\n temp_pool = np.array(mana_pool - \\\n np.array(creature.manaCost))\n if len(np.where(temp_pool < 0)[0]) == 0: \n #can afford to play card\n mana_pool = temp_pool[:]\n spells_in_play.append(copy.deepcopy(creature))\n spells_in_hand.remove(creature)\n if VERBOSE:\n print(\"++++++++++++ End Turn \" + str(turn) + \"++++++++++\") \n turn += 1 \n #End Gold Fish kill \n \n if VERBOSE:\n print('Goldfish killed on turn ' + str(turn))\n return turn", "def calculate_unfinished_game(test_string=\"\"):\n # get user input\n print \"No old game entered, starting new Game\"\n # which frame we're on,\n # which throw we're on in the frame,\n # and the number of throws so far\n # throw_number checks which throw in the frame you're on,\n # throw_idx tracks the number of throws so far total.\n frame_idx = throw_number = throw_idx = 0\n\n # track if there's an extra throw this frame, only used in 10th frame\n bonus_throw = False\n # create a list of throws as they come in\n throw_list = []\n # create a list of each frame, and what they scored -\n frame_list = []\n # and add the first 9 frames\n for _ in range(0, 9):\n frame_list.append([0, 0])\n\n # add the special 10th frame\n frame_list.append([0, 0, 0])\n # create a list of the total scores for the frame\n score_list = [0]*10\n # track the number of pins standing\n pins = reset_pins()\n # track which throw goes to which frame\n throw_to_frame = {}\n\n if test_string:\n test_list = list(test_string)\n\n # while - a game of bowling has only 10 frames\n while frame_idx < 10:\n print\n # just to add two lines between throws\n print\n\n print_scoreboard(frame_list, score_list, frame_idx, throw_number)\n print \"\"\"You are on frame {} and are on your throw {}.\nYour running score is {}. There are {} pins standing.\"\"\".format(\n # show which frame\n frame_idx+1,\n # show throw they're on\n throw_number+1,\n # show their score\n sum(score_list),\n # show how many pins are standing\n pins,\n )\n\n print\n # just to add two lines between throws\n print\n\n # just for testing: convert X to 10\n if test_string:\n throw = test_list.pop(0)\n if throw == 'X':\n throw = '10'\n else:\n # get user input\n throw = raw_input(\"After your throw, enter a number 0-10. 
> \")\n # along with numbers for pins, we can also accept characters\n # x, X & / (for strikes and spares)\n if not throw.isdigit():\n # covert X to strike\n if is_strike(str(throw)):\n throw = '10'\n # covert / to spare\n if is_spare(str(throw)):\n # if we're on the first throw\n if not throw_number:\n print 'Cannot get spare on first throw!'\n continue\n else:\n throw = str(10-throw_list[throw_idx-1])\n\n # check that the number isn't greater than the number of pins abailable\n if not check_valid_throw(throw, pins):\n print \"Enter only numbers, please.\"\n # if it's invalid, try again\n continue\n # cast to int\n throw = int(throw)\n # save throw in throw_list\n throw_list.append(throw)\n # save which throw was in which frame\n throw_to_frame[str(throw_idx)] = frame_idx\n # save throw score into the throw score sheet\n frame_list[frame_idx][throw_number] = throw\n # save throw into the frame score sheet\n score_list[frame_idx] += throw\n # update how many pins are standing after the throw\n pins -= throw\n\n # check for strikes and spares\n # if we can check last throw\n if len(throw_list) > 1:\n # if last throw was a strike\n if throw_list[throw_idx-1] == 10:\n # if last frame was in the first 9\n if throw_to_frame[str(throw_idx-1)] < 9:\n # add this throw to the frame for that throw\n score_list[throw_to_frame[str(throw_idx-1)]] += throw\n elif throw_number < 1:\n # if last frame was a spare\n if score_list[frame_idx-1] == 10:\n # add this throw to last frame\n score_list[frame_idx-1] += throw\n # if we can check two throws ago\n if len(throw_list) > 2:\n # if second to last throw was a strike\n if throw_list[throw_idx-2] == 10:\n # if second to last frame was in the first 9\n if throw_to_frame[str(throw_idx-2)] < 9:\n # add this throw to the score from that frame\n score_list[throw_to_frame[str(throw_idx-2)]] += throw\n\n # what to do after the throw depends on several factors\n # first nine frames\n if frame_idx < 9:\n # first throw\n if throw_number < 1:\n # if there are pins left\n if pins > 0:\n # go to second throw in frame\n throw_number += 1\n # if there are no pins left\n else:\n print \"Strike!\"\n print\n # reset pins\n pins = reset_pins(pins)\n # go to next frame\n frame_idx += 1\n throw_number = 0\n # second throw\n else:\n # if there are pins left\n if pins == 0:\n print \"Spare!\"\n print\n # go to next frame\n frame_idx += 1\n # reset throw_number to 0\n throw_number = 0\n # reset pins\n pins = reset_pins(pins)\n # final 10th frame\n else:\n # first throw\n if throw_number < 1:\n # if there no are pins left\n if pins == 0:\n print \"Strike!\"\n print\n # reset pins\n pins = reset_pins(pins)\n # you get a bonus 3rd throw\n bonus_throw = True\n # second throw\n elif throw_number < 2:\n # if there no are pins left\n if pins == 0:\n # if last throw was a strike,\n if throw_list[throw_idx-1] == 10:\n print \"Strike!\"\n print\n # if last throw was not a strike,\n else:\n print \"Spare!\"\n print\n # either way, you get a bonus 3rd throw\n bonus_throw = True\n # reset pins\n pins = reset_pins(pins)\n # if you don't have a bonus throw\n if not bonus_throw:\n # go to next frame, ends the game\n frame_idx += 1\n # third throw\n elif throw_number < 3:\n # go to next frame, ends the game\n frame_idx += 1\n # increment throw_number\n throw_number += 1\n # increment throw_idx\n throw_idx += 1\n\n print_scoreboard(frame_list, score_list, frame_idx, throw_number)\n\n if not test_string:\n _ = raw_input(\"Game Over!\")\n print \" {}\".format(_)\n print\n print \"final score: 
{}\".format(sum(score_list))\n if raw_input(\n \"\"\"Play again? Enter 'Y' to play again,\n or press enter to quit. \"\"\").lower() == 'y':\n Game() # pragma: no cover\n return sum(score_list)", "def blackjack():\n score_report, bank, game_deck = start_game()\n while not end_game(bank, game_deck):\n user, dealer = Player(), Player(dealer=True)\n print(\"\\n=============== BEGINNING ROUND! ===============\")\n bank.report_balance()\n bank.place_bet()\n play_round(user=user, dealer=dealer, deck=game_deck)\n if game_deck.cards:\n winner = decide_winner(user, dealer)\n end_round(winner_result=winner, bank=bank, report=score_report)\n print(score_report)\n score_report.report_rounds()\n print(f\"This concludes our game of BlackJack 21 and you get to take home ${bank.balance}, thank you for playing!\")", "def __init__(self):\n self.start()\n while self.player.money > 0:\n self.game_loop()\n if self.player.money > 0:\n if not play_again():\n break\n elif self.player.money == 0:\n no_more_money()\n self.reset_table()", "def _new_game():\n\n global dealer_card_frame\n global player_card_frame\n global dealer_hand\n global player_hand\n\n dealer_card_frame.destroy()\n dealer_card_frame = tkinter.Frame(card_frame, background=\"green\")\n dealer_card_frame.grid(row=0, column=1, sticky=\"ew\", rowspan=2)\n\n player_card_frame.destroy()\n player_card_frame = tkinter.Frame(card_frame, background=\"green\")\n player_card_frame.grid(row=2, column=1, sticky=\"ew\", rowspan=2)\n\n # reset the result label\n result_text.set(\"\")\n\n # create a list to store dealer's and player's hands.\n dealer_hand = []\n player_hand = []\n\n # deal the first cards\n _initial_deal()", "def setup_newgame(self):\n global chips\n self.bet = 100\n if chips < self.bet: \n self.game_over = True\n chips -= self.bet\n \n\n self.cards_list = arcade.SpriteList()\n\n #resets on newgame\n self.top_card_int = 0 ## this had to be moved here to make it so that you are not drawing over the 52 card limit\n self.player_hand = []\n self.dealer_hand = []\n self.player_value = 0\n self.dealer_value = 0\n self.player_ace_count = 0\n self.dealer_ace_count = 0\n self.player_almost_bust = 0\n self.dealer_almost_bust = 0\n self.blackjack = False\n self.victory = False\n self.defeat = False\n \n #creates deck\n for card_suit in CARD_SUITS:\n for card_value in CARD_VALUES:\n card = Card(card_suit, card_value, CARD_SCALE)\n self.cards_list.append(card)\n #shuffles deck\n for pos1 in range(len(self.cards_list)):\n pos2 = random.randrange(len(self.cards_list))\n self.cards_list.swap(pos1, pos2)\n \n #Current way to add cards to player and dealer hands since using .pop() on self.cards_list deletes the card itself even in the other hands\n \n #self.dealer_hand.append(self.top_card_int)\n self.hit(\"dealer\")\n self.dealer_hand[0].face_down()\n #first_card = self.dealer_hand[0]\n #first_card.face_down()\n #self.dealer_hand[0].face_down()\n self.hit(\"player\")\n self.player_hand[0].face_down()\n self.hit(\"dealer\")\n self.dealer_hand[1].face_down()\n self.hit(\"player\")\n self.player_hand[1].face_down()\n self.update_card_positions()", "def next_round(self, succeeds_fold=False):\r\n\r\n self.pot = 0\r\n self.actions = 0\r\n self.previous_bet = self.small_blind\r\n self.initiate_blind(self.small_blind + self.big_blind)\r\n\r\n # Let the first player begin\r\n self.active_player = (self.active_player + 1) % len(self.players)\r\n self.players[self.active_player].active = True\r\n\r\n self.players[self.active_player-1].flip_cards()\r\n\r\n if not 
succeeds_fold:\r\n self.community_cards.flip_cards()\r\n if succeeds_fold:\r\n self.community_cards.flip_all_cards()\r\n\r\n # Create a new deck\r\n self.deck_model = DeckModel()\r\n\r\n # Creates new cards\r\n self.community_cards.new_cards(self.deck_model)\r\n for player in self.players:\r\n player.new_cards(self.deck_model)\r\n\r\n output_text = \"Initiating round.\\n{} post the big blind [${}]\\n{} post the small blind [${}]\".format(\r\n self.players[(self.active_player + 1) % len(self.players)].name, self.big_blind,\r\n self.players[self.active_player].name, self.small_blind)\r\n\r\n self.new_pot.emit()\r\n self.new_credits.emit()\r\n self.new_output.emit(output_text)", "def env_step(self, action):\n if action == 0: # Hit\n\n new_state = deepcopy(self.current_state)\n reward = 0\n terminal = False\n \n new_card = min(self.random.randint(1,14), 10)\n # print('new card:', new_card)\n \n if new_card == 1:\n self.player_ace_count += 1\n new_state['player_sum'] = self.current_state['player_sum'] + 11 \n else:\n new_state['player_sum'] = self.current_state['player_sum'] + new_card\n\n while new_state['player_sum'] > 21 and self.player_ace_count > 0:\n self.player_ace_count -= 1\n new_state['player_sum'] -= 10\n\n new_state['usable_ace'] = int(self.player_ace_count > 0)\n\n if new_state['player_sum'] > 21: # Goes bust\n reward = -1\n terminal = True\n\n elif action == 1: # Stick\n\n new_state = deepcopy(self.current_state)\n terminal = True\n\n if self.current_state['dealer_card'] == 1:\n dealer_ace = 1\n dealer_sum = 11\n else:\n dealer_ace = 0\n dealer_sum = self.current_state['dealer_card']\n\n first_two_cards = True\n while dealer_sum < self.dealer_sticks or first_two_cards:\n first_two_cards = False\n # new_card = self.random.choice(range(1,11), p=self.card_probs)\n new_card = min(self.random.randint(1,14), 10)\n if new_card == 1:\n dealer_sum += 11\n dealer_ace += 1\n else:\n dealer_sum += new_card\n\n while dealer_sum > 21 and dealer_ace > 0:\n dealer_sum -= 10\n dealer_ace -= 1\n dealer_ace = int(dealer_ace > 0)\n # print('dealer:', new_card)\n\n # print('dealer sum:', dealer_sum)\n if dealer_sum > 21:\n reward = 1\n else:\n if new_state['player_sum'] > dealer_sum:\n reward = 1\n elif new_state['player_sum'] < dealer_sum:\n reward = -1\n else:\n reward = 0\n # reward = int(new_state['player_sum'] > dealer_sum) - int(new_state['player_sum'] < dealer_sum)\n\n else:\n raise Exception(\"Invalid action.\")\n\n self.current_state = new_state\n\n self.reward_obs_term = (reward, self.observation(self.current_state), terminal)\n\n return self.reward_obs_term", "def buy_card(self):\n\n print(f\"Hand has buying power {self.hand_buying_power}...\")\n bought_card = None\n\n # by Platinium, if possible\n # otherwise (game stage agnostic) can buy a province or colony, always buy it\n if ((self.highest_buyable_money == cards.PLATINUM) and\n (self.game_stage == GameStage.early_game)):\n bought_card = cards.PLATINUM\n elif ((self.highest_buyable_victory_points == cards.PROVINCE) or\n (self.highest_buyable_victory_points == cards.COLONY)):\n bought_card = self.highest_buyable_victory_points\n else:\n # buy the highest buyable money by default\n if (self.highest_buyable_money != cards.COPPER):\n bought_card = self.highest_buyable_money\n\n # except if in the late game stage, in which case buy the highest\n # buyable victory points instead\n if ((self.game_stage == GameStage.late_game) and\n (self.highest_buyable_victory_points) and\n (self.highest_buyable_victory_points.victory_points > 0)):\n 
bought_card = self.highest_buyable_victory_points\n print(f\"Late Stage Game, so buying victory points over money\")\n\n # explain the play\n self.speak_hand()\n s = f\"for total buying power of {self.hand_buying_power}\"\n self.game.speak_str(s)\n\n # gain the card bought, if any, to the discard pile:\n if bought_card:\n s = f\"I buy {bought_card.name}\"\n self.game.speak_str(s)\n\n # gain the card to the discard pile\n self.deck.discard.append(bought_card)\n self.game.buy_card(bought_card)\n else:\n s = f\"I do not buy anything\"\n self.game.speak_str(s)\n\n # the whole hand is used up buying the card, discard the hand\n self.deck.discard_hand()", "def run(self):\n print \"Welcome to the BlackJack game ......\" # print help function if needed\n deckObj = Deck()\n deckObj.shuffle()\n while(not self.checkGameComplete()):\n self.displayGame()\n card = deckObj.deal()\n # ask user for move\n position = raw_input('Please input a number [1-16] for table, or [17-20] for discard list\\n')\n isPass = self.errorChecking(position)\n while(not isPass):\n position = raw_input('Please input a number [1-16] for table, or [17-20] for discard list\\n')\n isPass = self.errorChecking(position)\n # update table\n self.updateTableAndDiscardLs(position,card)\n ### Score Game\n self.displayGame()\n score = self.scoreGame()\n print 'Congratulations! Your final score is:'\n print score\n print 'Game is done... Thank you!'", "def burn_card():\n\tglobal my_deck\n\tburn=my_deck.draw()\n\tmy_deck.discard(burn)\n\treturn my_deck", "def place_bet(self) -> None:\n amount = self.get_bet()\n while not self.valid_bet(amount):\n print(f\"That is an invalid bet. Please input an amount within ${MINIMUM_BET()} and ${self.balance}\\n\")\n amount = self.get_bet()\n self.balance -= amount\n self.bet = amount\n print(f\"A total of ${self.bet} has been deducted from your balance. 
Good luck, player!\\n\")\n time.sleep(1)", "async def blackjack(self, ctx, arg: int): \n db = sqlite3.connect('main.sqlite')\n cursor = db.cursor()\n cursor.execute(f'SELECT user_id, jacks FROM main WHERE user_id = {ctx.author.id}')\n result = cursor.fetchone()\n embed = discord.Embed(color=0x228b22, title=\"Blackjack\")\n if result is not None:\n if arg > result[1]:\n embed.add_field(name=\"Error\", value=f\"You can't bid more chips than you have!\", inline=False)\n embed.set_footer(text=\"You can check your balance using the *profile* command\")\n else:\n player, house = [],[]\n deck.deal(player,2)\n deck.deal(house, 2)\n embed.add_field(name=\"Your Hand:\", value=f\"```{deck.display_hand(player)}``` \\n Value: {deck.hand_value(player)}\")\n embed.add_field(name=\"Dealer's Hand:\", value=f\"```['{deck.display_hand(house)[1]}', '?'] ``` \\n Value: ?\")\n embed.set_footer(text=\"Type `hit` or `stay` to take your turn!\")\n await ctx.send(content=None, embed=embed)\n if deck.hand_value(house) != 21 and deck.hand_value(player) != 21:\n msg = await self.client.wait_for('message', check=lambda message: message.author == ctx.author)\n while msg.content.startswith(\"hit\") or msg.content.startswith(\"Hit\"):\n embed.remove_field(0)\n deck.deal(player)\n embed.insert_field_at(0, name=\"Your Hand:\", value=f\"```{deck.display_hand(player)}``` \\n Value: {deck.hand_value(player)}\")\n await ctx.send(content=None, embed=embed)\n if deck.hand_value(player) > 21:\n break\n msg = await self.client.wait_for('message', check=lambda message: message.author == ctx.author)\n embed.remove_field(1)\n embed.set_footer(text=\"\")\n deck.house_turn(house)\n embed.add_field(name=\"Dealer's Hand:\", value=f\"```{deck.display_hand(house)}``` \\n Value: {deck.hand_value(house)}\")\n if deck.hand_value(player) == 21:\n outcome = \"Blackjack!\"\n bal = \"won\"\n chips = int(result[1] + arg*1.5)\n elif deck.hand_value(player) > 21:\n outcome = \"Player bust, you lose\"\n bal = \"lost\"\n chips = int(result[1] - arg)\n elif deck.hand_value(house) > 21:\n outcome = \"Dealer bust, you win!\"\n bal = \"won\"\n chips = int(result[1] + arg)\n elif deck.hand_value(player) > deck.hand_value(house):\n outcome = \"Win!\"\n bal = \"won\"\n chips = int(result[1] + arg)\n elif deck.hand_value(player) == deck.hand_value(house):\n outcome = \"Push, chips back\"\n bal = \"gotten back your\"\n chips = int(result[1])\n else:\n outcome = \"Loss\"\n bal = \"lost\"\n chips = int(result[1] - arg)\n sql = (\"UPDATE main SET jacks = ? WHERE user_id = ?\")\n val = (chips, ctx.author.id)\n cursor.execute(sql, val)\n db.commit()\n cursor.close()\n db.close()\n if chips == int(result[1]):\n chips += arg\n embed.add_field(name=outcome, value=f\"You have {bal} <:chip:657253017262751767> **{abs(int(result[1] - chips))}** chips\", inline=False)\n await ctx.send(content=None, embed=embed)\n else:\n await ctx.send(\"You must register before you can play blackjack!\")", "def stay(self):\n global dealer_hand, deck, outcome, in_play\n \n if in_play:\n while dealer_hand.get_value() < 17:\n dealer_hand.add_card(deck.deal_card())\n \n if dealer_hand.get_value() > 21:\n # print \"Dealer is busted.\\nPlayer wins.\"\n self.outcome.set(\"Dealer is busted. Player wins. New deal?\")\n self.won += 1\n self.score.set(str(self.won) + \"/\" + str(self.lost))\n elif player_hand.get_value() > 21:\n # print \"Player is busted.\\nDealer wins.\"\n self.outcome.set(\"Player is busted. Dealer wins. 
New deal?\")\n self.lost += 1\n self.score.set(str(self.won) + \"/\" + str(self.lost))\n elif dealer_hand.get_value() >= player_hand.get_value():\n # print \"Dealer wins.\"\n self.outcome.set(\"Dealer wins. New deal?\")\n self.lost += 1\n self.score.set(str(self.won) + \"/\" + str(self.lost))\n else:\n # print \"Player wins.\"\n self.outcome.set(\"Player wins! New deal?\")\n self.won += 1\n self.score.set(str(self.won) + \"/\" + str(self.lost))\n in_play = False\n draw(canvas)", "def beginRound(self):\n\t\tself.gameState = Table.PRE_FLOP\n\t\tfor p in self.getPlayers():\n\t\t\tif p.money <= 0:\n\t\t\t\tprint p.name\n\t\t\t\tself.playerRemoveList.append(p)\n\t\tself.removeFromPlayerList()\n\t\tif len(self.getPlayers()) == 1:\n\t\t\tself.isGameEnd = True\n\t\telse:\n\t\t\tself.roundNo += 1\n\t\t\tself.determineBlinds()\n\t\t\tself.curRaise = self.bigBlind\n\t\t\tself.collectSmallBlind()\n\t\t\tself.collectBigBlind()\n\t\t\tself.deal()\n\t\t\tself.setState()\n\t\t\tif self.noOfPlayers() == 2:\n\t\t\t\tself.turn = self.curDealerSeatNo\n\t\t\t\t_, self.roundEndSeat = self.findNthPlayerFromSeat(self.turn, 1)\n\t\t\telse:\n\t\t\t\t_, self.turn = self.findNthPlayerFromSeat(self.curDealerSeatNo, 3)\n\t\t\t\t_, self.roundEndSeat = self.findNthPlayerFromSeat(self.curDealerSeatNo, 2)", "def start_game(self):\n while self.can_deal:\n self.take_turn()", "def take_turn(self):\n \n self.card_1 = self.get_card()\n self.display_card_1()\n guess = self.player.higher_lower()\n self.card_2 = self.get_card()\n self.display_card_2()\n self.compare_cards(guess)\n self.player.print_score()\n if self.player.score > 0:\n self.can_deal = self.player.keep_playing()\n print(\"\\n\")\n else:\n self.can_deal = False\n print(\"Game overThanks for playing!\")", "def start_game(self):\n self.init_card = self.next_card.throw_card()\n self.current_card = \"\"\n self.final_score = 0\n\n while self.keep_playing:\n \"\"\"Functions to start the game\"\"\"\n # Calls the output functions, making the game start\n self.output(self.init_card) \n\n #Evaluating if the score is zero or not\n if self.score == 0:\n #if the score is zero, it is game over and end of the game.\n print(\"=============================================================================\")\n print(\"GAME OVER! \\nThank you for you time playing this game. \\nWe hope you have fun\")\n print(\"=============================================================================\")\n break\n else:\n #Else we will ask for the user if they wanted to play or not.\n play = input(\"Keep Playing? [y/n]\")\n if \"n\" in play:\n print(\"Thank you for playing with us. 
Have a nice day!\")\n break", "async def add_card(self, user_id):\n end_game = False\n check = 0\n\n separator = ','\n current_cards = await self.get_current_cards(user_id)\n game_id = await self.get_game_by_player(user_id)\n game = await self.get_game(game_id)\n channel = await ex.client.fetch_channel(game[5])\n stand = await self.check_player_standing(user_id)\n player1_score = await self.get_player_total(game[1])\n player2_score = await self.get_player_total(game[2])\n player1_cards = await self.get_current_cards(game[1])\n if not stand:\n available_cards = await self.get_available_cards(game_id)\n random_card = random.choice(available_cards)\n current_cards.append(str(random_card))\n cards = separator.join(current_cards)\n current_total = await self.get_player_total(user_id)\n random_card_value = await self.get_card_value(random_card)\n if current_total + random_card_value > 21:\n for card in current_cards: # this includes the random card\n if await self.check_if_ace(card, user_id) and check != 1:\n check = 1\n current_total = (current_total + random_card_value) - 10\n if check == 0: # if there was no ace\n current_total = current_total + random_card_value\n else:\n current_total = current_total + random_card_value\n await ex.conn.execute(\"UPDATE blackjack.currentstatus SET inhand = $1, total = $2 WHERE userid = $3\", cards, current_total, user_id)\n if current_total > 21:\n if user_id == game[2] and self.check_if_bot(game[2]):\n if player1_score > 21 and current_total >= 16:\n end_game = True\n await self.set_player_stand(game[1])\n await self.set_player_stand(game[2])\n elif player1_score > 21 and current_total < 16:\n await self.add_card(game[2])\n elif player1_score < 22 and current_total > 21:\n pass\n else:\n end_game = True\n elif self.check_if_bot(game[2]) and not self.check_if_bot(user_id): # if user_id is not the bot\n if player2_score < 16:\n await self.add_card(game[2])\n else:\n await self.set_player_stand(user_id)\n await self.set_player_stand(game[2])\n end_game = True\n else:\n if user_id == game[2] and self.check_if_bot(game[2]):\n if current_total < 16143478541328187392 and len(player1_cards) > 2:\n await self.add_card(game[2])\n if await self.check_player_standing(game[1]) and current_total >= 16:\n end_game = True\n if not self.check_if_bot(user_id):\n if self.check_if_bot(game[2]):\n await self.send_cards_to_channel(channel, user_id, random_card, True)\n else:\n await self.send_cards_to_channel(channel, user_id, random_card)\n else:\n await channel.send(f\"> **You already stood.**\")\n if await self.check_game_over(game_id):\n await self.finish_game(game_id, channel)\n if end_game:\n await self.finish_game(game_id, channel)", "def restock(self):\n self.money = 9999", "def simulate(deck): \n \n # Initialize Banker and Player\n # player_third_card is initialized to -10 to signify that it doesn't exist.\n banker = 0\n player = 0\n player_third_card = -10\n \n# Deal out two hands of two cards\n player = (player + deck.pop()) % 10\n player = (player + deck.pop()) % 10\n \n banker = (banker + deck.pop()) % 10\n banker = (banker + deck.pop()) % 10\n \n# Check for natural\n if player >= 8 and banker >= 8:\n return 'tie'\n elif banker >= 8:\n return 'banker'\n elif player >= 8:\n return 'player'\n \n\n# Run through Player hand\n if player <= 5:\n player_third_card = deck.pop()\n player = (player + player_third_card) % 10\n \n\n# Run through Banker hand\n if player_third_card == -10 and banker < 6:\n banker = (banker + deck.pop()) % 10\n elif banker <= 2:\n banker = (banker 
+ deck.pop()) % 10\n elif banker == 3 and player_third_card != 8:\n banker = (banker + deck.pop()) % 10\n elif banker == 4 and player_third_card >= 2 and player_third_card <=7:\n banker = (banker + deck.pop()) % 10\n elif banker == 5 and player_third_card >= 4 and player_third_card <=7:\n banker = (banker + deck.pop()) % 10\n elif banker == 6 and (player_third_card == 6 or player_third_card == 7):\n banker = (banker + deck.pop()) % 10\n \n \n# Compare hands and return results\n if player > banker:\n return 'player'\n elif banker > player:\n return 'banker'\n else:\n return 'tie'", "def end_turn(self):\n for _ in range(self._hand.size()):\n card = self._hand.pop()\n self._discard.push(card)\n\n for _ in range(self._active.size()):\n card = self._active.pop()\n self._discard.push(card)\n\n for _ in range(self._handsize):\n if self._deck.size() == 0:\n self._discard.shuffle_collection()\n self._deck.replace(self._discard)\n self._discard.clear_collection()\n card = self._deck.pop()\n self._hand.push(card)\n self._money = 0\n self._attack = 0", "def run():\n \n # Enter player name\n #player_name = raw_input(\"Put your Name: \\n \")\n player1 = Player(raw_input(\"Put Player 1 name: \\n \"))\n player2 = Player(raw_input(\"Put Player 2 name: \\n \")) \n \n # Generate Deck\n cards = gen_deck()\n \n game_on = True\n start_pl = 0\n while game_on == True :\n deck = copy(cards) # Cards being played this hand\n deal_cards(deck, player1, player2)\n \n play_set(player1, player2, start_pl) \n\n game_on = check_score(player1, player2, game_on)", "def test_discard_buy(self):\n self.plr.test_input = [\"finish selecting\", \"discard gold\"]\n self.plr.play_card(self.card)\n self.assertEqual(self.plr.piles[Piles.HAND].size(), 2)\n self.assertEqual(self.plr.actions.get(), 1)\n self.assertEqual(self.plr.buys.get(), 2)\n self.assertNotIn(\"Gold\", self.plr.piles[Piles.HAND])", "def new_game(self, deal_3=False, auto_flip_tab=True, decks=DEFAULT_DECKS, tableau_qty=DEFAULT_TABLEAUS, custom_seed=None, commandline=False):\n self.deal_3 = deal_3\n self.auto_flip_tab = auto_flip_tab\n self.decks = decks\n self.tableau_qty = tableau_qty\n self.commandline = commandline\n self.deck = self._init_decks(custom_seed)\n self.backup_deck = copy.deepcopy(self.deck)\n self.start_game()", "def step(self, action):\n assert self.completed_rounds < self.num_rounds\n\n player = self.players[self.current_player_id]\n card = action\n\n if card not in player.hand:\n raise ValueError(\"Action not allowed because the card is not in the player's hand\")\n\n player.hand.remove(card)\n player.played.add(card)\n # print(f\"Player {self.current_player_id} with hand {[c.id for c in player.hand]} played the card {card.id}\")\n best_combination_on_the_table = self._get_best_combination(card)\n if best_combination_on_the_table:\n self.last_player_capturing_id = self.current_player_id\n player.captured.add(card)\n for c in best_combination_on_the_table:\n self.table.remove(c)\n player.captured.add(c)\n if not self.table and not (self._is_last_round and self._is_round_over()):\n player.scope += 1\n else:\n self.table.add(card)\n # print(f\"Cards on the table after play: {[c.id for c in self.table]}\")\n\n if self._is_round_over():\n self.completed_rounds += 1\n # print(f\"=========== Round {self.current_round} completed ============\")\n self.current_player_id = (self.current_player_id + 1) % self.num_players\n\n if self.is_over():\n last_player_capturing = self.players[self.last_player_capturing_id]\n # print(f\"Giving the remaining cards to player 
{last_player_capturing.player_id}\")\n for card in self.table:\n last_player_capturing.captured.add(card)\n self.table = set()\n assert all([len(p.played) == 10 for p in self.players])\n assert all([len(p.hand) == 0 for p in self.players])\n return self.get_state(), self.current_player_id", "def placeBets(self):\n if self.game_state.board_state == PokerGameState.BOARD_STATE_PRE_FLOP:\n self.game_state.current_turn_index = self.dealer\n self.game_state.current_final_decision_index = self.game_state.current_turn_index\n else:\n self.game_state.current_turn_index = (self.dealer+1)%self.num_players\n self.game_state.current_final_decision_index = self.game_state.current_turn_index\n if DEBUG:\n print \"current_final_decision_index: \" + str(self.game_state.current_final_decision_index) + \" \" + str(self.game_state.player_list[self.game_state.current_final_decision_index].name)\n print \"current_turn_index: \" + str(self.game_state.current_turn_index) + \" \" + str(self.game_state.player_list[self.game_state.current_turn_index].name)\n\n \"\"\" Run an initial decision so that current_turn_index doesn't equal current_final_decision_index \"\"\"\n if DEBUG:\n print \"Getting initial poker decision from \" + str(self.game_state.player_list[self.game_state.current_turn_index].name)\n poker_decision = self.game_state.player_list[self.game_state.current_turn_index].getPokerDecision(self.game_state, self.decision_list)\n self.handleDecision(poker_decision)\n self.game_state.current_turn_index = (self.game_state.current_turn_index + 1) % self.num_players\n if DEBUG:\n print \"current_turn_index: \" + str(self.game_state.current_turn_index) + \" \" + str(self.game_state.player_list[self.game_state.current_turn_index].name)\n print \"current_final_decision_index: \" + str(self.game_state.current_final_decision_index) + \" \" + str(self.game_state.player_list[self.game_state.current_final_decision_index].name)\n\n while int(self.game_state.current_turn_index) != int(self.game_state.current_final_decision_index):\n if self.game_state.numActive() == 1:\n return\n if DEBUG:\n print \"Getting poker decision from \" + str(self.game_state.player_list[self.game_state.current_turn_index].name) + \"...\"\n poker_decision = self.game_state.player_list[self.game_state.current_turn_index].getPokerDecision(self.game_state, self.decision_list)\n self.handleDecision(poker_decision)\n self.game_state.current_turn_index = (self.game_state.current_turn_index + 1) % self.num_players\n if poker_decision.action_type == PokerDecision.ACTION_TYPE_RAISE:\n self.game_state.current_final_decision_index = self.game_state.current_turn_index\n if DEBUG:\n print \"Next current_turn_index: \" + str(self.game_state.current_turn_index) + \" \" + str(self.game_state.player_list[self.game_state.current_turn_index].name)\n print \"Next current_final_decision_index: \" + str(self.game_state.current_final_decision_index) + \" \" + str(self.game_state.player_list[self.game_state.current_final_decision_index].name)\n print \"while() cond: \" + str(int(self.game_state.current_turn_index) != int(self.game_state.current_final_decision_index))", "def round(self):\n #player turn\n if self.started:\n self.started = False #registers the game as started then immediately turns that value false\n if self.initial_action:\n card = self.deck.deal()\n self.player.value += card.value\n if card.is_ace:\n self.player.usable_ace = True\n else:\n self.player.playing = False\n else: \n if self.apply_policy():\n card = self.deck.deal()\n self.player.value += 
card.value\n if card.is_ace:\n self.player.usable_ace = True\n else:\n self.player.playing = False\n\n #dealer turn\n if self.dealer.value < 17:\n card = self.deck.deal()\n self.dealer.value += card.value\n self.dealer.visible_value += card.value\n #allow people to reduce their scores by applying aces\n self.apply_ace()\n #check to see if anyone has bust by making bust people not _playing\n if self.player.value > 21:\n self.player.broke = True\n self.player.playing = False\n if self.dealer.value > 21:\n self.dealer.broke = True", "def end_game(bank, card_deck) -> bool:\n if bank.balance < MINIMUM_BET():\n print(\"You're broke and you've run out of money! :(\\n\")\n elif not card_deck.cards:\n print(\"We've exhausted the deck of cards.\\n\")\n return bank.balance < MINIMUM_BET() or not card_deck.cards" ]
[ "0.61270714", "0.6029881", "0.5949783", "0.5931026", "0.58528876", "0.5843963", "0.5828268", "0.5805273", "0.5793747", "0.5761536", "0.5664966", "0.5653092", "0.5644708", "0.5637475", "0.5613593", "0.5606919", "0.5594004", "0.5582903", "0.55661905", "0.5557825", "0.5552996", "0.5539226", "0.5538811", "0.5538761", "0.55363023", "0.55326605", "0.5529543", "0.5528153", "0.55216116", "0.5519746" ]
0.77978367
0
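The restart() contract above — leave when broke, otherwise deal a fresh hand and zero the bet — can be exercised with a small driver. Everything below except the restart() body is an assumption: BJCards is stubbed with a list, and leave() just flips state.

class BJPlayer(object):
    def __init__(self, budget):
        self.budget = budget
        self.state = 'active'
    def leave(self):
        self.state = 'left'
    def restart(self):
        self.state = 'active'
        if self.budget <= 0:
            return self.leave()
        self.cards = []          # stand-in for BJCards()
        self.bet_amount = 0

broke = BJPlayer(budget=0)
broke.restart()
assert broke.state == 'left'     # broke players leave instead of re-dealing

solvent = BJPlayer(budget=50)
solvent.restart()
assert solvent.state == 'active' and solvent.bet_amount == 0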
Bet the given amount of money. The bet cannot exceed the player's budget.
def bet(self, amount):
    if amount > self.budget:
        print 'you cannot bet because of little money'
    else:
        self.bet_amount = amount
        print 'you bet %s' % (amount)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bet(self, amount):\r\n\r\n if self.players[self.active_player].credits < self.big_blind:\r\n message = \"Player {} won! Not enough money remaining.\".format(self.players[(self.active_player + 1) %\r\n len(self.players)].name)\r\n self.game_message.emit(message)\r\n self.restart()\r\n if self.players[(self.active_player + 1) % len(self.players)].credits < self.big_blind:\r\n message = \"Player {} won! Not enough money remaining.\".format(self.players[self.active_player].name)\r\n self.game_message_warning.emit(message)\r\n self.restart()\r\n\r\n if amount == 0:\r\n message = \"Raises must be larger than zero!\"\r\n self.game_message_warning.emit(message)\r\n\r\n elif self.previous_bet + amount > self.players[self.active_player].credits:\r\n message = \"Not enough money!\"\r\n self.game_message_warning.emit(message)\r\n else:\r\n self.pot += amount\r\n self.new_pot.emit()\r\n\r\n self.players[self.active_player].credits -= (self.previous_bet + amount)\r\n self.new_credits.emit()\r\n\r\n output_text = \"{} bet ${} and raised ${}\".format(self.players[self.active_player].name, self.previous_bet,\r\n amount)\r\n\r\n self.previous_bet = (self.previous_bet + amount)\r\n self.actions += 1\r\n\r\n self.new_output.emit(output_text)\r\n\r\n self.active_player = (self.active_player + 1) % len(self.players)\r\n\r\n # Update the players to hide their cards when it is not their turn\r\n for player in self.players:\r\n player.flip_cards()\r\n\r\n self.progress_game()", "def get_bet(self) -> int:\n return int(input(f\"How much money would you like to place? \"\n f\"(input an integer between {MINIMUM_BET()}-{self.balance}): \"))", "def raise_bet(value):\r\n\r\n global total_bet, dealer_bet, in_play, bottom_alert\r\n if value > player.get_cash() or not in_play:\r\n bottom_alert = \"You cannot bet $%i right now.\" % (value)\r\n elif in_play:\r\n player.spend_cash(value)\r\n dealer_bet += value\r\n total_bet += value * 2\r\n bottom_alert = \"\"", "def make_bet(self, amount):\n self.update_fear(amount)\n self.bot.bet(amount)", "def bet(self, amount: int = 100):\n if amount <= self.current_balance:\n self.current_bet = amount\n else:\n raise Exception(f\"Can't cover {amount} bet, balance: {current_balance}\")\n return self", "def all_in():\r\n\r\n raise_bet(player.get_cash())", "def get_player_bet(self) -> None:\n print(\"Please enter the amount you want to bet.\")\n while self.user.bet == 0:\n input_ = input(\">>> \")\n try:\n input_ = float(input_)\n self.user.bet = input_\n except ValueError as e:\n print(str(e))\n continue", "def bet(self, amount: float) -> None:\n if amount <= self.balance:\n self.__bet = amount\n else:\n raise ValueError(\"Amount exceeds balance.\")", "def place_bet(self) -> None:\n amount = self.get_bet()\n while not self.valid_bet(amount):\n print(f\"That is an invalid bet. Please input an amount within ${MINIMUM_BET()} and ${self.balance}\\n\")\n amount = self.get_bet()\n self.balance -= amount\n self.bet = amount\n print(f\"A total of ${self.bet} has been deducted from your balance. 
Good luck, player!\\n\")\n time.sleep(1)", "async def bet(message, amount):\n\n # make sure their discord account is linked\n mixcord_user = await database.get_user(message.user_id)\n if mixcord_user is None:\n return \"your mixer account must be linked to your discord via mixcord to use this command.\"\n\n # make sure they have sufficient balance\n if amount == \"all\":\n amount = mixcord_user[\"balance\"]\n if amount == 0:\n return \"amount must be a positive integer.\"\n else:\n amount = utils.get_positive_int(amount)\n if amount is None:\n return \"amount must be a positive integer.\"\n if mixcord_user[\"balance\"] < amount:\n return \"you have insufficient balance. ({}/{} {})\".format(mixcord_user[\"balance\"], amount, currency_name)\n\n won = random.randint(0, 1) == 1\n if won:\n await database.add_balance(message.user_id, amount)\n return \"you won :D you now have {} {}.\".format((mixcord_user[\"balance\"] + amount), currency_name)\n else:\n await database.add_balance(message.user_id, -amount)\n return \"you lost :( you now have {} {}.\".format((mixcord_user[\"balance\"] - amount), currency_name)", "def bet(self, amount):\n self._chips -= amount\n self._bet += amount\n assert self._chips >= 0", "def test_bet(self):\n hand = self._hand\n self.assertEqual(hand.bet.amount, 150)", "def bet(self):\n while True:\n try:\n self.round_bet = float(\n input(f'{self.name}, please enter an amount to bet for this round: '))\n if self.round_bet > self.bankroll:\n print('You have bet more than you have!')\n continue\n if self.round_bet <= 0:\n self.out_of_round = True\n else:\n self.bankroll -= self.round_bet\n break\n except TypeError:\n print('Please enter in a valid bet!')\n continue\n except ValueError:\n print('Please enter in a valid bet!')\n return self.name, self.round_bet", "def event_player_blackjack(self) -> None:\n win_amount = self.user.bet + 1.5\n print(\"Congratulations, you win:\", win_amount)\n self.user.win_balance(win_amount)", "async def bet(message, user: ParamType.MIXER_USER, amount):\n\n username = user.username.lower()\n username_sender = message.username.lower()\n\n mixcord_user = await database.get_user(message.user_id)\n\n # handle if somebody is trying to accept or deny\n if amount == \"accept\" or amount == \"deny\":\n\n # get the pending bet\n bet = pending_bets.get(username)\n if bet is None or bet[\"username\"] != username_sender:\n return \"failed to find the bet you're responding to.\"\n\n # delete the pending bet, because we're handling it\n del pending_bets[username]\n\n # if the user wants to deny the bet, don't do anything\n if amount == \"deny\":\n return \"you have denied the pending bet from @{}.\".format(username)\n\n # if the user wants to accept the bet, continue\n if amount == \"accept\":\n\n # make sure they have enough money to accept\n if bet[\"amount\"] > mixcord_user[\"balance\"]:\n return \"you have insufficient funds to accept this bet.\"\n\n # make sure the issuer of the challenge still has enough money\n competitor_mixcord_user = await database.get_user(user.id)\n if bet[\"amount\"] > competitor_mixcord_user[\"balance\"]:\n return \"@{} no longer has sufficient funding to run this bet.\".format(username)\n\n # determine winner/loser\n pick = random.randint(0, 1) == 1\n winner_id = user.id if pick else message.user_id\n loser_id = message.user_id if pick else user.id\n winner_username = username if pick else username_sender\n loser_username = message.username if pick else username\n\n # affect balances accordingly\n await 
database.add_balance(winner_id, bet[\"amount\"])\n await database.add_balance(loser_id, -bet[\"amount\"])\n\n # end the bet!\n await chat.send_message(\"@{} has won {} {}! better luck next time, @{}.\".format(winner_username, bet[\"amount\"], currency_name, loser_username))\n return None\n\n # make sure the amount is numeric by converting it to an int\n amount = utils.get_positive_int(amount)\n if amount is None: return \"amount must be a positive integer.\"\n\n # make sure they're not trying to start a bet against themself :/\n if message.username == username:\n return \"you're not able to start a bet against yourself.\"\n\n # make sure we don't already have a pending bet\n if pending_bets.get(message.username) is not None:\n return \"you already have a pending bet.\"\n\n # make sure the challenger has enough money to start the bet\n if amount > mixcord_user[\"balance\"]:\n return \"you have insufficient funds to request this bet.\"\n\n # store challenge information\n pending_bets[message.username] = {\n \"username\": username,\n \"amount\": amount\n }\n\n # send messages indicating the challenge has been issued\n await chat.send_message(\"@{} has challenged @{} to a bet of {} {}!\".format(message.username, username, amount, currency_name))\n await asyncio.sleep(0.5)\n await chat.send_message(\"use {}bet @{} [accept/deny] to respond to your pending bet!\".format(chat.commands.prefix, message.username), username)\n\n # automatically timeout the bet in 30 seconds\n await asyncio.sleep(30)\n bet = pending_bets.get(message.username)\n if bet is not None:\n del pending_bets[message.username]\n await chat.send_message(\"@{} your pending bet has timed out.\".format(message.username))", "def balance(self, player):\n print 'hand of %s: %s'%(player.name,player.cards.hand)\n print 'hand of %s: %s'%(self.name,self.cards.hand)\n if player.cards.hand == self.cards.hand:\n return 0\n elif player.cards.hand > self.cards.hand:\n return player.bet_amount*2\n else:\n return -player.bet_amount", "def bet_check(m):\n try:\n value = float(m.content)\n if 0 <= value <= player.coins:\n return True\n else:\n return False\n except:\n return False", "async def process_bj_game(self, ctx, amount, user_id):\n if amount >= 0:\n if not await self.check_in_game(user_id, ctx):\n if amount > await ex.u_currency.get_balance(user_id):\n await ctx.send(f\"> **{ctx.author}, you can not bet more than your current balance.**\")\n else:\n return True\n else:\n await ctx.send(f\"> **{ctx.author}, you can not bet a negative number.**\")", "def setBigBlindBetAmount(self):\n\t\tif sum(self.currentBet) < self.bigBlind:\n\t\t\tif len(self.pots) > 1:\n\t\t\t\tnewbet = self.bigBlind - sum(self.currentBet)\n\t\t\telse:\n\t\t\t\tnewbet = self.bigBlind\n\t\t\tself.currentBet[-1] = newbet", "def amount_bet(self, user):\n try:\n return Bet.objects.get(group_match = self, user=user).amount\n except Bet.DoesNotExist:\n return Decimal(0)", "def user_balance_lost(user_pokemon: str, computer_pokemon: str, bet_amount) -> int:\n user_pokemon = get_pokemon(user_pokemon)\n computer_pokemon = get_pokemon(computer_pokemon)\n user_attack = type_logic.damage_to(user_pokemon.first_type, computer_pokemon.first_type) + type_logic.damage_to(user_pokemon.first_type, computer_pokemon.second_type)\n computer_attack = type_logic.damage_to(computer_pokemon.first_type, user_pokemon.first_type) + type_logic.damage_to(computer_pokemon.first_type, user_pokemon.second_type)\n difference = abs(user_attack - computer_attack)\n print(\"{} attacks {} for 
{}\".format(user_pokemon.name, computer_pokemon.name, difference))\n\n money_exchange = 0\n if difference == 0:\n # 45% of this happening\n money_exchange = 0\n elif .5 <= difference <= 1.5:\n # 33% of this happening\n money_exchange = .5 * bet_amount\n else:\n # 22% of this happening\n money_exchange = bet_amount\n\n return -money_exchange if user_attack < computer_attack else money_exchange", "def win(self, amount):\n self.balance += amount", "def post_bet(self, bot_name, amount):\n canPost = self.parent.post_bet(bot_name, amount)\n if canPost:\n self.pot += amount\n return True\n else:\n return False", "def playRound(budget: int) -> tuple:\n sum = sumOfDice(random.randint(1,6), random.randint(1,6))\n if sum == 7:\n budget += 4\n return (\"Win\",budget)\n else:\n budget -= 1\n return (\"Loss\",budget)", "async def balance(self, ctx):\r\n author = ctx.author\r\n with DB() as db:\r\n company = await self.get_active_company(ctx, db, author)\r\n history = db.query(CompanyHistory).filter(CompanyHistory.company == company.id).order_by(CompanyHistory.date.desc()).limit(2).all()\r\n net_worth = history[0].value\r\n delta = history[0].value - history[1].value if len(history) == 2 else 0\r\n percent = delta * 100 / history[1].value if len(history) == 2 else 0\r\n symbol = '⮝' if delta >= 0 else '⮟'\r\n embed = discord.Embed(title=f'{company.name}', description=f'{symbol}{round(percent, 2)}%', inline=True)\r\n embed.add_field(name='Cash Assets:', value=f'{round(company.balance, 2)} USD')\r\n embed.add_field(name='Net worth:', value=f'{round(net_worth, 2)} USD')\r\n await ctx.send(embed=embed)", "def do_call(self, money_each_player: int) -> None:\r\n delta = money_each_player - self.round_money.get_money() # How much more need to pay\r\n if self.balance.get_money() > delta:\r\n self.balance.pay_money(delta)\r\n self.round_money.add_money(delta)\r\n else:\r\n self.round_money.add_money(self.balance.pay_all_money())", "def get_money(self, fromobj):\n val, currency = money_from_args(self.args, fromobj)\n if val > currency:\n raise CommandError(\n \"Not enough money. 
You tried to {verb} {val}, but can only {verb} {currency}.\".format(\n verb=self.cmdstring, val=val, currency=currency\n )\n )\n fromobj.pay_money(val, self.caller)\n return val", "def lose(self) -> None:\n self._actual_money -= self._bet", "def betting_round(self, method, params):\n self.bet_history += [[]]\n current_bets = [self.starting_player] * len(self.agents)\n \n max_bet = 0\n if method == self.deal_cards:\n max_bet = big_blind\n current_bets[self.starting_player] = small_blind\n current_bets[(self.starting_player + 1) % len(self.agents)] = big_blind\n\n (self.all_in[self.starting_player], bet) = self.normalize_bet(self.chips[self.starting_player], method(self.agents[self.starting_player], params[self.starting_player]), max_bet)\n self.in_game[self.starting_player] = (not self.all_in[self.starting_player])\n current_bets[self.starting_player] = bet\n self.chips[self.starting_player] -= bet\n check = True if bet == 0 else False\n max_bet = max(max_bet, bet)\n self.pot += bet\n self.bet_history[-1] += [bet]\n\n raised_player = self.starting_player\n i = (raised_player + 1) % len(self.agents)\n\n if method == self.deal_cards:\n # raised_player = (self.starting_player + 1) % len(agents)\n check = False\n if bet > max_bet:\n raised_player = i\n max_bet = bet\n\n if bet == 0:\n self.in_game[i] = False\n self.in_game_count -= 1\n\n while (i != raised_player) and (not self.all_in[i]) and (current_bets[i] <= max_bet):\n if self.in_game[i]:\n (self.all_in[i], bet) = self.normalize_bet(self.chips[i], method(self.agents[i], params[i]), max_bet)\n self.in_game[i] = (not self.all_in[i])\n delta_bet = max(0, bet - current_bets[i])\n current_bets[i] = bet\n self.chips[i] -= delta_bet\n self.pot += delta_bet\n self.bet_history[-1] += [bet]\n\n if bet > max_bet:\n check = False\n raised_player = i\n max_bet = bet\n\n if bet == 0 and not check:\n self.in_game[i] = False\n self.in_game_count -= 1\n\n i = (i + 1) % len(self.agents)", "def pay_round_money(self) -> int:\r\n return self.round_money.pay_all_money()" ]
[ "0.7924277", "0.77066535", "0.7635262", "0.7549707", "0.73498285", "0.7271914", "0.7244077", "0.7236459", "0.7130203", "0.70558107", "0.70258206", "0.69620943", "0.6960594", "0.6937988", "0.6884604", "0.68403745", "0.67884886", "0.6780848", "0.6771703", "0.67711383", "0.6735188", "0.67231965", "0.66651106", "0.6648153", "0.6628875", "0.6613099", "0.657532", "0.65228945", "0.65218925", "0.65139395" ]
0.8240085
0
Hit (take a card) and check whether the hand has gone bust
def hit(self, card):
    self.cards.hit(card)
    # A hand value of -1 means no sum <= 21 is possible, i.e. the hand is bust.
    if self.cards.hand == -1:
        self.state = 'burst'
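The -1 sentinel matches the Hand.hit implementation shown among the negatives below, which sets hand = -1 once no card sum of 21 or less remains. A tiny standalone illustration of that convention (the helper name is an assumption):

def hand_value(cards):
    # Return the card total while it is still <= 21, else the -1 bust sentinel.
    total = sum(cards)
    return total if total <= 21 else -1

print(hand_value([10, 9]))      # 19 -> still in play
print(hand_value([10, 9, 5]))   # -1 -> state would become 'burst'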
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hit(player):\n deal_random_card(player)", "def action_hit(self) -> None:\n print(self.deal_card(self.user))", "def hit(self, player):\n\n hit_card = self.deck.draw()\n hit_card.flip()\n player.take_card(hit_card)\n\n if self.verbose:\n print(player, 'receives', hit_card)", "def hit(self, deck):\n self.showOneCard = False\n while self.getPoints() < 17:\n self.cards.append(deck.deal())", "def deal_self(self):\n self.cards.hit(self.get_card())\n if self.cards.hand < 17 and self.cards.hand>=0:\n self.state = 'active'\n elif self.cards.hand >= 17 and self.cards.hand <= 21:\n self.state = 'stand'\n elif self.cards.hand==-1:\n self.state = 'burst'", "def hitMe(hand, deck):\n if deck.cardsLeft == 0:\n return False\n hand.getCard(deck.drawCard())\n return True", "def hit(self):\n global in_play, deck, player_hand, dealer_hand, outcome, lost\n \n if in_play:\n player_hand.add_card(deck.deal_card())\n \n if player_hand.get_value() > 21:\n self.outcome.set(\"You have busted! Dealer wins. New deal?\")\n self.lost += 1\n self.score.set(str(self.won) + \"/\" + str(self.lost))\n in_play = False\n draw(canvas)\n\n print \"\\nPlayer hand: \", player_hand\n print \"Dealer hand: \", dealer_hand", "def player_hit(self):\n self.player.hit(self.deck)\n self.print_hands()\n \n if self.player.sum_cards() > 21:\n self.round_winner = True\n self.print_hands()\n print(\"BUST! Dealer wins.\")", "def hit(self, card):\n self.append(card)\n values=[]\n values.append(card.value())\n if values[0] < 2:\n values.append(values[0]+ 10)\n new_sums =set([v+s for v in values for s in self.possible_sums if v+s <=21])\n new_sums =sorted(new_sums)\n if len(new_sums) ==0:\n self.hand=-1\n else:\n self.hand = new_sums[-1]\n self.possible_sums = new_sums", "def hit():\n \n # Update messages, score and the player's \"Hand\" status\n # as global variables.\n global outcome, outcome_plus, outcome_plus_plus, in_play, score, action \n \n # If the \"Hand\" is in play, hit the \"player\". \n if in_play:\n outcome = outcome_plus = outcome_plus_plus = \"\"\n player.add_card(deck_of_cards.deal_card())\n else:\n return None\n \n # If busted, update messages, score and the player's \n # \"Hand\" status.\n if player.get_value() > 21:\n outcome = PLAYER_BUSTED\n outcome_plus = outcome_plus_plus = \"\"\n action = NEW_DEAL \n score -= SCORE_POINTS\n in_play = False\n \n return None", "def hit(self, card):\n self.hand.append(card)", "def hit(hand=bj.player1.hand):\r\n hand.append(bj.deck.remove_card())", "def hook_buy_card(self, game, player, card):\n if card.isVictory():\n player.output(\"Gaining Gold from Hoard\")\n player.add_card(game[\"Gold\"].remove())", "def player_hit(self):\r\n if self.in_progress:\r\n self.player_hand.add(self.deck.deal())\r\n if self.player_hand.total > 21:\r\n self.status_color = 'red'\r\n self.game_status = \"Dealer WINS... 
Press 'r' to start game\"\r\n self.dealer_wins += 1\r\n self.in_progress = False\r\n self.refresh_canvas()", "def hit(self, hand_idx=0):\n player = self.players[hand_idx]\n if player['active']:\n player['hand'].append(self._pick_card())\n if self.dealer_hand_value() < self.dealer_min:\n self.dealer_hand.append(self._pick_card())\n if self.is_bust(hand_idx):\n self.stand(hand_idx) # Force Stand and compute game result\n # Turn Off Split and Double Down after the first hit\n if player['allow_dd']: # Don't allow double down after the first hit\n player['allow_dd'] = False\n if self.allow_split: # Don't allow split after the first hit\n self.allow_split = False", "def hit(self):\n assert not self.damaged\n self.damaged = True\n self.game_piece.hit()", "def deal(self):\n self.dealer.hit(self.deck)\n self.dealer.hit(self.deck)\n self.player.hit(self.deck)\n self.player.hit(self.deck)\n\n if self.player.sum_cards() == 21:\n self.round_winner = True\n self.print_hands()\n print(\"BLACKJACK! You win!\")", "def hit(self, hand):\n if hand == \"player\":\n self.player_hand.append(self.cards_list[self.top_card_int])\n self.calculate_value(\"player\")\n elif hand == \"dealer\":\n self.dealer_hand.append(self.cards_list[self.top_card_int])\n self.calculate_value(\"dealer\")\n self.top_card_int += 1\n self.update_card_positions()", "def hook_gain_this_card(self, game, player):\n empties = sum(1 for st in game.cardpiles if game[st].is_empty())\n for _ in range(empties):\n player.gain_card(\"Gold\")", "def event_player_bust(self) -> None:\n print(f\"Your hand contains {min(self.user.hand.value)}, you're bust\")\n self.event_house_wins()", "def hit(self, deck):\n try:\n self.hand.append(deck.pop(0))\n except IndexError:\n print('There are no more cards in the deck!')", "def check_card_action(self, card):\n if card.value == \"7\":\n self.seven_punishment()\n elif card.value == \"8\":\n self.eight_punishment()\n elif card.value == \"9\":\n self.nine_punishment()\n elif card.value == \"B\":\n self.jack_wish()", "def hit(self, deck):\n self.cards.append(deck.draw_card())", "def hook_buy_this_card(self, game, player):\n totrash = [c for c in player.piles[Piles.PLAYED] if c.isTreasure()]\n for c in totrash:\n player.output(f\"Mint trashing {c.name}\")\n player.trash_card(c)", "def someBalls(self):\n self.action.transaction(self.cardUid, 2)\n self.start()", "def event_house_blackjack(self) -> None:\n if 21 in self.user.hand.value:\n self.event_player_push()\n else:\n print(\"The house has blackjack\")\n self.event_house_wins()", "def is_bust(self, hand_idx=0):\n if self.player_hand_value(hand_idx) > 21:\n return True\n else:\n return False", "def hit():\r\n new_card = deck[random.randint(1, len(deck))]\r\n deck.remove(new_card)\r\n return new_card", "def cardDiscardable(self, card):\n if self.cardDead(card):\n return True\n\n cardAttr = \"\"\n if Suit.toString(card.getSuit()) == \"white\":\n cardAttr = \"w\"\n elif Suit.toString(card.getSuit()) == \"blue\":\n cardAttr = \"b\"\n elif Suit.toString(card.getSuit()) == \"red\":\n cardAttr = \"r\"\n elif Suit.toString(card.getSuit()) == \"green\":\n cardAttr = \"g\"\n elif Suit.toString(card.getSuit()) == \"yellow\":\n cardAttr = \"y\"\n\n if card.getValue() == 1:\n cardAttr += \"1\"\n elif card.getValue() == 2:\n cardAttr += \"2\"\n elif card.getValue() == 3:\n cardAttr += \"3\"\n elif card.getValue() == 4:\n cardAttr += \"4\"\n elif card.getValue() == 5:\n cardAttr += \"5\"\n\n if card.getValue() == 1:\n if self.discardedDict[cardAttr] < 2:\n 
self.discardedDict[cardAttr] += 1\n # print(3 - self.discardedDict[cardAttr], \"card remaining for \", cardAttr)\n return True\n elif card.getValue() == 2 or card.getValue() == 3 or card.getValue() == 4:\n if self.discardedDict[cardAttr] < 1:\n self.discardedDict[cardAttr] += 1\n # print(2 - self.discardedDict[cardAttr], \"card remaining for \", cardAttr)\n return True\n elif card.getValue() == 5:\n if self.discardedDict[cardAttr] < 0:\n self.discardedDict[cardAttr] += 1\n # print(1 - self.discardedDict[cardAttr], \"card remaining for \", cardAttr)\n return True\n # print(\"Useful card\")\n return False", "def hits(self, player1):\n newcard1 = self.deck.draw()\n player.hand.append(cards)\n print(\" Drew the {}.\".format(str(newcard)))\n\n while True:\n points = sum_hand(player.hand)\n\n if points < 17:\n print(\" Hit.\")\n self.hit(player)\n elif points == 21:\n print(\" {} wins!\".format(player.name))\n sys.exit(0) # End if someone wins\n elif points > 21:\n print(\" Bust!\")\n break\n else: # Stand if between 17 and 20 (inclusive)\n print(\" Standing at {} points.\".format(str(points)))\n self.scores[player.name] = points\n break" ]
[ "0.74350667", "0.71425444", "0.70626", "0.7058777", "0.70395243", "0.69725394", "0.691524", "0.68729144", "0.6838547", "0.68238586", "0.6760089", "0.6660787", "0.6624675", "0.6563094", "0.6508721", "0.6479972", "0.6469464", "0.642803", "0.63956666", "0.63560945", "0.63471985", "0.6343658", "0.63173115", "0.631177", "0.62771684", "0.6255673", "0.6208037", "0.61673963", "0.61631286", "0.6154547" ]
0.76179534
0
Turn the dealer's hidden card face up and settle the balance with each player in the game
def showdown(self):
    # Reveal the dealer's cards, then settle each player's bet.
    print "%s: %s" % (self.name, repr(self.cards))  # open dealer's cards
    for player in self.game.players:
        win = self.balance(player)
        if win > 0:
            print player.name, 'wins', win
        elif win == 0:
            print player.name, 'draws'
        elif win < 0:
            print player.name, 'loses', -win
        self.budget -= win
        player.budget += win
        print 'budget of %s : %s' % (player.name, player.budget)
    print 'budget of %s : %s' % (self.name, self.budget)
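The balance method (first entry in the negatives below) returns 0 on a draw, twice the bet on a player win, and minus the bet on a loss, so the two budget updates above transfer the stake in a single step. A worked example with assumed numbers:

bet_amount = 10
win = 2 * bet_amount        # balance() result for a player win
dealer_budget = 100 - win   # dealer pays out: 80
player_budget = 100 + win   # player collects: 120
print(dealer_budget, player_budget)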
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def balance(self, player):\n print 'hand of %s: %s'%(player.name,player.cards.hand)\n print 'hand of %s: %s'%(self.name,self.cards.hand)\n if player.cards.hand == self.cards.hand:\n return 0\n elif player.cards.hand > self.cards.hand:\n return player.bet_amount*2\n else:\n return -player.bet_amount", "def deal(self):\n\n if self.dealer: # Has cards in hand\n self.dealer.reset()\n\n if self.player: # Has cards in hand\n self.player.reset()\n\n dealer_first = self.deck.draw()\n dealer_second = self.deck.draw()\n dealer_second.flip()\n self.dealer.take_card(dealer_first)\n self.dealer.take_card(dealer_second)\n\n player_first = self.deck.draw()\n player_second = self.deck.draw()\n player_first.flip()\n player_second.flip()\n self.player.take_card(player_first)\n self.player.take_card(player_second)\n\n if self.verbose:\n print('Player bets:', self.player_bet)\n for player in (self.player, self.dealer):\n print(player, 'dealt:')\n for card in player:\n if card.face():\n print(' '*3, str(card)+':', 'face up')\n else:\n print(' '*3, str(card)+':', 'face down')", "def event_player_blackjack(self) -> None:\n win_amount = self.user.bet + 1.5\n print(\"Congratulations, you win:\", win_amount)\n self.user.win_balance(win_amount)", "def deal(self):\n self.dealer.hit(self.deck)\n self.dealer.hit(self.deck)\n self.player.hit(self.deck)\n self.player.hit(self.deck)\n\n if self.player.sum_cards() == 21:\n self.round_winner = True\n self.print_hands()\n print(\"BLACKJACK! You win!\")", "def calculate_value(self, hand):\n global FACE_CARDS\n #could refactor the 2 hand possiblities into methods of a Dealer and Player Class\n if hand == \"player\":\n if self.player_hand[-1].value in FACE_CARDS:\n self.player_value += 10\n elif self.player_hand[-1].value == \"A\":\n self.player_value += 11\n self.player_ace_count += 1\n else:\n self.player_value += int(self.player_hand[-1].value)\n\n if self.player_value > 21:\n if self.player_ace_count > self.player_almost_bust:\n #To prevent a Bust, your Ace became a one\n self.player_value -= 10\n self.player_almost_bust += 1\n else:\n self.player_lose()\n elif self.player_value == 21:\n self.blackjack = True\n self.endgame()\n\n elif hand == \"dealer\":\n if len(self.dealer_hand) > 1:\n if self.dealer_hand[-1].value in FACE_CARDS:\n self.dealer_value += 10\n elif self.dealer_hand[-1].value == \"A\":\n self.dealer_value += 11\n self.dealer_ace_count += 1\n else:\n self.dealer_value += int(self.dealer_hand[-1].value)\n\n if self.dealer_value > 21:\n if self.dealer_ace_count > self.dealer_almost_bust:\n #To prevent a Bust, the Dealer's Ace became a one\n self.dealer_value -= 10\n self.dealer_almost_bust += 1\n else:\n self.player_win()\n elif self.dealer_value == 21:\n self.player_lose()", "def dealer_turn(self):\n self.dealer.reveal()\n show_table_later(self.player, self.dealer, self.pot)\n while self.dealer.hand.value < 17:\n self.dealer.take_card(self.deck)\n show_table_later(self.player, self.dealer, self.pot)", "def hide_card(self):\n try:\n self.hidden_card_value = self.hand[1]\n self.hand[1] = Card()\n except IndexError:\n print('The dealer does not have enough cards!')", "def _deal_player():\n\n # we append the dealed card to the player's hand.\n player_hand.append(_deal_card(player_card_frame))\n\n # calculate and return the score of the player's hand.\n player_score = _score_hand(player_hand)\n\n # set the score to the respective label.\n player_score_label.set(player_score)\n\n # if the score surpasses 21, dealer wins.\n if player_score > 21:\n 
result_text.set(\"Dealer wins!\")", "async def blackjack(self, ctx, arg: int): \n db = sqlite3.connect('main.sqlite')\n cursor = db.cursor()\n cursor.execute(f'SELECT user_id, jacks FROM main WHERE user_id = {ctx.author.id}')\n result = cursor.fetchone()\n embed = discord.Embed(color=0x228b22, title=\"Blackjack\")\n if result is not None:\n if arg > result[1]:\n embed.add_field(name=\"Error\", value=f\"You can't bid more chips than you have!\", inline=False)\n embed.set_footer(text=\"You can check your balance using the *profile* command\")\n else:\n player, house = [],[]\n deck.deal(player,2)\n deck.deal(house, 2)\n embed.add_field(name=\"Your Hand:\", value=f\"```{deck.display_hand(player)}``` \\n Value: {deck.hand_value(player)}\")\n embed.add_field(name=\"Dealer's Hand:\", value=f\"```['{deck.display_hand(house)[1]}', '?'] ``` \\n Value: ?\")\n embed.set_footer(text=\"Type `hit` or `stay` to take your turn!\")\n await ctx.send(content=None, embed=embed)\n if deck.hand_value(house) != 21 and deck.hand_value(player) != 21:\n msg = await self.client.wait_for('message', check=lambda message: message.author == ctx.author)\n while msg.content.startswith(\"hit\") or msg.content.startswith(\"Hit\"):\n embed.remove_field(0)\n deck.deal(player)\n embed.insert_field_at(0, name=\"Your Hand:\", value=f\"```{deck.display_hand(player)}``` \\n Value: {deck.hand_value(player)}\")\n await ctx.send(content=None, embed=embed)\n if deck.hand_value(player) > 21:\n break\n msg = await self.client.wait_for('message', check=lambda message: message.author == ctx.author)\n embed.remove_field(1)\n embed.set_footer(text=\"\")\n deck.house_turn(house)\n embed.add_field(name=\"Dealer's Hand:\", value=f\"```{deck.display_hand(house)}``` \\n Value: {deck.hand_value(house)}\")\n if deck.hand_value(player) == 21:\n outcome = \"Blackjack!\"\n bal = \"won\"\n chips = int(result[1] + arg*1.5)\n elif deck.hand_value(player) > 21:\n outcome = \"Player bust, you lose\"\n bal = \"lost\"\n chips = int(result[1] - arg)\n elif deck.hand_value(house) > 21:\n outcome = \"Dealer bust, you win!\"\n bal = \"won\"\n chips = int(result[1] + arg)\n elif deck.hand_value(player) > deck.hand_value(house):\n outcome = \"Win!\"\n bal = \"won\"\n chips = int(result[1] + arg)\n elif deck.hand_value(player) == deck.hand_value(house):\n outcome = \"Push, chips back\"\n bal = \"gotten back your\"\n chips = int(result[1])\n else:\n outcome = \"Loss\"\n bal = \"lost\"\n chips = int(result[1] - arg)\n sql = (\"UPDATE main SET jacks = ? 
WHERE user_id = ?\")\n val = (chips, ctx.author.id)\n cursor.execute(sql, val)\n db.commit()\n cursor.close()\n db.close()\n if chips == int(result[1]):\n chips += arg\n embed.add_field(name=outcome, value=f\"You have {bal} <:chip:657253017262751767> **{abs(int(result[1] - chips))}** chips\", inline=False)\n await ctx.send(content=None, embed=embed)\n else:\n await ctx.send(\"You must register before you can play blackjack!\")", "def dealer_turn(dealer, deck):\n print(\"\\n======== DEALER'S TURN ========\")\n while deck.cards and not bust(dealer) and dealer.total < STAND_LIMIT():\n draw_card(dealer, deck)\n print(f\"\\nThe dealer Stands, their total is \\033[33m{dealer.total}\\033[0m.\\n\")\n time.sleep(1)", "async def draw_start(self):\n for i, player in enumerate(self.players):\n def bet_check(m):\n \"\"\"If the value can be converted to a float and is within the bounds return true, else false\"\"\"\n try:\n value = float(m.content)\n if 0 <= value <= player.coins:\n return True\n else:\n return False\n except:\n return False\n\n if not player.out:\n await self.ctx.send(f\"{self.users[i].name}, How much would you like to bet? You have {player.coins} in the bank: \")\n try:\n bet = await self.client.wait_for('message', timeout=120.0, check=bet_check)\n bet = float(bet.content)\n if bet == 0:\n player.out = True\n self.total_players_out += 1\n else:\n player.debit(bet)\n player.bet = bet\n except:\n await self.ctx.send(\"Timed Out!\")\n player.out = True\n self.total_players_out += 1\n # shuffle cards and dealer draws one, send the dealers hand to the channel, loop through all players that aren't out and show their hand\n # if all players arent out\n if self.total_players_out < len(self.players):\n self.deck.shuffle()\n self.dealer.clear()\n self.deck.move_cards(self.dealer, 1)\n\n embed_dealer = discord.Embed(title='Dealer', color=0x00ff00)\n embed_dealer.add_field(\n name=\"Hand\", value=self.dealer, inline=False)\n self.dealer_msg = await self.ctx.send(embed=embed_dealer)\n\n embed_players = discord.Embed(title='Players', color=0x0000fd)\n for i, player in enumerate(self.players):\n if not player.out:\n player.clear()\n self.deck.move_cards(player, 2)\n # name=their discord name and value = their hand\n embed_players.add_field(\n name=self.users[i].name, value=player, inline=True)\n if player.get_value() == 21:\n player.has_bj = True\n self.players_msg = await self.ctx.send(embed=embed_players)", "def draw_card(dealer,player): \n depth = 100\n x0,y0 = 100,100\n x1,y1 = 100,300\n\n bj_board.clear()\n for i in range(len(dealer)):\n if dealer[i].state==True:\n bj_board.add(dealer[i].image)\n dealer[i].image.moveTo(x0+i*20,y0)\n dealer[i].image.setDepth(depth-10*i)\n elif dealer[i].state==False:\n img=Image(img_path+\"Back.png\")\n bj_board.add(img)\n img.moveTo(x0+i*20,y0)\n img.setDepth(depth-10*i)\n for i in range(len(player)):\n bj_board.add(player[i].image)\n player[i].image.moveTo(x1+i*20,y1)\n player[i].image.setDepth(depth-10*i) \n \n text=Text(\"Your Total: \" + str(hand_value(player)))\n text.moveTo(300,300)\n bj_board.add(text)\n \n if dealer[0].state==True:\n text=Text(\"Dealer Total: \" + str(hand_value(dealer)))\n text.moveTo(300,100)\n bj_board.add(text)", "def __play_delear(self, state : State):\n # print (\"Playing as dealer\")\n dealer_sum = state.max_safe_sum(dealer=True)\n assert (-1 <= dealer_sum <= 31)\n while (0 <= dealer_sum < 25):\n # Keep hitting\n card, suite = self.draw()\n state.update_state (card, suite, dealer=True)\n dealer_sum = 
state.max_safe_sum(dealer=True)\n assert (-1 <= dealer_sum <= 31)\n\n return dealer_sum", "def round(self):\n #player turn\n if self.started:\n self.started = False #registers the game as started then immediately turns that value false\n if self.initial_action:\n card = self.deck.deal()\n self.player.value += card.value\n if card.is_ace:\n self.player.usable_ace = True\n else:\n self.player.playing = False\n else: \n if self.apply_policy():\n card = self.deck.deal()\n self.player.value += card.value\n if card.is_ace:\n self.player.usable_ace = True\n else:\n self.player.playing = False\n\n #dealer turn\n if self.dealer.value < 17:\n card = self.deck.deal()\n self.dealer.value += card.value\n self.dealer.visible_value += card.value\n #allow people to reduce their scores by applying aces\n self.apply_ace()\n #check to see if anyone has bust by making bust people not _playing\n if self.player.value > 21:\n self.player.broke = True\n self.player.playing = False\n if self.dealer.value > 21:\n self.dealer.broke = True", "def reveal_card(self):\n self.hand[1] = self.hidden_card_value\n self.hidden_card_value = Card()", "def draw_card(dealer,player):\n # hidden_img = Image(img_path+\"back.png\")\n depth = 100\n x0,y0 = 100,100\n x1,y1 = 100,300\n ix = 30\n\n bj_board.clear()\n for card in dealer:\n if card.state:\n card.image.moveTo(x0, y0)\n card.image.setDepth(depth)\n bj_board.add(card.image)\n else:\n img = Image(img_path+\"Back.png\")\n img.moveTo(x0, y0)\n img.setDepth(depth)\n bj_board.add(img)\n x0 += ix\n \n for card in player:\n if card.state:\n card.image.moveTo(x1, y1)\n card.image.setDepth(depth)\n bj_board.add(card.image)\n else:\n img = Image(img_path+\"back.png\")\n img.moveTo(x1, y1)\n img.setDepth(depth)\n bj_board.add(img)\n x1 += ix", "def buy_card(self):\n\n print(f\"Hand has buying power {self.hand_buying_power}...\")\n bought_card = None\n\n # by Platinium, if possible\n # otherwise (game stage agnostic) can buy a province or colony, always buy it\n if ((self.highest_buyable_money == cards.PLATINUM) and\n (self.game_stage == GameStage.early_game)):\n bought_card = cards.PLATINUM\n elif ((self.highest_buyable_victory_points == cards.PROVINCE) or\n (self.highest_buyable_victory_points == cards.COLONY)):\n bought_card = self.highest_buyable_victory_points\n else:\n # buy the highest buyable money by default\n if (self.highest_buyable_money != cards.COPPER):\n bought_card = self.highest_buyable_money\n\n # except if in the late game stage, in which case buy the highest\n # buyable victory points instead\n if ((self.game_stage == GameStage.late_game) and\n (self.highest_buyable_victory_points) and\n (self.highest_buyable_victory_points.victory_points > 0)):\n bought_card = self.highest_buyable_victory_points\n print(f\"Late Stage Game, so buying victory points over money\")\n\n # explain the play\n self.speak_hand()\n s = f\"for total buying power of {self.hand_buying_power}\"\n self.game.speak_str(s)\n\n # gain the card bought, if any, to the discard pile:\n if bought_card:\n s = f\"I buy {bought_card.name}\"\n self.game.speak_str(s)\n\n # gain the card to the discard pile\n self.deck.discard.append(bought_card)\n self.game.buy_card(bought_card)\n else:\n s = f\"I do not buy anything\"\n self.game.speak_str(s)\n\n # the whole hand is used up buying the card, discard the hand\n self.deck.discard_hand()", "def _deal_dealer():\n\n # calculate and return the score of the dealer's hand.\n dealer_score = _score_hand(dealer_hand)\n\n while 0 < dealer_score < 17:\n\n # Up to a score of 
17 or higher, deal a card to the dealer\n # and append it to their hand.\n dealer_hand.append(_deal_card(dealer_card_frame))\n\n # calculate and return the score of the dealer's hand.\n dealer_score = _score_hand(dealer_hand)\n\n # set the score to the respective label.\n dealer_score_label.set(dealer_score)\n\n # since the dealer always goes last, we can check for the players's\n # score here, and check if they won or lost.\n player_score = _score_hand(player_hand)\n\n if player_score > 21:\n result_text.set(\"Dealer wins!\")\n elif dealer_score > 21 or dealer_score < player_score:\n result_text.set(\"Player wins!\")\n elif dealer_score > player_score:\n result_text.set(\"Dealer wins!\")\n else:\n result_text.set(\"Draw!\")", "def on_place_bet (self,event):\n global placed_bet\n placed_bet = True\n arcade.draw_text(f\"Value: {self.dealer_value}\", 280, 450, arcade.color.BLACK, 16)\n arcade.draw_text(f\"Value: {self.player_value}\", 280, 250, arcade.color.BLACK, 16)\n \n self.final_bet = self.bet\n self.dealer_hand[1].face_up()\n self.player_hand[0].face_up()\n self.player_hand[1].face_up()", "def endgame(self):\n #reveals the dealer's first card then the dealer hits until the dealer's hand's value is above 16\n self.dealer_hand[0].face_up()\n if self.dealer_hand[0].value in FACE_CARDS:\n self.dealer_value += 10\n elif self.dealer_hand[0].value == \"A\":\n self.dealer_value += 11\n self.dealer_ace_count += 1\n else:\n self.dealer_value += int(self.dealer_hand[0].value)\n\n if self.dealer_value > 21:\n if self.dealer_ace_count > self.dealer_almost_bust:\n #To prevent a Bust, the Dealer's Ace became a one\n self.dealer_value -= 10\n self.dealer_almost_bust += 1\n else:\n self.player_win()\n #House always wins Ties\n elif self.dealer_value == 21:\n self.player_lose()\n\n while self.dealer_value < 17:\n self.hit(\"dealer\")\n\n if (self.player_value - self.dealer_value) > 0:\n self.player_win()\n else:\n self.player_lose()", "def card_balance(self):\n return self._card_balance", "def all_in():\r\n\r\n raise_bet(player.get_cash())", "def stay(self):\n global dealer_hand, deck, outcome, in_play\n \n if in_play:\n while dealer_hand.get_value() < 17:\n dealer_hand.add_card(deck.deal_card())\n \n if dealer_hand.get_value() > 21:\n # print \"Dealer is busted.\\nPlayer wins.\"\n self.outcome.set(\"Dealer is busted. Player wins. New deal?\")\n self.won += 1\n self.score.set(str(self.won) + \"/\" + str(self.lost))\n elif player_hand.get_value() > 21:\n # print \"Player is busted.\\nDealer wins.\"\n self.outcome.set(\"Player is busted. Dealer wins. New deal?\")\n self.lost += 1\n self.score.set(str(self.won) + \"/\" + str(self.lost))\n elif dealer_hand.get_value() >= player_hand.get_value():\n # print \"Dealer wins.\"\n self.outcome.set(\"Dealer wins. New deal?\")\n self.lost += 1\n self.score.set(str(self.won) + \"/\" + str(self.lost))\n else:\n # print \"Player wins.\"\n self.outcome.set(\"Player wins! New deal?\")\n self.won += 1\n self.score.set(str(self.won) + \"/\" + str(self.lost))\n in_play = False\n draw(canvas)", "def show_balances(self):\n print 'Pot: %d' % (self.account.balance,)\n for player in self.players:\n balance = player.account.balance\n if balance > 0:\n print '%s: %d' % (player, balance,)", "def hit(self):\n global in_play, deck, player_hand, dealer_hand, outcome, lost\n \n if in_play:\n player_hand.add_card(deck.deal_card())\n \n if player_hand.get_value() > 21:\n self.outcome.set(\"You have busted! Dealer wins. 
New deal?\")\n self.lost += 1\n self.score.set(str(self.won) + \"/\" + str(self.lost))\n in_play = False\n draw(canvas)\n\n print \"\\nPlayer hand: \", player_hand\n print \"Dealer hand: \", dealer_hand", "def showdown(self):\r\n\r\n poker_hands = []\r\n message = \"\"\r\n for player in self.players:\r\n poker_hands.append(player.hand.best_poker_hand(self.community_cards.cards))\r\n\r\n # Reveal all cards when the round is over\r\n player.reveal_cards()\r\n\r\n if poker_hands[0].type > poker_hands[1].type:\r\n message = \"Player {} won! \\nPoker hand >{}< won against >{}<\".format(\r\n self.players[0].name, str(poker_hands[0].type), str(poker_hands[1].type))\r\n self.players[0].credits += self.pot\r\n\r\n if poker_hands[0].type < poker_hands[1].type:\r\n message = \"Player {} won! \\nPoker hand >{}< won against >{}<\".format(\r\n self.players[1].name, str(poker_hands[1].type), str(poker_hands[0].type))\r\n self.players[1].credits += self.pot\r\n\r\n if poker_hands[0].type == poker_hands[1].type:\r\n if poker_hands[0].highest_values > poker_hands[1].highest_values:\r\n message = \"Player {} won! \\nHighest value >{}< won against >{}<\".format(\r\n self.players[0].name, str(poker_hands[0].highest_values), str(poker_hands[1].highest_values))\r\n self.players[0].credits += self.pot\r\n\r\n elif poker_hands[0].highest_values < poker_hands[1].highest_values:\r\n message = \"Player {} won! \\nHighest value >{}< won against >{}<\".format(\r\n self.players[1].name, str(poker_hands[1].highest_values), str(poker_hands[0].highest_values))\r\n self.players[1].credits += self.pot\r\n\r\n elif poker_hands[0].highest_values == poker_hands[1].highest_values:\r\n message = \"It is a draw! Both players had >{}< and highest value >{}<\".format(\r\n poker_hands[0].type.name, str(poker_hands[0].highest_values))\r\n\r\n for player in self.players:\r\n player.credits += (self.pot // len(self.players))\r\n else:\r\n self.game_message_warning.emit(\"Incorrect comparison of poker hands\")\r\n\r\n self.new_output.emit(message)\r\n self.game_message.emit(message)\r\n self.new_credits.emit()\r\n self.new_pot.emit()", "def blackjack():\n score_report, bank, game_deck = start_game()\n while not end_game(bank, game_deck):\n user, dealer = Player(), Player(dealer=True)\n print(\"\\n=============== BEGINNING ROUND! 
===============\")\n bank.report_balance()\n bank.place_bet()\n play_round(user=user, dealer=dealer, deck=game_deck)\n if game_deck.cards:\n winner = decide_winner(user, dealer)\n end_round(winner_result=winner, bank=bank, report=score_report)\n print(score_report)\n score_report.report_rounds()\n print(f\"This concludes our game of BlackJack 21 and you get to take home ${bank.balance}, thank you for playing!\")", "def hook_buy_card(self, game, player, card):\n if card.isVictory():\n player.output(\"Gaining Gold from Hoard\")\n player.add_card(game[\"Gold\"].remove())", "async def add_card(self, user_id):\n end_game = False\n check = 0\n\n separator = ','\n current_cards = await self.get_current_cards(user_id)\n game_id = await self.get_game_by_player(user_id)\n game = await self.get_game(game_id)\n channel = await ex.client.fetch_channel(game[5])\n stand = await self.check_player_standing(user_id)\n player1_score = await self.get_player_total(game[1])\n player2_score = await self.get_player_total(game[2])\n player1_cards = await self.get_current_cards(game[1])\n if not stand:\n available_cards = await self.get_available_cards(game_id)\n random_card = random.choice(available_cards)\n current_cards.append(str(random_card))\n cards = separator.join(current_cards)\n current_total = await self.get_player_total(user_id)\n random_card_value = await self.get_card_value(random_card)\n if current_total + random_card_value > 21:\n for card in current_cards: # this includes the random card\n if await self.check_if_ace(card, user_id) and check != 1:\n check = 1\n current_total = (current_total + random_card_value) - 10\n if check == 0: # if there was no ace\n current_total = current_total + random_card_value\n else:\n current_total = current_total + random_card_value\n await ex.conn.execute(\"UPDATE blackjack.currentstatus SET inhand = $1, total = $2 WHERE userid = $3\", cards, current_total, user_id)\n if current_total > 21:\n if user_id == game[2] and self.check_if_bot(game[2]):\n if player1_score > 21 and current_total >= 16:\n end_game = True\n await self.set_player_stand(game[1])\n await self.set_player_stand(game[2])\n elif player1_score > 21 and current_total < 16:\n await self.add_card(game[2])\n elif player1_score < 22 and current_total > 21:\n pass\n else:\n end_game = True\n elif self.check_if_bot(game[2]) and not self.check_if_bot(user_id): # if user_id is not the bot\n if player2_score < 16:\n await self.add_card(game[2])\n else:\n await self.set_player_stand(user_id)\n await self.set_player_stand(game[2])\n end_game = True\n else:\n if user_id == game[2] and self.check_if_bot(game[2]):\n if current_total < 16143478541328187392 and len(player1_cards) > 2:\n await self.add_card(game[2])\n if await self.check_player_standing(game[1]) and current_total >= 16:\n end_game = True\n if not self.check_if_bot(user_id):\n if self.check_if_bot(game[2]):\n await self.send_cards_to_channel(channel, user_id, random_card, True)\n else:\n await self.send_cards_to_channel(channel, user_id, random_card)\n else:\n await channel.send(f\"> **You already stood.**\")\n if await self.check_game_over(game_id):\n await self.finish_game(game_id, channel)\n if end_game:\n await self.finish_game(game_id, channel)", "def betting_round(self, method, params):\n self.bet_history += [[]]\n current_bets = [self.starting_player] * len(self.agents)\n \n max_bet = 0\n if method == self.deal_cards:\n max_bet = big_blind\n current_bets[self.starting_player] = small_blind\n current_bets[(self.starting_player + 1) % 
len(self.agents)] = big_blind\n\n (self.all_in[self.starting_player], bet) = self.normalize_bet(self.chips[self.starting_player], method(self.agents[self.starting_player], params[self.starting_player]), max_bet)\n self.in_game[self.starting_player] = (not self.all_in[self.starting_player])\n current_bets[self.starting_player] = bet\n self.chips[self.starting_player] -= bet\n check = True if bet == 0 else False\n max_bet = max(max_bet, bet)\n self.pot += bet\n self.bet_history[-1] += [bet]\n\n raised_player = self.starting_player\n i = (raised_player + 1) % len(self.agents)\n\n if method == self.deal_cards:\n # raised_player = (self.starting_player + 1) % len(agents)\n check = False\n if bet > max_bet:\n raised_player = i\n max_bet = bet\n\n if bet == 0:\n self.in_game[i] = False\n self.in_game_count -= 1\n\n while (i != raised_player) and (not self.all_in[i]) and (current_bets[i] <= max_bet):\n if self.in_game[i]:\n (self.all_in[i], bet) = self.normalize_bet(self.chips[i], method(self.agents[i], params[i]), max_bet)\n self.in_game[i] = (not self.all_in[i])\n delta_bet = max(0, bet - current_bets[i])\n current_bets[i] = bet\n self.chips[i] -= delta_bet\n self.pot += delta_bet\n self.bet_history[-1] += [bet]\n\n if bet > max_bet:\n check = False\n raised_player = i\n max_bet = bet\n\n if bet == 0 and not check:\n self.in_game[i] = False\n self.in_game_count -= 1\n\n i = (i + 1) % len(self.agents)" ]
[ "0.75316435", "0.6755466", "0.6731837", "0.66685313", "0.6465247", "0.6453599", "0.644847", "0.63940364", "0.63748866", "0.6362192", "0.63309693", "0.6303815", "0.6255874", "0.6199245", "0.61905754", "0.61843073", "0.6167011", "0.61617035", "0.6152823", "0.61407375", "0.61354715", "0.61347896", "0.61287206", "0.61282545", "0.6122239", "0.6077156", "0.6062014", "0.60487765", "0.6020023", "0.5991947" ]
0.71721745
1
The dealer has no choice: stand if the hand is >= 17, otherwise hit
def deal_self(self):
    # House rule: keep hitting below 17, stand on 17-21, -1 marks a bust.
    self.cards.hit(self.get_card())
    if 0 <= self.cards.hand < 17:
        self.state = 'active'
    elif 17 <= self.cards.hand <= 21:
        self.state = 'stand'
    elif self.cards.hand == -1:
        self.state = 'burst'
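The branch logic reduces to a pure function of the hand value; a runnable restatement of just that mapping (the function name is an assumption, not part of the record):

def dealer_state(hand):
    # Mirror of the branches above, applied to a plain integer hand value.
    if 0 <= hand < 17:
        return 'active'
    elif 17 <= hand <= 21:
        return 'stand'
    elif hand == -1:
        return 'burst'

for h in (12, 17, 21, -1):
    print(h, dealer_state(h))   # active, stand, stand, burst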
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def stand(hand=bj.player1.hand):\r\n phv = bj.player1.hand_value_check(hand) # check player hand value\r\n phv = [x for x in phv if x <= 21]\r\n if hand == bj.player1.hand:\r\n if len(phv) > 0:\r\n bj.player1.final_hand_val = max(phv)\r\n else:\r\n bj.player1.final_hand_val = \"bust\"\r\n else:\r\n if len(phv) > 0:\r\n bj.player1.final_hand2_val = max(phv)\r\n else:\r\n bj.player1.final_hand2_val = \"bust\"", "def play_for_dealer(self):\n while self.dealer.sum_cards() < 17:\n self.dealer.hit(self.deck)\n else:\n self.round_winner = True\n self.print_hands()\n self.determine_winner()", "def stand(self, hand_idx=0):\n dealer_value = self.dealer_hand_value()\n # Dealer has to hit until it's atleast Dealer Min (Default 17)\n while dealer_value < self.dealer_min:\n self.dealer_hand.append(self._pick_card())\n dealer_value = self.dealer_hand_value()\n #Compute Player Hand value\n player = self.players[hand_idx]\n player_value = self.player_hand_value(hand_idx)\n if player['active']:\n # When Player has Blackjack in 2 cards and dealer doesn't\n if player_value == 21 and len(player['hand']) == 2 and \\\n (dealer_value != 21 or (dealer_value == 21 and len(self.dealer_hand) > 2)):\n player['result'] = 'blackjack'\n self.wager_earned += 1.5 * player['wager']\n # When Both Player and Dealer values are equal or both are Bust, then \"Push\".\n elif player_value == dealer_value or (player_value > 21 and dealer_value > 21):\n player['result'] = 'push'\n # When Only Player is Bust, then \"Bust\". You lost the wager\n elif player_value > 21:\n player['result'] = 'bust'\n self.wager_earned -= player['wager']\n # When Only Dealer is Bust, then \"Won\". You won the wager\n elif dealer_value > 21:\n player['result'] = 'won'\n self.wager_earned += player['wager']\n # When both Player and delaer are not bust and Player has higher number than Dealer\n elif player_value > dealer_value:\n player['result'] = 'won'\n self.wager_earned += player['wager']\n # When both Player and delaer are not bust and Player has lower number than Dealer\n elif dealer_value > player_value:\n player['result'] = 'lost'\n self.wager_earned -= player['wager']\n player['active'] = False # Set Player Hand Active to False after a Stand\n return None", "def handDecision(handIn):", "def hit(self, hand_idx=0):\n player = self.players[hand_idx]\n if player['active']:\n player['hand'].append(self._pick_card())\n if self.dealer_hand_value() < self.dealer_min:\n self.dealer_hand.append(self._pick_card())\n if self.is_bust(hand_idx):\n self.stand(hand_idx) # Force Stand and compute game result\n # Turn Off Split and Double Down after the first hit\n if player['allow_dd']: # Don't allow double down after the first hit\n player['allow_dd'] = False\n if self.allow_split: # Don't allow split after the first hit\n self.allow_split = False", "def player_stand(self):\r\n if self.in_progress:\r\n while self.dealer_hand.total < 17:\r\n self.dealer_hand.add(self.deck.deal())\r\n if self.dealer_hand.total > 21 or self.dealer_hand.total < self.player_hand.total:\r\n self.status_color = 'red'\r\n self.game_status = \"Player WINS... Press 'r' to start game\"\r\n self.player_wins += 1\r\n elif self.player_hand.total == self.dealer_hand.total:\r\n self.status_color = 'red'\r\n self.game_status = \"TIE Game... Press 'r' to start game\"\r\n else:\r\n self.status_color = 'red'\r\n self.game_status = \"Dealer WINS... 
Press 'r' to start game\"\r\n self.dealer_wins += 1\r\n self.in_progress = False\r\n self.refresh_canvas()", "def stand():\n \n # Update message, score and the player's \"Hand\" status\n # as global variables.\n global outcome, outcome_plus, outcome_plus_plus, in_play, score, action \n \n # If the \"Player\" has busted, remind the \"Player\" that \n # they have busted.\n if player.get_value() > 21:\n outcome = PLAYER_BUSTED\n outcome_plus = outcome_plus_plus = \"\"\n action = NEW_DEAL\n elif in_play:\n # If the \"Hand\" is in play, repeatedly hit \"Dealer\" \n # until his \"Hand\" has value 17 or more. \n while dealer.get_value() < 17:\n dealer.add_card(deck_of_cards.deal_card())\n\n # If busted, update messages, score and the \n # player's \"Hand\" status. \n if dealer.get_value() > 21:\n outcome = PLAYER_WINS\n outcome_plus = DEALER_BUSTED\n outcome_plus_plus = \"\"\n action = NEW_DEAL \n score += SCORE_POINTS \n in_play = False\n # Else compare the value of the \n # player's and dealer's \"Hands\". If the value of \n # the player's \"Hand\" is less than or equal to \n # the dealer's \"Hand\", the \"dealer\" wins. \n # Otherwise the \"player\" has won. Again,\n # update messages, score and the player's \"Hand\" \n # status. \n else: \n in_play = False\n action = NEW_DEAL\n outcome_plus = outcome_plus_plus = \"\"\n if player.get_value() > dealer.get_value():\n outcome = PLAYER_WINS \n score += SCORE_POINTS \n else:\n outcome = PLAYER_LOSES \n score -= SCORE_POINTS\n \n return None", "def check_hand(self, player):\n\n total = player.score()\n if total > 21:\n status = 'bust'\n elif total == 21:\n status = 'win'\n else:\n status = 'okay'\n\n if self.verbose:\n print(total, 'points')\n \n return status", "def player_hit(self):\n self.player.hit(self.deck)\n self.print_hands()\n \n if self.player.sum_cards() > 21:\n self.round_winner = True\n self.print_hands()\n print(\"BUST! Dealer wins.\")", "def hit(self):\n global in_play, deck, player_hand, dealer_hand, outcome, lost\n \n if in_play:\n player_hand.add_card(deck.deal_card())\n \n if player_hand.get_value() > 21:\n self.outcome.set(\"You have busted! Dealer wins. New deal?\")\n self.lost += 1\n self.score.set(str(self.won) + \"/\" + str(self.lost))\n in_play = False\n draw(canvas)\n\n print \"\\nPlayer hand: \", player_hand\n print \"Dealer hand: \", dealer_hand", "def policy(self, s):\r\n if s.dealer_sum >= 16:\r\n return Action.STICK\r\n else:\r\n return Action.HIT", "def deal(self):\n self.dealer.hit(self.deck)\n self.dealer.hit(self.deck)\n self.player.hit(self.deck)\n self.player.hit(self.deck)\n\n if self.player.sum_cards() == 21:\n self.round_winner = True\n self.print_hands()\n print(\"BLACKJACK! You win!\")", "def player_hit(self):\r\n if self.in_progress:\r\n self.player_hand.add(self.deck.deal())\r\n if self.player_hand.total > 21:\r\n self.status_color = 'red'\r\n self.game_status = \"Dealer WINS... 
Press 'r' to start game\"\r\n self.dealer_wins += 1\r\n self.in_progress = False\r\n self.refresh_canvas()", "def event_player_bust(self) -> None:\n print(f\"Your hand contains {min(self.user.hand.value)}, you're bust\")\n self.event_house_wins()", "def main():\n\n deck1 = Deck()\n deck1.shuffle()\n\n dealerHand = Hand()\n playerHand = Hand()\n\n startGame(dealerHand, playerHand, deck1)\n while dealerHand.handSum() < 16:\n evalHand(dealerHand)\n hitMe(dealerHand, deck1)\n # print(\"dealer hand after evalHand is: \", dealerHand.showHand())\n if dealerHand.handSum() > 21:\n os.system(\"clear\")\n displayHands(playerHand, dealerHand)\n print(\"Dealer sum exceeded 21.\"\n \" Dealer hand {}. Dealer sum {}. Player wins!!\".format(dealerHand.handSum(), dealerHand.showHand()))\n return\n\n displayHands(playerHand, dealerHand)\n evalHand(dealerHand)\n\n \"\"\"\n if dealerHand.handSum() > 21:\n game_status = stand(playerHand, dealerHand)\n print(\"Game status is: \", game_status)\n \"\"\"\n # print(\"\\nYour hand score is: \", playerHand.handSum())\n displayHands(playerHand, dealerHand)\n ans = input(\"\\nDo you want another card (y or n)? \")\n\n while ans == 'y':\n hitMe(playerHand, deck1)\n evalHand(playerHand)\n displayHands(playerHand, dealerHand)\n if playerHand.handSum() > 21:\n os.system(\"clear\")\n displayHands(playerHand, dealerHand)\n print(\"Player exceeded 21. \"\n \"Player hand {}. Dealer sum {}. Dealer wins!!\".format(playerHand.showHand(), playerHand.handSum()))\n return\n ans = input(\"Do you want another card (y or n)?\")\n\n displayHands(playerHand, dealerHand)\n print(\"\\nGame outcome is: \", stand(playerHand, dealerHand))", "def assess_hand(self, r):\n bidIndex = -1\n while bool(random.getrandbits(1)): # Coin flip\n bidIndex += 1\n if bidIndex == -1:\n self.maxBid = LEGAL_BIDS[0] - 1 # Pass immediately.\n else:\n self.maxBid = LEGAL_BIDS[bidIndex]", "def stand(p_hand, d_hand):\n\n PLAYER_WIN = \"Player Wins!\\n\\n\\n\"\n DEALER_WIN = \"Dealer Wins!\\n\\n\\n\"\n DRAW = \"Game a draw\\n\\n\\n\"\n MAX = 22\n\n # Tie Game\n if p_hand.handSum() >= MAX and d_hand.handSum() >= MAX:\n return \"Both lose. 
Both hands over 21.\"\n\n if p_hand.handSum() == d_hand.handSum():\n return DRAW\n\n # Player Wins\n if p_hand.handSum() > d_hand.handSum() and (\n p_hand.handSum() < MAX):\n return PLAYER_WIN\n\n if p_hand.handSum() < MAX and d_hand.handSum() >= MAX:\n return PLAYER_WIN\n\n # Dealer Wins\n if d_hand.handSum() > p_hand.handSum() and (\n d_hand.handSum() < MAX):\n return DEALER_WIN\n\n if p_hand.handSum() > d_hand.handSum() and (\n p_hand.handSum() >= MAX):\n return DEALER_WIN", "def round(self):\n #player turn\n if self.started:\n self.started = False #registers the game as started then immediately turns that value false\n if self.initial_action:\n card = self.deck.deal()\n self.player.value += card.value\n if card.is_ace:\n self.player.usable_ace = True\n else:\n self.player.playing = False\n else: \n if self.apply_policy():\n card = self.deck.deal()\n self.player.value += card.value\n if card.is_ace:\n self.player.usable_ace = True\n else:\n self.player.playing = False\n\n #dealer turn\n if self.dealer.value < 17:\n card = self.deck.deal()\n self.dealer.value += card.value\n self.dealer.visible_value += card.value\n #allow people to reduce their scores by applying aces\n self.apply_ace()\n #check to see if anyone has bust by making bust people not _playing\n if self.player.value > 21:\n self.player.broke = True\n self.player.playing = False\n if self.dealer.value > 21:\n self.dealer.broke = True", "def hit():\n \n # Update messages, score and the player's \"Hand\" status\n # as global variables.\n global outcome, outcome_plus, outcome_plus_plus, in_play, score, action \n \n # If the \"Hand\" is in play, hit the \"player\". \n if in_play:\n outcome = outcome_plus = outcome_plus_plus = \"\"\n player.add_card(deck_of_cards.deal_card())\n else:\n return None\n \n # If busted, update messages, score and the player's \n # \"Hand\" status.\n if player.get_value() > 21:\n outcome = PLAYER_BUSTED\n outcome_plus = outcome_plus_plus = \"\"\n action = NEW_DEAL \n score -= SCORE_POINTS\n in_play = False\n \n return None", "def is_bust(self, hand_idx=0):\n if self.player_hand_value(hand_idx) > 21:\n return True\n else:\n return False", "def play(self):\n hand = self.state.hand\n supply = self.state.supply\n money = count_money(hand) - self.state.used_money\n if supply['Province'] > 0 and money >= Province.Cost:\n self.game_client.buy('Province')\n elif supply['Duchy'] > 0 and money >= Duchy.Cost:\n self.game_client.buy('Duchy')\n elif supply['Estate'] > 0 and money >= Estate.Cost:\n self.game_client.buy('Estate')\n\n self.game_client.done()", "def hit(self, card):\n self.append(card)\n values=[]\n values.append(card.value())\n if values[0] < 2:\n values.append(values[0]+ 10)\n new_sums =set([v+s for v in values for s in self.possible_sums if v+s <=21])\n new_sums =sorted(new_sums)\n if len(new_sums) ==0:\n self.hand=-1\n else:\n self.hand = new_sums[-1]\n self.possible_sums = new_sums", "def get_player_action(self) -> None:\n print(f\"\\nYou have: {self.user.hand.cards} totalling to {self.user.hand.value}\")\n while not self.get_game_ending_hands():\n action = self.validate_input(\"Do you want to 1. hit or 2. 
stand?\", ('1', '2'))\n if action == '1':\n self.action_hit()\n elif action == '2':\n self.action_stand()\n break", "def dealer_turn(self):\n self.dealer.reveal()\n show_table_later(self.player, self.dealer, self.pot)\n while self.dealer.hand.value < 17:\n self.dealer.take_card(self.deck)\n show_table_later(self.player, self.dealer, self.pot)", "def user_play(play_shoe, player, dealer):\n print(\"\\nDealer shows:\" + dealer.get_viewable_hand())\n hit = True\n while hit == True:\n decision = \" \"\n if len(player.get_hand()) == 2:\n print(\"\\nPlayer \" + player.get_name() + \" your hand is:\" + player.get_viewable_hand())\n else:\n print(\"\\nYour hand is now:\" + str(player.get_viewable_hand()))\n decide_soft_score_print(player)\n if not(check_blackjack(player.get_score(), player.get_hand())):\n if not(player.check_bust()) and player.get_score() < 21:\n while not(decision[0] == \"h\") and not(decision[0] == \"s\"):\n decision = input(\"Would you like to Hit or Stand? \").lower()\n if decision[0]==\"h\":\n player.hit_hand(play_shoe)\n else:\n hit = False\n else:\n hit = False\n else:\n hit = False\n check_stand(player)", "def test_gain(self):\n self.plr.piles[Piles.DECK].set(\"Duchy\")\n self.plr.test_input = [\"Get Estate\"]\n self.plr.gain_card(\"Cursed Village\")\n self.assertNotIn(\"Curse\", self.plr.piles[Piles.DISCARD])\n self.assertIsNotNone(self.plr.piles[Piles.DISCARD][\"Estate\"])\n self.assertIn(\"Duchy\", self.g.trashpile)", "def evalHand(hand):\n # os.system(\"clear\")\n #print(\"dealer hand before evalHand is: \", hand.showHand())\n if (1 in hand.cards) and (21 - hand.handSum() >= 10):\n print(\"found a 1 value Ace in the hand\")\n hand.cards[hand.cards.index(1)] = 11 # Change the first ace from value 1\n # to value 11\n if (11 in hand.cards) and (hand.handSum() >= 22):\n print(\"found an 11 value Ace in the hand and sum > 21\")\n hand.cards[hand.cards.index(11)] = 1 # Change the first ace from value 1\n # to value 11", "def test_play_nobane(self):\n self.victim.piles[Piles.HAND].set(\"Copper\", \"Silver\")\n self.attacker.piles[Piles.HAND].set(\n \"Copper\", \"Silver\", \"Gold\", \"Duchy\", \"Province\"\n )\n self.attacker.add_card(self.card, Piles.HAND)\n self.attacker.test_input = [\"Duchy\", \"Province\", \"finish\"]\n self.attacker.play_card(self.card)\n try:\n self.assertIn(self.g[self.g._bane].cost, (2, 3))\n self.assertEqual(self.attacker.piles[Piles.HAND].size(), 5 + 2 - 2)\n self.assertIn(\"Curse\", self.victim.piles[Piles.DISCARD])\n except AssertionError: # pragma: no cover\n print(f\"Bane={self.g._bane}\")\n self.g.print_state()\n raise", "def check_for_bust_or_bj(hand=bj.player1.hand):\r\n phv = bj.player1.hand_value_check(hand) # check player hand value\r\n phv = [x for x in phv if x <= 21] # remove all hand values that exceed 21\r\n if len(phv) == 0: # if no values under 21 are available -> bust\r\n if hand == bj.player1.hand:\r\n bj.player1.final_hand_val = \"bust\"\r\n return\r\n else:\r\n bj.player1.final_hand2_val = \"bust\"\r\n return\r\n elif 21 in phv: # if 21 is among values -> blackjack\r\n if hand == bj.player1.hand:\r\n bj.player1.final_hand_val = 21\r\n return\r\n else:\r\n bj.player1.final_hand2_val = 21\r\n return", "def who_won(self):\n if self.dealer.hand.value > 21:\n dealer_busts(self.pot)\n self.player.get_money(self.pot)\n elif self.dealer.hand.value > self.player.hand.value:\n dealer_win(self.dealer.hand, self.player.hand, self.pot)\n self.player.pay_out(self.pot)\n elif self.dealer.hand.value == self.player.hand.value:\n 
push(self.dealer.hand.value, self.player.hand.value)\n else:\n player_win_text(self.pot)\n self.player.get_money(self.pot)" ]
[ "0.7471603", "0.74018157", "0.72898954", "0.7104887", "0.7001504", "0.6908795", "0.6888352", "0.68266976", "0.6750696", "0.6739227", "0.6719116", "0.66749984", "0.6645196", "0.66190434", "0.65957236", "0.6576811", "0.6549812", "0.65478945", "0.65337396", "0.6461222", "0.6454176", "0.6431715", "0.6429486", "0.6429173", "0.6428685", "0.641232", "0.64010525", "0.6397024", "0.638221", "0.6362012" ]
0.74406487
1
Finds valid positions for the Tile mover in Skilaverkefni 8. Takes in the current position of the game
def validpositions(tile):
    if tile == 11 or tile == 21:
        valid_pos = "n"
    elif tile == 12:
        valid_pos = "nes"
    elif tile == 13:
        valid_pos = "es"
    elif tile == 22 or tile == 33:
        valid_pos = "sw"
    elif tile == 23:
        valid_pos = "ew"
    elif tile == 32:
        valid_pos = "ns"
    possible_directions(valid_pos)
    return valid_pos
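A minimal usage sketch for the row above. The possible_directions helper is external to the record, so a hypothetical stub is supplied here; note that the function raises UnboundLocalError for any tile outside the six handled values, since no default branch sets valid_pos.

def possible_directions(valid_pos):
    # Hypothetical stub for the helper the record calls but does not define.
    print("You can move:", ", ".join(valid_pos))

print(validpositions(12))  # prints "You can move: n, e, s", then returns "nes"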
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def checkMoves(self,board):\n possibleMoves = []\n\n for c in xrange(0,8):\n for r in xrange(0,8):\n if board.isValidMove(self.tile,c,r):\n possibleMoves.append(c+r*8)\n\n return possibleMoves", "def winningMove():\r\n\tglobal turn, tile1, tile2, tile3, tile4, tile5, tile6, tile7, tile8, tile9\r\n\r\n\tnoWin=True\r\n\tmove=False\r\n\tif turn==\"Player1\":\r\n\t\tif validMove(1):\r\n\t\t\ttile1+=1\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove=1\t\r\n\t\t\ttile1+=-1\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\r\n\t\tif validMove(2):\r\n\t\t\ttile2+=1\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 2\r\n\t\t\ttile2+=-1\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\r\n\t\tif validMove(3):\r\n\t\t\ttile3+=1\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 3\r\n\t\t\ttile3+=-1\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\t\t\t\r\n\t\tif validMove(4):\r\n\t\t\ttile4+=1\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 4\t\r\n\t\t\ttile4+=-1\t\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\t\t\r\n\t\tif validMove(5):\r\n\t\t\ttile5+=1\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 5\t\t\r\n\t\t\ttile5+=-1\t\t\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\t\r\n\t\tif validMove(6):\r\n\t\t\ttile6+=1\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 6\t\r\n\t\t\ttile6+=-1\t\t\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\t\r\n\t\tif validMove(7):\r\n\t\t\ttile7+=1\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 7\t\r\n\t\t\ttile7+=-1\t\t\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\t\r\n\t\tif validMove(8):\r\n\t\t\ttile8+=1\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 8\t\r\n\t\t\ttile8+=-1\t\t\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\t\r\n\t\tif validMove(9):\r\n\t\t\ttile9+=1\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 9\t\t\r\n\t\t\ttile9+=-1\t\t\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\r\n\r\n\telif turn==\"Player2\":\r\n\t\tif validMove(1):\r\n\t\t\ttile1+=2\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 1\t\t\t\t\r\n\t\t\ttile1+=-2\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\r\n\t\tif validMove(2):\r\n\t\t\ttile2+=2\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 2\r\n\t\t\ttile2+=-2\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\r\n\t\tif validMove(3):\r\n\t\t\ttile3+=2\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 3\r\n\t\t\ttile3+=-2\t\t\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\t\r\n\t\tif validMove(4):\r\n\t\t\ttile4+=2\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 4\t\r\n\t\t\ttile4+=-2\t\t\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\t\r\n\t\tif validMove(5):\r\n\t\t\ttile5+=2\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 5\t\r\n\t\t\ttile5+=-2\t\t\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\t\r\n\t\tif validMove(6):\r\n\t\t\ttile6+=2\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 6\t\r\n\t\t\ttile6+=-2\t\t\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\t\r\n\t\tif validMove(7):\r\n\t\t\ttile7+=2\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 7\t\r\n\t\t\ttile7+=-2\t\t\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\t\r\n\t\tif validMove(8):\r\n\t\t\ttile8+=2\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 8\t\r\n\t\t\ttile8+=-2\t\t\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\t\r\n\t\tif validMove(9):\r\n\t\t\ttile9+=2\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 9\r\n\t\t\ttile9+=-2\t\t\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\r\n\tif noWin:\r\n\t\treturn False", "def get_all_positions(board, 
white_turn):\n list = []\n for row in range(8):\n for col in range(8):\n # White\n if white_turn and white_piece_on_pos((row, col), board):\n obj = board[row][col]\n if type(obj) is Pawn:\n for valid_pos in valid_positions_pawn_white((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is Tower:\n for valid_pos in valid_positions_tower_white((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is Bishop:\n for valid_pos in valid_positions_bishop_white((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is Horse:\n for valid_pos in valid_positions_horse_white((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is Queen:\n for valid_pos in valid_positions_queen_white((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is King:\n for valid_pos in valid_positions_king_white((row, col), board):\n list.append(((row, col), valid_pos))\n # Black\n elif (not white_turn) and black_piece_on_pos((row, col), board):\n obj = board[row][col]\n if type(obj) is Pawn:\n for valid_pos in valid_positions_pawn_black((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is Tower:\n for valid_pos in valid_positions_tower_black((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is Bishop:\n for valid_pos in valid_positions_bishop_black((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is Horse:\n for valid_pos in valid_positions_horse_black((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is Queen:\n for valid_pos in valid_positions_queen_black((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is King:\n for valid_pos in valid_positions_king_black((row, col), board):\n list.append(((row, col), valid_pos))\n return list", "def chessboardGame(x, y):\n xin = x\n yin = y\n\n # These squares have no possible move, therefore, are losing;\n # we chose these squares by sight; while loop below expands these sets\n # until we encompass whole board\n # it was not clear to me in the beginning that every square has a unique\n # determinant ending under optimal play\n losing_start = set([(1, 1), (2, 1), (1, 2), (2, 2)])\n\n # These squares can jump to losing_start in one move, so are winning\n winning_start = set([(1, 3), (1, 4), (2, 3), (2, 4),\n (3, 1), (3, 2), (3, 3), (3, 4),\n (4, 1), (4, 2), (4, 3)])\n\n def nextset(x, y):\n def isvalid(coord):\n return True if coord[0] >= 1 and coord[1] >= 1 \\\n and coord[0] <= 15 and coord[1] <= 15 else False\n\n nextsquares = [(x - 2, y + 1), (x - 2, y - 1), (x + 1, y - 2),\n (x - 1, y - 2)]\n nextsquares = set([*filter(isvalid, nextsquares)])\n # print(nextsquares)\n return nextsquares\n\n # run a few times through whole board;\n # it takes 5 times to find a definitive win path for all 225 squares\n # 161 squares are winning for first player\n # 64 squares are losing starting for first player\n test_set = [(i, j) for i in range(1, 16) for j in range(1, 16)]\n times = 1\n while (len(winning_start) + len(losing_start)) < 225:\n for coords in test_set:\n x_ = coords[0]\n y_ = coords[1]\n thenextset = nextset(x_, y_)\n # print('testing', x_, y_, thenextset)\n\n if (x_, y_) in losing_start:\n # print('No Path, Second wins')\n pass\n elif (x_, y_) in winning_start:\n # print('One jump to terminal square, First wins')\n pass\n elif (len(winning_start.intersection(thenextset))\n == len(thenextset)):\n # if next set ONLY includes winning_starts, First 
loses because\n # he has no choice but give win to opponent\n # need to add x,y to losing_start\n losing_start.add((x_, y_))\n # print('we lose, Second wins')\n elif len(losing_start.intersection(thenextset)) > 0:\n # if next set includes ANY losing_start, we win by choosing it\n # need to add x,y to winning_start\n winning_start.add((x_, y_))\n # print('First wins')\n else:\n # print('do not know')\n pass\n\n print('Run', times, len(winning_start) + len(losing_start))\n times += 1\n\n print(len(winning_start))\n print(len(losing_start))\n\n # prints schematic of Winor Loss of each of 15x15 squares\n\n print(' '.join(map(str, [i for i in range(1, 16)])))\n for i in range(15):\n row = ''\n for j in range(15):\n if test_set[i * 15 + j] in winning_start:\n row = row + 'W '\n else:\n row = row + 'L '\n print(row + str(i))\n\n if (xin, yin) in winning_start:\n print('First wins with', xin, yin)\n return 'First'\n else:\n print('Second wins with', xin, yin)\n return 'Second'", "def getAllValidMoves(x0, y0):\n deltas = [\n (-2, -1),\n (-2, +1),\n (+2, -1),\n (+2, +1),\n (-1, -2),\n (-1, +2),\n (+1, -2),\n (+1, +2),\n ]\n validPositions = []\n\n for (x, y) in deltas:\n xCandidate = x0 + x\n yCandidate = y0 + y\n if 0 < xCandidate < 8 and 0 < yCandidate < 8:\n validPositions.append([xCandidate, yCandidate])\n\n return validPositions", "def legal_moves(self, player, board):\r\n #go through the whole board and check whether the piece is on the board or not\r\n #num/row size - num%col == num2/row size - num@%col\r\n #num/row size + num%col\r\n moves = list()\r\n opp = self.opponent(player)\r\n #print(board)\r\n for i in self.squares():\r\n if board[i] == core.EMPTY:\r\n for d in core.DIRECTIONS:\r\n endPt = self.find_bracket(i, player, board, d)\r\n if endPt!= None:\r\n moves.append(i)\r\n break\r\n\r\n return moves", "def getGameState(self):\n row1 = [0, 0, 0]\n row2 = [0, 0, 0]\n row3 = [0, 0, 0]\n tilePosStatement = Statement()\n posTerm1 = Term('?x')\n posTerm2 = Term('?y')\n posTerm3 = Term('?tile')\n tilePosStatement.terms = (posTerm1, posTerm2, posTerm3)\n tilePosStatement.predicate = 'tilePos'\n for fact in self.kb.facts:\n if match(fact.statement, tilePosStatement):\n if fact.statement.terms[2] == Term(Constant('tile1')):\n term = 1\n if fact.statement.terms[2] == Term(Constant('tile2')):\n term = 2\n if fact.statement.terms[2] == Term(Constant('tile3')):\n term = 3\n if fact.statement.terms[2] == Term(Constant('tile4')):\n term = 4\n if fact.statement.terms[2] == Term(Constant('tile5')):\n term = 5\n if fact.statement.terms[2] == Term(Constant('tile6')):\n term = 6\n if fact.statement.terms[2] == Term(Constant('tile7')):\n term = 7\n if fact.statement.terms[2] == Term(Constant('tile8')):\n term = 8\n if fact.statement.terms[2] == Term(Constant('empty')):\n term = -1\n if fact.statement.terms[0] == Term(Constant('pos1')):\n col = 0\n elif fact.statement.terms[0] == Term(Constant('pos2')):\n col = 1\n elif fact.statement.terms[0] == Term(Constant('pos3')):\n col = 2\n if fact.statement.terms[1] == Term(Constant('pos1')):\n row1[col] = term\n\n elif fact.statement.terms[1] == Term(Constant('pos2')):\n row2[col] = term\n\n elif fact.statement.terms[1] == Term(Constant('pos3')):\n row3[col] = term\n\n row1 = tuple(row1)\n row2 = tuple(row2)\n row3 = tuple(row3)\n result = (row1, row2, row3)\n return result\n\n ### Student code goes here", "def update_map(self, screenshot=None):\n # Get the visible tiles\n nearby = self.game_map[\n (self.player_position[0] - 10): (self.player_position[0] + 11),\n 
(self.player_position[1] - 10): (self.player_position[1] + 11)\n ]\n\n # Clear NPCs in the nearby as they may have moved\n nearby[nearby == self.TILES.WEAPON_SHOPKEEPER.value] = self.TILES.UNKNOWN.value\n nearby[nearby == self.TILES.BLACKSMITH.value] = self.TILES.UNKNOWN.value\n\n # Take screenshot and isolate the gamplay region\n if screenshot is None:\n screenshot = utils.take_screenshot()\n play = screenshot[8:344, 8:344]\n\n # Loop through all unknown tiles in the nearby\n for i, j in zip(*np.where(nearby == self.TILES.UNKNOWN.value)):\n # Scale up the dimensions\n tile_x = i * self.TILE_DIM\n tile_y = j * self.TILE_DIM\n\n # The center cell is always the player\n if i == 10 and j == 10:\n tile_x = self.player_position[0] + int(tile_x / 16) - 10\n tile_y = self.player_position[1] + int(tile_y / 16) - 10\n self.game_map[(tile_x, tile_y)] = self.TILES.PLAYER.value\n continue\n\n # Slice the tile from the play region\n tile = play[tile_y:tile_y + self.TILE_DIM,\n tile_x:tile_x + self.TILE_DIM]\n\n tile_x = self.player_position[0] + int(tile_x / 16) - 10\n tile_y = self.player_position[1] + int(tile_y / 16) - 10\n\n # Go through all tile types looking for a high confidence match\n template = None\n for potential_template in self.templates:\n if np.allclose(potential_template[0], tile, 1, 1):\n template = potential_template\n break\n\n # No match, assume it is inaccessible\n if template is None:\n self.game_map[(tile_x, tile_y)] = self.TILES.INACCESSIBLE.value\n continue\n\n # By default, mark tile as inaccessible\n label = None\n\n # Mark as mineable\n if re.search(r'rock', template[1], re.M | re.I):\n label = self.TILES.MOUNTAIN.value\n elif re.search(r'door', template[1], re.M | re.I):\n label = self.TILES.DOOR.value\n elif re.search(r'gravel', template[1], re.M | re.I):\n label = self.TILES.GRAVEL.value\n elif re.search(r'shopkeeper', template[1], re.M | re.I):\n label = self.TILES.WEAPON_SHOPKEEPER.value\n elif re.search(r'blacksmith', template[1], re.M | re.I):\n label = self.TILES.BLACKSMITH.value\n elif re.search(r'guard', template[1], re.M | re.I):\n label = self.TILES.INACCESSIBLE.value\n elif re.search(r'inaccessible', template[1], re.M | re.I):\n label = self.TILES.INACCESSIBLE.value\n elif re.search(r'accessible', template[1], re.M | re.I):\n label = self.TILES.ACCESSIBLE.value\n\n # Calculate coordinates of tile in the map relative to the player\n self.game_map[(tile_x, tile_y)] = label\n\n # Go through all tiles in the gameplay region to find the mountains\n for i, j in zip(*np.where(nearby == self.TILES.MOUNTAIN.value)):\n # Get the tile to the left of the mountain\n tile_left = nearby[(i-1, j)]\n\n # Only allow mountains to be minable if they are beside gravel\n if not tile_left == self.TILES.GRAVEL.value:\n nearby[(i, j)] = self.TILES.INACCESSIBLE.value\n\n # Save the game map to disk\n np.savetxt('map.txt', self.game_map, fmt='%d')", "def getPossibleMoves(self): # called to get possible positions this piece can go\r\n \r\n moves = {}\r\n\r\n ids = []\r\n\r\n for piece in self.board.pieces.values():\r\n if piece.name == \"empty\":\r\n piece.glow = False\r\n piece.ready = False\r\n\r\n self.piece = self\r\n\r\n def check(direction=\"left\", heading=\"north\", x=None, y=None):\r\n piece = self.piece\r\n if direction == \"left\": x -= 50\r\n else: x += 50\r\n\r\n if heading == \"north\": y -= 50\r\n else: y += 50\r\n\r\n if (x, y) in self.board.pieces: # position is empty\r\n empty = self.board.getPiece((x, y))\r\n empty.glow = True\r\n old, new, obj = (direction, heading), 
(x, y), piece\r\n identity = self.getRandomID(ids) # get an ID for the move\r\n moves[identity] = old, new, obj\r\n\r\n if piece.isKing: # piece is a king, so go on\r\n check(direction, heading, x, y)\r\n else: # its not empty, so check if its comrade\r\n x1, y1 = x+25, y+25\r\n piece2 = self.board.getPiece((x1, y1))\r\n try:\r\n if piece.isComrade(piece2):# piece is comrade so return\r\n return\r\n else: # piece is not comrade, so check empty\r\n if direction == \"left\": x2 = x1-25-50\r\n else: x2 = x1-25+50\r\n\r\n if heading == \"north\": y2 = y1-25-50\r\n else: y2 = y1-25+50\r\n\r\n if (x2, y2) in self.board.pieces: # its empty, so notify player\r\n empty = self.board.getPiece((x2, y2))\r\n empty.glow = True\r\n empty.ready = True\r\n\r\n old, new, obj = (direction, heading), (x2, y2), piece2\r\n identity = self.getRandomID(ids)\r\n moves[identity] = old, new, obj\r\n\r\n check(direction, heading, piece2.x-25, piece2.y-25)\r\n check(direction, heading, x2, y2)\r\n \r\n # check empty or comrade again\r\n if direction == \"left\": x3 = x2-50\r\n else: x3 = x2+50\r\n\r\n if heading == \"north\": y3 = y2-50\r\n else: y3 = y2+50\r\n\r\n if (x3, y3) in self.board.pieces: # positon(address) is empty\r\n return\r\n else: # there is a piece, so check if comrade, stop, if not comrade continue\r\n x3+=25\r\n y3+= 25\r\n\r\n piece3 = self.board.getPiece((x3, y3))\r\n if piece3.isComrade(piece2): # comrades, so stop\r\n return\r\n else: # not comrades, so continue\r\n self.piece = piece3\r\n check(direction, heading, x, y)\r\n\r\n #self.piece = piece2\r\n \r\n #check(direction, heading, x2, y2) # keep searching\r\n else: # its not empty, so return\r\n return\r\n except:\r\n pass\r\n\r\n if self.piece.name == \"white\": direction = \"north\"\r\n else: direction = \"south\"\r\n \r\n check(\"left\", direction, self.piece.x-25, self.piece.y-25)\r\n check(\"right\", direction, self.piece.x-25, self.piece.y-25)\r\n \r\n if self.piece.isKing:\r\n if self.piece.name == \"white\": heading = \"south\"\r\n else: heading = \"north\"\r\n \r\n check(\"left\", heading, self.piece.x-25, self.piece.y-25)\r\n check(\"right\", heading, self.piece.x-25, self.piece.y-25)\r\n\r\n if self.piece.name == \"white\":\r\n eatMoves = self.board.game.thinkEatMoves(moves, \"person\")\r\n if eatMoves is not None:\r\n return eatMoves\r\n\r\n return moves", "def shuffle_pos(self, ):\n x, y = 0, 0\n while self.maze.structure[int(y / 40)][int(x / 40)] != \"0\" \\\n or (x, y) in self.forbiden_tulpes:\n x = random.randint(0, 14) * sprite_size\n y = random.randint(0, 14) * sprite_size\n self.forbiden_tulpes.append((x, y))\n return x, y", "def move_second_inbox(self, i, j):\n \"\"\"the function achieves the above statement by not filling in the third line of any possible sqaure \"\"\"\n if i == 0: #top\n return check_two_spots(self.board[i+1][j-1], self.board[i+1][j+1], self.board[i+2][j])\n\n if i == 6: #bottom\n return check_two_spots(self.board[i-1][j-1], self.board[i-1][j+1], self.board[i-2][j])\n\n if j == 0: #left\n return check_two_spots(self.board[i-1][j+1], self.board[i+1][j+1], self.board[i][j+2])\n\n\n if j == 6: #right\n return check_two_spots(self.board[i-1][j-1], self.board[i+1][j-1], self.board[i][j-2])\n\n\n if i == 2 or i == 4: # possible horizontal moves\n return check_two_spots(self.board[i-1][j-1], self.board[i-1][j+1], self.board[i-2][j]) and check_two_spots(self.board[i+1][j-1], self.board[i+1][j+1], self.board[i+2][j])\n\n if j == 2 or j == 4: # possible vertical moves\n return 
check_two_spots(self.board[i-1][j-1], self.board[i+1][j-1], self.board[i][j-2]) and check_two_spots(self.board[i-1][j+1], self.board[i+1][j+1], self.board[i][j+2])", "def is_legal_move(player, row_from, col_from, row_to, col_to):\r\n illegal_moves = [(0, 0), (2, 0), (0, 4), (2, 4)]\r\n\r\n \"\"\"special moves that are move available according to diagram\r\n List of tuples to and from values that are not possible\"\"\"\r\n moves_not_permitted = [[(0, 2), (1, 1)], [(0, 2), (1, 3)], [(1, 1), (2, 2)], [(1, 3), (2, 2)]]\r\n row_diff = abs(row_from - row_to)\r\n col_diff = abs(col_from - col_to)\r\n\r\n if player == 'hounds':\r\n\r\n if (row_to >= 0 and row_to < 3 and col_to >= 0 and col_to < 5):\r\n \"\"\"Check if the move is not out of bounds for the board with max col range 4 and row range 3\r\n and then check if it is a legal move\"\"\"\r\n\r\n if board[row_to][col_to] == 0 and (row_to, col_to) not in illegal_moves and row_diff <= 1 and col_diff <= 1:\r\n \"\"\" Check if the position is blank.\r\n Then check if the move is not one of the blank places\r\n Then check if the row difference and column difference isn't more than 1\r\n \"\"\"\r\n if (col_to - col_from) < 0: # no moves to the left of the board\r\n return False\r\n\r\n for item in moves_not_permitted:\r\n if len(set([(row_from, col_from), (row_to, col_to)]).intersection(set(item))) == 2:\r\n \"\"\" If to and from co-ordinates are present in the moves_not_permitted list then return False\"\"\"\r\n return False\r\n else:\r\n pass\r\n return True\r\n else:\r\n return False\r\n\r\n else:\r\n \"\"\"When player is a hare\"\"\"\r\n\r\n if (row_to >= 0 and row_to < 3 and col_to >= 0 and col_to < 5):\r\n \"\"\"Check if the move is not out of bounds for the board with max col range 4 and row range 3\r\n and then check if it is a legal move\"\"\"\r\n\r\n if board[row_to][col_to] == 0 and (row_to, col_to) not in illegal_moves and row_diff <= 1 and col_diff <= 1:\r\n \"\"\" Check if the position is blank.\r\n Then check if the move is not one of the blank places\r\n Then check if the row difference and column difference isn't more than 1\"\"\"\r\n\r\n for item in moves_not_permitted:\r\n if len(set([(row_from, col_from), (row_to, col_to)]).intersection(set(item))) == 2:\r\n \"\"\" If to and from co-ordinates are present in the moves_not_permitted list then return False\"\"\"\r\n return False\r\n else:\r\n pass\r\n return True\r\n\r\n else:\r\n return False", "def find(Map, PosI, PosF):\n \n # Pour les tests, cf. 
Pathfinding et Pathfinding2 \n \n InitialPosI = PosI\n InitialPosF = PosF\n Chemin = []\n \n Hvalue = np.zeros((np.shape(Map))) #Distance\n Gvalue = np.zeros((np.shape(Map))) #Movement Cost\n Fvalue = np.zeros((np.shape(Map))) #G+H \n Gvalue[:] = np.nan #initialiser Gvalue à une matrice NaN\n \n OpenList = [(InitialPosI,'N')]\n CloseList = []\n \n # Initialisation de Hvalue\n for i in range(np.shape(Hvalue)[0]):\n for j in range(np.shape(Hvalue)[1]):\n if Map[i,j] !=1:\n Hvalue[i,j] = abs(i-PosF[0]) + abs(j-PosF[1])\n else:\n Hvalue[i,j] = np.nan\n\n### Round 1 (+initialisations)\n \n CloseList.append(tuple(PosI))\n \n if PosI[0]-1>=0 and Map[PosI[0]-1,PosI[1]] != 1 and ((PosI[0]-1,PosI[1]) not in OpenList) and ((PosI[0]-1,PosI[1]) not in CloseList): #Check vertical haut\n OpenList.append(((PosI[0]-1,PosI[1]),'D')) #D : fleche vers le bas..\n if PosI[0]+1<=np.shape(Map)[0]-1 and Map[PosI[0]+1,PosI[1]] != 1 and ((PosI[0]+1,PosI[1]) not in OpenList) and ((PosI[0]+1,PosI[1]) not in CloseList): #Check vertical bas\n OpenList.append(((PosI[0]+1,PosI[1]),'U')) \n if PosI[1]-1>=0 and Map[PosI[0],PosI[1]-1] != 1 and ((PosI[0],PosI[1]-1) not in OpenList) and ((PosI[0],PosI[1]-1) not in CloseList): #Check horiz gauche\n OpenList.append(((PosI[0],PosI[1]-1),'R'))\n if PosI[1]+1<=np.shape(Map)[1]-1 and Map[PosI[0],PosI[1]+1] != 1 and ((PosI[0],PosI[1]+1) not in OpenList) and ((PosI[0],PosI[1]+1) not in CloseList): #Check horiz droit\n OpenList.append(((PosI[0],PosI[1]+1),'L'))\n \n \n for OV in OpenList: #OV pour OpenValue \n Gvalue[OV[0][0],OV[0][1]] = 10\n \n Fvalue = np.copy(Gvalue + Hvalue)\n for CV in CloseList: #CV pour ClosedValue\n Fvalue[CV[0],CV[1]] = np.nan\n \n\n#### Round NEXT \n ###Vers le min de Fvalue:\n while PosF not in CloseList and PosI != PosF:\n \n if np.all(np.isnan(Fvalue)): #Check si F est égale à une matrice Full NaN\n# print('Pas de chemin')\n return(False) # soit return False, soit return la position init, donc bon..\n \n Index = np.argwhere(Fvalue == np.nanmin(Fvalue))\n PosI = Index.tolist()[0]\n \n CloseList.append(tuple(PosI))\n if PosI[0]-1>=0 and Map[PosI[0]-1,PosI[1]] != 1 and ((PosI[0]-1,PosI[1]) not in OpenList) and ((PosI[0]-1,PosI[1]) not in CloseList): #Check vertical haut\n OpenList.append(((PosI[0]-1,PosI[1]),'D')) #DOWN (fleche vers le bas..)\n if PosI[0]+1<=np.shape(Map)[0]-1 and Map[PosI[0]+1,PosI[1]] != 1 and ((PosI[0]+1,PosI[1]) not in OpenList) and ((PosI[0]+1,PosI[1]) not in CloseList): #Check vertical bas\n OpenList.append(((PosI[0]+1,PosI[1]),'U')) #Up\n if PosI[1]-1>=0 and Map[PosI[0],PosI[1]-1] != 1 and ((PosI[0],PosI[1]-1) not in OpenList) and ((PosI[0],PosI[1]-1) not in CloseList): #Check horiz gauche\n OpenList.append(((PosI[0],PosI[1]-1),'R')) #Right\n if PosI[1]+1<=np.shape(Map)[1]-1 and Map[PosI[0],PosI[1]+1] != 1 and ((PosI[0],PosI[1]+1) not in OpenList) and ((PosI[0],PosI[1]+1) not in CloseList): #Check horiz droit\n OpenList.append(((PosI[0],PosI[1]+1),'L')) #Left\n \n for OV in OpenList:\n Gvalue[OV[0][0],OV[0][1]] = 10\n \n Fvalue = np.copy(Gvalue + Hvalue)\n for CV in CloseList:\n Fvalue[CV[0],CV[1]] = np.nan\n \n\n \n############## TRACING BACK \n PosF = InitialPosF\n\n while InitialPosI not in Chemin:\n \n for Trace in OpenList:\n if Trace[0] == PosF:\n Chemin.append(PosF)\n if Trace[1] == 'U':\n PosF = (PosF[0]-1,PosF[1]) #Go up\n elif Trace[1] == 'D':\n PosF = (PosF[0]+1,PosF[1]) #Go down\n elif Trace[1] == 'L':\n PosF = (PosF[0],PosF[1]-1) #Go left\n elif Trace[1] == 'R':\n PosF = (PosF[0],PosF[1]+1) #Go right\n# else:\n# 
print(Chemin)\n Chemin.reverse()\n return(Chemin)", "def _get_neighbours(self, position):\n grid = self._grid\n x, y = position\n neighbours = []\n offsets = [(0,1),(1,0),(0,-1),(-1,0)]\n shuffle(offsets)\n for offset in offsets:\n i, j = offset\n position = (x + i, y + j)\n if grid.valid_position(position) and position not in self.shots:\n neighbours.append(position)\n return neighbours", "def find_valid_posse(board: 'List') -> 'List':\n for i, a in enumerate(board):\n for j, b in enumerate(board):\n if j != i:\n for k, c in enumerate(board):\n if k not in (i, j) and \\\n is_valid_posse((a, b, c)):\n # print((i, j, k))\n return [a, b, c]", "def validMove(move):\r\n\r\n\tglobal tile1, tile2, tile3, tile4, tile5, tile6, tile7, tile8, tile9\r\n\r\n\ta=eval(\"tile\"+str(move)+\"==0\")\r\n\treturn a", "def get_legal_moves(self):\n moves = []\n if self.player_locations[self.whose_turn] is None:\n return self.get_blank_locations()\n matrix = [(1,0), (-1,0), (0,1), (0,-1), (1,1), (1,-1), (-1, 1), (-1,-1)]\n\n for dx, dy in matrix:\n x,y = self.player_locations[self.whose_turn]\n while x+dx <= xdim and x+dx >= 0 and y+dy <= ydim and y+dy >= 0:\n x = x+dx\n y = y+dx\n if self.board[x][y] : break\n moves.append((x,y))\n return moves", "def find_player():\r\n global current_position, previous_position #say that we want to refer to the one above\r\n for row in range(0, len(map1)):\r\n for column in range(0, len(map1[0])):\r\n if map1[row][column] == 'p':\r\n current_position = (row, column)\r\n previous_position = (row, column)\r\n return 0 #exit from the function\r", "def _calc_missile_scan(self):\n self._scan_line = [False] * Stella.FRAME_WIDTH\n\n if self.enam & 0x02:\n for n in range(self._number):\n # Uses same stretching as 'ball'\n width = 1 << ((self.nusiz & 0x30) >> 4)\n # Uses similar position to 'player'\n for i in range(width):\n x = (i +self.resm + n*self._gap*8 - Stella.HORIZONTAL_BLANK) % Stella.FRAME_WIDTH \n self._scan_line[x] = True", "def moles(board):\n return (pos for pos in range(1, length+1) if at(board, pos))", "def extensions(self):\n def swap(marker, mx, x2, my, y2):\n \"\"\"\n If proper conditions are met, jump over the peg depending on the\n condition\n @param marker: map, list of list\n @param mx: Original x coordinate\n @param x2: Replacement x coordinate\n @param my: Original y coordinate\n @param y2: Replacement y coordinate\n @return: list[list[str]]\n \"\"\"\n # creates a deep copy\n # each if statement checks whether to move the piece N S E W by\n # comparing the current coordinates and the new coordinates\n map = [x[:] for x in marker]\n map[my][mx], map[y2][x2] = map[y2][x2], map[my][mx]\n if my < y2:\n map[my+1][mx] = \".\"\n elif my > y2:\n map[my-1][mx] = \".\"\n elif mx < x2:\n map[my][mx+1] = \".\"\n else:\n map[my][mx-1] = \".\"\n return map\n\n def legal_move(marker, x, y, direction):\n \"\"\"\n Checks if there is a potential move at the direction of\".\"\n coordinate\n @param marker: map of the board\n @param x: x coordinate\n @param y: y coordinate\n @param direction : North South East West of the \".\"\n @return: boolean\n \"\"\"\n # first if statement determines the directions\n # second if statement checks if the \"potential move\" is within the index\n if direction == \"N\":\n if 0 <= y-2 < len(marker):\n return marker[y-2][x] == marker[y-1][x] == '*'\n if direction == \"S\":\n if 0 <= y+2 < len(marker):\n return marker[y+2][x] == marker[y+1][x] == '*'\n if direction == \"W\":\n if 0 <= x-2 < len(marker[0]):\n return marker[y][x-2] == marker[y][x-1] == 
'*'\n if direction == \"E\":\n if 0 <= x+2 < len(marker[0]):\n return marker[y][x+2] == marker[y][x+1] == '*'\n return False\n\n combos = []\n # For loops go through the coordinates\n # each if statement checks and appends the new scenario\n # iff there is a legal move available\n for y in range(len(self._marker)):\n for x in range(len(self._marker[0])):\n if self._marker[y][x] == '.':\n if legal_move(self._marker, x, y, 'N'):\n combos.append(GridPegSolitairePuzzle(swap(self._marker,\n x, x, y, y-2), self._marker_set))\n if legal_move(self._marker, x, y, 'S'):\n combos.append(GridPegSolitairePuzzle(swap(self._marker,\n x, x, y, y+2), self._marker_set))\n if legal_move(self._marker, x, y, 'W'):\n combos.append(GridPegSolitairePuzzle(swap(self._marker,\n x, x-2, y, y), self._marker_set))\n if legal_move(self._marker, x, y, 'E'):\n combos.append(GridPegSolitairePuzzle(swap(self._marker,\n x, x+2, y, y), self._marker_set))\n return combos", "def attack(self, somerow, somecol):\n valid_move = True\n for i in range(self.size):\n if self.is_valid_position(somerow, i):\n if self.board[somerow][i] != \"0\":\n #checks the same row\n valid_move = False\n for i in range(self.size):\n if self.is_valid_position(i, somecol):\n if self.board[i][somecol] != \"0\":\n #checks the same column\n valid_move = False \n for i in range(self.size):\n if self.is_valid_position(somerow+i, somecol+i):\n if self.board[somerow+i][somecol+i] != \"0\":\n #checks diagonal\n valid_move = False\n for i in range(self.size):\n if self.is_valid_position(somerow+i, somecol-i):\n if self.board[somerow+i][somecol-i] != \"0\":\n valid_move = False\n for i in range(self.size):\n if self.is_valid_position(somerow-i, somecol+i):\n if self.board[somerow-i][somecol+i] != \"0\":\n valid_move = False\n for i in range(self.size):\n if self.is_valid_position(somerow-i, somecol-i):\n if self.board[somerow-i][somecol-i] != \"0\":\n valid_move = False\n return valid_move", "def check_victory(board):\n\n for row in range(HEIGHT):\n for col in range(WIDTH):\n\n player = board[row][col]\n\n # not a player move\n if player == 0 or player == 9:\n continue\n\n # look right\n if col + 3 < WIDTH and player == board[row][col + 1] and player == board[row][col + 2]\\\n and player == board[row][col + 3]:\n if player == 1:\n return +1\n else:\n return -1\n\n if row + 3 < HEIGHT:\n\n # down\n if player == board[row + 1][col] and player == board[row + 2][col] and player == board[row + 3][col]:\n if player == 1:\n return +1\n else:\n return -1\n\n # down and right\n if col + 3 < WIDTH and player == board[row + 1][col + 1] and player == board[row + 2][col + 2]\\\n and player == board[row + 3][col + 3]:\n if player == 1:\n return +1\n else:\n return -1\n\n # down and left\n if col - 3 >= 0 and player == board[row + 1][col - 1] and player == board[row + 2][col - 2] \\\n and player == board[row + 3][col - 3]:\n if player == 1:\n return +1\n else:\n return -1\n\n\n # # if no one has won yet\n for row in range(HEIGHT):\n for col in range(WIDTH):\n if board[row][col] == 0 or board[row][col] == 9:\n return None\n\n return 0", "def check_complete_board(start_pos, dim_square, board):\n change = False\n for row in range(8):\n for col in range(8):\n # Grab image on real board\n im = region_grabber((start_pos[0] + col * dim_square[0],\n start_pos[1] - (row + 1.0) * dim_square[1],\n start_pos[0] + (col + 1.0) * dim_square[0],\n start_pos[1] - row * dim_square[1]))\n\n # Check if piece corresponds with piece on board if there is a piece\n if piece_on_pos((row, col), 
board):\n obj = board[row][col]\n if (row + col) % 2 == 0: # Black background\n pos = imagesearcharea(obj.im_b, 0, 0, 0, 0, 0.9, im)\n if pos != [-1, -1]:\n continue\n else: # White background\n pos = imagesearcharea(obj.im_w, 0, 0, 0, 0, 0.9, im)\n if pos != [-1, -1]:\n continue\n\n # Else --> Go through every possible image\n if (row + col) % 2 == 0: # Black background\n # Pawn\n pos = imagesearcharea(\"Images/PWB.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Pawn(\"PW\")\n continue\n pos = imagesearcharea(\"Images/PBB.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Pawn(\"PB\")\n continue\n # Tower\n pos = imagesearcharea(\"Images/TWB.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Tower(\"TW\")\n continue\n pos = imagesearcharea(\"Images/TBB.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Tower(\"TB\")\n continue\n # Horse\n pos = imagesearcharea(\"Images/HWB.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Horse(\"HW\")\n continue\n pos = imagesearcharea(\"Images/HBB.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Horse(\"HB\")\n continue\n # Bishop\n pos = imagesearcharea(\"Images/BWB.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Bishop(\"BW\")\n continue\n pos = imagesearcharea(\"Images/BBB.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Bishop(\"BB\")\n continue\n # King\n pos = imagesearcharea(\"Images/KWB.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = King(\"KW\")\n continue\n pos = imagesearcharea(\"Images/KBB.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = King(\"KB\")\n continue\n # Queen\n pos = imagesearcharea(\"Images/QWB.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Queen(\"QW\")\n continue\n pos = imagesearcharea(\"Images/QBB.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Queen(\"QB\")\n continue\n board[row][col] = None\n else: # White background\n # Pawn\n pos = imagesearcharea(\"Images/PWW.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Pawn(\"PW\")\n continue\n pos = imagesearcharea(\"Images/PBW.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Pawn(\"PB\")\n continue\n # Tower\n pos = imagesearcharea(\"Images/TWW.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Tower(\"TW\")\n continue\n pos = imagesearcharea(\"Images/TBW.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Tower(\"TB\")\n continue\n # Horse\n pos = imagesearcharea(\"Images/HWW.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Horse(\"HW\")\n continue\n pos = imagesearcharea(\"Images/HBW.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Horse(\"HB\")\n continue\n # Bishop\n pos = imagesearcharea(\"Images/BWW.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Bishop(\"BW\")\n continue\n pos = imagesearcharea(\"Images/BBW.jpg\", 0, 0, 0, 0, 0.9, im) # 
Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Bishop(\"BB\")\n continue\n # King\n pos = imagesearcharea(\"Images/KWW.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = King(\"KW\")\n continue\n pos = imagesearcharea(\"Images/KBW.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = King(\"KB\")\n continue\n # Queen\n pos = imagesearcharea(\"Images/QWW.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Queen(\"QW\")\n continue\n pos = imagesearcharea(\"Images/QBW.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Queen(\"QB\")\n continue\n board[row][col] = None\n\n if change:\n pyautogui.moveTo(start_pos[0] + 4 * dim_square[0],\n start_pos[1] - 4 * dim_square[1], 0.2)\n\n return change", "def legal_moves(board,player=None):\r\n \r\n possible_moves = []\r\n moves = []\r\n if player == None:\r\n moves += board.white + board.black\r\n elif player == -1:\r\n moves += board.black\r\n elif player == 1:\r\n moves += board.white\r\n \r\n captured = False\r\n for pos in moves:\r\n if pos[0] == 'A':\r\n m = [-8,-7,1,8,9]\r\n elif pos[0] == 'H':\r\n m = [-9,-8,-1,7,8]\r\n else:\r\n m = [-9,-8,-7,-1,1,7,8,9]\r\n loc = decode(pos)\r\n for i in m:\r\n captured = capture(board, player, possible_moves, pos, loc, i)\r\n canter(board, player, possible_moves, pos, loc, i)\r\n plain(board, player, possible_moves, pos, loc, i)\r\n \r\n if captured:\r\n enemy_list = []\r\n for capturing_move in possible_moves:\r\n if len(capturing_move) == 3:\r\n enemy_list.append(capturing_move)\r\n possible_moves = list(enemy_list)\r\n\r\n return possible_moves", "def possibleMovements(self,numIterations:int=50)->list[tuple]:\n x=random.randint(0,self._side-1); y=random.randint(0,self._side-1)\n possible_positions=[]\n positionsCovered=[(x,y)]\n for _ in range(numIterations):\n if x+2<self._side and y+1<self._side:\n possible_positions.append((x+2,y+1))\n \n if x+2<self._side and y-1<self._side and y-1>0:\n possible_positions.append((x+2,y-1))\n \n if x-2<self._side and y+1<self._side and x-2>0:\n possible_positions.append((x-2,y+1))\n \n if x-2<self._side and y-1<self._side and x-2>0 and y-1>0:\n possible_positions.append((x-2,y-1)) \n\n if x+1<self._side and y+2<self._side:\n possible_positions.append((x+1,y+2))\n \n if x+1<self._side and y-2<self._side and y-1>0:\n possible_positions.append((x+1,y-2))\n\n if x-1<self._side and y+2<self._side and x-1>0:\n possible_positions.append((x-1,y+2))\n \n if x-1<self._side and y-2<self._side and x-1>0 and y-2>0:\n possible_positions.append((x-1,y-2))\n\n newX,newY=random.choice(possible_positions) #choose randomly among the possible positions,and then repeat this \n x,y=newX,newY\n positionsCovered.append((newX,newY)) \n\n return positionsCovered", "def tinyMazeSearch(problem):\r\n from game import Directions\r\n s = Directions.SOUTH\r\n w = Directions.WEST\r\n return [s,s,w,s,w,w,s,w]", "def get_played_positions(board):\n return np.argwhere(board.state != -1)", "def boarderPosition(self, gameState):\n if gameState.isOnRedTeam(self.index):\n i = self.midWidth - 1\n else:\n i = self.midWidth + 1\n boudaries = [(i,j) for j in range(self.height)]\n validPositions = []\n for i in boudaries:\n if not gameState.hasWall(i[0],i[1]):\n validPositions.append(i)\n return validPositions", "def opponentBoarderPosition(self, gameState):\n if self.red:\n i = self.midWidth\n else:\n i = self.midWidth - 1\n 
boudaries = [(i,j) for j in range(self.height)]\n validPositions = []\n for i in boudaries:\n if not gameState.hasWall(i[0],i[1]):\n validPositions.append(i)\n return validPositions" ]
[ "0.6583925", "0.6443366", "0.642417", "0.6352862", "0.62645966", "0.62173724", "0.61744225", "0.6168146", "0.61642736", "0.61634237", "0.61494106", "0.6132909", "0.61250454", "0.6120502", "0.6100356", "0.6093781", "0.6078856", "0.6061042", "0.6057439", "0.6056023", "0.60472375", "0.6044866", "0.6044468", "0.60435283", "0.60423476", "0.603911", "0.6035927", "0.6012476", "0.60028964", "0.6001387" ]
0.68247694
0
Changes the tile according to which letter was put in the string. Takes 2 arguments: one for which direction was chosen and one for which tile it is currently located at. Returns the new tile
def tile_change(direction, tile):
    lower_direction = direction.lower()
    if lower_direction == "n":
        tile += 1
    elif lower_direction == "s":
        tile -= 1
    elif lower_direction == "e":
        tile += 10
    else:
        tile -= 10
    return tile
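A quick sanity check of the arithmetic above (the row*10+col tile encoding is inferred from the code, not stated in the record):

assert tile_change("n", 12) == 13   # north: +1
assert tile_change("e", 12) == 22   # east: +10
assert tile_change("W", 22) == 12   # any letter other than n/s/e hits the else (-10); input is lowercased first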
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def changeTile (self, posY, posX, tile=\"t\"):\r\n self.grid[posY][posX] = tile", "def move_character(self, old_y, old_x, y_pos, x_pos):\n self.map[old_y][old_x] = ' '\n self.map[y_pos][x_pos] = 'G'", "def get_tile(self, char):\n if char == \"#\":\n return self.tiles[0:32, 0:32]\n elif char == \"G\": # gates\n return self.tiles[8 * 32 : 9 * 32, 3 * 32 : 4 * 32] \n elif char == \"W\": # window\n return self.tiles[8 * 32 : 9 * 32, 4 * 32 : 5 * 32]\n elif char == \"C\": # checkout\n return self.tiles[2 * 32 : 3 * 32, 8 * 32 : 9 * 32]\n elif char == \"F\": # fruits\n return self.tiles[1 * 32 : 2 * 32, 4 * 32 : 5 * 32] \n elif char == \"S\": # spices\n return self.tiles[1 * 32 : 2 * 32, 3 * 32 : 4 * 32] \n elif char == \"R\": # dairy\n return self.tiles[8 * 32 : 9 * 32, 7 * 32 : 8 * 32] \n elif char == \"D\": # drinks\n return self.tiles[6 * 32 : 7 * 32, 13 * 32 : 14 * 32] \n elif char == \"c\": # customer/shopping cart\n return self.tiles[8 * 32 : 9 * 32, 6 * 32 : 7 * 32] \n else:\n return self.tiles[32:64, 64:96]", "def make_move(self, move, letter):\n self.positions[move] = letter", "def draw_tile(tile_id):\n if tile_id == 0:\n return \" \"\n if tile_id == 1:\n return \"#\"\n if tile_id == 2:\n return \"+\"\n if tile_id == 3:\n return \"-\"\n return \"o\"", "def move_character(self, direction, game_resolution_function):\n pos = self.tiles.index(2) # Catch player's position\n if direction == self.LEFT and pos % self.row_len != 0 \\\n and self.tiles[pos-1] != 1:\n self.tiles[pos-1] = 2\n elif direction == self.RIGHT and (pos+1) % self.row_len != 0 \\\n and self.tiles[pos+1] != 1:\n self.tiles[pos+1] = 2\n elif direction == self.UP and pos-self.row_len > 0 \\\n and self.tiles[pos-self.row_len] != 1:\n self.tiles[pos-self.row_len] = 2\n elif direction == self.DOWN and pos+self.row_len < len(self.tiles) \\\n and self.tiles[pos+self.row_len] != 1:\n self.tiles[pos+self.row_len] = 2\n else:\n return\n self.tiles[pos] = 0\n self.update_components_count()\n game_resolution_function()", "def switch(self, tile):\n self.tiles[self.tiles.index(tile)], self.opentile, self.prev = self.opentile, tile, self.opentile\n self.nb_move += 1", "def move(argument, player):\n current_tile = world.tile_exists(player.location_x, player.location_y)\n if argument == \"north\":\n if world.tile_exists(player.location_x, player.location_y-1):\n new_tile = world.tile_exists(player.location_x, player.location_y-1)\n if new_tile.__class__.__name__ in current_tile.connected: # Making sure prospective tile is connected\n if new_tile.can_enter: # Making sure prospective tile is enter-able\n player.move(player.location_x, player.location_y-1)\n else:\n print(new_tile.name + \" is locked.\")\n else:\n print(\"You can't do that.\")\n else:\n print(\"You can't do that.\")\n elif argument == \"south\":\n if world.tile_exists(player.location_x, player.location_y+1):\n new_tile = world.tile_exists(player.location_x, player.location_y+1)\n if new_tile.__class__.__name__ in current_tile.connected: # Making sure prospective tile is connected\n if new_tile.can_enter: # Making sure prospective tile is enter-able\n player.move(player.location_x, player.location_y+1)\n else:\n print(new_tile.name + \" is locked.\")\n else:\n print(\"You can't do that.\")\n else:\n print(\"You can't do that.\")\n elif argument == \"east\":\n if world.tile_exists(player.location_x+1, player.location_y):\n new_tile = world.tile_exists(player.location_x + 1, player.location_y)\n if new_tile.__class__.__name__ in current_tile.connected: # Making sure 
prospective tile is connected\n if new_tile.can_enter: # Making sure prospective tile is enter-able\n player.move(player.location_x+1, player.location_y)\n else:\n print(new_tile.name + \" is locked.\")\n else:\n print(\"You can't do that.\")\n else:\n print(\"You can't do that.\")\n elif argument == \"west\":\n if world.tile_exists(player.location_x-1, player.location_y):\n new_tile = world.tile_exists(player.location_x-1, player.location_y)\n if new_tile.__class__.__name__ in current_tile.connected: # Making sure prospective tile is connected\n if new_tile.can_enter: # Making sure prospective tile is enter-able\n player.move(player.location_x-1, player.location_y)\n else:\n print(new_tile.name + \" is locked.\")\n else:\n print(\"You can't do that.\")\n else:\n print(\"You can't do that.\")\n else:\n print(\"Movement not recognized. Specify a cardinal direction.\")\n return", "def set(self,argument):\n if argument == \"X\" or \"O\":\n self.tile=argument", "def set_tile(self, point, glyph=\".\"):\n self.matrix[point.y][point.x] = glyph", "def move_1_piece(context: GUI, old_coordinate, new_coordinate):\n\n old_tile = context.board.board_dict[old_coordinate]\n new_tile = context.board.board_dict[new_coordinate]\n\n new_tile.piece = old_tile.piece\n old_tile.piece = None\n\n context.update_move_printer(old_coordinate + \" \" + new_coordinate)", "def place_tile(self, rack_ind, row, col):\n self.board.board[col][row].letter = self.rack[rack_ind]\n self.placed_tiles[self.selected_tile] = (self.rack[self.selected_tile], (col, row))\n # set the rack tile to an empty string\n self.rack[self.selected_tile] = ''", "def position_tile(self, zero_row, zero_col, correct_tile):\n \n ans = \"\" \n vert_dist = abs(zero_row - correct_tile[0])\n horiz_dist = abs(zero_col - correct_tile[1])\n \n # Updates ans, the move string, based the correct_tile's\n # position relative to the target position.\n \n # SAME ROW\n if vert_dist == 0:\n # Left of target\n if zero_col > correct_tile[1]:\n # Moves zero tile left to correct_tile's position.\n ans += str(\"l\" * horiz_dist)\n # Moves correct_tile right to target position,\n # and moves zero tile to left of target position.\n if horiz_dist > 1:\n ans += str(\"urrdl\" * (horiz_dist - 1))\n # Right of target\n else:\n # Moves zero tile right to correct_tile's position.\n ans += str(\"r\" * horiz_dist)\n # Moves correct_tile left to target position,\n # and moves zero tile to left of target position.\n ans += str(\"ulldr\" * (horiz_dist - 1))\n ans += str(\"ulld\")\n \n # SAME COL\n elif horiz_dist == 0:\n # Moves zero tile up to correct_tile's position.\n ans += str(\"u\" * vert_dist)\n # Moves correct_tile down to target position, \n # and moves zero tile to left of target position.\n if vert_dist > 1:\n ans += str(\"lddru\" * (vert_dist - 1))\n ans += str(\"ld\")\n \n # UPPER LEFT\n elif correct_tile[1] < zero_col:\n # Moves zero tile up and left to correct_tile's position.\n ans += str(\"u\" * vert_dist)\n ans += str(\"l\" * horiz_dist)\n # Moves correct_tile right and down to target position,\n # and moves zero tile to left of target position.\n ans += str(\"drrul\" * (horiz_dist - 1))\n ans += str(\"druld\" * vert_dist)\n\n # UPPER RIGHT\n else:\n # Moves zero tile up and right to correct_tile's position.\n ans += str(\"u\" * vert_dist)\n ans += str(\"r\" * horiz_dist)\n # This if-elif-else statement moves correct_tile left and down to target position.\n # If statement is only used when target position is in row 2.\n if vert_dist == 1 and correct_tile[0] == 0:\n 
ans += str(\"dllur\" * (horiz_dist - 1))\n ans += str(\"dluld\")\n # Elif statement used when correct_tile is in the row above target position.\n elif vert_dist == 1: \n ans += str(\"ulldr\" * (horiz_dist - 1))\n ans += str(\"ullddruld\")\n # Else statement used when correct_tile is 1+ rows above target position.\n else:\n ans += str(\"dllur\" * (horiz_dist - 1))\n ans += str(\"dlu\")\n ans += str(\"lddru\" * (vert_dist - 1))\n ans += str(\"ld\")\n \n return ans", "def play_game(grid, instruction_list):\n location_x = instruction_list[1]\n location_y = instruction_list[0]\n tile = instruction_list[2]\n\n if tile == 0:\n grid[location_x][location_y] = ' '\n elif tile == 1:\n grid[location_x][location_y] = 'W'\n elif tile == 2:\n grid[location_x][location_y] = 'B'\n elif tile == 3:\n grid[location_x][location_y] = 'H'\n elif tile == 4:\n grid[location_x][location_y] = 'O'\n else:\n print('And I oop...play_game')\n\n return grid", "def set_character(self, y_pos, x_pos):\n self.map[y_pos][x_pos] = 'G'", "def move_character(direction: str, character: list):\n if direction == \"d\":\n character[1] += 1\n if direction == \"a\":\n character[1] -= 1\n if direction == \"w\":\n character[0] -= 1\n if direction == \"s\":\n character[0] += 1", "def translate_level_char(self, c, x, y):\n\n if c == \".\":\n return None\n\n elif c == \",\":\n return GrassCellLight()\n\n elif c == \"p\":\n return PathCellSandy()\n\n elif c == \"#\":\n return WallCellStone()\n \n elif c == \"l\":\n return LavaCell()\n \n elif c == \"-\":\n return MountainCellFloor()\n\n elif c == \"w\":\n return WaterCellLight()\n\n elif c == \"s\":\n return SnowCell()\n\n elif c == \"<\":\n return GrassCellLong()\n\n elif c == \"P\":\n return PathCellStone()\n\n elif c == \">\":\n return GrassCellFlowers()\n\n elif c == \"*\":\n return SandCell()\n\n elif c == \"v\":\n return VolcanicCellFloor()\n\n elif c == \"_\":\n return StoneCellFloor()\n\n elif c == \"b\":\n return BoulderCell()\n\n elif c == \"@\":\n assert self.player == None\n self.player_spawn = (x, y)\n self.player = PlayerEnt(self, x, y, img_player_sprites_down_standing)\n self.ents.append(self.player)\n return GrassCellLight()\n\n else:\n raise Exception(\"invalid level char: %s\" % repr(c))", "def change_position(board: Board, position: Position, character: str) -> Board:\n board = list(board)\n \n row = board[position[0]]\n new_row = row[:position[-1]] + character + row[position[-1] + 1:]\n board[position[0]] = new_row\n\n board = tuple(board) \n\n return board", "def replace_character_at_index(self, index, character):\n self._game_board = self.get_game()[:index] + character + self.get_game()[index + 1:]\n return self._game_board", "def add_tile(self, tile: str, row: int, col: int) -> Optional[str]:\n previous_tile = self.board[row][col]\n self.board[row][col] = tile\n return previous_tile", "def update_cell(self, player: str, letter: str) -> 'Cell':\n if self.player == 0 and self.letter == letter:\n self.player = 1 if player == 'p1' else 2\n return self", "def make_move(move):\n global manatee_pos\n global hyacinths\n global hyacinth_pos\n\n # Ends the program if movement is out of bounds\n if move == (0, 0):\n return None\n new_pos = (manatee_pos[0] + move[0], manatee_pos[1] + move[1])\n if new_pos[0] < 0 or new_pos[0] >= len(map):\n return None\n if new_pos[1] < 0 or new_pos[1] >= len(map[new_pos[0]]):\n return None\n\n entity = map[new_pos[0]][new_pos[1]]\n if entity == \"#\" or entity == \"G\":\n # Runs if movement is impossible\n return None\n if entity == \" \" or 
entity == \".\":\n # Runs if normal movement is possible\n map[new_pos[0]][new_pos[1]] = \"M\"\n map[manatee_pos[0]][manatee_pos[1]] = \" \"\n manatee_pos = new_pos\n return None\n if entity == \"O\":\n # Runs if manatee wins game\n map[new_pos[0]][new_pos[1]] = \"M\"\n map[manatee_pos[0]][manatee_pos[1]] = \" \"\n manatee_pos = new_pos\n return \"win\"\n if entity == \"\\\\\":\n # Runs if manatee eats hyacinth\n map[new_pos[0]][new_pos[1]] = \"M\"\n map[manatee_pos[0]][manatee_pos[1]] = \" \"\n manatee_pos = new_pos\n hyacinths += 1\n if len(hyacinth_pos) == hyacinths:\n map[grate_pos[0]][grate_pos[1]] = \"O\"\n return None\n if entity == \"*\":\n # Checks if manatee can push boat\n if move[0] == 0:\n new_boat_pos = (new_pos[0] + move[0], new_pos[1] + move[1])\n if new_boat_pos[0] < 0 or new_boat_pos[0] >= len(map):\n return None\n if new_boat_pos[1] < 0 \\\n or new_boat_pos[1] >= len(map[new_boat_pos[0]]):\n return None\n if map[new_boat_pos[0]][new_boat_pos[1]] == \" \":\n map[new_boat_pos[0]][new_boat_pos[1]] = \"*\"\n map[new_pos[0]][new_pos[1]] = \"M\"\n map[manatee_pos[0]][manatee_pos[1]] = \" \"\n manatee_pos = new_pos\n return None\n return None", "def move_tile(self, target_row, target_col, val):\n # a little bit twisted here for the use of both solve_interior_tile and solve_col0_tile\n solved_row, solved_col = self.current_position(0, val)\n movements = \"\"\n if solved_row == target_row and solved_col == target_col:\n return \"\"\n if solved_row == target_row:\n if target_col > solved_col:\n movements = \"l\" * (target_col - solved_col) + \"urrdl\" * (\n target_col - solved_col - 1)\n else:\n movements = \"r\" * (solved_col - target_col) + \"ulldr\" * (\n solved_col - target_col - 1) + \"ulld\"\n elif solved_col == target_col:\n movements = \"u\" * (target_row - solved_row) + \"lddru\" * (\n target_row - solved_row - 1) + \"ld\"\n elif solved_col < target_col:\n if solved_col == 0:\n movements = \"l\" * (target_col - solved_col) + \"u\" * (\n target_row - solved_row) + \"rddlu\" * (\n target_row - solved_row - 1) + \"rdl\" + \"urrdl\" * (\n target_col - solved_col - 1)\n else:\n movements = \"l\" * (target_col - solved_col) + \"u\" * (\n target_row - solved_row) + \"lddru\" * (\n target_row - solved_row - 1) + \"rdl\" + \"urrdl\" * (\n target_col - solved_col - 1)\n elif solved_col > target_col:\n if solved_row == 0:\n movements = \"u\" * (target_row - solved_row) + \"r\" * (\n solved_col - target_col) + \"dllur\" * (\n solved_col - target_col - 1) + \"dlu\" + \"lddru\" * (\n target_row - solved_row - 1) + \"ld\"\n else:\n movements = \"u\" * (target_row - solved_row) + \"r\" * (\n solved_col - target_col) + \"ulldr\" * (\n solved_col - target_col - 1) + \"ullddru\" + \"lddru\" * (\n target_row - solved_row - 1) + \"ld\"\n return movements", "def update_player(self, old_y, old_x, new_y, new_x):\n self.maze[old_y][old_x] = \" \"\n self.maze[new_y][new_x] = \"m\"", "def position_tile(self, target_row, target_col, cur_row, cur_col, need_ld=True):\n move_str = ''\n if cur_row == target_row:\n if cur_col < target_col:\n move_str += 'l' * (target_col - cur_col)\n if target_col - cur_col > 1:\n move_str += 'ur'\n move_str += 'druldru' * (target_col - cur_col - 1)\n else:\n move_str += 'ur' if not need_ld else ''\n need_ld = False\n else:\n move_str += 'r' * (cur_col - target_col)\n if cur_col - target_col > 1:\n move_str += 'ul'\n move_str += 'dlurdlu' * (cur_col - target_col - 1)\n else:\n need_ld = False\n else:\n move_str += 'u' * (target_row - cur_row)\n if cur_col < target_col:\n 
move_str += ('l' * (target_col - cur_col) + 'dru')\n move_str += 'druldru' * (target_col - cur_col - 1)\n move_str += 'lddru' * (target_row - cur_row - 1)\n elif cur_col > target_col:\n move_str += ('r' * (cur_col - target_col) + 'dlu')\n move_str += 'dlurdlu' * (cur_col - target_col - 1)\n move_str += 'lddru' * (target_row - cur_row - 1)\n else:\n move_str += 'lddru' * (target_row - cur_row - 1)\n if need_ld:\n move_str += 'ld'\n return move_str", "def move_pieces(self, starting_loc, ending_loc):\n\n self._game_board[ending_loc[0]][ending_loc[1]] = \\\n self._game_board[starting_loc[0]][starting_loc[1]]\n self._game_board[starting_loc[0]][starting_loc[1]] = \"____\"", "def update_puzzle(self, move_string):\r\n zero_row, zero_col = self.current_position(0, 0)\r\n for direction in move_string:\r\n if direction == \"l\":\r\n assert zero_col > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\r\n self._grid[zero_row][zero_col - 1] = 0\r\n zero_col -= 1\r\n elif direction == \"r\":\r\n assert zero_col < self._width - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\r\n self._grid[zero_row][zero_col + 1] = 0\r\n zero_col += 1\r\n elif direction == \"u\":\r\n assert zero_row > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\r\n self._grid[zero_row - 1][zero_col] = 0\r\n zero_row -= 1\r\n elif direction == \"d\":\r\n assert zero_row < self._height - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\r\n self._grid[zero_row + 1][zero_col] = 0\r\n zero_row += 1\r\n else:\r\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\r\n zero_row, zero_col = self.current_position(0, 0)\r\n for direction in move_string:\r\n if direction == \"l\":\r\n assert zero_col > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\r\n self._grid[zero_row][zero_col - 1] = 0\r\n zero_col -= 1\r\n elif direction == \"r\":\r\n assert zero_col < self._width - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\r\n self._grid[zero_row][zero_col + 1] = 0\r\n zero_col += 1\r\n elif direction == \"u\":\r\n assert zero_row > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\r\n self._grid[zero_row - 1][zero_col] = 0\r\n zero_row -= 1\r\n elif direction == \"d\":\r\n assert zero_row < self._height - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\r\n self._grid[zero_row + 1][zero_col] = 0\r\n zero_row += 1\r\n else:\r\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\r\n zero_row, zero_col = self.current_position(0, 0)\r\n for direction in move_string:\r\n if direction == \"l\":\r\n assert zero_col > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\r\n self._grid[zero_row][zero_col - 1] = 0\r\n zero_col -= 1\r\n elif direction == \"r\":\r\n assert zero_col < self._width - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\r\n self._grid[zero_row][zero_col + 1] = 0\r\n zero_col += 1\r\n elif direction == \"u\":\r\n assert zero_row > 0, \"move off grid: \" + direction\r\n 
self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\r\n self._grid[zero_row - 1][zero_col] = 0\r\n zero_row -= 1\r\n elif direction == \"d\":\r\n assert zero_row < self._height - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\r\n self._grid[zero_row + 1][zero_col] = 0\r\n zero_row += 1\r\n else:\r\n assert False, \"invalid direction: \" + direction", "def add_char(self, coord, char, modify=False):\n if modify:\n range_y, range_x = self._map_dims\n new_coord = [coord[0]+range_y[0]-1, coord[1]+range_x[0]-1]\n self._screen.addch(new_coord[0], new_coord[1], char)\n self._screen.refresh()\n return new_coord\n else:\n self._screen.addch(coord[0], coord[1], char)\n self._screen.refresh()\n return coord" ]
[ "0.65772283", "0.63926816", "0.63005394", "0.6223129", "0.6160206", "0.5935179", "0.59308314", "0.5923742", "0.58938473", "0.5884255", "0.58656144", "0.5764994", "0.5759222", "0.57555896", "0.57501453", "0.5739916", "0.5730088", "0.5726009", "0.5652192", "0.559732", "0.5553417", "0.55496866", "0.5544832", "0.5528261", "0.5511959", "0.5509711", "0.54797", "0.54797", "0.54797", "0.5475018" ]
0.7330949
0
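
The negatives of the record just closed (the manatee-maze mover, the tile-puzzle solvers, update_player) all reduce to one grid-update idiom: bounds-check the target cell, write the marker there, blank the old cell, and return the new position. A dependency-free Python sketch of that idiom — every name here is illustrative, not taken from the record:

def move_entity(grid, pos, move, marker="M", empty=" "):
    """Move `marker` from `pos` by `move` on a 2D list grid, if legal."""
    new_pos = (pos[0] + move[0], pos[1] + move[1])
    # Reject moves that leave the (possibly ragged) grid.
    if not (0 <= new_pos[0] < len(grid)):
        return pos
    if not (0 <= new_pos[1] < len(grid[new_pos[0]])):
        return pos
    if grid[new_pos[0]][new_pos[1]] != empty:
        return pos  # blocked by another entity
    # Write the marker into the new cell and blank the old one.
    grid[new_pos[0]][new_pos[1]] = marker
    grid[pos[0]][pos[1]] = empty
    return new_pos

grid = [list("   "), list(" M "), list("   ")]
pos = move_entity(grid, (1, 1), (-1, 0))
assert pos == (0, 1) and grid[0][1] == "M"
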
Initialize appointment's creation workflow; Pass to date definition
def create_appointment():
    msg = render_template('date')
    return question(msg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_workflow():\n pass", "def test_cron_workflow_service_create_cron_workflow(self):\n pass", "def _create_schedules(self):\n\n ''''''", "def adc_api_workflow_create():\n workflow_json = request.get_json(force=True)\n\n return jsonify(adc.workflow_create(workflow_json=workflow_json))", "def __init__(__self__, *,\n absolute_monthly: Optional[pulumi.Input['AbsoluteMonthlyScheduleArgs']] = None,\n daily: Optional[pulumi.Input['DailyScheduleArgs']] = None,\n relative_monthly: Optional[pulumi.Input['RelativeMonthlyScheduleArgs']] = None,\n weekly: Optional[pulumi.Input['WeeklyScheduleArgs']] = None):\n if absolute_monthly is not None:\n pulumi.set(__self__, \"absolute_monthly\", absolute_monthly)\n if daily is not None:\n pulumi.set(__self__, \"daily\", daily)\n if relative_monthly is not None:\n pulumi.set(__self__, \"relative_monthly\", relative_monthly)\n if weekly is not None:\n pulumi.set(__self__, \"weekly\", weekly)", "def __init__(self):\n\n # Application handle\n self.application = None\n\n # Workflow name\n self.name = None\n\n # Workflow data\n self.data = None", "def __init__(__self__, *,\n object_type: pulumi.Input[str],\n absolute_criteria: Optional[pulumi.Input[Sequence[pulumi.Input[Union[str, 'AbsoluteMarker']]]]] = None,\n days_of_month: Optional[pulumi.Input[Sequence[pulumi.Input['DayArgs']]]] = None,\n days_of_the_week: Optional[pulumi.Input[Sequence[pulumi.Input[Union[str, 'DayOfWeek']]]]] = None,\n months_of_year: Optional[pulumi.Input[Sequence[pulumi.Input[Union[str, 'Month']]]]] = None,\n schedule_times: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n weeks_of_the_month: Optional[pulumi.Input[Sequence[pulumi.Input[Union[str, 'WeekNumber']]]]] = None):\n pulumi.set(__self__, \"object_type\", 'ScheduleBasedBackupCriteria')\n if absolute_criteria is not None:\n pulumi.set(__self__, \"absolute_criteria\", absolute_criteria)\n if days_of_month is not None:\n pulumi.set(__self__, \"days_of_month\", days_of_month)\n if days_of_the_week is not None:\n pulumi.set(__self__, \"days_of_the_week\", days_of_the_week)\n if months_of_year is not None:\n pulumi.set(__self__, \"months_of_year\", months_of_year)\n if schedule_times is not None:\n pulumi.set(__self__, \"schedule_times\", schedule_times)\n if weeks_of_the_month is not None:\n pulumi.set(__self__, \"weeks_of_the_month\", weeks_of_the_month)", "def create_appointment():\n\n form = AppointmentForm()\n\n if form.validate_on_submit():\n\n appointment = Appointment(\n title = form.title.data,\n description = form.description.data,\n location = form.location.data,\n start = form.start.data,\n client = form.client.data,\n user = current_user\n )\n\n try:\n db.session.add(appointment)\n db.session.commit()\n\n flash('Successfully created the appointment.')\n\n return redirect(url_for('appointment.read_appointments'))\n except:\n flash('Error creating the appointment')\n\n return render_template('appointments/form.html.j2', form=form, title='Create appointment')", "def __init__(self, event, recurrence=None):\n super().__init__(protocol=event.protocol,\n main_resource=event.main_resource)\n\n self._event = event\n recurrence = recurrence or {}\n # recurrence pattern\n recurrence_pattern = recurrence.get(self._cc('pattern'), {})\n\n self.__interval = recurrence_pattern.get(self._cc('interval'), None)\n self.__days_of_week = recurrence_pattern.get(self._cc('daysOfWeek'),\n set())\n self.__first_day_of_week = recurrence_pattern.get(\n self._cc('firstDayOfWeek'), None)\n if 'type' in recurrence_pattern.keys():\n if 
'weekly' not in recurrence_pattern['type'].lower():\n self.__first_day_of_week = None\n \n self.__day_of_month = recurrence_pattern.get(self._cc('dayOfMonth'),\n None)\n self.__month = recurrence_pattern.get(self._cc('month'), None)\n self.__index = recurrence_pattern.get(self._cc('index'), 'first')\n\n # recurrence range\n recurrence_range = recurrence.get(self._cc('range'), {})\n\n self.__occurrences = recurrence_range.get(\n self._cc('numberOfOccurrences'), None)\n self.__start_date = recurrence_range.get(self._cc('startDate'), None)\n self.__end_date = recurrence_range.get(self._cc('endDate'), None)\n self.__recurrence_time_zone = recurrence_range.get(\n self._cc('recurrenceTimeZone'),\n get_windows_tz(self.protocol.timezone))\n # time and time zones are not considered in recurrence ranges...\n # I don't know why 'recurrenceTimeZone' is present here\n # Sending a startDate datetime to the server results in an Error:\n # Cannot convert the literal 'datetime' to the expected type 'Edm.Date'\n if recurrence_range:\n self.__start_date = parse(\n self.__start_date).date() if self.__start_date else None\n self.__end_date = parse(\n self.__end_date).date() if self.__end_date else None", "def create_instance(self, date):\n raise NotImplementedError", "def do_create(service,summary,description,startday,\\\n starttime,endtime,username,email):\n event = {\n 'summary': 'Code Clinic: {}'.format(summary),\n 'description': '{}.'.format(description),\n 'start': {\n 'dateTime': '{}T{}:00'.format(startday, starttime),\n 'timeZone': 'GMT+02',\n },\n 'end': {\n 'dateTime': '{}T{}:00'.format(startday,endtime),\n 'timeZone': 'GMT+02',\n },\n 'recurrence': [\n 'RRULE:FREQ=DAILY;COUNT=1'\n ],\n 'attendees': [\n {\n 'displayName': username,\n 'email': email,\n 'optional': True,\n 'comment': 'Creator',\n 'responseStatus': 'accepted',\n },\n ],\n 'anyoneCanAddSelf': True,\n\n 'reminders': {\n 'useDefault': False,\n 'overrides': [\n {'method': 'email', 'minutes': 24 * 60},\n {'method': 'popup', 'minutes': 10},\n ],\n },\n }\n\n event = service.events().insert(calendarId='primary', body=event,\\\n sendUpdates='all').execute()\n\n return event", "def action_makeMeeting(self, cr, uid, ids, context=None):\n opportunity = self.browse(cr, uid, ids[0], context)\n res = self.pool.get('ir.actions.act_window').for_xml_id(cr, uid, 'base_calendar', 'action_crm_meeting', context)\n res['context'] = {\n 'default_opportunity_id': opportunity.id,\n 'default_partner_id': opportunity.partner_id and opportunity.partner_id.id or False,\n 'default_partner_ids' : opportunity.partner_id and [opportunity.partner_id.id] or False,\n 'default_user_id': uid,\n 'default_section_id': opportunity.section_id and opportunity.section_id.id or False,\n 'default_email_from': opportunity.email_from,\n 'default_state': 'open',\n 'default_name': opportunity.name,\n }\n return res", "def __init__(self,id,appointment_time,description):\n self.id = id\n self.appointment_time = appointment_time\n self.description = description", "def __init__(__self__, *,\n can_defer: bool,\n can_reschedule: bool,\n schedule_deadline_time: str,\n start_time: str):\n pulumi.set(__self__, \"can_defer\", can_defer)\n pulumi.set(__self__, \"can_reschedule\", can_reschedule)\n pulumi.set(__self__, \"schedule_deadline_time\", schedule_deadline_time)\n pulumi.set(__self__, \"start_time\", start_time)", "def create( self ):\r\n for rsrc in self.ee.getRsrcs( ):\r\n self.schedule[rsrc.getid( )] = [ ]", "def __init__(__self__, *,\n duration_hours: pulumi.Input[int],\n schedule: 
pulumi.Input['ScheduleArgs'],\n start_time: pulumi.Input[str],\n not_allowed_dates: Optional[pulumi.Input[Sequence[pulumi.Input['DateSpanArgs']]]] = None,\n start_date: Optional[pulumi.Input[str]] = None,\n utc_offset: Optional[pulumi.Input[str]] = None):\n if duration_hours is None:\n duration_hours = 24\n pulumi.set(__self__, \"duration_hours\", duration_hours)\n pulumi.set(__self__, \"schedule\", schedule)\n pulumi.set(__self__, \"start_time\", start_time)\n if not_allowed_dates is not None:\n pulumi.set(__self__, \"not_allowed_dates\", not_allowed_dates)\n if start_date is not None:\n pulumi.set(__self__, \"start_date\", start_date)\n if utc_offset is not None:\n pulumi.set(__self__, \"utc_offset\", utc_offset)", "def create(self, validated_data):\n tags = validated_data.pop(\"tags\", [])\n attachments = validated_data.pop(\"attachments\", [])\n request_user = validated_data.pop(\"request_user\") # this should always be there\n agenda_create = validated_data.pop(\"agenda_create\", None)\n agenda_type = validated_data.pop(\"agenda_type\", None)\n agenda_parent_id = validated_data.pop(\"agenda_parent_id\", None)\n\n assignment = Assignment(**validated_data)\n if has_perm(request_user, \"agenda.can_manage\"):\n assignment.agenda_item_update_information[\"create\"] = agenda_create\n assignment.agenda_item_update_information[\"type\"] = agenda_type\n assignment.agenda_item_update_information[\"parent_id\"] = agenda_parent_id\n\n assignment.save()\n assignment.tags.add(*tags)\n assignment.attachments.add(*attachments)\n inform_changed_data(assignment)\n return assignment", "def create():\n config = request.data\n return add_scheduling_block(config)", "def __init__(self, initial_date=None, until_date=None):\n self.initial_date = initial_date\n self.until_date = until_date\n\n log.debug('self.initial_date: {}'.format(self.initial_date))\n log.debug('self.until_date: {}'.format(self.until_date))", "def test_meeting_create(self):\n pass", "def create(self):\n \n # create the sequence structure by calling the self.project.create\n self.project.create()", "def i_see_the_set_dates(_step):\r\n verify_date_or_time(COURSE_START_DATE_CSS, '12/20/2013')\r\n verify_date_or_time(COURSE_END_DATE_CSS, '12/26/2013')\r\n verify_date_or_time(ENROLLMENT_START_DATE_CSS, '12/01/2013')\r\n verify_date_or_time(ENROLLMENT_END_DATE_CSS, '12/10/2013')\r\n\r\n verify_date_or_time(COURSE_START_TIME_CSS, DUMMY_TIME)\r\n # Unset times get set to 12 AM once the corresponding date has been set.\r\n verify_date_or_time(COURSE_END_TIME_CSS, DEFAULT_TIME)\r\n verify_date_or_time(ENROLLMENT_START_TIME_CSS, DEFAULT_TIME)\r\n verify_date_or_time(ENROLLMENT_END_TIME_CSS, DUMMY_TIME)", "def test_change_workflow_definition(self):\n pass", "def __init__(self, workflow):\n self.workflow = workflow", "def __init__(self, valid_days=None, start_date=None, schedule_typicality=None, schedule_type=None, schedule_name=None, removed_dates_notes=None, removed_dates=None, rating_start_date=None, rating_end_date=None, rating_description=None, end_date=None, description=None, added_dates_notes=None, added_dates=None, local_vars_configuration=None): # noqa: E501 # noqa: E501\n if local_vars_configuration is None:\n local_vars_configuration = Configuration()\n self.local_vars_configuration = local_vars_configuration\n\n self._valid_days = None\n self._start_date = None\n self._schedule_typicality = None\n self._schedule_type = None\n self._schedule_name = None\n self._removed_dates_notes = None\n self._removed_dates = None\n self._rating_start_date 
= None\n self._rating_end_date = None\n self._rating_description = None\n self._end_date = None\n self._description = None\n self._added_dates_notes = None\n self._added_dates = None\n self.discriminator = None\n\n if valid_days is not None:\n self.valid_days = valid_days\n if start_date is not None:\n self.start_date = start_date\n if schedule_typicality is not None:\n self.schedule_typicality = schedule_typicality\n self.schedule_type = schedule_type\n self.schedule_name = schedule_name\n if removed_dates_notes is not None:\n self.removed_dates_notes = removed_dates_notes\n if removed_dates is not None:\n self.removed_dates = removed_dates\n self.rating_start_date = rating_start_date\n self.rating_end_date = rating_end_date\n self.rating_description = rating_description\n if end_date is not None:\n self.end_date = end_date\n self.description = description\n if added_dates_notes is not None:\n self.added_dates_notes = added_dates_notes\n if added_dates is not None:\n self.added_dates = added_dates", "def create_appointment_form(request, post):\n # string_date = \"{0}-{1}-{2}\".format(year, month, day)\n # date = datetime.datetime.strptime(string_date, '%Y-%m-%d').date()\n new_appointment = None\n date_string = post.get(\"date\") + \"-\" + post.get(\"time\")\n date = datetime.datetime.strptime(date_string, '%Y-%m-%d-%H:%M')\n the_user = request.user\n notes = post.get(\"notes\")\n\n if the_user.userprofile.is_doctor():\n patient_id = int(post.get(\"patient\", the_user.pk))\n patient = User.objects.get(pk=patient_id)\n doctor = User.objects.get(pk=the_user.id)\n new_appointment = Appointment.objects.create(date=date, doctor=doctor, patient=patient, notes=notes)\n\n elif request.user.userprofile.is_patient():\n doctor_id = int(post.get(\"doctor\", the_user.pk))\n doctor = User.objects.get(pk=doctor_id)\n patient = User.objects.get(pk=the_user.id)\n new_appointment = Appointment.objects.create(date=date, doctor=doctor, patient=patient, notes=notes)\n\n return new_appointment", "def init_date( self ) -> datetime:\n return datetime( 2011 ,2 ,1 )", "def __init__(self):\n self.swagger_types = {\n 'id': 'str',\n 'start_date': 'datetime',\n 'length_minutes': 'int',\n 'activities': 'list[BuAgentScheduleActivity]',\n 'manually_edited': 'bool',\n 'schedule': 'BuScheduleReference'\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'start_date': 'startDate',\n 'length_minutes': 'lengthMinutes',\n 'activities': 'activities',\n 'manually_edited': 'manuallyEdited',\n 'schedule': 'schedule'\n }\n\n self._id = None\n self._start_date = None\n self._length_minutes = None\n self._activities = None\n self._manually_edited = None\n self._schedule = None", "def appointment_date(begin_date):\n\n session.attributes['begin_date'] = str(begin_date)\n qs = render_template('time')\n return question(qs)", "def initCreate(self , initialconfig):\n return" ]
[ "0.6407841", "0.5967415", "0.58864933", "0.57573587", "0.5667341", "0.5589588", "0.5531992", "0.55230147", "0.5515364", "0.54900604", "0.5475908", "0.5444646", "0.544376", "0.54394084", "0.54327065", "0.541028", "0.5404114", "0.5402616", "0.5401141", "0.5400178", "0.5390388", "0.53760487", "0.53687614", "0.5352877", "0.5351499", "0.53275573", "0.53219986", "0.53214395", "0.53160495", "0.531021" ]
0.65402335
0
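
The `question`/`render_template`/`session.attributes` idiom in this record and the ones that follow is the Flask-Ask style for Alexa skills. A hedged sketch of how a handler like create_appointment is typically wired up, assuming Flask-Ask — the app setup, decorator, and intent name below are my assumptions; only the function body comes from the record:

from flask import Flask, render_template
from flask_ask import Ask, question

app = Flask(__name__)
ask = Ask(app, '/')

@ask.intent('ScheduleAppointmentIntent')  # intent name is hypothetical
def create_appointment():
    # First turn of the slot-filling dialog: prompt the user for a date.
    msg = render_template('date')  # 'date' template name comes from the record
    return question(msg)
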
Set appointment's begin date; Pass to appointment's begin time
def appointment_date(begin_date):
    session.attributes['begin_date'] = str(begin_date)
    qs = render_template('time')
    return question(qs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_begin_date(self, begin_date):\n self.set_value_into_input_field(self.begin_date_inputbox_locator, begin_date)", "def begin_date(self, value):\n\n if not isinstance(value, datetime):\n raise TypeError(_pretty_message(\n '''\n begin_date must be an instance of datetime.datetime, not %s\n ''',\n _type_name(value)\n ))\n\n self._begin_date = value", "def appointment_time(begin_time):\n\n session.attributes['begin_time'] = str(begin_time)\n msg = render_template('end_date')\n return question(msg)", "def set_start_date(self, date):\n pass", "def begin_time(self, begin_time):\n if begin_time is None:\n raise ValueError(\"Invalid value for `begin_time`, must not be `None`\") # noqa: E501\n\n self._begin_time = begin_time", "def set_adjustment_charge_begin_date(self, begin_date):\n self.set_value_into_input_field(self.adjustment_begin_date_locator, begin_date)", "def set_billing_cycle_begin_date(self, begin_date):\n if begin_date == \"\":\n current_date = datetime.date.today()\n begin_date = current_date.replace(day=1)\n begin_date = begin_date.strftime(\"%m/%d/%Y\")\n self.set_value_into_input_field(self.billing_cycle_begin_date_inputbox_locator, begin_date)", "def set_startTime(self, startTime):\n self.startTime = mktime(startTime)", "def set_bulk_add_begin_date(self, begin_date):\n if begin_date == \"\":\n begin_date = self.get_date(current_date=True)\n self.set_value_into_input_field(self.bulk_add_begin_date_inputbox_locator, begin_date)\n global bulk_add_begin_date\n bulk_add_begin_date = datetime.datetime.strptime(begin_date, \"%m/%d/%Y\")\n return begin_date", "def set_statement_begin_date(self, begin_date):\n begin_date_to_set = None\n if begin_date != \"\":\n begin_date_to_set = begin_date\n else:\n self.first_day_of_previous_month = self.get_date(first_day_of_last_month=True)\n begin_date_to_set = self.first_day_of_previous_month\n self.set_value_into_input_field(self.statement_begin_date_locator, begin_date_to_set)\n return begin_date_to_set", "def start_date(self, start_date):\n self._start_date = start_date", "def setStartTime(self, t0):\n self._simulator_.update(t0=t0)\n return", "def set_start_date(self, start_date):\n self.set_value_into_input_field(self.start_date_inputbox_locator, start_date)", "def set_start_time():\n __start = current_time_milli()", "def start_date(self, start_date):\n \n self._start_date = start_date", "def start_date(self, start_date):\n\n self._start_date = start_date", "def start_date(self, start_date):\n\n self._start_date = start_date", "def start_date(self, start_date):\n\n self._start_date = start_date", "def start_date(self, start_date):\n\n self._start_date = start_date", "def start_date(self, start_date):\n\n self._start_date = start_date", "def start_date(self, start_date):\n\n self._start_date = start_date", "def start_date(self, start_date):\n\n self._start_date = start_date", "def start_date(self, start_date):\n\n self._start_date = start_date", "def start_date(self, start_date):\n\n self._start_date = start_date", "def start_time(self, start_time):\n self._start_time = start_time", "def start_time(self, start_time):\n self._start_time = start_time", "def set_begin_date_for_search(self, begin_date):\n self.set_value_into_input_field(self.begin_date_locator, begin_date)", "def start_time(self, start_time):\n\n self._start_time = start_time", "def start_time(self, start_time):\n\n self._start_time = start_time", "def start_time(self, start_time):\n\n self._start_time = start_time" ]
[ "0.7065128", "0.7005564", "0.697397", "0.6942992", "0.65339845", "0.64918524", "0.6489678", "0.647476", "0.6436995", "0.6375545", "0.62956077", "0.61982006", "0.61860764", "0.61311483", "0.6118447", "0.6097708", "0.6097708", "0.6097708", "0.6097708", "0.6097708", "0.6097708", "0.6097708", "0.6097708", "0.6097708", "0.60419124", "0.60419124", "0.60390633", "0.598063", "0.598063", "0.598063" ]
0.7202706
0
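
Each step in this dialog chain stores one value in session.attributes and hands off to the next prompt. A dependency-free simulation of that accumulation (names mirror the record; the plain dict stands in for the Alexa session):

# Stand-in for flask_ask's session.attributes.
session_attributes = {}

def handle_begin_date(begin_date):
    # Store the slot as a string, exactly as the record does.
    session_attributes['begin_date'] = str(begin_date)
    return 'time'  # name of the next prompt template

next_template = handle_begin_date('2024-05-01')
assert session_attributes == {'begin_date': '2024-05-01'}
assert next_template == 'time'
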
Set appointment's begin_time; Pass to appointment's end date
def appointment_time(begin_time):
    session.attributes['begin_time'] = str(begin_time)
    msg = render_template('end_date')
    return question(msg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def appointment_date(begin_date):\n\n session.attributes['begin_date'] = str(begin_date)\n qs = render_template('time')\n return question(qs)", "def begin_time(self, begin_time):\n if begin_time is None:\n raise ValueError(\"Invalid value for `begin_time`, must not be `None`\") # noqa: E501\n\n self._begin_time = begin_time", "def set_startTime(self, startTime):\n self.startTime = mktime(startTime)", "def setStartTime(self, t0):\n self._simulator_.update(t0=t0)\n return", "def start_time(self, start_time):\n self._start_time = start_time", "def start_time(self, start_time):\n self._start_time = start_time", "def begin_date(self, value):\n\n if not isinstance(value, datetime):\n raise TypeError(_pretty_message(\n '''\n begin_date must be an instance of datetime.datetime, not %s\n ''',\n _type_name(value)\n ))\n\n self._begin_date = value", "def start_time(self, start_time):\n\n self._start_time = start_time", "def start_time(self, start_time):\n\n self._start_time = start_time", "def start_time(self, start_time):\n\n self._start_time = start_time", "def start_time(self, start_time):\n\n self._start_time = start_time", "def start_time(self, start_time):\n\n self._start_time = start_time", "def appointment_end_date(end_date):\n\n session.attributes['end_date'] = str(end_date)\n msg = render_template('end_time')\n return question(msg)", "def set_begin_date(self, begin_date):\n self.set_value_into_input_field(self.begin_date_inputbox_locator, begin_date)", "def start_time(self, value):\n self._start_time = value", "def appointment_end_time(end_time):\n\n session.attributes['end_time'] = str(end_time)\n form = AppointmentForm(session.attributes)\n form.submit()\n return render_result(form)", "def set_begin(self, tp):\n if isinstance(tp, sppasPoint) is False:\n raise AnnDataTypeError(tp, \"sppasPoint\")\n\n if sppasInterval.check_types(tp, self.__end) is False:\n raise AnnDataEqTypeError(tp, self.__end)\n\n if sppasInterval.check_interval_bounds(tp, self.__end) is False:\n raise IntervalBoundsError(tp, self.__end)\n\n # assign the reference\n self.__begin = tp", "def set_start_date(self, date):\n pass", "def __init__(__self__, *,\n end_date: str,\n start_date: str,\n time: str):\n pulumi.set(__self__, \"end_date\", end_date)\n pulumi.set(__self__, \"start_date\", start_date)\n pulumi.set(__self__, \"time\", time)", "def __init__(__self__, *,\n end_time: pulumi.Input[str],\n start_time: pulumi.Input[str]):\n pulumi.set(__self__, \"end_time\", end_time)\n pulumi.set(__self__, \"start_time\", start_time)", "def set_start_time():\n __start = current_time_milli()", "def start(self):\n if self.start_time is None:\n time = datetime.time(hour=19, tzinfo=CET)\n else:\n time = self.start_time.replace(tzinfo=CET)\n return datetime.datetime.combine(self.date, time)", "def __set_starting_time(self, starting_time):\n if not isinstance(starting_time, int):\n raise TypeError('The starting time should be an integer')\n if starting_time < 0:\n raise ValueError('The starting time should be positive')\n self.__starting_time = starting_time", "def start(self, start):\n # type: (datetime) -> None\n\n if start is not None:\n if not isinstance(start, datetime):\n raise TypeError(\"Invalid type for `start`, type has to be `datetime`\")\n\n self._start = start", "def set_time_data(self, start_time, end_time):\n ev_time_start = datetime.fromtimestamp(start_time) or datetime.now()\n ev_time_end = datetime.fromtimestamp(end_time) or datetime.now()\n 
self._timer_begins_entry.set_text(ev_time_start.strftime(self._TIME_STR))\n self._timer_ends_entry.set_text(ev_time_end.strftime(self._TIME_STR))\n self._timer_begins_calendar.select_day(ev_time_start.day)\n self._timer_begins_calendar.select_month(ev_time_start.month - 1, ev_time_start.year)\n self._timer_ends_calendar.select_day(ev_time_end.day)\n self._timer_ends_calendar.select_month(ev_time_end.month - 1, ev_time_end.year)\n self._timer_begins_hr_button.set_value(ev_time_start.hour)\n self._timer_begins_min_button.set_value(ev_time_start.minute)\n self._timer_ends_hr_button.set_value(ev_time_end.hour)\n self._timer_ends_min_button.set_value(ev_time_end.minute)", "def test_set_begin_and_end_for_emp(self):\n start = timezone.make_aware(dt.datetime(2016, 6, 3, 6, 30))\n stop = timezone.make_aware(dt.datetime(2016, 6, 3, 10, 30))\n expected_begin = timezone.make_aware(dt.datetime(2016, 6, 3, 6, 30))\n expected_end = timezone.make_aware(dt.datetime(2016, 6, 2, 14, 32))\n\n example_employee = RawClockData.objects.first()\n begin, end = set_begin_and_end_for_emp(\n employee=example_employee,\n start=start,\n stop=stop,\n )\n\n self.assertEqual(expected_begin, begin)\n self.assertEqual(expected_end, end)", "def setTimepoint(self, tp):\n\t\tpass", "def set_available_time_slot():\n if request.content_type != 'application/json':\n error = json.dumps({'error': 'Invalid Content Type'})\n return make_response(error, 400, InterviewCalendarApi.HEADERS)\n\n data = request.json\n # For Temporary purpose, stored in flat file database\n with open(InterviewCalendarApi.DB_FILE, \"a+\") as fd:\n record = \"%s|%s|%s|%s\\n\" %(data[\"Category\"], data[\"Name\"],\n data[\"Email\"], \",\".join(data[\"AvailablityDateTime\"]))\n fd.write(record)\n msg = json.dumps({\"Status\": \"Success\"})\n return make_response(msg, 200, InterviewCalendarApi.HEADERS)", "def set_start_time(self, *args, **kwargs):\n return _uhd_swig.usrp_source_set_start_time(self, *args, **kwargs)", "def set_start_time(self, *args, **kwargs):\n return _uhd_swig.usrp_sink_set_start_time(self, *args, **kwargs)" ]
[ "0.6985581", "0.65532726", "0.64899975", "0.6089194", "0.60122746", "0.60122746", "0.60034096", "0.59809184", "0.59809184", "0.59809184", "0.59809184", "0.59809184", "0.59553194", "0.5918142", "0.5905272", "0.5746406", "0.5744839", "0.57072085", "0.56862944", "0.5659291", "0.5643778", "0.5636407", "0.5543021", "0.5538996", "0.55298233", "0.5514213", "0.55130285", "0.54792607", "0.54751194", "0.54633623" ]
0.77024466
0
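
Because each handler stores its slot as a string, whatever consumes session.attributes later has to parse the pieces back into real datetimes. A sketch of that recombination — the date and time formats below are assumptions, not specified by the record:

from datetime import datetime

# Slots as the handlers above would have stored them.
attrs = {'begin_date': '2024-05-01', 'begin_time': '14:30'}
begin = datetime.strptime(attrs['begin_date'] + ' ' + attrs['begin_time'],
                          '%Y-%m-%d %H:%M')  # assumed formats
assert (begin.year, begin.hour, begin.minute) == (2024, 14, 30)
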
Set appointment's end date; Pass to appointment's end time
def appointment_end_date(end_date):
    session.attributes['end_date'] = str(end_date)
    msg = render_template('end_time')
    return question(msg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_end_date(self, date):\n pass", "def end_date_time(self, end_date_time):\n\n self._end_date_time = end_date_time", "def end_date(self, end_date):\n self._end_date = end_date", "def change_end_date(self, new_end_date):\n self.end_date = new_end_date", "def appointment_end_time(end_time):\n\n session.attributes['end_time'] = str(end_time)\n form = AppointmentForm(session.attributes)\n form.submit()\n return render_result(form)", "def end_date(self, end_date):\n\n self._end_date = end_date", "def end_date(self, end_date):\n\n self._end_date = end_date", "def end_date(self, end_date):\n\n self._end_date = end_date", "def end_date(self, end_date):\n\n self._end_date = end_date", "def end_date(self, end_date):\n\n self._end_date = end_date", "def end_date(self, end_date):\n\n self._end_date = end_date", "def end_date(self, end_date):\n\n self._end_date = end_date", "def end_date(self, end_date):\n\n self._end_date = end_date", "def end_date(self, value):\n\n if not isinstance(value, datetime):\n raise TypeError(_pretty_message(\n '''\n end_date must be an instance of datetime.datetime, not %s\n ''',\n _type_name(value)\n ))\n\n self._end_date = value", "def end_date(self, end_date):\n if end_date is None:\n end_date = datetime.utcnow()\n\n self._end_date = dt_utils.parse_date(end_date)", "def end_date(self, end_date):\n if end_date is None:\n raise ValueError(\"Invalid value for `end_date`, must not be `None`\")\n\n self._end_date = end_date", "def end_time(self, end_time):\n self._end_time = end_time", "def end_time(self, end_time):\n self._end_time = end_time", "def end_time(self, end_time):\n\n self._end_time = end_time", "def end_time(self, end_time):\n\n self._end_time = end_time", "def end_time(self, end_time):\n\n self._end_time = end_time", "def end(self, end):\n # type: (datetime) -> None\n\n if end is not None:\n if not isinstance(end, datetime):\n raise TypeError(\"Invalid type for `end`, type has to be `datetime`\")\n\n self._end = end", "def _set_end_time(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=unicode, default=unicode(\"\"), is_leaf=True, yang_name=\"end-time\", rest_name=\"to\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'To time period granting user access', u'alt-name': u'to', u'cli-suppress-no': None, u'display-when': u'(../access-time)'}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='string', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"end_time must be of a type compatible with string\"\"\",\n 'defined-type': \"string\",\n 'generated-type': \"\"\"YANGDynClass(base=unicode, default=unicode(\"\"), is_leaf=True, yang_name=\"end-time\", rest_name=\"to\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'To time period granting user access', u'alt-name': u'to', u'cli-suppress-no': None, u'display-when': u'(../access-time)'}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='string', is_config=True)\"\"\",\n })\n\n self.__end_time = t\n if hasattr(self, '_set'):\n self._set()", "def set_statement_end_date(self, end_date):\n end_date_to_set = None\n if end_date != \"\":\n end_date_to_set = end_date\n else:\n end_date_to_set = self.get_date(last_day_of_last_month=True)\n 
self.set_value_into_input_field(self.statement_end_date_locator, end_date_to_set)\n return end_date_to_set", "def model_end_date(self, model_end_date):\n\n self._model_end_date = model_end_date", "def end(self, end):\n if end is None:\n self._set('end', end)\n else:\n try:\n self._set('end', Timestamp.to_datetime(end))\n except (TypeError, ValueError) as e:\n raise ValidationError(e)", "def set_adjustment_charge_end_date(self, end_date):\n self.set_value_into_input_field(self.end_date_locator, end_date)", "def end_time(self, end_time):\n if end_time is None:\n raise ValueError(\"Invalid value for `end_time`, must not be `None`\") # noqa: E501\n\n self._end_time = end_time", "def end_date(self, end_date):\n if self.local_vars_configuration.client_side_validation and end_date is None: # noqa: E501\n raise ValueError(\"Invalid value for `end_date`, must not be `None`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n end_date is not None and len(end_date) < 1):\n raise ValueError(\"Invalid value for `end_date`, length must be greater than or equal to `1`\") # noqa: E501\n\n self._end_date = end_date", "def end_times(self, end_times):\n\n self._end_times = end_times" ]
[ "0.76225775", "0.7512484", "0.7296414", "0.72335875", "0.72125554", "0.71424943", "0.71424943", "0.71424943", "0.71424943", "0.71424943", "0.71424943", "0.71424943", "0.71424943", "0.7120749", "0.7087847", "0.6900266", "0.6880423", "0.6880423", "0.6826085", "0.6826085", "0.6826085", "0.6818457", "0.67594266", "0.6691261", "0.6601742", "0.65855616", "0.6448249", "0.63577175", "0.63454247", "0.6309113" ]
0.78313226
0
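
One validation a form built from these four slots would plausibly run is that the end does not precede the begin; the record itself never shows the form's rules, so this standalone check is purely a sketch:

from datetime import date

def validate_range(begin_date: date, end_date: date) -> bool:
    # Reject appointments that end before they start.
    return end_date >= begin_date

assert validate_range(date(2024, 5, 1), date(2024, 5, 1))
assert not validate_range(date(2024, 5, 2), date(2024, 5, 1))
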
Set appointment's end time; Create new appointment and render result
def appointment_end_time(end_time):
    session.attributes['end_time'] = str(end_time)
    form = AppointmentForm(session.attributes)
    form.submit()
    return render_result(form)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def appointment_time(begin_time):\n\n session.attributes['begin_time'] = str(begin_time)\n msg = render_template('end_date')\n return question(msg)", "def appointment_end_date(end_date):\n\n session.attributes['end_date'] = str(end_date)\n msg = render_template('end_time')\n return question(msg)", "def create_appointment():\n\n msg = render_template('date')\n return question(msg)", "def appointment_date(begin_date):\n\n session.attributes['begin_date'] = str(begin_date)\n qs = render_template('time')\n return question(qs)", "def set_available_time_slot():\n if request.content_type != 'application/json':\n error = json.dumps({'error': 'Invalid Content Type'})\n return make_response(error, 400, InterviewCalendarApi.HEADERS)\n\n data = request.json\n # For Temporary purpose, stored in flat file database\n with open(InterviewCalendarApi.DB_FILE, \"a+\") as fd:\n record = \"%s|%s|%s|%s\\n\" %(data[\"Category\"], data[\"Name\"],\n data[\"Email\"], \",\".join(data[\"AvailablityDateTime\"]))\n fd.write(record)\n msg = json.dumps({\"Status\": \"Success\"})\n return make_response(msg, 200, InterviewCalendarApi.HEADERS)", "def create_appointment(request):\n dates = get_dates()\n users = User.objects.all()\n\n if request.POST:\n new_appointment = create_appointment_form(request, request.POST)\n if new_appointment:\n messages.add_message(request, messages.SUCCESS, 'Your appointment as been created successfully.')\n else:\n messages.add_message(request, messages.ERROR, 'An error occurred. Your appointment could not be created.'\n 'If this error persists, try contacting our service desk at'\n '1-800-RIX-AJAZ')\n return redirect('view_appointments')\n\n return render(request, 'create_appointment.html', {'the_user': request.user,\n 'dates': dates,\n 'users': users,\n 'hours': range(1, 13),\n 'minutes': range(1, 60)})", "def send(self):\n event = gdata.calendar.CalendarEventEntry()\n event.title = atom.Title(text=self.title)\n event.content = atom.Content(text='')\n event.where.append(gdata.calendar.Where(value_string=self.location))\n # Set start time in 6 minutes\n start_time = time.strftime('%Y-%m-%dT%H:%M:%S.000Z',\n time.gmtime(time.time() + 6 * 60))\n # Set end time in an hour\n end_time = time.strftime('%Y-%m-%dT%H:%M:%S.000Z',\n time.gmtime(time.time() + 3600))\n event.when.append(gdata.calendar.When(start_time=start_time,\n end_time=end_time))\n minutes = 5\n for a_when in event.when:\n if len(a_when.reminder) > 0:\n # Adding reminder in 5 minutes before event (start_time)\n a_when.reminder[0].minutes = 5\n else:\n a_when.reminder.append(\n gdata.calendar.Reminder(minutes=minutes))\n # Insert new event\n new_event = self.calendar_service.InsertEvent(event,\n self.calendar_link)\n return new_event", "def create_appointment():\n\n form = AppointmentForm()\n\n if form.validate_on_submit():\n\n appointment = Appointment(\n title = form.title.data,\n description = form.description.data,\n location = form.location.data,\n start = form.start.data,\n client = form.client.data,\n user = current_user\n )\n\n try:\n db.session.add(appointment)\n db.session.commit()\n\n flash('Successfully created the appointment.')\n\n return redirect(url_for('appointment.read_appointments'))\n except:\n flash('Error creating the appointment')\n\n return render_template('appointments/form.html.j2', form=form, title='Create appointment')", "def create_patient_appointment():\n if request.method == 'POST':\n patient_email = request.form['patient_email']\n doctor_email = request.form['doctor_email']\n date = 
request.form['date']\n time = request.form['time']\n\n response = requests.post(server_url + 'patient/create_appointment', json={\n 'patient_email': patient_email,\n 'doctor_email': doctor_email,\n 'date': date,\n 'time': time\n })\n\n response = response.json()\n\n if response.get('Status') == \"DOCTOR_HAS_AN_APPOINTMENT_SELECTED_TIME_SLOT\":\n return render_template('patients/appointment_failed.html')\n elif response.get('Status') == \"DOCTOR_IS_NOT_AVAILABLE_AT_THAT_TIME\":\n return render_template('patients/appointment_failed.html')\n elif response.get('Status') == \"INVALID_PATIENT_EMAIL\":\n return render_template('patients/appointment_failed.html')\n elif response.get('Status') == \"INVALID_DOCTOR_EMAIL\":\n return render_template('patients/appointment_failed.html')\n else:\n referer = request.referrer\n return redirect(referer, code=302)\n else:\n return render_template('patients/dashboard.html')", "def save_appointment_details(request, calendar_id):\n def schedule_mail(reminder_date, appointment):\n # Configure our scheduler for reminder\n try:\n trigger = DateTrigger(run_date=reminder_date)\n scheduler.add_job(send_appointment_mail, args=[appointment], trigger=trigger)\n except Exception as exp:\n print(exp)\n \n def schedule_sms(reminder_date, appointment):\n # Configure our scheduler for reminder\n try:\n trigger = DateTrigger(run_date=reminder_date)\n scheduler.add_job(send_appointment_sms, args=[appointment], trigger=trigger)\n except Exception as exp:\n print(exp)\n \n start_time = request.GET['start_time'][:19]\n end_time = request.GET['end_time'][:19]\n \n start_time = datetime.strptime(start_time, \"%Y-%m-%dT%H:%M:%S\")\n end_time=datetime.strptime(end_time, \"%Y-%m-%dT%H:%M:%S\")\n \n calendar_obj = Calendar.objects.get(pk=calendar_id)\n # if this is a POST request we need to process the form data\n if request.method == 'POST':\n\n # create a form instance and populate it with data from the request:\n form = AppointmentForm(request.POST)\n\n # check whether it's valid and save it\n if form.is_valid():\n # Save appointment details\n \n mobilephone = form.data['mobilephone']\n email = form.data['email']\n first_name = form.data['first_name']\n last_name = form.data['last_name']\n notes = form.data['notes']\n\n appointment = Appointment(start_time=start_time, end_time=end_time, first_name=first_name, \n last_name=last_name, email=email, mobilephone=mobilephone, notes=notes)\n \n appointment.calendar = calendar_obj\n appointment.save()\n\n try:\n send_appointment_mail(appointment) # send appointment details email\n except Exception as exp:\n print(exp)\n \n try:\n send_appointment_sms(appointment) # send appointment details sms\n except Exception as exp:\n print(exp)\n \n # Calculate reminder schedule dates\n reminder1 = start_time - timedelta(hours=2)\n reminder2 = start_time - timedelta(hours=24)\n reminder3 = start_time - timedelta(days=7)\n\n # Schedule mails\n schedule_mail(reminder1, appointment)\n schedule_mail(reminder2, appointment)\n schedule_mail(reminder3, appointment)\n \n # Schedule sms\n schedule_sms(reminder1, appointment)\n schedule_sms(reminder2, appointment)\n schedule_sms(reminder3, appointment)\n \n return redirect(reverse('appointment:complete_appointment', args=[calendar_id]))\n \n # if a GET (or any other method) we'll create a blank form\n else:\n form = AppointmentForm()\n return render(request, 'appointment_form.html', {'form': form, 'start_time': start_time, 'end_time': end_time,\n 'office_location': calendar_obj.office_location})", "def 
__init__(self,id,appointment_time,description):\n self.id = id\n self.appointment_time = appointment_time\n self.description = description", "def end_time(self, end_time):\n self._end_time = end_time", "def end_time(self, end_time):\n self._end_time = end_time", "def create_appointment_form(request, post):\n # string_date = \"{0}-{1}-{2}\".format(year, month, day)\n # date = datetime.datetime.strptime(string_date, '%Y-%m-%d').date()\n new_appointment = None\n date_string = post.get(\"date\") + \"-\" + post.get(\"time\")\n date = datetime.datetime.strptime(date_string, '%Y-%m-%d-%H:%M')\n the_user = request.user\n notes = post.get(\"notes\")\n\n if the_user.userprofile.is_doctor():\n patient_id = int(post.get(\"patient\", the_user.pk))\n patient = User.objects.get(pk=patient_id)\n doctor = User.objects.get(pk=the_user.id)\n new_appointment = Appointment.objects.create(date=date, doctor=doctor, patient=patient, notes=notes)\n\n elif request.user.userprofile.is_patient():\n doctor_id = int(post.get(\"doctor\", the_user.pk))\n doctor = User.objects.get(pk=doctor_id)\n patient = User.objects.get(pk=the_user.id)\n new_appointment = Appointment.objects.create(date=date, doctor=doctor, patient=patient, notes=notes)\n\n return new_appointment", "def end_time(self, end_time):\n\n self._end_time = end_time", "def end_time(self, end_time):\n\n self._end_time = end_time", "def end_time(self, end_time):\n\n self._end_time = end_time", "def _set_end_time(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=unicode, default=unicode(\"\"), is_leaf=True, yang_name=\"end-time\", rest_name=\"to\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'To time period granting user access', u'alt-name': u'to', u'cli-suppress-no': None, u'display-when': u'(../access-time)'}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='string', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"end_time must be of a type compatible with string\"\"\",\n 'defined-type': \"string\",\n 'generated-type': \"\"\"YANGDynClass(base=unicode, default=unicode(\"\"), is_leaf=True, yang_name=\"end-time\", rest_name=\"to\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'To time period granting user access', u'alt-name': u'to', u'cli-suppress-no': None, u'display-when': u'(../access-time)'}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='string', is_config=True)\"\"\",\n })\n\n self.__end_time = t\n if hasattr(self, '_set'):\n self._set()", "def test_get_end_date(self):\n # Creating booking object\n book_time = datetime.utcnow()\n duration = 3\n booking = Booking(1, \"dummy\", book_time, duration)\n\n # Checking end time is correct\n end_time = book_time + timedelta(hours=duration)\n assert (end_time == booking.get_end_time())", "def appointments(request):\n now = timezone.localtime(timezone.now())\n data = {}\n tables = {}\n rows = []\n seen = Appointment.objects.filter(seen_time__isnull=False).filter(\n checkin_date__iexact=now.date())\n # Today's COMPLETE patients\n complete = seen.filter(finish_time__isnull=False)\n for a in complete:\n d = {}\n d['id'] = a.id\n d['name'] = a.first_name + ' ' + a.last_name\n h, m, s = to_hms(get_waiting_time(a, now.time()))\n wait_time = \"\" + str(h) + \":\" + 
str(m) + \":\" + str(s)\n d['wait_time'] = wait_time\n rows.append(d)\n tables['Completed'] = rows\n rows = []\n # Today's IN_SESSION patients\n in_session = seen.filter(finish_time__isnull=True)\n for a in in_session:\n d = {}\n d['id'] = a.id\n d['name'] = a.first_name + ' ' + a.last_name\n h, m, s = to_hms(get_waiting_time(a, now.time()))\n wait_time = \"\" + str(h) + \":\" + str(m) + \":\" + str(s)\n d['wait_time'] = wait_time\n rows.append(d)\n tables['In Session'] = rows\n data['tables'] = tables\n return render(request, 'doctor/appointments.html', data)", "def create_appointments(\n data: AppointmentCreate,\n background_tasks: BackgroundTasks, \n user: User = Depends(deps.get_user),\n db: Session = Depends(deps.get_db),\n rdc: RedisCache = Depends(deps.get_redis)\n) -> Any:\n db_provider = crud_user.get_user_by_id(db, str(data.provider_id))\n if not db_provider:\n raise HTTPException(\n status_code=404, \n detail=\"Cabeleireiro não encontrado\"\n )\n\n current_date = datetime.now()\n compare_date = data.date.replace(tzinfo=None)\n if compare_date < current_date:\n raise HTTPException(\n status_code=400, \n detail=\"Você não pode marcar agendamento em datas passadas\"\n )\n \n if data.date.hour < 8 or data.date.hour > 17:\n raise HTTPException(\n status_code=400, \n detail=\"Você só pode cria agendamentos entre 8:00 e 17:00\"\n )\n\n if data.provider_id == user.id:\n raise HTTPException(\n status_code=400, \n detail=\"Você não pode marca agendamento consigo mesmo\"\n )\n\n validate_date = crud_appointment.get_appointment_by_date(db, data.provider_id, data.date)\n if validate_date:\n raise HTTPException(status_code=400, detail=\"Este horario já esta agendado\")\n\n appointment = crud_appointment.create(db, data, user)\n msg = f\"Novo agendamento de {user.name} {user.surname} para o {date.format_date(data.date)}\"\n background_tasks.add_task(crud_notification.create, str(data.provider_id), msg)\n date_time = data.date\n rdc.invalidate_cache(\n f\"providers-appointments:{data.provider_id}:{date_time.year}:{date_time.month}:{date_time.day}\"\n )\n rdc.invalidate_cache(f\"user-appointments:{user.id}\")\n\n return appointment", "def clerk_create_appointment():\n if request.method == 'POST':\n patient_email = request.form['patient_email']\n doctor_email = request.form['doctor_email']\n date = request.form['date']\n time = request.form['time']\n\n response_clerk_create_appointment = requests.post(server_url + 'medical_clerk/create_appointment', json={\n 'patient_email': patient_email,\n 'doctor_email': doctor_email,\n 'date': date,\n 'time': time\n })\n response_clerk_create_appointment = response_clerk_create_appointment.json()\n\n if response_clerk_create_appointment.get('Status') == \"INVALID_DOCTOR_EMAIL\":\n return render_template('clerks/clerk_appointment_failed.html')\n elif response_clerk_create_appointment.get('Status') == \"INVALID_PATIENT_EMAIL\":\n return render_template('clerks/clerk_appointment_failed.html')\n elif response_clerk_create_appointment.get('Status') == \"DOCTOR_IS_NOT_AVAILABLE_AT_THAT_TIME\":\n return render_template('clerks/clerk_appointment_failed.html')\n elif response_clerk_create_appointment.get('Status') == \"DOCTOR_HAS_AN_APPOINTMENT_SELECTED_TIME_SLOT\":\n return render_template('clerks/clerk_appointment_failed.html')\n else:\n referer = request.referrer\n return redirect(referer, code=302)\n else:\n return render_template('clerks/home.html')", "def end_date_time(self, end_date_time):\n\n self._end_date_time = end_date_time", "def appointment():\r\n return 
render_template(\r\n 'about.html',\r\n title='About',\r\n year=datetime.now().year,\r\n message='Your application description page.'\r\n )", "def clean(self):\n\n appointment_time = arrow.get(self.time, self.time_zone.zone)\n\n if appointment_time < arrow.utcnow():\n raise ValidationError(\n 'You cannot schedule an appointment for the past. '\n 'Please check your time and time_zone')", "def patient_done(request, appointment_id):\n appt_obj = Appointment.objects.get(pk=appointment_id)\n if appt_obj.finish_time is None:\n # we have not set this appointment as complete yet\n appt_obj.finish_time = timezone.localtime(timezone.now()).time()\n response = patch_appointment_status(request, appointment_id, COMPLETE)\n if response != 204:\n return index(request, {'msg': err_patch})\n appt_obj.save()\n # TODO: Create a notes log to allow doctor to view history and revert\n return HttpResponseRedirect(reverse('doctor:appointments'))", "def set_time_data(self, start_time, end_time):\n ev_time_start = datetime.fromtimestamp(start_time) or datetime.now()\n ev_time_end = datetime.fromtimestamp(end_time) or datetime.now()\n self._timer_begins_entry.set_text(ev_time_start.strftime(self._TIME_STR))\n self._timer_ends_entry.set_text(ev_time_end.strftime(self._TIME_STR))\n self._timer_begins_calendar.select_day(ev_time_start.day)\n self._timer_begins_calendar.select_month(ev_time_start.month - 1, ev_time_start.year)\n self._timer_ends_calendar.select_day(ev_time_end.day)\n self._timer_ends_calendar.select_month(ev_time_end.month - 1, ev_time_end.year)\n self._timer_begins_hr_button.set_value(ev_time_start.hour)\n self._timer_begins_min_button.set_value(ev_time_start.minute)\n self._timer_ends_hr_button.set_value(ev_time_end.hour)\n self._timer_ends_min_button.set_value(ev_time_end.minute)", "def complete_appointment(request, calendar_id):\n calendar = Calendar.objects.get(pk=calendar_id)\n return render(request, 'complete_appointment.html', {'calendar': calendar})", "def schedule_meeting(intent_request):\n \n meeting_person = intent_request['currentIntent']['slots']['Person']\n meeting_type = intent_request['currentIntent']['slots']['MeetingType']\n meeting_date = intent_request['currentIntent']['slots']['Date']\n meeting_time = intent_request['currentIntent']['slots']['Time']\n meeting_duration = intent_request['currentIntent']['slots']['Duration']\n meeting_address = intent_request['currentIntent']['slots']['Address']\n invitation_link = intent_request['currentIntent']['slots']['InvitationLink']\n phone_number = intent_request['currentIntent']['slots']['Phone']\n source = intent_request['invocationSource']\n output_session_attributes = intent_request['sessionAttributes'] if intent_request['sessionAttributes'] is not None else {}\n booking_map = json.loads(try_ex(lambda: output_session_attributes['bookingMap']) or '{}')\n\n if source == 'DialogCodeHook':\n # Perform basic validation on the supplied input slots.\n slots = intent_request['currentIntent']['slots']\n validation_result = validate_schedule_meeting(meeting_duration, date, meeting_time)\n if not validation_result['isValid']:\n slots[validation_result['violatedSlot']] = None\n return elicit_slot(\n output_session_attributes,\n intent_request['currentIntent']['name'],\n slots,\n validation_result['violatedSlot'],\n validation_result['message']\n )\n\n if not meeting_person:\n return elicit_slot(\n output_session_attributes,\n intent_request['currentIntent']['name'],\n intent_request['currentIntent']['slots'],\n 'Person',\n {'contentType': 'PlainText', 
'content': 'Who is gonna be that with?'}\n )\n \n if meeting_person and not meeting_type:\n return elicit_slot(\n output_session_attributes,\n intent_request['currentIntent']['name'],\n intent_request['currentIntent']['slots'],\n 'MeetingType',\n {'contentType': 'PlainText', 'content': 'What type of meeting would you like to schedule?'}\n )\n\n if meeting_person and meeting_type and not meeting_date:\n return elicit_slot(\n output_session_attributes,\n intent_request['currentIntent']['name'],\n intent_request['currentIntent']['slots'],\n 'Date',\n {'contentType': 'PlainText', 'content': 'When would you like to schedule your {} ?'.format(meeting_type)}\n )\n\n if meeting_type and meeting_date:\n # Fetch or generate the availabilities for the given date.\n booking_availabilities = try_ex(lambda: booking_map[meeting_date])\n if booking_availabilities is None:\n booking_availabilities = get_availabilities(meeting_date)\n booking_map[meeting_date] = booking_availabilities\n output_session_attributes['bookingMap'] = json.dumps(booking_map)\n\n meeting_type_availabilities = get_availabilities_for_duration(get_duration(meeting_type), booking_availabilities)\n if len(meeting_type_availabilities) == 0:\n # No availability on this day at all; ask for a new date and time.\n slots['Date'] = None\n slots['Time'] = None\n return elicit_slot(\n output_session_attributes,\n intent_request['currentIntent']['name'],\n slots,\n 'Date',\n {'contentType': 'PlainText', 'content': 'There is not any availability on that date, is there another day which works for you?'}\n )\n\n message_content = 'What time on {} works for you? '.format(meeting_date)\n if meeting_time:\n output_session_attributes['formattedTime'] = build_time_output_string(meeting_time)\n # Validate that proposed time for the meeting can be booked by first fetching the availabilities for the given day. To\n # give consistent behavior in the sample, this is stored in sessionAttributes after the first lookup.\n if is_available(meeting_time, get_duration(meeting_type), booking_availabilities):\n return delegate(output_session_attributes, slots)\n message_content = 'The time you requested is not available. 
'\n\n if len(meeting_type_availabilities) == 1:\n # If there is only one availability on the given date, try to confirm it.\n slots['Time'] = meeting_type_availabilities[0]\n return confirm_intent(\n output_session_attributes,\n intent_request['currentIntent']['name'],\n slots,\n {\n 'contentType': 'PlainText',\n 'content': '{}{} is our only availability, does that work for you?'.format\n (message_content, build_time_output_string(meeting_type_availabilities[0]))\n },\n build_response_card(\n 'Confirm Meeting',\n 'Is {} on {} okay?'.format(build_time_output_string(meeting_type_availabilities[0]), date),\n [{'text': 'yes', 'value': 'yes'}, {'text': 'no', 'value': 'no'}]\n )\n )\n\n available_time_string = build_available_time_string(meeting_type_availabilities)\n return elicit_slot(\n output_session_attributes,\n intent_request['currentIntent']['name'],\n slots,\n 'Time',\n {'contentType': 'PlainText', 'content': '{}{}'.format(message_content, available_time_string)},\n build_response_card(\n 'Specify Time',\n 'What time works best for you?',\n build_options('Time', meeting_type, meeting_date, booking_map)\n )\n )\n \n if meeting_type = 'online' and meeting_person and meeting_date and meeting_time and not invitation_link:\n return elicit_slot(\n output_session_attributes,\n intent_request['currentIntent']['name'],\n intent_request['currentIntent']['slots'],\n 'InvitationLink',\n {'contentType': 'PlainText', 'content': 'Can you paste your invitation link in here, please?'}\n )\n \n if (meeting_type = 'personal' or meeting_type = 'inperson') and meeting_person and meeting_date and meeting_time and not meeting_address:\n return elicit_slot(\n output_session_attributes,\n intent_request['currentIntent']['name'],\n intent_request['currentIntent']['slots'],\n 'Address',\n {'contentType': 'PlainText', 'content': 'Where the {} will take place?', .format(meeting_type)}\n )\n \n if meeting_person and meeting_type and meeting_date and meeting_time and (invitation_link or meeting_address) and not contact_phone\"\n return elicit_slot(\n output_session_attributes,\n intent_request['currentIntent']['name'],\n intent_request['currentIntent']['slots'],\n 'Phone',\n {'contentType': 'PlainText', 'content': 'Can you leave your contact phone number here, please?'}\n\n return delegate(output_session_attributes, slots)\n \n \n \"\"\" --- Check avalibility --- \"\"\"\n\n\n # Book the meeting.\n booking_availabilities = booking_map[meeting_date]\n if booking_availabilities:\n # Remove the availability slot for the given date as it has now been booked.\n booking_availabilities.remove(meeting_time)\n if meeting_duration == 60:\n second_half_hour_time = increment_time_by_thirty_mins(meeting_time)\n booking_availabilities.remove(second_half_hour_time)\n\n booking_map[date] = booking_availabilities\n output_session_attributes['bookingMap'] = json.dumps(booking_map)\n else:\n # This is not treated as an error as this code sample supports functionality either as fulfillment or dialog code hook.\n logger.debug('Availabilities for {} were null at fulfillment time. '\n 'This should have been initialized if this function was configured as the dialog code hook'.format(meeting_date))\n\n return close(\n output_session_attributes,\n 'Fulfilled',\n {\n 'contentType': 'PlainText',\n 'content': 'Okay, I have booked your meeting. 
See you at {} on {}'.format(build_time_output_string(meeting_time), meeting_date)\n }\n )", "def test_modify_slot_end_early(self):\n coupon = COUPON_FACTORY.create_coupon()\n slot = Slot.objects.create(site_id=2,\n business_id=coupon.offer.business.id,\n start_date=datetime.date.today(),\n end_date=datetime.date.today() + datetime.timedelta(2))\n SlotTimeFrame.objects.create(slot=slot, coupon_id=coupon.id,\n start_datetime=datetime.datetime.today(),\n end_datetime=datetime.datetime.today() + datetime.timedelta(1))\n slot.end_date = datetime.date.today() + datetime.timedelta(1)\n with self.assertRaises(ValidationError) as context_manager:\n slot.save()\n self.fail('Invalid slot saved.')\n LOG.debug(context_manager.exception)" ]
[ "0.7324631", "0.707775", "0.64345574", "0.61964524", "0.6116282", "0.6012545", "0.59737736", "0.5958537", "0.58682275", "0.5739396", "0.56900465", "0.5664058", "0.5664058", "0.5635635", "0.5621515", "0.5621515", "0.5621515", "0.56020176", "0.5596119", "0.5588781", "0.5569192", "0.5547617", "0.55380714", "0.5532021", "0.5514789", "0.5509564", "0.5503694", "0.5488271", "0.5474761", "0.5459975" ]
0.75025403
0
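
The final step constructs AppointmentForm(session.attributes) and calls submit(), but neither AppointmentForm nor render_result appears in the source. The stub below only illustrates the contract the five-step dialog implies — every detail is an assumption:

class AppointmentForm:
    # Slots the dialog chain above accumulates.
    REQUIRED = ('begin_date', 'begin_time', 'end_date', 'end_time')

    def __init__(self, attributes):
        self.attributes = dict(attributes)
        self.submitted = False

    def submit(self):
        # A real form would call a calendar backend here; we only validate.
        missing = [k for k in self.REQUIRED if k not in self.attributes]
        if missing:
            raise ValueError('missing slots: ' + ', '.join(missing))
        self.submitted = True

form = AppointmentForm({'begin_date': '2024-05-01', 'begin_time': '14:30',
                        'end_date': '2024-05-01', 'end_time': '15:00'})
form.submit()
assert form.submitted
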
Significant duration model by Abrahamson and Silva (1996), Empirical ground motion models, report prepared for Brookhaven National Laboratory. Input
import numpy as np

def abrahamson_silva_ds_1999(magnitude=7.0, distance=10.0, soil=True, duration_type='DS575H'):
    # map the duration_type to integer key
    dur_map = {'DS575H': 0, 'DS575V': 1, 'DS595H': 2, 'DS595V': 3}
    dur_tag = dur_map.get(duration_type.upper(), None)
    if dur_tag is None:
        print("SignificantDurationModel.abrahamson_silva_ds_1999: duration_type='DS575H','DS575V','DS595H','DS595V'?")
        return None, None
    # modeling coefficients
    beta = [3.2, 3.2, 3.2, 3.2]
    b1 = [5.204, 4.610, 5.204, 4.610]
    b2 = [0.851, 1.536, 0.851, 1.536]
    m_star = [6, 6, 6, 6]
    c1 = [0.805, 1.076, 0.805, 1.076]
    c2 = [0.063, 0.107, 0.063, 0.107]
    rc = [10, 10, 10, 10]
    Drat = [0.000, 0.000, 0.845, 0.646]
    sigma = [0.55, 0.46, 0.49, 0.45]
    # median
    if distance > rc[dur_tag]:
        ds_median = np.exp(np.log((np.exp(b1[dur_tag] + b2[dur_tag] *
            (magnitude - m_star[dur_tag])) / (10**(1.5 * magnitude +
            16.05)))**(-1 / 3) / (4.9e6 * beta[dur_tag]) + soil *
            c1[dur_tag] + c2[dur_tag] * (distance - rc[dur_tag])) + Drat[dur_tag])
    else:
        ds_median = np.exp(np.log((np.exp(b1[dur_tag] + b2[dur_tag] *
            (magnitude - m_star[dur_tag])) / (10**(1.5 * magnitude +
            16.05)))**(-1 / 3) / (4.9e6 * beta[dur_tag]) + soil *
            c1[dur_tag]) + Drat[dur_tag])
    # sigma
    ds_sigma = sigma[dur_tag]
    # return
    return np.log(ds_median), ds_sigma
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bommer_stafford_alarcon_ds_2009(magnitude=7.0, distance=10.0, vs30=760.0, ztor=0.0, duration_type='DS575H'):\n\n # duration type map\n dur_map = {'DS575H':0, 'DS595H': 1}\n dur_tag = dur_map.get(duration_type.upper(), None)\n if dur_tag is None:\n print(\"SignificantDurationModel.bommer_stafford_alarcon_ds_2009: duration_type='DS575H','DS595H'?\")\n return None, None, None, None\n \n # modeling coefficients\n c0 = [-5.6298, -2.2393]\n m1 = [1.2619, 0.9368]\n r1 = [2.0063, 1.5686]\n r2 = [-0.2520, -0.1953]\n h1 = [-2.3316, 2.5000]\n v1 = [-0.2900, -0.3478]\n z1 = [-0.0522, -0.0365]\n tauCoeff = [0.3527, 0.3252]\n phiCoeff = [0.4304, 0.3460]\n sigma_c = [0.1729, 0.1114]\n sigma_Tgm = [0.5289, 0.4616]\n\n # median\n ds_median = np.exp(c0[dur_tag]+m1[dur_tag]*magnitude+(r1[dur_tag]+ \\\n r2[dur_tag]*magnitude)*np.log(np.sqrt(distance**2+h1[dur_tag]**2))+ \\\n v1[dur_tag]*np.log(vs30)+z1[dur_tag]*ztor)\n # standard deviations\n ds_sigma = sigma_Tgm[dur_tag]\n ds_tau = tauCoeff[dur_tag]\n ds_phi = phiCoeff[dur_tag]\n\n # return\n return np.log(ds_median), ds_sigma, ds_tau, ds_phi", "def pm_precision(seed=425, th=150):\n \n # observed gd1\n g = Table(fits.getdata('/home/ana/projects/GD1-DR2/output/gd1_members.fits'))\n \n # fiducial model\n wangle = 180*u.deg\n pk = pickle.load(open('../data/gd1_fiducial.pkl', 'rb'))\n xi, eta, veq, xi0, eta0, veq0 = pk['xi'], pk['eta'], pk['veq'], pk['xi0'], pk['eta0'], pk['veq0']\n \n # velocity differences\n dv = []\n for i in range(3):\n # interpolate expected kinematics from an unperturbed stream\n vexp = np.interp(xi.wrap_at(wangle), xi0.wrap_at(wangle), veq0[i].value) * veq0[i].unit\n dv += [veq[i]-vexp]\n \n # find closest model star to the gd-1 stars\n Ngd1 = len(g)\n p = np.array([g['phi1']+40, g['phi2']])\n q = np.array([xi.wrap_at(wangle).to(u.deg).value, eta])\n idmin = np.empty(Ngd1, dtype='int')\n \n for i in range(Ngd1):\n dist = np.sqrt((p[0,i]-q[0])**2 + (p[1,i]-q[1])**2)\n idmin[i] = np.argmin(dist)\n\n # mask stream, mask spur\n onstream_mask = ((g['phi1']<-30.5) & (g['phi1']>-35.5) & (g['phi2']>-0.2) & (g['phi2']<0.2))\n spur_mask = ((g['phi1']<-30.5) & (g['phi1']>-35.5) & (g['phi2']>1) & (g['phi2']<1.4))\n #onstream_mask = ((g['phi1']<-27) & (g['phi1']>-35.5) & (g['phi2']>-0.2) & (g['phi2']<0.2))\n #spur_mask = ((g['phi1']<-27) & (g['phi1']>-35.5) & (g['phi2']>1) & (g['phi2']<1.4))\n all_mask = np.ones(Ngd1, dtype='bool')\n \n # plot scaled data uncertainties on model pm drawn from a corresponding obs uncertainty\n np.random.seed(seed+1)\n fgaia = np.sqrt(2/5)\n pmra_error = fgaia * g['pmra_error']*u.mas/u.yr\n pmdec_error = fgaia * g['pmdec_error']*u.mas/u.yr\n print(np.median(pmra_error), np.median(pmdec_error))\n\n phi1 = xi[idmin].wrap_at(wangle).to(u.deg).value\n phi2 = eta[idmin]\n pmra = dv[0][idmin] + pmra_error * np.random.randn(Ngd1)\n pmdec = dv[1][idmin] + pmdec_error * np.random.randn(Ngd1)\n pmra_true = dv[0][idmin]\n pmdec_true = dv[1][idmin]\n \n # convolve HST uncertainties\n pmerr = np.array([0.0848, 0.0685])\n Nfield = 2\n #p2 = np.array([np.array([-32.77,-32.77])+40, [1.167,0]])\n p2 = np.array([np.array([-27, -27])+40, [1.167,0]])\n q = np.array([xi.wrap_at(wangle).to(u.deg).value, eta])\n idmin2 = np.empty(Nfield, dtype='int')\n \n for i in range(Nfield):\n dist = np.sqrt((p2[0,i]-q[0])**2 + (p2[1,i]-q[1])**2)\n idmin2[i] = np.argmin(dist)\n \n np.random.seed(seed+7)\n phi1 = xi[idmin2].wrap_at(wangle).to(u.deg).value\n phi2 = eta[idmin2]\n pmra2 = dv[0][idmin2].value + pmerr * np.random.randn(Nfield)\n pmdec2 = 
dv[1][idmin2].value + pmerr * np.random.randn(Nfield)\n pmra2_true = dv[0][idmin2].value\n pmdec2_true = dv[1][idmin2].value\n \n # velocity scaling\n dist = gd1_dist(coord.Angle(-32.77*u.deg)).to(u.kpc).value\n r_v = np.array([5,10,20,40,70])\n r_pm = r_v/(4.74*dist)\n \n # mass scaling\n verr = pmerr*4.74*dist*u.km/u.s\n vkick = (G*3e7*u.Msun/(50*u.pc*190*u.km/u.s*np.sin(90*u.deg))).to(u.km/u.s)\n dM = (3*verr*50*u.pc*190*u.km/u.s/G).to(u.Msun)\n dMM = 3*verr/vkick\n #print(vkick)\n #print('{:g} {:g}'.format(*dMM))\n \n \n cspur = mpl.cm.magma(0.6)\n cstream = mpl.cm.magma(0.2)\n colors = [cspur, cstream]\n alpha = [0.1, 0.07]\n lw = [2.5, 3.5]\n \n plt.close()\n fig, ax = plt.subplots(1,2,figsize=(10.6,5.3))\n \n for i in range(2):\n plt.sca(ax[i])\n plt.errorbar(pmra[onstream_mask].value, pmdec[onstream_mask].value, yerr=pmdec_error[onstream_mask].value, xerr=pmra_error[onstream_mask].value, fmt='none', color=cstream, alpha=alpha[i], lw=lw[i])\n plt.errorbar(pmra[spur_mask].value, pmdec[spur_mask].value, yerr=pmdec_error[spur_mask].value, xerr=pmra_error[spur_mask].value, fmt='none', color=cspur, alpha=alpha[i], lw=lw[i])\n \n for e in range(2):\n plt.plot(pmra2_true[e], pmdec2_true[e], 'x', color=colors[e], ms=14, mew=4)\n plt.errorbar(pmra2[e], pmdec2[e], yerr=pmerr[e], xerr=pmerr[e], fmt='o', color=colors[e], lw=2.5)\n \n # add absolute velocity contours\n for r in r_pm:\n c = mpl.patches.Circle((0,0), radius=r, fc='none', ec='k', lw=1.5, ls=':', alpha=0.5)\n plt.gca().add_patch(c)\n \n plt.xlabel('$\\Delta$ $\\mu_{\\\\alpha_\\star}$ [mas yr$^{-1}$]')\n plt.ylabel('$\\Delta$ $\\mu_\\delta$ [mas yr$^{-1}$]')\n \n plt.sca(ax[0])\n # legend entries\n plt.errorbar(20+pmra[onstream_mask].value, pmdec[onstream_mask].value, yerr=pmdec_error[onstream_mask].value, xerr=pmra_error[onstream_mask].value, fmt='none', color='k', alpha=0.3, lw=lw[0], label='Gaia DR4', zorder=0)\n plt.errorbar(20+pmra2, pmdec2, yerr=pmerr, xerr=pmerr, fmt='o', color='0.3', lw=2.5, label='HST', zorder=1)\n plt.plot(20+pmra2_true, pmdec2_true, 'x', color='0.3', ms=10, mew=4, label='GD-1 model', zorder=2)\n plt.plot(20+pmra2_true, pmdec2_true, 's', color=cstream, ms=11, alpha=0.8, mew=0, label='Stream', zorder=3)\n plt.plot(20+pmra2_true, pmdec2_true, 's', color=cspur, ms=11, alpha=0.8, mew=0, label='Spur', zorder=4)\n \n plt.legend(fontsize='small', loc=2, ncol=2, handlelength=0.9)\n \n # resort the legend\n handles, labels = plt.gca().get_legend_handles_labels()\n # sort both labels and handles by labels\n labels, handles = zip(*sorted(zip(labels, handles), key=lambda t: t[0]))\n plt.gca().legend(handles, labels, fontsize='small', loc=2, ncol=2, handlelength=0.9)\n\n # velocity labels\n plt.text(0.1, -0.75, '20 km s$^{-1}$', fontsize='small', ha='center', color='0.2')\n plt.text(0.37, -1.13, '40 km s$^{-1}$', fontsize='small', ha='left', color='0.2')\n plt.text(1, -1.66, '70 km s$^{-1}$', fontsize='small', ha='left', color='0.2')\n \n # zoom in guidelines\n r = mpl.patches.Rectangle((-0.5,-0.5), 1, 1, fc='none', ec='k')\n plt.gca().add_patch(r)\n ax[0].annotate('', xy=(0.5,-0.5), xytext=(0,0), xycoords=ax[0].transData, textcoords=ax[1].transAxes, arrowprops=dict(color='k', arrowstyle='-'))\n ax[0].annotate('', xy=(0.5,0.5), xytext=(0,1), xycoords=ax[0].transData, textcoords=ax[1].transAxes, arrowprops=dict(color='k', arrowstyle='-'))\n \n plt.xlim(-2,2)\n plt.ylim(-2,2)\n plt.gca().set_aspect('equal')\n \n plt.sca(ax[1])\n # velocity labels\n plt.text(0.1, -0.18, '5 km s$^{-1}$', fontsize='small', ha='right', 
color='0.2')\n plt.text(0.1, -0.31, '10 km s$^{-1}$', fontsize='small', ha='right', color='0.2')\n plt.text(0.1, -0.46, '20 km s$^{-1}$', fontsize='small', ha='right', color='0.2')\n \n plt.xlim(-0.5,0.5)\n plt.ylim(-0.5,0.5)\n plt.gca().set_aspect('equal')\n \n plt.tight_layout()\n plt.savefig('../plots/pm_precision.pdf')", "def test_single_ended_wls_estimate_synthetic():\n\n from dtscalibration import DataStore\n\n cable_len = 100.0\n nt = 50\n time = np.arange(nt)\n x = np.linspace(0.0, cable_len, 500)\n ts_cold = np.ones(nt) * 4.0\n ts_warm = np.ones(nt) * 20.0\n\n C_p = 15246\n C_m = 2400.0\n dalpha_r = 0.0005284\n dalpha_m = 0.0004961\n dalpha_p = 0.0005607\n gamma = 482.6\n cold_mask = x < 0.5 * cable_len\n warm_mask = np.invert(cold_mask) # == False\n temp_real = np.ones((len(x), nt))\n temp_real[cold_mask] *= ts_cold + 273.15\n temp_real[warm_mask] *= ts_warm + 273.15\n\n st = (\n C_p\n * np.exp(-dalpha_r * x[:, None])\n * np.exp(-dalpha_p * x[:, None])\n * np.exp(gamma / temp_real)\n / (np.exp(gamma / temp_real) - 1)\n )\n ast = (\n C_m\n * np.exp(-dalpha_r * x[:, None])\n * np.exp(-dalpha_m * x[:, None])\n / (np.exp(gamma / temp_real) - 1)\n )\n\n print(\"alphaint\", cable_len * (dalpha_p - dalpha_m))\n print(\"alpha\", dalpha_p - dalpha_m)\n print(\"C\", np.log(C_p / C_m))\n print(\"x0\", x.max())\n\n ds = DataStore(\n {\n \"st\": ([\"x\", \"time\"], st),\n \"ast\": ([\"x\", \"time\"], ast),\n \"userAcquisitionTimeFW\": ([\"time\"], np.ones(nt)),\n \"cold\": ([\"time\"], ts_cold),\n \"warm\": ([\"time\"], ts_warm),\n },\n coords={\"x\": x, \"time\": time},\n attrs={\"isDoubleEnded\": \"0\"},\n )\n\n sections = {\n \"cold\": [slice(0.0, 0.5 * cable_len)],\n \"warm\": [slice(0.5 * cable_len, cable_len)],\n }\n\n # WLS\n ds.calibration_single_ended(\n sections=sections, st_var=1.0, ast_var=1.0, method=\"wls\", solver=\"sparse\"\n )\n\n assert_almost_equal_verbose(ds.gamma.values, gamma, decimal=6)\n assert_almost_equal_verbose(ds.dalpha.values, dalpha_p - dalpha_m, decimal=8)\n assert_almost_equal_verbose(ds.tmpf.values, temp_real - 273.15, decimal=4)\n\n pass", "def radiation_measurement_analysis():\n import pint\n ureg = pint.UnitRegistry()\n\n mrem_h = ureg.parse_units('mrem') / ureg.hour\n m = ureg.parse_units('meters')\n s = ureg.parse_units('seconds')\n\n # Measurements of background radiation\n bg_dist = ureg.parse_expression('10 m') # estimate of how far away we are wrt background\n background_rows = [\n dict(vid=1, distance=bg_dist, rad=0.023 * mrem_h, capture_time=0.0 * s),\n dict(vid=1, distance=bg_dist, rad=0.022 * mrem_h, capture_time=0.0 * s),\n dict(vid=1, distance=bg_dist, rad=0.023 * mrem_h, capture_time=4.0 * s),\n dict(vid=1, distance=bg_dist, rad=0.021 * mrem_h, capture_time=5.0 * s),\n dict(vid=1, distance=bg_dist, rad=0.023 * mrem_h, capture_time=11.0 * s),\n dict(vid=1, distance=bg_dist, rad=0.023 * mrem_h, capture_time=16.0 * s),\n dict(vid=1, distance=bg_dist, rad=0.024 * mrem_h, capture_time=20.0 * s),\n ]\n\n # Measurements of sample radiation\n esp_dist = ureg.parse_expression('1 inch').to(m) / 2 # estimate of how far we are from the sample when very close\n dist0_rows = [\n dict(vid=2, distance=esp_dist, rad=0.060 * mrem_h, capture_time=0.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.061 * mrem_h, capture_time=3.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.060 * mrem_h, capture_time=5.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.059 * mrem_h, capture_time=9.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.060 * mrem_h, capture_time=10.0 * s),\n 
dict(vid=2, distance=esp_dist, rad=0.059 * mrem_h, capture_time=11.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.057 * mrem_h, capture_time=12.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.058 * mrem_h, capture_time=13.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.059 * mrem_h, capture_time=14.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.060 * mrem_h, capture_time=15.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.061 * mrem_h, capture_time=16.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.062 * mrem_h, capture_time=18.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.062 * mrem_h, capture_time=18.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.064 * mrem_h, capture_time=20.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.065 * mrem_h, capture_time=22.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.066 * mrem_h, capture_time=23.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.065 * mrem_h, capture_time=24.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.063 * mrem_h, capture_time=25.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.065 * mrem_h, capture_time=26.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.064 * mrem_h, capture_time=27.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.064 * mrem_h, capture_time=27.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.065 * mrem_h, capture_time=28.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.063 * mrem_h, capture_time=30.0 * s),\n ]\n\n dist0_v2_rows = [\n dict(vid=3, distance=esp_dist, rad=0.012 * mrem_h, capture_time=0.0 * s),\n dict(vid=3, distance=esp_dist, rad=0.011 * mrem_h, capture_time=1.0 * s),\n dict(vid=3, distance=esp_dist, rad=0.013 * mrem_h, capture_time=8.0 * s),\n dict(vid=3, distance=esp_dist, rad=0.013 * mrem_h, capture_time=9.0 * s),\n ]\n\n close_rows = [\n dict(vid=4, distance=0.5 * m, rad=0.013 * mrem_h, capture_time=0.0 * s),\n dict(vid=4, distance=0.5 * m, rad=0.014 * mrem_h, capture_time=5.0 * s),\n dict(vid=4, distance=0.5 * m, rad=0.012 * mrem_h, capture_time=7.0 * s),\n dict(vid=4, distance=0.5 * m, rad=0.011 * mrem_h, capture_time=15.0 * s),\n dict(vid=4, distance=0.5 * m, rad=0.012 * mrem_h, capture_time=16.0 * s),\n ]\n\n mid_rows = [\n dict(vid=5, distance=1.0 * m, rad=0.014 * mrem_h, capture_time=0.0 * s),\n dict(vid=5, distance=1.0 * m, rad=0.015 * mrem_h, capture_time=5.0 * s),\n dict(vid=5, distance=1.0 * m, rad=0.013 * mrem_h, capture_time=10.0 * s),\n ]\n\n far_rows = [\n dict(vid=6, distance=2.0 * m, rad=0.023 * mrem_h, capture_time=0.0 * s),\n dict(vid=6, distance=2.0 * m, rad=0.025 * mrem_h, capture_time=0.1 * s),\n ]\n\n # guess_dist = ureg.parse_expression('0.3 m') # estimate of how far away we are wrt background\n # guess_rows = [\n # dict(vid=9, distance=guess_dist, rad=0.030 * mrem_h, capture_time=0.0 * s),\n # dict(vid=9, distance=guess_dist, rad=0.041 * mrem_h, capture_time=2.0 * s),\n # dict(vid=9, distance=guess_dist, rad=0.051 * mrem_h, capture_time=3.0 * s),\n # ]\n\n rows = dist0_rows + background_rows + dist0_v2_rows + close_rows + mid_rows + far_rows\n # rows += guess_rows\n\n import pandas as pd\n import numpy as np\n table = pd.DataFrame(rows)\n\n # Ensure comparable units\n units = {\n 'rad': mrem_h,\n 'distance': m,\n 'capture_time': s,\n }\n for key, unit in units.items():\n table[key] = table[key].apply(lambda c: c.to(unit).m)\n table['rad'] = table['rad'].astype(float)\n table['distance'] = table['distance'].astype(float)\n\n # Weight each measurement based on the amount of time the measurement was\n # sustained in the video.\n average_rad_rows = []\n for vid, group in table.groupby('vid'):\n 
from statsmodels.stats.weightstats import DescrStatsW\n weights = (-1 * group['capture_time'].diff(periods=-1).fillna(0)) / group['capture_time'].iloc[-1]\n table.loc[group.index, 'weight'] = weights\n values = group['rad']\n weighted_stats = DescrStatsW(values, weights=weights, ddof=0)\n dists = group['distance'].unique()\n assert len(dists) == 1\n average_rad_rows.append({\n 'vid': vid,\n 'distance': dists[0],\n 'rad_mean': weighted_stats.mean,\n 'rad_std': weighted_stats.std,\n })\n stats_table = pd.DataFrame(average_rad_rows)\n\n bg_row = stats_table.loc[stats_table['distance'].argmax()]\n fg_row = stats_table.loc[stats_table['distance'].argmin()]\n\n # -------------------\n ADD_DUMMY_VALUES = 0\n if ADD_DUMMY_VALUES:\n # Hack: because we don't have enough samples we can fudge the value\n # knowning that the value should be the background radiation in the\n # limit.\n\n dummy_measurements = []\n extra_support = 1\n for idx in range(3, 3 + extra_support):\n dummy_row = {\n 'vid': -idx,\n 'distance': bg_row['distance'] + idx,\n 'rad_mean': bg_row['rad_mean'],\n 'rad_std': 0.01,\n }\n dummy_measurements.append(dummy_row)\n\n # also add an extra value close to the sample\n rad_bg = bg_row['rad_mean']\n rad_above_bg = fg_row['rad_mean'] - rad_bg\n dummy_row = {\n 'vid': -1,\n 'distance': fg_row['distance'] / 2,\n 'rad_mean': rad_bg + (rad_above_bg * 4),\n 'rad_std': 0.5,\n }\n dummy_measurements.append(dummy_row)\n\n # dummy_row = {\n # 'vid': -2,\n # 'distance': fg_row['distance'] / 4,\n # 'rad_mean': rad_bg + (rad_above_bg * 16),\n # }\n # dummy_measurements.append(dummy_row)\n\n dummy_stats = pd.DataFrame(dummy_measurements)\n dummy_stats['weight'] = 0.5\n stats_table['weight'] = 1.0\n stats_table2 = pd.concat([stats_table, dummy_stats]).reset_index(drop=True).sort_values('distance')\n else:\n stats_table2 = stats_table\n # -------------------\n\n import scipy\n scipy.optimize.curve_fit\n\n # Because we know the radiation should follow an inverse square law wrt to\n # distance, we can fit a polynomial of degree 2 (parabola) to interpolate /\n # extrapolate the **inverse** values.\n x = stats_table2['distance'].values\n y = stats_table2['rad_mean'].values\n s = stats_table2['rad_std'].values\n\n # Model the squared falloff directly\n def invsquare(x, a, b):\n return a * (1 / (0.01 + x ** 2)) + b\n # bg_row['rad_mean']\n # Use curve_fit to constrain the first coefficient to be zero\n try:\n coef = scipy.optimize.curve_fit(invsquare, x, y, sigma=s, method='trf')[0]\n except Exception as ex:\n coef = None\n print(f'ex={ex}')\n\n # Also fit one to the raw weighted points as a sanity check\n # inv_poly2 = Polynomial.fit(table['distance'], 1 / table['rad'], w=table['weight'], deg=2)\n\n import kwplot\n sns = kwplot.autosns()\n plt = kwplot.autoplt()\n # ax = sns.boxplot(data=table, x='distance', y='rad', width=0.1)\n\n # Add in points to show each observation\n ax = sns.relplot(x=\"distance\", y=\"rad\", data=table, size=4, color=\".3\",\n linewidth=0, alpha=0.5, palette='deep')\n\n ax = plt.gca()\n ax.set_xlabel('distance from sample ({})'.format(str(units['distance'])))\n ax.set_ylabel('radiation dosage ({})'.format(str(units['rad'])))\n\n max_meters = 10\n\n extrap_x = np.linspace(0, max_meters, 1000)\n if coef is not None:\n extrap_y1 = invsquare(extrap_x, *coef)\n # extrap_y2 = 1 / inv_poly2(extrap_x)\n ax.plot(stats_table2['distance'].values, stats_table2['rad_mean'].values, 'rx')\n ax.plot(stats_table['distance'].values, stats_table['rad_mean'].values, 'bo')\n ax.plot(extrap_x, 
extrap_y1, '--')\n ax.set_ylim(0.001, 0.1)\n ax.set_yscale('log')\n # ax.plot(extrap_x, extrap_y2, '--')", "def main():\n\n #\n # Generate waveform\n #\n\n print 'generating waveoform...'\n waveform = pmns_utils.Waveform('shen_135135_lessvisc')\n\n # Pick some extrinsic parameters\n ext_params = ExtParams(distance=1, ra=0.0, dec=0.0, polarization=0.0,\n inclination=0.0, phase=0.0, geocent_peak_time=0.0+5.0)\n\n # Construct the time series for these params\n waveform.make_wf_timeseries(theta=ext_params.inclination,\n phi=ext_params.phase)\n\n #\n # Generate IFO data\n #\n det1_data = DetData(waveform=waveform, ext_params=ext_params)\n\n from scipy import signal\n import pylab as pl\n\n pl.figure()\n pl.plot(det1_data.td_response.sample_times,det1_data.td_response.data)\n pl.plot(det1_data.td_signal.sample_times,det1_data.td_signal.data)\n\n pl.figure()\n f,p = signal.welch(det1_data.td_response.data, fs=1./det1_data.delta_t,\n nperseg=512)\n pl.loglog(f,np.sqrt(p))\n\n f,p = signal.welch(det1_data.td_signal.data, fs=1./det1_data.delta_t,\n nperseg=512)\n pl.loglog(f,np.sqrt(p))\n pl.ylim(1e-25,1e-21)\n pl.show()", "def amet_memoryWise(self):\r\n # set up logging files to monitor the calculation\r\n logging.basicConfig(filename = os.path.join(self.path,'history_amet_python.log'),\r\n filemode = 'w+', level = logging.DEBUG,\r\n format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\r\n # initialize the time span\r\n # define sigma level\r\n A, B = self.defineSigmaLevels()\r\n # use example input file to load the basic dimensions information\r\n datapath_var = os.path.join(self.path, 'MERRA2_400.inst3_3d_asm_Nv.20160101.nc4.nc')\r\n var_key = Dataset(datapath_var)\r\n lat = var_key.variables['lat'][:]\r\n lon = var_key.variables['lon'][:]\r\n # calculate the reference levels based on A & B and standard surface pressure\r\n half_level = A + B * 101325\r\n level = (half_level[1:] + half_level[:-1]) / 2\r\n # create space for the output\r\n # AMET in the entire column\r\n E = np.zeros((len(lat),len(lon)), dtype=float)\r\n cpT = np.zeros((len(lat),len(lon)), dtype=float)\r\n Lvq = np.zeros((len(lat),len(lon)), dtype=float)\r\n gz = np.zeros((len(lat),len(lon)), dtype=float)\r\n uv2 = np.zeros((len(lat),len(lon)), dtype=float)\r\n logging.info(\"Start retrieving variables T,q,u,v,sp\")\r\n # The shape of each variable is (8,72,361,576)\r\n T = var_key.variables['T'][:]\r\n q = var_key.variables['QV'][:]\r\n sp = var_key.variables['PS'][:] #(8,361,576)\r\n u = var_key.variables['U'][:]\r\n v = var_key.variables['V'][:]\r\n logging.info(\"Extracting variables successfully!\") \r\n # compute gz\r\n z_model = self.calc_gz(var_key)\r\n # get the basic shape\r\n tt, hh, yy, xx = q.shape\r\n AMET = amet.met()\r\n E, cpT, Lvq, gz, uv2 = AMET.calc_met(T, q, sp, u, v, z_model, A, B,\r\n tt, hh, len(lat), len(lon), lat, self.lat_unit)\r\n\r\n return np.mean(E)", "def vlook(model):\n plt.close()\n plt.plot(model.rec_t, np.array(model.data[\"soma\"][0]))\n plt.xlabel(\"Time (ms)\")\n plt.ylabel(\"Volgate (mV)\")\n plt.show()\n return len(model.rec_t)", "def estimate_bpm(D):\n if len(D) < 2*ignore:\n return 0\n else:\n return 1/np.mean(np.diff(D))*60", "def analyze(video, write_output=True, label=False, rate=False, model=None):\r\n # Initiate an empty list of tracked waves, ultimately recognized\r\n # waves, and a log of all tracked waves in each frame.\r\n tracked_waves = []\r\n recognized_waves = []\r\n wave_log = []\r\n ratings = []\r\n\r\n # Initialize frame counters.\r\n frame_num = 1\r\n 
num_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))\r\n fps = int(video.get(cv2.CAP_PROP_FPS))\r\n\r\n # If an output video is to be made:\r\n if write_output is True:\r\n out = mwt_io.create_video_writer(video)\r\n\r\n # Initiate a timer for program performance:\r\n time_start = time.time()\r\n\r\n\r\n # The main loop is here:\r\n while True:\r\n\r\n # Write status update to stdio.\r\n status_update(frame_num, num_frames)\r\n\r\n # Read frames until end of clip.\r\n successful_read, original_frame = video.read()\r\n if not successful_read:\r\n break\r\n\r\n # Preprocess frames.\r\n analysis_frame = mwt_preprocessing.preprocess(original_frame)\r\n \r\n # Detect all sections.\r\n sections = mwt_detection.detect_sections(analysis_frame,\r\n frame_num,\r\n original_frame)\r\n\r\n # Track all waves in tracked_waves.\r\n mwt_tracking.track(tracked_waves,\r\n analysis_frame,\r\n frame_num,\r\n num_frames,\r\n original_frame)\r\n \r\n\r\n # Write tracked wave stats to wave_log.\r\n for wave in tracked_waves:\r\n wave_log.append((frame_num, wave.name, wave.mass, wave.max_mass,\r\n wave.displacement, wave.max_displacement,\r\n wave.birth, wave.death, wave.recognized,\r\n wave.centroid))\r\n\r\n # Remove dead waves from tracked_waves.\r\n dead_recognized_waves = [wave for wave in tracked_waves \r\n if wave.death is not None\r\n and wave.recognized is True]\r\n recognized_waves.extend(dead_recognized_waves)\r\n\r\n # Label the dead waves, if label flag was specified\r\n if label:\r\n mwt_label.label(dead_recognized_waves, fps, dead=True)\r\n\r\n # Rate the dead waves, if rate flag was specified\r\n if rate:\r\n mwt_rate.rate(ratings, dead_recognized_waves, model)\r\n\r\n tracked_waves = [wave for wave in tracked_waves if wave.death is None]\r\n\r\n # Remove duplicate waves, keeping earliest wave.\r\n tracked_waves.sort(key=lambda x: x.birth, reverse=True)\r\n for wave in tracked_waves:\r\n other_waves = [wav for wav in tracked_waves if not wav == wave]\r\n if mwt_tracking.will_be_merged(wave, other_waves):\r\n wave.death = frame_num\r\n tracked_waves = [wave for wave in tracked_waves if wave.death is None]\r\n tracked_waves.sort(key=lambda x: x.birth, reverse=False)\r\n\r\n # Check sections for any new potential waves and add to\r\n # tracked_waves.\r\n for section in sections:\r\n if not mwt_tracking.will_be_merged(section, tracked_waves):\r\n tracked_waves.append(section)\r\n\r\n # Label all current waves if label flag was specified\r\n if label:\r\n mwt_label.label(tracked_waves, fps)\r\n\r\n # Rate all current waves if rate flag was specified\r\n if rate:\r\n mwt_rate.rate(ratings, tracked_waves, model)\r\n\r\n # analysis_frame = cv2.cvtColor(analysis_frame, cv2.COLOR_GRAY2RGB)\r\n\r\n if write_output is True:\r\n # Draw detection boxes on original frame for visualization.\r\n original_frame = mwt_io.draw(\r\n tracked_waves,\r\n original_frame,\r\n #1)\r\n 1/mwt_preprocessing.RESIZE_FACTOR)\r\n\r\n # Write frame to output video.\r\n # out.write(original_frame)\r\n #out.write(analysis_frame)\r\n\r\n # Increment the frame count.\r\n frame_num += 1\r\n\r\n # Stop timer here and calc performance.\r\n time_elapsed = (time.time() - time_start)\r\n performance = (num_frames / time_elapsed)\r\n\r\n if rate:\r\n final_rating = mwt_rate.get_final_rating(ratings)\r\n print (\"Final rating for this video: {}\".format(final_rating))\r\n\r\n # Provide update to user here.\r\n if recognized_waves is not None:\r\n print (\"{} wave(s) recognized.\".format(len(recognized_waves)))\r\n print (\"Program 
performance: %0.1f frames per second.\" %performance)\r\n for i, wave in enumerate(recognized_waves):\r\n print (\"Wave #{}: ID: {}, Birth: {}, Death: {},\" \\\r\n + \" Max Displacement: {}, Max Mass: {}\".format(\r\n i+1, wave.name, wave.birth, wave.death,\r\n wave.max_displacement, wave.max_mass))\r\n else:\r\n print (\"No waves recognized.\")\r\n\r\n # Clean-up resources.\r\n if write_output is True:\r\n out.release()\r\n\r\n return recognized_waves, wave_log, performance", "def convertToSpectroGram(self):", "def get_ptf10iuv(colorplt = False):\n z = 0.0251485\n ebv = 0.0371 # SFD\n D = cosmo.luminosity_distance([z])[0].value * 1e+6 # in pc\n dis_mod = 5*np.log10(D / 10)\n print (\"adopt g band t_max estimated by myself\")\n t_max = 55357.387 \n tb = pd.read_csv('../data/otherSN/Kasliwal2012/PTF10iuv', sep='\\t')\n tb = tb.drop(columns=[\"Unnamed: 4\"])\n tb = tb.rename(columns={'Filter' : 'filter',\n 'MJD': 'mjd'})\n tb = tb[~np.array([x[0]=='>' for x in tb['Mag'].values])]\n tb['mag'] = np.array([float(x.split(\" +or-\")[0]) for x in tb['Mag'].values])\n tb['emag'] = np.array([float(x.split(\" +or-\")[1]) for x in tb['Mag'].values])\n tb = tb.drop(columns=[\"Mag\"])\n \n ixg = tb['filter'].values == \"g\"\n ixr = tb['filter'].values == \"r\"\n ixi = tb['filter'].values == \"i\"\n ixz = tb['filter'].values == \"z\"\n ixB = tb['filter'].values == \"B\"\n tb['wave'] = np.zeros(len(tb))\n tb['wave'].values[ixB] = 4359\n tb['wave'].values[ixg] = 4814\n tb['wave'].values[ixr] = 6422\n tb['wave'].values[ixi] = 7883\n tb['wave'].values[ixz] = 9670\n \n tb['mag0'] = tb['mag'] - extinction.ccm89(tb['wave'].values, 3.1*ebv, 3.1)\n tb['mag0_abs'] = tb['mag0'] - dis_mod\n tb['tmax_rf'] = (tb['mjd'] - t_max) / (1+z)\n tb = tb.sort_values(by = \"mjd\")\n if colorplt==False:\n return tb\n \n else:\n tb = add_datecol(tb)\n ix = np.in1d(tb[\"filter\"].values, np.array(['g', 'r', 'i']))\n tb = tb[ix]\n tb = tb[tb.mjd > 55352.5]\n tb = tb[tb.mjd < 55593.5]\n \n dates = get_date_span(tb)\n datesave = []\n for i in range(len(dates)):\n x = dates[i]\n ix = tb[\"date\"].values == x\n tbsub = tb[ix]\n if len(tbsub)!=0:\n flts = tbsub['filter'].values\n if \"r\" in flts and np.sum(np.unique(flts))!=1:\n datesave.append(x)\n datesave = np.array(datesave)\n \n mcolor = []\n mcolor_unc = []\n mjds = []\n colorname = []\n for i in range(len(datesave)):\n x = datesave[i]\n ix = tb[\"date\"].values == x\n tbsub = tb[ix]\n gtb = tbsub[tbsub[\"filter\"].values==\"g\"]\n rtb = tbsub[tbsub[\"filter\"].values==\"r\"]\n itb = tbsub[tbsub[\"filter\"].values==\"i\"]\n if len(gtb)!=0:\n gmjds = gtb[\"mjd\"].values\n gmags = gtb[\"mag0\"].values\n gemags = gtb[\"emag\"].values\n gwtgs = 1/gemags**2\n gmag = np.sum(gmags * gwtgs) / np.sum(gwtgs)\n gmjd = np.sum(gmjds * gwtgs) / np.sum(gwtgs)\n gemag = 1/ np.sqrt(np.sum(gwtgs))\n if len(rtb)!=0:\n rmjds = rtb[\"mjd\"].values\n rmags = rtb[\"mag0\"].values\n remags = rtb[\"emag\"].values\n rwtgs = 1/remags**2\n rmag = np.sum(rmags * rwtgs) / np.sum(rwtgs)\n rmjd = np.sum(rmjds * rwtgs) / np.sum(rwtgs)\n remag = 1/ np.sqrt(np.sum(rwtgs))\n if len(itb)!=0:\n imjds = itb[\"mjd\"].values\n imags = itb[\"mag0\"].values\n iemags = itb[\"emag\"].values\n iwtgs = 1/iemags**2\n imag = np.sum(imags * iwtgs) / np.sum(iwtgs)\n imjd = np.sum(imjds * iwtgs) / np.sum(iwtgs)\n iemag = 1/ np.sqrt(np.sum(iwtgs))\n if len(gtb)!=0 and len(rtb)!=0:\n mcolor.append(gmag - rmag)\n mjds.append( 0.5 * (gmjd + rmjd) )\n mcolor_unc.append( np.sqrt(gemag**2 + remag**2) )\n colorname.append(\"gmr\")\n if 
len(rtb)!=0 and len(itb)!=0:\n mcolor.append(rmag - imag)\n mjds.append( 0.5 * (rmjd + imjd) )\n mcolor_unc.append( np.sqrt(remag**2 + iemag**2) )\n colorname.append(\"rmi\")\n \n ctb = Table(data = [mjds, mcolor, mcolor_unc, colorname],\n names = [\"mjd\", \"c\", \"ec\", \"cname\"])\n \n ctb['tmax_rf'] = (ctb['mjd'] - t_max) / (1+z)\n ctb = ctb.to_pandas()\n return ctb", "def _compile_model(self):\n\n\t\t\t### GC90 atmospheric model implementation\n\t\t\n\t\t\ttheta_sun, beta, alpha, am, rh, pressure = T.scalars('theta_sun', 'beta', 'alpha', 'am', 'rh', 'pressure')\n\n\t\t\twl = T.vector('wl')\n\t\t\n\t\t\twl_a = 550\n\n\t\t\ttheta_sun_ = theta_sun * np.pi / 180.\n\n\t\t\tz3 = -0.1417 * alpha + 0.82\n\t\t\tz2 = ifelse(T.gt(alpha, 1.2), 0.65, z3)\n\t\t\tz1 = ifelse(T.lt(alpha, 0), 0.82, z2)\n\t\t\n\t\t\ttheta_sun_mean = z1\n\n\t\t\tB3 = T.log(1 - theta_sun_mean)\n\t\t\tB2 = B3 * (0.0783 + B3 * (-0.3824 - 0.5874 * B3))\n\t\t\tB1 = B3 * (1.459 + B3 * (0.1595 + 0.4129 * B3))\n\t\t\tFa = 1 - 0.5 * T.exp((B1 + B2 * T.cos(theta_sun_)) * T.cos(theta_sun_))\n\n\t\t\tomega_a = (-0.0032 * am + 0.972) * T.exp(3.06 * 1e-4 * rh)\n\t\t\ttau_a = beta*(wl/wl_a)**(-alpha)\n\n\t\t\t# fixed a bug in M, thanks Jaime! [brackets added]\n\t\t\tM = 1 / (T.cos(theta_sun_) + 0.50572 * (90 + 6.07995 - theta_sun)**(-1.6364)) \n\t\t\tM_ = M * pressure / 1013.25\n\n\t\t\tTr = T.exp(- M_ / (115.6406 * (wl / 1000)**4 - 1.335 * (wl / 1000)**2)) \n\n\t\t\tTas = T.exp(- omega_a * tau_a * M)\n\n\t\t\tEdd = Tr * Tas \n\t\t\tEdsr = 0.5 * (1 - Tr**0.95)\n\t\t\tEdsa = Tr**1.5 * (1 - Tas) * Fa\n\t\t\n\t\t\tEd = Edd + Edsr + Edsa\n\t\t\tEdd_Ed = Edd / Ed\n\t\t\tEdsr_Ed = Edsr / Ed\n\t\t\tEdsa_Ed = Edsa / Ed\n\t\t\tEds_Ed = Edsr_Ed + Edsa_Ed\n\n\t\t\t### Albert and Mobley bio-optical model implementation\n\n\t\t\ta_w, daw_dT, astar_ph, astar_y, Ls_Ed = T.vectors('a_w', 'daw_dT', 'astar_ph', 'astar_y', 'Ls_Ed') \n\t\t\n\t\t\tC_chl, C_sm, C_mie, n_mie, C_y, S_y, T_w, theta_view, n_w, rho_s, rho_dd, rho_ds, delta= T.scalars('C_chl', 'C_sm', 'C_mie', 'n_mie', 'C_y', 'S_y', 'T_w', 'theta_view', 'n_w', 'rho_s', 'rho_dd', 'rho_ds', 'delta')\n\n\t\t\t# calc_a_ph\n\t\t\ta_ph = C_chl * astar_ph\n\n\t\t\t# calc_a_y\n\t\t\twl_ref_y = 440\n\t\t\ta_y = ifelse(T.eq(S_y, -1), C_y * astar_y, C_y * T.exp(- S_y * (wl - wl_ref_y)))\n\n\t\t\t# calc_a\n\t\t\tT_w_ref = 20.\n\t\t\ta_w_corr = a_w + (T_w - T_w_ref) * daw_dT\n\t\t\n\t\t\ta = a_w_corr + a_ph + a_y\n\n\t\t\t# calc_bb_sm\n\t\t\tbbstar_sm = 0.0086\n\t\t\tbbstar_mie = 0.0042\n\t\t\twl_ref_mie = 500\n\t\t\n\t\t\tbb_sm = C_sm * bbstar_sm + C_mie * bbstar_mie * (wl / wl_ref_mie)**n_mie\n\n\t\t\t# calc_bb\n\t\t\tb1 = ifelse(T.eq(n_w, 1.34), 0.00144, 0.00111)\n\t\t\n\t\t\twl_ref_water = 500\n\t\t\tS_water = -4.32\n\n\t\t\tbb_water = b1 * (wl / wl_ref_water)**S_water\n\t\t\tbb = bb_water + bb_sm\n\n\t\t\t# calc omega_b\n\t\t\tomega_b = bb / (bb + a)\n\n\t\t\t# calc sun and viewing zenith angles under water\n\t\t\ttheta_sun_ = theta_sun * np.pi / 180.\n\t\t\ttheta_sun_ss = T.arcsin(T.sin(theta_sun_) / n_w)\n\t\t\ttheta_view_ = theta_view * np.pi / 180.\n\t\t\ttheta_view_ss = T.arcsin(T.sin(theta_view_) / n_w)\n\n\t\t\tp_f = [0.1034, 1, 3.3586, -6.5358, 4.6638, 2.4121]\n\t\t\tp_frs = [0.0512, 1, 4.6659, -7.8387, 5.4571, 0.1098, 0.4021]\n\n\t\t\t# calc subsurface reflectance \n\t\t\tf = p_f[0] * (p_f[1] + p_f[2] * omega_b + p_f[3] * omega_b**2 + p_f[4] * omega_b**3) * (1 + p_f[5] / T.cos(theta_sun_ss)) \n\n\t\t\tR0minus = f * omega_b\n\n\t\t\t# calc subsurface remote sensing reflectance \n\t\t\tfrs = 
p_frs[0] * (p_frs[1] + p_frs[2] * omega_b + p_frs[3] * omega_b**2 + p_frs[4] * omega_b**3) * (1 + p_frs[5] / T.cos(theta_sun_ss)) * (1 + p_frs[6] / T.cos(theta_view_ss))\n\n\t\t\tRrs0minus = frs * omega_b\n\n\t\t\t# calc water surface reflected reflectance \n\t\t\tRrs_refl = rho_s * Ls_Ed + rho_dd * Edd_Ed / np.pi + rho_ds * Eds_Ed / np.pi + delta\n\n\t\t\t# calc_Rrs0plus (Lee1998, eq22), R=Q*Rrs\n\t\t\tgamma = 0.48\n\t\t\tzeta = 0.518\n\n\t\t\tRrs = zeta * Rrs0minus / ( 1 - gamma * R0minus )\n\t\t\n\t\t\tLu_Ed = Rrs + Rrs_refl\n\t\t\n\t\t\tf = th.function([beta, alpha, am, rh, pressure, C_chl, C_sm, C_mie, n_mie, C_y, S_y, T_w, theta_sun, theta_view, n_w, rho_s, rho_dd, rho_ds, delta, wl, a_w, daw_dT, astar_ph, astar_y, Ls_Ed], [Rrs, Rrs_refl, Lu_Ed], on_unused_input='warn')\n\n\t\t\treturn f", "def calculation_time_analysis():\n\tfrom . import spectra as sp\n\tp_dict = {'Bfield':700,'rb85frac':1,'Btheta':88*np.pi/180,'Bphi':0*np.pi/180,'lcell':75e-3,'T':84,'Dline':'D2','Elem':'Cs'}\n\tchiL,chiR,chiZ = sp.calc_chi([-3500],p_dict)\n\t\n\tfor angle in [0, np.pi/32, np.pi/16, np.pi/8, np.pi/4, np.pi/2]:\n\t\tprint(('Angle (degrees): ',angle*180/np.pi))\n\t\tRotMat, n1, n2 = solve_diel(chiL,chiR,chiZ,angle)", "def get_more_info(model):# pragma: no cover\n\n group_time = model.meta.exposure.group_time\n nframes_used = model.meta.exposure.nframes\n saturated_flag = dqflags.group['SATURATED']\n jump_flag = dqflags.group['JUMP_DET']\n\n return (group_time, nframes_used, saturated_flag, jump_flag)", "def AlljointRuns():\n #800 nm\n forwardModelJointFit(getFiles(mintime=(15, 40, 07), maxtime=(15, 45, 14), folder='data/29Jul/'),\n out='J800nm', wavelength='800nm') #0.31, 0.3\n forwardModelJointFit(getFiles(mintime=(15, 12, 20), maxtime=(15, 24, 16), folder='data/31Jul/'),\n out='J800nm5k', wavelength='800nm') #0.28 0.31\n forwardModelJointFit(getFiles(mintime=(15, 28, 40), maxtime=(15, 39, 21), folder='data/31Jul/'),\n out='J800nm10k', wavelength='800nm') #0.27 0.29\n forwardModelJointFit(getFiles(mintime=(15, 43, 24), maxtime=(15, 51, 47), folder='data/31Jul/'),\n out='J800nm20k', wavelength='800nm') #0.27 0.28\n forwardModelJointFit(getFiles(mintime=(15, 56, 11), maxtime=(16, 02, 58), folder='data/31Jul/'),\n out='J800nm30k', wavelength='800nm')\n forwardModelJointFit(getFiles(mintime=(16, 12, 39), maxtime=(16, 18, 25), folder='data/31Jul/'),\n out='J800nm38k', wavelength='800nm')\n forwardModelJointFit(getFiles(mintime=(16, 21, 52), maxtime=(16, 26, 16), folder='data/31Jul/'),\n out='J800nm50k', wavelength='800nm')\n forwardModelJointFit(getFiles(mintime=(16, 32, 02), maxtime=(16, 35, 23), folder='data/31Jul/'),\n out='J800nm54k', wavelength='800nm')\n #700 nm\n forwardModelJointFit(getFiles(mintime=(17, 20, 17), maxtime=(17, 33, 17), folder='data/30Jul/'),\n out='J700nm5k', wavelength='700nm') # 0.28 0.32\n forwardModelJointFit(getFiles(mintime=(17, 37, 35), maxtime=(17, 46, 51), folder='data/30Jul/'),\n out='J700nm9k', wavelength='700nm') # 0.27 0.32\n forwardModelJointFit(getFiles(mintime=(17, 48, 35), maxtime=(17, 56, 03), folder='data/30Jul/'),\n out='J700nm52k', wavelength='700nm') # 0.26 0.31\n forwardModelJointFit(getFiles(mintime=(17, 58, 18), maxtime=(17, 59, 31), folder='data/30Jul/'),\n out='J700nm32k', wavelength='700nm')\n #600 nm\n forwardModelJointFit(getFiles(mintime=(15, 22, 00), maxtime=(15, 36, 32), folder='data/30Jul/'),\n out='J600nm5k', wavelength='600nm') #0.27 0.31\n forwardModelJointFit(getFiles(mintime=(15, 39, 58), maxtime=(15, 47, 58), folder='data/30Jul/'),\n 
out='J600nm54k', wavelength='600nm') #0.299, 0.333\n forwardModelJointFit(getFiles(mintime=(15, 52, 07), maxtime=(16, 06, 32), folder='data/30Jul/'),\n out='J600nm10k', wavelength='600nm') #0.28 0.32\n #890 nm\n forwardModelJointFit(getFiles(mintime=(13, 37, 37), maxtime=(13, 50, 58), folder='data/01Aug/'),\n out='J890nm5k', wavelength='890nm') #0.28 0.35\n forwardModelJointFit(getFiles(mintime=(14, 00, 58), maxtime=(14, 11, 54), folder='data/01Aug/'),\n out='J890nm10k', wavelength='890nm') #0.28 0.33\n forwardModelJointFit(getFiles(mintime=(14, 17, 57), maxtime=(14, 25, 49), folder='data/01Aug/'),\n out='J890nm30k', wavelength='890nm') #0.3 0.33\n forwardModelJointFit(getFiles(mintime=(14, 30, 03), maxtime=(14, 34, 37), folder='data/01Aug/'),\n out='J890nm50k', wavelength='890nm') #0.3 0.3", "def vlass_stars(duration, n_beams):\n n_pointings = duration//4.2\n n_observed = n_pointings*n_beams\n return n_observed", "def frames(velocity=286.7, datestring=\"2005/11/01/00:00:00\",\n ra=\"05:35:28.105\", dec=\"-069.16.10.99\", equinox=\"J2000\", \n observatory=\"ALMA\", prec=4, verbose=True, myme='', myqa='',\n restFreq=345.79599, veltype='optical'):\n localme = False\n localqa = False\n if (myme == ''):\n myme = createCasaTool(metool)\n localme = True\n if (myqa == ''):\n myqa = createCasaTool(qatool)\n localqa = True\n if (dec.find(':') >= 0):\n dec = dec.replace(':','.')\n position = myme.direction(equinox, ra, dec)\n obstime = myme.epoch('TAI', datestring)\n\n if (veltype.lower().find('opt') == 0):\n velOpt = create_casa_quantity(myqa,velocity,\"km/s\")\n dopp = myme.doppler(\"OPTICAL\",velOpt)\n # CASA doesn't do Helio, but difference to Bary is hopefully small\n rvelOpt = myme.toradialvelocity(\"BARY\",dopp)\n elif (veltype.lower().find('rad') == 0):\n rvelOpt = myme.radialvelocity('LSRK',str(velocity)+'km/s')\n else:\n print \"veltype must be 'rad'io or 'opt'ical\"\n return\n\n myme.doframe(position)\n myme.doframe(myme.observatory(observatory))\n myme.doframe(obstime)\n myme.showframe()\n\n rvelRad = myme.measure(rvelOpt,'LSRK')\n doppRad = myme.todoppler(\"RADIO\",rvelRad) \n restFreq = parseFrequencyArgumentToGHz(restFreq)\n freqRad = myme.tofrequency('LSRK',doppRad,me.frequency('rest',str(restFreq)+'GHz'))\n\n lsrk = qa.tos(rvelRad['m0'],prec=prec)\n rvelTop = myme.measure(rvelOpt,'TOPO')\n doppTop = myme.todoppler(\"RADIO\",rvelTop) \n freqTop = myme.tofrequency('TOPO',doppTop,me.frequency('rest',str(restFreq)+'GHz'))\n if (localme):\n myme.done()\n if (localqa):\n myqa.done()\n topo = qa.tos(rvelTop['m0'],prec=prec)\n velocityDifference = 0.001*(rvelRad['m0']['value']-rvelTop['m0']['value'])\n frequencyDifference = freqRad['m0']['value'] - freqTop['m0']['value']\n return(freqTop['m0']['value'], velocityDifference, frequencyDifference)", "def main(graph=None, model=None):\n\n mjd = [57433.4816, 57436.4815, 57439.4817, 57451.4604, 57454.4397, 57459.3963, 57462.418, 57465.4385, 57468.3768,\n 57473.3606, 57487.3364, 57490.3341, 57493.3154, 57496.3352, 57505.3144, 57513.2542, 57532.2717, 57536.2531,\n 57543.2545, 57546.2703, 57551.2115, 57555.2669, 57558.2769, 57561.1899, 57573.2133, 57433.5019, 57436.4609,\n 57439.4587, 57444.4357, 57459.4189, 57468.3142, 57476.355, 57479.3568, 57487.3586, 57490.3562, 57493.3352,\n 57496.2949, 57505.3557, 57509.2932, 57513.2934, 57518.2735, 57521.2739, 57536.2321, 57539.2115, 57543.2301,\n 57551.1701, 57555.2107, 57558.191, 57573.1923, 57576.1749, 57586.1854]\n flux = [2.0357230e+00, -2.0382695e+00, 1.0084588e+02, 5.5482742e+01, 1.4867026e+01, 
-6.5136810e+01, 1.6740545e+01,\n -5.7269131e+01, 1.0649184e+02, 1.5505235e+02, 3.2445984e+02, 2.8735449e+02, 2.0898877e+02, 2.8958893e+02,\n 1.9793906e+02, -1.3370536e+01, -3.9001358e+01, 7.4040916e+01, -1.7343750e+00, 2.7844931e+01, 6.0861992e+01,\n 4.2057487e+01, 7.1565346e+01, -2.6085690e-01, -6.8435440e+01, 17.573107, 41.445435, -110.72664, 111.328964,\n -63.48336, 352.44907, 199.59058, 429.83075, 338.5255, 409.94604, 389.71262, 195.63905, 267.13318, 123.92461,\n 200.3431, 106.994514, 142.96387, 56.491238, 55.17521, 97.556946, -29.263103, 142.57687, -20.85057,\n -0.67210346, 63.353024, -40.02601]\n fluxerr = [42.784702, 43.83665, 99.98704, 45.26248, 43.040398, 44.00679, 41.856007, 49.354336, 105.86439, 114.0044,\n 45.697918, 44.15781, 60.574158, 93.08788, 66.04482, 44.26264, 91.525085, 42.768955, 43.228336, 44.178196,\n 62.15593, 109.270035, 174.49638, 72.6023, 48.021034, 44.86118, 48.659588, 100.97703, 148.94061, 44.98218,\n 139.11194, 71.4585, 47.766987, 45.77923, 45.610615, 60.50458, 105.11658, 71.41217, 43.945534, 45.154167,\n 43.84058, 52.93122, 44.722775, 44.250145, 43.95989, 68.101326, 127.122025, 124.1893, 49.952255, 54.50728,\n 114.91599]\n passband = ['g', 'g', 'g', 'g', 'g', 'g', 'g', 'g', 'g', 'g', 'g', 'g', 'g', 'g', 'g', 'g', 'g', 'g', 'g', 'g', 'g',\n 'g', 'g', 'g', 'g', 'r', 'r', 'r', 'r', 'r', 'r', 'r', 'r', 'r', 'r', 'r', 'r', 'r', 'r', 'r', 'r', 'r',\n 'r', 'r', 'r', 'r', 'r', 'r', 'r', 'r', 'r']\n photflag = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4096, 4096, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4096,\n 6144, 4096, 4096, 4096, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n objid = 'MSIP_01_NONIa-0001_10400862'\n ra = 3.75464531293933\n dec = 0.205076187109334\n redshift = 0.233557\n mwebv = 0.0228761\n\n light_curve_list = [(mjd, flux, fluxerr, passband, photflag, ra, dec, objid, redshift, mwebv)]\n\n classification = Classify(known_redshift=True)\n predictions, time_steps = classification.get_predictions(light_curve_list, return_predictions_at_obstime=False)\n print(predictions)\n\n # classification.plot_light_curves_and_classifications(step=False)\n # classification.plot_classification_animation()\n # classification.plot_classification_animation_step()\n\n predictions, time_steps = classification.get_predictions(light_curve_list, return_predictions_at_obstime=True)\n\n if predictions is not None:\n import matplotlib.pyplot as plt\n for i, class_name in enumerate(classification.class_names):\n plt.plot(time_steps[0], predictions[0][:, i], label=class_name)\n plt.legend()\n plt.show()", "def muontrg_efficiencies():\r\n# to be updated with new numbers:\r\n ejpsi_trg = Jpsi_trg\r\n ejpsi_trg.add_relative_error(0.03) # TISTOS Justine 140711\r\n ebsmm_trg.add_relative_error(0.02) # Extra lines Justine 140711\r\n ebsmm_trg = BmmE_trg\r\n ebsmm_trg.add_relative_error(0.03) # TISTOS Justine 140711\r\n ebsmm_trg.add_relative_error(0.025) # Extra lines Justine 120711\r\n return ejpsi_trg,ebsmm_trg", "def printLatestMeasurement(self): \n data = self.tristimulus[len(self.tristimulus)-1]\n x = data[0]\n y = data[1]\n L = data[2]\n print\"(x,y) = ({0:.4f}, {1:.4f}), L = {2:.4f} cd/m2 ({3:.4f} fL)\".format( x, y, L, 0.291863508*L)", "def calculateStatisticalSignificance():\n ##tau HCS pearson\n ml_model_perf = pickle.load(open(\"pickles/ml_model_perf.pkl\", \"rb\"))\n null_model_perf = pickle.load(open(\"pickles/null_model_perf.pkl\", \"rb\"))\n null_dapi_perf = pickle.load(open(\"pickles/single_channel_DAPI_null_model_perf.pkl\", \"rb\"))\n y = 
np.array([ml_model_perf[0], null_model_perf[0], null_dapi_perf[0]]).round(decimals=2)\n stds = [ml_model_perf[1], null_model_perf[1], null_dapi_perf[1]]\n z, p = calculateZScoreAndPValue(m1=y[0], s1=stds[0], n1=17280, m2=y[1], s2=stds[1], n2=17280)\n print(\"stats for HCS pearson, ML vs Null YFP: z: {}, p: {}\".format(z, p))\n z, p = calculateZScoreAndPValue(m1=y[0], s1=stds[0], n1=17280, m2=y[2], s2=stds[2], n2=17280)\n print(\"stats for HCS pearson, ML vs Null DAPI: z: {}, p: {}\".format(z, p))\n \n ##tau HCS MSE\n ml_model_perf = pickle.load(open(\"pickles/ml_model_mse_perf.pkl\", \"rb\"))\n null_model_perf = pickle.load(open(\"pickles/null_model_mse_perf.pkl\", \"rb\"))\n null_dapi_perf = pickle.load(open(\"pickles/single_channel_DAPI_null_model_mse_perf.pkl\", \"rb\"))\n y= np.array([ml_model_perf[0], null_model_perf[0], null_dapi_perf[0]]).round(decimals=2)\n stds = [ml_model_perf[1], null_model_perf[1], null_dapi_perf[1]]\n z, p = calculateZScoreAndPValue(m1=y[1], s1=stds[1], n1=17280, m2=y[0], s2=stds[0], n2=17280)\n print(\"stats for HCS MSE, ML vs Null YFP: z: {}, p: {}\".format(z, p))\n z, p = calculateZScoreAndPValue(m1=y[2], s1=stds[2], n1=17280, m2=y[0], s2=stds[0], n2=17280)\n print(\"stats for HCS MSE, ML vs Null DAPI: z: {}, p: {}\".format(z, p))\n\n ##osteosarcoma ablated pearon\n ##this one is a bit more involved because we have individual means and STDs over a 3-fold cross-val\n ##we have the following for the ablated ML model (sample size, avg pearson, std), one for each fold:\n # (108330 0.7498484453029202 0.12794946936625312)\n # (108330 0.7507672277328549 0.12978897185198424) \n # (108330 0.7512250395547646 0.12858723725044444)\n ##combining to one sample we have mean = .7506, std=.1288\n ##and the following for the Null Model\n #(108330 0.3951239419846807 0.13861514301358197)\n #(108330 0.39522112186984787 0.1387019314192389)\n #(108330 0.3956142180066648 0.13832544923711507)\n ##combining this into one sample, we have: mean = 0.3953, std = .1385\n z, p = calculateZScoreAndPValue(m1=.7506, s1=.1288, n1=108330*3, m2=.3953, s2=.1385, n2=108330*3)\n print(\"stats for osteosarcoma ablated pearson, ML vs Null Model: z: {}, p: {}\".format(z, p))\n\n ##osteosarcoma ablated MSE\n ##ML model performance:\n # (108330 0.5003031 0.25589895)\n # (108330 0.4984656 0.25957793)\n # (108330 0.49754992 0.2571745)\n ##combining to one sample we have mean = 0.4988 , std= .2576\n ##Null Model performance:\n # (108330 1.209752 0.2772303)\n # (108330 1.2095579 0.27740386)\n # (108330 1.2087716 0.27665088)\n ##combining to one sample we have mean = 1.2094 , std= 0.2771\n z, p = calculateZScoreAndPValue(m1=1.2094, s1=.2771, n1=108330*3, m2=.4988, s2=.2576, n2=108330*3)\n print(\"stats for osteosarcoma ablated MSE, ML vs Null Model: z: {}, p: {}\".format(z, p))\n\n ##osteosarcoma raw pearson \n ##ML model performance:\n #(108330 0.8487535502148598, 0.0750789260880985)\n #(108330 0.8482422038817274, 0.0749674444367002)\n # (108330 0.8500693686258434, 0.07491226209365953)\n ##combining to one sample we have mean = .849 , std= 0.075\n ##Null model performance:\n #(108330 0.44372635525546694, 0.11585072713296693)\n #(108330 0.4440357996615424, 0.11573081667714848)\n # (108330 0.4443288449364213, 0.11528081384708891)\n ##combining to one sample we have mean = 0.444 , std= 0.1156\n z, p = calculateZScoreAndPValue(m1=.849, s1=0.075, n1=108330*3, m2=0.444, s2=0.1156, n2=108330*3)\n print(\"stats for osteosarcoma raw pearson, ML vs Null Model: z: {}, p: {}\".format(z, p))\n\n ##osteosarcoma raw 
MSE\n ##ML model performance:\n #(108330 0.3024929, 0.15015785)\n #(108330 0.3035156, 0.1499349)\n # (108330 0.29986125, 0.14982451)\n ##combining to one sample we have mean = 0.302 , std= 0.15\n ##Null model performance\n # (108330 1.1125473, 0.23170146)\n # (108330 1.1119285, 0.23146166)\n # (108330 1.1113423, 0.23056163)\n ##combining to one sample we have mean = 1.1119 , std= 0.2312\n z, p = calculateZScoreAndPValue(m1=1.1119, s1=0.2312, n1=108330*3, m2=0.302, s2=0.15, n2=108330*3)\n print(\"stats for osteosarcoma raw MSE, ML vs Null Model: z: {}, p: {}\".format(z, p))\n\n ##comparing ablated to nonablated pearson\n z, p = calculateZScoreAndPValue(m1=0.849, s1=0.075, n1=108330*3, m2=0.7506, s2=0.1288, n2=108330*3)\n print(\"stats for comparing ablated to non-ablated pearson: z: {}, p: {}\".format(z, p))\n\n ##comparing ablated to nonablated MSE\n z, p = calculateZScoreAndPValue(m1=.4988, s1=.2576, n1=108330*3, m2=0.302, s2=0.15, n2=108330*3)\n print(\"stats for comparing ablated to non-ablated MSE: z: {}, p: {}\".format(z, p))", "def dY_dt(self, y, t=0):\n\t\t \n\t\t#variables\n\t\tpSgg = y[0] / float(sum(y))\n\t\tpSgh = y[3] / float(sum(y))\n\t\tpSh = y[3] / float(y[3] + y[4] + y[5])\n\t\t\n\t\t#exit flows\n\t\texit_Sg = y[0] * (1 / time_active) * t \n\t\texit_Pg = y[1] * (1 / time_active) * t\n\t\texit_PPg = y[2] * (1 / time_active) * t\n\t\texit_Sh = y[3] * (1 / time_active) * t\n\t\texit_Ph = y[4] * (1 / time_active) * t\n\t\texit_PPh = y[5] * (1 / time_active) * t\n\t\t#episodic flows\n\t\tSg_to_h = y[0] * (1 / tin_g) * t\n\t\tPg_to_h = y[1] * (1 / tin_g) * t\n\t\tPPg_to_h = y[2] * (1 / tin_g) * t\n\t\tSh_to_g = y[3] * (1 / tin_h) * t\n\t\tPh_to_g = y[4] * (1 / tin_h) * t\n\t\tPPh_to_g = y[5] * (1 / tin_h) * t\n\t\t#entry flows\n\t\tinto_g = new_g * t\n\t\tinto_h = new_h * t\n\t\t#infection flows\n\t\tnewinf_gg = ((y[1] + y[4]) * B1 + (y[2] + y[5]) * B2) * Cg * pSgg * t\n\t\tnewinf_gh = ((y[1] + y[4]) * B1 + (y[2] + y[5]) * B2) * Cg * pSgh * t\n\t\tnewinf_h = (y[4] * B1 + y[5] * B2) * Ch * pSh * t\n\t\t#stage progression flows\n\t\tPg_to_PPg = y[1] * D1 * t\n\t\tPPg_to_d = y[2] * D2 * t\n\t\tPh_to_PPh = y[4] * D1 * t\n\t\tPPh_to_d = y[5] * D2 * t\n\t\t\t\n\t\tstate = [- exit_Sg - newinf_gg - Sg_to_h + into_g + Sh_to_g,\n\t\t\t\t - exit_Pg - Pg_to_PPg - Pg_to_h + newinf_gg + Ph_to_g,\n\t\t\t\t - exit_PPg - PPg_to_d - PPg_to_h + Pg_to_PPg + PPh_to_g,\n\t\t\t\t - exit_Sh - newinf_gh - newinf_h - Sh_to_g + into_h + Sg_to_h,\n\t\t\t\t - exit_Ph - Ph_to_PPh - Ph_to_g + newinf_gh + newinf_h + Pg_to_h,\n\t\t\t\t - exit_PPh - PPh_to_d - PPh_to_g + Ph_to_PPh + PPg_to_h]\n\t\n\t\treturn state", "def test_double_ended_two_matching_sections_and_two_asym_atts():\n from dtscalibration import DataStore\n\n cable_len = 100.0\n nt = 5\n time = np.arange(nt)\n nx_per_sec = 4\n nx = nx_per_sec * 9\n x = np.linspace(0.0, cable_len, nx)\n ts_cold = 4.0 + np.cos(time) * 4\n ts_warm = 20.0 + -np.sin(time) * 4\n ts_ground = np.linspace(1, 9, num=nx_per_sec)\n\n C_p = 1324 # 1/2 * E0 * v * K_+/lam_+^4\n eta_pf = np.cos(time) / 10 + 1 # eta_+ (gain factor forward channel)\n eta_pb = np.sin(time) / 10 + 1 # eta_- (gain factor backward channel)\n C_m = 5000.0\n eta_mf = np.cos(time + np.pi / 8) / 10 + 1\n eta_mb = np.sin(time + np.pi / 8) / 10 + 1\n dalpha_r = 0.005284\n dalpha_m = 0.004961\n dalpha_p = 0.005607\n gamma = 482.6\n talph_fw = 0.95\n talph_bw = 0.85\n\n temp_real_kelvin = np.zeros((len(x), nt)) + 273.15\n temp_real_kelvin[:nx_per_sec] += ts_cold[None]\n temp_real_kelvin[nx_per_sec : 2 * 
nx_per_sec] += ts_warm[None]\n temp_real_kelvin[2 * nx_per_sec : 3 * nx_per_sec] += ts_ground[:, None]\n temp_real_kelvin[3 * nx_per_sec : 4 * nx_per_sec] += ts_ground[::-1, None]\n temp_real_kelvin[5 * nx_per_sec : 6 * nx_per_sec] += ts_ground[:, None] + 5\n temp_real_kelvin[6 * nx_per_sec : 7 * nx_per_sec] += ts_ground[:, None] + 5\n temp_real_kelvin[7 * nx_per_sec : 8 * nx_per_sec] += ts_warm[None]\n temp_real_kelvin[8 * nx_per_sec : 9 * nx_per_sec] += ts_cold[None]\n\n temp_real_celsius = temp_real_kelvin - 273.15\n\n st = (\n eta_pf[None]\n * C_p\n * np.exp(-dalpha_r * x[:, None])\n * np.exp(-dalpha_p * x[:, None])\n * np.exp(gamma / temp_real_kelvin)\n / (np.exp(gamma / temp_real_kelvin) - 1)\n )\n st[3 * nx_per_sec :] *= talph_fw\n st[6 * nx_per_sec :] *= talph_fw\n ast = (\n eta_mf[None]\n * C_m\n * np.exp(-dalpha_r * x[:, None])\n * np.exp(-dalpha_m * x[:, None])\n / (np.exp(gamma / temp_real_kelvin) - 1)\n )\n rst = (\n eta_pb[None]\n * C_p\n * np.exp(-dalpha_r * (-x[:, None] + cable_len))\n * np.exp(-dalpha_p * (-x[:, None] + cable_len))\n * np.exp(gamma / temp_real_kelvin)\n / (np.exp(gamma / temp_real_kelvin) - 1)\n )\n rst[: 3 * nx_per_sec] *= talph_bw\n rst[: 6 * nx_per_sec] *= talph_bw\n rast = (\n eta_mb[None]\n * C_m\n * np.exp(-dalpha_r * (-x[:, None] + cable_len))\n * np.exp(-dalpha_m * (-x[:, None] + cable_len))\n / (np.exp(gamma / temp_real_kelvin) - 1)\n )\n\n ds = DataStore(\n {\n \"TMPR\": ([\"x\", \"time\"], temp_real_celsius),\n \"st\": ([\"x\", \"time\"], st),\n \"ast\": ([\"x\", \"time\"], ast),\n \"rst\": ([\"x\", \"time\"], rst),\n \"rast\": ([\"x\", \"time\"], rast),\n \"userAcquisitionTimeFW\": ([\"time\"], np.ones(nt)),\n \"userAcquisitionTimeBW\": ([\"time\"], np.ones(nt)),\n \"cold\": ([\"time\"], ts_cold),\n \"warm\": ([\"time\"], ts_warm),\n },\n coords={\"x\": x, \"time\": time},\n attrs={\"isDoubleEnded\": \"1\"},\n )\n\n sections = {\n \"cold\": [slice(0.0, x[nx_per_sec - 1])],\n \"warm\": [slice(x[nx_per_sec], x[2 * nx_per_sec - 1])],\n }\n ms = [\n (\n slice(x[2 * nx_per_sec], x[3 * nx_per_sec - 1]),\n slice(x[3 * nx_per_sec], x[4 * nx_per_sec - 1]),\n True,\n ),\n (\n slice(x[5 * nx_per_sec], x[6 * nx_per_sec - 1]),\n slice(x[6 * nx_per_sec], x[7 * nx_per_sec - 1]),\n False,\n ),\n ]\n\n ds.calibration_double_ended(\n sections=sections,\n st_var=0.5,\n ast_var=0.5,\n rst_var=0.1,\n rast_var=0.1,\n method=\"wls\",\n solver=\"sparse\",\n trans_att=[x[3 * nx_per_sec], x[6 * nx_per_sec]],\n matching_sections=ms,\n )\n\n assert_almost_equal_verbose(temp_real_celsius, ds.tmpf.values, decimal=7)\n assert_almost_equal_verbose(temp_real_celsius, ds.tmpb.values, decimal=7)\n assert_almost_equal_verbose(temp_real_celsius, ds.tmpw.values, decimal=7)\n pass", "def computeTm(self):\n #first step is finding the derivative series of the well\n x = self.temperatures\n if self.fluorescence == None:\n self.Tm = None\n return\n y = self.fluorescence\n \n xdiff = np.diff(x)\n dydx = -np.diff(y)/xdiff\n #the derivative series, has one less index since there is one fewer differences than points\n seriesDeriv = pandas.Series(dydx, x[:-1])\n \n #now that we have the derivative series, we can find the Tm\n lowestPoint = 0\n lowestPointIndex = None\n \n #gets number of signchanges between max and min of the curve, used to determin if the curve\n #is complex or not\n lowestPoint2 = 1\n lowestIndex2 = None\n highestPoint = 0\n highestIndex = None\n previous = None\n for i, value in enumerate(self.fluorescence[:-1]):\n if value > highestPoint:\n highestPoint = value\n 
highestIndex = i\n if highestIndex == 0 :\n highestPoint = 0\n highestIndex = None\n for i, value in enumerate(self.fluorescence[:-1]):\n if value<lowestPoint2:\n lowestPoint2 = value\n lowestIndex2 = i\n for i, value in enumerate(self.fluorescence[:-1]):\n if i < lowestIndex2:\n continue\n if value > highestPoint:\n highestPoint = value\n highestIndex = i\n else:\n for i, value in enumerate(self.fluorescence[:-1]):\n if i > highestIndex:\n break\n if value<lowestPoint2:\n lowestPoint2 = value\n lowestIndex2 = i\n signChange = False\n for ind in seriesDeriv.index[lowestIndex2+1:highestIndex]:\n \n if previous:\n if seriesDeriv[ind] + SIGN_CHANGE_THRESH < 0 and previous - SIGN_CHANGE_THRESH > 0:\n signChange = True\n if seriesDeriv[ind] - SIGN_CHANGE_THRESH > 0 and previous + SIGN_CHANGE_THRESH < 0:\n signChange = True\n # if seriesDeriv[ind] == 0:\n # signChangeCount += 1\n previous = seriesDeriv[ind]\n\n \n #finding the lowest point and its index on the derivative series\n #only search for Tm up to 90degrees, since last part is hard to predict\n #and often gives false positives\n ignoreNum = int(len(seriesDeriv.index)*0.125)\n for ind in seriesDeriv.index[:-ignoreNum]:\n if seriesDeriv[ind]<lowestPoint:\n lowestPoint = seriesDeriv[ind]\n lowestPointIndex = ind\n \n #TODO working, tms not steep enough added to complex\n #if the slope is not steep enough, tm remains saved, but curve is grouped with the\n #complex curves (now known as the unreliable group)\n #if lowestPoint > -0.000001 / (normalisationFactor / saturation max point of all curves thing):\n # print self.name, 'lowestpoint too small', lowestPoint\n # self.complex = True\n\n #if lowest point is the first index, then no curve fit is required\n if lowestPointIndex == seriesDeriv.index[0]:\n tm = lowestPointIndex\n self.Tm = tm\n \n #set complex to true if curve was complex\n if signChange:\n self.complex = True\n return\n \n #could not find any Tm\n if lowestPointIndex == None:\n self.Tm = None\n \n #if no tm, the curve hopefully be picked up as a monotonic/in the noise/saturated/outlier\n #however, if this does not happen, the curve remains as complex\n self.complex = True\n return \n \n #the indices in the series either side of the lowest index\n #note the first list is indexed e.g. 
list[i] where i is the section using .index\n leftIndex = [ind for ind in seriesDeriv.index][[ind for ind in seriesDeriv.index].index(lowestPointIndex)-1]\n rightIndex = [ind for ind in seriesDeriv.index][[ind for ind in seriesDeriv.index].index(lowestPointIndex)+1]\n \n \n #matrices used to fit a parabola to the 3 points\n Y=[seriesDeriv[leftIndex],\n seriesDeriv[lowestPointIndex],\n seriesDeriv[rightIndex]]\n \n A=[[leftIndex**2, leftIndex, 1],\n [lowestPointIndex**2, lowestPointIndex, 1],\n [rightIndex**2, rightIndex, 1]]\n \n #solve for b, in the form Y=Ab\n (a,b,c) = np.linalg.solve(A,Y)\n \n #initialise tm to left most point of relevant curve\n tm=seriesDeriv[leftIndex]\n tmValue=0\n #make tm the lowest point on the fitted parabola rounded to nearest 0.01\n for x in np.arange(leftIndex,rightIndex,0.01):\n point = (a*(x**2) + b*x + c)\n if tmValue > point:\n tmValue = point\n tm = x\n self.Tm = tm\n \n #again check for complex shape before returning\n if signChange:\n self.complex = True\n\n\n averagePoint = (lowestPoint2 +highestPoint) / 2\n i = lowestIndex2\n while self.fluorescence[i]<averagePoint:\n i += 1;\n\n # estimates tm by another method and if the difference is too large the curve is considred complex\n if (self.temperatures[i] -self.Tm)**2 > 5**2:\n self.complex=True\n return", "def main():\n NAME = os.path.basename(__file__).split(\".\")[0]\n\n # Pass/fail thresholds\n MIN_AVG_FRAME_DELTA = 30 # at least 30ms delta between frames\n MAX_VAR_FRAME_DELTA = 0.01 # variance of frame deltas\n MAX_FRAME_DELTA_JITTER = 0.3 # max ms gap from the average frame delta\n\n with its.device.ItsSession() as cam:\n props = cam.get_camera_properties()\n if not its.caps.manual_sensor(props):\n print \"Test skipped\"\n return\n\n req, fmt = its.objects.get_fastest_manual_capture_settings(props)\n caps = cam.do_capture([req]*50, [fmt])\n\n # Print out the millisecond delta between the start of each exposure\n tstamps = [c['metadata']['android.sensor.timestamp'] for c in caps]\n deltas = [tstamps[i]-tstamps[i-1] for i in range(1,len(tstamps))]\n deltas_ms = [d/1000000.0 for d in deltas]\n avg = sum(deltas_ms) / len(deltas_ms)\n var = sum([d*d for d in deltas_ms]) / len(deltas_ms) - avg * avg\n range0 = min(deltas_ms) - avg\n range1 = max(deltas_ms) - avg\n print \"Average:\", avg\n print \"Variance:\", var\n print \"Jitter range:\", range0, \"to\", range1\n\n # Draw a plot.\n pylab.plot(range(len(deltas_ms)), deltas_ms)\n matplotlib.pyplot.savefig(\"%s_deltas.png\" % (NAME))\n\n # Test for pass/fail.\n assert(avg > MIN_AVG_FRAME_DELTA)\n assert(var < MAX_VAR_FRAME_DELTA)\n assert(abs(range0) < MAX_FRAME_DELTA_JITTER)\n assert(abs(range1) < MAX_FRAME_DELTA_JITTER)", "def runGood():\n forwardModelJointFit(getFiles(mintime=(15, 39, 58), maxtime=(15, 47, 58), folder='data/30Jul/'),\n out='J600nm54k', wavelength='600nm') #kernel around 0.3, 0.33\n forwardModelJointFit(getFiles(mintime=(17, 48, 35), maxtime=(17, 56, 03), folder='data/30Jul/'),\n out='J700nm52k', wavelength='700nm') #around 0.3, 0.31\n RunData([getFiles(mintime=(15, 40, 07), maxtime=(15, 45, 14), folder='data/29Jul/')[2],], out='G800nm',\n wavelength='l800') #around 0.305/315 and 0.295/0.3\n forwardModelJointFit(getFiles(mintime=(15, 40, 07), maxtime=(15, 45, 14), folder='data/29Jul/'),\n out='J800nm', wavelength='800nm') #around 0.3, 0.3\n forwardModelJointFit(getFiles(mintime=(14, 30, 03), maxtime=(14, 34, 37), folder='data/01Aug/'),\n out='J890nm50k', wavelength='890nm') #around 0.285, 0.29", "def doParametersOfInterest(self):\n 
\n self.modelBuilder.doVar('expr::cosW(\"0.87681811112\",)')\n self.modelBuilder.doVar('expr::sinW(\"0.48082221247\",)')\n self.modelBuilder.doVar('expr::mZ(\"91.2\",)')\n self.modelBuilder.doVar('expr::Lambda1(\"100.0\",)')\n self.modelBuilder.doVar('expr::e2(\"0.0917\",)')\n self.modelBuilder.doVar('expr::gs2(\"1.533\",)')\n\n # EFT Higgs basis couplings\n\n self.modelBuilder.doVar('cZ[0,-1,1]') \n self.modelBuilder.doVar(\"cZZ[0,-2,2]\") \n self.modelBuilder.doVar(\"cZZt[0,-2,2]\") \n self.modelBuilder.doVar(\"cZB[0,-6,6]\") \n\n poi='cZ,cZZ,cZZt,cZB'\n\n # Amplitude couplings from EFT couplings \n\n self.modelBuilder.doVar('expr::a1(\"@0+1\",cZ)') # (\"2*(@0+1)\",cZ) in AN/Paper but a1 = 1 for signal model and width calculation\n self.modelBuilder.doVar('expr::a2(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZ,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::a3(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZt,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::k1(\"@0*(@1*pow(@2,2)/(pow(@3,2)*pow(@4,2)))\",cZB,e2,Lambda1,sinW,mZ)')\n self.modelBuilder.doVar('expr::k1L1(\"@0/pow(@1,2)\",k1,Lambda1)')\n\n ###### gamma_H ########\n\n # SMEFT relationships for VV couplings (Expressed using amplitude couplings)\n\n self.modelBuilder.doVar('expr::kappa(\"1.0\",)')\n self.modelBuilder.doVar('expr::kappa_tilde(\"0.0\",)') \n\n self.modelBuilder.doVar('expr::a1_WW(\"@0\",a1)')\n self.modelBuilder.doVar('expr::a2_WW(\"@0*@0*@1\",cosW,a2)')\n self.modelBuilder.doVar('expr::a3_WW(\"@0*@0*@1\",cosW,a3)')\n self.modelBuilder.doVar('expr::k1_WW(\"(@2 / (@0*@0 - @1*@1) - 2*@1*@1*@3*@4*@4 /(@5*@5*(@0*@0 - @1*@1)))\",cosW,sinW,k1,a2,Lambda1,mZ)')\n self.modelBuilder.doVar('expr::k2_k1(\"2*@0*@1*@2/(@0*@0 - @1*@1)\",cosW,sinW,k1)')\n self.modelBuilder.doVar('expr::k2_a2(\"-2*@0*@1*@3*@4*@4/((@2*@2)*(@0*@0 - @1*@1))\",cosW,sinW,mZ,a2,Lambda1)')\n self.modelBuilder.doVar('expr::k2(\"@0 + @1\",k2_k1,k2_a2)')\n\n # Determine gamma_H from VV couplings\n\n zz_expr = '\"4*(@0*@0/4. + 0.1695*@3*@3 + 0.09076*@1*@1 + 0.03809*@2*@2 + 0.8095*@0*@3/2. + 0.5046*@0*@1/2. + 0.2092*@1*@3 + 0.1023*@4*@4 + 0.1901*@0*@4/2. + 0.07429*@3*@4 + 0.04710*@1*@4) \",a1,a2,a3,k1,k2'\n ww_expr = '\"4*(@0*@0/4. + 0.1320*@3*@3 + 0.1944*@1*@1 + 0.08075*@2*@2 + 0.7204*@0*@3/2. + 0.7437*@0*@1/2. + 0.2774*@3*@1) \",a1_WW,a2_WW,a3_WW,k1_WW'\n zgamma_expr = '\"4*(1.118600*@0*@0/4. +0.0035*@1*@1 - 0.125010*@0*@1/2. + 0.000003*@1*@1 - 0.00018*@1*@1 + 0.003100*@0*@1/2. +0.00126*@2*@2 + 0.000005*@2*@2 -0.00047*@2*@2)\",a1_WW,kappa,kappa_tilde'\n gg_expr = '\"(1.1068*@0*@0 + 0.0082*@0*@0 - 0.1150*@0*@0 + 2.5717*@1*@1 + 0.0091*@1*@1 - 0.1982*@1*@1)\",kappa,kappa_tilde'\n bb_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n cc_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n tautau_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n mumu_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n gmgm_expr = '\"4*(1.6054*@0*@0/4. + 0.07312*@1*@1 - 0.6854*@0*@1/2. + 0.00002*@1*@1 - 0.0018*@1*@1 + 0.0085*@0*@1/2. 
+ 0.1699*@2*@2 + 0.00002*@2*@2 - 0.0031*@2*@2)\",a1_WW,kappa,kappa_tilde'\n \n self.modelBuilder.doVar('expr::R_WW('+str(ww_expr)+')')\n self.modelBuilder.doVar('expr::R_ZZ('+str(zz_expr)+')')\n self.modelBuilder.doVar('expr::R_Zgamma('+str(zgamma_expr)+')')\n self.modelBuilder.doVar('expr::R_gg('+str(gg_expr)+')')\n self.modelBuilder.doVar('expr::R_bb('+str(bb_expr)+')')\n self.modelBuilder.doVar('expr::R_cc('+str(cc_expr)+')')\n self.modelBuilder.doVar('expr::R_tautau('+str(tautau_expr)+')')\n self.modelBuilder.doVar('expr::R_mumu('+str(mumu_expr)+')')\n self.modelBuilder.doVar('expr:R_gammagamma('+str(gmgm_expr)+')')\n\n self.modelBuilder.doVar('expr::gammaH(\"(0.5824*@0 + 0.2137*@1 + 0.08187*@2 + 0.06272*@3 + 0.02891*@4 + 0.02619*@5 + 0.002270*@6 + 0.001533*@7 + 0.0002176*@8 )/0.9998\",R_bb,R_WW,R_gg,R_tautau,R_cc,R_ZZ,R_gammagamma,R_Zgamma,R_mumu)') \n\n ###########################\n\n self.g1V = GetCoupTerms(1,1,1,-0.0001,\"1V\") # Compensate for scaling of k1 templates \n self.g2V = GetCoupTerms(1,1,1,-0.0001,\"2V\") \n \n self.modelBuilder.doVar(\"expr::g2V_1(\\\"\"+str(self.g2V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1(\\\"((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1_Neg(\\\"-1*((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.doVar(\"expr::g2V_2(\\\"\"+str(self.g2V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2(\\\"((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2_Neg(\\\"-1*((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.doVar(\"expr::g2V_3(\\\"\"+str(self.g2V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3(\\\"((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3_Neg(\\\"-1*((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.doVar(\"expr::g2V_4(\\\"\"+str(self.g2V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4(\\\"((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4_Neg(\\\"-1*((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.doVar(\"expr::g2V_5(\\\"\"+str(self.g2V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5(\\\"((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5_Neg(\\\"-1*((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.doVar(\"expr::g2V_6(\\\"\"+str(self.g2V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6(\\\"((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6_Neg(\\\"-1*((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.doVar(\"expr::g2V_7(\\\"\"+str(self.g2V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7(\\\"((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7_Neg(\\\"-1*((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.doVar(\"expr::g2V_8(\\\"\"+str(self.g2V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T8(\\\"((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T8_Neg(\\\"-1*((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n self.modelBuilder.doVar(\"expr::g2V_9(\\\"\"+str(self.g2V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9(\\\"((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9_Neg(\\\"-1*((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.doVar(\"expr::g2V_10(\\\"\"+str(self.g2V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10(\\\"((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10_Neg(\\\"-1*((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.doVar(\"expr::g2V_11(\\\"\"+str(self.g2V[10])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11(\\\"((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11_Neg(\\\"-1*((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.doVar(\"expr::g2V_12(\\\"\"+str(self.g2V[11])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12(\\\"((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12_Neg(\\\"-1*((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.doVar(\"expr::g2V_13(\\\"\"+str(self.g2V[12])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13(\\\"((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13_Neg(\\\"-1*((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.doVar(\"expr::g2V_14(\\\"\"+str(self.g2V[13])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14(\\\"((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14_Neg(\\\"-1*((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.doVar(\"expr::g2V_15(\\\"\"+str(self.g2V[14])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15(\\\"((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15_Neg(\\\"-1*((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.doVar(\"expr::g2V_16(\\\"\"+str(self.g2V[15])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16(\\\"((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16_Neg(\\\"-1*((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.doVar(\"expr::g2V_17(\\\"\"+str(self.g2V[16])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17(\\\"((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17_Neg(\\\"-1*((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.doVar(\"expr::g2V_18(\\\"\"+str(self.g2V[17])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18(\\\"((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18_Neg(\\\"-1*((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.doVar(\"expr::g2V_19(\\\"\"+str(self.g2V[18])+\"\\\",)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T19(\\\"((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T19_Neg(\\\"-1*((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.doVar(\"expr::g2V_20(\\\"\"+str(self.g2V[19])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20(\\\"((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20_Neg(\\\"-1*((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.doVar(\"expr::g2V_21(\\\"\"+str(self.g2V[20])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21(\\\"((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21_Neg(\\\"-1*((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.doVar(\"expr::g2V_22(\\\"\"+str(self.g2V[21])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22(\\\"((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22_Neg(\\\"-1*((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.doVar(\"expr::g2V_23(\\\"\"+str(self.g2V[22])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23(\\\"((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23_Neg(\\\"-1*((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.doVar(\"expr::g2V_24(\\\"\"+str(self.g2V[23])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24(\\\"((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24_Neg(\\\"-1*((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.doVar(\"expr::g2V_25(\\\"\"+str(self.g2V[24])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25(\\\"((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25_Neg(\\\"-1*((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.doVar(\"expr::g2V_26(\\\"\"+str(self.g2V[25])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26(\\\"((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26_Neg(\\\"-1*((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.doVar(\"expr::g2V_27(\\\"\"+str(self.g2V[26])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27(\\\"((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27_Neg(\\\"-1*((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.doVar(\"expr::g2V_28(\\\"\"+str(self.g2V[27])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28(\\\"((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28_Neg(\\\"-1*((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.doVar(\"expr::g2V_29(\\\"\"+str(self.g2V[28])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29(\\\"((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29_Neg(\\\"-1*((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, 
a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.doVar(\"expr::g2V_30(\\\"\"+str(self.g2V[29])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30(\\\"((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30_Neg(\\\"-1*((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.doVar(\"expr::g2V_31(\\\"\"+str(self.g2V[30])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31(\\\"((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31_Neg(\\\"-1*((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.doVar(\"expr::g2V_32(\\\"\"+str(self.g2V[31])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32(\\\"((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32_Neg(\\\"-1*((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.doVar(\"expr::g2V_33(\\\"\"+str(self.g2V[32])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33(\\\"((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33_Neg(\\\"-1*((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.doVar(\"expr::g2V_34(\\\"\"+str(self.g2V[33])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34(\\\"((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34_Neg(\\\"-1*((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.doVar(\"expr::g2V_35(\\\"\"+str(self.g2V[34])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35(\\\"((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35_Neg(\\\"-1*((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n \n self.modelBuilder.doVar(\"expr::g1V_1(\\\"\"+str(self.g1V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1(\\\"((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1_Neg(\\\"-1*((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.doVar(\"expr::g1V_2(\\\"\"+str(self.g1V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2(\\\"((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2_Neg(\\\"-1*((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.doVar(\"expr::g1V_3(\\\"\"+str(self.g1V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3(\\\"((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3_Neg(\\\"-1*((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.doVar(\"expr::g1V_4(\\\"\"+str(self.g1V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4(\\\"((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4_Neg(\\\"-1*((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.doVar(\"expr::g1V_5(\\\"\"+str(self.g1V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T5(\\\"((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n 
self.modelBuilder.factory_(\"expr::scale_ggH_T5_Neg(\\\"-1*((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n self.modelBuilder.doVar(\"expr::g1V_6(\\\"\"+str(self.g1V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6(\\\"((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6_Neg(\\\"-1*((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.doVar(\"expr::g1V_7(\\\"\"+str(self.g1V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7(\\\"((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7_Neg(\\\"-1*((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.doVar(\"expr::g1V_8(\\\"\"+str(self.g1V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8(\\\"((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8_Neg(\\\"-1*((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.doVar(\"expr::g1V_9(\\\"\"+str(self.g1V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9(\\\"((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9_Neg(\\\"-1*((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.doVar(\"expr::g1V_10(\\\"\"+str(self.g1V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10(\\\"((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10_Neg(\\\"-1*((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n \n self.modelBuilder.doSet(\"POI\",poi)", "def test_PerfectModelEnsemble_smooth_carries_lead_attrs(\r\n perfectModelEnsemble_initialized_control_1d_ym_cftime,\r\n):\r\n pm = perfectModelEnsemble_initialized_control_1d_ym_cftime\r\n pm_smooth = pm.smooth({\"lead\": 4}, how=\"mean\")\r\n assert (\r\n pm_smooth.verify(metric=\"rmse\", comparison=\"m2e\", dim=\"init\").lead.attrs[\r\n \"units\"\r\n ]\r\n == \"years\"\r\n )", "def analyze(self,event):\n print \"\\n%s event %s %s\"%('-'*10,event.event,'-'*68)\n self.nevents += 1\n leptonic = False\n particles = Collection(event,'GenPart')\n #particles = Collection(event,'LHEPart')\n seeds = [ ] # seeds for decay chain\n chain = { } # decay chain\n print \" \\033[4m%7s %8s %8s %8s %8s %8s %8s %8s %9s %10s \\033[0m\"%(\n \"index\",\"pdgId\",\"moth\",\"mothid\",\"dR\",\"pt\",\"eta\",\"status\",\"prompt\",\"last copy\")\n for i, particle in enumerate(particles):\n mothidx = particle.genPartIdxMother\n if 0<=mothidx<len(particles):\n moth = particles[mothidx]\n mothpid = moth.pdgId\n mothdR = min(9999,particle.DeltaR(moth)) #particle.p4().DeltaR(moth.p4())\n else:\n mothpid = -1\n mothdR = -1\n eta = max(-9999,min(9999,particle.eta))\n prompt = hasbit(particle.statusFlags,0)\n lastcopy = hasbit(particle.statusFlags,13)\n print \" %7d %8d %8d %8d %8.2f %8.2f %8.2f %8d %9s %10s\"%(\n i,particle.pdgId,mothidx,mothpid,mothdR,particle.pt,eta,particle.status,prompt,lastcopy)\n if abs(particle.pdgId) in [11,13,15]:\n leptonic = True\n if mothidx in chain: # add to decay chain\n chain[mothidx].append(i)\n chain[i] = [ ] # daughters\n elif abs(particle.pdgId) in self.seedpids: # save as decay chain seed\n seeds.append(i)\n chain[i] = [ ] # daughters\n if leptonic:\n self.nleptons += 1\n print parsechain(particles,seeds,chain) # print decay chain", "def analyze(self, event):\n ##### set variables ####\n 
self.nElectrons = 0\n self.nMuons = 0\n self.nTaus = 0\n self.nFatJets = 0\n self.EventWeight = 1.\n self.TopWeight = 1.\n self.BTagAK8Weight = 1.\n self.BTagAK4Weight = 1.\n self.BTagAK8Weight_deep = 1.\n self.BTagAK8Weight_deep_up = 1.\n self.BTagAK8Weight_deep_down = 1.\n self.BTagAK4Weight_deep = 1.\n self.BTagAK4Weight_deep_up = 1.\n self.BTagAK4Weight_deep_down = 1.\n self.BBTagWeight = 1.\n self.GenWeight = 1.\n self.PUWeight = 1.\n self.LeptonWeight = 1.\n self.LeptonWeightUp = 1.\n self.LeptonWeightDown = 1.\n self.TriggerWeight = 1.\n self.TriggerWeightUp = 1.\n self.TriggerWeightDown = 1.\n self.isZtoMM = False\n self.isZtoEE = False\n self.isZtoNN = False\n self.isTtoEM = False\n self.isBoosted4B = False\n self.isHtobb = False\n self.isHtobb_ml = False\n self.isMaxBTag_loose = False\n self.isMaxBTag_medium = False\n self.isMaxBTag_tight = False\n self.isVBF = False\n self.is2016 = False\n self.is2017 = False\n self.is2018 = False\n self.nTaus = 0\n self.nJetsNoFatJet = 0\n self.H_partonflavour = -1.\n self.H_hadronflavour = -1.\n self.DPhi = -1.\n self.VHDEta = -1.\n self.MinJetMetDPhi = 10.\n self.MaxJetNoFatJetBTag = -1.\n self.BtagDeepB = -1.\n self.DeepTagMD_H4qvsQCD = -1.\n self.DeepTagMD_HbbvsQCD = -1.\n self.DeepTagMD_ZHbbvsQCD = -1.\n self.DeepTagMD_ZbbvsQCD = -1.\n self.DeepTagMD_bbvsLight = -1.\n self.DeepTagMD_WvsQCD = -1.\n self.DeepTagMD_ZvsQCD = -1.\n self.Mu1_pt = -1.\n self.Mu1_eta = -1.\n self.Mu1_phi = -1.\n self.Mu1_mass = -1.\n self.Mu1_pfIsoId = -1.\n self.Mu1_relIso = -1.\n self.Mu1_highPtId = -1.\n self.Mu2_pt = -1.\n self.Mu2_eta = -1.\n self.Mu2_phi = -1.\n self.Mu2_mass = -1.\n self.Mu2_pfIsoId = -1.\n self.Mu2_relIso = -1.\n self.Mu2_highPtId = -1.\n self.Ele1_pt = -1.\n self.Ele1_eta = -1.\n self.Ele1_phi = -1.\n self.Ele1_mass = -1.\n self.Ele2_pt = -1.\n self.Ele2_eta = -1.\n self.Ele2_phi = -1.\n self.Ele2_mass = -1.\n self.Ele_HEM15_16 = -1.\n self.HT_HEM15_16 = -1.\n self.HT = 0.\n self.LHEScaleWeight = -1.\n self.LHEPdfWeight = -1.\n self.LHEWeight_originalXWGTUP = -1.\n self.PrefireWeight = 1.\n self.PrefireWeightUp = 1.\n self.PrefireWeightDown = 1.\n self.QCDNLO_Corr = 1.\n self.QCDNNLO_Corr = 1.\n self.EWKNLO_Corr = 1.\n self.Jet1_VBF_pt = -1.\n self.Jet1_VBF_eta = -1.\n self.Jet1_VBF_phi = -1.\n self.Jet1_VBF_mass = -1.\n self.Jet2_VBF_pt = -1.\n self.Jet2_VBF_eta = -1.\n self.Jet2_VBF_phi = -1.\n self.Jet2_VBF_mass = -1.\n self.dijet_VBF_mass = -1.\n self.deltaR_VBF = -1.\n self.deltaR_HVBFjet1 = -1.\n self.deltaR_HVBFjet2 = -1.\n self.H_pt = -1.\n self.H_eta = -1.\n self.H_phi = -1.\n self.H_mass = -1.\n self.H_M = -1.\n self.H_tau21 = -1.\n self.H_tau41 = -1.\n self.H_tau42 = -1.\n self.H_tau31 = -1.\n self.H_tau32 = -1.\n self.H_ddt = -1.\n self.H_csv1 = -1.\n self.H_csv2 = -1.\n self.H_deepcsv1 = -1.\n self.H_deepcsv2 = -1.\n self.H_dbt = -1.\n self.H_chf = -1.\n self.H_nhf = -1.\n self.V_pt = -1.\n self.V_eta = -1.\n self.V_phi = -1.\n self.V_mass = -1.\n self.VH_deltaR = -1.\n self.X_pt = -1.\n self.X_eta = -1.\n self.X_phi = -1.\n self.X_mass = -1.\n self.X_mass_chs = -1.\n self.X_mass_nom = -1.\n self.X_mass_jesUp = -1.\n self.X_mass_jesDown = -1.\n self.X_mass_jerUp = -1.\n self.X_mass_jerDown = -1.\n self.X_mass_MET_nom = -1.\n self.X_mass_MET_jesUp = -1.\n self.X_mass_MET_jesDown = -1.\n self.X_mass_MET_jerUp = -1.\n self.X_mass_MET_jerDown = -1.\n self.H_mass_nom = -1.\n self.H_mass_jmsUp = -1.\n self.H_mass_jmsDown = -1.\n self.H_mass_jmrUp = -1.\n self.H_mass_jmrDown = -1.\n\n \n \n eecutflow_list = []\n mmcutflow_list = 
[]\n nncutflow_list = []\n\n idx_electrons = []\n idx_loose_electrons = []\n idx_muons = []\n idx_loose_muons = []\n idx_fatjet = []\n idx_jet = []\n idx_jet_vbf = []\n\n electrons_tlv_list = []\n loose_electrons_tlv_list = []\n muons_tlv_list = []\n loose_muons_tlv_list = []\n fatjet_tlv_list = []\n jet_tlv_list = []\n jet_tlv_list_vbf = []\n fatjet_tau21_list = []\n fatjet_tau41_list = []\n fatjet_tau42_list = []\n fatjet_tau31_list = []\n fatjet_tau32_list = []\n\n V = ROOT.TLorentzVector()\n H = ROOT.TLorentzVector()\n X = ROOT.TLorentzVector()\n\n V_chs = ROOT.TLorentzVector()\n ######### cuts #########\n elec1_pt_cut = 55.\n elec2_pt_cut = 20.\n elec_pt_cut = 10.\n elec_eta_cut = 2.5\n muon1_pt_cut = 55.\n muon2_pt_cut = 20. \n muon_pt_cut = 10.\n muon_eta_cut = 2.4\n tau_pt_cut = 18.\n tau_eta_cut = 2.3\n ak4_pt_cut = 30.\n ak4_eta_cut = 2.4\n fatjet_pt_cut = 200.\n fatjet_eta_cut = 2.4\n met_pt_cut = 250.\n v_pt_cut = 200.\n tau21_lowercut = 0.35\n tau21_uppercut = 0.75\n j_mass_lowercut = 30.\n j_mass_uppercut = 250.\n v_mass_lowercut = 65.\n v_mass_intercut = 85.\n v_mass_uppercut = 105.\n h_mass_lowercut = 105.\n h_mass_uppercut = 135.\n x_mass_lowercut = 750.\n xt_mass_lowercut = 650.\n xjj_mass_lowercut = 950.\n \n #### flag for year #######\n if self.year == 2016:\n self.is2016 = True\n elif self.year == 2017:\n self.is2017 = True\n elif self.year == 2018:\n self.is2018 = True\n \n \n ######### triggers #########\n if self.year == 2016:\n try:\n trigger_SingleMu = any([event.HLT_Mu50,\n event.HLT_TkMu50])\n except:\n trigger_SingleMu = event.HLT_Mu50\n trigger_SingleEle = event.HLT_Ele115_CaloIdVT_GsfTrkIdT\n trigger_SingleIsoEle = event.HLT_Ele27_WPTight_Gsf\n trigger_SinglePhoton = event.HLT_Photon175\n trigger_METMHTNoMu = any([event.HLT_PFMETNoMu110_PFMHTNoMu110_IDTight,\n event.HLT_PFMETNoMu120_PFMHTNoMu120_IDTight,\n event.HLT_MonoCentralPFJet80_PFMETNoMu120_PFMHTNoMu120_IDTight])\n trigger_METMHT = any([event.HLT_PFMET110_PFMHT110_IDTight, \n event.HLT_PFMET120_PFMHT120_IDTight])\n trigger_MET = any([event.HLT_PFMET170_NotCleaned,\n event.HLT_PFMET170_HBHECleaned])\n elif self.year == 2017:\n try:\n trigger_SingleMu = any([event.HLT_Mu50,\n event.HLT_TkMu100,\n event.HLT_OldMu100])\n except:\n trigger_SingleMu = event.HLT_Mu50\n try:\n trigger_SingleEle = event.HLT_Ele115_CaloIdVT_GsfTrkIdT\n except:\n trigger_SingleEle = None\n trigger_SingleIsoEle = event.HLT_Ele35_WPTight_Gsf\n trigger_SinglePhoton = event.HLT_Photon200\n try:\n trigger_METMHTNoMu = any([event.HLT_PFMETNoMu110_PFMHTNoMu110_IDTight,\n event.HLT_PFMETNoMu120_PFMHTNoMu120_IDTight,\n event.HLT_PFMETNoMu130_PFMHTNoMu130_IDTight,\n event.HLT_PFMETNoMu140_PFMHTNoMu140_IDTight,\n event.HLT_MonoCentralPFJet80_PFMETNoMu120_PFMHTNoMu120_IDTight])\n except:\n trigger_METMHTNoMu = any([event.HLT_PFMETNoMu110_PFMHTNoMu110_IDTight,\n event.HLT_PFMETNoMu120_PFMHTNoMu120_IDTight,\n event.HLT_MonoCentralPFJet80_PFMETNoMu120_PFMHTNoMu120_IDTight])\n trigger_METMHT = any([event.HLT_PFMET110_PFMHT110_IDTight, \n event.HLT_PFMET120_PFMHT120_IDTight,\n event.HLT_PFMET130_PFMHT130_IDTight, \n event.HLT_PFMET140_PFMHT140_IDTight,\n event.HLT_PFMETTypeOne110_PFMHT110_IDTight,\n event.HLT_PFMETTypeOne120_PFMHT120_IDTight,\n event.HLT_PFMETTypeOne130_PFMHT130_IDTight,\n event.HLT_PFMETTypeOne140_PFMHT140_IDTight])\n try:\n trigger_MET = any([event.HLT_PFMET200_NotCleaned,\n event.HLT_PFMET200_HBHECleaned,\n event.HLT_PFMET200_HBHE_BeamHaloCleaned,\n event.HLT_PFMET250_HBHECleaned])\n except:\n trigger_MET = None\n\n elif 
self.year == 2018:\n trigger_SingleMu = any([event.HLT_Mu50,\n event.HLT_TkMu100,\n event.HLT_OldMu100])\n trigger_SingleEle = event.HLT_Ele115_CaloIdVT_GsfTrkIdT\n trigger_SingleIsoEle = event.HLT_Ele32_WPTight_Gsf\n trigger_SinglePhoton = event.HLT_Photon200\n trigger_METMHTNoMu = any([event.HLT_PFMETNoMu110_PFMHTNoMu110_IDTight,\n event.HLT_PFMETNoMu120_PFMHTNoMu120_IDTight,\n event.HLT_PFMETNoMu130_PFMHTNoMu130_IDTight,\n event.HLT_PFMETNoMu140_PFMHTNoMu140_IDTight,\n event.HLT_MonoCentralPFJet80_PFMETNoMu120_PFMHTNoMu120_IDTight])\n trigger_METMHT = any([event.HLT_PFMET110_PFMHT110_IDTight, \n event.HLT_PFMET120_PFMHT120_IDTight,\n event.HLT_PFMET130_PFMHT130_IDTight, \n event.HLT_PFMET140_PFMHT140_IDTight,\n event.HLT_PFMETTypeOne110_PFMHT110_IDTight,\n event.HLT_PFMETTypeOne120_PFMHT120_IDTight,\n event.HLT_PFMETTypeOne130_PFMHT130_IDTight,\n event.HLT_PFMETTypeOne140_PFMHT140_IDTight])\n trigger_MET = any([event.HLT_PFMET200_NotCleaned,\n event.HLT_PFMET200_HBHECleaned,\n event.HLT_PFMET200_HBHE_BeamHaloCleaned,\n event.HLT_PFMET250_HBHECleaned])\n ########## Gen Weight #########\n if self.isMC:\n self.GenWeight = -1. if event.genWeight < 0 else 1.\n self.PUWeight = self.puTool.getWeight(event.Pileup_nTrueInt)\n self.EventWeight *= self.GenWeight\n self.EventWeight *= self.PUWeight\n for i,weight in enumerate(event.LHEScaleWeight):\n self.out.LHEScaleWeight_hist.Fill(i,weight)\n for j,weight in enumerate(event.LHEPdfWeight):\n self.out.LHEPdfWeight_hist.Fill(j,weight)\n self.LHEScaleWeight = event.LHEScaleWeight\n self.LHEPdfWeight = event.LHEPdfWeight\n self.LHEWeight_originalXWGTUP = event.LHEWeight_originalXWGTUP\n self.out.events.Fill(0.,self.GenWeight)\n self.out.original.Fill(0.,event.LHEWeight_originalXWGTUP)\n if self.year == 2016 or self.year == 2017:\n self.PrefireWeight = event.PrefireWeight\n self.PrefireWeightUp = event.PrefireWeight_Up\n self.PrefireWeightDown = event.PrefireWeight_Down\n \n if self.isData and event.PV_npvs == 0:\n return False\n if not self.isData:\n self.out.pileup.Fill(event.Pileup_nTrueInt)\n if event.Pileup_nTrueInt == 0:\n return False\n ########### FatJet #########\n for ifatjet in range(event.nFatJet):\n fatjet_pt = event.FatJet_pt[ifatjet]\n fatjet_eta = event.FatJet_eta[ifatjet]\n fatjet_phi = event.FatJet_phi[ifatjet]\n fatjet_mass = event.FatJet_mass[ifatjet]\n fatjet_jetid = event.FatJet_jetId[ifatjet]\n fatjet_tlv = ROOT.TLorentzVector()\n fatjet_tlv.SetPtEtaPhiM(fatjet_pt, fatjet_eta, fatjet_phi, fatjet_mass)\n if fatjet_pt > fatjet_pt_cut and abs(fatjet_eta) < fatjet_eta_cut:\n fatjet_tlv_list.append(fatjet_tlv)\n idx_fatjet.append(ifatjet)\n if event.FatJet_tau1[ifatjet]==0:\n fatjet_tau21_list.append(0)\n fatjet_tau41_list.append(0)\n fatjet_tau31_list.append(0)\n else:\n fatjet_tau21_list.append(event.FatJet_tau2[ifatjet]/event.FatJet_tau1[ifatjet])\n fatjet_tau41_list.append(event.FatJet_tau4[ifatjet]/event.FatJet_tau1[ifatjet])\n fatjet_tau31_list.append(event.FatJet_tau3[ifatjet]/event.FatJet_tau1[ifatjet])\n if event.FatJet_tau2[ifatjet]==0:\n fatjet_tau42_list.append(0)\n fatjet_tau32_list.append(0)\n else:\n fatjet_tau42_list.append(event.FatJet_tau4[ifatjet]/event.FatJet_tau2[ifatjet])\n fatjet_tau32_list.append(event.FatJet_tau3[ifatjet]/event.FatJet_tau2[ifatjet])\n self.nFatJets = len(fatjet_tlv_list)\n #stop if no suitable Fatjet\n if len(fatjet_tlv_list) == 0:\n return False \n ########### electrons ##########\n for ielectron in range(event.nElectron):\n electron_pt = event.Electron_pt[ielectron]\n electron_eta = 
event.Electron_eta[ielectron]\n electron_phi = event.Electron_phi[ielectron]\n electron_mass = event.Electron_mass[ielectron]\n electron_tlv = ROOT.TLorentzVector()\n electron_tlv.SetPtEtaPhiM(electron_pt,electron_eta,electron_phi,electron_mass)\n if electron_eta > -2.5 and electron_eta < -1.479 and electron_phi > -1.55 and electron_phi < -0.9:\n if self.Ele_HEM15_16 == -1.:\n self.Ele_HEM15_16 = 0.\n self.Ele_HEM15_16 += electron_pt\n if electron_pt > elec_pt_cut and abs(electron_eta) < elec_eta_cut:\n idx_electrons.append(ielectron)\n electrons_tlv_list.append(electron_tlv)\n if event.Electron_cutBased[ielectron] >= 2:\n idx_loose_electrons.append(ielectron)\n loose_electrons_tlv_list.append(electron_tlv)\n self.nElectrons = len(loose_electrons_tlv_list)\n \n ########### muons #########\n for imuon in range(event.nMuon):\n muon_pt = event.Muon_pt[imuon]\n muon_eta = event.Muon_eta[imuon]\n muon_phi = event.Muon_phi[imuon]\n muon_mass = event.Muon_mass[imuon]\n muon_tlv = ROOT.TLorentzVector()\n muon_tlv.SetPtEtaPhiM(muon_pt, muon_eta, muon_phi, muon_mass)\n if muon_pt > muon_pt_cut and abs(muon_eta) < muon_eta_cut:\n idx_muons.append(imuon)\n muons_tlv_list.append(muon_tlv)\n if event.Muon_isPFcand[imuon] and struct.unpack('B',event.Muon_pfIsoId[imuon])[0]>=2 and (event.Muon_isGlobal[imuon] or event.Muon_isTracker[imuon]):\n idx_loose_muons.append(imuon)\n loose_muons_tlv_list.append(muon_tlv)\n self.nMuons = len(loose_muons_tlv_list)\n\n\n ############ taus #########\n for itau in range(event.nTau):\n tau_pt = event.Tau_pt[itau]\n tau_eta = event.Tau_eta[itau]\n tau_phi = event.Tau_phi[itau]\n tau_mass = event.Tau_mass[itau]\n tau_tlv = ROOT.TLorentzVector()\n tau_tlv.SetPtEtaPhiM(tau_pt, tau_eta, tau_phi, tau_mass)\n if tau_pt > tau_pt_cut and abs(tau_eta) < tau_eta_cut:\n cleanTau = True\n for loose_electrons_tlv in loose_electrons_tlv_list:\n if loose_electrons_tlv.DeltaR(tau_tlv) < 0.4:\n cleanTau = False\n for loose_muons_tlv in loose_muons_tlv_list:\n if loose_muons_tlv.DeltaR(tau_tlv) < 0.4:\n cleanTau = False\n if cleanTau:\n self.nTaus += 1\n\n ############ MET ##########\n METx = 0.\n METy = 0.\n MET_tlv = ROOT.TLorentzVector()\n MET_tlv.SetPtEtaPhiE(event.PuppiMET_pt,0.,event.PuppiMET_phi, event.PuppiMET_pt)\n \n ############ TTbar pT reweighting ########\n if self.isMC and 'TT' in self.sample[0]:\n Top1_pt, Top2_pt = getTTPt(event)\n self.TopWeight = getTTptWeight(Top1_pt, Top2_pt)\n\n ############ ZtoEE ############\n self.out.eecutflow.Fill(0.,self.EventWeight)\n eecutflow_list.append(self.EventWeight)\n maxZpt = -1.\n Z_pt = -1.\n Z_m = -1.\n goodelectronpair = False\n for i in idx_electrons:\n for j in idx_electrons:\n if i==j or event.Electron_charge[i] == event.Electron_charge[j]:\n continue\n eli_tlv = ROOT.TLorentzVector()\n eli_tlv.SetPtEtaPhiM(event.Electron_pt[i],event.Electron_eta[i],event.Electron_phi[i],event.Electron_mass[i])\n eli_v = ROOT.TVector3()\n eli_v.SetPtEtaPhi(event.Electron_pt[i],event.Electron_eta[i],event.Electron_phi[i])\n elj_tlv = ROOT.TLorentzVector()\n elj_tlv.SetPtEtaPhiM(event.Electron_pt[j],event.Electron_eta[j],event.Electron_phi[j],event.Electron_mass[j])\n elj_v = ROOT.TVector3()\n elj_v.SetPtEtaPhi(event.Electron_pt[j],event.Electron_eta[j],event.Electron_phi[j])\n diel = eli_tlv + elj_tlv\n Z_pt = diel.Pt()\n Z_m = diel.M()\n if Z_m > 70. and Z_m < 110. 
and Z_pt > maxZpt:\n maxZpt = Z_pt\n if eli_tlv.Pt() > elj_tlv.Pt():\n el1 = i\n el2 = j\n el1_tlv = eli_tlv\n el2_tlv = elj_tlv\n el1_v = eli_v\n el2_v = elj_v\n else:\n el1 = j\n el2 = i\n el1_tlv = elj_tlv\n el2_tlv = eli_tlv\n el1_v = elj_v\n el2_v = eli_v\n goodelectronpair = True\n \n \n if goodelectronpair:\n self.out.eecutflow.Fill(1.,self.EventWeight)\n eecutflow_list.append(self.EventWeight)\n if el1_tlv.Pt() > elec1_pt_cut and el2_tlv.Pt() > elec2_pt_cut:\n self.out.eecutflow.Fill(2.,self.EventWeight)\n eecutflow_list.append(self.EventWeight)\n if event.Electron_cutBased[el1] >= 2 and event.Electron_cutBased[el2] >= 2:\n self.out.eecutflow.Fill(3.,self.EventWeight)\n eecutflow_list.append(self.EventWeight)\n if maxZpt > v_pt_cut:\n self.out.eecutflow.Fill(4.,self.EventWeight)\n eecutflow_list.append(self.EventWeight)\n if trigger_SingleEle == None:\n if not trigger_SingleIsoEle and not trigger_SinglePhoton:\n print \"ZtoEE trigger inconsistency\"\n return False\n else:\n if not trigger_SingleEle and not trigger_SingleIsoEle and not trigger_SinglePhoton:\n print \"ZtoEE trigger inconsistency\"\n return False\n #if not self.isMC and (\"SinglePhoton\" in self.sample[0] and (trigger_SingleEle or trigger_SingleIsoEle)):\n # print \"ZtoEE double counting\"\n # return False\n self.out.eecutflow.Fill(5.,self.EventWeight)\n eecutflow_list.append(self.EventWeight)\n if self.isMC:\n eltrig_tlv = el1_tlv\n #for i in range(event.nTrigObj):\n # if event.TrigObj_id[i] ==11:\n # trigobj_v = ROOT.TVector3()\n # trigobj_v.SetPtEtaPhi(event.TrigObj_pt[i],event.TrigObj_eta[i],event.TrigObj_phi[i])\n # print \"electron TrigObj_filterBits:\",event.TrigObj_filterBits[i]\n # if event.TrigObj_filterBits[i]==14336:\n # #if event.TrigObj_filterBits[i]==1110000000000000:\n # print \"found matching electron\"\n # deltaR1 = trigobj_v.DeltaR(el1_v)\n # deltaR2 = trigobj_v.DeltaR(el2_v)\n # if deltaR2 < deltaR1 and deltaR2 < 0.2:\n # eltrig_tlv = el2_tlv\n # break\n self.TriggerWeight = self.elSFs.getTriggerSF(eltrig_tlv.Pt(),eltrig_tlv.Eta())\n self.TriggerWeightUp = self.elSFs.getTriggerSF(eltrig_tlv.Pt(),eltrig_tlv.Eta()) + self.elSFs.getTriggerSFerror(eltrig_tlv.Pt(),eltrig_tlv.Eta())\n self.TriggerWeightDown = self.elSFs.getTriggerSF(eltrig_tlv.Pt(),eltrig_tlv.Eta()) - self.elSFs.getTriggerSFerror(eltrig_tlv.Pt(),eltrig_tlv.Eta())\n self.LeptonWeight = self.elSFs.getIdIsoSF(el1_tlv.Pt(), el1_tlv.Eta())*self.elSFs.getIdIsoSF(el2_tlv.Pt(),el2_tlv.Eta())\n IdIsoSF1 = self.elSFs.getIdIsoSF(el1_tlv.Pt(), el1_tlv.Eta())\n IdIsoSF2 = self.elSFs.getIdIsoSF(el2_tlv.Pt(),el2_tlv.Eta())\n IdIsoSF1error = self.elSFs.getIdIsoSFerror(el1_tlv.Pt(), el1_tlv.Eta())\n IdIsoSF2error = self.elSFs.getIdIsoSFerror(el2_tlv.Pt(),el2_tlv.Eta())\n \n self.LeptonWeight = IdIsoSF1*IdIsoSF2\n LeptonWeightsigma = np.sqrt((IdIsoSF1error*IdIsoSF2)**2+(IdIsoSF2error*IdIsoSF1)**2)\n self.LeptonWeightUp = self.LeptonWeight + LeptonWeightsigma\n self.LeptonWeightDown = self.LeptonWeight - LeptonWeightsigma\n if 'DYJetsToLL' in self.sample[0] or 'ZJetsToNuNu' in self.sample[0] or 'WJetsToLNu' in self.sample[0]:\n GenVpt = getGenVpt(event)\n self.QCDNLO_Corr = self.DYCorr.getWeightQCDNLO(GenVpt)\n self.QCDNNLO_Corr = self.DYCorr.getWeightQCDNNLO(GenVpt)\n self.EWKNLO_Corr = self.DYCorr.getWeightEWKNLO(GenVpt)\n self.EventWeight *= self.QCDNLO_Corr * self.QCDNNLO_Corr * self.EWKNLO_Corr\n self.EventWeight *= self.TriggerWeight\n self.EventWeight *= self.LeptonWeight\n V = el1_tlv + el2_tlv\n self.Ele1_pt = el1_tlv.Pt()\n self.Ele1_eta = 
el1_tlv.Eta()\n self.Ele1_phi = el1_tlv.Phi()\n self.Ele1_mass = el1_tlv.M()\n self.Ele2_pt = el2_tlv.Pt()\n self.Ele2_eta = el2_tlv.Eta()\n self.Ele2_phi = el2_tlv.Phi()\n self.Ele2_mass = el2_tlv.M()\n self.isZtoEE = True\n\n ########## ZtoMM #############\n self.out.mmcutflow.Fill(0.,self.EventWeight)\n mmcutflow_list.append(self.EventWeight)\n maxZpt = -1.\n Z_pt = -1.\n Z_m = -1.\n goodmuonpair = False\n for i in idx_muons:\n for j in idx_muons:\n if i==j or event.Muon_charge[i] == event.Muon_charge[j]:\n continue\n mui_tlv = ROOT.TLorentzVector()\n mui_tlv.SetPtEtaPhiM(event.Muon_pt[i],event.Muon_eta[i],event.Muon_phi[i],event.Muon_mass[i])\n mui_v = ROOT.TVector3()\n mui_v.SetPtEtaPhi(event.Muon_pt[i],event.Muon_eta[i],event.Muon_phi[i])\n muj_tlv = ROOT.TLorentzVector()\n muj_tlv.SetPtEtaPhiM(event.Muon_pt[j],event.Muon_eta[j],event.Muon_phi[j],event.Muon_mass[j]) \n muj_v = ROOT.TVector3()\n muj_v.SetPtEtaPhi(event.Muon_pt[j],event.Muon_eta[j],event.Muon_phi[j])\n dimu = mui_tlv + muj_tlv\n Z_pt = dimu.Pt()\n Z_m = dimu.M()\n if Z_m > 70. and Z_m < 110. and Z_pt > maxZpt:\n maxZpt = Z_pt\n if mui_tlv.Pt() > muj_tlv.Pt():\n mu1 = i\n mu2 = j\n mu1_tlv = mui_tlv\n mu2_tlv = muj_tlv\n mu1_v = mui_v\n mu2_v = muj_v\n else:\n mu1 = j\n mu2 = i\n mu1_tlv = muj_tlv\n mu2_tlv = mui_tlv\n mu1_v = muj_v\n mu2_v = mui_v\n goodmuonpair = True\n \n\n if goodmuonpair:\n self.out.mmcutflow.Fill(1.,self.EventWeight)\n mmcutflow_list.append(self.EventWeight)\n mu1_highPtId = struct.unpack('B',event.Muon_highPtId[mu1])[0]\n mu2_highPtId = struct.unpack('B',event.Muon_highPtId[mu2])[0] \n if mu1_tlv.Pt() > muon1_pt_cut and mu2_tlv.Pt() > muon2_pt_cut:\n self.out.mmcutflow.Fill(2.,self.EventWeight)\n mmcutflow_list.append(self.EventWeight)\n if (mu1_highPtId >= 2 and mu2_highPtId >= 1) or (mu1_highPtId >= 1 and mu2_highPtId >= 2):\n self.out.mmcutflow.Fill(3.,self.EventWeight)\n mmcutflow_list.append(self.EventWeight)\n if maxZpt > v_pt_cut:\n self.out.mmcutflow.Fill(4.,self.EventWeight)\n mmcutflow_list.append(self.EventWeight)\n if not trigger_SingleMu:\n print \"ZtoMM trigger inconsistency\"\n return False\n self.out.mmcutflow.Fill(5.,self.EventWeight)\n mmcutflow_list.append(self.EventWeight)\n if self.isMC:\n if mu1_highPtId >=2:\n mutrig_tlv = mu1_tlv\n else:\n mutrig_tlv = mu2_tlv\n #for i in range(event.nTrigObj):\n # if event.TrigObj_id[i] ==13:\n # trigobj_v = ROOT.TVector3()\n # trigobj_v.SetPtEtaPhi(event.TrigObj_pt[i],event.TrigObj_eta[i],event.TrigObj_phi[i])\n # deltaR1 = trigobj_v.DeltaR(mu1_v)\n # deltaR2 = trigobj_v.DeltaR(mu2_v)\n # print \"muon TrigObj_filterBits:\",event.TrigObj_filterBits[i]\n # if event.TrigObj_filterBits[i]==2048:\n # #if event.TrigObj_filterBits[i]==10000000000:\n # print \"found matching muon\"\n # if deltaR2 < deltaR1 and deltaR2 < 0.2:\n # mutrig_tlv = mu2_tlv\n # break\n\n self.TriggerWeight = self.muSFs.getTriggerSF(mutrig_tlv.Pt(),mutrig_tlv.Eta())\n self.TriggerWeightUp = self.muSFs.getTriggerSF(mutrig_tlv.Pt(),mutrig_tlv.Eta()) + self.muSFs.getTriggerSFerror(mutrig_tlv.Pt(),mutrig_tlv.Eta())\n self.TriggerWeightDown = self.muSFs.getTriggerSF(mutrig_tlv.Pt(),mutrig_tlv.Eta()) - self.muSFs.getTriggerSFerror(mutrig_tlv.Pt(),mutrig_tlv.Eta())\n IdSF1 = self.muSFs.getIdSF(mu1_tlv.Pt(),mu1_tlv.Eta(),mu1_highPtId)\n IdSF2 = self.muSFs.getIdSF(mu2_tlv.Pt(),mu2_tlv.Eta(),mu2_highPtId)\n IsoSF1 = self.muSFs.getIsoSF(mu1_tlv.Pt(),mu1_tlv.Eta(),mu1_highPtId)\n IsoSF2 = self.muSFs.getIsoSF(mu2_tlv.Pt(),mu2_tlv.Eta(),mu2_highPtId)\n IdSF1error = 
self.muSFs.getIdSFerror(mu1_tlv.Pt(),mu1_tlv.Eta(),mu1_highPtId)\n IdSF2error = self.muSFs.getIdSFerror(mu2_tlv.Pt(),mu2_tlv.Eta(),mu2_highPtId)\n IsoSF1error = self.muSFs.getIsoSFerror(mu1_tlv.Pt(),mu1_tlv.Eta(),mu1_highPtId)\n IsoSF2error = self.muSFs.getIsoSFerror(mu2_tlv.Pt(),mu2_tlv.Eta(),mu2_highPtId)\n self.LeptonWeight = IdSF1*IdSF2*IsoSF1*IsoSF2\n LeptonWeightsigma = np.sqrt((IdSF1error*IdSF2*IsoSF1*IsoSF2)**2+(IdSF2error*IdSF1*IsoSF1*IsoSF2)**2+(IsoSF1error*IdSF1*IdSF2*IsoSF2)**2+(IsoSF2error*IdSF1*IdSF2*IsoSF1)**2)\n self.LeptonWeightUp = self.LeptonWeight + LeptonWeightsigma\n self.LeptonWeightDown = self.LeptonWeight - LeptonWeightsigma\n if 'DYJetsToLL' in self.sample[0] or 'ZJetsToNuNu' in self.sample[0] or 'WJetsToLNu' in self.sample[0]:\n GenVpt = getGenVpt(event)\n self.QCDNLO_Corr = self.DYCorr.getWeightQCDNLO(GenVpt)\n self.QCDNNLO_Corr = self.DYCorr.getWeightQCDNNLO(GenVpt)\n self.EWKNLO_Corr = self.DYCorr.getWeightEWKNLO(GenVpt)\n self.EventWeight *= self.QCDNLO_Corr * self.QCDNNLO_Corr * self.EWKNLO_Corr\n self.EventWeight *= self.TriggerWeight\n self.EventWeight *= self.LeptonWeight\n if mu1_tlv.DeltaR(mu2_tlv) < 0.3:\n try:\n self.Mu1_relIso = ((event.Muon_tkRelIso[mu1]*mu1_tlv.Pt()) - mu2_tlv.Pt())/mu1_tlv.Pt()\n self.Mu2_relIso = ((event.Muon_tkRelIso[mu2]*mu2_tlv.Pt()) - mu1_tlv.Pt())/mu2_tlv.Pt()\n except:\n self.Mu1_relIso = -1.\n self.Mu2_relIso = -1.\n else:\n try:\n self.Mu1_relIso = event.Muon_tkRelIso[mu1]\n self.Mu2_relIso = event.Muon_tkRelIso[mu2]\n except:\n self.Mu1_relIso = -1.\n self.Mu2_relIso = -1.\n V = mu1_tlv + mu2_tlv\n self.Mu1_pt = mu1_tlv.Pt()\n self.Mu1_eta = mu1_tlv.Eta()\n self.Mu1_phi = mu1_tlv.Phi()\n self.Mu1_mass = mu1_tlv.M()\n self.Mu1_pfIsoId = struct.unpack('B',event.Muon_pfIsoId[mu1])[0]\n self.Mu1_highPtId = struct.unpack('B',event.Muon_highPtId[mu1])[0]\n self.Mu2_pt = mu2_tlv.Pt()\n self.Mu2_eta = mu2_tlv.Eta()\n self.Mu2_phi = mu2_tlv.Phi()\n self.Mu2_mass = mu2_tlv.M()\n self.Mu2_pfIsoId = struct.unpack('B',event.Muon_pfIsoId[mu2])[0]\n self.Mu2_highPtId = struct.unpack('B',event.Muon_highPtId[mu2])[0]\n self.isZtoMM = True\n\n \n ########### TtoEM ######### \n if not self.isZtoMM and not self.isZtoEE and self.nElectrons == 1 and self.nMuons == 1:\n if event.Electron_charge[idx_loose_electrons[0]] != event.Muon_charge[idx_loose_muons[0]]:\n el_tlv = loose_electrons_tlv_list[0]\n mu_tlv = loose_muons_tlv_list[0]\n if mu_tlv.Pt() > 30. 
and el_tlv.Pt() > 30.: \n V = mu_tlv + el_tlv\n if V.Pt() > 50.:\n if trigger_SingleEle == None:\n if not trigger_SingleIsoEle:\n print \"TtoEM trigger inconsistency\"\n return False\n else:\n if not trigger_SingleEle and not trigger_SingleIsoEle:\n print \"TtoEM trigger inconsistency\"\n return False\n if self.isMC:\n self.TriggerWeight = self.elSFs.getTriggerSF(el_tlv.Pt(),el_tlv.Eta())\n self.LeptonWeight = self.elSFs.getIdIsoSF(el_tlv.Pt(), el_tlv.Eta())\n if 'DYJetsToLL' in self.sample[0] or 'ZJetsToNuNu' in self.sample[0] or 'WJetsToLNu' in self.sample[0]:\n GenVpt = getGenVpt(event)\n self.QCDNLO_Corr = self.DYCorr.getWeightQCDNLO(GenVpt)\n self.QCDNNLO_Corr = self.DYCorr.getWeightQCDNNLO(GenVpt)\n self.EWKNLO_Corr = self.DYCorr.getWeightEWKNLO(GenVpt)\n self.EventWeight *= self.QCDNLO_Corr * self.QCDNNLO_Corr * self.EWKNLO_Corr\n self.EventWeight *= self.TriggerWeight\n self.EventWeight *= self.LeptonWeight\n self.Mu1_pt = mu_tlv.Pt()\n self.Mu1_eta = mu_tlv.Eta()\n self.Mu1_phi = mu_tlv.Phi()\n self.Mu1_mass = mu_tlv.M()\n self.Ele1_pt = el_tlv.Pt()\n self.Ele1_eta = el_tlv.Eta()\n self.Ele1_phi = el_tlv.Phi()\n self.Ele1_mass = el_tlv.M()\n self.isTtoEM = True\n\n ######### ZtoNN ##########\n self.out.nncutflow.Fill(0.,self.EventWeight)\n nncutflow_list.append(self.EventWeight)\n if not self.isZtoMM and not self.isZtoEE and not self.isTtoEM:\n if event.PuppiMET_pt > met_pt_cut :\n self.out.nncutflow.Fill(1.,self.EventWeight)\n nncutflow_list.append(self.EventWeight)\n if self.nElectrons == 0 and self.nMuons == 0 and self.nTaus == 0:\n self.out.nncutflow.Fill(2.,self.EventWeight)\n nncutflow_list.append(self.EventWeight)\n V.SetPtEtaPhiE(event.PuppiMET_pt,0.,event.PuppiMET_phi,event.PuppiMET_pt)\n V_chs.SetPtEtaPhiE(event.MET_pt,0.,event.MET_phi,event.MET_pt)\n if trigger_MET == None:\n if not self.isMC and not trigger_METMHT and not trigger_METMHTNoMu:\n print \"ZtoNN Trigger inconsistency\"\n return False\n else:\n if not self.isMC and not trigger_MET and not trigger_METMHT and not trigger_METMHTNoMu:\n print \"ZtoNN Trigger inconsistency\"\n return False\n self.out.nncutflow.Fill(3.,self.EventWeight)\n nncutflow_list.append(self.EventWeight)\n if self.filter(event) == False:\n print \"Bad event\"\n return False\n self.out.nncutflow.Fill(4.,self.EventWeight)\n nncutflow_list.append(self.EventWeight)\n if self.isMC:\n if 'DYJetsToLL' in self.sample[0] or 'ZJetsToNuNu' in self.sample[0] or 'WJetsToLNu' in self.sample[0]:\n GenVpt = getGenVpt(event)\n self.QCDNLO_Corr = self.DYCorr.getWeightQCDNLO(GenVpt)\n self.QCDNNLO_Corr = self.DYCorr.getWeightQCDNNLO(GenVpt)\n self.EWKNLO_Corr = self.DYCorr.getWeightEWKNLO(GenVpt)\n self.EventWeight *= self.QCDNLO_Corr * self.QCDNNLO_Corr * self.EWKNLO_Corr\n self.TriggerWeight = 1.\n self.isZtoNN = True\n #stop if no semileptonic decays\n if self.isZtoEE==False and self.isZtoMM==False and self.isZtoNN==False and self.isTtoEM==False:\n return False\n ########## setting the Higgs and V index #######\n fatjet_idx_H = 0\n valid_Higgs = False\n if self.isZtoMM:\n fatjet_maxpt = 0.\n for i,fatjet_tlv in enumerate(fatjet_tlv_list):\n if fatjet_tlv.DeltaR(mu1_tlv)>0.8 and fatjet_tlv.DeltaR(mu2_tlv)>0.8 and fatjet_tlv.Pt()>fatjet_maxpt:\n fatjet_maxpt=fatjet_tlv.Pt()\n fatjet_idx_H = i\n valid_Higgs = True\n if not valid_Higgs:\n return False\n\n elif self.isZtoEE:\n fatjet_maxpt = 0.\n for i,fatjet_tlv in enumerate(fatjet_tlv_list):\n if fatjet_tlv.DeltaR(el1_tlv)>0.8 and fatjet_tlv.DeltaR(el2_tlv)>0.8 and fatjet_tlv.Pt()>fatjet_maxpt:\n 
fatjet_maxpt=fatjet_tlv.Pt()\n fatjet_idx_H = i\n valid_Higgs = True\n if not valid_Higgs:\n return False\n \n elif self.isZtoNN:\n fatjet_maxpt = 0.\n for i,fatjet_tlv in enumerate(fatjet_tlv_list):\n if fatjet_tlv.Pt()>fatjet_maxpt:\n fatjet_maxpt=fatjet_tlv.Pt()\n fatjet_idx_H = i\n\n ############ AK4 Jet ###########\n for ijet in range(event.nJet):\n jet_pt = event.Jet_pt[ijet]\n jet_eta = event.Jet_eta[ijet]\n jet_phi = event.Jet_phi[ijet]\n jet_mass = event.Jet_mass[ijet]\n jet_tlv = ROOT.TLorentzVector()\n jet_tlv.SetPtEtaPhiM(jet_pt,jet_eta,jet_phi,jet_mass)\n self.HT += jet_pt\n if jet_eta > -2.5 and jet_eta < -1.479 and jet_phi > -1.55 and jet_phi < -0.9:\n if self.HT_HEM15_16 == -1.:\n self.HT_HEM15_16 = 0.\n self.HT_HEM15_16 += jet_pt\n if jet_pt > ak4_pt_cut and abs(jet_eta) < ak4_eta_cut:\n cleanJet = True\n for loose_electrons_tlv in loose_electrons_tlv_list:\n if loose_electrons_tlv.DeltaR(jet_tlv) < 0.4:\n cleanJet = False\n for loose_muons_tlv in loose_muons_tlv_list:\n if loose_muons_tlv.DeltaR(jet_tlv) < 0.4:\n cleanJet = False\n if cleanJet and getJetID(self.year,event,ijet):\n if len(fatjet_tlv_list) > 0 and fatjet_tlv_list[fatjet_idx_H].DeltaR(jet_tlv) > 1.2:\n jet_tlv_list.append(jet_tlv)\n idx_jet.append(ijet)\n\n ############ AK4 Jet check for VBF ###########\n if self.isZtoMM:\n lep1_tlv = mu1_tlv\n lep2_tlv = mu2_tlv\n if self.isZtoEE:\n lep1_tlv = el1_tlv\n lep2_tlv = el2_tlv\n \n for ijet in range(event.nJet):\n jet_pt = event.Jet_pt[ijet]\n jet_eta = event.Jet_eta[ijet]\n jet_phi = event.Jet_phi[ijet]\n jet_mass = event.Jet_mass[ijet]\n jet_tlv = ROOT.TLorentzVector()\n jet_tlv.SetPtEtaPhiM(jet_pt,jet_eta,jet_phi,jet_mass)\n if abs(jet_eta) < 5.0:\n if len(fatjet_tlv_list) > 0:\n if fatjet_tlv_list[fatjet_idx_H].DeltaR(jet_tlv) > 1.2:\n if getJetID(self.year,event,ijet) and event.Jet_puId[ijet]==7:\n if self.isZtoMM or self.isZtoEE:\n if jet_tlv.DeltaR(lep1_tlv)>0.4 and jet_tlv.DeltaR(lep2_tlv)>0.4:\n jet_tlv_list_vbf.append(jet_tlv)\n idx_jet_vbf.append(ijet) \n elif self.isZtoNN:\n jet_tlv_list_vbf.append(jet_tlv)\n idx_jet_vbf.append(ijet) \n\n idx1_vbf = -1\n idx2_vbf = -1\n maxVBFmass = -1.\n for ijet1, jet1_tlv in enumerate(jet_tlv_list_vbf):\n for ijet2, jet2_tlv in enumerate(jet_tlv_list_vbf):\n if ijet1 == ijet2: continue\n eta1 = jet_tlv_list_vbf[ijet1].Eta()\n eta2 = jet_tlv_list_vbf[ijet2].Eta()\n V_VBF = jet_tlv_list_vbf[ijet1]+jet_tlv_list_vbf[ijet2]\n VBFmass = V_VBF.M()\n if abs(eta1-eta2)>4.0 and eta1*eta2<0. 
and VBFmass>maxVBFmass:\n idx1_vbf = ijet1\n idx2_vbf = ijet2\n maxVBFmass = VBFmass\n \n\n self.dijet_VBF_mass = maxVBFmass\n if maxVBFmass > 500.: \n self.isVBF = True\n self.Jet1_VBF_pt = jet_tlv_list_vbf[idx1_vbf].Pt()\n self.Jet1_VBF_eta = jet_tlv_list_vbf[idx1_vbf].Eta()\n self.Jet1_VBF_phi = jet_tlv_list_vbf[idx1_vbf].Phi()\n self.Jet1_VBF_mass = jet_tlv_list_vbf[idx1_vbf].M()\n self.Jet2_VBF_pt = jet_tlv_list_vbf[idx2_vbf].Pt()\n self.Jet2_VBF_eta = jet_tlv_list_vbf[idx2_vbf].Eta()\n self.Jet2_VBF_phi = jet_tlv_list_vbf[idx2_vbf].Phi()\n self.Jet2_VBF_mass = jet_tlv_list_vbf[idx2_vbf].M()\n self.deltaR_VBF = jet_tlv_list_vbf[idx1_vbf].DeltaR(jet_tlv_list_vbf[idx2_vbf])\n self.deltaR_HVBFjet1 = (fatjet_tlv_list[fatjet_idx_H].DeltaR(jet_tlv_list_vbf[idx1_vbf]))\n self.deltaR_HVBFjet2 = (fatjet_tlv_list[fatjet_idx_H].DeltaR(jet_tlv_list_vbf[idx2_vbf]))\n\n ########## Higgs ######## \n H = fatjet_tlv_list[fatjet_idx_H]\n\n if self.runJEC:\n self.H_mass_nom = event.FatJet_msoftdrop_nom[fatjet_idx_H]\n self.H_mass_jmsUp = event.FatJet_msoftdrop_jmsUp[fatjet_idx_H]\n self.H_mass_jmsDown = event.FatJet_msoftdrop_jmsDown[fatjet_idx_H]\n self.H_mass_jmrUp = event.FatJet_msoftdrop_jmrUp[fatjet_idx_H]\n self.H_mass_jmrDown = event.FatJet_msoftdrop_jmrDown[fatjet_idx_H]\n self.H_pt_nom = event.FatJet_pt_nom[fatjet_idx_H]\n self.H_pt_jesUp = event.FatJet_pt_jesTotalUp[fatjet_idx_H]\n self.H_pt_jesDown = event.FatJet_pt_jesTotalDown[fatjet_idx_H]\n self.H_pt_jerUp = event.FatJet_pt_jerUp[fatjet_idx_H]\n self.H_pt_jerDown = event.FatJet_pt_jerDown[fatjet_idx_H]\n self.PuppiMET_pt_nom = event.PuppiMET_pt_nom\n self.PuppiMET_pt_jesUp = event.PuppiMET_pt_jesTotalUp\n self.PuppiMET_pt_jesDown = event.PuppiMET_pt_jesTotalDown\n self.PuppiMET_pt_jerUp = event.PuppiMET_pt_jerUp\n self.PuppiMET_pt_jerDown = event.PuppiMET_pt_jerDown\n \n H_Eta = H.Eta()\n H_Phi = H.Phi()\n H_M = H.M()\n H_nom = ROOT.TLorentzVector()\n H_jesUp = ROOT.TLorentzVector()\n H_jesDown = ROOT.TLorentzVector()\n H_jerUp = ROOT.TLorentzVector()\n H_jerDown = ROOT.TLorentzVector()\n H_nom.SetPtEtaPhiM(self.H_pt_nom,H_Eta,H_Phi,H_M)\n H_jesUp.SetPtEtaPhiM(self.H_pt_jesUp,H_Eta,H_Phi,H_M)\n H_jesDown.SetPtEtaPhiM(self.H_pt_jesDown,H_Eta,H_Phi,H_M)\n H_jerUp.SetPtEtaPhiM(self.H_pt_jerUp,H_Eta,H_Phi,H_M)\n H_jerDown.SetPtEtaPhiM(self.H_pt_jerDown,H_Eta,H_Phi,H_M)\n MET_nom = ROOT.TLorentzVector()\n MET_jesUp = ROOT.TLorentzVector()\n MET_jesDown = ROOT.TLorentzVector()\n MET_jerUp = ROOT.TLorentzVector()\n MET_jerDown = ROOT.TLorentzVector()\n MET_nom.SetPtEtaPhiM(self.PuppiMET_pt_nom,0.,event.PuppiMET_phi,self.PuppiMET_pt_nom)\n MET_jesUp.SetPtEtaPhiM(self.PuppiMET_pt_jesUp,0.,event.PuppiMET_phi,self.PuppiMET_pt_jesUp)\n MET_jesDown.SetPtEtaPhiM(self.PuppiMET_pt_jesDown,0.,event.PuppiMET_phi,self.PuppiMET_pt_jesDown)\n MET_jerUp.SetPtEtaPhiM(self.PuppiMET_pt_jerUp,0.,event.PuppiMET_phi,self.PuppiMET_pt_jerUp)\n MET_jerDown.SetPtEtaPhiM(self.PuppiMET_pt_jerDown,0.,event.PuppiMET_phi,self.PuppiMET_pt_jerDown)\n\n for ifatjet in idx_fatjet:\n if event.FatJet_btagHbb[ifatjet] > 0.3:\n self.isBoosted4B = True\n\n \n self.nJetsNoFatJet = len(jet_tlv_list)\n \n if self.isZtoNN:\n self.DPhi = abs(MET_tlv.DeltaPhi(H))\n else:\n self.DPhi = abs(V.DeltaPhi(H))\n \n self.VH_deltaR = H.DeltaR(V)\n \n jet_list_temp = []\n for ijet in range(event.nJet):\n jet_pt = event.Jet_pt[ijet]\n jet_eta = event.Jet_eta[ijet]\n jet_phi = event.Jet_phi[ijet]\n jet_mass = event.Jet_mass[ijet]\n jet_tlv = ROOT.TLorentzVector()\n 
jet_tlv.SetPtEtaPhiM(jet_pt,jet_eta,jet_phi,jet_mass)\n if jet_tlv.DeltaR(H) < 0.8:\n jet_list_temp.append(ijet)\n if len(jet_list_temp) == 1:\n idx = jet_list_temp[0]\n self.H_chf = event.Jet_chHEF[idx]\n self.H_nhf = event.Jet_neHEF[idx]\n elif len(jet_list_temp) == 2:\n idx1 = jet_list_temp[0]\n idx2 = jet_list_temp[1]\n pt1 = event.Jet_pt[idx1]\n pt2 = event.Jet_pt[idx2]\n chf1 = event.Jet_chHEF[idx1]\n chf2 = event.Jet_chHEF[idx2]\n nhf1 = event.Jet_neHEF[idx1]\n nhf2 = event.Jet_neHEF[idx2]\n self.H_chf = (chf1*pt1+chf2*pt2)/(pt1+pt2) \n self.H_nhf = (nhf1*pt1+nhf2*pt2)/(pt1+pt2)\n elif len(jet_list_temp) == 3:\n idx1 = jet_list_temp[0]\n idx2 = jet_list_temp[1]\n idx3 = jet_list_temp[2]\n pt1 = event.Jet_pt[idx1]\n pt2 = event.Jet_pt[idx2]\n pt3 = event.Jet_pt[idx3]\n chf1 = event.Jet_chHEF[idx1]\n chf2 = event.Jet_chHEF[idx2]\n chf3 = event.Jet_chHEF[idx3]\n nhf1 = event.Jet_neHEF[idx1]\n nhf2 = event.Jet_neHEF[idx2]\n nhf3 = event.Jet_neHEF[idx3]\n self.H_chf = (chf1*pt1+chf2*pt2+chf3*pt3)/(pt1+pt2+pt3) \n self.H_nhf = (nhf1*pt1+nhf2*pt2+nhf3*pt3)/(pt1+pt2+pt3)\n\n\n\n for jet_tlv in jet_tlv_list:\n if abs(MET_tlv.DeltaPhi(jet_tlv)) < self.MinJetMetDPhi:\n self.MinJetMetDPhi = abs(MET_tlv.DeltaPhi(jet_tlv))\n\n\n for ijet in idx_jet:\n if event.Jet_btagDeepB[ijet] > self.MaxJetNoFatJetBTag:\n self.MaxJetNoFatJetBTag = event.Jet_btagDeepB[ijet]\n\n if not self.isData:\n for igenjet in range(event.nGenJetAK8):\n genjetAK8_tlv = ROOT.TLorentzVector()\n genjetAK8_tlv.SetPtEtaPhiM(event.GenJetAK8_pt[igenjet], event.GenJetAK8_eta[igenjet], event.GenJetAK8_phi[igenjet], event.GenJetAK8_mass[igenjet])\n if H.DeltaR(genjetAK8_tlv) < 0.8:\n self.H_hadronflavour = struct.unpack('B',event.GenJetAK8_hadronFlavour[igenjet])[0]\n self.H_partonflavour = event.GenJetAK8_partonFlavour[igenjet]\n self.btagToolAK4_deep.fillEfficiencies(event,idx_jet,fatjet_idx_H)\n self.BTagAK4Weight_deep = self.btagToolAK4_deep.getWeight(event,idx_jet,fatjet_idx_H)\n self.BTagAK4Weight_deep_up = self.btagToolAK4_deep_up.getWeight(event,idx_jet,fatjet_idx_H)\n self.BTagAK4Weight_deep_down = self.btagToolAK4_deep_down.getWeight(event,idx_jet,fatjet_idx_H)\n #search for AK4 jets which match with the subjets from the H\n ak4_subjets = []\n subjet1 = TLorentzVector()\n subjet2 = TLorentzVector()\n subjet1_idx = event.FatJet_subJetIdx1[fatjet_idx_H]\n subjet2_idx = event.FatJet_subJetIdx2[fatjet_idx_H]\n if subjet1_idx>=0. 
and subjet2_idx>=0.:\n subjet1.SetPtEtaPhiM(event.SubJet_pt[subjet1_idx],event.SubJet_eta[subjet1_idx],event.SubJet_phi[subjet1_idx],event.SubJet_mass[subjet1_idx])\n subjet2.SetPtEtaPhiM(event.SubJet_pt[subjet2_idx],event.SubJet_eta[subjet2_idx],event.SubJet_phi[subjet2_idx],event.SubJet_mass[subjet2_idx])\n for jetid in range(event.nJet):\n ak4jet = TLorentzVector()\n ak4jet.SetPtEtaPhiM(event.Jet_pt[jetid],event.Jet_eta[jetid],event.Jet_phi[jetid],event.Jet_mass[jetid])\n if ak4jet.DeltaR(subjet1)<0.4:\n ak4_subjets.append(jetid)\n if ak4jet.DeltaR(subjet2)<0.4:\n ak4_subjets.append(jetid)\n self.btagToolAK8_deep.fillEfficiencies(event,ak4_subjets,fatjet_idx_H)\n self.BTagAK8Weight_deep = self.btagToolAK8_deep.getWeight(event,ak4_subjets,fatjet_idx_H)\n self.BTagAK8Weight_deep_up = self.btagToolAK8_deep_up.getWeight(event,ak4_subjets,fatjet_idx_H)\n self.BTagAK8Weight_deep_down = self.btagToolAK8_deep_down.getWeight(event,ak4_subjets,fatjet_idx_H)\n ########### X and variables ############\n X = V + H\n if self.isZtoNN:\n X_chs = V_chs + H\n self.X_mass_chs = X_chs.M()\n\n if self.runJEC:\n X_nom = V + H_nom\n X_jesUp = V + H_jesUp\n X_jesDown = V + H_jesDown\n X_jerUp = V + H_jerUp\n X_jerDown = V + H_jerDown\n X_MET_nom = MET_nom + H_nom\n X_MET_jesUp = MET_jesUp + H_jesUp\n X_MET_jesDown = MET_jesDown + H_jesDown\n X_MET_jerUp = MET_jerUp + H_jerUp\n X_MET_jerDown = MET_jerDown + H_jerDown\n self.X_mass_nom = X_nom.M()\n self.X_mass_jesUp = X_jesUp.M()\n self.X_mass_jesDown = X_jesDown.M()\n self.X_mass_jerUp = X_jerUp.M()\n self.X_mass_jerDown = X_jerDown.M()\n self.X_mass_MET_nom = X_MET_nom.M()\n self.X_mass_MET_jesUp = X_MET_jesUp.M()\n self.X_mass_MET_jesDown = X_MET_jesDown.M()\n self.X_mass_MET_jerUp = X_MET_jerUp.M()\n self.X_mass_MET_jerDown = X_MET_jerDown.M()\n\n self.V_pt = V.Pt()\n self.V_eta = V.Eta()\n self.V_phi = V.Phi()\n self.V_mass = V.M()\n \n if self.isZtoNN:\n self.V_mass = 0.\n\n self.H_pt = H.Pt()\n self.H_eta = H.Eta()\n self.H_phi = H.Phi()\n self.H_M = H.M()\n self.H_mass = event.FatJet_msoftdrop[fatjet_idx_H]\n self.X_pt = X.Pt()\n self.X_eta = X.Eta()\n self.X_phi = X.Phi()\n self.X_mass = X.M()\n\n\n self.H_dbt = event.FatJet_btagHbb[fatjet_idx_H]\n self.BtagDeepB = event.FatJet_btagDeepB[fatjet_idx_H]\n self.DeepTagMD_H4qvsQCD = event.FatJet_deepTagMD_H4qvsQCD[fatjet_idx_H]\n self.DeepTagMD_HbbvsQCD = event.FatJet_deepTagMD_HbbvsQCD[fatjet_idx_H]\n self.DeepTagMD_ZHbbvsQCD = event.FatJet_deepTagMD_ZHbbvsQCD[fatjet_idx_H]\n self.DeepTagMD_ZbbvsQCD = event.FatJet_deepTagMD_ZbbvsQCD[fatjet_idx_H]\n self.DeepTagMD_bbvsLight = event.FatJet_deepTagMD_bbvsLight[fatjet_idx_H]\n self.DeepTagMD_WvsQCD = event.FatJet_deepTagMD_WvsQCD[fatjet_idx_H]\n self.DeepTagMD_ZvsQCD = event.FatJet_deepTagMD_ZvsQCD[fatjet_idx_H]\n self.H_tau21 = fatjet_tau21_list[fatjet_idx_H]\n self.H_tau41 = fatjet_tau41_list[fatjet_idx_H]\n self.H_tau42 = fatjet_tau42_list[fatjet_idx_H]\n self.H_tau31 = fatjet_tau31_list[fatjet_idx_H]\n self.H_tau32 = fatjet_tau32_list[fatjet_idx_H]\n self.VHDEta = abs(V.Eta() - H.Eta())\n\n \n \n if event.FatJet_subJetIdx1[fatjet_idx_H] >= 0:\n Hcsv1 = event.SubJet_btagCSVV2[event.FatJet_subJetIdx1[fatjet_idx_H]]\n Hdeepcsv1 = event.SubJet_btagDeepB[event.FatJet_subJetIdx1[fatjet_idx_H]]\n else:\n Hcsv1 = -1.\n Hdeepcsv1 = -1.\n if event.FatJet_subJetIdx2[fatjet_idx_H] >= 0:\n Hcsv2 = event.SubJet_btagCSVV2[event.FatJet_subJetIdx2[fatjet_idx_H]]\n Hdeepcsv2 = event.SubJet_btagDeepB[event.FatJet_subJetIdx2[fatjet_idx_H]]\n else:\n Hcsv2 = -1.\n Hdeepcsv2 = 
-1.\n \n self.H_csv1 = max(Hcsv1,Hcsv2)\n self.H_csv2 = min(Hcsv1,Hcsv2)\n self.H_deepcsv1 = max(Hdeepcsv1,Hdeepcsv2)\n self.H_deepcsv2 = min(Hdeepcsv1,Hdeepcsv2)\n\n\n if self.year == 2016:\n wp_loose = 0.2217\n wp_medium = 0.6321\n wp_tight = 0.8953\n elif self.year == 2017:\n wp_loose = 0.1522\n wp_medium = 0.4941\n wp_tight = 0.8001\n elif self.year == 2018:\n wp_loose = 0.1241\n wp_medium = 0.4184\n wp_tight = 0.7527\n\n if self.H_deepcsv2 > wp_loose:\n self.isHtobb = True\n if self.H_deepcsv1 > wp_medium and self.H_deepcsv2 > wp_loose:\n self.isHtobb_ml = True\n\n if self.MaxJetNoFatJetBTag > wp_loose:\n self.isMaxBTag_loose = True\n if self.MaxJetNoFatJetBTag > wp_medium:\n self.isMaxBTag_medium = True\n if self.MaxJetNoFatJetBTag > wp_tight:\n self.isMaxBTag_tight = True\n\n \n if self.H_mass != 0.:\n self.H_ddt = self.H_tau21 + 0.082 *np.log(self.H_mass*self.H_mass/self.H_pt)\n else:\n self.H_ddt = -1.\n \n self.X_tmass = np.sqrt(2.*V.Pt()*fatjet_tlv_list[fatjet_idx_H].Pt()*(1.-np.cos(fatjet_tlv_list[fatjet_idx_H].DeltaPhi(V))))\n if self.isZtoNN:\n self.X_mass = self.X_tmass\n else:\n self.X_mass = X.M()\n if self.X_mass > 750 and self.VH_deltaR > 2:\n if self.MinJetMetDPhi>0.5 and self.DPhi>2:\n for i,weight in enumerate(nncutflow_list):\n self.out.nncutflow_inc.Fill(i,weight)\n if self.VHDEta<1.3:\n for i,weight in enumerate(eecutflow_list):\n self.out.eecutflow_inc.Fill(i,weight)\n for i,weight in enumerate(mmcutflow_list):\n self.out.mmcutflow_inc.Fill(i,weight)\n \n if self.isZtoEE or self.isZtoMM or self.isZtoNN or self.isTtoEM:\n self.fillBranches(event)\n return True" ]
[ "0.5894989", "0.5636878", "0.5595273", "0.5590827", "0.5589143", "0.55404943", "0.5539545", "0.55124915", "0.5504166", "0.5484351", "0.5466944", "0.5432563", "0.5431083", "0.5426489", "0.54122007", "0.5407446", "0.54009956", "0.53980666", "0.5388078", "0.53808546", "0.5372438", "0.5368383", "0.53637177", "0.53633434", "0.53354716", "0.53166914", "0.53110766", "0.5310202", "0.5302322", "0.52824634" ]
0.6149179
0
Make an n-bars dataframe that looks back over the past `n_bars` bars. The row size of `df` must be greater than or equal to `n_bars + 1`, or a ValueError is raised.
def make_nbars_past(df: pd.DataFrame, n_bars: int, cols: List[str] = ['Close'], datetime_col: Union[str, None] = 'Date') -> pd.DataFrame: if df.shape[0] < n_bars + 1: raise ValueError( f'row size of the df (={df.shape[0]}) must be greater than or equal to n_bars + 1 (={n_bars + 1})') df = df.rename(columns={col: f'{col}{n_bars}' for col in cols}) for i in reversed(range(n_bars)): inc = n_bars - i for col in cols: df[f'{col}{i}'] = df[f'{col}{n_bars}'][inc:].append( pd.Series([np.nan]*inc)).reset_index(drop=True) # correct bar date (or datetime) if datetime_col is not None: df[datetime_col] = df[datetime_col][n_bars:].append( pd.Series([np.nan]*n_bars)).reset_index(drop=True) df = df.dropna() return df
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_nbars_future(df: pd.DataFrame, n_bars: int, cols: List[str] = ['Close'], datetime_col: Union[str, None] = 'Date') -> pd.DataFrame:\n if df.shape[0] < n_bars + 1:\n raise ValueError(\n f'row size of the df (={df.shape[0]}) must be greater than or equal to n_bars + 1 (={n_bars + 1})')\n df = df.rename(columns={col: f'{col}0' for col in cols})\n\n for i in range(1, n_bars+1):\n for col in cols:\n df[f'{col}{i}'] = df[f'{col}0'][i:].append(\n pd.Series([np.nan]*i)).reset_index(drop=True)\n\n df = df.dropna()\n\n return df", "def multiple_bars(self, df, nrows, ncols, dict):\n fig, axs = plt.subplots(nrows=nrows, ncols=ncols, figsize=(6, 9.3))\n\n fig.subplots_adjust(left=0.03, right=0.97, hspace=0.50, wspace=0.05)\n\n bar_width = 0.35\n for ax, (key, dat) in zip(axs.flatten(), df):\n n_groups = len(dat.index)\n index = np.arange(n_groups)\n\n # make barchart for permutation test\n bar1 = ax.bar(index, dat[\"perm\"], bar_width, color='b',\n label='Permutation test')\n # make barchart for t-test\n bar2 = ax.bar(index + bar_width, dat[\"t_test\"], bar_width, color='r',\n label='t-test')\n\n ax.set_ylabel(\"Error\")\n ax.set_xticks(index + bar_width / 2)\n ax.set_xticklabels(dict[\"xtickslabels\"])\n ax.set_title(f\"Effect size = {key}\")\n ax.set_xlabel(f\"Group Size\")\n ax.legend()\n\n for rect, i in zip(bar1 + bar2, dat[\"sig\"]):\n height = rect.get_height()\n if i:\n ax.text(rect.get_x() + rect.get_width(), height, \"**\", ha='center', va='bottom')\n\n\n fig.suptitle(dict[\"title\"], y=1.0, fontsize = 15)\n fig.tight_layout()\n plt.show()", "def barlocs(n, width=0.1, mean=0):\n barloc = np.arange(n) * width\n if np.mean(barloc) > mean:\n barloc = barloc - (np.mean(barloc) - mean)\n elif np.mean(barloc) < mean:\n barloc = barloc + (np.mean(barloc) - mean)\n return barloc", "def multiple_bars(self, df, nrows, ncols, dict):\n fig, axs = plt.subplots(nrows=8, ncols=1, figsize=(6, 9.3), sharey=\"row\")\n\n fig.subplots_adjust(left=0.03, right=0.97, hspace=0.3, wspace=0.05)\n\n indexes = df.index.tolist()\n df[\"index\"] = indexes\n df[\"effect_size\"] = df[\"index\"].apply(lambda x: x[0])\n df[\"sd\"] = df[\"index\"].apply(lambda x: x[1])\n df[\"group\"] = df[\"index\"].apply(lambda x: x[2])\n bar_width = 0.35\n # get an index to set the ticks for the x axis\n\n df_new = df.groupby(\"sd\")\n # for key, item in df_new:\n # print(df_new.get_group(key))\n for ax, (sd, dat) in zip(axs, df_new):\n n_groups = len(dat.index)\n index = np.arange(n_groups)\n\n # make barchart for permutation test\n bar1 = ax.bar(index, dat[\"perm\"], bar_width, color='b',\n label='Permutation test')\n # make barchart for t-test\n bar2 = ax.bar(index + bar_width, dat[\"t_test\"], bar_width, color='r',\n label='t-test')\n es = dat[\"effect_size\"].iloc[0]\n\n ax.set_ylabel(\"Error\")\n ax.set_xticks(index + bar_width / 2)\n ax.set_xticklabels(dict[\"xtickslabels\"])\n ax.set_xlabel(f\"Mean error for sd = {sd} per group size\")\n print(dat[\"sig\"])\n print(\"\\n\\n\")\n for rect, i in zip(bar1 + bar2, dat[\"sig\"]):\n height = rect.get_height()\n if i:\n ax.text(rect.get_x() + rect.get_width(), height, \"**\", ha='center', va='bottom')\n\n ax.legend()\n\n fig.suptitle(f\"Effect size = {es}\", y=1.0, fontsize = 15)\n fig.tight_layout()\n plt.show()", "def get_latest_bars(self, symbol, N=1):\n raise NotImplementedError(\"Should implement get_latest_bars()\")", "def setBarGroups(ngroups, gap):\n dislin.bargrp(ngroups, gap)", "def bar_chart(self, df, n_groups, dict):\n fig, ax = plt.subplots()\n # choose bar width 
(standard 0.8 chosen)\n bar_width = 0.35\n # get an index to set the ticks for the x axis\n\n index = np.arange(n_groups)\n indexes = df.index.tolist()\n print(indexes)\n df[\"index\"] = indexes\n\n # make barchart for permutation test\n ax.bar(index, df[\"perm\"], bar_width, color='b', linewidth=4,\n label='Permutation test')\n # make barchart for t-test\n ax.bar(index + bar_width, df[\"t_test\"], bar_width, color='r',\n label='t-test')\n\n ax.set_xlabel(dict[\"xlabel\"])\n ax.set_ylabel(dict[\"ylabel\"])\n ax.set_title(dict[\"title\"])\n ax.set_xticks(index + bar_width / 2)\n ax.set_xticklabels(dict[\"xtickslabels\"])\n ax.legend()\n\n fig.tight_layout()\n plt.show()", "def tail(self, n: int = 5) -> 'DataFrame':\n return self[-n:, :] # type: ignore", "def imbalance_bar(data,ET_window,P_window, warm_up_len = 100,mode=\"TIB\"):\n assert mode in [\"TIB\",\"VIB\"], \"please enter mode of imbalance bar: TIB/VIB\"\n if mode == \"TIB\":\n data = _preprocess(data)\n else:\n data = _preprocess(data, need_vol=True)\n\n N = data.shape[0]\n b_t = _direction(data[\"price\"])\n if mode == \"VIB\":\n b_t = np.array(b_t * data[\"vol\"])\n E_T = warm_up_len\n E_theta = E_T * 0.5 # without prior knowledge it's reasonable to assume P(b_t==1) = 0.5\n\n # length of first bar\n t0 = np.where(abs(np.cumsum(b_t))>=E_theta)[0]\n if len(t0) == 0:\n raise ValueError(\"No such bar can be created!\")\n\n bar = [t0[0]+1]\n bar_len = 0\n current_loc = sum(bar)\n while True:\n E_T = _EMA(bar, ET_window)[-1]\n P_estimate = _EMA(b_t[:current_loc], P_window)[-1]\n E_theta = E_T * abs(P_estimate)\n\n theta_t = abs(np.cumsum(b_t[current_loc:]))\n increment = np.where(theta_t >= E_theta)[0] # np.where() will return a tuple\n \n if len(increment)==0: # if can't find any appropriate bar\n bar.append(data.shape[0] - current_loc)\n break \n if bar[bar_len] + (increment[0]+1) >= N:\n bar.append(data.shape[0] - current_loc)\n break\n bar.append(increment[0]+1)# python start from 0 but we want to store the length of each bar\n current_loc += (increment[0]+1)\n bar_len += 1\n result = _bar2df(bar,data)\n return result", "def plotNBars(Xs, Ys, labels, xlabel, ylabel, title, plotter, ylog = False, horizontalLine = None, verticalLine = None):\n\timport numpy.numarray as na\n\tmaxData = max(map(len, Xs))\n\tminVal = min(map(min, Xs))\n\txlocations = na.array(range(maxData))\n\twidth = 0.7\n\ti = 0\n\tcolores = ['b','r','g','c','m','y','k','w','#610b0b']\n\tbar_width = float(width/len(Xs))\n\tfor (x, y, l) in zip(Xs, Ys, labels):\n\t\tplotter.bar(map(lambda t: t+bar_width*i, x), y, bar_width, label= l, color = colores[i], log=ylog)\n\t\ti += 1\n\n\tplotter.ylabel(ylabel)\n\tplotter.xlabel(xlabel)\n\tplotter.title(title)\n\tif horizontalLine:\n\t\thline = plotter.axhline(linewidth=2, color='r', y = horizontalLine, linestyle='dashed') \n\t\tbars.append(hline)\n\tif verticalLine:\n\t\tplotter.axvline(linewidth=2, color='r', x = verticalLine) \n\tplotter.legend()\n\tplotter.xticks(xlocations+width/2+minVal, xlocations+minVal, fontsize = 12) #, rotation = 30\n\t\n\treturn plotter", "def drop(n):\n def _drop_xducer(step):\n outer = {\"count\": 0}\n def _drop_step(r=Missing, x=Missing):\n if r is Missing: return step()\n if x is Missing:\n return step(r)\n if outer[\"count\"] < n:\n outer[\"count\"] += 1\n return r\n else:\n return step(r, x)\n return _drop_step\n return _drop_xducer", "def split_data(df: pd.DataFrame, ratio: float, purging: bool = True, n_bars: int = 10) -> Tuple[pd.DataFrame, pd.DataFrame]:\n split_idx = int(df.shape[0] * 
ratio)\n df1 = df[:split_idx]\n df2 = df[split_idx:]\n if purging:\n purge_idx = round((n_bars-1) * ratio)\n df1 = df1[:-purge_idx]\n df2 = df2[(n_bars - 1 - purge_idx):]\n\n return df1, df2", "def top_words_bar_chart(df, n=10):\n messages = df['message'].values\n word_counts = {}\n for message in messages:\n tokens = tokenize(message)\n for token in tokens:\n if token in word_counts:\n word_counts[token] += 1\n else:\n word_counts[token] = 1\n\n items = sorted(word_counts.items(), key=lambda x: x[1], reverse=True)\n items = items[0:n]\n words = list(map(lambda x: x[0], items))\n counts = list(map(lambda x: x[1], items))\n return {\n 'data': [\n Bar(\n x=words,\n y=counts\n )\n ],\n\n 'layout': {\n 'title': 'Most common word stems (outside stopwords)',\n 'yaxis': {\n 'title': \"Count\",\n },\n 'xaxis': {\n 'title': \"Word\"\n }\n }\n }", "def repeat(df, n):\n return pd.concat([df] * n, ignore_index=True)", "def pivot_num(data: PandasDF, var: str, performance: str = 'bad_ind',\n n: int = 10, ks: float = True, max_ks_only: float = False) -> PandasDF:\n temp = data.loc[:, [var, performance]].copy()\n temp_missing = temp.loc[temp[var].isnull(), :]\n temp_noMissing = temp.loc[~temp[var].isnull(), :]\n temp_noMissing.sort_values(var, inplace=True)\n length = round(temp_noMissing.shape[0]/n)\n\n group = temp_noMissing.groupby(np.arange(temp_noMissing.shape[0]) // length).apply(\n lambda obj: pd.Series({\n 'var': var,\n 'level': str(obj[var].min()) + ' - ' + str(obj[var].max()),\n 'bad rate': obj[performance].mean(),\n 'count': len(obj[performance])\n }))\n group_missing = pd.DataFrame({\n 'var': var,\n 'level': np.nan,\n 'bad rate': temp_missing[performance].mean(),\n 'count': temp_missing.shape[0],\n 'ks': np.nan\n }, index=[n+1, ])\n # temp = group[['bad rate', 'count']].copy()\n if ks or max_ks_only:\n group['bad'] = [r * c for r, c in zip(group['bad rate'], group['count'])]\n group['cum_bad'] = [sum(group.loc[0:i, 'bad']) for i in range(group.shape[0])]\n group['cum_count'] = [sum(group.loc[0:i, 'count']) for i in range(group.shape[0])]\n group['cum_good'] = [c - b for c, b in zip(group['cum_count'], group['cum_bad'])]\n group['ks'] = [\n (100 * abs(g/group.loc[group.shape[0]-1, 'cum_good'] - b/group.loc[group.shape[0]-1, 'cum_bad']))\n for g, b in zip(group.cum_good, group.cum_bad)]\n max_index = group['ks'].idxmax()\n if max_ks_only:\n return group.loc[[max_index, ], ['var', 'ks']]\n group['ks'] = ['%.1f%%' % x for x in group['ks']]\n group = group.append(group_missing)\n group['bad rate'] = ['%.2f%%' % (x * 100) for x in group['bad rate']]\n\n group.style.applymap(highlight, subset=pd.IndexSlice[max_index, ['ks']])\n\n return group[['var', 'level', 'bad rate', 'count', 'cum_bad', 'cum_good', 'ks']]\n else:\n group = group.append(group_missing[['var', 'level', 'bad rate', 'count']])\n group.rename(columns={'bad rate': 'avg %s' % performance}, inplace=True)\n return group[['var', 'level', 'avg %s' % performance, 'count']]", "def test_n_bins(self):\n with Pandas() as pd:\n if pd is None:\n return\n with Numpy() as np: # noqa\n if numpy is None:\n return\n sys.stderr.write(\"\\n\")\n\n df, hist1, hist2, hist3 = get_test_histograms1()\n\n assert hist1.n_bins == 5\n assert hist2.n_bins == 5\n assert hist3.n_bins == 7", "def setNSlices(self,n):\n assert(n> 0)\n self._c_param.lee_richards_n_slices = n", "def n_valid_rows(n, X):\n if n > X.shape[0]:\n return X.shape[0]\n return n", "def limit(requestContext, seriesList, n):\n return seriesList[0:n]", "def keep_n(self, n=100):\n before = 
self.item_count()\n\n item_count = self.item_count()\n if item_count > n: self.filter(self.sample(n))\n\n after = self.item_count()\n with msg(f'Keeping (at most) {n} items: {after} of {before}', done=False, enabled=self.output):pass", "def stacked_bar_plot(df, cat1, cat2, bar_size=30, nan_colums_thresh=0, figsize=(20, 10), percentile=0.001, plot_flag = 1, normalize = False, sort_bars = False, return_pivot = False):\n \n df_for_pivot = df[[cat1, cat2]].groupby([cat1, cat2]).size().reset_index(name='counts')\n df_pivot = df_for_pivot.pivot(index=cat1, columns=cat2, values='counts')\n \n if normalize == True:\n df_pivot['sum_cols'] = df_pivot.sum(axis = 1)\n # Normalize by the sum of the row\n df_pivot_percent = df_pivot.div(df_pivot.sum_cols, axis=0)\n # Drop the sum column\n df_pivot_clean = df_pivot_percent.drop(columns=['sum_cols'])\n not_nan_positions_ratio = 100\n else:\n # Drops rows having more than nan_colums_thresh Nan values\n # In case of search_term VS search_position, drops rows with all NaN and only search_position = 11 is not NaN\n df_pivot_clean = df_pivot.dropna(thresh=nan_colums_thresh)\n \n # sort the search term index in descending order\n if sort_bars:\n ordered_index = df_pivot_clean.sum(axis=1).sort_values(ascending=False).index\n df_pivot_clean = df_pivot_clean.reindex(ordered_index)\n\n # Calculate the ratio of the informative search terms size compared to the overall size of the different search terms\n not_nan_positions_ratio = 100 * df_pivot_clean.shape[0] / (df_pivot.shape[0])\n \n \n if plot_flag == 1:\n # Choose only the top percentile data to avoid data resolution problems\n # df_to_plot = df_pivot_clean[df_pivot_clean.sum(axis = 1) > df_pivot_clean.sum(axis = 1).quantile(percentile)]\n\n # Choose only the top 30 bars to avoid data resolution problems\n df_to_plot = df_pivot_clean.iloc[0:bar_size, ]\n\n # Stacked bar plot\n df_to_plot.plot.bar(stacked=True, figsize=figsize, cmap=plt.get_cmap('tab20c'))\n if normalize == True:\n plt.ylabel(\"Normalized distribution\", fontsize=15)\n else:\n plt.suptitle(str('Number of samples per '+ cat1 + ' and '+ cat2), fontsize=20, fontweight='bold')\n plt.ylabel('Number of samples', fontsize=15)\n \n if return_pivot == True : \n return (df_pivot_clean, not_nan_positions_ratio)", "def head(self, n=5):\n return self.dataframe.limit(n).toPandas()", "def plot_bar(self, nsingular=None, nbars=20):\n if nsingular is not None:\n self.get_identifiability_dataframe(nsingular)\n\n plot_obj = plots.IdentBar(self.ident_df, nsingular=nsingular, nbars=nbars)\n plot_obj.generate()\n plot_obj.draw()\n \n return plot_obj.fig, plot_obj.ax", "def ends(df, x=5):\n return df.head(x).append(df.tail(x))", "def keep_only_n_largest_locations(\n df: pd.DataFrame, n: int, count: Counting\n) -> pd.DataFrame:\n\n case_type = CaseInfo.get_info_item_for(\n InfoField.CASE_TYPE, stage=DiseaseStage.CONFIRMED, count=count\n )\n\n def get_n_largest_locations(df: pd.DataFrame) -> pd.Series:\n return (\n df[df[Columns.CASE_TYPE] == case_type]\n .groupby(Columns.location_id_cols)\n .apply(lambda g: g[Columns.CASE_COUNT].iloc[-1])\n .nlargest(n)\n .rename(CaseTypes.CONFIRMED)\n )\n\n def keep_only_above_cutoff(df: pd.DataFrame, cutoff: float) -> pd.DataFrame:\n return df.groupby(Columns.location_id_cols).filter(\n lambda g: (\n g.loc[g[Columns.CASE_TYPE] == case_type, Columns.CASE_COUNT].iloc[-1]\n >= cutoff\n )\n )\n\n n_largest_location_case_counts = get_n_largest_locations(df)\n case_count_cutoff = n_largest_location_case_counts.min()\n return 
keep_only_above_cutoff(df, case_count_cutoff)", "def value_count_bottom(df, cat_features, bottom = 10, save_plot = False, path_dir = None ):\n cat_features = list(set(cat_features))\n cols = cat_features\n if len(cols) != 0:\n for i, col in sorted(list(enumerate(cols)), key=lambda x: x[1]):\n fig, ax = plt.subplots()\n fig.set_size_inches(4.5, 5.5)\n fig.set_size_inches(4, 4)\n ax = df[col].value_counts()[-bottom:].plot(kind='barh')\n plt.title(str(\"Distribution of BOTTOM \"+str(bottom)+ \" \" + col), fontsize=10)\n plt.show(block=False)\n if save_plot == True:\n plt.savefig((plot_dir + \"bottom_\"+str(bottom)+\"_value_count_ordinal.png\"))\n plt.clf()\n else:\n print(\"No categorial features to plot\")", "def split_df(df, n_chunks):\n chunk_size = int(np.ceil(df.shape[0] / n_chunks))\n assert n_chunks * chunk_size >= df.shape[0]\n chunks = []\n for i in range(0, df.shape[0], chunk_size):\n chunks.append(df[i:i + chunk_size])\n assert len(chunks) == n_chunks\n return chunks", "def compte(df):\n\n df.value_counts()[:100].plot(kind='bar')\n plt.show()", "def n50_barplot(df, path, settings, title=None):\n n50_bar = Plot(path=path + \"NanoComp_N50.html\", title=\"Comparing read length N50\")\n datasets = df[\"dataset\"].unique()\n length_column = \"aligned_lengths\" if \"aligned_lengths\" in df else \"lengths\"\n ylabel = \"Aligned read length N50\" if \"aligned_lengths\" in df else \"Sequenced read length N50\"\n\n palette = (\n settings[\"colors\"] if settings[\"colors\"] else cycle(plotly.colors.DEFAULT_PLOTLY_COLORS)\n )\n\n n50s = [get_N50(np.sort(df.loc[df[\"dataset\"] == d, length_column])) for d in datasets]\n n50_bar.fig = go.Figure()\n\n for idx, n50, color in zip(datasets, n50s, palette):\n n50_bar.fig.add_trace(go.Bar(x=[idx], y=[n50], name=idx, marker_color=color))\n\n n50_bar.fig.update_layout(\n title=title or n50_bar.title,\n title_x=0.5,\n yaxis_title=ylabel,\n )\n\n n50_bar.html = n50_bar.fig.to_html(full_html=False, include_plotlyjs=\"cdn\")\n n50_bar.save(settings)\n return [n50_bar]", "def evenly_spaced_BDs_OLD(BDs, n):\n BDs = BDs.iloc[:,0].tolist()\n BD_min = min(BDs)\n BD_max = max(BDs)\n return np.linspace(BD_min, BD_max, n)" ]
[ "0.5731061", "0.5586702", "0.5446808", "0.53735137", "0.53575814", "0.53006095", "0.5197947", "0.5093131", "0.5072677", "0.50318915", "0.49762776", "0.49578872", "0.49176425", "0.49153638", "0.48178166", "0.4813536", "0.47499412", "0.47253093", "0.46960294", "0.46573168", "0.46395522", "0.4618687", "0.46020746", "0.4598681", "0.4596625", "0.45958537", "0.45717275", "0.45644552", "0.4563442", "0.4546885" ]
0.65518504
0
Make an n-bars dataframe that looks ahead over the future `n_bars` bars. The row size of `df` must be greater than or equal to `n_bars + 1`, or a ValueError is raised.
def make_nbars_future(df: pd.DataFrame, n_bars: int, cols: List[str] = ['Close'], datetime_col: Union[str, None] = 'Date') -> pd.DataFrame: if df.shape[0] < n_bars + 1: raise ValueError( f'row size of the df (={df.shape[0]}) must be greater than or equal to n_bars + 1 (={n_bars + 1})') df = df.rename(columns={col: f'{col}0' for col in cols}) for i in range(1, n_bars+1): for col in cols: df[f'{col}{i}'] = df[f'{col}0'][i:].append( pd.Series([np.nan]*i)).reset_index(drop=True) df = df.dropna() return df
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_nbars_past(df: pd.DataFrame, n_bars: int, cols: List[str] = ['Close'], datetime_col: Union[str, None] = 'Date') -> pd.DataFrame:\n if df.shape[0] < n_bars + 1:\n raise ValueError(\n f'row size of the df (={df.shape[0]}) must be greater than or equal to n_bars + 1 (={n_bars + 1})')\n df = df.rename(columns={col: f'{col}{n_bars}' for col in cols})\n\n for i in reversed(range(n_bars)):\n inc = n_bars - i\n for col in cols:\n df[f'{col}{i}'] = df[f'{col}{n_bars}'][inc:].append(\n pd.Series([np.nan]*inc)).reset_index(drop=True)\n\n # correct bar date (or datetime)\n if datetime_col is not None:\n df[datetime_col] = df[datetime_col][n_bars:].append(\n pd.Series([np.nan]*n_bars)).reset_index(drop=True)\n\n df = df.dropna()\n\n return df", "def multiple_bars(self, df, nrows, ncols, dict):\n fig, axs = plt.subplots(nrows=nrows, ncols=ncols, figsize=(6, 9.3))\n\n fig.subplots_adjust(left=0.03, right=0.97, hspace=0.50, wspace=0.05)\n\n bar_width = 0.35\n for ax, (key, dat) in zip(axs.flatten(), df):\n n_groups = len(dat.index)\n index = np.arange(n_groups)\n\n # make barchart for permutation test\n bar1 = ax.bar(index, dat[\"perm\"], bar_width, color='b',\n label='Permutation test')\n # make barchart for t-test\n bar2 = ax.bar(index + bar_width, dat[\"t_test\"], bar_width, color='r',\n label='t-test')\n\n ax.set_ylabel(\"Error\")\n ax.set_xticks(index + bar_width / 2)\n ax.set_xticklabels(dict[\"xtickslabels\"])\n ax.set_title(f\"Effect size = {key}\")\n ax.set_xlabel(f\"Group Size\")\n ax.legend()\n\n for rect, i in zip(bar1 + bar2, dat[\"sig\"]):\n height = rect.get_height()\n if i:\n ax.text(rect.get_x() + rect.get_width(), height, \"**\", ha='center', va='bottom')\n\n\n fig.suptitle(dict[\"title\"], y=1.0, fontsize = 15)\n fig.tight_layout()\n plt.show()", "def multiple_bars(self, df, nrows, ncols, dict):\n fig, axs = plt.subplots(nrows=8, ncols=1, figsize=(6, 9.3), sharey=\"row\")\n\n fig.subplots_adjust(left=0.03, right=0.97, hspace=0.3, wspace=0.05)\n\n indexes = df.index.tolist()\n df[\"index\"] = indexes\n df[\"effect_size\"] = df[\"index\"].apply(lambda x: x[0])\n df[\"sd\"] = df[\"index\"].apply(lambda x: x[1])\n df[\"group\"] = df[\"index\"].apply(lambda x: x[2])\n bar_width = 0.35\n # get an index to set the ticks for the x axis\n\n df_new = df.groupby(\"sd\")\n # for key, item in df_new:\n # print(df_new.get_group(key))\n for ax, (sd, dat) in zip(axs, df_new):\n n_groups = len(dat.index)\n index = np.arange(n_groups)\n\n # make barchart for permutation test\n bar1 = ax.bar(index, dat[\"perm\"], bar_width, color='b',\n label='Permutation test')\n # make barchart for t-test\n bar2 = ax.bar(index + bar_width, dat[\"t_test\"], bar_width, color='r',\n label='t-test')\n es = dat[\"effect_size\"].iloc[0]\n\n ax.set_ylabel(\"Error\")\n ax.set_xticks(index + bar_width / 2)\n ax.set_xticklabels(dict[\"xtickslabels\"])\n ax.set_xlabel(f\"Mean error for sd = {sd} per group size\")\n print(dat[\"sig\"])\n print(\"\\n\\n\")\n for rect, i in zip(bar1 + bar2, dat[\"sig\"]):\n height = rect.get_height()\n if i:\n ax.text(rect.get_x() + rect.get_width(), height, \"**\", ha='center', va='bottom')\n\n ax.legend()\n\n fig.suptitle(f\"Effect size = {es}\", y=1.0, fontsize = 15)\n fig.tight_layout()\n plt.show()", "def bar_chart(self, df, n_groups, dict):\n fig, ax = plt.subplots()\n # choose bar width (standard 0.8 chosen)\n bar_width = 0.35\n # get an index to set the ticks for the x axis\n\n index = np.arange(n_groups)\n indexes = df.index.tolist()\n print(indexes)\n df[\"index\"] = indexes\n\n 
# make barchart for permutation test\n ax.bar(index, df[\"perm\"], bar_width, color='b', linewidth=4,\n label='Permutation test')\n # make barchart for t-test\n ax.bar(index + bar_width, df[\"t_test\"], bar_width, color='r',\n label='t-test')\n\n ax.set_xlabel(dict[\"xlabel\"])\n ax.set_ylabel(dict[\"ylabel\"])\n ax.set_title(dict[\"title\"])\n ax.set_xticks(index + bar_width / 2)\n ax.set_xticklabels(dict[\"xtickslabels\"])\n ax.legend()\n\n fig.tight_layout()\n plt.show()", "def barlocs(n, width=0.1, mean=0):\n barloc = np.arange(n) * width\n if np.mean(barloc) > mean:\n barloc = barloc - (np.mean(barloc) - mean)\n elif np.mean(barloc) < mean:\n barloc = barloc + (np.mean(barloc) - mean)\n return barloc", "def get_latest_bars(self, symbol, N=1):\n raise NotImplementedError(\"Should implement get_latest_bars()\")", "def setBarGroups(ngroups, gap):\n dislin.bargrp(ngroups, gap)", "def plotNBars(Xs, Ys, labels, xlabel, ylabel, title, plotter, ylog = False, horizontalLine = None, verticalLine = None):\n\timport numpy.numarray as na\n\tmaxData = max(map(len, Xs))\n\tminVal = min(map(min, Xs))\n\txlocations = na.array(range(maxData))\n\twidth = 0.7\n\ti = 0\n\tcolores = ['b','r','g','c','m','y','k','w','#610b0b']\n\tbar_width = float(width/len(Xs))\n\tfor (x, y, l) in zip(Xs, Ys, labels):\n\t\tplotter.bar(map(lambda t: t+bar_width*i, x), y, bar_width, label= l, color = colores[i], log=ylog)\n\t\ti += 1\n\n\tplotter.ylabel(ylabel)\n\tplotter.xlabel(xlabel)\n\tplotter.title(title)\n\tif horizontalLine:\n\t\thline = plotter.axhline(linewidth=2, color='r', y = horizontalLine, linestyle='dashed') \n\t\tbars.append(hline)\n\tif verticalLine:\n\t\tplotter.axvline(linewidth=2, color='r', x = verticalLine) \n\tplotter.legend()\n\tplotter.xticks(xlocations+width/2+minVal, xlocations+minVal, fontsize = 12) #, rotation = 30\n\t\n\treturn plotter", "def repeat(df, n):\n return pd.concat([df] * n, ignore_index=True)", "def imbalance_bar(data,ET_window,P_window, warm_up_len = 100,mode=\"TIB\"):\n assert mode in [\"TIB\",\"VIB\"], \"please enter mode of imbalance bar: TIB/VIB\"\n if mode == \"TIB\":\n data = _preprocess(data)\n else:\n data = _preprocess(data, need_vol=True)\n\n N = data.shape[0]\n b_t = _direction(data[\"price\"])\n if mode == \"VIB\":\n b_t = np.array(b_t * data[\"vol\"])\n E_T = warm_up_len\n E_theta = E_T * 0.5 # without prior knowledge it's reasonable to assume P(b_t==1) = 0.5\n\n # length of first bar\n t0 = np.where(abs(np.cumsum(b_t))>=E_theta)[0]\n if len(t0) == 0:\n raise ValueError(\"No such bar can be created!\")\n\n bar = [t0[0]+1]\n bar_len = 0\n current_loc = sum(bar)\n while True:\n E_T = _EMA(bar, ET_window)[-1]\n P_estimate = _EMA(b_t[:current_loc], P_window)[-1]\n E_theta = E_T * abs(P_estimate)\n\n theta_t = abs(np.cumsum(b_t[current_loc:]))\n increment = np.where(theta_t >= E_theta)[0] # np.where() will return a tuple\n \n if len(increment)==0: # if can't find any appropriate bar\n bar.append(data.shape[0] - current_loc)\n break \n if bar[bar_len] + (increment[0]+1) >= N:\n bar.append(data.shape[0] - current_loc)\n break\n bar.append(increment[0]+1)# python start from 0 but we want to store the length of each bar\n current_loc += (increment[0]+1)\n bar_len += 1\n result = _bar2df(bar,data)\n return result", "def test_n_bins(self):\n with Pandas() as pd:\n if pd is None:\n return\n with Numpy() as np: # noqa\n if numpy is None:\n return\n sys.stderr.write(\"\\n\")\n\n df, hist1, hist2, hist3 = get_test_histograms1()\n\n assert hist1.n_bins == 5\n assert hist2.n_bins == 5\n 
assert hist3.n_bins == 7", "def split_data(df: pd.DataFrame, ratio: float, purging: bool = True, n_bars: int = 10) -> Tuple[pd.DataFrame, pd.DataFrame]:\n split_idx = int(df.shape[0] * ratio)\n df1 = df[:split_idx]\n df2 = df[split_idx:]\n if purging:\n purge_idx = round((n_bars-1) * ratio)\n df1 = df1[:-purge_idx]\n df2 = df2[(n_bars - 1 - purge_idx):]\n\n return df1, df2", "def plot_bar(self, nsingular=None, nbars=20):\n if nsingular is not None:\n self.get_identifiability_dataframe(nsingular)\n\n plot_obj = plots.IdentBar(self.ident_df, nsingular=nsingular, nbars=nbars)\n plot_obj.generate()\n plot_obj.draw()\n \n return plot_obj.fig, plot_obj.ax", "def compte(df):\n\n df.value_counts()[:100].plot(kind='bar')\n plt.show()", "def top_words_bar_chart(df, n=10):\n messages = df['message'].values\n word_counts = {}\n for message in messages:\n tokens = tokenize(message)\n for token in tokens:\n if token in word_counts:\n word_counts[token] += 1\n else:\n word_counts[token] = 1\n\n items = sorted(word_counts.items(), key=lambda x: x[1], reverse=True)\n items = items[0:n]\n words = list(map(lambda x: x[0], items))\n counts = list(map(lambda x: x[1], items))\n return {\n 'data': [\n Bar(\n x=words,\n y=counts\n )\n ],\n\n 'layout': {\n 'title': 'Most common word stems (outside stopwords)',\n 'yaxis': {\n 'title': \"Count\",\n },\n 'xaxis': {\n 'title': \"Word\"\n }\n }\n }", "def n50_barplot(df, path, settings, title=None):\n n50_bar = Plot(path=path + \"NanoComp_N50.html\", title=\"Comparing read length N50\")\n datasets = df[\"dataset\"].unique()\n length_column = \"aligned_lengths\" if \"aligned_lengths\" in df else \"lengths\"\n ylabel = \"Aligned read length N50\" if \"aligned_lengths\" in df else \"Sequenced read length N50\"\n\n palette = (\n settings[\"colors\"] if settings[\"colors\"] else cycle(plotly.colors.DEFAULT_PLOTLY_COLORS)\n )\n\n n50s = [get_N50(np.sort(df.loc[df[\"dataset\"] == d, length_column])) for d in datasets]\n n50_bar.fig = go.Figure()\n\n for idx, n50, color in zip(datasets, n50s, palette):\n n50_bar.fig.add_trace(go.Bar(x=[idx], y=[n50], name=idx, marker_color=color))\n\n n50_bar.fig.update_layout(\n title=title or n50_bar.title,\n title_x=0.5,\n yaxis_title=ylabel,\n )\n\n n50_bar.html = n50_bar.fig.to_html(full_html=False, include_plotlyjs=\"cdn\")\n n50_bar.save(settings)\n return [n50_bar]", "def plot_norm_bar(df, title, figsize=(12,7)):\n fig, ax = plt.subplots(ncols=1, figsize=figsize)\n fig.suptitle(title)\n cat_value_counts = df.fillna('missing').value_counts(normalize=True)\n sns.barplot(y = cat_value_counts.index, x= cat_value_counts.values*100)\n ax.set(xlabel= 'percentage', ylabel=str(df.name))\n \n plt.plot()\n\n return", "def oneNumBar(df, colName):\n bins = pd.qcut(x=df[colName[0]], q=15, duplicates='drop')\n ax = bins.value_counts()\n bins = bins.cat.as_ordered()\n bins = bins.cat.categories\n bounds = bins.left \n bounds = list(bounds)\n bounds.append(bins[len(bounds)-1].right)\n texts = []\n for x,y in zip(bounds[0::],bounds[1::]):\n texts.append(\"(\" + str(x) + \", \" + str(y) + \"]\") \n barData = [go.Bar(x=texts, \n y=ax,\n marker=dict(\n color = '#92c5de',\n opacity=0.8)\n )] \n layout = go.Layout(\n title=\"Bar Plot Showing Count of Values for \" + str(colName[0]),\n xaxis=dict(\n title= colName[0]\n ),\n yaxis=dict(\n title= \"NUMBER OF RECORDS\", \n )\n )\n fig = go.Figure(data=barData, layout=layout)\n return {\"label\":\"Frequency\", \"plot\":fig}", "def pivot_num(data: PandasDF, var: str, performance: str = 'bad_ind',\n n: int = 10, 
ks: float = True, max_ks_only: float = False) -> PandasDF:\n temp = data.loc[:, [var, performance]].copy()\n temp_missing = temp.loc[temp[var].isnull(), :]\n temp_noMissing = temp.loc[~temp[var].isnull(), :]\n temp_noMissing.sort_values(var, inplace=True)\n length = round(temp_noMissing.shape[0]/n)\n\n group = temp_noMissing.groupby(np.arange(temp_noMissing.shape[0]) // length).apply(\n lambda obj: pd.Series({\n 'var': var,\n 'level': str(obj[var].min()) + ' - ' + str(obj[var].max()),\n 'bad rate': obj[performance].mean(),\n 'count': len(obj[performance])\n }))\n group_missing = pd.DataFrame({\n 'var': var,\n 'level': np.nan,\n 'bad rate': temp_missing[performance].mean(),\n 'count': temp_missing.shape[0],\n 'ks': np.nan\n }, index=[n+1, ])\n # temp = group[['bad rate', 'count']].copy()\n if ks or max_ks_only:\n group['bad'] = [r * c for r, c in zip(group['bad rate'], group['count'])]\n group['cum_bad'] = [sum(group.loc[0:i, 'bad']) for i in range(group.shape[0])]\n group['cum_count'] = [sum(group.loc[0:i, 'count']) for i in range(group.shape[0])]\n group['cum_good'] = [c - b for c, b in zip(group['cum_count'], group['cum_bad'])]\n group['ks'] = [\n (100 * abs(g/group.loc[group.shape[0]-1, 'cum_good'] - b/group.loc[group.shape[0]-1, 'cum_bad']))\n for g, b in zip(group.cum_good, group.cum_bad)]\n max_index = group['ks'].idxmax()\n if max_ks_only:\n return group.loc[[max_index, ], ['var', 'ks']]\n group['ks'] = ['%.1f%%' % x for x in group['ks']]\n group = group.append(group_missing)\n group['bad rate'] = ['%.2f%%' % (x * 100) for x in group['bad rate']]\n\n group.style.applymap(highlight, subset=pd.IndexSlice[max_index, ['ks']])\n\n return group[['var', 'level', 'bad rate', 'count', 'cum_bad', 'cum_good', 'ks']]\n else:\n group = group.append(group_missing[['var', 'level', 'bad rate', 'count']])\n group.rename(columns={'bad rate': 'avg %s' % performance}, inplace=True)\n return group[['var', 'level', 'avg %s' % performance, 'count']]", "def tail(self, n: int = 5) -> 'DataFrame':\n return self[-n:, :] # type: ignore", "def barplot(self, x = \"Predictor\", color = None, opacity = 1, template = \"ggplot2\", \n has_title = True, barmode=\"stack\", is_horizontal = False, title = None, is_percent = False,\n show_num = False):\n if color: #Produce either a stacked or grouped bar plot\n df_stack = self._df.groupby([x,color]).size().reset_index()\n df_stack['Percentage'] = self._df.groupby([x, color]).size().groupby(level = 0).apply(lambda \n x:100 * x/float(x.sum())).values\n df_stack.columns = [x, color, 'Count', 'Percentage']\n df_stack['Percentage'] = round(df_stack['Percentage'], 2)\n \n x_clean, df_clean = clean_varname(df_stack, var = x)\n color_clean, df_clean = clean_varname(df_clean, var = color)\n \n if has_title:\n if not title:\n title = f\"Bar Plot of {x_clean} and {color_clean}\"\n else:\n title = None\n \n \n # 8 different variations for how this graph can appear:\n if is_horizontal:\n if is_percent:\n if show_num: #Show percentages on stacked bar graph\n fig = px.bar(df_clean, y = x_clean, x = 'Percentage', \n color = color_clean, template = template, barmode=barmode, \n opacity = opacity, title = title, text = df_clean['Percentage'])\n else:\n fig = px.bar(df_clean, y = x_clean, x = 'Percentage', \n color = color_clean, template = template, barmode=barmode, \n opacity = opacity, title = title)\n else:\n if show_num: #Show counts on stacked bar graph:\n fig = px.bar(df_clean, y = x_clean, x = 'Count', \n color = color_clean, template = template, barmode=barmode, \n opacity = 
opacity, title = title, text = df_clean['Count'])\n else:\n fig = px.bar(df_clean, y = x_clean, x = 'Count', \n color = color_clean, template = template, barmode=barmode, \n opacity = opacity, title = title)\n else:\n if is_percent:\n if show_num:\n fig = px.bar(df_clean, x = x_clean, y = 'Percentage', \n color = color_clean, template = template, barmode=barmode, \n opacity = opacity, title = title, text = df_clean['Percentage'])\n else:\n fig = px.bar(df_clean, x = x_clean, y = 'Percentage', \n color = color_clean, template = template, barmode=barmode, \n opacity = opacity, title = title)\n else:\n if show_num:\n fig = px.bar(df_clean, x = x_clean, y = 'Count', \n color = color_clean, template = template, barmode=barmode, \n opacity = opacity, title = title, text = df_clean['Count'])\n else:\n fig = px.bar(df_clean, x = x_clean, y = 'Count', \n color = color_clean, template = template, barmode=barmode, \n opacity = opacity, title = title) \n \n return fig\n \n else: #Create a basic bar plot\n df_stack = self._df.groupby([x]).size().reset_index()\n df_stack['Percentage'] = self._df.groupby([x]).size().groupby(level = 0).apply(lambda", "def bar_length(stage, fraction):\n x1, y1, x2, y2 = canvas.coords('bar_' + stage)\n canvas.coords(\n 'bar_' + stage,\n 20,\n y1,\n 20 + round(fraction * (width - 40)),\n y2,\n )", "def plot_barplot(\n df: pd.DataFrame,\n feature: str,\n length: int = None,\n cutoff: float = None,\n figsize: tuple = (5, 10),\n ticksize: int = 15,\n) -> pd.DataFrame:\n\n # count each category within the feature\n count = df.groupby(f\"{feature}\")[f\"{feature}\"].count().sort_values(ascending=False)\n # convert it into a dataframe\n df_count = pd.DataFrame(columns=([\"counts\"]))\n df_count.counts = count\n\n # plot barplot\n plt.figure(figsize=figsize)\n x = df_count.counts[:length]\n y = df_count.index[:length]\n b = sns.barplot(x=x, y=y)\n b.set_yticklabels(df_count.index[:length], size=ticksize)\n # add a cut off line\n if cutoff:\n plt.axhline(y=cutoff, linestyle=\"--\")\n plt.show()\n return df_count", "def evenly_spaced_BDs_OLD(BDs, n):\n BDs = BDs.iloc[:,0].tolist()\n BD_min = min(BDs)\n BD_max = max(BDs)\n return np.linspace(BD_min, BD_max, n)", "def stacked_bar_plot(df, cat1, cat2, bar_size=30, nan_colums_thresh=0, figsize=(20, 10), percentile=0.001, plot_flag = 1, normalize = False, sort_bars = False, return_pivot = False):\n \n df_for_pivot = df[[cat1, cat2]].groupby([cat1, cat2]).size().reset_index(name='counts')\n df_pivot = df_for_pivot.pivot(index=cat1, columns=cat2, values='counts')\n \n if normalize == True:\n df_pivot['sum_cols'] = df_pivot.sum(axis = 1)\n # Normalize by the sum of the row\n df_pivot_percent = df_pivot.div(df_pivot.sum_cols, axis=0)\n # Drop the sum column\n df_pivot_clean = df_pivot_percent.drop(columns=['sum_cols'])\n not_nan_positions_ratio = 100\n else:\n # Drops rows having more than nan_colums_thresh Nan values\n # In case of search_term VS search_position, drops rows with all NaN and only search_position = 11 is not NaN\n df_pivot_clean = df_pivot.dropna(thresh=nan_colums_thresh)\n \n # sort the search term index in descending order\n if sort_bars:\n ordered_index = df_pivot_clean.sum(axis=1).sort_values(ascending=False).index\n df_pivot_clean = df_pivot_clean.reindex(ordered_index)\n\n # Calculate the ratio of the informative search terms size compared to the overall size of the different search terms\n not_nan_positions_ratio = 100 * df_pivot_clean.shape[0] / (df_pivot.shape[0])\n \n \n if plot_flag == 1:\n # Choose only the top 
percentile data to avoid data resolution problems\n # df_to_plot = df_pivot_clean[df_pivot_clean.sum(axis = 1) > df_pivot_clean.sum(axis = 1).quantile(percentile)]\n\n # Choose only the top 30 bars to avoid data resolution problems\n df_to_plot = df_pivot_clean.iloc[0:bar_size, ]\n\n # Stacked bar plot\n df_to_plot.plot.bar(stacked=True, figsize=figsize, cmap=plt.get_cmap('tab20c'))\n if normalize == True:\n plt.ylabel(\"Normalized distribution\", fontsize=15)\n else:\n plt.suptitle(str('Number of samples per '+ cat1 + ' and '+ cat2), fontsize=20, fontweight='bold')\n plt.ylabel('Number of samples', fontsize=15)\n \n if return_pivot == True : \n return (df_pivot_clean, not_nan_positions_ratio)", "def update_bars(self):\n raise NotImplementedError(\"Should implement update_bars()\")", "def process_bars_tasksize(key):\n dt = data[key]\n ts_vals = dt['Tasksize'].drop_duplicates() # number of rows tests.\n\n for ts in ts_vals:\n dt_ts = dt[dt['Tasksize'] == ts] # rows with tasksize == ts\n\n row_vals = dt_ts['Rows'].drop_duplicates()\n\n # Figure\n fig, ax = plt.subplots()\n\n fig.suptitle(\"Tasksize \" + str(ts))\n\n ax.set_xlabel('Nodes')\n ax.set_ylabel('Time')\n ax.grid(axis = 'y', color = 'b', ls = '-.', lw = 0.25)\n\n nbars = len(row_vals)\n width = 1.0 / (nbars + 1)\n offset = - width * nbars / 4 # /2 center /2 half\n\n for rows in row_vals:\n dt_rows = dt_ts[dt_ts['Rows'] == rows]\n\n x = np.arange(dt_rows['worldsize'].size)\n\n ax.bar(x + offset, dt_rows[\"Total time\"], color = 'b', width = width)\n ax.bar(x + offset, dt_rows[\"Algorithm time\"], color = 'r', width = width)\n offset = offset + width\n\n x = dt_ts[dt_ts['Rows'] == rows]['worldsize']\n ax.set_xticks(np.arange(x.size))\n ax.set_xticklabels([str(i) for i in x])\n\n filename = \"Bars_\"+key+\"_\"+str(ts)+\".png\"\n plt.savefig(filename)\n plt.close()\n print(\"Generated: \", filename)", "def bar_plot (data, n_slices, dx = 1, dy = 1, z_max = 1, x_label = 'x',\n y_label='y', z_label='z', elev_angle = 30, azim_angle = 115):\n\n # Initialize the figure object\n fig = plt.figure(figsize = [10, 8])\n ax = fig.add_subplot(111, projection='3d')\n\n # Colors to indicate variation in y-axis\n colors = sns.color_palette('YlGnBu_r', n_colors=n_slices+1)\n\n # Dimensions of the 2d-array\n x_length, y_length = data.shape\n\n # Initial index of the slice\n i_slice = 0\n\n # Iterate through each slice and add bar plots\n for y in np.arange(0, y_length, y_length//n_slices):\n\n # x-, y- and z-positions\n x_pos = np.arange(x_length)*dx\n y_pos = y*np.ones(x_length)*dy\n z_pos = np.zeros(x_length)\n\n # Horizontal dimensions of the bars\n delta_x = dx*np.ones(x_length)\n delta_y = 2*dy*np.ones(x_length)\n\n # Heights in the z-direction\n delta_z = data[:,y]\n\n ax.bar3d(x_pos, y_pos, z_pos, delta_x, delta_y, delta_z,\n color = colors[i_slice])\n\n i_slice = i_slice + 1;\n\n # Add axis labels\n ax.set_xlabel(x_label)\n ax.set_ylabel(y_label)\n ax.set_zlabel(z_label)\n\n # Adjust the 3d viewing angle of the plot\n ax.view_init(elev_angle, azim_angle)\n\n # Set the z-limit of the plot\n z_max = np.min([z_max, np.max(data)])\n ax.set_zlim([0, z_max])\n\n return fig, ax", "def _convert_bar_width(x, width=1, ncols=1):\n # WARNING: This will fail for non-numeric non-datetime64 singleton\n # datatypes but this is good enough for vast majority of cases.\n x_test = np.atleast_1d(_to_ndarray(x))\n if len(x_test) >= 2:\n x_step = x_test[1:] - x_test[:-1]\n x_step = np.concatenate((x_step, x_step[-1:]))\n elif x_test.dtype == np.datetime64:\n x_step = 
np.timedelta64(1, 'D')\n else:\n x_step = np.array(0.5)\n if np.issubdtype(x_test.dtype, np.datetime64):\n # Avoid integer timedelta truncation\n x_step = x_step.astype('timedelta64[ns]')\n return width * x_step / ncols", "def split_df(df, n_chunks):\n chunk_size = int(np.ceil(df.shape[0] / n_chunks))\n assert n_chunks * chunk_size >= df.shape[0]\n chunks = []\n for i in range(0, df.shape[0], chunk_size):\n chunks.append(df[i:i + chunk_size])\n assert len(chunks) == n_chunks\n return chunks" ]
[ "0.6623224", "0.5880733", "0.56581855", "0.5601714", "0.5332026", "0.52718735", "0.52266806", "0.5171003", "0.51566476", "0.5070246", "0.5067792", "0.4987947", "0.4968835", "0.49510282", "0.49025717", "0.48347977", "0.47774142", "0.4759826", "0.46863613", "0.4677156", "0.46704686", "0.46412578", "0.46273905", "0.4619714", "0.4618038", "0.4610588", "0.45995718", "0.45780757", "0.4575404", "0.45707652" ]
0.61674595
1
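A matching sketch for make_nbars_future under the same assumptions (function in scope, pandas < 2.0); here Close0 is the current bar, Close1..Close3 look forward, and no date correction is applied.

import pandas as pd

# Same illustrative 8-bar frame as above; values are assumptions, not from the source.
df = pd.DataFrame({
    'Date': pd.date_range('2021-01-01', periods=8),
    'Close': [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0],
})

future = make_nbars_future(df, n_bars=3)
# Close is renamed to Close0; Close1..Close3 hold the next 1..3 closes,
# so the trailing 3 rows are dropped by dropna().
print(future.columns.tolist())  # ['Date', 'Close0', 'Close1', 'Close2', 'Close3']
print(len(future))              # 5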
Usage: When you call the DescribeDBInstanceEncryptionKey operation, the instance must have transparent data encryption (TDE) enabled in BYOK mode. You can call the [ModifyDBInstanceTDE](~~131267~~) operation to enable TDE.
def describe_dbinstance_encryption_key_with_options( self, request: dds_20151201_models.DescribeDBInstanceEncryptionKeyRequest, runtime: util_models.RuntimeOptions, ) -> dds_20151201_models.DescribeDBInstanceEncryptionKeyResponse: UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.dbinstance_id): query['DBInstanceId'] = request.dbinstance_id if not UtilClient.is_unset(request.encryption_key): query['EncryptionKey'] = request.encryption_key if not UtilClient.is_unset(request.owner_account): query['OwnerAccount'] = request.owner_account if not UtilClient.is_unset(request.owner_id): query['OwnerId'] = request.owner_id if not UtilClient.is_unset(request.resource_owner_account): query['ResourceOwnerAccount'] = request.resource_owner_account if not UtilClient.is_unset(request.resource_owner_id): query['ResourceOwnerId'] = request.resource_owner_id if not UtilClient.is_unset(request.security_token): query['SecurityToken'] = request.security_token req = open_api_models.OpenApiRequest( query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( action='DescribeDBInstanceEncryptionKey', version='2015-12-01', protocol='HTTPS', pathname='/', method='POST', auth_type='AK', style='RPC', req_body_type='formData', body_type='json' ) return TeaCore.from_map( dds_20151201_models.DescribeDBInstanceEncryptionKeyResponse(), self.call_api(params, req, runtime) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def describe_dbinstance_encryption_key(\n self,\n request: dds_20151201_models.DescribeDBInstanceEncryptionKeyRequest,\n ) -> dds_20151201_models.DescribeDBInstanceEncryptionKeyResponse:\n runtime = util_models.RuntimeOptions()\n return self.describe_dbinstance_encryption_key_with_options(request, runtime)", "def modify_dbinstance_tdewith_options(\n self,\n request: dds_20151201_models.ModifyDBInstanceTDERequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.ModifyDBInstanceTDEResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.encryption_key):\n query['EncryptionKey'] = request.encryption_key\n if not UtilClient.is_unset(request.encryptor_name):\n query['EncryptorName'] = request.encryptor_name\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.role_arn):\n query['RoleARN'] = request.role_arn\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n if not UtilClient.is_unset(request.tdestatus):\n query['TDEStatus'] = request.tdestatus\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='ModifyDBInstanceTDE',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.ModifyDBInstanceTDEResponse(),\n self.call_api(params, req, runtime)\n )", "async def describe_dbinstance_encryption_key_with_options_async(\n self,\n request: dds_20151201_models.DescribeDBInstanceEncryptionKeyRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeDBInstanceEncryptionKeyResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.encryption_key):\n query['EncryptionKey'] = request.encryption_key\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeDBInstanceEncryptionKey',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeDBInstanceEncryptionKeyResponse(),\n await self.call_api_async(params, req, runtime)\n )", "async def 
describe_dbinstance_encryption_key_async(\n self,\n request: dds_20151201_models.DescribeDBInstanceEncryptionKeyRequest,\n ) -> dds_20151201_models.DescribeDBInstanceEncryptionKeyResponse:\n runtime = util_models.RuntimeOptions()\n return await self.describe_dbinstance_encryption_key_with_options_async(request, runtime)", "def modify_dbinstance_tde(\n self,\n request: dds_20151201_models.ModifyDBInstanceTDERequest,\n ) -> dds_20151201_models.ModifyDBInstanceTDEResponse:\n runtime = util_models.RuntimeOptions()\n return self.modify_dbinstance_tdewith_options(request, runtime)", "async def modify_dbinstance_tdewith_options_async(\n self,\n request: dds_20151201_models.ModifyDBInstanceTDERequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.ModifyDBInstanceTDEResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.encryption_key):\n query['EncryptionKey'] = request.encryption_key\n if not UtilClient.is_unset(request.encryptor_name):\n query['EncryptorName'] = request.encryptor_name\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.role_arn):\n query['RoleARN'] = request.role_arn\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n if not UtilClient.is_unset(request.tdestatus):\n query['TDEStatus'] = request.tdestatus\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='ModifyDBInstanceTDE',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.ModifyDBInstanceTDEResponse(),\n await self.call_api_async(params, req, runtime)\n )", "def describe_dbinstance_tdeinfo_with_options(\n self,\n request: dds_20151201_models.DescribeDBInstanceTDEInfoRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeDBInstanceTDEInfoResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeDBInstanceTDEInfo',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return 
TeaCore.from_map(\n dds_20151201_models.DescribeDBInstanceTDEInfoResponse(),\n self.call_api(params, req, runtime)\n )", "async def modify_dbinstance_tde_async(\n self,\n request: dds_20151201_models.ModifyDBInstanceTDERequest,\n ) -> dds_20151201_models.ModifyDBInstanceTDEResponse:\n runtime = util_models.RuntimeOptions()\n return await self.modify_dbinstance_tdewith_options_async(request, runtime)", "def describe_dbinstance_tdeinfo(\n self,\n request: dds_20151201_models.DescribeDBInstanceTDEInfoRequest,\n ) -> dds_20151201_models.DescribeDBInstanceTDEInfoResponse:\n runtime = util_models.RuntimeOptions()\n return self.describe_dbinstance_tdeinfo_with_options(request, runtime)", "async def describe_dbinstance_tdeinfo_with_options_async(\n self,\n request: dds_20151201_models.DescribeDBInstanceTDEInfoRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeDBInstanceTDEInfoResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeDBInstanceTDEInfo',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeDBInstanceTDEInfoResponse(),\n await self.call_api_async(params, req, runtime)\n )", "def describe_user_encryption_key_list_with_options(\n self,\n request: dds_20151201_models.DescribeUserEncryptionKeyListRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeUserEncryptionKeyListResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n if not UtilClient.is_unset(request.target_region_id):\n query['TargetRegionId'] = request.target_region_id\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeUserEncryptionKeyList',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeUserEncryptionKeyListResponse(),\n self.call_api(params, req, runtime)\n )", "def 
walletinfo(test_unlock):\n stm = shared_morphene_instance()\n if mph.rpc is not None:\n mph.rpc.rpcconnect() \n t = PrettyTable([\"Key\", \"Value\"])\n t.align = \"l\"\n t.add_row([\"created\", mph.wallet.created()])\n t.add_row([\"locked\", mph.wallet.locked()])\n t.add_row([\"Number of stored keys\", len(mph.wallet.getPublicKeys())])\n t.add_row([\"sql-file\", mph.wallet.keyStorage.sqlDataBaseFile])\n password_storage = mph.config[\"password_storage\"]\n t.add_row([\"password_storage\", password_storage])\n password = os.environ.get(\"UNLOCK\")\n if password is not None:\n t.add_row([\"UNLOCK env set\", \"yes\"])\n else:\n t.add_row([\"UNLOCK env set\", \"no\"])\n if KEYRING_AVAILABLE:\n t.add_row([\"keyring installed\", \"yes\"])\n else:\n t.add_row([\"keyring installed\", \"no\"])\n if test_unlock:\n if unlock_wallet(stm):\n t.add_row([\"Wallet unlock\", \"successful\"])\n else:\n t.add_row([\"Wallet unlock\", \"not working\"])\n # t.add_row([\"getPublicKeys\", str(mph.wallet.getPublicKeys())])\n print(t)", "async def describe_dbinstance_tdeinfo_async(\n self,\n request: dds_20151201_models.DescribeDBInstanceTDEInfoRequest,\n ) -> dds_20151201_models.DescribeDBInstanceTDEInfoResponse:\n runtime = util_models.RuntimeOptions()\n return await self.describe_dbinstance_tdeinfo_with_options_async(request, runtime)", "def database_encryption(self) -> 'outputs.DatabaseEncryptionResponse':\n return pulumi.get(self, \"database_encryption\")", "def _get_sql_db_tde_disabled_event(com, ext):\n friendly_cloud_type = util.friendly_string(com.get('cloud_type'))\n reference = com.get('reference')\n description = (\n '{} SQL DB {} has TDE disabled.'\n .format(friendly_cloud_type, reference)\n )\n recommendation = (\n 'Check {} SQL DB {} and enable TDE.'\n .format(friendly_cloud_type, reference)\n )\n event_record = {\n # Preserve the extended properties from the virtual\n # machine record because they provide useful context to\n # locate the virtual machine that led to the event.\n 'ext': util.merge_dicts(ext, {\n 'record_type': 'sql_db_tde_event'\n }),\n 'com': {\n 'cloud_type': com.get('cloud_type'),\n 'record_type': 'sql_db_tde_event',\n 'reference': reference,\n 'description': description,\n 'recommendation': recommendation,\n }\n }\n\n _log.info('Generating sql_db_tde_event; %r', event_record)\n yield event_record", "def CreateFromExtendedKey(self,\n wallet_name: str,\n ex_key_str: str) -> HdWalletBase:\n try:\n bip_obj = Bip32Secp256k1.FromExtendedKey(ex_key_str)\n except Bip32KeyError as ex:\n raise ValueError(f\"Invalid extended key: {ex_key_str}\") from ex\n\n # Segwit wallet uses hardened derivation, not supported by public-only objects\n if bip_obj.IsPublicOnly() and self.m_mnemonic_type == HdWalletElectrumV2MnemonicTypes.SEGWIT:\n raise ValueError(\"Only private extended keys are supported for segwit mnemonic type\")\n\n electrum_obj = self.m_electrum_cls(bip_obj)\n return HdWalletElectrumV2(wallet_name=wallet_name,\n electrum_obj=electrum_obj)", "def create_tsigkey(self, context, tsigkey):\n\n if tsigkey['algorithm'] not in TSIG_SUPPORTED_ALGORITHMS:\n raise exceptions.NotImplemented('Unsupported algorithm')\n\n tsigkey_m = models.TsigKey()\n\n tsigkey_m.update({\n 'designate_id': tsigkey['id'],\n 'name': tsigkey['name'],\n 'algorithm': tsigkey['algorithm'],\n 'secret': base64.b64encode(tsigkey['secret'])\n })\n\n tsigkey_m.save(self.session)\n\n # NOTE(kiall): Prepare and execute query to install this TSIG Key on\n # every domain. 
We use a manual query here since anything\n # else would be impossibly slow.\n query_select = select([\n models.Domain.__table__.c.id,\n \"'TSIG-ALLOW-AXFR'\",\n \"'%s'\" % tsigkey['name']]\n )\n\n columns = [\n models.DomainMetadata.__table__.c.domain_id,\n models.DomainMetadata.__table__.c.kind,\n models.DomainMetadata.__table__.c.content,\n ]\n\n query = InsertFromSelect(models.DomainMetadata.__table__, query_select,\n columns)\n\n # NOTE(kiall): A TX is required for, at the least, SQLite.\n self.session.begin()\n self.session.execute(query)\n self.session.commit()", "def encrypt_item(table_name, aws_cmk_id):\n index_key = {\"partition_attribute\": {\"S\": \"is this\"}, \"sort_attribute\": {\"N\": \"55\"}}\n plaintext_item = {\n \"example\": {\"S\": \"data\"},\n \"some numbers\": {\"N\": \"99\"},\n \"and some binary\": {\"B\": b\"\\x00\\x01\\x02\"},\n \"leave me\": {\"S\": \"alone\"}, # We want to ignore this attribute\n }\n # Collect all of the attributes that will be encrypted (used later).\n encrypted_attributes = set(plaintext_item.keys())\n encrypted_attributes.remove(\"leave me\")\n # Collect all of the attributes that will not be encrypted (used later).\n unencrypted_attributes = set(index_key.keys())\n unencrypted_attributes.add(\"leave me\")\n # Add the index pairs to the item.\n plaintext_item.update(index_key)\n\n # Create a normal client.\n client = boto3.client(\"dynamodb\")\n # Create a crypto materials provider using the specified AWS KMS key.\n aws_kms_cmp = AwsKmsCryptographicMaterialsProvider(key_id=aws_cmk_id)\n # Create attribute actions that tells the encrypted client to encrypt all attributes except one.\n actions = AttributeActions(\n default_action=CryptoAction.ENCRYPT_AND_SIGN, attribute_actions={\"leave me\": CryptoAction.DO_NOTHING}\n )\n # Use these objects to create an encrypted client.\n encrypted_client = EncryptedClient(client=client, materials_provider=aws_kms_cmp, attribute_actions=actions)\n\n # Put the item to the table, using the encrypted client to transparently encrypt it.\n encrypted_client.put_item(TableName=table_name, Item=plaintext_item)\n\n # Get the encrypted item using the standard client.\n encrypted_item = client.get_item(TableName=table_name, Key=index_key)[\"Item\"]\n\n # Get the item using the encrypted client, transparently decrypting it.\n decrypted_item = encrypted_client.get_item(TableName=table_name, Key=index_key)[\"Item\"]\n\n # Verify that all of the attributes are different in the encrypted item\n for name in encrypted_attributes:\n assert encrypted_item[name] != plaintext_item[name]\n assert decrypted_item[name] == plaintext_item[name]\n\n # Verify that all of the attributes that should not be encrypted were not.\n for name in unencrypted_attributes:\n assert decrypted_item[name] == encrypted_item[name] == plaintext_item[name]\n\n # Clean up the item\n encrypted_client.delete_item(TableName=table_name, Key=index_key)", "async def describe_user_encryption_key_list_with_options_async(\n self,\n request: dds_20151201_models.DescribeUserEncryptionKeyListRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeUserEncryptionKeyListResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not 
UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n if not UtilClient.is_unset(request.target_region_id):\n query['TargetRegionId'] = request.target_region_id\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeUserEncryptionKeyList',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeUserEncryptionKeyListResponse(),\n await self.call_api_async(params, req, runtime)\n )", "def encryption_key(self) -> typing.Optional[aws_cdk.aws_kms.IKey]:\n ...", "def get_data_key(self, encryption_context=None):\n return self.kms_clients[0].generate_data_key(\n KeyId=self.master_key_id,\n KeySpec='AES_256',\n EncryptionContext=encryption_context)", "def crypto_test(tdesc, tpm):\n node_name = tdesc.get('name')\n key = get_attribute(tdesc, 'key')\n if len(key) not in (16, 24, 32):\n raise subcmd.TpmTestError('wrong key size \"%s:%s\"' % (\n node_name,\n ''.join('%2.2x' % ord(x) for x in key)))\n iv = get_attribute(tdesc, 'iv', required=False)\n if iv and len(iv) != 16:\n raise subcmd.TpmTestError('wrong iv size \"%s:%s\"' % (\n node_name,\n ''.join('%2.2x' % ord(x) for x in iv)))\n clear_text = get_attribute(tdesc, 'clear_text')\n if tpm.debug_enabled():\n print('clear text size', len(clear_text))\n cipher_text = get_attribute(tdesc, 'cipher_text', required=False)\n real_cipher_text = crypto_run(node_name, ENCRYPT, key, iv,\n clear_text, cipher_text, tpm)\n crypto_run(node_name, DECRYPT, key, iv, real_cipher_text,\n clear_text, tpm)\n print(utils.cursor_back() + 'SUCCESS: %s' % node_name)", "def test_encryption_cycle_aes_256_gcm_iv12_tag16_hkdf_sha256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]", "def test_encryption_cycle_aes_128_gcm_iv12_tag16_hkdf_sha256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]", "def _get_encryption_key(self, **options):\n\n raise CoreNotImplementedError()", "def configure_disable_aes_encryption(device):\n dialog = Dialog(\n [\n Statement(\n pattern=r\".*Continue\\s*with\\s*master\\s*key\\s*deletion.*\",\n action=\"sendline(yes)\",\n loop_continue=True,\n continue_timer=False,\n )\n ]\n )\n try:\n device.configure(\"no key config-key password-encrypt\", reply=dialog)\n device.configure(\"no password encryption aes\")\n except SubCommandFailure as e:\n raise SubCommandFailure(\n \"Could not remove 
aes password encryption on {device}.\\nError: {e}\".format(\n device=device.name, e=str(e))\n )", "def test_encryption_cycle_aes_128_gcm_iv12_tag16_hkdf_sha256_ecdsa_p256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16_HKDF_SHA256_ECDSA_P256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]", "def _encryptDBPass():\n #run encrypt tool on user given password\n controller.CONF[\"ENCRYPTED_DB_PASS\"] = utils.encryptEngineDBPass(password=controller.CONF[\"DB_PASS\"],\n maskList=masked_value_set)", "def disk_encryption_key(self) -> Optional[pulumi.Input['KeyVaultAndSecretReferenceArgs']]:\n return pulumi.get(self, \"disk_encryption_key\")", "def test_encryption_cycle_aes_192_gcm_iv12_tag16_hkdf_sha256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]" ]
[ "0.673887", "0.6716472", "0.65085775", "0.6401395", "0.6124461", "0.6019866", "0.5743563", "0.5593969", "0.5453083", "0.53697926", "0.50822043", "0.5020621", "0.50204563", "0.50042874", "0.4984543", "0.49620974", "0.49530983", "0.48969638", "0.4866419", "0.4756271", "0.4729609", "0.4723395", "0.47055665", "0.47010508", "0.47006", "0.46861228", "0.46778607", "0.46730378", "0.46603808", "0.46564457" ]
0.6771073
0
Usage: When you call the DescribeDBInstanceEncryptionKey operation, the instance must have transparent data encryption (TDE) enabled in BYOK mode. You can call the [ModifyDBInstanceTDE](~~131267~~) operation to enable TDE.
def describe_dbinstance_encryption_key( self, request: dds_20151201_models.DescribeDBInstanceEncryptionKeyRequest, ) -> dds_20151201_models.DescribeDBInstanceEncryptionKeyResponse: runtime = util_models.RuntimeOptions() return self.describe_dbinstance_encryption_key_with_options(request, runtime)
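A minimal invocation sketch for the SDK method above. The client construction, module paths, endpoint, and both identifiers are assumptions added for illustration; only the request fields (DBInstanceId, EncryptionKey) come from the SDK code itself, and TDE must already be enabled in BYOK mode as the usage note states.

# Illustrative sketch (not from the source): client setup, module paths,
# endpoint, and identifiers are assumptions; only the request fields match
# the SDK code above.
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_dds20151201.client import Client as DdsClient
from alibabacloud_dds20151201 import models as dds_20151201_models

config = open_api_models.Config(
    access_key_id='<access-key-id>',          # assumption: AK/SK credentials
    access_key_secret='<access-key-secret>',
    endpoint='mongodb.aliyuncs.com',          # assumption: service endpoint
)
client = DdsClient(config)

request = dds_20151201_models.DescribeDBInstanceEncryptionKeyRequest(
    dbinstance_id='dds-bp1xxxxxxxxxxxxx',                   # hypothetical instance ID
    encryption_key='0d2470df-da7b-4786-b981-xxxxxxxxxxxx',  # hypothetical KMS key ID
)
response = client.describe_dbinstance_encryption_key(request)
print(response.body)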
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def describe_dbinstance_encryption_key_with_options(\n self,\n request: dds_20151201_models.DescribeDBInstanceEncryptionKeyRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeDBInstanceEncryptionKeyResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.encryption_key):\n query['EncryptionKey'] = request.encryption_key\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeDBInstanceEncryptionKey',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeDBInstanceEncryptionKeyResponse(),\n self.call_api(params, req, runtime)\n )", "def modify_dbinstance_tdewith_options(\n self,\n request: dds_20151201_models.ModifyDBInstanceTDERequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.ModifyDBInstanceTDEResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.encryption_key):\n query['EncryptionKey'] = request.encryption_key\n if not UtilClient.is_unset(request.encryptor_name):\n query['EncryptorName'] = request.encryptor_name\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.role_arn):\n query['RoleARN'] = request.role_arn\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n if not UtilClient.is_unset(request.tdestatus):\n query['TDEStatus'] = request.tdestatus\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='ModifyDBInstanceTDE',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.ModifyDBInstanceTDEResponse(),\n self.call_api(params, req, runtime)\n )", "async def describe_dbinstance_encryption_key_with_options_async(\n self,\n request: dds_20151201_models.DescribeDBInstanceEncryptionKeyRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeDBInstanceEncryptionKeyResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n 
query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.encryption_key):\n query['EncryptionKey'] = request.encryption_key\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeDBInstanceEncryptionKey',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeDBInstanceEncryptionKeyResponse(),\n await self.call_api_async(params, req, runtime)\n )", "async def describe_dbinstance_encryption_key_async(\n self,\n request: dds_20151201_models.DescribeDBInstanceEncryptionKeyRequest,\n ) -> dds_20151201_models.DescribeDBInstanceEncryptionKeyResponse:\n runtime = util_models.RuntimeOptions()\n return await self.describe_dbinstance_encryption_key_with_options_async(request, runtime)", "def modify_dbinstance_tde(\n self,\n request: dds_20151201_models.ModifyDBInstanceTDERequest,\n ) -> dds_20151201_models.ModifyDBInstanceTDEResponse:\n runtime = util_models.RuntimeOptions()\n return self.modify_dbinstance_tdewith_options(request, runtime)", "async def modify_dbinstance_tdewith_options_async(\n self,\n request: dds_20151201_models.ModifyDBInstanceTDERequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.ModifyDBInstanceTDEResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.encryption_key):\n query['EncryptionKey'] = request.encryption_key\n if not UtilClient.is_unset(request.encryptor_name):\n query['EncryptorName'] = request.encryptor_name\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.role_arn):\n query['RoleARN'] = request.role_arn\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n if not UtilClient.is_unset(request.tdestatus):\n query['TDEStatus'] = request.tdestatus\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='ModifyDBInstanceTDE',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.ModifyDBInstanceTDEResponse(),\n await self.call_api_async(params, req, runtime)\n )", "def describe_dbinstance_tdeinfo_with_options(\n self,\n request: 
dds_20151201_models.DescribeDBInstanceTDEInfoRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeDBInstanceTDEInfoResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeDBInstanceTDEInfo',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeDBInstanceTDEInfoResponse(),\n self.call_api(params, req, runtime)\n )", "async def modify_dbinstance_tde_async(\n self,\n request: dds_20151201_models.ModifyDBInstanceTDERequest,\n ) -> dds_20151201_models.ModifyDBInstanceTDEResponse:\n runtime = util_models.RuntimeOptions()\n return await self.modify_dbinstance_tdewith_options_async(request, runtime)", "def describe_dbinstance_tdeinfo(\n self,\n request: dds_20151201_models.DescribeDBInstanceTDEInfoRequest,\n ) -> dds_20151201_models.DescribeDBInstanceTDEInfoResponse:\n runtime = util_models.RuntimeOptions()\n return self.describe_dbinstance_tdeinfo_with_options(request, runtime)", "async def describe_dbinstance_tdeinfo_with_options_async(\n self,\n request: dds_20151201_models.DescribeDBInstanceTDEInfoRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeDBInstanceTDEInfoResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeDBInstanceTDEInfo',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeDBInstanceTDEInfoResponse(),\n await self.call_api_async(params, req, runtime)\n )", "def describe_user_encryption_key_list_with_options(\n self,\n request: dds_20151201_models.DescribeUserEncryptionKeyListRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeUserEncryptionKeyListResponse:\n UtilClient.validate_model(request)\n query = {}\n if not 
UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n if not UtilClient.is_unset(request.target_region_id):\n query['TargetRegionId'] = request.target_region_id\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeUserEncryptionKeyList',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeUserEncryptionKeyListResponse(),\n self.call_api(params, req, runtime)\n )", "def walletinfo(test_unlock):\n stm = shared_morphene_instance()\n if mph.rpc is not None:\n mph.rpc.rpcconnect() \n t = PrettyTable([\"Key\", \"Value\"])\n t.align = \"l\"\n t.add_row([\"created\", mph.wallet.created()])\n t.add_row([\"locked\", mph.wallet.locked()])\n t.add_row([\"Number of stored keys\", len(mph.wallet.getPublicKeys())])\n t.add_row([\"sql-file\", mph.wallet.keyStorage.sqlDataBaseFile])\n password_storage = mph.config[\"password_storage\"]\n t.add_row([\"password_storage\", password_storage])\n password = os.environ.get(\"UNLOCK\")\n if password is not None:\n t.add_row([\"UNLOCK env set\", \"yes\"])\n else:\n t.add_row([\"UNLOCK env set\", \"no\"])\n if KEYRING_AVAILABLE:\n t.add_row([\"keyring installed\", \"yes\"])\n else:\n t.add_row([\"keyring installed\", \"no\"])\n if test_unlock:\n if unlock_wallet(stm):\n t.add_row([\"Wallet unlock\", \"successful\"])\n else:\n t.add_row([\"Wallet unlock\", \"not working\"])\n # t.add_row([\"getPublicKeys\", str(mph.wallet.getPublicKeys())])\n print(t)", "async def describe_dbinstance_tdeinfo_async(\n self,\n request: dds_20151201_models.DescribeDBInstanceTDEInfoRequest,\n ) -> dds_20151201_models.DescribeDBInstanceTDEInfoResponse:\n runtime = util_models.RuntimeOptions()\n return await self.describe_dbinstance_tdeinfo_with_options_async(request, runtime)", "def database_encryption(self) -> 'outputs.DatabaseEncryptionResponse':\n return pulumi.get(self, \"database_encryption\")", "def _get_sql_db_tde_disabled_event(com, ext):\n friendly_cloud_type = util.friendly_string(com.get('cloud_type'))\n reference = com.get('reference')\n description = (\n '{} SQL DB {} has TDE disabled.'\n .format(friendly_cloud_type, reference)\n )\n recommendation = (\n 'Check {} SQL DB {} and enable TDE.'\n .format(friendly_cloud_type, reference)\n )\n event_record = {\n # Preserve the extended properties from the virtual\n # machine record because they provide useful context to\n # locate the virtual machine that led to the event.\n 'ext': util.merge_dicts(ext, {\n 'record_type': 'sql_db_tde_event'\n }),\n 'com': {\n 'cloud_type': com.get('cloud_type'),\n 'record_type': 'sql_db_tde_event',\n 'reference': reference,\n 'description': description,\n 'recommendation': recommendation,\n }\n }\n\n _log.info('Generating sql_db_tde_event; %r', event_record)\n yield event_record", "def 
CreateFromExtendedKey(self,\n wallet_name: str,\n ex_key_str: str) -> HdWalletBase:\n try:\n bip_obj = Bip32Secp256k1.FromExtendedKey(ex_key_str)\n except Bip32KeyError as ex:\n raise ValueError(f\"Invalid extended key: {ex_key_str}\") from ex\n\n # Segwit wallet uses hardened derivation, not supported by public-only objects\n if bip_obj.IsPublicOnly() and self.m_mnemonic_type == HdWalletElectrumV2MnemonicTypes.SEGWIT:\n raise ValueError(\"Only private extended keys are supported for segwit mnemonic type\")\n\n electrum_obj = self.m_electrum_cls(bip_obj)\n return HdWalletElectrumV2(wallet_name=wallet_name,\n electrum_obj=electrum_obj)", "def create_tsigkey(self, context, tsigkey):\n\n if tsigkey['algorithm'] not in TSIG_SUPPORTED_ALGORITHMS:\n raise exceptions.NotImplemented('Unsupported algorithm')\n\n tsigkey_m = models.TsigKey()\n\n tsigkey_m.update({\n 'designate_id': tsigkey['id'],\n 'name': tsigkey['name'],\n 'algorithm': tsigkey['algorithm'],\n 'secret': base64.b64encode(tsigkey['secret'])\n })\n\n tsigkey_m.save(self.session)\n\n # NOTE(kiall): Prepare and execute query to install this TSIG Key on\n # every domain. We use a manual query here since anything\n # else would be impossibly slow.\n query_select = select([\n models.Domain.__table__.c.id,\n \"'TSIG-ALLOW-AXFR'\",\n \"'%s'\" % tsigkey['name']]\n )\n\n columns = [\n models.DomainMetadata.__table__.c.domain_id,\n models.DomainMetadata.__table__.c.kind,\n models.DomainMetadata.__table__.c.content,\n ]\n\n query = InsertFromSelect(models.DomainMetadata.__table__, query_select,\n columns)\n\n # NOTE(kiall): A TX is required for, at the least, SQLite.\n self.session.begin()\n self.session.execute(query)\n self.session.commit()", "def encrypt_item(table_name, aws_cmk_id):\n index_key = {\"partition_attribute\": {\"S\": \"is this\"}, \"sort_attribute\": {\"N\": \"55\"}}\n plaintext_item = {\n \"example\": {\"S\": \"data\"},\n \"some numbers\": {\"N\": \"99\"},\n \"and some binary\": {\"B\": b\"\\x00\\x01\\x02\"},\n \"leave me\": {\"S\": \"alone\"}, # We want to ignore this attribute\n }\n # Collect all of the attributes that will be encrypted (used later).\n encrypted_attributes = set(plaintext_item.keys())\n encrypted_attributes.remove(\"leave me\")\n # Collect all of the attributes that will not be encrypted (used later).\n unencrypted_attributes = set(index_key.keys())\n unencrypted_attributes.add(\"leave me\")\n # Add the index pairs to the item.\n plaintext_item.update(index_key)\n\n # Create a normal client.\n client = boto3.client(\"dynamodb\")\n # Create a crypto materials provider using the specified AWS KMS key.\n aws_kms_cmp = AwsKmsCryptographicMaterialsProvider(key_id=aws_cmk_id)\n # Create attribute actions that tells the encrypted client to encrypt all attributes except one.\n actions = AttributeActions(\n default_action=CryptoAction.ENCRYPT_AND_SIGN, attribute_actions={\"leave me\": CryptoAction.DO_NOTHING}\n )\n # Use these objects to create an encrypted client.\n encrypted_client = EncryptedClient(client=client, materials_provider=aws_kms_cmp, attribute_actions=actions)\n\n # Put the item to the table, using the encrypted client to transparently encrypt it.\n encrypted_client.put_item(TableName=table_name, Item=plaintext_item)\n\n # Get the encrypted item using the standard client.\n encrypted_item = client.get_item(TableName=table_name, Key=index_key)[\"Item\"]\n\n # Get the item using the encrypted client, transparently decrypting it.\n decrypted_item = encrypted_client.get_item(TableName=table_name, 
Key=index_key)[\"Item\"]\n\n # Verify that all of the attributes are different in the encrypted item\n for name in encrypted_attributes:\n assert encrypted_item[name] != plaintext_item[name]\n assert decrypted_item[name] == plaintext_item[name]\n\n # Verify that all of the attributes that should not be encrypted were not.\n for name in unencrypted_attributes:\n assert decrypted_item[name] == encrypted_item[name] == plaintext_item[name]\n\n # Clean up the item\n encrypted_client.delete_item(TableName=table_name, Key=index_key)", "async def describe_user_encryption_key_list_with_options_async(\n self,\n request: dds_20151201_models.DescribeUserEncryptionKeyListRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeUserEncryptionKeyListResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n if not UtilClient.is_unset(request.target_region_id):\n query['TargetRegionId'] = request.target_region_id\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeUserEncryptionKeyList',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeUserEncryptionKeyListResponse(),\n await self.call_api_async(params, req, runtime)\n )", "def encryption_key(self) -> typing.Optional[aws_cdk.aws_kms.IKey]:\n ...", "def get_data_key(self, encryption_context=None):\n return self.kms_clients[0].generate_data_key(\n KeyId=self.master_key_id,\n KeySpec='AES_256',\n EncryptionContext=encryption_context)", "def crypto_test(tdesc, tpm):\n node_name = tdesc.get('name')\n key = get_attribute(tdesc, 'key')\n if len(key) not in (16, 24, 32):\n raise subcmd.TpmTestError('wrong key size \"%s:%s\"' % (\n node_name,\n ''.join('%2.2x' % ord(x) for x in key)))\n iv = get_attribute(tdesc, 'iv', required=False)\n if iv and len(iv) != 16:\n raise subcmd.TpmTestError('wrong iv size \"%s:%s\"' % (\n node_name,\n ''.join('%2.2x' % ord(x) for x in iv)))\n clear_text = get_attribute(tdesc, 'clear_text')\n if tpm.debug_enabled():\n print('clear text size', len(clear_text))\n cipher_text = get_attribute(tdesc, 'cipher_text', required=False)\n real_cipher_text = crypto_run(node_name, ENCRYPT, key, iv,\n clear_text, cipher_text, tpm)\n crypto_run(node_name, DECRYPT, key, iv, real_cipher_text,\n clear_text, tpm)\n print(utils.cursor_back() + 'SUCCESS: %s' % node_name)", "def test_encryption_cycle_aes_256_gcm_iv12_tag16_hkdf_sha256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = 
aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]", "def test_encryption_cycle_aes_128_gcm_iv12_tag16_hkdf_sha256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]", "def _get_encryption_key(self, **options):\n\n raise CoreNotImplementedError()", "def configure_disable_aes_encryption(device):\n dialog = Dialog(\n [\n Statement(\n pattern=r\".*Continue\\s*with\\s*master\\s*key\\s*deletion.*\",\n action=\"sendline(yes)\",\n loop_continue=True,\n continue_timer=False,\n )\n ]\n )\n try:\n device.configure(\"no key config-key password-encrypt\", reply=dialog)\n device.configure(\"no password encryption aes\")\n except SubCommandFailure as e:\n raise SubCommandFailure(\n \"Could not remove aes password encryption on {device}.\\nError: {e}\".format(\n device=device.name, e=str(e))\n )", "def test_encryption_cycle_aes_128_gcm_iv12_tag16_hkdf_sha256_ecdsa_p256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16_HKDF_SHA256_ECDSA_P256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]", "def _encryptDBPass():\n #run encrypt tool on user given password\n controller.CONF[\"ENCRYPTED_DB_PASS\"] = utils.encryptEngineDBPass(password=controller.CONF[\"DB_PASS\"],\n maskList=masked_value_set)", "def disk_encryption_key(self) -> Optional[pulumi.Input['KeyVaultAndSecretReferenceArgs']]:\n return pulumi.get(self, \"disk_encryption_key\")", "def test_encryption_cycle_aes_192_gcm_iv12_tag16_hkdf_sha256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]" ]
[ "0.6771073", "0.6716472", "0.65085775", "0.6401395", "0.6124461", "0.6019866", "0.5743563", "0.5593969", "0.5453083", "0.53697926", "0.50822043", "0.5020621", "0.50204563", "0.50042874", "0.4984543", "0.49620974", "0.49530983", "0.48969638", "0.4866419", "0.4756271", "0.4729609", "0.4723395", "0.47055665", "0.47010508", "0.47006", "0.46861228", "0.46778607", "0.46730378", "0.46603808", "0.46564457" ]
0.673887
1
You can use a custom key obtained by calling the DescribeUserEncryptionKeyList operation to enable TDE. For more information, see [ModifyDBInstanceTDE](~~131267~~).
def describe_user_encryption_key_list_with_options( self, request: dds_20151201_models.DescribeUserEncryptionKeyListRequest, runtime: util_models.RuntimeOptions, ) -> dds_20151201_models.DescribeUserEncryptionKeyListResponse: UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.dbinstance_id): query['DBInstanceId'] = request.dbinstance_id if not UtilClient.is_unset(request.owner_account): query['OwnerAccount'] = request.owner_account if not UtilClient.is_unset(request.owner_id): query['OwnerId'] = request.owner_id if not UtilClient.is_unset(request.resource_owner_account): query['ResourceOwnerAccount'] = request.resource_owner_account if not UtilClient.is_unset(request.resource_owner_id): query['ResourceOwnerId'] = request.resource_owner_id if not UtilClient.is_unset(request.security_token): query['SecurityToken'] = request.security_token if not UtilClient.is_unset(request.target_region_id): query['TargetRegionId'] = request.target_region_id req = open_api_models.OpenApiRequest( query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( action='DescribeUserEncryptionKeyList', version='2015-12-01', protocol='HTTPS', pathname='/', method='POST', auth_type='AK', style='RPC', req_body_type='formData', body_type='json' ) return TeaCore.from_map( dds_20151201_models.DescribeUserEncryptionKeyListResponse(), self.call_api(params, req, runtime) )
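A short usage sketch for the method above. Here `client` is assumed to be an already initialized DDS client (as in the earlier sketch), and the instance ID and region are hypothetical; the `util_models.RuntimeOptions` import mirrors the name the SDK code itself uses.

# Illustrative sketch; `client` is assumed to be an initialized DDS client.
from alibabacloud_tea_util import models as util_models
from alibabacloud_dds20151201 import models as dds_20151201_models

request = dds_20151201_models.DescribeUserEncryptionKeyListRequest(
    dbinstance_id='dds-bp1xxxxxxxxxxxxx',  # hypothetical instance ID
    target_region_id='cn-hangzhou',        # assumption: region whose keys to list
)
runtime = util_models.RuntimeOptions()
response = client.describe_user_encryption_key_list_with_options(request, runtime)
# A key ID from the response can then be passed to ModifyDBInstanceTDE as EncryptionKey.
print(response.body)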
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def modify_dbinstance_tdewith_options(\n self,\n request: dds_20151201_models.ModifyDBInstanceTDERequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.ModifyDBInstanceTDEResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.encryption_key):\n query['EncryptionKey'] = request.encryption_key\n if not UtilClient.is_unset(request.encryptor_name):\n query['EncryptorName'] = request.encryptor_name\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.role_arn):\n query['RoleARN'] = request.role_arn\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n if not UtilClient.is_unset(request.tdestatus):\n query['TDEStatus'] = request.tdestatus\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='ModifyDBInstanceTDE',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.ModifyDBInstanceTDEResponse(),\n self.call_api(params, req, runtime)\n )", "def describe_dbinstance_encryption_key_with_options(\n self,\n request: dds_20151201_models.DescribeDBInstanceEncryptionKeyRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeDBInstanceEncryptionKeyResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.encryption_key):\n query['EncryptionKey'] = request.encryption_key\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeDBInstanceEncryptionKey',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeDBInstanceEncryptionKeyResponse(),\n self.call_api(params, req, runtime)\n )", "async def modify_dbinstance_tdewith_options_async(\n self,\n request: dds_20151201_models.ModifyDBInstanceTDERequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.ModifyDBInstanceTDEResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n 
if not UtilClient.is_unset(request.encryption_key):\n query['EncryptionKey'] = request.encryption_key\n if not UtilClient.is_unset(request.encryptor_name):\n query['EncryptorName'] = request.encryptor_name\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.role_arn):\n query['RoleARN'] = request.role_arn\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n if not UtilClient.is_unset(request.tdestatus):\n query['TDEStatus'] = request.tdestatus\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='ModifyDBInstanceTDE',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.ModifyDBInstanceTDEResponse(),\n await self.call_api_async(params, req, runtime)\n )", "def modify_dbinstance_tde(\n self,\n request: dds_20151201_models.ModifyDBInstanceTDERequest,\n ) -> dds_20151201_models.ModifyDBInstanceTDEResponse:\n runtime = util_models.RuntimeOptions()\n return self.modify_dbinstance_tdewith_options(request, runtime)", "def create_tsigkey(self, context, tsigkey):\n\n if tsigkey['algorithm'] not in TSIG_SUPPORTED_ALGORITHMS:\n raise exceptions.NotImplemented('Unsupported algorithm')\n\n tsigkey_m = models.TsigKey()\n\n tsigkey_m.update({\n 'designate_id': tsigkey['id'],\n 'name': tsigkey['name'],\n 'algorithm': tsigkey['algorithm'],\n 'secret': base64.b64encode(tsigkey['secret'])\n })\n\n tsigkey_m.save(self.session)\n\n # NOTE(kiall): Prepare and execute query to install this TSIG Key on\n # every domain. 
We use a manual query here since anything\n # else would be impossibly slow.\n query_select = select([\n models.Domain.__table__.c.id,\n \"'TSIG-ALLOW-AXFR'\",\n \"'%s'\" % tsigkey['name']]\n )\n\n columns = [\n models.DomainMetadata.__table__.c.domain_id,\n models.DomainMetadata.__table__.c.kind,\n models.DomainMetadata.__table__.c.content,\n ]\n\n query = InsertFromSelect(models.DomainMetadata.__table__, query_select,\n columns)\n\n # NOTE(kiall): A TX is required for, at the least, SQLite.\n self.session.begin()\n self.session.execute(query)\n self.session.commit()", "async def describe_user_encryption_key_list_with_options_async(\n self,\n request: dds_20151201_models.DescribeUserEncryptionKeyListRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeUserEncryptionKeyListResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n if not UtilClient.is_unset(request.target_region_id):\n query['TargetRegionId'] = request.target_region_id\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeUserEncryptionKeyList',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeUserEncryptionKeyListResponse(),\n await self.call_api_async(params, req, runtime)\n )", "async def describe_dbinstance_encryption_key_with_options_async(\n self,\n request: dds_20151201_models.DescribeDBInstanceEncryptionKeyRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeDBInstanceEncryptionKeyResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.encryption_key):\n query['EncryptionKey'] = request.encryption_key\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeDBInstanceEncryptionKey',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeDBInstanceEncryptionKeyResponse(),\n await 
self.call_api_async(params, req, runtime)\n )", "def describe_dbinstance_encryption_key(\n self,\n request: dds_20151201_models.DescribeDBInstanceEncryptionKeyRequest,\n ) -> dds_20151201_models.DescribeDBInstanceEncryptionKeyResponse:\n runtime = util_models.RuntimeOptions()\n return self.describe_dbinstance_encryption_key_with_options(request, runtime)", "def CreateFromExtendedKey(self,\n wallet_name: str,\n ex_key_str: str) -> HdWalletBase:\n try:\n bip_obj = Bip32Secp256k1.FromExtendedKey(ex_key_str)\n except Bip32KeyError as ex:\n raise ValueError(f\"Invalid extended key: {ex_key_str}\") from ex\n\n # Segwit wallet uses hardened derivation, not supported by public-only objects\n if bip_obj.IsPublicOnly() and self.m_mnemonic_type == HdWalletElectrumV2MnemonicTypes.SEGWIT:\n raise ValueError(\"Only private extended keys are supported for segwit mnemonic type\")\n\n electrum_obj = self.m_electrum_cls(bip_obj)\n return HdWalletElectrumV2(wallet_name=wallet_name,\n electrum_obj=electrum_obj)", "def describe_user_encryption_key_list(\n self,\n request: dds_20151201_models.DescribeUserEncryptionKeyListRequest,\n ) -> dds_20151201_models.DescribeUserEncryptionKeyListResponse:\n runtime = util_models.RuntimeOptions()\n return self.describe_user_encryption_key_list_with_options(request, runtime)", "async def modify_dbinstance_tde_async(\n self,\n request: dds_20151201_models.ModifyDBInstanceTDERequest,\n ) -> dds_20151201_models.ModifyDBInstanceTDEResponse:\n runtime = util_models.RuntimeOptions()\n return await self.modify_dbinstance_tdewith_options_async(request, runtime)", "async def describe_dbinstance_encryption_key_async(\n self,\n request: dds_20151201_models.DescribeDBInstanceEncryptionKeyRequest,\n ) -> dds_20151201_models.DescribeDBInstanceEncryptionKeyResponse:\n runtime = util_models.RuntimeOptions()\n return await self.describe_dbinstance_encryption_key_with_options_async(request, runtime)", "def _derive_key_iv(nonce, user_key, settings):\n if settings.ticketCipher == \"aes128gcm\":\n prf_name = \"sha256\"\n prf_size = 32\n else:\n prf_name = \"sha384\"\n prf_size = 48\n\n # mix the nonce with the key set by user\n secret = bytearray(prf_size)\n secret = secureHMAC(secret, nonce, prf_name)\n secret = derive_secret(secret, bytearray(b'derived'), None, prf_name)\n secret = secureHMAC(secret, user_key, prf_name)\n\n ticket_secret = derive_secret(secret,\n bytearray(b'SessionTicket secret'),\n None, prf_name)\n\n key = HKDF_expand_label(ticket_secret, b\"key\", b\"\", len(user_key),\n prf_name)\n # all AEADs use 12 byte long IV\n iv = HKDF_expand_label(ticket_secret, b\"iv\", b\"\", 12, prf_name)\n return key, iv", "def _derive_key(\n self, passphrase: str, otp: YubikeyOTP, *args : bytes\n ) -> bytes:\n return self._context_kdf.derive(\n combine_keys(\n passphrase.encode('utf-8'),\n otp.token.private_uid,\n *args\n )\n )", "def _get_encryption_key(self, **options):\n\n raise CoreNotImplementedError()", "def describe_dbinstance_tdeinfo_with_options(\n self,\n request: dds_20151201_models.DescribeDBInstanceTDEInfoRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeDBInstanceTDEInfoResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not 
UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeDBInstanceTDEInfo',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeDBInstanceTDEInfoResponse(),\n self.call_api(params, req, runtime)\n )", "async def describe_user_encryption_key_list_async(\n self,\n request: dds_20151201_models.DescribeUserEncryptionKeyListRequest,\n ) -> dds_20151201_models.DescribeUserEncryptionKeyListResponse:\n runtime = util_models.RuntimeOptions()\n return await self.describe_user_encryption_key_list_with_options_async(request, runtime)", "def _get_sql_db_tde_disabled_event(com, ext):\n friendly_cloud_type = util.friendly_string(com.get('cloud_type'))\n reference = com.get('reference')\n description = (\n '{} SQL DB {} has TDE disabled.'\n .format(friendly_cloud_type, reference)\n )\n recommendation = (\n 'Check {} SQL DB {} and enable TDE.'\n .format(friendly_cloud_type, reference)\n )\n event_record = {\n # Preserve the extended properties from the virtual\n # machine record because they provide useful context to\n # locate the virtual machine that led to the event.\n 'ext': util.merge_dicts(ext, {\n 'record_type': 'sql_db_tde_event'\n }),\n 'com': {\n 'cloud_type': com.get('cloud_type'),\n 'record_type': 'sql_db_tde_event',\n 'reference': reference,\n 'description': description,\n 'recommendation': recommendation,\n }\n }\n\n _log.info('Generating sql_db_tde_event; %r', event_record)\n yield event_record", "def add(ctx: CLIContext, user_id, resource_policy, admin, inactive, rate_limit):\n with Session() as session:\n try:\n data = session.KeyPair.create(\n user_id,\n is_active=not inactive,\n is_admin=admin,\n resource_policy=resource_policy,\n rate_limit=rate_limit)\n except Exception as e:\n ctx.output.print_mutation_error(\n e,\n item_name='keypair',\n action_name='add',\n )\n sys.exit(1)\n if not data['ok']:\n ctx.output.print_mutation_error(\n msg=data['msg'],\n item_name='keypair',\n action_name='add',\n )\n sys.exit(1)\n ctx.output.print_mutation_result(\n data,\n item_name='keypair',\n extra_info={\n 'access_key': data['keypair']['access_key'],\n 'secret_key': data['keypair']['secret_key'],\n },\n )", "def save_symmetric_key(self, key, user):\n self.temp_passphrase = key\n self.send_request(user, self.KM_TEMP_KEY_ACK)", "def keygen():\n pk, pub = generate_signing_key()\n t = PrettyTable([\"Private (install on your witness node)\",\n \"Public (publish with 'conductor enable' command)\"])\n t.align = \"l\"\n t.add_row([pk, pub])\n\n output(t, '')", "def DeriveKey(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def walletinfo(test_unlock):\n stm = shared_morphene_instance()\n if mph.rpc is not None:\n mph.rpc.rpcconnect() \n t = PrettyTable([\"Key\", \"Value\"])\n t.align = \"l\"\n t.add_row([\"created\", mph.wallet.created()])\n t.add_row([\"locked\", mph.wallet.locked()])\n t.add_row([\"Number 
of stored keys\", len(mph.wallet.getPublicKeys())])\n t.add_row([\"sql-file\", mph.wallet.keyStorage.sqlDataBaseFile])\n password_storage = mph.config[\"password_storage\"]\n t.add_row([\"password_storage\", password_storage])\n password = os.environ.get(\"UNLOCK\")\n if password is not None:\n t.add_row([\"UNLOCK env set\", \"yes\"])\n else:\n t.add_row([\"UNLOCK env set\", \"no\"])\n if KEYRING_AVAILABLE:\n t.add_row([\"keyring installed\", \"yes\"])\n else:\n t.add_row([\"keyring installed\", \"no\"])\n if test_unlock:\n if unlock_wallet(stm):\n t.add_row([\"Wallet unlock\", \"successful\"])\n else:\n t.add_row([\"Wallet unlock\", \"not working\"])\n # t.add_row([\"getPublicKeys\", str(mph.wallet.getPublicKeys())])\n print(t)", "def _get_decryption_key(self, **options):\n\n raise CoreNotImplementedError()", "def test_create_digital_access_key(self):\n pass", "def _create_fernet_key(self) -> str:\n\n client = boto3.client(\"ssm\", endpoint_url=os.environ.get(\"AWS_ENDPOINT\"))\n\n try:\n response = client.get_parameter(Name=self.object_name, WithDecryption=True)\n return response[\"Parameter\"][\"Value\"]\n except client.exceptions.ParameterNotFound:\n return Fernet.generate_key().decode()", "def crypto_test(tdesc, tpm):\n node_name = tdesc.get('name')\n key = get_attribute(tdesc, 'key')\n if len(key) not in (16, 24, 32):\n raise subcmd.TpmTestError('wrong key size \"%s:%s\"' % (\n node_name,\n ''.join('%2.2x' % ord(x) for x in key)))\n iv = get_attribute(tdesc, 'iv', required=False)\n if iv and len(iv) != 16:\n raise subcmd.TpmTestError('wrong iv size \"%s:%s\"' % (\n node_name,\n ''.join('%2.2x' % ord(x) for x in iv)))\n clear_text = get_attribute(tdesc, 'clear_text')\n if tpm.debug_enabled():\n print('clear text size', len(clear_text))\n cipher_text = get_attribute(tdesc, 'cipher_text', required=False)\n real_cipher_text = crypto_run(node_name, ENCRYPT, key, iv,\n clear_text, cipher_text, tpm)\n crypto_run(node_name, DECRYPT, key, iv, real_cipher_text,\n clear_text, tpm)\n print(utils.cursor_back() + 'SUCCESS: %s' % node_name)", "def _encryptDBPass():\n #run encrypt tool on user given password\n controller.CONF[\"ENCRYPTED_DB_PASS\"] = utils.encryptEngineDBPass(password=controller.CONF[\"DB_PASS\"],\n maskList=masked_value_set)", "def create_key ():", "def createTripleDES(key, IV, implList=None):\r\n if implList == None:\r\n implList = [\"openssl\", \"pycrypto\"]\r\n\r\n for impl in implList:\r\n if impl == \"openssl\" and cryptomath.m2cryptoLoaded:\r\n return openssl_tripledes.new(key, 2, IV)\r\n elif impl == \"pycrypto\" and cryptomath.pycryptoLoaded:\r\n return pycrypto_tripledes.new(key, 2, IV)\r\n raise NotImplementedError()" ]
[ "0.6331765", "0.5759311", "0.56402856", "0.55784595", "0.55656636", "0.5502756", "0.54750466", "0.5373099", "0.5144797", "0.5122141", "0.5041896", "0.5034966", "0.49652913", "0.49571082", "0.49031255", "0.48929504", "0.47773162", "0.47277826", "0.47213864", "0.46845135", "0.46508694", "0.4631471", "0.4613264", "0.46118814", "0.45919737", "0.45791653", "0.45643374", "0.45619097", "0.45438543", "0.45341885" ]
0.57961434
1
Before you call the MigrateAvailableZone operation, make sure that the following requirements are met:

- This operation is available only for replica set instances that run MongoDB 4.2 or earlier and for sharded cluster instances.
- If you have applied for a public endpoint for the ApsaraDB for MongoDB instance, you must call the [ReleasePublicNetworkAddress](~~67604~~) operation to release the public endpoint before you call the MigrateAvailableZone operation.
- Transparent data encryption (TDE) is disabled for the ApsaraDB for MongoDB instance.
- The source zone and the destination zone belong to the same region.
- If the instance resides in a virtual private cloud (VPC), a vSwitch has been created in the destination zone. For more information about how to create a vSwitch, see [Work with vSwitches](~~65387~~).
def migrate_available_zone_with_options( self, request: dds_20151201_models.MigrateAvailableZoneRequest, runtime: util_models.RuntimeOptions, ) -> dds_20151201_models.MigrateAvailableZoneResponse: UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.dbinstance_id): query['DBInstanceId'] = request.dbinstance_id if not UtilClient.is_unset(request.effective_time): query['EffectiveTime'] = request.effective_time if not UtilClient.is_unset(request.owner_account): query['OwnerAccount'] = request.owner_account if not UtilClient.is_unset(request.owner_id): query['OwnerId'] = request.owner_id if not UtilClient.is_unset(request.resource_owner_account): query['ResourceOwnerAccount'] = request.resource_owner_account if not UtilClient.is_unset(request.resource_owner_id): query['ResourceOwnerId'] = request.resource_owner_id if not UtilClient.is_unset(request.vswitch): query['Vswitch'] = request.vswitch if not UtilClient.is_unset(request.zone_id): query['ZoneId'] = request.zone_id req = open_api_models.OpenApiRequest( query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( action='MigrateAvailableZone', version='2015-12-01', protocol='HTTPS', pathname='/', method='POST', auth_type='AK', style='RPC', req_body_type='formData', body_type='json' ) return TeaCore.from_map( dds_20151201_models.MigrateAvailableZoneResponse(), self.call_api(params, req, runtime) )
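A usage sketch for the migration call above. `client` is again assumed to be an initialized DDS client; the instance ID, destination zone, and vSwitch ID are hypothetical, and the `effective_time` value is an assumption (apply immediately rather than in a maintenance window). The request fields themselves (DBInstanceId, ZoneId, Vswitch, EffectiveTime) are taken from the SDK code.

# Illustrative sketch; `client` is assumed to be an initialized DDS client and
# all identifiers are hypothetical. The vSwitch must already exist in the
# destination zone when the instance is in a VPC (see the prerequisites above).
from alibabacloud_tea_util import models as util_models
from alibabacloud_dds20151201 import models as dds_20151201_models

request = dds_20151201_models.MigrateAvailableZoneRequest(
    dbinstance_id='dds-bp1xxxxxxxxxxxxx',  # hypothetical instance ID
    zone_id='cn-hangzhou-h',               # assumption: destination zone in the same region
    vswitch='vsw-bp1xxxxxxxxxxxxx',        # hypothetical vSwitch in the destination zone
    effective_time='Immediately',          # assumption: apply at once
)
runtime = util_models.RuntimeOptions()
response = client.migrate_available_zone_with_options(request, runtime)
print(response.body)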
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def migrate_available_zone(\n self,\n request: dds_20151201_models.MigrateAvailableZoneRequest,\n ) -> dds_20151201_models.MigrateAvailableZoneResponse:\n runtime = util_models.RuntimeOptions()\n return self.migrate_available_zone_with_options(request, runtime)", "def test_migrate_volume_driver_cross_az(self):\n # Mock driver and rpc functions\n self.mock_object(self.volume.driver, 'migrate_volume',\n lambda x, y, z, new_type_id=None: (\n True, {'user_id': fake.USER_ID}))\n dst_az = 'AZ2'\n db.service_update(self.context, self._service.id,\n {'availability_zone': dst_az})\n\n volume = tests_utils.create_volume(self.context, size=0,\n host=CONF.host,\n migration_status='migrating')\n host_obj = {'host': 'newhost', 'capabilities': {}}\n self.volume.migrate_volume(self.context, volume, host_obj, False)\n\n # check volume properties\n volume.refresh()\n self.assertEqual('newhost', volume.host)\n self.assertEqual('success', volume.migration_status)\n self.assertEqual(dst_az, volume.availability_zone)", "def install_mongo ( vpc_conn, ec2_conn, cloudwatch_conn, vpc, base_name, aws_account_type, base_topicarn, params ):\n app_type = 'MONGO'\n mongo_keypair = get_keypair_name( aws_account_type, vpc.region.name, app_type )\n print mongo_keypair\n mongo_sec_grp_name = get_secgrp_name( base_name, app_type )\n \n print \"mongo_sec_grp_name\" + mongo_sec_grp_name\n \n mongo_sec_grp = find_secgrp(ec2_conn, mongo_sec_grp_name)\n print mongo_sec_grp\n \n if not mongo_sec_grp :\n mongo_sec_grp = create_secgrp( ec2_conn,\n vpc,\n mongo_sec_grp_name,\n 'Controls access to the ' + app_type )\n print mongo_sec_grp \n \n remove_all_rules( ec2_conn, [ mongo_sec_grp ],deep = True,base_name = base_name)\n grant_ssh_access( ec2_conn, [ mongo_sec_grp ], find_group( ec2_conn, base_name, 'NAT' ) )\n grant_grp_self_access ( ec2_conn, mongo_sec_grp, 0, 65535, protocol = 'tcp' )\n \n mongo_ami_name = base_name + '-' + app_type\n \n mongo_ami = get_ami_by_name( ec2_conn, mongo_ami_name )\n if not mongo_ami :\n mongo_ami = get_ami_by_name( ec2_conn, default_mongo_ami )\n\n print \"Creating mongoDB Instances\"\n \n mongo_subnets = get_vpc_subnets( vpc_conn, vpc, 'STATIC' )\n mongo_sec_grps = ec2_conn.get_all_security_groups( filters = { \"group-name\" : [ mongo_sec_grp_name ] } )\n \n primary_ip = get_primary_node(mongo_subnets)\n mongo_instances = []\n mongo_config = get_mongo_config(mongo_subnets)\n \n user_data = None\n primary_instance_id = None\n primary_subnet = None\n for subnet in mongo_subnets :\n cidr_block = subnet.cidr_block\n \n if cidr_block == primary_ip :\n primary_subnet = subnet\n \n # First launch the secondary instances\n if cidr_block != primary_ip :\n instance_private_ip = get_static_ip(subnet.cidr_block, \"0/24\", mongo_ip_block)\n zone_letter = subnet.availability_zone[-1:].upper( )\n mongo = launch_instance_vpc( ec2_conn,\n mongo_ami,\n base_name = base_name,\n instance_type = app_type,\n keypair = mongo_keypair,\n machine_type = 'm3.xlarge',\n security_group_id = mongo_sec_grps[0].id ,\n subnet_id = subnet.id,\n public_ip = False,\n user_data = user_data,\n static_ip_address = instance_private_ip )\n mongo_instances.append( mongo )\n\n print \"Setting alarms on the mongo instance\"\n add_monitors_to_instance( cloudwatch_conn, base_name, mongo.id, app_type, base_topicarn, mongo_monitor_rules )\n\n user_data = base64.b64encode(mongo_config)\n \n print \"Launching primary mongo instance\"\n print \"primary_subnet \" +primary_subnet.cidr_block\n instance_private_ip = 
get_static_ip(primary_subnet.cidr_block, \"0/24\", mongo_ip_block)\n zone_letter = subnet.availability_zone[-1:].upper( )\n mongo = launch_instance_vpc( ec2_conn,\n mongo_ami,\n base_name = base_name,\n instance_type = app_type,\n keypair = mongo_keypair,\n machine_type = 'm3.xlarge',\n security_group_id = mongo_sec_grps[0].id ,\n subnet_id = primary_subnet.id,\n public_ip = False,\n user_data = user_data,\n static_ip_address = instance_private_ip )\n mongo_instances.append( mongo)\n \n print \"Setting alarms on the mongo instance\"\n add_monitors_to_instance( cloudwatch_conn, base_name, mongo.id, app_type, base_topicarn, mongo_monitor_rules )", "async def migrate_available_zone_with_options_async(\n self,\n request: dds_20151201_models.MigrateAvailableZoneRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.MigrateAvailableZoneResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.effective_time):\n query['EffectiveTime'] = request.effective_time\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.vswitch):\n query['Vswitch'] = request.vswitch\n if not UtilClient.is_unset(request.zone_id):\n query['ZoneId'] = request.zone_id\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='MigrateAvailableZone',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.MigrateAvailableZoneResponse(),\n await self.call_api_async(params, req, runtime)\n )", "def create_mongodb(config):\n\n \n mongo_url = \"mongodb://\"\n mongo_url += \",\".join(map(lambda srv: srv['host'] + \":\" + str(srv['port']), config['data']['mongoServers']))\n \n if 'replica' in config['data']:\n mongo_url += \"/?replicaSet={0}\".format(config['data']['replica'])\n\n client = MongoClient(mongo_url)\n\n return client", "async def migrate_available_zone_async(\n self,\n request: dds_20151201_models.MigrateAvailableZoneRequest,\n ) -> dds_20151201_models.MigrateAvailableZoneResponse:\n runtime = util_models.RuntimeOptions()\n return await self.migrate_available_zone_with_options_async(request, runtime)", "def change_zone_ip(config, section, new_ip):\n\n a_name = config.get(section, \"a_name\")\n apikey = config.get(section, \"apikey\")\n ttl = int(config.get(section, \"ttl\"))\n zone_id = get_zone_id(config, section)\n\n zone_record = {'name': a_name, 'value': new_ip, 'ttl': ttl, 'type': 'A'}\n\n new_zone_ver = api.domain.zone.version.new(apikey, zone_id)\n\n # clear old A record (defaults to previous verison's\n api.domain.zone.record.delete(apikey, zone_id, new_zone_ver,\n {'type': 'A', 'name': a_name})\n\n # Add in new A record\n api.domain.zone.record.add(apikey, zone_id, new_zone_ver, zone_record)\n\n # Set new zone version as the active zone\n api.domain.zone.version.set(apikey, zone_id, new_zone_ver)", "def configure_cluster(ctx, zone, 
db_instance_name):\n ctx.run(init_pg_servers_play_run(zone, db_instance_name), pty=True, echo=True)", "def test_mongodb_destination(sdc_builder, sdc_executor, mongodb):\n pipeline_builder = sdc_builder.get_pipeline_builder()\n pipeline_builder.add_error_stage('Discard')\n\n dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')\n dev_raw_data_source.set_attributes(data_format='TEXT', raw_data='\\n'.join(DATA))\n\n expression_evaluator = pipeline_builder.add_stage('Expression Evaluator')\n # MongoDB destination uses the CRUD operation in the sdc.operation.type record header attribute when writing\n # to MongoDB. Value 4 specified below is for UPSERT.\n expression_evaluator.header_attribute_expressions = [{'attributeToSet': 'sdc.operation.type',\n 'headerAttributeExpression': '1'}]\n\n mongodb_dest = pipeline_builder.add_stage('MongoDB', type='destination')\n mongodb_dest.set_attributes(database=get_random_string(ascii_letters, 5),\n collection=get_random_string(ascii_letters, 10))\n # From 3.6.0, unique key field is a list, otherwise single string for older version.\n mongodb_dest.unique_key_field = ['/text'] if Version(sdc_builder.version) >= Version('3.6.0') else '/text'\n\n record_deduplicator = pipeline_builder.add_stage('Record Deduplicator')\n trash = pipeline_builder.add_stage('Trash')\n dev_raw_data_source >> record_deduplicator >> expression_evaluator >> mongodb_dest\n record_deduplicator >> trash\n pipeline = pipeline_builder.build().configure_for_environment(mongodb)\n\n try:\n # Data is generated in dev_raw_data_source and sent to MongoDB using pipeline.\n sdc_executor.add_pipeline(pipeline)\n sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(DATA))\n sdc_executor.stop_pipeline(pipeline)\n\n # Verify data is received correctly using PyMongo.\n # Similar to writing, while reading data, we specify MongoDB database and the collection inside it.\n logger.info('Verifying docs received with PyMongo...')\n assert [item['text'] for item in mongodb.engine[mongodb_dest.database][mongodb_dest.collection].find()] == DATA\n\n finally:\n logger.info('Dropping %s database...', mongodb_dest.database)\n mongodb.engine.drop_database(mongodb_dest.database)", "def test_mongodb_origin_simple(sdc_builder, sdc_executor, mongodb):\n pipeline_builder = sdc_builder.get_pipeline_builder()\n pipeline_builder.add_error_stage('Discard')\n\n mongodb_origin = pipeline_builder.add_stage('MongoDB', type='origin')\n mongodb_origin.set_attributes(capped_collection=False,\n database=get_random_string(ascii_letters, 5),\n collection=get_random_string(ascii_letters, 10))\n\n trash = pipeline_builder.add_stage('Trash')\n mongodb_origin >> trash\n pipeline = pipeline_builder.build().configure_for_environment(mongodb)\n\n try:\n # MongoDB and PyMongo add '_id' to the dictionary entries e.g. docs_in_database\n # when used for inserting in collection. Hence the deep copy.\n docs_in_database = copy.deepcopy(ORIG_DOCS)\n\n # Create documents in MongoDB using PyMongo.\n # First a database is created. 
Then a collection is created inside that database.\n # Then documents are created in that collection.\n logger.info('Adding documents into %s collection using PyMongo...', mongodb_origin.collection)\n mongodb_database = mongodb.engine[mongodb_origin.database]\n mongodb_collection = mongodb_database[mongodb_origin.collection]\n insert_list = [mongodb_collection.insert_one(doc) for doc in docs_in_database]\n assert len(insert_list) == len(docs_in_database)\n\n # Start pipeline and verify the documents using snaphot.\n sdc_executor.add_pipeline(pipeline)\n snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot\n sdc_executor.stop_pipeline(pipeline)\n rows_from_snapshot = [{record.value['value']['name']['sqpath'].lstrip('/'):\n record.value['value']['name']['value']}\n for record in snapshot[mongodb_origin].output]\n\n assert rows_from_snapshot == ORIG_DOCS\n\n finally:\n logger.info('Dropping %s database...', mongodb_origin.database)\n mongodb.engine.drop_database(mongodb_origin.database)", "def switch_availability_zone():\n global current_az\n if current_az == 0:\n current_az = 1\n else:\n current_az = 0", "def migrate_contract(network):\n print(network)", "def bind(self,cluster_name,ip_address='',bind_details={},project_id=''):\n project_id = project_id if project_id != '' else self.__project_id\n if ip_address == '':\n headers = { 'User-Agent': 'curl/7.61.0'} # spoof for simple response\n ip = requests.get('http://ifconfig.co', headers)\n ip_address = ip.text.rstrip()\n logger.info(f'bind: looked up ip address: {ip_address}')\n #key = self.create_programatic_apikey(description=description,project_id=project_id)\n db_user = { 'username' : 'foo'\n ,'password' : 'changeme'\n ,'databaseName' : 'admin'\n ,'roles' : [ {'databaseName' : 'admin', 'roleName' : 'dbAdminAnyDatabase'} ] \n }\n user = self.create_database_user(db_user,project_id=project_id) \n cluster = self.get_cluster(cluster_name)\n cs = cluster['mongoURIWithOptions'].split('/',1)\n #conn_str = f'{cs[0]//{key['publicKey']}:{key['privateKey']}@{cs[1]}'\n return conn_str", "def test_mongodb_origin_simple_with_BSONBinary(sdc_builder, sdc_executor, mongodb):\n\n ORIG_BINARY_DOCS = [\n {'data': binary.Binary(b'Binary Data Flute')},\n {'data': binary.Binary(b'Binary Data Oboe')},\n {'data': binary.Binary(b'Binary Data Violin')}\n ]\n\n pipeline_builder = sdc_builder.get_pipeline_builder()\n pipeline_builder.add_error_stage('Discard')\n\n mongodb_origin = pipeline_builder.add_stage('MongoDB', type='origin')\n mongodb_origin.set_attributes(capped_collection=False,\n database=get_random_string(ascii_letters, 5),\n collection=get_random_string(ascii_letters, 10))\n\n trash = pipeline_builder.add_stage('Trash')\n mongodb_origin >> trash\n pipeline = pipeline_builder.build().configure_for_environment(mongodb)\n\n try:\n # MongoDB and PyMongo add '_id' to the dictionary entries e.g. docs_in_database\n # when used for inserting in collection. Hence the deep copy.\n docs_in_database = copy.deepcopy(ORIG_BINARY_DOCS)\n\n # Create documents in MongoDB using PyMongo.\n # First a database is created. 
Then a collection is created inside that database.\n # Then documents are created in that collection.\n logger.info('Adding documents into %s collection using PyMongo...', mongodb_origin.collection)\n mongodb_database = mongodb.engine[mongodb_origin.database]\n mongodb_collection = mongodb_database[mongodb_origin.collection]\n insert_list = [mongodb_collection.insert_one(doc) for doc in docs_in_database]\n assert len(insert_list) == len(docs_in_database)\n\n # Start pipeline and verify the documents using snaphot.\n sdc_executor.add_pipeline(pipeline)\n snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot\n sdc_executor.stop_pipeline(pipeline)\n rows_from_snapshot = [{'data': str(record.value2['data'])} for record in snapshot[mongodb_origin].output]\n\n assert rows_from_snapshot == [{'data': str(record.get('data'))} for record in ORIG_BINARY_DOCS]\n\n finally:\n logger.info('Dropping %s database...', mongodb_origin.database)\n mongodb.engine.drop_database(mongodb_origin.database)", "def test_12_migrate_vm_live_with_snapshots_on_remote(self):\n global vm2\n # Get ROOT Volume\n vol_for_snap = list_volumes(\n self.apiclient,\n virtualmachineid=vm2.id,\n listall=True)\n for vol in vol_for_snap:\n snapshot = Snapshot.create(\n self.apiclient,\n volume_id=vol.id\n )\n snapshot.validateState(\n self.apiclient,\n snapshotstate=\"backedup\",\n )\n # Migrate all volumes and VMs\n\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient, vm2, self.host_remote)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)\n vm2 = self.helper.migrateVm(self.apiclient, self.virtual_machine_live_migration_2, destinationHost)\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient, vm2, self.host_remote)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)", "def migrate_database():\n log('Migrating the keystone database.', level=INFO)\n service_stop(keystone_service())\n # NOTE(jamespage) > icehouse creates a log file as root so use\n # sudo to execute as keystone otherwise keystone won't start\n # afterwards.\n cmd = ['sudo', '-u', 'keystone', 'keystone-manage', 'db_sync']\n subprocess.check_output(cmd)\n service_start(keystone_service())\n time.sleep(10)\n peer_store('db-initialised', 'True')", "def connect_and_update(_id, padding, host, port, dbname, collname,\n updates_per_process, process_number, replica_set):\n client = MongoClient(host=[get_hostport_string(host=host, port=port)],\n replicaset=replica_set)\n db = client[dbname]\n collection = db[collname]\n try: # Unless using multiple docs, most of these will fail\n collection.insert_one({\"_id\": _id, \"padding\": padding})\n except:\n pass\n\n for j in xrange(updates_per_process):\n update_document(_id, collection, padding, process_number)\n \n client.close()", "def migrate_to_other_zone(\n self,\n request: dds_20151201_models.MigrateToOtherZoneRequest,\n ) -> dds_20151201_models.MigrateToOtherZoneResponse:\n runtime = util_models.RuntimeOptions()\n return self.migrate_to_other_zone_with_options(request, runtime)", "def test_transform_and_load_vpcs(neo4j_session):\n vpc_res = tests.data.gcp.compute.VPC_RESPONSE\n vpc_list = cartography.intel.gcp.compute.transform_gcp_vpcs(vpc_res)\n cartography.intel.gcp.compute.load_gcp_vpcs(neo4j_session, vpc_list, TEST_UPDATE_TAG)\n\n query = \"\"\"\n MATCH(vpc:GCPVpc{id:$VpcId})\n RETURN vpc.id, vpc.partial_uri, vpc.auto_create_subnetworks\n \"\"\"\n expected_vpc_id = 
'projects/project-abc/global/networks/default'\n nodes = neo4j_session.run(\n query,\n VpcId=expected_vpc_id,\n )\n actual_nodes = {(n['vpc.id'], n['vpc.partial_uri'], n['vpc.auto_create_subnetworks']) for n in nodes}\n expected_nodes = {\n (expected_vpc_id, expected_vpc_id, True),\n }\n assert actual_nodes == expected_nodes", "def to_network_v4(zone: Zone) -> ipaddress.IPv4Network:\n\n labels = zone.name.split(\".\")[:-3]\n netmask: int = 8 * len(labels)\n offset = 4 - len(labels)\n\n pattern = r\"^(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)([/-](2[5-9]|3[0-1]))?$\"\n last_label_parsed = re.search(pattern, labels[0])\n if not last_label_parsed:\n raise ValueError(\"Faild to parse the zone name\")\n\n if last_label_parsed[2]:\n # non-octet boundary delegation detected\n # remove netmask and save it to the result\n last_octect = last_label_parsed[1]\n labels[0] = last_octect\n netmask = int(last_label_parsed[2][1:])\n\n labels = [\"0\"] * offset + labels\n prefix_str = \".\".join(reversed(labels))\n prefix_str += f\"/{netmask}\"\n\n return ipaddress.IPv4Network(prefix_str, strict=True)", "def _resolve_shard(client):\n status = client.admin.command('serverStatus')\n if status['process'] == 'mongos':\n raise RuntimeError(\"Destination cannot be mongos\")\n return client", "def migrateVirtualMachine(self,node,vmid,target,online=False,force=False):\n post_data = {'target': str(target)}\n if online:\n post_data['online'] = '1'\n if force:\n post_data['force'] = '1'\n data = self.connect('post',\"nodes/%s/qemu/%s/migrate\" % (node,vmid), post_data)\n return data", "def setup_source_db(self):\n conn = MongoReplicaSetClient(host=self._source_host,\n replicaSet=self._replica_set,\n read_preference=ReadPreference.PRIMARY)\n conn['admin'].authenticate(self._user, self._password)\n return conn", "def change_address(\n vm_hostname, new_address,\n offline=False, migrate=False, allow_reserved_hv=False,\n offline_transport='drbd',\n):\n\n if not offline:\n raise IGVMError('IP address change can be only performed offline')\n\n with _get_vm(vm_hostname) as vm:\n if vm.dataset_obj['datacenter_type'] != 'kvm.dct':\n raise NotImplementedError(\n 'This operation is not yet supported for {}'.format(\n vm.dataset_obj['datacenter_type'])\n )\n\n new_address = ip_address(new_address)\n\n if vm.dataset_obj['intern_ip'] == new_address:\n raise ConfigError('New IP address is the same as the old one!')\n\n if not vm.hypervisor.get_vlan_network(new_address) and not migrate:\n err = 'Current hypervisor does not support new subnet!'\n raise ConfigError(err)\n\n new_network = Query(\n {\n 'servertype': 'route_network',\n 'state': 'online',\n 'network_type': 'internal',\n 'intern_ip': Contains(new_address),\n }\n ).get()['hostname']\n\n vm_was_running = vm.is_running()\n\n with Transaction() as transaction:\n if vm_was_running:\n vm.shutdown(\n transaction=transaction,\n check_vm_up_on_transaction=False,\n )\n vm.change_address(\n new_address, new_network, transaction=transaction,\n )\n\n if migrate:\n vm_migrate(\n vm_object=vm,\n run_puppet=True,\n offline=True,\n no_shutdown=True,\n allow_reserved_hv=allow_reserved_hv,\n offline_transport=offline_transport,\n )\n else:\n vm.hypervisor.mount_vm_storage(vm, transaction=transaction)\n vm.run_puppet()\n vm.hypervisor.redefine_vm(vm)\n vm.hypervisor.umount_vm_storage(vm)\n\n if vm_was_running:\n vm.start()", "def migrate_replica(replica, location, noRemove=False, mirror=False):\n\n from tardis.tardis_portal.models import Replica, Location\n\n with transaction.commit_on_success():\n 
replica = Replica.objects.select_for_update().get(pk=replica.pk)\n source = Location.get_location(replica.location.name)\n\n if not replica.verified or location.provider.trust_length:\n raise MigrationError('Only verified datafiles can be migrated' \\\n ' to this destination')\n\n filename = replica.get_absolute_filepath()\n try:\n newreplica = Replica.objects.get(datafile=replica.datafile,\n location=location)\n created_replica = False\n # We've most likely mirrored this file previously. But if\n # we are about to delete the source Replica, we need to check\n # that the target Replica still verifies.\n if not mirror and not check_file_transferred(newreplica, location):\n raise MigrationError('Previously mirrored / migrated Replica' \\\n ' no longer verifies locally!')\n except Replica.DoesNotExist:\n newreplica = Replica()\n newreplica.location = location\n newreplica.datafile = replica.datafile\n newreplica.protocol = ''\n newreplica.stay_remote = location != Location.get_default_location()\n newreplica.verified = False\n url = location.provider.generate_url(newreplica)\n\n if newreplica.url == url:\n # We should get here ...\n raise MigrationError('Cannot migrate a replica to its' \\\n ' current location')\n newreplica.url = url\n location.provider.put_file(replica, newreplica)\n verified = False\n try:\n verified = check_file_transferred(newreplica, location)\n except:\n # FIXME - should we always do this?\n location.provider.remove_file(newreplica)\n raise\n\n newreplica.verified = verified\n newreplica.save()\n logger.info('Transferred file %s for replica %s' %\n (filename, replica.id))\n created_replica = True\n\n if mirror:\n return created_replica\n\n # FIXME - do this more reliably ...\n replica.delete()\n if not noRemove:\n source.provider.remove_file(replica)\n logger.info('Removed local file %s for replica %s' %\n (filename, replica.id))\n return True", "def connect_to_mongo(self, host='127.0.0.1', port=27017, instance='local'):\n if instance == 'prod':\n logging.info('connecting to mongo Atlas')\n self.db_client = MongoClient('mongodb+srv://{}:{}@{}/'\n '{}?retryWrites=true&w=majority'.format(self.config['db']['username'],\n self.config['db']['password'],\n self.config['db']['atlas'],\n self.db_name))\n else:\n logging.info('connecting to local Atlas')\n self.db_client = MongoClient(host, port)", "def move_ips_to_interface(apps, schema_editor):\n UserAS = apps.get_model('scionlab', 'UserAS')\n\n for useras in UserAS.objects.iterator():\n # UserASes have a unique host and before the multi-AP feature had a unique interface\n host = useras.hosts.get()\n iface = useras.interfaces.get()\n if not iface.public_ip:\n iface.public_ip = host.public_ip\n iface.bind_ip = host.bind_ip\n iface.save()\n host.public_ip = None\n host.bind_ip = None\n host.save()", "def upgrade_to_2():\n\n def update_file_origins(cont_list, cont_name):\n for container in cont_list:\n updated_files = []\n for file in container.get('files', []):\n origin = file.get('origin')\n if origin is not None:\n if origin.get('name', None) is None:\n file['origin']['name'] = origin['id']\n if origin.get('method', None) is None:\n file['origin']['method'] = ''\n updated_files.append(file)\n\n query = {'_id': container['_id']}\n update = {'$set': {'files': updated_files}}\n result = config.db[cont_name].update_one(query, update)\n\n query = {'$and':[{'files.origin.name': { '$exists': False}}, {'files.origin.id': { '$exists': True}}]}\n\n update_file_origins(config.db.collections.find(query), 'collections')\n 
update_file_origins(config.db.projects.find(query), 'projects')\n update_file_origins(config.db.sessions.find(query), 'sessions')\n update_file_origins(config.db.acquisitions.find(query), 'acquisitions')", "def try_atlas():\n result = {}\n try:\n vcap_services = os.getenv('VCAP_SERVICES')\n services = json.loads(vcap_services)\n for service_name in services.keys():\n print(f'service_name={service_name}')\n if service_name == \"_\":\n continue\n credentials = load_from_vcap_services(service_name)\n result.update(credentials)\n except Exception as err:\n print( f'Error looking for VCAP_SERVICES {err}')\n result['error']=err\n return result\n\n mongo_results = {}\n try:\n db = MongoClient( result['connectionString'] )\n mongo_results[\"MongoClient\"]= f'{db}'\n mongo_results[\"server_info\"]=db.server_info()\n except Exception as err:\n print( f'Error trying connection to Atlas: {err}')\n result['atlas-error']=err\n finally:\n result['mongo']=mongo_results\n\n return result", "def __init__(self, dst_mongodb_uri, dst_database, dst_collection, dry_run):\n self.client = pymongo.MongoClient(dst_mongodb_uri)\n self.dst_mongodb_uri = dst_mongodb_uri\n self.lookup_col = self.client[dst_database][dst_collection]\n self.dry_run = dry_run" ]
[ "0.52417934", "0.50473094", "0.5008553", "0.49998647", "0.48307848", "0.47682485", "0.47216007", "0.4666874", "0.46228546", "0.45956224", "0.4590136", "0.45401716", "0.45268676", "0.4509593", "0.45067737", "0.4491239", "0.44909394", "0.4489098", "0.44764355", "0.44562212", "0.44552153", "0.4452588", "0.44209155", "0.4413766", "0.44047692", "0.44022316", "0.43876037", "0.43639293", "0.43613485", "0.43509868" ]
0.5335884
0
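The record above pairs the MigrateAvailableZone prerequisites with the synchronous `migrate_available_zone_with_options` implementation. A minimal usage sketch, under the assumption that this code ships in the published `alibabacloud_dds20151201` SDK; the credentials, endpoint, instance ID, zone ID, and vSwitch ID below are all hypothetical placeholders:

```python
from alibabacloud_dds20151201.client import Client
from alibabacloud_dds20151201 import models as dds_20151201_models
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_tea_util import models as util_models

# Hypothetical credentials/endpoint; substitute your own configuration.
config = open_api_models.Config(
    access_key_id='<access-key-id>',
    access_key_secret='<access-key-secret>',
    endpoint='mongodb.aliyuncs.com',
)
client = Client(config)

# Field names mirror the is_unset checks in the implementation above.
request = dds_20151201_models.MigrateAvailableZoneRequest(
    dbinstance_id='dds-bp1xxxxxxxx',  # hypothetical instance ID
    zone_id='cn-hangzhou-b',          # destination zone
    vswitch='vsw-bp1xxxxxxxx',        # required when the instance is in a VPC
    effective_time='Immediately',     # assumption: apply the migration right away
)
response = client.migrate_available_zone_with_options(
    request, util_models.RuntimeOptions()
)
print(response.body)
```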
This operation is available only for sharded cluster instances and for replica set instances that run MongoDB 4.2 or earlier. Before you call this operation, make sure that the following prerequisites are met: If you have applied for a public endpoint for the ApsaraDB for MongoDB instance, you must first call the [ReleasePublicNetworkAddress](~~67604~~) operation to release the public endpoint. Transparent data encryption (TDE) is disabled for the instance. The source zone and the destination zone belong to the same region. If the instance resides in a virtual private cloud (VPC), a vSwitch has been created in the destination zone. For more information about how to create a vSwitch, see [Work with vSwitches](~~65387~~).
def migrate_available_zone( self, request: dds_20151201_models.MigrateAvailableZoneRequest, ) -> dds_20151201_models.MigrateAvailableZoneResponse: runtime = util_models.RuntimeOptions() return self.migrate_available_zone_with_options(request, runtime)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def migrate_available_zone_with_options(\n self,\n request: dds_20151201_models.MigrateAvailableZoneRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.MigrateAvailableZoneResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.effective_time):\n query['EffectiveTime'] = request.effective_time\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.vswitch):\n query['Vswitch'] = request.vswitch\n if not UtilClient.is_unset(request.zone_id):\n query['ZoneId'] = request.zone_id\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='MigrateAvailableZone',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.MigrateAvailableZoneResponse(),\n self.call_api(params, req, runtime)\n )", "def test_migrate_volume_driver_cross_az(self):\n # Mock driver and rpc functions\n self.mock_object(self.volume.driver, 'migrate_volume',\n lambda x, y, z, new_type_id=None: (\n True, {'user_id': fake.USER_ID}))\n dst_az = 'AZ2'\n db.service_update(self.context, self._service.id,\n {'availability_zone': dst_az})\n\n volume = tests_utils.create_volume(self.context, size=0,\n host=CONF.host,\n migration_status='migrating')\n host_obj = {'host': 'newhost', 'capabilities': {}}\n self.volume.migrate_volume(self.context, volume, host_obj, False)\n\n # check volume properties\n volume.refresh()\n self.assertEqual('newhost', volume.host)\n self.assertEqual('success', volume.migration_status)\n self.assertEqual(dst_az, volume.availability_zone)", "def install_mongo ( vpc_conn, ec2_conn, cloudwatch_conn, vpc, base_name, aws_account_type, base_topicarn, params ):\n app_type = 'MONGO'\n mongo_keypair = get_keypair_name( aws_account_type, vpc.region.name, app_type )\n print mongo_keypair\n mongo_sec_grp_name = get_secgrp_name( base_name, app_type )\n \n print \"mongo_sec_grp_name\" + mongo_sec_grp_name\n \n mongo_sec_grp = find_secgrp(ec2_conn, mongo_sec_grp_name)\n print mongo_sec_grp\n \n if not mongo_sec_grp :\n mongo_sec_grp = create_secgrp( ec2_conn,\n vpc,\n mongo_sec_grp_name,\n 'Controls access to the ' + app_type )\n print mongo_sec_grp \n \n remove_all_rules( ec2_conn, [ mongo_sec_grp ],deep = True,base_name = base_name)\n grant_ssh_access( ec2_conn, [ mongo_sec_grp ], find_group( ec2_conn, base_name, 'NAT' ) )\n grant_grp_self_access ( ec2_conn, mongo_sec_grp, 0, 65535, protocol = 'tcp' )\n \n mongo_ami_name = base_name + '-' + app_type\n \n mongo_ami = get_ami_by_name( ec2_conn, mongo_ami_name )\n if not mongo_ami :\n mongo_ami = get_ami_by_name( ec2_conn, default_mongo_ami )\n\n print \"Creating mongoDB Instances\"\n \n mongo_subnets = get_vpc_subnets( vpc_conn, vpc, 'STATIC' )\n mongo_sec_grps = ec2_conn.get_all_security_groups( filters = { \"group-name\" : [ mongo_sec_grp_name ] } )\n \n primary_ip = 
get_primary_node(mongo_subnets)\n mongo_instances = []\n mongo_config = get_mongo_config(mongo_subnets)\n \n user_data = None\n primary_instance_id = None\n primary_subnet = None\n for subnet in mongo_subnets :\n cidr_block = subnet.cidr_block\n \n if cidr_block == primary_ip :\n primary_subnet = subnet\n \n # First launch the secondary instances\n if cidr_block != primary_ip :\n instance_private_ip = get_static_ip(subnet.cidr_block, \"0/24\", mongo_ip_block)\n zone_letter = subnet.availability_zone[-1:].upper( )\n mongo = launch_instance_vpc( ec2_conn,\n mongo_ami,\n base_name = base_name,\n instance_type = app_type,\n keypair = mongo_keypair,\n machine_type = 'm3.xlarge',\n security_group_id = mongo_sec_grps[0].id ,\n subnet_id = subnet.id,\n public_ip = False,\n user_data = user_data,\n static_ip_address = instance_private_ip )\n mongo_instances.append( mongo )\n\n print \"Setting alarms on the mongo instance\"\n add_monitors_to_instance( cloudwatch_conn, base_name, mongo.id, app_type, base_topicarn, mongo_monitor_rules )\n\n user_data = base64.b64encode(mongo_config)\n \n print \"Launching primary mongo instance\"\n print \"primary_subnet \" +primary_subnet.cidr_block\n instance_private_ip = get_static_ip(primary_subnet.cidr_block, \"0/24\", mongo_ip_block)\n zone_letter = subnet.availability_zone[-1:].upper( )\n mongo = launch_instance_vpc( ec2_conn,\n mongo_ami,\n base_name = base_name,\n instance_type = app_type,\n keypair = mongo_keypair,\n machine_type = 'm3.xlarge',\n security_group_id = mongo_sec_grps[0].id ,\n subnet_id = primary_subnet.id,\n public_ip = False,\n user_data = user_data,\n static_ip_address = instance_private_ip )\n mongo_instances.append( mongo)\n \n print \"Setting alarms on the mongo instance\"\n add_monitors_to_instance( cloudwatch_conn, base_name, mongo.id, app_type, base_topicarn, mongo_monitor_rules )", "async def migrate_available_zone_with_options_async(\n self,\n request: dds_20151201_models.MigrateAvailableZoneRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.MigrateAvailableZoneResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.effective_time):\n query['EffectiveTime'] = request.effective_time\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.vswitch):\n query['Vswitch'] = request.vswitch\n if not UtilClient.is_unset(request.zone_id):\n query['ZoneId'] = request.zone_id\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='MigrateAvailableZone',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.MigrateAvailableZoneResponse(),\n await self.call_api_async(params, req, runtime)\n )", "def create_mongodb(config):\n\n \n mongo_url = \"mongodb://\"\n mongo_url += \",\".join(map(lambda srv: srv['host'] + \":\" + str(srv['port']), 
config['data']['mongoServers']))\n \n if 'replica' in config['data']:\n mongo_url += \"/?replicaSet={0}\".format(config['data']['replica'])\n\n client = MongoClient(mongo_url)\n\n return client", "async def migrate_available_zone_async(\n self,\n request: dds_20151201_models.MigrateAvailableZoneRequest,\n ) -> dds_20151201_models.MigrateAvailableZoneResponse:\n runtime = util_models.RuntimeOptions()\n return await self.migrate_available_zone_with_options_async(request, runtime)", "def change_zone_ip(config, section, new_ip):\n\n a_name = config.get(section, \"a_name\")\n apikey = config.get(section, \"apikey\")\n ttl = int(config.get(section, \"ttl\"))\n zone_id = get_zone_id(config, section)\n\n zone_record = {'name': a_name, 'value': new_ip, 'ttl': ttl, 'type': 'A'}\n\n new_zone_ver = api.domain.zone.version.new(apikey, zone_id)\n\n # clear old A record (defaults to previous verison's\n api.domain.zone.record.delete(apikey, zone_id, new_zone_ver,\n {'type': 'A', 'name': a_name})\n\n # Add in new A record\n api.domain.zone.record.add(apikey, zone_id, new_zone_ver, zone_record)\n\n # Set new zone version as the active zone\n api.domain.zone.version.set(apikey, zone_id, new_zone_ver)", "def configure_cluster(ctx, zone, db_instance_name):\n ctx.run(init_pg_servers_play_run(zone, db_instance_name), pty=True, echo=True)", "def test_mongodb_destination(sdc_builder, sdc_executor, mongodb):\n pipeline_builder = sdc_builder.get_pipeline_builder()\n pipeline_builder.add_error_stage('Discard')\n\n dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')\n dev_raw_data_source.set_attributes(data_format='TEXT', raw_data='\\n'.join(DATA))\n\n expression_evaluator = pipeline_builder.add_stage('Expression Evaluator')\n # MongoDB destination uses the CRUD operation in the sdc.operation.type record header attribute when writing\n # to MongoDB. 
Value 4 specified below is for UPSERT.\n expression_evaluator.header_attribute_expressions = [{'attributeToSet': 'sdc.operation.type',\n 'headerAttributeExpression': '1'}]\n\n mongodb_dest = pipeline_builder.add_stage('MongoDB', type='destination')\n mongodb_dest.set_attributes(database=get_random_string(ascii_letters, 5),\n collection=get_random_string(ascii_letters, 10))\n # From 3.6.0, unique key field is a list, otherwise single string for older version.\n mongodb_dest.unique_key_field = ['/text'] if Version(sdc_builder.version) >= Version('3.6.0') else '/text'\n\n record_deduplicator = pipeline_builder.add_stage('Record Deduplicator')\n trash = pipeline_builder.add_stage('Trash')\n dev_raw_data_source >> record_deduplicator >> expression_evaluator >> mongodb_dest\n record_deduplicator >> trash\n pipeline = pipeline_builder.build().configure_for_environment(mongodb)\n\n try:\n # Data is generated in dev_raw_data_source and sent to MongoDB using pipeline.\n sdc_executor.add_pipeline(pipeline)\n sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(DATA))\n sdc_executor.stop_pipeline(pipeline)\n\n # Verify data is received correctly using PyMongo.\n # Similar to writing, while reading data, we specify MongoDB database and the collection inside it.\n logger.info('Verifying docs received with PyMongo...')\n assert [item['text'] for item in mongodb.engine[mongodb_dest.database][mongodb_dest.collection].find()] == DATA\n\n finally:\n logger.info('Dropping %s database...', mongodb_dest.database)\n mongodb.engine.drop_database(mongodb_dest.database)", "def test_mongodb_origin_simple(sdc_builder, sdc_executor, mongodb):\n pipeline_builder = sdc_builder.get_pipeline_builder()\n pipeline_builder.add_error_stage('Discard')\n\n mongodb_origin = pipeline_builder.add_stage('MongoDB', type='origin')\n mongodb_origin.set_attributes(capped_collection=False,\n database=get_random_string(ascii_letters, 5),\n collection=get_random_string(ascii_letters, 10))\n\n trash = pipeline_builder.add_stage('Trash')\n mongodb_origin >> trash\n pipeline = pipeline_builder.build().configure_for_environment(mongodb)\n\n try:\n # MongoDB and PyMongo add '_id' to the dictionary entries e.g. docs_in_database\n # when used for inserting in collection. Hence the deep copy.\n docs_in_database = copy.deepcopy(ORIG_DOCS)\n\n # Create documents in MongoDB using PyMongo.\n # First a database is created. 
Then a collection is created inside that database.\n # Then documents are created in that collection.\n logger.info('Adding documents into %s collection using PyMongo...', mongodb_origin.collection)\n mongodb_database = mongodb.engine[mongodb_origin.database]\n mongodb_collection = mongodb_database[mongodb_origin.collection]\n insert_list = [mongodb_collection.insert_one(doc) for doc in docs_in_database]\n assert len(insert_list) == len(docs_in_database)\n\n # Start pipeline and verify the documents using snaphot.\n sdc_executor.add_pipeline(pipeline)\n snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot\n sdc_executor.stop_pipeline(pipeline)\n rows_from_snapshot = [{record.value['value']['name']['sqpath'].lstrip('/'):\n record.value['value']['name']['value']}\n for record in snapshot[mongodb_origin].output]\n\n assert rows_from_snapshot == ORIG_DOCS\n\n finally:\n logger.info('Dropping %s database...', mongodb_origin.database)\n mongodb.engine.drop_database(mongodb_origin.database)", "def switch_availability_zone():\n global current_az\n if current_az == 0:\n current_az = 1\n else:\n current_az = 0", "def migrate_contract(network):\n print(network)", "def bind(self,cluster_name,ip_address='',bind_details={},project_id=''):\n project_id = project_id if project_id != '' else self.__project_id\n if ip_address == '':\n headers = { 'User-Agent': 'curl/7.61.0'} # spoof for simple response\n ip = requests.get('http://ifconfig.co', headers)\n ip_address = ip.text.rstrip()\n logger.info(f'bind: looked up ip address: {ip_address}')\n #key = self.create_programatic_apikey(description=description,project_id=project_id)\n db_user = { 'username' : 'foo'\n ,'password' : 'changeme'\n ,'databaseName' : 'admin'\n ,'roles' : [ {'databaseName' : 'admin', 'roleName' : 'dbAdminAnyDatabase'} ] \n }\n user = self.create_database_user(db_user,project_id=project_id) \n cluster = self.get_cluster(cluster_name)\n cs = cluster['mongoURIWithOptions'].split('/',1)\n #conn_str = f'{cs[0]//{key['publicKey']}:{key['privateKey']}@{cs[1]}'\n return conn_str", "def test_mongodb_origin_simple_with_BSONBinary(sdc_builder, sdc_executor, mongodb):\n\n ORIG_BINARY_DOCS = [\n {'data': binary.Binary(b'Binary Data Flute')},\n {'data': binary.Binary(b'Binary Data Oboe')},\n {'data': binary.Binary(b'Binary Data Violin')}\n ]\n\n pipeline_builder = sdc_builder.get_pipeline_builder()\n pipeline_builder.add_error_stage('Discard')\n\n mongodb_origin = pipeline_builder.add_stage('MongoDB', type='origin')\n mongodb_origin.set_attributes(capped_collection=False,\n database=get_random_string(ascii_letters, 5),\n collection=get_random_string(ascii_letters, 10))\n\n trash = pipeline_builder.add_stage('Trash')\n mongodb_origin >> trash\n pipeline = pipeline_builder.build().configure_for_environment(mongodb)\n\n try:\n # MongoDB and PyMongo add '_id' to the dictionary entries e.g. docs_in_database\n # when used for inserting in collection. Hence the deep copy.\n docs_in_database = copy.deepcopy(ORIG_BINARY_DOCS)\n\n # Create documents in MongoDB using PyMongo.\n # First a database is created. 
Then a collection is created inside that database.\n # Then documents are created in that collection.\n logger.info('Adding documents into %s collection using PyMongo...', mongodb_origin.collection)\n mongodb_database = mongodb.engine[mongodb_origin.database]\n mongodb_collection = mongodb_database[mongodb_origin.collection]\n insert_list = [mongodb_collection.insert_one(doc) for doc in docs_in_database]\n assert len(insert_list) == len(docs_in_database)\n\n # Start pipeline and verify the documents using snaphot.\n sdc_executor.add_pipeline(pipeline)\n snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot\n sdc_executor.stop_pipeline(pipeline)\n rows_from_snapshot = [{'data': str(record.value2['data'])} for record in snapshot[mongodb_origin].output]\n\n assert rows_from_snapshot == [{'data': str(record.get('data'))} for record in ORIG_BINARY_DOCS]\n\n finally:\n logger.info('Dropping %s database...', mongodb_origin.database)\n mongodb.engine.drop_database(mongodb_origin.database)", "def test_12_migrate_vm_live_with_snapshots_on_remote(self):\n global vm2\n # Get ROOT Volume\n vol_for_snap = list_volumes(\n self.apiclient,\n virtualmachineid=vm2.id,\n listall=True)\n for vol in vol_for_snap:\n snapshot = Snapshot.create(\n self.apiclient,\n volume_id=vol.id\n )\n snapshot.validateState(\n self.apiclient,\n snapshotstate=\"backedup\",\n )\n # Migrate all volumes and VMs\n\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient, vm2, self.host_remote)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)\n vm2 = self.helper.migrateVm(self.apiclient, self.virtual_machine_live_migration_2, destinationHost)\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient, vm2, self.host_remote)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)", "def migrate_database():\n log('Migrating the keystone database.', level=INFO)\n service_stop(keystone_service())\n # NOTE(jamespage) > icehouse creates a log file as root so use\n # sudo to execute as keystone otherwise keystone won't start\n # afterwards.\n cmd = ['sudo', '-u', 'keystone', 'keystone-manage', 'db_sync']\n subprocess.check_output(cmd)\n service_start(keystone_service())\n time.sleep(10)\n peer_store('db-initialised', 'True')", "def connect_and_update(_id, padding, host, port, dbname, collname,\n updates_per_process, process_number, replica_set):\n client = MongoClient(host=[get_hostport_string(host=host, port=port)],\n replicaset=replica_set)\n db = client[dbname]\n collection = db[collname]\n try: # Unless using multiple docs, most of these will fail\n collection.insert_one({\"_id\": _id, \"padding\": padding})\n except:\n pass\n\n for j in xrange(updates_per_process):\n update_document(_id, collection, padding, process_number)\n \n client.close()", "def migrate_to_other_zone(\n self,\n request: dds_20151201_models.MigrateToOtherZoneRequest,\n ) -> dds_20151201_models.MigrateToOtherZoneResponse:\n runtime = util_models.RuntimeOptions()\n return self.migrate_to_other_zone_with_options(request, runtime)", "def test_transform_and_load_vpcs(neo4j_session):\n vpc_res = tests.data.gcp.compute.VPC_RESPONSE\n vpc_list = cartography.intel.gcp.compute.transform_gcp_vpcs(vpc_res)\n cartography.intel.gcp.compute.load_gcp_vpcs(neo4j_session, vpc_list, TEST_UPDATE_TAG)\n\n query = \"\"\"\n MATCH(vpc:GCPVpc{id:$VpcId})\n RETURN vpc.id, vpc.partial_uri, vpc.auto_create_subnetworks\n \"\"\"\n expected_vpc_id = 
'projects/project-abc/global/networks/default'\n nodes = neo4j_session.run(\n query,\n VpcId=expected_vpc_id,\n )\n actual_nodes = {(n['vpc.id'], n['vpc.partial_uri'], n['vpc.auto_create_subnetworks']) for n in nodes}\n expected_nodes = {\n (expected_vpc_id, expected_vpc_id, True),\n }\n assert actual_nodes == expected_nodes", "def to_network_v4(zone: Zone) -> ipaddress.IPv4Network:\n\n labels = zone.name.split(\".\")[:-3]\n netmask: int = 8 * len(labels)\n offset = 4 - len(labels)\n\n pattern = r\"^(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)([/-](2[5-9]|3[0-1]))?$\"\n last_label_parsed = re.search(pattern, labels[0])\n if not last_label_parsed:\n raise ValueError(\"Faild to parse the zone name\")\n\n if last_label_parsed[2]:\n # non-octet boundary delegation detected\n # remove netmask and save it to the result\n last_octect = last_label_parsed[1]\n labels[0] = last_octect\n netmask = int(last_label_parsed[2][1:])\n\n labels = [\"0\"] * offset + labels\n prefix_str = \".\".join(reversed(labels))\n prefix_str += f\"/{netmask}\"\n\n return ipaddress.IPv4Network(prefix_str, strict=True)", "def _resolve_shard(client):\n status = client.admin.command('serverStatus')\n if status['process'] == 'mongos':\n raise RuntimeError(\"Destination cannot be mongos\")\n return client", "def migrateVirtualMachine(self,node,vmid,target,online=False,force=False):\n post_data = {'target': str(target)}\n if online:\n post_data['online'] = '1'\n if force:\n post_data['force'] = '1'\n data = self.connect('post',\"nodes/%s/qemu/%s/migrate\" % (node,vmid), post_data)\n return data", "def setup_source_db(self):\n conn = MongoReplicaSetClient(host=self._source_host,\n replicaSet=self._replica_set,\n read_preference=ReadPreference.PRIMARY)\n conn['admin'].authenticate(self._user, self._password)\n return conn", "def change_address(\n vm_hostname, new_address,\n offline=False, migrate=False, allow_reserved_hv=False,\n offline_transport='drbd',\n):\n\n if not offline:\n raise IGVMError('IP address change can be only performed offline')\n\n with _get_vm(vm_hostname) as vm:\n if vm.dataset_obj['datacenter_type'] != 'kvm.dct':\n raise NotImplementedError(\n 'This operation is not yet supported for {}'.format(\n vm.dataset_obj['datacenter_type'])\n )\n\n new_address = ip_address(new_address)\n\n if vm.dataset_obj['intern_ip'] == new_address:\n raise ConfigError('New IP address is the same as the old one!')\n\n if not vm.hypervisor.get_vlan_network(new_address) and not migrate:\n err = 'Current hypervisor does not support new subnet!'\n raise ConfigError(err)\n\n new_network = Query(\n {\n 'servertype': 'route_network',\n 'state': 'online',\n 'network_type': 'internal',\n 'intern_ip': Contains(new_address),\n }\n ).get()['hostname']\n\n vm_was_running = vm.is_running()\n\n with Transaction() as transaction:\n if vm_was_running:\n vm.shutdown(\n transaction=transaction,\n check_vm_up_on_transaction=False,\n )\n vm.change_address(\n new_address, new_network, transaction=transaction,\n )\n\n if migrate:\n vm_migrate(\n vm_object=vm,\n run_puppet=True,\n offline=True,\n no_shutdown=True,\n allow_reserved_hv=allow_reserved_hv,\n offline_transport=offline_transport,\n )\n else:\n vm.hypervisor.mount_vm_storage(vm, transaction=transaction)\n vm.run_puppet()\n vm.hypervisor.redefine_vm(vm)\n vm.hypervisor.umount_vm_storage(vm)\n\n if vm_was_running:\n vm.start()", "def migrate_replica(replica, location, noRemove=False, mirror=False):\n\n from tardis.tardis_portal.models import Replica, Location\n\n with transaction.commit_on_success():\n 
replica = Replica.objects.select_for_update().get(pk=replica.pk)\n source = Location.get_location(replica.location.name)\n\n if not replica.verified or location.provider.trust_length:\n raise MigrationError('Only verified datafiles can be migrated' \\\n ' to this destination')\n\n filename = replica.get_absolute_filepath()\n try:\n newreplica = Replica.objects.get(datafile=replica.datafile,\n location=location)\n created_replica = False\n # We've most likely mirrored this file previously. But if\n # we are about to delete the source Replica, we need to check\n # that the target Replica still verifies.\n if not mirror and not check_file_transferred(newreplica, location):\n raise MigrationError('Previously mirrored / migrated Replica' \\\n ' no longer verifies locally!')\n except Replica.DoesNotExist:\n newreplica = Replica()\n newreplica.location = location\n newreplica.datafile = replica.datafile\n newreplica.protocol = ''\n newreplica.stay_remote = location != Location.get_default_location()\n newreplica.verified = False\n url = location.provider.generate_url(newreplica)\n\n if newreplica.url == url:\n # We should get here ...\n raise MigrationError('Cannot migrate a replica to its' \\\n ' current location')\n newreplica.url = url\n location.provider.put_file(replica, newreplica)\n verified = False\n try:\n verified = check_file_transferred(newreplica, location)\n except:\n # FIXME - should we always do this?\n location.provider.remove_file(newreplica)\n raise\n\n newreplica.verified = verified\n newreplica.save()\n logger.info('Transferred file %s for replica %s' %\n (filename, replica.id))\n created_replica = True\n\n if mirror:\n return created_replica\n\n # FIXME - do this more reliably ...\n replica.delete()\n if not noRemove:\n source.provider.remove_file(replica)\n logger.info('Removed local file %s for replica %s' %\n (filename, replica.id))\n return True", "def connect_to_mongo(self, host='127.0.0.1', port=27017, instance='local'):\n if instance == 'prod':\n logging.info('connecting to mongo Atlas')\n self.db_client = MongoClient('mongodb+srv://{}:{}@{}/'\n '{}?retryWrites=true&w=majority'.format(self.config['db']['username'],\n self.config['db']['password'],\n self.config['db']['atlas'],\n self.db_name))\n else:\n logging.info('connecting to local Atlas')\n self.db_client = MongoClient(host, port)", "def move_ips_to_interface(apps, schema_editor):\n UserAS = apps.get_model('scionlab', 'UserAS')\n\n for useras in UserAS.objects.iterator():\n # UserASes have a unique host and before the multi-AP feature had a unique interface\n host = useras.hosts.get()\n iface = useras.interfaces.get()\n if not iface.public_ip:\n iface.public_ip = host.public_ip\n iface.bind_ip = host.bind_ip\n iface.save()\n host.public_ip = None\n host.bind_ip = None\n host.save()", "def upgrade_to_2():\n\n def update_file_origins(cont_list, cont_name):\n for container in cont_list:\n updated_files = []\n for file in container.get('files', []):\n origin = file.get('origin')\n if origin is not None:\n if origin.get('name', None) is None:\n file['origin']['name'] = origin['id']\n if origin.get('method', None) is None:\n file['origin']['method'] = ''\n updated_files.append(file)\n\n query = {'_id': container['_id']}\n update = {'$set': {'files': updated_files}}\n result = config.db[cont_name].update_one(query, update)\n\n query = {'$and':[{'files.origin.name': { '$exists': False}}, {'files.origin.id': { '$exists': True}}]}\n\n update_file_origins(config.db.collections.find(query), 'collections')\n 
update_file_origins(config.db.projects.find(query), 'projects')\n update_file_origins(config.db.sessions.find(query), 'sessions')\n update_file_origins(config.db.acquisitions.find(query), 'acquisitions')", "def try_atlas():\n result = {}\n try:\n vcap_services = os.getenv('VCAP_SERVICES')\n services = json.loads(vcap_services)\n for service_name in services.keys():\n print(f'service_name={service_name}')\n if service_name == \"_\":\n continue\n credentials = load_from_vcap_services(service_name)\n result.update(credentials)\n except Exception as err:\n print( f'Error looking for VCAP_SERVICES {err}')\n result['error']=err\n return result\n\n mongo_results = {}\n try:\n db = MongoClient( result['connectionString'] )\n mongo_results[\"MongoClient\"]= f'{db}'\n mongo_results[\"server_info\"]=db.server_info()\n except Exception as err:\n print( f'Error trying connection to Atlas: {err}')\n result['atlas-error']=err\n finally:\n result['mongo']=mongo_results\n\n return result", "def __init__(self, dst_mongodb_uri, dst_database, dst_collection, dry_run):\n self.client = pymongo.MongoClient(dst_mongodb_uri)\n self.dst_mongodb_uri = dst_mongodb_uri\n self.lookup_col = self.client[dst_database][dst_collection]\n self.dry_run = dry_run" ]
[ "0.5335884", "0.50473094", "0.5008553", "0.49998647", "0.48307848", "0.47682485", "0.47216007", "0.4666874", "0.46228546", "0.45956224", "0.4590136", "0.45401716", "0.45268676", "0.4509593", "0.45067737", "0.4491239", "0.44909394", "0.4489098", "0.44764355", "0.44562212", "0.44552153", "0.4452588", "0.44209155", "0.4413766", "0.44047692", "0.44022316", "0.43876037", "0.43639293", "0.43613485", "0.43509868" ]
0.52417934
1
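The companion record above is the convenience wrapper for the same operation: it builds a default `util_models.RuntimeOptions()` itself and delegates to the `*_with_options` variant. Given the client and request from the previous sketch, the call reduces to:

```python
# Equivalent call through the wrapper; per-call timeouts and retry
# behavior can only be tuned via the *_with_options variant.
response = client.migrate_available_zone(request)
print(response.body)
```

Splitting each operation into a thin wrapper plus a `*_with_options` method is the pattern used throughout this SDK; it keeps the common path short while still letting callers override runtime behavior per request.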
Precautions: The instance must be in the Running state when you call this operation. If you modify parameters that can take effect only after an instance restart, the instance is automatically restarted after this operation is called. You can call the [DescribeParameterTemplates](~~67618~~) operation to query which parameters take effect only after the instance is restarted.
def modify_parameters_with_options( self, request: dds_20151201_models.ModifyParametersRequest, runtime: util_models.RuntimeOptions, ) -> dds_20151201_models.ModifyParametersResponse: UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.character_type): query['CharacterType'] = request.character_type if not UtilClient.is_unset(request.dbinstance_id): query['DBInstanceId'] = request.dbinstance_id if not UtilClient.is_unset(request.node_id): query['NodeId'] = request.node_id if not UtilClient.is_unset(request.owner_account): query['OwnerAccount'] = request.owner_account if not UtilClient.is_unset(request.owner_id): query['OwnerId'] = request.owner_id if not UtilClient.is_unset(request.parameters): query['Parameters'] = request.parameters if not UtilClient.is_unset(request.region_id): query['RegionId'] = request.region_id if not UtilClient.is_unset(request.resource_owner_account): query['ResourceOwnerAccount'] = request.resource_owner_account if not UtilClient.is_unset(request.resource_owner_id): query['ResourceOwnerId'] = request.resource_owner_id if not UtilClient.is_unset(request.security_token): query['SecurityToken'] = request.security_token req = open_api_models.OpenApiRequest( query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( action='ModifyParameters', version='2015-12-01', protocol='HTTPS', pathname='/', method='POST', auth_type='AK', style='RPC', req_body_type='formData', body_type='json' ) return TeaCore.from_map( dds_20151201_models.ModifyParametersResponse(), self.call_api(params, req, runtime) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def runtime_updatable_params(self) -> pulumi.Output['outputs.RuntimeUpdatableParamsResponse']:\n return pulumi.get(self, \"runtime_updatable_params\")", "def set_parameters(self,params):\n K3Supervisor.set_parameters(self,params)\n self.gtg.set_parameters(self.parameters)\n self.avoidobstacles.set_parameters(self.parameters)\n self.wall.set_parameters(self.parameters)", "def get_resource_params(self):\n return SBE37Parameter.list()", "def _apply_params(self):\n config = self.get_startup_config()\n # Pass true to _set_params so we know these are startup values\n self._set_params(config, True)", "def gen_parameters(self):\n\n print \"\\t* Adding parameters to compute template\"\n # get all the server client\n servers = self.novaclient.servers.list()\n\n # add all key_pair_names\n self.gen_key_name_parameters(servers)\n\n # add all images\n self.gen_image_parameters(servers)\n\n # add all flavors\n self.gen_flavor_parameters(servers)\n\n # add all networks\n self.gen_network_parameters()", "def updateParameters(self):\n\n return", "def ApplyRuntimeParameters(self):\n \n if self.models is None or len(self.models) == 0:\n\n if self.verbose:\n\n print \"No model runtime parameters defined\"\n\n return\n\n num_models = len(self.models)\n\n if self.verbose:\n\n print \"Applying model runtime parameters to %d models\" % num_models\n\n for m in self.models:\n\n try:\n \n modelname = m['modelname']\n\n if self.verbose:\n\n print \"\\tSetting runtime parameters for '%s'\" % modelname\n\n\n self.SetModelName(modelname)\n \n if m.has_key('runtime_parameters') and not m['runtime_parameters'] is None:\n \n for parameter in m['runtime_parameters']:\n\n component_name = parameter[0]\n field = parameter[1]\n val = parameter[2]\n\n self.SetParameter(path=component_name, parameter=field, value=val)\n\n except Exception, e:\n\n print e\n\n continue\n\n # Now apply genericly set parameters\n\n if len(self._runtime_parameters) > 0:\n\n if self.verbose:\n\n print \"Applying generically set model runtime parameters\"\n\n \n for p in self._runtime_parameters:\n\n try:\n\n path = p['path'] \n parameter = p['parameter']\n value = p['value']\n service = None if not p.has_key('service') else p['service']\n\n self.SetParameter(path, parameter, value, service)\n \n except Exception, e:\n\n print e\n\n continue", "def modify_parameters(\n self,\n request: dds_20151201_models.ModifyParametersRequest,\n ) -> dds_20151201_models.ModifyParametersResponse:\n runtime = util_models.RuntimeOptions()\n return self.modify_parameters_with_options(request, runtime)", "def parameters():\n return render_template(\n 'parameters.html',\n title= \"Pi-Lapse\",\n year=datetime.now().year,\n )", "def set_parameters(self, params):\n self.kp = params.pgain", "def runtime_updatable_params(self) -> Optional[pulumi.Input['RuntimeUpdatableParamsArgs']]:\n return pulumi.get(self, \"runtime_updatable_params\")", "def get_resource_params(self):\n return Parameter.list()", "def get_resource_params(self):\n return Parameter.list()", "def _instantiate_parameter_states(self, context=None):\n\n from PsyNeuLink.Components.States.ParameterState import _instantiate_parameter_states\n _instantiate_parameter_states(owner=self, context=context)", "def _base_troposphere_template(self):\n template = troposphere.Template()\n template.add_parameter(\n troposphere.Parameter(\n \"Stage\",\n Default=\"dev\",\n Description=\"Name of the Stage\",\n Type=\"String\",\n )\n )\n\n template.add_parameter(\n troposphere.Parameter(\n \"Region\",\n Description=\"AWS 
Region\",\n Type=\"String\",\n )\n )\n return template", "def get_resource_params():\n return Parameter.list()", "def configure_stp_instance(self, instance, **kwargs):\n pass", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def potential_parameters(cls):\n raise NotImplementedError()", "def setup_parameters(self):\n structure = self.ctx.structure_initial_primitive\n ecutwfc = []\n ecutrho = []\n\n for kind in structure.get_kind_names():\n try:\n dual = self.ctx.protocol['pseudo_data'][kind]['dual']\n cutoff = self.ctx.protocol['pseudo_data'][kind]['cutoff']\n cutrho = dual * cutoff\n ecutwfc.append(cutoff)\n ecutrho.append(cutrho)\n except KeyError as exception:\n self.abort_nowait('failed to retrieve the cutoff or dual factor for {}'.format(kind))\n\n natoms = len(structure.sites)\n conv_thr = self.ctx.protocol['convergence_threshold'] * natoms\n\n self.ctx.inputs['parameters'] = {\n 'CONTROL': {\n 'restart_mode': 'from_scratch',\n 'tstress': self.ctx.protocol['tstress'],\n },\n 'SYSTEM': {\n 'ecutwfc': max(ecutwfc),\n 'ecutrho': max(ecutrho),\n 'smearing': self.ctx.protocol['smearing'],\n 'degauss': self.ctx.protocol['degauss'],\n 'occupations': self.ctx.protocol['occupations'],\n },\n 'ELECTRONS': {\n 'conv_thr': conv_thr,\n }\n }", "def Params(cls):\n return hyperparams.InstantiableParams(cls)", "def psfTemplateModel(n, params):\n psf_template = params[\"psf_template\"]\n self.m_psf = psf_template\n print(\"PSF template shape\", np.shape(psf_template))\n dim = int(n)\n m = np.shape(psf_template)[0]\n #if m != dim:\n # raise ValueError(\"PSF template dimension not equal patch size\")\n \n if np.sum(psf_template) != 1:\n print(\"Normalizing PSF template to sum = 1\")\n psf_template = psf_template/np.sum(psf_template) \n return psf_template", "def updateParameters(self, parameters):\n return" ]
[ "0.52970576", "0.5158294", "0.5146255", "0.51394826", "0.5136844", "0.5133746", "0.51167595", "0.5108743", "0.5072519", "0.5060148", "0.5052976", "0.4969164", "0.4969164", "0.4955536", "0.49382296", "0.4936451", "0.4931274", "0.4930858", "0.4930858", "0.4930858", "0.4930858", "0.4930858", "0.4930858", "0.4930858", "0.4930858", "0.4902821", "0.4901451", "0.48979196", "0.48969564", "0.48968825" ]
0.53479296
0
This operation can be used to release the internal endpoint of a shard or Configserver node in a sharded cluster instance. For more information, see [Release the endpoint of a shard or Configserver node](~~134067~~). To release the public endpoint of a shard or Configserver node in a sharded cluster instance, you can call the [ReleasePublicNetworkAddress](~~67604~~) operation.
def release_node_private_network_address( self, request: dds_20151201_models.ReleaseNodePrivateNetworkAddressRequest, ) -> dds_20151201_models.ReleaseNodePrivateNetworkAddressResponse: runtime = util_models.RuntimeOptions() return self.release_node_private_network_address_with_options(request, runtime)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def release_address(self, public_ip=None, allocation_id=None):\r\n params = {}\r\n\r\n if public_ip is not None:\r\n params['PublicIp'] = public_ip\r\n elif allocation_id is not None:\r\n params['AllocationId'] = allocation_id\r\n\r\n return self.get_status('ReleaseAddress', params, verb='POST')", "def release_eip_address(\n public_ip=None, allocation_id=None, region=None, key=None, keyid=None, profile=None\n):\n if not salt.utils.data.exactly_one((public_ip, allocation_id)):\n raise SaltInvocationError(\n \"Exactly one of 'public_ip' OR 'allocation_id' must be provided\"\n )\n\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n\n try:\n return conn.release_address(public_ip, allocation_id)\n except boto.exception.BotoServerError as e:\n log.error(e)\n return False", "def release_node_private_network_address_with_options(\n self,\n request: dds_20151201_models.ReleaseNodePrivateNetworkAddressRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.ReleaseNodePrivateNetworkAddressResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.network_type):\n query['NetworkType'] = request.network_type\n if not UtilClient.is_unset(request.node_id):\n query['NodeId'] = request.node_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='ReleaseNodePrivateNetworkAddress',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.ReleaseNodePrivateNetworkAddressResponse(),\n self.call_api(params, req, runtime)\n )", "def disassociate_elastic_ip(ElasticIp=None):\n pass", "async def release_node_private_network_address_with_options_async(\n self,\n request: dds_20151201_models.ReleaseNodePrivateNetworkAddressRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.ReleaseNodePrivateNetworkAddressResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.network_type):\n query['NetworkType'] = request.network_type\n if not UtilClient.is_unset(request.node_id):\n query['NodeId'] = request.node_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = 
open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='ReleaseNodePrivateNetworkAddress',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.ReleaseNodePrivateNetworkAddressResponse(),\n await self.call_api_async(params, req, runtime)\n )", "def delete_endpoint(EndpointName=None):\n pass", "def test_delete_host_subnet(self):\n pass", "def remove_ip(enode, portlbl, addr, shell=None):\n assert portlbl\n assert ip_interface(addr)\n port = enode.ports[portlbl]\n\n cmd = 'ip addr del {addr} dev {port}'.format(addr=addr, port=port)\n response = enode(cmd, shell=shell)\n assert not response", "def unplug_port_from_network(self, device_id, device_owner, hostname,\n port_id, network_id, tenant_id, sg, vnic_type,\n switch_bindings=None, segments=None):", "def deregister_elastic_ip(ElasticIp=None):\n pass", "def release_port_fixed_ip(self, network_id, device_id, subnet_id):\n return self.call(self.context,\n self.make_msg('release_port_fixed_ip',\n network_id=network_id,\n subnet_id=subnet_id,\n device_id=device_id,\n host=self.host),\n topic=self.topic)", "async def release_node_private_network_address_async(\n self,\n request: dds_20151201_models.ReleaseNodePrivateNetworkAddressRequest,\n ) -> dds_20151201_models.ReleaseNodePrivateNetworkAddressResponse:\n runtime = util_models.RuntimeOptions()\n return await self.release_node_private_network_address_with_options_async(request, runtime)", "def delete_endpoint_config(EndpointConfigName=None):\n pass", "def test_replace_host_subnet(self):\n pass", "def release_dhcp_port(self, network_id, device_id):\n LOG.debug(\"release_dhcp_port: %s %s\", network_id, device_id)", "def fusion_api_delete_ethernet_network(self, name=None, uri=None, param='', api=None, headers=None):\n return self.ethernet_network.delete(name=name, uri=uri, param=param, api=api, headers=headers)", "def test_routing_ip_prefix_uninstall(self):\n self._common_uninstall_external_and_unintialized(\n 'some_id', routing_ip_prefix.delete,\n {'prefix': {}}\n )", "def delete_endpoint(self):\n logger.warning(f\"Deleting hosting endpoint '{self.endpoint_name}'...\")\n self._realtime_predictor.delete_endpoint()", "def delete_dev_endpoint(self):\n self.glue_engine.delete_dev_endpoint(EndpointName=self.dev_endpoint_name)", "def test_deploy_instance_with_new_network(self):\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n instance_name = TEST_SERVER_PREFIX + \"_network_\" + suffix\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 252\n self.__deploy_instance_helper__(instance_name=instance_name,\n network_name=network_name,\n network_cidr=network_cidr)", "def remove_gateway(self, network_ref):\n raise NotImplementedError()", "def post_instance_ip_delete(self, resource_id, resource_dict):\n pass", "def delete(profile):\n client = boto3client.get(\"ec2\", profile)\n params = {}\n params[\"InternetGatewayId\"] = vpc\n return client.delete_internet_gateway(**params)", "def destroy(self, network, device_name):\n if self.conf.use_namespaces:\n namespace = NS_PREFIX + network.id\n else:\n namespace = None\n\n self.driver.unplug(device_name, namespace=namespace)\n\n self.plugin.release_dhcp_port(network.id,\n self.get_device_id(network))", "def remove_fixed_ip_from_instance(self, context, instance_id, address):\n args = {'instance_id': 
instance_id,\n 'address': address}\n rpc.cast(context, FLAGS.network_topic,\n {'method': 'remove_fixed_ip_from_instance',\n 'args': args})", "def disassociate_resolver_endpoint_ip_address(ResolverEndpointId=None, IpAddress=None):\n pass", "def test_patch_host_subnet(self):\n pass", "def release(self, floating_ip_id):\n self.client.delete_floatingip(floating_ip_id)", "def release_floating_ip(self, context, address,\n affect_auto_assigned=False):\n floating_ip = self.db.floating_ip_get_by_address(context, address)\n if floating_ip['fixed_ip']:\n raise exception.ApiError(_('Floating ip is in use. '\n 'Disassociate it before releasing.'))\n if not affect_auto_assigned and floating_ip.get('auto_assigned'):\n return\n # NOTE(vish): We don't know which network host should get the ip\n # when we deallocate, so just send it to any one. This\n # will probably need to move into a network supervisor\n # at some point.\n rpc.cast(context,\n FLAGS.network_topic,\n {'method': 'deallocate_floating_ip',\n 'args': {'floating_address': floating_ip['address']}})", "def unregister_router(self, hostname):" ]
[ "0.5712464", "0.5554131", "0.54412097", "0.524051", "0.52084637", "0.5085088", "0.50104165", "0.49779233", "0.49513933", "0.49257186", "0.4916261", "0.48996267", "0.48951858", "0.48708498", "0.48667976", "0.48482025", "0.47911367", "0.47712603", "0.47670642", "0.4749826", "0.47098243", "0.46974462", "0.46822196", "0.4679282", "0.46725243", "0.46538797", "0.46525046", "0.4645978", "0.4635546", "0.46105352" ]
0.5607081
1
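A minimal usage sketch for the ReleaseNodePrivateNetworkAddress call in the record above. The import paths follow the usual alibabacloud SDK layout, and the credentials, region, instance ID, and node ID are all placeholder assumptions, not values taken from the record:

from alibabacloud_dds20151201.client import Client
from alibabacloud_dds20151201 import models as dds_20151201_models
from alibabacloud_tea_openapi import models as open_api_models

# Assumed credentials and region; replace with real values.
config = open_api_models.Config(
    access_key_id='<your-access-key-id>',
    access_key_secret='<your-access-key-secret>',
    region_id='cn-hangzhou',
)
client = Client(config)

request = dds_20151201_models.ReleaseNodePrivateNetworkAddressRequest(
    dbinstance_id='dds-bp1example',  # placeholder sharded cluster instance ID
    node_id='d-bp1example',          # placeholder shard or Configserver node ID
    network_type='VPC',              # network type of the internal endpoint to release
)
response = client.release_node_private_network_address(request)
print(response.body)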
The instance must be in the running state when you call this operation. > The available database versions depend on the storage engine used by the instance. For more information, see [Upgrades of MongoDB major versions](~~398673~~). You can also call the [DescribeAvailableEngineVersion](~~141355~~) operation to query the available database versions. > You cannot downgrade the MongoDB version of an instance after you upgrade it. > The instance is automatically restarted two to three times during the upgrade process. Make sure that you upgrade the instance during off-peak hours.
def upgrade_dbinstance_engine_version( self, request: dds_20151201_models.UpgradeDBInstanceEngineVersionRequest, ) -> dds_20151201_models.UpgradeDBInstanceEngineVersionResponse: runtime = util_models.RuntimeOptions() return self.upgrade_dbinstance_engine_version_with_options(request, runtime)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def database_version(self) -> Optional[pulumi.Input['InstanceDatabaseVersion']]:\n return pulumi.get(self, \"database_version\")", "def db_version(engine):\n return IMPL.db_version(engine)", "def db_version():\n return IMPL.db_version()", "def mmo_mongo_version(self, mmo_connection):\n return mmo_connection[\"admin\"].command(\"serverStatus\")[\"version\"]", "def version(self):\r\n print migration.db_version()", "def upgrade_to_1():\n config.db.singletons.insert_one({'_id': 'version', 'database': 1})", "def update_db_version():\n print(\"Checking Database states...\")\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"ADSM.settings\")\n try:\n call_command('migrate', database='scenario_db', interactive=False, fake_initial=True)\n call_command('migrate', database='default', interactive=False, fake_initial=True)\n except:\n print(\"Error: Migration failed.\")\n print('Done migrating databases.')", "def AddDatabaseVersion(\n parser, restrict_choices=True, hidden=False, support_default_version=True\n):\n # Section for engine-specific content.\n # This section is auto-generated by //cloud/storage_fe/sql/sync_engines.\n # Do not make manual edits.\n choices = [\n 'MYSQL_5_6',\n 'MYSQL_5_7',\n 'MYSQL_8_0',\n 'POSTGRES_9_6',\n 'POSTGRES_10',\n 'POSTGRES_11',\n 'POSTGRES_12',\n 'POSTGRES_13',\n 'POSTGRES_14',\n 'POSTGRES_15',\n 'SQLSERVER_2017_EXPRESS',\n 'SQLSERVER_2017_WEB',\n 'SQLSERVER_2017_STANDARD',\n 'SQLSERVER_2017_ENTERPRISE',\n 'SQLSERVER_2019_EXPRESS',\n 'SQLSERVER_2019_WEB',\n 'SQLSERVER_2019_STANDARD',\n 'SQLSERVER_2019_ENTERPRISE',\n 'SQLSERVER_2022_EXPRESS',\n 'SQLSERVER_2022_WEB',\n 'SQLSERVER_2022_STANDARD',\n 'SQLSERVER_2022_ENTERPRISE',\n ]\n # End of engine-specific content.\n\n help_text_unspecified_part = (\n DEFAULT_INSTANCE_DATABASE_VERSION + ' is used.'\n if support_default_version\n else 'no changes occur.'\n )\n help_text = (\n 'The database engine type and versions. 
If left unspecified, '\n + help_text_unspecified_part\n + ' See the list of database versions at '\n + 'https://cloud.google.com/sql/docs/mysql/admin-api/rest/v1beta4/SqlDatabaseVersion.'\n )\n\n if restrict_choices:\n help_text += (\n ' Apart from listed major versions, DATABASE_VERSION also accepts'\n ' supported minor versions.'\n )\n\n parser.add_argument(\n '--database-version',\n required=False,\n default=DEFAULT_INSTANCE_DATABASE_VERSION\n if support_default_version\n else None,\n choices=_MajorVersionMatchList(choices) if restrict_choices else None,\n help=help_text,\n hidden=hidden,\n )", "def safe_upgrade():\n goviewbe.upgrade_db(current_app)", "async def upgrade_dbinstance_engine_version_async(\n self,\n request: dds_20151201_models.UpgradeDBInstanceEngineVersionRequest,\n ) -> dds_20151201_models.UpgradeDBInstanceEngineVersionResponse:\n runtime = util_models.RuntimeOptions()\n return await self.upgrade_dbinstance_engine_version_with_options_async(request, runtime)", "def open_db_connection():\n client = MongoClient() #'104.131.185.191', 27017\n db = client[\"225VOH\"]\n return client, db", "async def _upgrade_db(self) -> None:\n cur_version = await self._get_db_version()\n for n in range(cur_version + 1, sql_data.CUR_VERSION + 1):\n log.msg('Upgrading database to version %d' % n)\n if n in sql_data.SQL_UPGRADES:\n for command in sql_data.SQL_UPGRADES[n]:\n await self.operation(command)\n if cur_version != sql_data.CUR_VERSION:\n await self._set_db_version(sql_data.CUR_VERSION)", "def database_version(self) -> str:\n return pulumi.get(self, \"database_version\")", "def db_connect():\n client = pymongo.MongoClient(get_project_settings().get(\"MONGO_URI\"))\n db = client.vehicles\n\n return db", "def inspect(self):\n self.db.connect()\n result = None\n try:\n jambi_versions = JambiModel.select().limit(1)\n if any(jambi_versions):\n field = jambi_versions[0].ref\n try:\n result = int(field)\n except ValueError:\n self.logger.error('Database current version \"{}\" is not '\n 'valid'.format(jambi_versions[0].ref))\n self.logger.info('Your database is at version '\n '{}'.format(field))\n else:\n self.logger.info('This database hasn\\'t been migrated yet')\n except ProgrammingError:\n self.logger.info('Run \"init\" to create a jambi version table')\n finally:\n self.db.close()\n return result", "def api_db():\n return pymongo.MongoClient(SCITRAN_PERSISTENT_DB_URI).get_database()", "def version(self):\n self.cursor.execute(\"SELECT VERSION()\")\n # Fetch a single row using fetchone() method.\n data = self.cursor.fetchone()\n print(\"Database version : %s \" % data)", "def database_connectivity():\n\n port = \"mongodb://localhost:27017/\"\n client = pymongo.MongoClient(host=port)\n\n db = client[\"Password_Manager\"]\n collection = db[\"Passwords\"]\n\n return collection", "def upgrade_schema():\n\n db_version = get_db_version()\n try:\n while db_version < CURRENT_DATABASE_VERSION:\n db_version += 1\n upgrade_script = 'upgrade_to_'+str(db_version)\n globals()[upgrade_script]()\n except KeyError as e:\n logging.exception('Attempted to upgrade using script that does not exist: {}'.format(e))\n sys.exit(1)\n except Exception as e:\n logging.exception('Incremental upgrade of db failed')\n sys.exit(1)\n else:\n config.db.singletons.update_one({'_id': 'version'}, {'$set': {'database': CURRENT_DATABASE_VERSION}})\n sys.exit(0)", "def Mongodb_Connection():\r\n \r\n client = pymongo.MongoClient(\"localhost\", 27017)\r\n db = client.test\r\n\r\n\r\n if db.Transaction.estimated_document_count() != 
0:\r\n \"\"\"\r\n To make a new test, the database is cleared if not empty\r\n \"\"\"\r\n \r\n db.command(\"dropDatabase\")\r\n \r\n return db", "def database_version(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"database_version\")", "def upgrade_to_18():\n\n gear_doc = config.db.singletons.find_one({\"_id\": \"gears\"})\n\n if gear_doc is not None:\n gear_list = gear_doc.get('gear_list', [])\n for gear in gear_list:\n try:\n gears.upsert_gear(gear)\n except Exception as e:\n logging.error(\"\")\n logging.error(\"Error upgrading gear:\")\n logging.error(type(e))\n logging.error(\"Gear will not be retained. Document follows:\")\n logging.error(gear)\n logging.error(\"\")\n\n config.db.singletons.remove({\"_id\": \"gears\"})", "def initState(currentState):\n\n global client , db \n\n print(\"<<INIT>>\")#DEBUG\n print(f\"mongodb+srv://{username}:{password}@cluster0.70rhn.mongodb.net/{dbname}?retryWrites=true&w=majority\")\n connected = False\n client = None\n while not connected:\n client = MongoClient(f\"mongodb+srv://{username}:{password}@cluster0.70rhn.mongodb.net/{dbname}?retryWrites=true&w=majority\")\n connected = not client == None\n db = client.texet\n return 'watch'", "def get_mongo_conn():\n MONGO_URI = 'mongodb://saran:[email protected]:13736/ingredientmaster'\n client = pymongo.MongoClient(MONGO_URI)\n db = client.get_database('ingredientmaster')\n return db", "def init_mongo_db():\n try:\n app.mongo.cx.server_info()\n app.mongo.db = app.mongo.cx[\"kamistudio\"]\n if \"kami_corpora\" not in app.mongo.db.collection_names():\n app.mongo.db.create_collection(\"kami_corpora\")\n app.mongo.db.kami_corpora.create_index(\"id\", unique=True)\n\n if \"kami_models\" not in app.mongo.db.collection_names():\n app.mongo.db.create_collection(\"kami_models\")\n app.mongo.db.kami_models.create_index(\"id\", unique=True)\n\n if \"kami_definitions\" not in app.mongo.db.collection_names():\n app.mongo.db.create_collection(\"kami_definitions\")\n app.mongo.db.kami_definitions.create_index(\"id\", unique=True)\n\n if \"kami_new_definitions\" not in app.mongo.db.collection_names():\n app.mongo.db.create_collection(\"kami_new_definitions\")\n\n except ServerSelectionTimeoutError as e:\n app.mongo.db = None", "def get_db():\n client = MongoClient(\"mongodb://admin:therightfit@ds125555.\" +\n \"mlab.com:25555/the_right_fit\")\n db_object = client['the_right_fit']\n return db_object", "def GetMigrationStatus(self, instance):\n raise HypervisorError(\"Migration not supported by the chroot hypervisor\")", "def upgrade_to_8():\n\n colls = config.db.collection_names()\n to_be_removed = ['version', 'config', 'static']\n # If we are in a bad state (singletons exists but so do any of the colls in to be removed)\n # remove singletons to try again\n if 'singletons' in colls and set(to_be_removed).intersection(set(colls)):\n config.db.drop_collection('singletons')\n\n if 'singletons' not in config.db.collection_names():\n static = config.db.static.find({})\n if static.count() > 0:\n config.db.singletons.insert_many(static)\n config.db.singletons.insert(config.db.version.find({}))\n\n configs = config.db.config.find({'latest': True},{'latest':0})\n if configs.count() == 1:\n c = configs[0]\n c['_id'] = 'config'\n config.db.singletons.insert_one(c)\n\n for c in to_be_removed:\n if c in config.db.collection_names():\n config.db.drop_collection(c)", "def test_db_connection(self):\n try:\n database = Database()\n database.get_server_version()\n except (Exception) as error:\n logging.error(\"\\n\\nConnection 
to postgresql\"\n \" failed with error: {}\\n\\n\".format(error))\n assert(False)", "def environment_needs_upgrade(self, db):\n\n return False" ]
[ "0.60887057", "0.57711166", "0.56431997", "0.55289644", "0.5520775", "0.54857856", "0.5430625", "0.5372646", "0.5360392", "0.52892214", "0.5286582", "0.5278977", "0.52780265", "0.52661526", "0.5259783", "0.5211577", "0.51565385", "0.514894", "0.5146001", "0.5137969", "0.5118388", "0.5096248", "0.5089838", "0.5089794", "0.5075383", "0.50698084", "0.5068324", "0.50616986", "0.504583", "0.50418276" ]
0.58272535
1
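A similarly hedged sketch for the engine version upgrade described above, reusing the client and model module from the previous sketch. The instance ID is a placeholder, and the target version is an assumption that should come from a DescribeAvailableEngineVersion call:

request = dds_20151201_models.UpgradeDBInstanceEngineVersionRequest(
    dbinstance_id='dds-bp1example',  # placeholder; the instance must be in the running state
    engine_version='5.0',            # assumption: use a version returned by DescribeAvailableEngineVersion
)
response = client.upgrade_dbinstance_engine_version(request)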
When you call the UpgradeDBInstanceKernelVersion operation, the instance must be in the Running state. > The UpgradeDBInstanceKernelVersion operation is applicable to replica set and sharded cluster instances, but not to standalone instances. > The instance will be restarted once during the upgrade. Call this operation during off-peak hours.
def upgrade_dbinstance_kernel_version( self, request: dds_20151201_models.UpgradeDBInstanceKernelVersionRequest, ) -> dds_20151201_models.UpgradeDBInstanceKernelVersionResponse: runtime = util_models.RuntimeOptions() return self.upgrade_dbinstance_kernel_version_with_options(request, runtime)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def upgrade_dbinstance_kernel_version_async(\n self,\n request: dds_20151201_models.UpgradeDBInstanceKernelVersionRequest,\n ) -> dds_20151201_models.UpgradeDBInstanceKernelVersionResponse:\n runtime = util_models.RuntimeOptions()\n return await self.upgrade_dbinstance_kernel_version_with_options_async(request, runtime)", "def upgrade_dbinstance_kernel_version_with_options(\n self,\n request: dds_20151201_models.UpgradeDBInstanceKernelVersionRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.UpgradeDBInstanceKernelVersionResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='UpgradeDBInstanceKernelVersion',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.UpgradeDBInstanceKernelVersionResponse(),\n self.call_api(params, req, runtime)\n )", "async def upgrade_dbinstance_kernel_version_with_options_async(\n self,\n request: dds_20151201_models.UpgradeDBInstanceKernelVersionRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.UpgradeDBInstanceKernelVersionResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='UpgradeDBInstanceKernelVersion',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.UpgradeDBInstanceKernelVersionResponse(),\n await self.call_api_async(params, req, runtime)\n )", "def upgrade_dbinstance_engine_version(\n self,\n request: dds_20151201_models.UpgradeDBInstanceEngineVersionRequest,\n ) -> dds_20151201_models.UpgradeDBInstanceEngineVersionResponse:\n runtime = util_models.RuntimeOptions()\n return self.upgrade_dbinstance_engine_version_with_options(request, runtime)", "def upgrade_kernel(**kwargs):\n execute(\"upgrade_kernel_node\", env.host_string, **kwargs)", "def upgrade_kernel():\n 
execute(\"upgrade_kernel_node\", env.host_string)", "def update_rds_db_instance(RdsDbInstanceArn=None, DbUser=None, DbPassword=None):\n pass", "async def upgrade_dbinstance_engine_version_async(\n self,\n request: dds_20151201_models.UpgradeDBInstanceEngineVersionRequest,\n ) -> dds_20151201_models.UpgradeDBInstanceEngineVersionResponse:\n runtime = util_models.RuntimeOptions()\n return await self.upgrade_dbinstance_engine_version_with_options_async(request, runtime)", "def restart_dbinstance(\n self,\n request: dds_20151201_models.RestartDBInstanceRequest,\n ) -> dds_20151201_models.RestartDBInstanceResponse:\n runtime = util_models.RuntimeOptions()\n return self.restart_dbinstance_with_options(request, runtime)", "def restart_kernel(self, kernel_id, now=False):", "def RebootInstance(self, instance):\n raise HypervisorError(\"The chroot manager doesn't implement the\"\n \" reboot functionality\")", "def upgrade_dbinstance_engine_version_with_options(\n self,\n request: dds_20151201_models.UpgradeDBInstanceEngineVersionRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.UpgradeDBInstanceEngineVersionResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.engine_version):\n query['EngineVersion'] = request.engine_version\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='UpgradeDBInstanceEngineVersion',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.UpgradeDBInstanceEngineVersionResponse(),\n self.call_api(params, req, runtime)\n )", "def update_notebook_instance(NotebookInstanceName=None, InstanceType=None, RoleArn=None, LifecycleConfigName=None, DisassociateLifecycleConfig=None, VolumeSizeInGB=None, DefaultCodeRepository=None, AdditionalCodeRepositories=None, AcceleratorTypes=None, DisassociateAcceleratorTypes=None, DisassociateDefaultCodeRepository=None, DisassociateAdditionalCodeRepositories=None):\n pass", "def update_instance(InstanceId=None, LayerIds=None, InstanceType=None, AutoScalingType=None, Hostname=None, Os=None, AmiId=None, SshKeyName=None, Architecture=None, InstallUpdatesOnBoot=None, EbsOptimized=None, AgentVersion=None):\n pass", "def kernel_version(self, kernel_version):\n\n self._kernel_version = kernel_version", "def restart_dbinstance_with_options(\n self,\n request: dds_20151201_models.RestartDBInstanceRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.RestartDBInstanceResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.node_id):\n query['NodeId'] = request.node_id\n if not 
UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='RestartDBInstance',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.RestartDBInstanceResponse(),\n self.call_api(params, req, runtime)\n )", "def shutdown_kernel(self, kernel_id, now=False, restart=False):", "async def restart_dbinstance_async(\n self,\n request: dds_20151201_models.RestartDBInstanceRequest,\n ) -> dds_20151201_models.RestartDBInstanceResponse:\n runtime = util_models.RuntimeOptions()\n return await self.restart_dbinstance_with_options_async(request, runtime)", "async def upgrade_dbinstance_engine_version_with_options_async(\n self,\n request: dds_20151201_models.UpgradeDBInstanceEngineVersionRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.UpgradeDBInstanceEngineVersionResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.engine_version):\n query['EngineVersion'] = request.engine_version\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='UpgradeDBInstanceEngineVersion',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.UpgradeDBInstanceEngineVersionResponse(),\n await self.call_api_async(params, req, runtime)\n )", "def reboot_instance(InstanceId=None):\n pass", "def upgrade_kernel_all(*tgzs, **kwargs):\n reboot = kwargs.get('reboot', 'yes')\n execute('create_installer_repo')\n execute('create_install_repo', *tgzs)\n nodes = []\n kernel_ver = kwargs.get('version')\n with settings(host_string=env.roledefs['all'][0], warn_only=True):\n dist, version, extra = get_linux_distro()\n if version == '12.04':\n (package, os_type) = ('linux-image-3.13.0-34-generic', 'ubuntu')\n default_grub='Advanced options for Ubuntu>Ubuntu, with Linux 3.13.0-34-generic'\n elif version == '14.04':\n if kernel_ver is None:\n kernel_ver='3.13.0-106'\n (package, os_type) = ('linux-image-'+kernel_ver+'-generic', 'ubuntu')\n default_grub='Advanced options for Ubuntu>Ubuntu, with Linux 
'+kernel_ver+'-generic'\n elif 'centos linux' in dist.lower() and version.startswith('7'):\n (package, os_type) = ('kernel-3.10.0-327.10.1.el7.x86_64', 'centoslinux')\n elif 'red hat' in dist.lower() and version.startswith('7'):\n (package, os_type) = ('kernel-3.10.0-327.10.1.el7.x86_64', 'redhat')\n else:\n raise RuntimeError(\"Unsupported platfrom (%s, %s, %s) for\"\n \" kernel upgrade.\" % (dist, version, extra))\n nodes = get_nodes_to_upgrade_pkg(package, os_type, *env.roledefs['all'])\n if not nodes:\n print \"kernel is already of expected version\"\n return\n execute(upgrade_kernel_node, *nodes, **kwargs)\n if reboot == 'yes':\n node_list_except_build = list(nodes)\n if env.host_string in nodes:\n node_list_except_build.remove(env.host_string)\n reboot_nodes(*node_list_except_build)\n reboot_nodes(env.host_string)\n else:\n reboot_nodes(*nodes)", "def reboot(self, instance):\n try:\n out, err = utils.execute('sudo', 'vzctl', 'restart',\n instance['id'])\n if err:\n LOG.error(err)\n except ProcessExecutionError:\n raise exception.Error('Failed to restart container: %d' %\n instance['id'])", "def switch_dbinstance_ha(\n self,\n request: dds_20151201_models.SwitchDBInstanceHARequest,\n ) -> dds_20151201_models.SwitchDBInstanceHAResponse:\n runtime = util_models.RuntimeOptions()\n return self.switch_dbinstance_hawith_options(request, runtime)", "def reboot_instance(instance_id):\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n try:\n # Reboot an instance\n response = ec2_resource.Instance(instance_id).reboot(DryRun=False)\n print(response)\n print(\"\\nSuccessfully rebooting instance: \", instance_id)\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == \"InvalidInstanceID.Malformed\":\n print(\"Error: Invalid instance id!!\")\n else:\n raise\n return", "def restart_ec2_instance(client, instance_id):\n\n response = client.reboot_instances(\n InstanceIds=[instance_id],\n )\n return response", "def update_db_version():\n print(\"Checking Database states...\")\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"ADSM.settings\")\n try:\n call_command('migrate', database='scenario_db', interactive=False, fake_initial=True)\n call_command('migrate', database='default', interactive=False, fake_initial=True)\n except:\n print(\"Error: Migration failed.\")\n print('Done migrating databases.')", "def get_kernel_version(self):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)", "def upgrade_kernel_all(reboot='yes'):\n execute('pre_check')\n execute('create_install_repo')\n nodes = []\n with settings(host_string=env.roledefs['all'][0], warn_only=True):\n dist, version, extra = get_linux_distro()\n if version == '12.04':\n (package, os_type) = ('linux-image-3.13.0-34-generic', 'ubuntu')\n elif version == '14.04':\n (package, os_type) = ('linux-image-3.13.0-40-generic', 'ubuntu')\n else:\n raise RuntimeError(\"Unsupported platfrom (%s, %s, %s) for\"\n \" kernel upgrade.\" % (dist, version, extra))\n nodes = get_nodes_to_upgrade_pkg(package, os_type, *env.roledefs['all'])\n if not nodes:\n print \"kernel is already of expected version\"\n return\n execute(upgrade_kernel_node, *nodes)\n if reboot == 'yes':\n node_list_except_build = list(nodes)\n if env.host_string in nodes:\n node_list_except_build.remove(env.host_string)\n reboot_nodes(*node_list_except_build)\n reboot_nodes(env.host_string)\n else:\n reboot_nodes(*nodes)", "def switch_dbinstance_hawith_options(\n self,\n request: 
dds_20151201_models.SwitchDBInstanceHARequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.SwitchDBInstanceHAResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.node_id):\n query['NodeId'] = request.node_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.role_ids):\n query['RoleIds'] = request.role_ids\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n if not UtilClient.is_unset(request.switch_mode):\n query['SwitchMode'] = request.switch_mode\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='SwitchDBInstanceHA',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.SwitchDBInstanceHAResponse(),\n self.call_api(params, req, runtime)\n )", "async def _upgrade_db(self) -> None:\n cur_version = await self._get_db_version()\n for n in range(cur_version + 1, sql_data.CUR_VERSION + 1):\n log.msg('Upgrading database to version %d' % n)\n if n in sql_data.SQL_UPGRADES:\n for command in sql_data.SQL_UPGRADES[n]:\n await self.operation(command)\n if cur_version != sql_data.CUR_VERSION:\n await self._set_db_version(sql_data.CUR_VERSION)" ]
[ "0.713365", "0.7097949", "0.65763366", "0.6048078", "0.56803346", "0.5636641", "0.55429274", "0.5415511", "0.53714454", "0.5316516", "0.5281482", "0.52180755", "0.5102098", "0.50040174", "0.499213", "0.49542388", "0.49341667", "0.48674172", "0.4845689", "0.4839142", "0.48272142", "0.4824538", "0.48026252", "0.477112", "0.47707742", "0.47630265", "0.47553083", "0.47503594", "0.4742487", "0.47246185" ]
0.79366666
0
When you call the UpgradeDBInstanceKernelVersion operation, the instance must be in the Running state. > The UpgradeDBInstanceKernelVersion operation is applicable to replica set and sharded cluster instances, but not to standalone instances. > The instance will be restarted once during the upgrade. Call this operation during off-peak hours.
async def upgrade_dbinstance_kernel_version_async( self, request: dds_20151201_models.UpgradeDBInstanceKernelVersionRequest, ) -> dds_20151201_models.UpgradeDBInstanceKernelVersionResponse: runtime = util_models.RuntimeOptions() return await self.upgrade_dbinstance_kernel_version_with_options_async(request, runtime)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def upgrade_dbinstance_kernel_version(\n self,\n request: dds_20151201_models.UpgradeDBInstanceKernelVersionRequest,\n ) -> dds_20151201_models.UpgradeDBInstanceKernelVersionResponse:\n runtime = util_models.RuntimeOptions()\n return self.upgrade_dbinstance_kernel_version_with_options(request, runtime)", "def upgrade_dbinstance_kernel_version_with_options(\n self,\n request: dds_20151201_models.UpgradeDBInstanceKernelVersionRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.UpgradeDBInstanceKernelVersionResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='UpgradeDBInstanceKernelVersion',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.UpgradeDBInstanceKernelVersionResponse(),\n self.call_api(params, req, runtime)\n )", "async def upgrade_dbinstance_kernel_version_with_options_async(\n self,\n request: dds_20151201_models.UpgradeDBInstanceKernelVersionRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.UpgradeDBInstanceKernelVersionResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='UpgradeDBInstanceKernelVersion',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.UpgradeDBInstanceKernelVersionResponse(),\n await self.call_api_async(params, req, runtime)\n )", "def upgrade_dbinstance_engine_version(\n self,\n request: dds_20151201_models.UpgradeDBInstanceEngineVersionRequest,\n ) -> dds_20151201_models.UpgradeDBInstanceEngineVersionResponse:\n runtime = util_models.RuntimeOptions()\n return self.upgrade_dbinstance_engine_version_with_options(request, runtime)", "def upgrade_kernel(**kwargs):\n execute(\"upgrade_kernel_node\", env.host_string, **kwargs)", "def upgrade_kernel():\n execute(\"upgrade_kernel_node\", 
env.host_string)", "def update_rds_db_instance(RdsDbInstanceArn=None, DbUser=None, DbPassword=None):\n pass", "async def upgrade_dbinstance_engine_version_async(\n self,\n request: dds_20151201_models.UpgradeDBInstanceEngineVersionRequest,\n ) -> dds_20151201_models.UpgradeDBInstanceEngineVersionResponse:\n runtime = util_models.RuntimeOptions()\n return await self.upgrade_dbinstance_engine_version_with_options_async(request, runtime)", "def restart_dbinstance(\n self,\n request: dds_20151201_models.RestartDBInstanceRequest,\n ) -> dds_20151201_models.RestartDBInstanceResponse:\n runtime = util_models.RuntimeOptions()\n return self.restart_dbinstance_with_options(request, runtime)", "def restart_kernel(self, kernel_id, now=False):", "def RebootInstance(self, instance):\n raise HypervisorError(\"The chroot manager doesn't implement the\"\n \" reboot functionality\")", "def upgrade_dbinstance_engine_version_with_options(\n self,\n request: dds_20151201_models.UpgradeDBInstanceEngineVersionRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.UpgradeDBInstanceEngineVersionResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.engine_version):\n query['EngineVersion'] = request.engine_version\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='UpgradeDBInstanceEngineVersion',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.UpgradeDBInstanceEngineVersionResponse(),\n self.call_api(params, req, runtime)\n )", "def update_notebook_instance(NotebookInstanceName=None, InstanceType=None, RoleArn=None, LifecycleConfigName=None, DisassociateLifecycleConfig=None, VolumeSizeInGB=None, DefaultCodeRepository=None, AdditionalCodeRepositories=None, AcceleratorTypes=None, DisassociateAcceleratorTypes=None, DisassociateDefaultCodeRepository=None, DisassociateAdditionalCodeRepositories=None):\n pass", "def update_instance(InstanceId=None, LayerIds=None, InstanceType=None, AutoScalingType=None, Hostname=None, Os=None, AmiId=None, SshKeyName=None, Architecture=None, InstallUpdatesOnBoot=None, EbsOptimized=None, AgentVersion=None):\n pass", "def kernel_version(self, kernel_version):\n\n self._kernel_version = kernel_version", "def restart_dbinstance_with_options(\n self,\n request: dds_20151201_models.RestartDBInstanceRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.RestartDBInstanceResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.node_id):\n query['NodeId'] = request.node_id\n if not 
UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='RestartDBInstance',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.RestartDBInstanceResponse(),\n self.call_api(params, req, runtime)\n )", "def shutdown_kernel(self, kernel_id, now=False, restart=False):", "async def restart_dbinstance_async(\n self,\n request: dds_20151201_models.RestartDBInstanceRequest,\n ) -> dds_20151201_models.RestartDBInstanceResponse:\n runtime = util_models.RuntimeOptions()\n return await self.restart_dbinstance_with_options_async(request, runtime)", "async def upgrade_dbinstance_engine_version_with_options_async(\n self,\n request: dds_20151201_models.UpgradeDBInstanceEngineVersionRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.UpgradeDBInstanceEngineVersionResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.engine_version):\n query['EngineVersion'] = request.engine_version\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='UpgradeDBInstanceEngineVersion',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.UpgradeDBInstanceEngineVersionResponse(),\n await self.call_api_async(params, req, runtime)\n )", "def reboot_instance(InstanceId=None):\n pass", "def upgrade_kernel_all(*tgzs, **kwargs):\n reboot = kwargs.get('reboot', 'yes')\n execute('create_installer_repo')\n execute('create_install_repo', *tgzs)\n nodes = []\n kernel_ver = kwargs.get('version')\n with settings(host_string=env.roledefs['all'][0], warn_only=True):\n dist, version, extra = get_linux_distro()\n if version == '12.04':\n (package, os_type) = ('linux-image-3.13.0-34-generic', 'ubuntu')\n default_grub='Advanced options for Ubuntu>Ubuntu, with Linux 3.13.0-34-generic'\n elif version == '14.04':\n if kernel_ver is None:\n kernel_ver='3.13.0-106'\n (package, os_type) = ('linux-image-'+kernel_ver+'-generic', 'ubuntu')\n default_grub='Advanced options for Ubuntu>Ubuntu, with Linux 
'+kernel_ver+'-generic'\n elif 'centos linux' in dist.lower() and version.startswith('7'):\n (package, os_type) = ('kernel-3.10.0-327.10.1.el7.x86_64', 'centoslinux')\n elif 'red hat' in dist.lower() and version.startswith('7'):\n (package, os_type) = ('kernel-3.10.0-327.10.1.el7.x86_64', 'redhat')\n else:\n raise RuntimeError(\"Unsupported platfrom (%s, %s, %s) for\"\n \" kernel upgrade.\" % (dist, version, extra))\n nodes = get_nodes_to_upgrade_pkg(package, os_type, *env.roledefs['all'])\n if not nodes:\n print \"kernel is already of expected version\"\n return\n execute(upgrade_kernel_node, *nodes, **kwargs)\n if reboot == 'yes':\n node_list_except_build = list(nodes)\n if env.host_string in nodes:\n node_list_except_build.remove(env.host_string)\n reboot_nodes(*node_list_except_build)\n reboot_nodes(env.host_string)\n else:\n reboot_nodes(*nodes)", "def reboot(self, instance):\n try:\n out, err = utils.execute('sudo', 'vzctl', 'restart',\n instance['id'])\n if err:\n LOG.error(err)\n except ProcessExecutionError:\n raise exception.Error('Failed to restart container: %d' %\n instance['id'])", "def switch_dbinstance_ha(\n self,\n request: dds_20151201_models.SwitchDBInstanceHARequest,\n ) -> dds_20151201_models.SwitchDBInstanceHAResponse:\n runtime = util_models.RuntimeOptions()\n return self.switch_dbinstance_hawith_options(request, runtime)", "def reboot_instance(instance_id):\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n try:\n # Reboot an instance\n response = ec2_resource.Instance(instance_id).reboot(DryRun=False)\n print(response)\n print(\"\\nSuccessfully rebooting instance: \", instance_id)\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == \"InvalidInstanceID.Malformed\":\n print(\"Error: Invalid instance id!!\")\n else:\n raise\n return", "def restart_ec2_instance(client, instance_id):\n\n response = client.reboot_instances(\n InstanceIds=[instance_id],\n )\n return response", "def update_db_version():\n print(\"Checking Database states...\")\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"ADSM.settings\")\n try:\n call_command('migrate', database='scenario_db', interactive=False, fake_initial=True)\n call_command('migrate', database='default', interactive=False, fake_initial=True)\n except:\n print(\"Error: Migration failed.\")\n print('Done migrating databases.')", "def get_kernel_version(self):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)", "def upgrade_kernel_all(reboot='yes'):\n execute('pre_check')\n execute('create_install_repo')\n nodes = []\n with settings(host_string=env.roledefs['all'][0], warn_only=True):\n dist, version, extra = get_linux_distro()\n if version == '12.04':\n (package, os_type) = ('linux-image-3.13.0-34-generic', 'ubuntu')\n elif version == '14.04':\n (package, os_type) = ('linux-image-3.13.0-40-generic', 'ubuntu')\n else:\n raise RuntimeError(\"Unsupported platfrom (%s, %s, %s) for\"\n \" kernel upgrade.\" % (dist, version, extra))\n nodes = get_nodes_to_upgrade_pkg(package, os_type, *env.roledefs['all'])\n if not nodes:\n print \"kernel is already of expected version\"\n return\n execute(upgrade_kernel_node, *nodes)\n if reboot == 'yes':\n node_list_except_build = list(nodes)\n if env.host_string in nodes:\n node_list_except_build.remove(env.host_string)\n reboot_nodes(*node_list_except_build)\n reboot_nodes(env.host_string)\n else:\n reboot_nodes(*nodes)", "def switch_dbinstance_hawith_options(\n self,\n request: 
dds_20151201_models.SwitchDBInstanceHARequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.SwitchDBInstanceHAResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.node_id):\n query['NodeId'] = request.node_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.role_ids):\n query['RoleIds'] = request.role_ids\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n if not UtilClient.is_unset(request.switch_mode):\n query['SwitchMode'] = request.switch_mode\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='SwitchDBInstanceHA',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.SwitchDBInstanceHAResponse(),\n self.call_api(params, req, runtime)\n )", "async def _upgrade_db(self) -> None:\n cur_version = await self._get_db_version()\n for n in range(cur_version + 1, sql_data.CUR_VERSION + 1):\n log.msg('Upgrading database to version %d' % n)\n if n in sql_data.SQL_UPGRADES:\n for command in sql_data.SQL_UPGRADES[n]:\n await self.operation(command)\n if cur_version != sql_data.CUR_VERSION:\n await self._set_db_version(sql_data.CUR_VERSION)" ]
[ "0.7936817", "0.70986426", "0.6577163", "0.60453767", "0.56828487", "0.5638934", "0.55407673", "0.54129595", "0.53686017", "0.5318697", "0.5280949", "0.5216302", "0.5101547", "0.5003827", "0.49964812", "0.49521586", "0.4936471", "0.48645243", "0.48442766", "0.48389387", "0.4830036", "0.4824042", "0.48002774", "0.47697324", "0.47693932", "0.4761148", "0.47581944", "0.47528934", "0.47409526", "0.4722575" ]
0.71337795
1
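A pattern recurs in the negatives above: after a cluster-wide kernel upgrade, every node is rebooted, but the node driving the rollout is deferred to the end so the controlling session survives. A minimal sketch of that ordering; `reboot_nodes` is passed in as a callable and is an assumption, not an API from the source.

```python
def reboot_cluster(nodes, current_host, reboot_nodes):
    """Reboot all nodes, saving the node running this code for last."""
    others = [node for node in nodes if node != current_host]
    if others:
        reboot_nodes(*others)       # remote nodes first
    if current_host in nodes:
        reboot_nodes(current_host)  # the driving node reboots itself last
```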
Use features and result to train a Support Vector Machine
def train(features, result): clf = grid_search(result) clf.fit(features, result) return clf
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def trainModel( self, featureTrain, classTrain):", "def trainNewModel():\n print \"Creating feature vectors for trainset...\"\n trainDependencies = getDependency(trainDepFilename)\n trainLabel, trainFeatureVectors = \\\n createFeatureVectors(trainFilename, trainDependencies)\n print \"Length of feature vector for trainset: %d\" \\\n % len(trainFeatureVectors[0])\n if not len(addTrainsetList) == 0:\n print \"Combining feature vectors of additional trainset...\"\n trainLabel, trainFeatureVectors = \\\n combineAdditionalTrainset(\n addTrainsetList, trainLabel, trainFeatureVectors)\n print \"Feature vectors of trainset created.\"\n SVMTrain(trainLabel, trainFeatureVectors, modelFilename)", "def training(string):\n print(\"Training...\")\n vec = create_vector(string)\n print(\"Selecting features...\")\n feature_list = select_features(vec)\n print(\"Done!\")\n return feature_list", "def svm():", "def train(self, features, labels):\n pass", "def train():\n pass", "def __trainLocal__(self, featureVals, targetVals):\n pass", "def test_svm():\n backend = BasicAer.get_backend('statevector_simulator')\n random_seed = r.randint(1, 10598)\n\n quantum_instance = QuantumInstance(backend, seed=random_seed, seed_transpiler=random_seed)\n\n # iris\n pres = \"Test pour le data set Iris (facile, classique)\"\n test_from_func(pres, 15, 10, 3, True, Iris, quantum_instance)\n\n # breast cancer\n pres = \"Test pour le data set Breast Cancer (facile, classique)\"\n test_from_func(pres, 15, 10, 3, True, Breast_cancer, quantum_instance)\n\n # digits (it's long so be careful)\n #pres = \"Test pour le data set Digits (difficile, classique)\"\n #test_from_func(pres, 10, 10, 10, True, Digits, quantum_instance)\n\n # wine\n pres = \"Test pour le data set Wine (moyen, classique)\"\n test_from_func(pres, 15, 10, 5, True, Wine, quantum_instance)\n\n # gaussian\n pres = \"Test pour des données gaussiennes (moyen, classique)\"\n for _ in range(1):\n print(\"\\n\")\n print(\"New iteration\")\n test_from_func(pres, 25, 10, 2, True, Gaussian, quantum_instance)\n print(\"\\n\")\n\n # small adn strings\n pres = \"Test pour des séquences ADN courtes (difficile, classique)\"\n test_from_func(pres, 10, 15, 14, True, Sequence, quantum_instance)", "def buildAndTrain(trainingData):\n\tname = trainingData.drop(['count', 'casual', 'registered'], axis=1).columns\n\ttarget = trainingData['count'].values\n\tfeature = trainingData.drop(['count', 'casual', 'registered'], axis=1).values\n\t# feature scaling\n\tfeature_scaled = preprocessing.scale(feature)\n\t# 0.5 cross validate\n\tcv = cross_validation.ShuffleSplit(len(feature_scaled), n_iter=5, test_size=0.2, random_state=0)\n\t# build model, then training and get accuracy of it\n\tprint('\\n---------岭回归结果--------\\n')\n\tfor train, test in cv:\n\t\tregLR = linear_model.Ridge().fit(feature_scaled[train], target[train])\n\t\tprint('train score:{0:.3f}, test score:{1:.3f}\\n'.format(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tregLR.score(feature_scaled[train], target[train]),\n\t\t regLR.score(feature_scaled[test], target[test])))\n\tprint('\\n---------svm结果--------\\n')\n\tfor train, test in cv:\n\t\tregSvm = svm.SVR().fit(feature_scaled[train], target[train])\n\t\tprint('train score:{0:.3f}, test score:{1:.3f}\\n'.format(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tregSvm.score(feature_scaled[train], target[train]),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tregSvm.score(feature_scaled[test], target[test])))\n\tprint('\\n---------随机森林结果--------\\n')\n\tfor train, test in cv:\n\t\tregRF = 
RandomForestRegressor(n_estimators=100).fit(feature_scaled[train], target[train])\n\t\tprint('train score:{0:.3f}, test score:{1:.3f}\\n'.format(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tregRF.score(feature_scaled[train], target[train]),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tregRF.score(feature_scaled[test], target[test])))\n\t# reduce some low correction feature\n\tfeatureReduced = trainingData.drop(['count', 'casual', 'registered', 'holiday', 'workingday', 'day'], axis=1).values\n\tfeatureReduced_scaled = preprocessing.scale(featureReduced)\n\tprint('\\n---------减少特征维度以避免过拟合后的随机森林结果--------\\n')\n\tfor train, test in cv:\n\t\tregRFImpr = RandomForestRegressor(n_estimators=100).fit(featureReduced_scaled[train], target[train])\n\t\tprint('train score:{0:.3f}, test score:{1:.3f}\\n'.format(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tregRFImpr.score(featureReduced_scaled[train], target[train]),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tregRFImpr.score(featureReduced_scaled[test], target[test])))\n\t# use grid search algorithm to improve random forest regression\n\tX_train, X_test, y_train, y_test = cross_validation.train_test_split(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tfeature_scaled, target, test_size=0.2, random_state=0)\n\ttuned_parameters = [{'n_estimators': [10,100,500], 'max_depth': [2,3,4,5,6,7,8,9,10]}]\n\tscores = ['r2']\n\n\tfor score in scores:\n\t\tprint(score)\n\t\tclf = GridSearchCV(RandomForestRegressor(), tuned_parameters, cv=5, scoring=score)\n\t\tclf.fit(X_train, y_train)\n\t\tprint(clf.best_estimator_)\n\t\tprint('each parameter combination is ')\n\t\tfor params, mean_score, scores in clf.grid_scores_:\n\t\t\tprint('{0:.3f} (+/-{1:.03f}) for {2}'.format(mean_score, scores.std()/2, params))\n\n\tprint('--------最优参数下的随机森林结果--------')\n\tfor train, test in cv:\n\t\tregRFBest = RandomForestRegressor(n_estimators=100, max_depth=10).fit(feature_scaled[train], target[train])\n\t\tprint('train score:{0:.3f}, test score:{1:.3f}\\n'.format(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tregRFBest.score(feature_scaled[train], target[train]),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tregRFBest.score(feature_scaled[test], target[test])))\n\treturn regRFBest, feature_scaled, target", "def train(self, test_vector):\n\t\twith open(self.PATH + '/src/data/train_emma.csv', 'rt') as f:\n\t\t\treader = csv.reader(f)\n\n\t\t\ttrain_data = dict()\n\t\t\ttrain_data_labels = list()\n\t\t\ttrain_data_list = []\n\t\t\ttrain_data_labels_list = []\n\n\t\t\tnext(reader, None)\n\t\t\tfor row in reader:\n\t\t\t\tfor idx in range(len(row)):\n\t\t\t\t\tif idx == 0:\n\t\t\t\t\t\ttrain_data['file'] = row[idx]\n\t\t\t\t\tif idx == 1:\n\t\t\t\t\t\ttrain_data['line'] = int(row[idx])\n\t\t\t\t\tif idx == 2:\n\t\t\t\t\t\ttrain_data['timestamp'] = row[idx]\n\t\t\t\t\tif idx == 3:\n\t\t\t\t\t\ttrain_data_labels.append(row[idx])\n\t\t\t\t\tif idx == 4:\n\t\t\t\t\t\ttrain_data_labels.append(row[idx])\n\n\t\t\t\ttrain_data_list.append(train_data)\n\t\t\t\ttrain_data_labels_list.append(train_data_labels)\n\t\t\t\ttrain_data = dict()\n\t\t\t\ttrain_data_labels = list()\n\n\t\t\tC = 0.8\n\t\t\tdict_vectorizer = DictVectorizer(sparse=False)\n\t\t\ttrain_data_trasformed = dict_vectorizer.fit_transform(train_data_list)\n\t\t\ttest_vector_transformed = dict_vectorizer.transform(test_vector)\n\n\t\t\t# print(dict_vectorizer.get_feature_names())\n\t\t\t# print(dict_vectorizer.inverse_transform(train_data_trasformed))\n\n\t\t\t# print('Inverse transformation !!!')\n\t\t\t# print(test_vector)\n\t\t\t# inv_trans = dict_vectorizer.inverse_transform(test_vector_transformed)\n\n\t\t\t# fit 
LinearSVC\n\t\t\t# multi label binarizer to convert iterable of iterables into processing format\n\t\t\tmlb = MultiLabelBinarizer()\n\t\t\ty_enc = mlb.fit_transform(train_data_labels_list)\n\n\t\t\ttrain_vector = OneVsRestClassifier(svm.SVC(probability=True))\n\t\t\tclassifier_rbf = train_vector.fit(train_data_trasformed, y_enc)\n\n\t\t\t# test_vecc = cnt_vectorizer.fit_transform(X[:, 0])\n\t\t\t# # todo use pickle to persist\n\t\t\t# test_vector_reshaped = np.array(test_vector.ravel()).reshape((1, -1))\n\t\t\tprediction = classifier_rbf.predict(test_vector_transformed)\n\n\n\t\t\tprint(\"Predicted usernames: \\n\")\n\t\t\t# print(prediction)\n\t\t\t# print(mlb.inverse_transform(prediction))\n\n\t\t\tusers = self.parse_prediction(mlb.inverse_transform(prediction))\n\t\t\tprint(users)\n\t\t\treturn users", "def setup_svm_classifier(training_data, y_training, testing_data, features, method=\"count\", ngrams=(1,1)):\n # generate x and y training data\n\n if method == \"count\":\n vec, x_training, x_testing = define_features_vectorizer(features, training_data, testing_data,ngramrange=ngrams)\n elif method == \"tfidf\":\n vec, x_training, x_testing = define_features_tfidf(features, training_data, testing_data,ngramrange=ngrams)\n else:\n print(\"Method has to be either count or tfidf\")\n return 1\n\n # train classifier\n\n model = SVMClassifier_scratch()\n model.fit(x_training, y_training)\n\n return model, vec, x_testing", "def train(self):\n\t\traise NotImplementedError", "def test_machine_learning():", "def train():\n # YOUR TRAINING CODE GOES HERE", "def test_stage_0():\n\tra_1 = readImage(TRAIN_RAW_IMAGE_1)\n\tre_1 = readImage(TRAIN_RESULT_IMAGE_1)\n\n\tra_2 = readImage(TRAIN_RAW_IMAGE_2)\n\tre_2 = readImage(TRAIN_RESULT_IMAGE_2)\n\n\t# Uncomment below if more examples are required.\n\t# ra_3 = readImage(TRAIN_RAW_IMAGE_3)\n\t# re_3 = readImage(TRAIN_RESULT_IMAGE_3)\n\n\t# Uncomment below if the additional features are needed.\n\t# ra_1 += (\n\t# \tlaplace_operator(TRAIN_RAW_IMAGE_1),\\\n\t# \t# k_means(TRAIN_RAW_IMAGE_1)[0],\\\n\t# \t)\n\n\t# Uncomment below if the additional features are needed.\n\t# ra_2 += (\n\t# \tlaplace_operator(TRAIN_RAW_IMAGE_2),\\\n\t# \t# k_means(TRAIN_RAW_IMAGE_2)[0],\\\n\t# \t)\n\n\t# The prediction model is obtained and trained.\n\tengine = get_model((ra_1, ra_2,), (re_1, re_2,), model_type=SVM, percentage=0.1)\n\n\ttest_percentage = float(1) # how many tests\n\n\tra_1 = readImage(TEST_RAW_IMAGE_1)\n\n\t# Uncomment below if the additional features are needed.\n\t# ra_1 += (\n\t# \tlaplace_operator(TEST_RAW_IMAGE_1),\\\n\t# \t# k_means(TEST_RAW_IMAGE_1)[0],\\\n\t# \t)\n\n\tre_1 = readImage(TEST_RESULT_IMAGE_1)\n\n\t# ra_2 = readImage(TEST_RAW_IMAGE_2)\n\t# re_2 = readImage(TEST_RESULT_IMAGE_2)\n\n\tinput_vec = []\n\t# The features are extracted.\n\tinput_vec += buildFeatureArray_2(ra_1[0], ra_1[1], ra_1[2],\\\n\t\tRADIUS_ARRAY,\\\n\t\tadditional_feats=([] if len(ra_1) == 3 else ra_1[3:]))\n\n\tex_no = int(test_percentage * len(input_vec)) # actual number of the test sample\n\n\toutput_vec = []\n\toutput_vec += matrixToArray(re_1[0], lambda el: 1 if el == 255 else 0)\n\n\tprint('Will start predicting...')\n\n\tpredicted_vec = engine.predict(input_vec[:ex_no])\n\n\tcounter = float(0)\n\tfor y, p in zip(output_vec[:ex_no], predicted_vec[:ex_no]):\n\t\tif y == p: counter += 1\n\n\tprint('Accuracy: ' + str(counter/ex_no))\n\n\tpredicted_mat = arrayToMatrix( predicted_vec, len(re_1[0]), len(re_1[0][0]),\\\n\t\tlambda el: 255 if el == 1 else 0)\n\n\t# The predicted 
segmentation is saved.\n\tsave_rgb_img(\\\n\t np.array(predicted_mat).transpose(),\\\n\t np.array(predicted_mat).transpose(),\\\n\t np.array(predicted_mat).transpose(),\\\n\t 'pred.bmp',\\\n\t)", "def train(self, features, labels, seed=None):\n raise NotImplementedError('Not implemented')", "def train(self):\n feature = Feature(trained=False)\n classifier = LogisticRegression(\n penalty='l2',\n max_iter=100,\n solver='liblinear',\n random_state=self.RAND_SEED)\n\n true_labels = []\n predicted_labels = []\n\n for subj in self.subjects:\n print(subj)\n # preprocess training and testing set\n self.dataset_gen(subject=subj, valid=False)\n\n # train and predict\n pipeline_steps = [('vectorized', feature.vector)]\n if self.istfidf:\n pipeline_steps.append(('tf-idf', feature.tfidftransform))\n if self.islda == 'small':\n pipeline_steps.append(('lda', feature.ldatransform_small))\n elif self.islda == 'large':\n pipeline_steps.append(('lda', feature.ldatransform_large))\n else:\n pass\n if self.isnorm:\n pipeline_steps.append(('scalar', StandardScaler(with_mean=False)))\n pipeline_steps.append(('clf', classifier))\n model = Pipeline(steps=pipeline_steps)\n\n model.fit(self.X_train, self.y_train)\n\n predicted = model.predict(self.X_test)\n # hamming\n predicted_labels.append(predicted)\n true_labels.append(self.y_test)\n\n true_matrix, pred_matrix = np.array(true_labels, int).T, np.array(predicted_labels, int).T\n true_matrix[true_matrix == -1] = 0\n pred_matrix[pred_matrix == -1] = 0\n\n evaluation = Evaluation(self.subjects)\n evaluation.model_evaluate(true_matrix=true_matrix, pred_matrix=pred_matrix, model_name=self.modelname)", "def walk_forward_cv(self):\r\n for output_name in self.output_names:\r\n print('\\t\\t\\t|--Prediction type: {}'.format(output_name))\r\n optimal_params_by_model = {}\r\n cv_metadata_by_model = {}\r\n cv_predictions_by_model = {}\r\n \r\n print('\\t\\t\\t\\t|--SVM Model')\r\n svm = SupportVectorMachine()\r\n svm.cv_params = self.cv_params\r\n svm.test_name = self.test_name\r\n svm.full_df = self.full_df\r\n svm.feature_names = self.feature_names\r\n svm.output_name = output_name\r\n svm.run_svm_cv()\r\n optimal_params_by_model['SVM'] = svm.svm_optimal_params\r\n cv_metadata_by_model['SVM'] = svm.metadata\r\n cv_predictions_by_model['SVM'] = svm.svm_cv_predictions\r\n \r\n self.optimal_params_by_output[output_name] = optimal_params_by_model\r\n self.cv_metadata_by_output[output_name] = cv_metadata_by_model\r\n self.cv_predictions_by_output[output_name] = cv_predictions_by_model", "def learn1_svc():\n \n svc.fit(vector_training,sentiment_training) ##fit the training data of vector tweets and sentiments using LinearSVC\n correct = 0\n for i in range(vector_testing.shape[0]): ##using the testing data, see how accurate LinearSVC is\n prediction = svc.predict(vector_testing[i])\n sentiment = sentiment_testing[i]\n if prediction[0] == sentiment:\n correct +=1\n accuracy = correct/vector_testing.shape[0]\n print('Linear Support Vector Classifier Testing Accuracy: {:.2f}'.format(accuracy)) ##print the accuracy of the algorithm", "def train_svm_model(self, X_train, X_test, y_train, y_test):\r\n clf = Pipeline([('vect', CountVectorizer()), ('tfidf', TfidfTransformer()),\r\n ('clf', LinearSVC())])\r\n clf = clf.fit(X_train, y_train)\r\n pred = clf.predict(X_test)\r\n print('Confusion matrix\\n',confusion_matrix(y_test,pred))\r\n print('Classification_report\\n',classification_report(y_test,pred))\r\n return clf", "def train(self):\n pass", "def train(self):\n pass", "def 
train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self, trainingData, trainingLabels, validationData, validationLabels):\n from sklearn import svm\n \n \"*** YOUR CODE HERE ***\"\n self.sklearn_svm = svm.SVC(C=5, kernel='rbf', gamma=0.005, decision_function_shape='ovo')\n self.sklearn_svm.fit(trainingData, trainingLabels)", "def train(self)->None:", "def train(self):\n raise NotImplementedError", "def train_func(sets,\n\t\t\t\t names=[\"Cake.lie\",\"Cake.lie1\",\"Cake.lie2\",\"Cake.lie3\",\"Cake.lie4\",\"Cake.lie5\",\"Cake.lie6\",\"Cake.lie7\"],\n\t\t\t\t adds=[\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\"],\n\t\t\t\t nfolds=10, cv = None,n_params=100,debug=True):\n\t\n\tret_preds = []\n\n\tmodel = SVR()\n\n\tparams = {\n\t 'C': uniform(0.01,300.0),\n\t 'epsilon': uniform(0.01,300.0),\n\t 'gamma' : [0.001,0.005,0.01,0.05,0.1,0.5,1.0,10.0,100.0],\n\t 'max_iter' : [100000000],\n\t 'tol' : [1e-8],\n\t 'kernel': [\"linear\",\"rbf\"]\n\t}\n\t\n\tif debug: print(\"Training SVM...\")\n\tmodel,preds = train_model(sets.drop([\"time\",\"IDENTIFIER\",\"system\"],axis=1, errors='ignore'),\n\t\t\t\t\t \t\t\t\t\t sets[\"time\"],params,model,\n\t\t\t\t\t \t\t\t\t\t scale=False,cv = cv,n_params=n_params)\n\tif debug: print(\"Done training SVM...\")\n\t\n\toutfile = open(\"preds/%s_SVM%s.txt\" % (names[3],adds[3]),\"w\")\n\tfor val in zip(list(sets[\"IDENTIFIER\"]),list(sets[\"time\"]),preds):\n\t\toutfile.write(\"%s,%s,%s\\n\" % val)\n\toutfile.close()\n\n\twith open('mods/%s_SVM%s.pickle' % (names[3],adds[3]), \"w\") as f: \n \t\tpickle.dump(model, f)\n\n\tret_preds.append(preds)\n\t\n\tmodel = SVR()\n\n\tparams = {\n\t 'C': uniform(0.01,300.0),\n\t 'epsilon': uniform(0.01,300.0),\n\t 'max_iter' : [100000000],\n\t 'tol' : [1e-8],\n\t 'kernel': [\"linear\"]\n\t}\n\t\n\tif debug: print(\"Training SVM...\")\n\tmodel,preds = train_model(sets.drop([\"time\",\"IDENTIFIER\",\"system\"],axis=1, errors='ignore'),\n\t\t\t\t\t \t\t\t\t\t sets[\"time\"],params,model,\n\t\t\t\t\t \t\t\t\t\t scale=False,cv = cv,n_params=n_params)\n\tif debug: print(\"Done training SVM...\")\n\t\n\toutfile = open(\"preds/%s_SVML%s.txt\" % (names[3],adds[3]),\"w\")\n\tfor val in zip(list(sets[\"IDENTIFIER\"]),list(sets[\"time\"]),preds):\n\t\toutfile.write(\"%s,%s,%s\\n\" % val)\n\toutfile.close()\n\n\twith open('mods/%s_SVML%s.pickle' % (names[3],adds[3]), \"w\") as f: \n \t\tpickle.dump(model, f)\n\n\tret_preds.append(preds)\n \n\tmodel = SVR()\n\n\tparams = {\n\t 'C': uniform(0.01,300.0),\n\t 'epsilon': uniform(0.01,300.0),\n 'gamma' : expon(scale=.1),\n\t 'max_iter' : [100000000],\n\t 'tol' : [1e-8],\n\t 'kernel': [\"rbf\"]\n\t}\n\t\n\tif debug: print(\"Training SVM...\")\n\tmodel,preds = train_model(sets.drop([\"time\",\"IDENTIFIER\",\"system\"],axis=1, errors='ignore'),\n\t\t\t\t\t \t\t\t\t\t sets[\"time\"],params,model,\n\t\t\t\t\t \t\t\t\t\t scale=False,cv = cv,n_params=n_params)\n\tif debug: print(\"Done training SVM...\")\n\t\n\toutfile = open(\"preds/%s_SVMRBF%s.txt\" % (names[3],adds[3]),\"w\")\n\tfor val in zip(list(sets[\"IDENTIFIER\"]),list(sets[\"time\"]),preds):\n\t\toutfile.write(\"%s,%s,%s\\n\" % val)\n\toutfile.close()\n\n\twith open('mods/%s_SVMRBF%s.pickle' % (names[3],adds[3]), \"w\") as f: \n \t\tpickle.dump(model, f)\n\n\tret_preds.append(preds)\n \n\tret_preds = pd.DataFrame(ret_preds).transpose()\n \n\tret_preds.columns = [\n\t\t\t\t \"%s_SVM_orig%s\" % (names[0],adds[0]),\n\t\t\t\t \"%s_SVML%s\" % (names[1],adds[1]),\n\t\t\t\t \"%s_SVMRBF%s\" % 
(names[2],adds[2])]\n\n\n\treturn(ret_preds)", "def svm_clf_training(max_features, data):\r\n X_train, y_train, X_test, y_test = data\r\n clf = Pipeline([('feature_selection', SelectKBest(score_func=chi2, k=max_features)),\r\n ('clf', svm.SVC(C=1., kernel='linear'))])\r\n\r\n vectorizer = CountVectorizer(ngram_range=(1, 2), lowercase=True) # unigrams and bigrams\r\n X_matrix_tr = vectorizer.fit_transform(X_train)\r\n # parameters = [{'clf__kernel': ['linear'], 'clf__C': [0.1, 1, 10, 100]},\r\n # {'clf__kernel': ['rbf'], 'clf__C': [0.1, 1, 10, 100], 'clf__gamma': [0.001, 0.01, 0.1]},\r\n # {'clf__kernel': ['poly'], 'clf__C': [0.1, 1, 10, 100], 'clf__degree': [2, 3, 4, 5]}]\r\n # clf = GridSearchCV(svc, parameters, scoring='accuracy')\r\n clf.fit(X_matrix_tr, y_train)\r\n # print(\"Best parameters set found on development set:\")\r\n # print()\r\n # print(clf.best_estimator_)\r\n # print()\r\n # print(\"Grid scores on development set:\")\r\n # print()\r\n # for params, mean_score, scores in clf.grid_scores_:\r\n # print(\"%0.3f (+/-%0.03f) for %r\"\r\n # % (mean_score, scores.std() / 2, params))\r\n # print()\r\n voc = vectorizer.get_feature_names()\r\n # vectorizer1 = CountVectorizer(ngram_range=(1, 2), lowercase=True, vocabulary=voc)\r\n # X_matrix_val = vectorizer1.fit_transform(X_test)\r\n # y_pred = clf.predict(X_matrix_val)\r\n\r\n # for i in range(len(X_test)):\r\n # if y_test[i] != y_pred[i]:\r\n # print(X_test[i], y_test[i], y_pred[i])\r\n # print(classification_report(y_test, y_pred))\r\n return clf, voc" ]
[ "0.7079623", "0.6985331", "0.6951296", "0.68263084", "0.67677474", "0.6763887", "0.67388266", "0.66454804", "0.66344404", "0.6605667", "0.6589454", "0.65607995", "0.6531744", "0.6518474", "0.6516882", "0.64959514", "0.6491756", "0.64540136", "0.64505", "0.64383566", "0.64315933", "0.64315933", "0.64315933", "0.64315933", "0.64315933", "0.640507", "0.63803285", "0.63614905", "0.63611233", "0.6356435" ]
0.700062
1
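The matched `train` above calls an undefined `grid_search` helper and, oddly, passes it the labels. A self-contained sketch of the intended idea using scikit-learn's `GridSearchCV`; the parameter grid is illustrative, not taken from the source.

```python
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC

def train(features, result):
    """Fit an SVM, choosing C and gamma by 5-fold cross-validated grid search."""
    param_grid = {"C": [0.1, 1, 10], "gamma": ["scale", 0.01, 0.001]}
    search = GridSearchCV(SVC(), param_grid, cv=5)
    search.fit(features, result)
    return search.best_estimator_  # refit on the full data by default
```

Calling `train(X, y)` returns the refitted best estimator, ready for `predict()`.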
When a user asks for a potential_category, rank the possible categories by relevance and return the top match
def find_match(potential_category: str, categories: List[str]): return process.extractOne(potential_category, categories)[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def FoodRank(search_term):\n return _ranks[search_term.lower()]", "def test_category(self):\n # XXX identifiers would be groovy\n self.check_search(\n dict(category=u'36:self'), # trap\n [u'Ingrain'],\n 'simple category search, vs self',\n exact=True,\n )\n self.check_search(\n dict(category=u'14:target'), # protect\n [u'Conversion 2', u'False Swipe'],\n 'simple category search, vs target',\n exact=True,\n )\n\n # Multiple categories\n # sleep OR attack up\n self.check_search(\n dict(category=[u'29:self', u'15:target'], category_operator=u'any'),\n [u'Rest', u'Swagger'],\n 'multiple category search (OR)',\n exact=True,\n )\n\n # sleep AND heal self\n self.check_search(\n dict(category=[u'29:self', u'13:self'], category_operator=u'all'),\n [u'Rest'],\n 'multiple category search (AND)',\n exact=True,\n )", "def lookup_relevant(score):\n category = \"\"\n if score > 2.0:\n category = \"RELEVANT\"\n elif score > 0.0:\n category = \"PARTIALLY RELEVANT\"\n else:\n category = \"NOT RELEVANT\"\n return category", "def _look_in_concordance(self, term, concordance):\n\n suggested = dict()\n words = [word.strip(',.:;*').lower() \\\n for word in str(self.tree.item(term)['values'][0]).split(' ')]\n# messagebox.showwarning(\"_look_in_concordance\",\"words={}\".format(words))\n for word in words:\n if word in concordance:\n for item in concordance[word]:\n if item in suggested:\n suggested[item] += 1\n else:\n suggested[item] = 1\n# if word == 'ad':\n# messagebox.showwarning(\"word 'ad' suggested?\",\"suggested={}\".format(suggested))\n# pass\n rank = sorted(suggested, key=suggested.get, reverse=True)\n for item in rank:\n if item not in self.tree.get_children(term):\n self.tree.insert(term,'end', \\\n values=[self.tree.item(item)['values'][0], \\\n self.tree.item(item)['values'][1]],\\\n text='possible', tags=('suggestions',))\n if len(rank) > 0 and self.tree.parent(term) != 'suggestions':\n for child in self.tree.get_children(term):\n self.tree.item(item, tags='suggestions')\n self.tree.item(term, tags='suggestions')\n self.tree.move(term, 'suggestions', 'end')", "def search_categorie(input) :\n j = _jpdb()\n _input = _process_search_input(input)\n if not _input : return None\n f = j.base_format\n q = Query().select(f.categories, f.categories.id, f.categories.name)\n q.where().equal(f.categories.name, _input)\n categorie_data = j.executeQuery(q)\n\n if categorie_data: \n cat_id, cat_name = categorie_data[0]\n examples = _create_examples(j.list_word_by_categorie, cat_name)\n return SelectorResult('categorie', cat_id, cat_name, *examples)", "def get_category_scores(category: Category):\r\n solutions = Solution.objects.filter(challenge__category=category).select_related(\"user\").select_related(\"challenge\")\r\n d = dict()\r\n\r\n for sol in solutions:\r\n d[sol.user] = d.get(sol.user, 0) + sol.get_score()\r\n \r\n return d", "async def search_by_product_or_category(\n conn, cursor, product: str = \"\", category: str = \"\"\n) -> List[str]:\n\n if (not product) and (not category):\n filter_term = \"\"\n elif product and category:\n filter_term = (\n f\"\\n WHERE product = '{product}' AND category = '{category}'\"\n )\n elif product:\n filter_term = f\"\\n WHERE product = '{product}'\"\n else:\n filter_term = f\"\\n WHERE category = '{category}'\"\n\n statement = f\"\"\"\n SELECT product.name as product,\n product.description as description,\n product.category as category,\n supplier_product.price as price,\n supplier_product.supplier as supplier,\n supplier_product.price as price,\n product.rating as 
product_rating,\n supplier.rating as supplier_rating,\n ROUND(((product.rating + supplier.rating)/2),2) as combined_rating,\n product.last_updated as last_updated \n FROM product \n INNER JOIN supplier_product\n ON product.name = supplier_product.product\n INNER JOIN supplier \n ON supplier_product.supplier = supplier.name {filter_term}\n ORDER BY (product.rating + supplier.rating) DESC\n \"\"\"\n await cursor.execute(statement)\n categories = await cursor.fetchall()\n return categories", "def category(self):\n\n for category, match_list in rule_list:\n for match in match_list:\n if match.match(self):\n return category\n\n return None", "def calculate_rank(category, word_pairs): \n rand_vals = np.random.uniform(-1, 100, size=len(word_pairs)) #random numbers\n ranked = sorted(zip(rand_vals, word_pairs), key=lambda x:x[0], reverse=True) #sorting them\n formatted = []\n for e in ranked:\n prob = '%.1f'%e[0]\n formatted.append('{} \"{}:{}\"'.format(prob, e[1][0],e[1][1]))\n return formatted", "def suggest_categories(self, category, max_results=10, blogid=1):\n return self.execute('wp.suggestCategories', blogid, self.username, self.password, category, max_results)", "def search(query):\n r = requests.get(BASE_URL + str(query))\n page_body = r.text\n # Hand the page source to Beautiful Soup\n\n soup = BeautifulSoup(page_body, 'html.parser')\n \n product_item = soup.select('div.product-info-item')\n if(len(product_item)==0):\n product_item = soup.select('div.cat')\n #get the cateegory\n product_item = product_item[0]\n category = str(product_item.find_all('a'))\n category = category[category.find(\">\")+1:-5]\n \n url = str(soup.findAll('meta',property=\"og:url\"))\n url_splitted = url.split('/')\n print(url)\n #parent_category = url_splitted[4]\n if(len(url)>20):\n parent_category = url_splitted[4]\n else:\n parent_category = None\n\n \n return category, parent_category", "def eval_category(self, u_eval, v_compares):\n # get cosinus sim with k-NN\n # cos_sim_results = [(tag, cos_sim) ... 
] \n cos_sim_results = self.kNN(u_eval, v_compares)\n\n # found the most common tag\n c = Counter([tag for tag, _ in cos_sim_results])\n try:\n tag, number = c.most_common(1)[0]\n except IndexError: # No result (cos_sim_results is empty)\n logging.error(\"No results for %s %s\" % (u_eval, cos_sim_results))\n return\n\n # get the cosinus similarity average for the founded tag\n average = 0.0\n for _tag, _number in cos_sim_results:\n if _tag == tag:\n average += _number\n average /= number\n\n logging.debug(\"%s common tag %s (nb %s) (av %s)\" % \\\n (u_eval, tag, number, average))\n\n return tag, average", "def display_by_category(request, category):\n if request.method == \"POST\":\n form = SearchForm(request.POST)\n if form.is_valid():\n products = AuctionListing.objects.filter(buyer=None).filter(\n category__iexact=category).filter(title__icontains=form.cleaned_data[\"query\"])\n else:\n products = AuctionListing.objects.filter(buyer=None).filter(\n category__iexact=category)\n else:\n products = AuctionListing.objects.filter(buyer=None).filter(\n category__iexact=category)\n form = None\n bids = []\n for product in products:\n bid = product.bids.all().aggregate(Max(\"bid\")).get(\"bid__max\")\n bids.append(bid)\n return render(request, \"auctions/index.html\", {\n \"zip_products_bids\": zip(products, bids),\n \"category\": category,\n \"form\": form,\n \"title\": \"Active Listing\",\n })", "def keyword_classifier(utterance):\n categories = {\n 'hello': ['hi ', 'greetings', 'hello', 'what\\'s up', 'hey ', 'how are you?', 'good morning', 'good night',\n 'good evening', 'good day', 'howdy', 'hi-ya', 'hey ya'],\n 'bye': ['bye', 'cheerio', 'adios', 'sayonara', 'peace out', 'see ya', 'see you', 'c ya', 'c you', 'ciao'],\n 'ack': ['okay', 'whatever', 'ok ', 'o.k. 
', 'kay ', 'fine '],\n 'confirm': ['is it', 'is that', 'make sure', 'confirm', 'double check', 'check again', 'does it'],\n 'deny': ['dont want', 'don\\'t want', 'wrong', 'dont like', 'don\\'t like'],\n 'inform': ['dont care', 'don\\'t care', 'whatever', 'bakery', 'bar', 'cafe', 'coffeeshop', 'pub', 'restaurants',\n 'roadhouse', 'african',\n 'american', 'arabian', 'asian', 'international', 'european', 'central american', 'middle eastern',\n 'world', 'vegan', 'vegetarian', 'free', 'kosher', 'traditional', 'fusion', 'modern', 'afghan',\n 'algerian', 'angolan', 'argentine',\n 'austrian', 'australian', 'bangladeshi', 'belarusian', 'belgian', 'bolivian', 'bosnian',\n 'herzegovinian', 'brazilian', 'british', 'bulgarian', 'cambodian',\n 'cameroonian', 'canadian', 'cantonese', 'catalan', 'caribbean', 'chadian', 'chinese', 'colombian',\n 'costa rican', 'czech', 'congolese', 'cuban', 'danish', 'ecuadorian', 'salvadoran', 'emirati',\n 'english', 'eritrean',\n 'estonian',\n 'ethiopian', 'finnish', 'french', 'german', 'ghanaian', 'greek', 'guatemalan', 'dutch', 'honduran',\n 'hungarian', 'icelandic',\n 'indian', 'indonesian', 'iranian', 'iraqi', 'irish', 'israeli', 'italian', 'ivorian', 'jamaican',\n 'japanese',\n 'jordanian', 'kazakh', 'kenyan', 'korean', 'lao', 'latvian', 'lebanese', 'libyan', 'lithuanian',\n 'malagasy', 'malaysian',\n 'mali', 'mauritanian', 'mediterranean', 'mexican', 'moroccan', 'namibian', 'new zealand',\n 'nicaraguan',\n 'nigerien', 'nigerian', 'norwegian', 'omani', 'pakistani', 'panamanian', 'paraguayan', 'peruvian',\n 'persian', 'philippine', 'polynesian', 'polish', 'portuguese', 'romanian', 'russian', 'scottish',\n 'senegalese', 'serbian',\n 'singaporean', 'slovak', 'somalian', 'spanish', 'sudanese', 'swedish', 'swiss', 'syrian', 'thai',\n 'tunisian', 'turkish',\n 'ukranian', 'uruguayan', 'vietnamese', 'welsh', 'zambian', 'zimbabwean', 'west', 'north', 'south',\n 'east', 'part of town', 'moderate', 'expensive', 'cheap', 'any ', 'priced', 'barbecue', 'burger',\n 'chicken',\n 'doughnut', 'fast food',\n 'fish and chips', 'hamburger', 'hot dog', 'ice cream', 'noodles', 'pasta', 'pancake', 'pizza',\n 'ramen', 'restaurant', 'seafood', 'steak',\n 'sandwich', 'sushi'],\n 'negate': ['no ', 'false', 'nope'],\n 'repeat': ['repeat', 'say again', 'what was that'],\n 'reqalts': ['how about', 'what about', 'anything else'],\n 'reqmore': ['more', 'additional information'],\n 'request': ['what', 'whats' 'what\\'s', 'why', 'where', 'when', 'how much', 'may', 'address', 'post code',\n 'location', 'phone number'],\n 'restart': ['reset', 'start over', 'restart'],\n 'thankyou': ['thank you', 'cheers', 'thanks'],\n 'affirm': ['ye ', 'yes', 'right ']\n }\n\n classification = []\n sentence_to_classify = utterance.lower()\n for category, keywords in categories.items():\n keywords_found = [keyword for keyword in keywords if keyword in sentence_to_classify]\n if len(keywords_found) > 0: classification.append(category)\n\n return classification if len(classification) > 0 else ['null']", "def sort_by_ratings():\n\n print(\"***** Find Businesses by Categories Sorted by Rate *****\")\n while True:\n print()\n category = input(\n 'Please enter a type of business (category) or type \"back\" or \"quit\": ')\n print()\n if category == \"quit\":\n print(\"Goodbye!\")\n sys.exit()\n if category == \"back\":\n return\n\n # create a regex pattern for business name\n pattern = r\".*\" + re.escape(category) + r\".*\"\n regx = re.compile(pattern, re.IGNORECASE)\n\n cursor = business_col.find({\"categories\": regx})\n\n 
business_objects = cursor.limit(10).sort(\"stars\", -1)\n\n if cursor.count() == 0:\n print(\"No businesses found with given category.\")\n continue\n for business_object in business_objects:\n print(f'Stars: {business_object[\"stars\"]}')\n print_business(business_object)", "def search_substitute(product):\r\n cursor.execute('USE openfoodfacts;')\r\n # Make a string with the categories used in the query\r\n search = product.category\r\n # Other variable\r\n product_name = product.name\r\n product_score = product.nutri_score\r\n\r\n cursor.execute(\"\"\"SELECT Food.id, Food.name, categories_id, nutri_score, url, stores \\\r\n FROM Food \\\r\n INNER JOIN Categories ON Food.categories_id = Categories.name\\\r\n WHERE categories_id LIKE %s AND Food.name NOT LIKE %s \\\r\n AND Food.nutri_score <= %s \"\"\", (search, product_name, product_score))\r\n substitute = cursor.fetchone()\r\n try:\r\n return cl.Food(substitute)\r\n except TypeError:\r\n print(\"Désolé, il n'y a pas de substitut pour ce product...\")", "def classify(self, doc, default=None):\n probs = {}\n \n # Find the category with the highest probability\n max = Decimal(0)\n for cat in self.categories():\n probs[cat] = self.prob(doc, cat)\n if probs[cat] > max: \n max = probs[cat]\n best = cat\n\n if max == 0.0:\n return default\n \n # Make sure the probability exceeds threshold*next best\n for cat in probs:\n if cat == best:\n continue\n if probs[cat]*Decimal(str(self.get_threshold(best)))>probs[best]:\n return default\n \n print probs[best]\n return best", "def category_match(self,single_word, input_dataframe):\n temp_set = self.index_dict[single_word]\n category_list = list(temp_set)\n filtered_df = input_dataframe.loc[category_list]\n return filtered_df", "def get_topk_terms(self, k, label, term_type='w'):\n counter = Counter()\n # Retrieve reviews with the input label\n label_reviews = self.df[self.df[self.truth_col] == label][self.review_col]\n for review in label_reviews:\n terms = self.tokenize(review, term_type)\n counter.update(terms)\n\n return counter.most_common(k)", "def rank_results(result_list, search_title, search_artist, uploader_list):\n #scores = []\n #search_artist = search_artist.replace(\"+\", \" \").lower()\n search_title = search_title.replace(\"+\", \" \")\n #search_terms = search_title.split() + search_artist.split()\n\n ## Give score to each result\n #for index, title in enumerate(result_list):\n # title = title.lower()\n # score = 0\n\n # # One point for each word in result title\n # for term in search_terms:\n # if term in title:\n # score += 1\n\n # # 2 points if whole title in result, 2 points for whole artist, 4 points for both\n # if search_title in title:\n # score += 2\n # if search_artist in title:\n # score += 2\n # if search_title in title and search_artist in title:\n # score += 4\n # if search_title == title and (uploader_list[index] == search_artist+\" - topic\" or uploader_list[index] == 'various artists - topic' or uploader_list[index] == search_artist or uploader_list[index] == search_artist+'\\\\xa0'):\n # score += 100\n # if 'karaoke' in title:\n # score-=1000\n\n # scores.append(score)\n\n # return scores.index(max(scores))\n for index, title in enumerate(result_list):\n title = title\n if search_title == title:\n return index\n\n return 0", "def get_closest(list_of_nearby, favorite_place):\n\tref_rating = float(favorite_place[\"rating\"]) # this is a float\n\tref_price_len = len(favorite_place[\"price\"]) # this is the length of the dollar sign - an int\n\tref_categ = 
favorite_place[\"categories\"] # this is a string!\n\n\tfor item in list_of_nearby:\n\t\tscore = 0\n\t\tlist_of_cat_words = item[categories].split()\n\t\tfor word in list_of_cat_words:\n\t\t\tif word in ref_categ:\n\t\t\t\tscore += 1\n\t\tscore = score * 5\n\t\tscore = score - 2 * abs(len(item[\"price\"]) - ref_price_len)\n\t\tscore = score - 10 * abs(float(item[\"rating\"]) - ref_rating)\n\t\titem[\"score\"] = score\n\n\tfor item in list_of_nearby:\n\t\treturn_list = []\n\t\treturn_list.append({\"id\": item[\"id\"], \"score\": item[\"score\"]})\n\n\treturn_list = sorted(return_list, key = lambda i: i[\"score\"])\n\treturn return_list", "async def search(self, ctx: Context, category: str, *, query: str) -> None:\n if category not in config.basic_search_categories:\n await ctx.send(f\"Invalid Category! ```Available Categories : {', '.join(config.basic_search_categories)}```\")\n return\n await self._basic_search(ctx, query, category)", "def PrintCategoryScore(Cat):\r\n print()\r\n print(\"########## Individual Category Results ##########\")\r\n for i in range(len(Cat)): # prints out the results per category \r\n print()\r\n print(Cat[i])\r\n print(CategoryScore(Cat[i]))\r\n print()\r\n return print(\"----- End of Individuals Category Results -----\")", "def test_get_cat_score(self):\n classes = ['blue skin', 'pointy ears']\n negated_classes = []\n categories = ['ear feature', 'skin feature']\n\n categorical_score = self.annot_scorer._get_categorical_score(\n classes, negated_classes, categories,\n self.negation_weight, self.mock_ic_values\n )\n\n assert categorical_score == 0.7002519289078384", "def CategoryScore(Category):\r\n \r\n Category = pd.read_excel('OutdoorScores.xlsx', Category , \r\n usecols=[0,1,2,3,4])\r\n ResultCategory = Category.sort_values(['Score','Golds','Hits'],\r\n ascending=[False,False,False],na_position='last')\r\n ResultCategory = ResultCategory.reset_index(drop=True)\r\n N=0\r\n for i in range(100):\r\n N += 1\r\n if pd.isnull(Category.loc[N,'Name']) == True: \r\n # looks at row N, column 'Name'\r\n break\r\n return ResultCategory[0:N] # if the cell is NaN, stops at row N\r", "def searchPoses():\n\n # TODO: how to account for substrings too? 
postgres doesn't seem to do this...maybe algolia is better\n if request.args:\n keyword = request.args.get('keyword')\n\n difficulty = request.args.getlist('difficulty') # list of difficulty\n if not difficulty: # if the list is empty\n difficulty = ['Beginner', 'Intermediate', 'Expert']\n\n categories = request.args.getlist('categories') # list of categories\n if not categories:\n all_cat_ids = db.session.query(Category.cat_id).all() # returns a list of tuples of all the ids\n categories = [category[0] for category in all_cat_ids] # converts that to a list\n \n query = db.session.query(Pose).join(PoseCategory)\n query = search(query, keyword, sort=True) # sort the search results by ranking\n all_poses = query.filter(Pose.difficulty.in_(difficulty),PoseCategory.cat_id.in_(categories)).order_by(Pose.name).all()\n \n else:\n all_poses = Pose.query.order_by('name').all()\n\n # make a dictionary of all the counts for the category and difficulty\n # TODO: try doing a subquery in SQLAlchemy for better performance?\n # https://stackoverflow.com/questions/38878897/how-to-make-a-subquery-in-sqlalchemy\n difficulty_counts = {'Beginner':0, 'Intermediate': 0, 'Expert':0}\n category_counts = {}\n for pose in all_poses:\n difficulty_counts[pose.difficulty] += 1\n pose_categories = pose.pose_categories # a list of pose_categories\n for pose_cat in pose_categories:\n if pose_cat.cat_id not in category_counts:\n category_counts[pose_cat.cat_id] = 0\n category_counts[pose_cat.cat_id] +=1\n\n all_categories = Category.query.order_by('name').all()\n\n return render_template(\"search.html\", \n all_poses=all_poses, \n categories=all_categories,\n difficulty_counts=difficulty_counts,\n category_counts=category_counts)", "def getBest(self, category):\n if category == 'Accuracy':\n index = np.argmax(self.trainAcc)\n elif category == 'Error':\n index = np.argmin(self.trainError)\n\n return self.trainError[index], self.trainAcc[index], self.w[index]", "def search_by_term():\n body = request.get_json()\n term = body.get('searchTerm', '')\n current_category = None\n\n if term == '':\n abort(422)\n\n questions = Question.query.filter(Question.question.ilike('%'+term+'%')).all()\n formatted_questions = [question.format() for question in questions]\n\n if len(formatted_questions) > 0:\n current_category = formatted_questions[0]['category']\n\n return jsonify({\n 'success': True,\n 'questions': formatted_questions,\n 'total_questions': len(formatted_questions),\n 'current_category': current_category\n })", "def suggest(ctx, request: str):\n replacer = Replacer(ctx.obj.get('GKG_API_KEY'))\n suggestion = replacer.suggest(request)\n if suggestion == request:\n logger.info(\n 'Result from Google Knowledge Graph equals input: \"{0}\"', request,\n )\n elif suggestion:\n logger.info('Result from Google Knowledge Graph: \"{0}\"', suggestion)\n else:\n logger.info(\n 'No results in the Google Knowledge Graph for: \"{0}\"', request,\n )", "def topKSimilar(self,word,k = 5,maxDistance = 5):\n r = self.__search(word,self.root,maxDistance)\n return sorted(r.items(),key = lambda x:x[1])[0:k]" ]
[ "0.5779498", "0.5776032", "0.57445335", "0.5656867", "0.56513745", "0.5642086", "0.5516284", "0.5513393", "0.5420318", "0.54119575", "0.53935814", "0.5359446", "0.532522", "0.5297809", "0.5292416", "0.5278605", "0.52570313", "0.5227728", "0.52234787", "0.52224135", "0.52097696", "0.5199643", "0.517219", "0.5162983", "0.5162358", "0.5160729", "0.51602113", "0.5118577", "0.5099927", "0.5082551" ]
0.66086555
0
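The matched `find_match` above depends on a `process` module it never imports. A runnable version assuming the `fuzzywuzzy` package; `rapidfuzz.process` offers a near drop-in `extractOne`, which is why the code indexes the result tuple instead of unpacking it.

```python
from typing import List
from fuzzywuzzy import process  # rapidfuzz.process works here as well

def find_match(potential_category: str, categories: List[str]) -> str:
    """Return the category name that best matches the requested one."""
    match = process.extractOne(potential_category, categories)
    return match[0]  # extractOne returns (best_choice, score, ...)
```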
Gets the is_bot of this UserBase. A boolean specifying whether the user is a bot or a full account.
def is_bot(self): return self._is_bot
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_bot(self) -> bool:\n if self._bot is not None:\n return hasattr(self, 'ubot')\n return bool(Config.BOT_TOKEN)", "def is_bot(self) -> undefined.UndefinedOr[bool]:", "def bot_type(self):\n return self._bot_type", "def is_bot(self) -> bool:", "def bot(self):\n return self._bot", "def is_bot(self, is_bot):\n\n self._is_bot = is_bot", "def is_active_user(self):\n\n return self.is_active", "def check_if_bot(self, user_id):\n return str(self.get_int_index(bot_id, 9)) in str(user_id)", "def isNew(self):\n bot = self.storage.find_one({\"user\": self.user_id})\n if not bot:\n return True\n return False", "def is_bot(self, pid:int) -> bool:\n\t\treturn pid in self._agents", "def is_default_bot_for_cog_svc_account(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"is_default_bot_for_cog_svc_account\")", "def is_active(self):\n return self.user.is_active", "def is_active(self):\n return self.status == ACTIVE_USER", "def is_admin(self):\n if self.user is None:\n return False\n if unicode(self.user._id) in self.barcamp.admins:\n return True\n if self.user.is_admin:\n return True\n return False", "def is_bot(environ, is_bot_ua=is_bot_ua):\n if is_bot_ua(environ.get('HTTP_USER_AGENT', '')):\n return True\n if 'HTTP_ACCEPT_LANGUAGE' not in environ:\n return True\n return False", "def is_logged_in(self):\n return self.__is_logged_in", "def has_user(self, user): # pylint: disable=unused-argument\r\n return False", "def is_admin(self):\n if not self.current_user:\n return False\n else:\n return self.current_user in [\"1\"]", "def is_superuser(self):\n return self.is_admin", "def is_admin_user(self):\n if \"is_admin\" in self._properties and self.is_admin == 'YES':\n return True\n return False", "def is_usermanager(self):\n return False", "def auth_enabled(self):\n\n return self._api_manager.auth_enabled()", "def get_is_ai(self):\n return self.__is_ai", "def logged_in(self):\n return self.user is not None", "def is_turbo(self) -> bool:\n return self.turbo", "def user_verified(self):\n return self.user.verified", "def is_bot_owner(ctx: commands.Context) -> bool:\n return ctx.author.id == int(open(\"data/metadata/owner.id.txt\", \"r\").read())", "def get_all_bots(self):\n\t\ttry:\n\t\t\tconn \t\t\t = sqlite3.connect(self.name, detect_types=sqlite3.PARSE_DECLTYPES)\n\t\t\tconn.row_factory = sqlite3.Row\n\t\t\tc \t\t\t\t = conn.cursor()\n\t\t\tc.execute('SELECT * FROM bots')\n\t\t\tall_bots = c.fetchall()\n\t\t\treturn all_bots\t\t\t\t\t\t\t\t# list(all_bots) = [<sqlite3.Row object at 0x000001BB27302FD0>, <sqlite3.Row object at 0x000001BB27302CB0>,...]\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t# [{pair:'LTCBTC', 'is_active'=True, ...}, {pair:'ETHBTC, 'is_active'=True, ...}]\n\n\t\texcept Exception as e:\n\t\t\tprint(e)\n\t\t\treturn False", "def get_isenabled(self):\n return self.isenabled", "def get_has_password(self, user):\n return True if user.password else False" ]
[ "0.774403", "0.6506757", "0.6466639", "0.63527393", "0.6351612", "0.59704244", "0.5953491", "0.5796303", "0.5599795", "0.55755657", "0.55704886", "0.5508024", "0.5390804", "0.5384107", "0.5262253", "0.52469635", "0.52303284", "0.522477", "0.5224286", "0.5201915", "0.5175014", "0.5174314", "0.51557016", "0.5155061", "0.5142407", "0.51405805", "0.5131339", "0.5123091", "0.5119839", "0.5115879" ]
0.7682634
1
Sets the is_bot of this UserBase. A boolean specifying whether the user is a bot or a full account.
def is_bot(self, is_bot): self._is_bot = is_bot
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_bot(self):\n return self._is_bot", "def is_bot(self) -> bool:\n if self._bot is not None:\n return hasattr(self, 'ubot')\n return bool(Config.BOT_TOKEN)", "def bot_type(self, bot_type):\n\n self._bot_type = bot_type", "def set_is_ai(self, is_ai):\n self.__is_ai = is_ai", "def is_bot(self) -> undefined.UndefinedOr[bool]:", "def is_bot(self) -> bool:", "def bot_type(self):\n return self._bot_type", "def is_business(self, is_business):\n\n self._is_business = is_business", "def add_bot(self, bot):\n self.add_user(bot)", "def set_is_org_active(self, is_org_active):\n self.is_org_active = is_org_active", "async def set_bot():\n\n self = await LOCAL.APP.get_me()\n LOCAL.bot_id = self.id\n LOCAL.bot_name = self.first_name\n LOCAL.bot_username = self.username", "def is_admin(self, is_admin):\n\n self._is_admin = is_admin", "def is_default_bot_for_cog_svc_account(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"is_default_bot_for_cog_svc_account\")", "def is_admin(self, is_admin: bool):\n if is_admin is None:\n raise ValueError(\"Invalid value for `is_admin`, must not be `None`\") # noqa: E501\n\n self._is_admin = is_admin", "def ban_user(self, session, chat_id: int) -> None:\n\n user = session.query(User).get(chat_id)\n if user and user.is_banned is False:\n user.is_banned = True\n session.commit()", "def save(self, *args, **kwargs):\r\n\r\n\t\t# if self.has_django_dashboard_access is True:\r\n\t\t# self.is_staff = True\r\n\t\tsuper(User, self).save(*args, **kwargs)", "def bot(self):\n return self._bot", "def boolean(self, boolean):\n\n self._boolean = boolean", "def is_owner(self, is_owner):\n\n self._is_owner = is_owner", "async def set_chat(self, args):\n value = args if isinstance(args, bool) else args.lower() in ('yes', 'true', '1')\n if self.chat == value:\n return\n self.chat = value\n if self.chat_message is not None:\n await self.delete_message(self.chat_message)\n await self.set_trigger('chat_init', None)\n await self.set_trigger('chat', None)\n tag = 'chat' if self.chat else 'chat_init'\n self.chat_message = await self.send_tag(tag, emoji.TRIGGERS[tag], 'Chat enabled' if self.chat else 'Chat muted')\n if not self.chat:\n await self.shell_terminate_all(self.shell_chat)", "def set_chatbot(self, chatbot):\n super(MultiLogicAdapter, self).set_chatbot(chatbot)\n\n for adapter in self.adapters:\n adapter.set_chatbot(chatbot)", "def check_if_bot(self, user_id):\n return str(self.get_int_index(bot_id, 9)) in str(user_id)", "def set(self, boolean):\n self._val = boolean", "def set_is_active(self, status):\n if self.role == User.ROLE_ADMIN:\n if not self.last_active_admin():\n self.is_active = (status == User.STATUS_ACTIVE)\n else:\n self.is_active = True\n self.status = User.STATUS_ACTIVE\n else:\n self.is_active = (status == User.STATUS_ACTIVE)", "def is_bot(self, pid:int) -> bool:\n\t\treturn pid in self._agents", "def is_user_event(self, is_user_event):\n self._is_user_event = is_user_event", "def setBot(self, present, dir=-1):\n if dir == -1:\n # print(\"Bot set present at current node: \" + str(present))\n self.botPresent = present\n else:\n self.dirNodes[dir].botPresent = present", "def set_is_default_org(self, is_default_org):\n self.is_default_org = is_default_org", "def is_voice_roaming_enabled(self, is_voice_roaming_enabled):\n\n self._is_voice_roaming_enabled = is_voice_roaming_enabled", "def set_bool_attribute(self, id: str, b: Optional[bool]):\n self.set_attribute(id, None if not b else ConstInt(1))" ]
[ "0.6343722", "0.6289822", "0.58650166", "0.57027304", "0.544486", "0.5285852", "0.51772743", "0.5169832", "0.50523525", "0.5034742", "0.5024811", "0.50076777", "0.48692012", "0.48260602", "0.48116446", "0.4810998", "0.48001114", "0.4750782", "0.47010517", "0.46964487", "0.46854365", "0.4650688", "0.46459943", "0.4630177", "0.46200874", "0.4600986", "0.45955223", "0.45897764", "0.4569611", "0.45645013" ]
0.7644477
0
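The `is_bot` getter/setter pair in the two entries above is generated-client boilerplate; idiomatic Python expresses the same pair with `property`. A minimal sketch; the type guard is an added assumption, not in the source.

```python
class UserBase:
    def __init__(self, is_bot=False):
        self._is_bot = is_bot

    @property
    def is_bot(self):
        """Whether the user is a bot or a full account."""
        return self._is_bot

    @is_bot.setter
    def is_bot(self, is_bot):
        if not isinstance(is_bot, bool):  # added guard, not in the source
            raise ValueError("is_bot must be a boolean")
        self._is_bot = is_bot
```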
Gets the avatar_url of this UserBase.
def avatar_url(self): return self._avatar_url
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def avatar_url(self) -> typing.Optional[files.URL]:\n return self.make_avatar_url()", "def avatar_url(self):\n if self.avatar and hasattr(self.avatar, 'url'):\n return self.avatar.url\n else:\n return '/static/defaults/!default_user_avatar/user.gif'", "def avatar_url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"avatar_url\")", "def avatar_url(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"avatar_url\")", "def avatar_url(self):\n return gravatar_for_email(self.user.email, size=40)", "def avatar(self) -> str:\n\t\tif self.hasAvatar:\n\t\t\treturn f'https://avatars.atelier801.com/{self.id % 10000}/{self.id}.jpg'\n\n\t\t# default avatar\n\t\treturn 'https://avatars.atelier801.com/0/0.jpg'", "def _get_avatar_url(user: Member) -> str:\n # if user.avatar.startswith(\"a\"):\n # url = user.avatar_url_as(format=\"gif\")\n # else:\n url = user.avatar_url_as(format=\"png\")\n\n return url.split(\"?\")[0] # we really don't care about the size, chop it off", "def GetAvatar(self):\n\n return self.__GetJsonOrNone(\"/users/\"+self.userName+\"/avatar\", False)", "def get_user_avatar(user: Union[discord.User, discord.Member]) -> str:\n return user.avatar_url if user.avatar_url is not None else user.default_avatar_url", "def default_avatar_url(self) -> files.URL: # noqa: D401 - Imperative mood\n return routes.CDN_DEFAULT_USER_AVATAR.compile_to_file(\n urls.CDN_URL,\n discriminator=int(self.discriminator) % 5,\n file_format=\"png\",\n )", "def make_avatar_url(self, *, ext: typing.Optional[str] = None, size: int = 4096) -> typing.Optional[files.URL]:\n if self.avatar_hash is None:\n return None\n\n if ext is None:\n if self.avatar_hash.startswith(\"a_\"):\n ext = \"gif\"\n else:\n ext = \"png\"\n\n return routes.CDN_USER_AVATAR.compile_to_file(\n urls.CDN_URL,\n user_id=self.id,\n hash=self.avatar_hash,\n size=size,\n file_format=ext,\n )", "def avatar_id(self):\n return self._avatar_id", "def avatar(user):\n\n if user_service.user_has_avatar(user.id):\n return url_for('user.view_avatar', user_id=user.id)\n\n # Set default values gravatar\n default = 'identicon'\n size = 100\n email = user.email or ''\n\n # Construct the url\n gravatar_url = 'https://www.gravatar.com/avatar/' + \\\n hashlib.md5(\n email.lower().encode('utf-8')).hexdigest() + '?'\n gravatar_url += urllib.parse.urlencode({'d': default, 's': str(size)})\n return gravatar_url", "def get_picture(self):\n\t\tno_picture = settings.STATIC_URL + 'img/img_avatar.png'\n\t\ttry:\n\t\t\treturn self.picture.url\n\t\texcept:\n\t\t\treturn no_picture", "def resolve_avatar(self, info):\n if self.avatar:\n self.avatar = info.context.build_absolute_uri(self.avatar.url)\n return self.avatar", "def avatar(self):\n admin_user = User.objects.get(pk=1)\n email = self.original_author\n\n if self.author != admin_user:\n email = self.author.email\n import hashlib\n value = hashlib.md5(email)\n\n return 'http://www.gravatar.com/avatar/%s' % value.hexdigest() + '?s=200'", "def app_avatar(self):\n with open(self.AVATAR_PATH, 'rb') as handle:\n return handle.read()", "def avatar_url(self, avatar_url):\n\n self._avatar_url = avatar_url", "def image_url(self) -> str:\n return self._image_url", "def get_avatar(self, size):\n\n digest = md5(self.email.encode('utf-8')).hexdigest()\n return 'https://www.gravatar.com/avatar/{}?d=identicon&s={}'.format(\n digest, size\n )", "def get_photo_url(self):\n try:\n return self.profile_data[\"photoUrl\"]\n except Exception as e:\n error_msg = (\"Failed to retrieve photo url: {}\"\n \"\".format(str(e)))\n 
raise PlayerDataException(error_msg)", "def get_avatar_url_for_user(user_id: UserID) -> str | None:\n avatar_urls_by_user_id = get_avatar_urls_for_users({user_id})\n return avatar_urls_by_user_id.get(user_id)", "def user_profile_avatar_path(user_info, filename):\n return 'user_{0}/avatars/{1}'.format(instance.user.id, filename)", "def player_avatar(player_obj):\n avatar = \"http://www.gravatar.com/avatar/%s.jpg?d=monsterid\" % md5(player_obj.user.email).hexdigest()\n\n return avatar", "def get_profile_picture_url(cls, filename):\n if filename is None:\n return None\n profile_picture = bucket.blob('images/users/'+filename)\n if profile_picture.exists():\n profile_picture.make_public()\n return profile_picture.public_url\n return None", "def avatar(self, size):\n digest = md5(str(self.email).encode('utf-8')).hexdigest()\n return 'https://www.gravatar.com/avatar/{}?d=identicon&s={}'.format(\n digest, size)", "def get_thumb_url(self):\n return self.thumb_url", "def get_thumbnail_url(self):\n return self.thumbnail_url", "def image_url(self) -> str:\n return pulumi.get(self, \"image_url\")", "def avatar(self, size=200, d=\"robohash\"):\n\t\tdigest = md5(self.email.lower().encode('utf-8')).hexdigest()\n\t\tg = \"https://gravatar.com/avatar/{}?d={}&s={}\".format(digest, d, size)\n\t\treturn g" ]
[ "0.83125377", "0.80434006", "0.79306704", "0.7921607", "0.7852214", "0.7280996", "0.7254777", "0.7066657", "0.6957944", "0.69364053", "0.6856294", "0.6781074", "0.6680307", "0.6594353", "0.6536579", "0.6501867", "0.634738", "0.6297715", "0.6278013", "0.6261349", "0.625929", "0.62500554", "0.61314595", "0.6099951", "0.6088467", "0.60858476", "0.60179067", "0.6008921", "0.5938852", "0.5938551" ]
0.8626505
0
Sets the avatar_url of this UserBase.
def avatar_url(self, avatar_url): self._avatar_url = avatar_url
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def avatar_url(self):\n if self.avatar and hasattr(self.avatar, 'url'):\n return self.avatar.url\n else:\n return '/static/defaults/!default_user_avatar/user.gif'", "def avatar_url(self):\n return self._avatar_url", "def avatar_url(self) -> typing.Optional[files.URL]:\n return self.make_avatar_url()", "def profile_image_url(self, profile_image_url):\n\n self._profile_image_url = profile_image_url", "def avatar_url(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"avatar_url\")", "def avatar_id(self, avatar_id):\n\n self._avatar_id = avatar_id", "async def avatar(self, url):\n # [p]set avatar <url>\n\n try:\n async with self.session.get(url) as r:\n data = await r.read()\n await self.bot.edit_profile(avatar=data)\n await self.bot.say(\"Done.\")\n log.debug(\"Changed avatar.\")\n except Exception as e:\n await self.bot.say(\"Error, check your console or logs for \"\n \"more information.\")\n log.exception(e)\n traceback.print_exc()", "def update_avatar(self, url):\n if self.avatar:\n logging.info(f'{self} already has an avatar: {self.avatar}')\n # TODO: check if image has been updated\n else:\n logging.info(f'{self} has no profile image.')\n img_temp = NamedTemporaryFile(delete=True)\n # TODO: Use requests instead of urllib?\n img_temp.write(urlopen(url).read()) # noqa: S310\n img_temp.flush()\n self.avatar.save(f'{self.pk}', File(img_temp))", "async def utils_set_avatar(self, ctx, url: str=None):\r\n if url is None:\r\n if not ctx.message.attachments:\r\n return await ctx.say(\"No avatar found! \"\r\n \"Provide an Url or Attachment!\")\r\n else:\r\n url = ctx.message.attachments[0].get(\"url\")\r\n\r\n ext = url.split(\".\")[-1]\r\n mime = mimetypes.types_map.get(ext)\r\n if mime is not None and not mime.startswith(\"image\"):\r\n # None can still be an image\r\n return await ctx.send(\"Url or Attachment is not an Image!\")\r\n\r\n async with aiohttp.ClientSession() as s, s.get(url) as r:\r\n if 200 <= r.status < 300:\r\n content = await r.read()\r\n else:\r\n return await ctx.send(\"Invalid Response code: {}\"\r\n .format(r.status_code))\r\n\r\n try:\r\n await self.amethyst.user.edit(avatar=content)\r\n except BaseException: # I don't know the exact Exception type\r\n return await ctx.send(\"Avatar was too big or not an image!\")\r\n\r\n await ctx.send(\"Successfully updated avatar!\")", "def avatar_url(self):\n return gravatar_for_email(self.user.email, size=40)", "def default_avatar_url(self) -> files.URL: # noqa: D401 - Imperative mood\n return routes.CDN_DEFAULT_USER_AVATAR.compile_to_file(\n urls.CDN_URL,\n discriminator=int(self.discriminator) % 5,\n file_format=\"png\",\n )", "def avatar_url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"avatar_url\")", "def set_avatar(self, asset_id):\n # Implemented from template for osid.resource.ResourceForm.set_avatar_template\n if self.get_avatar_metadata().is_read_only():\n raise errors.NoAccess()\n if not self._is_valid_id(asset_id):\n raise errors.InvalidArgument()\n self._my_map['avatarId'] = str(asset_id)", "def set_default_avatar(cls, v, *, values): # pylint: disable=no-self-argument\n seed = values[\"username\"]\n return v or f\"https://picsum.photos/seed/{seed}/200/\"", "def SetAvatar(self, fileName):\n\n self.__PostFile(\"/avatar\", fileName, \"avatar[file]\")", "def avatar(self) -> str:\n\t\tif self.hasAvatar:\n\t\t\treturn f'https://avatars.atelier801.com/{self.id % 10000}/{self.id}.jpg'\n\n\t\t# default avatar\n\t\treturn 'https://avatars.atelier801.com/0/0.jpg'", "def make_avatar_url(self, *, ext: 
typing.Optional[str] = None, size: int = 4096) -> typing.Optional[files.URL]:\n if self.avatar_hash is None:\n return None\n\n if ext is None:\n if self.avatar_hash.startswith(\"a_\"):\n ext = \"gif\"\n else:\n ext = \"png\"\n\n return routes.CDN_USER_AVATAR.compile_to_file(\n urls.CDN_URL,\n user_id=self.id,\n hash=self.avatar_hash,\n size=size,\n file_format=ext,\n )", "def registerAvatar(self, avatar):\r\n assert self._avatar is None\r\n self._avatar = avatar", "def avatar_version(self, avatar_version):\n\n self._avatar_version = avatar_version", "def avatar(user):\n\n if user_service.user_has_avatar(user.id):\n return url_for('user.view_avatar', user_id=user.id)\n\n # Set default values gravatar\n default = 'identicon'\n size = 100\n email = user.email or ''\n\n # Construct the url\n gravatar_url = 'https://www.gravatar.com/avatar/' + \\\n hashlib.md5(\n email.lower().encode('utf-8')).hexdigest() + '?'\n gravatar_url += urllib.parse.urlencode({'d': default, 's': str(size)})\n return gravatar_url", "def resolve_avatar(self, info):\n if self.avatar:\n self.avatar = info.context.build_absolute_uri(self.avatar.url)\n return self.avatar", "async def async_set_media_image_url(self, url):\n self._media_image_url = url", "async def set_avatar(self, avatar_id, delay=0, lifespan=math.inf):\n await self.add_output(\n \"|/avatar {}\".format(avatar_id), delay=delay, lifespan=lifespan\n )", "async def avatarurl(self, ctx: \"IceTeaContext\", target: discord.Member = None):\n target = target or ctx.author\n embed = discord.Embed(description=f\"{target} Profile Picture\")\n embed.set_image(url=str(target.avatar_url))\n await ctx.send(embed=embed)", "def url(self, image_url):\n\n self._url = image_url", "def user_url(self, user_url):\n\n self._user_url = user_url", "def get_user_avatar(user: Union[discord.User, discord.Member]) -> str:\n return user.avatar_url if user.avatar_url is not None else user.default_avatar_url", "def _get_avatar_url(user: Member) -> str:\n # if user.avatar.startswith(\"a\"):\n # url = user.avatar_url_as(format=\"gif\")\n # else:\n url = user.avatar_url_as(format=\"png\")\n\n return url.split(\"?\")[0] # we really don't care about the size, chop it off", "def set_image(self, **kwargs):\n self.image = kwargs.get('url')", "def image_url(self, image_url):\n\n self._image_url = image_url" ]
[ "0.7171562", "0.7052809", "0.6945081", "0.65909845", "0.65333134", "0.64174134", "0.63824266", "0.6325658", "0.6313757", "0.62597895", "0.62147564", "0.6207663", "0.61743844", "0.6130403", "0.5995444", "0.59891415", "0.59879833", "0.5912891", "0.5886393", "0.5850929", "0.58470213", "0.5778821", "0.57513624", "0.5595089", "0.55389977", "0.55209386", "0.5502915", "0.549757", "0.54379195", "0.5411947" ]
0.83707076
0
Gets the avatar_version of this UserBase. Version for the user's avatar. Used for cachebusting requests for the user's avatar. Clients generally shouldn't need to use this; most avatar URLs sent by Zulip will already end with `?v={avatar_version}`.
def avatar_version(self): return self._avatar_version
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def avatar_version(self, avatar_version):\n\n self._avatar_version = avatar_version", "def GetAvatar(self):\n\n return self.__GetJsonOrNone(\"/users/\"+self.userName+\"/avatar\", False)", "def avatar_url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"avatar_url\")", "def avatar_url(self):\n return self._avatar_url", "def avatar_url(self) -> typing.Optional[files.URL]:\n return self.make_avatar_url()", "def avatar(self) -> str:\n\t\tif self.hasAvatar:\n\t\t\treturn f'https://avatars.atelier801.com/{self.id % 10000}/{self.id}.jpg'\n\n\t\t# default avatar\n\t\treturn 'https://avatars.atelier801.com/0/0.jpg'", "def avatar_id(self):\n return self._avatar_id", "def avatar_url(self):\n if self.avatar and hasattr(self.avatar, 'url'):\n return self.avatar.url\n else:\n return '/static/defaults/!default_user_avatar/user.gif'", "def _get_avatar_url(user: Member) -> str:\n # if user.avatar.startswith(\"a\"):\n # url = user.avatar_url_as(format=\"gif\")\n # else:\n url = user.avatar_url_as(format=\"png\")\n\n return url.split(\"?\")[0] # we really don't care about the size, chop it off", "def avatar_url(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"avatar_url\")", "def app_avatar(self):\n with open(self.AVATAR_PATH, 'rb') as handle:\n return handle.read()", "def get_user_avatar(user: Union[discord.User, discord.Member]) -> str:\n return user.avatar_url if user.avatar_url is not None else user.default_avatar_url", "def GetAvatarForUser(self, userName):\n\n return self.__GetJsonOrNone(\"/users/\"+userName+\"/avatar\", False)", "def avatar(self):\n admin_user = User.objects.get(pk=1)\n email = self.original_author\n\n if self.author != admin_user:\n email = self.author.email\n import hashlib\n value = hashlib.md5(email)\n\n return 'http://www.gravatar.com/avatar/%s' % value.hexdigest() + '?s=200'", "def get_version(self):\n args = {\"access_token\": self.access_token}\n try:\n response = self.session.request(\n \"GET\",\n FACEBOOK_GRAPH_URL + self.version + \"/me\",\n params=args,\n timeout=self.timeout,\n proxies=self.proxies,\n )\n except requests.HTTPError as e:\n response = json.loads(e.read())\n raise GraphAPIError(response)\n\n try:\n headers = response.headers\n version = headers[\"facebook-api-version\"].replace(\"v\", \"\")\n return str(version)\n except Exception:\n raise GraphAPIError(\"API version number not available\")", "def make_avatar_url(self, *, ext: typing.Optional[str] = None, size: int = 4096) -> typing.Optional[files.URL]:\n if self.avatar_hash is None:\n return None\n\n if ext is None:\n if self.avatar_hash.startswith(\"a_\"):\n ext = \"gif\"\n else:\n ext = \"png\"\n\n return routes.CDN_USER_AVATAR.compile_to_file(\n urls.CDN_URL,\n user_id=self.id,\n hash=self.avatar_hash,\n size=size,\n file_format=ext,\n )", "def avatar_url(self):\n return gravatar_for_email(self.user.email, size=40)", "def latest_image_version(self) -> str:\n return self._latest_image_version", "def default_avatar_url(self) -> files.URL: # noqa: D401 - Imperative mood\n return routes.CDN_DEFAULT_USER_AVATAR.compile_to_file(\n urls.CDN_URL,\n discriminator=int(self.discriminator) % 5,\n file_format=\"png\",\n )", "def get_avatar(self, size):\n\n digest = md5(self.email.encode('utf-8')).hexdigest()\n return 'https://www.gravatar.com/avatar/{}?d=identicon&s={}'.format(\n digest, size\n )", "def avatar(user):\n\n if user_service.user_has_avatar(user.id):\n return url_for('user.view_avatar', user_id=user.id)\n\n # Set default values gravatar\n default = 'identicon'\n size = 100\n 
email = user.email or ''\n\n # Construct the url\n gravatar_url = 'https://www.gravatar.com/avatar/' + \\\n hashlib.md5(\n email.lower().encode('utf-8')).hexdigest() + '?'\n gravatar_url += urllib.parse.urlencode({'d': default, 's': str(size)})\n return gravatar_url", "def avatar_hash(self) -> typing.Optional[str]:", "def player_avatar(player_obj):\n avatar = \"http://www.gravatar.com/avatar/%s.jpg?d=monsterid\" % md5(player_obj.user.email).hexdigest()\n\n return avatar", "def current_image_version(self) -> str:\n return self._current_image_version", "def get_avatar_id(self):\n # Implemented from template for osid.resource.Resource.get_avatar_id_template\n if not self._my_map['avatarId']:\n raise errors.IllegalState('this Resource has no avatar')\n else:\n return Id(self._my_map['avatarId'])", "def getAvatarInfo(self):\n return \", \".join(self._get_avatar_info())", "def set_default_avatar(cls, v, *, values): # pylint: disable=no-self-argument\n seed = values[\"username\"]\n return v or f\"https://picsum.photos/seed/{seed}/200/\"", "def update_avatar(self, url):\n if self.avatar:\n logging.info(f'{self} already has an avatar: {self.avatar}')\n # TODO: check if image has been updated\n else:\n logging.info(f'{self} has no profile image.')\n img_temp = NamedTemporaryFile(delete=True)\n # TODO: Use requests instead of urllib?\n img_temp.write(urlopen(url).read()) # noqa: S310\n img_temp.flush()\n self.avatar.save(f'{self.pk}', File(img_temp))", "def get_avatar(request, username):\n try:\n user = User.objects.get(username=username)\n file = user.get_profile().avatar.file\n except:\n file = open(os.path.join(settings.MEDIA_ROOT, \"avatars\", \"default.png\"))\n ext = file.name.split('.')[-1]\n return HttpResponse(file, mimetype=\"image/\"+ext)", "def get_version(self):\n url = '{}/v2/version'.format(self.url)\n try:\n r = requests.get(url)\n if r.status_code == 200:\n return r.json()['version']\n except Exception as e:\n pass\n return ''" ]
[ "0.6582307", "0.6567599", "0.627431", "0.62414306", "0.6070075", "0.60053706", "0.5953287", "0.5907676", "0.59010553", "0.5892529", "0.58835626", "0.58578086", "0.58211213", "0.55421185", "0.5502481", "0.54706377", "0.5448654", "0.5431678", "0.540974", "0.53088725", "0.5273445", "0.5252839", "0.5223232", "0.51925504", "0.5183061", "0.5154204", "0.51488006", "0.5133219", "0.5133151", "0.51166785" ]
0.80662423
0
Sets the avatar_version of this UserBase. Version for the user's avatar. Used for cachebusting requests for the user's avatar. Clients generally shouldn't need to use this; most avatar URLs sent by Zulip will already end with `?v={avatar_version}`.
def avatar_version(self, avatar_version): self._avatar_version = avatar_version
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def avatar_version(self):\n return self._avatar_version", "def avatar_url(self, avatar_url):\n\n self._avatar_url = avatar_url", "def update_avatar(self, url):\n if self.avatar:\n logging.info(f'{self} already has an avatar: {self.avatar}')\n # TODO: check if image has been updated\n else:\n logging.info(f'{self} has no profile image.')\n img_temp = NamedTemporaryFile(delete=True)\n # TODO: Use requests instead of urllib?\n img_temp.write(urlopen(url).read()) # noqa: S310\n img_temp.flush()\n self.avatar.save(f'{self.pk}', File(img_temp))", "def avatar_id(self, avatar_id):\n\n self._avatar_id = avatar_id", "def set_default_avatar(cls, v, *, values): # pylint: disable=no-self-argument\n seed = values[\"username\"]\n return v or f\"https://picsum.photos/seed/{seed}/200/\"", "async def avatar(self, url):\n # [p]set avatar <url>\n\n try:\n async with self.session.get(url) as r:\n data = await r.read()\n await self.bot.edit_profile(avatar=data)\n await self.bot.say(\"Done.\")\n log.debug(\"Changed avatar.\")\n except Exception as e:\n await self.bot.say(\"Error, check your console or logs for \"\n \"more information.\")\n log.exception(e)\n traceback.print_exc()", "def avatar_url(self):\n if self.avatar and hasattr(self.avatar, 'url'):\n return self.avatar.url\n else:\n return '/static/defaults/!default_user_avatar/user.gif'", "async def utils_set_avatar(self, ctx, url: str=None):\r\n if url is None:\r\n if not ctx.message.attachments:\r\n return await ctx.say(\"No avatar found! \"\r\n \"Provide an Url or Attachment!\")\r\n else:\r\n url = ctx.message.attachments[0].get(\"url\")\r\n\r\n ext = url.split(\".\")[-1]\r\n mime = mimetypes.types_map.get(ext)\r\n if mime is not None and not mime.startswith(\"image\"):\r\n # None can still be an image\r\n return await ctx.send(\"Url or Attachment is not an Image!\")\r\n\r\n async with aiohttp.ClientSession() as s, s.get(url) as r:\r\n if 200 <= r.status < 300:\r\n content = await r.read()\r\n else:\r\n return await ctx.send(\"Invalid Response code: {}\"\r\n .format(r.status_code))\r\n\r\n try:\r\n await self.amethyst.user.edit(avatar=content)\r\n except BaseException: # I don't know the exact Exception type\r\n return await ctx.send(\"Avatar was too big or not an image!\")\r\n\r\n await ctx.send(\"Successfully updated avatar!\")", "def SetAvatar(self, fileName):\n\n self.__PostFile(\"/avatar\", fileName, \"avatar[file]\")", "def set_avatar(self, asset_id):\n # Implemented from template for osid.resource.ResourceForm.set_avatar_template\n if self.get_avatar_metadata().is_read_only():\n raise errors.NoAccess()\n if not self._is_valid_id(asset_id):\n raise errors.InvalidArgument()\n self._my_map['avatarId'] = str(asset_id)", "def avatar(self) -> str:\n\t\tif self.hasAvatar:\n\t\t\treturn f'https://avatars.atelier801.com/{self.id % 10000}/{self.id}.jpg'\n\n\t\t# default avatar\n\t\treturn 'https://avatars.atelier801.com/0/0.jpg'", "def avatar_url(self):\n return self._avatar_url", "def default_avatar_url(self) -> files.URL: # noqa: D401 - Imperative mood\n return routes.CDN_DEFAULT_USER_AVATAR.compile_to_file(\n urls.CDN_URL,\n discriminator=int(self.discriminator) % 5,\n file_format=\"png\",\n )", "def avatar_url(self) -> typing.Optional[files.URL]:\n return self.make_avatar_url()", "def test_resource_user_resource_change_user_avatar_patch(self):\n pass", "async def update_version(self, version: int):\n async with open(self.__file_name, mode=\"r\") as auth_file:\n tag_data = json.loads(await auth_file.read())\n await auth_file.close()\n async with 
open(self.__file_name, mode=\"w\") as auth:\n tag_data[\"version\"] = version\n await auth.write(json.dumps(tag_data, indent=2, sort_keys=True))\n await auth.close()\n self.__version = version", "def make_avatar_url(self, *, ext: typing.Optional[str] = None, size: int = 4096) -> typing.Optional[files.URL]:\n if self.avatar_hash is None:\n return None\n\n if ext is None:\n if self.avatar_hash.startswith(\"a_\"):\n ext = \"gif\"\n else:\n ext = \"png\"\n\n return routes.CDN_USER_AVATAR.compile_to_file(\n urls.CDN_URL,\n user_id=self.id,\n hash=self.avatar_hash,\n size=size,\n file_format=ext,\n )", "def avatar_url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"avatar_url\")", "async def set_avatar(self, avatar_id, delay=0, lifespan=math.inf):\n await self.add_output(\n \"|/avatar {}\".format(avatar_id), delay=delay, lifespan=lifespan\n )", "def registerAvatar(self, avatar):\r\n assert self._avatar is None\r\n self._avatar = avatar", "def rotate_avatar(instance, **kwargs):\n if instance.avatar and not instance.is_avatar_rotated:\n rotate_user_avatar.apply_async(kwargs={'user_id': instance.pk})", "def GetAvatar(self):\n\n return self.__GetJsonOrNone(\"/users/\"+self.userName+\"/avatar\", False)", "def ip_version(self, ip_version):\n\n self._ip_version = ip_version", "def avatar_id(self):\n return self._avatar_id", "def avatar_url(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"avatar_url\")", "def user_profile_avatar_path(user_info, filename):\n return 'user_{0}/avatars/{1}'.format(instance.user.id, filename)", "def avatar(user):\n\n if user_service.user_has_avatar(user.id):\n return url_for('user.view_avatar', user_id=user.id)\n\n # Set default values gravatar\n default = 'identicon'\n size = 100\n email = user.email or ''\n\n # Construct the url\n gravatar_url = 'https://www.gravatar.com/avatar/' + \\\n hashlib.md5(\n email.lower().encode('utf-8')).hexdigest() + '?'\n gravatar_url += urllib.parse.urlencode({'d': default, 's': str(size)})\n return gravatar_url", "def resolve_avatar(self, info):\n if self.avatar:\n self.avatar = info.context.build_absolute_uri(self.avatar.url)\n return self.avatar", "def get_user_avatar(user: Union[discord.User, discord.Member]) -> str:\n return user.avatar_url if user.avatar_url is not None else user.default_avatar_url", "def avatar(self):\n admin_user = User.objects.get(pk=1)\n email = self.original_author\n\n if self.author != admin_user:\n email = self.author.email\n import hashlib\n value = hashlib.md5(email)\n\n return 'http://www.gravatar.com/avatar/%s' % value.hexdigest() + '?s=200'" ]
[ "0.6936227", "0.649719", "0.59142345", "0.5799783", "0.57989764", "0.5582525", "0.54501593", "0.5385766", "0.5370928", "0.5284951", "0.5270443", "0.5203477", "0.5187659", "0.51164407", "0.5090491", "0.50683516", "0.5042792", "0.503107", "0.50070167", "0.498359", "0.49506572", "0.4885295", "0.486961", "0.485535", "0.48539692", "0.4788982", "0.47376263", "0.47132015", "0.46971616", "0.46870306" ]
0.8566245
0
Gets the is_admin of this UserBase. A boolean specifying whether the user is an organization administrator.
def is_admin(self): return self._is_admin
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_admin(self):\n return self.admin", "def is_admin(self) -> bool:\n return self._is_admin", "def is_admin(self):\r\n return self.admin", "def is_admin_user(self):\n if \"is_admin\" in self._properties and self.is_admin == 'YES':\n return True\n return False", "def is_admin(self):\n if self.user is None:\n return False\n if unicode(self.user._id) in self.barcamp.admins:\n return True\n if self.user.is_admin:\n return True\n return False", "def is_admin(self) -> bool:\n\n return current_app.config[\"AUTH_ROLE_ADMIN\"] in [\n role.name for role in self.get_user_roles()\n ]", "def is_admin(user):\n return user.is_authenticated and user.id == app.config.get('ADMIN')", "def is_admin(self, user) -> bool:\n return (\n user.is_superuser\n or user.groups.filter(pk=self.admins_group.pk).exists()\n )", "def is_superuser(self):\n return self.is_admin", "def is_admin(self, is_admin: bool):\n if is_admin is None:\n raise ValueError(\"Invalid value for `is_admin`, must not be `None`\") # noqa: E501\n\n self._is_admin = is_admin", "def is_admin(self):\n if not self.current_user:\n return False\n else:\n return self.current_user in [\"1\"]", "def is_superuser(self):\n sesh = self.get_session()\n return sesh.curr_role == 'admin'", "def is_admin(self):\n if self.type == 1:\n return True\n else:\n return False", "def is_admin(self,user):\n if user.is_superuser:\n return True\n\n if user.groups.filter(name=self.admin_group_name).count() > 0:\n return True\n else:\n return False", "def admin(self):\n if self.is_admin:\n return True\n return False", "def isAdmin(user):\n return isUserType(user, Admin)", "def is_admin(self):\n if self.is_main_admin:\n return True\n if self.user is not None and self.barcamp is not None:\n if unicode(self.user._id) in self.barcamp.admins:\n return True\n return False", "def get_is_admin(self, obj):\n try:\n user = self.context.get('request').user\n except Exception:\n # raise serializers.ValidationError('Could not access request.user')\n return False\n if user == obj.admin:\n return True\n else:\n return False", "def is_admin(self, user):\n return user.name in self.admins", "def isAdmin(self, user):\r\n if user.id in self.admins:\r\n return True\r\n return False", "def is_admin():\n # TODO(felipemonteiro): Make this more robust via a context is admin\n # lookup.\n return CONF.patrole.rbac_test_role == CONF.identity.admin_role", "def is_administrator(self):\n return self.can(Permission.ADMIN)", "def is_administrator(self):\n return self.can(Permission.ADMIN)", "def is_administrator(self):\n return self.rol == ProfileRoles.ADMINISTRATOR or self.user.is_staff", "def is_admin(self):\n return Role.query.get(2) in self.roles", "def user_is_admin(user):\n return user in admins", "def is_admin():\n user = users.get_current_user()\n if user:\n if users.is_current_user_admin() or is_local_admin():\n return 'Yes, you are admin'\n else:\n return \"No, you don't admin\"\n else:\n return \"You not logged in\"", "def is_admin(user):\n return get_organisations_as_admin(user).count() > 0", "def isAdmin():\n\tif 'username' in session and session['username'] == 'admin':\n\t\treturn True\n\telse:\n\t\treturn False", "def is_user_admin(self, user):\n return user == self.created_by" ]
[ "0.7881886", "0.77665955", "0.77486885", "0.7689977", "0.7461093", "0.73404866", "0.7156036", "0.71471244", "0.71004605", "0.7084834", "0.7068", "0.7034636", "0.70345366", "0.6991166", "0.69782645", "0.6957708", "0.6923771", "0.6915408", "0.6905435", "0.6894784", "0.6880022", "0.6856704", "0.6856704", "0.6811209", "0.67997295", "0.6794856", "0.6762388", "0.67253304", "0.672264", "0.668783" ]
0.7862511
1
Sets the is_admin of this UserBase. A boolean specifying whether the user is an organization administrator.
def is_admin(self, is_admin): self._is_admin = is_admin
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_admin(self, is_admin: bool):\n if is_admin is None:\n raise ValueError(\"Invalid value for `is_admin`, must not be `None`\") # noqa: E501\n\n self._is_admin = is_admin", "def is_admin_user(self):\n if \"is_admin\" in self._properties and self.is_admin == 'YES':\n return True\n return False", "def is_admin(self) -> bool:\n return self._is_admin", "def is_admin(self):\n return self.admin", "def is_admin(self):\n return self._is_admin", "def is_admin(self):\n return self._is_admin", "def is_billing_admin(self, is_billing_admin):\n\n self._is_billing_admin = is_billing_admin", "def is_admin(self):\r\n return self.admin", "def super_admin(self, super_admin):\n\n self._super_admin = super_admin", "def is_admin(self):\n if self.user is None:\n return False\n if unicode(self.user._id) in self.barcamp.admins:\n return True\n if self.user.is_admin:\n return True\n return False", "def set_admin(self, admins):\n self.set_group(self._gp_admin_name, admins)", "def invalid_admin_state(isadmin):\n if isinstance(isadmin, bool):\n return False\n return True", "def is_admin(self, user) -> bool:\n return (\n user.is_superuser\n or user.groups.filter(pk=self.admins_group.pk).exists()\n )", "def is_admin(self,user):\n if user.is_superuser:\n return True\n\n if user.groups.filter(name=self.admin_group_name).count() > 0:\n return True\n else:\n return False", "def admin(self):\n if self.is_admin:\n return True\n return False", "def is_admin(user):\n return user.is_authenticated and user.id == app.config.get('ADMIN')", "def is_admin(self, user):\n return user.name in self.admins", "def isAdmin(self, user):\r\n if user.id in self.admins:\r\n return True\r\n return False", "def is_superuser(self):\n return self.is_admin", "def admin_flag(user_id):\n user = User.query.filter_by(id=user_id).first()\n if user.is_admin:\n return True\n return False", "def is_admin(self) -> bool:\n\n return current_app.config[\"AUTH_ROLE_ADMIN\"] in [\n role.name for role in self.get_user_roles()\n ]", "def is_admin(self):\n if self.type == 1:\n return True\n else:\n return False", "def is_admin(self):\n return False", "def isAdmin(user):\n return isUserType(user, Admin)", "def is_user_admin(self, user):\n return user == self.created_by", "def is_admin(self):\n if self.is_main_admin:\n return True\n if self.user is not None and self.barcamp is not None:\n if unicode(self.user._id) in self.barcamp.admins:\n return True\n return False", "def is_admin(self):\n if not self.current_user:\n return False\n else:\n return self.current_user in [\"1\"]", "def is_staff(self) -> bool:\n return self.is_admin", "def is_admin():\n # TODO(felipemonteiro): Make this more robust via a context is admin\n # lookup.\n return CONF.patrole.rbac_test_role == CONF.identity.admin_role", "def user_is_admin_or_superadmin(userobj):\n if userobj.is_superuser:\n return True\n else:\n return user_is_admin(userobj)" ]
[ "0.83135295", "0.66889644", "0.6337106", "0.626878", "0.62481576", "0.62481576", "0.62017304", "0.6183351", "0.6072974", "0.60349", "0.601608", "0.59858966", "0.59276825", "0.58737713", "0.58411723", "0.5830839", "0.5752557", "0.5735811", "0.5708521", "0.57063186", "0.57017344", "0.5699304", "0.5573182", "0.556661", "0.5545417", "0.55386096", "0.5518142", "0.5484885", "0.544499", "0.54327714" ]
0.8068177
1
Gets the is_owner of this UserBase.
def is_owner(self): return self._is_owner
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_user_is_owner(self):\n return self._tag == 'user_is_owner'", "def get_owner_object(self):\n return False", "def isowner(self, o):\n return self._owner is o", "def get_owner(self):\n return self.__owner", "def owner(self) -> discord.User:\n if self.config.owner_id:\n return self.get_user(self.config.owner_id)\n if self.owner_ids:\n return self.get_user(self.config.owner_ids[0])\n return None", "def get_owner(self):\n return self.settings.get(\"owner\", None)", "def get_owner(self, obj):\n return obj.user.username", "def owner(self) -> None:\n return self.bot.get_user(self.bot.config.owner_ids[0])", "def getOwner(self):\n return self.__owner", "def getOwner(self):\n return self.__owner", "def is_current_session_owner(self):\n\t\treturn bool(call_sdk_function('PrlAcl_IsCurrentSessionOwner', self.handle))", "def is_owner_or_privileged_user(obj_user, request):\n return (\n obj_user == request.user or request.user.is_superuser or is_admin_user(request)\n )", "def owner_id(self) -> Optional[str]:\n return pulumi.get(self, \"owner_id\")", "def owner_type(self) -> str:\n return pulumi.get(self, \"owner_type\")", "def getOwner(self):\r\n return self.owner", "def _get_owner(self):\n if self.resource.owner is not None:\n try:\n return pwd.getpwnam(self.resource.owner).pw_uid\n except KeyError:\n raise error.InvalidUser()", "def owner_id(self):\n return self._owner_id", "def GetOwnerManager(self):\r\n\r\n return self._owner_mgr", "def bot_owner_id(self):\n return self._bot_owner_id", "def is_owner(self, is_owner):\n\n self._is_owner = is_owner", "def user(self):\n return self.owner.user", "def owner(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"owner\")", "def is_active_user(self):\n\n return self.is_active", "def get_owner_object(self):\n return None", "def get_owner_object(self):\n return None", "def get_owner_object(self):\n return None", "def get_owner(self):\n owner = gdef.PSID()\n lpbOwnerDefaulted = gdef.BOOL()\n winproxy.GetSecurityDescriptorOwner(self, owner, lpbOwnerDefaulted)\n # Return None of owner is NULL\n return owner or None", "def business_owner(self):\n return self._business_owner", "def owner(self):\n return self._owner", "def owner(self):\n return self._owner" ]
[ "0.7645217", "0.7104894", "0.6914149", "0.6853681", "0.67683494", "0.6743861", "0.66868156", "0.65692985", "0.6566306", "0.6566306", "0.6566118", "0.6552337", "0.65283376", "0.6492521", "0.64828247", "0.6478465", "0.64477557", "0.6443881", "0.64351815", "0.6400954", "0.63200957", "0.6316049", "0.6304822", "0.62856317", "0.62856317", "0.62856317", "0.62702477", "0.6268768", "0.6229864", "0.6229864" ]
0.79326665
0
Sets the is_owner of this UserBase.
def is_owner(self, is_owner): self._is_owner = is_owner
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_owner(self, owner):\n self.__owner = owner", "def set_owner(self, owner):\n self.settings[\"owner\"] = owner", "def is_user_is_owner(self):\n return self._tag == 'user_is_owner'", "def set_owner(self, owner, is_stream=False):\n if is_stream:\n self._logger.debug('TCP Proto Stream is set!')\n self._stream = owner\n else:\n self._server = owner", "def owner_id(self, owner_id):\n\n self._owner_id = owner_id", "def owner_id(self, owner_id):\n self._owner_id = owner_id", "def owner_type(self, owner_type):\n\n self._owner_type = owner_type", "def set_owner_allowed(self, data):\n self._owner_allowed = self._uni(data)", "def owner(self, owner: str):\n\n self._owner = owner", "def owner(self, owner):\n\n self._owner = owner", "def owner(self, owner):\n\n self._owner = owner", "def owner(self, owner):\n\n self._owner = owner", "def owner(self, owner):\n\n self._owner = owner", "def is_owner(self):\n return self._is_owner", "def owner(self, owner):\n self._owner = owner", "def owner(self, owner):\n self._owner = owner", "def owner(self, owner):\n self._owner = owner", "def owner(self, owner):\n self._owner = owner", "def scope_owner(self, scope_owner):\n\n self._scope_owner = scope_owner", "def owner_reference(self, owner_reference):\n\n self._owner_reference = owner_reference", "def set_owner(self, owner: Optional[\"STACObject_Type\"]) -> \"Link\":\n self.owner = owner\n return self", "def owner_id(self, owner_id):\n if owner_id is None:\n raise ValueError(\"Invalid value for `owner_id`, must not be `None`\") # noqa: E501\n\n self._owner_id = owner_id", "def bot_owner_id(self, bot_owner_id):\n\n self._bot_owner_id = bot_owner_id", "def set_owner(self, data):\n self._owner = self._uni(data)\n self.add_payload('owner', data)", "def isowner(self, o):\n return self._owner is o", "def owner(self, owner):\n if self.local_vars_configuration.client_side_validation and owner is None: # noqa: E501\n raise ValueError(\"Invalid value for `owner`, must not be `None`\") # noqa: E501\n\n self._owner = owner", "def transfer_ownership(self, user):\n new_owner = get_user_model().objects.filter(is_active=True) \\\n .get(pk=user.pk)\n self.owner = new_owner", "def business_owner(self, business_owner):\n\n self._business_owner = business_owner", "def pre_save(self, obj):\n obj.owner = self.request.user", "def set_entity_owner_account_type(self, username, account_type):\n\t\treturn self._send_command_to_entity_server(us.SERVER_COMMAND_ENTITY_OWNER_SUDO_OPERATION, us.SERVER_COMMAND_SET_ENTITY_OWNER_ACCOUNT_TYPE + ':' + username + '|' + account_type)" ]
[ "0.6996318", "0.69127315", "0.6491776", "0.64011246", "0.63992107", "0.63773704", "0.636248", "0.63547474", "0.63318616", "0.6193567", "0.6193567", "0.6193567", "0.6193567", "0.6184652", "0.61401767", "0.61401767", "0.61401767", "0.61401767", "0.6025569", "0.59794116", "0.5975996", "0.5959993", "0.59532636", "0.5921516", "0.5841493", "0.5720495", "0.5709794", "0.56894433", "0.5669848", "0.55898285" ]
0.8064309
0
Gets the is_billing_admin of this UserBase.
def is_billing_admin(self): return self._is_billing_admin
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_billing_admin(self, is_billing_admin):\n\n self._is_billing_admin = is_billing_admin", "def is_admin(self):\n return self.admin", "def is_admin(self):\r\n return self.admin", "def is_admin(self):\n if self.user is None:\n return False\n if unicode(self.user._id) in self.barcamp.admins:\n return True\n if self.user.is_admin:\n return True\n return False", "def is_admin(self) -> bool:\n return self._is_admin", "def is_admin(self):\n return self._is_admin", "def is_admin(self):\n return self._is_admin", "def get_is_admin(self, obj):\n try:\n user = self.context.get('request').user\n except Exception:\n # raise serializers.ValidationError('Could not access request.user')\n return False\n if user == obj.admin:\n return True\n else:\n return False", "def is_admin(self):\n if self.is_main_admin:\n return True\n if self.user is not None and self.barcamp is not None:\n if unicode(self.user._id) in self.barcamp.admins:\n return True\n return False", "def is_admin_user(self):\n if \"is_admin\" in self._properties and self.is_admin == 'YES':\n return True\n return False", "def admin(self):\n if self.is_admin:\n return True\n return False", "def is_admin(self):\n if not self.current_user:\n return False\n else:\n return self.current_user in [\"1\"]", "def billing(self):\n return self._billing", "def check_is_admin(current_user):\n return current_user['isAdmin'] == True", "def is_admin(self):\n if self.type == 1:\n return True\n else:\n return False", "def default_billing(self):\n return self._default_billing", "def admin_flag(user_id):\n user = User.query.filter_by(id=user_id).first()\n if user.is_admin:\n return True\n return False", "def is_admin(self,user):\n if user.is_superuser:\n return True\n\n if user.groups.filter(name=self.admin_group_name).count() > 0:\n return True\n else:\n return False", "def is_superuser(self):\n return self.is_admin", "def is_admin(self, user) -> bool:\n return (\n user.is_superuser\n or user.groups.filter(pk=self.admins_group.pk).exists()\n )", "def is_admin(self):\n return False", "def isAdmin(self, user):\r\n if user.id in self.admins:\r\n return True\r\n return False", "def is_user_cloud_admin(self):\n user = users.get_current_user()\n if not user:\n return False\n try:\n user_info = self.get_by_id(UserInfo, user.email())\n if user_info:\n return user_info.is_user_cloud_admin\n else:\n return False\n except Exception as err:\n logging.exception(err)\n return False", "def is_admin(user):\n return user.is_authenticated and user.id == app.config.get('ADMIN')", "def is_admin(self) -> bool:\n\n return current_app.config[\"AUTH_ROLE_ADMIN\"] in [\n role.name for role in self.get_user_roles()\n ]", "def is_admin(self, user):\n return user.name in self.admins", "def is_user_admin(request):\n return request.user.is_superuser", "def user_is_admin(user):\n return user in admins", "def check_admin() -> bool:\n return ctypes.windll.shell32.IsUserAnAdmin() == 1", "def is_user_admin(self, user):\n return user == self.created_by" ]
[ "0.75745505", "0.6962725", "0.6885912", "0.6775686", "0.67065483", "0.6678298", "0.6678298", "0.66013443", "0.6554211", "0.6432071", "0.6286309", "0.6203971", "0.61566216", "0.6140026", "0.61183566", "0.60726625", "0.6026393", "0.6017969", "0.59559476", "0.59489256", "0.59279305", "0.5905573", "0.5896796", "0.58894575", "0.58808905", "0.5857964", "0.57998943", "0.57950556", "0.57774943", "0.57565796" ]
0.8872368
0
Sets the is_billing_admin of this UserBase.
def is_billing_admin(self, is_billing_admin): self._is_billing_admin = is_billing_admin
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_billing_admin(self):\n return self._is_billing_admin", "def is_admin(self, is_admin):\n\n self._is_admin = is_admin", "def is_admin(self, is_admin: bool):\n if is_admin is None:\n raise ValueError(\"Invalid value for `is_admin`, must not be `None`\") # noqa: E501\n\n self._is_admin = is_admin", "def default_billing(self, default_billing):\n\n self._default_billing = default_billing", "def billing(self, billing):\n\n self._billing = billing", "def super_admin(self, super_admin):\n\n self._super_admin = super_admin", "def promote(self):\n if self.is_admin == True:\n pass\n self.is_admin = True\n User.save(self)", "def is_admin(self):\n return self.admin", "def is_admin_user(self):\n if \"is_admin\" in self._properties and self.is_admin == 'YES':\n return True\n return False", "def set_admin(self, admins):\n self.set_group(self._gp_admin_name, admins)", "def admin_email(self, admin_email):\n\n self._admin_email = admin_email", "def is_admin(self):\r\n return self.admin", "def default_billing(self):\n return self._default_billing", "def is_admin(self) -> bool:\n return self._is_admin", "def is_admin(self):\n if self.user is None:\n return False\n if unicode(self.user._id) in self.barcamp.admins:\n return True\n if self.user.is_admin:\n return True\n return False", "def billing_info(self, billing_info):\n\n self._billing_info = billing_info", "def admin(self):\n if self.is_admin:\n return True\n return False", "def put(self, user_id):\n self.conn = pecan.request.db_conn\n self.conn.change_billing_owner(request.context,\n project_id=self.project_id,\n user_id=user_id)", "def is_admin(self):\n return self._is_admin", "def is_admin(self):\n return self._is_admin", "def admin_host(self, admin_host):\n\n self._admin_host = admin_host", "def billing_currency(self, billing_currency):\n\n self._billing_currency = billing_currency", "def is_admin(self):\n if self.is_main_admin:\n return True\n if self.user is not None and self.barcamp is not None:\n if unicode(self.user._id) in self.barcamp.admins:\n return True\n return False", "def admin_flag(user_id):\n user = User.query.filter_by(id=user_id).first()\n if user.is_admin:\n return True\n return False", "def invalid_admin_state(isadmin):\n if isinstance(isadmin, bool):\n return False\n return True", "def billing(self):\n return self._billing", "def is_admin(self):\n return False", "def is_taxable(self, is_taxable: bool):\n if is_taxable is None:\n raise ValueError(\"Invalid value for `is_taxable`, must not be `None`\") # noqa: E501\n\n self._is_taxable = is_taxable", "def set_gateway(self, bool_value):\n self.chkbtn_gateway.set(bool_value)", "def get_is_admin(self, obj):\n try:\n user = self.context.get('request').user\n except Exception:\n # raise serializers.ValidationError('Could not access request.user')\n return False\n if user == obj.admin:\n return True\n else:\n return False" ]
[ "0.737039", "0.6722866", "0.6429481", "0.59705704", "0.5679304", "0.5440798", "0.5371101", "0.52895933", "0.52759075", "0.52700037", "0.52601206", "0.5256608", "0.5244197", "0.5224175", "0.517771", "0.51590234", "0.5110502", "0.50515014", "0.503452", "0.503452", "0.50335866", "0.5030447", "0.498472", "0.4955349", "0.49360868", "0.49040446", "0.48618492", "0.48221278", "0.48101145", "0.4800235" ]
0.8755264
0
Gets the bot_type of this UserBase.
def bot_type(self): return self._bot_type
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def chat_type(self) -> str:\n return self.user.chat_type.name", "def lobby_type(self):\n return self._get(\"lobby_type\")", "def get_type(self) -> str:\n return Tables.USER.name", "def user_type(self):\n if \"userType\" in self._prop_dict:\n return self._prop_dict[\"userType\"]\n else:\n return None", "def user_type(self):\n if \"userType\" in self._prop_dict:\n return self._prop_dict[\"userType\"]\n else:\n return None", "def get_type(self):\n return self.type", "def get_type(self):\n return self.type", "def get_type(self):\n return self._type", "def get_type(self):\n return self._type", "def bot(self):\n return self._bot", "def get_type(self):\n return self._type_obj", "def get_type(self) -> str:\n return self.type", "def get_type(self):\n return self._TYPE", "def user_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user_type\")", "def user_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user_type\")", "def user_type(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"user_type\")", "def bot_type(self, bot_type):\n\n self._bot_type = bot_type", "def get_entity_type(self):\n return self.entity_type", "def getType(self):\n return self.type_", "def get_session_type(self) -> Type[ba.Session]:\n return self._sessiontype", "def getType(self):\n return self.type", "def get_type(self, ):\n return self.attrs.get(self.AttributeNames.TYPE, None)", "def get_hashtype(self):\n return self.__hashtype", "def type(self):\n return self._type", "def type(self):\n return self._type", "def type(self):\n return self._type", "def type(self):\n return self._type", "def type(self):\n return self._type", "def type(self):\n return self._type", "def type(self):\n return self._type" ]
[ "0.64627266", "0.6358613", "0.614378", "0.6125625", "0.6125625", "0.59903854", "0.59903854", "0.5968016", "0.5968016", "0.59540266", "0.5942109", "0.58541214", "0.585159", "0.58268684", "0.58268684", "0.58098865", "0.57802206", "0.5750634", "0.5730113", "0.56939965", "0.5684667", "0.5662987", "0.5648909", "0.56009966", "0.56009966", "0.56009966", "0.56009966", "0.56009966", "0.56009966", "0.56009966" ]
0.83526576
0
Sets the bot_type of this UserBase.
def bot_type(self, bot_type): self._bot_type = bot_type
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bot_type(self):\n return self._bot_type", "def set_type(self, rtype=ALL_USERS):\r\n self.type = rtype", "def set_type(self, type):\n self.type = type", "def set_type(self, type):\n self.type = type", "def is_bot(self, is_bot):\n\n self._is_bot = is_bot", "def set_type(self, type_balle):\n self.type_balle = type_balle", "def set_type(self, type):\n self._type = type", "def set_as_type_user(self):\n self.type = MessageTypes.USER", "def set_auth_type(self, auth_type):\n pass", "def entity_type(self, entity_type: str):\n\n self._entity_type = entity_type", "def engine_type(self, engine_type):\n\n self._engine_type = engine_type", "def owner_type(self, owner_type):\n\n self._owner_type = owner_type", "def entity_type(self, entity_type):\n\n self._entity_type = entity_type", "def type(self, type: str):\n\n self._type = type", "def account_type(self, account_type):\n\n self._account_type = account_type", "def account_type(self, account_type):\n\n self._account_type = account_type", "def account_type(self, account_type):\n\n self._account_type = account_type", "def account_type(self, account_type):\n\n self._account_type = account_type", "def entity_type(self, entity_type):\n self._entity_type = entity_type", "def type(self, type):\n self._type = type", "def type(self, type):\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type" ]
[ "0.6873118", "0.60826707", "0.5980632", "0.5980632", "0.5958783", "0.59285235", "0.58882517", "0.56953204", "0.56326497", "0.5610567", "0.55728096", "0.5503789", "0.5461631", "0.54612577", "0.5455133", "0.5455133", "0.5455133", "0.5455133", "0.5442875", "0.5397702", "0.5397702", "0.53877443", "0.53877443", "0.53877443", "0.53877443", "0.53877443", "0.53877443", "0.53877443", "0.53877443", "0.53877443" ]
0.8411738
0
Gets the bot_owner_id of this UserBase.
def bot_owner_id(self): return self._bot_owner_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def owner(self) -> None:\n return self.bot.get_user(self.bot.config.owner_ids[0])", "def owner_id(self) -> int:\n return self.proto.owner", "def owner_id(self):\n return self._owner_id", "def owner_id(self) -> str:\n return self.__owner_id", "def owner_id(self) -> str:\n return pulumi.get(self, \"owner_id\")", "def owner_id(self) -> Optional[str]:\n return pulumi.get(self, \"owner_id\")", "def owner(self) -> discord.User:\n if self.config.owner_id:\n return self.get_user(self.config.owner_id)\n if self.owner_ids:\n return self.get_user(self.config.owner_ids[0])\n return None", "def owner_account_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"owner_account_id\")", "def get_owner(self, obj):\n return obj.user.username", "def owner_account_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"owner_account_id\")", "def get_owner(self):\n return self.__owner", "def _get_owner(self):\n if self.resource.owner is not None:\n try:\n return pwd.getpwnam(self.resource.owner).pw_uid\n except KeyError:\n raise error.InvalidUser()", "def get_owner(self):\n return self.settings.get(\"owner\", None)", "def getOwner(self):\n return self.__owner", "def getOwner(self):\n return self.__owner", "def bot_owner_id(self, bot_owner_id):\n\n self._bot_owner_id = bot_owner_id", "def owner(self) -> str:\n return self._owner", "def user(self):\n return self.owner.user", "def business_owner(self):\n return self._business_owner", "def getOwner(self):\r\n return self.owner", "def owner(self):\n return Organization.objects.get(id=self.owner_id)", "def owner(self):\n answer = self._call('owner')\n return answer.owner", "def owner(self):\n return self._owner", "def owner(self):\n return self._owner", "def owner(self):\n return self._owner", "def owner(self):\n return self._owner", "def owner(self):\n return self._owner", "def user(self):\n if self._user is None:\n pk, full_name = self.owner.split(',')\n pk = int(pk)\n self._user = User.objects.get(pk=pk)\n return self._user", "def get_owner_object(self):\n return None", "def get_owner_object(self):\n return None" ]
[ "0.74863726", "0.7462389", "0.7429973", "0.73903984", "0.7214011", "0.7123705", "0.70963526", "0.7041297", "0.6885209", "0.6862868", "0.68442214", "0.6656169", "0.66238314", "0.65125304", "0.65125304", "0.6489912", "0.6465008", "0.639576", "0.636058", "0.63590413", "0.63192767", "0.6281676", "0.6201579", "0.6201579", "0.6201579", "0.6201579", "0.6201579", "0.616952", "0.6150902", "0.6150902" ]
0.86790913
0
Sets the bot_owner_id of this UserBase.
def bot_owner_id(self, bot_owner_id): self._bot_owner_id = bot_owner_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def owner_id(self, owner_id):\n\n self._owner_id = owner_id", "def owner_id(self, owner_id):\n self._owner_id = owner_id", "def bot_owner_id(self):\n return self._bot_owner_id", "def set_owner(self, owner):\n self.__owner = owner", "def owner_id(self, owner_id):\n if owner_id is None:\n raise ValueError(\"Invalid value for `owner_id`, must not be `None`\") # noqa: E501\n\n self._owner_id = owner_id", "def set_owner(self, owner):\n self.settings[\"owner\"] = owner", "def owner(self, owner: str):\n\n self._owner = owner", "def owner(self, owner):\n\n self._owner = owner", "def owner(self, owner):\n\n self._owner = owner", "def owner(self, owner):\n\n self._owner = owner", "def owner(self, owner):\n\n self._owner = owner", "def owner(self, owner):\n self._owner = owner", "def owner(self, owner):\n self._owner = owner", "def owner(self, owner):\n self._owner = owner", "def owner(self, owner):\n self._owner = owner", "def add_owner_id(data=None, **kw):\n data['owner_id'] = current_user.id", "def set_owner(self, data):\n self._owner = self._uni(data)\n self.add_payload('owner', data)", "def scope_owner(self, scope_owner):\n\n self._scope_owner = scope_owner", "def business_owner(self, business_owner):\n\n self._business_owner = business_owner", "def set_owner(self, owner: Optional[\"STACObject_Type\"]) -> \"Link\":\n self.owner = owner\n return self", "def owner_id(self) -> str:\n return self.__owner_id", "def owner(self, owner):\n if self.local_vars_configuration.client_side_validation and owner is None: # noqa: E501\n raise ValueError(\"Invalid value for `owner`, must not be `None`\") # noqa: E501\n\n self._owner = owner", "def owner_id(self):\n return self._owner_id", "def migrate_fix_invalid_bot_owner_values(\n apps: StateApps, schema_editor: BaseDatabaseSchemaEditor\n) -> None:\n UserProfile = apps.get_model(\"zerver\", \"UserProfile\")\n UserProfile.objects.filter(is_bot=False).exclude(bot_owner=None).update(bot_owner=None)", "async def set_bot():\n\n self = await LOCAL.APP.get_me()\n LOCAL.bot_id = self.id\n LOCAL.bot_name = self.first_name\n LOCAL.bot_username = self.username", "def owner_reference(self, owner_reference):\n\n self._owner_reference = owner_reference", "def owner_id(self) -> int:\n return self.proto.owner", "def is_owner(self, is_owner):\n\n self._is_owner = is_owner", "def business_owner_email(self, business_owner_email):\n\n self._business_owner_email = business_owner_email", "def owner_type(self, owner_type):\n\n self._owner_type = owner_type" ]
[ "0.73704034", "0.733693", "0.71219534", "0.69730103", "0.6940321", "0.6678052", "0.64090276", "0.62162405", "0.62162405", "0.62162405", "0.62162405", "0.6192035", "0.6192035", "0.6192035", "0.6192035", "0.60748756", "0.6034818", "0.59885144", "0.5901746", "0.58401716", "0.57850367", "0.5725079", "0.57130444", "0.56528014", "0.5643406", "0.56280756", "0.5620602", "0.560691", "0.5572163", "0.5538237" ]
0.8572632
0
Gets the is_guest of this UserBase. A boolean specifying whether the user is a guest user.
def is_guest(self): return self._is_guest
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def guest(self):\n return self._guest", "def is_guest(self, is_guest):\n\n self._is_guest = is_guest", "def get_guest_user():\n\n guest = User.query.filter_by(username=\"guest\").first()\n\n if guest == None:\n guest = User(username=\"guest\",\n password_token=pbkdf2_sha256.hash(\"password\"))\n db.session.add(guest)\n db.session.commit()\n\n return guest", "def guest(self) -> Optional[GuestContainer]:\n return self._guest", "def guest_configuration_enabled(self) -> bool:\n return pulumi.get(self, \"guest_configuration_enabled\")", "def is_superuser(self):\n return self.is_admin", "def is_admin_user(self):\n if \"is_admin\" in self._properties and self.is_admin == 'YES':\n return True\n return False", "def is_superuser(self):\n sesh = self.get_session()\n return sesh.curr_role == 'admin'", "def is_guest_sharing_enabled(self):\n\t\treturn bool(call_sdk_function('PrlVmCfg_IsGuestSharingEnabled', self.handle))", "def validUser(self):\n if self.state == SessionStates.LOGGED_OUT:\n return False\n\n # if self.user == None:\n # return False\n return True", "def is_admin(self) -> bool:\n return self._is_admin", "def is_user_admin(self, user):\n return user == self.created_by", "def is_admin(self):\n return self._is_admin", "def is_admin(self):\n return self._is_admin", "def is_active_user(self):\n\n return self.is_active", "def is_admin(self):\n return self.admin", "def user_verified(self):\n return self.user.verified", "def is_admin(self):\n if not self.current_user:\n return False\n else:\n return self.current_user in [\"1\"]", "def is_registered(self):\n if self.user == getpass.getuser():\n return True\n else:\n return False", "def is_admin(self):\n if self.user is None:\n return False\n if unicode(self.user._id) in self.barcamp.admins:\n return True\n if self.user.is_admin:\n return True\n return False", "def get_has_password(self, user):\n return True if user.password else False", "def has_guest(self, key: int) -> bool:\r\n return self.guests.__contains__(key)", "def is_admin(self):\r\n return self.admin", "def get_is_portal_enabled(self):\n return self.is_portal_enabled", "def is_logged_in(self):\n return self.__is_logged_in", "def logged_in(self):\n return self.user is not None", "def is_guest_active(self, *guest_of_list: GuestOf) -> bool:\n if len(guest_of_list) == 0:\n return self._is_any_guest_active()\n\n for guest_of in guest_of_list:\n usergroup = self._get_user_group(guest_of)\n if usergroup and usergroup.is_home:\n return True\n\n return False", "def get_is_admin(self, obj):\n try:\n user = self.context.get('request').user\n except Exception:\n # raise serializers.ValidationError('Could not access request.user')\n return False\n if user == obj.admin:\n return True\n else:\n return False", "def is_active(self):\n return self.user.is_active", "def isSuper(self):\n user = self.getSession()\n return self.pipe.auth.isSuper(user)" ]
[ "0.67751104", "0.652903", "0.6333875", "0.5840743", "0.5831468", "0.57090145", "0.54347944", "0.53395015", "0.5297339", "0.52234644", "0.5145995", "0.514241", "0.5127566", "0.5127566", "0.50955236", "0.50857407", "0.5083316", "0.5064882", "0.5058896", "0.5041641", "0.50172037", "0.50107485", "0.5009928", "0.4997323", "0.49940282", "0.49795017", "0.49791917", "0.49771118", "0.4947195", "0.49445486" ]
0.78533375
0
Sets the is_guest of this UserBase. A boolean specifying whether the user is a guest user.
def is_guest(self, is_guest): self._is_guest = is_guest
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def guest(self, guest):\n if guest is None:\n raise ValueError(\"Invalid value for `guest`, must not be `None`\")\n\n self._guest = guest", "def is_guest(self):\n return self._is_guest", "def is_admin(self, is_admin):\n\n self._is_admin = is_admin", "def is_admin(self, is_admin: bool):\n if is_admin is None:\n raise ValueError(\"Invalid value for `is_admin`, must not be `None`\") # noqa: E501\n\n self._is_admin = is_admin", "def guest(self):\n return self._guest", "def get_guest_user():\n\n guest = User.query.filter_by(username=\"guest\").first()\n\n if guest == None:\n guest = User(username=\"guest\",\n password_token=pbkdf2_sha256.hash(\"password\"))\n db.session.add(guest)\n db.session.commit()\n\n return guest", "def set_guest_sharing_enabled(self, bVmGuestSharingEnabled):\n\t\tcall_sdk_function('PrlVmCfg_SetGuestSharingEnabled', self.handle, bVmGuestSharingEnabled)", "def is_billing_admin(self, is_billing_admin):\n\n self._is_billing_admin = is_billing_admin", "def test_guest_user_created(self):\n self.assertFalse(USER_MODEL.objects.all().exists())\n self.fill_session_cart()\n self.client.post(self.CHECKOUT_URL, self.build_checkout_form())\n self.assertTrue(USER_MODEL.objects.get().is_guest)", "def guest_configuration_enabled(self) -> bool:\n return pulumi.get(self, \"guest_configuration_enabled\")", "def set_is_default(self):\n self.is_default = True", "def set_is_default(self):\n self.is_default = True", "def is_default(self, is_default):\n\n self._is_default = is_default", "def is_default(self, is_default: bool):\n\n self._is_default = is_default", "def is_default(self, is_default):\n # type: (bool) -> None\n\n if is_default is not None:\n if not isinstance(is_default, bool):\n raise TypeError(\"Invalid type for `is_default`, type has to be `bool`\")\n\n self._is_default = is_default", "def virtual_flag(self, value):\n if not isinstance(value, bool):\n raise TypeError(\"virtual_flag must be bool.\")\n self._virtual_flag = value", "def is_live(self, is_live):\n # type: (bool) -> None\n\n if is_live is not None:\n if not isinstance(is_live, bool):\n raise TypeError(\"Invalid type for `is_live`, type has to be `bool`\")\n\n self._is_live = is_live", "def is_user_event(self, is_user_event):\n self._is_user_event = is_user_event", "def _set_user_authenticated(user_id: int, device_id: int, value: bool = True) -> None:\n client = net_interface.get_user()\n client.is_authenticated = value\n client.user_id = user_id\n client.device_id = device_id", "def is_flagged(self, is_flagged):\n \n self._is_flagged = is_flagged", "def SetIsDown(self, isDown):\n\n self.isDown = isDown", "def is_admin_user(self):\n if \"is_admin\" in self._properties and self.is_admin == 'YES':\n return True\n return False", "def set_is_active(self, status):\n if self.role == User.ROLE_ADMIN:\n if not self.last_active_admin():\n self.is_active = (status == User.STATUS_ACTIVE)\n else:\n self.is_active = True\n self.status = User.STATUS_ACTIVE\n else:\n self.is_active = (status == User.STATUS_ACTIVE)", "def save(self, *args, **kwargs):\r\n\r\n\t\t# if self.has_django_dashboard_access is True:\r\n\t\t# self.is_staff = True\r\n\t\tsuper(User, self).save(*args, **kwargs)", "def promote(self):\n if self.is_admin == True:\n pass\n self.is_admin = True\n User.save(self)", "def set_user(self, user):\r\n self.user = user", "def _setForBinding (self, value):\n if not isinstance(value, bool):\n raise TypeError(value)\n self.__forBinding = value\n return value", "def super_admin(self, super_admin):\n\n self._super_admin = 
super_admin", "def set_is_portal_enabled(self, is_portal_enabled):\n self.is_portal_enabled = is_portal_enabled", "def is_ghost(self, is_ghost):\n\n self._is_ghost = is_ghost" ]
[ "0.63713956", "0.63543206", "0.5549084", "0.52629703", "0.50986946", "0.48912513", "0.47999674", "0.47975162", "0.474734", "0.4700092", "0.46394247", "0.46394247", "0.4637681", "0.46297148", "0.45938712", "0.4591176", "0.45438093", "0.4535325", "0.45209828", "0.45110002", "0.4494369", "0.44848543", "0.44292068", "0.44109356", "0.4408892", "0.43858194", "0.43818155", "0.43723354", "0.43681446", "0.43606648" ]
0.82178175
0
Gets the date_joined of this UserBase. The time the user account was created.
def date_joined(self): return self._date_joined
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_account_created_date(self):\n return self.account_created_date", "def getJoiningTime(self):\n return self.__joinTime", "def created_at(self):\n created_at = self.joined_at\n if created_at is None:\n created_at = DISCORD_EPOCH_START\n \n return created_at", "def date_joined(self, date_joined):\n\n self._date_joined = date_joined", "def getUserSignupDate(self):\r\n userinfo = self.getUserInfo()\r\n timestamp = int(float(userinfo[\"signupTimeSec\"]))\r\n return time.strftime(\"%m/%d/%Y %H:%M\", time.gmtime(timestamp))", "def joined_days(self):\n return (timezone.now() - self.user.date_joined).days", "def created_date_time(self) -> str:\n return pulumi.get(self, \"created_date_time\")", "def date_created(self):\n return self._date_created", "def date_created(self):\n return self._date_created", "def date_created(self):\n return self._date_created", "def date_created(self) -> datetime:\n return self._date_created", "def created_date(self):\n return self._created_date", "def created_date(self):\n return self._created_date", "def created_time(self) -> datetime.datetime:\n return self.__created_time", "def created_time(self) -> datetime.datetime:\n return self.__created_time", "def date_added(self) -> str:\n return self._date_added.strftime('%Y-%m-%d')", "def created_on(self):\n return self.get_time(\"created_on\")", "def get_account_created_date_formatted(self):\n return self.account_created_date_formatted", "def date_created(self) -> str:\n return pulumi.get(self, \"date_created\")", "def GetDateCreated(self):\n return str(self.datecreated)", "def registered_date(self):\n return sa.Column(sa.TIMESTAMP(timezone=False), default=datetime.datetime.utcnow, server_default=sa.func.now())", "def time_created(self):\n return self._time_created", "def time_created(self):\n return self._time_created", "def time_created(self):\n return self._time_created", "def time_created(self):\n return self._time_created", "def date_registered(self) -> datetime:\n return datetime.utcfromtimestamp(self.registered)", "def created_user(self):\n return self._created_user", "def create_time(self):\n return self._create_time", "def create_time(self):\n return self._create_time", "def create_time(self):\n return self._create_time" ]
[ "0.62656933", "0.6219153", "0.59980553", "0.59758735", "0.5934436", "0.587002", "0.57954764", "0.5775895", "0.5775895", "0.5775895", "0.57507694", "0.57064766", "0.57064766", "0.56547153", "0.56547153", "0.56422794", "0.5618498", "0.5597825", "0.55175257", "0.550172", "0.54831547", "0.5463635", "0.5463635", "0.5463635", "0.5463635", "0.54320157", "0.54100984", "0.54035217", "0.54035217", "0.54035217" ]
0.7982348
0
Sets the date_joined of this UserBase. The time the user account was created.
def date_joined(self, date_joined): self._date_joined = date_joined
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def date_joined(self):\n return self._date_joined", "def test_user_creation_old_date_joined(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n self.assertEqual(new_user.get_username(), 'alice')\n self.assertEqual(new_user.email, '[email protected]')\n self.assertTrue(new_user.check_password('swordfish'))\n self.assertFalse(new_user.is_active)\n\n expiry_date = datetime_now() - timedelta(settings.ACCOUNT_ACTIVATION_DAYS)\n self.assertGreater(new_user.date_joined, expiry_date)", "def set_account_created_date(self, account_created_date):\n self.account_created_date = account_created_date", "def test_unexpired_account_old_date_joined(self):\n self.user_info['date_joined'] = datetime_now(\n ) - timedelta(settings.ACCOUNT_ACTIVATION_DAYS + 1)\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n profile = self.registration_profile.objects.get(user=new_user)\n self.assertFalse(profile.activation_key_expired())", "def save(self, *args, **kwargs):\n if not self.user_id or not self.created:\n self.created = datetime.datetime.today()\n self.modified = datetime.datetime.today()\n return super(UserProfile, self).save(*args, **kwargs)", "def _set_joined(self, data):\n if self.joined_at is None:\n self.joined_at = parse_joined_at(data)", "def SetDateCreated(self, date):\n self.datecreated = str(date)", "def date_created(self, date_created):\n self._date_created = date_created", "def date_created(self, date_created):\n self._date_created = date_created", "def created_date(self, created_date):\n self._created_date = created_date", "def date_created(self, date_created: datetime):\n\n self._date_created = date_created", "def created_user(self, created_user):\n self._created_user = created_user", "def create_user(self) -> None:\n # update when the account was created\n self.account_created = datetime.now().date()\n self.insert_to_db()\n log(f\"An account for User:{self.id} has been created.\")", "def date_created(self, date_created):\n\n self._date_created = date_created", "def date_created(self, date_created):\n\n self._date_created = date_created", "def date_created(self, date_created):\n\n self._date_created = date_created", "def date_created(self, date_created):\n \n self._date_created = date_created", "def created_date(self, created_date):\n\n self._created_date = created_date", "def created_date(self, created_date):\n\n self._created_date = created_date", "def created_date(self, created_date):\n\n self._created_date = created_date", "def created_date(self, created_date):\n\n self._created_date = created_date", "def created_date(self, created_date):\n\n self._created_date = created_date", "def joined_days(self):\n return (timezone.now() - self.user.date_joined).days", "async def on_member_join(self, member: Member):\n\n if not self._is_tracked(member.guild, EventPriority.join):\n return\n\n em = self.em_base(\n member,\n f\"User {member.mention} ({member.name}) joined\",\n EventColors.join.value\n )\n\n em.add_field(\n name=\"Account Creation Timestamp\",\n value=self._get_timestamp()\n )\n\n await self.log_event(em, member.guild, priority=EventPriority.join)", "def joined_dts(self, joined_dts):\n\n self._joined_dts = joined_dts", "def set_last_used_on(self):\n self.last_used_on = datetime.now()\n self.save()", "def expire_date(self, expire_date):\n\n self._expire_date = expire_date", "def set_account_created_date_formatted(self, 
account_created_date_formatted):\n self.account_created_date_formatted = account_created_date_formatted", "def created_at(self):\n created_at = self.joined_at\n if created_at is None:\n created_at = DISCORD_EPOCH_START\n \n return created_at", "def test_user_creation(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n self.assertEqual(new_user.get_username(), 'alice')\n self.assertEqual(new_user.email, '[email protected]')\n self.assertTrue(new_user.check_password('swordfish'))\n self.assertFalse(new_user.is_active)\n\n expiration_date = datetime_now() - timedelta(\n settings.ACCOUNT_ACTIVATION_DAYS\n )\n self.assertGreater(new_user.date_joined, expiration_date)" ]
[ "0.66438895", "0.581493", "0.56453663", "0.51747286", "0.5124787", "0.5048352", "0.49426377", "0.49251464", "0.49251464", "0.49176228", "0.49075496", "0.49066377", "0.49043387", "0.48563272", "0.48563272", "0.48563272", "0.48529363", "0.48464775", "0.48464775", "0.48464775", "0.48464775", "0.48464775", "0.47991368", "0.47317973", "0.47167116", "0.47155413", "0.47145188", "0.4700385", "0.4690831", "0.46861255" ]
0.7769368
0
Gets the delivery_email of this UserBase. The user's real email address. This field is present only if [email address visibility](/help/restrict-visibility-of-email-addresses) is limited and you are an administrator with access to real email addresses under the configured policy.
def delivery_email(self): return self._delivery_email
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_user_email(self):\n member = self.get_user()\n if member:\n return member.getProperty('email')", "def email(self):\n billing_contact = self.owner.organization_user.user\n return billing_contact.email", "def get_email(self):\n return self._email", "def get_email(self):\n return self.reference[REF_EMAIL_ADDRESS][REF_VALUE]", "def get_email(self):\n return self.email", "def getEmail(self):\n return self.__email", "def customer_email(self):\n return self._customer_email", "def email(self):\n return self._email", "def email(self):\n return self._email", "def email(self):\n return self._email", "def email(self):\n return self._email", "def delivery_identity(self) -> Optional[pulumi.Input['EventSubscriptionDeliveryIdentityArgs']]:\n return pulumi.get(self, \"delivery_identity\")", "def delivery_identity(self) -> Optional[pulumi.Input['EventSubscriptionDeliveryIdentityArgs']]:\n return pulumi.get(self, \"delivery_identity\")", "def email(self):\n # Look for a primary address\n useremail = UserEmail.query.filter_by(user_id=self.id, primary=True).first()\n if useremail:\n return useremail\n # No primary? Maybe there's one that's not set as primary?\n useremail = UserEmail.query.filter_by(user_id=self.id).first()\n if useremail:\n # XXX: Mark at primary. This may or may not be saved depending on\n # whether the request ended in a database commit.\n useremail.primary = True\n return useremail\n # This user has no email address. Return a blank string instead of None\n # to support the common use case, where the caller will use unicode(user.email)\n # to get the email address as a string.\n return u''", "def getEmail(self):\n return self.email", "def delivery_identity(self) -> pulumi.Output[Optional['outputs.EventSubscriptionDeliveryIdentity']]:\n return pulumi.get(self, \"delivery_identity\")", "def email(self, instance):\r\n return instance.user.email", "def business_email(self):\n return self._business_email", "def email_address(self) -> str:\n return self._email_address", "def getEmail(self):\n\t\treturn self.Email", "def email(self) -> str:\n return self._email", "def _get_user_email_address(self, request):\n return request.session.get(SESSION_VAR_EMAIL_ADDRESS, not request.user.is_anonymous() and request.user.email)", "def delivery_email(self, delivery_email):\n\n self._delivery_email = delivery_email", "def email(self):\n return self._dict.get('email')", "def delivery(self):\n return self._delivery", "def business_owner_email(self):\n return self._business_owner_email", "def management_account_email(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"management_account_email\")", "def log_useremail(self):\n return self.user.email", "def user_email(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"user_email\")", "def get_assignee_email(self, assignee_id):\n response = self.http_call(\"{0}/users/{1}.json\".format(self.uri, assignee_id))\n return json.loads(response.content.decode(sys.stdout.encoding, \"replace\"))[\"user\"][\"email\"]" ]
[ "0.6896622", "0.68197286", "0.66776025", "0.6648298", "0.64765364", "0.6463804", "0.6460512", "0.64135796", "0.64135796", "0.64135796", "0.64135796", "0.63343066", "0.63343066", "0.6321957", "0.63091224", "0.62989455", "0.6274076", "0.6220656", "0.62095183", "0.6136559", "0.61223", "0.6116247", "0.6104205", "0.60089004", "0.5989312", "0.5968295", "0.5952045", "0.5948589", "0.59365845", "0.5927442" ]
0.8150735
0
Sets the delivery_email of this UserBase. The user's real email address. This field is present only if [email address visibility](/help/restrict-visibility-of-email-addresses) is limited and you are an administrator with access to real email addresses under the configured policy.
def delivery_email(self, delivery_email): self._delivery_email = delivery_email
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delivery_email(self):\n return self._delivery_email", "def delivery(self, delivery):\n if self.local_vars_configuration.client_side_validation and delivery is None: # noqa: E501\n raise ValueError(\"Invalid value for `delivery`, must not be `None`\") # noqa: E501\n\n self._delivery = delivery", "def setEmail(self, email):\n self.email = email\n return self", "def email(self, email):\n if self.local_vars_configuration.client_side_validation and email is None: # noqa: E501\n raise ValueError(\"Invalid value for `email`, must not be `None`\") # noqa: E501\n\n self._email = email", "def email(self, email):\n if self.local_vars_configuration.client_side_validation and email is None: # noqa: E501\n raise ValueError(\"Invalid value for `email`, must not be `None`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n email is not None and len(email) > 64):\n raise ValueError(\"Invalid value for `email`, length must be less than or equal to `64`\") # noqa: E501\n\n self._email = email", "def admin_email(self, admin_email):\n\n self._admin_email = admin_email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def customer_email(self, customer_email):\n self._customer_email = customer_email", "def email(self, email: str):\n\n self._email = email", "def business_owner_email(self, business_owner_email):\n\n self._business_owner_email = business_owner_email", "def recipient_email(self, recipient_email):\n\n self._recipient_email = recipient_email", "def business_email(self, business_email):\n\n self._business_email = business_email", "def setEmail(self, *args):\n return _libsbml.ModelCreator_setEmail(self, *args)", "def message_delivery(self, delivery: MessageDelivery):\n self._message_delivery = delivery", "def technical_owner_email(self, technical_owner_email):\n\n self._technical_owner_email = technical_owner_email", "def contact_email(self, contact_email):\n\n self._contact_email = contact_email", "def contact_email(self, contact_email):\n\n self._contact_email = contact_email", "def email(self):\n billing_contact = self.owner.organization_user.user\n return billing_contact.email", "def ga_at_delivery(self, ga_at_delivery):\n\n self.logger.debug(\"In 'ga_at_delivery' setter.\")\n\n self._ga_at_delivery = ga_at_delivery", "def delivery_identity(self) -> Optional[pulumi.Input['EventSubscriptionDeliveryIdentityArgs']]:\n return pulumi.get(self, \"delivery_identity\")", "def delivery_identity(self) -> Optional[pulumi.Input['EventSubscriptionDeliveryIdentityArgs']]:\n return pulumi.get(self, \"delivery_identity\")" ]
[ "0.69637316", "0.6370568", "0.59695375", "0.59579545", "0.58683085", "0.5863861", "0.58421206", "0.58421206", "0.58421206", "0.58421206", "0.58421206", "0.58421206", "0.58421206", "0.58421206", "0.58421206", "0.58421206", "0.582248", "0.5751209", "0.5748383", "0.5698401", "0.5613598", "0.5521131", "0.55090696", "0.5491955", "0.5447653", "0.5447653", "0.5443579", "0.5429883", "0.5398202", "0.5398202" ]
0.812475
0
Gets the profile_data of this UserBase. A dictionary containing custom profile field data for the user. Each entry maps the integer ID of a custom profile field in the organization to a dictionary containing the user's data for that field. Generally the data includes just a single `value` key; for those custom profile fields supporting Markdown, a `rendered_value` key will also be present.
def profile_data(self): return self._profile_data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def user_profile_data(id):\n user = User.query.get(id)\n return user.to_dict_profile()", "def get_object_data(self, **kwargs):\n user = self.request.user\n return UserProfile.objects.get(user=user)", "def profile(self) -> dict:\n endpoint = \"/api/users/profile/\"\n ret = self._request(endpoint=endpoint)\n return ret", "def get_user_data(self):\n return self.user_data", "def get_user_data(self):\n return self.user_data", "def get_my_profile(self):\n\n url = self.api_base_url + \"user/profile\"\n\n try:\n raw_response = self.request_handler.make_request(ApiRequestHandler.GET, url)\n except RequestFailed:\n raise\n\n jsonified_response = json.loads(raw_response.text)\n user_profile = jsonified_response\n\n return user_profile", "def _getProfileFromUser(self):\n # Make sure user is authenticated\n user = endpoints.get_current_user()\n if not user:\n raise endpoints.UnauthorizedException('Authorization required')\n # Get Profile from datastore\n user_id = user.email()\n p_key = ndb.Key(Profile, user_id)\n profile = p_key.get()\n # Create new Profile if not there\n if not profile:\n profile = Profile(\n key = p_key,\n displayName = user.nickname(),\n mainEmail= user.email(),\n teeShirtSize = str(TeeShirtSize.NOT_SPECIFIED),\n )\n profile.put()\n return profile", "def get_user_profile(self):\n return self.user.profile", "def user_data(self):\n return {\n 'username': self.username,\n 'email': self.email,\n 'password': self.password,\n '_id' : self._id\n }", "def _parse_profile_data (self, netflix_page_data):\n profiles = {};\n important_fields = [\n 'profileName',\n 'isActive',\n 'isAccountOwner',\n 'isKids'\n ]\n # values are accessible via dict (sloppy parsing successfull)\n if type(netflix_page_data) == dict:\n for profile_id in netflix_page_data.get('profiles'):\n if self._is_size_key(key=profile_id) == False and type(netflix_page_data['profiles'][profile_id]) == dict and netflix_page_data['profiles'][profile_id].get('avatar', False) != False:\n profile = {'id': profile_id}\n for important_field in important_fields:\n if important_field == 'profileName':\n profile.update({important_field: HTMLParser.HTMLParser().unescape(netflix_page_data['profiles'][profile_id]['summary'][important_field]).encode('utf8')})\n else:\n profile.update({important_field: netflix_page_data['profiles'][profile_id]['summary'][important_field]})\n avatar_base = netflix_page_data['nf'].get(netflix_page_data['profiles'][profile_id]['summary']['avatarName'], False);\n avatar = 'https://secure.netflix.com/ffe/profiles/avatars_v2/320x320/PICON_029.png' if avatar_base == False else avatar_base['images']['byWidth']['320']['value']\n profile.update({'avatar': avatar, 'isFirstUse': False})\n profiles.update({profile_id: profile})\n return profiles\n\n # values are stored in lists (returned from JS parser)\n # TODO: get rid of this christmas tree of doom\n for item in netflix_page_data:\n if 'hasViewedRatingWelcomeModal' in dict(item).keys():\n for profile_id in item:\n if self._is_size_key(key=profile_id) == False and type(item[profile_id]) == dict and item[profile_id].get('avatar', False) != False:\n profile = {'id': profile_id}\n for important_field in important_fields:\n profile.update({important_field: item[profile_id]['summary'][important_field]})\n avatar_base = item['nf'].get(item[profile_id]['summary']['avatarName'], False);\n avatar = 'https://secure.netflix.com/ffe/profiles/avatars_v2/320x320/PICON_029.png' if avatar_base == False else avatar_base['images']['byWidth']['320']['value']\n profile.update({'avatar': 
avatar})\n profiles.update({profile_id: profile})\n return profiles", "def _parse_user_data (self, netflix_page_data):\n user_data = {};\n important_fields = [\n 'authURL',\n 'gpsModel',\n 'guid'\n ]\n\n # values are accessible via dict (sloppy parsing successfull)\n if type(netflix_page_data) == dict:\n for important_field in important_fields:\n user_data.update({important_field: netflix_page_data.get(important_field, '')})\n return user_data\n\n # values are stored in lists (returned from JS parser)\n for item in netflix_page_data:\n if 'memberContext' in dict(item).keys():\n for important_field in important_fields:\n user_data.update({important_field: item['memberContext']['data']['userInfo'][important_field]})\n\n return user_data", "def user_profile():\n access_token = _request_ctx_stack.top.current_user_token\n message_log(\"Got access token for user profile\")\n user_profile = get_user_profile(access_token)\n user_profile['access_token'] = access_token\n return json.dumps(user_profile)", "def user_data(self, access_token, *args, **kwargs):\n fields_selectors = ','.join(set(['id', 'first-name', 'last-name'] +\n self.setting('FIELD_SELECTORS', [])))\n # use set() over fields_selectors since LinkedIn fails when values are\n # duplicated\n url = 'https://api.linkedin.com/v1/people/~:(%s)' % fields_selectors\n raw_xml = self.oauth_request(access_token, url).content\n try:\n return to_dict(ElementTree.fromstring(raw_xml))\n except (ExpatError, KeyError, IndexError):\n return None", "def map_profile_fields(data, fields):\n profile = {}\n for dst, src in fields.items():\n if callable(src):\n value = src(data)\n else:\n value = data.get(src)\n\n if value is not None and value != '':\n profile[dst] = value\n\n return profile", "def get_profile():\n\n if request['user_id']:\n\n user = User.select().where(User.id == request['user_id']).get()\n uSchema = UserSchema()\n jsonUser = uSchema.dumps(user)\n\n del request['user_id']\n return jsonUser.data\n\n return", "def parse(self):\n details = self.details()\n\n return Profile(\n book_id=self.book_id,\n title=self.title(),\n user_id=self.user_id(),\n username=self.username(),\n summary=self.summary(),\n published=self.published(),\n updated=self.updated(),\n **details\n )", "def get_user_info(self):\n user_info = self.data_source.get_user_info(self.user_id)\n\n return user_info", "def get_profile(self,fields=('id','first-name','last-name','headline','summary')):\n\n if not self._access_token:\n raise FBError(\"Authentication needed!\")\n \n token = oauth.Token(self._access_token['oauth_token'], self._access_token['oauth_token_secret'])\n client = oauth.Client(self.consumer, token)\n profile_url = self.profile_url % \",\".join(fields)\n resp, content = client.request(profile_url,headers={\"x-li-format\":'json'})\n \n if resp['status'] != '200':\n print resp\n raise FBError(\"Invalid response %s.\" % resp['status'])\n \n try:\n return json.loads(content)\n except Exception, e:\n raise FBError(\"Invalid json %s.\" % unicode(e))", "def user_data(self):\n itemuser = self.data['user']\n my_user_dict = {'user_id': itemuser['id'], 'user_name': itemuser['name'],\n 'user_handle': itemuser['screen_name'], 'user_desc': itemuser['description'],\n 'twitter_birthday': itemuser['created_at'], 'user_location': itemuser['location'],\n 'followers': itemuser['followers_count'], 'favorites': itemuser['favourites_count'],\n 'statuses': itemuser['statuses_count']}\n return my_user_dict", "def getProfileInfo(self):\n doc = minidom.parse(urllib.urlopen(serverString + 
\"/rest/user/\" + self.name))\n for element in doc.getElementsByTagName(\"user\")[0].childNodes:\n if element.nodeType != minidom.Node.ELEMENT_NODE:\n continue\n elif element.tagName == \"status\" and int(element.firstChild.data) != 1:\n raise ServerError(element.firstChild.data)\n elif element.tagName == \"input\":\n self.name = element.firstChild.data\n elif element.tagName == \"id\":\n self.id = element.firstChild.data\n elif element.tagName == \"image\":\n self.image = element.firstChild.data\n elif element.tagName == \"tagline\":\n if element.firstChild == None:\n self.tagline = None\n else:\n self.tagline = element.firstChild.data\n elif element.tagName == \"creation\":\n self.created = datetime.datetime.strptime(element.firstChild.data[:element.firstChild.data.rfind(\".\")]+\".GMT\", \"%Y-%m-%d %H:%M:%S.%Z\")", "def get_meta(self) -> Meta:\n return Meta(\n object_type=\"profile\",\n extra_custom_props=[\n (\"property\", \"profile.username\", self.user.username),\n (\"property\", \"profile.first_name\", self.user.first_name),\n (\"property\", \"profile.last_name\", self.user.last_name),\n ]\n if self.user\n else [],\n title=self.display_name or self.name,\n image=self.image.large,\n )", "def parse_user_fields(json_data):\n # Populate the fields\n user_info = {}\n for db_field, json_field in Users.UserJSON.fields.items():\n try:\n user_info[db_field] = get_json_field(json_data, json_field)\n if db_field == 'user_address_street':\n user_info[db_field] = user_info.get(db_field).replace('\\n', '')\n elif (db_field == 'user_first_login') or (db_field == 'user_last_login'):\n raw_timestamp = user_info.get(db_field)[:19]\n user_info[db_field] = core_utils.validate_timestamp(raw_timestamp)\n elif db_field == 'user_tags':\n user_info[db_field] = ', '.join(user_info.get(db_field))\n elif db_field == 'user_profile':\n profile = user_info[db_field]\n for idx in range(len(profile)):\n if profile[idx]['jive_label'] in Users.UserJSON.profile_fields:\n profile_field_name = Users.UserJSON.profile_fields.get(profile[idx]['jive_label'])\n user_info[profile_field_name] = profile[idx]['value']\n del user_info['user_profile']\n except (KeyError, IndexError, AttributeError):\n # Continue on to the next field\n continue\n # Return the user information\n return user_info", "def GetUserData(self):\r\n\r\n return self.user_data", "def user_data(self, access_token, *args, **kwargs):\n params = self.setting(\"PROFILE_EXTRA_PARAMS\", {})\n response = kwargs.get('response') or {}\n params[\"access_token\"] = access_token\n headers = {\n \"Authorization\": \"%s %s\" % (\n response.get(\"token_type\", \"Bearer\").capitalize(),\n access_token),\n \"Accept\": 'application/json',\n \"Content-type\": 'application/json;charset=utf-8'}\n return self.get_json(self.USER_DATA_URL,\n params=params, headers=headers)", "def get_login_user_profile(uid):\n # fetch the user info from db,\n # just in case the info has been updated somewhere\n json_user = User.find(uid).to_dict_with_mobile()\n json_user['work_experiences'] = work_service.get_work_experiences(uid)\n return json_user", "def user_data(self):\n return self._user_data", "def _get_userinfo(self):\n if not hasattr(self, \"_userinfo\"):\n self._userinfo = {\n \"name\" : self.user_name,\n \"email\" : self.user_email\n }\n if self.user_id:\n u = self.user\n if u.email:\n self._userinfo[\"email\"] = u.email\n\n # If the user has a full name, use that for the user name.\n # However, a given user_name overrides the raw user.username,\n # so only use that if this review has no 
associated name.\n if u.get_full_name():\n self._userinfo[\"name\"] = self.user.get_full_name()\n elif not self.user_name:\n self._userinfo[\"name\"] = u.username\n return self._userinfo", "def fetch_my_profile(self, api_token: str) -> dict:\n query = \"\"\"\n query myProfile {\n myProfile {\n id\n firstName\n lastName\n }\n }\n \"\"\"\n path = jmespath.compile(\n \"\"\"\n data.myProfile.{\n id: id\n first_name: firstName\n last_name: lastName\n }\n \"\"\"\n )\n\n data = self.do_query(query, api_token=api_token)\n\n parsed_data = path.search(data)\n self.contains_keys(parsed_data, [\"id\", \"first_name\", \"last_name\"])\n return parsed_data", "def get_profile_info(self):\n\n drill_path = str(Path.home())+\"/Documents/ball_e_profiles/drill_profiles/{drill_name}/{drill_name}.csv\".format(\n drill_name=self.drill_name)\n with open(drill_path) as file:\n csv_reader = csv.reader(file, delimiter=',')\n row_count = 0\n info_dict = dict()\n for row in csv_reader:\n if row_count == 0:\n row_count += 1\n else:\n info_dict[row[0]] = [row[1], row[2], row[3]]\n row_count += 1\n\n return info_dict", "def json(self):\n\n this_user_detail = dict(\n arn=self.arn,\n create_date=self.create_date,\n id=self.user_id,\n inline_policies=self.inline_policies_json,\n inline_policies_count=len(self.inline_policies_json),\n # groups=self.groups,\n groups=self.groups_json,\n path=self.path,\n managed_policies_count=len(self.attached_managed_policies),\n managed_policies=self.attached_managed_policies_pointer_json,\n risks=self.consolidated_risks\n )\n return this_user_detail" ]
[ "0.69240314", "0.6456976", "0.64322084", "0.6248396", "0.6248396", "0.62341315", "0.622801", "0.6204653", "0.6138357", "0.611971", "0.6103204", "0.6066835", "0.6032334", "0.60309625", "0.5982074", "0.5944301", "0.59439224", "0.5938915", "0.5907476", "0.58976126", "0.5869983", "0.58552057", "0.5839788", "0.5809002", "0.5780876", "0.57699656", "0.5752503", "0.5743313", "0.5703813", "0.57016736" ]
0.6958784
0
Sets the profile_data of this UserBase. A dictionary containing custom profile field data for the user. Each entry maps the integer ID of a custom profile field in the organization to a dictionary containing the user's data for that field. Generally the data includes just a single `value` key; for those custom profile fields supporting Markdown, a `rendered_value` key will also be present.
def profile_data(self, profile_data): self._profile_data = profile_data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_user_data(self, user_data, user_info):\n self.user_info = AttrDict(user_info)\n self.user_data = AttrDict(user_data)\n self.score_user = self.scoresaber.get_user_data(self.user_data.id)", "def update_user_profile(req_data):\n logger.debug(\"entering function update_user_profile\")\n\n update_fields = {}\n for field in req_data:\n update_fields[field] = req_data[field]\n if \"password\" in req_data:\n update_fields[\"password\"] = generate_password_hash(req_data[\"password\"])\n\n find_query = {\"user_id\": current_user.id}\n update_query = {\"$set\": update_fields}\n run_update_one_query(config.USERS_COL, find_query, update_query,\n error=True, error_msg=PROFILE_UPDATE_FAILED_ERR_MSG)\n logger.info(\"Profile update success for %s\", current_user.id)\n\n logger.debug(\"exiting function update_user_profile\")\n return get_success_response(PROFILE_UPDATE_SUCCESS_MSG)", "def custom_profile_fields(self, custom_profile_fields):\n\n self._custom_profile_fields = custom_profile_fields", "def _parse_profile_data (self, netflix_page_data):\n profiles = {};\n important_fields = [\n 'profileName',\n 'isActive',\n 'isAccountOwner',\n 'isKids'\n ]\n # values are accessible via dict (sloppy parsing successfull)\n if type(netflix_page_data) == dict:\n for profile_id in netflix_page_data.get('profiles'):\n if self._is_size_key(key=profile_id) == False and type(netflix_page_data['profiles'][profile_id]) == dict and netflix_page_data['profiles'][profile_id].get('avatar', False) != False:\n profile = {'id': profile_id}\n for important_field in important_fields:\n if important_field == 'profileName':\n profile.update({important_field: HTMLParser.HTMLParser().unescape(netflix_page_data['profiles'][profile_id]['summary'][important_field]).encode('utf8')})\n else:\n profile.update({important_field: netflix_page_data['profiles'][profile_id]['summary'][important_field]})\n avatar_base = netflix_page_data['nf'].get(netflix_page_data['profiles'][profile_id]['summary']['avatarName'], False);\n avatar = 'https://secure.netflix.com/ffe/profiles/avatars_v2/320x320/PICON_029.png' if avatar_base == False else avatar_base['images']['byWidth']['320']['value']\n profile.update({'avatar': avatar, 'isFirstUse': False})\n profiles.update({profile_id: profile})\n return profiles\n\n # values are stored in lists (returned from JS parser)\n # TODO: get rid of this christmas tree of doom\n for item in netflix_page_data:\n if 'hasViewedRatingWelcomeModal' in dict(item).keys():\n for profile_id in item:\n if self._is_size_key(key=profile_id) == False and type(item[profile_id]) == dict and item[profile_id].get('avatar', False) != False:\n profile = {'id': profile_id}\n for important_field in important_fields:\n profile.update({important_field: item[profile_id]['summary'][important_field]})\n avatar_base = item['nf'].get(item[profile_id]['summary']['avatarName'], False);\n avatar = 'https://secure.netflix.com/ffe/profiles/avatars_v2/320x320/PICON_029.png' if avatar_base == False else avatar_base['images']['byWidth']['320']['value']\n profile.update({'avatar': avatar})\n profiles.update({profile_id: profile})\n return profiles", "def save_profile(self, data, suffix=''):\n # pylint: disable=unused-argument\n self.display_name = data.get('display_name', self.display_name)\n\n users_included_email = data.get('users_included_email', self.users_included_email)\n try:\n regexp_string = self.regexp_from_users_included_email(users_included_email)\n re.compile(regexp_string)\n except:\n raise JsonHandlerError(400, 'Users to exclude by 
email is causing an error, please edit.')\n self.users_included_email = users_included_email\n\n self.profile_display_job_title = data.get('profile_display_job_title', self.profile_display_job_title)\n self.profile_display_organisation = data.get('profile_display_organisation', self.profile_display_organisation)\n self.profile_display_work_country = data.get('profile_display_work_country', self.profile_display_work_country)\n self.profile_display_email_button = data.get('profile_display_email_button', self.profile_display_email_button)\n self.profile_display_bio = data.get('profile_display_bio', self.profile_display_bio)\n self.enable_cohorts = data.get('enable_cohorts', self.enable_cohorts)", "def update_profile(orcid_id, data=None):\n \n u = db.session.query(User).filter_by(orcid_id=orcid_id).first()\n if u:\n u.updated = datetime.utcnow()\n if data:\n u.profile = data\n # save the user\n db.session.begin_nested()\n try:\n db.session.add(u)\n db.session.commit()\n except exc.IntegrityError as e:\n db.session.rollback()\n # per PEP-0249 a transaction is always in progress \n db.session.commit()", "def create(self, validated_data):\r\n user_data = validated_data.pop('user')\r\n user = UserSerializer.create(UserSerializer(), validated_data = user_data)\r\n profile, created = Profile.objects.update_or_create(user = user,\r\n bio = validated_data.pop('bio'),\r\n location = validated_data.pop('location'),\r\n birth_date = validated_data.pop('birth_date'))\r\n return profile", "def save(self, profile_callback=None):\n\n # First, save the parent form\n new_user = super(BodbRegistrationForm, self).save(profile_callback=profile_callback)\n\n # Update user with first, last names\n new_user.first_name = self.cleaned_data['first_name']\n new_user.last_name = self.cleaned_data['last_name']\n new_user.save()\n\n # Update profile with affiliation\n profile = new_user.get_profile()\n profile.affiliation = self.cleaned_data['affiliation']\n profile.save()\n\n cache.set('%d.profile' % new_user.id, profile)\n\n return new_user", "def user_profile_data(id):\n user = User.query.get(id)\n return user.to_dict_profile()", "def create(self, validated_data):\n user_data = validated_data.pop('user')\n user = UserSerializer.create(UserSerializer(), validated_data=user_data)\n profile, created = Profile.objects.update_or_create(\n user=user,\n avatar=validated_data.pop('avatar'),\n biography=validated_data.pop('biography'),\n link=validated_data.pop('link') \n )\n return profile", "def _profile(user):\n profile = UserProfile()\n profile.user_id = user.id\n profile.save()", "def create(self, validated_data):\n request = self.context.get('request')\n profile = Profile(**validated_data)\n profile.user = request.user\n profile.save()\n return profile", "def map_profile_fields(data, fields):\n profile = {}\n for dst, src in fields.items():\n if callable(src):\n value = src(data)\n else:\n value = data.get(src)\n\n if value is not None and value != '':\n profile[dst] = value\n\n return profile", "def __init__(self, data=None, **kw):\n def _get_class_by_id(profile_id):\n from solariat_bottle.db.user_profiles.social_profile import DELIMITER, TwitterProfile, FacebookProfile\n pos = unicode(profile_id).rfind(DELIMITER) + 1\n if pos == 0:\n return self.__class__\n platform = None\n try:\n index = int(profile_id[pos:])\n except ValueError:\n logger.info(u\"Could not obtain platform from profile id: {}\".format(profile_id))\n else:\n platform = PLATFORM_BY_INDEX.get(index)\n class_ = {\n TwitterProfile.platform: TwitterProfile,\n 
FacebookProfile.platform: FacebookProfile\n }.get(platform, self.__class__)\n return class_\n\n if data:\n profile_id = data.get('_id')\n else:\n profile_id = kw.get('id')\n if isinstance(profile_id, basestring):\n self.__class__ = _get_class_by_id(profile_id)\n super(UserProfile, self).__init__(data, **kw)", "def set_user_config(self, data):\n config = self.read_config_obj(self.account_file)\n for key, value in data.items():\n config.set(self.user, str(key), value)\n\n self.write_config(self.account_file, config)", "def set_user_info(self, usrs):\r\n logger.info('Starting set user profile info')\r\n user = choice(usrs)\r\n self.title = user['title']\r\n self.fname = user['fname']\r\n self.lname = user['lname']\r\n self.email = user['email']\r\n self.password = user['password']\r\n self.dob = user['dob']\r\n self.company = user['company']\r\n self.address = user['address']\r\n self.city = user['city']\r\n self.postalcode = user['postalcode']\r\n self.phone = user['phone']\r\n logger.info('Ending set user profile info')", "def user_custom_data(self, user_custom_data):\n\n self._user_custom_data = user_custom_data", "def _parse_user_data (self, netflix_page_data):\n user_data = {};\n important_fields = [\n 'authURL',\n 'gpsModel',\n 'guid'\n ]\n\n # values are accessible via dict (sloppy parsing successfull)\n if type(netflix_page_data) == dict:\n for important_field in important_fields:\n user_data.update({important_field: netflix_page_data.get(important_field, '')})\n return user_data\n\n # values are stored in lists (returned from JS parser)\n for item in netflix_page_data:\n if 'memberContext' in dict(item).keys():\n for important_field in important_fields:\n user_data.update({important_field: item['memberContext']['data']['userInfo'][important_field]})\n\n return user_data", "def save_user_profile(instance, **_):\n instance.profile.save()", "def build_profile(first, last, **user_info):\n user_info['first_name'] = first\n user_info['last_name'] = last\n return user_info", "def build_profile(first, last, **user_info):\n user_info['first_name'] = first\n user_info['last_name'] = last\n return user_info", "def profile_data(self):\n return self._profile_data", "def update_user_profile(user_info):\n user_id = user_info[\"USER_ID\"]\n user_collection.find_one_and_update(\n {\"_id\": user_id},\n {\n \"$set\": {\n \"username\": user_info[\"username\"],\n \"email\": user_info[\"email\"],\n \"avatar\": user_info[\"avatar\"],\n \"githubURL\": user_info[\"githubURL\"],\n \"linkedinURL\": user_info[\"linkedinURL\"],\n \"stackoverflowURL\": user_info[\"stackoverflowURL\"],\n \"skills\": user_info[\"skills\"],\n }\n },\n upsert=False,\n )", "def build_profile(first, last, **user_info):\n\n user_info['first_name'] = first\n user_info['last_name'] = last\n return user_info", "def user_profile(first, last, **add_info):\n profile = {}\n profile['firstname'] = first\n profile['lastname'] = last\n\n for key, value in add_info.items():\n profile[key] = value\n \n return profile", "def user_profile():\n access_token = _request_ctx_stack.top.current_user_token\n message_log(\"Got access token for user profile\")\n user_profile = get_user_profile(access_token)\n user_profile['access_token'] = access_token\n return json.dumps(user_profile)", "def build_profile(first, last, **user_info):\n profile = {}\n profile['first_name'] = first\n profile['last_name'] = last\n for key, value in user_info.items():\n profile[key] = value\n return profile", "def build_profile(first, last, **user_info):\n profile = {}\n 
profile['first_name'] = first\n profile['last_name'] = last\n for key, value in user_info.items():\n profile[key] = value\n return profile", "def build_profile(first, last, **user_info):\n profile = {}\n profile['first_name'] = first\n profile['last_name'] = last\n for key, value in user_info.items():\n profile[key] = value\n return profile", "def profile(self) -> dict:\n endpoint = \"/api/users/profile/\"\n ret = self._request(endpoint=endpoint)\n return ret" ]
[ "0.6163082", "0.6090175", "0.6075812", "0.6008002", "0.59890383", "0.5896696", "0.5854722", "0.58466715", "0.58388495", "0.58365136", "0.58214825", "0.58029795", "0.58013165", "0.57626474", "0.5759382", "0.5732638", "0.56859213", "0.56347704", "0.5612301", "0.5607427", "0.5607427", "0.55879056", "0.5559909", "0.55548525", "0.5544581", "0.55234855", "0.5515105", "0.5515105", "0.5515105", "0.55126953" ]
0.7260224
0
Check the northbound queue for RPCs queued by GUI or SOAP requests. A client should connect when triggered by a CONNECTION_REQUEST, and any RPCs queued by the northbound will then be added to the session queue by this function.
def add_nb_queue_to_session_queue(self, session): rpc_list = [] client_id = get_element('cid', session['client']) if client_id is not None and client_id in RPCS.Northbound_Queue: # Check if all commands have been serviced if RPCS.Northbound_Queue[client_id]: # Get first request in the client queue, in the form: # (Client_COMMAND, RESPONSE STREAM) # TODO pop might be unresolved nb_request = RPCS.Northbound_Queue[client_id].pop(0) # Parse and queue request(s) client_command = nb_request[0] rpc_list.append(client_command) # Insert nb commands to the front of queue session['queue'] = queued_nb_methods + session['queue'] # Store stream which expects the client response in the session session['nb_response_stream'] = nb_request[1]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_in(self):\n etree = self._encapsulate_request(self._generate_ping())\n self.zmq_scheduler_request_queue.put_nowait(etree)", "def _check_queue(self):\n self._process_incoming_queue_messages()\n self._root.after(200, self._check_queue)", "def request_already_queued(self, request: str):\n try:\n self.create_request_queue_if_not_exists()\n queue = []\n db = self.get_db_safely()\n cursor = db.cursor()\n cursor.execute(\n \"\"\"SELECT rowid FROM queue WHERE request = ?\"\"\",\n (request,))\n for row in cursor:\n queue.append(row)\n if len(queue) == 0:\n return False\n else:\n return True\n except sqlite3.Error:\n # This is a lie, but we don't want to try and enqueue something if we got an error here.\n return True", "def _check_comm_reply(self):\n if len(self._pending_comms) == 0:\n return\n for comm in self._pending_comms.values():\n self._notify_comm_ready(comm)\n self.kernel.io_loop.call_later(1, self._check_comm_reply)", "def queue_communication(self, session):\n\n # Here we can queue all communication to be sent to the Client\n # Examples follow...\n session['queue'].append(GetObjects())\n session['queue'].append(DeleteObjects())\n session['queue'].append(RpcExecute())\n session['queue'].append(GetDeviceInfo())", "def run(self, session):\n rpc = None\n if session['client']['event'] == 'CONNECTION_REQUEST':\n self.add_nb_queue_to_session_queue(session)\n\n while rpc is None and session['queue']:\n try:\n # Loop through queue until there is an RPC to send, or until\n # there are no more RPCs queued, or until an error occurs\n session['rpc']['method'] = session['queue'].pop(0)\n rpc = session['rpc']['method'].send_request(session)\n except ClientMethodException:\n # Failed to send this RPC, move on to the next\n LOG.debug(\"Error during preparation of client method: %s\" % str(session['rpc']['method']))\n continue\n except Exception:\n traceback.print_exc()\n LOG.debug(\"Unexpected error during preparation of client method: %s\" % str(session['rpc']['method']))\n return RPCS.SendingRpc, None\n\n if rpc is not None:\n # RPC ready: Send it and ExpectResponse\n return RPCS.ExpectResponse, rpc\n else:\n # If there are no (more) RPCs to send, log ok\n # and send done, indicating communication is complete\n session['log'] = {'rc': 'ok', 'msg': ''}\n session['db'].clear_dirtyflag(session['client']['cid'])\n return RPCS.Listening, {'method': 'done'}", "def on_bindok(self, unused_frame):\n\n self.logger.info('queue bound')\n if self.acked:\n # if we wish to care about the servers replies, this is were we set up things\n self.logger.info('issuing confirm.select RPC')\n self._channel.confirm_delivery(self.on_delivery_confirmation)\n\n if self.sender:\n pass\n self.send()\n else:\n self.start_consuming()", "def test_ipcrm_queues_not_isntalled(): # pragma: windows\n IPCComm.ipcrm_queues()", "def on_bindok(self, unused_frame):\n logger.info('Queue bound')\n self.setup_error_queue()", "def check_pool(self):\n if self.conn.queue_len() < MAX_PROXIES:\n return True\n return False", "def _accept_requests(self):\n try:\n request = self.request_queue.get(timeout=self.REQUESTS_TIMEOUT)\n self.logger.debug(\"Adding new requests\")\n for _ in xrange(self.REQUESTS_MAX_AMOUNT):\n self._requests.append(request)\n request = self.request_queue.get_nowait()\n\n except EmptyQueueError:\n return\n\n self.logger.debug(\"Done adding new requests\")", "def on_queue(self):\n self.ws_opened.wait()\n\n while self.wsapp.keep_running:\n try:\n msg = self.shot_outbox.get(timeout=0.001)\n except:\n continue\n action = 
msg['action']\n payload = msg['payload']\n\n if action == 'remote':\n # Choose the remote server\n buf = json.dumps(payload)\n self.__ws_conn.send(f\"{buf}\\n\")\n elif action == 'local':\n # Choose the local server\n result = payload['params']['result']\n shot = payload['shot']\n prev = self.ret.get(result, 0)\n self.ret[result] = prev + 1\n\n del self.shot_threadings[shot]\n self.__shot_finished[shot] = True\n self.__bar.update(1)\n if all(self.__shot_finished):\n # All shots are completed\n self.failed = False\n self.wsapp.keep_running = False\n break", "def check_session_queue_full(self) -> None:\n if (\n self.project.sessions_queued is None\n ): # no limit set so always return (success)\n return\n\n queued_request_count = self.project.session_requests.count()\n if queued_request_count >= self.project.sessions_queued:\n raise SessionException(\n \"There are already {}/{} requests for sessions for this project.\".format(\n queued_request_count, self.project.sessions_queued\n )\n )", "def on_queue_declareok(self, method_frame):\n\n for queue in self._handlers.keys():\n LOGGER.debug('Binding %s to %s with %s',\n self.EXCHANGE, queue, self.ROUTING_KEY)\n self._channel.queue_bind(self.on_bindok, queue,\n self.EXCHANGE, self.ROUTING_KEY)", "def _do(self):\n # Get all the messages in queue\n msgs = self.RPC.query.all()\n for msg in msgs:\n # Find the first msg marked as enqueued.\n\n if msg.working and \\\n (self.current_time_seconds() - self.millisec_to_sec(msg.updated)) \\\n > self.conf.messaging_server.response_timeout:\n msg.status = message.Message.ENQUEUED\n msg.update(condition=self.working_status_condition)\n\n if not msg.enqueued:\n continue\n if 'plan_name' in list(msg.ctxt.keys()): # Python 3 Conversion -- dict object to list object\n LOG.info('Plan name: {}'.format(msg.ctxt['plan_name']))\n elif 'plan_name' in list(msg.args.keys()): # Python 3 Conversion -- dict object to list object\n LOG.info('Plan name: {}'.format(msg.args['plan_name']))\n\n # Change the status to WORKING (operation with a lock)\n msg.status = message.Message.WORKING\n msg.owner = socket.gethostname()\n # All update should have a condition (status == enqueued)\n _is_updated = msg.update(condition=self.enqueued_status_condition)\n\n if not _is_updated or 'FAILURE' in _is_updated:\n continue\n\n # RPC methods must not start/end with an underscore.\n if msg.method.startswith('_') or msg.method.endswith('_'):\n error_msg = _LE(\"Method {} must not start or end\"\n \"with underscores\").format(msg.method)\n self._log_error_and_update_msg(msg, error_msg)\n return\n\n # The first endpoint that supports the method wins.\n method = None\n for endpoint in self.endpoints:\n if msg.method not in dir(endpoint):\n continue\n endpoint_method = getattr(endpoint, msg.method)\n if callable(endpoint_method):\n method = endpoint_method\n if self.conf.messaging_server.debug:\n LOG.debug(\"Message {} method {} is \"\n \"handled by endpoint {}\".\n format(msg.id, msg.method,\n method.__str__.__name__))\n break\n if not method:\n error_msg = _LE(\"Message {} method {} unsupported \"\n \"in endpoints.\").format(msg.id, msg.method)\n self._log_error_and_update_msg(msg, error_msg)\n return\n\n # All methods must take a ctxt and args param.\n if inspect.getfullargspec(method).args != ['self', 'ctx', 'arg']:\n error_msg = _LE(\"Method {} must take three args: \"\n \"self, ctx, arg\").format(msg.method)\n self._log_error_and_update_msg(msg, error_msg)\n return\n\n LOG.info(_LI(\"Message {} method {} received\").format(\n msg.id, 
msg.method))\n if self.conf.messaging_server.debug:\n LOG.debug(\n _LI(\"Message {} method {} context: {}, args: {}\").format(\n msg.id, msg.method, msg.ctxt, msg.args))\n\n failure = None\n try:\n\n # Add the template to conductor.plan table\n # Methods return an opaque dictionary\n result = method(msg.ctxt, msg.args)\n\n # FIXME(jdandrea): Remove response/error and make it opaque.\n # That means this would just be assigned result outright.\n msg.response = result.get('response', result)\n except Exception:\n # Current sys.exc_info() content can be overridden\n # by another exception raised by a log handler during\n # LOG.exception(). So keep a copy and delete it later.\n failure = sys.exc_info()\n\n # Do not log details about the failure here. It will\n # be returned later upstream.\n LOG.exception(_LE('Exception during message handling'))\n\n try:\n if failure is None:\n msg.status = message.Message.COMPLETED\n else:\n msg.failure = \\\n rpc_common.serialize_remote_exception(failure)\n msg.status = message.Message.ERROR\n LOG.info(_LI(\"Message {} method {}, status: {}\").format(\n msg.id, msg.method, msg.status))\n if self.conf.messaging_server.debug:\n LOG.debug(\"Message {} method {}, response: {}\".format(\n msg.id, msg.method, msg.response))\n\n _is_success = 'FAILURE'\n while 'FAILURE' in _is_success and (self.current_time_seconds() - self.millisec_to_sec(msg.updated)) \\\n <= self.conf.messaging_server.response_timeout:\n _is_success = msg.update()\n LOG.info(_LI(\"updating the message status from working to {}, \"\n \"atomic update response from MUSIC {}\").format(msg.status, _is_success))\n\n except Exception:\n LOG.exception(_LE(\"Can not send reply for message {} \"\n \"method {}\").\n format(msg.id, msg.method))\n finally:\n # Remove circular object reference between the current\n # stack frame and the traceback in exc_info.\n del failure", "def test_is_queued(self):\r\n\r\n answer_ids = sorted(self.problem.get_question_answers())\r\n\r\n # CodeResponse requires internal CorrectMap state. 
Build it now in the unqueued state\r\n cmap = CorrectMap()\r\n for answer_id in answer_ids:\r\n cmap.update(CorrectMap(answer_id=answer_id, queuestate=None))\r\n self.problem.correct_map.update(cmap)\r\n\r\n self.assertEquals(self.problem.is_queued(), False)\r\n\r\n # Now we queue the LCP\r\n cmap = CorrectMap()\r\n for i, answer_id in enumerate(answer_ids):\r\n queuestate = CodeResponseTest.make_queuestate(i, datetime.now(UTC))\r\n cmap.update(CorrectMap(answer_id=answer_ids[i], queuestate=queuestate))\r\n self.problem.correct_map.update(cmap)\r\n\r\n self.assertEquals(self.problem.is_queued(), True)", "def check_queue():\n while cmd_source.poll(): # Pop off all the values.\n stage, operation, value = cmd_source.recv()\n if operation == 'kill':\n # Destroy everything\n window.destroy()\n # mainloop() will quit, this function will too, and\n # all our stuff will die.\n return\n elif operation == 'hide':\n window.withdraw()\n elif operation == 'show':\n window.deiconify()\n elif operation == 'value':\n stage_values[stage] = value\n set_nums(stage)\n elif operation == 'length':\n set_length(stage, value)\n elif operation == 'skip':\n skip_stage(stage)\n else:\n raise ValueError('Bad operation {!r}!'.format(operation))\n \n # Continually re-run this function in the TK loop.\n window.after_idle(check_queue)", "def HasPendingCommands(self):\n\t\n return self.queue.qsize() > 0", "async def _listen_on_orders(self):\n # The lock is used to make sure the websocket is setup before using it.\n await self._orders_sock_info.connected_event.wait()\n try:\n async for message in self._orders_sock_info.ws:\n self._orders_sock_info.ready.set()\n if self._orders_sock_info.queue.qsize() >= 100:\n log.warning(\"Websocket message queue is has \"\n f\"{self._orders_sock_info.queue.qsize()} pending \"\n \"messages.\")\n await self._orders_sock_info.queue.put(message)\n finally:\n await self._orders_sock_info.ws.close()", "def check(self):\n\n print('Requester object is active: \\t', str(self.is_active))\n print('Number of requests sent: \\t', str(self.n_requests))\n print('Requester opened: \\t\\t', str(self.st_time))\n print('Requester closed: \\t\\t', str(self.en_time))", "def _flash_queued_window(self) -> None:\n try:\n message = self.events.get(timeout=1)\n self.processing_event = True\n except Empty:\n return None\n\n try:\n self.router.route_request(message)\n except UnexpectedMessageType:\n logging.error(f\"Unexpected request type - {message.event_type}. 
Aborting...\")\n self.shutdown()\n except WMError:\n pass\n finally:\n self.processing_event = False", "def __process_requests(self):\n\t\tfor received_message in self.receiver:\n\t\t\tif self.registry.ip_known(received_message.sender):\n\t\t\t\tlogger.info(\"Message received from registered client.\")\n\t\t\t\tif received_message.body.startswith(COMMAND_FLAG_CHAR):\n\t\t\t\t\tlogger.debug(\"Message was a command.\")\n\t\t\t\t\tself.parse(received_message.body)\n\t\t\t\telse:\n\t\t\t\t\tlogger.debug(\"Message was generic.\")\n\t\t\t\t\tself.send_to_all(received_message)\n\t\t\telse:\n\t\t\t\tlogger.info(\"Message received from an unregistered client.\")\n\t\t\t\tself.attempt_to_register(received_message)", "def is_call_waiting(self) -> bool:", "def issuer_liveness_check():\n global app_config\n\n if app_config[\"running\"]:\n # return True until we get a shutdown request\n return True\n\n # return True until the work queue is cleared\n return tob_connection_active()", "def in_queue(self):\n if self.get_db('jobid') is None:\n log.debug('jobid not found for calculation.')\n return False\n else:\n # get the jobid\n jobid = self.get_db('jobid')\n # see if jobid is in queue\n _, jobids_in_queue, _ = getstatusoutput('qselect',\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n\n if str(jobid) in jobids_in_queue.split('\\n'):\n # get details on specific jobid in case it is complete\n status, output, err = getstatusoutput(['qstat', jobid],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n if status == 0:\n lines = output.split('\\n')\n fields = lines[2].split()\n job_status = fields[4]\n if job_status == 'C':\n return False\n else:\n return True\n else:\n return False", "def is_incall_connected(self) -> bool:", "def queue():\n\n # Check if the client is connected.\n if not session.is_connected():\n return dict(ok=False, error=\"Client not connected\")\n\n # Add the operation to the local queue.\n operation = manager.enqueue(session.get_sid())\n\n # If the local list of operation to solve is empty the Mmanager.enqueue`\n # method returns `None`, ence if the value of `operation` is `None`\n # the client should stop.\n return dict(ok=True, operation=operation, halt=operation is None)", "def drain_call_queue(self):\n pass", "def ctrlqueue_do_all_queue(self) -> int:\n try:\n return self.dss_obj.CtrlQueueI(ctypes.c_int32(11), ctypes.c_int32(0))\n except Exception as e:\n Base.warn_msg(\"An error occur when tried to *Do All Control Actions of CrlQueue* check if *Queue* is NOT \"\n \"empty\", e)", "def on_queue_declareok(self, method_frame):\n self.logger.info('binding %s and %s together with %s', self.exchange, self.queue, self.routing_key)\n self._channel.queue_bind(self.on_bindok, self.queue, self.exchange, self.routing_key)" ]
[ "0.6413275", "0.63343513", "0.5929305", "0.59155464", "0.5869772", "0.57600075", "0.56298584", "0.5565722", "0.5554399", "0.55359644", "0.5532582", "0.5485062", "0.54757154", "0.54550743", "0.54375905", "0.53650355", "0.53436047", "0.53087854", "0.53064525", "0.5301316", "0.5291118", "0.5267402", "0.5251246", "0.5250603", "0.5235898", "0.52276045", "0.5218124", "0.5213547", "0.5213527", "0.52091396" ]
0.6457678
0
When NB requests are queued, put them in the session queue. If there are RPCs in the queue to be sent, return the prepared RPC and move to ExpectResponse. Otherwise, go to Listening and send a 'done' RPC.
def run(self, session): rpc = None if session['client']['event'] == 'CONNECTION_REQUEST': self.add_nb_queue_to_session_queue(session) while rpc is None and session['queue']: try: # Loop through queue until there is an RPC to send, or until # there are no more RPCs queued, or until an error occurs session['rpc']['method'] = session['queue'].pop(0) rpc = session['rpc']['method'].send_request(session) except ClientMethodException: # Failed to send this RPC, move on to the next LOG.debug("Error during preparation of client method: %s" % str(session['rpc']['method'])) continue except Exception: traceback.print_exc() LOG.debug("Unexpected error during preparation of client method: %s" % str(session['rpc']['method'])) return RPCS.SendingRpc, None if rpc is not None: # RPC ready: Send it and ExpectResponse return RPCS.ExpectResponse, rpc else: # If there are no (more) RPCs to send, log ok # and send done, indicating communication is complete session['log'] = {'rc': 'ok', 'msg': ''} session['db'].clear_dirtyflag(session['client']['cid']) return RPCS.Listening, {'method': 'done'}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_nb_queue_to_session_queue(self, session):\n rpc_list = []\n client_id = get_element('cid', session['client'])\n\n if client_id is not None and client_id in RPCS.Northbound_Queue:\n # Check if all commands have been serviced\n if RPCS.Northbound_Queue[client_id]:\n # Get first request in the client queue, in the form:\n # (Client_COMMAND, RESPONSE STREAM)\n # TODO pop might be unresolved\n nb_request = RPCS.Northbound_Queue[client_id].pop(0)\n # Parse and queue request(s)\n client_command = nb_request[0]\n rpc_list.append(client_command)\n # Insert nb commands to the front of queue\n session['queue'] = queued_nb_methods + session['queue']\n # Store stream which expects the client response in the session\n session['nb_response_stream'] = nb_request[1]", "def answer_waiting_call(self) -> None:", "def send_pending_requests(self):\n while self.pending_requests:\n stream_id = self.pending_requests.popleft()\n\n log.debug(\"initiating request, new stream %s\", stream_id)\n\n # send headers immediately rather than waiting for data. this ensures\n # streams are established with increasing stream ids regardless of when\n # the request data is available\n self.send_headers(stream_id, immediate=True)\n self.send_data(stream_id)", "def __call__(self):\n hub.sleep(random.randint(1, self.interval))\n while True:\n self.send_req()\n self.reply_pending = True\n hub.sleep(self.interval)\n if self.reply_pending:\n self.no_response()", "def _finish_pending_requests(self) -> None:\n while True:\n num_q, ok_list, err_list = self._multi.info_read()\n for curl in ok_list:\n self._finish(curl)\n for curl, errnum, errmsg in err_list:\n self._finish(curl, errnum, errmsg)\n if num_q == 0:\n break\n self._process_queue()", "def sync_request(self, *args):\r\n seq = self.send_request(*args)\r\n while seq not in self.sync_replies:\r\n self.serve()\r\n return self.sync_replies.pop(seq)", "def test_delivery_of_queued_messages(self):\n yield self.connect('127.0.0.1', self.pbPort)\n\n localConfig = copy.copy(self.defaultConfig)\n localConfig.id = str(randint(10, 99))\n localConfig.requeue_delay = 2\n localConfig.submit_sm_throughput = 20\n yield self.add(localConfig)\n\n # Send 150 messages to the queue\n submitCounter = 0\n submit_sm_pdu = copy.copy(self.SubmitSmPDU)\n while submitCounter < 150:\n submit_sm_pdu.params['short_message'] = '%s' % submitCounter\n yield self.submit_sm(localConfig.id, submit_sm_pdu, self.SubmitSmBill.user.uid)\n submitCounter += 1\n\n # Wait for 20 seconds\n yield waitFor(20)\n\n # Start the connector again\n yield self.start(localConfig.id)\n\n # Wait for 30 seconds, all the rest of the queue must be sent\n yield waitFor(50)\n\n yield self.stop(localConfig.id)\n\n # Wait for unbound state\n yield waitFor(20)\n\n # Assertions\n # Take the lastClient (and unique one) and assert received message\n self.assertEqual(len(self.SMSCPort.factory.lastClient.submitRecords), 150)", "def _do(self):\n # Get all the messages in queue\n msgs = self.RPC.query.all()\n for msg in msgs:\n # Find the first msg marked as enqueued.\n\n if msg.working and \\\n (self.current_time_seconds() - self.millisec_to_sec(msg.updated)) \\\n > self.conf.messaging_server.response_timeout:\n msg.status = message.Message.ENQUEUED\n msg.update(condition=self.working_status_condition)\n\n if not msg.enqueued:\n continue\n if 'plan_name' in list(msg.ctxt.keys()): # Python 3 Conversion -- dict object to list object\n LOG.info('Plan name: {}'.format(msg.ctxt['plan_name']))\n elif 'plan_name' in list(msg.args.keys()): # Python 3 
Conversion -- dict object to list object\n LOG.info('Plan name: {}'.format(msg.args['plan_name']))\n\n # Change the status to WORKING (operation with a lock)\n msg.status = message.Message.WORKING\n msg.owner = socket.gethostname()\n # All update should have a condition (status == enqueued)\n _is_updated = msg.update(condition=self.enqueued_status_condition)\n\n if not _is_updated or 'FAILURE' in _is_updated:\n continue\n\n # RPC methods must not start/end with an underscore.\n if msg.method.startswith('_') or msg.method.endswith('_'):\n error_msg = _LE(\"Method {} must not start or end\"\n \"with underscores\").format(msg.method)\n self._log_error_and_update_msg(msg, error_msg)\n return\n\n # The first endpoint that supports the method wins.\n method = None\n for endpoint in self.endpoints:\n if msg.method not in dir(endpoint):\n continue\n endpoint_method = getattr(endpoint, msg.method)\n if callable(endpoint_method):\n method = endpoint_method\n if self.conf.messaging_server.debug:\n LOG.debug(\"Message {} method {} is \"\n \"handled by endpoint {}\".\n format(msg.id, msg.method,\n method.__str__.__name__))\n break\n if not method:\n error_msg = _LE(\"Message {} method {} unsupported \"\n \"in endpoints.\").format(msg.id, msg.method)\n self._log_error_and_update_msg(msg, error_msg)\n return\n\n # All methods must take a ctxt and args param.\n if inspect.getfullargspec(method).args != ['self', 'ctx', 'arg']:\n error_msg = _LE(\"Method {} must take three args: \"\n \"self, ctx, arg\").format(msg.method)\n self._log_error_and_update_msg(msg, error_msg)\n return\n\n LOG.info(_LI(\"Message {} method {} received\").format(\n msg.id, msg.method))\n if self.conf.messaging_server.debug:\n LOG.debug(\n _LI(\"Message {} method {} context: {}, args: {}\").format(\n msg.id, msg.method, msg.ctxt, msg.args))\n\n failure = None\n try:\n\n # Add the template to conductor.plan table\n # Methods return an opaque dictionary\n result = method(msg.ctxt, msg.args)\n\n # FIXME(jdandrea): Remove response/error and make it opaque.\n # That means this would just be assigned result outright.\n msg.response = result.get('response', result)\n except Exception:\n # Current sys.exc_info() content can be overridden\n # by another exception raised by a log handler during\n # LOG.exception(). So keep a copy and delete it later.\n failure = sys.exc_info()\n\n # Do not log details about the failure here. 
It will\n # be returned later upstream.\n LOG.exception(_LE('Exception during message handling'))\n\n try:\n if failure is None:\n msg.status = message.Message.COMPLETED\n else:\n msg.failure = \\\n rpc_common.serialize_remote_exception(failure)\n msg.status = message.Message.ERROR\n LOG.info(_LI(\"Message {} method {}, status: {}\").format(\n msg.id, msg.method, msg.status))\n if self.conf.messaging_server.debug:\n LOG.debug(\"Message {} method {}, response: {}\".format(\n msg.id, msg.method, msg.response))\n\n _is_success = 'FAILURE'\n while 'FAILURE' in _is_success and (self.current_time_seconds() - self.millisec_to_sec(msg.updated)) \\\n <= self.conf.messaging_server.response_timeout:\n _is_success = msg.update()\n LOG.info(_LI(\"updating the message status from working to {}, \"\n \"atomic update response from MUSIC {}\").format(msg.status, _is_success))\n\n except Exception:\n LOG.exception(_LE(\"Can not send reply for message {} \"\n \"method {}\").\n format(msg.id, msg.method))\n finally:\n # Remove circular object reference between the current\n # stack frame and the traceback in exc_info.\n del failure", "def test_process_reply0(self):\n req1 = FakeRequest(1, True) # expired\n req2 = FakeRequest(2, False) # not expired\n req3 = FakeRequest(3, True)\n req4 = FakeRequest(4, False)\n req5 = FakeRequest(5, False)\n\n self.request_buffer.append(req1)\n self.request_buffer.append(req2)\n self.request_buffer.append(req3)\n self.request_buffer.append(req4)\n self.request_buffer.append(req5)\n\n reply = FakeReply(id=6)\n\n self.request_buffer.process_reply(reply)\n self.assertEqual(len(self.request_buffer.requests), 5)", "def testSendNextMessage(self):\n self.mgr.isGoproBusy = True\n self.mgr.lastRequestSent = monotonic.monotonic()\n self.mgr.queueMsg(3)\n self.mgr.queueMsg(2)\n self.mgr.queueMsg(1)\n self.mgr.processMsgQueue()\n self.v.send_mavlink.assert_called_with(3)\n self.assertEqual( self.mgr.msgQueue.qsize(), 2)", "def request(self, *args, **kwargs):\n self.work_request_queue.put((args, kwargs))\n return self.result_queue.get()", "def request(self, *args, **kwargs):\n self.work_request_queue.put((args, kwargs))\n return self.result_queue.get()", "def on_bindok(self, unused_frame):\n\n self.logger.info('queue bound')\n if self.acked:\n # if we wish to care about the servers replies, this is were we set up things\n self.logger.info('issuing confirm.select RPC')\n self._channel.confirm_delivery(self.on_delivery_confirmation)\n\n if self.sender:\n pass\n self.send()\n else:\n self.start_consuming()", "def on_iteration(self):\n self.send_pending_requests()\n super().on_iteration()", "def __call__(self):\n dv = None\n #Push as many queued calls as the self.max_batch_size and the max number of paralel HTTPS sessions allow for.\n while self.active_call_count < self.parallel and self.queue:\n #Get a chunk of entries from the command queue so we can make a batch.\n subqueue = self.queue[:self.max_batch_size]\n self.queue = self.queue[self.max_batch_size:]\n #Send a single batch to the currently selected RPC node.\n dv = self._process_batch(subqueue)\n #If there is nothing left to do, there is nothing left to do\n if not self.queue and self.active_call_count == 0:\n self.log.error(\"Queue is empty and no active HTTPS-POSTs remaining.\")\n if self.stop_when_empty:\n #On request, stop reactor when queue empty while no active queries remain.\n self.reactor.stop() \n return dv", "def queue_communication(self, session):\n\n # Here we can queue all communication to be sent to the Client\n # 
Examples follow...\n session['queue'].append(GetObjects())\n session['queue'].append(DeleteObjects())\n session['queue'].append(RpcExecute())\n session['queue'].append(GetDeviceInfo())", "async def handle_request():\n nonlocal process, process_task\n logger.debug(\"Waiting for request\")\n request = await queue.get()\n\n if request.name == RequestTypes.run_process:\n assert process is None, \"Process must not have been started\"\n process_state = request.contents\n process = self._start_callback(process_state)\n process_task = asyncio.create_task(process.wait())\n pid = process.pid\n logger.debug(\"Running process in handler: %d\", pid)\n await connection.send(Response(pid))\n\n elif request.name == RequestTypes.wait_process_done:\n assert process is not None, \"Process must have been started\"\n logger.debug(\"Waiting for process to exit\")\n # We don't want the process.wait() task to be cancelled in case\n # our connection gets broken.\n exitcode = await asyncio.shield(process_task)\n logger.debug(\"Result: %d\", exitcode)\n await connection.send(Response(exitcode))\n\n return True", "def send_async_requests(self):\n\t\tif len(self._async_http_requests) <= 0:\n\t\t\treturn ()\n\n\t\tif self._session is None:\n\t\t\tself.start_new_session()\n\t\tsession = self._session\n\n\t\tresponses = [None] * len(self._async_http_requests)\n\t\t\":type : list\"\n\n\t\tfutures = []\n\t\tfor req, uri, host, auth, decode, ignored in self._async_http_requests:\n\t\t\tif host is None:\n\t\t\t\thost = self._host\n\t\t\t_log_http_request(req, uri, host, auth, self.log_full_request)\n\t\t\tf = self._async_executor.submit(session.send, req)\n\t\t\t# mini data-structure, Tuple[done_yet, future]\n\t\t\tfutures.append((False, f, decode, ignored))\n\t\tself._async_http_requests = []\n\n\t\t# now wait for them to complete\n\t\twhile len([x for x in futures if not x[0]]) > 0:\n\t\t\tnext_futures = []\n\t\t\tfor idx, f in enumerate(futures):\n\t\t\t\tdone_now = f[0]\n\t\t\t\tif not done_now:\n\t\t\t\t\tif f[1].done():\n\t\t\t\t\t\tr = f[1].result()\n\t\t\t\t\t\t_log_http_response(r, self.log_full_response)\n\t\t\t\t\t\tresponses[idx] = (r, f[2], f[3])\n\t\t\t\t\t\tdone_now = True\n\t\t\t\tnext_futures.append((done_now, f[1], f[2], f[3]))\n\t\t\tfutures = next_futures\n\t\t\ttime.sleep(0.01)\n\t\t# they are now done\n\n\t\t# we need to re-raise any exceptions that occur\n\t\tbad_responses = []\n\t\tfor idx, resp_items in enumerate(responses):\n\t\t\tresp, decode, ignored = resp_items\n\t\t\tif resp.status_code not in ignored:\n\t\t\t\ttry:\n\t\t\t\t\tresp.raise_for_status()\n\t\t\t\texcept requests.HTTPError as e:\n\t\t\t\t\t_log.exception(\"HTTPError in request #\" + str(idx) + \": \" + str(e))\n\t\t\t\t\tbad_responses.append(idx)\n\t\tif len(bad_responses) > 0:\n\t\t\tself._async_transforms = []\n\t\t\traise AsyncHTTPError(bad_responses)\n\n\t\t# finally, call the transform function on each one\n\t\ttransformed = []\n\t\tfor r_items, xform in zip(responses, self._async_transforms):\n\t\t\tr, decode, ignored = r_items\n\t\t\tdata = None\n\t\t\tif r.content is not None:\n\t\t\t\tif decode == 'text':\n\t\t\t\t\tdata = r.text\n\t\t\t\telif decode == 'json':\n\t\t\t\t\tdata = r.json(parse_float=decimal.Decimal)\n\t\t\t\telif decode == 'binary':\n\t\t\t\t\tdata = r.content\n\t\t\t\telse:\n\t\t\t\t\traise ValueError(\"Bad response_payload encoding: \" + decode)\n\t\t\t\tdata = xform(data)\n\t\t\ttransformed.append(data)\n\t\tself._async_transforms = []\n\t\treturn transformed", "def _handle_pending(self):\r\n if not 
self.pending:\r\n self._post_message('')\r\n return\r\n info, desired = self.pending\r\n if desired and self.plugins[desired].busy:\r\n return\r\n self.busy = True\r\n\r\n if desired:\r\n plugins = [self.plugins[desired]]\r\n elif info.name == 'definition' and not info.editor.is_python():\r\n plugins = [p for p in self.plugins.values() if not p.busy]\r\n else:\r\n # use all but the fallback\r\n plugins = [p for p in list(self.plugins.values())[:-1] if not p.busy]\r\n\r\n self.request = RequestHandler(info, plugins)\r\n self.request.introspection_complete.connect(\r\n self._introspection_complete)\r\n self.pending = None", "def user_next_command(self, tracer):\n try:\n self.queue.get_nowait()(tracer)\n except Empty:\n return", "def test_process_reply0(self):\n req1 = FakeRequest(1, True) # expired\n req2 = FakeRequest(2, False) # not expired\n req3 = FakeRequest(3, True)\n req4 = FakeRequest(4, False)\n req5 = FakeRequest(5, False)\n\n self.request_buffer.append(req1)\n self.request_buffer.append(req2)\n self.request_buffer.append(req3)\n self.request_buffer.append(req4)\n self.request_buffer.append(req5)\n\n reply = FakeReply(id=5)\n\n self.request_buffer.process_reply(reply)\n\n self.assertTrue(\n req1 in self.request_buffer.requests and\n req2 in self.request_buffer.requests and\n req3 in self.request_buffer.requests and\n req4 in self.request_buffer.requests and\n req5 not in self.request_buffer.requests\n )", "def testSessionTimeout(self):\n\n def testTimeout(res):\n self.failUnlessEqual(res.value.args[0], b'404')\n\n def testCBTimeout(res):\n # check for terminate if we expire\n terminate = res[0].getAttribute('type',False)\n self.failUnlessEqual(terminate, 'terminate')\n\n def sendTest():\n sd = self.send()\n sd.addCallback(testCBTimeout)\n sd.addErrback(testTimeout)\n return sd\n\n def testResend(res):\n self.failUnless(res[0].name=='body', 'Wrong element')\n s = self.b.service.sessions[self.sid]\n self.failUnless(s.inactivity==2,'Wrong inactivity value')\n self.failUnless(s.wait==2, 'Wrong wait value')\n return task.deferLater(reactor, s.wait+s.inactivity+1, sendTest)\n\n def testSessionCreate(res):\n self.failUnless(res[0].name=='body', 'Wrong element')\n self.failUnless(res[0].hasAttribute('sid'),'Not session id')\n self.sid = res[0]['sid']\n\n # send and wait\n sd = self.send()\n sd.addCallback(testResend)\n return sd\n\n\n\n BOSH_XML = \"\"\"<body content='text/xml; charset=utf-8'\n hold='1'\n rid='%(rid)i'\n to='localhost'\n route='xmpp:127.0.0.1:%(server_port)i'\n ver='1.6'\n wait='2'\n ack='1'\n inactivity='2'\n xml:lang='en'\n xmlns='http://jabber.org/protocol/httpbind'/>\n \"\"\"% { \"rid\": self.rid, \"server_port\": self.server_port }\n\n return self.proxy.connect(BOSH_XML).addCallbacks(testSessionCreate)", "def queue_processor(self):\n\n while self.state != consts.SMPP_CLIENT_STATE_CLOSED:\n try:\n p = self.queue.get(timeout=1)\n self._request_handler(p)\n self.queue.task_done()\n except Empty:\n pass", "def fake_twisted_request(*args, **kwargs):\n kwargs.setdefault(\n 'Request', lambda channel: Request(channel=channel, queued=False))\n request = fake_nevow_request(*args, **kwargs)\n request.finish = lambda: next(request.finish.counter)\n request.finish.counter = itertools.count()\n return request", "def async_request(self, callback, *args):\r\n seq = self.send_request(*args)\r\n self.async_replies[seq] = callback", "def controls():\n\n context = zmq.Context()\n\n print(\"Transmitting commands to process.\")\n socket = context.socket(zmq.REQ)\n rc = 
socket.connect(\"ipc:///tmp/mail_queue_ipc\")\n #print(rc)\n\n\n for request in range(2):\n print(\"Sending request %s\" % request)\n socket.send(b\"insert\")\n\n message = socket.recv()\n print(\"Recieved reply %s [ %s ]\" % (request, message))\n time.sleep(1)", "def check_in(self):\n etree = self._encapsulate_request(self._generate_ping())\n self.zmq_scheduler_request_queue.put_nowait(etree)", "def process_queued_msg(self):\n try:\n while not self.queue.empty():\n port, tbl = self.queue.get()\n reveived_port = self.switches[port.neighbor_switch_dpid].ports[port.neighbor_port_no]\n self.tbl.update_by_neighbor(reveived_port, port, tbl)\n self.deploy_routing_table()\n except:\n pass", "def ProcessRequests(self, manager):\n self._CreateSpool()\n metrics_set = self._MetricsSet(\n *(constructor(self._METRIC_PREFIX + name)\n for name, constructor in self._METRICS_CONSTRUCTORS))\n pending_requests = []\n timestamps = {}\n tick_count = 0\n next_heartbeat = time.time()\n while True:\n tick_count += 1\n if time.time() >= next_heartbeat:\n next_heartbeat = time.time() + self._HEARTBEAT_INTERVAL\n logging.debug('Starting tick number %d', tick_count)\n manager.StartTick()\n\n num_completed = 0\n for request_id, result in manager.Reap():\n num_completed += 1\n metrics_set.total_completed.increment(fields={'status': 'normal'})\n time_running = time.time() - timestamps.pop(request_id)\n metrics_set.time_running.add(time_running)\n self._CompleteRequest(request_id, result)\n\n num_added = 0\n for request_id in self._GetNewRequests():\n num_added += 1\n metrics_set.total_received.increment()\n timestamps[request_id] = time.time()\n pending_requests.append(request_id)\n\n num_aborted = 0\n for abort_id in self._GetAbortRequests():\n num_aborted += 1\n metrics_set.total_completed.increment(fields={'status': 'abort'})\n if abort_id in timestamps:\n time_to_abort = time.time() - timestamps.pop(abort_id)\n metrics_set.time_to_abort.add(time_to_abort)\n self._ProcessAbort(abort_id, pending_requests, manager)\n\n num_started = 0\n while pending_requests and manager.HasCapacity():\n num_started += 1\n request_id = pending_requests.pop(0)\n time_now = time.time()\n time_waiting = time_now - timestamps[request_id]\n metrics_set.time_waiting.add(time_waiting)\n timestamps[request_id] = time_now\n self._StartRequest(request_id, manager)\n\n if num_completed or num_added or num_aborted or num_started:\n logging.info('new: %d, started: %d, aborted: %d, completed: %d',\n num_added, num_started, num_aborted, num_completed)\n num_pending = len(pending_requests)\n num_running = len(manager)\n logging.info('pending: %d, running: %d', num_pending, num_running)\n metrics_set.task_count.set(num_pending,\n fields={'state': 'pending'})\n metrics_set.task_count.set(num_running,\n fields={'state': 'running'})\n metrics_set.ticks.increment()\n time.sleep(manager.sample_interval)", "def queue():\n\n # Check if the client is connected.\n if not session.is_connected():\n return dict(ok=False, error=\"Client not connected\")\n\n # Add the operation to the local queue.\n operation = manager.enqueue(session.get_sid())\n\n # If the local list of operation to solve is empty the Mmanager.enqueue`\n # method returns `None`, ence if the value of `operation` is `None`\n # the client should stop.\n return dict(ok=True, operation=operation, halt=operation is None)" ]
[ "0.63733757", "0.58320886", "0.5644144", "0.5606107", "0.55666107", "0.55509126", "0.55193716", "0.551203", "0.5509601", "0.5445808", "0.5436328", "0.5436328", "0.5414003", "0.5397534", "0.5392256", "0.5372994", "0.53597814", "0.5358608", "0.52984124", "0.5293183", "0.5270412", "0.52681863", "0.52507424", "0.5246172", "0.52423346", "0.52421176", "0.52363044", "0.5224766", "0.52161074", "0.5212833" ]
0.6577181
0
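The negatives in the record above all revolve around the same pattern: pull a pending request off a per-client queue and splice its commands in ahead of the work already scheduled for the session. A minimal, runnable sketch of that pattern follows. The session-dict layout ('client'/'cid', 'queue', 'nb_response_stream') mirrors the first negative snippet; the names northbound and splice_northbound are illustrative stand-ins, not identifiers from the records above.

from collections import defaultdict

northbound = defaultdict(list)  # client_id -> [(command, response_stream), ...]

def splice_northbound(session):
    client_id = session.get("client", {}).get("cid")
    if client_id and northbound[client_id]:
        # FIFO: service the oldest pending northbound request first
        command, stream = northbound[client_id].pop(0)
        # Prepend so the new command runs before already-queued work
        session["queue"] = [command] + session["queue"]
        # Remember where the client's eventual reply must be routed
        session["nb_response_stream"] = stream

session = {"client": {"cid": "c1"}, "queue": ["GetDeviceInfo"]}
northbound["c1"].append(("RebootNow", "stream-42"))
splice_northbound(session)
assert session["queue"] == ["RebootNow", "GetDeviceInfo"]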
This function determines the common supported protocol version: the newest version the RPCS server supports whose major (first) number falls within the range spanned by the major numbers of the client's earliest and latest supported protocol versions.
def determine_supported_protocol(self, earliest, latest): earliest = int(earliest.split('.')[0]) latest = int(latest.split('.')[0]) if earliest <= latest: supported = range(earliest, latest + 1) for version in (reversed(supported)): if version in RPCS.SUPPORTED_PROTOCOL_VERSIONS: return str(version) # If no common protocol version is found, raise fatal error raise ClientRequestError('NoValidProtocolVersionInCommon')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compare_protocol_versions(self, session):\n # First parse protocol version strings to check for invalid formatting\n invalid_string = self.parse_protocol_version(\n [self.earliest_protocol_version, self.latest_protocol_version])\n if invalid_string is not None:\n # Error during protocol string parsing\n data = ('earliest_protocol_version'\n if invalid_string == self.earliest_protocol_version else 'latest_protocol_version')\n raise ClientRequestError('InvalidParameterValue', data=data)\n\n # Check if protocol version is supported and define the one to use\n self.protocol_version = self.determine_supported_protocol(\n self.earliest_protocol_version, self.latest_protocol_version)", "def protocol(ver):\r\n if ver == 1:\r\n return 1\r\n\r\n if ver == 2:\r\n return 2\r\n\r\n\r\n raise ValueError", "def do_protocol_version(self):\n return \"2\", True", "def test_get_protocol_version(self):\n server, client = loopback()\n client_protocol_version = client.get_protocol_version()\n server_protocol_version = server.get_protocol_version()\n\n assert isinstance(server_protocol_version, int)\n assert isinstance(client_protocol_version, int)\n\n assert server_protocol_version == client_protocol_version", "async def get_protocol_version(self):\n if self.query_reply_data.get(PrivateConstants.REPORT_VERSION) == '':\n await self._send_command([PrivateConstants.REPORT_VERSION])\n while self.query_reply_data.get(\n PrivateConstants.REPORT_VERSION) == '':\n await asyncio.sleep(self.sleep_tune)\n return self.query_reply_data.get(PrivateConstants.REPORT_VERSION)", "async def get_protocol_version(self):\n if self.query_reply_data.get(PrivateConstants.REPORT_VERSION) == '':\n await self._send_command([PrivateConstants.REPORT_VERSION])\n while self.query_reply_data.get(\n PrivateConstants.REPORT_VERSION) == '':\n await asyncio.sleep(self.sleep_tune)\n return self.query_reply_data.get(PrivateConstants.REPORT_VERSION)", "def parse_protocol_version(self, version_string_list):\n # Verify for every provided string if it is in proper versioning format\n for version_string in version_string_list:\n\n try:\n parsed_version_string = version_string.split('.')\n if len(parsed_version_string) == 1 and version_string.isdigit():\n # No dots in version string, it is a simple integer.\n continue\n\n StrictVersion(version_string)\n\n except (AttributeError, ValueError):\n LOG.debug('Invalid protocol version string provided')\n return version_string\n\n # Check for malformatting\n for i in range(len(parsed_version_string)):\n if len(parsed_version_string[i]) > 1:\n if parsed_version_string[i][0] == '0': # Leading 0's\n return version_string\n if len(parsed_version_string[i]) < 1: # Empty strings\n return version_string\n\n # Protocol version formating: OK\n return None", "def supported_marshaller_api_versions() -> Tuple[str]:\n return (\"1.0\",)", "def getNativeChangesetVersion(protocolVersion):\n # Add more versions as necessary, but do remember to add them to\n # netclient's FILE_CONTAINER_* constants\n if protocolVersion < 38:\n return filecontainer.FILE_CONTAINER_VERSION_NO_REMOVES\n elif protocolVersion < 43:\n return filecontainer.FILE_CONTAINER_VERSION_WITH_REMOVES\n # Add more changeset versions here as the currently newest client is\n # replaced by a newer one\n return filecontainer.FILE_CONTAINER_VERSION_FILEID_IDX", "def _supported_versions(self, jarm_details, grease):\n if (jarm_details[7] == \"1.2_SUPPORT\"):\n # TLS 1.3 is not supported.\n tls = [b\"\\x03\\x01\", b\"\\x03\\x02\", b\"\\x03\\x03\"]\n else:\n # TLS 
1.3 is supported.\n tls = [b\"\\x03\\x01\", b\"\\x03\\x02\", b\"\\x03\\x03\", b\"\\x03\\x04\"]\n\n # Change supported version order, by default, the versions are from\n # oldest to newest.\n if jarm_details[8] != \"FORWARD\":\n tls = self._cipher_mung(tls, jarm_details[8])\n\n # Assemble the extension.\n ext = b\"\\x00\\x2b\"\n # Add GREASE if applicable.\n if grease:\n versions = self._choose_grease()\n else:\n versions = b\"\"\n\n for version in tls:\n versions += version\n\n second_length = len(versions)\n first_length = second_length+1\n ext += struct.pack(\">H\", first_length)\n ext += struct.pack(\">B\", second_length)\n ext += versions\n\n return ext", "def _sanityCheckProtocolVersions(other):\n if other.minVersion > other.maxVersion:\n raise ValueError(\"Versions set incorrectly\")\n if other.minVersion not in KNOWN_VERSIONS:\n raise ValueError(\"minVersion set incorrectly\")\n if other.maxVersion not in KNOWN_VERSIONS:\n raise ValueError(\"maxVersion set incorrectly\")\n\n if other.maxVersion < (3, 4):\n other.versions = [i for i in other.versions if i < (3, 4)]", "def check_capability_negotiation(\n self, environ, start_response, response_headers):\n ua = sa = None\n if \"HTTP_DATASERVICEVERSION\" in environ:\n major, minor, ua = core.parse_dataservice_version(\n environ[\"HTTP_DATASERVICEVERSION\"])\n else:\n major = 2\n minor = 0\n if \"HTTP_MAXDATASERVICEVERSION\" in environ:\n # (unused max_minor)\n max_major, max_minor, sa = core.parse_max_dataservice_version(\n environ[\"HTTP_MAXDATASERVICEVERSION\"]) # noqa\n else:\n max_major = major\n if major > 2 or (major == 2 and minor > 0):\n # we can't cope with this request\n return None\n elif max_major >= 2:\n response_headers.append(\n ('DataServiceVersion', '2.0; pyslet %s' % info.version))\n return 2\n else:\n response_headers.append(\n ('DataServiceVersion', '1.0; pyslet %s' % info.version))\n return 1", "def get_server_version():\n url_address = 'https://raw.githubusercontent.com/muhammadfredo/FrMaya/master/FrMaya/version.py'\n url_data = urllib2.urlopen(url_address).read(200)\n result = re.search(r'(\\d+), (\\d+), (\\d+)', url_data, re.MULTILINE)\n if result:\n version_list = [int(v) for v in result.groups()]\n return version_list\n else:\n raise ValueError('Cannot get server version!!!')", "def test_get_protocol_version_name(self):\n server, client = loopback()\n client_protocol_version_name = client.get_protocol_version_name()\n server_protocol_version_name = server.get_protocol_version_name()\n\n assert isinstance(server_protocol_version_name, str)\n assert isinstance(client_protocol_version_name, str)\n\n assert server_protocol_version_name == client_protocol_version_name", "def api_version() -> APIVersion:\n return MAX_SUPPORTED_VERSION", "def RemoteVersion(self):\n try:\n ver = self.metamanager.Version(connectme_pb2.VersionRequest())\n return (ver.major, ver.minor)\n except grpc.RpcError as e:\n status_code = e.code() # status_code.name and status_code.value\n if grpc.StatusCode.NOT_FOUND == status_code:\n raise FileNotFoundError(e.details()) from e\n else:\n # pass any other gRPC errors to user\n raise e", "def get_protocol_version(api_url : str, session : Optional[requests.Session] = None) -> ProtocolVersionResponse:\n return protocolVersion(api_url, session)", "def protocol_version_9():\n print('Setting protocol version to 9')\n upgrade('protocolversion', 'protocol_version', 9)", "def version_compare(compare_ver, min_version, max_version):\n if max_version == \"*\":\n return True\n if max_version == \"-\" or not 
max_version:\n max_version = \"0\"\n if not min_version or min_version == \"*\" or min_version == \"-\":\n min_version = \"0\"\n if compare_ver == \"-\" or compare_ver == \"*\":\n compare_ver = \"0\"\n if compare_ver == min_version or compare_ver == max_version:\n return True\n compare_ver_parts = str(compare_ver).split(\".\")\n min_version_parts = str(min_version).split(\".\")\n max_version_parts = str(max_version).split(\".\")\n\n # If all versions follow proper versioning then perform a simple numerical comparison\n if len(compare_ver_parts) == len(min_version_parts) and len(\n compare_ver_parts\n ) == len(max_version_parts):\n compare_ver_num = normalise_num(compare_ver, len(compare_ver_parts))\n min_version_num = normalise_num(min_version, len(compare_ver_parts))\n max_version_num = normalise_num(max_version, len(compare_ver_parts))\n if compare_ver_num >= min_version_num and compare_ver_num <= max_version_num:\n return True\n\n normal_len = len(compare_ver_parts)\n if len(min_version_parts) > normal_len:\n normal_len = len(min_version_parts)\n if len(max_version_parts) > normal_len:\n normal_len = len(max_version_parts)\n\n # Normalise the version numbers to be of same length\n compare_ver = normalise_version_str(compare_ver, normal_len)\n min_version = normalise_version_str(min_version, normal_len)\n max_version = normalise_version_str(max_version, normal_len)\n\n compare_ver_parts = str(compare_ver).split(\".\")\n min_version_parts = str(min_version).split(\".\")\n max_version_parts = str(max_version).split(\".\")\n\n for i in range(0, normal_len):\n if (\n not compare_ver_parts[i].isdigit()\n or not min_version_parts[i].isdigit()\n or not max_version_parts[i].isdigit()\n ):\n if (\n compare_ver_parts[i] == min_version_parts[i]\n and compare_ver_parts[i] == max_version_parts[i]\n ):\n continue\n else:\n return False\n elif int(compare_ver_parts[i]) >= int(min_version_parts[i]) and int(\n compare_ver_parts[i]\n ) <= int(max_version_parts[i]):\n continue\n else:\n return False\n return True", "def version_max():\n return VERSION_MAX", "def _negotiate_protocols(self, protocols, direction):\n uris = [p.uri for p in protocols]\n if direction in ['pushFromVoSpace', 'pullToVoSpace']:\n supported = list(set(uris) & set(CLIENT_PROTOCOLS))\n else:\n supported = list(set(uris) & set(SERVER_PROTOCOLS))\n if len(supported) == 0: raise VOSpaceError(500, \"The service supports none of the requested Protocols\", summary = PROTOCOL_NOT_SUPPORTED)\n selected = [p for p in protocols if p.uri in supported]\n if direction in ['pullFromVoSpace', 'pushToVoSpace']:\n for protocol in selected:\n protocol.set_endpoint(SERVER_PROTOCOLS[protocol.uri].get_endpoint())\n return selected", "def __get_best_version(self):\n\t\tif self.length < 32:\n\t\t\treturn 2 # version 2\n\t\telif self.length < 53:\n\t\t\treturn 3 # version 3\n\t\telif self.length < 78:\n\t\t\treturn 4 # version 4\n\t\telif self.length < 106:\n\t\t\treturn 5 # version 5\n\t\telif self.length < 134:\n\t\t\treturn 6 # version 6\n\t\telse:\n\t\t\treturn \"Too long data\"", "def get_version_for(self,platform,version):\n def supports_platform(test_platforms):\n if test_platforms.upper() in ['ALL','ANY']:\n platforms = PLATFORMS\n else:\n platforms = test_platforms.split(':')\n return platform in platforms\n\n # Minimal required version check (for mainline releases)\n if self.min_versions:\n base_version = '.'.join(version.split('.')[:2])\n for base_min_version, min_version in (('.'.join(x.split('.')[:2]),x)\n for x in 
self.min_versions.split(';')):\n if compare_versions(base_version,base_min_version) == 0:\n if compare_versions(version,min_version) < 0:\n return None\n # Find the suitable test version\n candidate = '0'\n test = None\n for t in (t for t in self.versions if supports_platform(t.platform)):\n if compare_versions(version,t.firebird_version) >= 0:\n if compare_versions(candidate,t.firebird_version) < 0:\n candidate = t.firebird_version\n test = t\n return test", "def check_protocol_version(self):\n try:\n protocol_version = self.do_command(\"protocol_version\")\n except BadGtpResponse:\n return\n if protocol_version != \"2\":\n raise BadGtpResponse(\"%s reports GTP protocol version %s\" %\n (self.name, protocol_version))", "def getAPIVersion(self, req):\n import re\n import tracrpc\n match = re.match(r'([0-9]+)\\.([0-9]+)\\.([0-9]+)', tracrpc.__version__)\n return map(int, match.groups())", "def get_version(client):\n version = client.info()['version']['number']\n version = version.split('-')[0]\n if len(version.split('.')) > 3:\n version = version.split('.')[:-1]\n else:\n version = version.split('.')\n return tuple(map(int, version))", "def get_min_cli_version(k8s_cli):\n return MIN_OC_VERSION_SUPPORT_RETRIES if (k8s_cli and k8s_cli.endswith(OC_K8S_CLI))\\\n else MIN_KUBECTL_VERSION_SUPPORT_RETRIES", "def compare_versions(current_version, supported_version):\n try:\n current = current_version.split(\".\")\n supported = supported_version.split(\".\")\n\n if int(current[0]) < int(supported[0]):\n return False\n if int(current[0]) > int(supported[0]):\n return True\n return int(current[1]) >= int(supported[1])\n # pylint: disable=W0703\n except Exception:\n logger.info(\"issues parsing version\")\n return False", "def get_friendly_of_version(self, ofproto):\n if ofproto.OFP_VERSION == 1:\n _of_version = \"1.0\"\n elif ofproto.OFP_VERSION == 4:\n _of_version = \"1.3\"\n else:\n _of_version = \"Unknown version \" + \\\n str(ofproto.OFP_VERSION)\n return _of_version", "def select_versions(self):\n super(ChannelBackend, self).select_versions()\n return [('1.1', '1.1')]" ]
[ "0.7081918", "0.70504236", "0.6895096", "0.63376284", "0.63264036", "0.63264036", "0.61619365", "0.60787296", "0.60647607", "0.6016684", "0.5973124", "0.5794255", "0.57867813", "0.5767949", "0.5725214", "0.5694531", "0.5666855", "0.56289035", "0.562115", "0.5615987", "0.5604014", "0.5587643", "0.5484007", "0.547843", "0.54692096", "0.54465586", "0.5434421", "0.54283506", "0.541454", "0.5408799" ]
0.8174987
0
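The query/document pair above describes major-version negotiation: take the integer (major) part of the client's earliest and latest supported versions, then walk the span from newest to oldest until a server-supported major is found. Below is a self-contained sketch of that logic; SUPPORTED is an assumed stand-in for RPCS.SUPPORTED_PROTOCOL_VERSIONS, whose real contents are not shown in this dump.

SUPPORTED = {1, 2, 3}  # assumed server-side set, not taken from the dataset

def negotiate(earliest: str, latest: str) -> str:
    # Only the major (first) number of each version string matters here
    lo, hi = int(earliest.split(".")[0]), int(latest.split(".")[0])
    for major in range(hi, lo - 1, -1):  # newest first
        if major in SUPPORTED:
            return str(major)
    raise ValueError("NoValidProtocolVersionInCommon")

assert negotiate("1.0", "4.2") == "3"

Iterating from hi down to lo means the newest mutually supported major always wins, matching the reversed() walk in the document field above.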
This function parses and validates the provided protocol version strings, then makes all necessary comparisons between the provided and supported protocol versions.
def compare_protocol_versions(self, session): # First parse protocol version strings to check for invalid formatting invalid_string = self.parse_protocol_version( [self.earliest_protocol_version, self.latest_protocol_version]) if invalid_string is not None: # Error during protocol string parsing data = ('earliest_protocol_version' if invalid_string == self.earliest_protocol_version else 'latest_protocol_version') raise ClientRequestError('InvalidParameterValue', data=data) # Check if protocol version is supported and define the one to use self.protocol_version = self.determine_supported_protocol( self.earliest_protocol_version, self.latest_protocol_version)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _sanityCheckProtocolVersions(other):\n if other.minVersion > other.maxVersion:\n raise ValueError(\"Versions set incorrectly\")\n if other.minVersion not in KNOWN_VERSIONS:\n raise ValueError(\"minVersion set incorrectly\")\n if other.maxVersion not in KNOWN_VERSIONS:\n raise ValueError(\"maxVersion set incorrectly\")\n\n if other.maxVersion < (3, 4):\n other.versions = [i for i in other.versions if i < (3, 4)]", "def parse_protocol_version(self, version_string_list):\n # Verify for every provided string if it is in proper versioning format\n for version_string in version_string_list:\n\n try:\n parsed_version_string = version_string.split('.')\n if len(parsed_version_string) == 1 and version_string.isdigit():\n # No dots in version string, it is a simple integer.\n continue\n\n StrictVersion(version_string)\n\n except (AttributeError, ValueError):\n LOG.debug('Invalid protocol version string provided')\n return version_string\n\n # Check for malformatting\n for i in range(len(parsed_version_string)):\n if len(parsed_version_string[i]) > 1:\n if parsed_version_string[i][0] == '0': # Leading 0's\n return version_string\n if len(parsed_version_string[i]) < 1: # Empty strings\n return version_string\n\n # Protocol version formating: OK\n return None", "def determine_supported_protocol(self, earliest, latest):\n earliest = int(earliest.split('.')[0])\n latest = int(latest.split('.')[0])\n if earliest <= latest:\n supported = range(earliest, latest + 1)\n for version in (reversed(supported)):\n if version in RPCS.SUPPORTED_PROTOCOL_VERSIONS:\n return str(version)\n\n # If no common protocol version is found, raise fatal error\n raise ClientRequestError('NoValidProtocolVersionInCommon')", "def do_protocol_version(self):\n return \"2\", True", "def test_parse_version():\n version = VersionUtils.parse_version('9.5.3')\n assert version == VersionInfo(9, 5, 3)\n\n # Test #.# style versions\n v10_2 = VersionUtils.parse_version('10.2')\n assert v10_2 == VersionInfo(10, 2, 0)\n\n v11 = VersionUtils.parse_version('11')\n assert v11 == VersionInfo(11, 0, 0)\n\n # Test #beta# style versions\n beta11 = VersionUtils.parse_version('11beta3')\n assert beta11 == VersionInfo(11, 0, 0, prerelease='beta.3')\n\n assert v10_2 < beta11\n assert v11 > beta11\n\n # Test #rc# style versions\n version = VersionUtils.parse_version('11rc1')\n assert version == VersionInfo(11, 0, 0, prerelease='rc.1')\n\n # Test #nightly# style versions\n version = VersionUtils.parse_version('11nightly3')\n assert version == VersionInfo(11, 0, 0, 'nightly.3')\n\n v12_3_tde = VersionUtils.parse_version('12.3_TDE_1.0')\n assert v12_3_tde == VersionInfo(12, 3, 0)", "def compare_versions(current_version, supported_version):\n try:\n current = current_version.split(\".\")\n supported = supported_version.split(\".\")\n\n if int(current[0]) < int(supported[0]):\n return False\n if int(current[0]) > int(supported[0]):\n return True\n return int(current[1]) >= int(supported[1])\n # pylint: disable=W0703\n except Exception:\n logger.info(\"issues parsing version\")\n return False", "def protocol(ver):\r\n if ver == 1:\r\n return 1\r\n\r\n if ver == 2:\r\n return 2\r\n\r\n\r\n raise ValueError", "def test_parse_version(self):\n self.assertEqual(\n _parse_sw_version('BaiStation_V100R001C00B110SPC003'),\n [100, 1, 0, 110, 3],\n )\n self.assertEqual(\n _parse_sw_version('BaiStation_V100R001C00B060SPC012'),\n [100, 1, 0, 60, 12],\n )\n self.assertEqual(\n _parse_sw_version('BaiStation_V100R001C00B060SPC012_FB_3'),\n [100, 1, 0, 60, 12],\n )\n # 
Incorrect number of digits\n self.assertEqual(\n _parse_sw_version('BaiStation_V10R001C00B060SPC012'),\n None,\n )\n self.assertEqual(\n _parse_sw_version('XYZ123'),\n None,\n )\n self.assertEqual(\n _parse_sw_version(''),\n None,\n )", "def _supported_versions(self, jarm_details, grease):\n if (jarm_details[7] == \"1.2_SUPPORT\"):\n # TLS 1.3 is not supported.\n tls = [b\"\\x03\\x01\", b\"\\x03\\x02\", b\"\\x03\\x03\"]\n else:\n # TLS 1.3 is supported.\n tls = [b\"\\x03\\x01\", b\"\\x03\\x02\", b\"\\x03\\x03\", b\"\\x03\\x04\"]\n\n # Change supported version order, by default, the versions are from\n # oldest to newest.\n if jarm_details[8] != \"FORWARD\":\n tls = self._cipher_mung(tls, jarm_details[8])\n\n # Assemble the extension.\n ext = b\"\\x00\\x2b\"\n # Add GREASE if applicable.\n if grease:\n versions = self._choose_grease()\n else:\n versions = b\"\"\n\n for version in tls:\n versions += version\n\n second_length = len(versions)\n first_length = second_length+1\n ext += struct.pack(\">H\", first_length)\n ext += struct.pack(\">B\", second_length)\n ext += versions\n\n return ext", "def compare(v1=\"\", v2=\"\"):\n if any([v1 == \"\", v2 == \"\"]):\n return 'One or both versions are not provided.'\n\n characters1 = list(v1)\n characters2 = list(v2)\n\n if not characters1.index('.'):\n return 'v1 is in wrong format'\n if not characters2.index('.'):\n return 'v2 is in wrong format'\n\n def extract_number(characters):\n working_list = []\n resulting_list = []\n dot_index = characters.index('.')\n go_on = True\n for i in range(dot_index):\n if characters[i] == '0' and go_on:\n continue\n go_on = False\n working_list.append(characters[i])\n if not working_list:\n working_list.append('0')\n num_str = ''.join(working_list)\n resulting_list.append(num_str)\n resulting_list.append('.')\n working_list.clear()\n go_on = True\n for i in range(len(characters)-(dot_index+1)):\n index = i + (dot_index+1)\n if characters[index] == '0' and go_on:\n continue\n go_on = False\n working_list.append(characters[index])\n if not working_list:\n working_list.append('0')\n num_str = ''.join(working_list)\n resulting_list.append(num_str)\n return resulting_list\n\n list1 = extract_number(characters1)\n list2 = extract_number(characters2)\n\n def check(a_list):\n if a_list[0].isdigit() and a_list[2].isdigit():\n return True\n return False\n\n if not check(list1):\n return 'Invalid input - {}'.format(v1)\n if not check(list2):\n return 'Invalid input - {}'.format(v2)\n\n if list1[0] > list2[0]:\n return 'Version {0} is greater than Version {1}'.format(v1, v2)\n elif list1[0] < list2[0]:\n return 'Version {0} is smaller than Version {1}'.format(v1, v2)\n else:\n if list1[2] > list2[2]:\n return 'Version {0} is greater than Version {1}'.format(v1, v2)\n elif list1[2] < list2[2]:\n return 'Version {0} is smaller than Version {1}'.format(v1, v2)\n else:\n return 'Version {0} is equal to Version {1}'.format(v1, v2)", "def version_compare(compare_ver, min_version, max_version):\n if max_version == \"*\":\n return True\n if max_version == \"-\" or not max_version:\n max_version = \"0\"\n if not min_version or min_version == \"*\" or min_version == \"-\":\n min_version = \"0\"\n if compare_ver == \"-\" or compare_ver == \"*\":\n compare_ver = \"0\"\n if compare_ver == min_version or compare_ver == max_version:\n return True\n compare_ver_parts = str(compare_ver).split(\".\")\n min_version_parts = str(min_version).split(\".\")\n max_version_parts = str(max_version).split(\".\")\n\n # If all versions follow proper 
versioning then perform a simple numerical comparison\n if len(compare_ver_parts) == len(min_version_parts) and len(\n compare_ver_parts\n ) == len(max_version_parts):\n compare_ver_num = normalise_num(compare_ver, len(compare_ver_parts))\n min_version_num = normalise_num(min_version, len(compare_ver_parts))\n max_version_num = normalise_num(max_version, len(compare_ver_parts))\n if compare_ver_num >= min_version_num and compare_ver_num <= max_version_num:\n return True\n\n normal_len = len(compare_ver_parts)\n if len(min_version_parts) > normal_len:\n normal_len = len(min_version_parts)\n if len(max_version_parts) > normal_len:\n normal_len = len(max_version_parts)\n\n # Normalise the version numbers to be of same length\n compare_ver = normalise_version_str(compare_ver, normal_len)\n min_version = normalise_version_str(min_version, normal_len)\n max_version = normalise_version_str(max_version, normal_len)\n\n compare_ver_parts = str(compare_ver).split(\".\")\n min_version_parts = str(min_version).split(\".\")\n max_version_parts = str(max_version).split(\".\")\n\n for i in range(0, normal_len):\n if (\n not compare_ver_parts[i].isdigit()\n or not min_version_parts[i].isdigit()\n or not max_version_parts[i].isdigit()\n ):\n if (\n compare_ver_parts[i] == min_version_parts[i]\n and compare_ver_parts[i] == max_version_parts[i]\n ):\n continue\n else:\n return False\n elif int(compare_ver_parts[i]) >= int(min_version_parts[i]) and int(\n compare_ver_parts[i]\n ) <= int(max_version_parts[i]):\n continue\n else:\n return False\n return True", "def test_VersionOptionalFields():\n # onlyRequiredVersion is a version message that only contains the\n # required versions and all other values set to their default values.\n onlyRequiredVersion = minimumMsgVersion()\n\n onlyRequiredVersionEncoded = baseVersionEncoded()[:-55]\n\n # addrMeVersion is a version message that contains all fields through\n # the AddrMe field.\n addrMe = netaddress.NetAddress(\n ip=\"127.0.0.1\", port=8333, services=wire.SFNodeNetwork, stamp=0,\n )\n addrMeVersion = minimumMsgVersion()\n addrMeVersion.addrMe = addrMe\n\n addrMeVersionEncoded = baseVersionEncoded()[:-29]\n\n # nonceVersion is a version message that contains all fields through\n # the Nonce field.\n nonceVersion = minimumMsgVersion()\n nonceVersion.addrMe = addrMe\n nonceVersion.nonce = 123123 # 0x1e0f3\n nonceVersionEncoded = baseVersionEncoded()[:-21]\n\n # uaVersion is a version message that contains all fields through\n # the UserAgent field.\n uaVersion = minimumMsgVersion()\n uaVersion.addrMe = addrMe\n uaVersion.nonce = 123123\n uaVersion.userAgent = \"/dcrdtest:0.0.1/\"\n uaVersionEncoded = baseVersionEncoded()[:-4]\n\n # lastBlockVersion is a version message that contains all fields\n # through the LastBlock field.\n lastBlockVersion = minimumMsgVersion()\n lastBlockVersion.addrMe = addrMe\n lastBlockVersion.nonce = 123123\n lastBlockVersion.userAgent = \"/dcrdtest:0.0.1/\"\n lastBlockVersion.lastBlock = 234234 # 0x392fa\n lastBlockVersionEncoded = baseVersionEncoded()\n\n tests = [\n (onlyRequiredVersion, onlyRequiredVersionEncoded),\n (addrMeVersion, addrMeVersionEncoded),\n (nonceVersion, nonceVersionEncoded),\n (uaVersion, uaVersionEncoded),\n (lastBlockVersion, lastBlockVersionEncoded),\n ]\n\n for expMsg, buf in tests:\n # Decode the message from wire format.\n msg = msgversion.MsgVersion.btcDecode(buf, wire.ProtocolVersion)\n assert sameMsgVersion(msg, expMsg)", "def check_protocol_version(self):\n try:\n protocol_version = 
self.do_command(\"protocol_version\")\n except BadGtpResponse:\n return\n if protocol_version != \"2\":\n raise BadGtpResponse(\"%s reports GTP protocol version %s\" %\n (self.name, protocol_version))", "def check_http_request_validity(http_raw_data) -> HttpRequestState:\n\n global version\n r1 = http_raw_data.split('\\n')[0]\n r2 = http_raw_data.split('\\n')[1]\n\n if (re.search(\"GET\", r1) != None) and (re.search(\"/\", r1) != None) and (re.search(\"HTTP/1.0\", r1) != None) and (re.search(\":\", r2)):\n return HttpRequestState.GOOD\n\n if (re.search(\"GET\", r1) != None) and (re.search(\"http://\", r1) != None) and (re.search(\"HTTP/1.0\", r1) != None):\n return HttpRequestState.GOOD\n\n if (re.search(\"GET\", r1)!=None) and (re.search(\"/\", r1)!=None) and (re.search(\"HTTP/1.0\",r1)!=None) :\n if (re.search(\":\", r2) == None) :\n return HttpRequestState.INVALID_INPUT\n\n if(re.search(\"GOAT\", r1)!=None):\n return HttpRequestState.INVALID_INPUT\n\n if (re.search(\"HEAD\"or\"POST\" or \"PUT\" , r1)!=None) and (re.search(\"/\",r1)!=None) and (re.search(\"HTTP/1.0\", r1) != None) and (re.search(\":\", r2)):\n\n return HttpRequestState.NOT_SUPPORTED\n\n if (re.search(\"HEAD\"or\"POST\" or \"PUT\" ,r1)!=None) and (re.search(\"/\",r1)!=None) and (re.search(\"HTTP/1.0\",r1)!=None):\n return HttpRequestState.INVALID_INPUT\n\n if (re.search(\"HEAD\"or\"POST\" or \"PUT\", r1) != None) and (re.search(\"HTTP/1.0\", r1) == None) and (re.search(\":\", r2) != None):\n return HttpRequestState.INVALID_INPUT\n print(\"*\" * 50)\n print(\"[check_http_request_validity] Implement me!\")\n print(\"*\" * 50)\n\n return HttpRequestState.PLACEHOLDER", "def supports_http_1_1():", "def _check_compat(sock_info):\n ...", "def test_get_protocol_version(self):\n server, client = loopback()\n client_protocol_version = client.get_protocol_version()\n server_protocol_version = server.get_protocol_version()\n\n assert isinstance(server_protocol_version, int)\n assert isinstance(client_protocol_version, int)\n\n assert server_protocol_version == client_protocol_version", "def check_versions(context, num=0, versions='', ecosystem='', package=''):\n versions = split_comma_separated_list(versions)\n vrsns = context.response.json()['items']\n assert len(vrsns) == num\n for v in vrsns:\n assert v['ecosystem'] == ecosystem\n assert v['package'] == package\n assert v['version'] in versions", "def compare_versions(self, version1, version2):\n max_segments = max(len(version1.split(\".\")), len(version2.split(\".\")))\n return cmp(self.__normalize_version(version1, desired_segments=max_segments), self.__normalize_version(version2, desired_segments=max_segments))", "def rangeCompare(reqtuple, provtuple):\n (reqn, reqf, (reqe, reqv, reqr)) = reqtuple\n (n, f, (e, v, r)) = provtuple\n if reqn != n:\n return 0\n\n # unversioned satisfies everything\n if not f or not reqf:\n return 1\n\n # and you thought we were done having fun\n # if the requested release is left out then we have\n # to remove release from the package prco to make sure the match\n # is a success - ie: if the request is EQ foo 1:3.0.0 and we have \n # foo 1:3.0.0-15 then we have to drop the 15 so we can match\n if reqr is None:\n r = None\n if reqe is None:\n e = None\n if reqv is None: # just for the record if ver is None then we're going to segfault\n v = None\n\n # if we just require foo-version, then foo-version-* will match\n if r is None:\n reqr = None\n\n rc = compareEVR((e, v, r), (reqe, reqv, reqr))\n\n # does not match unless\n if rc >= 1:\n if reqf in 
['GT', 'GE', 4, 12, '>', '>=']:\n return 1\n if reqf in ['EQ', 8, '=']:\n if f in ['LE', 10, 'LT', 2,'<=', '<']:\n return 1\n if reqf in ['LE', 'LT', 'EQ', 10, 2, 8, '<=', '<', '=']:\n if f in ['LE', 'LT', 10, 2, '<=', '<']:\n return 1\n\n if rc == 0:\n if reqf in ['GT', 4, '>']:\n if f in ['GT', 'GE', 4, 12, '>', '>=']:\n return 1\n if reqf in ['GE', 12, '>=']:\n if f in ['GT', 'GE', 'EQ', 'LE', 4, 12, 8, 10, '>', '>=', '=', '<=']:\n return 1\n if reqf in ['EQ', 8, '=']:\n if f in ['EQ', 'GE', 'LE', 8, 12, 10, '=', '>=', '<=']:\n return 1\n if reqf in ['LE', 10, '<=']:\n if f in ['EQ', 'LE', 'LT', 'GE', 8, 10, 2, 12, '=', '<=', '<' , '>=']:\n return 1\n if reqf in ['LT', 2, '<']:\n if f in ['LE', 'LT', 10, 2, '<=', '<']:\n return 1\n if rc <= -1:\n if reqf in ['GT', 'GE', 'EQ', 4, 12, 8, '>', '>=', '=']:\n if f in ['GT', 'GE', 4, 12, '>', '>=']:\n return 1\n if reqf in ['LE', 'LT', 10, 2, '<=', '<']:\n return 1\n# if rc >= 1:\n# if reqf in ['GT', 'GE', 4, 12, '>', '>=']:\n# return 1\n# if rc == 0:\n# if reqf in ['GE', 'LE', 'EQ', 8, 10, 12, '>=', '<=', '=']:\n# return 1\n# if rc <= -1:\n# if reqf in ['LT', 'LE', 2, 10, '<', '<=']:\n# return 1\n\n return 0", "def _get_version_rules(self, vuln_versions):\n rules = []\n regex_op = \"[0-9a-zA-Z\\\\_\\\\.\\\\-]+\"\n regex_vr = \"[<>=*]+\"\n \"\"\"For all the vulnerable versions information that we get, we need to create\n comparable version object so that we can apply these rules on top of all the available\n versions of a pkg in the market.\"\"\"\n for version in vuln_versions:\n version = version.replace(\" \", \"\")\n sub_vers = version.split('||')\n for sub_ver in sub_vers:\n tmp = []\n vr_relations = re.split(regex_vr, sub_ver)\n op_relations = re.split(regex_op, sub_ver)\n # Single affected version.\n if len(vr_relations) == 1:\n tmp.append({\n 'key': \"=\",\n 'val': ComparableVersion(vr_relations[0])\n })\n # All versions affected.\n elif len(op_relations) == 1 and op_relations[0] == '*':\n tmp.append({\n 'key': \"*\",\n 'val': \"\"\n })\n else:\n for i in range(len(op_relations) - 1):\n tmp.append({\n 'key': op_relations[i],\n 'val': ComparableVersion(vr_relations[i + 1])\n })\n rules.append(tmp)\n\n return rules", "def test_VersionWireErrors():\n # Use protocol version 60002 specifically here instead of the latest because\n # the test data is using bytes encoded with that protocol version.\n pver = 60002\n\n # Get a base version, and change the user agent to exceed max limits.\n bvc = baseVersion()\n exceedUAVer = bvc\n newUA = \"/\" + \"t\" * (msgversion.MaxUserAgentLen - 8 + 1) + \":0.0.1/\"\n exceedUAVer.userAgent = newUA\n\n # Encode the new UA length as a varint.\n newUAVarIntBuf = wire.writeVarInt(pver, len(newUA))\n\n # Make a new buffer big enough to hold the base version plus the new\n # bytes for the bigger varint to hold the new size of the user agent\n # and the new user agent string. 
Then stitch it all together.\n bvEnc = baseVersionEncoded()\n exceedUAVerEncoded = ByteArray()\n exceedUAVerEncoded += bvEnc[0:80]\n exceedUAVerEncoded += newUAVarIntBuf\n exceedUAVerEncoded += newUA.encode()\n exceedUAVerEncoded += bvEnc[97:100]\n\n with pytest.raises(DecredError):\n msgversion.MsgVersion.btcDecode(exceedUAVerEncoded, pver)\n\n bv = baseVersion()\n bv.userAgent = \"t\" * msgversion.MaxUserAgentLen + \"1\"\n\n with pytest.raises(DecredError):\n bv.btcEncode(pver)", "def valid_version(self, new_version):\n if not re.match(r\"\\d+\\.\\d+\\.\\d+\", new_version):\n return False\n\n x1, y1, z1 = [int(i) for i in self.current_version().split(\".\")]\n x2, y2, z2 = [int(i) for i in new_version.split(\".\")]\n\n if x2 < x1:\n return False\n\n if x2 == x1 and y2 < y1:\n return False\n\n if x2 == x1 and y2 == y1 and z2 <= z1:\n return False\n\n return True", "def validate_backend_version(self):\n pass", "def test_load_protocol():\n\n # version 0.0.0 files\n for i in [0]:\n yield load_protocol, (path(__file__).parent /\n path('protocols') /\n path('protocol %d v%s' % (i, Version(0,0,0))))\n\n # version 0.1.0 files\n for i in [0]:\n yield load_protocol, (path(__file__).parent /\n path('protocols') /\n path('protocol %d v%s' % (i, Version(0,1,0))))", "def test_version(self, parse_input):\n bb = parse_input(\"name testname\\nversion 1.0\")\n assert bb.version == \"1.0\"\n\n bb = parse_input(\"name testname\\nversion 1.12\")\n assert bb.version == \"1.12\"", "def version_check(self):\n # anchor_matcher --> matcher\n if hasattr(self, \"anchor_matcher\"):\n self.matcher = self.anchor_matcher\n if hasattr(self, \"head_in_features\"):\n self.in_features = self.head_in_features\n if hasattr(self, \"test_topk_candidates\"):\n self.topk_candidates = self.test_topk_candidates\n if hasattr(self, \"test_score_thresh\"):\n self.score_threshold = self.test_score_thresh", "def test_schema_version(self):\n\n self.validator.adata.uns[\"schema_version\"] = \"1.0.0\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. 
\"\n \"Validation cannot be performed.\"\n ],\n )", "def get_version_rules(self, vuln_versions):\n rules = []\n regex_op = \"[0-9a-zA-Z\\\\_\\\\.\\\\-]+\"\n regex_vr = \"[<>=*]+\"\n \"\"\"For all the vulnerable versions information that we get, we need to create\n comparable version object so that we can apply these rules on top of all the available\n versions of a pkg in the market.\"\"\"\n for version in vuln_versions:\n version = version.replace(\" \", \"\")\n version = version.replace(\"+incompatible\", \"\")\n sub_vers = version.split('||')\n for sub_ver in sub_vers:\n tmp = []\n vr_relations = re.split(regex_vr, sub_ver)\n op_relations = re.split(regex_op, sub_ver)\n # Single affected version.\n if len(vr_relations) == 1:\n tmp.append({\n 'key': \"=\",\n 'val': ComparableVersion(vr_relations[0])\n })\n # All versions affected.\n elif len(op_relations) == 1 and op_relations[0] == '*':\n tmp.append({\n 'key': \"*\",\n 'val': \"\"\n })\n else:\n for i in range(len(op_relations) - 1):\n tmp.append({\n 'key': op_relations[i],\n 'val': ComparableVersion(vr_relations[i + 1])\n })\n rules.append(tmp)\n\n return rules", "def test_VersionWire():\n # verRelayTxFalse and verRelayTxFalseEncoded is a version message as of\n # BIP0037Version with the transaction relay disabled.\n verRelayTxFalse = baseVersionBIP0037()\n verRelayTxFalse.disableRelayTx = True\n verRelayTxFalseEncoded = baseVersionBIP0037Encoded()\n verRelayTxFalseEncoded[-1] = 0\n\n bv = baseVersionBIP0037()\n tests = [\n (bv, bv, baseVersionBIP0037Encoded()),\n (verRelayTxFalse, verRelayTxFalse, verRelayTxFalseEncoded),\n ]\n\n for msgIn, msgOut, msgEnc in tests:\n # Encode the message to wire format.\n b = msgIn.btcEncode(wire.ProtocolVersion)\n assert b == msgEnc\n\n # Decode the message from wire format.\n msg = msgversion.MsgVersion.btcDecode(msgEnc, wire.ProtocolVersion)\n assert sameMsgVersion(msg, msgOut)" ]
[ "0.7269251", "0.6852072", "0.68450886", "0.6574904", "0.645231", "0.6396166", "0.63311136", "0.6159254", "0.61516625", "0.6143125", "0.6079563", "0.6043297", "0.6020891", "0.5971723", "0.5925532", "0.5903397", "0.58636093", "0.57814234", "0.57633996", "0.5755667", "0.57490414", "0.57051283", "0.5693326", "0.56907916", "0.568826", "0.5685663", "0.5668759", "0.56587327", "0.5655409", "0.5632572" ]
0.7876382
0
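The record above pairs compare_protocol_versions with negatives that all validate or compare version strings. The original parse_protocol_version delegates format checking to distutils' StrictVersion plus manual leading-zero and empty-component checks; here is a dependency-free sketch of the same rules (distutils is deprecated in recent Python), using an assumed regex rather than the original's exact logic.

import re

# Each dot-separated component must be a non-empty integer with no leading zeros
VERSION_RE = re.compile(r"^(0|[1-9]\d*)(\.(0|[1-9]\d*))*$")

def first_invalid(version_strings):
    """Return the first malformed version string, or None if all are valid."""
    for s in version_strings:
        if not VERSION_RE.match(s):
            return s
    return None

assert first_invalid(["1", "2.0", "10.4.1"]) is None
assert first_invalid(["1.02"]) == "1.02"  # leading zero rejected
assert first_invalid(["1."]) == "1."      # empty component rejected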
If a client_id is provided together with the inform message, we now have enough information to fetch the client's data from the database.
def handle_client_id(self, session): if session['client']['cid'] is not None: # A subscriber ID may only contain letters, numbers, spaces and # the following special characters: - _ \ / ( ) # . p = re.compile('^[A-Za-z0-9-_\\\. #/()]+$') if p.match(session['client']['cid']) is None: raise ClientRequestError('InvalidClientId') try: session['client'] = session['db'].client_data_query( session['client']['cid']) except DbException as db_err: session['log'] = {'rc': 'error', 'msg': 'Non matching ClientID'} raise ClientRequestError('UnknownClient', data=session['client']['cid'] + ' does not match data in database') if session['client'] is None: # The client could not be found. # It means that the client is not yet defined in the database. msg = ' cid:' + session['client']['cid'] LOG.info("Client not in database, " + msg) session['log'] = {'rc': 'ok', 'msg': 'Unknown CLIENT '} raise ClientRequestError('UnknownClient', data='No entry for client in database')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handle_inform(self, session, request):\n # Verify the parameters\n params = get_element('params', request)\n # Default value for protocol_compression\n protocol_compression = 'NONE'\n response = {}\n\n if params is not None:\n try:\n # Fetch inform parameters and load into session\n self.fetch_inform_params(session, params)\n # handle a possible subscriber id (MACless communication)\n self.compare_protocol_versions(session)\n # If protocol_compression method is provided, check if valid\n self.handle_protocol_compression(session)\n # Validate and check reason (event) for this session\n self.handle_client_id(session)\n # Parse provided protocol version parameters and check validity\n self.handle_connection_event(session)\n # Check for unknown parameters provided in RPC\n for key in params:\n if key not in RPCS.VALID_INFORM_PARAMETERS:\n raise ClientRequestError(\"InvalidParameterName\", data=key)\n\n except ClientRequestError as inform_error:\n next_state = inform_error.error['next_state']\n error_message = {\"error\": inform_error.error['error']}\n\n if inform_error.error_name == \"InvalidClientId\":\n # As per defined in the protocol: Log in database\n session['log'] = {'rc': 'error', 'msg': 'Invalid client_id value'}\n\n LOG.debug(\"ExpectInform Error: \" + str(inform_error))\n return next_state, error_message\n except DbException:\n return (RPCS.ExpectInform, {\n 'error': {'code': -31997,\n 'message': 'Database access error'}})\n\n # Everything is OK with Inform RPC\n next_state = RPCS.ExpectRpc\n response['result'] = {\n 'protocol_version': self.protocol_version,\n 'protocol_compression': protocol_compression\n }\n\n # No parameters provided with inform RPC\n else:\n next_state = RPCS.ExpectInform\n response['error'] = {\n 'code': -32602, 'message': 'Invalid parameter'}\n\n return next_state, response", "def detail(client_id):\n try:\n # Fetch client details from the BancBox api and render\n clientId = { 'bancBoxId': client_id }\n request_params = {'subscriberId': subscriber_id, 'clientId': clientId}\n results = api.service.getClient(request_params) \n client = results.client\n except Exception, e:\n logger.error('Error retrieving client [%s]: %s', client_id, e)\n client = {}\n return render_template('detail.html', client=client)", "def on_report_to_master(client_id, data):", "def get_client_data(self, client_id):\n query = \"\"\"SELECT id,\n secret\n FROM clients\n WHERE active = 1\n AND id = %s\"\"\"\n self._execute(query, (client_id,))\n return self._dictfetchone()", "def from_client(self, data):\r\n pass", "def request_client_id(self) -> None:\n GCR.log.log(Logger.INFORMATION, \"Demande d'un id client\")\n self.send({\"action\": \"request_id\", \"username\": self.username})", "def received_information(update: Update, context: CallbackContext) -> int:\n text = update.message.text\n for a in user_d:\n category = user_d[a]\n if category == 'Public_Key' and len(text) == 58:\n assert len(text) == 58, update.message.reply_text(\"The address is invalid address\")\n user_d[category] = text\n elif category == 'Quantity' and type(int(text) == int):\n user_d[category] = int(text)\n elif category == 'Secret_Key' and len(text) > 58:\n user_d[category] = text\n else:\n user_d[category] = text\n user_data = context.user_data\n user_data[category] = user_d[category]\n\n update.message.reply_text(\n \"I got this from you:\\n\"\n f\"{facts_to_str(user_d)}\",\n reply_markup=markup_r,\n )\n user_d.clear()\n\n return CHOOSING", "def create_client_request(self):\n try:\n 
query_to_add_client_request = \"INSERT INTO client_request(produce_name, quantity, price_range, users_id, date_created,date_modified) VALUES(%s,%s,%s,%s,%s,%s)\"\n connection.cursor.execute(query_to_add_client_request,( self.produce_name,self.quantity, self.price_range, self.current_user, self.date_created, self.date_modified))\n query_to_search_client_request = \"SELECT * FROM client_request WHERE produce_name=%s\"\n connection.cursor.execute(query_to_search_client_request, [self.produce_name])\n added_produce = connection.cursor.fetchone()\n result = {\n 'id': added_produce[0],\n 'produce_name': added_produce[1],\n 'quantity':added_produce[2],\n 'price_range': added_produce[3],\n }\n\n return result\n \n except Exception as exc:\n print(exc)", "def recieve_information_from_client():\r\n client_data = request.forms.get('json')\r\n client_data_dict = json.loads(client_data)\r\n return client_data_dict", "def get_info(self,honeypotids):\n req = {\"type\":\"get_info\",\n \"from\":self.network.mc_id,\n \"to\": honeypotids}\n expect_dict = {\"type\":\"send_info\"}\n msg_list = self.send_receive(req,honeypotids,expect_dict)\n answer = {}\n for msg in msg_list:\n answer[msg[\"from\"]] = msg[\"info\"]\n return answer", "def set_client_id(self):\n data = self.receive() # deserialized data\n client_id = data['clientid'] # extracts client id from data\n self.client_id = client_id # sets the client id to this client\n print(\"Successfully connected to server: \" + self.userInfo['host'] + \" / \" + str(self.userInfo['port']))\n print(\"Your client info is:\\n\" + \"Client Name: \" + self.userInfo['name'] + \"\\nClient ID: \" + str(client_id))", "def change_client_info(request: Request) -> Dict:\n ser = ChangeClientInfoSerializer(data=request.data)\n if ser.is_valid():\n if ser.validated_data.get('email') and request.user.client.email != ser.validated_data['email']:\n request.user.client.activated = False\n new_email = UserEmail(template_id=1, user=request.user)\n new_email.generate_code()\n is_send = send_email_to_user(1, [request.user.client.email], f'https://royal-lion.bet/activate/{new_email.code}')\n if is_send:\n new_email.save()\n request.user.client.save()\n ser.update(request.user.client, validated_data=ser.validated_data)\n return {'data': 'ok', 'success': True}\n else:\n return {'errors': ser.errors, 'success': False}", "def client_details(self, value):\n self._client_details = value", "def get_patient_data(self, client):\n for patient in self._monitored_patients.get_patient_list():\n # print(\"Requesting data for \" + patient.first_name+\" \"+patient.last_name+\"...\")\n patient.update_data(client.get_patient_data(patient.id))", "def datapack_details(request):\n print 'get datapack details'\n\n context = request['context']\n print context\n try:\n telephone_number = first_entity_value(request['entities'], 'phone_number')\n with open(os.path.join(sys.path[0], \"app/wit/static/users.json\"), \"r\") as data_file:\n data = json.load(data_file)\n network_details = data[telephone_number]['data_details']\n print network_details\n\n\n\n reply = \"Our Initial Investigation shows that you're are currently using \" + network_details['network_services_available'] + \" and have subscribed for \" + network_details['network_services_subscribed'] + \".\"\n if network_details['megabytes_available'] == 0:\n reply += \" You have exhausted your datapack. Change your network settings to use pay2go plan or recharge now with available datapacks. 
Please check http://www.airtel.in/Airtel3G/tariff.html\"\n elif network_details['network_services_available'] != network_details['network_services_subscribed']:\n reply += \" Your subscribed datapack settings does not match with services available. Please change your network settings\"\n\n except:\n telephone_number = None\n reply = \"Your number is not subscribed with Airtel. Please contact your network operator for your query\"\n\n context['datapack'] = reply\n\n return context", "def getInfo(notification):", "def __ServiceClient(self,Client):\n\t\twhile True:\n\t\t\tDataClient = Client.recv(1024)\n\t\t\tprint(DataClient)\n\t\t\t# your source code here\n\t\t\tmessage = DataClient\n\t\t\t# data to be sent to api\n\t\t\tdata = {'message': message}\n\t\t\t# sending post request and saving response as response object\n\t\t\tr = requests.post(url = self.API_ENDPOINT, data = data)\n\t\t\t# extracting response text\n\t\t\t#pastebin_url = r.text\n\t\t\t#print(\"The pastebin URL is:%s\"%pastebin_url)", "async def execute(self):\n payload = self.integration_client.dict()\n\n await self.rethink_handler.single_insert_update(\n db='mesxpert_view',\n table='integration_clients',\n payload=payload\n )", "def __set_client_detail(self):\r\n ClientDetail = self.client.factory.create('ClientDetail')\r\n ClientDetail.AccountNumber = self.config_obj.account_number\r\n ClientDetail.MeterNumber = self.config_obj.meter_number\r\n ClientDetail.IntegratorId = self.config_obj.integrator_id\r\n if hasattr(ClientDetail, 'Region'):\r\n ClientDetail.Region = self.config_obj.express_region_code\r\n self.ClientDetail = ClientDetail", "def choose(self, _id):\n app = App.get_running_app()\n self.manager.client = app.session.query(Client).filter(Client.id == _id).one()\n self.manager.current = 'info'", "def client_message_handler(self, message, client):\n LOG.debug(f\"Разбираем сообщение: {message}\")\n if (\n s.KEY_ACTION in message\n and message[s.KEY_ACTION] == s.ACTION_PRESENCE\n and s.KEY_TIME in message\n and s.KEY_USER in message\n ):\n if message[s.KEY_USER][s.KEY_ACCOUNT_NAME] not in self.names.keys():\n self.names[message[s.KEY_USER][s.KEY_ACCOUNT_NAME]] = client\n MSG.send(client, s.RESPONSE_200)\n else:\n response = s.RESPONSE_400\n response[s.KEY_ERROR] = \"Имя пользователя уже занято.\"\n MSG.send(client, response)\n self.clients.remove(client)\n client.close()\n return\n # Если это сообщение, то добавляем его в очередь сообщений.\n # Ответ не требуется.\n elif (\n s.KEY_ACTION in message\n and message[s.KEY_ACTION] == s.ACTION_MESSAGE\n and s.KEY_TIME in message\n and s.KEY_TO in message\n and s.KEY_FROM in message\n and s.KEY_MESSAGE in message\n ):\n self.messages.append(message)\n return\n # Если клиент выходит\n elif (\n s.KEY_ACTION in message\n and message[s.KEY_ACTION] == s.ACTION_EXIT\n and s.KEY_ACCOUNT_NAME in message\n ):\n self.clients.remove(self.names[message[s.KEY_ACCOUNT_NAME]])\n self.names[message[s.KEY_ACCOUNT_NAME]].close()\n del self.names[message[s.KEY_ACCOUNT_NAME]]\n return\n # Иначе отдаём Bad request\n else:\n response = s.RESPONSE_400\n response[s.KEY_ERROR] = \"Запрос не корректен\"\n MSG.send(client, response)\n return", "def test_context_data_info_message_for_one_result(self):\n factories.SourceDatasetFactory.create(\n i_dbgap_description='lorem ipsum',\n source_study_version__study=self.study)\n response = self.client.get(self.get_url(self.study.pk), {'description': 'lorem'})\n messages = list(response.wsgi_request._messages)\n self.assertEqual(len(messages), 1)\n 
self.assertEqual(str(messages[0]), '1 result found.')", "async def tell(client, data):\n conn = client.bot.dbs[data.server]\n split = data.split_message\n\n tables = db.get_table_names(conn)\n if 'tells' not in tables:\n asyncio.create_task(client.message(data.target, 'Tell table uninitialized, ask your nearest bot admin to restart the bot.'))\n\n if len(split) > 1:\n recipient = split[0]\n recipient = recipient.lower()\n message = ' '.join(split[1:])\n else:\n return\n \n telldata = (recipient, data.nickname, message, int(time.time()), '0', '0')\n db.set_row(conn, 'tells', telldata)\n db.ccache()\n\n asyncio.create_task(client.notice(data.nickname, 'Your message will be sent.'))", "def test_context_data_info_message_for_one_result(self):\n factories.SourceDatasetFactory.create(i_dbgap_description='lorem ipsum')\n response = self.client.get(self.get_url(), {'description': 'lorem'})\n messages = list(response.wsgi_request._messages)\n self.assertEqual(len(messages), 1)\n self.assertEqual(str(messages[0]), '1 result found.')", "def contains_id(self, message_id, customer_id):\r\n \r\n #id_str = f\"SELECT count(*) FROM auditlog where customerID={customer_id} and message_id='{message_id}'\"\r\n id_str = f\"SELECT count(*) FROM auditlog where customerID=? and message_id=?\"\r\n params = (customer_id, message_id)\r\n\r\n try:\r\n # execute insertion into table\r\n self.cursor.execute(id_str, params)\r\n except:\r\n # something failed. Wait a few seconds to try to get a decent close of connection, reconnect and execute again\r\n time.sleep(30)\r\n self.connect()\r\n self.cursor.execute(id_str, params)\r\n\r\n # get count of id\r\n count = self.cursor.fetchone()[0]\r\n print(f\"item count in auditlog: {count}\")\r\n\r\n id_str = f\"SELECT message_id,timestamp_email,text FROM auditlog where customerID=? 
and message_id=?\"\r\n params = (customer_id, message_id)\r\n self.cursor.execute(id_str, params)\r\n alt_count = self.cursor.fetchall()\r\n print(f\"item alt_count in auditlog: {alt_count}\")\r\n\r\n return count > 0", "def client_id(self) -> str:", "def on_text_message(self, update, context):\n chat_id = update.effective_chat.id\n log.info(\"Msg from:%s `%s`\", chat_id, update.effective_message.text)\n\n if context.user_data[\"state\"] == c.State.EXPECTING_AMOUNT:\n log.info(\"Vol:%s spent %s MDL on this request\", chat_id, update.effective_message.text)\n # TODO validate the message and make sure it is a number, discuss whether this is necessary at all\n # TODO send this to the server, we need to define an API for that\n request_id = context.user_data[\"current_request\"]\n\n # Write this amount to the persistent state, so we can rely on it later\n context.bot_data[request_id][\"amount\"] = update.effective_message.text\n\n # Then we have to ask them to send a receipt.\n self.send_message_ex(update.message.chat_id, c.MSG_FEEDBACK_RECEIPT)\n context.user_data[\"state\"] = c.State.EXPECTING_RECEIPT\n return\n\n if context.user_data[\"state\"] == c.State.EXPECTING_FURTHER_COMMENTS:\n log.info(\"Vol:%s has further comments: %s\", chat_id, update.effective_message.text)\n request_id = context.user_data[\"current_request\"]\n context.bot_data[request_id][\"further_comments\"] = update.effective_message.text\n self.finalize_request(update, context, request_id)\n return\n\n if context.user_data[\"state\"] == c.State.EXPECTING_PROFILE_DETAILS:\n self.build_profile(update, context, raw_text=update.effective_message.text)\n return\n\n # if we got this far it means it is some sort of an arbitrary message that we weren't yet expecting\n log.warning(\"unexpected message ..........\")", "def query_client(self, client_id):\n try:\n return self.client_model.objects.get(client_id=client_id)\n except self.client_model.DoesNotExist:\n return None", "def on_get(self, req, resp, **kwargs):\n note_id = kwargs['note_id']\n self.validate_note_id(note_id)\n note = self.get_note_with_access_check(req.context, note_id)\n resp.text = self.get_note_details(note)\n resp.status = falcon.HTTP_200", "def get_announcement(self, cid):\n cid = cid.upper()\n query = \"SELECT * from announcement where cid = %s\"\n inputs = (cid, )\n result = self.database_manager.execute_query(query, inputs)\n if result:\n announcement = \"Announcement for {} ({}): {}\".format(result[0][0], result[0][3], result[0][2])\n else:\n announcement = \"No announcement for this {}\".format(cid)\n return announcement" ]
[ "0.5811888", "0.55938053", "0.5496945", "0.54101413", "0.5383944", "0.53775185", "0.5357722", "0.53567284", "0.5239797", "0.5234531", "0.5225566", "0.5214078", "0.51944524", "0.519301", "0.518485", "0.51753753", "0.5170485", "0.5164536", "0.51571465", "0.51501626", "0.5137739", "0.51334566", "0.51262134", "0.5115311", "0.51064754", "0.5098813", "0.5091624", "0.50884926", "0.5081421", "0.50682795" ]
0.6117542
0
Validates received protocol compression parameter. Sets protocol compression to be used in session according to result of this validation.
def handle_protocol_compression(self, session): if self.protocol_compression is not None: valid = RPCS.VALID_COMPRESSION_METHODS if self.protocol_compression not in valid: self.protocol_compression = 'NONE' raise ClientRequestError('InvalidParameterValue', data='protocol_compression') else: self.protocol_compression = 'NONE'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate(\n self,\n is_full_msg: bool,\n msg_type: Optional[bytes],\n header_len: int,\n payload_len: Optional[int],\n input_buffer: InputBuffer\n ) -> None:\n\n if self._connection_protocol_version >= self.FIRST_VALIDATING_VERSION:\n self._validate_starting_sequence(input_buffer)\n\n if self._size_validation_settings is not None:\n self._validate_payload_length(msg_type, payload_len)\n\n if self._connection_protocol_version >= self.FIRST_VALIDATING_VERSION:\n self._validate_control_flags(is_full_msg, header_len, payload_len, input_buffer)", "def test_compression_level(self):\n test_compression_level = 8\n self.encoder._compression_level = test_compression_level", "def setProtocolOptions(self,\n version=None,\n utf8validateIncoming=None,\n acceptMaskedServerFrames=None,\n maskClientFrames=None,\n applyMask=None,\n maxFramePayloadSize=None,\n maxMessagePayloadSize=None,\n autoFragmentSize=None,\n failByDrop=None,\n echoCloseCodeReason=None,\n serverConnectionDropTimeout=None,\n openHandshakeTimeout=None,\n closeHandshakeTimeout=None,\n tcpNoDelay=None,\n perMessageCompressionOffers=None,\n perMessageCompressionAccept=None,\n autoPingInterval=None,\n autoPingTimeout=None,\n autoPingSize=None):", "def check_codec(self):\n codecs = list(set([s[\"codec\"] for s in self.segments]))\n for c in codecs:\n if c != \"h264\":\n raise P1203StandaloneError(\"Unsupported codec: {}\".format(c))", "def validate_form_fields(self, protocol):\n\n if protocol and protocol not in [p.get(\"name\") for p in self.protocols]:\n return UNKNOWN_PROTOCOL", "def check_protocol_version(self):\n try:\n protocol_version = self.do_command(\"protocol_version\")\n except BadGtpResponse:\n return\n if protocol_version != \"2\":\n raise BadGtpResponse(\"%s reports GTP protocol version %s\" %\n (self.name, protocol_version))", "def _valid_protocol_type(protocol):\n\n if protocol == 'ssh' or protocol == 'https':\n return True\n\n return False", "def validate(self, options):\n try:\n codecs.getencoder(options.char_encoding)\n except LookupError:\n self.parser.error(\"invalid 'char-encoding' %s\" % options.char_encoding)", "def validate_required_protocol(dictionary, yaml_file):\n\n validate_dict_contains_value(dictionary, 'defaults', 'protocol', yaml_file)\n validate_type(dictionary['protocol'], 'protocol', str, 'str', yaml_file)\n validate_protocol_type(dictionary, yaml_file)\n del dictionary['protocol']", "def check_encoding_negotiation(self):\n from .telopt import DO, BINARY\n if self._closing:\n return\n\n # encoding negotiation is complete\n if self.outbinary and self.inbinary:\n self.log.debug('negotiated outbinary and inbinary with client.')\n\n # if (WILL, BINARY) requested by begin_negotiation() is answered in\n # the affirmitive, then request (DO, BINARY) to ensure bi-directional\n # transfer of non-ascii characters.\n elif self.outbinary and not self.inbinary and (\n not (DO, BINARY,) in self.stream.pending_option):\n self.log.debug('outbinary=True, requesting inbinary.')\n self.stream.iac(DO, BINARY)\n self._loop.call_later(self.CONNECT_DEFERRED,\n self.check_encoding_negotiation)\n\n elif self.duration > self.CONNECT_MAXWAIT:\n # Perhaps some IAC interpreting servers do not differentiate\n # 'local' from 'remote' options -- they are treated equivalently.\n self.log.debug('failed to negotiate both outbinary and inbinary.')\n\n else:\n self._loop.call_later(self.CONNECT_DEFERRED,\n self.check_encoding_negotiation)", "def validate(self) -> None:\n super().validate()\n if self.pipe_mode.value is SocketMode.CONNECT 
and self.pipe_format.value is None:\n raise Error(\"'pipe_format' required for CONNECT pipe mode.\")", "def handle_ProtocolHeaderFrame(self,\n frame: amqpframe.ProtocolHeaderFrame):\n self._fsm.trigger('receive_ProtocolHeaderFrame')\n raise exceptions.UnsupportedProtocol(\n frame.payload.protocol_major,\n frame.payload.protocol_minor,\n frame.payload.protocol_revision,\n )", "def validate_handshake_public_key(cls, public_key: bytes) -> None:\n ...", "def validate(self, protocol, *args, **kwargs):\n assert len(args) >= len(protocol[\"required\"]), \\\n \"Protocol {} has an arity of {}. Called with {}\".format(\n protocol['procedure'], len(protocol[\"required\"]), len(args))\n\n assert len(args) <= len(protocol[\"required\"]) + \\\n len(protocol[\"optional\"]), \\\n \"Protocol {} has an arity of {}. Called with {}\".format(\n protocol['procedure'], len(protocol[\"required\"]), len(args))", "def fill_protocol(self, data):\n self.protocol = get_optional_value(data, self.PROTOCOL, \"http\")\n self.protocol = self.protocol or \"http\"", "def validate_backend_version(self):\n pass", "def load_balance_compression(self, load_balance_compression):\n allowed_values = [\"DO_NOT_COMPRESS\", \"COMPRESS_ATTRIBUTES_ONLY\", \"COMPRESS_ATTRIBUTES_AND_CONTENT\"]\n if load_balance_compression not in allowed_values:\n raise ValueError(\n \"Invalid value for `load_balance_compression` ({0}), must be one of {1}\"\n .format(load_balance_compression, allowed_values)\n )\n\n self._load_balance_compression = load_balance_compression", "def test_supported_protocol(self):\n assert self.handler.SUPPORTED_PROTOCOL is None", "def _validate_header(self, cleartext_header, decrypted_header):\n import struct\n\n magic_number1 = struct.unpack(\"!I\", decrypted_header[:4])[0]\n # file_version = struct.unpack(\"!I\", decrypted_header[4:8])[0]\n # key_stretches = struct.unpack(\"!I\", decrypted_header[8:12])[0]\n magic_number2 = struct.unpack(\"!I\", decrypted_header[12:])[0]\n if (self.__magic_number != magic_number1 or\n self.__magic_number != magic_number2):\n raise DecryptionError()\n if cleartext_header != decrypted_header:\n raise FileCorruptionError()", "def validate_subprotocol(subprotocol, hixie):\n\n if not subprotocol:\n raise HandshakeException('Invalid subprotocol name: empty')\n if hixie:\n # Parameter should be in the range U+0020 to U+007E.\n for c in subprotocol:\n if not 0x20 <= ord(c) <= 0x7e:\n raise HandshakeException(\n 'Illegal character in subprotocol name: %r' % c)\n else:\n # Parameter should be encoded HTTP token.\n state = http_header_util.ParsingState(subprotocol)\n token = http_header_util.consume_token(state)\n rest = http_header_util.peek(state)\n # If |rest| is not None, |subprotocol| is not one token or invalid. 
If\n # |rest| is None, |token| must not be None because |subprotocol| is\n # concatenation of |token| and |rest| and is not None.\n if rest is not None:\n raise HandshakeException('Invalid non-token string in subprotocol '\n 'name: %r' % rest)", "def pes_packet_check_formedness(payload):\n b1 = ord(payload[0])\n b2 = ord(payload[1])\n b3 = ord(payload[2])\n\n b4 = ord(payload[3])\n if b1 != 0 or b2 != 0 or b3 != 1:\n return False\n return True", "def validate(self):\n other = HandshakeSettings()\n\n self._copy_cipher_settings(other)\n self._copy_extension_settings(other)\n self._copy_key_settings(other)\n\n other.pskConfigs = self.pskConfigs\n other.psk_modes = self.psk_modes\n\n if not other.certificateTypes:\n raise ValueError(\"No supported certificate types\")\n\n self._sanityCheckKeySizes(other)\n\n self._sanityCheckPrimitivesNames(other)\n\n self._sanityCheckProtocolVersions(other)\n\n self._sanityCheckExtensions(other)\n\n if other.maxVersion < (3, 3):\n # No sha-2 and AEAD pre TLS 1.2\n other.macNames = [e for e in self.macNames if\n e == \"sha\" or e == \"md5\"]\n\n self._sanityCheckPsks(other)\n\n self._sanityCheckTicketSettings(other)\n\n self._sanity_check_implementations(other)\n self._sanity_check_ciphers(other)\n\n return other", "def _set_compression_capabilities(self, caps, compression, algorithms=None):\n compression_data = caps.get(\"compression\")\n if compression_data is None:\n msg = \"Compression requested but the server does not support it\"\n if compression == Compression.REQUIRED:\n raise NotSupportedError(msg)\n _LOGGER.warning(msg)\n return None\n\n compression_dict = {}\n if isinstance(compression_data, dict): # C extension is being used\n for fld in compression_data[\"value\"][\"obj\"][\"fld\"]:\n compression_dict[fld[\"key\"]] = [\n value[\"scalar\"][\"v_string\"][\"value\"].decode(\"utf-8\")\n for value in fld[\"value\"][\"array\"][\"value\"]\n ]\n else:\n for fld in compression_data.value.obj.fld:\n compression_dict[fld.key] = [\n value.scalar.v_string.value.decode(\"utf-8\")\n for value in fld.value.array.value\n ]\n\n server_algorithms = compression_dict.get(\"algorithm\", [])\n algorithm = None\n\n # Try to find an algorithm from the requested compression algorithms\n # list, which is supported by the server\n if algorithms:\n # Resolve compression algorithms aliases and ignore unsupported\n client_algorithms = [\n COMPRESSION_ALGORITHMS[item]\n for item in algorithms\n if item in COMPRESSION_ALGORITHMS\n ]\n matched = [item for item in client_algorithms if item in server_algorithms]\n if matched:\n algorithm = COMPRESSION_ALGORITHMS.get(matched[0])\n elif compression == Compression.REQUIRED:\n raise InterfaceError(\n \"The connection compression is set as \"\n \"required, but none of the provided \"\n \"compression algorithms are supported.\"\n )\n else:\n return None # Disable compression\n\n # No compression algorithms list was provided or couldn't found one\n # supported by the server\n if algorithm is None:\n if HAVE_ZSTD and \"zstd_stream\" in server_algorithms:\n algorithm = \"zstd_stream\"\n elif HAVE_LZ4 and \"lz4_message\" in server_algorithms:\n algorithm = \"lz4_message\"\n else:\n algorithm = \"deflate_stream\"\n\n if algorithm not in server_algorithms:\n msg = (\n \"Compression requested but the compression algorithm \"\n \"negotiation failed\"\n )\n if compression == Compression.REQUIRED:\n raise InterfaceError(msg)\n _LOGGER.warning(msg)\n return None\n\n self.protocol.set_capabilities(compression={\"algorithm\": algorithm})\n return 
algorithm", "def parse_handshake(self, data):\n\n if (data[0] != len(PSTR) or data[1:20] != PSTR\n or data[28:48] != self.factory.torrent.info_hash):\n\n self.transport.loseConnection()\n else:\n self.handshaked = True\n\n reserved = data[20:28]\n if reserved[7] & ord('\\x04'):\n self.fast_extension = True\n\n if reserved[7] & ord('\\x01'):\n self.dht = True", "def compare_protocol_versions(self, session):\n # First parse protocol version strings to check for invalid formatting\n invalid_string = self.parse_protocol_version(\n [self.earliest_protocol_version, self.latest_protocol_version])\n if invalid_string is not None:\n # Error during protocol string parsing\n data = ('earliest_protocol_version'\n if invalid_string == self.earliest_protocol_version else 'latest_protocol_version')\n raise ClientRequestError('InvalidParameterValue', data=data)\n\n # Check if protocol version is supported and define the one to use\n self.protocol_version = self.determine_supported_protocol(\n self.earliest_protocol_version, self.latest_protocol_version)", "def validate_encryption(self) -> bool:\n # Receive the first encrypted message from server\n message = self.receive()\n if message != Message.HI:\n print(\"Encryption error! Closing this socket...\")\n return False\n # Send the first encrypted message\n self.send(Message.HI)\n # Receive the encrypted OK message\n message = self.receive()\n if message == Message.OK:\n print(\"Encryption is established.\")\n return True\n else:\n print(\"Encryption error! Closing this socket...\")\n return False", "def isProtocolDefined(self) -> bool:\n ...", "def supports_protocol(self, obj, protocol):\n\n return self.adapt(obj, protocol, None) is not None", "def protocol_match(cls, script_bytes: bytes) -> bool:\n # fast test -- most ScriptOutputs that aren't SLP will fail here quickly\n if not script_bytes.startswith(cls._protocol_prefix):\n return False\n # fast test passed -- next try the slow test -- attempt to parse and\n # validate OP_RETURN message\n try:\n # raises on parse error\n slf = cls(script_bytes)\n # should always be not None\n if slf.message is not None:\n # save parsed message since likely it will be needed again very soon\n # by class c'tor\n cls._script_message_cache.put(slf.script, slf.message)\n return True\n except Error:\n pass\n except Exception:\n # DEBUG XXX FIXME\n import sys\n import traceback\n\n traceback.print_exc(file=sys.stderr)\n pass\n return False", "def validate_optional_protocol(dictionary, yaml_file):\n\n if 'protocol' in dictionary:\n validate_type(dictionary['protocol'], 'protocol', str, 'protocol', yaml_file)\n validate_protocol_type(dictionary, yaml_file)\n del dictionary['protocol']" ]
[ "0.51522434", "0.513435", "0.5105154", "0.50020367", "0.4987997", "0.49288347", "0.48937827", "0.48890725", "0.48640823", "0.48285913", "0.48197266", "0.47871244", "0.47853112", "0.47634792", "0.4761809", "0.47489363", "0.47361204", "0.47114277", "0.4680672", "0.46734345", "0.46563947", "0.46507746", "0.46429485", "0.46197894", "0.46024603", "0.45702195", "0.453306", "0.45139205", "0.4506355", "0.4492941" ]
0.73079675
0
(str) > bool Return True if correct service name
def is_service_name_correct(self, service): return service in self.services
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_name(name, allow_services=False):", "def istype(client, service_name: str):\n\n if is_client(client):\n return (\n client.meta.service_model.service_name.lower()\n == service_name.strip().lower()\n )\n return False", "def definition_of_services(self):\r\n return True", "def isService(self, serviceInterface: java.lang.Class) -> bool:\n ...", "def _interesting_service(self, service: UpnpService) -> bool:\n service_type = service.service_type\n for service_types in self._SERVICE_TYPES.values():\n if service_type in service_types:\n return True\n\n return False", "def compServiceCheck():\n # global compileService\n return compileService.state.name", "def start_services(self, service_name):\r\n\r\n from Services import Services\r\n result = Services.start_service(service_name, self.machine_name)\r\n if result is None:\r\n return None\r\n elif result:\r\n return True\r\n else:\r\n return False", "def service_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service_name\")", "def service_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service_name\")", "def service_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service_name\")", "def is_service_installed(klass, service):\n return True", "def test_get_service_string(self):\n pass", "def term(name):\n cmd = \"svc -t {}\".format(_service_path(name))\n return not __salt__[\"cmd.retcode\"](cmd, python_shell=False)", "def getServiceName(self) -> str:\n ...", "def is_service_endpoint(path):\n return re.match(r'^[a-zA-Z0-9.-]+:\\d+$', path)", "def future_supported_service(service_name):\n print('Service {} linked.'.format(service_name))\n pass", "def service_name(self):\n return self._service_name", "def unrecognised_service(service_name):\n print('Service {} not (yet) supported.'.format(service_name))\n pass", "def service_name(self) -> str:\n return pulumi.get(self, \"service_name\")", "def service_name(self) -> str:\n return pulumi.get(self, \"service_name\")", "def name_registered(name, state):\n for client in state['clients']:\n if client['name'] == name:\n return True\n return False", "def service_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"service_name\")", "def service_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"service_name\")", "def service_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"service_name\")", "def print_service_available():\n if WithingsDataManager.service_available is not True:\n _LOGGER.info(\"Looks like the service is available again\")\n WithingsDataManager.service_available = True\n return True", "def service(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service\")", "def IsServiceRunning(self, service_name):\n if self.HasSystemd():\n # Querying for the pid of the service will return 'MainPID=0' if\n # the service is not running.\n stdout, stderr = self.RunCmdOnDevice(\n ['systemctl', 'show', '-p', 'MainPID', service_name], quiet=True)\n running = int(stdout.split('=')[1]) != 0\n else:\n stdout, stderr = self.RunCmdOnDevice(['status', service_name], quiet=True)\n running = 'running, process' in stdout\n assert stderr == '', stderr\n logging.debug(\"IsServiceRunning(%s)->%s\" % (service_name, running))\n return running", "def wsdl_service_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"wsdl_service_name\")", "def service_type_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service_type_name\")", "def IsServiceRunning(self, 
service_name):\n stdout, stderr = 'stdout', 'system does not have systemd'\n if self.HasSystemd():\n # Querying for the pid of the service will return 'MainPID=0' if\n # the service is not running.\n stdout, stderr = self.RunCmdOnDevice(\n ['systemctl', 'show', '-p', 'MainPID', service_name], quiet=True)\n running = int(stdout.split('=')[1]) != 0\n assert stderr == '', stderr\n logging.debug(\"IsServiceRunning(%s)->%s\" % (service_name, running))\n return running" ]
[ "0.74874175", "0.7102973", "0.6658793", "0.65929854", "0.6531604", "0.6523252", "0.64808494", "0.64768475", "0.64768475", "0.64768475", "0.6421063", "0.6411934", "0.63543", "0.63014257", "0.62521374", "0.6246466", "0.61115396", "0.60908616", "0.60830384", "0.60830384", "0.6069704", "0.6036148", "0.6036148", "0.6036148", "0.60160005", "0.6010067", "0.5999235", "0.59940726", "0.59873366", "0.59838617" ]
0.83579403
0
(str) > bool Return True if correct class mail type
def is_class_mail_types_correct(self, class_mail_type): return class_mail_type in self.class_mail_types
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_mail_types_correct(self, mail_type):\r\n return mail_type in self.mail_types", "def email_type(verifield, required):\n return verifield is None or parseaddr(verifield) != ('','')", "def __CheckType(self, t):\n t = string.upper(t)\n \"\"\" convert lower letters to upper letters \"\"\"\n if not t in ['MX', 'CNAME', 'A', 'NS', 'PTR']:\n return None\n else:\n return t", "def get_receive_mail_str(self):\n ret = False\n if self.__mail:\n ret = True\n return ret", "def check_eligible_mimetype(self, ctype, uid):\n self.helper.log_debug(\n 'check_eligible_mimtype: checking content-type %s of msg uid %s' %\n (ctype, uid))\n if ctype == \"application/zip\":\n return True\n elif ctype == \"application/gzip\":\n return True\n elif ctype == \"application/x-gzip\":\n return True\n elif ctype == \"application/octet-stream\":\n # Non-standard mimetype used by Amazon SES dmarc reports\n return True\n elif ctype == \"application-x-gzip\":\n # Non-standard mimetype used by Comcast dmarc reports\n return True\n elif ctype == \"application/x-zip-compressed\":\n # Non-standard mimetype used by Yahoo dmarc reports\n return True\n elif ctype == \"application/xml\":\n return True\n elif ctype == \"text/xml\":\n return True\n else:\n self.helper.log_debug(\n 'check_eligible_mimtype: skipping content-type %s of msg uid %s' %\n (ctype, uid))\n return False", "def is_of_type(cls, value) -> bool:\n # UTF8 = 'utf-8'\n # UTF16 = 'utf-16'\n # UTF32 = 'utf-32'\n # ASCII = 'ascii'\n # BINARY = 'binary'\n # OCTAL = 'octal'\n # HEXADECIMAL = 'hexadecimal'\n # CP1252 = 'cp1252'\n # WINDOWS1252 = 'windows-1252'\n # UNICODEESCAPE = 'unicode-escape'\n\n v = None\n if cls == cls.UTF8 or cls == cls.UTF16 or cls == cls.UTF32 or cls == cls.UNICODEESCAPE:\n try:\n v = bytes(value)\n except:\n return False\n\n if cls == cls.ASCII:\n try:\n v = ascii(value)\n except:\n return False\n\n if cls == cls.BINARY:\n try:\n v = bin(value)\n except:\n return False\n\n if cls == cls.OCTAL:\n try:\n v = oct(value)\n except:\n return False\n\n if cls == cls.HEXADECIMAL:\n try:\n v = hex(value)\n except:\n return False\n\n if cls == cls.WINDOWS1252 or cls == cls.CP1252:\n try:\n v = str(value)\n except:\n return False\n return True", "def check_type(self):\n return True", "def strtype(x):\n if type(x) == str:\n return True\n if type(x) == unicode:\n return True\n return False", "def is_my_type(type_str):\n raise NotImplementedError()", "def verify_mail(self):\n raise NotImplementedError", "def test(types, _):\n return 'Date' in types and 'Postal Code' in types", "def validate_type(type):\n\n types_upper = [i.upper() for i in officeTypes]\n if type.upper() in types_upper:\n return True\n return False", "def test_if_str(self):\n self.assertTrue(type(self.new.email) is str)\n self.assertTrue(type(self.new.password) is str)\n self.assertTrue(type(self.new.first_name) is str)\n self.assertTrue(type(self.new.last_name) is str)", "def _is_valid_ct(content_type: str) -> bool:\n content_type = content_type.strip()\n return _is_valid_regex(CT_CONTENT_TYPE_REGEX_PATTERN, content_type)", "def is_valid_content_type(cls, content_type: str) -> bool:\n return content_type in cls.CONTENT_TYPES.value", "def is_type(self, ent_type):\n # type: (str) -> bool\n # its always an entity ...\n if ent_type.lower() in ('entity', self.settings['_type'].lower()):\n return True\n else:\n return False", "def _is_valid_content_type_format(content_type: str) -> bool:\n return (\n _is_valid_ct(content_type)\n or _is_valid_pt(content_type)\n or _is_valid_set(content_type)\n 
or _is_valid_list(content_type)\n or _is_valid_dict(content_type)\n or _is_valid_union(content_type)\n or _is_valid_optional(content_type)\n )", "def typeIsString(obj):\n return type(obj) is str or _haveTypeUnicode and type(obj) is unicode", "def __bool__(self):\n return _libsbml.string___bool__(self)", "def __validate_email(self,mail):\n if re.match(r\"[\\w\\W]*@+[\\w\\W]*[.]+[\\w]{2,4}\",mail):\n return True\n return False", "def isValidType(self, fsn, fsn_type):\n if ((OINKM.checkIfFSN(fsn)) and (\"SEO\" in fsn_type)) or (not(OINKM.checkIfFSN(fsn)) and (\"SEO\" not in fsn_type)):\n #If the value in the fsn field is an FSN and the description type is an SEO type, then it could be invalid.\n #If the value in the fsn field is not an FSN and the description type is not an SEO type, then it could be invalid.\n if \"SEO\" in fsn_type:\n question = \"You seem to be writing an FSN article but the description type appears to be an SEO. Are you sure you want to submit that?\"\n else:\n question = \"You seem to be writing about something that's not an FSN. Are you sure you want to submit that?\"\n change_type = QtGui.QMessageBox.question(\n self,\n \"Possible Description Type Mismatch\",\n question,\n QtGui.QMessageBox.Yes | QtGui.QMessageBox.No, \n QtGui.QMessageBox.No\n )\n if change_type is not None:\n if change_type == QtGui.QMessageBox.Yes:\n is_valid = True\n else:\n is_valid = False\n else:\n is_valid = False\n else:\n #If the value in the FSN field is an FSN and the type is not an SEO type.\n #if the value is not an FSN and the type is one of the SEO types.\n is_valid = True\n return is_valid", "def check_message(self):\n def check(fld_key):\n if not self[fld_key]:\n string = self._fields[fld_key].string\n raise UserError(\n _(\"%s field required to send an email.\") % string)\n if self.email_type == 'general':\n check('subject')\n check('body')\n elif self.email_type == 'scheduled':\n check('date')\n check('duration')\n check('priority')\n check('sub_subject')\n check('mail_template_id')", "def is_string(atype):\n if atype == str:\n return True\n elif PY2:\n if atype == unicode:\n return True\n return False", "def streettype(self):\n if self.index >= self.length:\n return False\n \n self._typ, n = self.parse_streettype()\n if self._typ is not None:\n self.idx_typ = self.index\n self.index += n\n if self.index < self.length and self.words[self.index]['word'] == '.':\n self.index += 1\n if self.index < self.length and self.words[self.index]['word'] == ',':\n self.index += 1\n if self._debug: print(\"TYP\", self._typ, self.idx_typ)\n self.isaddr = True\n return True\n return False", "def is_text(self):\n return self.value_type in (str, unicode)", "def has_classname(self):\n return self.unpack_word(0x4A) > 0", "def is_valid_type(self, question_type):\n\t\treturn question_type in self.valid_types", "def convert(self,message):\n \n content_type = message.get('content',{}).get('@type','')\n if content_type in self.supported:\n result = getattr(self.tconv, content_type)(message)\n else:\n return False\n \n return result", "def stringable(self):\n return True", "def is_valid_license_type(self):\n clean = self.license_type.lower().replace('-', ' ')\n return clean not in INVALID_LICENSE_TYPE" ]
[ "0.7468366", "0.6942029", "0.6129135", "0.6121263", "0.60874057", "0.60748357", "0.59902066", "0.5907582", "0.5859858", "0.58163", "0.5788119", "0.57557553", "0.5746743", "0.57291144", "0.5729054", "0.5723128", "0.5698093", "0.56697905", "0.56547284", "0.55967015", "0.5573413", "0.5569571", "0.55416805", "0.55357385", "0.5503957", "0.5500609", "0.54806626", "0.54765123", "0.5470845", "0.54673046" ]
0.7834716
0
(str) > bool Return True if correct container type
def is_container_type_correct(self, container): return container in self.containers
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_container(item):\n if isinstance(item, str):\n return False\n elif hasattr(item, \"__iter__\"):\n return True\n\n return False", "def is_str_or_coll(value):\n return bool(is_str(value)) or bool(is_tuple_or_list(value))", "def is_iterable_container(value):\n # strings are iterable too so we have to treat them as a special case\n return not isinstance(value, str) and isinstance(value, collections.Iterable)", "def is_string(item: Any) -> bool:\n if isinstance(item, (bytes, bytearray, str)):\n return True\n elif (isinstance(item, (tuple, list)) and all(is_string(x) for x in item)):\n return True\n elif (isinstance(item, np.ndarray) and # binary or unicode\n (item.dtype.kind in (\"U\", \"S\") or item.dtype == object)):\n return True\n return False", "def is_simple(self) -> bool:\n return self.data in ('int', 'bool', 'float', 'str')", "def test_is_container(self):\n # verify ----------------------\n try:\n 1 in self.collection\n except TypeError:\n msg = \"'Collection' object is not container\"\n self.fail(msg)", "def strtype(x):\n if type(x) == str:\n return True\n if type(x) == unicode:\n return True\n return False", "def has_string_type(obj: _std_typing.Any) -> bool:\n return obj.dtype == sc.DType.string", "def is_string(value):\n return isinstance(value, (str, bytes))", "def check_type(self):\n return True", "def is_of_type(cls, value) -> bool:\n # UTF8 = 'utf-8'\n # UTF16 = 'utf-16'\n # UTF32 = 'utf-32'\n # ASCII = 'ascii'\n # BINARY = 'binary'\n # OCTAL = 'octal'\n # HEXADECIMAL = 'hexadecimal'\n # CP1252 = 'cp1252'\n # WINDOWS1252 = 'windows-1252'\n # UNICODEESCAPE = 'unicode-escape'\n\n v = None\n if cls == cls.UTF8 or cls == cls.UTF16 or cls == cls.UTF32 or cls == cls.UNICODEESCAPE:\n try:\n v = bytes(value)\n except:\n return False\n\n if cls == cls.ASCII:\n try:\n v = ascii(value)\n except:\n return False\n\n if cls == cls.BINARY:\n try:\n v = bin(value)\n except:\n return False\n\n if cls == cls.OCTAL:\n try:\n v = oct(value)\n except:\n return False\n\n if cls == cls.HEXADECIMAL:\n try:\n v = hex(value)\n except:\n return False\n\n if cls == cls.WINDOWS1252 or cls == cls.CP1252:\n try:\n v = str(value)\n except:\n return False\n return True", "def is_container(value: object) -> TypeGuard[AnyContainer]:\n if isinstance(value, Container):\n return True\n if hasattr(value, \"__pt_container__\"):\n return is_container(cast(\"MagicContainer\", value).__pt_container__())\n return False", "def isdatatype(object):\n return isinstance(object, (str, int, bool, float, type(None)))", "def is_string(atype):\n if atype == str:\n return True\n elif PY2:\n if atype == unicode:\n return True\n return False", "def is_string(value):\n return isinstance(value, basestring)", "def is_collection(var):\n return isinstance(var, Iterable) and not isinstance(var, str)", "def _isstr(value):\n\n if isinstance(value, string_types + (bytes,)):\n return True\n elif hasattr(value, \"__iter__\"):\n return all(_isstr(v) for v in value)\n else:\n return False", "def has_acceptable_type(self, value):\n if not value:\n return False\n if super().has_acceptable_type(value):\n return True\n # Hmmm ok maybe we're running under IPython:\n try:\n import IPython\n return isinstance(value, IPython.kernel.zmq.iostream.OutStream)\n except ImportError:\n return False", "def __contains__(self, a):\n try:\n self.convert(a)\n except CoercionFailed:\n return False\n\n return True", "def typeIsString(obj):\n return type(obj) is str or _haveTypeUnicode and type(obj) is unicode", "def isString(x):\n if type(x) == str:\n return 
True\n else:\n return False", "def _is_non_string_iterable(value):\n if isinstance(value, str):\n return False\n if hasattr(value, '__iter__'):\n return True\n if isinstance(value, collections.abc.Sequence):\n return True\n return False", "def _is_str(item):\n return isinstance(item, str)", "def test_return_type(self):\n self.assertEqual(type(self.s0.from_json_string(self.string)), list)", "def check_type(content):\n return (isinstance(content, Elem) or type(content) == Text or\n (type(content) == list and all([type(elem) == Text or\n isinstance(elem, Elem)\n for elem in content])))", "def is_container(self, scopestr: str):\n return scopestr in self.con_scopestr_to_node", "def _is_string(self, obj):\n return isinstance(obj, unicode) or isinstance(obj, str)", "def is_list ( self, s ):\r\n\t\treturn isinstance ( s, type( list () ) )", "def is_string(document):\r\n return isinstance(document, str)", "def isString(data):\n\ttry:\n\t\tfrom types import UnicodeType, StringType\n\t\tif type(data) == UnicodeType or type(data) == StringType:\n\t\t\treturn True\n\texcept ImportError:\n\t\tif type(data) == type(\"\"):\n\t\t\treturn True\n\treturn False" ]
[ "0.7596315", "0.6735085", "0.6629723", "0.6603197", "0.6592958", "0.65436935", "0.6490295", "0.64833474", "0.6360425", "0.634218", "0.6331468", "0.633145", "0.63053995", "0.6277777", "0.6248238", "0.6234861", "0.6221624", "0.6171583", "0.61659265", "0.61386937", "0.6130888", "0.61276805", "0.61137885", "0.6105598", "0.61053765", "0.6099245", "0.6091279", "0.6083298", "0.60821974", "0.6081491" ]
0.7524484
1
(class xml.etree.ElementTree.Element) > list Return dictionary with all Postage information
def get_postage_from_response(self, xml_response): postages = xml_response.find("Package").findall("Postage") postages_list = [] if postages: for postage in postages: postages_list.append(self.get_response_information(postage)) return postages_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def xml_to_dict(self):\n medicine_node = ['medicines', 'dose', 'unit', 'unit_price', 'goods_num', 'dose_that',\n 'remark', 'm_usage', 'goods_norms', 'goods_orgin', 'MedPerDos', 'MedPerDay']\n dict_data = {}\n n = 0\n xq_childs = 0\n for child in self.root.iter():\n # print(child.tag)\n # print(child.text)\n if child.tag not in medicine_node:\n if child.tag == 'xq':\n xq_childs = child.__len__() # __len__:返回元素大小,元素的大小为元素的子元素数量\n dict_data[child.tag] = child.text\n else:\n if n < xq_childs:\n dict_data[child.tag] = [child.text]\n n += 1\n else:\n dict_data[child.tag].append(child.text)\n return dict_data", "def _construct_data_xml(self, xml_file_list):\n award_dict = {}\n award_list = []\n for xml_file in xml_file_list:\n xml_file.seek(0)\n tree = ET.parse(xml_file)\n root = tree.getroot()\n\n for response in root:\n temp_dict = {}\n for award in response:\n if award.tag == 'entry':\n continue\n try:\n # temp_dict[award.tag].append(award.text)\n temp_dict[award.tag] = award.text\n except KeyError:\n print(\"KeyError\")\n # temp_dict[award.tag] = [award.text]\n\n # if 'entry' in temp_dict.keys():\n # del temp_dict['entry']\n if len(temp_dict) > 0:\n award_list.append(temp_dict)\n\n return award_list", "def extract_xml(self, xml_list):\n craziness = dict()\n for i in range(len(xml_list)):\n if xml_list[i]['@type'] == 'EMBL':\n craziness['EMBL']=(xml_list[i]['@id'])\n elif xml_list[i]['@type'] == 'RefSeq':\n craziness['RefSeq']=(xml_list[i]['@id'])\n elif xml_list[i]['@type'] == 'Ensembl':\n craziness['Ensembl']=(xml_list[i]['@id'])\n elif xml_list[i]['@type'] == 'OrthoDB':\n craziness['OrthoDB']=(xml_list[i]['@id'])\n elif xml_list[i]['@type'] == 'PROSITE':\n craziness['PROSITE']=(xml_list[i]['@id'])\n elif xml_list[i]['@type'] == 'Pfam':\n craziness['Pfam']=(xml_list[i]['@id'])\n return craziness", "def get_postage_from_response(self, xml_response):\r\n services = xml_response.find(\"Package\").findall(\"Service\")\r\n postages_list = []\r\n\r\n if services:\r\n for postages in services:\r\n postages_list.append(postages.find(\"Postage\").text)\r\n\r\n return postages_list", "def parse_element(elem):\n return_dict = {}\n for e in elem:\n return_dict[e.tag] = e.text\n return return_dict", "def dict(self):\n return xmltodict.parse(str(self))", "def _get_information(self):\n pros_cons = []\n pros_cons_dict = {}\n\n for i, div in enumerate(self._tab.find_all(\"div\")):\n for p in div.find_all(\"p\"):\n pro_con = p.get_text(strip=True)\n pros_cons.append(pro_con)\n pros_cons_dict.update({self._keys_dict[i]: pros_cons})\n pros_cons = []\n\n return pros_cons_dict", "def xml_children_as_dict(node):\n return dict((e.tag, e.text) for e in node)", "def get_attributes(self):\n\t\treturn dict(list(self.__element.items()))", "def _parse_xml(self):\n self.properties = {}\n pages = self.root.findall('page')\n self.pages = {} \n\n for page_num, page in enumerate(pages): \n\n _, _ , width, height = page.attrib[\"bbox\"].split(\",\")\n width, height = float(width), float(height)\n \n page_object = {\"page\": page_num + 1 , \"width\": width, \"height\": height} \n lines = self.root.findall('page[@id=\\'{}\\']/textbox/textline'.format(page_num+1)) \n print(\"{} Number of Lines in Page {}\".format(len(lines), page_num))\n \n self.bbox = {'x1': [] , 'y1':[], 'x2':[], 'y2':[]}\n textlines = self.root.findall('page[@id=\\'{}\\']/textbox/textline'.format(page_num+1)) \n textlines = sorted(textlines, key= lambda x: -float(x.attrib['bbox'].split(',')[3]))\n \n \n line_objects = []\n for idx, item in 
enumerate(textlines):\n item_props = self._extract_textline_properties(item)\n bbox = item.attrib['bbox'].split(',')\n item_props[\"x0\"] = Decimal(bbox[0])\n item_props[\"x1\"] = Decimal(bbox[2])\n item_props[\"y0\"] = Decimal(bbox[1])\n item_props[\"y1\"] = Decimal(bbox[3])\n item_props[\"top\"] = Decimal(height - float(bbox[3]))\n item_props[\"bottom\"] = Decimal(height - float(bbox[1]))\n\n line_objects.append(item_props)\n page_object[\"lines\"] = line_objects\n \n \n others = [] \n# for key in [\"rect\", \"figure\", \"layout/textgroup\", \"curve\"]: \n for key in [\"curve\", \"rect\", \"figure\"]: \n other_objs = self.root.findall('page[@id=\\'{}\\']/{}'.format(page_num+1, key)) \n for idx, item in enumerate(other_objs):\n \n item_props = {\"type\": key}\n# print(key, ET.tostring(item))\n bbox = item.attrib['bbox'].split(',')\n item_props[\"x0\"] = Decimal(bbox[0])\n item_props[\"x1\"] = Decimal(bbox[2])\n item_props[\"y0\"] = Decimal(bbox[1])\n item_props[\"y1\"] = Decimal(bbox[3]) \n item_props[\"top\"] = Decimal(height - float(bbox[3]))\n item_props[\"bottom\"] = Decimal(height - float(bbox[1]))\n others.append(item_props)\n \n page_object[\"others\"] = others\n page = Page(page_object)\n page_object[\"para\"] = page.para\n page_object[\"plines\"] = page.lines\n page_object[\"bigbox\"] = page.bigbox\n page_object[\"components\"] = page.components\n\n self.pages[page_num+1] = page_object", "def _get_information(self):\n reviews = self._tab.find_all(\"div\", class_=\"review\", attrs={'itemprop': 'review'})\n return [(self._get_review(elem), self._get_published_date(elem)) for elem in reviews]", "def xmlpost_to_dict(post):\n\n tree = ET.parse(post)\n root = tree.getroot()\n msg = root.find('message')\n\n post_data = {}\n\n board_id = msg.find('board_id')\n post_data['board_id'] = int(board_id.text)\n\n root_post = msg.find('root').attrib['href']\n post_data['root_post'] = root_post.split('/')[-1]\n\n kudos = msg.find('kudos')\n count = kudos.find('count')\n post_data['kudos_count'] = int(count.text)\n\n edit_author_id = msg.find('last_edit_author').attrib['href']\n post_data['edit_author_id'] = int(edit_author_id.split('/')[-1])\n\n post_time = msg.find('post_time')\n post_data['post_time'] = post_time.text\n\n last_edit_time = msg.find('last_edit_time')\n post_data['last_edit_time'] = last_edit_time.text\n\n body = msg.find('body')\n post_data['body'] = body.text\n\n thread = msg.find('thread').attrib['href']\n post_data['thread'] = int(thread.split('/')[-1])\n\n board = msg.find('board').attrib['href']\n post_data['board'] = board.split('/')[-1]\n\n try:\n parent_post = msg.find('parent').attrib['href']\n post_data['parent_post'] = int(parent_post.split('/')[-1])\n except KeyError:\n post_data['parent_post'] = None\n\n views = msg.find('views')\n post_data['views'] = int(views.find('count').text)\n\n subject = msg.find('subject')\n post_data['subject'] = subject.text\n\n post_id = msg.find('id')\n post_data['post_id'] = int(post_id.text)\n\n author_id = msg.find('author').attrib['href']\n post_data['author_id'] = int(author_id.split('/')[-1])\n\n return post_data", "def to_dict(xml):\n children = xml.getchildren()\n if not children:\n return xml.text\n else:\n out = {}\n for node in xml.getchildren():\n if node.tag in out:\n if not isinstance(out[node.tag], list):\n out[node.tag] = [out[node.tag]]\n out[node.tag].append(to_dict(node))\n else:\n out[node.tag] = to_dict(node)\n return out", "def _get_elements(self):\n address_elements = {\n 'organisation': \"{}{}\".format(\n 
self.organisation if self.organisation else \"\",\n '\\n' + self.department if self.department else \"\",\n ),\n 'sub-building name': self.sub_building_name,\n 'building name': self.building_name,\n 'building number': self.building_number,\n 'PO box': self.po_box_num,\n 'dependent thoroughfare': self.dependent_thoroughfare,\n 'thoroughfare': self.thoroughfare,\n 'double dependent locality': self.double_dependent_locality,\n 'dependent locality': self.dependent_locality,\n 'town': self.town,\n 'postcode': \"{} {}\".format(\n self.postcode[:-3], \n self.postcode[-3:]\n ),\n 'concatenation indicator': self.concatenation_indicator\n }\n return address_elements", "def get_attachments(xml):\r\n items = get_items(xml)\r\n names = {}\r\n attachments = []\r\n\r\n for item in items:\r\n kind = item.find('post_type').string\r\n filename = item.find('post_name').string\r\n post_id = item.find('post_id').string\r\n\r\n if kind == 'attachment':\r\n attachments.append((item.find('post_parent').string,\r\n item.find('attachment_url').string))\r\n else:\r\n filename = get_filename(filename, post_id)\r\n names[post_id] = filename\r\n attachedposts = {}\r\n for parent, url in attachments:\r\n try:\r\n parent_name = names[parent]\r\n except KeyError:\r\n #attachment's parent is not a valid post\r\n parent_name = None\r\n\r\n try:\r\n attachedposts[parent_name].append(url)\r\n except KeyError:\r\n attachedposts[parent_name] = []\r\n attachedposts[parent_name].append(url)\r\n return attachedposts", "def get_tags(element):\n tags = []\n id_num = element.attrib['id']\n for child in element.iter('tag'):\n attr = child.attrib\n\n # check for problematic characters first and skip if matches\n if PROBLEMCHARS.search(attr['k']):\n continue\n\n child_dict = {}\n child_dict['id'] = id_num\n child_dict['value'] = attr['v']\n\n # stackoverflow.com/questions/6903557/splitting-on-first-occurrence\n child_dict['key'] = attr['k'].split(':', 1)[-1]\n\n # Check if the k tag has : in it and treat according to specs\n if LOWER_COLON.search(attr['k']):\n child_dict['type'] = attr['k'].split(':')[0]\n else:\n child_dict['type'] = default_tag_type\n\n # street name check (not all : matches are addr:)\n if child_dict['type'] == 'addr' & child_dict['key'] == 'street':\n child_dict['value'] = update_street_name(child_dict['value'])\n\n tags.append(child_dict)\n\n return tags", "def getXmlDict(oxml):\n lines = oxml.split(\"\\n\")\n rrd_d = {}\n # <cf> AVERAGE </cf>\n # <pdp_per_row> 288 </pdp_per_row> <!-- 86400 seconds -->\n\n # parse xml file\n key = \"\"\n rows = [] \n for line in lines:\n if (reMatchCF(line)):\n cf = line.split()[1]\n key += cf\n if (reMatchPDP(line)):\n pdp = line.split()[1]\n key += pdp\n if (reMatchRow(line)):\n ele = line.split()\n time = ele[5]\n val = ele[8]\n rows.append([time,val,line])\n # end of rra is reached, store to dict and rest vals\n if (reMatchDBEnd(line) and key and rows):\n rrd_d[key] = rows\n key = \"\"\n rows = []\n return rrd_d", "def _xml_convert(self, element):\n\n children = list(element)\n\n if len(children) == 0:\n return self._type_convert(element.text)\n else:\n # if the fist child tag is list-item means all children are list-item\n if children[0].tag == \"list-item\":\n data = []\n for child in children:\n data.append(self._xml_convert(child))\n else:\n data = {}\n for child in children:\n data[child.tag] = self._xml_convert(child)\n\n return data", "def pitems(self):\n return self.palues().root()", "def pitems(self):\n return self.palues().root()", "def post_data(driver):\n post_info 
= {\n \"post_age\" : \"li.posted\", \n \"page_views\" : \"ul.posting-info li.views\"\n }\n for key, selector in post_info.items():\n try:\n text = driver.find_element_by_css_selector(selector).text\n if key == \"post_age\":\n post_info[key] = parse_post_age(text)\n else:\n post_info[key] = ''.join(list(filter(lambda c: c.isdigit(), text)))\n except Exception as e:\n post_info[key] = \"\"\n pass\n return post_info", "def extract_node(element, node_attr_fields = NODE_FIELDS, problem_chars=PROBLEMCHARS, default_tag_type='regular') :\r\n attribs = {}\r\n tags = []\r\n\r\n \"\"\" Extraction Routine\"\"\"\r\n for key in node_attr_fields:\r\n attribs[key] = element.attrib[key]\r\n for tag in element.iter(\"tag\"):\r\n node_tag = {}\r\n node_tag[\"type\"] = default_tag_type\r\n node_tag[\"id\"] = attribs[\"id\"]\r\n node_tag[\"value\"] = tag.attrib[\"v\"]\r\n\r\n k = tag.attrib[\"k\"]\r\n\r\n if problem_chars.search(k):\r\n continue\r\n elif \":\" in k:\r\n node_tag[\"key\"] = k.split(\":\", 1)[1]\r\n node_tag[\"type\"] = k.split(\":\", 1)[0]\r\n else:\r\n node_tag[\"key\"] = k\r\n\r\n # Update city name , if any, before appending the dictionary in list\r\n\r\n if node_tag[\"key\"] == \"city\":\r\n node_tag[\"value\"] = update_city_name(node_tag[\"value\"])\r\n\r\n # Update street name, if any , as per mapping\r\n\r\n if node_tag[\"key\"] == \"street\" or \"street:name\":\r\n node_tag[\"value\"] = update_street_name(node_tag[\"value\"], mapping)\r\n\r\n # Check if postcode is valid, if invalid prefix the postcode value with 'fixme:'\r\n\r\n if node_tag[\"key\"] == \"postcode\":\r\n invalid, node_tag[\"value\"] = update_postcode(node_tag[\"value\"])\r\n if invalid:\r\n node_tag[\"value\"] = 'fixme:' + node_tag[\"value\"]\r\n\r\n\r\n tags.append(node_tag)\r\n\r\n return {'node': attribs, 'node_tags': tags}", "def parse_xml1(filename):\r\n tree = ET.parse(filename)\r\n # tree=ElementTree()\r\n # tree.parse(filename)\r\n\r\n baseInfo={}\r\n baseInfo['foder'] = tree.find('foder').text\r\n baseInfo['filename'] = tree.find('filename').text\r\n baseInfo['path'] = tree.find('path').text\r\n baseInfo['source/database'] = tree.find('source/database').text\r\n #tree.find('database')\r\n baseInfo['size/width'] = tree.find('size/width').text\r\n baseInfo['size/height'] = tree.find('size/height').text\r\n baseInfo['size/depth'] = tree.find('size/depth').text\r\n baseInfo['segmented'] = tree.find('segmented').text\r\n objects = []\r\n for obj in tree.findall('object'):\r\n obj_struct = {}\r\n obj_struct['score'] = obj.find('score').text\r\n obj_struct['region'] = obj.find('region').text\r\n obj_struct['imageptr'] = obj.find('imageptr').text\r\n if obj.find('label_des') is None:\r\n obj_struct['label_des']=\"\"\r\n else:\r\n obj_struct['label_des'] = obj.find('label_des').text\r\n obj_struct['name'] = obj.find('name').text\r\n obj_struct['pose'] = obj.find('pose').text\r\n obj_struct['truncated'] = obj.find('truncated').text #remove int()\r\n obj_struct['difficult'] = obj.find('difficult').text #remove int()\r\n bbox = obj.find('bndbox')\r\n obj_struct['bbox'] = [int(bbox.find('xmin').text),\r\n int(bbox.find('ymin').text),\r\n int(bbox.find('xmax').text),\r\n int(bbox.find('ymax').text)]\r\n objects.append(obj_struct)\r\n\r\n return baseInfo,objects", "def convertXmlToProtein(self, xml):\n\t\t# XML to dictionary\n\t\tproteinObject = Protein()\n\t\t\n\t\tdictionary = xmltodict.parse(xml)\n\t\troot = dictionary[\"uniprot\"]\n\t\tentry = root[\"entry\"]\n\t\t\n\t\tfor element, value in entry.items():\n\t\t\tif 
element == \"@accession\":\n\t\t\t\tproteinObject.addAttribute(\"id\", \"uniprot\", value)\n\t\t\t\t\n\t\t\tif element == \"name\":\n\t\t\t\tproteinObject.addAttribute(\"proteinShortName\", \"uniprot\", value)\n\t\t\t\t\n\t\t\tif element == \"protein\":\n\t\t\t\tfullname = value[\"recommendedName\"][\"fullName\"]\n\t\t\t\tproteinObject.addAttribute(\"proteinFullName\", \"uniprot\", fullname)\n\t\t\t\t\n\t\t\tif element == \"@created\":\n\t\t\t\tyear,month,day = value.split(\"-\")\n\t\t\t\tproteinObject.addAttribute(\"creationDate\", \"uniprot\", self.convertDateToNative(day,month,year) )\n\t\t\t\t\n\t\t\tif element == \"@modified\":\n\t\t\t\tyear,month,day = value.split(\"-\")\n\t\t\t\tproteinObject.addAttribute(\"modifiedDate\", \"uniprot\", self.convertDateToNative(day,month,year) )\n\t\t\t\n\t\t\tif element == \"comment\":\n\t\t\t\tfor comment in entry[\"comment\"]:\n\t\t\t\t\tif \"text\" in comment:\n\t\t\t\t\t\ttext = comment[\"text\"][\"#text\"] if isinstance(comment[\"text\"], OrderedDict) else comment[\"text\"]\n\t\t\t\t\t\tproteinObject.addAttribute(comment[\"@type\"], \"uniprot\",text)\n\t\t\t\t\t\n\t\t\tif element == \"gene\":\n\t\t\t\tgenes = []\n\t\t\t\tfor gene in value[\"name\"]:\n\t\t\t\t\tif \"#text\" in gene and isinstance(gene, OrderedDict):\n\t\t\t\t\t\tgenes.append(gene[\"#text\"])\n\t\t\t\t\t\n\t\t\t\tproteinObject.addAttribute(\"geneName\", \"uniprot\", genes)\n\t\t\t\t\t\n\t\t\tif element == \"organism\":\n\t\t\t\tif isinstance(value[\"name\"], list):\n\t\t\t\t\torganisms = []\n\t\t\t\t\tfor organism in value[\"name\"]:\n\t\t\t\t\t\torganisms.append(organism[\"#text\"])\n\t\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\tproteinObject.addAttribute(\"organism\", \"uniprot\", value[\"name\"][\"#text\"])\n\t\t\t\t\n\t\t\t\n\t\t\tif element == \"sequence\":\n\t\t\t\tproteinObject.addAttribute(\"sequence\", \"uniprot\",value[\"#text\"].replace(\"\\n\",\"\"))\n\t\t\t\tproteinObject.addAttribute(\"sequencelength\", \"uniprot\",value[\"@length\"].replace(\"\\n\",\"\"))\n\n\n\t\treturn proteinObject", "def get_data(tree_elem):\n fly_lst = []\n for element in tree_elem:\n for elem in element.xpath('td/label/div[1]/span'):\n fly_dict = dict()\n fly_info_lst = [item.strip() for item in elem.xpath('@title')[0].split(',')]\n class_cost_lst = fly_info_lst[3].split(':')\n fly_dict['dep/arv'] = fly_info_lst[1]\n fly_dict['dur'] = fly_info_lst[2]\n fly_dict['class'] = class_cost_lst[0]\n fly_dict['cost'] = get_price(class_cost_lst[1])\n fly_lst.append(fly_dict)\n return fly_lst", "def get_items_from_element(element):\n data = {'element': element,\n 'items': []}\n for item in element[len(element)-1]:\n item_info = {'data': item.items(),\n 'tag': item.tag,\n 'keys': item.keys()}\n data['items'].append(item_info)\n return data", "def retrieve_ext_list(self, puid_list):\n xml_iter = self._parse_xml()\n puiddict = {}\n for topelements in xml_iter:\n if (\n topelements.tag\n == \"{http://www.nationalarchives.gov.uk/pronom/SignatureFile}FileFormatCollection\"\n ):\n for fileformats in topelements:\n puid = fileformats.get(\"PUID\")\n for puids in puid_list:\n if puids != puid:\n continue\n ext = fileformats.find(\n \"{http://www.nationalarchives.gov.uk/pronom/SignatureFile}Extension\"\n )\n if ext is not None:\n # Return the first file format extension.\n puiddict[puids] = ext.text\n break\n puiddict[puids] = None\n break\n notfound = []\n for puid in puid_list:\n if puid not in puiddict:\n if puid not in notfound:\n notfound.append(puid)\n if len(notfound) > 0:\n for puid in notfound:\n puiddict[puid] = 
\"notfound\"\n return puiddict", "def parse_pizza_info(l):\n\n pizza_dict = {}\n\n for i, element in enumerate(l):\n if element.strip() == '<span class=\"meal-name\" itemprop=\"name\">':\n\n # Names of pizza\n pizza_name = l[i+1].split('<')[0].strip()\n pizza_dict[pizza_name] = []\n\n elif '<div class=\"meal-description-additional-info\" itemprop=\"description\">' in element:\n\n pizza_dict[pizza_name] = re.split(',|and',re.split('<|>|\\(', element.strip())[2])\n pizza_dict[pizza_name] = [x.strip() for x in pizza_dict[pizza_name]]\n pizza_dict[pizza_name] = [x.strip('-') for x in pizza_dict[pizza_name]]\n\n return pizza_dict", "def kgml_parser(self, kegg_cpd_id_list):\n result_dic = dict()\n # try:\n kg_tree = et.fromstring(self.kgml)\n for cpd in kegg_cpd_id_list:\n for el in kg_tree.iterfind('entry/graphics[@name=\"%s\"]' % cpd):\n if cpd not in result_dic.keys():\n result_dic[cpd] = [(el.get('x'), el.get('y'))]\n else:\n result_dic[cpd].append((el.get('x'), el.get('y')))\n # except:\n # # todo error exception\n # print 'error while parsing kgml of %s' % self.kegg_id\n return result_dic", "def get_pcr_sequences(self):\n d = {}\n for analyser in self.xml_tree.getroot():\n for child in analyser:\n if child.tag == 'all-assays':\n for assay in child:\n attributes = assay.attrib\n assay_id = attributes['id']\n if re.match(r'rs\\d+', assay_id):\n d[assay_id] = [attributes['pcr1'], attributes['pcr2']]\n return d" ]
[ "0.63759995", "0.620662", "0.6160831", "0.6134572", "0.6060549", "0.6026878", "0.60247046", "0.5937487", "0.5935178", "0.5895591", "0.5833998", "0.58304346", "0.5824379", "0.58219045", "0.5707903", "0.5669245", "0.5667443", "0.56660455", "0.5661833", "0.5661833", "0.5661279", "0.562904", "0.5626109", "0.5598012", "0.558992", "0.558302", "0.55548936", "0.5547756", "0.5523492", "0.5521544" ]
0.6353038
1
(str) -> bool Return True if correct mail type
def is_mail_types_correct(self, mail_type): return mail_type in self.mail_types
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def email_type(verifield, required):\n return verifield is None or parseaddr(verifield) != ('','')", "def is_class_mail_types_correct(self, class_mail_type):\r\n return class_mail_type in self.class_mail_types", "def get_receive_mail_str(self):\n ret = False\n if self.__mail:\n ret = True\n return ret", "def verify_mail(self):\n raise NotImplementedError", "def __validate_email(self,mail):\n if re.match(r\"[\\w\\W]*@+[\\w\\W]*[.]+[\\w]{2,4}\",mail):\n return True\n return False", "def check_eligible_mimetype(self, ctype, uid):\n self.helper.log_debug(\n 'check_eligible_mimtype: checking content-type %s of msg uid %s' %\n (ctype, uid))\n if ctype == \"application/zip\":\n return True\n elif ctype == \"application/gzip\":\n return True\n elif ctype == \"application/x-gzip\":\n return True\n elif ctype == \"application/octet-stream\":\n # Non-standard mimetype used by Amazon SES dmarc reports\n return True\n elif ctype == \"application-x-gzip\":\n # Non-standard mimetype used by Comcast dmarc reports\n return True\n elif ctype == \"application/x-zip-compressed\":\n # Non-standard mimetype used by Yahoo dmarc reports\n return True\n elif ctype == \"application/xml\":\n return True\n elif ctype == \"text/xml\":\n return True\n else:\n self.helper.log_debug(\n 'check_eligible_mimtype: skipping content-type %s of msg uid %s' %\n (ctype, uid))\n return False", "def __CheckType(self, t):\n t = string.upper(t)\n \"\"\" convert lower letters to upper letters \"\"\"\n if not t in ['MX', 'CNAME', 'A', 'NS', 'PTR']:\n return None\n else:\n return t", "def check_message(self):\n def check(fld_key):\n if not self[fld_key]:\n string = self._fields[fld_key].string\n raise UserError(\n _(\"%s field required to send an email.\") % string)\n if self.email_type == 'general':\n check('subject')\n check('body')\n elif self.email_type == 'scheduled':\n check('date')\n check('duration')\n check('priority')\n check('sub_subject')\n check('mail_template_id')", "def check_mail(eml):\n return eml[::-1] if eml != '#N/A' else '#N/A'", "def _is_valid_content_type_format(content_type: str) -> bool:\n return (\n _is_valid_ct(content_type)\n or _is_valid_pt(content_type)\n or _is_valid_set(content_type)\n or _is_valid_list(content_type)\n or _is_valid_dict(content_type)\n or _is_valid_union(content_type)\n or _is_valid_optional(content_type)\n )", "def test(types, _):\n return 'Date' in types and 'Postal Code' in types", "def strtype(x):\n if type(x) == str:\n return True\n if type(x) == unicode:\n return True\n return False", "def is_email_address(value):\n return _COMPILED[EMAIL].match(value) != None", "def validate_type(type):\n\n types_upper = [i.upper() for i in officeTypes]\n if type.upper() in types_upper:\n return True\n return False", "def _validate_content_type(\n content_type: str, content_name: str, performative: str\n) -> Tuple[bool, str]:\n if not _is_valid_content_type_format(content_type):\n return (\n False,\n \"Invalid type for content '{}' of performative '{}'. 
See documentation for the correct format of specification types.\".format(\n content_name,\n performative,\n ),\n )\n\n return (\n True,\n \"Type of content '{}' of performative '{}' is valid.\".format(\n content_name, performative\n ),\n )", "def is_string(atype):\n if atype == str:\n return True\n elif PY2:\n if atype == unicode:\n return True\n return False", "def is_valid_content_type(cls, content_type: str) -> bool:\n return content_type in cls.CONTENT_TYPES.value", "def _is_valid_ct(content_type: str) -> bool:\n content_type = content_type.strip()\n return _is_valid_regex(CT_CONTENT_TYPE_REGEX_PATTERN, content_type)", "def is_my_type(type_str):\n raise NotImplementedError()", "def is_valid_email(email):\n return \"@\" in email and \".\" in email", "def is_voicemail(self):\n return self._is_voicemail", "def check_email_required(document_text):\n if \"visitor[email]\" in document_text:\n return True\n else:\n return False", "async def can_send_modmail(self, user: discord.User):\n blocked = await self.config.user(user).get_raw(\"blocked\")\n type_waiting = await self.config.user(user).get_raw(\"type_holding\")\n if blocked:\n raise UserIsBlocked\n if type_waiting:\n raise WaitingForMessageType(\n \"Please choose type of message you wish to send\"\n )\n thread_open = await self.config.user(user).get_raw(\"thread_is_open\")\n current_thread = await self.config.user(user).get_raw(\"current_thread\")\n\n if thread_open:\n return True, current_thread\n else:\n return False, None", "def is_string(self):\n answer = self._call('is_string')\n return answer.yes", "def vet_email(email_address):\n ## FIXME: Doesn't warn user!\n if not re.match(r'^([^@\\s]+)@((?:[-a-z0-9]+\\.)+[a-z]{2,})$', email_address):\n return False\n local_part = re.sub(r'^([^@\\s]+)@((?:[-a-z0-9]+\\.)+[a-z]{2,})$', '\\\\1', email_address)\n domain_part = re.sub(r'^([^@\\s]+)@((?:[-a-z0-9]+\\.)+[a-z]{2,})$', '\\\\2', email_address)\n if len(local_part) > 64:\n return False\n if len(domain_part) > 255:\n return False\n return True", "def is_valid_license_type(self):\n clean = self.license_type.lower().replace('-', ' ')\n return clean not in INVALID_LICENSE_TYPE", "def isEmail(email):\n if not re.match(r\"(?:[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*|\\\"(?:[\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x21\\x23-\\x5b\\x5d-\\x7f]|\\\\[\\x01-\\x09\\x0b\\x0c\\x0e-\\x7f])*\\\")@(?:(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?|\\[(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?|[a-z0-9-]*[a-z0-9]:(?:[\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x21-\\x5a\\x53-\\x7f]|\\\\[\\x01-\\x09\\x0b\\x0c\\x0e-\\x7f])+)\\])\", email):\n return(0)\n return(1)", "def typeIsString(obj):\n return type(obj) is str or _haveTypeUnicode and type(obj) is unicode", "def validatePhoneNumberType(self):\n ## Declaring a Flag to control a while loop\n phone_number_type_ok = False\n ## While loop to have user retry their input if they enter incorrectly\n while not phone_number_type_ok:\n if self.phone_number_type.lower() in self.valid_phone_number_types:\n phone_number_type_ok = True\n return True\n\n else:\n return False", "def _check_mimetype(self):\n if self.mimetype in Config.aliases:\n mimetype = Config.aliases[self.mimetype]\n else:\n mimetype = self.mimetype\n expected_extensions = mimetypes.guess_all_extensions(mimetype,\n strict=False)\n if expected_extensions:\n if self.has_extension and self.extension not in expected_extensions:\n # LOG: improve this string\n 
self.make_dangerous('expected extensions')" ]
[ "0.73987883", "0.6967432", "0.67043835", "0.62827927", "0.62815034", "0.61875695", "0.60421157", "0.5943893", "0.590512", "0.5892637", "0.587241", "0.5855261", "0.5816171", "0.5768737", "0.56882554", "0.56593686", "0.56416327", "0.56387013", "0.5634344", "0.55975133", "0.55863184", "0.558388", "0.5576697", "0.5551998", "0.555003", "0.55410945", "0.55377686", "0.55373394", "0.54792535", "0.5475107" ]
0.7832249
0
Get the event information for a Betfair market ID.
def get_event_info(self, market_id: str) -> Tuple[str, str, str]: market_filter_ = market_filter(market_ids=[market_id]) event_type = ( self._client .betting .list_event_types(filter=market_filter_)[0] .event_type .name ) event = ( self._client .betting .list_events(filter=market_filter_)[0] .event .name ) competition = ( self._client .betting .list_competitions(filter=market_filter_)[0] .competition .name ) return event_type, event, competition
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_market_info(\n self, market_id: str\n ) -> Tuple[str, datetime, Dict[int, str]]:\n market_filter_ = market_filter(market_ids=[market_id])\n\n market = (\n self._client\n .betting\n .list_market_catalogue(\n filter=market_filter_,\n market_projection=['MARKET_START_TIME', 'RUNNER_DESCRIPTION']\n )[0]\n )\n\n market_name = market.market_name\n market_start_time = market.market_start_time\n\n selections = {}\n for runner in market.runners:\n selections[runner.selection_id] = runner.runner_name\n\n return market_name, market_start_time, selections", "def get_event(self, eventid):\n return self.s.query(Event).get(eventid)", "def retrieve(cls, event_id):\n return Event(Requester.get(cls.endpoint + '/' + event_id))", "def get_event(self, event_id):\n if not event_id:\n return None\n\n return self.service.events().get(calendarId=self.calendar_id, eventId=event_id).execute()", "def getEventById(self, eventid):\n\n e_id = EventId()\n e_id.setHashed(eventid)\n event = Event.getById(e_id)\n return event.getAsDict()", "def get_event_details(eventId):\n response = client.query(\n TableName=\"EventsSingleTable\",\n # IndexName='',\n Select=\"ALL_ATTRIBUTES\",\n KeyConditionExpression=\"pk = :pk\",\n ExpressionAttributeValues={\":pk\": eventId},\n )\n\n items = response[\"Items\"]\n\n # Try serializing multiple entities from a single request\n for item in items:\n if item[\"sk\"] == item[\"pk\"]:\n e = Event(**item)\n pprint.pprint(str(e))\n else:\n c = Comment(**item)\n pprint.pprint(str(c))", "def get_event_info(self, event_id, locale=None):\n req = BFGlobalFactory.create(\"ns1:GetEventsReq\")\n req.eventParentId = event_id\n if locale:\n req.locale = locale\n rsp = self._soapcall(BFGlobalService.getEvents, req)\n if rsp.errorCode not in (GetEventsErrorEnum.OK,\n GetEventsErrorEnum.NO_RESULTS):\n error_code = rsp.errorCode\n if error_code == GetEventsErrorEnum.API_ERROR:\n error_code = rps.header.errorCode\n logger.error(\"{getEvents} failed with error {%s}\", error_code)\n raise ServiceError(error_code)\n event_items = rsp.eventItems[0] if rsp.eventItems else []\n event_items = [BFEvent(**{k: v for k, v in evt})\n for evt in event_items if evt]\n market_items = rsp.marketItems[0] if rsp.marketItems else []\n market_items = [MarketSummary(**{k: v for k, v in mi})\n for mi in market_items if mi]\n coupon_links = rsp.couponLinks[0] if rsp.couponLinks else []\n coupon_links = [CouponLink(**{k: v for k, v in cl})\n for cl in coupon_links if cl]\n rsp = EventInfo(event_items, rsp.eventParentId, market_items,\n coupon_links)\n return rsp", "def get(self, id):\n offset, limit, expand = self.get_pagination_values()\n event = self.session.query(Event).filter_by(id=id).scalar()\n if not event:\n raise exc.NotFound(\"No such Event {} found\".format(id))\n\n json = event.to_dict(base_uri=self.href_prefix, expand=expand)\n\n self.success(json)", "def get_event_by_id(event_id):\n db = get_db()\n return db.execute((\n 'SELECT id, name, start_time, end_time, location '\n 'FROM event WHERE id=?'),\n (event_id,)).fetchone()", "def get_market_info_lite(self, market_id):\n req = BFExchangeFactory.create(\"ns1:GetMarketInfoReq\")\n req.marketId = market_id\n rsp = self._soapcall(BFExchangeService.getMarketInfo, req)\n if rsp.errorCode != GetMarketErrorEnum.OK:\n error_code = rsp.errorCode\n if error_code != GetMarketErrorEnum.API_ERROR:\n error_code = rsp.header.errorCode\n logger.error(\"{getMarketInfo} failed with error {%s}\", error_code)\n raise ServiceError(error_code)\n info = MarketInfoLite(**{k: v for k, 
v in rsp.marketLite})\n return info", "def get_event(self):\n return self.keys.events.get()", "def get_event(self, param):\n\n if param is None:\n return None\n if isinstance(param, str):\n url = self.build_url(\n self._endpoints.get('get_event').format(id=self.calendar_id,\n ide=param))\n params = None\n by_id = True\n else:\n url = self.build_url(\n self._endpoints.get('get_events').format(id=self.calendar_id))\n params = {'$top': 1}\n params.update(param.as_params())\n by_id = False\n\n response = self.con.get(url, params=params,\n headers={'Prefer': 'outlook.timezone=\"UTC\"'})\n if not response:\n return None\n\n if by_id:\n event = response.json()\n else:\n event = response.json().get('value', [])\n if event:\n event = event[0]\n else:\n return None\n return self.event_constructor(parent=self,\n **{self._cloud_data_key: event})", "def get_event_information(username, name_event):\n config = configparser.ConfigParser() # Use to access to the config file\n config.read('config.ini')\n\n try:\n w3 = Web3(Web3.HTTPProvider(config[username][\"address_node\"]))\n except Exception as e:\n return None, None, e\n\n w3.middleware_onion.inject(geth_poa_middleware, layer=0)\n\n if w3.isConnected():\n print(\"Connected to the blockchain.\")\n w3.eth.defaultAccount = w3.eth.accounts[0] # Set the sender\n\n address_event, abi_event = get_address_abi(name_event, \"event\")\n event = w3.eth.contract(address=address_event, abi=abi_event)\n\n try:\n date_event = event.functions.getDate().call()\n available_seats_event = event.functions.getAvailableSeats().call()\n seats_price = event.functions.getSeatsPrice().call()\n\n artist_event = event.functions.getArtist().call()\n location_event = event.functions.getLocation().call()\n description_event = event.functions.getDescription().call()\n except Exception as e:\n return None, None, None, None, None, None, e\n\n return date_event, available_seats_event, seats_price, artist_event, location_event, description_event, None", "def info_event_json(event_id):\n event = Event.query.filter_by(id=event_id).first_or_404()\n timeuntil = timesince(event.countdown, until=True)\n return jsonify(event=event.data, timeuntil=timeuntil)", "def get_event_details(session, event_id):\n page = session.get('http://www.usacycling.org/results/index.php?ajax=1&act=infoid&info_id='+str(event_id), headers=HDRS)\n # print(page.text)\n event = json_text(page)\n if \"No results found.\" not in event:\n #print('Race is: {} and race name is: {}'.format(e, race.find('h3').getText()))\n info = {'name': ''.join(t for t in event.find('h3').find_all(text=True)), 'races':{}}\n for a in event.find_all('li'):\n info['races'][a.find('a').contents[0]] = a.get('id').split()\n return info\n else:\n return None", "def GetEventIdentifier(self):\n return self._event_identifier", "def quote_endpoint(self, market_id):\n self._wait_before_call()\n market = self._format_market_id(market_id)\n try:\n data, meta_data = self.TS.get_quote_endpoint(\n symbol=market, outputsize=\"full\"\n )\n return data\n except:\n logging.error(\"AlphaVantage wrong api call for {}\".format(market))\n return None", "def query_event_by_id():\n try:\n event_id = request.args['event_id']\n response = requests.put(app.config['EVENTS_ENDPOINT'] + event_id)\n if response.status_code == 200:\n return render_template(\n 'search_results.html',\n auth=is_organizer(get_user()),\n events=parse_events(response.json()),\n app_config=app.config\n )\n else:\n return 'Unable to retrieve events', 500\n except BadRequestKeyError as error:\n return 
f'Error: {error}.', 400", "def get_event(event_id):\n try:\n return Event.objects.get(id=event_id)\n except ObjectDoesNotExist:\n raise ObjectDoesNotFound(\n 'There is no event with id={}.'.format(event_id))", "def select_event(self, event_id):\n with self.conn:\n self.c.execute(\n \"\"\"SELECT * FROM {table} WHERE {event} = ?\"\"\".format(\n table=TABLE, event=EVENT\n ),\n (event_id,),\n )\n return self.c.fetchone()", "def event_id(self):\n return self._event_id", "def get_event_eid(eid):\n return EventModel.query.get_or_404(eid)", "def event_get(tenant_id, user_id=None):", "def get_event(self):\r\n return self.events[0]", "def get_market_info(self, market_id, lite=True, coupon_links=False,\n locale=None):\n req = BFExchangeFactory.create(\"ns1:GetMarketReq\")\n req.marketId = market_id\n req.includeCouponLinks = coupon_links\n if locale:\n req.locale = locale\n rsp = self._soapcall(BFExchangeService.getMarket, req)\n if rsp.errorCode != GetMarketErrorEnum.OK:\n error_code = rsp.errorCode\n if error_code == GetMarketErrorEnum.API_ERROR:\n error_code = rsp.header.errorCode\n logger.error(\"{getMarket} failed with error {%s}\", error_code)\n raise ServiceError(error_code)\n market = rsp.market\n coupons = market.couponLinks[0] if market.couponLinks else []\n coupons = [CouponLink(**{k: v for k, v in coupon})\n for coupon in coupons if coupon]\n runners = market.runners[0] if market.runners else []\n runners = [Runner(**{k: v for k, v in runner})\n for runner in runners if runner]\n hierarchies = market.eventHierarchy[0] if market.eventHierarchy else []\n hierarchies = [evt for evt in hierarchies]\n rsp = MarketInfo(**{k: v for k, v in market})\n info.eventHierarchy = hierarchies\n rsp.couponLinks = coupons\n rsp.runners = runners\n return rsp", "def event(self, event_id):\r\n return e.Event(self, event_id)", "def on_market_info(self):\n pass", "def get_eventlogs_detail(self, conn, id):\n path = urlJoin(urls.EVENT_LOG[\"GET\"], id)\n resp = conn.command(apiMethod=\"GET\", apiPath=path)\n return resp", "def get_event(self, event_id):\n mask = \"\"\"mask[\n acknowledgedFlag,\n attachments,\n impactedResources,\n statusCode,\n updates,\n notificationOccurrenceEventType]\n \"\"\"\n return self.client.call('Notification_Occurrence_Event', 'getObject', id=event_id, mask=mask)", "def get_event():\n data = _get_process_detail_expanded_data()[\"event\"]\n return data" ]
[ "0.6362536", "0.63047844", "0.61040074", "0.59841615", "0.5915983", "0.58161205", "0.5800413", "0.5745946", "0.5725486", "0.5675717", "0.56645864", "0.5656346", "0.5621131", "0.55494666", "0.55292183", "0.5473335", "0.5443154", "0.5440933", "0.5433231", "0.5430904", "0.5429236", "0.541626", "0.5402026", "0.5395631", "0.53843874", "0.53761035", "0.5368661", "0.5367785", "0.5367226", "0.5325093" ]
0.78798735
0
Get the market information from a Betfair market ID.
def get_market_info( self, market_id: str ) -> Tuple[str, datetime, Dict[int, str]]: market_filter_ = market_filter(market_ids=[market_id]) market = ( self._client .betting .list_market_catalogue( filter=market_filter_, market_projection=['MARKET_START_TIME', 'RUNNER_DESCRIPTION'] )[0] ) market_name = market.market_name market_start_time = market.market_start_time selections = {} for runner in market.runners: selections[runner.selection_id] = runner.runner_name return market_name, market_start_time, selections
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_market_info_lite(self, market_id):\n req = BFExchangeFactory.create(\"ns1:GetMarketInfoReq\")\n req.marketId = market_id\n rsp = self._soapcall(BFExchangeService.getMarketInfo, req)\n if rsp.errorCode != GetMarketErrorEnum.OK:\n error_code = rsp.errorCode\n if error_code != GetMarketErrorEnum.API_ERROR:\n error_code = rsp.header.errorCode\n logger.error(\"{getMarketInfo} failed with error {%s}\", error_code)\n raise ServiceError(error_code)\n info = MarketInfoLite(**{k: v for k, v in rsp.marketLite})\n return info", "def getMarket(self):\n return self.market", "def query_bid_price(market_data):\n print(\"Consultando BID\")\n if market_data[\"marketData\"][\"BI\"]:\n bid_price = market_data[\"marketData\"][\"BI\"][0][\"price\"]\n print(f\"Precio de BID: ${bid_price:,.2f}\".replace('.', ','))\n return bid_price\n print(\"No hay BIDs activos\")\n return None", "def market(self):\n return self._market", "def market_info(self, symbol):\n r = requests.get(self.base_url + f'/game/locations/{symbol}/marketplace', headers = self.auth_header)\n return r.text", "def quote_endpoint(self, market_id):\n self._wait_before_call()\n market = self._format_market_id(market_id)\n try:\n data, meta_data = self.TS.get_quote_endpoint(\n symbol=market, outputsize=\"full\"\n )\n return data\n except:\n logging.error(\"AlphaVantage wrong api call for {}\".format(market))\n return None", "def get_market_info(self, market_id, lite=True, coupon_links=False,\n locale=None):\n req = BFExchangeFactory.create(\"ns1:GetMarketReq\")\n req.marketId = market_id\n req.includeCouponLinks = coupon_links\n if locale:\n req.locale = locale\n rsp = self._soapcall(BFExchangeService.getMarket, req)\n if rsp.errorCode != GetMarketErrorEnum.OK:\n error_code = rsp.errorCode\n if error_code == GetMarketErrorEnum.API_ERROR:\n error_code = rsp.header.errorCode\n logger.error(\"{getMarket} failed with error {%s}\", error_code)\n raise ServiceError(error_code)\n market = rsp.market\n coupons = market.couponLinks[0] if market.couponLinks else []\n coupons = [CouponLink(**{k: v for k, v in coupon})\n for coupon in coupons if coupon]\n runners = market.runners[0] if market.runners else []\n runners = [Runner(**{k: v for k, v in runner})\n for runner in runners if runner]\n hierarchies = market.eventHierarchy[0] if market.eventHierarchy else []\n hierarchies = [evt for evt in hierarchies]\n rsp = MarketInfo(**{k: v for k, v in market})\n info.eventHierarchy = hierarchies\n rsp.couponLinks = coupons\n rsp.runners = runners\n return rsp", "def get_markets(self, market):\n url = \"{url}/{market}\".format(url=self.MARKET_SERVICE_URL,\n market=market)\n\n return self.make_request(url)", "async def fetch_markets(self, params={}):\n response = await self.marketsGetSpotPairs(params)\n #\n # {\n # \"success\": 1,\n # \"data\": {\n # \"pairs\": [\n # {\n # \"name\": \"btc_jpy\",\n # \"base_asset\": \"btc\",\n # \"quote_asset\": \"jpy\",\n # \"maker_fee_rate_base\": \"0\",\n # \"taker_fee_rate_base\": \"0\",\n # \"maker_fee_rate_quote\": \"-0.0002\",\n # \"taker_fee_rate_quote\": \"0.0012\",\n # \"unit_amount\": \"0.0001\",\n # \"limit_max_amount\": \"1000\",\n # \"market_max_amount\": \"10\",\n # \"market_allowance_rate\": \"0.2\",\n # \"price_digits\": 0,\n # \"amount_digits\": 4,\n # \"is_enabled\": True,\n # \"stop_order\": False,\n # \"stop_order_and_cancel\": False\n # }\n # ]\n # }\n # }\n #\n data = self.safe_value(response, 'data')\n pairs = self.safe_value(data, 'pairs', [])\n result = []\n for i in range(0, len(pairs)):\n entry = 
pairs[i]\n id = self.safe_string(entry, 'name')\n baseId = self.safe_string(entry, 'base_asset')\n quoteId = self.safe_string(entry, 'quote_asset')\n base = self.safe_currency_code(baseId)\n quote = self.safe_currency_code(quoteId)\n result.append({\n 'id': id,\n 'symbol': base + '/' + quote,\n 'base': base,\n 'quote': quote,\n 'settle': None,\n 'baseId': baseId,\n 'quoteId': quoteId,\n 'settleId': None,\n 'type': 'spot',\n 'spot': True,\n 'margin': False,\n 'swap': False,\n 'future': False,\n 'option': False,\n 'active': self.safe_value(entry, 'is_enabled'),\n 'contract': False,\n 'linear': None,\n 'inverse': None,\n 'taker': self.safe_number(entry, 'taker_fee_rate_quote'),\n 'maker': self.safe_number(entry, 'maker_fee_rate_quote'),\n 'contractSize': None,\n 'expiry': None,\n 'expiryDatetime': None,\n 'strike': None,\n 'optionType': None,\n 'precision': {\n 'amount': self.parse_number(self.parse_precision(self.safe_string(entry, 'amount_digits'))),\n 'price': self.parse_number(self.parse_precision(self.safe_string(entry, 'price_digits'))),\n },\n 'limits': {\n 'leverage': {\n 'min': None,\n 'max': None,\n },\n 'amount': {\n 'min': self.safe_number(entry, 'unit_amount'),\n 'max': self.safe_number(entry, 'limit_max_amount'),\n },\n 'price': {\n 'min': None,\n 'max': None,\n },\n 'cost': {\n 'min': None,\n 'max': None,\n },\n },\n 'info': entry,\n })\n return result", "def get_market_orderbook(self, market):\n return self.__call__('market', 'getmarketorderbook',\n {'marketname': market})", "def call(self, ids):\n\n self.req.MarketIds = ids\n betlog.betlog.info('calling BDAQ Api GetMarketInformation')\n result = self.client.service.GetMarketInformation(self.req)\n # note the raw data is returned here\n return result", "def get_market_data(self) -> dict:\n return MarketData(asks=self.get_orders_by_action(OrderAction.SELL, self.deep.ask_count),\n bids=self.get_orders_by_action(OrderAction.BUY, self.deep.bid_count)).format", "def get_markets(self):\n\n #\n\n result = self.api_query('info')\n detail = []\n for key, value in result['pairs'].items():\n IsActive = False\n if value['hidden'] ==0:\n IsActive = True\n dict_result = {'MarketCurrency':key.split('_')[0],'BaseCurrency': key.split('_')[1], 'MarketName':key,'IsActive':IsActive}\n detail.append(dict_result)\n\n result={'success' : True, 'message':'', 'result':detail}\n return result", "def public_ticker(self, market_symbol):\n return self.get(f'markets/{market_symbol}/ticker')", "def get_event_info(self, market_id: str) -> Tuple[str, str, str]:\n market_filter_ = market_filter(market_ids=[market_id])\n\n event_type = (\n self._client\n .betting\n .list_event_types(filter=market_filter_)[0]\n .event_type\n .name\n )\n\n event = (\n self._client\n .betting\n .list_events(filter=market_filter_)[0]\n .event\n .name\n )\n\n competition = (\n self._client\n .betting\n .list_competitions(filter=market_filter_)[0]\n .competition\n .name\n )\n\n return event_type, event, competition", "def query_market_data(self, kind_of_price):\n market_data = pyRofex.get_market_data(\n ticker=self.symbol,\n entries=[kind_of_price]\n )\n return market_data", "async def fetch_markets(self, params={}):\n spotMarketsInfo = await self.publicGetConfPubInfoPair(params)\n futuresMarketsInfo = await self.publicGetConfPubInfoPairFutures(params)\n spotMarketsInfo = self.safe_value(spotMarketsInfo, 0, [])\n futuresMarketsInfo = self.safe_value(futuresMarketsInfo, 0, [])\n markets = self.array_concat(spotMarketsInfo, futuresMarketsInfo)\n marginIds = await 
self.publicGetConfPubListPairMargin(params)\n marginIds = self.safe_value(marginIds, 0, [])\n #\n # [\n # \"1INCH:USD\",\n # [\n # null,\n # null,\n # null,\n # \"2.0\",\n # \"100000.0\",\n # null,\n # null,\n # null,\n # null,\n # null,\n # null,\n # null\n # ]\n # ]\n #\n result = []\n for i in range(0, len(markets)):\n pair = markets[i]\n id = self.safe_string_upper(pair, 0)\n market = self.safe_value(pair, 1, {})\n spot = True\n if id.find('F0') >= 0:\n spot = False\n swap = not spot\n baseId = None\n quoteId = None\n if id.find(':') >= 0:\n parts = id.split(':')\n baseId = parts[0]\n quoteId = parts[1]\n else:\n baseId = id[0:3]\n quoteId = id[3:6]\n base = self.safe_currency_code(baseId)\n quote = self.safe_currency_code(quoteId)\n splitBase = base.split('F0')\n splitQuote = quote.split('F0')\n base = self.safe_string(splitBase, 0)\n quote = self.safe_string(splitQuote, 0)\n symbol = base + '/' + quote\n baseId = self.get_currency_id(baseId)\n quoteId = self.get_currency_id(quoteId)\n settle = None\n settleId = None\n if swap:\n settle = quote\n settleId = quote\n symbol = symbol + ':' + settle\n minOrderSizeString = self.safe_string(market, 3)\n maxOrderSizeString = self.safe_string(market, 4)\n margin = False\n if spot and self.in_array(id, marginIds):\n margin = True\n result.append({\n 'id': 't' + id,\n 'symbol': symbol,\n 'base': base,\n 'quote': quote,\n 'settle': settle,\n 'baseId': baseId,\n 'quoteId': quoteId,\n 'settleId': settleId,\n 'type': 'spot' if spot else 'swap',\n 'spot': spot,\n 'margin': margin,\n 'swap': swap,\n 'future': False,\n 'option': False,\n 'active': True,\n 'contract': swap,\n 'linear': True if swap else None,\n 'inverse': False if swap else None,\n 'contractSize': self.parse_number('1') if swap else None,\n 'expiry': None,\n 'expiryDatetime': None,\n 'strike': None,\n 'optionType': None,\n 'precision': {\n 'amount': int('8'), # https://github.com/ccxt/ccxt/issues/7310\n 'price': int('5'),\n },\n 'limits': {\n 'leverage': {\n 'min': None,\n 'max': None,\n },\n 'amount': {\n 'min': self.parse_number(minOrderSizeString),\n 'max': self.parse_number(maxOrderSizeString),\n },\n 'price': {\n 'min': self.parse_number('1e-8'),\n 'max': None,\n },\n 'cost': {\n 'min': None,\n 'max': None,\n },\n },\n 'info': market,\n })\n return result", "async def fetch_markets(self, params={}):\n response = await self.publicGetInstrumentActiveAndIndices(params)\n #\n # [\n # {\n # \"symbol\": \"LTCUSDT\",\n # \"rootSymbol\": \"LTC\",\n # \"state\": \"Open\",\n # \"typ\": \"FFWCSX\",\n # \"listing\": \"2021-11-10T04:00:00.000Z\",\n # \"front\": \"2021-11-10T04:00:00.000Z\",\n # \"expiry\": null,\n # \"settle\": null,\n # \"listedSettle\": null,\n # \"relistInterval\": null,\n # \"inverseLeg\": \"\",\n # \"sellLeg\": \"\",\n # \"buyLeg\": \"\",\n # \"optionStrikePcnt\": null,\n # \"optionStrikeRound\": null,\n # \"optionStrikePrice\": null,\n # \"optionMultiplier\": null,\n # \"positionCurrency\": \"LTC\", # can be empty for spot markets\n # \"underlying\": \"LTC\",\n # \"quoteCurrency\": \"USDT\",\n # \"underlyingSymbol\": \"LTCT=\", # can be empty for spot markets\n # \"reference\": \"BMEX\",\n # \"referenceSymbol\": \".BLTCT\", # can be empty for spot markets\n # \"calcInterval\": null,\n # \"publishInterval\": null,\n # \"publishTime\": null,\n # \"maxOrderQty\": 1000000000,\n # \"maxPrice\": 1000000,\n # \"lotSize\": 1000,\n # \"tickSize\": 0.01,\n # \"multiplier\": 100,\n # \"settlCurrency\": \"USDt\", # can be empty for spot markets\n # 
\"underlyingToPositionMultiplier\": 10000,\n # \"underlyingToSettleMultiplier\": null,\n # \"quoteToSettleMultiplier\": 1000000,\n # \"isQuanto\": False,\n # \"isInverse\": False,\n # \"initMargin\": 0.03,\n # \"maintMargin\": 0.015,\n # \"riskLimit\": 1000000000000, # can be null for spot markets\n # \"riskStep\": 1000000000000, # can be null for spot markets\n # \"limit\": null,\n # \"capped\": False,\n # \"taxed\": True,\n # \"deleverage\": True,\n # \"makerFee\": -0.0001,\n # \"takerFee\": 0.0005,\n # \"settlementFee\": 0,\n # \"insuranceFee\": 0,\n # \"fundingBaseSymbol\": \".LTCBON8H\", # can be empty for spot markets\n # \"fundingQuoteSymbol\": \".USDTBON8H\", # can be empty for spot markets\n # \"fundingPremiumSymbol\": \".LTCUSDTPI8H\", # can be empty for spot markets\n # \"fundingTimestamp\": \"2022-01-14T20:00:00.000Z\",\n # \"fundingInterval\": \"2000-01-01T08:00:00.000Z\",\n # \"fundingRate\": 0.0001,\n # \"indicativeFundingRate\": 0.0001,\n # \"rebalanceTimestamp\": null,\n # \"rebalanceInterval\": null,\n # \"openingTimestamp\": \"2022-01-14T17:00:00.000Z\",\n # \"closingTimestamp\": \"2022-01-14T18:00:00.000Z\",\n # \"sessionInterval\": \"2000-01-01T01:00:00.000Z\",\n # \"prevClosePrice\": 138.511,\n # \"limitDownPrice\": null,\n # \"limitUpPrice\": null,\n # \"bankruptLimitDownPrice\": null,\n # \"bankruptLimitUpPrice\": null,\n # \"prevTotalVolume\": 12699024000,\n # \"totalVolume\": 12702160000,\n # \"volume\": 3136000,\n # \"volume24h\": 114251000,\n # \"prevTotalTurnover\": 232418052349000,\n # \"totalTurnover\": 232463353260000,\n # \"turnover\": 45300911000,\n # \"turnover24h\": 1604331340000,\n # \"homeNotional24h\": 11425.1,\n # \"foreignNotional24h\": 1604331.3400000003,\n # \"prevPrice24h\": 135.48,\n # \"vwap\": 140.42165,\n # \"highPrice\": 146.42,\n # \"lowPrice\": 135.08,\n # \"lastPrice\": 144.36,\n # \"lastPriceProtected\": 144.36,\n # \"lastTickDirection\": \"MinusTick\",\n # \"lastChangePcnt\": 0.0655,\n # \"bidPrice\": 143.75,\n # \"midPrice\": 143.855,\n # \"askPrice\": 143.96,\n # \"impactBidPrice\": 143.75,\n # \"impactMidPrice\": 143.855,\n # \"impactAskPrice\": 143.96,\n # \"hasLiquidity\": True,\n # \"openInterest\": 38103000,\n # \"openValue\": 547963053300,\n # \"fairMethod\": \"FundingRate\",\n # \"fairBasisRate\": 0.1095,\n # \"fairBasis\": 0.004,\n # \"fairPrice\": 143.811,\n # \"markMethod\": \"FairPrice\",\n # \"markPrice\": 143.811,\n # \"indicativeTaxRate\": null,\n # \"indicativeSettlePrice\": 143.807,\n # \"optionUnderlyingPrice\": null,\n # \"settledPriceAdjustmentRate\": null,\n # \"settledPrice\": null,\n # \"timestamp\": \"2022-01-14T17:49:55.000Z\"\n # }\n # ]\n #\n result = []\n for i in range(0, len(response)):\n market = response[i]\n id = self.safe_string(market, 'symbol')\n baseId = self.safe_string(market, 'underlying')\n quoteId = self.safe_string(market, 'quoteCurrency')\n settleId = self.safe_string(market, 'settlCurrency')\n base = self.safe_currency_code(baseId)\n quote = self.safe_currency_code(quoteId)\n settle = self.safe_currency_code(settleId)\n # 'positionCurrency' may be empty(\"\", currently returns for ETHUSD)\n # so let's take the settlCurrency first and then adjust if needed\n typ = self.safe_string(market, 'typ') # type definitions at: https://www.bitmex.com/api/explorer/#not /Instrument/Instrument_get\n types = {\n 'FFWCSX': 'swap',\n 'FFWCSF': 'swap',\n 'IFXXXP': 'spot',\n 'FFCCSX': 'future',\n 'MRBXXX': 'index',\n 'MRCXXX': 'index',\n 'MRFXXX': 'index',\n 'MRRXXX': 'index',\n 'MRIXXX': 'index',\n }\n type = 
self.safe_string(types, typ, typ)\n swap = type == 'swap'\n future = type == 'future'\n spot = type == 'spot'\n contract = swap or future\n contractSize = None\n index = type == 'index'\n isInverse = self.safe_value(market, 'isInverse') # self is True when BASE and SETTLE are same, i.e. BTC/XXX:BTC\n isQuanto = self.safe_value(market, 'isQuanto') # self is True when BASE and SETTLE are different, i.e. AXS/XXX:BTC\n linear = (not isInverse and not isQuanto) if contract else None\n status = self.safe_string(market, 'state')\n active = status != 'Unlisted'\n expiry = None\n expiryDatetime = None\n symbol = None\n if spot:\n symbol = base + '/' + quote\n elif contract:\n symbol = base + '/' + quote + ':' + settle\n multiplierString = Precise.string_abs(self.safe_string(market, 'multiplier'))\n if linear:\n contractSize = self.parse_number(Precise.string_div('1', market['underlyingToPositionMultiplier']))\n else:\n contractSize = self.parse_number(multiplierString)\n if future:\n expiryDatetime = self.safe_string(market, 'expiry')\n expiry = self.parse8601(expiryDatetime)\n symbol = symbol + '-' + self.yymmdd(expiry)\n else:\n # for index/exotic markets, default to id\n symbol = id\n positionId = self.safe_string_2(market, 'positionCurrency', 'underlying')\n position = self.safe_currency_code(positionId)\n positionIsQuote = (position == quote)\n maxOrderQty = self.safe_number(market, 'maxOrderQty')\n initMargin = self.safe_string(market, 'initMargin', '1')\n maxLeverage = self.parse_number(Precise.string_div('1', initMargin))\n result.append({\n 'id': id,\n 'symbol': symbol,\n 'base': base,\n 'quote': quote,\n 'settle': settle,\n 'baseId': baseId,\n 'quoteId': quoteId,\n 'settleId': settleId,\n 'type': type,\n 'spot': spot,\n 'margin': False,\n 'swap': swap,\n 'future': future,\n 'option': False,\n 'index': index,\n 'active': active,\n 'contract': contract,\n 'linear': linear,\n 'inverse': isInverse,\n 'quanto': isQuanto,\n 'taker': self.safe_number(market, 'takerFee'),\n 'maker': self.safe_number(market, 'makerFee'),\n 'contractSize': contractSize,\n 'expiry': expiry,\n 'expiryDatetime': expiryDatetime,\n 'strike': self.safe_number(market, 'optionStrikePrice'),\n 'optionType': None,\n 'precision': {\n 'amount': self.safe_number(market, 'lotSize'),\n 'price': self.safe_number(market, 'tickSize'),\n 'quote': self.safe_number(market, 'tickSize'),\n 'base': self.safe_number(market, 'tickSize'),\n },\n 'limits': {\n 'leverage': {\n 'min': self.parse_number('1') if contract else None,\n 'max': maxLeverage if contract else None,\n },\n 'amount': {\n 'min': None,\n 'max': None if positionIsQuote else maxOrderQty,\n },\n 'price': {\n 'min': None,\n 'max': self.safe_number(market, 'maxPrice'),\n },\n 'cost': {\n 'min': None,\n 'max': maxOrderQty if positionIsQuote else None,\n },\n },\n 'info': market,\n })\n return result", "def get_market_summary(self, market):\n return self.__call__('market', 'getmarketsummary', \n {'marketname': market})", "def fetch_price():\n\n url = \"https://www.bitstamp.net/api/ticker/\"\n\n response = json.load(urllib2.urlopen(url))\n\n return {\"buy\": response['ask'], \"sell\": response['bid']}", "def fetch_ticker(self, symbol: str, params={}):\n self.load_markets()\n market = self.market(symbol)\n request = {\n 'pair': market['id'],\n }\n response = self.publicGetExchangesPairTicker(self.extend(request, params))\n return self.parse_ticker(response, market)", "async def fetch_ticker(self, symbol: str, params={}):\n await self.load_markets()\n market = self.market(symbol)\n 
request = {\n 'symbol': market['id'],\n }\n ticker = await self.publicGetTickerSymbol(self.extend(request, params))\n return self.parse_ticker(ticker, market)", "def get_active_market_street(market):\r\n return market[-1]", "async def fetch_trading_fees(self, params={}):\n await self.load_markets()\n response = await self.marketsGetSpotPairs(params)\n #\n # {\n # success: '1',\n # data: {\n # pairs: [\n # {\n # name: 'btc_jpy',\n # base_asset: 'btc',\n # quote_asset: 'jpy',\n # maker_fee_rate_base: '0',\n # taker_fee_rate_base: '0',\n # maker_fee_rate_quote: '-0.0002',\n # taker_fee_rate_quote: '0.0012',\n # unit_amount: '0.0001',\n # limit_max_amount: '1000',\n # market_max_amount: '10',\n # market_allowance_rate: '0.2',\n # price_digits: '0',\n # amount_digits: '4',\n # is_enabled: True,\n # stop_order: False,\n # stop_order_and_cancel: False\n # },\n # ...\n # ]\n # }\n # }\n #\n data = self.safe_value(response, 'data', {})\n pairs = self.safe_value(data, 'pairs', [])\n result = {}\n for i in range(0, len(pairs)):\n pair = pairs[i]\n marketId = self.safe_string(pair, 'name')\n market = self.safe_market(marketId)\n symbol = market['symbol']\n result[symbol] = {\n 'info': pair,\n 'symbol': symbol,\n 'maker': self.safe_number(pair, 'maker_fee_rate_quote'),\n 'taker': self.safe_number(pair, 'taker_fee_rate_quote'),\n 'percentage': True,\n 'tierBased': False,\n }\n return result", "async def fetch_ticker(self, symbol: str, params={}):\n await self.load_markets()\n market = self.market(symbol)\n request = {\n 'symbol': market['id'],\n }\n response = await self.publicGetInstrument(self.extend(request, params))\n ticker = self.safe_value(response, 0)\n if ticker is None:\n raise BadSymbol(self.id + ' fetchTicker() symbol ' + symbol + ' not found')\n return self.parse_ticker(ticker, market)", "async def fetch_ticker(self, symbol: str, params={}):\n await self.load_markets()\n market = self.market(symbol)\n request = {\n 'pair': market['id'],\n }\n response = await self.publicGetPairTicker(self.extend(request, params))\n data = self.safe_value(response, 'data', {})\n return self.parse_ticker(data, market)", "def _fetch_stock_page(*markets) -> bs4.BeautifulSoup:\n\n if len(markets) == 0:\n raise ValueError('No markets given')\n\n params = {\n 'Exchange' : 'NMF',\n 'SubSystem': 'Prices',\n 'Action' : 'GetMarket',\n 'app' : '/osakkeet',\n 'Market' : ','.join([x.value for x in markets]),\n # 'ext_xslt': '/nordicV3/inst_table_shares.xsl'\n }\n\n r = requests.get(_API_URL, params)\n response_text = r.text\n soup = bs4.BeautifulSoup(response_text, 'lxml')\n\n return soup", "def get_option_market_data(self, symbol: str): \n return self.trader.fetch_option_market_data(symbol)", "def getMyMarketOrders(self, empireID):\n d = {}\n marketOrdersDict = self.getMyDictInfo('marketOrders')\n for marketID, myMarketOrderDict in marketOrdersDict.iteritems():\n if self.systems[myMarketOrderDict['system']].myEmpire.id == empireID:\n d[marketID] = myMarketOrderDict\n return d", "def get_marketplace(self, marketplace_id):\n return MarketplaceResource(self._config).get(marketplace_id)" ]
[ "0.7296463", "0.6786435", "0.64976203", "0.645441", "0.6436033", "0.6377452", "0.6353565", "0.6345084", "0.6292913", "0.6270462", "0.6263126", "0.62144077", "0.6168539", "0.6146066", "0.6135332", "0.6084566", "0.6076271", "0.6071816", "0.6056507", "0.6041295", "0.5983031", "0.5908562", "0.5906014", "0.5894554", "0.58888024", "0.58542126", "0.5834648", "0.5819151", "0.5746868", "0.5738209" ]
0.74244684
0
Stop a running Betfair ladder stream.
def stop_betfair_ladder_stream(self) -> None: if self.stream is not None: logger.info("Stopping the Betfair market stream.") self.stream.stop() self.stream.listener.output_queue.put("Terminate") self.stream = None else: logger.info("No existing Betfair market stream to stop.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def stop_stream(self):\n pass", "def stop(self) -> None:\n self._stream.stop()", "def stop(self):\n\t\tself.stream.stop_stream()", "def stop(self):\n self.stream.stop()\n self.running = False", "def stop(self) -> None:\n self._stream.stop()", "def stop() -> None:", "def stopit(self):\n\n self.stop.stop()\n self.stream.close()\n self.p.terminate()\n self.p = None\n\n print(\"Recording terminated!\")", "async def stop_livestream(self):\n await self.api.stop_livestream(self.product_type, self.serial_no)\n if self.p2p_stream_thread.is_alive() is True:\n await self.p2p_stream_handler.stop()", "async def async_stop_stream(self) -> None:\n await self._api.request(\n \"post\",\n \"web/equipment/stop_stream\",\n json={\n \"device_sn\": self.serial,\n \"station_sn\": self.station_serial,\n \"proto\": 2,\n },\n )", "def stop(self) -> None:\n ...", "def stop(self):\n self._stop_flag = True", "def stop(self) -> None:", "def stop(self) -> None:", "def stop(self):\n self._running = False", "def stop(self):\n self._running = False", "def stop(self):\n self.__running = False", "def stop(self):\n self._run = False", "def stop(self):\n self.running = False", "def stop(self):\n self.running = False", "def stop(self):\n self.running = False", "def stop(self):\n self.running = False", "def stop(self):\n self.running = False", "def stop(self):\n self._state.transit(sitcpy.THREAD_STOPPING)", "def stop(self):\n self._state.transit(sitcpy.THREAD_STOPPING)", "def stop():", "def stop():", "def stop():", "def stop():", "def stop(self):\r\n self.running = False", "def stop(self):\r\n self.running = False" ]
[ "0.8050214", "0.80189526", "0.7999657", "0.7922578", "0.7622994", "0.7271031", "0.7232619", "0.71897453", "0.71808064", "0.7150011", "0.71426904", "0.7123477", "0.7123477", "0.7122764", "0.7122764", "0.71187675", "0.7106935", "0.7099712", "0.7099712", "0.7099712", "0.7099712", "0.7099712", "0.70795554", "0.70795554", "0.7060017", "0.7060017", "0.7060017", "0.7060017", "0.70346", "0.70346" ]
0.83448356
0
Returns class by interpreting input string as module path and class name. Module path should be separated by dots as usual. Separate class name from module by '/'.
def get_class(string): logger = logman.getLogger(__name__) if '/' not in string: logger.error("The string is not properly formatted. Use '/' to separate module path from classname. String is: {}".format(string)) return module_name, class_name = string.split('/') try: logger.debug('Retrieving class {} from module {}'.format(class_name, module_name)) temp_class = getattr(importlib.import_module(module_name), class_name) except ModuleNotFoundError: logger.error("Module not found: {}".format(module_name)) raise except AttributeError: logger.error("Class not found: {}".format(class_name)) raise except: logger.error("Unexpected error while loading {}".format(string)) raise return temp_class
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_class(full_class_string):\r\n class_data = full_class_string.split(\".\")\r\n module_path = \".\".join(class_data[:-1])\r\n class_str = class_data[-1]\r\n module = importlib.import_module(module_path)\r\n return getattr(module, class_str)", "def getClass(strname):\n \n modulename, classname = strname.split('.')\n classname = classname.split('(')[0]\n if hasattr(Analysis,modulename):\n module_ = getattr(Analysis,modulename)\n class_ = getattr(module_,classname)\n else:\n module_ = getattr(Summary,modulename)\n class_ = getattr(module_,classname)\n \n return class_", "def stringToClass(cls_str):\n import_stg1 = cls_str.split(\" \")[1]\n import_stg2 = import_stg1.replace(\"'\", \"\")\n import_stg3 = import_stg2.replace(\">\", \"\")\n import_parse = import_stg3.split(\".\")\n cls = import_parse[-1]\n import_path = '.'.join(import_parse[:-1])\n import_statement = \"from %s import %s\" % (import_path, cls)\n exec(import_statement)\n assign_statement = \"this_class = %s\" % cls\n exec(assign_statement)\n return this_class", "def load_class(full_class_string):\n\n class_data = full_class_string.split(\".\")\n module_path = \".\".join(class_data[:-1])\n class_str = class_data[-1]\n\n module = importlib.import_module(module_path)\n # Finally, we retrieve the Class\n return getattr(module, class_str)", "def import_class(import_str):\r\n mod_str, _sep, class_str = import_str.rpartition('.')\r\n __import__(mod_str)\r\n return getattr(sys.modules[mod_str], class_str)", "def import_class(import_str):\n mod_str, _sep, class_str = import_str.rpartition('.')\n try:\n __import__(mod_str)\n return getattr(sys.modules[mod_str], class_str)\n except (ValueError, AttributeError):\n raise ImportError('Class %s cannot be found (%s).' %\n (class_str,\n traceback.format_exception(*sys.exc_info())))", "def load_class(full_class_string):\n # todo: cache classes (if this is an overhead)\n class_data = full_class_string.split(\".\")\n module_path = \".\".join(class_data[:-1])\n class_str = class_data[-1]\n module = importlib.import_module(module_path)\n # Finally, we retrieve the Class\n return getattr(module, class_str)", "def import_class(path):\n components = path.split(\".\")\n module = components[:-1]\n module = \".\".join(module)\n # __import__ needs a native str() on py2\n mod = __import__(module, fromlist=[str(components[-1])])\n return getattr(mod, str(components[-1]))", "def import_class(import_str):\r\n mod_str, _sep, class_str = import_str.rpartition('.')\r\n try:\r\n __import__(mod_str)\r\n return getattr(sys.modules[mod_str], class_str)\r\n except (ValueError, AttributeError):\r\n raise ImportError(_('Class %s cannot be found (%s)') %\r\n (class_str,\r\n traceback.format_exception(*sys.exc_info())))", "def import_class(import_str):\n mod_str, _sep, class_str = import_str.rpartition('.')\n try:\n __import__(mod_str)\n return getattr(sys.modules[mod_str], class_str)\n except (ImportError, ValueError, AttributeError), exc:\n logging.debug('Inner Exception: %s', exc)\n raise", "def import_class(import_str):\r\n mod_str, _sep, class_str = import_str.rpartition('.')\r\n __import__(mod_str)\r\n try:\r\n return getattr(sys.modules[mod_str], class_str)\r\n except AttributeError:\r\n raise ImportError('Class %s cannot be found (%s)' %\r\n (class_str,\r\n traceback.format_exception(*sys.exc_info())))", "def str_to_class(referance_name):\n return getattr(sys.modules[__name__], referance_name)", "def importClass(importStr):\n moduleStr, _sep, classStr = importStr.rpartition(\".\")\n \n try:\n 
__import__(moduleStr)\n return getattr(sys.modules[moduleStr], classStr)\n except (ValueError, AttributeError):\n raise ImportError(\"Class %s cannot be found (%s)\" %\n (classStr, traceback.format_exception(*sys.exc_info())))", "def load_class(\n fully_qualified_class_name: str\n):\n\n (module_name, fully_qualified_class_name) = fully_qualified_class_name.rsplit('.', 1)\n module_ref = importlib.import_module(module_name)\n class_ref = getattr(module_ref, fully_qualified_class_name)\n\n return class_ref", "def create_class_from_strings( self, module_name, class_name):\r\n if not( self.logger is None ):\r\n self.logger.debug( \"create class {module_name} {class_name}\" )\r\n\r\n# print( \"create class \" + module_name + \" \" + class_name )\r\n\r\n a_class = getattr( importlib.import_module(module_name), class_name )\r\n instance = a_class( )\r\n return instance", "def classFromString(className, mod=None):\n if mod is None:\n mod = className\n if className == \"NoneType\":\n cls = None\n else:\n try:\n __import__(mod, globals(), locals(), [], -1)\n cls = sys.modules[mod].__dict__[className]\n except ImportError:\n try:\n cls = eval(\"{0}\".format(className))\n except NameError:\n print('Class \"{0}\" from modue \"{1}\"'\n ' was not found.'.format(className, mod))\n return\n except:\n print('An unanticipated error occurred '\n 'while trying to find Class \"{0}\"'\n ' in module \"{1}\".'.format(className, mod))\n raise\n except:\n print('Module \"{0}\" was not found, terminating'.format(mod))\n raise\n return cls", "def get_type_from_string(cls_path: str) -> Type:\n module_name, class_name = cls_path.rsplit(\".\", 1)\n return getattr(import_module(module_name), class_name)", "def get_class_from_string(self, classname, module):\n\n myclass = None\n try:\n # Meta language for dinamically import\n myclass = getattr(module, classname)\n except AttributeError as e:\n logger.critical(\"Failed to load resource: \" + str(e))\n\n return myclass", "def get_class(class_name, module_paths=None):\n class_ = locate(class_name)\n if (class_ is None) and (module_paths is not None):\n for module_path in module_paths:\n class_ = locate('.'.join([module_path, class_name]))\n if class_ is not None:\n break\n\n if class_ is None:\n raise ValueError(\n \"Class not found in {}: {}\".format(module_paths, class_name))\n\n return class_", "def process_path(module_path):\n if module_path == 'numpy.ndarray':\n return 'StorageNumpy', 'hecuba.hnumpy'\n last = 0\n for key, i in enumerate(module_path):\n if i == '.' and key > last:\n last = key\n module = module_path[:last]\n class_name = module_path[last + 1:]\n return class_name, module", "def process_path(module_path):\n\n if module_path == 'numpy.ndarray':\n return 'StorageNumpy', 'hecuba.hnumpy'\n if module_path == 'StorageDict':\n return 'StorageDict', 'hecuba.hdict'\n last = 0\n for key, i in enumerate(module_path):\n if i == '.' 
and key > last:\n last = key\n module = module_path[:last]\n class_name = module_path[last + 1:]\n return class_name, module", "def get_class(classname):\n parts = classname.split('.')\n module = '.'.join(parts[:-1])\n m = __import__(module)\n for comp in parts[1:]:\n m = getattr(m, comp) \n return m", "def load_class(path):\r\n\r\n mod_name, klass_name = path.rsplit('.', 1)\r\n\r\n try:\r\n mod = import_module(mod_name)\r\n except AttributeError as e:\r\n raise ImproperlyConfigured('Error importing {0}: \"{1}\"'.format(mod_name, e))\r\n\r\n try:\r\n klass = getattr(mod, klass_name)\r\n except AttributeError:\r\n raise ImproperlyConfigured('Module \"{0}\" does not define a \"{1}\" class'.format(mod_name, klass_name))\r\n\r\n return klass", "def import_string(dotted_path):\n try:\n module_path, class_name = dotted_path.rsplit('.', 1)\n except ValueError as err:\n raise ImportError(\"%s doesn't look like a module path\" % dotted_path) from err\n\n module = import_module(module_path)\n\n try:\n return getattr(module, class_name)\n except AttributeError as err:\n raise ImportError('Module \"%s\" does not define a \"%s\" attribute/class' % (\n module_path, class_name)) from err", "def import_string(dotted_path):\n try:\n module_path, class_name = dotted_path.rsplit('.', 1)\n except ValueError as err:\n raise ImportError(\"%s doesn't look like a module path\" % dotted_path) from err\n\n module = importlib.import_module(module_path)\n\n try:\n return getattr(module, class_name)\n except AttributeError as err:\n raise ImportError('Module \"%s\" does not define a \"%s\" attribute/class' % (\n module_path, class_name)\n ) from err", "def construct_class_by_name(name, *args, **kwargs):\n parts = name.split('.')\n module_name, class_name = '.'.join(parts[:-1]), parts[-1]\n module = importlib.import_module(module_name)\n return getattr(module, class_name)(*args, **kwargs)", "def import_class(classpath):\n modname, classname = classpath.rsplit(\".\", 1)\n module = importlib.import_module(modname)\n klass = getattr(module, classname)\n return klass", "def instantiate_from_string(class_name):\n class_name = convert_underscore_to_camel_case(class_name)\n return globals()[class_name]()", "def import_string(dotted_path):\n try:\n module_path, class_name = dotted_path.rsplit('.', 1)\n module = import_module(module_path)\n\n try:\n return getattr(module, class_name)\n except AttributeError:\n msg = 'Module \"%s\" does not define a \"%s\" attribute/class' % (\n module_path, class_name)\n six.reraise(ImportError, ImportError(msg), sys.exc_info()[2])\n\n except ValueError:\n msg = \"%s doesn't look like a module path\" % dotted_path\n six.reraise(ImportError, ImportError(msg), sys.exc_info()[2])", "def load_class(class_path, setting_name=None):\n try:\n class_module, class_name = class_path.rsplit('.', 1)\n except ValueError:\n if setting_name:\n txt = '%s isn\\'t a valid module. Check your %s setting' % (class_path,setting_name)\n else:\n txt = '%s isn\\'t a valid module.' % class_path\n raise exceptions.ImproperlyConfigured(txt)\n \n try:\n mod = import_module(class_module)\n except ImportError, e:\n if setting_name:\n txt = 'Error importing backend %s: \"%s\". Check your %s setting' % (class_module, e, setting_name)\n else:\n txt = 'Error importing backend %s: \"%s\".' % (class_module, e)\n raise exceptions.ImproperlyConfigured(txt)\n \n try:\n clazz = getattr(mod, class_name)\n except AttributeError:\n if setting_name:\n txt = 'Backend module \"%s\" does not define a \"%s\" class. 
Check your %s setting' % (class_module, class_name, setting_name)\n else:\n txt = 'Backend module \"%s\" does not define a \"%s\" class.' % (class_module, class_name)\n raise exceptions.ImproperlyConfigured(txt)\n return clazz" ]
[ "0.7585165", "0.7544114", "0.7541174", "0.7536624", "0.75242513", "0.74492747", "0.7387409", "0.7375805", "0.73551124", "0.7353887", "0.7324937", "0.72359395", "0.72320575", "0.72180814", "0.71838266", "0.71719867", "0.7155751", "0.7124456", "0.711404", "0.7071752", "0.7059211", "0.6968013", "0.69621795", "0.68345225", "0.6799715", "0.6759073", "0.67556685", "0.6755289", "0.67147267", "0.6701341" ]
0.87269014
0
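The negatives in the row above are all variations on one pattern: resolving a dotted path such as "package.module.ClassName" to the class object itself. A minimal consolidated sketch of that pattern, standard library only (error messages here are illustrative, not taken from any single snippet above):

from importlib import import_module

def import_string(dotted_path):
    """Resolve 'package.module.ClassName' to the named attribute of the module."""
    module_path, _, class_name = dotted_path.rpartition(".")
    if not module_path:
        raise ImportError("%r doesn't look like a module path" % dotted_path)
    module = import_module(module_path)
    try:
        return getattr(module, class_name)
    except AttributeError as err:
        raise ImportError("module %r has no attribute %r" % (module_path, class_name)) from err

# usage: import_string("collections.OrderedDict") is collections.OrderedDict  -> True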
FresnelReflection takes the smallest angle between the ray direction and the normal. Thus the flipped normal will also work.
def test_antinormal_reflection(self):
    n1 = 1.0
    n2 = 1.5
    normal = (0.0, 0.0, -1.0)
    angle = 0.0
    ray = Ray(position=(0.0, 0.0, 0.0), direction=(0.0, 0.0, 1.0), wavelength=None)
    fresnel = FresnelReflection()
    assert np.isclose(fresnel.reflectivity(angle, n1, n2), 0.04)
    new_ray = fresnel.transform(ray, {"normal": normal})
    assert np.allclose(flip(ray.direction), new_ray.direction)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reflect(self, ray):\n normal = self.normal(ray.position)\n if normal.dot(ray.direction) > 0:\n normal = -normal\n return Ray(\n ray.direction - 2 * dot(ray.direction, normal) * normal, ray.position)", "def refract(self, ray, rho):\n normal = self.normal(ray.position)\n if normal.dot(ray.direction) > 0:\n normal = -normal\n incidence = dot(-ray.direction, normal)\n complement = sqrt(1.0 - (1.0 - incidence**2) / rho**2)\n return Ray((ray.direction / rho +\n (incidence / rho - complement) * normal), ray.position)", "def test_reflection_vector(self):\n\n # A ray approaching at 45 degrees\n v = vectors.Vector(1, -1, 0)\n n = vectors.Vector(0, 1, 0)\n r = v.reflect(n)\n self.assertEqual(r, vectors.Vector(1, 1, 0))\n\n # Ray along an axis hits a surface at an angle\n v = vectors.Vector(0, -1, 0)\n n = vectors.Vector(math.sqrt(2)/2, math.sqrt(2)/2, 0)\n r = v.reflect(n)\n self.assertEqual(r, vectors.Vector(1, 0, 0))", "def reflect_step(r0, step, intersection, normal_vector, step_length):\n \n # Calculate distance to intersection point and update step length\n step_length -= math.sqrt((r0[0] - intersection[0])**2 + (r0[1] - intersection[1])**2 + (r0[2] - intersection[2])**2)\n \n # Calculate reflection off the surface\n reflected_x = -r0[0] + 2*intersection[0] + 2*normal_vector[0]*((r0[0] - intersection[0])*normal_vector[0] + (r0[1] - intersection[1])*normal_vector[1] + (r0[2] - intersection[2])*normal_vector[2])\n reflected_y = -r0[1] + 2*intersection[1] + 2*normal_vector[1]*((r0[0] - intersection[0])*normal_vector[0] + (r0[1] - intersection[1])*normal_vector[1] + (r0[2] - intersection[2])*normal_vector[2])\n reflected_z = -r0[2] + 2*intersection[2] + 2*normal_vector[2]*((r0[0] - intersection[0])*normal_vector[0] + (r0[1] - intersection[1])*normal_vector[1] + (r0[2] - intersection[2])*normal_vector[2])\n \n # Update step direction and spin position\n step[0] = reflected_x - intersection[0]\n step[1] = reflected_y - intersection[1]\n step[2] = reflected_z - intersection[2]\n normalizing_factor = math.sqrt(step[0]**2+step[1]**2+step[2]**2)\n step[0] /= normalizing_factor \n step[1] /= normalizing_factor \n step[2] /= normalizing_factor \n \n epsilon = 1e-6\n \n r0[0] = intersection[0] + epsilon*step_length*step[0]\n r0[1] = intersection[1] + epsilon*step_length*step[1]\n r0[2] = intersection[2] + epsilon*step_length*step[2]\n \n return", "def get_normal_fluctuation(hover,target,normal,vec):\n\tvector = hover - target\n\tvector = vector - vec*(vector>(vec/2.)) + vec*(vector<(-1*vec/2.))\n\tprojected = planeproject(vector,normal)\n\t#---get the sign of the projection\n\tplane_point = vector+projected\n\tsign = 1.0-2.0*(np.arccos(np.dot(vecnorm(normal),vecnorm(vector)))>np.pi/2.)\n\treturn sign*np.linalg.norm(plane_point)", "def reflected(self, normal):\n return self - (2 * normal * self) * normal", "def op_fresnel_reflection(m, theta):\n rho_p = pypolar.fresnel.r_par_amplitude(m, theta)\n rho_s = pypolar.fresnel.r_per_amplitude(m, theta)\n a = abs(rho_s)**2 + abs(rho_p)**2\n b = abs(rho_s)**2 - abs(rho_p)**2\n c = 2 * rho_s * rho_p\n mat = np.array([[a, b, 0, 0],\n [b, a, 0, 0],\n [0, 0, c, 0],\n [0, 0, 0, c]])\n return 0.5 * mat", "def get_focal_point(patches, shell_point, num_rays=20):\n focal_point = Point3D(0.0, 0.0, 0.0)\n for patch in patches:\n #create a bunch of parallel rays coming from the eye\n ray_vector = normalize(shell_point)\n \n ##TODO: remove me\n #ray_vector = normalize(patch.shell_point)\n \n ray_rotation = numpy.zeros((3, 3))\n optics.rotation_matrix.R_2vect(ray_rotation, 
PRINCIPAL_RAY, ray_vector)\n rays = []\n for x in numpy.linspace(-LIGHT_RADIUS, LIGHT_RADIUS, num_rays*2+1):\n for y in numpy.linspace(-LIGHT_RADIUS, LIGHT_RADIUS, num_rays*2+1):\n start_point = ray_rotation.dot(Point3D(x, y, 0.0))\n rays.append(Ray(start_point, start_point + ray_vector))\n \n #find the point such that the spot size is minimized on the screen.\n #can average the normal of the reflected rays to get approximately where the screen goes\n #then iteratively try different distances until we've minimized the spot size there\n focal_point = Point3D(0.0, 0.0, 0.0)\n reflected_rays = [ray for ray in patch.reflect_rays_no_bounds(rays) if ray != None]\n approximate_screen_normal = sum([normalize(ray.start - ray.end) for ray in reflected_rays]) / len(reflected_rays)\n if optics.debug.PATCH_FOCAL_REFLECTIONS:\n #TODO: all rays don't come from the origin. draw all rays from their actual start points, and draw non-reflected rays going past the surface\n #also, only draw the part of the surface that is real and should be reflected from\n axes = matplotlib.pyplot.subplot(111, projection='3d')\n size = 5\n num_points = 10\n x, y = numpy.meshgrid(numpy.linspace(-size, size, num_points), numpy.linspace(-size, size, num_points))\n axes.scatter(x, y, patch.poly.get_z_for_plot(x, y), c='r', marker='o').set_label('patch')\n for ray in reflected_rays:\n debug_dist = 2*numpy.linalg.norm(ORIGIN - ray.start)\n rays_to_draw = numpy.array([\n patch.poly_space.point_to_space(ORIGIN),\n patch.poly_space.point_to_space(ray.start),\n patch.poly_space.point_to_space(debug_dist * normalize(ray.end-ray.start) + ray.start)\n ])\n axes.plot(rays_to_draw[:, 0], rays_to_draw[:, 1], rays_to_draw[:, 2], label=\"ray\")\n axes.set_xlabel('X')\n axes.set_ylabel('Y')\n axes.set_zlabel('Z')\n matplotlib.pyplot.legend()\n matplotlib.pyplot.show()\n def calculate_spot_size(distance):\n \"\"\"\n :returns: average distance from the central point for the plane at this distance\n \"\"\"\n screen_plane = Plane(distance * approximate_screen_normal * -1.0 + shell_point, approximate_screen_normal)\n points = []\n for ray in reflected_rays:\n points.append(screen_plane.intersect_line(ray.start, ray.end))\n average_point = sum(points) / len(points)\n errors = [numpy.linalg.norm(p - average_point) for p in points]\n if optics.debug.PATCH_FOCAL_SPOT_SIZE:\n #use coordinate space to move everything to the xy plane\n space = CoordinateSpace(screen_plane._point, screen_plane._normal)\n transformed_points = numpy.array([space.point_to_space(p) for p in points])\n matplotlib.pyplot.plot(transformed_points[:, 0], transformed_points[:, 1], \"r\", linestyle='None', marker='o', label=\"rays at %s\" % (distance))\n matplotlib.pyplot.legend()\n matplotlib.pyplot.show()\n #keep a fixed scale to x and y so that each graph can be compared with the previous\n #should probably print the errors as well\n print errors\n print sum(errors) / len(errors)\n return sum(errors) / len(errors)\n previous_distance = numpy.linalg.norm(patch.shell_point - patch.screen_point)\n min_dist = previous_distance * 0.9\n max_dist = previous_distance * 1.1\n num_iterations = 20\n tolerance = 0.0001\n best_dist = scipy.optimize.fminbound(calculate_spot_size, min_dist, max_dist, maxfun=num_iterations, xtol=tolerance, full_output=False, disp=0)\n focal_point += best_dist * approximate_screen_normal * -1.0 + shell_point\n return focal_point / len(patches)", "def _field_Fresnel(z, field, dx, lam, dtype, usepyFFTW):\n \n \"\"\" 
*************************************************************\n Major differences to Cpp based LP version:\n - dx =siz/N instead of dx=siz/(N-1), more consistent with physics \n and rest of LP package\n - fftw DLL uses no normalization, numpy uses 1/N on ifft -> omitted\n factor of 1/(2*N)**2 in final calc before return\n - bug in Cpp version: did not touch top row/col, now we extract one\n more row/col to fill entire field. No errors noticed with the new\n method so far\n ************************************************************* \"\"\"\n _using_pyfftw = False # determined if loading is successful \n if usepyFFTW or _USE_PYFFTW:\n try:\n import pyfftw as _pyfftw\n from pyfftw.interfaces.numpy_fft import fft2 as _fft2\n from pyfftw.interfaces.numpy_fft import ifft2 as _ifft2\n _fftargs = {'planner_effort': 'FFTW_ESTIMATE',\n 'overwrite_input': True,\n 'threads': -1} #<0 means use multiprocessing.cpu_count()\n _using_pyfftw = True \n except ImportError:\n #import warnings\n #warnings.warn(_WARNING)\n _WARNING = '\\n**************************** WARNING ***********************\\n'\\\n +'In the Fresnel command you required FFT with the pyFFTW package.\\n'\\\n +'or _USE_PYFFTW = True in your config.py file.\\n'\\\n +'However LightPipes cannot import pyFFTW because it is not installed.\\n'\\\n +'Falling back to numpy.fft.\\n'\\\n +'(Try to) install pyFFTW on your computer for faster performance.\\n'\\\n +'Enter at a terminal prompt: python -m pip install pyfftw.\\n'\\\n +'Or reinstall LightPipes with the option pyfftw\\n'\\\n +'Enter: python -m pip install lightpipes[pyfftw]\\n\\n'\\\n +'*************************************************************'\n print(_WARNING)\n if not _using_pyfftw:\n from numpy.fft import fft2 as _fft2\n from numpy.fft import ifft2 as _ifft2\n _fftargs = {}\n tictoc.tic()\n N = field.shape[0] #assert square\n \n legacy = True #switch on to numerically compare oldLP/new results\n if legacy:\n kz = 2.*3.141592654/lam * z\n siz = N*dx\n dx = siz/(N-1) #like old Cpp code, even though unlogical\n else:\n kz = 2*_np.pi/lam*z\n \n \n cokz = _np.cos(kz)\n sikz = _np.sin(kz)\n \n No2 = int(N/2) #\"N over 2\"\n \"\"\"The following section contains a lot of uses which boil down to\n 2*No2. For even N, this is N. For odd N, this is NOT redundant:\n 2*No2 is N-1 for odd N, therefore sampling an even subset of the\n field instead of the whole field. Necessary for symmetry of first\n step involving Fresnel integral calc.\n \"\"\"\n if _using_pyfftw:\n in_outF = _pyfftw.zeros_aligned((2*N, 2*N),dtype=dtype)\n in_outK = _pyfftw.zeros_aligned((2*N, 2*N),dtype=dtype)\n else:\n in_outF = _np.zeros((2*N, 2*N),dtype=dtype)\n in_outK = _np.zeros((2*N, 2*N),dtype=dtype)\n \n \"\"\"Our grid is zero-centered, i.e. the 0 coordiante (beam axis) is\n not at field[0,0], but field[No2, No2]. 
The FFT however is implemented\n such that the frequency 0 will be the first element of the output array,\n and it also expects the input to have the 0 in the corner.\n For the correct handling, an fftshift is necessary before *and* after\n the FFT/IFFT:\n X = fftshift(fft(ifftshift(x))) # correct magnitude and phase\n x = fftshift(ifft(ifftshift(X))) # correct magnitude and phase\n X = fftshift(fft(x)) # correct magnitude but wrong phase !\n x = fftshift(ifft(X)) # correct magnitude but wrong phase !\n A numerically faster way to achieve the same result is by multiplying\n with an alternating phase factor as done below.\n Speed for N=2000 was ~0.4s for a double fftshift and ~0.1s for a double\n phase multiplication -> use the phase factor approach (iiij).\n \"\"\"\n # Create the sign-flip pattern for largest use case and \n # reference smaller grids with a view to the same data for\n # memory saving.\n ii2N = _np.ones((2*N),dtype=float)\n ii2N[1::2] = -1 #alternating pattern +,-,+,-,+,-,...\n iiij2N = _np.outer(ii2N, ii2N)\n iiij2No2 = iiij2N[:2*No2,:2*No2] #slice to size used below\n iiijN = iiij2N[:N, :N]\n\n RR = _np.sqrt(1/(2*lam*z))*dx*2\n io = _np.arange(0, (2*No2)+1) #add one extra to stride fresnel integrals\n R1 = RR*(io - No2)\n fs, fc = _fresnel(R1)\n fss = _np.outer(fs, fs) # out[i, j] = a[i] * b[j]\n fsc = _np.outer(fs, fc)\n fcs = _np.outer(fc, fs)\n fcc = _np.outer(fc, fc)\n \n \"\"\"Old notation (0.26-0.33s):\n temp_re = (a + b + c - d + ...)\n # numpy func add takes 2 operands A, B only\n # -> each operation needs to create a new temporary array, i.e.\n # ((((a+b)+c)+d)+...)\n # since python does not optimize to += here (at least is seems)\n New notation (0.14-0.16s):\n temp_re = (a + b) #operation with 2 operands\n temp_re += c\n temp_re -= d\n ...\n Wrong notation:\n temp_re = a #copy reference to array a\n temp_re += b\n ...\n # changing `a` in-place, re-using `a` will give corrupted\n # result\n \"\"\"\n temp_re = (fsc[1:, 1:] #s[i+1]c[j+1]\n + fcs[1:, 1:]) #c[+1]s[+1]\n temp_re -= fsc[:-1, 1:] #-scp [p=+1, without letter =+0]\n temp_re -= fcs[:-1, 1:] #-csp\n temp_re -= fsc[1:, :-1] #-spc\n temp_re -= fcs[1:, :-1] #-cps\n temp_re += fsc[:-1, :-1] #sc\n temp_re += fcs[:-1, :-1] #cs\n \n temp_im = (-fcc[1:, 1:] #-cpcp\n + fss[1:, 1:]) # +spsp\n temp_im += fcc[:-1, 1:] # +ccp\n temp_im -= fss[:-1, 1:] # -ssp\n temp_im += fcc[1:, :-1] # +cpc\n temp_im -= fss[1:, :-1] # -sps\n temp_im -= fcc[:-1, :-1] # -cc\n temp_im += fss[:-1, :-1]# +ss\n \n temp_K = 1j * temp_im # a * b creates copy and casts to complex\n temp_K += temp_re\n temp_K *= iiij2No2\n temp_K *= 0.5\n in_outK[(N-No2):(N+No2), (N-No2):(N+No2)] = temp_K\n \n in_outF[(N-No2):(N+No2), (N-No2):(N+No2)] \\\n = field[(N-2*No2):N,(N-2*No2):N] #cutting off field if N odd (!)\n in_outF[(N-No2):(N+No2), (N-No2):(N+No2)] *= iiij2No2\n \n tictoc.tic()\n in_outK = _fft2(in_outK, **_fftargs)\n in_outF = _fft2(in_outF, **_fftargs)\n t_fft1 = tictoc.toc()\n \n in_outF *= in_outK\n \n in_outF *= iiij2N\n tictoc.tic()\n in_outF = _ifft2(in_outF, **_fftargs)\n t_fft2 = tictoc.toc()\n #TODO check normalization if USE_PYFFTW\n \n Ftemp = (in_outF[No2:N+No2, No2:N+No2]\n - in_outF[No2-1:N+No2-1, No2:N+No2])\n Ftemp += in_outF[No2-1:N+No2-1, No2-1:N+No2-1]\n Ftemp -= in_outF[No2:N+No2, No2-1:N+No2-1]\n comp = complex(cokz, sikz)\n Ftemp *= 0.25 * comp\n Ftemp *= iiijN\n field = Ftemp #reassign without data copy\n ttotal = tictoc.toc()\n t_fft = t_fft1 + t_fft2\n t_outside = ttotal - t_fft\n debug_time = False\n if debug_time:\n 
print('Time total = fft + rest: {:.2f}={:.2f}+{:.2f}'.format(\n ttotal, t_fft, t_outside))\n return field", "def propagate(self, ray, index_0, index_1):\n if self._reflective:\n return self.reflect(ray)\n else:\n return self.refract(ray, index_1/index_0)", "def far_clipping_face(self):\n pln = self.tripod.plane\n l, r, b, t, n, f = self.body.dim\n if self.body.fshape == 'p':\n d = f - n\n # far face dimensions\n l, r, b, t = [(i * d) / n + i for i in (l, r, b, t)]\n face = gt.Plin((l, b, -f), (r, b, -f), (r, t, -f), (l, t, -f))\n return pln.TM * face", "def Fresnel(n1,n2,theta_i):\n\n if np.all(theta_i > 2*np.pi) or isinstance(n1,int) or isinstance(n2,int):\n sys.exit(\"Input the incident angle in radians and the refractive indices as floating points.\")\n # reflection angle is equal to incident angle\n theta_r = theta_i\n # Snell's law for the angle of refraction (transmission)\n theta_t = np.arcsin((n1/float(n2))*np.sin(theta_i))\n # P-Polarized\n Rs = (n1*np.cos(theta_i)-n2*np.cos(theta_t))/(n1*np.cos(theta_i)+n2*np.cos(theta_t))\n Ts = (2*n1*np.cos(theta_i))/(n1*np.cos(theta_i)+n2*np.cos(theta_t))\n # S-Polarized\n Rp = (n2*np.cos(theta_i)-n1*np.cos(theta_t))/(n2*np.cos(theta_i)+n1*np.cos(theta_t))\n Tp = (2*n1*np.cos(theta_i))/(n2*np.cos(theta_i)+n1*np.cos(theta_t))\n # Brewster's Angle\n theta_b = np.arctan(n2/n1)\n # Total Internal Reflection\n theta_tir = np.arcsin(n2/n1)\n return theta_r,theta_t,theta_b,theta_tir,Rp,Tp,Rs,Ts", "def _compute_pixel_ray_direction(\n u: float, v: float, fx: float, fy: float, img_w: int, img_h: int\n) -> NDArrayFloat:\n if not np.isclose(fx, fy, atol=1e-3):\n raise ValueError(\n f\"Focal lengths in the x and y directions must match: {fx} != {fy}\"\n )\n\n # approximation for principal point\n px = img_w / 2\n py = img_h / 2\n\n # the camera coordinate frame (where Z is out, x is right, y is down).\n\n # compute offset from the center\n x_center_offs = u - px\n y_center_offs = v - py\n\n ray_dir: NDArrayFloat = np.array([x_center_offs, y_center_offs, fx])\n ray_dir /= np.linalg.norm(ray_dir)\n return ray_dir", "def frusrum_ray(self, param_x, param_y):\n l, r, b, t, n, f = self.body.dim\n # convert normalized into near frustum space\n sm = ScaleMat(x=r - l, y=t - b)\n # .5 to compensate origin difference between OpenGL space and pane space\n offset = MoveMat(-.5, -.5, -n)\n frustum_point = sm * offset * Pnt(x=param_x, y=param_y, z=0)\n ray = gt.Ray([0, 0, 0], frustum_point.xyz)\n return self.tripod.plane.TM * ray", "def normal(self, point):\n point = self._center - np.array(point)\n # if abs(point.dot(point) - self._radius**2) > 1e-15:\n # raise RayTraceError(\n # 'Cannot compute normal. Point is too far from surface ({}).'.format(\n # (abs(point.dot(point) - self._radius**2))))\n return normalize(point / self._radius)", "def create_incident_reflected(self):\n # 1. add the normal\n # MObject already constructed\n\n # 2. show an incident ray\n #\n # |<-dx->|\n #\n # + |\n # \\ |\n # \\ |\n # \\ |\n # \\ i| i: incident angle\n # \\ |\n # \\|\n # -------*-------\n #\n delta_x = self.normal_length * math.tan(self.incident_angle)\n self.arrow_incident = Arrow(ORIGIN + -delta_x * RIGHT + self.normal_length * UP,\n ORIGIN,\n color=self.arrow_incident_color,\n stroke_width = 4, buff=0).shift(self.mirror_origin)\n\n\n # 3. 
show the incident angle\n self.arc_incident = Arc(\n start_angle = PI/2,\n angle = self.incident_angle,\n radius = self.arc_incident_radius,\n color = self.arc_incident_color,\n arc_center = self.mirror_origin\n )\n\n self.text_incident_fig = TextMobject(r\"Incident ray\").set_color(self.tex_theta_in_color).\\\n scale(1.2).move_to(-5.0 * RIGHT + -1.0 * UP)\n theta_in_pos_offset = -0.5 * RIGHT + 1.9 * UP\n self.tex_theta_in = TexMobject(r\"\\theta_{i}\", color=self.arc_incident_color).move_to(self.mirror_origin + theta_in_pos_offset)\n\n\n # 4. show an reflected ray\n #\n # |<-dx->|\n #\n # + | +\n # \\ | /\n # \\ | /\n # \\ | /\n # \\ i|r / i: incident angle\n # \\ | / r: reflected angle\n # \\|/\n # -------*-------\n #\n delta_x = self.normal_length * math.tan(self.reflected_angle)\n self.arrow_reflected = Arrow(ORIGIN,\n ORIGIN + delta_x * RIGHT + self.normal_length * UP,\n color=self.arrow_reflected_color,\n stroke_width = 4, buff=0).shift(self.mirror_origin)\n\n # 5. show the reflected angle\n self.arc_reflected = Arc(\n start_angle = PI/2 - self.reflected_angle,\n angle = self.reflected_angle,\n radius = self.arc_reflected_radius,\n color = self.arc_reflected_color,\n arc_center = self.mirror_origin\n )\n self.text_reflected_fig = TextMobject(r\"Reflected ray\").set_color(self.tex_theta_ref_color).\\\n scale(1.2).move_to(1.0 * RIGHT + -1.0 * UP)\n\n theta_out_pos_offset = 0.5 * RIGHT + 1.9 * UP\n self.tex_theta_ref = TexMobject(r\"\\theta_{r}\", color=self.arc_reflected_color).move_to(self.mirror_origin + theta_out_pos_offset)\n\n self.tex_mirror_reflect = TexMobject(r\"\\text{Specular reflection: }\",\n r\"\\theta_{i}\",\n r\"=\",\n r\"\\theta_{r}\")\n self.tex_mirror_reflect.scale(self.mirror_reflect_tex_scale).move_to(-1.0 * RIGHT + 3.0 * UP)\n self.tex_mirror_reflect[1].set_color(self.arc_incident_color)\n self.tex_mirror_reflect[3].set_color(self.arc_reflected_color)", "def ForsterOrientationFactor(d1, d2, r):\n rn = r / norm(r) ##Normalized distance vector\n d1n = d1/ norm(d1)\n d2n = d2/ norm(d2)\n Factor = 3 * dot(d1n, rn) * dot(d2n, rn) - dot(d1n, d2n)\n return Factor", "def near_clipping_face(self):\n pln = self.tripod.plane\n l, r, b, t, n, f = self.body.dim\n face = gt.Plin((l, b, -n), (r, b, -n), (r, t, -n), (l, t, -n))\n return pln.TM * face", "def find_direction_vector(line):\n pt1, pt2 = line\n pt1 = np.array(pt1).reshape(2,)\n pt2 = np.array(pt2).reshape(2,)\n direct = pt2 - pt1\n direct_norm = normalize(direct)\n return direct_norm", "def get_normal_dist(line, point):\n \n # Rotate: \n x_rot = np.cos(line[1])*point[0] + np.sin(line[1])*point[1]\n \n # Normal distance: x_rot - rho:\n return x_rot - line[0]", "def get_funnel(self):\n v_density = torch.distributions.Normal(0,3)\n potential1 = -v_density.log_prob(self.parameters[0])\n x_density = torch.distributions.Normal(0,torch.exp(self.parameters[0])**0.5)\n potential2 = -x_density.log_prob(self.parameters[1:]).sum()\n return potential1 + potential2", "def random_lookat_ray(goal, radius, variance, fov):\n theta1 = 2.*np.pi*np.random.uniform(-fov, fov)\n theta2 = np.arccos(1 - np.random.uniform(0, fov)**2)\n r = radius + variance*np.random.uniform(0,1.)\n x = r*np.cos(theta1)*np.sin(theta2)\n y = r*np.sin(theta1)*np.sin(theta2)\n z = r*np.cos(theta2)\n R = goal[:3,:3]\n point = goal[:3,3] + np.dot(R, np.array([x,y,z]))\n # Find the direction\n direction = -np.dot(R, np.array([x,y,z]))\n direction = tr.unit_vector(direction)\n return orpy.Ray(point, direction)", "def flipNormals(self):\n self.flip = not self.flip", 
"def test_forward(self) -> None:\n func = self._get_simple_implicit_function()\n\n n_grids, n_points = 10, 9\n raybundle = ImplicitronRayBundle(\n origins=torch.randn(n_grids, 2, 3, 3),\n directions=torch.randn(n_grids, 2, 3, 3),\n lengths=torch.randn(n_grids, 2, 3, n_points),\n xys=0,\n )\n func(raybundle)", "def draw_ray(env, ray, dist=0.03, linewidth=2, color=None):\n if dist < 0:\n newpos = ray.pos() + dist*ray.dir()\n newray = orpy.Ray(newpos, ray.dir())\n else:\n newray = ray\n iktype = orpy.IkParameterizationType.TranslationDirection5D\n ikparam = orpy.IkParameterization(ray, iktype)\n h = orpy.misc.DrawIkparam2(env, ikparam, dist=dist, linewidth=linewidth,\n coloradd=color)\n return h", "def rayIntersection(self, ray):\n #t = \"what we are trying to find\"\n l = -ray.mDirection\n l0 = ray.mOrigin\n n = self.mNormal\n p0 = self.mDistance * n\n #p = l0 + l * t\n\n if l.dot(n) > 0:\n v = p0 - l0\n t = -(v.dot(n) / l.dot(n))\n return t\n\n else:\n return None", "def trace(self, ray): # type: (Ray) -> Vector\n hit_object = None\n t = numpy.inf\n\n for scene_object in self.scene.shapes:\n t0 = scene_object.intersect(ray)\n if t0 < t:\n t = t0\n hit_object = scene_object\n\n # if there were no intersections, then return the background colour\n if t == numpy.inf:\n return self.scene.camera.background\n\n hit_point = ray.origin + ray.direction * t\n normal = hit_object.normal(hit_point)\n luminance = 0.0\n\n # perform shading calculations\n for light in self.scene.lights:\n hit_point_to_light = (light.centre - hit_point).normal\n\n #check whether this light contributes to the shading\n in_shadow = False\n for shadower in self.scene.shapes:\n # we don't want to test against itself\n if shadower == hit_object:\n continue\n shadow_ray = Ray(hit_point + normal * 0.0001, hit_point_to_light)\n if shadower.intersect(shadow_ray) < numpy.inf:\n in_shadow = True\n break\n if in_shadow:\n continue\n\n # super simple lambertian lighting model\n luminance += hit_point_to_light.dot(normal) * light.power\n\n # calculate shaded colour - luminance may be over one if there are multiple light sources\n # normally this would be dealt with by HDR and tone mapping but is just clipped\n # in demo ray tracers\n object_colour = hit_object.material.colour * min(luminance, 1.0)\n\n # calculate reflection colour if material has reflectance\n if hit_object.material.reflectance != 0.0 and ray.depth != self.scene.camera.depth:\n reflected_direction = (ray.direction - normal * 2 * (ray.direction.dot(normal))).normal\n # we need to 'translate' the reflection vector away from the hitpoint otherwise\n # we risk intersecting the original hit point again which causes artifacts in the reflection\n reflected_ray = Ray(hit_point + reflected_direction * 0.0001, reflected_direction, ray.depth + 1)\n reflection_colour = self.trace(reflected_ray)\n\n # interpolate shaded colour and reflected colour based on reflectance\n return Vector(*[lerp(object_colour.data[i], reflection_colour.data[i], hit_object.material.reflectance) for i in range(3)])\n\n return object_colour", "def heuristic_2_reflection(game, player) -> float:\n\n reflection_available_factor = get_reflection_available_factor(game, player)\n\n return float(reflection_available_factor)", "def perspectiveFovLH(field_of_view, aspect, znear, zfar):\n h = 1 / tan(field_of_view / 2)\n w = h / aspect\n m = [\n [w, 0, 0, 0],\n [0, h, 0, 0],\n [0, 0, zfar / (zfar - znear), 1],\n [0, 0, (znear * zfar) / (znear - zfar), 0],\n ]\n return Matrix(m)", "def _rf(self, p):\n return 
self.faces[:, 0, :] - p # 0 is arbitrary - the other vertices also work" ]
[ "0.6952331", "0.59180486", "0.58824384", "0.57535976", "0.57386696", "0.55146694", "0.5512906", "0.5464423", "0.5416558", "0.5369161", "0.5335698", "0.52933735", "0.52365685", "0.5117482", "0.50288486", "0.49778453", "0.49726534", "0.49646103", "0.49436632", "0.49390393", "0.49358514", "0.49279857", "0.49221197", "0.49185342", "0.48926383", "0.48917755", "0.48183814", "0.48181015", "0.47715023", "0.4744581" ]
0.8033125
0
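Why the flipped normal in the query above also works: the specular reflection formula r = d - 2(d.n)n uses the normal twice, so replacing n by -n leaves the result unchanged. The 0.04 asserted in the test is the normal-incidence Fresnel reflectivity ((n1-n2)/(n1+n2))^2 = (0.5/2.5)^2. A minimal NumPy sketch; flip() and the formula here are stand-ins for the library's own helpers, not its actual code:

import numpy as np

def flip(direction):
    # stand-in for the flip() used in the test: reverse a direction vector
    return tuple(-c for c in direction)

def specular_reflect(direction, normal):
    d = np.asarray(direction, dtype=float)
    n = np.asarray(normal, dtype=float)
    n = n / np.linalg.norm(n)
    return d - 2.0 * np.dot(d, n) * n

print(specular_reflect((0.0, 0.0, 1.0), (0.0, 0.0, -1.0)))  # [ 0.  0. -1.]
print(specular_reflect((0.0, 0.0, 1.0), (0.0, 0.0, 1.0)))   # [ 0.  0. -1.], same result either way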
Tests the API endpoint to get hashrate resale details with a missing field
def test_mining_hashrate_resale_details_with_missing_field(params):
    client = Client(key, secret)
    client.mining_hashrate_resale_details.when.called_with(**params).should.throw(
        ParameterRequiredError
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_mining_hashrate_resale_details():\n\n client = Client(key, secret)\n response = client.mining_hashrate_resale_details(123, \"user_name\")\n response.should.equal(mock_item)", "def test_retire_rate_plan(self):\n pass", "def test_validation_get_valid_resampling(self):\n self.assertIsInstance(api.validation.fetch_resampling(), dict)", "def test_get_rate_article_not_found(self):\n self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + self.token)\n response = self.client.get(\n self.view_rates_url + str(2) + \"/\",\n format='json')\n self.assertEqual(\n 0,\n response.data[\"rates\"])\n self.assertEqual(204, status.HTTP_204_NO_CONTENT)", "def test_get_details7(self):\n pass", "def test_get_rate_article(self):\n self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + self.token)\n self.client.post(\n self.rate_url,\n self.rate_details,\n format='json')\n self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + self.token_2)\n self.rate_details[\"user\"]['rate'] = 4\n self.client.post(\n self.rate_url,\n self.rate_details,\n format='json')\n response = self.client.get(\n self.view_rates_url + str(1) + \"/\",\n format='json')\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_get_details(self):\n restaurant_id = 23917\n with self.app.app_context():\n details = ordrin.get_details(restaurant_id)\n\n self.assertEquals(details['name'], 'Test Merchant 20130315',\n 'Check restaurant name on test details.')\n self.assertEquals(details['id'], restaurant_id,\n 'Check restaurant id on test details.')\n self.assertTrue(details['delivers'], 'Check delivery flag on test entry.')\n self.assertTrue(details['allows_asap'],\n 'Check asap flag on test details.')\n self.assertAlmostEqual(details['location'][0], 42.825685,\n 'Check latitude on test details.')\n self.assertAlmostEqual(details['location'][1], -73.879458,\n 'Check longitude on test details.')\n self.assertEquals(details['partner'], 'delivery.com',\n 'Check delivery partner on test details.')\n self.assertEquals(details['address'], '123 FAKE ST',\n 'Check address on test details.')\n self.assertTrue(False)", "def test_mocked_get_api(self):\n c = Client()\n response = c.get(\"/apimock/mocked/api/account/154/\")\n self.assertEqual(response.status_code, 200)\n self.assertIn(\n '<table border=\"1\"><tr><th>amount</th><td>10PLN</td></tr></table>', response.content)\n response2 = c.get(\"/apimock/mocked/api/account/187/\")\n self.assertEqual(response2.status_code, 200)\n self.assertIn(\n '<table border=\"1\"><tr><th>amount</th><td>10PLN</td></tr></table>', response2.content)", "def test_retrieve_list_resgate_to_user_authenticated(self):\n sample_resgate(user=self.user, value=500)\n sample_resgate(user=self.user, value=200)\n\n response = self.client.get(RESGATE_URL)\n\n resgates = Resgate.objects.all().order_by('quantity')\n serializer = ResgateSerializer(resgates, many=True)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data, serializer.data)", "def test_get_pay_in_details(self):\n pass", "async def test_get_rates_get(client):\n params = [('exchangeType', 'exchange_type_example')]\n headers = { \n 'Accept': 'application/json',\n 'Authorization': 'Bearer special-key',\n }\n response = await client.request(\n method='GET',\n path='/public/exchange/1/getRates',\n headers=headers,\n params=params,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')", "def test_get_dealer_ratings(self):\n pass", "def 
test_get_restaurant_review_list_fail(self):\n client = Client()\n res_id = Restaurant.objects.get(name='TEST_REST').id\n response = client.get('/api/restaurant/'+str(res_id)+'/')\n self.assertEqual(response.status_code, 401)", "def test_get_metadata_for_rate_plan(self):\n pass", "def test_lti20_get_no_score_success(self):\r\n self.setup_system_xmodule_mocks_for_lti20_request_test()\r\n mock_request = self.get_signed_lti20_mock_request(\"\", method=u'GET')\r\n # Now call the handler\r\n response = self.xmodule.lti_2_0_result_rest_handler(mock_request, \"user/abcd\")\r\n # Now assert\r\n self.assertEqual(response.status_code, 200)\r\n self.assertEqual(response.json, {\"@context\": \"http://purl.imsglobal.org/ctx/lis/v2/Result\",\r\n \"@type\": \"Result\"})", "def test_view_reteta_detail(self):\n reteta = sample_reteta(user=self.user)\n reteta.tags.add(sample_tag(user=self.user))\n reteta.ingredients.add(sample_ingredient(user=self.user))\n\n url = detail_url(reteta.id)\n res = self.client.get(url)\n serializer = RetetaDetailSerializer(reteta)\n self.assertEqual(res.data, serializer.data)", "def test_get_all_rate_plans(self):\n pass", "def test_get_object_dict(self):\n review = self.review[0].get_dict()\n self.assertIsNotNone(review['reviewer_id'])\n self.assertIsNotNone(review['book_id'])\n self.assertEqual(5, review['rate'])", "def test_get_risk_profile_using_get(self):\n pass", "def test_get_rate_plan_by_product(self):\n pass", "def test_company_EU_GR_vies_zero(self, mock_check):\n mock_check.return_value = {\"valid\": True}\n self.assertEqual(self.policy.get_tax_rate(\"EL090145420\", \"GR\"), (None, True))", "def test_get_rating(self):\n url = reverse('rate-game')\n data = {'igdb': self.game.igdb}\n response = self.client.get(url, data, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_limited_to_user_who_made_resgate(self):\n user2 = get_user_model().objects.create_user(\n '[email protected]',\n 'test12345'\n )\n sample_resgate(user=user2)\n sample_resgate(user=self.user)\n\n response = self.client.get(RESGATE_URL)\n\n resgates = Resgate.objects.filter(user=self.user)\n serializer = ResgateSerializer(resgates, many=True)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(response.data), 1)\n self.assertEqual(response.data, serializer.data)", "def test_household_get(self):\n url = '/household/'+ self.test_id + '/'\n response = self.tester.get(url,\n content_type='application/json')\n self.assertEqual(response.status_code, 200)", "def test_update_rate_plan(self):\n pass", "def test_get(self):\n #Validate the response\n resp = self.client.get('/api/v1/purchase-order/1/')\n self.assertEqual(resp.status_code, 200)\n \n #Validate the returned data\n obj = resp.data\n self.assertEqual(obj['id'], 1)\n self.assertEqual(obj['terms'], '0/net')\n self.assertEqual(obj['revision'], 0)\n \n #Test items\n self.assertIn('items', obj)\n self.assertEqual(len(obj['items']), 1)\n item1 = obj['items'][0]\n #self.assertIn('purchasing_units', item1)\n #self.assertEqual(item1['purchasing_units'], 'm')", "def test_rate_limited(self):\n response = self._mock_utility(get_kwargs=self._data(),\n error=fitbit_exceptions.HTTPConflict)\n self._check_response(response, 105)", "def test_get_pricing_with_incorrect_instrument():\n res = oanda.get_pricing(CONFIG, 'XXX500_WRONG')\n assert res[0] == 400", "def test_get_risk_profile_all_using_get(self):\n pass", "def test_gelir_api(self):\n response = self.client.get(reverse('gelir-json', 
args=[self.sample_type]))\n self.assertContains(response, self.proband.gel_id)\n self.assertEquals(response.status_code, 200)" ]
[ "0.6968484", "0.62199587", "0.61830664", "0.6144193", "0.61011153", "0.59623325", "0.58962244", "0.58485985", "0.5811617", "0.5798121", "0.5735763", "0.5712043", "0.56655514", "0.5655179", "0.56484246", "0.5585292", "0.5584498", "0.5572597", "0.5553329", "0.55408216", "0.55361027", "0.55315655", "0.5476421", "0.5474295", "0.5471295", "0.54707515", "0.5468141", "0.5459678", "0.54576546", "0.54391783" ]
0.7106652
0
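The .should / .when.called_with(...) chains in this row and the next come from the sure assertion library, which monkey-patches fluent assertions onto objects when imported. A minimal standalone sketch of the two idioms these tests rely on:

import sure  # noqa: F401 -- importing it activates the .should / .when chains

def add(a, b):
    if b is None:
        raise ValueError("b is required")
    return a + b

add(1, 2).should.equal(3)                               # value assertion
add.when.called_with(1, None).should.throw(ValueError)  # exception assertion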
Tests the API endpoint to get hashrate resale details
def test_mining_hashrate_resale_details():
    client = Client(key, secret)
    response = client.mining_hashrate_resale_details(123, "user_name")
    response.should.equal(mock_item)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_retire_rate_plan(self):\n pass", "def test_retrieve_list_resgate_to_user_authenticated(self):\n sample_resgate(user=self.user, value=500)\n sample_resgate(user=self.user, value=200)\n\n response = self.client.get(RESGATE_URL)\n\n resgates = Resgate.objects.all().order_by('quantity')\n serializer = ResgateSerializer(resgates, many=True)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data, serializer.data)", "async def test_get_rates_get(client):\n params = [('exchangeType', 'exchange_type_example')]\n headers = { \n 'Accept': 'application/json',\n 'Authorization': 'Bearer special-key',\n }\n response = await client.request(\n method='GET',\n path='/public/exchange/1/getRates',\n headers=headers,\n params=params,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')", "def test_get_rate_article(self):\n self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + self.token)\n self.client.post(\n self.rate_url,\n self.rate_details,\n format='json')\n self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + self.token_2)\n self.rate_details[\"user\"]['rate'] = 4\n self.client.post(\n self.rate_url,\n self.rate_details,\n format='json')\n response = self.client.get(\n self.view_rates_url + str(1) + \"/\",\n format='json')\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_get_rating(self):\n url = reverse('rate-game')\n data = {'igdb': self.game.igdb}\n response = self.client.get(url, data, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_mining_hashrate_resale_details_with_missing_field(params):\n client = Client(key, secret)\n client.mining_hashrate_resale_details.when.called_with(**params).should.throw(\n ParameterRequiredError\n )", "def test_mocked_get_api(self):\n c = Client()\n response = c.get(\"/apimock/mocked/api/account/154/\")\n self.assertEqual(response.status_code, 200)\n self.assertIn(\n '<table border=\"1\"><tr><th>amount</th><td>10PLN</td></tr></table>', response.content)\n response2 = c.get(\"/apimock/mocked/api/account/187/\")\n self.assertEqual(response2.status_code, 200)\n self.assertIn(\n '<table border=\"1\"><tr><th>amount</th><td>10PLN</td></tr></table>', response2.content)", "def test_validation_get_valid_resampling(self):\n self.assertIsInstance(api.validation.fetch_resampling(), dict)", "def test_get_details(self):\n restaurant_id = 23917\n with self.app.app_context():\n details = ordrin.get_details(restaurant_id)\n\n self.assertEquals(details['name'], 'Test Merchant 20130315',\n 'Check restaurant name on test details.')\n self.assertEquals(details['id'], restaurant_id,\n 'Check restaurant id on test details.')\n self.assertTrue(details['delivers'], 'Check delivery flag on test entry.')\n self.assertTrue(details['allows_asap'],\n 'Check asap flag on test details.')\n self.assertAlmostEqual(details['location'][0], 42.825685,\n 'Check latitude on test details.')\n self.assertAlmostEqual(details['location'][1], -73.879458,\n 'Check longitude on test details.')\n self.assertEquals(details['partner'], 'delivery.com',\n 'Check delivery partner on test details.')\n self.assertEquals(details['address'], '123 FAKE ST',\n 'Check address on test details.')\n self.assertTrue(False)", "def test_get_details7(self):\n pass", "def test_get_primary(self):\n response = self.client.open('/v1/primary/',\n method='GET')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def 
test_predict_house_price():\n with app.test_client()as c:\n response = c.get('/REST/api/v1.0/predict')\n assert response.status_code == 201", "def test_household_get(self):\n url = '/household/'+ self.test_id + '/'\n response = self.tester.get(url,\n content_type='application/json')\n self.assertEqual(response.status_code, 200)", "def test_limited_to_user_who_made_resgate(self):\n user2 = get_user_model().objects.create_user(\n '[email protected]',\n 'test12345'\n )\n sample_resgate(user=user2)\n sample_resgate(user=self.user)\n\n response = self.client.get(RESGATE_URL)\n\n resgates = Resgate.objects.filter(user=self.user)\n serializer = ResgateSerializer(resgates, many=True)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(response.data), 1)\n self.assertEqual(response.data, serializer.data)", "def test_get(self):\n client = RestClient(host=self.host, username='')\n rest_url = 'some/url/'\n \n # Mock good response\n with responses.RequestsMock() as rsps:\n rsps.add(responses.GET, f'{self.host}/{rest_url}', status=200,\n json={'value':\"good!\"})\n r = client.get(rest_url)", "def test_retrieve_reteta_list(self):\n sample_reteta(user=self.user)\n sample_reteta(user=self.user)\n\n res = self.client.get(RETETA_URL)\n\n retete = Reteta.objects.all().order_by('-id')\n serializer = RetetaSerializer(retete, many=True) # return list\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(res.data, serializer.data)", "def test_get_pay_in_details(self):\n pass", "def test_gelir_api(self):\n response = self.client.get(reverse('gelir-json', args=[self.sample_type]))\n self.assertContains(response, self.proband.gel_id)\n self.assertEquals(response.status_code, 200)", "def test_view_reteta_detail(self):\n reteta = sample_reteta(user=self.user)\n reteta.tags.add(sample_tag(user=self.user))\n reteta.ingredients.add(sample_ingredient(user=self.user))\n\n url = detail_url(reteta.id)\n res = self.client.get(url)\n serializer = RetetaDetailSerializer(reteta)\n self.assertEqual(res.data, serializer.data)", "def test_lti20_get_with_score_success(self):\r\n self.setup_system_xmodule_mocks_for_lti20_request_test()\r\n SCORE = 0.55 # pylint: disable=invalid-name\r\n COMMENT = u\"ಠ益ಠ\" # pylint: disable=invalid-name\r\n self.xmodule.module_score = SCORE\r\n self.xmodule.score_comment = COMMENT\r\n mock_request = self.get_signed_lti20_mock_request(\"\", method=u'GET')\r\n # Now call the handler\r\n response = self.xmodule.lti_2_0_result_rest_handler(mock_request, \"user/abcd\")\r\n # Now assert\r\n self.assertEqual(response.status_code, 200)\r\n self.assertEqual(response.json, {\"@context\": \"http://purl.imsglobal.org/ctx/lis/v2/Result\",\r\n \"@type\": \"Result\",\r\n \"resultScore\": SCORE,\r\n \"comment\": COMMENT})", "def test_get_usd_rates_succeeds(self, mock):\n\n mock.get(self.fixer_endpoint, status_code=200, text=fixture_get_usd_rates_succeeds)\n\n base = 'USD'\n r = self.simulate_get('/api/v1/rates', params={'symbol': base})\n self.assertEqual(r.status, falcon.HTTP_200)\n\n fixture = json.loads(fixture_get_usd_rates_succeeds)\n response_rates = r.json\n for response_rate in response_rates:\n sell_currency = response_rate['sell_currency']\n buy_currency = response_rate['buy_currency']\n rate = response_rate['rate']\n\n self.assertEqual(sell_currency, base)\n self.assertEqual(rate, fixture['rates'].get(buy_currency))", "def test_get_rate_article_not_found(self):\n self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + self.token)\n response = 
self.client.get(\n self.view_rates_url + str(2) + \"/\",\n format='json')\n self.assertEqual(\n 0,\n response.data[\"rates\"])\n self.assertEqual(204, status.HTTP_204_NO_CONTENT)", "def test_get_all_rate_plans(self):\n pass", "def test_get_dealer_ratings(self):\n pass", "def test_api_can_get_all_pressures(self):\n res = self.client().post('/pressures/', data=self.pressure)\n self.assertEqual(res.status_code, 201)\n res = self.client().get('/pressures/')\n self.assertEqual(res.status_code, 200)\n self.assertIn('120', str(res.data))", "def test_get_risk_profile_using_get(self):\n pass", "def get():\n\n # \\todo List of available data, fetched and processed\n\n return jsonify({'valid_resources': ['balance', 'balance_usd', 'trade_history', 'balance_norm_price_history', 'open_orders']})", "def test_yearn_api(database, ethereum_inquirer):\n # mock coingecko response\n original_request = requests.get\n\n def mock_yearn_api(url, timeout):\n \"\"\"Return only two yearn vaults for the API response\"\"\"\n if YEARN_OLD_API in url:\n return MockResponse(HTTPStatus.OK, \"\"\"[{\"inception\":14891068,\"address\":\"0x341bb10D8f5947f3066502DC8125d9b8949FD3D6\",\"symbol\":\"yvCurve-STG-USDC\",\"name\":\"yvCurve-STG-USDC 0.4.3\",\"display_name\":\"STGUSDC-f\",\"icon\":\"https://rawcdn.githack.com/yearn/yearn-assets/4db1bb6b68ad0c7a75a1b3bf01025d2c22cfbaa7/icons/multichain-tokens/1/0x341bb10D8f5947f3066502DC8125d9b8949FD3D6/logo-128.png\",\"token\":{\"name\":\"Curve.fi Factory Crypto Pool: STG/USDC\",\"symbol\":\"STGUSDC-f\",\"address\":\"0xdf55670e27bE5cDE7228dD0A6849181891c9ebA1\",\"decimals\":18,\"display_name\":\"STGUSDC-f\",\"icon\":\"https://rawcdn.githack.com/yearn/yearn-assets/4db1bb6b68ad0c7a75a1b3bf01025d2c22cfbaa7/icons/multichain-tokens/1/0xdf55670e27bE5cDE7228dD0A6849181891c9ebA1/logo-128.png\"},\"tvl\":{\"total_assets\":1066762132988328431382564,\"price\":1.613069263536325,\"tvl\":1720761.2082279222},\"apy\":{\"type\":\"convex\",\"gross_apr\":0.14584764353034685,\"net_apy\":0.09226416095055612,\"fees\":{\"performance\":0.2,\"withdrawal\":null,\"management\":0.02,\"keep_crv\":null,\"cvx_keep_crv\":0.1},\"points\":null,\"blocks\":null,\"composite\":null,\"error_reason\":null,\"staking_rewards_apr\":0},\"strategies\":[{\"address\":\"0x916011bD2d333fBA14dBB8bf0BdF01e3384FD2e6\",\"name\":\"StrategyConvexSTGUSDC\"}],\"endorsed\":true,\"version\":\"0.4.3\",\"decimals\":18,\"type\":\"v2\",\"emergency_shutdown\":false,\"updated\":1687812577,\"migration\":{\"available\":false,\"address\":\"0x341bb10D8f5947f3066502DC8125d9b8949FD3D6\"}},{\"inception\":14980240,\"address\":\"0x3B27F92C0e212C671EA351827EDF93DB27cc0c65\",\"symbol\":\"yvUSDT\",\"name\":\"yvUSDT 0.4.3\",\"display_name\":\"USDT\",\"icon\":\"https://rawcdn.githack.com/yearn/yearn-assets/4db1bb6b68ad0c7a75a1b3bf01025d2c22cfbaa7/icons/multichain-tokens/1/0x3B27F92C0e212C671EA351827EDF93DB27cc0c65/logo-128.png\",\"token\":{\"name\":\"Tether 
USD\",\"symbol\":\"USDT\",\"address\":\"0xdAC17F958D2ee523a2206206994597C13D831ec7\",\"decimals\":6,\"display_name\":\"USDT\",\"icon\":\"https://rawcdn.githack.com/yearn/yearn-assets/4db1bb6b68ad0c7a75a1b3bf01025d2c22cfbaa7/icons/multichain-tokens/1/0xdAC17F958D2ee523a2206206994597C13D831ec7/logo-128.png\"},\"tvl\":{\"total_assets\":14938928651062,\"price\":1.0000823,\"tvl\":14940158.124889985},\"apy\":{\"type\":\"v2:averaged\",\"gross_apr\":0.023362870862237983,\"net_apy\":0.018862632100866916,\"fees\":{\"performance\":0.2,\"withdrawal\":null,\"management\":0.0,\"keep_crv\":null,\"cvx_keep_crv\":null},\"points\":{\"week_ago\":0.013129557974331796,\"month_ago\":0.018862632100866916,\"inception\":0.022614793789739185},\"blocks\":{\"now\":17565983,\"week_ago\":17516180,\"month_ago\":17345663,\"inception\":15243268},\"composite\":null,\"error_reason\":null,\"staking_rewards_apr\":0},\"strategies\":[{\"address\":\"0x016919386387898E4Fa87c7c4D3324F75f178F12\",\"name\":\"0x01691938\"},{\"address\":\"0x087794F304aEB337388a40e7c382A0fEa78c47fC\",\"name\":\"Strategy_ProviderOfUSDTToNoHedgeUniV3StablesJoint(USDC-USDT)\"},{\"address\":\"0xBc04eFD0D18685BA97cFAdE4e2D3171701B4099c\",\"name\":\"StrategyLenderYieldOptimiser\"},{\"address\":\"0xE7A8Cbc43a0506d3A328393C1C30548835256d7D\",\"name\":\"Stargate-v2-USDT\"},{\"address\":\"0xde6F5b2452F94337a428c86b5D2F143383b4D573\",\"name\":\"Strategy_ProviderOfUSDTToNoHedgeBalancerTripod(bb-a-USD)\"},{\"address\":\"0x8829f62FCe1DFBfA3EB60eBE95133D5F43b9BD04\",\"name\":\"EmptyStrat\"},{\"address\":\"0xd8F414beB0aEb5784c5e5eBe32ca9fC182682Ff8\",\"name\":\"StrategyLenderYieldOptimiser\"}],\"endorsed\":true,\"version\":\"0.4.3\",\"decimals\":6,\"type\":\"v2\",\"emergency_shutdown\":false,\"updated\":1687812580,\"migration\":{\"available\":false,\"address\":\"0x3B27F92C0e212C671EA351827EDF93DB27cc0c65\"}}]\"\"\") # noqa: E501\n nonlocal original_request\n return original_request(url, timeout)\n\n with GlobalDBHandler().conn.read_ctx() as cursor:\n state_before = globaldb_get_general_cache_values(\n cursor=cursor,\n key_parts=[GeneralCacheType.YEARN_VAULTS],\n )\n\n with patch.object(requests, 'get', wraps=mock_yearn_api):\n query_yearn_vaults(db=database, ethereum_inquirer=ethereum_inquirer)\n\n with GlobalDBHandler().conn.read_ctx() as cursor:\n state_after = globaldb_get_general_cache_values(\n cursor=cursor,\n key_parts=[GeneralCacheType.YEARN_VAULTS],\n )\n\n last_queried_ts = globaldb_get_general_cache_last_queried_ts(\n cursor=cursor,\n key_parts=[GeneralCacheType.YEARN_VAULTS],\n value=str(state_after[0]),\n )\n assert last_queried_ts is not None\n\n assert state_after != state_before\n # 140 is the number of vaults at the moment of writing this test\n assert len(state_before) == 0\n assert int(state_after[0]) == 2\n\n # check that a new vault was added\n token = GlobalDBHandler.get_evm_token(\n address=string_to_evm_address('0x341bb10D8f5947f3066502DC8125d9b8949FD3D6'),\n chain_id=ChainID.ETHEREUM,\n )\n\n assert token is not None\n assert token.name == 'yvCurve-STG-USDC 0.4.3'\n assert token.symbol == 'yvCurve-STG-USDC'\n assert token.protocol == YEARN_VAULTS_V2_PROTOCOL\n assert token.started == Timestamp(1654174125)\n\n # trigger the query again and check that the timestamp was updated\n future_timestamp = datetime.datetime.now(tz=datetime.timezone.utc) + datetime.timedelta(seconds=WEEK_IN_SECONDS) # noqa: E501\n with freeze_time(future_timestamp), patch.object(requests, 'get', wraps=mock_yearn_api):\n query_yearn_vaults(db=database, 
ethereum_inquirer=ethereum_inquirer)\n\n with GlobalDBHandler().conn.read_ctx() as cursor:\n new_queried_ts = globaldb_get_general_cache_last_queried_ts(\n cursor=cursor,\n key_parts=[GeneralCacheType.YEARN_VAULTS],\n value=str(state_after[0]),\n )\n assert new_queried_ts is not None\n assert new_queried_ts > last_queried_ts", "def test_update_rate_plan(self):\n pass", "def test_smoker_latest_get(self):\n pass" ]
[ "0.63951224", "0.6310697", "0.6241689", "0.62342453", "0.60122466", "0.59688497", "0.5942244", "0.5845871", "0.5801503", "0.578017", "0.5748117", "0.5727169", "0.57210433", "0.5657565", "0.56426114", "0.56349385", "0.5620731", "0.56199795", "0.5606225", "0.5600434", "0.5599018", "0.55868405", "0.5558119", "0.55354196", "0.5524954", "0.5512352", "0.5510392", "0.5486031", "0.5483743", "0.5475922" ]
0.74108505
0
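Together the two hashrate-resale tests pin down the client contract: a complete call returns the mocked payload, and a call with missing fields raises ParameterRequiredError before any request is sent. A sketch of the kind of guard that produces that behavior; the exception name comes from the tests, but the implementation and parameter names here are illustrative, not the connector's own code:

class ParameterRequiredError(Exception):
    def __init__(self, params):
        super().__init__("Missing required parameter(s): %s" % ", ".join(params))

def check_required_parameters(pairs):
    # pairs: iterable of (value, name) tuples checked before building the request
    missing = [name for value, name in pairs if value is None or value == ""]
    if missing:
        raise ParameterRequiredError(missing)

check_required_parameters([(123, "configId"), ("user_name", "userName")])  # passes
# check_required_parameters([(None, "configId"), ("", "userName")])        # raises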
Takes a URL and an email, sends a POST request, and displays the response body
def main():
    post_url = argv[1]
    params = {'email': argv[2]}
    query_string = parse.urlencode(params)
    post_data = query_string.encode("ascii")
    with request.urlopen(post_url, post_data) as post_response:
        response_text = post_response.read()
        print(response_text.decode("UTF-8"))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def post(self):\n return send_email(request.args)", "def send_mail():\n email_address = request.args.get('emailAddress') # get email address from the form\n response = call_sendmail_endpoint(session['access_token'], session['alias'], email_address)\n print(session)\n if response == 'SUCCESS':\n show_success = 'true'\n show_error = 'false'\n else:\n print(response)\n show_success = 'false'\n show_error = 'true'\n\n session['pageRefresh'] = 'false'\n return render_template('main.html', name=session['alias'],\n emailAddress=email_address, showSuccess=show_success,\n showError=show_error)", "def email_page(data):\n subject = f\"Inkbusters form contact: {data['title']}\"\n sender = current_app.config[\"MAIL_USERNAME\"]\n recipients= ['[email protected]']\n text_body=render_template('email/email_contact.txt', data=data)\n html_body=render_template('email/email_contact.html', data=data)\n\n send_email(\n subject=subject,\n sender=sender,\n recipients=recipients,\n text_body=text_body,\n html_body=html_body\n )", "def contact():\n if request.method == 'POST':\n send_email()\n return \"\"", "def send_email(request):\n # send emails and return some manner of success response\n send(**request.params)\n return {'success': 'mail sent!'}", "def email_post(request):\n if request.user.is_authenticated:\n messages.error(request, _(\"You are already logged in.\"))\n return redirect(ta_settings.LOGIN_REDIRECT)\n\n form = EmailForm(request.POST)\n if not form.is_valid():\n messages.error(request, _(\"The email address was invalid. Please check the address and try again.\"))\n return redirect(ta_settings.LOGIN_URL)\n\n email = ta_settings.NORMALIZE_EMAIL(form.cleaned_data[\"email\"])\n if not email:\n # The user's normalization function has returned something falsy.\n messages.error(\n request, _(\"That email address is not allowed to authenticate. Please use an alternate address.\")\n )\n return redirect(ta_settings.LOGIN_URL)\n\n email_login_link(request, email, next_url=request.GET.get(\"next\", \"\"))\n\n messages.success(request, _(\"Login email sent! 
Please check your inbox and click on the link to be logged in.\"))\n return redirect(ta_settings.LOGIN_URL)", "def sendUrl(body, numberOfUrls):\n #POST parameters **On Mac Python 3.1 remove .encode() if error; on Linux Python 3.3 .encode() is required**\n postBody = (str(numberOfUrls) + \"\\n\" + body).encode()\n # create your HTTP request **sometimes 403 errors so change userAgent**\n request = Request(url, postBody)\n # submit your request\n #print(\"Sent to Google.\")\n res = build_opener().open(request)\n #print(\"Retrieved file from Google.\")\n html = res.read().decode(\"utf-8\")\n res.close()\n if not html:\n for i in range(0, numberOfUrls):\n html += \"ok\"\n # save retrieved HTML to file\n saveToFile(html)", "def do_POST(self): # noqa\n l = int(self.headers['Content-Length'])\n new_address = self.rfile.read(l).decode('utf-8')\n if check.match(new_address) is not None:\n logging.info(\"Forwarding {} to sales.\".format(new_address))\n Thread(target=self.send_email, args=(new_address, )).start()\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.send_header('Access-Control-Allow-Origin',\n 'http://numat-tech.com')\n self.end_headers()\n self.wfile.write(new_address.encode('utf-8'))\n else:\n logging.exception(\"Received malformed email: \" + new_address)\n self.send_response(500)", "def _handle_post_request(self):\n form = cgi.FieldStorage(\n fp=self.rfile,\n headers=self.headers,\n environ={'REQUEST_METHOD': 'POST'})\n\n if self.path == '/URLRequest':\n # First we check, whether the formular has been filled by\n # something behaving like a bot\n if form.has_key('URL'):\n self._send_homepage('<p class=\"warning\">Please check your input</p>')\n return\n else:\n url = form['real_URL'].value if form.has_key('real_URL') else None\n tmp = self._insert_url_to_db(url)\n if tmp:\n try:\n blocked = self._db.is_hash_blocked(tmp)\n if tmp < 0:\n self._send_database_problem()\n return\n elif blocked:\n self._send_blocked_page(blocked[3])\n return\n else:\n self._send_return_page(tmp)\n return\n except YuDatabaseError:\n self._send_database_problem()\n return\n else:\n # There was a general issue with URL\n self._send_homepage('''<p class=\"warning\">Please check your input.</p>''')\n return\n elif self.path == '/ContactUs':\n if form.has_key('URL'):\n # Here we might have a bot who likes to send the webmaster some spam\n # who most likely will be not amused about.\n template_filename = self._get_config_template('contactUsResult')\n text = read_template(\n template_filename,\n title='',\n header='Mail NOT sent',\n msg='There was an issue with your request. Are you a bot? '\n '<a href=\"/ContactUs\">Please try again</a>.')\n else:\n try:\n email = form['email'].value\n subj = form['subject'].value\n descr = form['request'].value\n if self._send_mail(subj, descr, email):\n template_filename = self._get_config_template('contactUsResult')\n text = read_template(\n template_filename,\n title='',\n header='Mail sent',\n msg=\"Your request has been sent. 
You will receive an answer soon.\")\n else:\n self._send_internal_server_error()\n return\n except KeyError:\n template_filename = self._get_config_template('contactUsResult')\n text = read_template(\n template_filename,\n title='',\n header='Mail NOT sent',\n msg='It appers you did not fill out all needed fields.\\\n <a href=\"/ContactUs\">Please try again</a>.')\n\n elif self.path == '/Show':\n short_url = form['ShortURL'].value if form.has_key('ShortURL') else None\n if short_url != None and short_url.find(\"yaturl.net\") > -1:\n tmp = short_url.rfind(\"/\")\n if tmp > -1 and short_url != \"\":\n tmp = tmp + 1\n short_url = short_url[tmp:]\n if short_url != None and short_url.isalnum():\n try:\n result = self._db.get_link_from_db(short_url)\n except YuDatabaseError:\n self._send_database_problem()\n return\n template_filename = self._get_config_template('showpage')\n if result:\n new_url = '<p><a href=\"%(result)s\">%(result)s</a></p>' % \\\n {'result': result}\n else:\n new_url = '<p class=\"warning\">No URL found for this string. Please double check your\\\n <a href=\"/ShowURL\">input and try again</a></p>'\n\n stats = self._db.get_statistics_for_hash(short_url)\n\n text = read_template(\n template_filename,\n title=SERVER_NAME,\n header=SERVER_NAME,\n msg=new_url,\n stat=stats,\n statspage=\"/stats/\" + short_url)\n else:\n self._send_404()\n return\n\n else:\n self._send_404()\n return\n\n self._send_response(text, 200)", "def _send_request(self, url, text=None, params=None):\n if params is not None:\n for k, v in params.items():\n params[k] = v.encode(\"utf-8\")\n else:\n params = {}\n\n params['email'] = self._username\n\n if self._password:\n params['pass'] = self._password\n\n if self._hash:\n params['hash'] = self._hash\n\n if text is not None:\n params['s'] = self._stripslashes(text)\n\n\n try:\n response = requests.post(url, data=params)\n except Exception as e:\n print(str(e))\n\n result = response.content.decode('utf-8')\n \n\n try:\n json_data = json.loads(result)\n except ValueError as e:\n print(str(e))\n\n if json_data['status'] == \"Success\":\n return json_data\n elif json_data['status'] == \"Failure\":\n if json_data['error'].startswith(\"Error Authenticating.\"):\n print(json_data['error'])\n else:\n print(json_data['error'])\n else:\n print(json_data)", "def apost(url, **kwargs):\n return requests.post(url, **kwargs)", "def email_body_beta_email(url):\n\tmsg = '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ebebeb\"><tbody><tr><td align=\"center\" valign=\"top\"></td></tr></tbody></table>'\n\tmsg = msg + '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ebebeb\"><tbody><tr>'\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6; border-top: 2px solid #e6e6e6\" cellspacing=\"0\" cellpadding=\"10\" width=\"600\">'\n\tmsg = msg + '<tbody>'\n\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF; padding-top:35px\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<a href=\"https://insprite.co\"><img src=\"http://ryanfbaker.com/insprite/inspriteLogoB.png\" border=\"0\" alt=\"Insprite\" align=\"center\" width=\"200px\" height=\"55px\" /></a>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</tbody>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td 
style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/spacer-1.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:50px;\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '<font style=\"font-family:Helvetica Neue;color:#555555;font-size:14px;\">Thanks for signing up for Insprite! We are excited that you\\'re interested in what we are doing over here. We are creating Insprite to be a vibrant, friendly community where you can both learn from creative people in your area, and teach your passions to others. We sincerely hope that you will be a part of it!'\n\tmsg = msg + '<br><br>We\\'re currently in the process of finishing up Insprite... and we\\'re nearly there. We\\'re just adding some bells and whistles so it\\'ll be the best possible experience.<br><br>'\n\tmsg = msg + 'We will be in touch when we\\'re ready to launch&mdash;tentatively in late 2014. We can\\'t wait to show you what we\\'ve been working on. You\\'re going to love it.<br><br>'\n\tmsg = msg + 'In the meantime, feel free to drop us a line, or follow us on our <a href=\"#\" style=\"color:#1488CC\">Blog</a>, where we will post lots of cool bloggy things (no, really, we\\'re gonna try and keep it interesting).<br><br>'\n\tmsg = msg + '<br>Spritely yours,<br>'\n\tmsg = msg + 'The Insprite Gang </font>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/facebookIcon.png\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/twitterIcon.png\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/instagramIcon.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/spacer-2.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'", "def send_request(request):\n auth()\n response = urllib2.urlopen(request)\n\n return BeautifulSoup(response).resultmessage.string", "def sendEmail(body, subject, email=\"\"):\n dest = [\"[email protected]\", \"[email protected]\"]\n if re.match(r\"\\w+@\\w+\\.\\w+\", email):\n if email not in dest:\n dest.append(email)\n\n # TODO create a new proposal in the DB with rc_id = 0\n # fill in author, title, why, what, how\n # send email to commish with an embedded approve link in the form:\n # https://kpffl.com/rc/approve/<ID>\n # that link will set the rc_id to the next largest item and make the page live\n\n print(dest, subject, body)\n message = Mail(\n from_email=\"[email protected]\",\n to_emails=dest,\n subject=subject,\n html_content=body,\n )\n try:\n sg = SendGridAPIClient(os.environ.get(\"SENDGRID_KEY\"))\n res = sg.send(message)\n except Exception as e:\n print(e, 
res)", "def openemail(event):\n import webbrowser\n webbrowser.open(emailurl)\n close(event)", "def mail(request):\n email_admin.delay('testinggg')\n return JsonResponse({\"details\":\"working\"})", "def post(self, request):\n config_name = request.POST.get('config')\n email = request.POST.get('recipient')\n config = MailConfig.objects.get(name=config_name)\n version = TemplateVersion.objects.active(config.template.name)\n message = utils.render(config_name, email, version.test_data)\n pk = utils.send(\n f'[TEST] {message.subject}',\n message.from_email,\n message.to_email,\n message.body\n )\n return JsonResponse({'id': pk})", "def fusion_api_send_email(self, body, api=None, headers=None):\n param = \"/send-email\"\n return self.email.post(body, api, headers, param)", "def send_email(self):\n server = smtplib.SMTP(\"smtp.gmail.com\", 587)\n server.ehlo()\n server.starttls()\n server.ehlo()\n server.login(\"[email protected]\", \"tdcvgycwrzthjqgj\")\n\n subject = \"Price Fell Down\"\n body = \"Check the amazon link \" + self.__product_URL\n message = f\"Subject: {subject}\\n\\n{body}\"\n server.sendmail(\n \"[email protected]\",\n self.__email,\n message\n )\n #print(\"Our mail is sent!!!!\")", "def sendTheDamnEmail(f):\n \n subject = f[\"subject\"].value\n toEmails = f[\"toEmail\"].value\n msg = f[\"msg\"].value\n \n #try:\n #mimeMsg = MIMEText(msg, \"plain\", \"utf-8\")\n #mimeMsg['Subject'] = subject\n #mimeMsg['From'] = fromEmail\n #mimeMsg['To'] = toEmails\n \n mimeMsg = MIMEMultipart('alternative')\n mimeMsg['Subject'] = Header(subject, 'UTF-8').encode()\n mimeMsg['To'] = Header(toEmails, 'UTF-8').encode()\n mimeMsg['From'] = Header(fromEmail, 'UTF-8').encode()\n\t\n part1 = MIMEText(msg, 'plain', \"utf-8\")\n #part2 = MIMEText(msg, 'html') # If you want to send a fancy HTML email, use this one also\n\t\n mimeMsg.attach(part1)\n\n sendEmail.sendEmail(fromEmail, password, toEmails,\\\n smtp, port=port, msg=mimeMsg)\n\n if logPath!=\"null\":\n logger = logEmail.EmailLogger(logPath)\n stored = logger.storePost(ip, msg, toEmails)\n\tprint \"stored\"\n print \"success\"", "def post(self, url, data):\n return self.app.post(get_url(url), data=data, follow_redirects=True)", "def email_body_recover_your_password(url):\n\tmsg = '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ebebeb\"><tbody><tr><td align=\"center\" valign=\"top\"></td></tr></tbody></table>'\n\tmsg = msg + '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ebebeb\"><tbody><tr>'\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6; border-top: 2px solid #e6e6e6\" cellspacing=\"0\" cellpadding=\"10\" width=\"600\">'\n\tmsg = msg + '<tbody>'\n\n\tmsg = msg + '<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF; padding-top:35px\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '<a href=\"https://insprite.co\"><img src=\"http://ryanfbaker.com/insprite/inspriteLogoB.png\" border=\"0\" alt=\"Insprite\" align=\"center\" width=\"200px\" height=\"55px\" /></a>'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</tbody>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '<img 
src=\"http://ryanfbaker.com/insprite/spacer-1.png\">'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"110\" width=\"600\" height=\"350\">'\n\tmsg = msg + '<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:50px;\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '<font style=\"font-family:Helvetica Neue;color:#555555;font-size:16px;\">We get it&mdash;strong passwords can be tough to remember.<br><br>'\n\tmsg = msg + 'No biggie, simply <a href=\\\"' + url + '\\\" style=\"color:#1488CC\">follow the instructions to change it.</a> and you\\'ll be good to go.<br><br>'\n\tmsg = msg + 'Didn\\'t request for a password reset? <a href=\"mailto:[email protected]\" style=\"color:#1488CC\">Give us a holler ASAP</a>.</font>'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '<img src=\"http://ryanfbaker.com/insprite/facebookIcon.png\">'\n\tmsg = msg + '<img src=\"http://ryanfbaker.com/insprite/twitterIcon.png\">'\n\tmsg = msg + '<img src=\"http://ryanfbaker.com/insprite/instagramIcon.png\">'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '<img src=\"http://ryanfbaker.com/insprite/spacer-2.png\">'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</table>'\n\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '<font style=\"font-family:Helvetica Neue;color:#555555;font-size:10px;\"> <a href=\"mailto:[email protected]\" style=\"color:#1488CC\">Contact Us</a>'\n\tmsg = msg + '| Sent by <a href=\\\"https://insprite.co\\\">Insprite</a>, California, USA. 
| <a href=\"#\" style=\"color:#1488CC\">Unsubscribe</a></font>'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '<tr> <td style=\"border-top: 0px solid #333333; border-bottom: 0px solid #FFFFFF;\">'\n\tmsg = msg + '<img width=\"596px\" src=\"http://ryanfbaker.com/insprite/footerImage.png\">'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</table>'\n\treturn msg", "def post_fixture(url=None, data_file=None):\n if url is None:\n url = 'http://localhost:5000/calendar/invites/incoming'\n temp_file = None\n if data_file is None:\n handle, temp_file = tempfile.mkstemp()\n data_file = temp_file\n os.write(\n handle,\n 'envelope[from][email protected]&\\n'\n 'headers[Subject]=This is a subject&\\n'\n 'headers[To][email protected],\\n'\n '[email protected],\\n'\n '[email protected]&\\n'\n 'plain=When: Wednesday, Apri 20, 2016 4:00 PM-4:30 PM '\n '(UTC-05:00) Eastern Time (US %26 Canada)%13'\n 'Where: An office%13'\n '%13'\n '*~*~*~*~*~*~*~*~*~*%13'\n '%13'\n '%13'.encode('utf8')\n )\n os.close(handle)\n\n print(' '.join(\n ['curl', '-v', '-X', 'POST', '--data-ascii', '@' + data_file,\n # '--trace', '-',\n url],\n ))\n subprocess.run(\n ['curl', '-v', '-X', 'POST', '--data-ascii', '@' + data_file,\n # '--trace', '-',\n url],\n )\n\n if temp_file is not None:\n os.remove(temp_file)", "def post(\n self,\n email,\n company_name,\n location,\n job_profile,\n salary,\n username,\n password,\n security_question,\n security_answer,\n notes,\n date_applied,\n status,\n):", "def PostEmails(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def request(ctx, flow):\n ctx.log(\"request\")\n #print \"REQUEST:\"\n #print flow.request._assemble()\n #print str(flow.request.headers[\"Host\"][0])\n try:\n # no windows update\n if str(flow.request.headers[\"Host\"][0]).endswith('windowsupdate.com'):\n flow.request.host = \"127.0.0.1\"\n flow.request.headers[\"Host\"] = [\"127.0.0.1\"]\n\n file = open(\"data/urls.txt\", \"a\")\n if flow.request.port == 443:\n file.write(\"HTTPS \" + str(flow.request.headers[\"Host\"][0]) + \"\\n\")\n else:\n file.write(\"http \" + str(flow.request.headers[\"Host\"][0]) + \"\\n\")\n file.close()\n\n #if 'Accept-Encoding' in flow.request.headers:\n flow.request.headers[\"Accept-Encoding\"] = ['none']\n\n form = flow.request.get_form_urlencoded()\n if form:\n file = open(\"data/forms.txt\", \"a\")\n file.write(flow.request.path + \"\\n\")\n file.write(str(form))\n file.close()\n\n except Exception as ee:\n ctx.log(str(ee))", "def receive_email_view(request):\n save_inbound_email(request.POST, request.FILES)\n return HttpResponse(200)", "def email_body_verify_account(verify_email_url):\n\tmsg = '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr><td align=\"center\" valign=\"top\"></td></tr></tbody></table>'\n\tmsg = msg + '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#fffff\"><tbody><tr>'\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6; border-top: 2px solid #e6e6e6\" cellspacing=\"0\" cellpadding=\"10\" width=\"600\">'\n\tmsg = msg + '<tbody>'\n\n\tmsg = msg + '<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF; padding-top:35px\" 
align=\"center\" valign=\"middle\">'\n\tmsg = msg + '<a href=\"https://insprite.co\"><img src=\"http://ryanfbaker.com/insprite/inspriteLogoB.png\" border=\"0\" alt=\"Insprite\" align=\"center\" width=\"200px\" height=\"55px\" /></a>'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</tbody>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '<img src=\"http://ryanfbaker.com/insprite/spacer-1.png\">'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\"><tr>'\n\tmsg = msg + '<td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:50px; padding-left:85px; padding-right:85px; padding-bottom:25px\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '<font style=\"font-family:Helvetica Neue;color:#555555;font-size:14px;\">Welcome to Insprite! We\\'re thrilled that you\\'ve joined us.<br><br>'\n\tmsg = msg + 'Insprite lets you connect with and learn from the creative people around you, and to teach your passions and skills to others. We think you\\'ll love exploring our growing community!<br><br>'\n\tmsg = msg + 'Before you explore the cool things you can learn and experience from the inspiring, creative people around you (or decide to be one of them), <a href=\\\"' + verify_email_url + '\\\">please verify your email account</a> so we know you\\'re a real breathing human.<br><br>'\n\tmsg = msg + 'If you\\'re getting this message by mistake and didn\\'t create an account, <a href=\"mailto:[email protected]\" style=\"color:#1488CC\">drop us a line</a> and we\\'ll get on it ASAP.</font>'\n\tmsg = msg + '</td></tr>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\" height=\"200\">'\n\tmsg = msg + '<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;\" align=\"center\" valign=\"top\">'\n\tmsg = msg + '<a href=\"'+ verify_email_url + '\" style=\"color:#ffffff;text-decoration:none;display:inline-block;min-height:38px;line-height:39px;padding-right:16px;padding-left:16px;background:#1488CC;font-size:14px;border-radius:999em;margin-top:15px;margin-left:5px;font-family:Garamond, EB Garamond, Georgia, serif;\" target=\"_blank\">Verify your account</a>'\n\tmsg = msg + '</td></tr></table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/facebookIcon.png\"><img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/twitterIcon.png\">'\n\tmsg = msg + '<img src=\"http://ryanfbaker.com/insprite/instagramIcon.png\"></td></tr></table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" 
width=\"600\"><tr>'\n\tmsg = msg + '<td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '<img src=\"http://ryanfbaker.com/insprite/spacer-2.png\"></td></tr></table>'\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\"><tr>'\n\tmsg = msg + '<td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '<font style=\"font-family:Helvetica Neue;color:#555555;font-size:10px;\"> <a href=\"mailto:[email protected]\" style=\"color:#1488CC\">Contact Us</a> | Sent by <a href=\"https://insprite.co\" style=\"color:#1488CC\">Insprite</a>, California, USA. | <a href=\"#\" style=\"color:#1488CC\">Unsubscribe</a></font>'\n\tmsg = msg + '</td></tr></table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\"><tr>'\n\tmsg = msg + '<td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 0px solid #FFFFFF;\" align=\"left\" valign=\"middle\">'\n\tmsg = msg + '<img width=\"596px\" src=\"http://ryanfbaker.com/insprite/footerImage.png\"></td></tr></table>'\n\treturn msg", "def send_mailshot(mailshot_data):\n\n url = settings.mailer_endpoint\n headers = {'Content-Type': 'application/json'}\n response = requests.post(url, headers=headers, data=mailshot_data)", "def post():\n contactus_json = request.get_json()\n\n try:\n dict_data = ContactUsSchema().load(contactus_json)\n dict_data['description'] = escape(dict_data['description'])\n EmailService.save_and_send(EmailType.CONTACT_US, dict_data)\n response, status = 'Received successfully', HTTPStatus.OK\n except ValidationError as project_err:\n response, status = {'systemErrors': project_err.messages}, \\\n HTTPStatus.BAD_REQUEST\n return response, status" ]
[ "0.66343814", "0.630883", "0.6291691", "0.62672955", "0.62664545", "0.6248591", "0.62434167", "0.61401975", "0.6046999", "0.5980318", "0.5960414", "0.5922105", "0.5918039", "0.5868241", "0.5860079", "0.5837165", "0.5816196", "0.58109957", "0.5810417", "0.57976925", "0.5796052", "0.576485", "0.5753836", "0.5737029", "0.5719886", "0.56959283", "0.5679247", "0.5658345", "0.5634412", "0.5630758" ]
0.71457523
0
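
The negatives in the record above all orbit one pattern: POSTing form-encoded fields to an endpoint that relays them as email. A minimal sketch of the client side of that pattern, using only the requests.post call visible in the snippets — the endpoint URL and field names are assumptions lifted from the form handlers above (form['email'], form['subject'], form['request']), not a real service:

import requests

# Hypothetical endpoint; the handlers above read form-encoded fields
# via cgi.FieldStorage and forward them as mail.
URL = "http://localhost:5000/ContactUs"

def send_contact(email, subject, message):
    # requests.post encodes the dict as application/x-www-form-urlencoded,
    # matching what the server-side handlers above expect.
    response = requests.post(URL, data={
        "email": email,
        "subject": subject,
        "request": message,
    })
    response.raise_for_status()
    return response.text

# Example usage (assumes a server is actually listening on URL):
# print(send_contact("user@example.com", "Hello", "Just testing"))
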
Return stock move name by type.
def next_move(ttype):
    count = db.session.query(StockMove.id).count() + 1
    return str('SO/' if ttype == 'sale' else 'PO/') + str(count)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def typeToName(type: int) -> unicode:\n ...", "def get_item_name(sp, item_type, item_id):\n if item_type == 'playlist':\n name = sp.playlist(playlist_id=item_id, fields='name').get('name')\n elif item_type == 'album':\n name = sp.album(album_id=item_id).get('name')\n elif item_type == 'track':\n name = sp.track(track_id=item_id).get('name')\n return sanitize(name)", "def _get_type_name(self, st_type):\n if st_type <= 244: return 'str' + str(st_type)\n return self._type_names[st_type]", "def onchange_move_type(self, cr, uid, ids, type, context=None):\n if context is None:\n context = {}\n location_id = False\n location_dest_id = False\n if context.get('location_id') or context.get('location_dest_id'):\n location_id = context.get('location_id')\n location_dest_id = context.get('location_dest_id')\n return {\n 'value': {\n 'location_id': location_id or self._get_default_location(cr, uid, field='location_id', context=context),\n 'location_dest_id': location_dest_id or self._get_default_location(cr, uid, field='location_dest_id', context=context)}\n }\n elif context.get('picking_id'):\n return {\n 'value': {\n 'location_id': self._get_default_location(cr, uid, field='location_id', context=context),\n 'location_dest_id': self._get_default_location(cr, uid, field='location_dest_id', context=context)}\n }\n else:\n return super(stock_move, self).onchange_move_type(cr, uid, ids, type, context=context)\n return {'value':{'location_id': source_location and source_location[1] or False, 'location_dest_id': dest_location and dest_location[1] or False}}", "def filterToName(type: int) -> unicode:\n ...", "def get_name(self, _return_type):\n return '{0} - v{1}'.format(_return_type.name, _return_type.version)", "def _get_type_name(self, st_type):\n if st_type <= 2045: return 'str' + str(st_type)\n return self._type_names[st_type]", "def type_name(self):\n return self._type_name", "def getNameFromType(self, *args):\n return _libsbml.ASTBasePlugin_getNameFromType(self, *args)", "def get_type_of_name(text, item_type):\n article = nlp(text)\n labels = [x.label_ for x in article.ents]\n [(x.orth_,x.pos_, x.lemma_) for x in [y \n for y\n in nlp(text) \n if not y.is_stop and y.pos_ != 'PUNCT']]\n parts_of_speech = dict([(str(x), x.label_) for x in nlp(text).ents])\n names = []\n for (key, value) in parts_of_speech.items() :\n # entity_type for people: 'PERSON'\n # entity_type for movie: 'WORK_OF_ART'\n if(value == item_type) :\n names.append(key)\n# print(names)\n return names", "def type_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"type_name\")", "def get_name(self) -> str:\n def _seg2():\n if self.name:\n return self.name\n else:\n try:\n return self.player.title\n except AttributeError:\n return 'No title specified'\n try:\n if self.player.title == 'translate_tts':\n return 'Speech'\n else:\n return _seg2()\n except AttributeError:\n return _seg2()", "def get_by_move_type(character: dict, move_type: str) -> list:\n\n move_json = get_character_movelist(character)\n moves = list(filter(lambda x: (move_type in x[\"Tags\"]), move_json))\n\n if moves:\n move_list = []\n for move in moves:\n move_list.append(move['Command'])\n return list(set(move_list))\n else:\n return []", "def get_move_type(clicked_tile_position, blank_position):\n move_type = None # will hold move type\n\n clicked_row = clicked_tile_position[0] # get clicked row number\n clicked_col = clicked_tile_position[1] # get clicked column number\n\n blank_row = blank_position[0] # get blank row number\n blank_col = 
blank_position[1] # get blank column number\n\n # check UP or DOWN\n if clicked_row > blank_row and clicked_col == blank_col: # DOWN move\n move_type = 'down'\n elif clicked_row < blank_row and clicked_col == blank_col: # UP move\n move_type = 'up'\n \n # check LEFT or RIGHT\n if clicked_col > blank_col and clicked_row == blank_row: # RIGHT move\n move_type = 'right'\n elif clicked_col < blank_col and clicked_row == blank_row: # LEFT move\n move_type = 'left'\n \n return move_type", "def type_name(self) -> Optional[str]:\n return pulumi.get(self, \"type_name\")", "def type_name(self):\n return self.TYPE_NAMES[self.type]", "def get_ctor(piece_type_str: str):\n if piece_type_str == \"PAWN\":\n return Pawn\n if piece_type_str == \"ROOK\":\n return Rook\n if piece_type_str == \"HORSE\":\n return Horse\n if piece_type_str == \"BISHOP\":\n return Bishop\n if piece_type_str == \"KING\":\n return King\n if piece_type_str == \"QUEEN\":\n return Queen", "def event_type_name(self, event_type):\n return irfman.IrfManager.event_type_names[event_type]", "def _get_type_name(type_):\n # type: (type) -> str\n name = repr(type_)\n if name.startswith(\"<\"):\n name = getattr(type_, \"__qualname__\", getattr(type_, \"__name__\", \"\"))\n return name.rsplit(\".\", 1)[-1] or repr(type_)", "def name(self) -> str:\n station_name = self._get_station_name()\n return f\"{station_name} {self._fuel_type}\"", "def getName(self,item):\n return item.s", "def name(self):\n return f\"{self._tc_object.name} {SENSOR_TYPES[self.type][0]}\"", "def name(self) -> str:\n return self.type_data.name", "def type_name(self):\n # TODO(peria): Replace with exceptions.NotImplementedError() after shipping.\n assert 'type_name() is not implemented for class %s' % (type(self))", "def get_move(self, find_move_name):\n frame_data = self._get_frame_data()\n sprites = self._get_sprites()\n\n # Need to check both names separately\n for move in frame_data.keys():\n if '\"' in find_move_name:\n temp_move_name = find_move_name.replace('\"', '')\n if temp_move_name == move:\n frame_data_name = move\n break\n else:\n continue\n elif find_move_name.lower() == move.lower():\n frame_data_name = move\n break\n\n else:\n for move in frame_data.keys():\n if find_move_name.lower() in move.lower():\n frame_data_name = move\n break\n else:\n raise MoveNotFound\n\n sprite_name = None\n\n # temporary fix for the 214/236B/22x/5AD meme\n if '214b' in frame_data_name.lower() and not '214bc' in frame_data_name.lower():\n for move in sprites.keys():\n if '214A/B' in move:\n sprite_name = move\n break\n elif '236b' in frame_data_name.lower() and not '236bc' in frame_data_name.lower():\n for move in sprites.keys():\n if '236A/B' in move:\n sprite_name = move\n break\n\n elif '22' in frame_data_name.lower():\n for move in sprites.keys():\n if '22A/B' in move and '22c' not in frame_data_name.lower():\n sprite_name = move\n break\n elif '22A/B/C' in move and '22c' in frame_data_name.lower():\n sprite_name = move\n break\n\n elif 'reversal' in frame_data_name.lower():\n for move in sprites.keys():\n if '5AD' in move:\n sprite_name = move\n break\n\n for move in sprites.keys():\n if sprite_name is not None:\n break\n if 'j.' in frame_data_name.lower() and ' ' in frame_data_name:\n for split_name in frame_data_name.split(' '):\n if move.lower() == split_name.lower():\n sprite_name = move\n break\n elif move.lower() == frame_data_name.lower():\n sprite_name = move\n break\n else:\n for move in sprites.keys():\n if sprite_name is not None:\n break\n if 'j.' 
in frame_data_name.lower() and ' ' in frame_data_name:\n for split_name in frame_data_name.split(' '):\n if move.lower() in split_name.lower():\n sprite_name = move\n break\n elif move.lower() in frame_data_name.lower() and '22' not in find_move_name:\n print('ok')\n sprite_name = move\n break\n elif find_move_name.lower() in move.lower():\n sprite_name = move\n break\n else:\n sprite_name = None\n\n if sprite_name is None:\n sprite = ''\n else:\n sprite = self._get_high_quality_sprite(sprites[sprite_name])\n\n return {\n frame_data_name: {\n 'fd': frame_data[frame_data_name],\n 'sprite': sprite\n }\n }", "def get_name(self):\n return str(self.comparison_type)", "def get_name(cls):\n\t\treturn '' if cls is SAM3X else cls.__name__", "def get_name(self, op_type):\n\n def _gen(t):\n t = t.lower()\n if t not in self.local_op_namespace:\n self.local_op_namespace[t] = START_IDX\n suffix = \"\"\n else:\n self.local_op_namespace[t] += 1\n suffix = f\"{self.local_op_namespace[t] - 1}\"\n\n return f\"{self._get_name(t)}{suffix}\"\n\n new_name = _gen(op_type)\n while new_name in self.local_var_namespace:\n new_name = _gen(op_type)\n\n self.local_var_namespace.add(new_name)\n return new_name", "def type_name(self):\n return self.TYPE_NAMES.get(self.type, \"Unknown\")", "def name_get(self):\n if isinstance(self._ids, (int)):\n ids = [self._ids]\n if not self.ids :\n return []\n res = []\n data_move = self.env['account.wh.src'].browse(\n )\n for move in data_move:\n if not move.name:\n if move.number:\n name = move.number\n else:\n name = 'CRS * ID = ' + str(move.id)\n else:\n name = move.name\n res.append((move.id, name))\n return res" ]
[ "0.6108094", "0.59010094", "0.5656677", "0.555516", "0.5488487", "0.547423", "0.5448434", "0.54116297", "0.5408066", "0.5407569", "0.5393827", "0.53872436", "0.53552365", "0.53311074", "0.5316032", "0.53117967", "0.5291321", "0.5275399", "0.5212203", "0.51964265", "0.51749265", "0.517231", "0.51679945", "0.5165942", "0.51647246", "0.51563776", "0.51547474", "0.5154572", "0.5147813", "0.51441467" ]
0.63632435
0
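
The document above derives a move name from a running row count. A self-contained sketch of the same scheme with the database stubbed out — db and StockMove belong to the surrounding (unshown) application, so a plain list of existing moves stands in for the count query:

def next_move_stub(ttype, existing_moves):
    # Stand-in for db.session.query(StockMove.id).count():
    # existing_moves is simply a list of already-created move names.
    count = len(existing_moves) + 1
    return ('SO/' if ttype == 'sale' else 'PO/') + str(count)

# The third move overall, a purchase, gets 'PO/3'; the first sale gets 'SO/1'.
assert next_move_stub('purchase', ['SO/1', 'PO/2']) == 'PO/3'
assert next_move_stub('sale', []) == 'SO/1'
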
Load and return the vowel training dataset.

Returns
-------
(X_train, y_train, X_test, y_test) : Tuple
    A tuple of data and target.
def load_vowel():
    train = _load_vowel_train()
    test = _load_vowel_test()
    return (train[0], train[1].reshape(-1, 1), test[0], test[1].reshape(-1, 1))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _load_vowel_test():\n vowel_data = np.loadtxt(_VOWEL_TEST_PATH, delimiter=',', skiprows=1)\n X = vowel_data[:, -10:]\n y = vowel_data[:, 1].astype(int)\n return (X, y)", "def load_data():\n X = load_pickle(config['image_paths']['train_images_pickle'])\n y = load_train_labels()\n y = to_categorical(y)\n test_indices = np.random.choice(len(X), int(len(X) * float(config['model']['test_size'])), replace=False)\n X_train = np.asarray([e for idx, e in enumerate(X) if idx not in test_indices])\n X_test = np.asarray([e for idx, e in enumerate(X) if idx in test_indices])\n y_train = np.asarray([e for idx, e in enumerate(y) if idx not in test_indices])\n y_test = np.asarray([e for idx, e in enumerate(y) if idx in test_indices])\n return X_train, y_train, X_test, y_test", "def load_data(): \n\tdf = pandas.read_csv('data/iris.data', header=None)\n\ty = df.iloc[0:df.shape[0], 4].values\n\ty = np.where(y == 'Iris-setosa', 0, y)\n\ty = np.where(y == 'Iris-versicolor', 1, y)\n\ty = np.where(y == 'Iris-virginica', 2, y)\n\tx = df.iloc[0:df.shape[0], 0:4].values\n\tx = tuple(x)\n\ty = tuple(y)\n\ttraining_inputs = x[0:40] + x[50:90] + x[100:140]\n\ttraining_results = y[0:40] + y[50:90] + y[100:140]\n\ttraining_data = (training_inputs, training_results)\n\ttest_inputs = x[40:50] + x[90:100] + x[140:150]\n\ttest_results = y[40:50] + y[90:1000] + y[140:150]\n\ttest_data = (test_inputs, test_results)\n\treturn (training_data, test_data)", "def load_data(y_name='Species'):\n train_path = tf.keras.utils.get_file(args.TRAIN_URL.split('/')[-1], args.TRAIN_URL)\n test_path = tf.keras.utils.get_file(args.TEST_URL.split('/')[-1], args.TEST_URL)\n\n train = pd.read_csv(train_path, names=args.CSV_COLUMN_NAMES, header=0)\n train_x, train_y = train, train.pop(y_name)\n\n test = pd.read_csv(test_path, names=args.CSV_COLUMN_NAMES, header=0)\n test_x, test_y = test, test.pop(y_name)\n\n return (train_x, train_y), (test_x, test_y)", "def get_x_y() -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:\n logger.log('Loading Dataset...')\n x_train, y_train = helpers.datasets.load_voice()\n logger.log(str(len(y_train)) + ' train data loaded')\n\n x_test, y_test = None, None\n # x_test, y_test = helpers.datasets.load_voice(train=False)\n # logger.log(str(len(y_test)) + ' test data loaded')\n\n return x_train, y_train, x_test, y_test", "def load_dataset():\n temp = gzip.open('mnist.pkl.gz')\n train, val , test = pickle.load(temp,encoding='latin1')\n temp.close()\n train_inp = [np.reshape(x, (784,1)) for x in train[0]]\n train_outp = [one_hot(y) for y in train[1]]\n training_data = zip(train_inp, train_outp)\n validation_inp = [np.reshape(x, (784, 1)) for x in val[0]]\n validation_data = zip(validation_inp, val[1])\n test_inp = [np.reshape(x, (784, 1)) for x in test[0]]\n test_data = zip(test_inp, test[1])\n return (training_data,validation_data,test_data)", "def generate_train_test(self):\n x, y = self.read_data()\n x_train, y_train, x_test, y_test = self.sample_data(x, y)\n self.train = (x_train, y_train)\n self.test = (x_test, y_test)", "def convert_data_to_examples(train, test, data_column, label_column):\r\n train_InputExamples = train.apply(lambda x: InputExample(guid=None,\r\n text_a = x[data_column],\r\n text_b = None,\r\n label = x[label_column]), axis = 1)\r\n\r\n\r\n validation_InputExamples = test.apply(lambda x: InputExample(guid=None,\r\n text_a = x[data_column],\r\n text_b = None,\r\n label = x[label_column]), axis = 1)\r\n\r\n return train_InputExamples, validation_InputExamples", "def load_data():\n # 
Load and preprocess data\n sentences, labels = load_data_and_labels()\n sentences_padded = pad_sentences(sentences)\n vocabulary, vocabulary_inv = build_vocab(sentences_padded)\n x, y = build_input_data(sentences_padded, labels, vocabulary)\n return [x, y, vocabulary, vocabulary_inv]", "def load_data():\n # Load and preprocess data\n x_text_train1, x_text_train2, x_text_dev1, x_text_dev2, y_train, y_dev = load_data_and_labels_without_shuffled()\n\n x_text_train1 = split_sentence(x_text_train1)\n x_text_train2 = split_sentence(x_text_train2)\n x_text_dev1 = split_sentence(x_text_dev1)\n x_text_dev2 = split_sentence(x_text_dev2)\n\n x_text_train1 = pad_sentences(x_text_train1)\n x_text_train2 = pad_sentences(x_text_train2)\n x_text_dev1 = pad_sentences(x_text_dev1)\n x_text_dev2 = pad_sentences(x_text_dev2)\n\n # sentences = x_text_train1 + x_text_train2 + x_text_dev1 + x_text_dev2\n # vocabulary, vocabulary_inv = build_vocab(sentences)\n # x_text_train1 = build_input_data(x_text_train1, vocabulary)\n # x_text_train2 = build_input_data(x_text_train2, vocabulary)\n # x_text_dev1 = build_input_data(x_text_dev1, vocabulary)\n # x_text_dev2 = build_input_data(x_text_dev2, vocabulary)\n\n x_train1 = sentence_word2vec(x_text_train1)\n x_train2 = sentence_word2vec(x_text_train2)\n x_dev1 = sentence_word2vec(x_text_dev1)\n x_dev2 = sentence_word2vec(x_text_dev2)\n\n y_train = np.array(y_train)\n y_dev = np.array(y_dev)\n # return [x_text_train1, x_text_train2, x_text_dev1, x_text_dev2, y_train, y_dev, vocabulary, vocabulary_inv]\n\n return [x_train1, x_train2, x_dev1, x_dev2, y_train, y_dev]", "def load_data():\n (trainx, trainy), (valx, valy), (testx, testy) = pickle.load(gzip.open(\"data/mnist_one_hot.pkl.gz\"),\n encoding=\"latin1\")\n trainy = np.argmax(trainy, axis=1)\n valy = np.argmax(valy, axis=1)\n testy = np.argmax(testy, axis=1)\n trainx = trainx * 2 - 1\n valx = valx * 2 - 1\n testx = testx * 2 - 1\n return (trainx.reshape(-1, 1, 28, 28), trainy), (valx.reshape(-1, 1, 28, 28), valy), (testx.reshape(-1, 1, 28, 28),\n testy)", "def load_data(self) -> tuple:\n self.read_path = Path(os.environ[\"DATA_PATH\"]) / \"characters\"\n self.pretrain_path = Path(os.environ[\"FONT_DATA\"]) / \"training\"\n self.dataset_builder.build_data_set()\n X_pretrain, y_pretrain, X_train, y_train, X_dev, y_dev, X_test, y_test = tuple(\n [] for l in range(8)\n )\n\n for letter in self.hebrew.letter_li:\n pretrain_images = glob(f\"{Path(self.pretrain_path/letter)}/*.jpeg\")\n train_images = glob(f'{Path(self.read_path/\"train\"/letter)}/*.jpg')\n dev_images = glob(f'{Path(self.read_path/\"dev\"/letter)}/*.jpg')\n test_images = glob(f'{Path(self.read_path/\"test\"/letter)}/*.jpg')\n\n # pretrain data\n for img in pretrain_images:\n image = cv2.imread(img)\n image = cv2.resize(image, self.img_size)\n X_pretrain.append(image)\n y_pretrain.append(self.hebrew.letter_li.index(letter))\n\n # training data\n for img in train_images:\n image = cv2.imread(img)\n image = cv2.resize(image, self.img_size)\n X_train.append(image)\n y_train.append(self.hebrew.letter_li.index(letter))\n\n # dev data\n for img in dev_images:\n image = cv2.imread(img)\n image = cv2.resize(image, self.img_size)\n X_dev.append(image)\n y_dev.append(self.hebrew.letter_li.index(letter))\n\n # test data\n for img in test_images:\n image = cv2.imread(img)\n image = cv2.resize(image, self.img_size)\n X_test.append(image)\n y_test.append(self.hebrew.letter_li.index(letter))\n\n return (\n np.array(X_pretrain),\n np.array(y_pretrain),\n np.array(X_train),\n 
np.array(y_train),\n np.array(X_dev),\n np.array(y_dev),\n np.array(X_test),\n np.array(y_test),\n )", "def load_data():\r\n train_dataset = h5py.File('train_catvnoncat.h5', \"r\") # Change the directory as per your system\r\n train_set_x_orig = np.array(train_dataset[\"train_set_x\"][:]) # your train set features\r\n train_set_y_orig = np.array(train_dataset[\"train_set_y\"][:]) # your train set labels\r\n\r\n test_dataset = h5py.File('test_catvnoncat.h5', \"r\") # Change the directory as per your system\r\n test_set_x_orig = np.array(test_dataset[\"test_set_x\"][:]) # your test set features\r\n test_set_y_orig = np.array(test_dataset[\"test_set_y\"][:]) # your test set labels\r\n\r\n classes = np.array(test_dataset[\"list_classes\"][:]) # the list of classes\r\n \r\n train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))\r\n test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))\r\n \r\n return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes", "def load_dataset():\n\n train_dataset = h5py.File('datasets/train_catvnoncat.h5', \"r\")\n train_set_x_orig = np.array(train_dataset[\"train_set_x\"][:]) # your train set features\n train_set_y_orig = np.array(train_dataset[\"train_set_y\"][:]) # your train set labels\n\n test_dataset = h5py.File('datasets/test_catvnoncat.h5', \"r\")\n test_set_x_orig = np.array(test_dataset[\"test_set_x\"][:]) # your test set features\n test_set_y_orig = np.array(test_dataset[\"test_set_y\"][:]) # your test set labels\n\n classes = np.array(test_dataset[\"list_classes\"][:]) # the list of classes\n \n train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))\n test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))\n \n return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes", "def load_dataset_train():\n df_train = load_csv_file(\"31_train.csv\")\n df_train_target = load_csv_file(\"31_target_train.csv\")\n\n return df_train.values, df_train_target.values", "def load_data():\n\n # Load data\n # You can create this Numpy datafile by running the create_validation_sample.py script\n df = h5py.File(data_fn, \"r\")\n imgs_validation = df[\"imgs_validation\"]\n msks_validation = df[\"msks_validation\"]\n img_indicies = range(len(imgs_validation))\n\n \"\"\"\n OpenVINO uses channels first tensors (NCHW).\n TensorFlow usually does channels last (NHWC).\n So we need to transpose the axes.\n \"\"\"\n input_data = imgs_validation\n msks_data = msks_validation\n return input_data, msks_data, img_indicies", "def load_data():\n df = pd.read_csv(\"../../Data/breast_cancer_data/data.csv\")\n\n cols = df.columns\n X = df[cols[2:-1]].to_numpy()\n y = df[cols[1]].to_numpy()\n y = (y=='M').astype(np.int) * 2 - 1\n\n train_X = X[:-150]\n train_y = y[:-150]\n\n test_X = X[-150:]\n test_y = y[-150:]\n\n return train_X, train_y, test_X, test_y", "def _get_training_data(self) -> tuple:\n\n training_data = self._data.loc[self._data.target == 'train'].drop('target', axis=1)\n y = training_data.y_label.to_numpy()\n X = training_data.drop('y_label', axis=1).to_numpy()\n\n return X, y", "def load_train_data():\r\n X_train = np.load('data/train/X_train.npy')\r\n scaling_train = np.load('data/train/scaling_train.npy')\r\n ids_train = np.load('data/train/ids_train.npy')\r\n y_train = np.load('data/train/y_train.npy')\r\n\r\n seed = np.random.randint(1, 10e6)\r\n np.random.seed(seed)\r\n np.random.shuffle(X_train)\r\n np.random.seed(seed)\r\n 
np.random.shuffle(scaling_train)\r\n np.random.seed(seed)\r\n np.random.shuffle(ids_train)\r\n np.random.seed(seed)\r\n np.random.shuffle(y_train)\r\n\r\n return X_train, scaling_train, ids_train, y_train", "def load_data(self):\n with open('data/fordTrain.csv') as f:\n data = csv.reader(f, delimiter=',')\n train = [x for i, x in enumerate(data) if i > 0] \n # Extract features and target variable separately\n trainx = [x[3:] for x in train]\n trainy = [x[2] for x in train]\n\n with open('data/fordTest.csv') as f:\n data = csv.reader(f, delimiter=',')\n testx = [x[3:] for i, x in enumerate(data) if i > 0] \n\n with open('data/Solution.csv') as f:\n data = csv.reader(f, delimiter=',')\n testy = [x[2] for i, x in enumerate(data) if i > 0] \n\n # Extract features and target variable, convert to numpy array\n trainx = np.asarray(trainx, dtype=np.float32)\n trainy = np.asarray(trainy, dtype=np.int8)\n testx = np.asarray(testx, dtype=np.float32)\n testy = np.asarray(testy, dtype=np.int8)\n\n # Return training and test sets\n trainSet = Dataset(trainx, trainy)\n testSet = Dataset(testx, testy)\n return trainSet, testSet", "def load(train_file, test_file):\n print('\\nLoad the raw training and test set data...')\n y_train, tx_train, ids_train = load_csv_data(train_file)\n y_test, tx_test, ids_test = load_csv_data(test_file)\n print('\\n... finished.')\n return y_train, tx_train, ids_train, y_test, tx_test, ids_test", "def get_training_data():\n \n X = pd.read_csv('../data/train_values.csv').set_index('sequence_id')\n y = pd.read_csv('../data/train_labels.csv').set_index('sequence_id')\n return X, y", "def learn_vowels(self, data=None):\n #pdb.set_trace()\n if not data:\n data = self.memory\n # find acoustic prototypes by clustering over stored acoustic reps\n raw_data = data.reshape(4 * len(self.stems), 2)\n ac_vowels, ac_spread = vq.kmeans(raw_data, 4)\n # find articulatory reps by comparing synthesized output vowels to\n # acoustic prototypes\n # start with candidate list of \"all possible\" articulations\n tmp_ar = N.empty((1, 3))\n rd = 0.0\n for hi in [0.0, 1.0]:\n for bk in [0.0, 1.0]:\n tmp_ar = N.vstack((tmp_ar, N.array([hi, bk, rd])))\n tmp_ar = tmp_ar[1:]\n while len(self.vowel_map) < 4:\n # no noise (since this shouldn't be running through the \"mouth\")\n tmp_ac = self.perceive(self.acoustify(tmp_ar))\n for v in ac_vowels:\n dists = N.sqrt(N.sum((v - tmp_ac)**2, axis=1))\n d = 0\n while True:\n if dists[d] < (2 * ac_spread):\n # found an articulatory prototype\n self.vowel_map[tuple(v)] = tmp_ar[d]\n # remove it from the candidate list\n tmp_ar = N.vstack((tmp_ar[:d], tmp_ar[d + 1:]))\n tmp_ac = N.vstack((tmp_ac[:d], tmp_ac[d + 1:]))\n break\n d += 1\n if d == len(dists):\n # take the best of the bad ones\n index = N.argmin(dists)\n self.vowel_map[tuple(v)] = tmp_ar[index]\n break\n self.vowel_spread = ac_spread\n return self.vowel_map", "def learn(self, Xtrain, ytrain):", "def read(train_path, test_path, label_name):\n train_dataset = pd.read_csv(train_path)\n test_dataset = pd.read_csv(test_path)\n\n train_labels = train_dataset.pop(label_name)\n\n imputer = DataFrameImputer().fit(train_dataset)\n train_dataset = imputer.transform(train_dataset)\n test_dataset = imputer.transform(test_dataset)\n\n train_dataset = pd.get_dummies(train_dataset)\n test_dataset = pd.get_dummies(test_dataset)\n\n train_dataset = train_dataset.drop(train_dataset.columns.difference(test_dataset.columns), axis=1)\n test_dataset = test_dataset.drop(test_dataset.columns.difference(train_dataset.columns), 
axis=1)\n\n scaler = StandardScaler().fit(train_dataset)\n train_dataset = scaler.transform(train_dataset)\n test_dataset = scaler.transform(test_dataset)\n\n return train_dataset, train_labels, test_dataset", "def load_MNIST(filename_train='train.csv', filename_test='test.csv'):\n # Load training data\n reader = csv.reader(open(filename_train,\"rb\"), delimiter=',')\n reader.next()\n x = list(reader)\n\n xs = []\n ys = []\n for i in xrange(len(x)):\n ys.append([x[i][0]])\n xs.append(x[i][1:])\n Xtr = numpy.array(xs)\n Ytr = numpy.array(ys)\n\n # Load test data\n reader = csv.reader(open(filename,test,\"rb\"), delimiter=',')\n reader.next()\n x = list(reader)\n xs = []\n ys = []\n for i in xrange(len(x)):\n ys.append([x[i][0]])\n xs.append(x[i][1:])\n Xte = numpy.array(xs)\n Yte = numpy.array(ys)\n\n return Xtr,Ytr,Xte,Yte", "def load_data():\n f = gzip.open('mnist.pkl.gz', 'rb')\n training_data, validation_data, test_data = pickle.load(f, encoding=\"latin1\")\n f.close()\n \n X_train = [np.reshape(x, (784, 1)) for x in training_data[0]]\n Y_train = [vectorized_result(y) for y in training_data[1]]\n \n X_validation = [np.reshape(x, (784, 1)) for x in validation_data[0]]\n Y_validation = validation_data[1]\n \n X_test = [np.reshape(x, (784, 1)) for x in test_data[0]]\n Y_test = test_data[1]\n \n return (X_train, Y_train, X_validation, Y_validation, X_test, Y_test)", "def __init__(self,\n x_train,\n y_train,\n train_indices,\n x_test,\n y_test,\n test_indices,\n x_unlabel=None,\n y_unlabel=None,\n unlabel_indices=None,\n y_train_str=None,\n y_test_str=None):\n self._x_train = x_train\n self._train_indices = train_indices\n self._y_train = y_train\n self._x_test = x_test\n self._y_test = y_test\n self._test_indices = test_indices\n self._x_unlabel = x_unlabel\n self._y_unlabel = y_unlabel\n self._unlabel_indices = unlabel_indices\n self._y_train_str = y_train_str\n self._y_test_str = y_test_str", "def load_data():\n\n print('Loading and Visualizing Data ...')\n\n file_name = path.join(getcwd(), 'ex3', 'src', 'data', 'ex3data1')\n data = scipy.io.loadmat(file_name)\n\n # training data stored in arrays X, y\n # y should be a row vector of labels\n return data['X'], data['y'].T[0]", "def test_dataset_from_file(train_dataset):\n dummy = \"justo. Praesent luctus. Curabitur egestas nunc sed libero. Proin sed\"\n assert train_dataset[0][0] == dummy\n assert train_dataset[0][1] == '6'" ]
[ "0.7972531", "0.6284241", "0.62223625", "0.61976546", "0.6173541", "0.6136231", "0.6088554", "0.60739297", "0.60504085", "0.60448134", "0.60420364", "0.60369146", "0.60268086", "0.60214686", "0.60152215", "0.6013476", "0.6010893", "0.5968949", "0.5953889", "0.5910949", "0.589535", "0.58857495", "0.58570874", "0.58107865", "0.5791524", "0.5782802", "0.57761437", "0.57728124", "0.57667637", "0.5729273" ]
0.8670741
0
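
The loaders in this record share one CSV convention, spelled out in _load_vowel_test among the negatives above: the integer class label sits in column 1 and the ten features occupy the last ten columns. A runnable sketch of that parsing on two synthetic rows — the header and values here are made up for illustration:

import io
import numpy as np

# Two synthetic rows in the layout _load_vowel_test expects:
# column 1 holds the class label, the last 10 columns hold features.
csv = io.StringIO(
    "row,y,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10\n"
    "0,3,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0\n"
    "1,7,1.0,0.9,0.8,0.7,0.6,0.5,0.4,0.3,0.2,0.1\n"
)
data = np.loadtxt(csv, delimiter=',', skiprows=1)
X = data[:, -10:]
y = data[:, 1].astype(int)
assert X.shape == (2, 10)
assert list(y) == [3, 7]
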
Load and return the vowel testing dataset.

Returns
-------
(X, y) : Tuple
    A tuple of data and target.
def _load_vowel_test():
    vowel_data = np.loadtxt(_VOWEL_TEST_PATH, delimiter=',', skiprows=1)
    X = vowel_data[:, -10:]
    y = vowel_data[:, 1].astype(int)
    return (X, y)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_vowel():\n train = _load_vowel_train()\n test = _load_vowel_test()\n return (train[0], train[1].reshape(-1, 1), test[0], test[1].reshape(-1, 1))", "def learn_vowels(self, data=None):\n #pdb.set_trace()\n if not data:\n data = self.memory\n # find acoustic prototypes by clustering over stored acoustic reps\n raw_data = data.reshape(4 * len(self.stems), 2)\n ac_vowels, ac_spread = vq.kmeans(raw_data, 4)\n # find articulatory reps by comparing synthesized output vowels to\n # acoustic prototypes\n # start with candidate list of \"all possible\" articulations\n tmp_ar = N.empty((1, 3))\n rd = 0.0\n for hi in [0.0, 1.0]:\n for bk in [0.0, 1.0]:\n tmp_ar = N.vstack((tmp_ar, N.array([hi, bk, rd])))\n tmp_ar = tmp_ar[1:]\n while len(self.vowel_map) < 4:\n # no noise (since this shouldn't be running through the \"mouth\")\n tmp_ac = self.perceive(self.acoustify(tmp_ar))\n for v in ac_vowels:\n dists = N.sqrt(N.sum((v - tmp_ac)**2, axis=1))\n d = 0\n while True:\n if dists[d] < (2 * ac_spread):\n # found an articulatory prototype\n self.vowel_map[tuple(v)] = tmp_ar[d]\n # remove it from the candidate list\n tmp_ar = N.vstack((tmp_ar[:d], tmp_ar[d + 1:]))\n tmp_ac = N.vstack((tmp_ac[:d], tmp_ac[d + 1:]))\n break\n d += 1\n if d == len(dists):\n # take the best of the bad ones\n index = N.argmin(dists)\n self.vowel_map[tuple(v)] = tmp_ar[index]\n break\n self.vowel_spread = ac_spread\n return self.vowel_map", "def test_load_UCR_UEA_dataset():\n X, y = load_UCR_UEA_dataset(name=\"UnitTest\")\n assert isinstance(X, pd.DataFrame) and isinstance(y, np.ndarray)\n assert X.shape == (42, 1) and y.shape == (42,)", "def generate_vowel():\n return random.sample(['a', 'e', 'i', 'o', 'u', 'y'], 1)", "def vowels(self):\n vas = []\n file = self.read()\n words = re.sub(\"[aeiouAEIOU]\",\" \", file).split(\" \")\n for h_u in words:\n if h_u != \"\":\n vas.append(h_u)\n self.print(vas)\n self.write(vas)\n logging.debug(\"Starting with to\")\n return vas", "def analyse_vowels(self, source):\r\n\r\n word_set = set()\r\n with open(source) as f:\r\n for line in f:\r\n words = [word.lower().strip() for word in line.split()]\r\n for word in words:\r\n map(self.parse_character, word)\r\n stripped = ''.join(c for c in word if c in letters)\r\n if stripped:\r\n word_set.add(stripped)\r\n vowels = self.get_possible_vowels(word_set)\r\n return self.filter_vowels(vowels, word_set)", "def test_one_disemvowel_code_wars():\n from disemvowel_trolls import disemvowel\n tests = [(\"This website is for losers LOL!\", \"Ths wbst s fr lsrs LL!\"),\n (\"No offense but,\\nYour writing is among the worst I've everread\",\n \"N ffns bt,\\nYr wrtng s mng th wrst 'v vrrd\"),\n (\"What are you, a communist?\", \"Wht r y, cmmnst?\")]\n\n for case in tests:\n assert disemvowel(case[0]) == case[1]", "def get_x_y() -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:\n logger.log('Loading Dataset...')\n x_train, y_train = helpers.datasets.load_voice()\n logger.log(str(len(y_train)) + ' train data loaded')\n\n x_test, y_test = None, None\n # x_test, y_test = helpers.datasets.load_voice(train=False)\n # logger.log(str(len(y_test)) + ' test data loaded')\n\n return x_train, y_train, x_test, y_test", "def load_data(self) -> tuple:\n self.read_path = Path(os.environ[\"DATA_PATH\"]) / \"characters\"\n self.pretrain_path = Path(os.environ[\"FONT_DATA\"]) / \"training\"\n self.dataset_builder.build_data_set()\n X_pretrain, y_pretrain, X_train, y_train, X_dev, y_dev, X_test, y_test = tuple(\n [] for l in range(8)\n )\n\n for letter in 
self.hebrew.letter_li:\n pretrain_images = glob(f\"{Path(self.pretrain_path/letter)}/*.jpeg\")\n train_images = glob(f'{Path(self.read_path/\"train\"/letter)}/*.jpg')\n dev_images = glob(f'{Path(self.read_path/\"dev\"/letter)}/*.jpg')\n test_images = glob(f'{Path(self.read_path/\"test\"/letter)}/*.jpg')\n\n # pretrain data\n for img in pretrain_images:\n image = cv2.imread(img)\n image = cv2.resize(image, self.img_size)\n X_pretrain.append(image)\n y_pretrain.append(self.hebrew.letter_li.index(letter))\n\n # training data\n for img in train_images:\n image = cv2.imread(img)\n image = cv2.resize(image, self.img_size)\n X_train.append(image)\n y_train.append(self.hebrew.letter_li.index(letter))\n\n # dev data\n for img in dev_images:\n image = cv2.imread(img)\n image = cv2.resize(image, self.img_size)\n X_dev.append(image)\n y_dev.append(self.hebrew.letter_li.index(letter))\n\n # test data\n for img in test_images:\n image = cv2.imread(img)\n image = cv2.resize(image, self.img_size)\n X_test.append(image)\n y_test.append(self.hebrew.letter_li.index(letter))\n\n return (\n np.array(X_pretrain),\n np.array(y_pretrain),\n np.array(X_train),\n np.array(y_train),\n np.array(X_dev),\n np.array(y_dev),\n np.array(X_test),\n np.array(y_test),\n )", "def load_test_data():\r\n X_test = np.load('data/test/X_test.npy')\r\n scaling_test = np.load('data/test/scaling_test.npy')\r\n ids_test = np.load('data/test/ids_test.npy')\r\n y_test = np.load('data/test/y_test.npy')\r\n\r\n seed = np.random.randint(1, 10e6)\r\n np.random.seed(seed)\r\n np.random.shuffle(X_test)\r\n np.random.seed(seed)\r\n np.random.shuffle(scaling_test)\r\n np.random.seed(seed)\r\n np.random.shuffle(ids_test)\r\n np.random.seed(seed)\r\n np.random.shuffle(y_test)\r\n\r\n return X_test, scaling_test, ids_test, y_test", "def load_data():\n x = np.genfromtxt(X_FILE, usecols=(0, 1))\n y = np.genfromtxt(Y_FILE, usecols=(0))\n\n return x, y", "def test_train_test_split_uni_exo(load_uni_exo_data_target):\n data, target = load_uni_exo_data_target\n\n ####################################\n #### Continuous fh without Gaps ####\n ####################################\n\n #### Integer fh ----\n exp = TSForecastingExperiment()\n fh = 12\n exp.setup(data=data, target=target, fh=fh, seasonal_period=4, session_id=42)\n assert np.all(exp.dataset.index == data.index)\n assert np.all(exp.train.index == data.iloc[: (len(data) - fh)].index)\n assert np.all(exp.test.index == data.iloc[-fh:].index)\n assert np.all(exp.X.index == data.index)\n assert np.all(exp.y.index == data.index)\n assert np.all(exp.X_train.index == data.iloc[: (len(data) - fh)].index)\n assert np.all(exp.X_test.index == data.iloc[-fh:].index)\n assert np.all(exp.y_train.index == data.iloc[: (len(data) - fh)].index)\n assert np.all(exp.y_test.index == data.iloc[-fh:].index)\n assert np.all(exp.dataset_transformed.index == data.index)\n assert np.all(exp.train_transformed.index == data.iloc[: (len(data) - fh)].index)\n assert np.all(exp.test_transformed.index == data.iloc[-fh:].index)\n assert np.all(exp.X_transformed.index == data.index)\n assert np.all(exp.y_transformed.index == data.index)\n assert np.all(exp.X_train_transformed.index == data.iloc[: (len(data) - fh)].index)\n assert np.all(exp.X_test_transformed.index == data.iloc[-fh:].index)\n assert np.all(exp.y_train_transformed.index == data.iloc[: (len(data) - fh)].index)\n assert np.all(exp.y_test_transformed.index == data.iloc[-fh:].index)\n\n #### Numpy fh ----\n exp = TSForecastingExperiment()\n fh = np.arange(1, 10) # 9 
values\n exp.setup(data=data, target=target, fh=fh, seasonal_period=4, session_id=42)\n assert np.all(exp.dataset.index == data.index)\n assert np.all(exp.train.index == data.iloc[: (len(data) - max(fh))].index)\n assert np.all(exp.test.index == data.iloc[-len(fh) :].index)\n assert np.all(exp.X.index == data.index)\n assert np.all(exp.y.index == data.index)\n assert np.all(exp.X_train.index == data.iloc[: (len(data) - max(fh))].index)\n assert np.all(exp.X_test.index == data.iloc[-len(fh) :].index)\n assert np.all(exp.y_train.index == data.iloc[: (len(data) - max(fh))].index)\n assert np.all(exp.y_test.index == data.iloc[-len(fh) :].index)\n assert np.all(exp.dataset_transformed.index == data.index)\n assert np.all(\n exp.train_transformed.index == data.iloc[: (len(data) - max(fh))].index\n )\n assert np.all(exp.test_transformed.index == data.iloc[-len(fh) :].index)\n assert np.all(exp.X_transformed.index == data.index)\n assert np.all(exp.y_transformed.index == data.index)\n assert np.all(\n exp.X_train_transformed.index == data.iloc[: (len(data) - max(fh))].index\n )\n assert np.all(exp.X_test_transformed.index == data.iloc[-len(fh) :].index)\n assert np.all(\n exp.y_train_transformed.index == data.iloc[: (len(data) - max(fh))].index\n )\n assert np.all(exp.y_test_transformed.index == data.iloc[-len(fh) :].index)\n\n #### List fh ----\n exp = TSForecastingExperiment()\n fh = [1, 2, 3, 4, 5, 6]\n exp.setup(data=data, target=target, fh=fh, seasonal_period=4, session_id=42)\n assert np.all(exp.dataset.index == data.index)\n assert np.all(exp.train.index == data.iloc[: (len(data) - max(fh))].index)\n assert np.all(exp.test.index == data.iloc[-len(fh) :].index)\n assert np.all(exp.X.index == data.index)\n assert np.all(exp.y.index == data.index)\n assert np.all(exp.X_train.index == data.iloc[: (len(data) - max(fh))].index)\n assert np.all(exp.X_test.index == data.iloc[-len(fh) :].index)\n assert np.all(exp.y_train.index == data.iloc[: (len(data) - max(fh))].index)\n assert np.all(exp.y_test.index == data.iloc[-len(fh) :].index)\n assert np.all(exp.dataset_transformed.index == data.index)\n assert np.all(\n exp.train_transformed.index == data.iloc[: (len(data) - max(fh))].index\n )\n assert np.all(exp.test_transformed.index == data.iloc[-len(fh) :].index)\n assert np.all(exp.X_transformed.index == data.index)\n assert np.all(exp.y_transformed.index == data.index)\n assert np.all(\n exp.X_train_transformed.index == data.iloc[: (len(data) - max(fh))].index\n )\n assert np.all(exp.X_test_transformed.index == data.iloc[-len(fh) :].index)\n assert np.all(\n exp.y_train_transformed.index == data.iloc[: (len(data) - max(fh))].index\n )\n assert np.all(exp.y_test_transformed.index == data.iloc[-len(fh) :].index)\n\n #################################\n #### Continuous fh with Gaps ####\n #################################\n\n #### Numpy fh ----\n exp = TSForecastingExperiment()\n fh = np.arange(7, 13) # 6 values\n exp.setup(data=data, target=target, fh=fh, seasonal_period=4, session_id=42)\n assert np.all(exp.dataset.index == data.index)\n assert np.all(exp.train.index == data.iloc[: (len(data) - max(fh))].index)\n # `test`` call still refers to y_test indices and not X_test indices\n assert len(exp.test) == len(fh)\n assert np.all(exp.X.index == data.index)\n assert np.all(exp.y.index == data.index)\n assert np.all(exp.X_train.index == data.iloc[: (len(data) - max(fh))].index)\n # Exogenous variables will not have any gaps (only target has gaps)\n assert np.all(exp.X_test.index == data.iloc[-max(fh) 
:].index)\n assert np.all(exp.y_train.index == data.iloc[: (len(data) - max(fh))].index)\n assert len(exp.y_test) == len(fh)\n assert np.all(exp.dataset_transformed.index == data.index)\n assert np.all(\n exp.train_transformed.index == data.iloc[: (len(data) - max(fh))].index\n )\n assert len(exp.test_transformed) == len(fh)\n assert np.all(exp.X_transformed.index == data.index)\n assert np.all(exp.y_transformed.index == data.index)\n\n #### List fh ----\n exp = TSForecastingExperiment()\n fh = [4, 5, 6]\n exp.setup(data=data, target=target, fh=fh, seasonal_period=4, session_id=42)\n assert np.all(exp.dataset.index == data.index)\n assert np.all(exp.train.index == data.iloc[: (len(data) - max(fh))].index)\n # `test`` call still refers to y_test indices and not X_test indices\n assert len(exp.test) == len(fh)\n assert np.all(exp.X.index == data.index)\n assert np.all(exp.y.index == data.index)\n assert np.all(exp.X_train.index == data.iloc[: (len(data) - max(fh))].index)\n # Exogenous variables will not have any gaps (only target has gaps)\n assert np.all(exp.X_test.index == data.iloc[-max(fh) :].index)\n assert np.all(exp.y_train.index == data.iloc[: (len(data) - max(fh))].index)\n assert len(exp.y_test) == len(fh)\n assert np.all(exp.dataset_transformed.index == data.index)\n assert np.all(\n exp.train_transformed.index == data.iloc[: (len(data) - max(fh))].index\n )\n assert len(exp.test_transformed) == len(fh)\n assert np.all(exp.X_transformed.index == data.index)\n assert np.all(exp.y_transformed.index == data.index)\n\n ####################################\n #### Discontinuous fh with Gaps ####\n ####################################\n\n #### Numpy fh ----\n exp = TSForecastingExperiment()\n fh = np.array([4, 5, 6, 10, 11, 12]) # 6 values\n exp.setup(data=data, target=target, fh=fh, seasonal_period=4, session_id=42)\n assert np.all(exp.dataset.index == data.index)\n assert np.all(exp.train.index == data.iloc[: (len(data) - max(fh))].index)\n # `test`` call still refers to y_test indices and not X_test indices\n assert len(exp.test) == len(fh)\n assert np.all(exp.X.index == data.index)\n assert np.all(exp.y.index == data.index)\n assert np.all(exp.X_train.index == data.iloc[: (len(data) - max(fh))].index)\n # Exogenous variables will not have any gaps (only target has gaps)\n assert np.all(exp.X_test.index == data.iloc[-max(fh) :].index)\n assert np.all(exp.y_train.index == data.iloc[: (len(data) - max(fh))].index)\n assert len(exp.y_test) == len(fh)\n assert np.all(exp.dataset_transformed.index == data.index)\n assert np.all(\n exp.train_transformed.index == data.iloc[: (len(data) - max(fh))].index\n )\n assert len(exp.test_transformed) == len(fh)\n assert np.all(exp.X_transformed.index == data.index)\n assert np.all(exp.y_transformed.index == data.index)\n assert np.all(\n exp.X_train_transformed.index == data.iloc[: (len(data) - max(fh))].index\n )\n # Exogenous variables will not have any gaps (only target has gaps)\n assert np.all(exp.X_test_transformed.index == data.iloc[-max(fh) :].index)\n assert np.all(\n exp.y_train_transformed.index == data.iloc[: (len(data) - max(fh))].index\n )\n assert len(exp.y_test_transformed) == len(fh)\n\n #### List fh ----\n exp = TSForecastingExperiment()\n fh = [4, 5, 6, 10, 11, 12]\n exp.setup(data=data, target=target, fh=fh, seasonal_period=4, session_id=42)\n assert np.all(exp.dataset.index == data.index)\n assert np.all(exp.train.index == data.iloc[: (len(data) - max(fh))].index)\n # `test`` call still refers to y_test indices and not X_test 
indices\n assert len(exp.test) == len(fh)\n assert np.all(exp.X.index == data.index)\n assert np.all(exp.y.index == data.index)\n assert np.all(exp.X_train.index == data.iloc[: (len(data) - max(fh))].index)\n # Exogenous variables will not have any gaps (only target has gaps)\n assert np.all(exp.X_test.index == data.iloc[-max(fh) :].index)\n assert np.all(exp.y_train.index == data.iloc[: (len(data) - max(fh))].index)\n assert len(exp.y_test) == len(fh)\n assert np.all(exp.dataset_transformed.index == data.index)\n assert np.all(\n exp.train_transformed.index == data.iloc[: (len(data) - max(fh))].index\n )\n assert len(exp.test_transformed) == len(fh)\n assert np.all(exp.X_transformed.index == data.index)\n assert np.all(exp.y_transformed.index == data.index)\n assert np.all(\n exp.X_train_transformed.index == data.iloc[: (len(data) - max(fh))].index\n )\n # Exogenous variables will not have any gaps (only target has gaps)\n assert np.all(exp.X_test_transformed.index == data.iloc[-max(fh) :].index)\n assert np.all(\n exp.y_train_transformed.index == data.iloc[: (len(data) - max(fh))].index\n )\n assert len(exp.y_test_transformed) == len(fh)", "def load_data():\n d = sio.loadmat('ex5data1.mat')\n return map(np.ravel, [d['X'], d['y'], d['Xval'], d['yval'], d['Xtest'], d['ytest']])", "def get_vowel_names():", "def convert_data_to_examples(train, test, data_column, label_column):\r\n train_InputExamples = train.apply(lambda x: InputExample(guid=None,\r\n text_a = x[data_column],\r\n text_b = None,\r\n label = x[label_column]), axis = 1)\r\n\r\n\r\n validation_InputExamples = test.apply(lambda x: InputExample(guid=None,\r\n text_a = x[data_column],\r\n text_b = None,\r\n label = x[label_column]), axis = 1)\r\n\r\n return train_InputExamples, validation_InputExamples", "def test_dataset_from_file(train_dataset):\n dummy = \"justo. Praesent luctus. Curabitur egestas nunc sed libero. 
Proin sed\"\n assert train_dataset[0][0] == dummy\n assert train_dataset[0][1] == '6'", "def load_data(): \n\tdf = pandas.read_csv('data/iris.data', header=None)\n\ty = df.iloc[0:df.shape[0], 4].values\n\ty = np.where(y == 'Iris-setosa', 0, y)\n\ty = np.where(y == 'Iris-versicolor', 1, y)\n\ty = np.where(y == 'Iris-virginica', 2, y)\n\tx = df.iloc[0:df.shape[0], 0:4].values\n\tx = tuple(x)\n\ty = tuple(y)\n\ttraining_inputs = x[0:40] + x[50:90] + x[100:140]\n\ttraining_results = y[0:40] + y[50:90] + y[100:140]\n\ttraining_data = (training_inputs, training_results)\n\ttest_inputs = x[40:50] + x[90:100] + x[140:150]\n\ttest_results = y[40:50] + y[90:1000] + y[140:150]\n\ttest_data = (test_inputs, test_results)\n\treturn (training_data, test_data)", "def load_data(y_name='Species'):\n train_path = tf.keras.utils.get_file(args.TRAIN_URL.split('/')[-1], args.TRAIN_URL)\n test_path = tf.keras.utils.get_file(args.TEST_URL.split('/')[-1], args.TEST_URL)\n\n train = pd.read_csv(train_path, names=args.CSV_COLUMN_NAMES, header=0)\n train_x, train_y = train, train.pop(y_name)\n\n test = pd.read_csv(test_path, names=args.CSV_COLUMN_NAMES, header=0)\n test_x, test_y = test, test.pop(y_name)\n\n return (train_x, train_y), (test_x, test_y)", "def load_data_test(self, size, a_low, a_high=None):\n\n if a_high is None:\n a_high = self.a;\n\n data, label = self._generate_test_set(size, a_low, a_high, flip_structure=False);\n\n return data, label;", "def load_data():\n\n # Load data\n # You can create this Numpy datafile by running the create_validation_sample.py script\n df = h5py.File(data_fn, \"r\")\n imgs_validation = df[\"imgs_validation\"]\n msks_validation = df[\"msks_validation\"]\n img_indicies = range(len(imgs_validation))\n\n \"\"\"\n OpenVINO uses channels first tensors (NCHW).\n TensorFlow usually does channels last (NHWC).\n So we need to transpose the axes.\n \"\"\"\n input_data = imgs_validation\n msks_data = msks_validation\n return input_data, msks_data, img_indicies", "def test_convert_single_vowel():\n for vowel in \"aeiou\":\n result = convert(vowel)\n assert result == vowel + \"way\"", "def load_data():\n\n print('Loading and Visualizing Data ...')\n\n file_name = path.join(getcwd(), 'ex3', 'src', 'data', 'ex3data1')\n data = scipy.io.loadmat(file_name)\n\n # training data stored in arrays X, y\n # y should be a row vector of labels\n return data['X'], data['y'].T[0]", "def test__validate_with_synthetic_data(elbow_with_synthetic_data):\n x, y, break_pt = elbow_with_synthetic_data\n expected_elbow = np.argmin(np.abs(x - break_pt))\n assert expected_elbow == find_elbow_point(x, y)", "def oil(data_set='three_phase_oil_flow'):\r\n if not data_available(data_set):\r\n download_data(data_set)\r\n oil_train_file = os.path.join(data_path, data_set, 'DataTrn.txt')\r\n oil_trainlbls_file = os.path.join(data_path, data_set, 'DataTrnLbls.txt')\r\n oil_test_file = os.path.join(data_path, data_set, 'DataTst.txt')\r\n oil_testlbls_file = os.path.join(data_path, data_set, 'DataTstLbls.txt')\r\n oil_valid_file = os.path.join(data_path, data_set, 'DataVdn.txt')\r\n oil_validlbls_file = os.path.join(data_path, data_set, 'DataVdnLbls.txt')\r\n fid = open(oil_train_file)\r\n X = np.fromfile(fid, sep='\\t').reshape((-1, 12))\r\n fid.close()\r\n fid = open(oil_test_file)\r\n Xtest = np.fromfile(fid, sep='\\t').reshape((-1, 12))\r\n fid.close()\r\n fid = open(oil_valid_file)\r\n Xvalid = np.fromfile(fid, sep='\\t').reshape((-1, 12))\r\n fid.close()\r\n fid = open(oil_trainlbls_file)\r\n Y = np.fromfile(fid, 
sep='\\t').reshape((-1, 3)) * 2. - 1.\r\n fid.close()\r\n fid = open(oil_testlbls_file)\r\n Ytest = np.fromfile(fid, sep='\\t').reshape((-1, 3)) * 2. - 1.\r\n fid.close()\r\n fid = open(oil_validlbls_file)\r\n Yvalid = np.fromfile(fid, sep='\\t').reshape((-1, 3)) * 2. - 1.\r\n fid.close()\r\n return data_details_return({'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest': Ytest, 'Xtest' : Xtest, 'Xvalid': Xvalid, 'Yvalid': Yvalid}, data_set)\r\n #else:\r\n # throw an error\r", "def test_dataset_autogen(autogen_dataset):\n train_dummy = \"eget, venenatis a, magna. Lorem ipsum dolor sit amet, consectetuer\"\n val_dummy = \"leo. Vivamus nibh dolor, nonummy ac, feugiat non, lobortis quis,\"\n test_dummy = \"turpis egestas. Aliquam fringilla cursus purus. Nullam scelerisque neque sed\"\n\n assert autogen_dataset.train[0][0] == train_dummy\n assert autogen_dataset.train[0][1] == '8'\n assert len(autogen_dataset.train) == 64\n\n assert autogen_dataset.val[0][0] == val_dummy\n assert autogen_dataset.val[0][1] == '1'\n assert len(autogen_dataset.val) == 16\n\n assert autogen_dataset.test[0][0] == test_dummy\n assert autogen_dataset.test[0][1] == '6'\n assert len(autogen_dataset.test) == 20", "def test_enforce_exogenous_exo_data(load_uni_exo_data_target):\n data, target = load_uni_exo_data_target\n\n exp1 = TSForecastingExperiment()\n exp1.setup(data=data, target=target, seasonal_period=4, enforce_exogenous=True)\n num_models1 = len(exp1.models())\n\n exp2 = TSForecastingExperiment()\n exp2.setup(data=data, target=target, seasonal_period=4, enforce_exogenous=False)\n num_models2 = len(exp2.models())\n\n # We know that some models do not offer exogenous variables support, so the\n # following check is valid for now.\n assert num_models1 < num_models2", "def load_characteristics(self):\r\n data = self.data\r\n X = data[:, :-1]\r\n Y = data[:, -1]\r\n return X, Y", "def load_eval_dataset(self):\n dict_path = get_eval_data(self.eval_path, self.src_lang, self.tgt_lang)\n\n pairs = []\n not_found_all = 0\n not_found_L1 = 0\n not_found_L2 = 0\n\n # Open the file and check if src and tgt word exists in the vocab\n with open(dict_path, \"r\") as f:\n for _, line in enumerate(f):\n word1, word2 = line.rstrip().split()\n if word1 in self.src_dico and word2 in self.tgt_dico:\n pairs.append((self.src_dico.index(word1), self.tgt_dico.index(word2)))\n else:\n not_found_all += 1\n not_found_L1 += int(word1 not in self.src_dico)\n not_found_L2 += int(word2 not in self.tgt_dico)\n print(\n \"Found %i pairs of words in the dictionary (%i unique). 
\"\n \" %i other pairs contained at least one unknown word \"\n \" (%i in src_lang, %i in tgt_lang)\"\n % (\n len(pairs),\n len(set([x for x, _ in pairs])),\n not_found_all,\n not_found_L1,\n not_found_L2,\n )\n )\n src_ind = [pairs[x][0] for x in range(len(pairs))]\n tgt_ind = [pairs[x][1] for x in range(len(pairs))]\n self.src_ind = np.asarray(src_ind)\n self.tgt_ind = np.asarray(tgt_ind)", "def test_returns_true_for_vowel(self):\n \n self.assertEqual(vowel_check.is_vowel('i'), True)", "def load_data(dataset_str):\n names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']\n objects = []\n for i in range(len(names)):\n with open(\"data/ind.{}.{}\".format(dataset_str, names[i]), 'rb') as f:\n if sys.version_info > (3, 0):\n objects.append(pkl.load(f, encoding='latin1'))\n else:\n objects.append(pkl.load(f))\n\n x, y, tx, ty, allx, ally, graph = tuple(objects)\n test_idx_reorder = parse_index_file(\n \"data/corpus/{}/{}.test.index\".format(dataset_str, dataset_str))\n test_idx_range = np.sort(test_idx_reorder)\n print(x.shape, y.shape, tx.shape, ty.shape, allx.shape, ally.shape)\n\n # training nodes are training docs, no initial features\n # print(\"x: \", x)\n # test nodes are training docs, no initial features\n # print(\"tx: \", tx)\n # both labeled and unlabeled training instances are training docs and words\n # print(\"allx: \", allx)\n # training labels are training doc labels\n # print(\"y: \", y)\n # test labels are test doc labels\n # print(\"ty: \", ty)\n # ally are labels for labels for allx, some will not have labels, i.e., all 0\n # print(\"ally: \\n\")\n # for i in ally:\n # if(sum(i) == 0):\n # print(i)\n # graph edge weight is the word co-occurence or doc word frequency\n # no need to build map, directly build csr_matrix\n # print('graph : ', graph)\n\n if dataset_str == 'citeseer':\n # Fix citeseer dataset (there are some isolated nodes in the graph)\n # Find isolated nodes, add them as zero-vecs into the right position\n test_idx_range_full = range(\n min(test_idx_reorder), max(test_idx_reorder)+1)\n tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))\n tx_extended[test_idx_range-min(test_idx_range), :] = tx\n tx = tx_extended\n ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))\n ty_extended[test_idx_range-min(test_idx_range), :] = ty\n ty = ty_extended\n\n features = sp.vstack((allx, tx)).tolil()\n features[test_idx_reorder, :] = features[test_idx_range, :]\n adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))\n\n labels = np.vstack((ally, ty))\n labels[test_idx_reorder, :] = labels[test_idx_range, :]\n # print(len(labels))\n\n idx_test = test_idx_range.tolist()\n # print(idx_test)\n idx_train = range(len(y))\n idx_val = range(len(y), len(y)+500)\n\n train_mask = sample_mask(idx_train, labels.shape[0])\n val_mask = sample_mask(idx_val, labels.shape[0])\n test_mask = sample_mask(idx_test, labels.shape[0])\n\n y_train = np.zeros(labels.shape)\n y_val = np.zeros(labels.shape)\n y_test = np.zeros(labels.shape)\n y_train[train_mask, :] = labels[train_mask, :]\n y_val[val_mask, :] = labels[val_mask, :]\n y_test[test_mask, :] = labels[test_mask, :]\n\n return adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask" ]
[ "0.8300088", "0.6068408", "0.5904717", "0.5585258", "0.55028343", "0.54649633", "0.54600555", "0.5418553", "0.5395617", "0.53787655", "0.53458136", "0.5342534", "0.52974266", "0.5277391", "0.526091", "0.52232075", "0.52189106", "0.5207462", "0.5204022", "0.5163287", "0.51475626", "0.51368123", "0.5135174", "0.51095444", "0.51079", "0.5099455", "0.5097614", "0.50904113", "0.50838804", "0.508168" ]
0.83285666
0
Load and return the breast cancer wisconsin dataset (classification). The breast cancer dataset is a classic and very easy binary classification dataset. Returns (X_train, X_test, y_train, y_test) : Tuple, a tuple of data and target. The copy of UCI ML Breast Cancer Wisconsin (Original) dataset is
def load_breast_cancer():
    bc_data_train = np.load(_BREAST_CANCER_FOLDER+'bc_data.train')
    bc_data_test = np.load(_BREAST_CANCER_FOLDER+'bc_data.test')
    bc_target_train = np.load(_BREAST_CANCER_FOLDER+'bc_target.train')
    bc_target_test = np.load(_BREAST_CANCER_FOLDER+'bc_target.test')

    for i in range(len(bc_target_test)):
        if bc_target_test[i] == 2:
            bc_target_test[i] = 0
        elif bc_target_test[i] == 4:
            bc_target_test[i] = 1

    for i in range(len(bc_target_train)):
        if bc_target_train[i] == 2:
            bc_target_train[i] = 0
        elif bc_target_train[i] == 4:
            bc_target_train[i] = 1

    return (bc_data_train, bc_target_train.reshape(-1, 1),
            bc_data_test, bc_target_test.reshape(-1, 1))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_breast_cancer():\n data = load_breast_cancer_sk()\n X = pd.DataFrame(data.data, columns=data.feature_names)\n y = pd.Series(data.target)\n y = y.map(lambda x: data[\"target_names\"][x])\n\n X.ww.init()\n y = ww.init_series(y)\n return X, y", "def load_data():\n df = pd.read_csv(\"../../Data/breast_cancer_data/data.csv\")\n\n cols = df.columns\n X = df[cols[2:-1]].to_numpy()\n y = df[cols[1]].to_numpy()\n y = (y=='M').astype(np.int) * 2 - 1\n\n train_X = X[:-150]\n train_y = y[:-150]\n\n test_X = X[-150:]\n test_y = y[-150:]\n\n return train_X, train_y, test_X, test_y", "def load_demo():\n\tprint(\"\"\"\n\tBreast Cancer Wisconsin dataset. It contains a total of 569 samples of tumor and malignant cells. \n\tData labeled 1 corresponds to malignant cells, while data labeled 0 corresponds to benign cells. \n\tThe 30 characteristics contain real values obtained from images of cell nuclei. For more information:\n\n\t\t\thttp://archive.ics.uci.edu/ml/datasets/breast+cancer+wisconsin+(diagnostic)\n\n\n\tThe returned value is a dictionary where 'x_data' are the predictor variables, 'y_data' the class \n\tlabels and 'features' the name of the characteristics.\n\t\"\"\")\n\tpath = '/'.join(os.path.abspath(pywinEA.__file__).split('/')[:-1])\n\t\n\tdata = pd.read_csv(path+'/dataset/data/BreastCancerWisconsin.csv', index_col=0)\n\tx_data = data.iloc[:, 1:].values\n\ty_data = data.iloc[:, 0].values\n\tfeatures = data.columns[1:].values\n\n\t# Transform labels\n\ty_data[np.where(y_data == 'M')] = 1\n\ty_data[np.where(y_data == 'B')] = 0\n\ty_data = y_data.astype(int)\n\n\treturn {'x_data': x_data, 'y_data': y_data, 'features': features}", "def load_benzene_concentration_sample():\n file = Path(__file__).parent.parent / \"data/benzene_concentration_sample.csv\"\n df = pd.read_csv(file)\n y = df[\"target\"].to_numpy()\n X = df.drop(columns=\"target\").to_numpy()\n X = np.expand_dims(X, axis=1)\n return X, y", "def _breast_cancer_wisconsin_diag(location: str) -> Dataset:\n\n columns = ['id', 'target']\n data_type = ['radius', 'texture', 'perimeter', 'area', 'smoothness',\n 'compactness', 'concavity', 'concave_points', 'symmetry',\n 'fractal_dimension']\n\n # Compute proper names of the columns\n for prefix in ['mean', 'sd', 'worst']:\n for dtype in data_type:\n columns.append(f'{prefix}_{dtype}')\n\n # Read dataframe and split data into x, y\n df = pd.read_csv(joinpath(location, 'wdbc.data'), names=columns)\n y, x = df.pop('target'), df\n\n # Convert target into [0, 1]\n conv_to_class = {'M': 0, 'B': 1}\n y = y.apply(lambda x: conv_to_class[x])\n\n return x, y", "def load_crawl():\n\n\tmodule_path = dirname(__file__)\n\twith open(join(module_path, 'data', 'train2.csv')) as csv_file:\n\t\tdata_file = csv.reader(csv_file)\n\t\ttemp = next(data_file)\n\t\tglobal n_samples\n\t\tn_samples = int(temp[0])\n\t\tglobal n_features\n\t\tn_features = int(temp[1])\n\t\tprint \"n samples \" + str((n_samples))\n\t\tprint \"n_features\" + str((n_features))\n\t\ttarget_names = np.array(temp[2:4])\n\t\tdata = np.empty((n_samples, n_features))\n\t\ttarget = np.empty((n_samples,), dtype=np.int)\n\n\t\tfor count, value in enumerate(data_file):\n\t\t\tdata[count] = np.asarray(value[:-1], dtype=np.float)\n\t\t\ttarget[count] = np.asarray(value[-1], dtype=np.int)\n\t\t\t#print \"data is \" + str(data[count])\n\t\t\t#print \"target is \" + str(target[count])\n\t\tprint \"Number of target records is \" + str(len(target))\n\t#with open(join(module_path, 'descr', 'train.rst')) as rst_file:\n\t#\tfdescr = 
rst_file.read()\n\n\treturn Bunch(data=data, target=target,\n\t\t\t target_names=target_names,\n\t\t\t DESCR=None,\n\t\t\t feature_names = ['evalCount', 'setInterval', 'setTimeout', 'link', \n\t\t\t\t\t\t\t 'search', 'exec','escape', 'unescape', 'ratio', \n\t\t\t\t\t\t\t 'emtropyAvg', 'entropyScript', 'longStrings', \n\t\t\t\t\t\t\t 'maxEntropy', 'stringAvg', 'maxLength', 'longVarFunc', \n\t\t\t\t\t\t\t 'stringAssignments', 'stringModFuncsCount', 'eventFuncsCount', \n\t\t\t\t\t\t\t 'domModFuncsCounter', 'suspStrings', 'whiteSpaceRatio', \n\t\t\t\t\t\t\t 'hexaStrings', 'maxNonPrintableCharactersinString', 'lineAvg', \n\t\t\t\t\t\t\t 'iframeCount', 'malTagCount', 'jsLength'])", "def dataset(self):\n if self.X is not None and self.y is not None:\n return self.X, self.y\n\n self.X, self.y = self.get_BOW_from_file(self.labels[0])\n for label in self.labels[1:]:\n X_temp, y_temp = self.get_BOW_from_file(label)\n self.X = np.concatenate((self.X, X_temp))\n self.y = np.concatenate((self.y, y_temp))\n\n return self.X, self.y", "def load_bottleneck_data(training_file, validation_file):\n print(\"Training file\", training_file)\n print(\"Validation file\", validation_file)\n\n with open(training_file, 'rb') as f:\n train_data = pickle.load(f)\n with open(validation_file, 'rb') as f:\n validation_data = pickle.load(f)\n\n X_train = train_data['features']\n y_train = train_data['labels']\n X_val = validation_data['features']\n y_val = validation_data['labels']\n\n return X_train, y_train, X_val, y_val", "def get_breast_cancer_data(target=\"diagnosis\"):\n data = load_breast_cancer()\n df = pd.DataFrame(data=data.data, columns=[_.replace(\" \", \"_\") for _ in data.feature_names])\n df[target] = data.target\n return df", "def load_dataset():\n temp = gzip.open('mnist.pkl.gz')\n train, val , test = pickle.load(temp,encoding='latin1')\n temp.close()\n train_inp = [np.reshape(x, (784,1)) for x in train[0]]\n train_outp = [one_hot(y) for y in train[1]]\n training_data = zip(train_inp, train_outp)\n validation_inp = [np.reshape(x, (784, 1)) for x in val[0]]\n validation_data = zip(validation_inp, val[1])\n test_inp = [np.reshape(x, (784, 1)) for x in test[0]]\n test_data = zip(test_inp, test[1])\n return (training_data,validation_data,test_data)", "def load_data(\n self, file_path: str = os.path.join(os.getcwd(), \"data_breast_cancer.p\")\n ) -> None:\n with open(file_path, \"rb\") as file:\n data = pickle.load(file)\n self.x_train, self.y_train = data[\"x_train\"], data[\"y_train\"]\n self.x_test, self.y_test = data[\"x_test\"], data[\"y_test\"]", "def load_dataset():\n\n train_dataset = h5py.File('datasets/train_catvnoncat.h5', \"r\")\n train_set_x_orig = np.array(train_dataset[\"train_set_x\"][:]) # your train set features\n train_set_y_orig = np.array(train_dataset[\"train_set_y\"][:]) # your train set labels\n\n test_dataset = h5py.File('datasets/test_catvnoncat.h5', \"r\")\n test_set_x_orig = np.array(test_dataset[\"test_set_x\"][:]) # your test set features\n test_set_y_orig = np.array(test_dataset[\"test_set_y\"][:]) # your test set labels\n\n classes = np.array(test_dataset[\"list_classes\"][:]) # the list of classes\n \n train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))\n test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))\n \n return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes", "def load_data():\r\n train_dataset = h5py.File('train_catvnoncat.h5', \"r\") # Change the directory as per your system\r\n train_set_x_orig 
= np.array(train_dataset[\"train_set_x\"][:]) # your train set features\r\n train_set_y_orig = np.array(train_dataset[\"train_set_y\"][:]) # your train set labels\r\n\r\n test_dataset = h5py.File('test_catvnoncat.h5', \"r\") # Change the directory as per your system\r\n test_set_x_orig = np.array(test_dataset[\"test_set_x\"][:]) # your test set features\r\n test_set_y_orig = np.array(test_dataset[\"test_set_y\"][:]) # your test set labels\r\n\r\n classes = np.array(test_dataset[\"list_classes\"][:]) # the list of classes\r\n \r\n train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))\r\n test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))\r\n \r\n return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes", "def load_wdbc(random_state=None, return_X_y=False, subset='kriegel11'):\n\n wdbc = load_breast_cancer()\n X = wdbc.data\n y = wdbc.target\n feature_names = wdbc.feature_names\n\n n_outliers = 10\n is_outlier = y == 0\n idx_inlier = np.flatnonzero(~is_outlier)\n idx_outlier = np.flatnonzero(is_outlier)\n y[is_outlier] = NEG_LABEL\n\n if subset not in ['goldstein12', 'kriegel11', 'sugiyama13']:\n raise ValueError(f'invalid subset: {subset}')\n\n if subset == 'goldstein12':\n s = np.union1d(idx_inlier, idx_outlier[:n_outliers])\n\n if subset == 'kriegel11':\n rnd = check_random_state(random_state)\n s = np.union1d(\n idx_inlier,\n rnd.choice(idx_outlier, size=n_outliers, replace=False)\n )\n\n if subset != 'sugiyama13':\n # Downsample outliers\n X = X[s]\n y = y[s]\n\n if return_X_y:\n return X, y\n\n return Bunch(data=X, target=y, feature_names=feature_names)", "def on_pushButton_clicked(self):\n # TODO: not implemented yet\n print(\"加载数据\")\n \n boston = datasets.load_boston()\n train = boston.data\n target = boston.target\n \n self.X_train,self.x_test,self.y_train,self.y_true = train_test_split(train,target,test_size=0.2)", "def data_set_maker():\n\n # crate a folder in your code directory and name it: \"files\". 
put the .npy files iside that folder\n\n x_all = np.load(path + '/files/tinyX.npy', 'r') # reads the input file\n y_all = np.load(path + '/files/tinyY.npy', 'r') # reads the input file\n\n # split the data into 10% validation-set and 90% training set\n raw_train, raw_valid, y_train, y_valid = train_test_split(x_all, y_all, test_size=0.2, random_state=43)\n return raw_train, raw_valid, y_train, y_valid", "def load_bottleneck_data(network, dataset):\n\n # training files have been moved to a subdirectory for cleaner root\n training_sets_dir = './training_sets/'\n\n # build the training/validation file names from supplied flags\n training_file = training_sets_dir + network + '_' + dataset + '_100_bottleneck_features_train.p'\n validation_file = training_sets_dir + network + '_' + dataset + '_bottleneck_features_validation.p'\n print(\"Training file \", training_file)\n print(\"Validation file\", validation_file)\n\n with open(training_file, 'rb') as f:\n train_data = pickle.load(f)\n with open(validation_file, 'rb') as f:\n validation_data = pickle.load(f)\n\n X_train = train_data['features']\n y_train = train_data['labels']\n X_valid = validation_data['features']\n y_valid = validation_data['labels']\n\n return X_train, y_train, X_valid, y_valid", "def load_or_generate_data(self) -> None:\n\n # Training set defined as a 5 x 5 square:\n xg1 = np.linspace(-5, 10, 5)\n xg2 = np.linspace(0, 15, 5)\n x = np.zeros((xg1.size * xg2.size, 2))\n for i, x1 in enumerate(xg1):\n for j, x2 in enumerate(xg2):\n x[i + xg1.size * j, :] = [x1, x2]\n\n y = self.branin(x)[:, None]\n self.x, self.y = x, y", "def readMNISTData():\n mnist = input_data.read_data_sets(\"MNIST_data\",one_hot=True) \n return mnist", "def load_data(trainfile, testfile):\n raw_train = pd.read_csv(trainfile, header=None)\n raw_test = pd.read_csv(testfile, header=None)\n train = raw_train.values\n test = raw_test.values\n train_features = train[0::, 1::]\n train_label = train[::, 0]\n test_features = test[0::, 1::]\n test_label = test[::, 0]\n train, cv , train_label, cv_label = train_test_split(train_features,train_label, test_size=0.33, random_state=42)\n return train, train_label, \\\n cv, cv_label, \\\n test_features, test_label", "def load_dataset_train():\n df_train = load_csv_file(\"31_train.csv\")\n df_train_target = load_csv_file(\"31_target_train.csv\")\n\n return df_train.values, df_train_target.values", "def load_data(self):\n with open('data/fordTrain.csv') as f:\n data = csv.reader(f, delimiter=',')\n train = [x for i, x in enumerate(data) if i > 0] \n # Extract features and target variable separately\n trainx = [x[3:] for x in train]\n trainy = [x[2] for x in train]\n\n with open('data/fordTest.csv') as f:\n data = csv.reader(f, delimiter=',')\n testx = [x[3:] for i, x in enumerate(data) if i > 0] \n\n with open('data/Solution.csv') as f:\n data = csv.reader(f, delimiter=',')\n testy = [x[2] for i, x in enumerate(data) if i > 0] \n\n # Extract features and target variable, convert to numpy array\n trainx = np.asarray(trainx, dtype=np.float32)\n trainy = np.asarray(trainy, dtype=np.int8)\n testx = np.asarray(testx, dtype=np.float32)\n testy = np.asarray(testy, dtype=np.int8)\n\n # Return training and test sets\n trainSet = Dataset(trainx, trainy)\n testSet = Dataset(testx, testy)\n return trainSet, testSet", "def load_binary_imbalanced(classes=(1,7), ratio=0.1):\r\n train_set, train_set_target = load_data()\r\n \r\n # binarize\r\n mask_train_set_imb = np.logical_or(train_set_target == classes[0],train_set_target == 
classes[1])\r\n (data_set_imb,data_set_imb_target)= (train_set[mask_train_set_imb], train_set_target[mask_train_set_imb])\r\n\r\n # imbalance\r\n data_minority = data_set_imb[data_set_imb_target == classes[1]]\r\n data_minority_target = data_set_imb_target[data_set_imb_target == classes[1]]\r\n data_majority = data_set_imb[data_set_imb_target == classes[0]]\r\n data_majority_target = data_set_imb_target[data_set_imb_target == classes[0]]\r\n original_size = data_minority_target.shape[0]\r\n majority_size = data_majority_target.shape[0]\r\n target_size = int(np.floor(majority_size * ratio))\r\n indices = np.random.choice(original_size, size=target_size)\r\n data_minority = data_minority[indices]\r\n data_minority_target = data_minority_target[indices]\r\n\r\n # merge\r\n train_set = np.concatenate([data_minority, data_majority])\r\n train_set_target = np.concatenate([data_minority_target, data_majority_target])\r\n\r\n #shuffle\r\n train_set, train_set_target = np.hsplit(\r\n np.random.permutation(\r\n np.hstack((train_set, train_set_target.reshape((train_set_target.shape[0], 1))))\r\n ), [-1]\r\n )\r\n train_set_target = np.asarray(train_set_target, dtype='int').reshape((train_set_target.shape[0],))\r\n return (train_set[:],train_set_target[:])", "def get_naive_Bayes_classificator(self):\n try:\n with open(TWEET_BAYES_FILENAME, 'rb') as f:\n self.classifier, self.bayes_accuracy = pickle.load(f)\n print('It was read sucessfully!')\n except IOError:\n self.train_naive_Bayes_classificator()", "def prepare_dataset():\n with open('gold-posts.txt', encoding='utf-8') as f:\n posts = f.readlines()\n with open('gold-labels.txt', encoding='utf-8') as f:\n labels = f.readlines()\n\n def to_cat(x: str) -> int:\n if x == 'p':\n return 1\n elif x == 'n':\n return 2\n else:\n return 0\n X = np.array([x.strip() for x in posts])\n y = np.array([to_cat(x.strip()) for x in labels])\n\n # DOES NOT WORK - too imbalanced\n #skf = StratifiedKFold(n_splits=5, random_state=None, shuffle=False)\n #for train_index, test_index in skf.split(X, y):\n # X_train, X_test = X[train_index], X[test_index]\n # y_train, y_test = y[train_index], y[test_index]\n # break\n\n # WORKS better\n trI, teI = balanced_split(y)\n\n train_texts = X[trI].tolist()\n train_labels = y[trI].tolist()\n valid_texts = X[teI].tolist()\n valid_labels = y[teI].tolist()\n return train_texts, train_labels, valid_texts, valid_labels", "def load(train_file, test_file):\n print('\\nLoad the raw training and test set data...')\n y_train, tx_train, ids_train = load_csv_data(train_file)\n y_test, tx_test, ids_test = load_csv_data(test_file)\n print('\\n... 
finished.')\n return y_train, tx_train, ids_train, y_test, tx_test, ids_test", "def load_data():\n X = load_pickle(config['image_paths']['train_images_pickle'])\n y = load_train_labels()\n y = to_categorical(y)\n test_indices = np.random.choice(len(X), int(len(X) * float(config['model']['test_size'])), replace=False)\n X_train = np.asarray([e for idx, e in enumerate(X) if idx not in test_indices])\n X_test = np.asarray([e for idx, e in enumerate(X) if idx in test_indices])\n y_train = np.asarray([e for idx, e in enumerate(y) if idx not in test_indices])\n y_test = np.asarray([e for idx, e in enumerate(y) if idx in test_indices])\n return X_train, y_train, X_test, y_test", "def get_train_data():\n # train set\n train = pd.read_csv(\"train.csv\")\n\n return train", "def data(self):\n (x_train, y_train), (_, _) = datasets.fashion_mnist.load_data()\n x_train = x_train.reshape((-1, 28, 28, 1))\n x_train, y_train = x_train.astype('float16') / 255.0, \\\n tf.keras.utils.to_categorical(y_train.astype('float16'), 10)\n (x_train, x_eval) = x_train[5000:], x_train[:5000]\n (y_train, y_eval) = y_train[5000:], y_train[:5000]\n train_data, eval_data = (x_train, y_train), (x_eval, y_eval)\n return train_data, eval_data", "def read(train_path, test_path, label_name):\n train_dataset = pd.read_csv(train_path)\n test_dataset = pd.read_csv(test_path)\n\n train_labels = train_dataset.pop(label_name)\n\n imputer = DataFrameImputer().fit(train_dataset)\n train_dataset = imputer.transform(train_dataset)\n test_dataset = imputer.transform(test_dataset)\n\n train_dataset = pd.get_dummies(train_dataset)\n test_dataset = pd.get_dummies(test_dataset)\n\n train_dataset = train_dataset.drop(train_dataset.columns.difference(test_dataset.columns), axis=1)\n test_dataset = test_dataset.drop(test_dataset.columns.difference(train_dataset.columns), axis=1)\n\n scaler = StandardScaler().fit(train_dataset)\n train_dataset = scaler.transform(train_dataset)\n test_dataset = scaler.transform(test_dataset)\n\n return train_dataset, train_labels, test_dataset" ]
[ "0.7141485", "0.69764185", "0.6372257", "0.6351349", "0.6293626", "0.60629076", "0.5941613", "0.59359765", "0.5885802", "0.5858357", "0.58188754", "0.57530105", "0.5714098", "0.5703062", "0.56975883", "0.5686469", "0.5659355", "0.5605132", "0.560234", "0.55977595", "0.5596762", "0.5585393", "0.5571161", "0.5550326", "0.554698", "0.5539647", "0.5532786", "0.55197036", "0.55194855", "0.55177706" ]
0.76880604
0
sub(Vector, Vector): subtracts the second vector from the first one.
def sub(first, other):
    if isinstance(first, FreeCAD.Vector) and isinstance(other, FreeCAD.Vector):
        return FreeCAD.Vector(first.x-other.x, first.y-other.y, first.z-other.z)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def vector_sub(v1,v2):\n return Vector(v1.x - v2.x, v1.y - v2.y, v1.z - v2.z)", "def vec_sub(x, y):\r\n return [a - b for a, b in zip(x, y)]", "def vector_subtract(v1, v2):\n return v1[0] - v2[0], v1[1] - v2[1]", "def vector_substraction(a, b):\n return a[0] - b[0], a[1] - b[1]", "def vec_sub (x, y):\n return [x_i - y_i for (x_i, y_i) in zip (x, y)]", "def sub(self, a, b):\n return a - b", "def sub(a,b):\n return [a[0]-b[0],a[1]-b[1],a[2]-b[2],1.0]", "def __sub__(self, other):\n return Vec2d(self.v[0] - other[0], self.v[1] - other[1])", "def sub(a, b):\n return [a[i] - b[i] for i in range(2)]", "def sub(x, y):\n return x - y", "def __sub__(self, other):\n return Vector([c1 - c2 for (c1, c2) in zip(self.components, other.components)])", "def __sub__(self,other):\n return Vector(self.x - other.x, self.y-other.y)\n pass", "def sub(a, b):\n return a - b", "def sub(a, b):\n return a - b", "def sub(a, b):\n return a - b", "def _subVectors(X1,X2):\n _checkSize(X1,X2)\n return [ X1[i] - X2[i] for i in range(len(X1))]", "def sub(a,b):\r\n return a-b", "def subtractVector(self, subtrahend):\n result = self.addVector(subtrahend.scalarMultiplication(-1.0))\n return result", "def test__vector_subtraction__given_two_vectors__return_correct_vector():\n assert Vector((0, 1, 2)) - Vector((3, 4, 5)) == Vector((-3, -3, -3))", "def pairwise_sub(a, b):\n return [a[i]-b[i] for i in xrange(0, min(len(a), len(b)))]", "def subtract(first, second):\n return first - second", "def test_sub_with_vec_argument(self):\n\n a = Vec3(2, 4, 6)\n b = Vec3(1, 2, 3)\n\n result = a - b\n\n expected_result = Vec3(1, 2, 3)\n\n self.assertEqual(result, expected_result)", "def sub(o1, o2):\n return o1-o2", "def __sub__(self, other):\n if isinstance(other, Vector):\n a = self._ar - other._ar\n else:\n a = self._ar - numpy.array(other)\n return Vector(a)", "def substract(self, vector):\n\n # return (self.from_list([vector.vector[self.vector.index(x)]-x for x in\n # self.vector]))\n return Vector(self.x - vector.x, self.y - vector.y, self.z - vector.z)", "def subtract_vectors(vector_1, vector_2):\n new_coordinates = []\n index = 0\n while index < vector_1.dimension:\n new_value = vector_1.coordinates[index] - vector_2.coordinates[index]\n new_coordinates.append(new_value)\n index += 1\n new_vector = Vector(new_coordinates)\n return new_vector", "def __sub__(self, other):\n if isinstance(other, Vec2Array):\n if len(self) == len(other):\n return self.from_points(\n a - b for a, b in zip(self, other))\n else:\n raise ValueError(\n \"cannot subtract arrays with different lengths\")\n else:\n try:\n b = Vec2(*other)\n except Exception:\n return NotImplemented\n return self.from_points(a - b for a in self)", "def sub(self,v2): \n n = len(self.a)\n m = len(v2.a)\n c = []\n if n != m:\n print(\"Incompatible Types\")\n return\n\n for i in range(n):\n c.append(self.a[i]-v2.a[i])\n\n return c", "def sub(x, y):\r\n\r\n return x + (-y)", "def subtract_vectors(u, v):\n return u[0] - v[0], u[1] - v[1], u[2] - v[2]" ]
[ "0.8428072", "0.8130589", "0.81168246", "0.80479693", "0.7928738", "0.7876339", "0.78062", "0.7775048", "0.77688444", "0.7703792", "0.7700997", "0.7661297", "0.76243055", "0.76243055", "0.76243055", "0.7616531", "0.7509781", "0.7507299", "0.7501865", "0.7447214", "0.7437817", "0.74181235", "0.7406448", "0.73655885", "0.73607874", "0.7325287", "0.73193175", "0.72883755", "0.72571737", "0.72237587" ]
0.8258668
1
Test that a valid message is sent to a valid webhook.
def test_valid_webhook(self, mock_send):
    send_notification("valid_webhook", self.message)
    mock_send.assert_called()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_simple_message(self):\n messaging = {\n 'sender': {'id': '1331235'},\n 'recipient': {'id': '1111111'},\n 'message': {'text': 'Hello world.'}\n }\n event = self.create_message_event(messaging)\n c = Client()\n response = c.post(self.webhook, data=event, content_type='application/json')\n self.assertEqual(response.status_code, 200)", "def test_slackWH_send_good(get_slackwebhook, capsys):\n s = get_slackwebhook\n s.send()\n out, err = capsys.readouterr()\n assert \"Message sent\" in out", "def test_webhook_empty_event(self):\n event = {\n 'body': json.dumps({})\n }\n context = {}\n resp = webhook(event, context)\n self.assertEqual(resp[\"statusCode\"], 500)\n self.assertEqual(resp[\"body\"], json.dumps({}))", "def test_validation(self):\n challenge = \"challenge-string\"\n data = {\n 'hub.mode': 'subscribe',\n 'hub.verify_token': settings.VERIFY_TOKEN,\n 'hub.challenge': challenge\n }\n c = Client()\n response = c.get(self.webhook, data=data)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(str(response.content, 'utf-8'), challenge)", "def test_good_signature(post_data, expected_error_message, settings, rf):\n app_key = '123appkey'\n request_signature = compute_request_signature(app_key, post_data)\n setattr(settings, APP_KEY_SETTING, app_key)\n setattr(settings, FAIL_ON_MISMATCH_SETTING, True)\n view = OurVeryOwnReceiverView.as_view()\n request = rf.post(\n WEBHOOK_URL,\n post_data,\n content_type='application/json',\n HTTP_X_GAPI_SIGNATURE=request_signature)\n\n response = view(request)\n if expected_error_message is None:\n assert response.status_code == 200\n else:\n assert response.status_code == 400\n assert response.content == expected_error_message", "def test_bot_message():\n send_json_message_to_bot(request.get_json())\n return \"ok\"", "def test_postMessage(self): #GIVEN the appropriate environment variables are configured\n testBot = bot.Bot(os.environ['bot_id'], os.environ['token'], os.environ['group_ID'])\n status = testBot.postMessage('Zygium') #WHEN the bot posts a message\n self.assertTrue(status == 202) # a status code of 202 should be returned", "def test_bad_signature(fail_on_mismatch, settings, rf):\n app_key = '123appkey'\n setattr(settings, APP_KEY_SETTING, app_key)\n setattr(settings, FAIL_ON_MISMATCH_SETTING, fail_on_mismatch)\n view = OurVeryOwnReceiverView.as_view()\n request = rf.post(\n WEBHOOK_URL,\n GOOD_EVENT_LIST_JSON,\n content_type='application/json')\n\n response = view(request)\n if fail_on_mismatch:\n assert response.status_code == 400\n assert response.content == ErrorMessages.INVALID_SIGNATURE\n else:\n assert response.status_code == 200", "def test_post_empty_data(self):\n response = self.app.post('/_ah/push-handlers/receive_message')\n self.assertEqual(response.status_int, 200)\n self.assertEqual(response.body, \"No request body received\")\n self.assertRaises(ValueError)", "def test_validate_post(client):\n response = client.post(\n '/user/',\n data=json.dumps({\n 'name': 'Jeff Knupp',\n 'email': '[email protected]',\n }),\n headers={'Content-Type': 'application/json'}\n )\n assert response.status_code == 400\n assert response.json['message'] == INVALID_ACTION_MESSAGE", "def test_post_invalid(self):\n sender = UserFactory()\n data = {\n 'senderId': sender.id,\n 'recipientId': 999,\n 'text': '...'\n }\n\n response = self.client.post(\n reverse('messages:list'),\n content_type='application/json',\n data=data,\n )\n self.assertEqual(400, response.status_code)", "async def test_mailgun_webhook_with_missing_signature(\n 
http_client, webhook_id_with_api_key, mailgun_events\n) -> None:\n event_count = len(mailgun_events)\n\n await http_client.post(\n f\"/api/webhook/{webhook_id_with_api_key}\",\n json={\"hello\": \"mailgun\", \"signature\": {}},\n )\n\n assert len(mailgun_events) == event_count\n\n await http_client.post(\n f\"/api/webhook/{webhook_id_with_api_key}\", json={\"hello\": \"mailgun\"}\n )\n\n assert len(mailgun_events) == event_count", "async def test_send(self):\n await self.webhook_connection.connect()\n assert self.webhook_connection.is_connected is True\n\n http_message = HttpMessage(\n dialogue_reference=(\"\", \"\"),\n target=0,\n message_id=1,\n performative=HttpMessage.Performative.REQUEST,\n method=\"get\",\n url=\"/\",\n headers=\"\",\n body=\"\",\n version=\"\",\n )\n envelope = Envelope(\n to=\"addr\",\n sender=\"my_id\",\n message=http_message,\n )\n with patch.object(self.webhook_connection.logger, \"warning\") as mock_logger:\n await self.webhook_connection.send(envelope)\n await asyncio.sleep(0.01)\n mock_logger.assert_any_call(\n RegexComparator(\n \"Dropping envelope=.* as sending via the webhook is not possible!\"\n )\n )", "def test_command_trigger_webhook_post(self):\n pass", "def test_slackWH_send_badAuth(get_slackwebhook):\n s = get_slackwebhook\n s.url = 'https://hooks.slack.com/services/badAuthCreds'\n with pytest.raises(MessageSendError):\n s.send()", "def test_uptimerobot_invalid_payload_with_missing_data(self) -> None:\n self.url = self.build_webhook_url()\n payload = self.get_body(\"uptimerobot_invalid_payload_with_missing_data\")\n result = self.client_post(self.url, payload, content_type=\"application/json\")\n self.assert_json_error(result, \"Invalid payload\")\n\n expected_message = MISCONFIGURED_PAYLOAD_ERROR_MESSAGE.format(\n bot_name=self.test_user.full_name,\n support_email=FromAddress.SUPPORT,\n ).strip()\n\n msg = self.get_last_message()\n self.assertEqual(msg.content, expected_message)\n self.assertEqual(msg.recipient.type, Recipient.PERSONAL)", "def test_incorrect_token_post(self): \n request = self.build_request(token=\"incorrect_token\")\n response = self.app.post('/_ah/push-handlers/receive_message',json.dumps(request).encode('utf-8'),content_type=\"application/json\")\n self.assertEqual(response.status_int, 200)\n self.assertRaises(ValueError)", "async def test_mailgun_webhook_with_missing_signature_without_api_key(\n http_client, webhook_id_without_api_key, mailgun_events\n) -> None:\n event_count = len(mailgun_events)\n\n await http_client.post(\n f\"/api/webhook/{webhook_id_without_api_key}\",\n json={\"hello\": \"mailgun\", \"signature\": {}},\n )\n\n assert len(mailgun_events) == event_count + 1\n assert mailgun_events[-1].data[\"webhook_id\"] == webhook_id_without_api_key\n assert mailgun_events[-1].data[\"hello\"] == \"mailgun\"\n\n await http_client.post(\n f\"/api/webhook/{webhook_id_without_api_key}\", json={\"hello\": \"mailgun\"}\n )\n\n assert len(mailgun_events) == event_count + 1\n assert mailgun_events[-1].data[\"webhook_id\"] == webhook_id_without_api_key\n assert mailgun_events[-1].data[\"hello\"] == \"mailgun\"", "def test_webhook_bad_signature(self):\n payload = json.dumps({\n 'matrix': [\n {\n 'config': {\n 'env': [\n 'REVIEWBOARD_STATUS_UPDATE_ID=%d'\n % self.status_update.pk,\n 'REVIEWBOARD_TRAVIS_INTEGRATION_CONFIG_ID=%d'\n % self.config.pk,\n ],\n },\n },\n ],\n })\n rsp = self.client.post(self.webhook_url, {'payload': payload})\n\n self.assertEqual(rsp.status_code, 400)\n self.assertEqual(\n rsp.content,\n b'Invalid Travis 
CI webhook signature for status update %d.'\n % self.status_update.pk)", "def test_save_check_data(client):\n\n del proto_reminder['message']\n res = client.post('/api/reminders', json=proto_reminder)\n assert res.status_code == 400", "def post_message(webhook_url, message):\n try:\n r = requests.post(webhook_url, json=message)\n return (True, 'success')\n except Exception as e:\n return (False, 'Encountered exception:\\n' + render_exception(e))", "def test_invalid_form_message(self):\n response = self.client.post(self.get_url(self.trait.pk), {'tag': ''})\n messages = list(response.wsgi_request._messages)\n self.assertEqual(len(messages), 1)\n self.assertTrue('Oops!' in str(messages[0]))", "def test_invalid_form_message(self):\n response = self.client.post(self.get_url(self.trait.pk), {'tag': '', })\n messages = list(response.wsgi_request._messages)\n self.assertEqual(len(messages), 1)\n self.assertTrue('Oops!' in str(messages[0]))", "def test_webhook_unkown_action(self):\n event = {\n \"body\": json.dumps({\n \"queryResult\": {\n \"action\": \"1manage_bmi\"\n }})\n }\n context = {}\n resp = webhook(event, context)\n self.assertEqual(resp[\"statusCode\"], 500)\n self.assertEqual(resp[\"body\"], json.dumps({}))", "def test_webhook_build_error(self):\n payload = json.dumps({\n 'matrix': [\n {\n 'config': {\n 'env': [\n 'REVIEWBOARD_STATUS_UPDATE_ID=%d'\n % self.status_update.pk,\n 'REVIEWBOARD_TRAVIS_INTEGRATION_CONFIG_ID=%d'\n % self.config.pk,\n ],\n },\n },\n ],\n 'build_url': 'https://example.com/build',\n 'state': 'failed',\n })\n self.spy_on(TravisCIWebHookView._validate_signature,\n owner=TravisCIWebHookView,\n call_fake=lambda self, request, integration_config: True)\n\n rsp = self.client.post(self.webhook_url, {'payload': payload})\n\n self.assertEqual(rsp.status_code, 200)\n\n self.status_update = StatusUpdate.objects.get(pk=self.status_update.pk)\n self.assertEqual(self.status_update.url, 'https://example.com/build')\n self.assertEqual(self.status_update.state,\n StatusUpdate.DONE_FAILURE)", "async def test_mailgun_webhook_event_without_an_api_key(\n http_client, webhook_id_without_api_key, mailgun_events\n) -> None:\n timestamp = \"1529006854\"\n token = \"a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0\"\n\n event_count = len(mailgun_events)\n\n await http_client.post(\n f\"/api/webhook/{webhook_id_without_api_key}\",\n json={\n \"hello\": \"mailgun\",\n \"signature\": {\n \"signature\": hmac.new(\n key=bytes(API_KEY, \"utf-8\"),\n msg=bytes(f\"{timestamp}{token}\", \"utf-8\"),\n digestmod=hashlib.sha256,\n ).hexdigest(),\n \"timestamp\": timestamp,\n \"token\": token,\n },\n },\n )\n\n assert len(mailgun_events) == event_count + 1\n assert mailgun_events[-1].data[\"webhook_id\"] == webhook_id_without_api_key\n assert mailgun_events[-1].data[\"hello\"] == \"mailgun\"", "async def test_mailgun_webhook_event_with_correct_api_key(\n http_client, webhook_id_with_api_key, mailgun_events\n) -> None:\n timestamp = \"1529006854\"\n token = \"a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0\"\n\n event_count = len(mailgun_events)\n\n await http_client.post(\n f\"/api/webhook/{webhook_id_with_api_key}\",\n json={\n \"hello\": \"mailgun\",\n \"signature\": {\n \"signature\": hmac.new(\n key=bytes(API_KEY, \"utf-8\"),\n msg=bytes(f\"{timestamp}{token}\", \"utf-8\"),\n digestmod=hashlib.sha256,\n ).hexdigest(),\n \"timestamp\": timestamp,\n \"token\": token,\n },\n },\n )\n\n assert len(mailgun_events) == event_count + 1\n assert mailgun_events[-1].data[\"webhook_id\"] == 
webhook_id_with_api_key\n assert mailgun_events[-1].data[\"hello\"] == \"mailgun\"", "async def test_receive_post_ok(self):\n await self.webhook_connection.connect()\n assert self.webhook_connection.is_connected is True\n payload = {\"hello\": \"world\"}\n call_task = self.loop.create_task(self.call_webhook(\"test_topic\", json=payload))\n envelope = await asyncio.wait_for(self.webhook_connection.receive(), timeout=10)\n\n assert envelope\n\n message = cast(HttpMessage, envelope.message)\n dialogue = self.skill_dialogues.update(message)\n assert dialogue is not None\n assert message.method.upper() == \"POST\"\n assert message.body.decode(\"utf-8\") == json.dumps(payload)\n await call_task", "async def test_webhook_endpoint_unauthorized_update_doesnt_generate_telegram_text_event(\n hass: HomeAssistant,\n webhook_platform,\n hass_client: ClientSessionGenerator,\n unauthorized_update_message_text,\n) -> None:\n client = await hass_client()\n events = async_capture_events(hass, \"telegram_text\")\n\n response = await client.post(\n TELEGRAM_WEBHOOK_URL, json=unauthorized_update_message_text\n )\n assert response.status == 200\n assert (await response.read()).decode(\"utf-8\") == \"\"\n\n # Make sure any events would have fired\n await hass.async_block_till_done()\n\n assert len(events) == 0", "def test_webhook_no_env(self):\n payload = json.dumps({})\n rsp = self.client.post(self.webhook_url, {'payload': payload})\n\n self.assertEqual(rsp.status_code, 400)\n self.assertEqual(rsp.content, b'Got event without an env in config.')" ]
[ "0.73162293", "0.72650135", "0.70089185", "0.6989183", "0.6965128", "0.6857879", "0.68053627", "0.67763484", "0.6760223", "0.6692867", "0.66774166", "0.66758436", "0.665029", "0.66362166", "0.66284573", "0.65982944", "0.65941006", "0.6593365", "0.65475965", "0.6529135", "0.652329", "0.6470273", "0.64480865", "0.6419158", "0.6418783", "0.6416242", "0.6406553", "0.63935274", "0.63690674", "0.6365155" ]
0.7538486
0
Sets up biosafe and stores it as an object variable.
def setup_biosafe(self):
    # Generate dummy data in the right format
    species_presence = pd.DataFrame(
        np.random.randint(2, size=len(self.links_law)),
        columns=['speciesPresence'], index=self.links_law.index)
    ecotope_area = pd.DataFrame(
        np.ones(len(self.links_eco2.columns)-1) * 1e5,
        columns = ['area_m2'],
        index = self.links_eco2.columns.values[0:-1])

    # Simplify ecotope tables to VR ecotopes
    unique_eco = np.unique(
        np.hstack((self.vr_eco.ecotope1.values, self.vr_eco.ecotope2.values)))
    links_eco3 = self.links_eco2.reindex(columns=unique_eco)
    ecotope_area = ecotope_area.reindex(index=unique_eco)

    # Run a first version of Biosafe
    self.bsf_model = bsf.biosafe(
        self.legal_weights, self.links_law, links_eco3,
        species_presence, ecotope_area)
    #PotTax = self.bsf_model.TFI()
    #PotAll = self.bsf_model.FI()
    return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup(self):\n self.ae = None", "def __init__(self, barcamp, handler):\n self.barcamp = barcamp\n self.handler = handler\n self.app = self.handler.app\n self.config = self.handler.app.config\n self.user = self.handler.user", "def setup():\n global zb\n # Signal handler (Ctrl+C exit)\n signal.signal(signal.SIGINT, signal_handler) \n # DBus\n session_bus = dbus.SessionBus()\n objXBZB = session_bus.get_object(PROTOCOL_BUS_NAME, PROTOCOL_OBJ_PATH + \"/\" + XBEE_ZB + \"/\" + SOCKET0)\n zb = dbus.Interface(objXBZB, dbus_interface=PROTOCOL_BUS_NAME)", "def basetype_setup(self):\n # the text encoding to use.\n self.db.encoding = \"utf-8\"\n # A basic security setup\n lockstring = \"examine:perm(Wizards);edit:perm(Wizards);delete:perm(Wizards);boot:perm(Wizards);msg:false()\"\n self.locks.add(lockstring)\n # set the basics of being a bot\n self.cmdset.add_default(BotCmdSet)\n script_key = \"%s\" % self.key\n self.scripts.add(BotStarter, key=script_key)\n self.is_bot = True", "def setup(self):\n self.config = pau.IConfig\n self.session = pau.ISession\n pau.resolve(self)\n\n self.session.assets = Assets()\n self.config.db = self.db_name\n\n self.db = pau.IDb\n pau.resolve(self)\n\n # Instance\n i = Setup()\n pau.resolve(i)\n return i", "def __init__(self):\n # Get a weboob instance\n self.weboob = Weboob()\n self.backend = None", "def setup(client):\n\n client.add_cog(Faq(client))\n print(\"\\tLoaded Faq cog!\")", "def init(self):\n # IMPORTANT: create a new gob database model entry for this object\n self.gobify()", "def _setup(app_obj):", "def init():", "def setup( self ):", "def memb_init(self):\n self.initialize()", "def setup(self):\n self.machine = Machine(['a', 'b', 'c', '_'])", "def boot(self):\n\n pass", "def init(self, sevabot):\n\n logger.debug(\"GiantbombHandler init\")\n self.sevabot = sevabot\n self.skype = sevabot.getSkype()", "def initialise(self):\n self.set_up()", "def setUp(self):\n self.family = Family()\n self.decoder = Decoder()\n self.data1 = ['Atya', 'Sister-In-Law']\n self.data2 = ['Satya', 'Ketu', 'Male']", "def __init__(self):\n self.load()", "def setup(app):\n # Register builder.\n app.add_builder(BeamerBuilder)\n\n # Add setting for allowframebreaks.\n app.add_config_value(\"beamer_allowframebreaks\", True, \"beamer\")\n # Add setting for Beamer theme.\n app.add_config_value(\"beamer_theme\", \"Warsaw\", \"beamer\")\n # Adjust titles upon doctree-resolved.\n app.connect(\"doctree-resolved\", adjust_titles)\n\n return {\n \"version\": \"1.0\",\n \"parallel_read_safe\": True,\n \"parallel_write_safe\": True,\n }", "def __init__(self, mb_info, switch_config):\n self.microblaze = Arduino(mb_info, ARDUINO_MAILBOX_PROGRAM)\n self.iop_switch_config = switch_config", "def __init__(self):\n pygame.init()\n\n self.settings = Settings()\n\n self.screen = pygame.display.set_mode(\n (self.settings.screen_width, self.settings.screen_height))\n pygame.display.set_caption(\"52 Card Trick\")\n self.CardSet = CardSet(self)", "def __init_euca(self):\n if self.euca:\n return\n self.euca = Euca2ool()", "def setup_game(self):", "def _setup(self) -> None:\n\t\treturn", "def _init_objects(self) -> None:\n self.position = selectors.get_position(self.exchange, self.symbol)\n self.broker = Broker(self.position, self.exchange, self.symbol, self.timeframe)\n\n if self.hp is None and len(self.hyperparameters()) > 0:\n self.hp = {}\n for dna in self.hyperparameters():\n self.hp[dna['name']] = dna['default']", "async def init(self):\n self.base_tamplates = {}\n self.preparing_task = None\n 
self.app = aioweb.Application()\n self.runner = aioweb.AppRunner(self.app)", "def setup(self) -> None:", "def init():\n try:\n compile_contract(\n \"fishcake\", f\"Fishcake(sp.address('{pub_key_hash}'),{default_supply})\")\n fishcake_addr = deploy(\"fishcake\")\n print(\"\\n\")\n compile_contract(\n \"fishcakeBox\", f\"FishcakeBox({default_redeem_amt}, sp.address('{fishcake_addr}'))\")\n fishcake_box_addr = deploy(\"fishcakeBox\")\n setup(fishcake_addr, fishcake_box_addr)\n print(\"\\n\\n[!] Details :\\n\")\n print(f\"-- Fishcake Token Address : {fishcake_addr}\")\n print(f\"-- Fishcake Box Address : {fishcake_box_addr}\")\n except Exception as e:\n print(\"Failed to originate Contracts : \", e)", "def setUpClass(cls):\n cls.user = User()\n cls.user.first_name = \"Kev\"\n cls.user.last_name = \"Yo\"\n cls.user.email = \"[email protected]\"\n cls.storage = FileStorage()\n cls.console = HBNBCommand()", "def __init__(self, temboo_session):\n super(Image, self).__init__(temboo_session, '/Library/Freebase/Image')" ]
[ "0.56603616", "0.5621632", "0.55927515", "0.5504273", "0.53797036", "0.5317156", "0.5243224", "0.52396697", "0.52148014", "0.5166802", "0.51143235", "0.51060176", "0.50999767", "0.50967854", "0.50958705", "0.50950307", "0.50940466", "0.5094038", "0.50789654", "0.5071926", "0.50698096", "0.5066861", "0.5062106", "0.5058001", "0.50500965", "0.50406635", "0.50365543", "0.5031632", "0.5030729", "0.50294065" ]
0.6278834
0
Calculate the total area of all ecotopes on the playing board.
def ecotope_area_sums(self, board):
    # clean up the input and merge into a single dataframe
    cols = ['geometry', 'z_reference', 'landuse', 'biosafe']
    board_clean = board.loc[board.biosafe, cols]
    board_eco = pd.merge(board_clean, self.vr_eco,
                         on=['z_reference', 'landuse'])

    # optional: output gdf to shp
    # gdf = board_eco.copy()
    # gdf['biosafe'] = gdf.biosafe.values.astype('int')
    # gdf.to_file('board_eco.shp')

    # calculate the total area of all columns
    # note: landuse-z_reference combinations not in vr_ecotopes are
    # excluded
    area_eco1 = board_eco.groupby('ecotope1').sum()
    area_eco2 = board_eco.groupby('ecotope2').sum()
    area_fractions = pd.concat([area_eco1.fraction1, area_eco2.fraction2],
                               axis=1, sort=True)
    area_total = area_fractions.fillna(0).sum(axis=1).reset_index()
    area_total.columns = ['ecotope', 'area_m2']

    # assert that the total area of the ecotopes matches the biosafe
    # hexagons
    try:
        assert int(area_total.sum().area_m2) == int(board_clean.shape[0]),\
            ("ERROR: There appears to be one or more polygons that is not " +
             "detected correctly, resulting in a mismatch of the VR ecotopes")
    except AssertionError as error:
        print(error)
        pass

    area_out = area_total.set_index('ecotope')
    area_out.index.name = None
    return area_out
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def area(self):\n area = 0\n\n for room in self.rooms:\n area += room.polygon.area()\n\n for wall in self.walls:\n area += wall.polygon.area()\n\n return area", "def _area(self):\n self.area = 0.0\n for sail in self.sails:\n self.area += sail.area", "def total_area(self) :\n area = 0\n for i in self.residues :\n area += i.solvent_acc_area\n return area", "def calculatearea(self):\r\n return self.width * self.height", "def total_area(self):\n return numpy.prod([r[1] - r[0] for r in self.range_])", "def calculate_area(building, pixel_size=1):\n return len(building.points) * (pixel_size**2)", "def compute_surface_area(self):\n return np.sum(self._find_triangle_areas())", "def area(self):\n num_rows = self.row_end - self.row_start\n num_cols = self.col_end - self.col_start\n area = num_rows*num_cols\n return area", "def area(self):\n area = 0\n last = self._coordinates[-1]\n for c in self._coordinates:\n area += (last[0] * c[1] - last[1] * c[0])\n last = c\n return float(\"{:.2f}\".format(abs(area) * 0.5))", "def total_area(self):\n return self._total_area", "def area(self):\n area = self.__size * self.__size\n return(area)", "def area(self):\n area = self.__size * self.__size\n return area", "def area(self):\n\t\treturn self.height * self.height", "def area(self):\n return self.__size ** 2", "def getArea(self):\r\n return np.sum(self.array[:])", "def area(self):\n\t\treturn self.width * self.height", "def area(self):\n return self.__size ** 2", "def area(self):\n return self.__size ** 2", "def area(self):\n return self.__size ** 2", "def area(self):\n return self.__size ** 2", "def area(self):\n\t\treturn self.width() * self.height()", "def area(self, by_spec=False):\n if by_spec:\n cell_area = {}\n for element in self.elements:\n element_area = element.area(True)\n for ll in element_area.keys():\n if ll in cell_area:\n cell_area[ll] += element_area[ll]\n else:\n cell_area[ll] = element_area[ll]\n else:\n cell_area = 0\n for element in self.elements:\n cell_area += element.area()\n return cell_area", "def area(self):\n return self.__size * self.__size", "def area(self):\n return self.__size * self.__size", "def area(self):\n return self.__size * self.__size", "def area(self):\n return self.__size * self.__size", "def area(self) -> npt.NDArray[np.float_]:\n return np.sum(self.faces.area)", "def area(self):\n return (self.__size ** 2)", "def area(self):\n return (self.__size ** 2)", "def area(self):\n return (self.__size ** 2)" ]
[ "0.72191346", "0.69285345", "0.6818615", "0.6791541", "0.6719441", "0.6589277", "0.6585048", "0.6579154", "0.65724313", "0.65674067", "0.6542835", "0.651549", "0.6497927", "0.64875937", "0.64872205", "0.6446514", "0.6436132", "0.6436132", "0.6436132", "0.6436132", "0.6428201", "0.6425329", "0.6424015", "0.6424015", "0.6424015", "0.6424015", "0.64212507", "0.64013994", "0.64013994", "0.64013994" ]
0.7589144
0
Function that calculates the biodiversity score based on the Biosafe output. The numbers 29.33 and 1.4349 follow from running Monte Carlo (MC) simulations to determine the lowest and highest possible scores. The biodiversity score reflects the 0-100% range between the two.
def set_score(self): if self.PotTax_intervention is None: if self.PotTax_reference is not None: self.score = (((self.PotTax_reference.sum().TFI - 29.33) / 1.4349) / 100) else: print("There is no Biosafe output to score") return else: self.score = (((self.PotTax_intervention.sum().TFI - 29.33) / 1.4349) / 100) return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ucbScore(self,totalPlayedTimes):\n winRate = self.winRate()\n #print totalPlayedTimes\n #print self.playedTimes\n confidenceInterval = math.sqrt(2 * math.log(totalPlayedTimes,math.e) / self.playedTimes)\n \n return winRate + confidenceInterval", "def calc_score(score):\n if not score:\n return 0\n dbot_score = 1\n if score >= 95:\n dbot_score = 3\n elif score >= 75:\n dbot_score = 2\n return dbot_score", "def get_score(score_map, test_result):\n if test_result < score_map[20]:\n return int((test_result / score_map[20]) * 20)\n elif test_result < score_map[40]:\n return int(20 + (test_result - score_map[20]) / (score_map[40] - score_map[20]) * 20)\n elif test_result < score_map[60]:\n return int(40 + (test_result - score_map[40]) / (score_map[60] - score_map[40]) * 20)\n elif test_result < score_map[85]:\n return int(60 + (test_result - score_map[60]) / (score_map[85] - score_map[60]) * 20)\n elif test_result < score_map[100]:\n return int(85 + (test_result - score_map[85]) / (score_map[100] - score_map[85]) * 20)\n else:\n return 100", "def get_score(snack_data, percentage_data, snack, snack_query, protein_query, carb_query, fat_query):\n\tstart_time = time.time()\n\n\t#Load necessary data\n\t\"\"\"\twith open ('../../../Data/percentagesDict.pickle', 'rb') as f:\n\t\tpercentage_data = pickle.load(f)\n\n\twith open ('../../../Data/FINAL_snacks_data.pickle', 'rb') as f:\n\t\tsnack_data = pickle.load(f)\"\"\"\n\n\t#Set constants\n\tLOW_FAT = .3\n\tHIGH_FAT = .6\n\tLOW_CARB = .1\n\tHIGH_CARB = .2\n\tLOW_PRO = .2\n\tHIGH_PRO = .4\n\n\t#Convert macro percentages to 'high', 'med', 'low' categories\n\tfat = percentage_data[snack]['fat']\n\tprotein = percentage_data[snack]['protein']\n\tcarb = percentage_data[snack]['carb']\n\n\tif fat > HIGH_FAT:\n\t\tfat_content = 'high'\n\telif fat < LOW_FAT:\n\t\tfat_content = 'low'\n\telse:\n\t\tfat_content = 'med'\n\n\tif protein > HIGH_PRO:\n\t\tprotein_content = 'high'\n\telif protein < LOW_PRO:\n\t\tprotein_content = 'low'\n\telse:\n\t\tprotein_content = 'med'\n\n\tif carb > HIGH_CARB:\n\t\tcarb_content = 'high'\n\telif carb < LOW_CARB:\n\t\tcarb_content = 'low'\n\telse:\n\t\tcarb_content = 'med'\n\n\t#Set x values\n\tx1 = fat_query == fat_content\n\tx2 = carb_query == carb_content\n\tx3 = protein_query == protein_content\n\tx4 = cooccur(snack_data, snack, snack_query) \n\tx5 = snack_data[snack]['rating']\n\n\tw1 = 1\n\tw2 = 1\n\tw3 = 1\n\tw4 = 1\n\tw5 = 1\n\t\n\t#print('x1: {}, x2: {}, x3: {}, x4: {}, x5: {}'.format(x1, x2, x3, x4, x5))\n\t#print(\"get_score() time: --- %s seconds ---\" % (time.time() - start_time))\n\n\n\treturn w1*x1 + w2*x2 + w3*x3 + w4*x4 + w5*x5", "def get_score(self, a, b):\n ### FILL IN ###", "def get_h_score(start, end):\n #uses a heuristic function\n #return 0 #used if you want Djikstras algorithm\n return (abs(end[0]-start[0])+abs(end[1]-start[1])) * 10", "def get_scores(self):\n hist = self.confusion_matrix\n # hist = [TN,FP;FN,TP]\n acc = np.diag(hist).sum() / hist.sum()\n acc_cls = np.diag(hist) / hist.sum(axis=1)\n acc_cls = np.nanmean(acc_cls)\n iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))\n # iou = iu.sum() / self.n_classes\n mean_iou = np.nanmean(iu) # if classes = 2: iou = miou\n freq = hist.sum(axis=1) / hist.sum()\n fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()\n cls_iou = dict(zip(range(self.n_classes), iu))\n\n ##############################################\n tn = hist[0, 0]\n tp = np.diag(hist).sum() - tn\n fp = np.triu(hist, 1).sum()\n fn = np.tril(hist, -1).sum()\n precision 
= tp / (tp + fp)\n recall = tp / (tp + fn)\n f1 = 2 * precision * recall / (precision + recall)\n\n # for medical img, img_seg \\in [0,1]\n dice = 2 * tp / (tp + tp + fn + fp)\n # dice = f1-score\n dsc = 2 * tp / (tp + fn + fp)\n # dsc = jaccard\n # voe = 2 * abs(fp + fn) / (tp + tp + fn + fp)\n # voe = 1 - dsc\n\n k2 = {\n # \"Overall Acc: \\t\": acc,\n 'Mean Acc': float(judge_nan(acc_cls)),\n # \"FreqW Acc : \\t\": fwavacc,\n 'Mean IoU': float(judge_nan(mean_iou)),\n 'F1-score': float(judge_nan(f1)),\n 'DSC': float(judge_nan(dsc)),\n 'Precision': float(judge_nan(precision)),\n 'Recall': float(judge_nan(recall)),\n }\n\n return k2", "def davies_bouldin_score(self):\r\n print(colored(\"The davies bouldin score of the clustering is %0.002f\\n\" %(davies_bouldin_score(self.X, self.labels)),color = 'red', attrs=['bold']))\r\n print()\r\n print(colored(\"The points in each cluster are : \",color = 'yellow', attrs=['bold']))\r\n print(collections.Counter(self.labels))", "def prob5(file = 'crime_data.csv'):\n #Question one\n data = pd.read_csv(file)\n my_list = data.columns[(data.mean()>1500000)][2:]\n \n #Get the correlation between the three crimes\n corr = data[my_list].corr()\n prop_max = data['Property'].max()\n larc_max = data[\"Larceny\"].max()\n ans1 = 'Property'\n \n #Question 2 get the ammount of aggravated assaults\n new_data = data[data['Year']>=2000]\n new_data = new_data.sort_values('Murder',ascending=True)\n agg_as = new_data['Aggravated Assault']\n agg_as = agg_as[agg_as>850000]\n ans2 = agg_as.values\n \n #Question 3 get the highest year of crime and get the percentage of that\n S = 10\n N = int(len(data)/S)\n \n #Split the decades\n frames = [ data.iloc[i*S:(i+1)*S].copy() for i in range(N+1) ]\n dec_crime = []\n for dec in frames:\n dec_crime.append(dec['Total'].mean())\n \n #Get the highest crime and its percentage of the total\n my_dec = frames[np.argmax(dec_crime)]\n my_crimes = ['Violent','Property','Murder','Forcible Rape','Robbery','Aggravated Assault','Burglary','Larceny','Vehicle Theft']\n high_crime = my_dec[my_crimes].mean().idxmax()\n ans3 = float(my_dec[high_crime].mean()/my_dec['Total'].mean())\n return(ans1,ans2,ans3)", "def disp_score():", "def __calculate_ethnic_diversity_score(project: dict, student: dict) -> int:\n # project_name = project[\"fields\"][PROJECT_NAME_FIELD]\n # student_name = student[\"fields\"][SURVEY_STUDENT_NAME_FIELD][0]\n\n # print(\"Calculating ethnic pairing score for: Project({}) - Student({})\".format(project_name, student_name))\n\n # Get the ethnicities specified by the student\n student_ethnicities = student[\"fields\"].get(SURVEY_ETHNICITIES_FIELD, None)\n if not student_ethnicities:\n # The student didn't specify ethnicities, so we can't calculate a score\n return 0\n\n # Get the list of current assignments for the project team\n team_assignments = __get_team_assignments(project)\n\n # This list will hold the list of ethnicities on the team\n team_ethnicities = []\n for assignment in team_assignments:\n assigned_student_ethnicities = assignment.student[\"fields\"].get(SURVEY_ETHNICITIES_FIELD, None)\n\n if assigned_student_ethnicities:\n team_ethnicities.append(assigned_student_ethnicities)\n\n # Team ethnicities is going to be a list of lists, so let's flatten it\n team_ethnicities = [item for sublist in team_ethnicities for item in sublist]\n\n # ================================================================================================================\n # Get the count ethnicities for the already assigned students\n 
ethnicity_counter = __get_ethnicity_counter()\n ethnicity_counter.update(team_ethnicities)\n\n # Check each of the student's listed ethnicities and take the highest score\n best_ethnicity_score = 0\n for student_ethnicity in student_ethnicities:\n matching_ethnicity_count = ethnicity_counter.get(student_ethnicity)\n\n current_ethnicity_score = 0\n\n if matching_ethnicity_count == 0:\n # This is good, as it will make the team more diverse\n current_ethnicity_score = SURVEY_BASE_ETHNICITY_WEIGHT\n elif matching_ethnicity_count == 1:\n # This is better, as it will pair students with like ethnicities\n current_ethnicity_score = SURVEY_BASE_ETHNICITY_WEIGHT * 2\n\n # Check to see if this is a better match\n if current_ethnicity_score > best_ethnicity_score:\n best_ethnicity_score = current_ethnicity_score\n\n return best_ethnicity_score", "def bridge_score(bridge):\n return (bridge_strength(bridge), len(bridge))", "def University_calculation(jobid):\r\n min_cgpa=90\r\n \"\"\"~~~~~~~~~\"\"\"\r\n dbconnect= connect_to_db()\r\n Candidate_qualifications=pd.read_sql(\"select candidate_id,university_name,institute_name,aggregate from candidate_qualification where candidate_id in(select candidate_id from master_id where job_id=\"+str(jobid)+\")\",con=dbconnect)\r\n College_data=pd.read_sql(\"select * from college_tiers\",con=dbconnect)\r\n Candidate_qualifications[\"aggregate\"]=Candidate_qualifications[\"aggregate\"].apply(lambda x: 0 if x<0 or x>100 else x)\r\n Candidate_qualifications[\"aggregate\"]=Candidate_qualifications[\"aggregate\"].apply(lambda x: x*10 if 5<x<10 else x)\r\n Candidate_qualifications[\"aggregate\"]=Candidate_qualifications[\"aggregate\"].apply(lambda x: 25*x if 0<x<4 else x)\r\n\r\n def Aggregate():\r\n Unique_candids=Candidate_qualifications[[\"candidate_id\",\"aggregate\"]].groupby(\"candidate_id\").mean()\r\n Unique_candids[\"aggregate\"]=Unique_candids[\"aggregate\"].apply(lambda x:x-min_cgpa)\r\n minval=min(Unique_candids[\"aggregate\"])\r\n maxval=max(Unique_candids[\"aggregate\"])\r\n Unique_candids[\"aggregate\"]=Unique_candids[\"aggregate\"].apply(lambda x:(x-minval)*100/(maxval-minval))\r\n Unique_candids=Unique_candids.reset_index()\r\n return Unique_candids\r\n \r\n def University_name():\r\n stop_words=[\"of\",\"on\",\"for\",\"the\",\"&\",\"and\"]\r\n unique_candids=list(np.unique(Candidate_qualifications[\"candidate_id\"]))\r\n candidate_univdict={}\r\n for i in unique_candids:\r\n candidate_univdict[i]=Candidate_qualifications[[\"university_name\",\"institute_name\"]][Candidate_qualifications[\"candidate_id\"]==i].values.tolist()\r\n candidate_univdict={k:list(map(lambda x:list(filter(lambda y:str(y).strip() not in[\"\",\"None\"],x)),v)) for k,v in candidate_univdict.items()}\r\n candidate_univdict={k: np.unique(list(itertools.chain.from_iterable(v))).tolist() for k,v in candidate_univdict.items()}\r\n for i in candidate_univdict.keys():\r\n for j in candidate_univdict[i]:\r\n if j in list(map(lambda x: str(x).lower(),College_data[\"College\"].tolist())):\r\n candidate_univdict[i][candidate_univdict[i].index(j)]=list(College_data[\"Tier\"][College_data[\"College\"]==j])[0]\r\n continue;\r\n if j in list(map(lambda x: str(x).lower(),College_data[\"College\"].tolist())):\r\n candidate_univdict[i][candidate_univdict[i].index(j)]=list(College_data[\"Tier\"][College_data[\"Ancronym\"]==j])[0]\r\n continue;\r\n else:\r\n Processed_collegedata=pd.DataFrame(College_data[\"College\"].apply(lambda x: [i for i in re.sub(\"[\\.-]\",\" \",x.lower()).split() if i not in 
stop_words]))\r\n Processed_collegedata[\"Ancronym\"]=College_data[\"Ancronym\"].apply(lambda x: [i for i in re.sub(\"[\\.-]\",\" \",x.lower()).split() if i not in stop_words])\r\n val=[w for w in re.sub(\"[\\.-]\",\" \",j.lower()).split() if w not in stop_words]\r\n Processed_collegedata[\"College\"]=Processed_collegedata[\"College\"].apply(lambda x:(len(set(val).intersection(set(x))))/len(set(val).union(set(x))))\r\n Processed_collegedata[\"Ancronym\"]=Processed_collegedata[\"Ancronym\"].apply(lambda x:(len(set(val).intersection(set(x))))/len(set(val).union(set(x))))\r\n maxval=Processed_collegedata.max().idxmax()\r\n if Processed_collegedata[maxval].idxmax()>0.5:\r\n candidate_univdict[i][candidate_univdict[i].index(j)]=College_data.ix[Processed_collegedata[maxval].idxmax(),\"Tier\"]\r\n else:\r\n candidate_univdict[i][candidate_univdict[i].index(j)]=3\r\n \r\n candidate_univdict={k:100/min(v) for k,v in candidate_univdict.items() if len(v)>0}\r\n College_score=pd.DataFrame.from_dict(candidate_univdict,orient=\"index\")\r\n College_score=College_score.reset_index()\r\n College_score.columns=[\"candidate_id\",\"Tier_score\"]\r\n return College_score\r\n result=pd.merge(Aggregate(),University_name(),how=\"outer\",on=\"candidate_id\")\r\n result=pd.merge(pd.DataFrame(np.unique(Candidate_qualifications[\"candidate_id\"]),columns=[\"candidate_id\"]),result,how=\"left\",on=\"candidate_id\")\r\n result=result.fillna(0)\r\n return result", "def bleu_score(ref_file, hyp_file):\n command = 'perl scripts/multi-bleu.pl ' + ref_file + ' < ' + hyp_file\n c = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n details, error = c.communicate()\n details, error = details.decode('utf-8'), error.decode('utf-8')\n if not details.startswith('BLEU ='):\n raise ValueError('Error in BLEU score computation:\\n%s' % error)\n else:\n BLEU_str = details.split(' ')[2][:-1]\n BLEU = float(BLEU_str)\n return BLEU, details", "def get_risk_profile_bas_scores(self):\n answers = self.current_risk_profile_responses\n if not answers:\n return None\n\n scores = (answers.values('b_score', 'a_score', 's_score').aggregate(b_score=Sum('b_score'),\n a_score=Sum('a_score'),\n s_score=Sum('s_score')))\n\n extents = (\n RiskProfileAnswer.objects.filter(question__group=self.risk_profile_group)\n .values('question').annotate(\n min_b=Min('b_score'), max_b=Max('b_score'),\n min_a=Min('a_score'), max_a=Max('a_score'),\n min_s=Min('s_score'), max_s=Max('s_score'),\n ).aggregate(\n min_b_sum=Sum('min_b'), max_b_sum=Sum('max_b'),\n min_a_sum=Sum('min_a'), max_a_sum=Sum('max_a'),\n min_s_sum=Sum('min_s'), max_s_sum=Sum('max_s'),\n )\n )\n\n max_b = extents['max_b_sum']\n max_a = extents['max_a_sum']\n max_s = extents['max_s_sum']\n return (\n scores['b_score'] / max_b if max_b > 0 else 0,\n scores['a_score'] / max_a if max_a > 0 else 0,\n scores['s_score'] / max_s if max_s > 0 else 0,\n )", "def score(self):\n # loop over aminoacids in protein and calculate how often H and C are surrounded by H and C\n for aminoacid in self.aminoacids:\n if aminoacid.aminoacid_type == \"H\":\n self.stability = self.stability + (-1 * self.surrounded_by(aminoacid.location, \"H\", aminoacid.aminoacid_number)) + (-1 * self.surrounded_by(aminoacid.location, \"C\", aminoacid.aminoacid_number))\n elif aminoacid.aminoacid_type == \"C\":\n self.stability = self.stability + (-5 * self.surrounded_by(aminoacid.location, \"C\", aminoacid.aminoacid_number)) + (-1 * self.surrounded_by(aminoacid.location, \"H\", 
aminoacid.aminoacid_number))\n self.stability = self.stability/2\n return int(self.stability)", "def score( self ):\r\n result = 0.0\r\n for rr in self.ee.getRsrcs( ):\r\n value = self.scoreRsrc( rr )\r\n result += value\r\n print( \"INFO: Value for the schedule is %s \" % ( rr, result ) )\r\n return( result )", "def stateQualtityScore(roomba):\n return 0", "def calculate(self):\n\n s_sum = 0\n class_num = len(self.scores)\n \n for i in range(class_num):\n s_sum += self.scores[i]\n\n av = float(s_sum)/class_num\n if av >= 90:\n return 'O'\n elif av >= 80:\n return 'E'\n elif av >= 70:\n return 'A'\n elif av >= 55:\n return 'P'\n elif av >= 40:\n return 'D'\n else:\n return 'T'", "def get_score(self):\r\n if self.is_complete():\r\n score = 1\r\n elif self.is_half_complete():\r\n score = 0.5\r\n else:\r\n score = 0\r\n return {'score': score,\r\n 'total': self.max_score()}", "def worst_score(self):\r\n pass", "def computeFScores(self, targetLabels, actualLabels):\r\n if self.prMeasures is None:\r\n self.prMeasures = self.computePRMeasures(targetLabels, actualLabels)\r\n if self.prMeasures[0] == 0:\r\n return 0\r\n self.f1score = 2 * self.prMeasures[0] * self.prMeasures[1] / (0.0 + self.prMeasures[0] + self.prMeasures[1])\r\n return self.f1score", "def score(A, B):\n assert 10 <= A <= 100 and 10 <= B <= 100 # you can't get too slow or too fast\n trackA = 100 - B\n trackB = 100 - A\n tA = trackA/A\n tB = trackB/B\n return tB - tA", "def muc_scores(self):\n A_card, B_card = self.shape\n V_card = len(self)\n N = self.grand_total\n\n recall = _div(N - V_card, N - A_card)\n precision = _div(N - V_card, N - B_card)\n fscore = hmean(recall, precision)\n return precision, recall, fscore", "def viterbi_score(confusion_networks):\n for confusion_network in confusion_networks:\n prev, score = [-infinity] * len(confusion_network), [-infinity] + [0.0] * len(confusion_network)\n for t in range(0, len(confusion_network)): # t: words in the sentence (\"bfs\")\n prev, score = score, prev\n for j in range(0, len(confusion_network[t])): # Iterates deep-first in a CN position (\"dfs\")\n score[j] = max([prev[i] +\n confusion_network[i][j][2]\n for i in range(0, len(confusion_network[t]))])\n return max([score[i] for i in range(1, len(confusion_network[t]))])", "def __calculateNormalizedScores(self):\n year_scores = {0 : []}\n for venue in self.venue_scores:\n v_scores = []\n for year in self.venue_scores[venue]:\n v_scores.append(self.venue_scores[venue][year])\n if year not in year_scores:\n year_scores[year] = []\n year_scores[year].append(self.venue_scores[venue][year])\n x_year = np.average(np.array(v_scores))\n self.venue_scores[venue][0] = x_year\n year_scores[0].append(x_year)\n \n ##for standardization\n #year_metrics = {x : (np.average(np.array(year_scores[x])), np.std(np.array(year_scores[x]))) for x in year_scores}\n ##for normalization\n year_metrics = {x: (max(year_scores[x]), min(year_scores[x])) for x in year_scores}\n \n #print year_metrics\n \n for venue in self.venue_scores:\n self.normalized_scores[venue] = dict()\n for year in self.venue_scores[venue]:\n #self.standard_scores[venue][year] = round((self.venue_scores[venue][year] - year_metrics[year][0]) / year_metrics[year][1],5)\n #self.normalized_scores[venue][year] = (self.venue_scores[venue][year] - year_metrics[year][1]) / (year_metrics[year][0] - year_metrics[year][1]) + eps\n self.normalized_scores[venue][year] = (self.venue_scores[venue][year] - year_metrics[year][1] + self.epsilon) / (year_metrics[year][0] - year_metrics[year][1] + 
self.epsilon)", "def NormalizePhenotypeScore(score, max_score):\n return float(score) / float(max_score) * 100", "def calculate_bleu(output_lns, refs_lns):\n return round(corpus_bleu(output_lns, [refs_lns]).score, 4)", "def question2():\n \n # load sequences and scoring matrix\n score_matrix = read_scoring_matrix(PAM50_URL)\n human_seq = \"HSGVNQLGGVFVNGRPLPDSTRQKIVELAHSGARPCDISRILQVSNGCVSKILGRYYETGSIRPRAIGGSKPRVATPEVVSKIAQYKRECPSIFAWEIRDRLLSEGVCTNDNIPSVSSINRVLRNLASEKQQ\"\n frfly_seq = \"HSGVNQLGGVFVGGRPLPDSTRQKIVELAHSGARPCDISRILQVSNGCVSKILGRYYETGSIRPRAIGGSKPRVATAEVVSKISQYKRECPSIFAWEIRDRLLQENVCTNDNIPSVSSINRVLRNLAAQKEQQ\"\n consensus_pax = read_protein(CONSENSUS_PAX_URL)\n \n # compute human and fruitfly global alignment matrix with consensus pax\n human_align_matrix = student.compute_alignment_matrix(human_seq, consensus_pax, score_matrix, True)\n frfly_align_matrix = student.compute_alignment_matrix(frfly_seq, consensus_pax, score_matrix, True)\n \n # compute human and fruitfly global alignment sequences\n score_human, human_align, consensus_align = student.compute_global_alignment(human_seq, consensus_pax, \n score_matrix, human_align_matrix)\n score_fly, frfly_align, consensus_align_2 = student.compute_global_alignment(frfly_seq, consensus_pax,\n score_matrix, frfly_align_matrix)\n \n # compute percentages match for human and fruitfly\n human_count = 0.0\n for index in range(len(human_align)):\n if human_align[index] == consensus_align[index]:\n human_count += 1\n \n frfly_count = 0.0\n for index in range(len(frfly_align)):\n if frfly_align[index] == consensus_align_2[index]:\n frfly_count += 1\n \n print \"% Human: \" + str(human_count / len(human_align) * 100)\n print \"Hmn: \" + human_align\n print \"PAX: \" + consensus_align\n \n print \"\"\n \n print \"% FrFly: \" + str(frfly_count / len(frfly_align) * 100)\n print \"Fly: \" + frfly_align\n print \"PAX: \" + consensus_align_2", "def define_score(self, votes_string):\n\t\t#2*REW + colleagues + post-doctorate associate + 2* JBW\n\t\tvotes = [int(x) for x in votes_string] \n\t\tweights = [2,1,1,2]\n\t\tscore = 0\n\t\tfor i in range(0, 4):\n\t\t\tif votes[i] >= 0 and votes[i] <= 2:\n\t\t\t\tscore += votes[i]*weights[i]\n\t\treturn score" ]
[ "0.6018452", "0.589307", "0.58814985", "0.58546895", "0.5849375", "0.5819225", "0.57758874", "0.5774465", "0.573999", "0.571513", "0.56816", "0.56671625", "0.56617856", "0.5648553", "0.5645618", "0.56252235", "0.55905104", "0.5585876", "0.5570236", "0.55351394", "0.550514", "0.5499441", "0.549548", "0.54920506", "0.54892886", "0.54666096", "0.5452374", "0.5436716", "0.5432589", "0.5422339" ]
0.5997721
1
Like `Flask.app.route` but takes only a function that returns HtmlSanitizedStr
def safe_route(app: Flask, rule, **options) -> Callable[[RouteFunction], None]: original_decorator = app.route(rule, **options) def decorator(fn: RouteFunction): return original_decorator(compose(str, fn)) # type: ignore return decorator
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def strip_html(func):\n\n cleaner = re.compile(\"<.*?>\")\n def new_func(*args, strip_html=False, **kwargs):\n name = func(*args, **kwargs)\n if strip_html:\n if isinstance(name, str):\n return html.unescape(re.sub(cleaner, \"\", name))\n elif isinstance(name, list) or isinstance(name, tuple):\n return type(name)([html.unescape(re.sub(cleaner, \"\", n)) for n in name])\n else:\n return name\n new_func.__name__ = func.__name__\n new_func.__doc__ = func.__doc__\n return new_func", "def html(input):\n output=atpic.cleaner_alex.clean(input)\n return output", "def sanitize(text):\n try:\n from airy.core import sanitizer\n return smart_unicode(sanitizer.clean_html(text))\n except ImportError:\n logging.error(\"You need html5lib in order to use sanitize\")\n return \"ERROR: You need html5lib in order to use sanitize\"", "def rest2html(s):\n return core.publish_string(s, writer=html_fragment_writer)", "def check_csrf_token(func):\n def new_fn(self, req):\n if 'csrf_token' not in req.params:\n return exc.HTTPForbidden(\"You must provide a CSRF token\")\n\n csrf_token = req.params['csrf_token']\n if not security.valid_csrf_token(csrf_secret, csrf_token):\n return exc.HTTPForbidden(\"Invalid CSRF token\")\n\n return func(self, req)\n\n new_fn.exposed = True\n return new_fn", "def make_url_safe(self, url):\n return url.replace(' ', '%20')\\\n .replace('(', '%28')\\\n .replace(')', '%29')\\\n .replace('\"', '%22')", "def raw():\n return redirect(url_for('render.render', raw='true', **request.args))", "def wsgiapp():\n def decorator(func):\n def wsgiapp_wrapper(*args):\n # we get 3 args when this is a method, two when it is\n # a function :(\n if len(args) == 3:\n environ = args[1]\n start_response = args[2]\n args = [args[0]]\n else:\n environ, start_response = args\n args = []\n def application(environ, start_response):\n form = request.parse_formvars(environ,\n include_get_vars=True)\n status = '200 OK'\n form['environ'] = environ\n try:\n res = func(*args, **form.mixed())\n except ValueError, ve:\n status = '500 Server Error'\n res = '<html>There was an error: %s</html>' % \\\n html_quote(ve)\n start_response(status, [('content-type', 'text/html')])\n return [res]\n app = simplecatcher(application)\n return app(environ, start_response)\n wsgiapp_wrapper.exposed = True\n return wsgiapp_wrapper\n return decorator", "def sanitize(string):\n from html5lib import parseFragment, serialize\n\n parsed = parseFragment(string)\n clean = serialize(parsed, sanitize=True, omit_optional_tags=False,\n quote_attr_values='always')\n return clean", "def form(s):\r\n \r\n # removes leading and trailing apostrophe's from string\r\n s = s.strip(\"'\")\r\n \r\n # converts HTML hex back to characters\r\n s = s.replace(\"&#39;\", \"'\")\r\n s = s.replace(\"&#8217;\", \"’\")\r\n s = s.replace(\"&#8216;\", '\"')\r\n s = s.replace(\"&#8221;\", \"'\")\r\n s = s.replace(\"&#8220;\", \"'\")\r\n \r\n # success\r\n return s", "def wsgiapp(self):\n def wrapped(environ, start_response):\n \"\"\"wsgi application function\"\"\"\n start_time = time.clock()\n req = Request(environ)\n res = Responder(start_response, environ, self.mylookup, start_time)\n \n \n found_matches = None\n route = {}\n for reg, route in self.routes:\n found_matches = re.match(route['regex'], req.path)\n if found_matches and meetsreqs(req, route['reqs']):\n break\n else:\n return ''\n bindings = route['kwargs']\n for part in route['parts']:\n if len(part) == 2:\n bindings[part[0]] = part[1]\n for part in xrange(len(found_matches.groups())):\n if 
found_matches.group(part+1):\n partname = route['parts'][part][0]\n bindings[partname] = found_matches.group(part+1)\n\n return str(route['function'](res, **dict(bindings)))\n\n return wrapped", "def _on_raw(func_name):\n\n def wrapped(self, *args, **kwargs):\n args = list(args)\n try:\n string = args.pop(0)\n if hasattr(string, \"_raw_string\"):\n args.insert(0, string.raw())\n else:\n args.insert(0, string)\n except IndexError:\n # just skip out if there are no more strings\n pass\n result = getattr(self._raw_string, func_name)(*args, **kwargs)\n if isinstance(result, str):\n return ANSIString(result, decoded=True)\n return result\n\n return wrapped", "def make_html_safe(s):\n return s.replace(\"<\", \"&lt;\").replace(\">\", \"&gt;\")", "def any_string_method(request):\n return request.param", "def safeHTML(s):\n parser = StrippingParser()\n parser.feed(s)\n parser.close()\n parser.cleanup()\n return parser.result", "def csrf_protection(fn):\n def protected(*args):\n if 'X-Requested-With' in request.headers:\n return fn(*args)\n else:\n return \"X-Requested-With header missing\", HTTPStatus.FORBIDDEN\n return protected", "def htmlstr(self, unsafe) :\n\t\tunsafe = string.replace(unsafe, '&', '&amp;')\n\t\tunsafe = string.replace(unsafe, '<', '&lt;')\n\t\treturn string.replace(unsafe, '>', '&gt;')", "def _sanitize_function(self, func_msg):\n if func_msg is not None:\n func = str(func_msg)\n else:\n func = None\n return func", "def escape(input):\n # first correct the HTML\n output=html(input)\n # print \"HTML is: %s\" % output\n # then escape it\n output=atpic.cleaner_escape.escape(output)\n # print \"ESCAPD is: %s\" % output\n return output", "def urlify_pythonic(text, length):\n return text.rstrip().replace(\" \", \"%20\")", "def sanitize(cls):", "def html_tag(string, input_id, proc):\n return html_simple_element(\n string, \"a\", 'id=\"' + proc + \"_\" + normalise_tag_id(input_id) + '\"'\n )", "def get_html_string(self, **kwargs):\n ...", "def stringfilter(func):\n @wraps(func)\n def _dec(*args, **kwargs):\n if args:\n args = list(args)\n args[0] = str(args[0])\n return func(*args, **kwargs)\n\n return _dec", "def urlify(w, length):\n return w.strip().replace(' ', '%20')", "def assert_clean(data):\n def _ensure_clean(value):\n if value != bleach.clean(value):\n raise ValueError\n\n return escape_html(data)", "def tag(func):\n @functools.wraps(func)\n def wrapper(**kwargs):\n\n name = func.__name__\n\n if kwargs:\n \n try:\n\n check_text = kwargs['text']\n del kwargs['text']\n \n \n kwargs = {\n k.replace(\"__\", \"\").replace(\"_\", \"-\"): v for k, v in kwargs.items()\n }\n\n all_attr = f\"<{name} \", *(f' {key}=\"{value}\"' for key, value in kwargs.items()), \">\"\n open('index.html', 'a+').write(f\"\\n{join_attr(all_attr)}\")\n\n open('index.html', 'a+').write(f\"{check_text}\")\n open('index.html', 'a+').write(f\"</{name}>\")\n\n except KeyError:\n\n kwargs = {\n k.replace(\"__\", \"\").replace(\"_\", \"-\"): v for k, v in kwargs.items()\n }\n\n all_attr = f\"<{name} \", *(f' {key}=\"{value}\"' for key, value in kwargs.items()), \">\"\n open('index.html', 'a+').write(f\"\\n{join_attr(all_attr)}\")\n\n else:\n\n open('index.html', 'a+').write(f\"\\n<{name}>\")\n\n\n func(**kwargs)\n \n return wrapper", "def hello():\n return \"<h1 style='color:blue'>Hello There, Gainzzzasasas!</h1>\"", "def sanitize(sensitive_thing):\n sanitized_string = sensitive_thing\n length = len(sensitive_thing)\n if sensitive_thing:\n if \"http\" in sensitive_thing:\n # Split the URL – expecting a Slack (or 
other) webhook\n sensitive_thing = sensitive_thing.split(\"/\")\n # Get just the last part for sanitization\n webhook_tail = \"\".join(sensitive_thing[-1:])\n length = len(webhook_tail)\n # Construct a sanitized string\n sanitized_string = (\n \"/\".join(sensitive_thing[:-1])\n + \"/\"\n + webhook_tail[0:4]\n + \"\\u2717\" * (length - 8)\n + webhook_tail[length - 5 : length - 1]\n )\n # Handle anything else that's long enough to be a key\n elif length > 15:\n sanitized_string = sensitive_thing[0:4] + \"\\u2717\" * (length - 8) + sensitive_thing[length - 5 : length - 1]\n return sanitized_string", "def run_html():\n if __name__ != \"__main__\":\n app.run(debug=True)" ]
[ "0.60575014", "0.5710384", "0.54833734", "0.54531986", "0.52721405", "0.52415216", "0.5219625", "0.51628417", "0.51594687", "0.5052998", "0.50139946", "0.5011002", "0.5004929", "0.4984093", "0.49426973", "0.49259076", "0.49226424", "0.48769718", "0.48679265", "0.48450527", "0.48097062", "0.48082182", "0.47947422", "0.47917923", "0.47887138", "0.478794", "0.47780767", "0.47674096", "0.47654408", "0.4764804" ]
0.5955667
1
Program entry point. Loads a CSV file of observations. Determines how items were exchanged during various rendezvous. Prints the exchanges as they happen, if desired. Prints the latest owner of a specific item, if desired. Otherwise neatly prints a dictionary mapping suspects to the item they currently own. This program will return an exit code of `1` in one of two
def main(args): # Tuple of carried items and timeline time_tuple = load_timeline(args.observations) # For each Observation in list, calculated final held item for suspectPair in time_tuple[1].rendezvous(): # If user wanted exchanges, print each exchange if args.exchanges: print(suspectPair[0].name + " meets with " + suspectPair[1].name + " to exchange " + time_tuple[0][suspectPair[0].name] + " for " + time_tuple[0][suspectPair[1].name] + ".") # Trades items temp_item = time_tuple[0][suspectPair[0].name] time_tuple[0][suspectPair[0].name] = time_tuple[0][suspectPair[1].name] time_tuple[0][suspectPair[1].name] = temp_item # If no items specified or exchanges is true, # print list of final help items if (args.item == '') or (args.exchanges): pprint.pprint(time_tuple[0], indent=4) # If user specified an item, print who has said item if not args.item == '': for name, i in time_tuple[0].items(): if i == args.item: print(name + " had the " + i)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run():\n options = [\"Add\", \"Remove\", \"Update\", \"Oldest person\", \"Persons closest to average\"]\n common_options = [\"Name: \", \"Year: \"]\n file = \"model/hr/persons.csv\"\n title_list = [\"Id\", \"Name\", \"Year\"]\n choice = None\n dont_clear = False\n while choice != '0':\n if not dont_clear:\n os.system(\"clear\")\n table = data_manager.get_table_from_file(file)\n terminal_view.print_table(table, title_list)\n choice = terminal_view.get_choice_submenu(options)\n dont_clear = False\n if choice == '1':\n common.add(file, common_options)\n elif choice == '2':\n common.remove(file)\n elif choice == '3':\n common.update(file, common_options)\n elif choice == '4':\n terminal_view.print_result(hr.get_oldest_person(table), \"Oldest persons:\\n\")\n dont_clear = True\n elif choice == '5':\n msg = \"Persons with age closest to average:\\n\"\n terminal_view.print_result(hr.get_persons_closest_to_average(table), msg)\n dont_clear = True\n else:\n terminal_view.print_error_message(\"There is no such choice.\")", "def run():\n table = hr.get_hr_table_from_file()\n title_list = [\"ID\", \"Name\", \"BirthYear\"]\n options = [\"View records\",\n \"Add record\",\n \"Remove record\",\n \"Update record\",\n \"Which person is the oldest?\",\n \"Which person is the closet to average age?\"]\n\n\n choice = None\n while choice != \"0\":\n choice = terminal_view.get_choice_inner_menu(options, \"HR manager\")\n if choice == \"1\":\n terminal_view.print_table(table, title_list)\n elif choice == \"2\":\n record = terminal_view.get_inputs(title_list[1::],\"Please provide new item data\")\n table = hr.add(table, record)\n elif choice == \"3\":\n id_to_delete_table = terminal_view.get_inputs([\"ID\"],\"Item to delete\")\n id_to_delete = id_to_delete_table[0]\n table = hr.remove(table, id_to_delete)\n elif choice == \"4\":\n records = terminal_view.get_inputs(title_list,\"Edit item\")\n record_id = records[0]\n table = hr.update(table, record_id, records)\n elif choice == \"5\":\n oldest_person = hr.get_oldest_person(table)\n terminal_view.print_result(oldest_person, \"The oldest person: \")\n elif choice == \"6\":\n closest_to_average = hr.get_persons_closest_to_average(table)\n terminal_view.print_result(closest_to_average,\"The closest to average is: \")\n elif choice != \"0\":\n terminal_view.print_error_message(\"There is no such choice.\")", "def main():\n\n # open links.csv in order to access IMDB id numbers\n ifile = open('movie-countries.csv', \"rb\")\n reader = csv.reader(ifile)\n \n # writer for csv with countries\n ofile = open('country_stats.csv', \"wb\")\n writer = csv.writer(ofile)\n\n # deal with headers\n reader.next() # skip first line\n writer.writerow(['country', 'number of movies', 'number of primary movies'])\n\n # one dictionary for all mention of a country, one dictionary for if the country was the first one listed\n country_count_dict = {}\n country_count_primary_dict= {}\n\n # iterate through data\n for row in reader:\n # get the countries column\n countries = row[3]\n\n # add to dicionary of countries\n for country in countries.split(\"|\"):\n country_count_dict[country] = country_count_dict.get(country, 0) + 1\n\n # if it's the primary country\n if country == countries.split(\"|\")[0]:\n country_count_primary_dict[country] = country_count_primary_dict.get(country, 0) + 1\n\n # write to the file\n for key, value in country_count_dict.iteritems():\n writer.writerow([key , str(value), country_count_primary_dict.get(key, \"0\")])\n\n ifile.close()\n ofile.close()", "def 
main():\n\n try:\n people = Parser.read_file(sys.argv[1])\n print(\"\\nResult:\")\n for email, person in people.items():\n print(\"{}: {}\".format(email, person))\n except RuntimeError as error:\n print(error)\n exit(1)", "def main():\n\n # Refer to Problem Set 07 README.md for instructions and tips.\n\n # 6.1: Read in < sh_basic_info.csv >\n\n basic_info = read_csv_file('sh_basic_info.csv')\n\n # 6.2: Create instances of < SuperHeroine >\n\n heroines = {}\n for hero in basic_info:\n heroines[hero['name']] = SuperHeroine(hero['name'], hero['full_name'], hero['team'],\n hero['eye_color'], hero['hair_color'], hero['base'])\n print(heroines)\n\n # 6.3: Read in < sh_additional_info.csv >\n\n additional_info = read_csv_file('sh_additional_info.csv')\n\n # 6.4: Add powers and nemesis\n\n for row in additional_info:\n name = row[\"Heroine Name\"]\n instance_affected = heroines[name]\n how_affected = row[\"Category\"]\n value = row['Value']\n if how_affected == 'power':\n instance_affected.add_power(value)\n else:\n instance_affected.add_nemesis(value)\n\n # 6.5: Write to file\n\n write_to_file('storm.txt',heroines['Storm'])\n write_to_file('scarlet_witch.txt',heroines['Scarlet Witch'])\n write_to_file('jessica_jones.txt',heroines['Jessica Jones'])", "def main():\n\n csv_file = \"shortlist.csv\"\n team_count = 0\n participant_count = 0\n\n\n #Delete all existing teams and participants from the database.\n Team.objects.all().delete()\n Participant.objects.all().delete()\n\n with open(csv_file) as f:\n reader = csv.reader(f)\n data = [row for row in reader]\n\n for item in data:\n if item[0]:\n team_count += 1\n\n t = Team.objects.create(\n name=item[0].strip(),\n idea=item[30].strip()\n )\n\n no_of_p = int(item[1])\n print item[1]\n participant_count += no_of_p\n\n p1 = Participant.objects.create(\n participant_id=get_pid(),\n name=item[2].strip() + \" \" + item[3].strip(),\n gender=item[4].strip(),\n college=item[7].strip(),\n email=item[5].strip(),\n phone=str(item[6]),\n team=t\n )\n\n p2 = Participant.objects.create(\n participant_id=get_pid(),\n name=item[11].strip() + \" \" +item[12].strip(),\n gender=item[13].strip(),\n college=item[16].strip(),\n email=item[14].strip(),\n phone=str(item[15]),\n team=t\n )\n\n if no_of_p == 3:\n p3 = Participant.objects.create(\n participant_id=get_pid(),\n name=item[20].strip() + \" \" +item[21].strip(),\n college=item[25].strip(),\n gender=item[22].strip(),\n email=item[23].strip(),\n phone=str(item[24]),\n team=t\n )\n\n print \"{} teams and {} participants imported.\".format(team_count,\n participant_count)", "def main():\n\n # Read the CSV and get its content\n jobOfferList, professionsList = usefulFunctions.readCsv()\n \n # Create an empty output tab with the right number of lines and columns\n finalTab = usefulFunctions.createEmpty(jobOfferList, professionsList)\n \n # Fill the tab\n finalTab = usefulFunctions.fillTabExceptTotals(jobOfferList, professionsList, finalTab)\n \n # Update the totals \n finalTab = usefulFunctions.fillTotals(finalTab)\n \n print(\"\\nTable des métiers par profession et type de contrat : \")\n for line in finalTab:\n print(line)", "def main():\n\n # Ask for games to compare.\n games = {}\n more_games = True\n\n while more_games:\n search = input(\"Enter board game to search (leave empty if finished):\")\n\n if search:\n matches = bgg_compare.find_game(search)\n\n print(\"Games found:\")\n for game_id, name in matches.items():\n print(game_id + \"\\t\" + name)\n id = input(\"Enter the number before the intended game:\")\n 
games[id] = matches[id]\n\n else:\n more_games = False\n\n # If no games entered, compare all downloaded ratings.\n if not games:\n ids = []\n for f in glob.glob(\"[0-9]*.csv\"):\n id = os.path.splitext(f)[0]\n ids.append(id)\n game_info = bgg_compare.get_game_info(ids)\n for i, info in enumerate(game_info):\n name = info.find(\"name\", attrs={\"type\": \"primary\"})[\"value\"]\n games[ids[i]] = name\n\n print(\"Comparing games:\")\n\n all_ratings = []\n\n for game_id, name in games.items():\n\n print(name)\n\n ratings = {}\n filename = \"%s.csv\" % game_id\n\n try:\n ratings = bgg_compare.read_ratings(filename)\n except:\n ratings = bgg_compare.get_ratings(game_id)\n bgg_compare.write_ratings(ratings, filename)\n\n all_ratings.append(ratings)\n\n rankings = bgg_compare.condorcet_irv(all_ratings, list(games.keys()))\n\n print(\"Games ranked by Condorcet-IRV:\")\n\n header = [\"Rank\", \"ID\", \"Game\", \"Tiebreak\"]\n print(\"\\t\".join(header))\n\n for i, (game_id, tiebreak) in enumerate(rankings, 1):\n print(\"\\t\".join([str(i), game_id, games[game_id], str(tiebreak)]))\n\n outfile = input(\"Enter filename to save results (leave empty to not save)\")\n\n if outfile:\n with open(outfile, \"w\") as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow(header)\n for i, (game_id, tiebreak) in enumerate(rankings, 1):\n writer.writerow([str(i), game_id, games[game_id], str(tiebreak)])", "def main():\n # P R O B L E M 2\n a_random_pokemon = create_entry(81, \"Magnemite\", \"Electric\", \"Steel\", 25, 35, 70, 95, 55, 45, 1, False)\n\n for key in a_random_pokemon.keys():\n print(\"{}: {}\".format(key, a_random_pokemon[key]))\n\n print(a_random_pokemon[\"Battle Stats\"])\n print(a_random_pokemon[\"Battle Stats\"][\"HP\"])\n print(a_random_pokemon[\"Battle Stats\"][\"Attack\"])\n\n print() # For formatting\n # P R O B L E M 3\n filepath = \"pokemon.csv\"\n pokedex = create_pokedex(filepath)\n pokemon_key = \"Glaceon\"\n #\n # # This is one of the many ways to check if a certain key exists in a dictionary!\n try:\n # # This step could potentially fail, so we \"try\" it first.\n my_favorite_pokemon = pokedex[pokemon_key]\n except KeyError:\n # # If it does fail under a KeyError, we'll print an error message.\n print(\"ERROR: Pokemon {} does not exist!\".format(pokemon_key))\n else:\n # # If it doesn't fail under a KeyError, we'll print the Pokemon's info!\n print(\"PRINTING {}'S INFORMATION...\".format(pokemon_key))\n for key in my_favorite_pokemon.keys():\n print(\"{}: {}\".format(key, my_favorite_pokemon[key]))", "def main():\n # P R O B L E M 2\n a_random_pokemon = create_entry(81, \"Magnemite\", \"Electric\", \"Steel\", 25, 35, 70, 95, 55, 45, 1, False)\n\n for key in a_random_pokemon.keys():\n print(\"{}: {}\".format(key, a_random_pokemon[key]))\n\n # print(a_random_pokemon[\"Battle Stats\"])\n # print(a_random_pokemon[\"Battle Stats\"][\"HP\"])\n # print(a_random_pokemon[\"Battle Stats\"][\"Attack\"])\n\n print() # For formatting\n # P R O B L E M 3\n filepath = \"pokemon.csv\"\n pokedex = create_pokedex(filepath)\n pokemon_key = \"Glaceon\"\n\n # This is one of the many ways to check if a certain key exists in a dictionary!\n try:\n # This step could potentially fail, so we \"try\" it first.\n my_favorite_pokemon = pokedex[pokemon_key]\n except KeyError:\n # If it does fail under a KeyError, we'll print an error message.\n print(\"ERROR: Pokemon {} does not exist!\".format(pokemon_key))\n else:\n # If it doesn't fail under a KeyError, we'll print the Pokemon's info!\n print(\"PRINTING 
{}'S INFORMATION...\".format(pokemon_key))\n for key in my_favorite_pokemon.keys():\n print(\"{}: {}\".format(key, my_favorite_pokemon[key]))", "def run():\n\n title_list = [\"* id of item\",\n \"* title\",\n \"* price\",\n \"* month of the sale\",\n \"* day of the sale\",\n \"* year of the sale\",\n \"* customer's id\"]\n\n # ! sign with a position is unfinished function but added in options\n # !8. Show the sale numbers of games for each customer-292\n # !11. Show the customer who spent the most and the amount spent-365\"\n # !12. Show the customer's id who spent the most and the amount spent-376\"\n # !13. Show the most frequent buyers-387\n # !14. Show the if of the most freuent buyers-\n\n options = [\"Print table\",\n \"Get game title by id\",\n \"Show the most recently sold game\",\n \"Get the sum of games' prices by their id\",\n \"Get the customer's id by the id of a game\",\n \"Show ids of all customers who purchased games\",\n \"Show sale ids of all customers\",\n \"Show the owner of a recently sold game\",\n \"Show the owner's id of a recently sold game\",\n \"Show the most frequent buyers\",\n \"Show the ids of the most frequent buyers\",\n \"Get the customer by id\"]\n\n os.system('clear')\n file = \"model/sales/sales.csv\"\n choice = None\n while choice != \"0\":\n os.system('clear')\n terminal_view.print_predator()\n terminal_view.print_menu(\"What do you want to do:\", options, \"Back to main menu\")\n choice = terminal_view.get_choice(options)\n\n if choice == \"1\":\n os.system(\"clear\")\n common.all_print_table(title_list, file)\n\n elif choice == \"2\":\n os.system(\"clear\")\n print(\"Get game title by id\\n\")\n file_name = common.get_file()\n if file_name == \"\":\n file_name = file\n table = common.get_table_from_file(file_name)\n terminal_view.print_table(table, title_list)\n identification = common.get_input(\"Enter the id: \")\n print(sales.get_title_by_id_from_table(table, identification))\n common.waiting()\n os.system(\"clear\")\n\n elif choice == \"3\":\n file_name = common.get_file()\n if file_name == \"\":\n file_name = file\n table = common.get_table_from_file(file_name)\n most_recently_sold_game = sales.get_item_id_title_sold_last(table)\n print(\"The most recently sold game is: \")\n terminal_view.print_table([most_recently_sold_game], [\"* id\", \"* title\"])\n common.waiting()\n os.system(\"clear\")\n\n elif choice == \"4\":\n os.system(\"clear\")\n print(\"Get the sum of games' prices by their id\\n\")\n file_name = common.get_file()\n if file_name == \"\":\n file_name = file\n table = common.get_table_from_file(file_name)\n terminal_view.print_table(table, title_list)\n item_ids = []\n x = True\n while x:\n add_id = common.get_input(\"Enter the id or 'x' to exit: \")\n if add_id == \"x\":\n x = False\n item_ids.append(add_id)\n print(sales.get_the_sum_of_prices_from_table(table, item_ids))\n common.waiting()\n os.system(\"clear\")\n\n elif choice == \"5\":\n os.system(\"clear\")\n print(\"Get the customer's id by the id of a game\\n\")\n file_name = common.get_file()\n if file_name == \"\":\n file_name = file\n table = common.get_table_from_file(file_name)\n terminal_view.print_table(table, title_list)\n sale_id = common.get_input(\"Enter the id of a game: \")\n print(sales.get_customer_id_by_sale_id_from_table(table, sale_id))\n common.waiting()\n os.system(\"clear\")\n\n elif choice == \"6\":\n file_name = common.get_file()\n if file_name == \"\":\n file_name = file\n table = common.get_table_from_file(file_name)\n ids_of_all_customers = 
sales.get_all_customer_ids_from_table(table)\n print(\"ids of all customers who purchased games:\\n\", ids_of_all_customers)\n common.waiting()\n os.system(\"clear\")\n\n elif choice == \"7\":\n file_name = common.get_file()\n if file_name == \"\":\n file_name = file\n table = common.get_table_from_file(file_name)\n sale_ids_of_all_customers = sales.get_all_sales_ids_for_customer_ids_form_table(table)\n print(\"Sale ids of all customers:\\n\\n\", sale_ids_of_all_customers)\n common.waiting()\n os.system(\"clear\")\n\n elif choice == \"8\":\n file_name_sales = common.get_double_file(\"Choose a file with sales: \")\n if file_name_sales == \"\":\n file_name_sales = file\n file_name_customer = common.get_double_file(\"Choose a file with customers: \")\n if file_name_customer == \"\":\n file_name_customer = \"model/crm/customers.csv\"\n table_from_customers = common.get_table_from_file(file_name_customer)\n table_from_sales = common.get_table_from_file(file_name_sales)\n last_buyer = sales.get_the_last_buyer_name(table_from_customers, table_from_sales)\n print(\"Owner of a recently sold game: \", last_buyer)\n common.waiting()\n os.system(\"clear\")\n\n elif choice == \"9\":\n file_name = common.get_file()\n if file_name == \"\":\n file_name = file\n table = common.get_table_from_file(file_name)\n last_buyer_id = sales.get_the_last_buyer_id(table)\n print(\"Owner's id of a recently sold game: \", last_buyer_id)\n common.waiting()\n os.system(\"clear\")\n\n elif choice == \"10\":\n file_name_sales = common.get_double_file(\"Choose a file with sales: \")\n if file_name_sales == \"\":\n file_name_sales = file\n file_name_customer = common.get_double_file(\"Choose a file with customers: \")\n if file_name_customer == \"\":\n file_name_customer = \"model/crm/customers.csv\"\n table_from_customers = common.get_table_from_file(file_name_customer)\n table_from_sales = common.get_table_from_file(file_name_sales)\n the_most_frequent_buyers = sales.get_the_most_frequent_buyers_names(table_from_customers,\n table_from_sales,\n num=1)\n print(\"The most frequent buyers:\\n\", the_most_frequent_buyers)\n common.waiting()\n os.system(\"clear\")\n\n elif choice == \"11\":\n file_name = common.get_file()\n if file_name == \"\":\n file_name = file\n table = common.get_table_from_file(file_name)\n the_most_frequent_buyers_ids = sales.get_the_most_frequent_buyers_ids(table, num=1)\n print(\"ids of the most frequent buyers:\\n\", the_most_frequent_buyers_ids)\n common.waiting()\n os.system(\"clear\")\n\n elif choice == \"12\":\n os.system(\"clear\")\n print(\"Get the customer by id\\n\")\n file_name = common.get_file()\n if file_name == \"\":\n file_name = \"model/crm/customers.csv\"\n table = common.get_table_from_file(file_name)\n terminal_view.print_table(table, [\"* id\", \"* name\", \"* email\", \"* subscribed\"])\n identification = common.get_input(\"Enter the id: \")\n print(crm.get_name_by_id_from_table(table, identification))\n common.waiting()\n os.system(\"clear\")\n\n else:\n if choice != \"0\":\n terminal_view.print_error_message(\"There is no such choice.\\n\")\n common.waiting()", "def main(args):\n \n args_are_valid, input_filepath, output_filepath, base_url, message = handle_arguments(args)\n if not args_are_valid:\n return print(message)\n \n with open(input_filepath, newline=\"\") as input_csv:\n csvreader = csv.reader(input_csv, delimiter=\",\",)\n\n needed_input_columns = [\"Account ID\",\"First Name\", \"Created On\"]\n needed_output_columns = [\"Account ID\",\"First Name\", \"Created On\", 
\"Status\", \"Status Set On\"]\n headers = next(csvreader) #grab first row as headers\n if not set(needed_input_columns).issubset(headers):\n print('ERROR - input csv must contain columns [\"Account ID\",\"First Name\", \"Created On\"] as headers')\n\n with open(output_filepath, mode = \"w\", newline = \"\") as output_csv:\n csvwriter = csv.DictWriter(output_csv, fieldnames = needed_output_columns)\n csvwriter.writeheader()\n\n index_of = {}\n for index,header in enumerate(headers):\n index_of[header] = index\n write_dict = {}\n\n #Loop through inputfile\n for row in csvreader:\n still_valid = True\n if len(row) != len(headers):\n message = \"ERROR - csv row has incomplete data\"\n still_valid = False\n if still_valid:\n # extract data from row, columns can be in any order\n for column in needed_input_columns:\n write_dict[column] = row[index_of[column]]\n still_valid, write_dict, message = verify_and_clean_input(write_dict)\n if still_valid:\n write_dict, message = extend(write_dict, query(write_dict[\"Account ID\"], base_url))\n #only write to csv if all input data valid, query data nulled out if invalid\n csvwriter.writerow(write_dict) \n print(message)\n\n output_csv.close() \n input_csv.close()", "def main():\n args = get_args()\n FILE = args.FILE\n annotations = args.annotations\n outfile = args.outfile\n \n \n if not os.path.isfile(FILE):\n die('\"{}\" is not a file'.format(FILE))\n if not os.path.isfile(annotations):\n die('\"{}\" is not a file'.format(annotations))\n if os.path.isfile(FILE) and os.path.isfile(annotations):\n reader = csv.DictReader(open(FILE), delimiter = '\\t', fieldnames = (\"qseqid\", \"sseqid\", \"pident\", \"length\", \"mismatch\", \"gapopen\", \"qstart\", \"qend\", \"sstart\", \"send\", \"evalue\", \"bitscore\"))\n reader_a = csv.DictReader(open(annotations), fieldnames = (\"centroid\", \"domain\", \"kingdom\", \"phylum\", \"class\", \"order\", \"genus\", \"species\"))\n reader_b = csv.reader(open(annotations, 'r'))\n anno_dict = {}\n for row in reader_b:\n key1 = row[0]\n anno_dict[key1] = row[1:]\n\n #print(anno_dict)\n \n \"\"\"for dct in map(dict, reader_a):\n genus = (f\"{dct['genus']}\")\n species = (f\"{dct['species']}\")\n if genus == \"\": \n print(\"NA\")\n else:\n print(genus)\n if species == \"\":\n print(\"NA\")\n else:\n print(species)\"\"\"\n for dct in map(dict, reader):\n seq_id = (f\"{dct['sseqid']}\") \n pident = (f\"{dct['pident']}\")\n #print(seq_id)\n for dct_a in map(dict, reader_a):\n genus = (f\"{dct_a['genus']}\")\n species = (f\"{dct_a['species']}\")\n if any(seq_id == key for key in anno_dict): \n \"\"\"print(seq_id)\n print(pident)\n print(genus)\n print(species)\n #find a way to print genus and species of seq_id\n \"\"\"\n \n else:\n warn('Cannot find seq \"{}\" in lookup'.format(seq_id))\n \"\"\"for line_a in reader_a:\n an_id = (line_a['centroid']) \n print('\"{}\" is an_id'.format(an_id)) \n for line in reader:\n seq_id = (line['sseqid'])\n print('\"{}\" is seq_id'.format(seq_id))\n if seq_id == an_id:\n print(\"hi\")\n else:\n warn('Cannot find seq \"{}\" in lookup'.format(seq_id))\n \"\"\"\n #pprint.pprint(dict_list)\n #pprint.pprint(dict_list_a)\n #for key, value in d1.items():\n #if key is 'sseqid':\n #print(value)\n #print(dict_list_a['centroid']) ", "def main():\n movies = read_movies('bond.csv')\n\n print('Original list (first 10):')\n print_movies(movies[:10])\n\n sorted_movies = movie_sort(movies)\n print('\\nSorted list (by year, first 10):')\n print_movies(sorted_movies[:10])\n\n bonus(movies)", "def 
load_data(filename):\n #Admittedly copy-pasted from Heredity project cuz I'm resourceful like that\n #Makes 2 lists, one for evidence and one for labels\n evidence = []\n labels = []\n #Open csv file\n with open(\"shopping.csv\") as f:\n reader = csv.reader(f)\n next(reader)\n #Iterate through user rows of file\n for row in reader:\n i = 0\n tmp_list = []\n for column in row:\n if i in [0,2,4,11,12,13,14]:\n column = int(column)\n if i in [1,3,5,6,7,8,9]:\n column = float(column)\n if i == 10:\n if column == \"Jan\":\n column = 0\n if column == \"Feb\":\n column = 1\n if column == \"Mar\":\n column = 2\n if column == \"Apr\":\n column = 3\n if column == \"May\":\n column = 4\n if column == \"June\":\n column = 5\n if column == \"Jul\":\n column = 6\n if column == \"Aug\":\n column = 7\n if column == \"Sep\":\n column = 8\n if column == \"Oct\":\n column = 9\n if column == \"Nov\":\n column = 10\n if column == \"Dec\":\n column = 11\n if i in [15,16]:\n if column == \"Returning_Visitor\" or column == \"TRUE\":\n column = 1\n else:\n column = 0\n if i == 17:\n if column == \"TRUE\":\n column = 1\n else:\n column = 0\n labels.append(column)\n else:\n tmp_list.append(column)\n i+=1\n evidence.append(tmp_list)\n \n return (evidence,labels)", "def main():\n download_files()\n data, validCountries = clean_data()\n while True:\n choice = pyip.inputMenu([\"Confirmed\", \"Deaths\", \"Recovered\", \"Confirmed per capita\", \"Deaths per capita\", \"Recovered per capita\", \"See countries\", \"Quit\"], numbered=True)\n if choice == \"Quit\":\n break\n elif choice == \"See countries\":\n for country in validCountries:\n print(country)\n continue\n response = pyip.inputStr(\"Enter countries of interest (separate by commas): \")\n countries = response.split(\", \")\n for country in countries:\n if country not in validCountries: #check if countries are valid (in dataframe)\n countries.remove(country)\n print(f\"{country} is not a valid entry\")\n\n choiceDict = {\"Confirmed\": 0, \"Deaths\": 1, \"Recovered\": 2, \"Confirmed per capita\": 3, \"Deaths per capita\": 4, \"Recovered per capita\": 5}\n\n fig, ax = plt.subplots()\n\n for country in countries:\n data[choiceDict[choice]].T[country].plot(ax=ax)\n ax.legend(countries)\n plt.xlabel(\"Date\")\n label = choice\n if label != \"Deaths\":\n label += \" cases\"\n plt.ylabel(f\"Number of {label.lower()}\")\n plt.title(f\"Number of {label.title()}\")\n plt.show()", "def main():\n\n program_run = True\n\n while program_run:\n item = 1 # Column A\n expense = 2 # Column B\n income = 3 # Column C\n month = 4 # Column D\n row_numbers = {\n \"a\": 5, # Row of items\n \"e\": 0\n }\n\n error_message = \"\"\"\n Sorry! I don't seem to be able to carry out the request you gave me, please\n try again and give a valid argument (this program is case sensitive)\n \"\"\"\n choice_q = \"\"\"\n Would you like to Look at data options or put some data into your budget?\n [R/W]\n \"\"\"\n read_q = \"\"\"\n What information would you like to access?\n Total income[ti]\n Total expences[te]\n Profit[p]\n All[a]\n \"\"\"\n write_q = \"Have you sold or bought an item? 
[s/b] (q to quit)\"\n type = \"type 1 to read data, 2 to write data or q to quit: \"\n\n input1 = input(type) # Asks user whether they want to read or write info\n if input1 == \"q\": # Allows the user to quit at any given time\n program_run = False\n elif input1 == \"1\": # input chosen 'read'\n while input1 == \"1\":\n input2 = input(read_q) # Asks user on info regarding reading info\n if input2 == \"ti\": # Prints total income for the user\n print(\"\\tYour total income is: \" + cell_ti)\n break\n elif input2 == \"te\": # Prints total expenses for the user\n print(\"\\tYour total expences are: \" + cell_te)\n break\n elif input2 == \"p\": # Prints total profit for user, if Profit\n if cell_p <= 0: # below 0, user will get 'in debt' message.\n print(\"\\tYou're currently \" + cell_p + \" in debt.\")\n break\n else:\n print(\"\\tYour total profit is: \" + cell_p)\n break\n elif input2 == \"a\": # User will get all of the information above\n print(\"\\tYour total income is: \" + cell_ti + '\\n' +\n \"\\tYour total expences are: \" + cell_te + '\\n' +\n \"\\tYour total profit is: \" + cell_p)\n break\n else:\n print(error_message)\n else:\n break\n elif input1 == \"2\": # Input chosen 'write'\n while input1 == \"2\":\n input3 = input(write_q)\n if input3 == \"s\": # user sold something\n with open(\"row_used.json\") as ru:\n number = json.load(ru)\n a = 0\n a += int(number) # Code used to contantly move\n a += 1 # down rows when inputting new\n # data so that the data doesn't\n # overlap.\n with open(\"row_used.json\", 'w') as ru:\n ru.truncate(0)\n json.dump(a, ru)\n\n item_sold = input(\"What did you sell?: \")\n sheet.update_cell(a,item, item_sold)\n sheet.update_cell(a,expense, row_numbers['e']) # This 'e'(0) is here since the user didn't actually lose\n income_price = input(\"How much did you sell it for?: \") # any money, it will fill in the cell marked 'expences'\n sheet.update_cell(a,income, income_price) # to 0\n month_sold = input(\"In what month did you make the sale?(eg. Aug): \")\n sheet.update_cell(a,month, month_sold)\n\n ru.close()\n elif input3 == \"b\": # User bought something\n with open(\"row_used.json\") as ru:\n number = json.load(ru)\n a = 0\n a += int(number) # Code used to contantly move\n a += 1 # down rows when inputting new\n # data so that the data doesn't\n # overlap.\n with open(\"row_used.json\", 'w') as ru:\n ru.truncate(0)\n json.dump(a, ru)\n\n item_bought = input(\"What did you buy?: \")\n sheet.update_cell(a,item, item_bought)\n item_expense = input(\"How much was the item?: \")\n sheet.update_cell(a,expense, item_expense)\n sheet.update_cell(a,income, row_numbers['e']) # again 'e' is the value 0 since user isn't making income\n month_sold = input(\"In what month did you make the sale?(eg. 
Aug): \")\n sheet.update_cell(a,month, month_sold)\n\n ru.close()\n\n\n elif input3 == \"q\":\n program_run = False\n break\n\n else:\n print(error_message)", "def main3():\r\n #open the file\r\n with open('csvfile1.csv', 'r') as csvfile1:\r\n #use DictReader method from csv module\r\n csv_reader = csv.DictReader(csvfile1)\r\n #read the lines\r\n for line in csv_reader:\r\n print(line['email'])", "def run():\n\n options = [\"Add new record to table\",\n \"Remove a record with a given id from the table\",\n \"Update specified record in the table\",\n \"Number of different kinds of game that are available of each manufacturer\",\n \"Average amount of games in stock of a given manufacturer\",\n \"Print table\"]\n\n title_list = [\"*id\",\n \"* title\",\n \"* manufacturer\",\n \"* price\",\n \"* in_stock\"]\n os.system('clear')\n choice = None\n while choice != \"0\":\n terminal_view.print_menu(\"What do you want to do:\", options, \"Back to main menu\")\n choice = terminal_view.get_choice(options)\n os.system('clear')\n if choice == \"1\":\n # to jest dzialajacy plik model/store/games.csv\n file_name = common.get_input(\"Choose a file: \")\n if file_name == \"\":\n file_name = \"model/store/games.csv\"\n table = common.get_table_from_file(file_name)\n terminal_view.print_table(table, title_list)\n record = terminal_view.get_inputs(title_list, \"Enter data: \")\n table = store.add(table, record)\n common.write_table_to_file(file_name, table)\n os.system(\"clear\")\n terminal_view.gprint('*** Record has been added ***')\n common.waiting()\n os.system(\"clear\")\n elif choice == \"2\":\n file_name = common.get_input(\"Choose a file: \")\n if file_name == \"\":\n file_name = \"model/store/games.csv\"\n table = common.get_table_from_file(file_name)\n terminal_view.print_table(table, title_list)\n id_ = common.get_input(\"Get id to removed: \")\n table = store.remove(table, id_)\n common.write_table_to_file(file_name, table)\n os.system(\"clear\")\n terminal_view.gprint('*** Record has been removed ***')\n common.waiting()\n os.system(\"clear\")\n elif choice == \"3\":\n file_name = common.get_input(\"Choose a file: \")\n if file_name == \"\":\n file_name = \"model/store/games.csv\"\n table = common.get_table_from_file(file_name)\n terminal_view.print_table(table, title_list)\n id_ = common.get_input(\"Enter id to update: \")\n record = terminal_view.get_inputs(title_list, \"Enter data: \")\n table = store.update(table, id_, record)\n common.write_table_to_file(file_name, table)\n os.system(\"clear\")\n terminal_view.gprint('*** Record has been updated ***')\n common.waiting()\n os.system(\"clear\")\n elif choice == \"4\":\n file_name = common.get_input(\"Choose a file: \")\n if file_name == \"\":\n file_name = \"model/store/games.csv\"\n table = common.get_table_from_file(file_name)\n dictionary = store.get_counts_by_manufacturers(table)\n terminal_view.print_dictionary(\"Number of different kinds of game that are\" +\n \"available of each manufacturer:\", dictionary)\n common.waiting()\n os.system(\"clear\")\n elif choice == \"5\":\n file_name = common.get_input(\"Choose a file: \")\n if file_name == \"\":\n file_name = \"model/store/games.csv\"\n table = common.get_table_from_file(file_name)\n terminal_view.print_table(table, title_list)\n manufacturer = common.get_input(\"Enter a manufacturer: \")\n print(store.get_average_by_manufacturer(table, manufacturer))\n common.waiting()\n os.system(\"clear\")\n elif choice == \"6\":\n file_name = common.get_input(\"Choose a file: \")\n if file_name == 
\"\":\n file_name = \"model/store/games.csv\"\n table = common.get_table_from_file(file_name)\n terminal_view.print_table(table, title_list)\n common.waiting()\n os.system(\"clear\")\n\n else:\n if choice != \"0\":\n terminal_view.print_error_message(\"There is no such choice.\")", "def main():\n filename = \"data/exercise.csv\"\n analyze(filename)", "def main():\n # Initialize key variables\n alldata = [[\n 'Meet', 'City', 'Country', 'Course', 'Event ID', 'Distance', 'Stroke',\n 'Round', 'Gender', 'Firstname', 'Lastname', 'Birthyear', 'Height cm',\n 'Weight Kg', 'BMI', 'Speed / Kg', 'Speed m/s', 'Time']]\n finadata = []\n olympicdata = []\n ts_start = int(time.time())\n\n # Get filename\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '-l', '--lenex_directory',\n help='Name of directory with LENEX XML files.',\n type=str, required=True)\n parser.add_argument(\n '-o', '--olympic_directory',\n help='Name of directory with Olympic XLSX files.',\n type=str, required=True)\n parser.add_argument(\n '-p', '--profile_directory',\n help='Name of directory with athlete profiles.',\n type=str, required=True)\n parser.add_argument(\n '-d', '--database_file',\n help='Name of database file.',\n type=str, required=True)\n args = parser.parse_args()\n lenex_directory = args.lenex_directory\n profile_directory = args.profile_directory\n database_file = args.database_file\n olympic_directory = args.olympic_directory\n\n # Get the profiles\n profiles = _read_profiles(profile_directory)\n\n # Process Fina data\n finadata = _lenex(lenex_directory, profiles)\n\n # Process Olympic data\n olympicdata = _olympic(olympic_directory, profiles)\n\n # Get all data\n alldata.extend(finadata)\n alldata.extend(olympicdata)\n\n # Create output file\n with open(database_file, 'w') as f_handle:\n writer = csv.writer(f_handle, delimiter='|')\n writer.writerows(alldata)\n\n # Print status\n print('Swimmer event results created: {}'.format(len(alldata) - 1))\n print('Duration: {}'.format(int(time.time() - ts_start)))", "def execute(self, args: Namespace):\n data = {}\n\n with open(args.data, 'r') as file:\n reader = csv.reader(file, delimiter=';')\n\n for row in reader:\n if reader.line_num == 1:\n if 'Timestamp, Decimal, TriState, State' != ','.join(row):\n print('Not a valid CSV file!')\n return\n\n else:\n dt = datetime.strptime(row[0], '%Y-%m-%d %H:%M:%S.%f')\n value = tri_state_value(row[2])\n device = data.setdefault(tri_state_device(row[2].strip()), {})\n device.setdefault(dt.date(),[]).append((dt.strftime('%H:%M'), value))\n\n for k in sorted(data.keys()):\n device = data[k]\n print('Device {}:'.format(tint_yellow(k)))\n\n for d in sorted(device.keys()):\n device[d].sort(key=lambda x: x[0])\n print('\\t{}:'.format(tint_blue(d)))\n\n for (t, v) in device[d]:\n if v:\n print('\\t\\tAt {} the device was turned {}.'.format(tint_blue(t),\n tint_green('ON')))\n else:\n print('\\t\\tAt {} the device was turned {}.'.format(tint_blue(t),\n tint_red('OFF')))", "def userReport():\n for player, dat in players.items():\n if \"Arca\" not in dat and \"Observatorio\" not in dat \\\n and \"Atomium\" not in dat and \"Dirigible\" not in dat \\\n and \"Estatua\" not in dat and \"Baño\" not in dat:\n continue\n\n print(\"-------------------------------------------------------------\")\n print(player, \" - \", dat[\"Edad\"])\n if \"Arca\" in dat:\n print(\" Arca %i\" % dat[\"Arca\"])\n if \"Observatorio\" in dat:\n print(\" Observatorio %i\" % dat[\"Observatorio\"])\n if \"Atomium\" in dat:\n print(\" Atomium %i\" % 
dat[\"Atomium\"])\n\n if \"Estatua\" in dat:\n for ed in dat[\"Estatua\"]:\n print(\" Estatua %i - %s\" % (ed[\"Nivel\"], ed[\"Edad\"]))\n\n if \"Dirigible\" in dat:\n for ed in dat[\"Dirigible\"]:\n print(\" Dirigible 11 - %s\" % ed)\n if \"Baño\" in dat:\n print(\" Baño Real %i - %s\" % (\n dat[\"Baño\"][\"Nivel\"], dat[\"Baño\"][\"Edad\"]))\n\n print()", "def print_records(results_file, player_1, player_2):\n # keep track of the results in the file\n results_lines = []\n\n # read all of the lines from the file into a list\n with open(results_file) as f:\n results_lines = f.readlines()\n\n # parse the results (results will be a dictionary of string and tuple)\n # { string->name: tuple->(int->wins, int->losses) }\n # { 'reed': (2, 5), 'britney': (5, 2) }\n results = parse_results(results_lines)\n\n player_1_wins = results[player_1][0]\n player_1_losses = results[player_1][1]\n player_2_wins = results[player_2][0]\n player_2_losses = results[player_2][1]\n\n print \"\\n%s's record is %d wins and %d losses\" % (player_1, player_1_wins, player_1_losses)\n print \"\\n%s's record is %d wins and %d losses\" % (player_2, player_2_wins, player_2_losses)", "def main():\n args = parse_arguments()\n\n ##### READ AND PREPROCESS DATA #####\n mentors, candidates, mentors_years, candidates_years = read_and_preprocess(args.mentor_file, args.candidate_file, args.num_preferences)\n\n # Store a copy of the initially declared preferences (after filtering)\n # This is used later to see how many people didn't get someone in their top \n candidate_dict_initial = list_to_dictionary(candidates)\n mentor_dict_initial = list_to_dictionary(mentors)\n\n ##### PAIR BASED ON FIRST CHOICES #####\n successful_pairings = match_top_choices(list_to_dictionary(mentors), list_to_dictionary(candidates))\n print(\"Paired {} people based on first choices\".format(len(successful_pairings)))\n remove_pairings(successful_pairings, mentors, candidates)\n\n ##### PAIR BASED ON ONE-SIDED PREFERENCE #####\n ## ONE SIDED CANDIDATES\n one_sided_candidate_pairings = match_no_preference(candidates, mentors)\n print(\"Paired {} people based on candidates with no mentor preferences\".format(len(one_sided_candidate_pairings)))\n remove_pairings(one_sided_candidate_pairings, candidates, mentors)\n\n ## ONE SIDED MENTORS\n one_sided_mentor_pairings = match_no_preference(mentors, candidates)\n print(\"Paired {} people based on mentors with no candidate preferences\".format(len(one_sided_mentor_pairings)))\n remove_pairings(one_sided_mentor_pairings, mentors, candidates)\n\n # Fill the remaining preference list with the valid unspecified mentors/candidates\n mentors_filled, candidates_filled = fill_with_valid(mentors, candidates, mentors_years, candidates_years)\n\n candidate_dict = list_to_dictionary(candidates_filled)\n mentor_dict = list_to_dictionary(mentors_filled)\n\n stable_matches = stable_matching.stable_marriage(mentor_dict, candidate_dict)\n stable_match_success = (len(stable_matches) != 0)\n if stable_match_success:\n print(\"Paired {} people based on stable matching\".format(len(stable_matches)))\n else:\n print(\"Warning: Could not determine a stable match with the optimizations.\")\n print(\"Attempting to stable match without...\")\n mentors, candidates, mentors_years, candidates_years = read_and_preprocess(args.mentor_file, args.candidate_file, args.num_preferences)\n # Fill the remaining preference list with the valid unspecified mentors/candidates\n mentors_filled, candidates_filled = fill_with_valid(mentors, candidates, 
mentors_years, candidates_years)\n\n candidate_dict = list_to_dictionary(candidates_filled)\n mentor_dict = list_to_dictionary(mentors_filled)\n\n # Stable match immediately\n stable_matches = stable_matching.stable_marriage(mentor_dict, candidate_dict)\n if len(stable_matches) == 0:\n print(\"Error: Could not stable match these preference lists.\")\n return\n\n # Combine the pairings from all sources into a single list\n if stable_match_success:\n all_pairings = []\n for mentor, candidate in successful_pairings:\n all_pairings.append([mentor, candidate, \"Paired based on first choice\"])\n for mentor, candidate in one_sided_mentor_pairings:\n all_pairings.append([mentor, candidate, \"Paired based on one-sided mentors\"])\n for candidate, mentor in one_sided_candidate_pairings:\n all_pairings.append([mentor, candidate, \"Paired based on one-sided candidates\"])\n for mentor, candidate in stable_matches:\n mentor_name = mentor\n if mentor is None:\n mentor_name = \"No mentor\"\n candidate_name = candidate\n if candidate is None:\n candidate_name = \"No candidate\"\n all_pairings.append([mentor_name, candidate_name, \"Paired based on stable matching\"])\n else:\n all_pairings = stable_matches\n\n output_file = \"pairings.csv\"\n if args.output_file:\n output_file = args.output_file\n with open(output_file, \"w+\") as f:\n f.write(\"Mentor,Candidate,Notes\\n\")\n for mentor, candidate, notes in all_pairings:\n f.write(\"{},{},{}\\n\".format(mentor, candidate, notes))", "def main():\n # openfile allows for CSV files with stored data of two columns\n # data = openfile(\"filename\")\n data = get_data()\n abtest = AB_test(data)\n abtest.stats()\n abtest.print_stats()", "def main():\n parser = ArgumentParser(usage='%(prog)s [options] ecommonsMetadata.csv')\n parser.add_argument(\"-d\", \"--date\", dest=\"date\",\n help=\"Date on or after that an ETD was published for \\\n creating DOIs. 
Put in format YYYY-MM\")\n parser.add_argument(\"datafile\", help=\"eCommons metadata worked from.\")\n\n args = parser.parse_args()\n\n if not len(sys.argv) > 0:\n parser.print_help()\n parser.exit()\n\n workingdir = csvparse(args.datafile, args.date)\n doiparse(workingdir)\n print('ANVL files available in: ' + workingdir)", "def main():\n\n args = get_args()\n random.seed(args.seed)\n wod = []\n\n for name, low, high in read_csv(args.file):\n reps = random.randint(low, high)\n if args.easy:\n reps = int(reps / 2)\n wod.append((name, reps))\n\n wod = random.sample(wod, k=args.num_exercises)\n print(tabulate(wod, headers=('Exercise', 'Reps')))", "def interactor_finder():\n from tools import prot_id_converter\n\n proteinList = []\n with open(\"../datafiles/known_interactors.txt\",\"r\") as inpProt: # create list of gene names from hand-made text file with known ptp22 interactors\n for protLine in inpProt:\n if protLine != \"\\n\":\n curName = protLine.strip().split(\"\\t\")[0]\n curName = curName[0] + curName[1:].lower()\n proteinList.append(curName)\n inpIdL = prot_id_converter(proteinList, \"10090\", \"genesymbol\", \"uniprotaccession\") # convert to uniprot accessions\n print(inpIdL)\n \n with open(\"../bob/processed/bobprots_all.csv\",\"r\") as targetF: # create list of all uniprot accessions in Bob's dataset (unique razor proteins only)\n targetD = {}\n for targetLine in targetF:\n targetD[targetLine.split(\",\")[0]] = targetLine.split(\",\")[1].strip()\n for inpIdItem in inpIdL:\n for queryI in inpIdItem:\n if queryI in targetD:\n print(targetD[queryI])\n break", "def main():\n \n Y1, Y2 = 2005, 2017 ### range with coordinates supplied in pre-2018 generated archive\n\n if len(sys.argv) > 1 and int(sys.argv[1]) > 0:\n Y1 = int(sys.argv[1])\n \n if len(sys.argv) > 2 and int(sys.argv[2]) > Y1:\n Y2 = int(sys.argv[2])\n \n with open('data/audit.log','w') as output:\n for Y in range(Y1, Y2):\n df = pd.read_csv('data/{}.csv'.format(Y), low_memory = False)\n output.write('\\n--- {} --------------------\\n'.format(Y))\n\n # remove `deleted` records\n df['deleted'] = df['deleted'].apply(yes_no)\n df = df[df['deleted'] == 0]\n\n # remove misc misdemeanors\n df = df[~df['category'].isin(drop)]\n\n # validate date and expand into Y,N,D,W,H\n df['dt'] = df['incident_date'].apply(extract)\n df = df[~df['dt'].isnull()]\n\n # convert from plane state to longitude-latitude\n df['ll'] = df.apply(to_lnglat, axis = 1)\n\n # init features\n features = df.loc[:,['category','stat','address','city','zip']]\n features['id'] = df['incident_id']\n dt = ['year','month','day','weekday','hour']\n for i in range(len(dt)):\n features[dt[i]] = df['dt'].apply(lambda x: x[i] )\n\n features['lng'] = df['ll'].apply(lambda x: x[0])\n features['lat'] = df['ll'].apply(lambda x: x[1])\n\n features['gang'] = df['gang_related'].apply(yes_no)\n features['category'] = df['category'].apply(collapse)\n cat = set(features.groupby(['category']).size().reset_index(name='count')['category'].tolist())\n output.write('Categories: {}\\n'.format(len(cat)))\n\n output.write('Date miss: {:.4f}%\\n'\\\n .format(100 * (1 - len(features[(features['year'] > 2000) & (~features['weekday'].isnull())])/len(features))))\n output.write('Location miss: {:.4f}%\\n'\\\n .format(100 * (1 - len(features[(features['zip'] > 0) | (features['lat'] > 0)])/len(features))))\n\n # keep records with valid date\n features['date'] = df['dt'].apply(lambda x: datetime.date(x[0], x[1], x[2]))\n features = features[(features['year'] > 2000) & 
(~features['weekday'].isnull())]\n output.write('Time miss: {:.4f}%\\n'.format(100 * len(features[features['hour'] == -1])/len(features)))\n\n # potential `time-unknown` issue\n output.write('Hour ZERO: {:.4f}%\\n'.format(100 * len(features[features['hour'] == 0])/len(features)))\n output.write('Hour NOON: {:.4f}%\\n'.format(100 * len(features[features['hour'] == 12])/len(features)))\n\n features = features[(features['zip'] > 0) | (features['lat'] > 0)]\n\n # get the best possible coordinates + zipcode assessment\n features[['zip','lng','lat']] = features[['zip','lng','lat']].apply(fix_location, axis = 1)\n output.write('Failed location: {:.4f}%\\n'.format(100 * len(features[features['zip'].isnull()])/len(features)))\n features = features[~features['zip'].isnull()]\n features['zip'] = df['zip'].apply(lambda x: str(x)[:5])\n \n # normalize city attr\n features = features.join(zipcodes[['zip','city']].set_index('zip'), on = 'zip', lsuffix = '_orig', rsuffix = '')\n features.loc[features['city'].isnull(), 'city'] = features.loc[features['city'].isnull(), 'city_orig']\\\n .apply(lambda x: x if type(x) == float else ' '.join([l[0].upper() + l[1:] for l in x.split()]))\n\n # reduce to LA bounding-box\n features = features[(features['lng'] > -119) & (features['lng'] < -116)]\n features = features[(features['lat'] > 32) & (features['lat'] < 35)]\n\n # save csv\n features[fields].to_csv('data/F{}.csv'.format(Y), index = False)\n features[fields].to_json('data/F{}.json'.format(Y), orient = 'records')\n output.close()" ]
[ "0.6056687", "0.57561624", "0.5681713", "0.5558769", "0.5533532", "0.55103606", "0.54816645", "0.5471882", "0.5470575", "0.5453268", "0.54475284", "0.5446846", "0.5314073", "0.53068393", "0.5291783", "0.52585655", "0.52576137", "0.5234658", "0.5230194", "0.5194857", "0.51882", "0.5187348", "0.51819515", "0.5159562", "0.5134737", "0.5109166", "0.51057583", "0.51003957", "0.50865924", "0.50835425" ]
0.6281538
0
Keep only the not-NaN column positions in all arrays.
def drop_nan_columns(arrays):

    from numpy import isnan, ones

    # Start by keeping every column index
    not_nan_filter = ones(len(arrays[0]), dtype=bool)

    # Keep only the column indices without a missing value in any array
    for a in arrays:
        not_nan_filter &= ~isnan(a)

    return [a[not_nan_filter] for a in arrays]
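# Usage sketch (illustrative; assumes NumPy is installed and the input
# arrays share the same length):
if __name__ == '__main__':
    from numpy import array, nan
    a = array([1.0, nan, 3.0])
    b = array([4.0, 5.0, nan])
    print(drop_nan_columns([a, b]))  # [array([1.]), array([4.])]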
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def columns_with_na_values(data):\n aux = data.isna().sum() > 0\n return aux.index[aux.values].values", "def remove_nans(coords):\n s = np.apply_along_axis(sum,1,np.isnan(coords[1])) == 0\n coords[0] = (np.asarray(coords[0])[s]).tolist()\n coords[1] = coords[1][s,:]", "def remove_nans(arr):\n not_nan = [i for i in range(len(arr)) if not np.isnan(arr[i])]\n\n return not_nan, arr[not_nan]", "def removeNans(data):\n for i in data[:]:\n ind = data.index(i)\n for j in i:\n if np.isnan(j):\n data.remove(i)\n break\n return data", "def remove_empty_columns(x, threshold=0.4):\n # For each column compute the ratio of nan values over the number of rows\n prop_empty_column = (np.isnan(x)).sum(axis=0) / len(x)\n column_mask = prop_empty_column < threshold\n return x[:, column_mask], column_mask", "def remove_nans(arr, nan_rows=None):\n # Remove NaNs\n nconfigs, nt = arr.shape\n if nan_rows is None:\n mask = np.isfinite(arr)\n else:\n mask = np.array([n for n in np.arange(nconfigs) if n not in nan_rows])\n return arr[mask].reshape(-1, nt)", "def locate_nan_rows(arr):\n # Count the number of NaNs in each row\n nan_counts = np.sum(~np.isfinite(arr), axis=1)\n # Trigger on a NaN appearing anywhere in a line/row\n nans, = np.where(nan_counts > 1)\n return frozenset(nans)", "def filter_nan(s,o):\n data = np.array([s.flatten(),o.flatten()])\n data = np.transpose(data)\n data = data[~np.isnan(data).any(1)]\n #data = data[~np.isnan(data)]\n return data[:,0],data[:,1]", "def nonans(array):\n return array[~np.isnan(array)]", "def purgeNanEveryWhere(df):\n #Row-wise dropping\n toDrop = np.array([])\n for i in range(df.shape[0]):\n if( np.sum ( pd.isnull(df.iloc[i]) ) == df.shape[1]-1 ):\n toDrop= np.append(toDrop,i)\n df.drop(df.index[toDrop.astype(int)],inplace=True) \n #Column-wise dropping\n for col in df.columns:\n arr = pd.notnull(df[col])\n nnan = np.sum(arr) \n if (nnan == df.shape[1]):\n df.drop(col,inplace=True,axis=1)\n return df", "def _non_zero_columns_search(array):\n col_num = array.shape[1]\n non_zero_col = CArray([], dtype=int)\n for c in range(col_num):\n col = array[:, c]\n if col.any() == True:\n non_zero_col = non_zero_col.append(c)\n\n return non_zero_col", "def get_columns_with_all_nulls(X, columns_to_check='all', rows_to_scan='all'):\n rows_to_scan = get_rows_to_scan(rows_to_scan, X.shape[0])\n columns_to_check = get_list_of_columns_to_check(columns_to_check, X.columns)\n mask = np.array(X[columns_to_check][:rows_to_scan].count() == 0)\n return list(np.array(columns_to_check)[mask])", "def remove_null_cols(df, thresh=0.08):\n \n # look at this\n # df.dropna(thresh=int(df.shape[0] * .9), axis=1)\n pct_null = df.isnull().sum() / len(df)\n missing_features = pct_null[pct_null > thresh].index\n return df.drop(missing_features, axis=1)", "def get_columns_with_nulls(X, columns_to_scan = \"all\", rows_to_scan=100000):\n rows_to_scan = get_rows_to_scan(rows_to_scan, X.shape[0])\n \n columns_to_scan = get_list_of_columns_to_check(columns_to_scan, X.columns)\n mask = np.array(X[columns_to_scan][:rows_to_scan].count() < rows_to_scan)\n return list(np.array(columns_to_scan)[mask])", "def _nan_cells(traces):\n # Find all cells with NaNs\n nancells = []\n ncells = -1\n for cs in traces:\n if len(traces[cs]) > 0:\n ncells = np.shape(traces[cs])[1]\n ns = np.sum(np.sum(np.invert(np.isfinite(\n traces[cs])), axis=2), axis=0)\n vals = np.arange(ncells)\n nancells.extend(vals[ns > 0])\n\n # Set _mask_cells if it hasn't been set\n out = np.zeros(ncells, dtype=bool)\n\n # Convert nancells to a list of good 
cells\n nancells = np.array(list(set(nancells)))\n if len(nancells) > 0:\n print('Warning: %i cells have NaNs'%len(nancells))\n out[nancells] = True\n\n return out", "def exclude_empty_feats(self):\n for dataset in self:\n dataset.dropna(axis=1, how=\"all\", inplace=True)", "def nancnt_nb(a):\n out = np.empty(a.shape[1], dtype=np.float_)\n for col in range(a.shape[1]):\n out[col] = np.sum(~np.isnan(a[:, col]))\n return out", "def handel_nans(self):\n col_nan_pct = self.df.isin([' ',np.nan]).mean() #Calculates percent of Nans\n col_names = col_nan_pct[col_nan_pct >= .1].index # Gets name of columns with over 50% Nans\n col_count = [self.df[col].count() for col in col_names for x in self.df if x == col] #Gets length of valid values for column\n dropped_col = [col for col in zip(col_count, col_names) if col[0] <= 1400] #Gets columns names with under 50 values\n [self.df.drop(columns=[col[1]], inplace=True) for col in dropped_col]\n self.dropped_cols_phase_one = dropped_col\n [self.column_dtypes.pop(item[1]) for item in dropped_col]\n self.df[self.target].dropna(inplace=True)", "def get_nan_columns(df):\n df = nan_val_summary(df)\n return df[df['fraction_missing'] > 0]['columns'].values", "def remove_none_from_arrays(self):\r\n\r\n is_nan = numpy.isnan(self.y_values) # array of booleans, element is True if the corresponding element in\r\n # self.y_values is None\r\n\r\n self.x_values = self.x_values[numpy.logical_not(is_nan)]\r\n self.y_values = self.y_values[numpy.logical_not(is_nan)] # replace all None elements\r", "def select_columns(df):\n df = df.dropna(axis='columns', how='all') # drop columns containing only NaN\n keep_cols = [col for col in df.columns if 'normalized' not in col]\n df = df[keep_cols]\n return df", "def dropna(df, axis=0, th=0.4):\n df = df.copy()\n axis = 0 if axis==1 else 1\n col_idx = df.isna().sum(axis=axis)/df.shape[axis] <= th\n df = df.iloc[:, col_idx.values]\n return df", "def whichColumnsNA(df):\n return df.columns[df.isna().any()].tolist()", "def filter_nans(seq):\n return np.array([x for x in seq if not isinstance(x, float)])", "def bad_column_positions(self, x):\n return x.is_null()", "def dropNaN(featZ):\n \n n_cols = len(featZ.columns)\n featZ.dropna(axis=1, inplace=True)\n n_dropped = n_cols - len(featZ.columns)\n \n if n_dropped > 0:\n print(\"Dropped %d features after normalisation (NaN)\" % n_dropped)\n \n return featZ", "def clean_data(x, null, drop_thresh):\n # Do not modify the original dataset\n x_clean = np.copy(x)\n \n # Vector holding, for each feature, the fraction of datapoints with a null value\n null_frac = (1/x_clean.shape[0]) * np.sum(x_clean==null, axis=0)\n # Boolean vector holding, for each feature, whether or not it needs to be kept\n column_to_keep = null_frac <= drop_thresh\n \n # Drop bad columns\n x_clean = x_clean[:, column_to_keep]\n \n # Vector of (list of) indices of columns where there are still null values\n columns_to_interpolate = np.argwhere(np.any(x_clean==null, axis=0))\n \n # For each of those columns, find the mean of non-null values, and substitute it to null values\n for col_list in columns_to_interpolate:\n # Extrapolate only entry of col_list\n col = col_list[0]\n \n # Boolean vector holding, for each row, whether or not it has a \"null\" entry at position \"col\"\n row_non_null = x_clean[:, col] != null\n # Find mean\n interpolation = np.mean(x_clean[row_non_null, col])\n # Substitute it to null values\n row_null = np.logical_not(row_non_null)\n x_clean[row_null, col] = interpolation\n \n return x_clean", "def 
isna(self):\n # type: () -> np.ndarray\n return extract_isnull_bytemap(self.data)", "def isna(self):\n # type: () -> np.ndarray\n return extract_isnull_bytemap(self.data)", "def get_columns_not_all_nulls(X, columns_to_check='all', rows_to_scan='all'):\n columns_to_check = get_list_of_columns_to_check(columns_to_check, X.columns)\n remove_columns = get_columns_with_all_nulls(X, columns_to_check, rows_to_scan)\n return list(set(columns_to_check)-set(remove_columns))" ]
[ "0.68530303", "0.67052513", "0.669365", "0.6676535", "0.66358685", "0.6495949", "0.6456195", "0.64343864", "0.6338271", "0.63302857", "0.62897485", "0.62503976", "0.6189583", "0.61509717", "0.61466753", "0.6088549", "0.6019746", "0.5990693", "0.59600526", "0.5936195", "0.5896028", "0.588317", "0.5881776", "0.5853736", "0.5849152", "0.5827604", "0.58145475", "0.58126885", "0.58126885", "0.5795356" ]
0.7522396
0
Drop any row or column slice of df whose entries all equal value.
def drop_uniform_slice_from_dataframe(df, value, axis=0):

    if axis == 0:
        # Columns whose entries are all equal to value
        dropped = (df == value).all(axis=0)
        if any(dropped):
            print('Removed {} column index(ices) whose values are all {}.'.format(
                dropped.sum(), value))
        return df.loc[:, ~dropped]

    elif axis == 1:
        # Rows whose entries are all equal to value
        dropped = (df == value).all(axis=1)
        if any(dropped):
            print('Removed {} row index(ices) whose values are all {}.'.format(
                dropped.sum(), value))
        return df.loc[~dropped, :]
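# Usage sketch (illustrative; assumes pandas is installed):
if __name__ == '__main__':
    import pandas as pd
    frame = pd.DataFrame({'a': [0, 0], 'b': [1, 2]})
    print(drop_uniform_slice_from_dataframe(frame, 0))          # column 'a' removed
    print(drop_uniform_slice_from_dataframe(frame, 0, axis=1))  # no all-0 row, so unchanged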
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def filter_values(df, value=0, axis=0):\n \n if axis:\n return df.loc[:, (df != value).any(axis=1-axis)]\n else:\n return df.loc[(df != value).any(axis=1-axis)]", "def remove(df, pattern):\n return df[~df.index.isin(df.query(pattern).index)]", "def drop_transafers(df):\n return df.filter(~(df.valor == 0))", "def filter(self):\n self.data = self.data.loc[~self.data.isnull().any(1),:]", "def remove_rows_without_feature(df, feature):\n return df[np.isfinite(df[feature])]", "def df_cleaner(df):\n return df.dropna()", "def drop_missing_values_in_dataframe(dataframe):\r\n return dataframe.dropna()", "def remove_not_available(df):\n drop_indices = df.index[df['genre'] == 'Not Available'].tolist()\n df = df.drop(drop_indices)\n return df", "def drop_nan(df, perc=20):\n check = check_nan(df, show_plot=False)\n return df.drop(check[check.percentage > perc].index.values, axis=1)", "def df_cleaner(df):\n df = df.dropna()\n return df", "def clean(df):", "def remove_nan(self, dataframe):\n return dataframe.dropna()", "def drop_zero_pay(df):\n df = df.loc[df.payann > 0]\n return df", "def data_cleaning(df):\n df = df.dropna(how='all')\n \n return df", "def crop_amide_one(df):\n df = df[(df[df.columns[0]] < 1706) & (df[df.columns[0]] > 1599)]\n df.reset_index(drop=True, inplace=True)\n return df", "def get_subtable(df, col, val) -> pd.DataFrame:\r\n return df[df[col] == val].drop(columns=col)", "def drop_illogical(df,var1,var2):\r\n #Mask the illogical entries\r\n mask = df[var1]>df[var2]\r\n #Record the number of entries\r\n NumRecords = df.shape[0]\r\n #drop the illogical entries\r\n df = df[df.keys()][~mask]\r\n #Notify the user how many records were dropped\r\n print('{} records dropped because {} is greater than {}'.format(NumRecords-df.shape[0],var1,var2))\r\n \r\n return df", "def delete_entries(df, column, values):\n for val in values:\n dropindex = df[df[column] == val].index\n df.drop(index = dropindex, inplace = True)", "def remove_missing_values(df, col, exclude):\n if type(col) == 'list':\n try:\n for ind, c in enumerate(col):\n indices = df[df[c] == exclude[ind]].index\n df = df.drop(indices)\n except:\n print('Exception occurred, check kwargs')\n else:\n indices = df[df[col] == exclude].index\n df = df.drop(indices)\n return df", "def removeNonQuant(df, cols):\r\n df = df[~(df[cols].isnull().all(1))]\r\n return df", "def trimDf(df):\n cols = set(df.columns)\n\n cols.remove('exclamationCount') # bug in our feature extraction code\n cols.remove('price') # considered only free apps\n cols.remove('appName') # removing appNames\n\n # return df[list(cols)]\n\n\n\n return df[list(('revSent', 'appLabel'))]", "def clean_rows_cat_values(df: pd.DataFrame, col: str, values: list) -> pd.DataFrame:\n\n # create mask to filter df with rows that have\n # the indicated values in the indicated column\n index = df.columns.get_loc(col)\n mask = [df.iloc[row, index] not in values for row in range(len(df))]\n\n # print original dataframe shape\n print(f\"Shape of the original dataframe: \\n{df.shape}\\n\")\n\n # filter df\n df = df.iloc[mask]\n df.reset_index(drop=True, inplace=True)\n print(\n f\"Shape after removing rows with values equal to\\n{values}\\nin column '{col}'':\"\n )\n print(df.shape, \"\\n\")\n\n return df", "def deselect (a_data,a_column) :\n loc_data = a_data.drop(a_column,axis = 1) \n return loc_data", "def drop_one_elem_columns(self, df):\n df_ = df.copy()\n\n # Incldue columns in dataframe\n include_idx = []\n for i in df_.columns:\n len_unique = df_[i].dropna().unique().size\n if 
len_unique > 1:\n include_idx.append(i)\n\n df_ = df_[include_idx]\n return df_", "def clean_data(data):\n data.dropna(inplace=True)\n for feature in data:\n if ((feature != 'lat') and (feature != 'long') and (feature != 'date')):\n data.drop(data[(data[feature] < 0)].index, inplace=True)\n data.drop(data[(data['price'] == 0)].index, inplace=True)\n data.drop(data[(data['bedrooms'] == 0) & (data['bathrooms'] == 0.0)].index, inplace=True)\n return data", "def _clean_dataset(df: pd.DataFrame) -> pd.DataFrame:\n df = df.loc[:, ~df.columns.str.contains(\"^Unnamed\")]\n df = df.dropna()\n return df", "def drop_outliers(target_df, settings):\n target_df.sort_values(list(target_df.columns), inplace=True)\n startlen = target_df.shape[0]\n if settings[\"drop_outlier_above\"] < 1:\n target_df = target_df.iloc[: int(np.floor(startlen * settings[\"drop_outlier_above\"])), :]\n if settings[\"drop_outlier_below\"] > 0:\n target_df = target_df.iloc[int(np.floor(startlen * settings[\"drop_outlier_below\"])) :, :]\n return target_df", "def _truncate_games_df(df, season, season_type):\n return df[(df['season'] != season) | (df['type'] != season_type)]", "def remove_players_wo_positions(df):\n df = df[pd.notnull(df['FantPos'])]\n return df", "def dataendclean(df, x, inplace=False):\r\n # Examine Mean Values\r\n if inplace:\r\n df = df\r\n else:\r\n df = df.copy()\r\n\r\n jump = df[abs(df.loc[:, x].diff()) > 1.0]\r\n try:\r\n for i in range(len(jump)):\r\n if jump.index[i] < df.index[50]:\r\n df = df[df.index > jump.index[i]]\r\n printmes(\"Dropped from beginning to \" + str(jump.index[i]))\r\n if jump.index[i] > df.index[-50]:\r\n df = df[df.index < jump.index[i]]\r\n printmes(\"Dropped from end to \" + str(jump.index[i]))\r\n except IndexError:\r\n printmes('No Jumps')\r\n return df" ]
[ "0.6749588", "0.659988", "0.6581077", "0.64866114", "0.64584786", "0.64093405", "0.62364835", "0.6224", "0.6223723", "0.6217032", "0.6179184", "0.6157227", "0.61414427", "0.6132971", "0.6130289", "0.6113223", "0.610663", "0.60877687", "0.60699916", "0.6052917", "0.6052265", "0.60467917", "0.59864885", "0.5985572", "0.59468275", "0.59339845", "0.5929148", "0.59261584", "0.588431", "0.58636737" ]
0.75481135
0
Split df into n_split blocks (by row).
def split_dataframe(df, n_split, axis=0):

    # TODO: implement axis logic; only row-wise splitting is supported for now
    if df.shape[0] < n_split:
        raise ValueError(
            'n_split ({}) can\'t be greater than the number of rows ({}).'.format(
                n_split, df.shape[0]))
    elif n_split <= 0:
        raise ValueError('n_split ({}) must be at least 1.'.format(n_split))

    # Number of rows per full block
    n = df.shape[0] // n_split

    splits = []
    for i in range(n_split):
        start_i = i * n
        end_i = (i + 1) * n
        splits.append(df.iloc[start_i:end_i, :])

    # Any leftover rows go into one final, smaller block
    i = n * n_split
    if i < df.shape[0]:
        splits.append(df.iloc[i:])

    return splits
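# Usage sketch (illustrative; assumes pandas is installed):
if __name__ == '__main__':
    import pandas as pd
    frame = pd.DataFrame({'x': range(7)})
    blocks = split_dataframe(frame, 3)
    print([len(b) for b in blocks])  # [2, 2, 2, 1] -- three full blocks plus the leftovers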
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split_df(df, n_chunks):\n chunk_size = int(np.ceil(df.shape[0] / n_chunks))\n assert n_chunks * chunk_size >= df.shape[0]\n chunks = []\n for i in range(0, df.shape[0], chunk_size):\n chunks.append(df[i:i + chunk_size])\n assert len(chunks) == n_chunks\n return chunks", "def split_dataframe(df, size=10*1024*1024):\n \n # size of each row\n row_size = df.memory_usage().sum() / len(df)\n # maximum number of rows in each segment\n row_limit = int(size // row_size)\n # number of segments\n seg_num = (len(df)+row_limit-1)//row_limit\n # split df into segments\n segments = [df.iloc[i*row_limit : (i+1)*row_limit] for i in range(seg_num)]\n\n return segments", "def splitData(df, split):\n train = df.iloc[:int(len(df)*split)]\n test = df.iloc[int(len(df)*split):]\n \n return train, test", "def getKSplits(df, n_splits, seed = None):\n\n result = []\n\n # None random seed is same as not setting it\n df_shuffled = df.sample(len(df), random_state = seed)\n\n fold_size = int(len(df) / n_splits)\n\n for i in range(n_splits):\n if i == n_splits - 1: # last iteration\n df_fold = df_shuffled[fold_size * (i): len(df)] # gets remainder\n else:\n df_fold = df_shuffled[fold_size * (i):fold_size * (i + 1) ] # python starts indexing at 0\n result.append(df_fold)\n\n return result", "def dataFrameSplit(df, norec=1000000, outfile= None):\n # calculation of the no. of rows of the dataframe\n df_rsz = len(df.index)\n if df_rsz>norec:\n no_splits = np.ceil(df_rsz/norec)\n dfarr = np.array_split(df,no_splits)\n return dfarr\n else:\n print(\"The dataframe doesn't have sufficient records\")\n \n # printing to disk when \n if outfile!=None:\n i=0\n for arr in dfarr:\n arr.to_csv(\"D:\\\\ddf\"+str(i+1)+\".csv\",encoding='utf-8', index=False,\n header=False)\n i = i+1", "def splitting_df(dataframe):\n dataframe = dataframe.dropna()\n index = 100\n train_set = dataframe.iloc[:index]\n test_set = dataframe.iloc[index:]\n return train_set, test_set, dataframe", "def greedy_split(arr, n, axis=0):\n length = arr.shape[axis]\n # compute the size of each of the first n-1 blocks\n block_size = int(np.ceil(length / float(n)))\n # the indices at which the splits will occur\n ix = np.arange(block_size, length, block_size)\n return np.array(np.split(arr, ix, axis))", "def prepare_stops_to_request(df: pd.DataFrame) -> list:\n return [split_df(df, i, i + 100) for i in range(0, len(df), 100)]", "def split_df(df,\n test_size=.10,\n random_state=42):\n train_df, test_df = train_test_split(df,\n test_size=test_size,\n random_state=random_state)\n return train_df, test_df", "def splitInBlocks (l, n):\n k = len(l) / n\n r = len(l) % n\n\n i = 0\n blocks = []\n while i < len(l):\n if len(blocks)<r:\n blocks.append(l[i:i+k+1])\n i += k+1\n else:\n blocks.append(l[i:i+k])\n i += k\n\n return blocks", "def split(self):\n\n ratio_c = 1 - self.ratio\n self.train, self.test = self.df.randomSplit([self.ratio, ratio_c], seed=12345)", "def split_into_chunks(x, n):\n csize = int(np.ceil(len(x) / n))\n out = list()\n \n i = 0\n while i * csize < len(x):\n out.append(x[(i * csize):(i * csize + csize)])\n i += 1\n\n return out", "def partition(data, n):\n splits = []\n remaining = data.copy(deep=True)\n for i in range(n):\n split = remaining.sample(frac=1/(n-i), random_state=10)\n splits.append(split)\n remaining = remaining.drop(split.index)\n return splits", "def split_dataset(df, predict_window):\n\n #split dataset into train and test datasets\n #train 80 percent of rows\n dataset_train = np.array(df[:int(df.shape[0]*0.8)])\n\n #test dataset is 
20 percent of rows\n #50 - that's where historical data and prediction overlap\n dataset_test = np.array(df[int(df.shape[0]*0.8)- predict_window:])\n\n return dataset_train, dataset_test", "def get_n_splits(self):\n pass", "def test_split(self):\n array = np.arange(1000)\n df = DataFlow.from_numpy(array)\n\n # first, test throw errors on invalid arguments\n def assert_invalid_arg(**kwargs):\n with self.assertRaises(ValueError):\n df.split(**kwargs)\n assert_invalid_arg(partitions=[])\n assert_invalid_arg(partitions=[1000, 1])\n assert_invalid_arg(partitions=[1000, -1])\n assert_invalid_arg(partitions=[1, 2])\n assert_invalid_arg(portions=[])\n assert_invalid_arg(portions=[1.0, 0.1])\n assert_invalid_arg(portions=[1.0, -1])\n assert_invalid_arg(portions=[0.1, 0.2])\n\n # next, test split without shuffling\n df1, df2, df3 = df.split(partitions=[700, 200, 100])\n np.testing.assert_array_equal(df1.all()[0], array[:700])\n np.testing.assert_array_equal(df2.all()[0], array[700:900])\n np.testing.assert_array_equal(df3.all()[0], array[900:1000])\n df1, df2, df3 = df.split(portions=[-1, 0.2, 0.1])\n np.testing.assert_array_equal(df1.all()[0], array[:700])\n np.testing.assert_array_equal(df2.all()[0], array[700:900])\n np.testing.assert_array_equal(df3.all()[0], array[900:1000])\n\n # finally, test split with shuffling\n df1, df2 = df.split(portions=[0.5, -1], shuffle=True)\n self.assertEquals(len(df1), 500)\n self.assertEquals(len(df2), 500)\n df_array = np.concatenate([df1.all()[0], df2.all()[0]], axis=0)\n self.assertFalse(np.all(df_array == array))\n np.testing.assert_array_equal(np.sort(df_array), array)", "def getSplits(df, train_size, val_size, test_size, seed=None):\n size = len(df)\n\n # size is considered a percentage if less than 1:\n train_size = int(train_size * size) if train_size < 1 else train_size\n val_size = int(val_size * size) if val_size < 1 else val_size\n test_size = int(test_size * size) if test_size < 1 else test_size\n\n if not seed is None:\n np.random.seed(seed)\n\n train_val_idx = np.random.choice(\n a=range(size),\n size=train_size + val_size,\n replace=False\n )\n train_idx = train_val_idx[:train_size]\n val_idx = train_val_idx[train_size:]\n\n train = df.iloc[train_idx]\n val = df.iloc[val_idx]\n test = df.drop(train.index).drop(val.index) # test is equal to the leftover\n\n assert len(train) + len(val) + len(test) == len(df)\n\n return train, val, test", "def split_data(df: pd.DataFrame, ratio: float, purging: bool = True, n_bars: int = 10) -> Tuple[pd.DataFrame, pd.DataFrame]:\n split_idx = int(df.shape[0] * ratio)\n df1 = df[:split_idx]\n df2 = df[split_idx:]\n if purging:\n purge_idx = round((n_bars-1) * ratio)\n df1 = df1[:-purge_idx]\n df2 = df2[(n_bars - 1 - purge_idx):]\n\n return df1, df2", "def split_on_whole_table(\n df: pyspark.DataFrame,\n ) -> pyspark.DataFrame:\n return df", "def getWindows(df, size=75, step=15):\n start = 0\n while start+size < df.count():\n yield start, start + size #pd.to_timedelta(size, unit='m'))\n start += step", "def add_split_col ( self, arr ):\n arr = arr if not self.shuffle else np.random.permutation ( arr )\n n = len ( arr )\n k = int ( np.ceil ( n / self.number_of_folds ) )\n return pd.DataFrame (\n { \"index\": arr, \"split\": np.tile ( np.arange ( self.number_of_folds ), k )[ 0:n ] , }\n )", "def split_data(df: pd.DataFrame):\n size = int(df.shape[0] * 0.8)\n indexes = np.random.choice(df.index, size, replace=False)\n train_set = df.loc[indexes]\n test_set = df.loc[~df.index.isin(indexes)]\n return train_set, test_set", 
"def test_03_dataframe_to_dataframe_w_chunksize(self):\n _, err = _iquery(\"store(flatten(DF1, cells_per_chunk:5), DF3)\")\n assert not err, err\n self._array_cleanups.append('DF3')\n check_v_sum('DF3')\n nchunks = chunk_count(vaid_of('DF3'))\n prt(\"DF3 has\", nchunks, \"chunks\")\n assert nchunks < self._df1_chunks, \"DF3 did not get dense!\"", "def train_test_split(df, test_size=0.3):\r\n # split df here\r\n train_size = int(df.shape[0] * (1 - test_size))\r\n test_size = df.shape[0] - train_size\r\n train = df[:train_size]\r\n test = df[train_size:]\r\n\r\n return train, test # return the train and test datasets\r", "def split_data(df):\n\trandom_seed = 1\n\tdf_train = df.sample(frac=0.8, random_state=random_seed)\n\tdf_rem = df.loc[~df.index.isin(df_train.index)]\n\tdf_valid = df_rem.sample(frac=0.5, random_state=random_seed)\n\tdf_test = df_rem.loc[~df_rem.index.isin(df_valid.index)]\n\tlogger.info(\"Shape of training dataframe: \" + str(df_train.shape))\n\tlogger.info(\"Shape of validation dataframe: \" + str(df_valid.shape))\n\tlogger.info(\"Sahpe of test dataframe: \" + str(df_test.shape))\n\n\treturn df_train, df_valid, df_test", "def test_n_group_split(self):\n # Test 2 groups like HalfSplitter first\n hs = NGroupPartitioner(2)\n\n for isreversed, splitter in enumerate((hs, hs)):\n if isreversed:\n spl = Splitter(attr='partitions', reverse=True)\n else:\n spl = Splitter(attr='partitions')\n splits = [ list(spl.generate(p)) for p in hs.generate(self.data) ]\n self.assertTrue(len(splits) == 2)\n\n for i, p in enumerate(splits):\n self.assertTrue( len(p) == 2 )\n self.assertTrue( p[0].nsamples == 50 )\n self.assertTrue( p[1].nsamples == 50 )\n\n assert_array_equal(splits[0][1-isreversed].sa['chunks'].unique,\n [0, 1, 2, 3, 4])\n assert_array_equal(splits[0][isreversed].sa['chunks'].unique,\n [5, 6, 7, 8, 9])\n assert_array_equal(splits[1][1-isreversed].sa['chunks'].unique,\n [5, 6, 7, 8, 9])\n assert_array_equal(splits[1][isreversed].sa['chunks'].unique,\n [0, 1, 2, 3, 4])\n\n # check if it works on pure odd and even chunk ids\n moresplits = [ list(spl.generate(p)) for p in hs.generate(splits[0][0])]\n\n for split in moresplits:\n self.assertTrue(split[0] != None)\n self.assertTrue(split[1] != None)\n\n # now test more groups\n s5 = NGroupPartitioner(5)\n\n # get the splits\n for isreversed, s5splitter in enumerate((s5, s5)):\n if isreversed:\n spl = Splitter(attr='partitions', reverse=True)\n else:\n spl = Splitter(attr='partitions')\n splits = [ list(spl.generate(p)) for p in s5splitter.generate(self.data) ]\n\n # must have 10 splits\n self.assertTrue(len(splits) == 5)\n\n # check split content\n assert_array_equal(splits[0][1-isreversed].sa['chunks'].unique,\n [0, 1])\n assert_array_equal(splits[0][isreversed].sa['chunks'].unique,\n [2, 3, 4, 5, 6, 7, 8, 9])\n assert_array_equal(splits[1][1-isreversed].sa['chunks'].unique,\n [2, 3])\n assert_array_equal(splits[1][isreversed].sa['chunks'].unique,\n [0, 1, 4, 5, 6, 7, 8, 9])\n # ...\n assert_array_equal(splits[4][1-isreversed].sa['chunks'].unique,\n [8, 9])\n assert_array_equal(splits[4][isreversed].sa['chunks'].unique,\n [0, 1, 2, 3, 4, 5, 6, 7])\n\n\n # Test for too many groups\n def splitcall(spl, dat):\n return list(spl.generate(dat))\n s20 = NGroupPartitioner(20)\n self.assertRaises(ValueError,splitcall,s20,self.data)", "def split_data(input_df, output_df):\n return train_test_split(input_df, output_df, test_size=0.2, random_state=42,\n stratify=output_df)", "def df_split_random(df, N, random_seed=None):\n random = 
np.random.RandomState(random_seed)\n\n all_local_indices = np.arange(len(df))\n shuffled = random.permutation(all_local_indices)\n\n df1 = df.iloc[shuffled[:N]]\n df2 = df.iloc[shuffled[N:]]\n return df1, df2", "def split_time_series_by_time_steps_index(df, n_time_steps=None):\n\t\n\ttime_steps = df.index.get_level_values(1).unique()\n\tsplit_time_step = time_steps[-n_time_steps]\n\ttrain = df[df.index.get_level_values(1) < split_time_step]\n\ttest = df[df.index.get_level_values(1) >= split_time_step]\n\t\n\treturn train, test", "def split(self, params):\n\n if \"train_df\" in params.keys():\n self.df = params[\"train_df\"]\n if \"test_df\" in params.keys():\n self.df = pd.concat([self.df, params[\"test_df\"]])\n if \"n_splits\" in params.keys():\n self.n_splits = params[\"n_splits\"]\n if \"shuffle\" in params.keys():\n self.shuffle = params[\"shuffle\"]\n if \"random_state\" in params.keys():\n self.random_state = params[\"random_state\"]\n\n self.__validate_input()\n\n n_samples = num_of_samples(self.df)\n\n if self.n_splits > n_samples:\n raise ValueError(\n f\"Cannot have number of splits {self.n_splits} > number of\"\n f\" samples {n_samples}\"\n )\n\n indices = np.arange(n_samples)\n for test_indices in self.__iter_test_indices(n_samples):\n train_indices = indices[np.logical_not(test_indices)]\n test_indices = indices[test_indices]\n yield train_indices, test_indices" ]
[ "0.80189484", "0.7166912", "0.68179005", "0.67057437", "0.6623575", "0.6451587", "0.6366774", "0.6241725", "0.61946136", "0.6189358", "0.61609524", "0.6113555", "0.6094658", "0.6066726", "0.6060644", "0.6053875", "0.60479224", "0.5984508", "0.59791416", "0.59433573", "0.59427154", "0.5933734", "0.5932551", "0.5930089", "0.59288275", "0.592263", "0.5899354", "0.5891334", "0.5874397", "0.5872169" ]
0.7756274
1