query (string, 9-9.05k chars) | document (string, 10-222k chars) | metadata (dict) | negatives (list of 30) | negative_scores (list of 30) | document_score (string, 4-10 chars) | document_rank (string, 2 classes) |
---|---|---|---|---|---|---|
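Each row pairs a natural-language `query` with a positive code `document`, 30 mined `negatives` with parallel `negative_scores`, plus the positive's own `document_score` and `document_rank`; the `metadata.objective` field declares a `(query, document, negatives)` triplet objective. As a minimal sketch (assuming rows are available as Python dicts with exactly these fields; the file name and loading path below are hypothetical), a consumer might expand each row into training triplets like this:

```python
import json

def row_to_triplets(row, max_negatives=5):
    """Expand one dataset row into (query, positive, negative) training triplets."""
    query = row["query"]
    positive = row["document"]
    # negatives and negative_scores are parallel lists of length 30; rank hardest first
    ranked = sorted(zip(row["negatives"], map(float, row["negative_scores"])),
                    key=lambda pair: pair[1], reverse=True)
    return [(query, positive, neg) for neg, _score in ranked[:max_negatives]]

# Hypothetical usage: one JSON object per line in rows.jsonl
with open("rows.jsonl") as fh:
    for line in fh:
        for triplet in row_to_triplets(json.loads(line)):
            ...  # feed to a contrastive / triplet-loss trainer
```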
Return a user by phone_num | def get_user_by_phone(phone_num):
    user = db.session.query(User).filter(phone_num == User.phone_num).first()
    return user
    # SELECT * FROM users WHERE phone_num == phone_num
    # User.query.filter(User.phone_num == phone_num).one() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_user_by_phone(phone_num):\n\n user = db.session.query(User).filter(phone_num == User.phone_num)\n return user\n \n # User.query.filter(User.phone_num == phone_num).one()",
"def get_user_by_phone(self, phone):\n sql = 'select id ,first_name' \\\n ',last_name' \\\n ',password' \\\n ',phone ' \\\n 'from account_user ' \\\n 'where phone = %s'\n user = User.objects.raw(sql, [phone])[0];\n return user",
"def harvest_by_phone(client, phone):\n try:\n entity = client(users.GetFullUserRequest(id=phone))\n except ValueError:\n return 'There is no account connected to this phone number'\n\n return harvest_user(client, entity)",
"def get(self, phone_number: str):\r\n args = authParser.parse_args()\r\n\r\n first_three = phone_number[:3]\r\n\r\n if first_three not in prefix_list and first_three != \"+23\":\r\n response = {\r\n \"status\": \"error\",\r\n \"details\": {\r\n \"message\": \"Input in a valid phone-number\"\r\n }\r\n }\r\n return response, http.client.BAD_REQUEST\r\n\r\n if len(phone_number) == 11 or len(phone_number) == 14:\r\n user = (UserModel.query.filter(\r\n UserModel.phone_number == phone_number).first())\r\n\r\n if not user:\r\n response = {\r\n \"status\": \"error\",\r\n \"detials\": {\r\n \"message\": \"User with phone number doesnt exist\"\r\n }\r\n }\r\n return response, http.client.NOT_FOUND\r\n\r\n user = UserModel.query.filter(\r\n UserModel.phone_number == phone_number).first()\r\n\r\n if not user:\r\n # The email doesnt exist\r\n return {\r\n \"status\": \"error\",\r\n \"details\": {\r\n \"message\": \"Not Found\"\r\n }\r\n }, http.client.OK\r\n user = admin_namespace.marshal(user, user_model)\r\n return {\r\n \"status\": \"success\",\r\n \"details\": {\r\n \"result\": user\r\n }\r\n }, http.client.OK",
"def _query_user(phone):\n if not is_valid_phone_number(phone):\n return None\n\n try:\n user = Profile.objects.get(mobile_phone=_remove_area_code(phone)).user\n\n return {\n 'first_name': user.first_name,\n 'last_name': user.last_name,\n 'groups': [group.name if group.name[0] != '_' else\n group.name[1:] for group in user.groups.all()]\n }\n except (ObjectDoesNotExist, MultipleObjectsReturned):\n # Expected output for a lot of calls. Not an error.\n return None",
"def get_user(conn ,phone_number: str) -> Tuple[str, List[str], str]:\n with conn.cursor() as cur:\n\n # Get user info from db.\n cur.execute(\"SELECT * FROM users WHERE phone_number = %s\", (phone_number,))\n usr = cur.fetchone()\n if usr is None:\n return None\n return usr",
"def get_user_or_placeholder(phone_num, nickname):\n if not SWE_PHONENUM_RE.match(phone_num):\n return {\n \"success\": False,\n \"msg\": \"Swedish format is required for phone number.\"\n }\n\n if not NICKNAME_RE.match(nickname):\n return {\n \"success\": False,\n \"msg\": \"Nicknames need to be 2-30 characters long and can only contain letters, numbers, spaces, dashes and underscores.\"\n }\n\n phone_num = strip_phone_num(phone_num) # Get last 9 digits\n user = User.query.filter_by(phone_num=phone_num).first()\n if not user:\n # Create placeholder until a user registers with associate_phone\n user = User(phone_num=phone_num, active=False)\n if not NICKNAME_RE.match(nickname):\n return {\n \"success\": False,\n \"msg\": \"Not a valid nickname.\"\n }\n user.nickname = nickname\n\n db.session.add(user)\n db.session.commit()\n\n return {\n \"success\": True,\n \"user\": user\n }",
"def ldap_get_number(self, user):\n result = super(Auth42, self)._search_not_empty(user)\n if result is not None:\n number = result.get(\"mobile-phone\")[0]\n return number\n\n return None",
"def get(self, phone):\n\n #args = argParser()\n #phone = args.parse_args().get(\"fromPhone\")\n\n if not UserExist(phone):\n return jsonify(generateReturnDictionary(301, \"Sorry, Mobile Wallet Account does not exists!, create an account.\", \"FAILURE\"))\n\n try:\n retJson = mongo.db.Register.find({\n \"Phone\": phone\n }, {\n \"Password\":0, # projection\n \"_id\":0,\n \"FirstName\":0,\n \"LastName\":0,\n \"Email\":0,\n \"Phone\":0,\n \"Network\":0,\n \"Username\":0,\n \"Password\":0,\n \"Debt\":0,\n \"DateTimeCreated\":0,\n \"apiKeys\":0\n })[0]\n return make_response(jsonify(retJson), 200)\n except Exception as e:\n retJson = {\n \"code\": 409,\n \"message\": \"There was an error while trying to check your wallect balance -> , try again!\",\n \"status\": \"FAILURE: {0}\".format(e.message)\n }\n return jsonify(retJson)",
"def get_user_phone(cls, userid):\n\n user = User.query.filter_by(user_id=userid).one()\n\n user_phone = user.mobile_phone\n\n return user_phone",
"def get_phone_number(user_id):\n try:\n student = _UserProfile.objects.get(user_id=user_id)\n except _UserProfile.DoesNotExist as exception:\n log.exception(exception)\n return None\n return student.phone_number or None",
"def get_or_create_user_instance(phone_number):\n try:\n user = User.objects.get(phone=phone_number)\n except User.DoesNotExist:\n user = User.objects.create(\n phone=phone_number,\n is_active=True,\n is_registration_finish=False,\n )\n created = not user.is_registration_finish\n\n return user, created",
"def phonenumber_in_db(self, phonenumber, users_list):\n return self.user_in_db(phonenumber, users_list, \"phonenumber\")",
"def phone_primary(self, instance):\r\n return instance.user.profile.phone_primary",
"def lookup_phone_number(phone):\n \n #create Twilio client\n client = Client(ACCOUNT_SID, AUTH_TOKEN)\n\n try:\n\n #check if number is real number using Twilio lookup\n phone_number = client.lookups \\\n .phone_numbers(phone) \\\n .fetch(type=['carrier'])\n\n #returns formmatted phone number\n return phone_number.phone_number\n\n #checks Twilio exception responses if number not real\n except TwilioRestException as e:\n\n #Number not found - return False\n if e.code == 20404:\n\n return False\n\n else:\n\n raise e",
"def get_user(self, key_value):\n if utils.is_numeric(key_value):\n key_value = int(key_value)\n query = TABELLE['users']['select']['from_id']\n else:\n if key_value[0] == '@':\n key_value = key_value[1:]\n query = TABELLE['users']['select']['from_username']\n user = self.execute(query, (key_value, key_value))\n return user",
"def login_user(phone_num, pword):\n phone_num = strip_phone_num(phone_num)\n user = User.query.filter_by(phone_num=phone_num).first()\n if not user:\n return {\n \"success\": False,\n \"msg\": \"User does not exist.\"\n }\n if not user.active:\n return {\n \"success\": False,\n \"msg\": \"The user account registered with this phone number is not active. Please register an account with this phone number to login.\"\n }\n if not user.check_password(pword):\n return {\n \"success\": False,\n \"msg\": \"Wrong password.\"\n }\n\n token = create_access_token(identity=user.id)\n # token_jti = get_jti(encoded_token=token)\n # redis_store.set(token_jti, 'false', ACCESS_EXPIRES * 1)\n\n # # if user already has a session, expire that token before returning new one\n # cached_user_jti = redis_store.get(phone_num)\n # if cached_user_jti:\n # redis_store.set(cached_user_jti, 'true', ACCESS_EXPIRES * 1)\n\n # redis_store.set(phone_num, token_jti)\n\n return {\n \"success\": True,\n \"msg\": \"User logged in\",\n \"token\": token\n }",
"def get_user(id):\n pass",
"def validate_username(self, attrs, source):\n phone_no = attrs[source]\n if not phoneCleaner(phone_no):\n raise serializers.ValidationError(\"Please check your phone no. the format is incorrect\")\n\n try:\n User.objects.get(username__iexact=phone_no)\n except User.DoesNotExist:\n return attrs\n raise serializers.ValidationError(\"Phone number already exists. If are trying to glue, consider the glue option\")",
"def validate_phone_number(self, phone_number):\n if User.objects.filter(phone_number=phone_number).exists():\n raise serializers.ValidationError('Phone Number already registered.')\n return phone_number",
"def get_one_user():",
"def lookup_phone(ikey, skey, host, phone):\n response = client.call_json_api(\n ikey, skey, host, 'GET', '/verify/v1/lookup/phone.json',\n phone=[phone])\n return response",
"def get(self, no):\n user = get_a_user(no)\n if not user:\n api.abort(404)\n else:\n return user",
"def phone_mobile(self, instance):\r\n return instance.user.profile.phone_mobile",
"def getPhoneUuid(phoneName):\n return searchForPhone(phoneName)['uuid']",
"def search_by_phone_number(self, phone_number):\r\n if len(re.findall(\"[^0-9-+ ]+\", phone_number)) or len([c for c in phone_number if c == '+']) > 1:\r\n raise PersonPhoneNumberException(\"Invalid phone number search input. Can only contain digits, hyphens,\"\r\n \"spaces, and a plus sign(+).\")\r\n phone_number = phone_number.replace(' ', '')\r\n phone_number = phone_number.replace('-', '')\r\n phone_number = phone_number.replace('+4', '')\r\n return self.__filter(self.get_all_persons(), lambda x: phone_number in x.phone_number.replace(' ', ''))",
"def validate_phone(self, data):\n value = data.strip()\n if re.match(constant.NUMBER_ONLY, value):\n if User.objects.filter(phone=value).exists():\n raise serializers.ValidationError('phone number already registered')\n return value\n raise serializers.ValidationError(VALIDATION['phone']['invalid'])",
"def readrecord(phones,username,phonenum):\r\n if username in phones:\r\n raise ValueError(username+ \":\"+phones[username])\r\n else:\r\n raise ValueError(\"This username are not exist\")",
"def get_info(email):\n # Get the first user where _id=email\n user = models.User.objects.raw({\"_id\": email}).first()\n return user",
"def get_user_from_jwt_token(user_from_token):\n user_id = user_from_token.get('id')\n user_address = user_from_token.get('address')\n\n if user_id and user_address:\n user = User.get_by_id(user_id)\n\n if user and user.id == user_id and user.address == user_address:\n return user\n\n return None"
]
| [
"0.84271127",
"0.83181894",
"0.79030603",
"0.77142286",
"0.76056916",
"0.74515444",
"0.7231632",
"0.70186967",
"0.6720994",
"0.66531473",
"0.6642142",
"0.6481888",
"0.6426293",
"0.63712114",
"0.6245403",
"0.6233911",
"0.620776",
"0.6156473",
"0.6141519",
"0.6140055",
"0.6127874",
"0.61268115",
"0.60856605",
"0.6063589",
"0.6026829",
"0.6009662",
"0.5990604",
"0.5987787",
"0.59747785",
"0.59735864"
]
| 0.8424238 | 1 |
Delete a user from DB by phone num | def remove_user(user):
    # user.confirmed = False
    # user = get_user_by_phone(phone_num)
    db.session.delete(user)
    db.session.commit()
    return user
    # DELETE FROM users WHERE user.phone_num == phone) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def deleterecord(phones,username,phonenum):\r\n if username in phones:\r\n del phones[username]\r\n else:\r\n raise ValueError(\"This username are not exist\")",
"def delete_user():",
"def delete_user(id):\n pass",
"def delete_user():\n #TODO user delete\n pass",
"def delete(self, user_id):\n\n user = User.objects.get_or_404(public_id=user_id)\n return user.delete()",
"async def delete_phone(self, code: int, prefix: int, phone: int, password: str):\n data = {\n \"countryCode\": code,\n \"prefix\": prefix,\n \"phone\": phone,\n \"password\": password\n }\n e = await self.request.request(url='https://accountinformation.roblox.com/v1/phone/delete', method='post',\n data=data)\n return e",
"def delete_user(id_user: int):\n mycursor.execute(f\"\"\"DELETE FROM User\n WHERE id_user = {id_user}\"\"\")\n mydb.commit()\n return f\"L'utilisateur {id_user} a été supprimé\"",
"def delete_user():\r\n raise NotImplementedError()",
"def delete(self):\n data = UserRegister.parser.parse_args()\n user = UserModel.find_by_username(data['username'])\n\n if user:\n user.delete_from_db()\n else :\n return {'message': 'User not found!'} , 204\n\n return {'message': 'User deleted'},202",
"def delete(cls):\n user = user_schema.load(request.get_json(), partial=(\"email\",))\n\n current_identity = get_jwt_identity()\n db_user = UserModel.find_by_id(current_identity)\n logging.info(\n f\"Delete called by {db_user.id}: {db_user.username} with data: {user['username']}\"\n )\n if db_user.username == user['username']:\n if is_correct_password(db_user.pw_salt, db_user.pw_hash, user['password']):\n db_user.delete_from_db()\n return {\"message\": msgs.DELETED.format(db_user.username)}, 200\n else:\n return {\"error\": msgs.INVALID_PASSWORD}, 401\n return {\"error\": msgs.OWN_RECORD_ONLY}, 401",
"def delete_user(self,userid, cursor):\n sql=\"DELETE FROM users WHERE userid = %s\"\n cursor.execute(sql,(userid))",
"def delete_user(self, user):\n self.execute(TABELLE['id_users'][\"delete\"], user[\"id\"])",
"def delete(user_id: int):\n usr = get_by_id(user_id)\n if not usr:\n raise UserNotFound\n\n db.session.delete(usr)\n db.session.commit()",
"def delete(no):\n\n conn = sqlite3.connect(\"person_database.bd\")\n c = conn.cursor()\n\n # delete a record\n c.execute(f\"DELETE from person_info WHERE oid= \" + str(no))\n\n conn.commit()\n conn.close()",
"def delete_user(self):\n raise NotImplementedError(\"Function not yet implemented contact package creator\")",
"def delete(self, userinformation):\n self.db.remove(userinformation)",
"def user_id_delete(user_id):\n user = storage.get(\"User\", user_id)\n\n if user is None:\n abort(404)\n user.delete()\n del user\n return make_response(jsonify({}), 200)",
"def delete(user_id):\n # Get the user requested\n user = User.query.filter(User.user_id == user_id).one_or_none()\n\n if user is not None:\n db.session.delete(user)\n db.session.commit()\n return (\n \"User {user_id} deleted\".format(user_id=user_id), 200\n )\n\n else:\n abort(\n 404,\n \"Person not found for Id: {user_id}\".format(user_id=user_id),\n )",
"def delete_user(self, user):\n self.delete(user)",
"def del_user_id(user_id):\r\n obj = storage.get(User, user_id)\r\n if obj is None:\r\n abort(404)\r\n obj.delete()\r\n storage.save()\r\n return jsonify({}), 200",
"def DelteUser(database):\n firstname=str(input(\"what is the name of the user you want to delete : \"))\n delusr,find =getByName(database,firstname)\n if not find:\n return\n del database[delusr.key]\n for key,usr in database.items():\n if delusr.key in usr.folow:\n usr.folow.remove(delusr.key)\n if delusr.key in usr.folowed:\n usr.folowed.remove(delusr.key)\n \n os.remove(f\"Users/{delusr.key}\")",
"def delete_user_process(user_id):\n\n db_user = User.query.get_or_404(user_id)\n\n db.session.delete(db_user)\n db.session.commit()\n\n return redirect(\"/users\")",
"def delete_user():\n del globalopts.appdata[request.user]\n del globalopts.users[request.user]\n return \"\", 200",
"def del_user(self, username):\n pass",
"def delete_user(UserName=None, AuthenticationType=None):\n pass",
"def delete(self, request, phone):\n attrs = self.flatten_dict(request.POST)\n try:\n endpoint = Endpoint.objects.get(uid__exact=phone, site__name__exact=request.user)\n np = NumberPlan.objects.get(phone_number=phone, site__name__exact=request.user)\n endpoint.enable=False\n np.status=2\n endpoint.save()\n np.save()\n # TODO add parking\n return rc.DELETED\n except:\n return rc.NOT_HERE",
"def user_delete(user_id):\n user = storage.get('User', user_id)\n if user is None:\n abort(404)\n user.delete()\n storage.save()\n return jsonify({}), 200",
"def delete_user(self, user):\n try:\n with dbm.open(self.dbm_path, 'c', 0o600) as db:\n del db[user.name]\n except KeyError as k:\n pass",
"def delete_user(id):\n user = Users.query.filter_by(id=id).first()\n user.delete()\n if not user:\n return send_msg(404, 'Not Found')\n return send_msg(204, \"No data\")",
"def del_user_by_username(name):\n collection = get_collection(\"user\")\n collection.delete_one({\"name\": name})\n return True"
]
| [
"0.7692403",
"0.7642171",
"0.7386278",
"0.7218295",
"0.69013256",
"0.6894429",
"0.6786851",
"0.6780384",
"0.6767511",
"0.66952723",
"0.66802424",
"0.66753626",
"0.663638",
"0.66360664",
"0.6624687",
"0.65962803",
"0.6593279",
"0.65769345",
"0.65746725",
"0.6572516",
"0.65620494",
"0.65520096",
"0.6538418",
"0.65315956",
"0.65108615",
"0.64959466",
"0.6488288",
"0.6481205",
"0.6460259",
"0.6457153"
]
| 0.7751427 | 0 |
Merge two sorted lists. Returns a new sorted list containing all of the elements that are in either list1 or list2. This function can be iterative. | def merge(list1, list2):
    answer = []
    assert answer == sorted(answer)
    idx1 = 0
    idx2 = 0
    while (idx1 < len(list1)) and (idx2 < len(list2)):
        if list1[idx1] < list2[idx2]:
            answer.append(list1[idx1])
            idx1 += 1
        elif list1[idx1] > list2[idx2]:
            answer.append(list2[idx2])
            idx2 += 1
        else:
            answer.append(list1[idx1])
            answer.append(list2[idx2])
            idx1 += 1
            idx2 += 1
    assert answer == sorted(answer)
    answer.extend(list1[idx1:])
    answer.extend(list2[idx2:])
    assert answer == sorted(answer)
    return answer | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def merge(list1: list, list2: list) -> list:\r\n result = []\r\n i = 0\r\n j = 0\r\n # Iterate through each element and append the smaller element of each list to the resulting list.\r\n while i < len(list1) and j < len(list2):\r\n if list1[i] < list2[j]:\r\n result.append(list1[i])\r\n i += 1\r\n else:\r\n result.append(list2[j])\r\n j += 1\r\n\r\n # Append the remaining lists to the resulting list.\r\n result.extend(list1[i:])\r\n result.extend(list2[j:])\r\n return result",
"def merge(list1, list2):\n result_list = []\n list1_length = len(list1)\n list2_length = len(list2)\n list1_index = 0\n list2_index = 0\n while list1_index < list1_length and list2_index < list2_length:\n if list1[list1_index] <= list2[list2_index]:\n result_list.append(list1[list1_index])\n list1_index = list1_index + 1\n else:\n result_list.append(list2[list2_index])\n list2_index = list2_index + 1\n \n if list1_index < list1_length:\n result_list.extend(list1[list1_index:])\n if list2_index < list2_length:\n result_list.extend(list2[list2_index:])\n \n return result_list",
"def merge(list_1, list_2):\n l1, l2 = len(list_1), len(list_2) # Store the length of each list\n merged_output = [None for i in range(l1 + l2)]\n i, j = 0, 0\n # Compare each element of the two lists till one of them is exhausted\n while i < l1 and j < l2:\n if list_1[i] <= list_2[j]:\n merged_output[i + j] = list_1[i]\n i += 1\n else:\n merged_output[i + j] = list_2[j]\n j += 1\n\n # Check if list_1 is exhausted, add remaining element to the output\n for j in range(j, l2):\n merged_output[i + j] = list_2[j]\n\n # Check if list_2 is exhausted, add remaining element to the output\n for i in range(i, l1):\n merged_output[i + j] = list_1[i]\n\n # print(merged_output)\n return merged_output",
"def merge(list1, list2):\n res = []\n index_i, index_j = 0, 0\n while index_i < len(list1) and index_j < len(list2):\n if list1[index_i] <= list2[index_j]:\n res.append(list1[index_i])\n index_i += 1\n else:\n res.append(list2[index_j])\n index_j += 1\n res += list1[index_i:]\n res += list2[index_j:]\n return res",
"def merge ( list1, list2 ):\n new_list = []\n while len(list1)>0 and len(list2)>0:\n if list1[0] < list2[0]:\n new_list.append (list1[0])\n del list1[0]\n else:\n new_list.append (list2[0])\n del list2[0]\n return new_list + list1 + list2",
"def merge(list1, list2):\n merged = []\n if len(list1) < 1 or len(list2) <1:\n return list1 + list2\n else:\n ind_1 = 0\n ind_2 = 0\n while ind_1 < len(list1) and ind_2 < len(list2):\n #some appends to lists\n if list1[ind_1] < list2[ind_2]:\n merged.append(list1[ind_1])\n ind_1 += 1\n elif list2[ind_2] < list1[ind_1]:\n merged.append(list2[ind_2])\n ind_2 += 1\n elif list1[ind_1] == list2[ind_2]:\n merged.append(list1[ind_1])\n merged.append(list2[ind_2])\n ind_1 += 1\n ind_2 += 1\n #if reach end of one list, copy the remainder of the other\n if ind_1 >= len(list1) and ind_2 < len(list2):\n merged += list2[ind_2:]\n ind_2 = len(list2)\n elif ind_2 >= len(list2) and ind_1 < len(list1):\n merged += list1[ind_1:]\n ind_1 = len(list1)\n return merged",
"def merge_lists(list_1, list_2):\n if len(list_1) == 0:\n return list_2\n if len(list_2) == 0:\n return list_1\n\n new_list = []\n length = len(list_1) + len(list_2)\n while len(new_list) < length:\n if len(list_1) == 0:\n new_list = new_list + list_2\n elif len(list_2) == 0:\n new_list = new_list + list_1\n\n elif list_1[0] < list_2[0]:\n new_list.append(list_1[0])\n list_1.remove(list_1[0])\n elif list_1[0] >= list_2[0]:\n new_list.append(list_2[0])\n list_2.remove(list_2[0])\n return new_list",
"def merge(lst1, lst2):\n\n results = []\n i = 0\n j = 0\n\n while i <= len(lst1) - 1 and j <= len(lst2) - 1:\n\n if lst1[i] < lst2[j]:\n results.append(lst1[i])\n i += 1\n else:\n results.append(lst2[j])\n j += 1\n\n if i == len(lst1):\n results.extend(lst2[j:])\n else:\n results.extend(lst1[i:])\n\n return results",
"def merge(list1: list, list2: list) -> list:\n output = []\n i, j = 0, 0\n while i < len(list1) and j < len(list2):\n if list1[i][1] <= list2[j][1]:\n output += [list1[i]]\n i += 1\n else:\n output += [list2[j]]\n j += 1\n return output + list1[i:] + list2[j:]",
"def merge(l1, l2):\n i = j = 0\n output = []\n\n while i < len(l1) and j < len(l2):\n if l1[i] <= l2[j]:\n output.append(l1[i])\n i += 1\n else:\n output.append(l2[j])\n j += 1\n\n output.extend(l1[i:] + l2[j:])\n\n return output",
"def merge_ordered_list(in_list1: list, in_list2: list) -> list:\n _list1 = in_list1.copy()\n _list2 = in_list2.copy()\n _output_list = []\n idx_2 = 0\n for element in _list1:\n while idx_2 < len(_list2) and element > _list2[idx_2]:\n _output_list.append(_list2[idx_2])\n idx_2 += 1\n _output_list.append(element)\n while idx_2 < len(_list2):\n _output_list.append(_list2[idx_2])\n idx_2 += 1\n return _output_list",
"def merge(list1, list2): \n result = []\n copy1, copy2 = list1[:], list2[:]\n \n while min(copy1, copy2):\n if copy1[0] < copy2[0]:\n result.append(copy1[0])\n copy1.pop(0)\n else:\n result.append(copy2[0])\n copy2.pop(0)\n \n if copy1:\n result += copy1\n elif copy2:\n result += copy2\n \n return result",
"def merge(list_a, list_b):\n new_list = []\n i = 0\n j = 0\n while (i < len(list_a) and j < len(list_b)):\n if(list_a[i] < list_b[j]):\n new_list.append(list_a[i])\n i += 1\n else:\n new_list.append(list_b[j])\n j += 1\n new_list += list_a[i:]\n new_list += list_b[j:]\n\n return new_list",
"def merge(list1, list2): \r\n if len(list1) == 0 or len(list2) == 0:\r\n new_list = [item for item in list1]\r\n new_list.extend(list2)\r\n return new_list\r\n else:\r\n if list1[0] <= list2[0]:\r\n new_list = list([list1[0]])\r\n new_list.extend(merge(list1[1:], list2))\r\n return new_list\r\n else:\r\n new_list = list([list2[0]])\r\n new_list.extend(merge(list1, list2[1:]))\r\n return new_list",
"def merge(list1, list2):\n merge_list = []\n l1_copy = list(list1)\n l2_copy = list(list2)\n\n # cycling through list1 and list2: we check the first element in\n # list2, if it's smaller than the first element in list1 we copy it to\n # the merge list and pop it out of list2. Else we break the loop and\n # copy the first element of list1, then pop it and proceed again\n while l1_copy:\n while l2_copy:\n if l2_copy[0] < l1_copy[0]:\n merge_list.append(l2_copy[0])\n l2_copy.pop(0)\n else:\n break\n merge_list.append(l1_copy[0])\n l1_copy.pop(0)\n\n # if list2 is not empty once list1 is, add the remaining elements to the\n # end of the merge list\n if l2_copy:\n merge_list.extend(l2_copy)\n\n return merge_list",
"def _merge_lists(cls, li1, li2):\n if not li1:\n return li2[:]\n elif not li2:\n return li1[:]\n else:\n li = li1[:]\n for el in li2:\n if el not in li:\n li.append(el)\n return li",
"def merge_lists(l1, l2):\n return [ *l1, *l2 ]",
"def _merge_two_sorted_list(sorted_list_head, sorted_list_tail):\n sorted_list_result = list()\n head_index = 0\n tail_index = 0\n len_head = len(sorted_list_head)\n len_tail = len(sorted_list_tail)\n\n while head_index < len_head and tail_index < len_tail:\n print(sorted_list_head, ' : ', sorted_list_tail)\n if sorted_list_head[head_index] < sorted_list_tail[tail_index]:\n sorted_list_result.append(sorted_list_head[head_index])\n head_index += 1\n elif sorted_list_head[head_index] > sorted_list_tail[tail_index]:\n sorted_list_result.append(sorted_list_tail[tail_index])\n tail_index += 1\n elif sorted_list_head[head_index] == sorted_list_tail[tail_index]:\n sorted_list_result.append(sorted_list_head[head_index])\n sorted_list_result.append(sorted_list_tail[tail_index])\n head_index += 1\n tail_index += 1\n\n if head_index < len_head:\n sorted_list_result.extend(sorted_list_head[head_index:])\n elif tail_index < len_tail:\n sorted_list_result.extend(sorted_list_tail[tail_index:])\n\n return sorted_list_result",
"def merge(l1, l2):\n\n #Reverse the lists\n l1 = list(reversed(l1))\n l2 = list(reversed(l2))\n\n ret = []\n\n while True:\n # If either list is empty, reverse the other one and append it to the end\n if not l1:\n ret.extend(reversed(l2))\n return ret\n if not l2:\n ret.extend(reversed(l1))\n return ret\n\n # Append the lowest last element of the two lists\n ret.append(l1.pop() if l1[-1] < l2[-1] else l2.pop())",
"def merge_lists(a_lst, b_lst):\n\n i = 0\n j = 0\n merged_list = []\n while i < len(a_lst) and j < len(b_lst):\n \n if a_lst[i] < b_lst[j]:\n merged_list.append(a_lst[i])\n i += 1\n else:\n merged_list.append(b_lst[j])\n j += 1\n if i < len(a_lst):\n merged_list.extend(a_lst[i:])\n if j < len(b_lst):\n merged_list.extend(b_lst[j:])\n return merged_list",
"def _merge_lists(list1, list2):\n for v2 in reversed(list2):\n if isinstance(v2, Descriptor):\n if v2 in list1:\n v1 = list1.pop(list1.index(v2))\n list1.insert(0, v1.merge(v2))\n else:\n list1.insert(0, v2)\n elif isinstance(v2, list):\n raise CekitError(\"Cannot merge list of lists\")\n else:\n if v2 not in list1:\n list1.insert(0, v2)\n\n return list1",
"def merge(lst1, lst2):\n if not lst1 or not lst2:\n return lst1 + lst2\n elif lst1[0] < lst2[0]:\n return [lst1[0]] + merge(lst1[1:], lst2)\n else:\n return [lst2[0]] + merge(lst1, lst2[1:])",
"def merge(l1, l2):\n # Edge cases, where nothing is to be done.\n if l1 is None and l2 is None: return l1\n if l1 is None: return l2\n if l2 is None: return l1\n\n # Vars to hold,\n # head -> a dummy head to keep a reference to the start of the merged\n # list.\n # _iter -> to move through the merged list.\n head = ListNode(float('-inf'))\n _iter = head\n\n # As long as both the lists are not exhausted,\n while l1 and l2:\n\n # Make the next of _iter as the smaller node.\n if l1.val <= l2.val:\n _iter.next = l1\n l1 = l1.next\n else:\n _iter.next = l2\n l2 = l2.next\n # Move _iter forward.\n _iter = _iter.next\n\n # If either of the lists remain, add them to the end,\n # Note: at-least one of the lists would be exhausted by now,\n # and the remaining one is sorted in itself, which is why this works.\n if not l1: _iter.next = l2\n if not l2: _iter.next = l1\n\n # Return a reference to the start of the merged list.\n return head.next",
"def merge(left, right):\n\n # Initializing pointers.\n leftPtr = 0\n rightPtr = 0\n result = []\n\n # Merging and sorting two sublists.\n while leftPtr < len(left) and rightPtr < len(right):\n if left[leftPtr][0] < right[rightPtr][0] or \\\n (left[leftPtr][0] == right[rightPtr][0] and left[leftPtr][1] < right[rightPtr][1]):\n result.append(left[leftPtr])\n leftPtr += 1\n else:\n result.append(right[rightPtr])\n rightPtr += 1\n\n # Extending the leftover in the sublists.\n if leftPtr < len(left):\n result.extend(left[leftPtr:])\n elif rightPtr < len(right):\n result.extend(right[rightPtr:])\n\n return result",
"def merge_lists_w_ordering(a: List[Any], b: List[Any]) -> List[Any]:\n overlap = set(a).intersection(b)\n\n result = []\n\n current, other = iter(a), iter(b)\n\n while True:\n for element in current:\n if element in overlap:\n overlap.discard(element)\n other, current = current, other\n break\n\n result.append(element)\n else:\n result.extend(other)\n break\n\n return result",
"def merge(list1, list2):\n holding = list1.to_list()\n [holding.append(i) for i in list2.to_list()]\n # for i in list2.to_list():\n # holding.append(i)\n holding = sorted(holding)\n\n output = LinkedList(Node(holding[0]))\n for i in holding[1:]:\n output.append(i)\n return output",
"def merge_two_sorted_lists(lst1, lst2):\n\n dummy_head = tail = ListNode() # head and tail start pointing to the same dummy node, then tail converges\n while lst1 and lst2:\n if lst1.data < lst2.data:\n tail.next = lst1 # the FIRST tail.next node is where the actual merge begins\n lst1 = lst1.next\n else:\n tail.next = lst2\n lst2 = lst2.next\n tail = tail.next\n # append the remaining nodes of list 1 or list 2\n tail.next = lst1 or lst2 # when one list becomes None, the 'or' returns the remaining nodes of the other\n return dummy_head.next # dummy_head.next is the node appended with the FIRST tail.next statement",
"def merge(first_list, second_list):\r\n result_list = []\r\n\r\n def check_for_group():\r\n \"\"\"Inner function,so that it has access to merges' local variables,\r\n that checks for groups\"\"\"\r\n if first_list[0][0] == second_list[0][0]:\r\n try:\r\n result = first_list[0][0], str(int(first_list[0][1]) + int(second_list[0][1]))\r\n except ValueError:\r\n result = first_list[0][0], str(float(first_list[0][1]) + float(second_list[0][1]))\r\n result_list.append(result)\r\n first_list.remove(first_list[0])\r\n second_list.remove(second_list[0])\r\n return True\r\n return False\r\n\r\n while first_list and second_list:\r\n if first_list[0] > second_list[0]:\r\n if not check_for_group():\r\n result_list.append(second_list[0])\r\n second_list.remove(second_list[0])\r\n else:\r\n if not check_for_group():\r\n result_list.append(first_list[0])\r\n first_list.remove(first_list[0])\r\n empty_lists(first_list, second_list, result_list)\r\n return result_list",
"def merge(l1,l2):\n\n result = []\n\n while l1 and l2:\n if l1[0] < l2[0]:\n result.append(l1.pop(0))\n else:\n result.append(l2.pop(0))\n\n while l1:\n result.append(l1.pop(0))\n\n while l2:\n result.append(l2.pop(0)) \n\n return result",
"def merge(left, right):\n aList = []\n lt = 0\n rt = 0\n\n #Repeatedly move the smallest of left and right to the new list\n while lt < len(left) and rt < len(right):\n if left[lt] < right[rt]:\n aList.append(left[lt])\n lt += 1\n else:\n aList.append(right[rt])\n rt += 1\n\n #There will only be elements left in one of the original two lists.\n\n #Append the remains of left (lt..end) on to the new list.\n while lt < len(left):\n aList.append(left[lt])\n lt += 1\n \n #Append the remains of right (rt..end) on to the new list.\n while rt < len(right):\n aList.append(right[rt])\n rt += 1\n\n return aList"
]
| [
"0.83323663",
"0.8261311",
"0.8246922",
"0.8237985",
"0.82039124",
"0.81994414",
"0.81919336",
"0.8135797",
"0.81266",
"0.81188804",
"0.80913657",
"0.8061905",
"0.7968006",
"0.79673326",
"0.7962454",
"0.7873351",
"0.7739397",
"0.7723716",
"0.7709938",
"0.7707734",
"0.76907146",
"0.76156855",
"0.7576954",
"0.75732195",
"0.7542789",
"0.74996483",
"0.74935913",
"0.74731165",
"0.74669826",
"0.7443875"
]
| 0.8383289 | 0 |
Override initializer for Grid, add queue to store boundary of fire | def __init__(self, grid_height, grid_width):
    poc_grid.Grid.__init__(self, grid_height, grid_width)
    self._fire_boundary = poc_queue.Queue() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def on_init(self, queue=None, **kwargs):\n self.queue = queue if queue else Queue()",
"def __init__(self) -> None:\n self._queue = []",
"def __init__(self):\n self._grid = [[None]]",
"def __init__(self):\n self.queue = Queue()",
"def __init__(self):\n self.queue = []",
"def __init__(self):\n self.queue = []",
"def __init__(self):\n self.queue = []",
"def __init__(self):\n self.queue = []",
"def __init__(self):\n self.queue = []",
"def __init__(self):\n\n super(VirtualTimeScheduler, self).__init__()\n self.event_queue = Queue.PriorityQueue()",
"def __init__(self):\r\n self.queue = []",
"def __init__(self):\r\n self.queue = []",
"def __init__(self):\n self._queue_items = []",
"def __init__(self):\n self.queues=[]",
"def __init__(self):\n self.l_queue, self.r_queue = [], []",
"def __init__(self):\n self.queue = []\n self.queue.append(Queue())\n self.queue.append(Queue())\n self.tag = 0 # using to record which queue contain the data",
"def __init__(self):\n Queue.__init__(self)",
"def __post_init__(self) -> None:\n self.gtex += [None]\n self.bm += [None]\n self._q: queue.Queue = queue.Queue(maxsize=self.maxsize)",
"def __init__(self): \n self.queue = []",
"def __init__(self):\n self.push_queue = []\n self.pop_queue = []",
"def __init__(\n self, grid, mean_fire_recurrence=1.0, shape_parameter=3.5, scale_parameter=None\n ):\n super().__init__(grid)\n self._mean_fire_recurrence = mean_fire_recurrence\n\n self._shape_parameter = shape_parameter\n\n if scale_parameter is None:\n self.get_scale_parameter()\n\n else:\n self._scale_parameter = scale_parameter",
"def __init__(self, queue: Queue):\n super().__init__()\n self._cursor = 0\n self._queue = queue\n self._all_transferred = threading.Event()",
"def __init__(self, grid, location):\n self.grid = grid\n self.location = location # Tuple containing (x, y) coordinates.",
"def __init__( self ):\n\n self.__grid = list(range(1,10))\n self.__drawnGrid = 0",
"def __init__(self, initial_grid):\n part_1.Grid.__init__(self, initial_grid)\n self.turn_on_corners()",
"def __init__(self, origin_x=-2.5, origin_y=-2.5, resolution=.1,\n width=50, height=50):\n self.origin_x = origin_x\n self.origin_y = origin_y\n self.resolution = resolution\n self.width = width\n self.height = height\n self.grid = np.zeros((height, width))",
"def __init__(self, queue_id=None):\n super().__init__()\n self.queue_id = queue_id",
"def __init__(self):\n self.data = Queue()",
"def __init__(self):\n # NOTE: DO not modify this method\n self.items = SimpleQueue()",
"def __init__(self, grid, x, y, cols):\n self.grid = grid\n self.x = x\n self.y = y\n self.cols = cols"
]
| [
"0.6840169",
"0.6793105",
"0.6785803",
"0.67526734",
"0.6619239",
"0.6619239",
"0.6619239",
"0.6619239",
"0.6619239",
"0.6547735",
"0.653138",
"0.653138",
"0.65267134",
"0.65173596",
"0.6460772",
"0.64500934",
"0.64124566",
"0.64041156",
"0.63946074",
"0.6388912",
"0.63709074",
"0.6337994",
"0.633792",
"0.6330944",
"0.63199544",
"0.62859887",
"0.6244523",
"0.6206802",
"0.6206394",
"0.6204063"
]
| 0.82730323 | 0 |
Return the size of the boundary of the fire | def boundary_size(self):
    return len(self._fire_boundary) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def length(self):\n return pyvista.Box(self.bounds).length",
"def _rect_size(self):\n bnd = self._bounds\n return (bnd[1][0] - bnd[0][0], bnd[1][1] - bnd[0][1])",
"def size(self):\n bbox = self.bbox\n return bbox[1] - bbox[0]",
"def length(self):\n return float(np.max([self.width(),self.height()]))",
"def get_dimensions(self):\n x = max(self.bodies, key=lambda p: p.position[0]).position[0]\n y = max(self.bodies, key=lambda p: p.position[1]).position[1]\n return max(x, y) * 1.2",
"def get_size(self):\n return self._surf.get_size()",
"def getLength(self):\n return self.geometry.length",
"def area(self):\n return self.__size ** 2",
"def w(self):\r\n return self.size.x",
"def boundaries_size(*args):\n return _ida_hexrays.boundaries_size(*args)",
"def compute_size(self):\n length = np.max(np.max(self.positions, axis=1) -\n np.min(self.positions, axis=1))\n return length + 2*self.get_radii().max()",
"def dimension(self):\r\n a = 0\r\n for x in self.faces():\r\n if (len(x) > a):\r\n a = len(x) \r\n return a-1",
"def area(self):\n return self.__size ** 2",
"def area(self):\n return self.__size ** 2",
"def area(self):\n return self.__size ** 2",
"def area(self):\n return self.__size ** 2",
"def get_length(self):\n return math.sqrt(self.x**2 + self.y**2)",
"def area(self):\n return (self.__size ** 2)",
"def area(self):\n return (self.__size ** 2)",
"def area(self):\n return (self.__size ** 2)",
"def pointsize(self):\n\treturn self.m_pointsize",
"def size(self) -> Point:\n\t\treturn self._size",
"def __len__(self) -> int:\n return self.width * self.height",
"def getWidth(self):\n\t\tif (self.position==[]):\n\t\t\treturn 0\n\t\treturn abs(self.position[1][0]-self.position[0][0])",
"def size(self):\n return self.width",
"def size(self):\n return self.width",
"def size(self):\n return self.width",
"def size(self):\n return self.width",
"def size(self):\n return self.width",
"def size(self):\n return self.width"
]
| [
"0.73574746",
"0.71820253",
"0.71315867",
"0.70857227",
"0.7034427",
"0.7019021",
"0.6930683",
"0.6853269",
"0.6830643",
"0.67914754",
"0.6754808",
"0.6728078",
"0.67057216",
"0.67057216",
"0.67057216",
"0.67057216",
"0.6703964",
"0.66926634",
"0.66926634",
"0.66926634",
"0.6624104",
"0.6624085",
"0.661498",
"0.66049755",
"0.6601803",
"0.6601803",
"0.6601803",
"0.6601803",
"0.6601803",
"0.6601803"
]
| 0.811731 | 0 |
Generator for the boundary of the fire | def fire_boundary(self):
    for cell in self._fire_boundary:
        yield cell
    # alternative syntax
    #return (cell for cell in self._fire_boundary) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def boundary(self): # -> BaseGeometry:\n ...",
"def boundaries_next(*args):\n return _ida_hexrays.boundaries_next(*args)",
"def make_boundaries(self):\n p = self.project\n c = p[0]\n outlet = p.NewOutlet('GW', c.x, c.y, c.z - c.soildepth)\n cmf.FreeDrainagePercolation(c.layers[-1], outlet)\n rainfall = cmf.timeseries.from_sequence(self.starttime, cmf.day, [25, 0, 0, 0, 0, 0, 0] * 200)\n p.rainfall_stations.add('Heavy rain once a week', rainfall, (0, 0, 0))\n print(cmf.describe(p.rainfall_stations))\n p.use_nearest_rainfall()\n\n return outlet",
"def real_boundaries(self):\n return (self._points[0][1], self._points[0][3])",
"def closure(self):\n return self + self.boundary",
"def interior(self):\n return self - self.boundary",
"def dirichlet_boundary(self,u,intStep = None):\n return u",
"def boundary(gap, min_tags_in_window, average):\n\tassert min_tags_in_window >= 1;\n\ttemp = 0;\n\tfor i in range(0, min_tags_in_window): temp += poisson(i, average);\n\ttemp = pow(temp, gap+1); \n\treturn temp*temp; # start & end ",
"def boundary(self):\n return self.substrates.boundary",
"def old_data(self) -> Generator[Tuple[int, int], Tuple[bool, bool], None]:\n x = self.x_start\n y = self.y_start\n yield x, y\n while True:\n x += self.x_rate\n y += self.y_rate\n # Get whether the object has collided with a wall\n horizontal_hit, vertical_hit = yield int(x), int(y)\n # Simply reverse the x/y rate depending on the wall type\n # TODO: could add a little variance in the angle with every bounce\n if horizontal_hit:\n self.y_rate *= -1\n if vertical_hit:\n self.x_rate *= -1",
"def simulate_boundary(self,print_every=1000,do_F_bound=True):\n n_t = self.t_span.size\n self.n_t = n_t\n x = self.x0.copy()\n self._triangulate(x)\n self.assign_vertices()\n x = self.check_boundary(x)\n self.x = x.copy()\n self.x_save = np.ones((n_t,int(self.n_c*self.b_extra),2))*np.nan\n self.tri_save = -np.ones((n_t,int(self.tris.shape[0]*self.b_extra),3),dtype=np.int32)\n self.generate_noise_boundary()\n if do_F_bound is True:\n for i in range(n_t):\n if i % print_every == 0:\n print(i / n_t * 100, \"%\")\n self.triangulate(x,recalc_angles=True)\n self.assign_vertices()\n x = self.check_boundary(x)\n self.tri_save[i,:self.tris.shape[0]] = self.tris\n self.get_A(self.neighbours,self.vs)\n self.get_P(self.neighbours,self.vs)\n F = self.get_F(self.neighbours,self.vs)\n # F_bend = get_F_bend(self.n_c, self.CV_matrix, self.n_C, x, self.zeta)\n F_soft = weak_repulsion_boundary(self.Cents,self.a,self.k, self.CV_matrix,self.n_c,self.n_C)\n F_bound = boundary_tension(self.Gamma_bound,self.n_C,self.n_c,self.Cents,self.CV_matrix)\n x += self.dt*(F + F_soft + self.v0*self.noise[i,:x.shape[0]] + F_bound)\n # + F_bend + F_bound\n\n self.x = x\n self.x_save[i,:x.shape[0]] = x\n else:\n for i in range(n_t):\n if i % print_every == 0:\n print(i / n_t * 100, \"%\")\n self.triangulate(x, recalc_angles=True)\n self.assign_vertices()\n x = self.check_boundary(x)\n self.tri_save[i, :self.tris.shape[0]] = self.tris\n self.get_A(self.neighbours, self.vs)\n self.get_P(self.neighbours, self.vs)\n F = self.get_F(self.neighbours, self.vs)\n F_soft = weak_repulsion_boundary(self.Cents, self.a, self.k, self.CV_matrix, self.n_c, self.n_C)\n x += self.dt * (F + F_soft + self.v0 * self.noise[i, :x.shape[0]])\n\n self.x = x\n self.x_save[i, :x.shape[0]] = x\n print(\"Simulation complete\")\n return self.x_save",
"def glow_boundary(bound):\n assert bound < 4\n global layout\n temp = len(layout) - 1\n for i in range(bound, bound + len_square(bound)):\n for j in range(bound, bound + len_square(bound)): # TODO: assign this to a variable\t\n layout[i][j] = 1",
"def irr_boundary_as_space(self):\n p1,p2 = self.next_marks()\n \n return Mgn(self.genus - 1, self.marks.union([p1,p2])), p1, p2",
"def make_boundary_wall(self, height, width) -> None:\n for x in range(0, width):\n Wall(self, x, 0)\n Wall(self, x, height - 1)\n for y in range(1, height - 1):\n Wall(self, 0, y)\n Wall(self, width - 1, y)",
"def boundary_invariant(self):\n for cell in self.fire_boundary():\n if self.is_empty(cell[0], cell[1]):\n print \"Cell \" + str(cell) + \" in fire boundary is empty.\"\n return False\n return True",
"def boundary(self):\n\n\t\tif not hasattr(self,\"_hessian_boundary\"):\n\t\t\tself.maskBoundaries()\n\n\t\treturn self._hessian_boundary",
"def propagation(self,map):\n near_cells = self.get_near(map)\n \n #fire spreading\n burnable = [] #list of burnable cells\n for cell in near_cells:\n if(cell.nat != 0 and cell.state == 0): #conditions to burn a cell\n burnable.append(cell)\n \n if(self.nat == 2): #spread faster if it's a forest\n n = rdm.randint(0,(self.state*2)) #n: number of cells to burn, n < 9\n if n>8: n=8\n else: n = rdm.randint(0,self.state)\n \n if map.wind_active: \n for i in range(n):\n \n #creating the list in which the choice is made (changing probability according to the wind direction)\n indexes=[]\n for ce in burnable:\n \n if map.wind==0:\n if ce.y > self.y:\n indexes.append(near_cells.index(ce)) #*2 probability if the cells in direction of fire\n indexes.append(near_cells.index(ce))\n elif ce.y == self.y:\n indexes.append(near_cells.index(ce)) #0 probability if cell against the fire\n #1 for the rest\n elif map.wind==4:\n if ce.y < self.y:\n indexes.append(near_cells.index(ce)) #*2 probability if the cells in direction of fire\n indexes.append(near_cells.index(ce))\n elif ce.y== self.y: \n indexes.append(near_cells.index(ce)) #0 probability if cell against the fire\n #1 for the rest\n elif map.wind==2:\n if ce.x > self.x:\n indexes.append(near_cells.index(ce)) #*2 probability if the cells in direction of fire\n indexes.append(near_cells.index(ce))\n elif ce.x == self.x:\n indexes.append(near_cells.index(ce)) #0 probability if cell against the fire\n #1 for the rest\n elif map.wind==6:\n if ce.x < self.x:\n indexes.append(near_cells.index(ce)) #*2 probability if the cells in direction of fire\n indexes.append(near_cells.index(ce))\n elif ce.x == self.x:\n indexes.append(near_cells.index(ce)) #0 probability if cell against the fire\n #1 for the rest \n elif map.wind==1:\n if ce.y >= self.y and ce.x >= self.x:\n indexes.append(near_cells.index(ce)) #*2 probability if the cells in direction of fire\n indexes.append(near_cells.index(ce))\n elif (ce.y > self.y and ce.x < self.x) or (ce.y < self.y and ce.x > self.x):\n indexes.append(near_cells.index(ce)) \n\n elif map.wind==3:\n if ce.y <= self.y and ce.x >= self.x:\n indexes.append(near_cells.index(ce)) #*2 probability if the cells in direction of fire\n indexes.append(near_cells.index(ce))\n elif (ce.y > self.y and ce.x > self.x) or (ce.y < self.y and ce.x < self.x):\n indexes.append(near_cells.index(ce)) \n \n elif map.wind==5:\n if ce.y <= self.y and ce.x <= self.x:\n indexes.append(near_cells.index(ce)) #*2 probability if the cells in direction of fire\n indexes.append(near_cells.index(ce))\n elif (ce.y > self.y and ce.x < self.x) or (ce.y < self.y and ce.x > self.x):\n indexes.append(near_cells.index(ce))\n \n elif map.wind==7:\n if ce.y >= self.y and ce.x <= self.x:\n indexes.append(near_cells.index(ce)) #*2 probability if the cells in direction of fire\n indexes.append(near_cells.index(ce))\n elif (ce.y > self.y and ce.x > self.x) or (ce.y < self.y and ce.x < self.x):\n indexes.append(near_cells.index(ce))\n \n \n if len(indexes)>0:\n r = rdm.choice(indexes) #choose randoly the cell, among the availables, with weight\n cell = near_cells[r]\n cell.state = 1 #cell is burned\n map.burn_list.append(cell)\n burnable.remove(cell) #the cell is no more available\n \n\n\n\n\n #without the wind active\n else:\n if n>=len(burnable): #if n is greater than the number of burnable cells, they are all burned\n for cell in burnable:\n cell.state = 1\n map.burn_list.append(cell) #add cell to burn_list\n else: \n for i in range(n):\n r = rdm.randint(0,len(burnable)-1) 
#choose randoly the cell, among the availables\n cell = burnable[r]\n cell.state = 1 #cell is burned\n map.burn_list.append(cell)\n burnable.remove(cell) #the cell is no more available\n \n\n\n \n #fire intensity growing \n if(self.nat == 3): #burn faster if it's a house\n self.state += 2\n else:\n self.state += 1\n \n if(self.state > 5): #if it's burned\n self.charred = True\n self.state = 1\n map.burn_list.remove(self) #burned cells are removed form the burn_list",
"def burn_step(self):\n change = np.full((self.width, self.height), 0)\n for x in range(0, self.width - 1):\n for y in range(0, self.height - 1):\n # How fast we go through the fuel\n if random.randrange(2) == 0:\n self.fire_check_point(x, y, change)\n\n self.temp = np.maximum(change, self.temp)",
"def apply_boundary_conditions(self):\n E = self.__mesh.get_edge_list()\n\n # Top and bottom wall Dirichlet bcs (boundary_id = 21)\n \n e21_iterator = self.__mesh.edge_iterator(21)\n\n self.edge_center_value[e21_iterator[0]:e21_iterator[1]+1] = 0.0 \n \n # Left Dirichlet bc (boundary_id = 2)\n \n e2_iterator = self.__mesh.edge_iterator(2)\n\n b = np.sin(np.pi*self.y_e[e2_iterator[0]:e2_iterator[1]+1])\n\n self.edge_center_value[e2_iterator[0]:e2_iterator[1]+1] \\\n = b\n \n # Right Neumann bc (Zero flux, boundary_id = 3)\n \n e3_iterator = self.__mesh.edge_iterator(3)\n \n for i in range(e3_iterator[0], e3_iterator[1]+1):\n LC = E[i].get_straddling_cells()\n n = LC.get_global_cell_number() - 1\n self.edge_center_value[i] = self.cell_centroid_value[n]",
"def border_function_generator(self, stencil):\n\n def is_on_border(indice):\n for i in range(self.dim):\n if indice[0] < stencil.b[0][0] or indice[0] >= self.mid.shape[0]+stencil.b[0][0]:\n return True\n return is_on_border",
"def detect_boundary(self, x, l_old):\n pass",
"def boundaryPosition(self, gameState):\n myState = gameState.getAgentState(self.index)\n myPosition = myState.getPosition()\n boundaries = []\n if self.red:\n i = self.midWidth - 1\n else:\n i = self.midWidth + 1\n boudaries = [(i, j) for j in range(self.height)]\n validPositions = []\n for i in boudaries:\n if not gameState.hasWall(i[0], i[1]):\n validPositions.append(i)\n return validPositions",
"def edges(self):\n\t\tleftback = self.center + self.left*self.wr - self.forward*self.hr\n\t\tleftfront = self.center + self.left*self.wr + self.forward*self.hr\n\t\trightfront = self.center - self.left*self.wr + self.forward*self.hr\n\t\trightback = self.center - self.left*self.wr - self.forward*self.hr\n\t\tyield (leftback, leftfront)\n\t\tyield (leftfront, rightfront)\n\t\tyield (rightfront, rightback)\n\t\tyield (rightback, leftback)",
"def iter_edges(self):\n yield self.upperleft, self.upperright\n yield self.upperright, self.bottomright\n yield self.bottomright, self.bottomleft\n yield self.bottomleft, self.upperleft",
"def iter_edges(self):\n yield self.upperleft, self.upperright\n yield self.upperright, self.bottomright\n yield self.bottomright, self.bottomleft\n yield self.bottomleft, self.upperleft",
"def boundary(active, objects):\n limit = SIZE[1]\n for obj in objects:\n if active.pos_x == obj.pos_x:\n limit = min(limit, obj.pos_y)\n active.pos_y = limit-active.height\n active.col_d = True",
"def dequeue_boundary(self):\n return self._fire_boundary.dequeue()",
"def bounds(self, pos):",
"def boundaries_begin(*args):\n return _ida_hexrays.boundaries_begin(*args)",
"def compute_bb(self) -> List[float]:\n all_shapes = list(self.parts.values())\n bbox_vertices = unary_union(all_shapes).envelope.exterior.coords.xy\n min_x = min(bbox_vertices[0])\n max_x = max(bbox_vertices[0])\n min_y = min(bbox_vertices[1])\n max_y = max(bbox_vertices[1])\n return [min_x, max_x, min_y, max_y]"
]
| [
"0.66496694",
"0.59829056",
"0.5887077",
"0.58223045",
"0.5735941",
"0.57314575",
"0.5677762",
"0.5677131",
"0.5668234",
"0.56558734",
"0.56557745",
"0.5644102",
"0.56180614",
"0.5571235",
"0.55560315",
"0.5545058",
"0.55429775",
"0.5520432",
"0.5505475",
"0.5489425",
"0.5470334",
"0.5459626",
"0.5454275",
"0.5442501",
"0.5442501",
"0.54280293",
"0.54116625",
"0.5407034",
"0.5399658",
"0.53939605"
]
| 0.7614882 | 0 |
Function that spreads the wild fire using one step of BFS Updates both the cells and the fire_boundary | def update_boundary(self):
    cell = self._fire_boundary.dequeue()
    neighbors = self.four_neighbors(cell[0], cell[1])
    #neighbors = self.eight_neighbors(cell[0], cell[1])
    for neighbor in neighbors:
        if self.is_empty(neighbor[0], neighbor[1]):
            self.set_full(neighbor[0], neighbor[1])
            self._fire_boundary.enqueue(neighbor)
    # Check class invariant after update
    assert self.boundary_invariant() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def build_fireball():\n # build the right part\n build_rightpart()\n\n # copy it to 4.\n copy(0, 4)\n\n # build the left part, now it's in 0\n build_leftpart()\n\n # copy right part from 4 to 1.\n copy(4, 1)\n # smash together for whole fireball.\n smash()",
"def simulate_fire(self, FGPathway_object, location, weather, supr_decision):\n\n if len(weather) == 0:\n print(\"weather is empty\")\n\n ######################################\n # Initialize the fire sequence\n ######################################\n current_day = 0\n current_time = 0.0\n cells_burned = 0\n cells_crowned = 0\n spreading = False\n loc = location[:]\n sppr_dec = supr_decision\n\n # 1) is this fire going to spread at all?\n init_ignitions = self.get_neighbor_ignitions(FGPathway_object, loc, weather[current_day], sppr_dec)\n for ign in init_ignitions:\n #check to see if any of the spread rates are greater than zero.\n if ign[0] > 0:\n spreading = True\n break\n\n\n if not spreading:\n fr = FireRecord()\n fr.ignition_location=location[:]\n fr.weather = [weather[0]] #only including the first day of weather\n fr.suppressed = sppr_dec\n fr.note = \"non-spreading\"\n return fr\n\n #initialize the priority queue\n pq = Queue.PriorityQueue()\n\n #initialize the burn maps\n burn_map = np.zeros(FGPathway_object.size, dtype='int')\n crown_burn_map = np.zeros(FGPathway_object.size, dtype='int')\n\n #add the initial ignition location to the queue\n pq.put((current_time, loc[0], loc[1]))\n\n #the weather stream is arranged by day, so if the weather model gave us 4 days\n # to spread a fire (before a fire-ending weather event), it will have length 4.\n #We want to spread a fire for four whole days, starting at time = 0, and ending\n # at time = 3.9999, for a total of 4 \"time units\" which is represents four days.\n max_time = len(weather)\n\n #start the loop, and continue while there's anything queued, or until time expires\n while (current_time < max_time) and not (pq.empty()):\n #get the next queue item\n current_ign = pq.get()\n loc = (current_ign[1], current_ign[2])\n\n\n #check if the location is out of bounds, and if so, ignore this point\n if ( loc[0] < 0 ) or (loc[0] >= FGPathway_object.size[0]):\n continue\n if ( loc[1] < 0 ) or (loc[1] >= FGPathway_object.size[1]):\n continue\n\n\n #increment current time to this cell's ignition time. 
This can allow a single\n # ignition to go beyond the max time, so, check the index, since the weather\n # stream will not have a day for that index.\n current_time = current_ign[0]\n #adjust day, if needed\n if current_time - current_day >= 1.0:\n current_day += 1\n if current_day >= len(weather): break\n\n #check to see if this cell has already been burned\n if burn_map[loc[0], loc[1]]:\n #it's already burned in a previous step, so lets move on\n continue\n \n #we haven't 'continued', so this cell hasn't burned yet.\n # a) update the burn map\n burn_map[loc[0], loc[1]] = 1\n cells_burned += 1\n\n # b) check for crown fire, and if necessary, update the crown burn map\n crowned = False\n if self.get_crown_burn(FGPathway_object, loc, weather[current_day], sppr_dec):\n crown_burn_map[loc[0], loc[1]] = 1\n cells_crowned += 1\n crowned = True\n \n # c) get the neighbor ignitions\n n_igns = self.get_neighbor_ignitions(FGPathway_object, loc, weather[current_day], sppr_dec)\n\n # d) add ignitions to the priority queue\n for ign in n_igns:\n #if the spread rate is other than zero\n if ign[0] > 0:\n pq.put(ign)\n\n # e) update the pathway's data to reflect what happened\n self.update_cell(FGPathway_object, loc, burned=True, crowned=crowned)\n\n\n #all done with the queue, so either we ran out of new cells, or the time expired\n fr = FireRecord()\n fr.acres_burned = cells_burned * FGPathway_object.acres_per_cell\n fr.acres_crown_burned = cells_crowned * FGPathway_object.acres_per_cell\n fr.weather = weather[:]\n fr.suppressed = sppr_dec\n fr.ignition_location = location[:]\n\n #save the maps, if desired\n if self.SAVE_BURN_MAPS:\n fr.burn_map = burn_map\n fr.crown_burn_map = crown_burn_map\n\n fr.suppression_cost = self.calc_suppression_cost(fr)\n\n return fr",
"def propagation(self,map):\n near_cells = self.get_near(map)\n \n #fire spreading\n burnable = [] #list of burnable cells\n for cell in near_cells:\n if(cell.nat != 0 and cell.state == 0): #conditions to burn a cell\n burnable.append(cell)\n \n if(self.nat == 2): #spread faster if it's a forest\n n = rdm.randint(0,(self.state*2)) #n: number of cells to burn, n < 9\n if n>8: n=8\n else: n = rdm.randint(0,self.state)\n \n if map.wind_active: \n for i in range(n):\n \n #creating the list in which the choice is made (changing probability according to the wind direction)\n indexes=[]\n for ce in burnable:\n \n if map.wind==0:\n if ce.y > self.y:\n indexes.append(near_cells.index(ce)) #*2 probability if the cells in direction of fire\n indexes.append(near_cells.index(ce))\n elif ce.y == self.y:\n indexes.append(near_cells.index(ce)) #0 probability if cell against the fire\n #1 for the rest\n elif map.wind==4:\n if ce.y < self.y:\n indexes.append(near_cells.index(ce)) #*2 probability if the cells in direction of fire\n indexes.append(near_cells.index(ce))\n elif ce.y== self.y: \n indexes.append(near_cells.index(ce)) #0 probability if cell against the fire\n #1 for the rest\n elif map.wind==2:\n if ce.x > self.x:\n indexes.append(near_cells.index(ce)) #*2 probability if the cells in direction of fire\n indexes.append(near_cells.index(ce))\n elif ce.x == self.x:\n indexes.append(near_cells.index(ce)) #0 probability if cell against the fire\n #1 for the rest\n elif map.wind==6:\n if ce.x < self.x:\n indexes.append(near_cells.index(ce)) #*2 probability if the cells in direction of fire\n indexes.append(near_cells.index(ce))\n elif ce.x == self.x:\n indexes.append(near_cells.index(ce)) #0 probability if cell against the fire\n #1 for the rest \n elif map.wind==1:\n if ce.y >= self.y and ce.x >= self.x:\n indexes.append(near_cells.index(ce)) #*2 probability if the cells in direction of fire\n indexes.append(near_cells.index(ce))\n elif (ce.y > self.y and ce.x < self.x) or (ce.y < self.y and ce.x > self.x):\n indexes.append(near_cells.index(ce)) \n\n elif map.wind==3:\n if ce.y <= self.y and ce.x >= self.x:\n indexes.append(near_cells.index(ce)) #*2 probability if the cells in direction of fire\n indexes.append(near_cells.index(ce))\n elif (ce.y > self.y and ce.x > self.x) or (ce.y < self.y and ce.x < self.x):\n indexes.append(near_cells.index(ce)) \n \n elif map.wind==5:\n if ce.y <= self.y and ce.x <= self.x:\n indexes.append(near_cells.index(ce)) #*2 probability if the cells in direction of fire\n indexes.append(near_cells.index(ce))\n elif (ce.y > self.y and ce.x < self.x) or (ce.y < self.y and ce.x > self.x):\n indexes.append(near_cells.index(ce))\n \n elif map.wind==7:\n if ce.y >= self.y and ce.x <= self.x:\n indexes.append(near_cells.index(ce)) #*2 probability if the cells in direction of fire\n indexes.append(near_cells.index(ce))\n elif (ce.y > self.y and ce.x > self.x) or (ce.y < self.y and ce.x < self.x):\n indexes.append(near_cells.index(ce))\n \n \n if len(indexes)>0:\n r = rdm.choice(indexes) #choose randoly the cell, among the availables, with weight\n cell = near_cells[r]\n cell.state = 1 #cell is burned\n map.burn_list.append(cell)\n burnable.remove(cell) #the cell is no more available\n \n\n\n\n\n #without the wind active\n else:\n if n>=len(burnable): #if n is greater than the number of burnable cells, they are all burned\n for cell in burnable:\n cell.state = 1\n map.burn_list.append(cell) #add cell to burn_list\n else: \n for i in range(n):\n r = rdm.randint(0,len(burnable)-1) 
#choose randoly the cell, among the availables\n cell = burnable[r]\n cell.state = 1 #cell is burned\n map.burn_list.append(cell)\n burnable.remove(cell) #the cell is no more available\n \n\n\n \n #fire intensity growing \n if(self.nat == 3): #burn faster if it's a house\n self.state += 2\n else:\n self.state += 1\n \n if(self.state > 5): #if it's burned\n self.charred = True\n self.state = 1\n map.burn_list.remove(self) #burned cells are removed form the burn_list",
"def bfs(self, queue, target, targetx,\n targety): # finds BFS path to the finish. if there is no path, will return nothing\n\n '''\n 1. So we have a parent matrix\n 2. This records the parent\n 3. We have a dictionary of cell: parents'''\n if self.map1[queue[0][0]][queue[0][1]] == target:\n return [1]\n\n thisset = {queue[0]}\n traceSet = {queue[0]: None}\n\n flag = False # variable to see if it is possible to reach the goal\n while queue:\n fringe = queue.pop(0) # gets 0, 0 first\n adjs = self.getAdj(fringe[0], fringe[1])\n\n if self.map1[fringe[0]][fringe[1]] == 2:\n print(\"Our attempt has started\")\n\n if self.map1[fringe[0]][fringe[1]] == target:\n print(\"Goal reached\")\n print(\"This is how you go about it\")\n # print(traceSet)\n ans = self.trace(traceSet, targetx, targety)\n path = self.savePath(ans)\n flag = True\n # print(ans.pop())\n break\n\n if self.map1[fringe[0]][fringe[1]] == 0 or self.map1[fringe[0]][fringe[1]] == 3 or self.map1[fringe[0]][fringe[1]] == 4:\n continue\n\n for i in range(len(adjs)):\n if self.legal(adjs[i][0], adjs[i][1]):\n if adjs[i] in thisset:\n continue\n\n thisset.add(adjs[i])\n traceSet[adjs[i]] = fringe\n queue.append(adjs[i])\n if flag is False:\n print(\"No way to goal\")\n return []\n return path",
"def bfs(maze):\n # TODO: Write your code here\n frontier = Queue()\n visited = []\n path = []\n ret = []\n objectives = maze.getObjectives()\n start = State(maze.getStart()[0], maze.getStart()[1], objectives.copy())\n frontier.put(start)\n explored = []\n \n\n while not frontier.empty(): # while frontier queue is not empty\n\n currentState = frontier.get()\n currentCell = currentState.cell()\n objectivesLeft = currentState.objectives()\n\n if objectivesLeft.count(currentCell) != 0:\n\n objectivesLeft.remove(currentCell)\n \n # all objectives found, initialise backtrace and exit loop\n # if len(objectivesLeft) == 0:\n path.append(currentState)\n ret.append(currentCell)\n visited.append(currentState)\n break\n\n # current cell is not objective nor visited\n if visited.count(currentState) == 0:\n explored.append(currentCell)\n neighbors = maze.getNeighbors(currentCell[0], currentCell[1])\n\n for i in neighbors:\n\n neighbor = State(i[0], i[1], objectivesLeft)\n\n # if neighbor is not visited, add it to the frontier\n if visited.count(neighbor) == 0:\n neighbor.setParent(currentState)\n frontier.put(neighbor)\n\n visited.append(currentState)\n\n #backtrace\n while path[0] != start:\n\n currentState = path[0]\n path.insert(0, currentState.parent())\n ret.insert(0, currentState.parent().cell())\n\n return ret",
"def fire_strategy_2(maze, q):\n\n n = len(maze)\n\n timestep = 0\n maze_f = copy.deepcopy(maze)\n agent_pos = (0, 0)\n\n while(agent_pos != (n-1, n-1)):\n\n path = BFS(maze_f, agent_pos, (n-1, n-1))\n\n # End if no path exists from start to goal\n if(not path[0]):\n return False\n\n route = path[1]\n timestep += 1 # increase timer by 1\n agent_pos = route[1] # update to new position\n\n # if agent moves into fire, report failure\n if(maze_f[agent_pos[0]][agent_pos[1]] != 0):\n return False\n\n # if agent has reached goal, report success\n if(agent_pos == (n-1, n-1)):\n return True\n\n maze_f = advance_fire_one_step(maze_f, q) # advance fire\n\n # if fire spread into agent, report failure\n if(maze_f[agent_pos[0]][agent_pos[1]] != 0):\n return False\n\n # function should always return before while loop is completed\n return False",
"def _sweepDir(self, f):\n lastCellFaceVal = np.zeros((self.cells[0].nG, self.cells[0].sNords))\n if f == 2:\n cellList = reversed(self.cells)\n else:\n cellList = self.cells\n blowoff = False\n for cell in cellList:\n if hasattr(cell, 'boundaryCond') and not blowoff:\n cell.applyBC(self.depth)\n blowoff = True\n else:\n # Interior cell\n cell.ordFlux[:, f, :] = lastCellFaceVal[:, :]\n # Only sweep through ordinates that have a component in same direction as\n # current sweep dir.\n dotDir = cell.sNmu * cell.faceNormals[f - 1]\n ordsInSweepDir = np.where(dotDir < 0.)\n for o in np.arange(cell.sNords)[ordsInSweepDir]:\n cell.ordFlux[:, 0, o] = (cell.ordFlux[:, f, o] + self.deltaX * cell.qin[:, 0, o] / (2. * np.abs(cell.sNmu[o]))) / \\\n (1. + self.totalXs * self.deltaX / (2. * np.abs(cell.sNmu[o])))\n if f == 1:\n cell.ordFlux[:, 2, o] = 2. * cell.ordFlux[:, 0, o] - cell.ordFlux[:, f, o]\n lastCellFaceVal[:, o] = cell.ordFlux[:, 2, o]\n elif f == 2:\n cell.ordFlux[:, 1, o] = 2. * cell.ordFlux[:, 0, o] - cell.ordFlux[:, f, o]\n lastCellFaceVal[:, o] = cell.ordFlux[:, 1, o]\n if np.any(cell.ordFlux[:, :, :] < 0.0):\n #print(\"WARNING: Negative flux detected! Refine mesh.\")\n maxStepSize = 2. * np.min(np.abs(cell.sNmu)) * min(1. / self.totalXs)\n #print(\"Max Step size in 1D: \" + str(maxStepSize))\n # refineFactor = 2. # TODO: compute refinement factor on the fly\n #raise Exception('coarse', refineFactor)",
"def bfs(self, starting_vertex, destination_vertex):\n pass # TODO",
"def _sweepDir(self, f):\n lastCellFaceVal = np.zeros((self.regions[0].cells[0].nG,\n self.regions[0].cells[0].sNords))\n if f == 2:\n sweepTree = reversed(self.sweepTree)\n else:\n sweepTree = self.sweepTree\n blowoff = False\n for j, i in sweepTree:\n cell = self.regions[j].cells[i]\n if hasattr(cell, 'boundaryCond') and not blowoff:\n cell.applyBC(self.depth)\n blowoff = True\n else:\n # Interior cell\n cell.ordFlux[:, f, :] = lastCellFaceVal[:, :]\n # Only sweep through ordinates that have a component in same direction as\n # current sweep dir. Filter ords by dot product\n dotDir = cell.sNmu * cell.faceNormals[f - 1]\n ordsInSweepDir = np.where(dotDir < 0.)\n for o in np.arange(cell.sNords)[ordsInSweepDir]:\n cell.ordFlux[:, 0, o] = (cell.ordFlux[:, f, o] + self.regions[j].deltaX * cell.qin[:, 0, o] / (2. * np.abs(cell.sNmu[o]))) / \\\n (1. + self.regions[j].totalXs * self.regions[j].deltaX / (2. * np.abs(cell.sNmu[o])))\n if f == 1:\n cell.ordFlux[:, 2, o] = 2. * cell.ordFlux[:, 0, o] - cell.ordFlux[:, f, o]\n lastCellFaceVal[:, o] = cell.ordFlux[:, 2, o]\n elif f == 2:\n cell.ordFlux[:, 1, o] = 2. * cell.ordFlux[:, 0, o] - cell.ordFlux[:, f, o]\n lastCellFaceVal[:, o] = cell.ordFlux[:, 1, o]\n if np.any(cell.ordFlux[:, :, :] < 0.0):\n #print(\"WARNING: Negative flux detected! Refine mesh in region #:\" + str(j))\n maxStepSize = 2. * np.min(np.abs(cell.sNmu)) * min(1. / self.regions[j].totalXs)\n #print(\"Max Step size in 1D: \" + str(maxStepSize))\n # automatically gen refine factor: TODO: auto refine mesh\n # refineFactor = self.regions[j].deltaX / maxStepSize\n #raise Exception('coarse', refineFactor)",
"def fire_strategy_1(maze, q):\n n = len(maze)\n path = BFS(maze, (0, 0), (n-1, n-1))\n\n # End if no path exists from start to goal\n if(not path[0]):\n return False\n\n route = path[1]\n\n timestep = 0\n agent_pos = route[timestep]\n maze_f = copy.deepcopy(maze)\n\n while(timestep < len(route)):\n\n timestep += 1 # increase timer by 1\n agent_pos = route[timestep] # update to new position\n\n # if agent moves into fire, report failure\n if(maze_f[agent_pos[0]][agent_pos[1]] != 0):\n # print_maze(maze_f)\n #print(\"timestep \", timestep)\n return False\n\n # if agent has reached goal, report success\n if(timestep == len(route)-1):\n # print_maze(maze_f)\n #print(\"timestep \", timestep)\n return True\n\n maze_f = advance_fire_one_step(maze_f, q) # advance fire\n\n # if fire spread into agent, report failure\n if(maze_f[agent_pos[0]][agent_pos[1]] != 0):\n # print_maze(maze_f)\n #print(\"timestep \", timestep)\n return False\n\n # function should always return before while loop is completed\n return False",
"def bfs(game, game_coords):\n # *** main queue to record steps and corresponding costs ***\n queue_moves = [[game.player.row, game.player.col]]\n cost_moves = [0]\n\n # record cost and illegal moves\n cost = 1\n declined_moves = []\n\n # record the moves in the previous turn(iteration)\n last_steps = [[game.player.row, game.player.col]]\n\n # ***** Step 1: Marking game board using cost *****\n while True:\n\n # struggled in a location, loss\n if not last_steps:\n return 0, 0, 0\n\n # collect all potential moves: left, down, right, up, teleport(if possible)\n potential_steps = []\n for step in last_steps:\n potential_steps.append(left(step))\n potential_steps.append(down(step))\n potential_steps.append(right(step))\n potential_steps.append(up(step))\n\n if search_coords(game_coords, step) in ['1', '2', '3', '4', '5', '6', '7', '8', '9']:\n potential_steps.append(step)\n\n current_steps = []\n for step in potential_steps:\n if step in declined_moves:\n continue\n elif step in queue_moves:\n # the step existed in main queue, replace it if cost is lower, otherwise skip\n if cost >= cost_moves[queue_moves.index(step)]:\n if step != queue_moves[-1]:\n continue\n\n # check if move is legal\n will_move = step\n item = search_coords(game_coords, will_move)\n\n if item == '*' or item == -1:\n declined_moves.append(will_move)\n continue\n\n elif item == 'W':\n game.player.num_water_buckets += 1\n\n for i in range(len(game_coords['W'])):\n # water picked up, set current display from 'W' to ' ' in game_coords\n if game_coords['W'][i] == will_move:\n game_coords['W'].pop(i)\n game_coords[' '].append(will_move)\n break\n\n elif item == 'F':\n if game.player.num_water_buckets < 1:\n # cannot put out fire, refuse this move :(\n declined_moves.append(will_move)\n continue\n\n game.player.num_water_buckets -= 1\n elif item in ['1', '2', '3', '4', '5', '6', '7', '8', '9']:\n for coords in game_coords[item]:\n if coords != will_move:\n will_move = coords\n break\n\n current_steps.append(will_move)\n\n # append to main queue\n queue_moves.append(will_move)\n cost_moves.append(cost)\n\n cost += 1\n\n # reach end point\n if game_coords['Y'][0] in current_steps:\n break\n\n # last_steps <- current_steps\n last_steps = []\n last_steps.extend(current_steps)\n\n cost -= 1\n\n # ***** Step 2: recall through main queue to generate a path *****\n # *** Queue: last in first out ***\n recall_moves = queue_moves[::-1]\n recall_cost = cost_moves[::-1]\n cursor = recall_moves[0]\n\n # generated path\n route = []\n\n # 'action to cmd' translator\n action_map = {(1, 0): 'w', (-1, 0): 's', (0, 1): 'a', (0, -1): 'd'}\n\n for i in range(len(recall_moves)):\n if recall_cost[i] == cost - 1:\n x, y = coords_sub(recall_moves[i], cursor)\n\n # simple move: left, down, right, up\n if abs(x) + abs(y) == 1:\n cursor = recall_moves[i]\n cost -= 1\n route.insert(0, action_map[(x, y)])\n\n # teleport move\n elif teleport_pair(cursor, game_coords) != -1:\n pair = teleport_pair(cursor, game_coords)\n x, y = coords_sub(recall_moves[i], pair)\n\n # teleport after simple move\n if abs(x) + abs(y) == 1:\n cursor = recall_moves[i]\n cost -= 1\n route.insert(0, action_map[(x, y)])\n\n # teleport after no move ('e')\n elif abs(x) + abs(y) == 0:\n cursor = recall_moves[i]\n cost -= 1\n route.insert(0, 'e')\n\n # convert list of paths to string\n trace = ''\n for action in route:\n trace += action + ', '\n\n return 1, cost_moves[-1], trace",
"def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n # fringe priority queue\n fringe = util.PriorityQueue()\n fringe.push([problem.getStartState()],1) # fringe will have (priority, order, [s0,s1,..])\n\n # closed set\n closed = []\n\n i = 0\n while not fringe.isEmpty():\n\n # get highest priority path for expansion e.g. [s0,s2,s4]\n path_exp = fringe.pop()\n\n # take last node in path e.g. s4\n node_exp = path_exp[-1]\n\n # check goal state\n if problem.isGoalState(node_exp): # check if goal\n actions = actions_for_path(problem,path_exp)\n #import pdb; pdb.set_trace()\n return actions\n\n # add expanded node into closed set e.g. [s0,s1,s2]\n if node_exp not in closed:\n closed.append(node_exp)\n else:\n # if it's in the closed set, don't expand\n continue\n\n # get sucessors to expand fringe\n successors = problem.getSuccessors(node_exp)\n for successor in successors:\n # unpack states, actions\n ss,aa,_ = successor\n if ss not in closed:\n path = path_exp+[ss]\n # expand fringe by adding candidate paths, prioritize by len of path\n fringe.push(path,len(path))\n\n #i+=1\n if i==1000:\n import pdb; pdb.set_trace()\n\n util.raiseNotDefined()",
"def bfs(initial_state, dimension=3):\n\t\n\treturn search(initial_state, Frontier(Queue), dimension)",
"def fire_boundary(self):\n for cell in self._fire_boundary:\n yield cell\n # alternative syntax\n #return (cell for cell in self._fire_boundary)",
"def breath_analyze(self, offset=0, th=10):\n # breath part\n breath_gd = np.gradient(gf(self.breath_list, 10))\n breath_gd[breath_gd > 0] = 1\n breath_gd[breath_gd < 0] = 0\n breath_pulse = breath_gd[:-1]-np.roll(breath_gd, -1)[:-1]\n breath_in = argrelextrema(breath_pulse, np.less, order=10)[0]#+offset\n breath_out = argrelextrema(breath_pulse, np.greater, order=10)[0]#+offset\n self.breath = np.sort(np.hstack([breath_in, breath_out, len(self.breath_list)-1]))\n \n if self.breath[0] == breath_in[0]:\n self.btype = 'in'\n else:\n self.btype = 'out' \n\n b_in = []\n b_out = []\n delidx = []\n\n if len(self.breath) != 0: \n for i, j in zip(self.breath[:-1], self.breath[1:]):\n breath_diff = abs(self.breath_list[j]-self.breath_list[i])\n if abs(breath_diff) > 3000: # really breath in/out\n if abs(breath_diff) < 30000: # not deep breath\n if breath_diff > 0: # breath out\n print('breath out from frame '+str(i)+' to frame '+str(j)\n +' <== breath not deep enough')\n b_out.append(j-i)\n self.ngframe.append(i)\n else: # breath in\n print('breath in from frame '+str(i)+' to frame '+str(j)\n +' <== breath not deep enough')\n b_in.append(j-i)\n else: \n if breath_diff > 0: # breath out\n print('breath out from frame '+str(i)+' to frame '+str(j))\n b_out.append(j-i)\n else: # breath in\n print('breath in from frame '+str(i)+' to frame '+str(j))\n b_in.append(j-i)\n else:\n delidx.append(np.argwhere(self.breath==j)[0][0])\n self.breath = np.delete(self.breath, np.array(delidx))\n\n print('\\naverage breath out freq is: '+str(np.round(30./np.mean(b_out), 2))+' Hz')\n print('\\naverage breath in freq is: '+str(np.round(30./np.mean(b_in), 2))+' Hz')\n else:\n raise ImportError('Doing too fast !! please redo again !!')",
"def bfs(maze):\n # TODO: Write your code here.\n start = maze.getStart() \n frontier = [] \n path = [] \n dim = maze.getDimensions()\n objs = maze.getObjectives()\n rows = dim[0]\n cols = dim[1]\n visited = {} # visited as a dictionary\n for i in range (0, rows):\n for j in range (0, cols):\n visited[(i,j)] = (-1, -1)\n frontier.append(start)\n visited[(i,j)] = (-2, -2)\n while frontier:\n pt = frontier.pop(0)\n if maze.isObjective(pt[0], pt[1]) == True:\n break\n else:\n list_of_neighbors = maze.getNeighbors(pt[0], pt[1])\n for i in list_of_neighbors:\n if visited.get(i) == (-1, -1): \n frontier.append(i)\n visited[i] = pt \n while pt != start:\n path.append(pt)\n pt = visited.get(pt)\n path.append(start)\n path.reverse()\n return path",
"def simulate_boundary(self,print_every=1000,do_F_bound=True):\n n_t = self.t_span.size\n self.n_t = n_t\n x = self.x0.copy()\n self._triangulate(x)\n self.assign_vertices()\n x = self.check_boundary(x)\n self.x = x.copy()\n self.x_save = np.ones((n_t,int(self.n_c*self.b_extra),2))*np.nan\n self.tri_save = -np.ones((n_t,int(self.tris.shape[0]*self.b_extra),3),dtype=np.int32)\n self.generate_noise_boundary()\n if do_F_bound is True:\n for i in range(n_t):\n if i % print_every == 0:\n print(i / n_t * 100, \"%\")\n self.triangulate(x,recalc_angles=True)\n self.assign_vertices()\n x = self.check_boundary(x)\n self.tri_save[i,:self.tris.shape[0]] = self.tris\n self.get_A(self.neighbours,self.vs)\n self.get_P(self.neighbours,self.vs)\n F = self.get_F(self.neighbours,self.vs)\n # F_bend = get_F_bend(self.n_c, self.CV_matrix, self.n_C, x, self.zeta)\n F_soft = weak_repulsion_boundary(self.Cents,self.a,self.k, self.CV_matrix,self.n_c,self.n_C)\n F_bound = boundary_tension(self.Gamma_bound,self.n_C,self.n_c,self.Cents,self.CV_matrix)\n x += self.dt*(F + F_soft + self.v0*self.noise[i,:x.shape[0]] + F_bound)\n # + F_bend + F_bound\n\n self.x = x\n self.x_save[i,:x.shape[0]] = x\n else:\n for i in range(n_t):\n if i % print_every == 0:\n print(i / n_t * 100, \"%\")\n self.triangulate(x, recalc_angles=True)\n self.assign_vertices()\n x = self.check_boundary(x)\n self.tri_save[i, :self.tris.shape[0]] = self.tris\n self.get_A(self.neighbours, self.vs)\n self.get_P(self.neighbours, self.vs)\n F = self.get_F(self.neighbours, self.vs)\n F_soft = weak_repulsion_boundary(self.Cents, self.a, self.k, self.CV_matrix, self.n_c, self.n_C)\n x += self.dt * (F + F_soft + self.v0 * self.noise[i, :x.shape[0]])\n\n self.x = x\n self.x_save[i, :x.shape[0]] = x\n print(\"Simulation complete\")\n return self.x_save",
"def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n \"\"\"\n ALGORITHM FOR bFS \n Create a queue Q\n enqueue root node to Q\n while Q is not empty:\n dequeu an item v from Q\n mark the item v as visited \n for each node w that is directed from v:\n enqueue w to Q\n \n \n \"\"\"\n\n fringes = util.Queue()\n explored =[]\n fringes.push((problem.getStartState(),[]))\n\n while(not fringes.isEmpty()):\n currentNode,currDir = fringes.pop()\n if problem.isGoalState(currentNode):\n goal = currentNode\n pathToGoal = currDir\n #print \"final path is : \", pathToGoal\n\n break\n # print \"HOraaay goal has been found === > \", currentNode\n\n if not (currentNode in explored):\n explored.append(currentNode)\n for childNode in problem.getSuccessors(currentNode):\n fringes.push((childNode[0],currDir+[childNode[1]]))\n\n\n return pathToGoal",
"def updateFCFS_queue(self, junc):\n for tl_combination in junc.tl_combinations:\n for lane in tl_combination.corresponding_lanes:\n for vehicle in traci.lane.getLastStepVehicleIDs(lane.ID):\n junc.FCFS_queue[vehicle] = tl_combination.ryg_state",
"def bfs_traversal(graph, s, goals=[]):\n visited = []\n boundary = deque([s])\n while len(boundary) > 0:\n v = boundary.popleft()\n visited += [v]\n if v in goals:\n return visited\n for w in neighbours(v, graph):\n if w not in visited and w not in boundary:\n boundary.append(w)\n return visited\n\n \"\"\"\n visited = []\n boundary = [s]\n while len(boundary) > 0:\n v = boundary.pop(0)\n visited += [v]\n for w in neighbours(v, graph):\n if w not in goals:\n if w not in visited and w not in boundary:\n boundary.append(w)\n else:\n if w not in visited and w not in boundary:\n boundary.append(w)\n v = boundary.pop(0)\n visited += [v]\n break\n return visited\n \"\"\"",
"def bfs(self, starting_vertex, destination_vertex):\n \"\"\" FIFO ir LILO\n Create a queue\n Enqueue PATH to starting Vertex\n Create a set top store visited vertices\n While the queue is NOT empty: e.g. > 0\n Dequeue the first PATH Vertex\n Get Vertex from END of PATH\n Check IF NOT visited:\n Mark as visited\n check if vertex is destination_vertex\n If TRUE, return path\n enqueue PATH to ALL of neighbors \n make COPY of current path\n add neighbor to path copy\n enqueue copy \n \"\"\"\n\n q = Queue() # Create a queue\n q.enqueue([starting_vertex]) # Enqueue starting at vertex into Queue (list)\n visited = set() # Create a set to store visited \n \n while q.size() > 0: # While the queue is NOT empty: \n path = q.dequeue() # Dequeue the first PATH Vertices\n v = path[-1] # Get Vertex from END of PATH\n\n if v not in visited: # Check IF NOT visited:\n visited.add(v) # Mark as visited\n\n if v == destination_vertex: # check if vertex is destination_vertex\n return path # If TRUE, return path, DONE\n\n for n in self.get_neighbors(v): # enqueue PATH to ALL of neighbors\n path_c = path [:] # make COPY of current path\n path_c.append(n) # add neighbor to path copy\n q.enqueue(path_c) # enqueue copy",
"def run_model(f_grid, h_grid, i_threshold, w_direction, burn_seeds):\n \n burnt_cells = burn_seeds\n \n # a list of all the cells to iterate over\n cell_list = []\n for i in range(len(f_grid)):\n for j in range(len(f_grid)):\n cell = (i, j)\n cell_list.append(cell)\n \n # create a mutable burning grid to be refered to in check_ignition function\n b_grid = []\n for i in range(len(f_grid)):\n b_grid.append([])\n for j in range(len(f_grid)):\n b_grid[i].append(False)\n for cell in cell_list:\n if cell in burn_seeds:\n b_grid[cell[0]][cell[1]] = True\n \n \n while test_bool(b_grid) is True:\n \n # lists for how the cells are currently behaving so that next_t and \n # check ignition can iterate through the same values for every cell in \n # each time frame\n current_fuel = copy_list(f_grid)\n current_burning = copy_list(b_grid)\n \n # generate scenario in the next time frame\n next_t(cell_list, current_burning, b_grid, current_fuel, f_grid, \n h_grid, i_threshold, w_direction, burnt_cells)\n \n return f_grid, len(burnt_cells)",
"def pull():\r\n\t\tglobal bodies, counter\r\n\r\n\t\tfor n, b in bodies:\r\n\t\t\tl = list(b.getPosition())\r\n\t\t\tscalp (l, -1000 / length(l))\r\n\t\t\tb.addForce(l)\r\n\t\t\tif counter%60 == 0:\r\n\t\t\t\tb.addForce((0, 10000, 0))",
"def bfs(maze):\n # TODO: Write your code here\n q = queue.Queue()\n q.put(maze.getStart())\n traversed = []\n path = []\n tracker = {maze.getStart(): None} #Tracker needs to contain tuples\n\n while q:\n curr_loc = q.get() \n\n if curr_loc not in traversed: #Add to traversed points list\n traversed.append(curr_loc)\n\n if maze.isObjective(curr_loc[0], curr_loc[1]): #Reached end of maze\n finished = curr_loc \n break\n\n nextpath = maze.getNeighbors(curr_loc[0], curr_loc[1]) #Search neighbor points\n for point in nextpath:\n if point not in traversed and maze.isValidMove(point[0], point[1]):\n q.put(point)\n tracker[point] = curr_loc #Update curr_loc\n\n while finished:\n path.insert(0, finished) \n finished = tracker[finished]\n\n return path",
"def bfs_iterative(graph,start):\n\tvisited = set()\n\twatched = set()\n\tnodes_queue = [start] # List that helps as queue\n\twatched.add(start)\n\t\n\twhile nodes_queue:\n\t\tcurrent_node = nodes_queue.pop(0)\n\n\t\tprint(\"visiting\",current_node)\n\t\tvisited.add(current_node)\n\t\t\n\t\tfor adjacent_node in graph[current_node]:\n\t\t\tif (adjacent_node not in watched) and (adjacent_node not in visited):\n\t\t\t\tnodes_queue.append(adjacent_node)\n\t\t\t\t#path.add(adjacent_node)",
"def bfs(graph, start, goal):\n\n final = []\n agenda = [[start]]\n\n # Process node queue\n while agenda:\n path = agenda.pop(0)\n\n # Exit if a path is found which reaches the goal\n if path[-1] == goal:\n final = path\n break\n\n # Push the new paths onto the queue\n connected = graph.get_connected_nodes(path[-1])\n for node in connected:\n # Ignore previously visited nodes\n if node not in path:\n agenda.append(path + [node])\n\n # Return the final path or initial empty list\n return final",
"def group_boundary_elements(self,force=False):\n if force or self._bc_groups is None:\n # This part is the same as in waq_scenario\n g=self.grid()\n if g is None:\n return super(SunHydro,self).group_boundary_elements()\n\n self.infer_2d_elements()\n\n poi=self.pointers\n bc_sel = (poi[:,0]<0)\n bc_elts = np.unique(self.seg_to_2d_element[ poi[bc_sel,1]-1 ])\n\n groups=np.zeros(self.n_2d_elements,self.group_dtype)\n groups['id']-=1\n\n gforce=forcing.GlobalForcing(sun=self.sun)\n sun_g=self.sun.grid()\n\n def node_sun_to_g(n):\n return g.select_nodes_nearest(sun_g.points[n])\n\n # map group id as returned by this method to a dict with items \n # like which shapefile did it come from, index in that shapefile,\n # and fields from the feature.\n # note that it is possible for two boundary flows to enter the same\n # cell - only the first will be marked, with the second feature\n # skipped in both groups and bc_group_mapping\n # self.bc_group_mapping={} \n ngroups=0\n\n for flow_shp in self.flow_shps:\n flows=wkb2shp.shp2geom(flow_shp)\n sun_groups=gforce.add_groups_bulk(defs=flows)\n\n for feat_id in range(len(flows)):\n grp=sun_groups[feat_id]\n if grp.cell_based():\n sun_cells=grp.cells\n cells=[]\n for cell in sun_cells:\n g_nodes=[node_sun_to_g(n)\n for n in sun_g.cells[cell]]\n cells.append( g.nodes_to_cell(g_nodes) )\n\n cells=np.array(cells)\n else:\n # for the purposes of bc_groups, figure out the\n # respective cells\n cells=[]\n for sun_e in grp.edges:\n sun_e_nodes=sun_g.edges[sun_e,:2]\n e=g.nodes_to_edge(node_sun_to_g(sun_e_nodes[0]),\n node_sun_to_g(sun_e_nodes[1]))\n assert e is not None\n cells.append(g.edge_to_cells(e))\n cells=np.array(cells)\n cells=cells[cells>=0]\n\n details=dict(flow_shp=flow_shp,\n feat_id=feat_id)\n for n in flows.dtype.names:\n details[n]=flows[n][feat_id]\n\n # limit this to cells which are not already marked, but *are*\n # in bc_elts\n cells=[c for c in cells\n if (groups['id'][c]<0) and (c in bc_elts) ] \n if len(cells):\n groups['id'][cells] = ngroups\n groups['name'][cells]=details.get('name','group %d'%ngroups)\n groups['attrs'][cells] = details\n # self.bc_group_mapping[ngroups]=details\n ngroups+=1\n else:\n self.log.warning(\"Feature %d from %s (name=%s) overlaps another flow or wasn't\" \n \" found as a boundary, \"\n \" and will be skipped\"%(feat_id,flow_shp,\n details.get('name','n/a')))\n\n # anything not marked already then gets grouped by adjacency and marked\n # the same way as before - see waq_scenario.py for more comments\n def adjacent_cells(g,c,candidates):\n a=list(g.cell_to_adjacent_boundary_cells(c))\n b=list(g.cell_to_cells(c))\n nbrs=filter(lambda cc: cc in candidates,a+b)\n return np.unique(nbrs)\n def trav(c,mark):\n groups['id'][c]=mark\n groups['name'][c]=\"group %d\"%mark\n for nbr in adjacent_cells(g,c,bc_elts):\n if groups['id'][nbr]<0:\n trav(nbr,mark)\n\n ngroups=1+groups['id'].max()\n\n for bc_elt in bc_elts:\n if groups['id'][bc_elt]<0:\n # This is the part where if there are other cells \n # which are part of the same forcing group, they should\n # all get this value\n trav(bc_elt,ngroups)\n ngroups+=1\n self._bc_groups=groups\n return self._bc_groups",
"def burn_step(self):\n change = np.full((self.width, self.height), 0)\n for x in range(0, self.width - 1):\n for y in range(0, self.height - 1):\n # How fast we go through the fuel\n if random.randrange(2) == 0:\n self.fire_check_point(x, y, change)\n\n self.temp = np.maximum(change, self.temp)",
"def breadthFirstSearch(problem):\n from game import Directions\n North = Directions.NORTH\n South = Directions.SOUTH\n East = Directions.EAST\n West = Directions.WEST \n \n pathDict = {}\n visited = set()\n #visited start\n visited.add(problem.getStartState())\n #initial successors\n successor = problem.getSuccessors(problem.getStartState())\n for initSucc in successor:\n pathDict[initSucc[0]] = [initSucc[1]]\n #loop\n while (1):\n #if fringe = null, return failure\n if (len(successor) == 0):\n print \"Fringe is empty\"\n return util.raiseNotDefined()\n #(v, path) = fringe.pop\n succLocation = successor[0][0]\n succDirection = successor[0][1]\n del successor[0]\n #if isGoal = true, return path\n if problem.isGoalState(succLocation):\n return pathDict[succLocation]\n #if visited = false\n if succLocation not in visited:\n #visited = true\n visited.add(succLocation)\n #L = expand(v,path)\n tempSuccList = problem.getSuccessors(succLocation)\n #Fringe <- L\n for succ in tempSuccList:\n repeat = False\n for s in successor:\n if (s[0] == succ[0]):\n repeat = True\n if (repeat == False):\n successor.append(succ)\n pathDict[succ[0]] = []\n pathDict[succ[0]].extend(pathDict[succLocation])\n pathDict[succ[0]].append(succ[1])",
"def _add_force(Fg: np.ndarray, dof_map: Dict[Tuple[int, int], int], model: BDF,\n load, offset: int, ndof_per_grid: int, cid: int=0, show_warning: bool=True):\n #cid = load.cid\n nid = load.node\n node_ref = load.node_ref\n ndofi = ndof_per_grid if node_ref.type == 'GRID' else 1\n assert ndofi == 6, f'GRID must have 6 DOF for structural analysis\\n{node_ref}'\n\n if node_ref.cd == cid:\n fglobal = load.mag * load.xyz\n elif node_ref.cd != cid:\n fbasic = load.to_global()\n if show_warning:\n model.log.warning(f'differing cid & cd is not supported; cid={cid} cd={node_ref.cd}')\n show_warning = False\n cd_ref = node_ref.cd_ref\n Tbg = cd_ref.beta()\n fglobal = _force_to_local(cd_ref, fbasic)\n\n if 0: # pragma: no cover\n if cd_ref.type[-1] in ['C', 'S']:\n ex = Tbg[0, :]\n ey = Tbg[1, :]\n #ez = Tbg[2, :]\n xyz_local = node_ref.get_position_wrt(model, node_ref.cd)\n if cd_ref.type[-1] == 'C':\n theta = radians(xyz_local[1])\n ct = cos(theta)\n st = sin(theta)\n T = np.array([\n [ct, -st, 0.],\n [st, ct, 0.],\n [0., 0., 1.],\n ])\n Tbg = Tbg @ T\n else:\n from pyNastran.bdf.cards.coordinate_systems import CORD2S\n rho, thetad, phid = xyz_local\n coord = CORD2S.add_ijk(-1, origin=cd_ref.origin, i=ex, j=ey, k=None, rid=0, comment='')\n beta = coord.beta()\n Tbg = Tbg @ beta\n coord.transform_vector_to_local([rho, thetad, phid])\n #theta = radians(xyz_local[1])\n #phi = radians(xyz_local[2])\n #ct = cos(theta)\n #st = sin(theta)\n\n #cp = cos(phi)\n #sp = sin(phi)\n\n str(xyz_local)\n else:\n # rectangular\n pass\n Tgb = Tbg.T\n fglobal = Tgb @ fbasic\n else:\n raise NotImplementedError(f'node_ref.cd={node_ref.cd} cid={cid} load:\\n{str(load)}')\n\n for dof in range(3):\n irow = dof_map[(nid, dof+offset)]\n Fg[irow] += fglobal[dof]\n return show_warning"
]
| [
"0.6349423",
"0.63270336",
"0.6020645",
"0.5898426",
"0.5608068",
"0.5601368",
"0.5590194",
"0.5546037",
"0.55457413",
"0.5523806",
"0.55145603",
"0.55054224",
"0.5476916",
"0.54490596",
"0.5432119",
"0.53815204",
"0.5355134",
"0.53444767",
"0.5340942",
"0.53302777",
"0.53175646",
"0.53147566",
"0.53057945",
"0.5301544",
"0.52692235",
"0.52681",
"0.5254753",
"0.5208442",
"0.52054936",
"0.52052206"
]
| 0.6963751 | 0 |
Class invariant that checks whether every cell on the boundary also has the corresponding grid cell set to FULL | def boundary_invariant(self):
for cell in self.fire_boundary():
if self.is_empty(cell[0], cell[1]):
print "Cell " + str(cell) + " in fire boundary is empty."
return False
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_grid_full(self):\n for row in self.game_state:\n for e in row:\n if e is None:\n return False\n return True",
"def fullGrid(state):\n return not ((state[:, :, 0] + state[:, :, 1]) == 0).any()",
"def full(self):\n for x in range(0,3):\n for y in range(0,3):\n if self[x,y] is None:\n return False\n return True",
"def in_grid(self, tile):\n return 0 <= tile[0] < self.gs[0] and 0 <= tile[1] < self.gs[1]",
"def _check_occupied(self, col, row):\n if self.board[row - 1][col - 1] == EMPTY:\n return False\n else:\n return True",
"def is_boundary_cell(self,c):\n edges=self.cell_to_edges(c)\n return np.any( self.edge_to_cells()[edges] < 0 )",
"def check_grid(self) -> None:\n if not len(self.grid) == 81:\n raise ValueError(\"Grid does not have 81 elements. Aborting\")",
"def _is_valid_land(x, y, grid):\n return (x >= 0) and (x < len(grid)) and (y >= 0) and (y < len(grid[0])) and grid[x][y]",
"def update_boundary(self):\n\n cell = self._fire_boundary.dequeue()\n neighbors = self.four_neighbors(cell[0], cell[1])\n #neighbors = self.eight_neighbors(cell[0], cell[1])\n for neighbor in neighbors:\n if self.is_empty(neighbor[0], neighbor[1]):\n self.set_full(neighbor[0], neighbor[1])\n self._fire_boundary.enqueue(neighbor)\n\n # Check class invariant after update\n assert self.boundary_invariant()",
"def check_grid(grid: List):\n for row in range(9):\n for col in range(9):\n if grid[row][col] == 0:\n return False\n return True",
"def isComplete(grid):\n for row in range(0,9):\n for col in range(0,9):\n if grid[row][col]==0:\n return False\n return True",
"def _check_cells(self):\n for row_number in range(self.number_cells_y):\n for col_number in range(self.number_cells_x):\n alive_neighbours = self._get_neighbours(row_number,col_number)\n \n self.to_be_updated[row_number][col_number] = False\n if self.cells[row_number][col_number].get_status():\n if alive_neighbours < 2:\n self.to_be_updated[row_number][col_number] = True\n elif alive_neighbours > 3:\n self.to_be_updated[row_number][col_number] = True\n else:\n if alive_neighbours == 3:\n self.to_be_updated[row_number][col_number] = True",
"def _cell_in_boundary(self, i_row, i_col):\n return ((i_row, i_col) == self._tl_cell or\n (i_row, i_col) == self._tr_cell or\n (i_row, i_col) == self._bl_cell or\n (i_row, i_col) == self._br_cell or\n (i_row, i_col) in self._ls_cells or\n (i_row, i_col) in self._rs_cells or\n (i_row, i_col) in self._ts_cells or\n (i_row, i_col) in self._bs_cells)",
"def is_full(self):\n return all(map(lambda x: x != self.CELL_EMPTY, self.__values))",
"def check_border_cells(self):\n for row, col in self.land_cells:\n if row == 1 or row == self.unique_rows[-1] or col == 1 or col == self.unique_cols[-1]:\n raise ValueError(\"Only water cells may be border cells!\")",
"def row0_invariant(self, target_col):\n # replace with your code\n if self.get_number(0, target_col) != 0:\n return False\n current = 0\n for row in range(2, self.get_height()):\n if target_col == self.get_width() - 1:\n current = self._grid[row][0]\n else:\n current = self._grid[row - 1][-1] + 1\n column = self._grid[row]\n for grid in column:\n if grid != current:\n print 'Error 4'\n return False\n current += 1\n current = self._grid[1][target_col]\n for grid in self._grid[1][target_col:]:\n if grid != current:\n print 'Error 5'\n return False\n current += 1\n return True",
"def check_boundary(self, width, height):\r\n if 0 <= self.head[0] + self.direction[0]*10 <= width - 10 and 0 <= self.head[1] + self.direction[1]*10 <= height - 10:\r\n return True\r\n else:\r\n return False",
"def check_boundedness(self):\n if SymEq.check_boundedness(self.aMatrix,\n self.bMatrix,\n self.eqMatrix,\n SymEq.get_var_list(self.raw_expression)):\n return True\n else:\n raise Exception(\"[RectangleSet ERROR]: (Initial) Set NOT Bounded.\")",
"def check_boundary(self,x):\n b_cells = np.zeros(self.n_c)\n b_cells[self.n_C:] = 1\n vBC = b_cells[self.tris]\n considered_triangles = vBC.sum(axis=1) == 2\n add_extra = ((self.Angles*(1-vBC)>np.pi/2).T*considered_triangles.T).T\n if add_extra.any():\n I,J = np.nonzero(add_extra)\n for k,i in enumerate(I):\n j = J[k]\n xs = x[self.tris[i]]\n re = xs[np.mod(j-1,3)] - xs[np.mod(j+1,3)]\n re = re/np.linalg.norm(re)\n re = np.array([re[1],-re[0]])\n rpe = xs[j]\n x_new = 2*np.dot(xs[np.mod(j-1,3)]-rpe,re)*re + rpe\n x = np.vstack((x,x_new))\n self.n_c = x.shape[0]\n self._triangulate(x)\n self.assign_vertices()\n\n C = get_C_boundary(self.n_c,self.CV_matrix)\n #\n # #Remove extra cells\n # keep_mask = C[self.n_C:, :self.n_C].sum(axis=1)>0 #I'm assuming this is the same thing. This removes all boundary centroids that are not connected to at least one real centroid.\n # if keep_mask.any():\n # c_keep = np.nonzero(keep_mask)[0]\n # x = np.concatenate((x[:self.n_C],x[c_keep + self.n_C]))\n # self.n_c = x.shape[0]\n # self._triangulate(x)\n # self.assign_vertices()\n #\n\n #Remove all boundary particles not connected to exactly two other boundary particles\n remove_mask = C[self.n_C:, self.n_C:].sum(axis=1)!=2\n if remove_mask.any():\n c_keep = np.nonzero(~remove_mask)[0]\n x = np.concatenate((x[:self.n_C],x[c_keep + self.n_C]))\n self.n_c = x.shape[0]\n self._triangulate(x)\n self.assign_vertices()\n self.Angles = tri_angles(x, self.tris)\n #\n # remove_mask = C[self.n_C:, self.n_C:].sum(axis=1)==0\n # if remove_mask.any():\n # c_keep = np.nonzero(~remove_mask)[0]\n # x = np.concatenate((x[:self.n_C],x[c_keep + self.n_C]))\n # self.n_c = x.shape[0]\n # self._triangulate(x)\n # self.assign_vertices()\n # self.Angles = tri_angles(x, self.tris)\n\n\n return x",
"def _checkCells(self):\r\n if(self.startCell.isEmpty()):\r\n raise IllegalMoveException(\"No pawn in start cell\")\r\n if(self.endCell.isOccupied()):\r\n raise IllegalMoveException(\"Targeted cell is already occupied\")\r\n return True",
"def is_full_dimensional(self):\n\n return self.affine_dimension() == self.space_dimension()",
"def valid(self):\n # Verify correct vertex values\n self.verify_vertex_values()\n # Check for duplicate values in lines\n for line in range(9):\n seen = []\n for row in range(9):\n if self.grid[line][row] is None:\n pass\n elif self.grid[line][row] in seen:\n return False\n else:\n seen.append(self.grid[line][row])\n # Check for duplicate values in rows\n for row in range(9):\n seen = []\n for line in range(9):\n if self.grid[line][row] is None:\n pass\n elif self.grid[line][row] in seen:\n return False\n else:\n seen.append(self.grid[line][row])\n # Check for duplicate values in subgrids\n for (subgrid_line, subgrid_row) in [(subg_ln, subg_rw) for subg_ln in range(3) for subg_rw in range(3)]:\n seen = []\n for (line, row) in [(ln, rw) for ln in range(3) for rw in range(3)]:\n if self.grid[3*subgrid_line + line][3*subgrid_row + row] is None:\n pass\n elif self.grid[3*subgrid_line + line][3*subgrid_row + row] in seen:\n return False\n else:\n seen.append(self.grid[3*subgrid_line + line][3*subgrid_row + row])\n # No duplicates found\n return True",
"def row0_invariant(self, target_col):\r\n \r\n solved_lower_right = False\r\n solved_grid = [[col + self.get_width() * row\r\n for col in range(self.get_width())]\r\n for row in range(self._height)]\r\n if self._grid[0][target_col] == 0:\r\n solved_lower_right = True\r\n \r\n for row in range(1 + 1, self._height):\r\n for col in range(self._width):\r\n if self._grid[row][col] != solved_grid[row][col]:\r\n solved_lower_right = False\r\n \r\n for row in range(0, 1):\r\n for col in range(target_col + 1, self._width):\r\n if self._grid[row][col] != solved_grid[row][col]:\r\n solved_lower_right = False\r\n \r\n if self._grid[1][target_col] != solved_grid[1][target_col]:\r\n solved_lower_right = False\r\n \r\n return solved_lower_right",
"def row1_invariant(self, target_col):\r\n # replace with your code\r\n conditions = 0\r\n current = self._grid[1][target_col] == 0\r\n if current:\r\n conditions +=1\r\n else:\r\n # print 'Tile ZERO is not at (0, %s) position' %(target_col)\r\n return False\r\n \r\n below_row = 1 + 1\r\n for _ in range(1, self._height - below_row):\r\n below_row += 1\r\n for ind in range(len(self._grid[below_row])):\r\n if self.current_position(below_row, ind) != (below_row, ind):\r\n # print 'Some tile in the lower row does not in correct place in row1_invariant' \r\n return False\r\n conditions += 1\r\n if conditions == 2:\r\n # print 'All conditions are correct!'\r\n return True",
"def check_2x2_solved(self):\n return self._grid[0][0] == 0 and self._grid[0][1] == 1 \\\n and self._grid[1][0] == self._width*1 and self._grid[1][1] == (1 + self._width * 1)",
"def is_filled(self, x, y):\n if not (0 <= x and x < self.map_size[0]\n and 0 <= y and y < self.map_size[1]):\n return False\n\n # Is there something else than Floor?\n return (len(self.map.get_cell_nodes(x, y)) > 1)",
"def isLegal(self):\n # checks for same values in rows\n for n in range(9):\n rows = set()\n for m in range(9):\n if self.puzzle[n][m] != 0:\n size = len(rows)\n rows.add(self.puzzle[n][m])\n if size == len(rows):\n return False\n\n #checks for same values in columns\n for m in range(9):\n cols = set()\n for n in range(9):\n if self.puzzle[n][m] != 0:\n size = len(cols)\n cols.add(self.puzzle[n][m])\n if size == len(cols):\n return False\n\n #checks for same values in sections\n sections = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]\n for r in sections:\n for c in sections:\n sects = set()\n for n in r:\n for m in c:\n if self.puzzle[n][m] != 0:\n size = len(sects)\n sects.add(self.puzzle[n][m])\n if size == len(sects):\n return False\n return True",
"def full_board( self ):\n\n for x in self.__grid:\n if isinstance(x, int):\n return False\n else:\n continue\n\n return True",
"def is_valid_room(self, x, y):\r\n return 0 <= x < self.__nx and 0 <= y < self.__ny",
"def _board_is_full(self):\n return (self.get_counts()[0] + self.get_counts()[1] == self._num_rows * self._num_cols)"
]
| [
"0.71920717",
"0.70023197",
"0.67449224",
"0.66790205",
"0.6631955",
"0.6613186",
"0.6605032",
"0.6601281",
"0.6562532",
"0.65374047",
"0.6527449",
"0.6509587",
"0.6494093",
"0.64925843",
"0.64523965",
"0.6358073",
"0.63485545",
"0.63382304",
"0.631841",
"0.6313765",
"0.6303063",
"0.6300042",
"0.6297778",
"0.6288825",
"0.6282923",
"0.628269",
"0.627512",
"0.6269781",
"0.62648284",
"0.6264487"
]
| 0.7699241 | 0 |
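The `boundary_invariant` record above only makes sense next to a grid that marks cells FULL as they join the fire boundary. Below is a minimal, self-contained sketch of that invariant; the `FireGrid` class, `set_full`, and the cell values are illustrative assumptions, not part of the original codebase.

```python
EMPTY, FULL = 0, 1

class FireGrid:
    """Tiny stand-in grid used only to exercise the invariant."""

    def __init__(self, height, width):
        self._cells = [[EMPTY] * width for _ in range(height)]
        self._boundary = []  # cells currently on the fire boundary

    def set_full(self, row, col):
        # Marking a cell FULL and enqueueing it together keeps the invariant true.
        self._cells[row][col] = FULL
        self._boundary.append((row, col))

    def is_empty(self, row, col):
        return self._cells[row][col] == EMPTY

    def fire_boundary(self):
        return iter(self._boundary)

    def boundary_invariant(self):
        # Every cell on the boundary must also be FULL in the grid.
        for cell in self.fire_boundary():
            if self.is_empty(cell[0], cell[1]):
                print("Cell " + str(cell) + " in fire boundary is empty.")
                return False
        return True

grid = FireGrid(4, 4)
grid.set_full(1, 2)
assert grid.boundary_invariant()  # holds because set_full keeps grid and boundary in sync
```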
Returns a range from a to b, including both endpoints | def from_inclusive(a, b):
c = int(b > a)*2-1
return range(a, b+c, c) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _bi_range(start, end):\n if start == end:\n return (start,)\n\n elif end < start:\n return reversed(range(end, start + 1))\n\n else:\n return range(start, end + 1)",
"def range_inclusive(start, stop):\n return range(start, stop + 1)",
"def calculate_ranges(a, b):\n try:\n ranges = list(range(0, a, a//b))\n if ranges[-1] != a:\n ranges.append(a)\n return ranges\n except ValueError:\n return [0, a]",
"def new_range(r):\n if isinstance(r, list) or isinstance(r, tuple) and len(r) == 2:\n lower = r[0]\n upper = r[1]\n else:\n lower = r\n upper = r\n lower = int(lower)\n upper = int(upper)\n return range(lower, upper + 1)",
"def range(self, b, e, s=None):\n if s == None:\n return list(range(b,e+1))\n return list(range(b,e+s,s))",
"def getRange (start, stop, step=1):\r\n result = [n for n in range(start, stop, step)]\r\n return result",
"def get_range(n0: int, n1: int, ns: int) -> List[int]:\n # Return a range as a list\n def lrange(a, b, n=1) -> List[int]:\n return list(range(a, b, n))\n # Get the in-bounds part of the range\n n_range = lrange(max(0, n0), min(ns, n1))\n # Handle out-of-bounds indices by reflection across boundaries\n if n0 < 0:\n # Underflow\n n_range = lrange(-n0, 0, -1) + n_range\n if n1 > ns:\n # Overflow\n n_range = n_range + lrange(ns - 1, 2 * ns - n1 - 1, -1)\n\n return n_range",
"def range_overlap(range1, range2):\n return range(max(range1[0], range2[0]), min(range1[1], range2[1]))",
"def closed_range(start, stop, step):\n return range(start, stop + 1, step)",
"def get_range(self) -> tuple[int, int]:\n return self.range_from, self.range_to",
"def get_range(start, stop):\n \n nums = []\n\n for num in range(start, stop):\n nums.append(num)\n\n return nums",
"def range(series):\n return min(series), max(series)",
"def binrange(_min, _max, stepsize, include_upper=False):\n _min = _min - _min % stepsize\n _max = _max - _max % stepsize + stepsize * (1 + include_upper)\n return np.arange(_min, _max, stepsize)",
"def open_range(start, stop, step):\n return np.arange(start, stop+step/2, step)",
"def _translate_range(self, len_, start, end):\n start = int(start)\n end = int(end)\n if start < 0:\n start += len_\n start = max(0, min(start, len_))\n if end < 0:\n end += len_\n end = max(-1, min(end, len_ - 1))\n return start, end",
"def in_range(x, a, b):\n return (x >= a and x <= b) or (x <= a and x >= b)",
"def merge_ranges():",
"def normalizeToRange(v, a, b):\n return (v - a) / (b - a)",
"def ex_range(data):\n a, b, step = _cleanse_range_args(data)\n return list(range(a, b+sign(step), step))",
"def range_union(ranges):\n union = []\n for r in sorted(ranges, key=lambda r: r.start):\n if len(union) > 0 and union[-1].stop >= r.start:\n union[-1] = range(union[-1].start, max(union[-1].stop, r.stop))\n else:\n union.append(r)\n return union",
"def _range_contains(self, a, b):\n\t\treturn b[0] >= a[0] and b[-1] <= a[-1]",
"def make_slice_inclusive(start, stop=None, step=None):\n if stop is None:\n return start, stop, step\n\n if step is None or step > 0:\n if stop == -1:\n stop = None\n else:\n stop += 1\n else:\n if stop == 0:\n stop = None\n else:\n stop -= 1\n return start, stop, step",
"def range(self):\n return (self._start, self._end)",
"def new_ranges(rs):\n return tuple(chain(*[new_range(r) for r in rs]))",
"def range(start: int, stop: int = None, step: int = None) -> ObservableBase:\n from ..operators.observable.range import from_range\n return from_range(start, stop, step)",
"def rangeLin(min, max, n):\n\n return np.arange( min, max, (max-min)/n )",
"def rangeArray(first, last):\n \n return np.arange(first, last+1)",
"def _range_overapped(self, x, y):\n xs = set( range(x[0], x[1]))\n ys = set( range(y[0], y[1]))\n return xs.intersection(ys)",
"def gen_ranges(starts, ends):\n if starts.size != ends.size:\n raise ValueError(\"starts and ends must be same size\")\n if not ((ends - starts) > 0).all():\n raise ValueError(\"all ends must be greater than starts\")\n lengths = ends - starts\n segs = ak.cumsum(lengths) - lengths\n totlen = lengths.sum()\n slices = ak.ones(totlen, dtype=ak.int64)\n diffs = ak.concatenate((ak.array([starts[0]]), \n starts[1:] - starts[:-1] - lengths[:-1] + 1))\n slices[segs] = diffs\n return segs, ak.cumsum(slices)",
"def __and__(self, other):\n if not isinstance(other, Range):\n raise TypeError(\n f\"unsupported operand types for &: \"\n f\"{type(self).__name__!r} and {type(other).__name__!r}\"\n )\n\n if self == other:\n return Range(self.vmin, self.vmax)\n elif self < other or self > other:\n return None\n\n return Range(\n vmin=max(self.vmin, other.vmin),\n vmax=min(self.vmax, other.vmax)\n )"
]
| [
"0.77354187",
"0.73943543",
"0.7353963",
"0.7293135",
"0.72048956",
"0.68755573",
"0.6775559",
"0.6761502",
"0.6750063",
"0.67284167",
"0.66356015",
"0.66242915",
"0.66224957",
"0.6580224",
"0.6560372",
"0.6533194",
"0.6526771",
"0.65064925",
"0.64840794",
"0.6469163",
"0.64078075",
"0.64049447",
"0.63976896",
"0.6389151",
"0.637755",
"0.6367411",
"0.6363976",
"0.6362144",
"0.63561153",
"0.635247"
]
| 0.8278248 | 0 |
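A short, runnable restatement of the inclusive-range helper in the record above, with assertions showing the intended behaviour in both directions; only the name `from_inclusive` comes from the record, the rest is illustrative.

```python
def from_inclusive(a, b):
    """Range from a to b with both endpoints included, in either direction."""
    step = 1 if b > a else -1        # walk toward b (equivalent to int(b > a) * 2 - 1)
    return range(a, b + step, step)  # overshoot the stop by one step so b is kept

assert list(from_inclusive(2, 5)) == [2, 3, 4, 5]   # ascending
assert list(from_inclusive(5, 2)) == [5, 4, 3, 2]   # descending
assert list(from_inclusive(3, 3)) == [3]            # degenerate single-element case
```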
Returns the chunk at the given chunk coordinates | def get_chunk(self, x: int, z: int) -> EmptyChunk:
if not self.inside(x, 0, z, chunk=True):
raise OutOfBoundsCoordinates(f'Chunk ({x}, {z}) is not inside this region')
return self.chunks[z % 32 * 32 + x % 32] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_block(self, chunk, coords):\n\n return chunk.get_block(coords)",
"def getchunk(self, *args, **kwargs):\n return _image.image_getchunk(self, *args, **kwargs)",
"def sync_get_block(self, chunk, coords):\n\n return chunk.get_block(coords)",
"def chunk_coords_to_block_coords(\n x: int, z: int, chunk_x_size: int = 16, chunk_z_size: int = 16\n) -> ChunkCoordinates:\n return x * chunk_x_size, z * chunk_z_size",
"def chunk_to_slice(chunk):\n stops = [a + b for a, b in zip(chunk.offset, chunk.extent)]\n indices_per_dim = zip(chunk.offset, stops)\n index_tuple = map(lambda s: slice(s[0], s[1], None), indices_per_dim)\n return tuple(index_tuple)",
"def chunk_coords_to_region_coords(cx: int, cz: int) -> ChunkCoordinates:\n return cx >> 5, cz >> 5",
"def region_coords_to_chunk_coords(rx: int, rz: int) -> ChunkCoordinates:\n return rx << 5, rz << 5",
"def get_chunk_coordinates(self, node_or_chunk_id: np.uint64\n ) -> np.ndarray:\n layer = self.get_chunk_layer(node_or_chunk_id)\n bits_per_dim = self.bitmasks[layer]\n\n x_offset = 64 - self._n_bits_for_layer_id - bits_per_dim\n y_offset = x_offset - bits_per_dim\n z_offset = y_offset - bits_per_dim\n\n x = int(node_or_chunk_id) >> x_offset & 2 ** bits_per_dim - 1\n y = int(node_or_chunk_id) >> y_offset & 2 ** bits_per_dim - 1\n z = int(node_or_chunk_id) >> z_offset & 2 ** bits_per_dim - 1\n return np.array([x, y, z])",
"def chunk(self):\n # easy enough\n return self.dcpl.getChunk(rank=len(self.shape))",
"def block_coords_to_chunk_coords(\n *args: int, sub_chunk_size: int = 16\n) -> Tuple[int, ...]:\n return tuple(int(math.floor(coord / sub_chunk_size)) for coord in args)",
"def chunk_array_position(self, x, y, z):\n return y * 256 + z * 16 + x",
"def get_chunk_id_from_coord(self, layer: int,\n x: int, y: int, z: int) -> np.uint64:\n base_chunk_span = int(self.fan_out) ** max(0, layer - 2)\n\n return self.get_chunk_id(\n layer=layer,\n x=x // (int(self.chunk_size[0]) * base_chunk_span),\n y=y // (int(self.chunk_size[1]) * base_chunk_span),\n z=z // (int(self.chunk_size[2]) * base_chunk_span))",
"def get_chunk(self, dimension: Dimension, cx: int, cz: int) -> Optional[Chunk]:\n chunk_key = (dimension, cx, cz)\n if chunk_key in self._chunk_cache:\n chunk = self._chunk_cache[chunk_key]\n elif chunk_key in self._chunk_history:\n chunk = self._chunk_cache[chunk_key] = self.get_current(dimension, cx, cz)\n else:\n raise ChunkDoesNotExist\n return chunk",
"def ReturnBlockOfCoordinates(x, y):\n block_x = int(x / block_size)\n block_y = int(y / block_size)\n\n if block_x == block_size:\n block_x = block_x - 1\n\n if block_y == block_size:\n block_y = block_y - 1\n\n return (block_x, block_y)",
"def chunk_coords(self) -> Generator[Tuple[int, int], None, None]:\n cx, cz = int(self.camera_location[0]) >> 4, int(self.camera_location[2]) >> 4\n\n sign = 1\n length = 1\n for _ in range(self.render_distance*2+1):\n for _ in range(length):\n yield cx, cz\n cx += sign\n for _ in range(length):\n yield cx, cz\n cz += sign\n sign *= -1\n length += 1",
"def getTilingSplitCoordsMP(args):\n (metadata, index) = args\n return getTilingSplitCoordsTuple(*metadata, index)",
"def blocks_slice_to_chunk_slice(\n blocks_slice: slice, chunk_shape: int, chunk_coord: int\n) -> slice:\n return slice(\n min(max(0, blocks_slice.start - chunk_coord * chunk_shape), chunk_shape),\n min(max(0, blocks_slice.stop - chunk_coord * chunk_shape), chunk_shape),\n )",
"def getChunks():",
"def get_chunks(cube_shape, coord_names, chunk=True, step=2):\n\n ntimes = cube_shape[0]\n\n if chunk:\n assert coord_names[0] == 'time'\n\n remainder = ntimes % step\n while remainder == 1:\n step = step + 1\n remainder = ntimes % step\n\n start_indexes = range(0, ntimes, step)\n else:\n start_indexes = [0]\n step = ntimes\n\n return start_indexes, step",
"def get_block(self, coords):\n\n x, y, z = coords\n index, y = divmod(y, 16)\n\n return self.sections[index].get_block((x, y, z))",
"def chunk_slices_to_unit_index(self, slices):\n # remove dimension for channel\n slices = slices[-len(self.chunk_shape):]\n return tuple((slice.start - b.start) // s for b, s, slice in zip(self.bounds, self.strides, slices))",
"def getchunk( self, name ):\n nm= self.fullNameFor( name )\n if nm in self.named:\n return self.named[nm]\n raise Error( \"Cannot resolve {!r} in {!r}\".format(name,self.named.keys()) )",
"def get_metadata(self, chunk, coords):\n\n return chunk.get_metadata(coords)",
"def update_piece_coordinates(chunk, copy_piece_coordinates):\n\n dimensions = chunk.shape # In the case of only one piece it will return a tuple with 1 values\n height = 0\n width = 0\n if len(dimensions) == 1:\n height = 1\n width = 1\n else:\n height, width = chunk.shape\n for i in range(height):\n for j in range(width):\n # Check if -1, we don't want -1, No one wants them, They do not have friends, they are weird\n key = chunk[i][j]\n if key != -1:\n copy_piece_coordinates[key] = (i, j)\n\n return height, width",
"def coords_to_chunk(f):\n\n @wraps(f)\n def decorated(self, coords, *args, **kwargs):\n x, y, z = coords\n\n bigx, smallx, bigz, smallz = split_coords(x, z)\n d = self.request_chunk(bigx, bigz)\n\n @d.addCallback\n def cb(chunk):\n return f(self, chunk, (smallx, y, smallz), *args, **kwargs)\n\n return d\n\n return decorated",
"def get_position(self, position):",
"def find_chunk(self, identifier):\n for i, name in enumerate(self._per_chunk_arrays[\"identifier\"]):\n if name == identifier:\n return i\n raise KeyError(f\"No chunk named {identifier}\")",
"def chunk_from_mem(self, ptr):\n if self.state.solver.symbolic(ptr):\n try:\n ptr = self.state.solver.eval_one(ptr)\n except SimSolverError:\n l.warning(\"A pointer to a chunk is symbolic; maximizing it\")\n ptr = self.state.solver.max_int(ptr)\n else:\n ptr = self.state.solver.eval(ptr)\n return PTChunk(ptr - (2 * self._chunk_size_t_size), self.state) if ptr != 0 else None",
"def chunk(self):\n return int((self.center_x + config.SPRITE_PIXEL_SIZE / 2) // 320)",
"def core_slices(self, chunk):\n intersect_slices = []\n for s, b, olap, idx in zip(chunk.slices, self.bounds, self.overlap, range(0, len(chunk.slices))):\n if s.start == b.start:\n intersect_slices.append(slice(s.start + olap, s.stop))\n elif s.stop == b.stop:\n intersect_slices.append(slice(s.start, s.stop - olap))\n else:\n intersect_slices.append(s)\n\n return tuple(self.remove_chunk_overlap(chunk, intersect_slices))"
]
| [
"0.71494114",
"0.6753957",
"0.67533576",
"0.6625385",
"0.65714836",
"0.6403521",
"0.640261",
"0.6368248",
"0.62888116",
"0.6236681",
"0.61390483",
"0.6115911",
"0.60418653",
"0.6006928",
"0.59817696",
"0.59290576",
"0.58626676",
"0.5813215",
"0.5805423",
"0.5748618",
"0.57186234",
"0.57083964",
"0.5706288",
"0.5699535",
"0.56943727",
"0.5673969",
"0.5672107",
"0.56546897",
"0.56452304",
"0.5569768"
]
| 0.6806264 | 1 |
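The `get_chunk` record above (and the `add_chunk` record that follows) both fold global chunk coordinates into a 32x32 region with `z % 32 * 32 + x % 32`. The sketch below shows that indexing in isolation; the `Region` class, its `inside` check, and the use of `ValueError` are assumptions for illustration only.

```python
class Region:
    """Minimal 32x32 region demonstrating the flat chunk index."""

    SIZE = 32

    def __init__(self, region_x, region_z):
        self.region_x, self.region_z = region_x, region_z
        self.chunks = [None] * (self.SIZE * self.SIZE)

    def inside(self, x, z):
        # A chunk belongs to this region when its coordinates fall in the 32x32 window.
        return x // self.SIZE == self.region_x and z // self.SIZE == self.region_z

    def _index(self, x, z):
        # Same formula as in the records: (z % 32) * 32 + (x % 32).
        return (z % self.SIZE) * self.SIZE + (x % self.SIZE)

    def add_chunk(self, x, z, chunk):
        if not self.inside(x, z):
            raise ValueError(f"Chunk ({x}, {z}) is not inside this region")
        self.chunks[self._index(x, z)] = chunk  # silently overwrites an existing chunk

    def get_chunk(self, x, z):
        if not self.inside(x, z):
            raise ValueError(f"Chunk ({x}, {z}) is not inside this region")
        return self.chunks[self._index(x, z)]

region = Region(0, 0)
region.add_chunk(5, 7, "dummy chunk")
assert region.get_chunk(5, 7) == "dummy chunk"
```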
Adds the given chunk to this region. Will overwrite if a chunk already exists at this location | def add_chunk(self, chunk: EmptyChunk):
if not self.inside(chunk.x, 0, chunk.z, chunk=True):
raise OutOfBoundsCoordinates(f'Chunk ({chunk.x}, {chunk.z}) is not inside this region')
self.chunks[chunk.z % 32 * 32 + chunk.x % 32] = chunk | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_chunk(self, chunk):\n self.chunkbuffer.appendleft(chunk)",
"def add(self, chunkOrToken):\n chunkOrToken.setParent(self)\n self.dtrs.append(chunkOrToken)\n self.positionCount += 1",
"def _add_cached_chunk(self, offset, data):\n if (\n self._cached_chunk_position + len(self._cached_chunk) == offset\n and len(self._cached_chunk) + len(data) < _MAX_CACHED_CHUNK_SIZE\n ):\n # add to existing cache\n self._cached_chunk += data\n else:\n self._cached_chunk = data\n self._cached_chunk_position = offset",
"def add( self, chunk ):\n self.chunkSeq.append( chunk )\n chunk.web= weakref.ref(self)",
"def addfragment(self, fragment):\n self.__fragments.append(fragment)",
"def add_section(self, section: EmptySection, x: int, z: int, replace: bool=True):\n if not self.inside(x, 0, z, chunk=True):\n raise OutOfBoundsCoordinates(f'Chunk ({x}, {z}) is not inside this region')\n chunk = self.chunks[z % 32 * 32 + x % 32]\n if chunk is None:\n chunk = EmptyChunk(x, z)\n self.add_chunk(chunk)\n chunk.add_section(section, replace)",
"def put_original_chunk(\n self, dimension: Dimension, cx: int, cz: int, chunk: Optional[Chunk]\n ):\n # If the chunk does not exist in the chunk history then add it\n\n # This code is only called when loading a chunk from the database.\n # If this code is called and the chunk history already exists then the\n # chunk must have been unloaded from the World class and reloaded\n # only chunks that are unchanged or have been saved can be unloaded so\n # the latest chunk here should be the same as the one on disk\n\n key = (dimension, cx, cz)\n if key not in self._chunk_history:\n if chunk is not None:\n assert cx == chunk.cx and cz == chunk.cz\n self._chunk_index[key] = (0, 0)\n self._chunk_history[key] = [self._serialise_chunk(chunk, dimension, 0)]\n self._chunk_cache[key] = chunk",
"def putchunk(self, *args, **kwargs):\n return _image.image_putchunk(self, *args, **kwargs)",
"def addNamed( self, chunk ):\n self.chunkSeq.append( chunk )\n chunk.web= weakref.ref(self)\n nm= self.addDefName( chunk.name )\n if nm:\n # We found the full name for this chunk\n self.sequence += 1\n chunk.seq= self.sequence\n chunk.fullName= nm\n self.named[nm].append( chunk )\n chunk.initial= len(self.named[nm]) == 1\n self.logger.debug( \"Extending chunk {!r} from {!r}\".format(nm, chunk.name) )\n else:\n raise Error(\"No full name for {!r}\".format(chunk.name), chunk)",
"async def add_chunk(self, chunk, token: str = '', measurement_id: str = ''):\n # If params are not provided, take the last one stored\n with open(self.config_file) as json_file:\n data = json.load(json_file)\n if token == '':\n token = data[self.server][self.license_key][self.user.email][\"user_token\"]\n\n if token == '':\n raise ValueError(\"No user token provided. Please log in.\")\n if not measurement_id or measurement_id == '':\n measurement_id = self.measurement_id\n\n self.addData_done = False\n\n properties = {\n \"valid\": chunk.valid,\n \"start_frame\": chunk.start_frame,\n \"end_frame\": chunk.end_frame,\n \"chunk_number\": chunk.chunk_number,\n \"number_chunks\": chunk.number_chunks,\n \"first_chunk_start_time_s\": chunk.first_chunk_start_time_s,\n \"start_time_s\": chunk.start_time_s,\n \"end_time_s\": chunk.end_time_s,\n \"duration_s\": chunk.duration_s,\n }\n payload = chunk.payload_data\n meta = chunk.metadata\n\n chunk_num = chunk.chunk_number\n self.num_chunks = chunk.number_chunks\n\n # Determine action from chunk order\n if chunk_num == 0 and self.num_chunks > 1:\n action = 'FIRST::PROCESS'\n elif chunk_num == self.num_chunks - 1:\n action = 'LAST::PROCESS'\n self.addData_done = True\n else:\n action = 'CHUNK::PROCESS'\n\n chunkOrder = properties['chunk_number']\n startTime = properties['start_time_s']\n endTime = properties['end_time_s']\n duration = properties['duration_s']\n\n # Websockets\n status = 0\n body = {}\n if self.conn_method == \"websocket\" or self.conn_method == \"ws\":\n if not self.ws_obj.ws:\n await self.ws_obj.connect_ws()\n response = await self.measurement.add_data_ws(measurement_id, chunkOrder, action, startTime, endTime,\n duration, payload, meta)\n if response:\n status = int(response[10:13].decode('utf-8'))\n body = response.decode('utf-8')\n else:\n self.addData_done = True\n # REST\n else:\n response = await self.measurement.add_data_rest(measurement_id, chunkOrder, action, startTime, endTime,\n duration, payload, meta)\n status = int(response.status_code)\n body = response.json()\n\n # Handle several types of errors.\n # Since `addData` times out after 120s for each measurement, when that\n # happens, we make a call to an internal method `__handle_ws_timeout`.\n # If timeout occurs earlier than 120s, or if there is another type of\n # error, the `addData` process would stop by setting\n # `self.addData_done = True`.\n if int(status) != 200:\n if int(status) == 400 or int(status) == 405:\n if chunk_num * duration < 120 and chunk_num != 0: # Timed out earlier than 120s\n self.addData_done = True\n\n if self.conn_method == \"websocket\" or self.conn_method == \"ws\":\n if 'MEASUREMENT_CLOSED' in body:\n await self.__handle_ws_timeout(chunkOrder, action, startTime, endTime, duration, payload, meta)\n else:\n self.addData_done = True\n else:\n if body['Code'] == 'MEASUREMENT_CLOSED':\n await self.__handle_ws_timeout(chunkOrder, action, startTime, endTime, duration, payload, meta)\n else:\n self.addData_done = True\n else:\n self.addData_done = True\n\n # Sleep for the chunk duration as the data gets sent.\n # (This ensures we don't hit the rate limit)\n await asyncio.sleep(duration)\n\n # Close the websocket connection if all websocket processes are complete.\n await self.__handle_exit()",
"def put_chunk(self, chunk: Chunk, dimension: Dimension):\n chunk.changed = True\n chunk.block_palette = self._block_palette\n chunk.biome_palette = self._biome_palette\n self._chunk_cache[(dimension, chunk.cx, chunk.cz)] = chunk",
"def addOutput( self, chunk ):\n self.chunkSeq.append( chunk )\n chunk.web= weakref.ref(self)\n if chunk.name not in self.output:\n self.output[chunk.name] = []\n self.logger.debug( \"Adding chunk {!r}\".format(chunk.name) )\n self.sequence += 1\n chunk.seq= self.sequence\n chunk.fullName= chunk.name\n self.output[chunk.name].append( chunk )\n chunk.initial = len(self.output[chunk.name]) == 1",
"def add(self, block: Block):\n self._buffer.append(block)",
"def add_node(self, node):\n frame = self.stack[-1]\n curr_node, index, line = frame\n variants = self.get_variants()\n # adding to the end of the variant\n if len(line) == index + 1:\n line.append(node)\n # adding new variant\n elif variants:\n # check that node doesn't exist yet\n for variant in variants:\n if len(variant) and variant[0] == node:\n raise CursorError(\"Node already exists.\")\n variants.append([node])\n # forking the simple variant\n else:\n if line[index +1] == node:\n raise CursorError(\"Node already exists.\")\n variants = []\n variants.append(line[index + 1:])\n variants.append([node])\n while len(line) > index + 1:\n line.pop()\n line.append(variants)",
"def add(self, octree_chunk: OctreeChunk, atlas_tile: AtlasTile) -> None:\n tile_index = atlas_tile.index\n\n self._tiles[tile_index] = TileData(octree_chunk, atlas_tile)\n self._chunks.add(octree_chunk)",
"def add_region(self, address, data):\n region = HexFileRegion(address, data)\n self.regions.append(region)\n self.check()",
"def add_fragment(self, fragment, delay_sort = False):\n assert isinstance(fragment, Fragment)\n assert fragment.chain_id == self.chain_id\n\n if self.fragment_dict.has_key(fragment.fragment_id):\n raise FragmentOverwrite()\n\n self.fragment_list.append(fragment)\n self.fragment_dict[fragment.fragment_id] = fragment\n\n if not delay_sort:\n self.fragment_list.sort()",
"def add_block(self, block):\n if block.index >= len(self.blockchain):\n self.blockchain.append(block)\n else:\n self.blockchain[block.index] = block\n self.write_to_disk()",
"def add(self, content):\n if (not self.exists()):\n raise IOError(\"File at '{}' does not exist.\".format(self.location))\n with open(self.location, 'a') as f:\n f.write(content)",
"def addChunk(self, direction):\n pass\n\n ## get size of actual map\n ## create array of fitting size\n ## stack created array to map",
"def add_fragment(self, fragment, as_last=True):\n if not isinstance(fragment, SyncMapFragment):\n self.log_exc(u\"fragment is not an instance of SyncMapFragment\", None, True, TypeError)\n self.fragments_tree.add_child(Tree(value=fragment), as_last=as_last)",
"def add_segment(self, segment):\n self.segments.append(segment)",
"def add(self, str):\n if str in self.settings['Core::Blocks']:\n return\n self.settings['Core::Blocks'].append(str)\n self.rebuild()\n self.settings.save()",
"def add(self, line):\n self.body.append(line)",
"def append( self, command ):\n self.commands.append( command )\n command.chunk= self",
"def add(self, node):\n if str(node.getPosition()) in self._history:\n # duplicate entry\n return\n self._history[str(node.getPosition())] = True\n self._insort(node)",
"def add_fragment(self, fragment, delay_sort=False):\n Segment.add_fragment(self, fragment, delay_sort)\n fragment.chain = self",
"def add_entry(self, entry): # Hashmap.add_entry\n\n if entry.hexdigest in self.contentHash:\n self.contentHash[entry.hexdigest].append(entry)\n else:\n self.contentHash[entry.hexdigest] = [ entry ]\n\n if entry.depth < self.minDepth:\n self.minDepth = entry.depth",
"def add(self, block):\n\n try:\n self.blocks[block.height]\n except:\n\n self.blocks[block.height] = [block]\n if self.current_height < block.height:\n self.current_height = block.height\n return\n\n if not block.hash() in [b.hash() for b in self.blocks[block.height]]:\n self.blocks[block.height].append(block)\n loggerutil.debug(\"fork detected for height:\" + str(block.height) +\n \"block candidats:\" + str(self.blocks[block.height]))\n if self.current_height < block.height:\n self.current_height = block.height",
"def append(self, tag):\r\n self.insert(len(self.contents), tag)"
]
| [
"0.7879799",
"0.67803276",
"0.6672726",
"0.6668966",
"0.64762497",
"0.62061226",
"0.616433",
"0.60951906",
"0.60454017",
"0.6009306",
"0.596302",
"0.5851554",
"0.5826008",
"0.57065004",
"0.56892127",
"0.56521547",
"0.56358415",
"0.55565655",
"0.5546457",
"0.5528825",
"0.5493096",
"0.5489708",
"0.5480572",
"0.5469954",
"0.54632545",
"0.54272956",
"0.54233074",
"0.54197234",
"0.541746",
"0.5396512"
]
| 0.79272586 | 0 |
Adds section to chunk at (x, z). Same as ``EmptyChunk.add_section(section)`` | def add_section(self, section: EmptySection, x: int, z: int, replace: bool=True):
if not self.inside(x, 0, z, chunk=True):
raise OutOfBoundsCoordinates(f'Chunk ({x}, {z}) is not inside this region')
chunk = self.chunks[z % 32 * 32 + x % 32]
if chunk is None:
chunk = EmptyChunk(x, z)
self.add_chunk(chunk)
chunk.add_section(section, replace) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_section(self, section_name: str) -> None:\n pass",
"def add_section(self, section_name: str) -> None:\n pass",
"def add_section(self, section):\n if section.lower() == \"default\":\n raise ValueError, 'Invalid section name: %s' % section\n\n if section in self._sections:\n raise DuplicateSectionError(section)\n self._sections[section] = self._dict()",
"def add_chunk(self, chunk: EmptyChunk):\n if not self.inside(chunk.x, 0, chunk.z, chunk=True):\n raise OutOfBoundsCoordinates(f'Chunk ({chunk.x}, {chunk.z}) is not inside this region')\n self.chunks[chunk.z % 32 * 32 + chunk.x % 32] = chunk",
"def add_section(self, section):\n if self.has_section(section):\n raise DuplicateSectionError(section)\n self._dict[section] = {}",
"def add_section(self, section, lyrics):\n self.sections[section] = lyrics",
"def _add_section(self, name, last_section=None):\n if last_section is None:\n last_section = self.sections\n last_section[name] = Section()\n return last_section[name]",
"def add_section(self) -> None:\n\n if self.rows:\n self.rows[-1].end_section = True",
"def add_to_drawn(section: str, thing: Base, index: int=None):\r\n if index is None:\r\n drawn[section].append(thing)\r\n else:\r\n drawn[section].insert(index, thing)",
"def visit_section(self, node):\n self.section_level += 1\n self.body.append(self.starttag(node, \"section\"))",
"def add_section(self, section: NetSection) -> None:\n if section.name in self.sections:\n self.logger.warning(\"Overriding section {}\".format(section.name))\n self.sections[section.name] = section",
"def add_new_section(self, name, context=...):\n ...",
"def addsection(config_file, section):\n error, parser = _get_config_parsser(config_file)\n if error:\n return error, parser\n # add section\n try:\n parser.add_section(section)\n except Exception, e:\n return 1, e\n # backup config_file\n _backup_config(config_file)\n # write new config to file\n return _write_config_parser_to_file(parser, config_file)",
"def do_section(parser, token, template='parts/section.html', end='endsection'):\n bits = token.split_contents()[1:]\n if len(bits) is 0:\n title, attrs = '', {}\n elif len(bits) is 1:\n title, attrs = bits[0], {}\n elif len(bits) % 2 is 0:\n raise template.TemplateSyntaxError(\"Your attributes don't match up: %s\" % ', '.join(bits[1:]))\n else:\n title = bits[0]\n attrs = dict(zip(bits[1::2], bits[2::2]))\n nodelist = parser.parse((end,))\n parser.delete_first_token()\n return SectionNode(template, title, attrs, nodelist)",
"def add_section(self, text: str) -> None:\n\n tag = r'''\\newpage\n \\section{%s}''' % (text)\n self.doc = self.doc + tag",
"def create_section(section):\n\tif not config_parser or config_parser.has_section(section):\n\t\treturn False\n\tconfig_parser.add_section(section)\n\treturn True",
"def _write_section_start(section_name, fobj):\n\n fobj.write(string.capwords(section_name, '_') + '\\n')",
"def new_section(name):\n name = normalize_name(name)\n n = get_and_increment(name)\n return section_name(name, n)",
"def _add_section(self, section, before_section=None):\n inserted_before_other = False\n\n if before_section is not None:\n if before_section in self._sections:\n # If before_section was already introduced, we simply need to\n # insert the new section on its position, which will put it\n # exactly behind before_section.\n idx = self._sections.index(before_section)\n self._sections.insert(idx, section)\n inserted_before_other = True\n else:\n # If before_section hasn't been introduced yet, we know we need\n # to insert it after section when it's finally added to the\n # menu. So, we preserve that info in the _after_sections dict.\n self._after_sections[before_section] = section\n\n # Append section to the list of sections because we assume\n # people build menus from top to bottom, i.e. they add its\n # upper sections first.\n self._sections.append(section)\n else:\n self._sections.append(section)\n\n # Check if section should be inserted after another one, according to\n # what we have in _after_sections.\n after_section = self._after_sections.pop(section, None)\n\n if after_section is not None:\n if not inserted_before_other:\n # Insert section to the right of after_section, if it was not\n # inserted before another one.\n if section in self._sections:\n self._sections.remove(section)\n\n index = self._sections.index(after_section)\n self._sections.insert(index + 1, section)\n else:\n # If section was already inserted before another one, then we\n # need to move after_section to its left instead.\n if after_section in self._sections:\n self._sections.remove(after_section)\n\n idx = self._sections.index(section)\n idx = idx if (idx == 0) else (idx - 1)\n self._sections.insert(idx, after_section)",
"def add_section(self, section_name: str) -> None:\n if self.is_section_exist(section_name=section_name) is False:\n config = ConfigParser(allow_no_value=True)\n config.read(self.connection_string)\n config.add_section(section=(section_name.replace('_', ' ')))\n with open(file=self.connection_string, mode='w') as file:\n config.write(fp=file)",
"def add(self, package=\"\", position=\"\", params={}):\n payload = { 'package': package, 'position': position }\n if params:\n payload = self.__cc.merge_payloads(payload, params)\n return self.__cc.http_post('add-nat-section', payload=payload)",
"def add(self, layer=\"\", position=\"\", params={}):\n return self.__common_client._add_with_layer('add-access-section', layer, position, params)",
"def add_segment(self, segment):\n self.segments.append(segment)",
"def add_section(self):\n section = CodeBuilder(self.indent_level)\n self.code.append(section)\n return section",
"def add_section(self):\n section = CodeBuilder(self.indent_level)\n self.code.append(section)\n return section",
"def write_section(self, fhandle, sect):\n fhandle.write(\"[%s]\\n\" % sect)\n for opt in sorted(self.file_parser.options(sect)):\n fhandle.write('{0} = {1}\\n'.format(opt, self.file_parser.get(sect, opt)))",
"def _create_section(self, parent, sectionid, title=None, term=None):\n\n idb = nodes.make_id(sectionid)\n section = nodes.section(ids=[idb])\n parent.append(section)\n\n if term:\n if term != '**':\n section.append(nodes.term('', term))\n\n definition = nodes.definition()\n section.append(definition)\n\n return definition\n\n if title:\n section.append(nodes.title('', title))\n\n return section",
"def push_sections(self):\n self._model_stack.append(Section)\n return self",
"def add_chunk(self, chunk):\n self.chunkbuffer.appendleft(chunk)",
"def add_sections(self, op):\n if(is_listing(op)):\n self.__sections += op\n else:\n self.__sections += [op]"
]
| [
"0.6537357",
"0.6537357",
"0.6398103",
"0.6397857",
"0.6276737",
"0.6092696",
"0.58850425",
"0.58581275",
"0.5814254",
"0.57519996",
"0.5741328",
"0.57047933",
"0.56777245",
"0.5639013",
"0.55746704",
"0.5516169",
"0.5390092",
"0.5364603",
"0.5347104",
"0.5342008",
"0.5234331",
"0.5216506",
"0.52085286",
"0.5175962",
"0.5175962",
"0.5142958",
"0.51148933",
"0.5109945",
"0.5109605",
"0.5106801"
]
| 0.84367806 | 0 |
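For context, a small usage sketch of the `add_section` document in the record above. The surrounding `EmptyRegion` class and the `EmptySection(y_index)` constructor are assumptions based on the anvil-style API these records appear to come from, not facts stated in the record itself:

```python
# Hypothetical usage; EmptyRegion / EmptySection names are assumed, not confirmed.
region = EmptyRegion(0, 0)        # region spanning chunks (0..31, 0..31)
section = EmptySection(4)         # assumed: section at y-index 4 (blocks y = 64..79)

# Chunk (3, 7) does not exist yet, so add_section creates an EmptyChunk(3, 7)
# via add_chunk() and then delegates to chunk.add_section(section, replace=True).
region.add_section(section, 3, 7)

# Coordinates outside the 32x32 chunk grid raise OutOfBoundsCoordinates:
# region.add_section(section, 40, 7)   # would raise
```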
Helper function that only sets the block if ``self.inside(x, y, z)`` is true | def set_if_inside(self, block: Block, x: int, y: int, z: int):
if self.inside(x, y, z):
self.set_block(block, x, y, z) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def in_block(x, y, input_suduko_3d):\n block_id = 100\n\n if x < 3 and y < 3: # First if statements defines into which block the element of given x,y values is, this are\n # from 0-8 representing the 9 boxes\n block_id = 0\n if 6 > x >= 3 > 0 <= y:\n block_id = 3\n if 6 <= x <= 8 and 0 <= y < 3:\n block_id = 6\n if x < 3 <= y < 6:\n block_id = 1\n if 3 <= x < 6 and 3 <= y < 6:\n block_id = 4\n if 8 >= x >= 6 > 3 <= y:\n block_id = 7\n if x < 3 and 6 <= y <= 8:\n block_id = 2\n if 3 <= x < 6 <= y <= 8:\n block_id = 5\n if 6 <= x <= 8 and 6 <= y <= 8:\n block_id = 8\n\n suduko_blocks = blockshaped(np.array(input_suduko_3d[:, :, 0])) # suduko_blocks is a 2D array that holds all of the\n # 9 constituent block elements\n suduko_blocks_flatten = suduko_blocks[block_id].flatten()\n\n value_in_block = np.in1d(input_suduko_3d[x, y, :], suduko_blocks_flatten)\n\n for i, value in enumerate(value_in_block):\n if value_in_block[i] == True and i != 0:\n input_suduko_3d[x, y, i] = 0",
"def inside(self, x: int, y: int, z: int, chunk: bool=False) -> bool:\n factor = 32 if chunk else 512\n rx = x // factor\n rz = z // factor\n return not (rx != self.x or rz != self.z or y < 0 or y > 255)",
"def is_inside(self, x: int, y: int) -> bool:\n pass",
"def fill(self, block: Block, x1: int, y1: int, z1: int, x2: int, y2: int, z2: int, ignore_outside: bool=False):\n if not ignore_outside:\n if not self.inside(x1, y1, z1):\n raise OutOfBoundsCoordinates(f'First coords ({x1}, {y1}, {z1}) is not inside this region')\n if not self.inside(x2, y2, z2):\n raise OutOfBoundsCoordinates(f'Second coords ({x}, {y}, {z}) is not inside this region')\n\n for y in from_inclusive(y1, y2):\n for z in from_inclusive(z1, z2):\n for x in from_inclusive(x1, x2):\n if ignore_outside:\n self.set_if_inside(block, x, y, z)\n else:\n self.set_block(block, x, y, z)",
"def set_block(self, block: Block, x: int, y: int, z: int):\n if not self.inside(x, y, z):\n raise OutOfBoundsCoordinates(f'Block ({x}, {y}, {z}) is not inside this region')\n cx = x // 16\n cz = z // 16\n chunk = self.get_chunk(cx, cz)\n if chunk is None:\n chunk = EmptyChunk(cx, cz)\n self.add_chunk(chunk)\n chunk.set_block(block, x % 16, y, z % 16)",
"def is_blocking(self, x, y):\r\n\r\n if not 0 <= x < self.width or not 0 <= y < self.height:\r\n return True\r\n return self.get_bool(x, y, 'block')",
"def Active(self,coord):\n\n x,y,z = coord\n box = False\n if x >= self.xmin and x <= self.xmax:\n if y >= self.ymin and y <= self.ymax:\n if z < 0:\n if z <= self.zmin and z >= self.zmax:\n box = True\n else:\n if z >= self.zmin and z <= self.zmax:\n box = True\n \n return box",
"def _isInside(self, v, select, progress):\n # Compute on non-masked sources :\n xyz = self.xyz\n N = xyz.shape[0]\n inside = np.ones((xyz.shape[0],), dtype=bool)\n v = v.reshape(v.shape[0] * 3, 3)\n\n # Loop over sources :\n progress.show()\n for k in range(N):\n # Get the euclidian distance :\n eucl = cdist(v, xyz[[k], :])\n # Get the closest vertex :\n eucl_argmin = eucl.argmin()\n # Get distance to zero :\n xyz_t0 = np.sqrt((xyz[k, :] ** 2).sum())\n v_t0 = np.sqrt((v[eucl_argmin, :] ** 2).sum())\n inside[k] = xyz_t0 <= v_t0\n progress.setValue(100 * k / N)\n self.data.mask = False\n self.data.mask = inside if select != 'inside' else np.invert(inside)\n # Finally update data sources and text :\n self.update()\n self.text_update()\n progress.hide()",
"def _in_box(self, point, extent):\n return ((point[0] >= extent[0]) and\n (point[0] <= extent[1]) and\n (point[1] >= extent[2]) and\n (point[1] <= extent[3]))",
"def is_block(self):\n return self.v & 1 == 0",
"def inside(self, x, on_boundary):\n return bool((near(x[0], xmin) or near(x[1], ymin)) and \\\n (not ((near(x[0], xmin) and near(x[1], ymax)) \\\n or (near(x[0], xmax) and near(x[1], ymin)))) \\\n and on_boundary)",
"def _inblock(row, column, init, end):\n return all([row[column][0] >= init[0],\n row[column][1] >= init[1],\n row[column][0] <= end[0],\n row[column][1] <= end[1]])",
"def _setBlock(o,block):\n o.board.add(block)\n o._cachedShadow = None\n clearedLines = o.board.clearLines()\n o.lines += clearedLines\n if clearedLines > 0 and o.onClearLines is not None:\n o.onClearLines(clearedLines)\n o.board.addRows(o.penalty)\n o.penalty = 0\n o._initBlock(o.queue.pop())\n if o.lines >= o.level*10: o.level+=1\n o.canHold = True\n isGameOver = all(sq.y >= o.board.height for sq in block) \\\n or any(o.board[sq.x,sq.y] != None for sq in o.block)\n if isGameOver and o.onGameOver: o.onGameOver(o)",
"def inside(x, y, primitive):\n\n # You should implement your inside test here for all shapes\n # for now, it only returns a false test\n\n if primitive[\"shape\"] == \"circle\":\n dist_sqr = ((primitive[\"center\"][0] - x) ** 2 +\n (primitive[\"center\"][1] - y) ** 2)\n\n return dist_sqr <= primitive[\"radius\"] ** 2\n else:\n return winding_number(x, y, primitive)\n\n return False",
"def check_block(self, block):\n pass",
"def check_inside(self, pos):\n x,y = pos\n return x >= self.posx and x <= self.posx + self.sizex and y >= self.posy and y <= self.posy + self.sizey",
"def check_contained(self,x,y):\n if self.active:\n self.reset()\n #if in horizontal bounds\n if x > self.left and x < self.right:\n slope = 1/sqrt(3)\n #use to set create verticle bounds\n if x - self.center_x <= 0:\n slope *= -1\n\n ################\n x_rel = x - self.center_x #bounds depends on x location of the mouse \n bottom_bound = self.bottom - (x_rel*slope)\n top_bound = self.top - (x_rel*-slope)\n ################\n\n if y >= top_bound and y <= bottom_bound:\n if Ctrl_Vars.Left_MouseDown:\n self.press() # if all conditions are met use functionality",
"def isInside(x1, y1, x2, y2, x3, y3, x, y):\n # Calculate area of triangle ABC\n A = area (x1, y1, x2, y2, x3, y3)\n \n # Calculate area of triangle PBC\n A1 = area (x, y, x2, y2, x3, y3)\n \n # Calculate area of triangle PAC\n A2 = area (x1, y1, x, y, x3, y3)\n \n # Calculate area of triangle PAB\n A3 = area (x1, y1, x2, y2, x, y)\n \n # Check if sum of A1, A2 and A3\n # is same as A\n if(A == A1 + A2 + A3):\n return True\n else:\n return False",
"def propogate(self):\r\n X = len(grid[0])\r\n Y = len(grid)\r\n for DIR in [[1,0], [-1,0], [0,1], [0,-1]]:\r\n target_x, target_y = self.block_loc[0]+DIR[0], self.block_loc[1]+DIR[1]\r\n if 0 <= target_x < X and 0 <= target_y < Y: #if inbounds:\r\n target_block = grid[target_y][target_x]\r\n if not target_block.collapsed: #only ping uncollapsed blocks\r\n self.send_update(target_block,DIR)\r\n return",
"def _inside(self, x, y):\n wx, wy, w, h = self._raw_graph_window_dim()\n if wx <= x < wx + w and wy <= y < wy + h:\n return True\n return False",
"def isInside(self, point):\n # we rotate back the point to the frame parallel to the axis of the ellipse\n rotatedPoint = self.rotatePoint(point)\n # we check if each point is inside the associated liquid drop\n return ((rotatedPoint[:, :, 0]/self.axisA[:, None])**2 + (rotatedPoint[:, :, 1]/self.axisB[:, None])**2 < 1)",
"def _has_blocks_to_place(self, exclude=None):\n for block_ in self._inventory:\n if block_ != exclude:\n return True\n return False",
"def nail_in(self):\n if not self.in_wall:\n self.in_wall = True",
"def check(self,a,x,y):\r\n return not self.exitsinrow(self.rows,x,a) and not self.existsincol(self.rows,y,a) and \\\r\n not self.exitsinblock(self.rows, x - x % 3, y - y % 3,a)",
"def _set_block(self, pos, block_):\n raise NotImplementedError",
"def block(array):\r\n grid = []\r\n for z in range(0,7,3): #0,3,6\r\n #vertical down 3\r\n for n in range(0,7,3): #0,3,6\r\n #horiz across 3\r\n line = []\r\n for i in range(3):\r\n for j in range(3):\r\n vert,hor = i+z,j+n\r\n line.append(array[vert][hor])\r\n grid.append(line)\r\n won = True\r\n for i in range(len(grid)):\r\n if won == True:\r\n if len(grid[i]) != len(set(grid[i])):\r\n won = False\r\n else:\r\n pass\r\n else:\r\n break\r\n return won",
"def inside_rectangle(self, x, y):\n if (self.pos.x - self.width < x < self.pos.x + self.width and\n self.pos.y - self.height < y < self.pos.y + self.height):\n return True",
"def collision(self, block):\n if self.pos_x == block.pos_x and self.pos_y+self.height == block.pos_y:\n self.col_d = True\n if self.pos_x == block.pos_x+block.width and self.pos_y == block.pos_y:\n self.col_l = True\n if self.pos_x == block.pos_x-self.width and self.pos_y == block.pos_y:\n self.col_r = True",
"def is_inside(self, p) -> bool:\r\n h = self.wedge\r\n inside = False\r\n if lefton(h, p):\r\n while not h.nexthedge is self.wedge:\r\n h = h.nexthedge\r\n if not lefton(h, p):\r\n return False\r\n return True\r\n else:\r\n return False",
"def block_active(self, x):"
]
| [
"0.64218473",
"0.63526237",
"0.6153189",
"0.6030201",
"0.60202265",
"0.59773934",
"0.59161836",
"0.5609168",
"0.558946",
"0.55853575",
"0.5523145",
"0.5518083",
"0.54748404",
"0.5467124",
"0.545464",
"0.5417525",
"0.5356588",
"0.5327588",
"0.5282958",
"0.5258888",
"0.5206799",
"0.5200192",
"0.51999044",
"0.51917124",
"0.51876307",
"0.5186447",
"0.51792115",
"0.5178668",
"0.5170953",
"0.5159326"
]
| 0.8620236 | 0 |
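To make the contrast explicit, a minimal sketch of `set_if_inside` versus a plain `set_block` call; `EmptyRegion`, `Block`, and the 0..511 coordinate range of a region are assumptions taken from the same anvil-style API, not from the record itself:

```python
# Hypothetical usage; EmptyRegion / Block names are assumed, not confirmed.
region = EmptyRegion(0, 0)                # assumed to cover world x, z in 0..511
dirt = Block('minecraft', 'dirt')

region.set_if_inside(dirt, 100, 64, 10)   # inside the region: block is placed
region.set_if_inside(dirt, 600, 64, 10)   # outside: silently ignored
# region.set_block(dirt, 600, 64, 10)     # by contrast, this would raise OutOfBoundsCoordinates
```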
Fills in blocks from ``(x1, y1, z1)`` to ``(x2, y2, z2)`` in a rectangle. | def fill(self, block: Block, x1: int, y1: int, z1: int, x2: int, y2: int, z2: int, ignore_outside: bool=False):
if not ignore_outside:
if not self.inside(x1, y1, z1):
raise OutOfBoundsCoordinates(f'First coords ({x1}, {y1}, {z1}) is not inside this region')
if not self.inside(x2, y2, z2):
raise OutOfBoundsCoordinates(f'Second coords ({x2}, {y2}, {z2}) is not inside this region')
for y in from_inclusive(y1, y2):
for z in from_inclusive(z1, z2):
for x in from_inclusive(x1, x2):
if ignore_outside:
self.set_if_inside(block, x, y, z)
else:
self.set_block(block, x, y, z) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fill_rect(self, value, x1, y1, x2, y2):\n self.fill(x1, y1, x2-x1, y2-y1)",
"def fill_triangle(self, x0, y0, x1, y1, x2, y2, color=Color['white']):\n pointlist = [(x0, y0), (x1, y1), (x2, y2)]\n pygame.draw.polygon(self.display, color, pointlist)",
"def fill(self, xrange=range(0,16), yrange=range(0,16), zrange=range(0,16), **blockstate):\n blk = self.block_state_index(**blockstate)\n seq = array(self._blocks.typecode, (blk for i in xrange))\n\n def fct(section, blocks, row, *args):\n blocks[row] = seq\n\n self.row_apply(fct, xrange, yrange, zrange)",
"def fill(self, value, x, y, width, height):\n for sub_y in range(y, y+height):\n for sub_x in range(x, x+width):\n self[sub_x, sub_y] = value",
"def fill(self, topleft, downright, value):\n\t\ttopleft = Point(topleft)\n\t\tdownright = Point(downright)\n\t\tfor x in range(topleft.x, downright.x + 1):\n\t\t\tfor y in range(topleft.y, downright.y + 1):\n\t\t\t\tself.set_cell(Point(x, y), value)",
"def fill_grid(self, gx, gy, color=Color['white']):\n area = [gx * self.px, gy * self.py, self.px, self.py]\n pygame.draw.rect(self.display, color, area)",
"def fill_rect(self, x, y, width, height, color=Color['white']):\n area = [x, y, width, height]\n pygame.draw.rect(self.display, color, area)",
"def fill_rect(framebuf, x, y, width, height, color):\n # pylint: disable=too-many-arguments\n for _x in range(x, x + width):\n for _y in range(y, y + height):\n GS2HMSBFormat.set_pixel(framebuf, _x, _y, color)",
"def fill_rectangle(min_x: float, min_y: float, max_x: float, max_y: float, color: Color):\n turtle.goto(min_x, min_y)\n turtle.fillcolor(color)\n turtle.begin_fill()\n turtle.goto(max_x, min_y)\n turtle.goto(max_x, max_y)\n turtle.goto(min_x, max_y)\n turtle.end_fill()",
"def fill_rect(framebuf, x, y, width, height, color):\n # pylint: disable=too-many-arguments\n fill = (color >> 16) & 255, (color >> 8) & 255, color & 255\n for _x in range(x, x + width):\n for _y in range(y, y + height):\n index = (_y * framebuf.stride + _x) * 3\n framebuf.buf[index : index + 3] = bytes(fill)",
"def box(self, x0, y0, width, height):\n assert width > 1\n assert height > 1\n\n width -= 1\n height -= 1\n\n for x in range(x0, x0 + width):\n self.point(x, y0, \"-\")\n self.point(x, y0 + height, \"-\")\n\n for y in range(y0, y0 + height):\n self.point(x0, y, \"|\")\n self.point(x0 + width, y, \"|\")\n\n self.point(x0, y0, \"+\")\n self.point(x0 + width, y0, \"+\")\n self.point(x0, y0 + height, \"+\")\n self.point(x0 + width, y0 + height, \"+\")",
"def __fill_lip_solid(self, outer, inner):\n inner[0].reverse()\n inner[1].reverse()\n outer_curve = zip(outer[0], outer[1])\n inner_curve = zip(inner[0], inner[1])\n points = []\n for point in outer_curve:\n points.append(np.array(point, dtype=np.int32))\n for point in inner_curve:\n points.append(np.array(point, dtype=np.int32))\n points = np.array(points, dtype=np.int32)\n self.red_l = int(self.red_l)\n self.green_l = int(self.green_l)\n self.blue_l = int(self.blue_l)\n cv2.fillPoly(self.image, [points], (self.red_l, self.green_l, self.blue_l))",
"def fill_rect(framebuf, x, y, width, height, color):\n # pylint: disable=too-many-arguments\n for _x in range(x, x + width):\n offset = 7 - _x & 0x07\n for _y in range(y, y + height):\n index = (_y * framebuf.stride + _x) // 8\n framebuf.buf[index] = (framebuf.buf[index] & ~(0x01 << offset)) | (\n (color != 0) << offset\n )",
"def fill_box(self, x, y, w, h):\n\t\tpass",
"def fill_rect(framebuf, x, y, width, height, color):\n # pylint: disable=too-many-arguments\n while height > 0:\n index = (y >> 3) * framebuf.stride + x\n offset = y & 0x07\n for w_w in range(width):\n framebuf.buf[index + w_w] = (\n framebuf.buf[index + w_w] & ~(0x01 << offset)\n ) | ((color != 0) << offset)\n y += 1\n height -= 1",
"def fill_blockgroups(sf, df,geoids, colors):\n color_ids = []\n for i in geoids:\n color_ids.append(df[df.GEOID==i].index[0])\n \n i = 0\n for bg in color_ids:\n shape_ex = sf.shape(bg)\n x_lon = np.zeros((len(shape_ex.points),1))\n y_lat = np.zeros((len(shape_ex.points),1))\n for ip in range(len(shape_ex.points)):\n x_lon[ip] = shape_ex.points[ip][0]\n y_lat[ip] = shape_ex.points[ip][1]\n plt.fill(x_lon,y_lat, colors[i])\n i = i +1",
"def draw_triangle_filled(x1, y1,\n x2, y2,\n x3, y3, color):\n\n first_point = [x1, y1]\n second_point = [x2, y2]\n third_point = [x3, y3]\n point_list = (first_point, second_point, third_point)\n draw_polygon_filled(point_list, color)",
"def draw_rect_filled(self, x, y, w, h, color=None, aa=False):\n for i in range(x, x + w):\n self._draw_fast_vline(i, y, h, color, aa)",
"def add_blocks(self, bottom_left, top_right, texture, immediately=False):\n x, y, z = bottom_left\n X, Y, Z = top_right\n assert ((X - x) * (Y - x) * (Z - z)) <= 500000, \"Unable to fill more than 500,000 blocks. Number of blocks: {}\"\\\n .format((X - x) * (Y - x) * (Z - z))\n\n for x_coord in range(x, X, 1):\n for y_coord in range(y, Y, 1):\n for z_coord in range(z, Z, 1):\n self.add_block((x_coord, y_coord, z_coord), texture, immediately=immediately)",
"def _draw_blocks(self):\n\t\tsurface = pygame.display.get_surface()\n\t\tcolors = {\"J\": (15, 105, 245), \"I\": (85, 235, 255), \n\t\t\t\t \"L\":(255, 170, 0), \"S\": (45, 255, 55), \"Z\": (255, 4, 0),\n\t\t\t\t \"O\": (238, 255, 0), \"T\": (245, 0, 255)}\n\t\ty = math.floor((self.window_height - (self.window_height*0.9))/2)\n\t\tx = math.floor((self.window_width - ((self.window_height*0.9)/20)*10)/2)\n\t\tincrement = math.floor((self.window_height*0.9)/20)\n\t\t# loops through board and draws to the correct spot\n\t\tfor i in range(4, len(self.gameboard.get_board())):\n\t\t\tfor j in range(len(self.gameboard.get_board()[i])):\n\t\t\t\tx_incremented = math.floor(x + (increment * j))\n\t\t\t\ty_incremented = math.floor(y + (increment * (i-4)))\n\t\t\t\tif self.gameboard.get_board()[i][j][0] in colors:\n\t\t\t\t\tpygame.draw.rect(surface, colors[self.gameboard.get_board()[i][j][0]],\n\t\t\t\t\t\t\t\t\t(x_incremented, y_incremented, increment, increment))\n\t\t\t\t\t\t\t\t\t# x, y, x_wid, y_len\n\t\t\t\telse:\n\t\t\t\t\tpygame.draw.rect(surface, (0,0,0),\n\t\t\t\t\t\t\t\t\t(x_incremented, y_incremented, increment, increment))",
"def fill_rectangle(self, x, y, w, h, color):\n if self.is_off_grid(x, y, x + w - 1, y + h - 1):\n return\n if w > h:\n self.fill_hrect(x, y, w, h, color)\n else:\n self.fill_vrect(x, y, w, h, color)",
"def drawRectangle_3():\n\n # Calculate the coordinates for the four corners of the rectangle\n\n x1 = Lucia.xcor()\n y1 = Lucia.ycor()\n\n fourCorners = [(x1 + 50, y1), (x1 + 50, y1 + 100), (x1, y1 + 100), (x1, y1)]\n \n Lucia.color(\"green\", \"yellow\")\n Lucia.begin_fill()\n \n Lucia.goto(fourCorners[0][0], fourCorners[0][1])\n Lucia.goto(fourCorners[1][0], fourCorners[1][1])\n Lucia.goto(fourCorners[2][0], fourCorners[2][1])\n Lucia.goto(fourCorners[3][0], fourCorners[3][1])\n\n Lucia.end_fill()",
"def draw_filled_polygon(\n self, points: Iterable[Vec3], properties: Properties\n ) -> None:\n raise NotImplementedError",
"def fill_rect(self, x, y, width, height, color):\n self._set_window(x, y, x + width - 1, y + height - 1)\n chunks, rest = divmod(width * height, _BUFFER_SIZE)\n pixel = _encode_pixel(color)\n self.dc.on()\n if chunks:\n data = pixel * _BUFFER_SIZE\n for _ in range(chunks):\n self._write(None, data)\n if rest:\n self._write(None, pixel * rest)",
"def _fill_cell_rectangle(size = (20, 20), layers = (0, 1, 3),\n densities = (0.5, 0.25, 0.7),\n inverted = (False, False, False)):\n D = Device('fillcell')\n for layer, density, inv in zip(layers, densities, inverted):\n rectangle_size = np.array(size) * sqrt(density)\n # r = D.add_ref(rectangle(size = rectangle_size, layer = layer))\n R = rectangle(size = rectangle_size, layer = layer)\n R.center = (0, 0)\n if inv is True:\n A = rectangle(size = size)\n A.center = (0,0)\n A = A.get_polygons()\n B = R.get_polygons()\n p = gdspy.boolean(A, B, operation = 'not')\n D.add_polygon(p, layer = layer)\n else:\n D.add_ref(R)\n return D",
"def make_grass_field(shape, x, y, z):\n shape.penup()\n shape.speed(10)\n shape.setpos(x,y)\n shape.color(z)\n shape.begin_fill()\n for side in range(2):\n shape.forward(800)\n shape.right(90)\n shape.forward(800)\n shape.right(90)\n shape.end_fill()\n\n # ...",
"def fill_rectangle(D, fill_size = (40, 10), avoid_layers = 'all',\n include_layers = None, margin = 100,\n fill_layers = (0, 1, 3), fill_densities = (0.5, 0.25, 0.7),\n fill_inverted = None, bbox = None):\n # Create the fill cell.\n # If fill_inverted is not specified, assume all False\n fill_layers = _loop_over(fill_layers)\n fill_densities = _loop_over(fill_densities)\n if fill_inverted is None: fill_inverted = [False]*len(fill_layers)\n fill_inverted = _loop_over(fill_inverted)\n if len(fill_layers) != len(fill_densities):\n raise ValueError('[PHIDL] phidl.geometry.fill_rectangle() '\n '`fill_layers` and `fill_densities` parameters '\n 'must be lists of the same length')\n if len(fill_layers) != len(fill_inverted):\n raise ValueError('[PHIDL] phidl.geometry.fill_rectangle() '\n '`fill_layers` and `fill_inverted` parameters must '\n 'be lists of the same length')\n\n fill_cell = _fill_cell_rectangle(size = fill_size, layers = fill_layers,\n densities = fill_densities,\n inverted = fill_inverted)\n F = Device(name = 'fill_pattern')\n\n if avoid_layers == 'all':\n exclude_polys = D.get_polygons(by_spec = False, depth = None)\n else:\n avoid_layers = [_parse_layer(l) for l in _loop_over(avoid_layers)]\n exclude_polys = D.get_polygons(by_spec = True, depth = None)\n exclude_polys = {key:exclude_polys[key]\n for key in exclude_polys if key in avoid_layers}\n exclude_polys = itertools.chain.from_iterable(exclude_polys.values())\n\n if include_layers is None:\n include_polys = []\n else:\n include_layers = [_parse_layer(l) for l in _loop_over(include_layers)]\n include_polys = D.get_polygons(by_spec = True, depth = None)\n include_polys = {key:include_polys[key]\n for key in include_polys if key in include_layers}\n include_polys = itertools.chain.from_iterable(include_polys.values())\n\n if bbox is None: bbox = D.bbox\n\n raster = _rasterize_polygons(polygons = exclude_polys, bounds = bbox,\n dx = fill_size[0], dy = fill_size[1])\n raster = raster & ~_rasterize_polygons(polygons = include_polys,\n bounds = bbox, dx = fill_size[0],\n dy = fill_size[1])\n raster = _expand_raster(raster, distance = margin/np.array(fill_size))\n\n for i in range(np.size(raster, 0)):\n sub_rasters = [list(g) for k, g in itertools.groupby(raster[i])]\n j = 0\n for s in sub_rasters:\n if s[0] == 0:\n x, y = _raster_index_to_coords(i, j, bbox, fill_size[0],\n fill_size[1])\n # F.add(gdspy.CellArray(ref_cell = fill_cell,\n # columns = len(s), rows = 1,\n # spacing = fill_size, ))\n a = F.add_array(fill_cell, columns = len(s), rows = 1,\n spacing = fill_size)\n a.move((x, y))\n j += len(s)\n\n return F",
"def fill(self, filler):\n\n for x in range(self.__xmax):\n for y in range(self.__ymax):\n self.__data[(x,y)] = filler(x,y) % self.mod",
"def draw_block(position, color):\n x = position.col*DX+DX+2\n y = position.row*DY+DY+2\n width = DX-4\n height = DY-4\n pygame.draw.rect(screen, color, (x,y,width,height), 0)",
"def proc_filled_polygon(self, tokens):\n\n return self._proc_polygon(tokens, filled=True)"
]
| [
"0.6759221",
"0.66726756",
"0.6315122",
"0.6239038",
"0.6214571",
"0.61948085",
"0.6184842",
"0.6096984",
"0.60784644",
"0.6054093",
"0.5992815",
"0.5992283",
"0.5988265",
"0.59807956",
"0.5937357",
"0.5853239",
"0.57890373",
"0.57832766",
"0.5764153",
"0.5758203",
"0.57576334",
"0.5743747",
"0.5721083",
"0.5701469",
"0.5674279",
"0.56725866",
"0.5606686",
"0.55978745",
"0.55959696",
"0.5578735"
]
| 0.761372 | 0 |
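A hedged sketch of calling the `fill` document above. `EmptyRegion` and `Block` are assumed names; the inclusive-corner behaviour and the `ignore_outside` fallback to `set_if_inside` come directly from the record:

```python
# Hypothetical usage; EmptyRegion / Block names are assumed, not confirmed.
region = EmptyRegion(0, 0)
stone = Block('minecraft', 'stone')

# Fill a 10 x 3 x 10 cuboid; both corner coordinates are inclusive.
region.fill(stone, 0, 64, 0, 9, 66, 9)

# With ignore_outside=True, out-of-region blocks are skipped via set_if_inside()
# instead of raising OutOfBoundsCoordinates on the corner check.
region.fill(stone, -5, 64, -5, 20, 64, 20, ignore_outside=True)
```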
Return the last element from an iterator. | def last(iterator):
item = None
for item in iterator:
pass
return item | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def last(iterable):\n it = iter(iterable)\n item = next(it)\n for item in it:\n pass\n return item",
"def return_last(iter):\n for thing in iter:\n pass\n return thing",
"def last(iterable):\n d = deque(iterable, maxlen=1)\n try:\n return d.pop()\n except IndexError:\n raise ValueError(\"Cannot return last item from empty iterable {!r}\".format(iterable))",
"def last_item(self):\n return self.container[self.length-1]",
"def last(seq):\n try:\n return seq[-1]\n except TypeError:\n old = None\n it = iter(seq)\n while True:\n try:\n old = next(it)\n except StopIteration:\n return old",
"def last(self):\n if self.is_empty():\n raise Empty('list is empty')\n return self._tail._element",
"def last(self):\n return self.deque[-1]",
"def last(self):\n if self.is_empty():\n raise Empty('list is empty')\n return self._tail._prev._element",
"def get_last(self):\n return self.get_block(len(self.chain)-1)",
"def last(iterable, *default):\n\tassert len(default) <= 1\n\titerable = iter(iterable)\n\n\ttry:\n\t\tx = next(iterable)\n\texcept StopIteration:\n\t\tif default:\n\t\t\treturn default[0]\n\t\traise\n\n\tfor x in iterable:\n\t\tpass\n\treturn x",
"def last(self):\n if self.tail:\n self.cursor = self.tail\n return self.cursor\n return None",
"def last(self):\n if self.is_empty():\n raise Emtpy(\"List is empty!\")\n return self._trailer._prev._element",
"def last_el(x):\n if N.isscalar(x): return x\n else: return x[-1]",
"def getLast(self):\r\n return self._data[-1]",
"def findlastindex(iteratee, seq):\n iteratee = fnc.iteratee(iteratee)\n return next((i for i, value in reversed(tuple(enumerate(seq))) if iteratee(value)), -1)",
"def getLast(self):\n\n if self.firstItem == None:\n raise Exception(\"cannot getLast - linked list is empty\")\n\n # 1. Find the last item\n lastItem = self.firstItem\n while lastItem.next != None:\n lastItem = lastItem.next\n\n # 2. Return the value\n return lastItem",
"def last(self):\n return self.last and self.last.value or None",
"def last(self):\n if self.is_empty():\n raise Empty(\"Deque is empty\")\n return self._trailer._prev._element #real item just before trailer",
"def peek_last(self):\n if self.is_empty(): raise RuntimeError(\"Empty list\")\n return self.tail.data",
"def at_last(self):\n return self._collection.at_last()",
"def last(self):\n return _(self._[-1])",
"def last(self, callback: Callable = None) -> Any:\n if callback:\n return self.filter(callback).last()\n\n return self[-1]",
"def get_last(self, limit = 1):\n if len(self.data) == 0:\n return None\n self.sort_and_reduce()\n if len(self.data) < limit:\n limit = len(self.data)\n\n return self.data[-limit:][0]",
"def last(self):\n if self.is_empty():\n raise ValueError('Queue is empty!')\n return self.last_node().element().value()",
"def last_node(self):\n nodes = self.as_list()\n\n if nodes:\n # If there are nodes return the last one.\n return nodes[-1]\n # No nodes, return None\n return None",
"def last_block(self):\n return self.chain[-1]",
"def last_block(self):\n return self.chain[-1]",
"def last(self):\n return self._reduce_for_stat_function(lambda col: F.last(col, ignorenulls=True),\n only_numeric=False)",
"def get_last_index(self):\n return len(self.chain) - 1",
"def get_last(self, count):"
]
| [
"0.81704766",
"0.8126013",
"0.7631656",
"0.7508389",
"0.7461644",
"0.72906893",
"0.7239407",
"0.7238677",
"0.7135802",
"0.71334594",
"0.7124381",
"0.70423234",
"0.7003483",
"0.6959612",
"0.6933803",
"0.6911308",
"0.69061714",
"0.686835",
"0.6841431",
"0.68262434",
"0.68242246",
"0.6768055",
"0.6713281",
"0.66753167",
"0.65752965",
"0.6524731",
"0.6524731",
"0.65027946",
"0.64871055",
"0.64798677"
]
| 0.8675218 | 0 |
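Since the `last` document above is self-contained, its behaviour can be illustrated with plain Python; the `None` result for an empty iterator follows directly from the code in the record:

```python
def last(iterator):
    """Return the final item of an iterator, or None if it yields nothing."""
    item = None
    for item in iterator:
        pass
    return item

assert last(iter(range(5))) == 4              # last element of 0..4
assert last(x * x for x in range(3)) == 4     # works with generator expressions
assert last(iter([])) is None                 # empty input falls back to None
```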
Initializes population, cache and storage | def initialize(self):
self.population.initialize()
self.cache.initialize()
if self.storage:
self.storage.initialize() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def init_population(self):\n pass",
"def init(self, **kwargs):\n self._d = {}\n self._th = None\n self._run = True\n self.load()",
"def __init__(self):\n self._datastore = dict()",
"def initialise(self):",
"def init(self):\n pass",
"def init(self):\n pass",
"def init(self):\n pass",
"def init(self):\n pass",
"def init(self):\n pass",
"def init(self):\n pass",
"def init(self):\n pass",
"def init(self):\n pass",
"def setup(self): \n self.suburbs_dict = dict()\n self.raw_proIds_dict = dict()\n self.propertyIds_dict = dict()\n self.valuations = dict()",
"def initialise(self):\r\n return",
"def initialise(self):\r\n return",
"def __init__(self):\n\n self.storage: list = Storage()\n\n # Start for get data in API and set in storage\n self._set_proxies_in_storage()",
"def __init__(self):\n self._store: ObservationStore = {}\n self._memory_lower_bound = 0",
"def initialize(self):\n pass",
"def initialize(self):\n pass",
"def initialize(self):\n pass",
"def initialize(self):\n pass",
"def initialize(self):\n pass",
"def __init__(self):\n self.__dict__ = dict()\n self.load()",
"def memb_init(self):\n self.initialize()",
"def __init__(self):\n self.store = {}",
"def __init__(self):\r\n self.__storage = {}",
"def initialize(self):\n\t\tpass",
"def initialize(self):\r\n pass",
"def initialize(self):\r\n pass",
"def _init_storage(self):\n if self._ is None:\n self._ = Parameters(self)"
]
| [
"0.7593586",
"0.73463607",
"0.73275894",
"0.7324741",
"0.7172695",
"0.7172695",
"0.7172695",
"0.7172695",
"0.7172695",
"0.7172695",
"0.7172695",
"0.7172695",
"0.71597743",
"0.7140536",
"0.7140536",
"0.70901275",
"0.70898193",
"0.7082336",
"0.7082336",
"0.7082336",
"0.7082336",
"0.7082336",
"0.70596814",
"0.7058529",
"0.7051508",
"0.7035571",
"0.7026737",
"0.70220894",
"0.70220894",
"0.70195115"
]
| 0.883915 | 0 |
Store an individual in the storage backend. | def store_individual(self, hash, individual):
if self.storage:
self.storage.write_individual(hash, self.generation, individual ) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save(self):\r\n if self._storage:\r\n self._storage.store(self, *self._storage_args)\r\n else:\r\n raise StorageNotSetError()",
"def put(self, name, obj, lifetime=ObjectLifetime.Event):\n\n # check if object with the same name is already stored?\n if name in self.store.keys():\n raise AlreadyInDataStore()\n # no, store it!\n self.store[name] = (lifetime, obj)",
"def save(self, storage, name, meta):\n with closing(self.open()) as handle:\n storage.save(name, handle)",
"def Store(self):\n\n if FLAGS.verbose or FLAGS.verbose_writes:\n print 'Writing track:'\n for key in sorted(self.persistant):\n print ' %s = %s' %(key, self.persistant[key])\n\n if not self.persistant:\n return\n \n try:\n self.db.WriteOneRow('tracks', 'id', self.persistant)\n except MySQLdb.Error, (errno, errstr):\n if errno != 1064:\n raise TrackException(self.db, 'Could not store track %s: %s \"%s\"'\n %(self.persistant['id'], errno, errstr))\n except sql.FormatException, e:\n raise e\n except Exception, e:\n raise TrackException(self.db, 'Could not store track: %s: \"%s\" (%s)'\n %(self.persistant['id'], e, type(e)))",
"def save(self):\n if not self.id:\n self.id = uuid4()\n DataStore.add_instance(self)",
"def save(self):\n response = settings.database.put_item(Item=self.to_dict())\n raise_for_response(response)",
"def add(self, storage_object: StorageObject) -> None:\n self._store[storage_object.instance_id] = storage_object",
"def store_object(self, _object):\n\n # replace an existing list member, else, append\n\n index = [self.object_store.index(_object_) for _object_ in self.object_store if _object_.LocalID == _object.LocalID]\n\n if index != []:\n\n self.object_store[index[0]] = _object\n\n #if self.settings.LOG_VERBOSE: logger.debug('Updating a stored object: %s in region \\'%s\\'' % (_object.FullID, self.region.SimName))\n\n else:\n\n self.object_store.append(_object)\n\n #if self.settings.LOG_VERBOSE: logger.debug('Stored a new object: %s in region \\'%s\\'' % (_object.LocalID, self.region.SimName))",
"def save(self, key, sort_key, _object):\n return self.storage.set(key, sort_key, _object.to_json())",
"def save(self):\n value = self.volume()\n session = self.resource.conn.session\n # self.artifice.\n try:\n tenant_id = self.resource[\"tenant_id\"]\n except KeyError:\n tenant_id = self.resource[\"project_id\"]\n resource_id = self.resource[\"resource_id\"]\n\n tenant = session.query(tenants.Tenant).get(tenant_id)\n\n if tenant is None:\n res = resources.Resource()\n tenant = tenants.Tenant()\n tenant.id = tenant_id\n\n res.id = resource_id\n res.tenant = tenant\n session.add(res)\n session.add(tenant)\n else:\n try:\n res = session.query(resources.Resource).filter(resources.Resource.id == resource_id)[0]\n tenant = res.tenant\n except IndexError:\n res = resources.Resource()\n tenant = tenants.Tenant()\n tenant.id = tenant_id\n res.id = resource_id\n res.tenant = tenant\n session.add(res)\n session.add(tenant)\n\n this_usage = usage.Usage(\n res,\n tenant,\n value,\n self.start,\n self.end,\n )\n session.add(this_usage)\n session.commit() # Persist to Postgres",
"def store(self, item):\n cursor = self.conn.cursor()\n # Store the item\n if item:\n cursor.execute(*self._build_insert(item, 'items'))\n for file_ in item.files:\n cursor.execute(\"\"\"insert into files (filename, item_id)\n values (?, ?)\"\"\", (file_, item.kg_id))\n self.conn.commit()\n self.logger.info(\"Succesfully stored item %d\" % item.kg_id)",
"def _store(self):\n database.mongo_store_object_by_label(self, self.label)",
"def save(self):\n store = datastore.DataStore()\n store.connect()\n store.setup()\n store.put(self.as_doc())",
"def store_item(self, item_in_json): # pragma: no cover\n raise NotImplementedError",
"def store(self, key: object, value: object):\n self._user_data.update({key: value})",
"def storage_create(context, values):\n if not values.get('id'):\n values['id'] = uuidutils.generate_uuid()\n\n storage_ref = models.Storage()\n storage_ref.update(values)\n\n session = get_session()\n with session.begin():\n session.add(storage_ref)\n\n return _storage_get(context,\n storage_ref['id'],\n session=session)",
"def store(self, key, obj):\n attrs = self.load_attrs()\n attrs[key] = obj\n self.store_attrs(attrs)",
"def save(self):\n self.updated_at = datetime.now()\n storage.save()",
"def save(self):\n self.updated_at = datetime.now()\n storage.save()",
"def store(self):\n\n pass",
"def store(self, key, value):\n pass",
"def put(self, uid):\n raise NotImplementedError",
"def _persist(self):\n trunk.set(self.uuid, self.json)",
"def save(self, data):\n data['id'] = self.id\n\n self.db.append(data)",
"def put(cls, obj):\n pass",
"def save(self):\n\n self.updated_at = datetime.now()\n models.storage.save()",
"def _save(self, name, content):\n cloud_obj = self.container.create_object(name)\n mimetype, _ = mimetypes.guess_type(name)\n cloud_obj.content_type = mimetype\n cloud_obj.send(content)\n return name",
"def save(self, obj):\n self.uow.save(obj)\n self.imap.save(obj)\n state(obj).session = self",
"def store_blob(self, data, download_meta, blob_id=None):\n if blob_id is None:\n blob_id = uuid.uuid4()\n elif isinstance(blob_id, str):\n blob_id = uuid.UUID(blob_id)\n session = self.DBSession()\n blob = Blob(blob_id=blob_id, data=data)\n session.add(blob)\n download_meta['blob_id'] = str(blob_id)",
"def save(self):\n self.updated_at = datetime.now()\n models.storage.save()"
]
| [
"0.6551306",
"0.6530587",
"0.64019746",
"0.6363247",
"0.6284706",
"0.6276256",
"0.6260118",
"0.6160291",
"0.61023945",
"0.6091016",
"0.6070906",
"0.6041757",
"0.60393125",
"0.6016974",
"0.6006164",
"0.5992561",
"0.5963782",
"0.59499836",
"0.59499836",
"0.5940263",
"0.5908173",
"0.589369",
"0.58792853",
"0.5876155",
"0.58726287",
"0.58672833",
"0.5848838",
"0.582103",
"0.5811204",
"0.5792041"
]
| 0.74466157 | 0 |
Returns best individual in population (relies on Population method) | def best_individual(self):
return self.population.best_individual() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _best_individual(self):\n return max(self._population, key=attrgetter(\"fitness\"))",
"def get_best_individual(population: List[IndividualType]) -> IndividualType:\n best_individual = population[0]\n for individual, rating in population:\n if rating < best_individual[1]:\n best_individual = (individual, rating)\n return best_individual",
"def get_best(self, population):\n best = min(population, key=self.cost_function)\n return best, self.cost_function(best)",
"def get_best_individual(self):\n return self._best_indv",
"def _get_best(self, populations, func):\n best = None\n for population in populations:\n for item in population:\n if not best:\n best = item\n elif func.fit(*item) > func.fit(*best):\n best = item\n return best",
"def result(self):\n return min(self.population, key=lambda individual: individual.get_fitness())",
"def best(self):\n self.population.ascendent_sort()\n self.best_genome = self.population.collection[0]\n return self.best_genome",
"def best_observer(population, num_generations, num_evaluations, args):\r\n print(\"Best Individual: {0}\\n\".format(str(max(population))))",
"def _find_solution(self, population, num_of_best_chromosomes):\n data = self._Individuals()\n for x in population:\n curr_fit = self._fitness(x)\n data.append_object(self._Individual(curr_fit, x))\n return data.sort_objects()[:num_of_best_chromosomes]",
"def get_best(self) -> Chromosome:\n if not (self._best_chromosome is None): # if the best chromosome is unchanged since the last calculation\n return self._best_chromosome\n\n best = None\n best_fitness = None\n\n for chromosome in self._population:\n chromosome_fitness = chromosome.get_fitness()\n\n if best_fitness is None or self._is_fitter(chromosome_fitness, best_fitness):\n best = chromosome\n best_fitness = chromosome_fitness\n\n return best",
"def get_personal_best(self):\n return self._personal_best",
"def pick_one(self):\n index = 0\n r = random.random()\n while r >= 0:\n r = r - self.normalised_fitness[index]\n index += 1\n index -= 1\n return self.population[index]",
"def get_best(self):\n scores, ids = self.sort_best()\n return scores[1], ids[1]",
"def initPopulation(self):\n for i in range(0, self.popSize):\n individual = Individual(self.genSize, self.data)\n individual.computeFitness()\n self.population.append(individual)\n\n self.best = self.population[0].copy()\n for ind_i in self.population:\n if self.best.getFitness() > ind_i.getFitness():\n self.best = ind_i.copy()\n print (\"Best initial sol: \",self.best.getFitness())",
"def personal_best(scores):\n# return sorted(scores, reverse=True)[0]\n return max(scores)",
"def get_best_fitness(self):\n f = max(self.characters, key=operator.attrgetter('fitness'))\n self.best_fitness = round(f.fitness, 3)\n self.best_candidate = f",
"def choose_mi_best(self, R):\n population = np.empty([self.P.shape[0] + R.shape[0], 2*self.d + 1])\n i = 0\n for individual in np.vstack([self.P, R]):\n population[i, 0] = -self.J(individual[0:self.d], self.nCEC)\n population[i, 1:] = individual\n i = i+1\n\n sorted_population = population[np.argsort(population[:, 0])]\n\n return sorted_population[-self.mi:, 1:]",
"def best_genome(self):\n return self.best_genomes(1)[0]",
"def next_population():\n result = [best]\n while len(result) < population_size:\n chromosomes = crossover(tournament(), tournament()) if random() < crossover_rate else [tournament()]\n for chromosome in chromosomes:\n for i in range(box_count):\n if random() < mutation_rate:\n j = randrange(box_count)\n (chromosome[i], chromosome[j]) = (chromosome[j], chromosome[i])\n result.append(Individual(evaluate(chromosome), chromosome))\n return result[:population_size]",
"def bestIndividual(hof, X, y):\n maxAccurcy = 0.0\n for individual in hof:\n #print(individual.fitness.values)\n #print(maxAccurcy)\n if(individual.fitness.values[0] > maxAccurcy):\n maxAccurcy = individual.fitness.values\n _individual = individual\n\n _individualHeader = [list(X)[i] for i in range(\n len(_individual)) if _individual[i] == 1]\n return _individual.fitness.values, _individual, _individualHeader",
"def personal_best(scores):\n return max(scores)",
"def get_best_solution(self):\n if not self.tours:\n raise Exception('No solution has been computed yet')\n scores = {s:get_cost(self.tours[s],self) for s in self.tours}\n best = min(scores,key=scores.get)\n print('The best solution is given by {} with score {}'.format(best,scores[best]))\n return self.tours[best]",
"def best_genome(self) -> Genome:\n return self._candidate",
"def best(self, side):\n return Library.functions.best(self._book, side)",
"def best_value(self):\r\n return self._best_value",
"def get_best_particle(self):\n index = self.weights.argmax()\n return self.particles[index, :]",
"def getBestOption(self):\n if len(self.Data) < 1:\n return None\n else:\n bestR = max(self.Data.items(), key=lambda x: x[1]['SPat'].I)\n return bestR[1]",
"def population_selection(population, sack, max_weight):\n sorted_population = population_performance(population, sack, max_weight)\n new_gen = []\n \n for fit_member in range(len(sorted_population) - 2): #killing two weakest\n new_gen.append(sorted_population[fit_member][0])\n\n return new_gen",
"def get_individual_fitness(individual):\r\n fitness = 0\r\n # A COMPLETER\r\n \r\n #Si distance avec le point objectif diminue, alors fitness augmente ?\r\n \r\n return fitness",
"def selection(population, rate):\n\n\tmating_pool = []\n\tscores = []\n\n\tdef fitness(timetable_info):\n\t\t\"\"\" Calculates the fitness of an individual \"\"\"\n\n\t\treturn calc_score(timetable_info[0][0],\n\t\t\t\t\t\t timetable_info[0][1],\n\t\t\t\t\t\t timetable_info[0][2])\n\n\t# choose the fittest individuals\n\tpopulation = sorted(population, key=fitness, reverse=True)\n\n\t# set max and range\n\trate = int(rate * 100)\n\n\tfor i in range(rate):\n\n\t\t# fittest schedules have highest probabilities\n\t\tscores.append(calc_score(population[i][0][0], population[i][0][1], population[i][0][2]))\n\t\tmating_pool.append(population[i])\n\n\treturn mating_pool"
]
| [
"0.84718853",
"0.81538415",
"0.81047726",
"0.7823421",
"0.7633607",
"0.74540997",
"0.7401855",
"0.7353317",
"0.7279851",
"0.7126376",
"0.709254",
"0.7077304",
"0.68827885",
"0.67887175",
"0.6566941",
"0.6518445",
"0.64974064",
"0.64597523",
"0.64550704",
"0.64477754",
"0.6445187",
"0.6382104",
"0.6355458",
"0.6331994",
"0.63272566",
"0.6292308",
"0.6289052",
"0.6274741",
"0.62367314",
"0.6227007"
]
| 0.8571775 | 0 |
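The `initialize`, `store_individual` and `best_individual` records above (and the `evolve_population` record that follows) all appear to belong to one GA experiment class; a hedged sketch of how such a driver loop might fit together — the `Experiment` constructor and the `genome_hash` helper are assumptions, not part of the records:

```python
# Hypothetical driver loop; Experiment(...) and genome_hash(...) are assumed names.
experiment = Experiment(population=population, cache=cache, storage=storage)
experiment.initialize()                    # sets up population, cache and storage

for _ in range(50):                        # run 50 generations
    experiment.evolve_population()         # replaces the population, bumps the generation counter
    best = experiment.best_individual()    # delegates to Population.best_individual()
    # store_individual only writes when a storage backend was configured
    experiment.store_individual(genome_hash(best), best)
```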
creates a new population with population.evolve, sets the new population as the current and increment generation. After this the population in the previous generation is lost | def evolve_population(self, **options):
new_population = self.population.evolve(**options)
self.population = new_population
self.generation = new_population.generation_number | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_new_population(self):\n self.check_for_generation_cap()\n pop_container = list()\n for chromosome in self.population:\n partner = bm.select_partner(\n self.fitness_scores, self.population)\n child = bm.mutate(bm.crossover(chromosome, partner))\n pop_container.append(child)\n if self.population == pop_container:\n print(\"newly created populous is the same as the old populous\")\n self.population = pop_container\n print(\"generations: \", self.generations)\n self.generations += 1",
"def next_generation(self, population):\n pass",
"def _evolve_population(self):\n # save the old population\n self.old_population = self.population.copy()\n self.old_population_errors = self.population_errors.copy()\n\n # index pointers\n rind = numpy.random.permutation(4)+1\n\n # shuffle the locations of the individuals\n ind1 = numpy.random.permutation(self.population_size)\n pop1 = self.old_population[ind1,:]\n \n # rotate for remaining indices\n rot = numpy.remainder(self.rot_ind + rind[0], self.population_size)\n ind2 = ind1[rot,:]\n pop2 = self.old_population[ind2,:]\n\n rot = numpy.remainder(self.rot_ind + rind[1], self.population_size)\n ind3 = ind2[rot,:]\n pop3 = self.old_population[ind3,:]\n\n rot = numpy.remainder(self.rot_ind + rind[2], self.population_size)\n ind4 = ind3[rot,:]\n pop4 = self.old_population[ind4,:]\n\n rot = numpy.remainder(self.rot_ind + rind[3], self.population_size)\n ind5 = ind4[rot,:]\n pop5 = self.old_population[ind5,:]\n \n # population filled with best individual\n best_population = self.best_individual[numpy.newaxis,:].repeat(self.population_size,axis=0)\n\n # figure out the crossover ind\n xold_ind = numpy.random.rand(self.population_size,self.num_params) >= \\\n self.crossover_prob\n\n # get new population based on desired strategy\n # DE/rand/1\n if self.method == DE_RAND_1:\n population = pop3 + self.scale*(pop1 - pop2)\n population_orig = pop3\n # DE/BEST/1\n if self.method == DE_BEST_1:\n population = best_population + self.scale*(pop1 - pop2)\n population_orig = best_population\n # DE/best/2\n elif self.method == DE_BEST_2:\n population = best_population + self.scale * \\\n (pop1 + pop2 - pop3 - pop4)\n population_orig = best_population\n # DE/BEST/1/JITTER\n elif self.method == DE_BEST_1_JITTER:\n population = best_population + (pop1 - pop2) * \\\n ((1.0-0.9999) * \\\n numpy.random.rand(self.population_size,self.num_params) + \\\n self.scale)\n population_orig = best_population\n # DE/LOCAL_TO_BEST/1\n elif self.method == DE_LOCAL_TO_BEST_1:\n population = self.old_population + \\\n self.scale*(best_population - self.old_population) + \\\n self.scale*(pop1 - pop2)\n population_orig = self.old_population\n \n # crossover\n population[xold_ind] = self.old_population[xold_ind]\n\n # apply the boundary constraints\n for p in xrange(self.num_params):\n # get min and max\n min_val = self.param_ranges[p][0]\n max_val = self.param_ranges[p][1]\n\n # find where exceeded max\n ind = population[:,p] > max_val\n if ind.sum() > 0:\n # bounce back\n population[ind,p] = max_val + \\\n numpy.random.rand(ind.sum())*\\\n (population_orig[ind,p]-max_val)\n\n # find where below min\n ind = population[:,p] < min_val\n if ind.sum() > 0:\n # bounce back\n population[ind,p] = min_val + \\\n numpy.random.rand(ind.sum())*\\\n (population_orig[ind,p]-min_val)\n\n # set the class members\n self.population = population\n self.population_orig = population",
"def newGeneration(self):\n for i in range(0, len(self.population)):\n [ind1, ind2] = self.randomSelection()\n child = self.crossover(ind1, ind2)\n self.population[i].setGene(child)\n self.mutation(self.population[i])",
"def next_generation(self):\r\n self.calculate_stats()\r\n\r\n self.population = []\r\n\r\n # Getting amounts for different types of neural net replacements\r\n random_size = self.random_round(self.population_size * self.settings[\"random_offspring\"])\r\n elitism_size = self.random_round(self.population_size * self.settings[\"elitism_offspring\"])\r\n crossover_size = self.population_size - random_size - elitism_size\r\n\r\n # Keeping best neural nets (elitism)\r\n self.population.extend(self.sorted_population[i].copy() for i in range(elitism_size))\r\n\r\n # Adding neural nets with crossover\r\n\r\n probs = self._get_selection_probabilities()\r\n crossovers = (self._uniform_crossover(*np.random.choice(self.sorted_population, 2, replace=False, p=probs)) for _ in range(crossover_size))\r\n self.population.extend(crossovers)\r\n\r\n # Mutating neural nets\r\n for neural_net in self.population:\r\n if np.random.rand() < self.settings[\"mutation_rate\"]:\r\n neural_net.mutate(self.settings[\"mutation_chance\"], self.settings[\"mutation_amount\"])\r\n\r\n # Adding random nets\r\n self.population.extend(self._random_child() for _ in range(random_size))\r\n\r\n # Shuffling new population\r\n np.random.shuffle(self.population)\r\n\r\n # Increment current generation\r\n self.current_generation += 1",
"def _next_gen(self):\n\n selected = self.select()\n offspring = self.population.mate(mating_individuals=selected)\n self.population.delete(np.arange(len(self.population.individuals)))\n self.population.add(offspring)\n self._current_gen_count += 1\n self._gen_count_in_curr_iteration += 1\n self._function_evaluation_count += offspring.shape[0]",
"def evolve(self):\n self.generation = 0\n start_time = time.time()\n\n # while the termination criteria is not satisfied, makes another generation\n while not self.termination_criteria.satisfied(self.generation, time.time()-start_time, self.population):\n self.generation += 1\n #print str(self.generation)\n next_generation = []\n\n if self.elitism:\n # Keeps the 10% best individuals\n best_individuals = heapq.nsmallest(int(0.1*self.population_size), self.population, lambda individual: individual.get_fitness())\n next_generation += copy.deepcopy(best_individuals)\n\n # select genetic operation probabilistically\n # this is a roulette wheel selection\n operations = numpy.random.choice(['reproduction', 'crossover', 'mutation'], size=self.population_size, p=[self.reproduction, self.crossover, self.mutation]).tolist()\n individuals = numpy.random.choice(self.population, p=self.normalized_fitness, size=2*self.population_size, replace=True).tolist()\n\n while len(next_generation) < self.population_size:\n operation = operations.pop()\n individual = individuals.pop()\n individual.get_fitness() # enforce fitness calculation\n\n if operation == 'reproduction':\n next_generation.append(individual)\n elif operation == 'crossover':\n individual2 = individuals.pop()\n individual2.get_fitness() # enforce fitness calculation\n individual1, individual2 = individual.crossover(individual2)\n next_generation.append(individual1)\n next_generation.append(individual2)\n elif operation == 'mutation':\n individual1 = individual.mutate()\n next_generation.append(individual1)\n\n self.population = next_generation\n self.population_fitness = numpy.asarray(map(lambda individual: individual.get_fitness(), self.population))\n most_negative = self.population_fitness.min()\n self.normalized_fitness = numpy.asarray(map(lambda fitness: 1/math.pow(fitness+numpy.absolute(most_negative)+1, 1), self.population_fitness))\n s = float(self.normalized_fitness.sum())\n self.normalized_fitness = numpy.asarray(map(lambda fitness: fitness/s, self.normalized_fitness))\n\n mean = numpy.mean(self.population_fitness)\n std = numpy.std(self.population_fitness)\n min = self.population_fitness.min()\n\n info_mean = pandas.DataFrame([[self.generation, mean, min, std]], columns=[\"generation\", \"mean\", \"min\", \"std\"])\n self.generation_info = self.generation_info.append(info_mean, ignore_index=True)",
"def init_pop(self):\n genes = np.random.randn( self.population_size * self.individual.gene_count )\n self.population = genes.reshape((self.population_size, -1))\n #print(self.population)",
"def population_increase(population):\n population = population * decrease()\n return population",
"def generate_next_generation(environment, population, adaptive_mutation):\n\t# generate pairs of parents that can be used for recombination\n\tparent_pairs = parent_selection_ranking(population, num_pairs=len(population)*4)\n\n\t# generate offspring\n\toffspring = []\n\tfor i in range(len(parent_pairs)):\n\t\tchildren = create_offspring(environment, parent_pairs[i][0], parent_pairs[i][1], adaptive_mutation, num_offspring=1)\n\t\toffspring += children # concatenate children to offspring list\t\n\n\tnew_population = survival_selection_top(offspring, len(population))\n\treturn new_population",
"def evolve_population(population):\r\n \r\n \r\n pop = sort_population(population)\r\n \r\n # test de la meilleure solution ?\r\n\r\n #On choisit les parents\r\n #parents = pop[:NB_POP_TO_KEEP]\r\n for individual in pop[:NB_POP_TO_KEEP]:\r\n parents.append(i[0])\r\n \r\n #On garde des mauvais\r\n for individual in pop[NB_POP_TO_KEEP:]:\r\n if random.random() < PART_OF_BAD_TO_KEEP :\r\n parents.append(i[0])\r\n \r\n \r\n #On réalise des mutations\r\n for individual in parents :\r\n if random.random() < CHANCE_TO_MUTATE :\r\n indice = int( random.random() * PARAMETERS_COUNT )\r\n individual[indice] = random.random()\r\n \r\n #Create new pop\r\n size_parents = len(parents)\r\n size_to_create = POPULATION_COUNT - size_parents\r\n children = []\r\n while len(children) < size_to_create:\r\n parent1 = choose(parents)\r\n parent2 = choose(parents)\r\n child = parent1[:(PARAMETERS_COUNT/2)] + parent2[(PARAMETERS_COUNT/2):]\r\n children.append(child)\r\n \r\n return parents",
"def evolve(self, population):\n n = len(population)\n\n # Create offspring as crossover of parents\n offspring = []\n while len(offspring) < n:\n parent_1 = copy.deepcopy(random.choice(population))\n parent_2 = copy.deepcopy(random.choice(population))\n try:\n self.crossover.crossover(parent_1, parent_2)\n except CrossoverError:\n pass # Just keep parents\n offspring += [parent_1, parent_2]\n\n # Mutate offspring\n offspring = [self.mutator.mutate(tree) for tree in offspring]\n\n # Add it to population\n population += offspring\n\n # Keep the fitter part of the population\n population.sort(key=self.fitness_key, reverse=True)\n population = population[:n]\n\n return population",
"def create_population(self):\n stagnation = DefaultStagnation(self.config.population, self.reporters)\n self.reporters = ReporterSet()\n self.reproduction = DefaultReproduction(self.reporters, stagnation)\n \n # Create a population from scratch, then partition into species\n self.population = self.reproduction.create_new(config=self.config,\n num_genomes=self.config.population.pop_size)\n self.species = DefaultSpecies(reporters=self.reporters)\n self.species.speciate(config=self.config,\n population=self.population,\n generation=self.generation,\n logger=self.log)\n \n # Add to each of the species its elites\n self.update_species_fitness_hist()\n \n # Use 'print' to output information about the run\n reporter = StdOutReporter()\n self.add_reporter(reporter)\n \n # Save newly made population\n self.save()\n \n # Write population configuration to file\n with open(f\"population{'_backup' if self.use_backup else ''}/\"\n f\"storage/\"\n f\"{self.folder_name}/\"\n f\"{self}/\"\n f\"config.txt\", 'w') as f:\n f.write(self.config.read())",
"def test__evolve(self):\n f0 = 3 * np.random.rand(10, 5)\n ga = population.Evolver(f0, eval_one_max)\n\n new = 0\n ngen = 10000\n for i in range(ngen):\n ga._evolve()\n #print ga.generations[-1].individuals[-1]\n #print ga.generations[-1].fitness[-1]\n\n self.assertEqual(len(ga.generations), ngen + 1)",
"def generational_step(self, population):\n offspring = self.variation(population, self._number_offspring)\n self.evaluation(population)\n self.evaluation(offspring)\n if self._target_populations_size is None:\n new_pop_size = len(population)\n else:\n new_pop_size = self._target_populations_size\n self.update_diagnostics(population, offspring)\n return self.selection(population + offspring, new_pop_size)",
"def mutatePopulation(self, population):\n\t\tfor i in range(int(math.ceil(self.selectionRate * len(population)))):\n\t\t\tmutatedIndiv = self.mutateIndividual(population[i])\n\t\t\twhile self.isIndividualInPopulation(mutatedIndiv, population) == True:\n\t\t\t\tmutatedIndiv = self.mutateIndividual(population[i])\n\t\t\tself.calcIndividualFitness(mutatedIndiv)\n\t\t\tpopulation.append(mutatedIndiv)\n\t\tself.sortPopulation(population)\n\t\treturn population[:self.populationSize]",
"def update(self):\n self.chromosome_list = self.next_population\n self.reset_mating_pool()\n self.reset_next_population()",
"def init_population(self):\n for idx in xrange(0, self.population_size):\n individual = self.individual_factory.create()\n self.population.append(individual)\n\n self.population_fitness = numpy.asarray(map(lambda individual: individual.get_fitness(), self.population))\n\n # In order to roulette wheel selection work with negative values, \n # we sum all fitness values to the absolute value of the most negative plus one\n most_negative = self.population_fitness.min()\n self.normalized_fitness = numpy.asarray(map(lambda fitness: 1/math.pow(fitness+numpy.absolute(most_negative)+1, 1), self.population_fitness))\n s = float(self.normalized_fitness.sum())\n self.normalized_fitness = numpy.asarray(map(lambda fitness: fitness/s, self.normalized_fitness))\n #print self.population_fitness.min()\n #print self.population_fitness\n #print self.normalized_fitness",
"def create_next_generation(pop, pop_num, fit_val, mut_prob, kd_min, kd_max, kp_min, kp_max, ki_min, ki_max):\n #Saves top 3 performing genomes\n pop_top = []\n for m in range(1) :\n pop_top.append(pop[m])\n\n #Crossover performed in top 20\n pop_cross = []\n for n in range(25):\n new_pop1 = crossover(pop[n], pop[n+1])\n pop_cross.append(new_pop1)\n\n #Adds all currently available members\n #Then mutates them.\n pop_new = []\n pop_premut = []\n pop_premut = pop_top + pop_cross\n pop_new = mutate(pop_premut, mut_prob, kd_min, kd_max, kp_min, kp_max, ki_min, ki_max)\n\n #Create random members and saves them \n for s in range(pop_num - len(pop_new)):\n #Creating the random PID values\n kd_cur = round(random.uniform(kd_min, kd_max), 2)\n kp_cur = round(random.uniform(kp_min, kp_max), 2)\n ki_cur = round(random.uniform(ki_min, ki_max), 2)\n #Into 2-D List. Access via pop[i][j]\n pop_new.append([kd_cur, kp_cur, ki_cur])\n return pop_new",
"def evolve(self,noOfIterations=50):\n\t\tself.population = Population.Population(self.factory,self.population_size)\n\t\tself.statistics = Statistics.Statistics()\n\t\tself.last_20_fitnesses = collections.deque([])\n\t\tself.continue_evolve(noOfIterations)",
"def _next(self, population):\n # split the population for crossover\n selected, the_rest = self._split_population(\n population, self._get_selected_number(population,\n self._selection_crossover))\n\n # crossover\n generated_items_crossover = []\n while len(selected) >= 2:\n male, female = random.sample(selected, 2)\n selected.remove(male)\n selected.remove(female)\n generated_items_crossover.extend(\n self._crossover.crossover(male, female))\n\n # if there is a impar number of selected items\n # add it back to the list\n the_rest.extend(selected)\n\n # Make the mutations\n selected, the_rest = self._split_population(\n the_rest, self._get_selected_number(population,\n self._selection_mutation))\n # mutation\n generated_items_mutation = []\n for item in selected:\n generated_items_mutation.append(self._mutation.mutate(item))\n\n # compute the population\n population = []\n population.extend(the_rest)\n population.extend(generated_items_crossover)\n population.extend(generated_items_mutation)\n\n return population",
"def _generate_population(self) -> None:\n self._population = list()\n blank_img_ext = ExtendedImage.create_empty_image(width=self._orig_img.get_width(),\n height=self._orig_img.get_height())\n initial_fitval = self._fit_test(blank_img_ext)\n\n for i in range(self._pop_size):\n # Each chromosome is an empty black image\n blank_img_ext = ExtendedImage.create_empty_image(width=self._orig_img.get_width(),\n height=self._orig_img.get_height())\n # Form of 1 element of the population: (member, fitness value)\n self._population.append((blank_img_ext, initial_fitval))",
"def genetic_algorithm(self) -> Tour:\n print(\"{:*^120}\".format(\"Initialize first population\")) \n # Init first population\n pop = Population(self.population_size, self.map, initial=True)\n # print(pop)\n print(\"{:*^120}\".format(\"Initial fittes tour\")) \n print(\"Tour:\", pop.get_fittess_tour())\n print(\"Cost:\", pop.get_fittess_tour().get_cost())\n # print(\"Initial tour fitness:\", pop.get_fittess_tour().get_fitness())\n\n # Set first pop\n self.evol.setPop(pop)\n \n for i in range(self.no_generations):\n # Evolve next generation\n pop = self.evol.evolve_generation()\n \n if i % self.print_cost_per_gen == 0:\n print(\"{:-^50}Generation {}{:-^50}\".format(\"\", i, \"\"))\n print(\"Tour:\", pop.get_fittess_tour())\n print(\"Tour cost: {}\".format(pop.get_fittess_tour().get_cost()))\n\n # Set new pop\n self.evol.setPop(pop)\n\n return self.evol.getPop().get_fittess_tour()",
"def reset_next_population(self):\n self.next_population = []",
"def GAStep(self):\n\n self.updateMatingPool()\n self.newGeneration()",
"def _selection(self) -> None:\n # The size of the new population must be the same as the prev. one\n max_size_of_pop = self._pop_size\n\n # Copy 50% of best chromosomes to the next generation\n num_of_pop_to_next_gen = round(self._pop_size / 2)\n max_size_of_pop -= num_of_pop_to_next_gen\n self._population = self._population[0:num_of_pop_to_next_gen]\n\n # Mutate 25% of the prev. population and add to the next generation\n num_of_mutated_to_next_gen = round(max_size_of_pop / 2)\n max_size_of_pop -= num_of_mutated_to_next_gen\n for i in range(num_of_mutated_to_next_gen):\n # Mutate one member from the prev. generation\n img, _ = self._population[i]\n new_mutated_member = self._mutate(img)\n\n # Apply more mutation to one chromosome(from 0 to 100)\n for i in range(rand.randint(0, 100)):\n new_mutated_member = self._mutate(new_mutated_member)\n\n # Evaluate the goodness of obtained chromosome\n fitval = self._fit_test(new_mutated_member)\n # Add the mutated chromosome to the next generation\n self._population.append((new_mutated_member, fitval))\n\n # For remaining 25% of the prev. population do crossing overs\n num_of_crossing_overs_to_next_gen = max_size_of_pop\n max_size_of_pop -= num_of_crossing_overs_to_next_gen\n\n for i in range(num_of_crossing_overs_to_next_gen):\n # Choose 2 chromosomes, then do one crossing over\n img_ext_1, _ = self._population[i]\n img_ext_2, _ = self._population[rand.randint(0, num_of_pop_to_next_gen)]\n\n new_mutated_member = self._crossing_over(img_ext_1, img_ext_2)\n # Evaluate the goodness of obtained chromosome\n fitval = self._fit_test(new_mutated_member)\n # Add the derived chromosome to the next generation.\n # Form of 1 element of the population: (member, fitness value)\n self._population.append((new_mutated_member, fitval))\n\n # Sort the new generation in increasing order based on the fitness value of each chromosome\n self._population.sort(key=lambda x: x[1])\n print(f'Best chromosome fit value: {self._population[0][1]}')",
"def init_population(self):\n pass",
"def step(self):\n y = np.random.rand(self.p.lambda_, self.p.d).T\n x = self.p.m.reshape(-1, 1) * y\n f = np.array(list(map(sum, x)))\n self.p.used_budget += self.p.lambda_\n self.p.population = Population(x, y, f)\n self.p.m_old = self.p.m.copy()\n self.p.m *= np.linalg.norm(y, axis=1).reshape(-1, 1)\n self.p.adapt()\n self.p.old_population = self.p.population.copy()",
"def generational_replacement(new_pop, individuals):\n individuals.sort(reverse=True)\n for ind in individuals[:ELITE_SIZE]:\n new_pop.append(copy.copy(ind))\n new_pop.sort(reverse=True)\n return new_pop[:GENERATION_SIZE]",
"def reproduce(self):\n\n def compute_seeds(fitness):\n \"\"\" Computes the number of seeds given a fitness value. \"\"\"\n\n seeds = (fitness-min_fitness) / (max_fitness-min_fitness) * \\\n (self.max_seeds-self.min_seeds) + self.min_seeds\n\n return round(seeds)\n\n # evaluates max and min fitness for current year\n max_fitness = max(tree[0] for tree in self.population)\n min_fitness = min(tree[0] for tree in self.population)\n\n # computes the number of seeds produced per tree\n for tree in self.population:\n tree[1].seeds = int(compute_seeds(tree[0]))"
]
| [
"0.7709369",
"0.7311488",
"0.7299069",
"0.7189038",
"0.7013157",
"0.6869076",
"0.67646176",
"0.6729715",
"0.66529423",
"0.66127074",
"0.66050977",
"0.65951574",
"0.65723693",
"0.6494388",
"0.6463658",
"0.6457739",
"0.64527386",
"0.64378035",
"0.642856",
"0.6409268",
"0.64065254",
"0.64058644",
"0.63542986",
"0.63390046",
"0.6328424",
"0.62790316",
"0.6261025",
"0.6256871",
"0.62450665",
"0.6240787"
]
| 0.78997153 | 0 |
Performs the evolution cycle. This is the main method that should normally be called. Evolution goes on until a termination criterion becomes True. At the end, the best individual is returned. | def evolve(self, **options):
    if not self.termination_criteria:
        raise TypeError("You Must set one or more termination criteria")
    self.initialize()
    self.evaluate_population(**options)
    while True:
        if self.should_terminate():
            break
        self.evolve_population(global_stats=self.population_stats, last_stats=self.current_stats, ga_engine=self)
        self.evaluate_population(**options)
    return self.best_individual() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def evolve(self):\n self.generation = 0\n start_time = time.time()\n\n # while the termination criteria is not satisfied, makes another generation\n while not self.termination_criteria.satisfied(self.generation, time.time()-start_time, self.population):\n self.generation += 1\n #print str(self.generation)\n next_generation = []\n\n if self.elitism:\n # Keeps the 10% best individuals\n best_individuals = heapq.nsmallest(int(0.1*self.population_size), self.population, lambda individual: individual.get_fitness())\n next_generation += copy.deepcopy(best_individuals)\n\n # select genetic operation probabilistically\n # this is a roulette wheel selection\n operations = numpy.random.choice(['reproduction', 'crossover', 'mutation'], size=self.population_size, p=[self.reproduction, self.crossover, self.mutation]).tolist()\n individuals = numpy.random.choice(self.population, p=self.normalized_fitness, size=2*self.population_size, replace=True).tolist()\n\n while len(next_generation) < self.population_size:\n operation = operations.pop()\n individual = individuals.pop()\n individual.get_fitness() # enforce fitness calculation\n\n if operation == 'reproduction':\n next_generation.append(individual)\n elif operation == 'crossover':\n individual2 = individuals.pop()\n individual2.get_fitness() # enforce fitness calculation\n individual1, individual2 = individual.crossover(individual2)\n next_generation.append(individual1)\n next_generation.append(individual2)\n elif operation == 'mutation':\n individual1 = individual.mutate()\n next_generation.append(individual1)\n\n self.population = next_generation\n self.population_fitness = numpy.asarray(map(lambda individual: individual.get_fitness(), self.population))\n most_negative = self.population_fitness.min()\n self.normalized_fitness = numpy.asarray(map(lambda fitness: 1/math.pow(fitness+numpy.absolute(most_negative)+1, 1), self.population_fitness))\n s = float(self.normalized_fitness.sum())\n self.normalized_fitness = numpy.asarray(map(lambda fitness: fitness/s, self.normalized_fitness))\n\n mean = numpy.mean(self.population_fitness)\n std = numpy.std(self.population_fitness)\n min = self.population_fitness.min()\n\n info_mean = pandas.DataFrame([[self.generation, mean, min, std]], columns=[\"generation\", \"mean\", \"min\", \"std\"])\n self.generation_info = self.generation_info.append(info_mean, ignore_index=True)",
"def search_loop(max_generations, individuals, grammar, replacement, selection, fitness_function):\n #Evaluate initial population\n evaluate_fitness(individuals, grammar, fitness_function)\n best_ever = max(individuals)\n individuals.sort(reverse=True)\n print_stats(1, individuals)\n for generation in range(2, (max_generations+1)):\n individuals, best_ever = step(\n individuals, grammar, replacement, selection, fitness_function, best_ever)\n print_stats(generation, individuals)\n return best_ever",
"def evolution(inipop):\n popmorph = inipop\n countgenerations = 0\n terminalcount = 0\n bestindiv = inipop[0]\n print(\"initializing with fitness: \",bestindiv.fitness)\n \n \n while terminalcount != 25:\n \n\n selection(popmorph)\n \n generationsbest = top_individual(popmorph)\n print(\"evaluating generation: \",countgenerations)\n countgenerations += 1\n \n \n if generationsbest.fitness > bestindiv.fitness:\n \n bestindiv = generationsbest\n print(\"Better individual found in generation\",countgenerations,\"! Top fitness now: \",bestindiv.fitness)\n terminalcount = 0\n else:\n terminalcount += 1\n\n \n \n popmorph = next_generation(popmorph)\n \n \n return(bestindiv, countgenerations)",
"def search(self):\n self.iteration = 0\n while self.iteration < self.maxIterations:\n self.GAStep()\n self.iteration += 1\n\n print (\"Total iterations: \",self.iteration)\n print (\"Best Solution: \", self.best.getFitness())",
"def continue_evolve(self, noOfIterations=20):\n\t\tself.normalizeWeights()\n\t\tif self.population == None:\n\t\t\traise Exception('Call evolve before calling continue_evolve')\n\t\tprint(\"gen\\tavg\\t\\tbest\\tworst\\t\")\n\t\ti=0\n\t\twhile i<noOfIterations:\n\t\t\tself.generateFitnessMappings()\n\t\t\tfitnesses = [ x[1] for x in self.fitness_mappings]\n\t\t\tself.statistics.add_statistic('best-fitness',self.fitness_mappings[0][1])\n\t\t\tself.statistics.add_statistic('worst-fitness',self.fitness_mappings[-1][1])\n\t\t\tself.mean_fitness = sum(fitnesses)/len(fitnesses)\n\t\t\tself.statistics.add_statistic('avg-fitness',self.mean_fitness)\n\t\t\tself.diversity = math.sqrt(sum((fitness - self.mean_fitness)**2 for fitness in fitnesses)) / len(fitnesses)\n\t\t\tprint(\"%i\\t%.2f\\t\\t%s\\t%s\\t\" % (len(self.statistics.statistic_dict['best-fitness']),self.mean_fitness,self.fitness_mappings[0][1],self.fitness_mappings[-1][1]))\n\t\t\tif self.adaptive_mutation:\n\t\t\t\tself.mut_prob = self.initial_mut_prob * ( 1 + ((self.best_fitness[1]-self.diversity) / (self.diversity+self.best_fitness[1]) ) )\n\t\t\t\tself.mut_prob = np.clip(self.mut_prob,0.0001,0.8)\n\t\t\tself.statistics.add_statistic('mutation_rate',self.mut_prob)\n\t\t\tself.statistics.add_statistic('diversity',self.diversity)\n\t\t\tfor statistic in self.extra_statistics:\n\t\t\t\tself.statistics.add_statistic(statistic,self.extra_statistics[statistic](self.fitness_mappings,self))\n\n\t\t\tresult = self.evolution.evolve(self)\n\n\t\t\tif self.hall_of_fame_injection and (i+1)%20 == 0:\n\t\t\t\tself.population.new_members.insert(0,self.hall_of_fame[0]) \n\n\t\t\tif self.population_control:\n\t\t\t\tif len(self.population.new_members) > self.population_size:\n\t\t\t\t\tself.population.new_members = self.population.new_members[:self.population_size]\n\t\t\t\telif len(self.population.new_members) < self.population_size:\n\t\t\t\t\tself.population.new_members = self.population.new_members * int(self.population_size/len(self.population.new_members)) + self.population.new_members[:self.population_size%len(self.population.new_members)]\n\n\t\t\tif self.efficient_iteration_halt:\n\t\t\t\tif len(self.last_20_fitnesses)==20:\n\t\t\t\t\tself.last_20_fitnesses.popleft()\n\t\t\t\t\tself.last_20_fitnesses.append(self.best_fitness[1])\n\t\t\t\t\tif all(x == self.last_20_fitnesses[0] for x in self.last_20_fitnesses):\n\t\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tself.last_20_fitnesses.append(self.best_fitness[1])\n\n\t\t\t# For next iteration\n\t\t\tself.population.members = self.population.new_members\n\t\t\tself.population.new_members = []\n\n\t\t\tif result == 1:\n\t\t\t\tprint('GA Problem Solved')\n\t\t\t\tbreak\n\t\t\ti += 1",
"def regularized_evolution(cycles, population_size, sample_size, time_budget, random_arch, mutate_arch, api, dataset):\n population = collections.deque()\n api.reset_time()\n history, total_time_cost = [], [] # Not used by the algorithm, only used to report results.\n current_best_index = []\n # Initialize the population with random models.\n while len(population) < population_size:\n model = Model()\n model.arch = random_arch()\n model.accuracy, _, _, total_cost = api.simulate_train_eval(model.arch, dataset, hp='12')\n # Append the info\n population.append(model)\n history.append((model.accuracy, model.arch))\n total_time_cost.append(total_cost)\n current_best_index.append(api.query_index_by_arch(max(history, key=lambda x: x[0])[1]))\n\n # Carry out evolution in cycles. Each cycle produces a model and removes another.\n while total_time_cost[-1] < time_budget:\n # Sample randomly chosen models from the current population.\n start_time, sample = time.time(), []\n while len(sample) < sample_size:\n # Inefficient, but written this way for clarity. In the case of neural\n # nets, the efficiency of this line is irrelevant because training neural\n # nets is the rate-determining step.\n candidate = random.choice(list(population))\n sample.append(candidate)\n\n # The parent is the best model in the sample.\n parent = max(sample, key=lambda i: i.accuracy)\n\n # Create the child model and store it.\n child = Model()\n child.arch = mutate_arch(parent.arch)\n child.accuracy, _, _, total_cost = api.simulate_train_eval(child.arch, dataset, hp='12')\n # Append the info\n population.append(child)\n history.append((child.accuracy, child.arch))\n current_best_index.append(api.query_index_by_arch(max(history, key=lambda x: x[0])[1]))\n total_time_cost.append(total_cost)\n\n # Remove the oldest model.\n population.popleft()\n return history, current_best_index, total_time_cost",
"def solve(self):\n initial_fes = eades(self.graph, self.force_forward_edges)\n initial_fes_vec = self.edge_vector(initial_fes)\n\n # bounds for the objective\n lower_bound = 0\n upper_bound = np.sum(initial_fes_vec @ self.weights)\n\n self.logger.info('Calculating FES for graph with %d edges, max %d feedback edges', self.m, len(initial_fes))\n\n simple_cycles = set(induced_cycles(self.graph, initial_fes))\n\n for iteration in itertools.count(1):\n self.logger.info('Baharev iteration %d, %g <= objective <= %g, %d simple cycles', iteration, lower_bound,\n upper_bound, len(simple_cycles))\n\n # Formulate and solve the problem for this iteration:\n y = cp.Variable(self.m, boolean=True, name=\"y\")\n objective = cp.Minimize(cp.sum(y @ self.weights))\n\n cycle_vectors = [self.edge_vector(nx.utils.pairwise(cycle)) for cycle in simple_cycles]\n constraints = [cp.sum(a @ y) >= 1 for a in cycle_vectors]\n constraints.append(cp.sum(y @ self.force_forward_vec) == 0) # no force forward vec may be in the result set\n problem = cp.Problem(objective, constraints)\n resolution = problem.solve(**self.solver_args)\n if problem.status != 'optimal':\n self.logger.warning('Optimization solution is %s. Try solver != %s?', problem.status,\n problem.solver_stats.solver_name)\n self.logger.debug(\n \"Solved optimization problem with %d constraints: %s -> %s (%g + %g seconds, %d iterations, solver %s)\",\n len(constraints), resolution, problem.solution.status,\n problem.solver_stats.solve_time or 0, problem.solver_stats.setup_time or 0,\n problem.solver_stats.num_iters or 0, problem.solver_stats.solver_name)\n current_solution = np.abs(y.value) >= 0.5 # y.value = vector of floats each ≈ 0 or 1\n current_fes = self.edges_for_vector(current_solution)\n self.logger.debug('Iteration %d, resolution: %s, %d feedback edges', iteration, resolution,\n len(current_fes))\n # S, the feedback edge set calculated using the constraint subset, can be an incomplete solution\n # (i.e. cycles remain after removing S from the graph). So lets compare this with the upper bound\n # from the heuristic\n lower_bound = max(lower_bound, objective.value)\n if lower_bound == upper_bound:\n self.logger.info('upper == lower bound == %g, optimal solution found', lower_bound)\n break # y.value is the optimal solution\n\n if resolution > upper_bound:\n self.logger.error('Solution %g > upper bound %g!', resolution, upper_bound)\n break\n\n Gi = self.graph.copy()\n Gi.remove_edges_from(current_fes)\n if nx.is_directed_acyclic_graph(Gi):\n self.logger.info('Graph is acyclic, optimal solution found')\n break # y.value is the optimal solution\n\n # The solution is not yet ideal. So we take G^(i), the graph still containing some feedback edges,\n # calculate a heuristic on it and use the heuristic (= over-estimation) to adjust upper bound and\n # determine additional simple cycles (= constraints)\n Fi = eades(Gi, self.force_forward_edges)\n yi = self.edge_vector(Fi) | current_solution\n zi = np.sum(yi @ self.weights)\n if zi < upper_bound:\n upper_bound = zi\n current_solution = yi\n simple_cycles |= set(induced_cycles(Gi, Fi))\n\n self.solution_vector = current_solution\n self.solution = self.edges_for_vector(current_solution)\n self.objective = objective.value\n self.iterations = iteration\n self.simple_cycles = simple_cycles\n return self.solution",
"def run(self) -> (ExtendedImage, int):\n # Generate an initial population\n print('Generating the first population...')\n self._generate_population()\n print('The first population generated! Starting the algorithm...')\n\n # Start the algorithm itself to obtain the solution for the problem\n for i in range(self._max_iter_num):\n # Also calculate execution time of 1 iteration\n start = datetime.now()\n # Run selection to generate a new population based on the prev. one\n self._selection()\n end = datetime.now()\n print(f'Iteration {i} finished; time = {(end - start).total_seconds()}')\n\n # If STOPPING_CRITERIA is reached, terminate the algorithm\n # (the best chromosome in the new generation is good enough)\n if self._population[0][1] <= self._stop_crit:\n print('Stopping criteria reached. Terminate.')\n break\n\n print('Algorithm finished!')\n # Return obtained result in the form of tuple (picture, fitness value)\n return self._population[0]",
"def run_experiment(self):\n\n start_time = time.time()\n\n strategy_instance = None\n if (self.strategy == 'ccegp'):\n strategy_instance = CCEGPStrategy(self)\n else:\n print('strategy unknown:', self.strategy)\n sys.exit(1)\n\n # For each run...\n for curr_run in range(1, self.num_runs_per_experiment + 1):\n\n # Update log\n self.curr_run = curr_run\n print('\\nRun', curr_run)\n self.log_file.write('\\nRun ' + str(curr_run) + '\\n')\n\n # Execute one run and get best values.\n attacker_run_high_fitness, attacker_run_best_world_data, attacker_run_best_solution, \\\n defender_run_high_fitness, defender_run_best_solution, attacker_dot, defender_dot \\\n = strategy_instance.execute_one_run()\n\n print('\\nBest attacker tree of run:\\n' + attacker_run_best_solution)\n if (self.print_dots):\n print('\\nBest attacker dot of run:\\n' + str(attacker_dot))\n print('\\nBest defender tree of run:\\n' + defender_run_best_solution)\n if (self.print_dots):\n print('\\nBest defender dot of run:\\n' + str(defender_dot))\n\n # If best of run is best overall, update appropriate values\n if (self.strategy != 'ccegp'):\n if (attacker_run_high_fitness > self.attacker_exp_high_fitness):\n self.attacker_exp_high_fitness = attacker_run_high_fitness\n print('New exp Attacker high fitness: ', self.attacker_exp_high_fitness)\n self.attacker_exp_best_world_data = attacker_run_best_world_data\n self.attacker_exp_best_solution = attacker_run_best_solution\n self.attacker_exp_best_dot = attacker_dot\n # If Competitive Co-evolution, add fitnesses (use Attacker to store most data)\n else:\n if ((attacker_run_high_fitness + defender_run_high_fitness) > self.attacker_exp_high_fitness):\n self.attacker_exp_high_fitness = (attacker_run_high_fitness + defender_run_high_fitness)\n print('New exp Attacker+Defender high fitness: ', self.attacker_exp_high_fitness)\n self.attacker_exp_best_world_data = attacker_run_best_world_data\n self.attacker_exp_best_solution = attacker_run_best_solution\n self.defender_exp_best_solution = defender_run_best_solution\n self.attacker_exp_best_dot = attacker_dot\n self.defender_exp_best_dot = defender_dot\n\n # Dump best world to file\n the_file = open(self.high_score_world_file_path, 'w')\n for line in self.attacker_exp_best_world_data:\n the_file.write(line)\n the_file.close()\n\n # Dump best Attacker solution (text) to file\n the_file = open(self.attacker_solution_file_path, 'w')\n the_file.write(self.attacker_exp_best_solution)\n the_file.close()\n\n # Dump best Defender solution (text) to file\n if (self.strategy == 'ccegp'):\n the_file = open(self.defender_solution_file_path, 'w')\n the_file.write(self.defender_exp_best_solution)\n the_file.close()\n\n # Dump best Attacker solution (dot) to file\n the_file = open(self.attacker_solution_dot_path, 'w')\n the_file.write(str(self.attacker_exp_best_dot))\n the_file.close()\n\n # Dump best Defender solution (dot) to file\n if (self.strategy == 'ccegp'):\n the_file = open(self.defender_solution_dot_path, 'w')\n the_file.write(str(self.defender_exp_best_dot))\n the_file.close()\n\n # Dump and display best Attacker solution\n if (self.render_solutions):\n self.attacker_exp_best_dot.render(filename=self.attacker_solution_png_path,\n view=self.attacker_open_png,\n format='png')\n\n # Dump and display best Defender solution\n if (self.render_solutions and self.strategy == 'ccegp'):\n self.defender_exp_best_dot.render(filename=self.defender_solution_png_path,\n view=self.defender_open_png,\n format='png')\n\n # Close out the log file\n if 
(not(self.log_file is None)):\n self.log_file.close()\n\n print(time.time() - start_time, 'seconds')",
"def regularized_evolution(cycles, population_size, sample_size, time_budget, random_arch, mutate_arch, nas_bench, extra_info):\n population = collections.deque()\n history, total_time_cost = [], 0 # Not used by the algorithm, only used to report results.\n\n # Initialize the population with random models.\n while len(population) < population_size:\n model = Model()\n model.arch = random_arch()\n model.accuracy, time_cost = train_and_eval(model.arch, nas_bench, extra_info)\n population.append(model)\n history.append(model)\n total_time_cost += time_cost\n\n # Carry out evolution in cycles. Each cycle produces a model and removes\n # another.\n #while len(history) < cycles:\n while total_time_cost < time_budget:\n # Sample randomly chosen models from the current population.\n start_time, sample = time.time(), []\n while len(sample) < sample_size:\n # Inefficient, but written this way for clarity. In the case of neural\n # nets, the efficiency of this line is irrelevant because training neural\n # nets is the rate-determining step.\n candidate = random.choice(list(population))\n sample.append(candidate)\n\n # The parent is the best model in the sample.\n parent = max(sample, key=lambda i: i.accuracy)\n\n # Create the child model and store it.\n child = Model()\n child.arch = mutate_arch(parent.arch)\n total_time_cost += time.time() - start_time\n child.accuracy, time_cost = train_and_eval(child.arch, nas_bench, extra_info)\n if total_time_cost + time_cost > time_budget: # return\n return history, total_time_cost\n else:\n total_time_cost += time_cost\n population.append(child)\n history.append(child)\n\n # Remove the oldest model.\n population.popleft()\n return history, total_time_cost",
"def action(self):\n\n self.start_timer()\n\n minimax_probability = self.norm.cdf(self.root.branching)\n use_minimax = boolean_from_probability(minimax_probability)\n if self.time_consumed > 53:\n # Time is starting to run low, use the faster option\n use_minimax=True\n\n if self.time_consumed < 59:\n if self.root.turn < 4:\n result = book_first_four_moves(self.root)\n elif use_minimax:\n result = minimax_paranoid_reduction(self.root)\n else:\n result = monte_carlo_tree_search(\n self.root,\n playout_amount=3,\n node_cutoff=4,\n outer_cutoff=4,\n num_iterations=1200,\n turn_time=0.75,\n exploration_constant=1.7,\n use_slow_culling=False,\n verbosity=0,\n use_prior=True,\n num_priors=4,\n use_fast_prune_eval=False,\n use_fast_rollout_eval=False,\n )\n else:\n result = greedy_choose(self.root)\n\n self.end_timer()\n\n return result",
"def run(self, _evaluations):\n\n # by default use of mother method to initialize variables\n super().run(_evaluations)\n\n if self.parent:\n self.bestSolution = self.parent.bestSolution\n\n # initialize current solution\n self.initRun()\n\n solutionSize = self.currentSolution.size\n\n # local search algorithm implementation\n while not self.stop():\n\n for _ in range(solutionSize):\n\n # update current solution using policy\n newSolution = self.update(self.currentSolution)\n\n # if better solution than currently, replace it\n if self.isBetter(newSolution):\n self.bestSolution = newSolution\n\n # increase number of evaluations\n self.increaseEvaluation()\n\n self.progress()\n logging.info(\"---- Current %s - SCORE %s\" %\n (newSolution, newSolution.fitness()))\n\n # stop algorithm if necessary\n if self.stop():\n break\n\n logging.info(\"End of %s, best solution found %s\" %\n (type(self).__name__, self.bestSolution))\n\n return self.bestSolution",
"def evolve(self, evolutions_to_run, evolution_count = 0):\n\n\n for i in range(evolutions_to_run):\n self.population = self.mating.mate(self.population)\n self.population = mutate.mutate(self.population)\n\n self.population = self.computeFitness.compute(self.population)\n self._sort()\n self.population = self.optimize_best(\n self.population,\n evolution_count\n )\n self._sort()\n self._save_all_chromosomes(evolution_count)\n self.display_population(evolution_count)\n evolution_count += 1",
"def run(self):\n\n # init\n base_value = self._problem.evaluate()\n self._problem.set_as_best(base_value)\n\n # init iteration (used to count the amount of iterations)\n iteration = 0\n\n # add to data\n self._data_append(self.data, iteration, base_value, base_value)\n\n # init termination criterion\n self._termination_criterion.check_first_value(base_value)\n self._termination_criterion.start_timing()\n\n # main loop\n while self._termination_criterion.keep_running():\n\n # search the neighbourhood for the best move\n best_found_delta = self._best_found_delta_base_value\n best_found_move = None\n\n for move in self._problem.get_moves():\n\n # check quality move\n delta = self._problem.evaluate_move(move)\n\n # checks how the move alters the current state\n diff = self._diff(move)\n\n # if not in tabu list --> not similar to earlier performed\n # moves --> if delta better than old best move\n # --> becomes the best move\n\n if not self._tabu_list.contains(diff) and \\\n self._is_better(best_found_delta, delta):\n best_found_delta = delta\n best_found_move = move\n best_found_diff = diff\n\n # the best found move will be used as the next move\n # alter state problem\n base_value = base_value + best_found_delta\n\n # check if a move was found\n if best_found_move is not None:\n\n self._problem.move(best_found_move)\n\n # if better than best found --> new best_found\n if self._is_better(self._problem.best_order_value,\n base_value):\n self._problem.set_as_best(base_value)\n # log the better solution\n self._log_improvement(base_value)\n\n # add diff to tabu list\n self._tabu_list.add(best_found_diff)\n\n # add to data\n self._data_append(self.data, iteration,\n base_value, self._problem.best_order_value)\n\n self._termination_criterion.check_new_value(base_value)\n\n # functions _termination_criterion called\n self._termination_criterion.check_new_value(base_value)\n\n else:\n # no move found --> we're stuck --> break loop\n break\n\n iteration += 1\n self._termination_criterion.iteration_done()\n\n # last data point\n self._data_append(self.data, iteration, base_value,\n self._problem.best_order_value)\n\n # if we have data:\n # convert data to something easier to plot\n if self.data is not None:\n\n # convert to tuple of list\n data = convert_data(self.data)\n\n # make namedtuple\n DataAsLists = namedtuple(\n 'Data', ['time', 'iteration', 'value', 'best_value'])\n\n data = DataAsLists(data[0], data[1], data[2], data[3])\n\n else:\n data = None\n\n # return results\n\n Results = namedtuple('Results', ['best_order', 'best_value', 'data'])\n\n return Results(self._problem.best_order,\n self._problem.best_order_value,\n data)",
"def run(self, no_improv_gen):\r\n bestvalue = min(self.cost_populations)\r\n no_improvement_tries = 0\r\n starttime = timeit.default_timer()\r\n\r\n while no_improvement_tries < no_improv_gen:\r\n endtime = timeit.default_timer()\r\n print(f\"Best value: {bestvalue}, no improvement tries: {no_improvement_tries}, time:{endtime - starttime}\")\r\n\r\n self.improve_population()\r\n self.sort_values()\r\n self.make_parents()\r\n self.parents_loop()\r\n \r\n # add best of the old population to the population\r\n while len(self.district_population) < self.population_size:\r\n index = self.best_costs.index(min(self.best_costs))\r\n self.cost_populations.append(self.best_costs[index])\r\n self.district_population.append(self.best_districts[index])\r\n del self.best_costs[index]\r\n del self.best_districts[index]\r\n\r\n if min(self.cost_populations) < bestvalue:\r\n bestvalue = min(self.cost_populations)\r\n no_improvement_tries = 0\r\n else:\r\n no_improvement_tries += 1\r\n \r\n self.best_districts = []\r\n self.best_costs = []\r\n self.worst_districts = []\r\n \r\n bestdistrict = self.cost_populations.index(bestvalue)\r\n return self.district_population[bestdistrict]",
"def next(self):\n old_candidate = self._candidate\n new_candidate = self._genome_factory.build([old_candidate])\n new_candidate.run()\n if new_candidate.fitness > old_candidate.fitness:\n self._candidate = new_candidate\n\n self._converged = self._convergence_criterion.converged(old_candidate, new_candidate)",
"def run(self):\n while self.proteins:\n \n protein = self.get_next_protein()\n \n # Get the next amino acid in the chain.\n amino_position = protein.get_unplaced_amino_position()\n if amino_position is not None:\n self.build_children(protein, amino_position)\n else:\n self.check_solution(protein)\n\n # Set's the output to be the protein with the highest score.\n protein = self.best_solution\n\n return self.best_solution",
"def anneal(self):\n # Initialize with the greedy solution.\n self.cur_solution, self.cur_fitness = self.initial_solution()\n\n print(\"Starting annealing.\")\n while self.T >= self.stopping_temperature and self.iteration < self.stopping_iter:\n candidate = list(self.cur_solution)\n l = random.randint(2, self.N - 1)\n i = random.randint(0, self.N - l)\n candidate[i : (i + l)] = reversed(candidate[i : (i + l)])\n self.accept(candidate)\n self.T *= self.alpha\n self.iteration += 1\n\n self.fitness_list.append(self.cur_fitness)\n\n print(\"Best fitness obtained: \", self.best_fitness)\n improvement = 100 * (self.fitness_list[0] - self.best_fitness) / (self.fitness_list[0])\n print(f\"Improvement over greedy heuristic: {improvement : .5f}%\")\n return self.best_fitness",
"def run(self, num_iterations = 50, **kwargs):\n \n #setup system\n self.cost_calculator = t.CostCalculator(self.suppliers_allcards, self.all_ensembles_dict)\n bounds = np.array(self.cost_calculator.ensemble_sizes) - 1\n #define cost functions\n cost_func = lambda p: sum(self.cost_calculator.get_cost(p))\n #create model\n self.model = ga(cost_func, bounds, **kwargs)\n \n fitness_list = [];\n \n for i in range(num_iterations):\n #Update\n f = next(self.model)\n #get fitness values\n fitness_list.append(f[0])\n #Output\n print('\\r(%d/%d) '%(i+1,num_iterations), end = '')\n print('top ensemble fitness: %1.1f '%f[0], end = '')\n \n print('\\nDone')\n self.solution = self.cost_calculator.decode_arrangement(self.model.get_solution())",
"def runAlg_noPrints(self):\n alpha = self.__problem.getAlpha()\n beta = self.__problem.getBeta()\n q0 = self.__problem.getQ0()\n rho = self.__problem.getRho()\n \n bestSol= Ant(self.__n)\n \n for i in range(self.__noEpoch):\n antSol = self.iteration(alpha, beta, q0, rho)\n if antSol.evaluate() < bestSol.evaluate():\n bestSol.setSolution ( deepcopy(antSol.getSolution()) )\n if bestSol.evaluate() == 1 :\n return bestSol\n\n return bestSol",
"def main(self) -> None:\n if LOG[\"Experiment\"]:\n print(\"[Experiment] Advancing to 1st Generation\")\n\n # Mandatory first generation advancement\n self.neat.advance_generation()\n\n # Metrics are initialized\n max_fitness = self.neat.get_best_fitness()\n max_fitnesses = [max_fitness]\n\n if LOG[\"Experiment\"]:\n print(\"[Experiment] Entering Main Loop\")\n\n # The main loop is entered\n stop = 0.0\n while max_fitness <= TARGET_SCORE:\n # Metrics of the last generation are checked and shared\n if LOG[\"Experiment\"]:\n print(\"\\n[Experiment] Generation = \" + str(self.neat.get_generation()))\n print(\"[Experiment] Maximum Fitness of the Generation = \" + str(max_fitness))\n print(\"[Experiment] Compared the Previous Recorded Maximum = \" + str(stop))\n print(\"[Experiment] Maximum Innovation of the Generation = \" + str(self.neat.get_maximum_innovation()))\n print(\"[Experiment] Amount of Species = \", len(self.neat.get_shared_fitness_sums()))\n print(\"[Experiment] Total Shared Fitness = \", self.neat.get_total_shared_fitness(), \"\\n\")\n\n # If an improvement is found, the game may be simulated\n if max_fitness > stop:\n stop = max_fitness\n if LOG[\"FrequentSimulations\"] and input(\"[Experiment] Show Simulation? (y/n)\\n\") == \"y\":\n n = self.neat.get_population()[-1]\n self.snake_game.show(Snake(11, Experiment.ExperimentAI(n)), self.last_used_seed,\n \"Generation = \" + str(self.neat.get_generation()),\n fps=max(4, int(max_fitness / 4)))\n\n # Generation advancement\n self.neat.advance_generation()\n max_fitness = self.neat.get_best_fitness()\n max_fitnesses.append(max_fitness)\n\n # If the target was passed, metrics are consulted\n if LOG[\"Experiment\"]:\n print(\"\\n[Experiment] Generation = \" + str(self.neat.get_generation()))\n print(\"[Experiment] Maximum Fitness of the Generation = \" + str(max_fitness))\n print(\"[Experiment] Compared to a 'stop' value of = \" + str(stop))\n print(\"[Experiment] Maximum Innovation of the Generation = \" + str(self.neat.get_maximum_innovation()))\n print(\"[Experiment] Shared fitness sums = \", self.neat.get_shared_fitness_sums())\n print(\"[Experiment] Total shared fitness = \", self.neat.get_total_shared_fitness(), \"\\n\")\n\n # Metrics are updated again\n max_fitness = self.neat.get_best_fitness()\n max_fitnesses.append(max_fitness)\n\n # A simulation of the result can be shown if the user wants to\n sim = input(\"[Experiment] Show Simulation? (y/n)\\n\")\n while sim == \"y\":\n n = self.neat.get_population()[-1]\n self.snake_game.show(Snake(11, Experiment.ExperimentAI(n)), self.last_used_seed,\n \"Generation = \" + str(self.neat.get_generation()),\n fps=max(4, int(max_fitness / 4)))\n sim = input(\"[Experiment] Show Simulation? 
(y/n)\\n\")\n\n # The resulting network may be printed\n if SHOW_RESULT:\n print(\"The best network generated is specified as:\\n\", str(self.neat.get_best_network_details()))\n\n # The resulting network may be saved\n if SAVE_RESULT:\n if LOG[\"Experiment\"]:\n print(\"[Experiment] Saving Resulting Network\")\n\n # Previous saves are removed\n dm.clear_dir(networks_saving_directory)\n\n # A .txt is generated\n with open(networks_saving_directory+\"/best_network.txt\", \"w\") as text_file:\n text_file.write(str(self.neat.get_best_network_details()))\n\n if LOG[\"Experiment\"]:\n print(\"[Experiment] Resulting Network Saved\")\n\n # A plot of fitnesses may be created\n if PLOT:\n if LOG[\"Experiment\"]:\n print(\"[Experiment] Generating Fitness Plot\")\n\n # The plot is generated in matplotlib\n _, ax = plt.subplots()\n\n ax.plot(range(1, len(max_fitnesses)+1), max_fitnesses)\n ax.set_xlim([0, len(max_fitnesses)+2])\n ax.set_ylim([max(min(min(max_fitnesses), TARGET_SCORE - 100), 0), TARGET_SCORE+5])\n\n plt.title(\"Generational fitness for board size \" + str(BOARD_SIZE) +\n \" using seed \" + str(SEED))\n plt.xlabel(\"Generation\")\n plt.ylabel(\"Fitness\")\n ax.grid(True)\n\n # The plot may be saved to memory\n if SAVE_PLOTS:\n if LOG[\"Experiment\"]:\n print(\"[Experiment] Saving Fitness Plot\")\n\n # Previous saves are removed\n dm.clear_dir(plots_saving_directory)\n\n name = plots_saving_directory + \"/plot_board\" + str(BOARD_SIZE)\n name += \".png\"\n\n # A new .png is saved\n plt.savefig(name, bbox_inches='tight')\n\n if LOG[\"Experiment\"]:\n print(\"[Experiment] Fitness Plot Saved\")\n # Otherwise the plot is displayed\n else:\n if LOG[\"Experiment\"]:\n print(\"[Experiment] Showing Fitness Plot\")\n\n plt.show()\n\n plt.close()\n\n if LOG[\"Experiment\"]:\n print(\"[Experiment] Quitting Experiment\")\n\n # The experiment ends\n self.snake_game.quit()",
"def run(self, seed='old'):\n if seed == 'old':\n founds, number_found = self.find_in_base()\n param = number_found - self.M_N\n\n if param < 0:\n print \"We have only {0} usable chromosomes in the database, per {1} required.\".format(number_found, self.M_N)\n l, __ = self.evolve_partials(abs(param))\n combined = founds+[l[i].x for i in range(len(l))]\n\n elif param > 0:\n combined = random.sample(founds, self.M_N)\n\n else:\n combined = founds\n\n if seed == 'fresh':\n print \"Evolving fresh chromosomes...\"\n l, __ = self.evolve_partials(self.M_N)\n combined = [l[i].x for i in range(len(l))]\n\n if len(combined) != self.M_N: raise ValueError\n print \"\\nLaunching Multi-Objective evolution...\"\n isl, prob = self.mlt_obj_evo(combined)\n self.writing_finals(isl, prob)",
"def iterations(self):\n i = 0\n stateVectorConv = self.stateVectorConvThreshold * 1.0e6\n n = len(self.model.stateVector)\n self.answer = None\n \n while ((i < self.maxiter) \n and (stateVectorConv > self.stateVectorConvThreshold)\n ):\n \n F, K = self.model()\n \n if np.any(np.isnan(F)) or np.any(np.isnan(K)):\n m = \"Iteration {0} failure of model.\"\n raise OptimalEstimationException(m.format(i))\n \n if self.model.verbose > 0:\n self.model.plot(i+1, stateVectorConv)\n \n try:\n self.DecomposeJacobian(K)\n except np.linalg.LinAlgError:\n m = \"Iteration {0} failure in decomposition.\"\n raise OptimalEstimationException(m.format(i))\n \n statevectorOffset = (self.V.T * self.priorSinvh * \n np.matrix(np.array(self.model.stateVector) - np.array(self.model.prior) ).T)\n measurementOffset = (self.U.T * self.errSinvh * \n np.matrix(self.model.observation - F).T)\n \n newState = np.matrix((self.w * \n (measurementOffset.A1 + \n self.w * statevectorOffset.A1))/(self.w**2+1.0)).T\n newState = self.priorSh * self.V * newState\n newState = newState.A1 + self.model.prior\n \n stateVectorConv = ((np.matrix(newState - self.model.stateVector) * \n self.Sinv * np.matrix(newState - self.model.stateVector).T)/n)[0,0]\n self.model.stateVector = newState\n\n if i == 0:\n \n stateVectorConv = self.stateVectorConvThreshold * 1.0e6\n \n print('cost Function for iteration {}:'.format(i), self.costFunction)\n\n i += 1\n \n F, K = self.model()\n if self.model.verbose > 0:\n self.model.plot(i+1, stateVectorConv)\n \n try:\n self.DecomposeJacobian(K)\n except np.linalg.LinAlgError:\n raise OptimalEstimationException(\"Failure in decomposition.\")\n \n Wplus2 = np.matrix(np.diag(1.0/(self.w**2+1.0)))\n self.model.covariance = (self.priorSh * self.V * Wplus2 * \n self.V.T * self.priorSh)\n \n\n \n return i, stateVectorConv",
"def evolve(self, env, num_generations, num_episodes, num_frames):\n for gen in range(num_generations):\n\n if Trainer.VERBOSE:\n print(\"Generation:\", gen)\n\n # Generate new root Teams\n self.generation()\n\n # Evaluate current agents\n self.evaluation(env, num_episodes, num_frames)\n\n # Perform selection\n self.selection()\n\n # Return to top-performing agent. Typically not used, but nice to have\n ranked_agents = sorted(self.agent_pop, key=lambda rt : rt.team.fitness, reverse=True)\n return ranked_agents[0]",
"def run(self, initialPopulation = None):\n\t\tpprint(\"OPT calculating initial population...\", BLUE, self.printing)\n\t\t\n\t\tif initialPopulation == None:\n\t\t\t# if we don't get an initial set of schedules as the initial population,\n\t\t\t# then we need to generate one.\n\t\t\tpopulation = self.initialPopulation()\n\t\telse:\n\t\t\t# if we do get an initial population as input, then we just need to \n\t\t\t# calculate the fitnesses of the schedules in it.\n\t\t\tfor p in initialPopulation:\n\t\t\t\tself.calcIndividualFitness(p)\n\t\t\t# if the population is too small or too large (less than or larger than\n\t\t\t# self.populationSize) then this will fix that for us.\n\t\t\tpopulation = self.mutatePopulation(initialPopulation)\n\t\t\n\t\t# go through the needed number of iterations and mutate the population\n\t\t# everytime, this will keep the best individuals and will return the \n\t\t# best population achieved at the end.\n\t\tfor i in range(self.iterations):\n\t\t\tpprint(\"OPT iteration number %s\" % (i + 1), BLUE, self.printing)\n\t\t\tpopulation = self.mutatePopulation(population)\n\t\treturn population",
"def step(self, particles, best_state, best_fitness, run_locals):\r\n # continuous testing of inputs\r\n if self.testing_unit.testing_level > 1 and not self.testing_unit.c_test_step_inp(particles,\r\n best_state,\r\n best_fitness,\r\n run_locals):\r\n raise ValueError(\"step won't run, input's aren't valid.\")\r\n # apply the fitness function to get this generations fitness values\r\n fitness = np.empty((particles.shape[0]))\r\n #fitness = np.apply_along_axis(run_locals[\"fitness_function\"], 0, particles[:, 0, :, :]) # hopefully works\r\n for i in range(particles.shape[0]):\r\n fitness[i] = run_locals[\"fitness_function\"](particles[i, 0])\r\n\r\n # find any personal improvements\r\n better = best_fitness < fitness\r\n # set them\r\n best_fitness[better] = fitness[better]\r\n # set their states\r\n best_state[better] = particles[better, 0]\r\n\r\n # find highest of group\r\n best_of_group = np.argmax(best_fitness, axis=0)\r\n\r\n if self.verbosity > 6: # some random high verbosity outputs that were once used for debugging, might give ideas\r\n print(\"step high verb: \")\r\n print(particles[0])\r\n print(particles[:, 1].shape)\r\n print(best_state.shape)\r\n print(np.repeat(best_state[best_of_group][np.newaxis, :], particles[:, 1].shape[0], axis=0).shape)\r\n\r\n # run calculation for the velocity calculation\r\n # Maurice Clerc. Standard Particle Swarm Optimisation. 2012. hal-00764996\r\n particles[:, 1] = (run_locals[\"PSO_VELOCITY_WEIGHT\"] * particles[:, 1] +\r\n run_locals[\"PSO_INDIVIDUAL_WEIGHT\"] * np.random.rand(particles[:, 0].shape[0],\r\n particles[:, 0].shape[1],\r\n particles[:, 0].shape[2]) *\r\n (best_state - particles[:, 0]) +\r\n run_locals[\"PSO_GROUP_WEIGHT\"] * np.random.rand(particles[:, 0].shape[0],\r\n particles[:, 0].shape[1],\r\n particles[:, 0].shape[2]) *\r\n (best_state[best_of_group] - particles[:, 0]))\r\n\r\n # run calculation for point calculation\r\n particles[:, 0] = particles[:, 0] + particles[:, 1]\r\n #if True and ((particles[:, 0] < np.array(run_locals[\"axes\"])[:, 0]).any() or \\\r\n # (particles[:, 0] > np.array(run_locals[\"axes\"])[:, 1]).any()):\r\n #print(particles[:, 0].shape)\r\n #mask = np.logical_or(particles[:, 0] < np.array(run_locals[\"axes\"])[:, 0],\r\n # particles[:, 0] > np.array(run_locals[\"axes\"])[:, 1])\r\n #print(particles.shape)\r\n #print(np.arange(particles.shape[0]).shape)\r\n #print(np.arange(particles.shape[0])[mask])\r\n #print(particles[np.argmax(mask), 1])\r\n # clip the particles to be within the axes\r\n particles[:, 0] = np.clip(particles[:, 0],\r\n np.array(run_locals[\"axes\"])[:, 0],\r\n np.array(run_locals[\"axes\"])[:, 1])\r\n #if self.globi < 10:\r\n # self.glob[self.globi] = particles[0, 0, 0, 0]\r\n # self.guub[self.globi] = particles[0, 1, 0, 0]\r\n # self.glub[self.globi] = best_state[best_of_group][0, 0]\r\n # self.globi += 1\r\n #else:\r\n #print(self.glob[:10])\r\n #print(self.guub[:10])\r\n #print(self.glub[:10])\r\n #raise ValueError(self.glob)\r\n\r\n return particles, best_state, best_fitness",
"def judge(self):\n self.bounds = 0.0\n self.best = self.lives[0]\n for life in self.lives:\n life.score = self.matchFun(life)\n self.bounds += life.score\n if self.best.score < life.score:\n self.best = life",
"def judge(self):\n self.bounds = 0.0\n self.best = self.lives[0]\n for life in self.lives:\n life.score = self.matchFun(life)\n self.bounds += life.score\n if self.best.score < life.score:\n self.best = life",
"def run(self):\n\n # Initialization assumptions\n z = self.draw_normal_initial()\n gradient = self.cv_gradient_initial(z)\n gradient[np.isnan(gradient)] = 0\n variance = np.power(gradient, 2) \n final_parameters = self.current_parameters()\n final_samples = 1\n\n # Create optimizer\n if self.optimizer == 'ADAM':\n self.optim = ADAM(final_parameters, variance, self.learning_rate, 0.9, 0.999)\n elif self.optimizer == 'RMSProp':\n self.optim = RMSProp(final_parameters, variance, self.learning_rate, 0.99)\n\n # Record elbo\n if self.record_elbo is True:\n elbo_records = np.zeros(self.iterations)\n else:\n elbo_records = None\n\n for i in range(self.iterations):\n x = self.draw_normal()\n gradient = self.cv_gradient(x)\n gradient[np.isnan(gradient)] = 0\n self.change_parameters(self.optim.update(gradient))\n\n if self.printer is True:\n self.print_progress(i, self.optim.parameters[::2])\n\n # Construct final parameters using final 10% of samples\n if i > self.iterations-round(self.iterations/10):\n final_samples += 1\n final_parameters = final_parameters+self.optim.parameters\n\n if self.record_elbo is True:\n elbo_records[i] = self.get_elbo(self.optim.parameters[::2])\n\n final_parameters = final_parameters/float(final_samples)\n self.change_parameters(final_parameters)\n final_means = np.array([final_parameters[el] for el in range(len(final_parameters)) if el%2==0])\n final_ses = np.array([final_parameters[el] for el in range(len(final_parameters)) if el%2!=0])\n if not self.quiet_progress:\n print(\"\")\n print(\"Final model ELBO is \" + str(-self.full_neg_posterior(final_means)-self.create_normal_logq(final_means)))\n return self.q, final_means, final_ses, elbo_records",
"def evo_alg(evo_type,env,n_hidden_neurons):\n # initialise parameters for NEAT1\n ngens = 10\n population = 20\n best = 10\n\n # initialise parameters for NEAT2\n if evo_type=='NEAT2':\n best = False\n\n pop_data = {}\n\n # initialize population randomly\n for ind in range(population):\n # initialise random weights\n\n weights = np.random.uniform(-1,1,size=(n_hidden_neurons+20*n_hidden_neurons+5+n_hidden_neurons*5))\n\n fitness = run_play(weights)\n pop_data[ind] = (fitness, weights)\n\n # perform evolutionary algorithm for all generations\n for gen in range(ngens):\n\n print(f'RUN: {gen+1}')\n\n # sort by fitness\n pop_data={k: v for k, v in sorted(pop_data.items(), key=lambda item: item[1][0], reverse=True)}\n\n # perform cross-over on best individuals\n all_weights = cross_rand(pop_data,best,population)\n\n # overwrite old population data\n pop_data = {}\n\n for ind in range(population):\n weights = mutate(all_weights[ind],gen)\n\n fitness = run_play(weights)\n pop_data[ind] = (fitness,weights)"
]
| [
"0.6539778",
"0.64894867",
"0.6374142",
"0.63138235",
"0.62839043",
"0.6255625",
"0.61972564",
"0.615336",
"0.6120255",
"0.60814714",
"0.60222733",
"0.6005586",
"0.6002664",
"0.59978247",
"0.59944457",
"0.5991057",
"0.59831536",
"0.59788215",
"0.5916169",
"0.5911512",
"0.5872012",
"0.5833023",
"0.5826939",
"0.57843304",
"0.5770718",
"0.57416034",
"0.57403773",
"0.57403773",
"0.5740127",
"0.57369715"
]
| 0.7172024 | 0 |
Allows user to place bets, returns None. | def get_player_bet(self) -> None:
print("Please enter the amount you want to bet.")
while self.user.bet == 0:
input_ = input(">>> ")
try:
input_ = float(input_)
self.user.bet = input_
except ValueError as e:
print(str(e))
continue | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def place_bet(self) -> None:\n amount = self.get_bet()\n while not self.valid_bet(amount):\n print(f\"That is an invalid bet. Please input an amount within ${MINIMUM_BET()} and ${self.balance}\\n\")\n amount = self.get_bet()\n self.balance -= amount\n self.bet = amount\n print(f\"A total of ${self.bet} has been deducted from your balance. Good luck, player!\\n\")\n time.sleep(1)",
"def free_bet(inputs):\n print 'Free bet:'\n free_value = inputs['bet_value']\n free_odds = inputs['bet_odds']\n lay_odds = inputs['lay_odds']\n commission_per_cent = inputs['commission_per_cent']\n # With inputs in place calculate bet\n commission = 0.01 * commission_per_cent\n lay_stake = free_value * (free_odds - 1) / (lay_odds - commission)\n # Calculate profit in both cases (free back wins and free back loses)\n profit_free_wins = free_value * (free_odds - 1) \\\n - lay_stake * (lay_odds - 1)\n profit_lay_wins = lay_stake * (1 - commission)\n # Also calculate free SNR value\n free_SNR = profit_free_wins / free_value\n free_SNR_per_cent = free_SNR * 100\n # Print calculated outputs\n print 'Profit if *back* wins: GBP', round(profit_free_wins,2)\n print 'Profit if *lay* wins: GBP', round(profit_lay_wins,2) \n print 'Free value %: ', round(free_SNR_per_cent,1), '%'\n print 'Lay required: GBP', round(lay_stake,2)",
"def on_place_bet (self,event):\n global placed_bet\n placed_bet = True\n arcade.draw_text(f\"Value: {self.dealer_value}\", 280, 450, arcade.color.BLACK, 16)\n arcade.draw_text(f\"Value: {self.player_value}\", 280, 250, arcade.color.BLACK, 16)\n \n self.final_bet = self.bet\n self.dealer_hand[1].face_up()\n self.player_hand[0].face_up()\n self.player_hand[1].face_up()",
"def bet(self):\n while True:\n try:\n self.round_bet = float(\n input(f'{self.name}, please enter an amount to bet for this round: '))\n if self.round_bet > self.bankroll:\n print('You have bet more than you have!')\n continue\n if self.round_bet <= 0:\n self.out_of_round = True\n else:\n self.bankroll -= self.round_bet\n break\n except TypeError:\n print('Please enter in a valid bet!')\n continue\n except ValueError:\n print('Please enter in a valid bet!')\n return self.name, self.round_bet",
"async def bet(message, user: ParamType.MIXER_USER, amount):\n\n username = user.username.lower()\n username_sender = message.username.lower()\n\n mixcord_user = await database.get_user(message.user_id)\n\n # handle if somebody is trying to accept or deny\n if amount == \"accept\" or amount == \"deny\":\n\n # get the pending bet\n bet = pending_bets.get(username)\n if bet is None or bet[\"username\"] != username_sender:\n return \"failed to find the bet you're responding to.\"\n\n # delete the pending bet, because we're handling it\n del pending_bets[username]\n\n # if the user wants to deny the bet, don't do anything\n if amount == \"deny\":\n return \"you have denied the pending bet from @{}.\".format(username)\n\n # if the user wants to accept the bet, continue\n if amount == \"accept\":\n\n # make sure they have enough money to accept\n if bet[\"amount\"] > mixcord_user[\"balance\"]:\n return \"you have insufficient funds to accept this bet.\"\n\n # make sure the issuer of the challenge still has enough money\n competitor_mixcord_user = await database.get_user(user.id)\n if bet[\"amount\"] > competitor_mixcord_user[\"balance\"]:\n return \"@{} no longer has sufficient funding to run this bet.\".format(username)\n\n # determine winner/loser\n pick = random.randint(0, 1) == 1\n winner_id = user.id if pick else message.user_id\n loser_id = message.user_id if pick else user.id\n winner_username = username if pick else username_sender\n loser_username = message.username if pick else username\n\n # affect balances accordingly\n await database.add_balance(winner_id, bet[\"amount\"])\n await database.add_balance(loser_id, -bet[\"amount\"])\n\n # end the bet!\n await chat.send_message(\"@{} has won {} {}! better luck next time, @{}.\".format(winner_username, bet[\"amount\"], currency_name, loser_username))\n return None\n\n # make sure the amount is numeric by converting it to an int\n amount = utils.get_positive_int(amount)\n if amount is None: return \"amount must be a positive integer.\"\n\n # make sure they're not trying to start a bet against themself :/\n if message.username == username:\n return \"you're not able to start a bet against yourself.\"\n\n # make sure we don't already have a pending bet\n if pending_bets.get(message.username) is not None:\n return \"you already have a pending bet.\"\n\n # make sure the challenger has enough money to start the bet\n if amount > mixcord_user[\"balance\"]:\n return \"you have insufficient funds to request this bet.\"\n\n # store challenge information\n pending_bets[message.username] = {\n \"username\": username,\n \"amount\": amount\n }\n\n # send messages indicating the challenge has been issued\n await chat.send_message(\"@{} has challenged @{} to a bet of {} {}!\".format(message.username, username, amount, currency_name))\n await asyncio.sleep(0.5)\n await chat.send_message(\"use {}bet @{} [accept/deny] to respond to your pending bet!\".format(chat.commands.prefix, message.username), username)\n\n # automatically timeout the bet in 30 seconds\n await asyncio.sleep(30)\n bet = pending_bets.get(message.username)\n if bet is not None:\n del pending_bets[message.username]\n await chat.send_message(\"@{} your pending bet has timed out.\".format(message.username))",
"def bet(self, amount):\n if amount >self.budget:\n print 'you cannot bet because of little money'\n else:\n self.bet_amount = amount\n print 'you bet %s' % (amount)",
"def qualify_bet(inputs):\n print 'Qualifying bet:'\n back_value = inputs['bet_value']\n back_odds = inputs['bet_odds']\n lay_odds = inputs['lay_odds']\n commission_per_cent = inputs['commission_per_cent']\n # With inputs in place calculate bet\n commission = 0.01 * commission_per_cent\n lay_stake = (back_value * back_odds) / (lay_odds - commission)\n # Calculate profit in both cases (back wins and back loses)\n profit_back_wins = back_value * (back_odds - 1) \\\n - lay_stake * (lay_odds - 1)\n profit_lay_wins = lay_stake * (1 - commission) - back_value\n # Print calculated outputs\n print 'Profit if *back* wins: GBP', round(profit_back_wins,2)\n print 'Profit if *lay* wins: GBP', round(profit_lay_wins,2) \n print 'Lay required: GBP', round(lay_stake,2)",
"def place_bet(fav: TeamDict, strategy: str, line: str, user: User, owned: float, stake: float = STAKE) -> None: #I HAVE TO CHANGE THIS FOR REPORT FUNCTION\n bet_stake = STAKE * owned\n bet_stake = \"%.2f\" %bet_stake\n bet_stake = bet_stake.replace(\".\", \",\")\n #selecting bet\n WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.CLASS_NAME, \"bss-StakeBox_StakeAndReturn \"))).click() #selects bet value box\n WebDriverWait(driver, 10).until(EC.text_to_be_present_in_element((By.CLASS_NAME, \"bss-StakeBox_StakeAndReturn \"), \"0,00\"))\n\n #sending bet value\n driver.find_element_by_class_name(\"bss-StakeBox_StakeValueInput \").send_keys(f'{bet_stake}') # enter bet_stake value\n try:\n WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.CLASS_NAME, \"bss-PlaceBetButton \"))).click() #places bet\n except TimeoutException:\n try:\n WebDriverWait(driver, 5).until(EC.element_to_be_clickable((By.CLASS_NAME, \"bs-AcceptButton \"))).click() #accepts changes in bet\n WebDriverWait(driver, 5).until(EC.element_to_be_clickable((By.CLASS_NAME, \"bss-PlaceBetButton \"))).click() #places bet\n except ElementClickInterceptedException:\n tries = 10\n while tries:\n try:\n WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.CLASS_NAME, \"bs-AcceptButton \"))).click() #accepts changes in bet\n WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.CLASS_NAME, \"bss-PlaceBetButton \"))).click()\n break\n except ElementClickInterceptedException:\n tries -= 1\n\n tries = 10\n ##SOMETIMES THERE'S THIS 'TERMINAR BUTTON'\n while tries:\n try:\n # WebDriverWait(driver, 5).until(EC.text_to_be_present_in_element((By.CLASS_NAME, \"bs-ReceiptContent_Done \"), 'Terminar')) #Does it work?\n WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.CLASS_NAME, \"bs-ReceiptContent_Done \"))).click() #confirms\n break\n except ElementNotInteractableException:\n tries -= 1\n except TimeoutException:\n break\n \n user.write_pending_bet('bet', fav, strategy, line, datetime.datetime.now())",
"def raise_bet(value):\r\n\r\n global total_bet, dealer_bet, in_play, bottom_alert\r\n if value > player.get_cash() or not in_play:\r\n bottom_alert = \"You cannot bet $%i right now.\" % (value)\r\n elif in_play:\r\n player.spend_cash(value)\r\n dealer_bet += value\r\n total_bet += value * 2\r\n bottom_alert = \"\"",
"def place_bet_e24(fav: TeamDict, strategy: str, user: User, time: int, owned: float) -> None:\n part = {'e2': '1', 'e4': '2'}\n tab = None\n group_tabs = driver.find_elements_by_class_name(\"sip-MarketGroup \") #all markets\n try:\n name_tabs = driver.find_elements_by_class_name(\"sip-MarketGroupButton_Text \") #all text in markets\n for i, el_tab in enumerate(name_tabs):\n name = el_tab.text.lower()\n if part[strategy] == '2' and ('escanteio' in name) and ('opções' in name):\n tab = group_tabs[i] #gets desired market\n break \n elif ('escanteio' in name) and (part[strategy] in name) and ('º' in name or 'ª' in name) and ('asiático' not in name):\n tab = group_tabs[i] #gets desired market\n break\n\n assert len(tab.text.split('\\n')) > 1 #this happens when tab is closed \n\n except AssertionError:\n tab.click() #if it's closed, it opens\n if tab == None:\n print(f'Não há apostas de handicap para 1º tempo')\n return\n\n finally:\n rows = tab.find_elements_by_class_name(\"gl-Market \")[0] #first collumn is bet numbers\n j = None\n for i, row in enumerate(rows.text.split('\\n')): #iterates over head text\n row = row.lower()\n if str(fav['esc_tot']+1) in row: #gets what row is bet\n j = i \n break\n if j == None:\n print(f'Bet on {fav[\"esc_tot\"]+1} not avaible')\n return\n bet_collumn = tab.find_elements_by_class_name(\"gl-Market \")[1] #\"mais de\" in collumn 1\n bet_button = bet_collumn.find_elements_by_class_name(\"gl-ParticipantOddsOnly \")[j] #click on bet option\n bet_button.click()\n\n place_bet(fav, strategy, 'mais', user, owned)",
"def make_bet(self, bet):\n player = bet.created_by\n if not self.can_player_bet(player):\n raise ValidationError(\"Not able to make a bet.\")\n bet.save()",
"def __bet(self, numbers: str, user_seed: str) -> None:\n self.BetSource(self.tx.origin, self.tx.timestamp)\n if not self._game_on.get():\n Logger.debug(f'Game not active yet.', TAG)\n revert(f'Game not active yet.')\n amount = self.msg.value\n Logger.debug(f'Betting {amount} loop on {numbers}.', TAG)\n self.BetPlaced(amount, numbers)\n self._take_wager(self.address, amount)\n\n nums = set(numbers.split(','))\n n = len(nums)\n if n == 0:\n Logger.debug(f'Bet placed without numbers.', TAG)\n revert(f' Invalid bet. No numbers submitted. Zero win chance. Returning funds.')\n elif n > 20:\n Logger.debug(f'Bet placed with too many numbers. Max numbers = 20.', TAG)\n revert(f' Invalid bet. Too many numbers submitted. Returning funds.')\n\n numset = set(WHEEL_ORDER)\n numset.remove('0')\n for num in nums:\n if num not in numset:\n Logger.debug(f'Invalid number submitted.', TAG)\n revert(f' Please check your bet. Numbers must be between 0 and 20, submitted as a comma separated '\n f'string. Returning funds.')\n\n bet_type = self._bet_type.get()\n self._bet_type.set(BET_TYPES[0])\n if bet_type == BET_TYPES[2] or bet_type == BET_TYPES[3]:\n bet_limit = self._bet_limits[0]\n else:\n bet_limit = self._bet_limits[n]\n if amount < BET_MIN or amount > bet_limit:\n Logger.debug(f'Betting amount {amount} out of range.', TAG)\n revert(f'Betting amount {amount} out of range ({BET_MIN} -> {bet_limit} loop).')\n\n if n == 1:\n bet_type = BET_TYPES[4]\n if bet_type == BET_TYPES[1]:\n payout = int(MULTIPLIERS[BET_TYPES[5]] * 1000) * amount // (1000 * n)\n else:\n payout = MULTIPLIERS[bet_type] * amount\n if self.icx.get_balance(self.address) < payout:\n Logger.debug(f'Not enough in treasury to make the play.', TAG)\n revert('Not enough in treasury to make the play.')\n\n spin = self.get_random(user_seed)\n winningNumber = WHEEL_ORDER[int(spin * 21)]\n Logger.debug(f'winningNumber was {winningNumber}.', TAG)\n win = winningNumber in nums\n payout = payout * win\n self.BetResult(str(spin), winningNumber, payout)\n\n if win == 1:\n self._wager_payout(self.address, payout)\n else:\n Logger.debug(f'Player lost. ICX retained in treasury.', TAG)",
"def do_bet(self, arg):\n\t\topts = get_options(parser.parser_add, arg)\n\t\tif opts is None: return\n\t\tkwargs = {}\n\t\tkwargs['name'] = ' '.join(opts.broken_name)\n\t\tkwargs['weight'] = opts.weight\n\t\tif opts.outcome is not None:\n\t\t\tkwargs['outcome'] = convert_outcome(opts.outcome)\n\t\tself.manager.add_bet(**kwargs)\n\t\tprint(display.format_bet(self.manager.bets[-1]))",
"def make_bet(self, amount):\n self.update_fear(amount)\n self.bot.bet(amount)",
"def bet(self, amount):\r\n\r\n if self.players[self.active_player].credits < self.big_blind:\r\n message = \"Player {} won! Not enough money remaining.\".format(self.players[(self.active_player + 1) %\r\n len(self.players)].name)\r\n self.game_message.emit(message)\r\n self.restart()\r\n if self.players[(self.active_player + 1) % len(self.players)].credits < self.big_blind:\r\n message = \"Player {} won! Not enough money remaining.\".format(self.players[self.active_player].name)\r\n self.game_message_warning.emit(message)\r\n self.restart()\r\n\r\n if amount == 0:\r\n message = \"Raises must be larger than zero!\"\r\n self.game_message_warning.emit(message)\r\n\r\n elif self.previous_bet + amount > self.players[self.active_player].credits:\r\n message = \"Not enough money!\"\r\n self.game_message_warning.emit(message)\r\n else:\r\n self.pot += amount\r\n self.new_pot.emit()\r\n\r\n self.players[self.active_player].credits -= (self.previous_bet + amount)\r\n self.new_credits.emit()\r\n\r\n output_text = \"{} bet ${} and raised ${}\".format(self.players[self.active_player].name, self.previous_bet,\r\n amount)\r\n\r\n self.previous_bet = (self.previous_bet + amount)\r\n self.actions += 1\r\n\r\n self.new_output.emit(output_text)\r\n\r\n self.active_player = (self.active_player + 1) % len(self.players)\r\n\r\n # Update the players to hide their cards when it is not their turn\r\n for player in self.players:\r\n player.flip_cards()\r\n\r\n self.progress_game()",
"def get_bet(self) -> int:\n return int(input(f\"How much money would you like to place? \"\n f\"(input an integer between {MINIMUM_BET()}-{self.balance}): \"))",
"def betting(game, episode, buttons):\n potential_wager = process_user_input(game, game.player1, game.player2, buttons)\n\n if potential_wager:\n game.player1.wager = potential_wager\n game.update_tablepot()\n\n if game.player1.folded:\n print(\"player1 folded\")\n return False\n\n game.player2.wager = process_bot_input(game, game.player2, game.player1, episode)\n game.update_tablepot()\n\n if game.player2.folded:\n print(\"player2 folded\")\n return False\n\n if game.player1.wager == game.player2.wager:\n print(\"moving on\")\n return True\n else:\n print(\"you're stuck in betting\")\n return betting\n else:\n return 'no input'",
"def bet(bet_tup, index, driver=None, verbose=False):\n team_name, mkt_type, amt = bet_tup\n button = find_bet_button(team_name, mkt_type, driver, verbose)\n button.send_keys(\"\\n\")\n time.sleep(1)\n set_wager(driver, index, amt)\n time.sleep(1)",
"def post_bet(self, bot_name, amount):\n canPost = self.parent.post_bet(bot_name, amount)\n if canPost:\n self.pot += amount\n return True\n else:\n return False",
"def blind_bet(self):\n self.this_player.bet(SMALL_BLIND_BET)\n self.other_player.bet(BIG_BLIND_BET)\n if SHOW_MESSAGE:\n print(\"Making blind bets.\")\n print(\"Player1:\")\n self.player1.show()\n print(\"Player2:\")\n self.player2.show()",
"def event_player_blackjack(self) -> None:\n win_amount = self.user.bet + 1.5\n print(\"Congratulations, you win:\", win_amount)\n self.user.win_balance(win_amount)",
"def place_bets(self, market=None, market_bets=None):\n venue = market['event']['venue']\n name = market['marketName']\n if market_bets:\n for strategy_ref, strategy_bets in market_bets.items():\n live_strategy = betbot_db.strategy_repo.is_live(strategy_ref)\n retry_count = 0\n while len(strategy_bets) > 0: # Some orders may not execute first time around.\n # Set limit order prices as this may be an order re-submission.\n for strategy_bet in strategy_bets:\n runner_book = self.get_runner_book(market['marketId'], strategy_bet['selectionId'])\n size = strategy_bet['limitOrder']['size']\n side = strategy_bet['side']\n strategy_bet['limitOrder']['price'] = self.determine_price(side, size, runner_book)\n # Place bets via the Betfair API (or simulate it).\n if self.live_mode and live_strategy:\n resp = self.api.place_bets(market['marketId'], strategy_bets, strategy_ref)\n else:\n resp = self.simulate_place_bets(market, strategy_bets, strategy_ref)\n # Evaluate the API response.\n if type(resp) is dict and 'status' in resp:\n if resp['status'] == 'SUCCESS':\n # Check for execution and persist.\n success_refs = []\n for instruction in resp['instructionReports']:\n # If the order didn't execute, mark the instruction as settled immediately.\n if 'orderStatus' in instruction and instruction['orderStatus'] == 'EXECUTION_COMPLETE':\n instruction['settled'] = False\n success_refs.append(instruction['instruction']['customerOrderRef'])\n else: # Fill-or-Kill Limit Order EXPIRED so nothing to settle.\n instruction['settled'] = True\n # Add the strategy reference for display purposes.\n instruction['customerStrategyRef'] = strategy_ref\n betbot_db.instruction_repo.insert(market, instruction)\n # Remove any instructions that have executed, leaving any that EXPIRED.\n strategy_bets = [x for x in strategy_bets if x['customerOrderRef'] not in success_refs]\n self.logger.info('Successfully placed %s bet(s) on %s %s.' % (strategy_ref, venue, name))\n else:\n self.logger.error(\n 'Failed to place %s bet(s) on %s %s. (Error: %s)' %\n (strategy_ref, venue, name, resp['errorCode']))\n # Set the market as skipped, it's too late to try again.\n betbot_db.market_repo.set_skipped(market, resp['errorCode'])\n else:\n msg = 'Failed to place %s bet(s) on %s %s - resp = %s' % (strategy_ref, venue, name, resp)\n raise Exception(msg)\n retry_count += 1\n if retry_count == 5:\n self.logger.warn(\"Failed to place one or more %s bets 5 times, giving up.\" % strategy_ref)\n break\n # Throttle order re-submissions.\n sleep(1)",
"def all_in():\r\n\r\n raise_bet(player.get_cash())",
"def game_bid():\n game = current_user.get_active_game()\n bid = request.form.get('bid', '')\n\n # Validate bid\n if not isinstance(bid, str):\n # Invalid input for bid, but no need to alert user\n return redirect(url_for('game_home'))\n bid = bid.strip()\n if not BID_REGEX.match(bid):\n flash('Your bid must be an integer bid from zero (0) to thirteen (13).')\n return redirect(url_for('game_home'))\n\n if game is None:\n flash('If you want to join a game, click the Join button.')\n return redirect(url_for('home'))\n else:\n hand = game.get_latest_hand()\n # Attempt to place the bid\n try:\n hand.place_bid(current_user.user_id, int(bid), game)\n except UserCanNotBidError:\n flash('Bidding is not available at this time for you.')\n return redirect(url_for('game_home'))\n except BadGameStateError:\n flash('An error occurred while trying to pace your bid. Please try again.')\n return redirect(url_for('game_home'))\n else:\n flash(f'Your bid of {bid} has been placed.')\n return redirect(url_for('game_home'))",
"def beer():\r\n global cheated\r\n\r\n if enter_four == config.confus(config.config4):\r\n player.grab(helpful.Item('SixPack',10,0,0,6))\r\n cheated = True\r\n print '<achievement unlocked>\\n'\r\n\r\n if player.get_money() >= 17:\r\n\r\n player.set_health(100)\r\n player.lose_money(17)\r\n\r\n raw_input('You take out your money.\\n')\r\n raw_input(bartender_name + ' chuckles.\\n')\r\n raw_input('\"I guess we have this stuff, if you really need a drink.\"\\n')\r\n\r\n raw_input(\"The 'beer' healed you!\\n\")\r\n raw_input('It also cost $17.\\n')\r\n \r\n else:\r\n print bartender_name + ' chuckles and looks pointedly at his empty tip jar.\\n'\r\n raw_input('\"' +\"We're out of beer.\" + '\"\\n')\r\n raw_input('\"Nice try.\"\\n')",
"def get_bet(self):\n while newbet := input(f\"{self.name}: {self.chips} chips. Last bet: {self.lastbet}. Bet: \"):\n try:\n newbet = int(newbet)\n if newbet in range(0, self.chips+1):\n self.bet = newbet\n self.chips -= newbet\n return newbet\n else:\n print(\"You don't have that many chips.\")\n except ValueError:\n print(\"Bets are numbers please.\")",
"def sell():\n return apology(\"TODO\")\n if request.method == \"POST\":\n # Ensure symbol was submitted\n symbol = request.form.get(\"symbol\")\n if not symbol:\n return apology(\"must provide symbol\", 403)\n symbol = symbol.upper()\n\n # Ensure number of shares was submitted\n shares = request.form.get(\"shares\")\n if not shares:\n return apology(\"must provide shares\", 403)\n\n return render_template(\"sell.html\")",
"async def process_bj_game(self, ctx, amount, user_id):\n if amount >= 0:\n if not await self.check_in_game(user_id, ctx):\n if amount > await ex.u_currency.get_balance(user_id):\n await ctx.send(f\"> **{ctx.author}, you can not bet more than your current balance.**\")\n else:\n return True\n else:\n await ctx.send(f\"> **{ctx.author}, you can not bet a negative number.**\")",
"def search_market_gather_players(self, name, max_price_to_pay, bids_allowed, bids_made, futbindata, min_bid, max_bid):\n if (int(max_bid) < 400):\n max_bid = 400\n # Ensure bid box is visible, then clear previous params\n self.sleep_approx(2)\n input = self.driver.find_element(\n By.XPATH, \"/html/body/main/section/section/div[2]/div/div[2]/div/div[1]/div[2]/div[6]/div[2]/input\")\n self.driver.execute_script(\"arguments[0].scrollIntoView(true);\", input)\n WebDriverWait(self.driver, 20).until(EC.element_to_be_clickable(\n (By.XPATH, \"/html/body/main/section/section/div[2]/div/div[2]/div/div[1]/div[2]/div[6]/div[2]/input\"))).click()\n self.sleep_approx(1)\n input.send_keys(0)\n self.sleep_approx(1)\n\n clear = \"/html/body/main/section/section/div[2]/div/div[2]/div/div[1]/div[2]/div[1]/button\"\n maxbidbox = self.driver.find_element(\n By.XPATH, \"/html/body/main/section/section/div[2]/div/div[2]/div/div[1]/div[2]/div[3]/div[2]/input\")\n minbidbox = self.driver.find_element(\n By.XPATH, \"/html/body/main/section/section/div[2]/div/div[2]/div/div[1]/div[2]/div[2]/div[2]/input\")\n\n # CLEAR RESULTS BOX\n self.driver.find_element(By.XPATH, clear).click()\n self.sleep_approx(1)\n\n # insert max_bid here\n maxbidbox.click()\n self.sleep_approx(1)\n maxbidbox.send_keys(max_bid)\n self.sleep_approx(1)\n\n # insert min_bid here\n minbidbox.click()\n self.sleep_approx(1)\n minbidbox.send_keys(min_bid)\n self.sleep_approx(1)\n\n # search the pages, and bid on players under bid price\n self.clickSearch()\n sleep(3)\n\n keepgoing = True\n while keepgoing:\n # Each page, get user config\n self.getUserConfig()\n status = self.checkState(\"transfermarket\")\n if status:\n max_price_to_pay = int(max_price_to_pay)\n self.sleep_approx(4)\n\n # TODO understand why some eligible players fail to receive bids...\n players_on_page = self.getAllPlayerInfo()\n for card in players_on_page:\n playernumber = card[0]\n bidStatus = card[1]\n curbid = card[5]\n timeremainingseconds = card[7]\n timeremainingmins = timeremainingseconds/60\n playerid = card[8]\n buynow = card[6]\n\n if bids_made < bids_allowed-1:\n if \"highest-bid\" not in bidStatus:\n stopbidTime = int(self.bidexpiration_ceiling)\n if timeremainingmins < stopbidTime:\n if timeremainingmins >= 2:\n # Check if bid to make falls under ceiling\n if (curbid < 1000):\n curbidprice_afterbidding = curbid+50\n else:\n curbidprice_afterbidding = curbid+100\n if curbidprice_afterbidding < max_price_to_pay:\n if ((curbid*2)<self.user_num_coins):\n self.makebid_individualplayer(\n playernumber, max_price_to_pay)\n self.sleep_approx(2)\n bids_made += 1\n log_event(self.queue, \"Bids made on \" + str(name) +\n \": \" + str(bids_made) + \"/\" + str(bids_allowed))\n else:\n log_event(self.queue, \"not enough coins\")\n else:\n keepgoing = False\n else:\n keepgoing = False\n\n self.sleep_approx(3)\n log_event(self.queue, \"Going to next page\")\n try:\n self.driver.find_element_by_xpath(\n '/html/body/main/section/section/div[2]/div/div/section[1]/div/div/button[2]')\n self.driver.find_element_by_xpath(\n '/html/body/main/section/section/div[2]/div/div/section[1]/div/div/button[2]').click()\n self.user_requests_made += 1\n except:\n log_event(self.queue, \"No next page found, returning\")\n keepgoing = False\n self.clickBack()\n self.sleep_approx(1)\n return bids_made",
"async def bet(message, amount):\n\n # make sure their discord account is linked\n mixcord_user = await database.get_user(message.user_id)\n if mixcord_user is None:\n return \"your mixer account must be linked to your discord via mixcord to use this command.\"\n\n # make sure they have sufficient balance\n if amount == \"all\":\n amount = mixcord_user[\"balance\"]\n if amount == 0:\n return \"amount must be a positive integer.\"\n else:\n amount = utils.get_positive_int(amount)\n if amount is None:\n return \"amount must be a positive integer.\"\n if mixcord_user[\"balance\"] < amount:\n return \"you have insufficient balance. ({}/{} {})\".format(mixcord_user[\"balance\"], amount, currency_name)\n\n won = random.randint(0, 1) == 1\n if won:\n await database.add_balance(message.user_id, amount)\n return \"you won :D you now have {} {}.\".format((mixcord_user[\"balance\"] + amount), currency_name)\n else:\n await database.add_balance(message.user_id, -amount)\n return \"you lost :( you now have {} {}.\".format((mixcord_user[\"balance\"] - amount), currency_name)"
]
| [
"0.70802295",
"0.6853024",
"0.6825054",
"0.6718689",
"0.671583",
"0.67043567",
"0.6674088",
"0.66712844",
"0.64814967",
"0.64650905",
"0.6464141",
"0.64590806",
"0.64511913",
"0.6350406",
"0.627941",
"0.6273458",
"0.62659526",
"0.61382014",
"0.6119612",
"0.6117631",
"0.61033297",
"0.6016445",
"0.5890668",
"0.5840869",
"0.57673067",
"0.57496655",
"0.57392174",
"0.5720008",
"0.56952786",
"0.5659537"
]
| 0.70504504 | 1 |
Deals cards to the user and house, returns None. Deals two cards to player and house each. House receives one open and one closed card. If cards have already been dealt, this method just returns None. | def first_deal(self) -> None:
if len(self.house.hand.cards) == 0 and len(self.user.hand.cards) == 0: # Check if cards are already dealt.
print(self.deal_card(self.user))
print(self.deal_card(self.house))
print(self.deal_card(self.user))
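            # The house's second card is dealt face down and stays hidden from the player.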
print(self.deal_card(self.house, is_open=False))
print(f"The house has: {self.house.hand.cards} totalling to {self.house.hand.value}") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def deal(self):\n\n if self.dealer: # Has cards in hand\n self.dealer.reset()\n\n if self.player: # Has cards in hand\n self.player.reset()\n\n dealer_first = self.deck.draw()\n dealer_second = self.deck.draw()\n dealer_second.flip()\n self.dealer.take_card(dealer_first)\n self.dealer.take_card(dealer_second)\n\n player_first = self.deck.draw()\n player_second = self.deck.draw()\n player_first.flip()\n player_second.flip()\n self.player.take_card(player_first)\n self.player.take_card(player_second)\n\n if self.verbose:\n print('Player bets:', self.player_bet)\n for player in (self.player, self.dealer):\n print(player, 'dealt:')\n for card in player:\n if card.face():\n print(' '*3, str(card)+':', 'face up')\n else:\n print(' '*3, str(card)+':', 'face down')",
"def deal(self):\n self.dealer.hit(self.deck)\n self.dealer.hit(self.deck)\n self.player.hit(self.deck)\n self.player.hit(self.deck)\n\n if self.player.sum_cards() == 21:\n self.round_winner = True\n self.print_hands()\n print(\"BLACKJACK! You win!\")",
"def open_cards(self) -> None:\r\n self.dealer.deal_cards_to(self.card_stack, PokerRules.CARDS_PER_ROUND[self.round_num])",
"def deal():\n \n # Update messages, score and the player's \"Hand\" status\n # as global variables.\n global outcome, outcome_plus, outcome_plus_plus, in_play, score, action \n outcome = outcome_plus = outcome_plus_plus = \"\"\n action = HIT_OR_STAND\n \n # If the \"Deal\" button is clicked during the middle of \n # a round the program reports that the \"Player\" lost \n # the round and updates the \"score\" appropriately.\n if in_play:\n outcome = PLAYER_LOSES \n outcome_plus = EARLY_DEAL_1\n outcome_plus_plus = EARLY_DEAL_2\n score -= SCORE_POINTS\n else:\n in_play = True\n \n # Create and shuffle the \"Deck\" (stored as a global \n # variable). Avoids the situation where the \"Deck\" \n # becomes empty during play.\n global deck_of_cards\n deck_of_cards = Deck()\n deck_of_cards.shuffle()\n \n # Create new \"Player\" and \"Dealer\" Hands (stored as \n # global variables). \n global player, dealer\n player = Hand()\n dealer = Hand()\n \n # Add two \"Cards\" to each \"Hand\". To transfer a \"Card\" \n # from the \"Deck\" to a \"Hand\", the \"deal_card()\" \n # method of the \"Deck\" class and the \"add_card()\" \n # method of \"Hand\" class are being used in \n # combination. \n player.add_card(deck_of_cards.deal_card())\n dealer.add_card(deck_of_cards.deal_card())\n player.add_card(deck_of_cards.deal_card())\n dealer.add_card(deck_of_cards.deal_card())\n \n # Print resulting \"Hands\" to the console with an \n # appropriate message indicating which \"Hand\" is which.\n # Remove comments if in DEBUG mode.\n #print \"Player: \" + str(player)\n #print \"Dealer: \" + str(dealer) \n \n return None",
"def deal_opening_cards(self) -> None:\r\n for i in range(self.num_of_players):\r\n self.dealer.deal_cards_to(self.players[i].cards_stack, PokerRules.CARDS_PER_PLAYER)",
"def deal_cards(self):\r\n\t\tself.player.double = False\r\n\t\tif self.cardstack.reshuffle:\r\n\t\t\tself.cardstack.shuffle(self.decks)\r\n\t\t\tself.cardstack.reshuffle = False\r\n\t\tself.hands.append(Hand())\r\n\t\tfor i in range(2):\r\n\t\t\tself.hands[0].add_card(self.cardstack.draw())\r\n\t\t\tself.dealer.add_card(self.cardstack.draw())",
"def deal_cards(self):\n card.Card.create_deck()\n self.dealer.deal()\n for player in self.players:\n player.deal()",
"def house_deal(self) -> None:\n if not self.has_game_ending_hand:\n while max(self.house.hand.value) < 17:\n print(f\"{self.deal_card(self.house)}\")",
"def deal_demo():\n deck = get_deck()\n print(hand_to_string(deck))\n print(hand_to_string(get_hand(deck)))\n print(hand_to_string(get_hand(deck)))",
"def deal(self, hands, card_per_hand=1):\n for rounds in range(card_per_hand):\n for hand in hands:\n if self.cards:\n top_card = self.cards[0]\n self.give(top_card, hand)\n else:\n print(\"Can't continue deal. Out of cards!\")",
"def showdown(self):\r\n\r\n poker_hands = []\r\n message = \"\"\r\n for player in self.players:\r\n poker_hands.append(player.hand.best_poker_hand(self.community_cards.cards))\r\n\r\n # Reveal all cards when the round is over\r\n player.reveal_cards()\r\n\r\n if poker_hands[0].type > poker_hands[1].type:\r\n message = \"Player {} won! \\nPoker hand >{}< won against >{}<\".format(\r\n self.players[0].name, str(poker_hands[0].type), str(poker_hands[1].type))\r\n self.players[0].credits += self.pot\r\n\r\n if poker_hands[0].type < poker_hands[1].type:\r\n message = \"Player {} won! \\nPoker hand >{}< won against >{}<\".format(\r\n self.players[1].name, str(poker_hands[1].type), str(poker_hands[0].type))\r\n self.players[1].credits += self.pot\r\n\r\n if poker_hands[0].type == poker_hands[1].type:\r\n if poker_hands[0].highest_values > poker_hands[1].highest_values:\r\n message = \"Player {} won! \\nHighest value >{}< won against >{}<\".format(\r\n self.players[0].name, str(poker_hands[0].highest_values), str(poker_hands[1].highest_values))\r\n self.players[0].credits += self.pot\r\n\r\n elif poker_hands[0].highest_values < poker_hands[1].highest_values:\r\n message = \"Player {} won! \\nHighest value >{}< won against >{}<\".format(\r\n self.players[1].name, str(poker_hands[1].highest_values), str(poker_hands[0].highest_values))\r\n self.players[1].credits += self.pot\r\n\r\n elif poker_hands[0].highest_values == poker_hands[1].highest_values:\r\n message = \"It is a draw! Both players had >{}< and highest value >{}<\".format(\r\n poker_hands[0].type.name, str(poker_hands[0].highest_values))\r\n\r\n for player in self.players:\r\n player.credits += (self.pot // len(self.players))\r\n else:\r\n self.game_message_warning.emit(\"Incorrect comparison of poker hands\")\r\n\r\n self.new_output.emit(message)\r\n self.game_message.emit(message)\r\n self.new_credits.emit()\r\n self.new_pot.emit()",
"def deal(self):\n dealt_card = self.deck_of_cards.pop()\n print(\"You have been dealt the {} \".format(dealt_card.value) \\\n + \"of {}.\".format(dealt_card.suit) + \"\\n\")",
"def deal_cards(self, agent, param):\n return agent.deal(param, big_blind, small_blind, self.bet_hist, self.pot)",
"def deal_card(self):\n return self._deal(1)[0]",
"def endgame(self):\n #reveals the dealer's first card then the dealer hits until the dealer's hand's value is above 16\n self.dealer_hand[0].face_up()\n if self.dealer_hand[0].value in FACE_CARDS:\n self.dealer_value += 10\n elif self.dealer_hand[0].value == \"A\":\n self.dealer_value += 11\n self.dealer_ace_count += 1\n else:\n self.dealer_value += int(self.dealer_hand[0].value)\n\n if self.dealer_value > 21:\n if self.dealer_ace_count > self.dealer_almost_bust:\n #To prevent a Bust, the Dealer's Ace became a one\n self.dealer_value -= 10\n self.dealer_almost_bust += 1\n else:\n self.player_win()\n #House always wins Ties\n elif self.dealer_value == 21:\n self.player_lose()\n\n while self.dealer_value < 17:\n self.hit(\"dealer\")\n\n if (self.player_value - self.dealer_value) > 0:\n self.player_win()\n else:\n self.player_lose()",
"def deal_board(deck):\n board = []\n deal_flop(deck, board)\n deal_turn(deck, board)\n deal_river(deck, board)\n return deck, board",
"def deal_cards(self):\n for i in range(0, self.num_players):\n # start at first player index\n player = self.players[self.index_wrap(i)]\n for c in range(Evolution.BASE_CARD_DRAW + player.num_species):\n self.check_for_empty_deck()\n # actually pops last element so pretend deck is flipped\n player.hand.add_card(self.deck.pop())\n # Don't actually set cards left until all cards have been handed out\n if self.continue_game:\n self.cards_left.set(len(self.deck))\n else:\n self.cards_left.set(0)",
"def deal(self):\n\t\tplayerList = self.getPlayers()\n\t\tstart = self.curDealerSeatNo + 1\n\t\tfor i in range(len(playerList)*2):\n\t\t\tplayerList[(start + i) % len(playerList)].hand.append(self.deck.pop())\n\t\t\tplayerList[(start + i) % len(playerList)].isHandLive = True",
"def stand():\n \n # Update message, score and the player's \"Hand\" status\n # as global variables.\n global outcome, outcome_plus, outcome_plus_plus, in_play, score, action \n \n # If the \"Player\" has busted, remind the \"Player\" that \n # they have busted.\n if player.get_value() > 21:\n outcome = PLAYER_BUSTED\n outcome_plus = outcome_plus_plus = \"\"\n action = NEW_DEAL\n elif in_play:\n # If the \"Hand\" is in play, repeatedly hit \"Dealer\" \n # until his \"Hand\" has value 17 or more. \n while dealer.get_value() < 17:\n dealer.add_card(deck_of_cards.deal_card())\n\n # If busted, update messages, score and the \n # player's \"Hand\" status. \n if dealer.get_value() > 21:\n outcome = PLAYER_WINS\n outcome_plus = DEALER_BUSTED\n outcome_plus_plus = \"\"\n action = NEW_DEAL \n score += SCORE_POINTS \n in_play = False\n # Else compare the value of the \n # player's and dealer's \"Hands\". If the value of \n # the player's \"Hand\" is less than or equal to \n # the dealer's \"Hand\", the \"dealer\" wins. \n # Otherwise the \"player\" has won. Again,\n # update messages, score and the player's \"Hand\" \n # status. \n else: \n in_play = False\n action = NEW_DEAL\n outcome_plus = outcome_plus_plus = \"\"\n if player.get_value() > dealer.get_value():\n outcome = PLAYER_WINS \n score += SCORE_POINTS \n else:\n outcome = PLAYER_LOSES \n score -= SCORE_POINTS\n \n return None",
"def take_turn(self):\n \n self.card_1 = self.get_card()\n self.display_card_1()\n guess = self.player.higher_lower()\n self.card_2 = self.get_card()\n self.display_card_2()\n self.compare_cards(guess)\n self.player.print_score()\n if self.player.score > 0:\n self.can_deal = self.player.keep_playing()\n print(\"\\n\")\n else:\n self.can_deal = False\n print(\"Game overThanks for playing!\")",
"async def blackjack(self, ctx, arg: int): \n db = sqlite3.connect('main.sqlite')\n cursor = db.cursor()\n cursor.execute(f'SELECT user_id, jacks FROM main WHERE user_id = {ctx.author.id}')\n result = cursor.fetchone()\n embed = discord.Embed(color=0x228b22, title=\"Blackjack\")\n if result is not None:\n if arg > result[1]:\n embed.add_field(name=\"Error\", value=f\"You can't bid more chips than you have!\", inline=False)\n embed.set_footer(text=\"You can check your balance using the *profile* command\")\n else:\n player, house = [],[]\n deck.deal(player,2)\n deck.deal(house, 2)\n embed.add_field(name=\"Your Hand:\", value=f\"```{deck.display_hand(player)}``` \\n Value: {deck.hand_value(player)}\")\n embed.add_field(name=\"Dealer's Hand:\", value=f\"```['{deck.display_hand(house)[1]}', '?'] ``` \\n Value: ?\")\n embed.set_footer(text=\"Type `hit` or `stay` to take your turn!\")\n await ctx.send(content=None, embed=embed)\n if deck.hand_value(house) != 21 and deck.hand_value(player) != 21:\n msg = await self.client.wait_for('message', check=lambda message: message.author == ctx.author)\n while msg.content.startswith(\"hit\") or msg.content.startswith(\"Hit\"):\n embed.remove_field(0)\n deck.deal(player)\n embed.insert_field_at(0, name=\"Your Hand:\", value=f\"```{deck.display_hand(player)}``` \\n Value: {deck.hand_value(player)}\")\n await ctx.send(content=None, embed=embed)\n if deck.hand_value(player) > 21:\n break\n msg = await self.client.wait_for('message', check=lambda message: message.author == ctx.author)\n embed.remove_field(1)\n embed.set_footer(text=\"\")\n deck.house_turn(house)\n embed.add_field(name=\"Dealer's Hand:\", value=f\"```{deck.display_hand(house)}``` \\n Value: {deck.hand_value(house)}\")\n if deck.hand_value(player) == 21:\n outcome = \"Blackjack!\"\n bal = \"won\"\n chips = int(result[1] + arg*1.5)\n elif deck.hand_value(player) > 21:\n outcome = \"Player bust, you lose\"\n bal = \"lost\"\n chips = int(result[1] - arg)\n elif deck.hand_value(house) > 21:\n outcome = \"Dealer bust, you win!\"\n bal = \"won\"\n chips = int(result[1] + arg)\n elif deck.hand_value(player) > deck.hand_value(house):\n outcome = \"Win!\"\n bal = \"won\"\n chips = int(result[1] + arg)\n elif deck.hand_value(player) == deck.hand_value(house):\n outcome = \"Push, chips back\"\n bal = \"gotten back your\"\n chips = int(result[1])\n else:\n outcome = \"Loss\"\n bal = \"lost\"\n chips = int(result[1] - arg)\n sql = (\"UPDATE main SET jacks = ? WHERE user_id = ?\")\n val = (chips, ctx.author.id)\n cursor.execute(sql, val)\n db.commit()\n cursor.close()\n db.close()\n if chips == int(result[1]):\n chips += arg\n embed.add_field(name=outcome, value=f\"You have {bal} <:chip:657253017262751767> **{abs(int(result[1] - chips))}** chips\", inline=False)\n await ctx.send(content=None, embed=embed)\n else:\n await ctx.send(\"You must register before you can play blackjack!\")",
"def deal_cards(deck, card): \n player = deck[card]\n return player",
"def deal_card(self):\n if len(self.cards) > 0:\n return self.cards.pop()\n else:\n return None",
"def deal_two_cards_to_each(deck):\n hands = [deck[0:2] + [sorted([deck[0][0], deck[1][0]], reverse=True)] + [deck[0][1] == deck[1][1]],\n deck[2:4] + [sorted([deck[2][0], deck[3][0]], reverse=True)] + [deck[2][1] == deck[3][1]],\n deck[4:6] + [sorted([deck[4][0], deck[5][0]], reverse=True)] + [deck[4][1] == deck[5][1]],\n deck[6:8] + [sorted([deck[6][0], deck[7][0]], reverse=True)] + [deck[6][1] == deck[7][1]],\n deck[8:10] + [sorted([deck[8][0], deck[9][0]], reverse=True)] + [deck[8][1] == deck[9][1]],\n deck[10:12] + [sorted([deck[10][0], deck[11][0]], reverse=True)] + [deck[10][1] == deck[11][1]]]\n deck = deck[12:]\n return hands, deck",
"def deal(this_deck):\n dealt_card = this_deck.popleft()\n\n return dealt_card",
"def checkDoubles(self,card): # need to check defenders handcount...\n multipleCards = [card]\n for i in range(4): # checking all other possible cards of same rank\n card_plus = card + 13 * i # checking higher values\n card_minus = card - 13 * i # checking lower values\n if card_plus in self.currentHand and card_plus < 51 and card_plus != card and card_plus not in multipleCards:\n print(\"Do you wish to add:\")\n cardManager.printHand([card_plus])\n prompt= input(\"to your attack? (y/n):\")\n while prompt != 'y' and prompt != 'n': # input checking\n print(\"Do you wish to add:\")\n cardManager.printHand([card_plus])\n prompt = input(\"to your attack? (y/n):\")\n if prompt == 'y':\n print(\"added\")\n multipleCards.append(card_plus)\n self.currentHand.remove(card_plus)\n else:\n print(\"Did not add\")\n if card_minus in self.currentHand and card_minus > 0 and card_plus != card and card_minus not in multipleCards:\n print(\"Do you wish to add:\")\n cardManager.printHand([card_minus])\n prompt = input(\"to your attack? (y/n):\")\n while prompt != 'y' and prompt != 'n': # input checking\n print(\"Do you wish to add:\")\n cardManager.printHand([card_minus])\n prompt = input(\"to your attack? (y/n):\")\n if prompt == 'y':\n print(\"added\")\n multipleCards.append(card_minus)\n self.currentHand.remove(card_minus)\n else:\n print(\"Did not add\")\n return multipleCards",
"def deal(self):\n hands = sample(self.deck, 13) #random sample so no need to shuffle\n hand1, hand2, flip = hands[:6], hands[6:-1], hands[-1]\n return hand1, hand2, flip",
"def accordion_game_loop():\n\n while True:\n \n # Shows player the cards on the table\n deck.cards_on_table() \n \n # Prompt player to choose from available cards on table or quit\n player_choice = input(\n \"Pick a card index number or deal a card = d or quit game = q: \")\n print('')\n \n try:\n # How to exit the game loop\n if player_choice == 'q':\n break\n # How to deal a new card, plus win and lose conditions \n if player_choice == 'd':\n if len(deck.dealer_deck) >= 1:\n deck.deal_cards(1)\n print('Undealt cards: ', len(deck.dealer_deck), '\\n')\n continue\n if len(deck.dealer_deck) == 0 and len(deck.table_deck) == 1:\n print('\\n','\\t','Congratulations, you won!')\n break\n else:\n break\n \n # How to choose a particular card and move it 3 or 1 places\n if 1 <= int(player_choice) <= 53:\n player_choice1 = int(player_choice)\n player_choice2 = input(\n \"please choose d = deal, 3 = move 3 places or 1 = move one place: \")\n \n # Repeating the dealing of a new card plus win and loose\n # conditions\n if player_choice2 == 'd':\n if len(deck.dealer_deck) >= 1:\n deck.deal_cards(1)\n print('Undealt cards: ', len(deck.dealer_deck), '\\n')\n continue\n if len(deck.dealer_deck) == 0 and len(deck.table_deck) == 1:\n print('\\n','\\t','Congratulations, you won!')\n break\n else:\n break\n \n # How to move card 3 places and check that it is possible\n elif (player_choice2 == '3' and \n svc.value_comparison(\n deck.table_deck[player_choice1 - 1],\n deck.table_deck[player_choice1 - 4])):\n if len(deck.table_deck) >= 4:\n deck.move_and_replace(player_choice1, 3)\n elif len(deck.dealer_deck) == 0 and len(deck.table_deck) == 1:\n print('\\n','\\t','Congratulations, you won!')\n else:\n print('\\n','\\t','*** Please choose again, move not allowed ***','\\n')\n continue\n\n # How to move card 1 places and check that it is possible\n elif (player_choice2 == '1' and \n svc.value_comparison(\n deck.table_deck[player_choice1 - 1],\n deck.table_deck[player_choice1 - 2])):\n if len(deck.table_deck) > 1:\n # Choosing to add to next\n deck.move_and_replace(player_choice1, 1)\n elif len(deck.dealer_deck) == 0 and len(deck.table_deck) == 1:\n print('\\n','\\t','Congratulations, you won!')\n else:\n print('\\n','\\t','*** Please choose again, move not allowed ***','\\n')\n else:\n print('\\n','\\t','*** Please choose again, move not allowed ***','\\n')\n \n # Indicate that the player has chosen unknown command\n except:\n print(3 * '\\n','\\t','!!! Unknown command, please choose again !!!','\\n')\n continue\n \n print('Thanks for playing!')",
"def showdown(self):\n print \"%s: %s\" %(self.name, repr(self.cards)) # open dealer's cards\n for player in self.game.players:\n win = self.balance(player)\n if win > 0: \n print player.name, 'wins', win\n elif win == 0: \n print player.name, 'draws'\n elif win <0:\n print player.name, 'loses', -(win) \n self.budget -= win\n player.budget += win\n print 'budget of %s : %s'%(player.name,player.budget)\n print 'budget of %s : %s'%(self.name,self.budget)",
"async def round(self):\n def turn_check(m):\n return ((m.content.lower() == 'stand') or (m.content.lower() == 'hit')) and m.guild == self.ctx.guild\n # Players\n for i, player in enumerate(self.players):\n if not player.out:\n HoS = ''\n while HoS != \"stand\":\n embed_players = discord.Embed(\n title='Players', color=0x0000fd)\n try:\n await self.ctx.send(f\"{self.users[i].name}, Would you like to hit or stand? \")\n HoS = await self.client.wait_for('message', timeout=20.0, check=turn_check)\n HoS = HoS.content.lower()\n\n if HoS == \"stand\":\n break\n\n elif HoS == \"hit\":\n # give the player a new card\n self.deck.move_cards(player, 1)\n # reload the embed with player hands\n for j, player2 in enumerate(self.players):\n if not player2.out:\n embed_players.add_field(\n name=f\"{self.users[j].name}\", value=player2, inline=True)\n await self.players_msg.edit(embed=embed_players)\n\n if player.get_value() > 21:\n await self.ctx.send(f\"{self.users[i].name} is bust\")\n break\n elif player.get_value() == 21:\n await self.ctx.send(f\"{self.users[i].name} has BlackJack!\")\n player.has_bj = True\n break\n\n except Exception as e:\n print(e)\n continue\n\n # Dealer\n while self.dealer.get_value() < 17:\n self.deck.move_cards(self.dealer, 1)\n\n embed_dealer = discord.Embed(title='Dealer', color=0x00ff00)\n embed_dealer.add_field(name=\"Hand\", value=self.dealer, inline=False)\n await self.dealer_msg.edit(embed=embed_dealer)\n\n # Checks\n # if dealer is bust and not all players are out\n if self.dealer.get_value() > 21 and self.total_players_out < len(self.players):\n for player in self.players:\n if player.get_value() <= 21 and not player.out: # if player is not bust and is not out\n player.credit(2 * player.bet)\n await self.ctx.send(\"Since Dealer is bust, all players win\")\n\n elif self.dealer.get_value() == 21 and self.total_players_out < len(self.players): # Dealer has blackjack\n await self.ctx.send(\"Dealer has BlackJack!\")\n for player in self.players:\n if player.has_bj and not player.out:\n player.credit(2 * player.bet)\n else:\n # Used to check if any of the if statements are activated.\n if_flag = False\n for i, player in enumerate(self.players):\n # if player has blacjack or beat the dealer and not out\n if player.has_bj or (player.get_value() < 21 and player.get_value() > self.dealer.get_value()) and not player.out:\n if_flag = True\n await self.ctx.send(f\"{self.users[i].name}, Conrats on winning!\")\n player.credit(2 * player.bet)\n # if player not bust and tied with dealer\n elif player.get_value() < 21 and player.get_value() == self.dealer.get_value() and not player.out:\n if_flag = True\n await self.ctx.send(f\"{self.users[i].name}, tied with the dealer!\")\n player.credit(player.bet)\n if not if_flag and self.total_players_out < len(self.players):\n await self.ctx.send(\"House wins\")\n\n # end of round cleanup\n for i, player in enumerate(self.players):\n if not player.out:\n player.has_bj = False\n if player.coins < 1:\n await self.ctx.send(f\"{self.users[i].name}, Min bet is €1, get your cheap ass out of here\")\n player.out = True\n self.total_players_out += 1\n elif player.coins > 10000:\n await self.ctx.send(f\"{self.users[i].name}! You\\'re too good, we have to stop you\")\n player.out = True\n self.total_players_out += 1"
]
| [
"0.6993562",
"0.6883988",
"0.6612903",
"0.6499829",
"0.6383287",
"0.6368293",
"0.62500584",
"0.6218783",
"0.61778855",
"0.61760587",
"0.6155784",
"0.6136452",
"0.6116217",
"0.60801005",
"0.6064902",
"0.60176337",
"0.6009256",
"0.59821904",
"0.5931164",
"0.59221417",
"0.59100354",
"0.59050137",
"0.5901433",
"0.58661014",
"0.583611",
"0.58325475",
"0.5830286",
"0.58061284",
"0.5790324",
"0.57843655"
]
| 0.68916774 | 1 |
Deals cards to the house, returns None. | def house_deal(self) -> None:
if not self.has_game_ending_hand:
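            # The house keeps drawing until its best hand value reaches at least 17.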
while max(self.house.hand.value) < 17:
print(f"{self.deal_card(self.house)}") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def first_deal(self) -> None:\n if len(self.house.hand.cards) == 0 and len(self.user.hand.cards) == 0: # Check if cards are already dealt.\n print(self.deal_card(self.user))\n print(self.deal_card(self.house))\n print(self.deal_card(self.user))\n print(self.deal_card(self.house, is_open=False))\n print(f\"The house has: {self.house.hand.cards} totalling to {self.house.hand.value}\")",
"def deal_card(self):\n return self._deal(1)[0]",
"def deal(self):\n self.dealer.hit(self.deck)\n self.dealer.hit(self.deck)\n self.player.hit(self.deck)\n self.player.hit(self.deck)\n\n if self.player.sum_cards() == 21:\n self.round_winner = True\n self.print_hands()\n print(\"BLACKJACK! You win!\")",
"def open_cards(self) -> None:\r\n self.dealer.deal_cards_to(self.card_stack, PokerRules.CARDS_PER_ROUND[self.round_num])",
"def action_hit(self) -> None:\n print(self.deal_card(self.user))",
"def deal_cards(self, agent, param):\n return agent.deal(param, big_blind, small_blind, self.bet_hist, self.pot)",
"def deal_card():\r\n cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]\r\n return (random.choice(cards))",
"def deal_card(self):\n if len(self.cards) > 0:\n return self.cards.pop()\n else:\n return None",
"def deal_cards(self):\n self.card = random.randint(1, 13)\n return self.card",
"def buy_card(self):\n\n print(f\"Hand has buying power {self.hand_buying_power}...\")\n bought_card = None\n\n # by Platinium, if possible\n # otherwise (game stage agnostic) can buy a province or colony, always buy it\n if ((self.highest_buyable_money == cards.PLATINUM) and\n (self.game_stage == GameStage.early_game)):\n bought_card = cards.PLATINUM\n elif ((self.highest_buyable_victory_points == cards.PROVINCE) or\n (self.highest_buyable_victory_points == cards.COLONY)):\n bought_card = self.highest_buyable_victory_points\n else:\n # buy the highest buyable money by default\n if (self.highest_buyable_money != cards.COPPER):\n bought_card = self.highest_buyable_money\n\n # except if in the late game stage, in which case buy the highest\n # buyable victory points instead\n if ((self.game_stage == GameStage.late_game) and\n (self.highest_buyable_victory_points) and\n (self.highest_buyable_victory_points.victory_points > 0)):\n bought_card = self.highest_buyable_victory_points\n print(f\"Late Stage Game, so buying victory points over money\")\n\n # explain the play\n self.speak_hand()\n s = f\"for total buying power of {self.hand_buying_power}\"\n self.game.speak_str(s)\n\n # gain the card bought, if any, to the discard pile:\n if bought_card:\n s = f\"I buy {bought_card.name}\"\n self.game.speak_str(s)\n\n # gain the card to the discard pile\n self.deck.discard.append(bought_card)\n self.game.buy_card(bought_card)\n else:\n s = f\"I do not buy anything\"\n self.game.speak_str(s)\n\n # the whole hand is used up buying the card, discard the hand\n self.deck.discard_hand()",
"def deal_card():\n cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]\n card = random.choice(cards)\n return card",
"def deal(self):\n if len(self) == 0:\n return None\n else:\n return self._cards.pop(0)",
"def deal(self):\n if len(self) == 0:\n return None\n else:\n return self._cards.pop(0)",
"def test_for_dealing_card():\n deck1 = Shoe()\n deck1.deal_card()\n assert len(deck1.deck) == 51",
"def deal_demo():\n deck = get_deck()\n print(hand_to_string(deck))\n print(hand_to_string(get_hand(deck)))\n print(hand_to_string(get_hand(deck)))",
"def deal_card():\n cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]\n random_card = random.choice(cards)\n return random_card",
"def mock_card():\n return Card(Suit.SPADE, 1)",
"def show_card(self):\n return self.hands.show(0)",
"def deal_cards(deck, card): \n player = deck[card]\n return player",
"def Deal():\r\n cardsout = []\r\n cardoptions = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23]\r\n topcardoptions = [0,2,3,4,5,6]\r\n topcard = topcardoptions[random.randint(0,5)]\r\n cardoptions.pop(cardoptions.index(topcard))\r\n cardsout.append(topcard)\r\n\r\n if SHOWHAPPENINGS == True:\r\n disp = card_dict[topcard]\r\n print(\"Topcard is: {}\".format(disp)) \r\n\r\n for i in range(4):\r\n numcards = 0\r\n while numcards < 5:\r\n possiblerange = len(cardoptions) - 1\r\n cardindex = random.randint(0,possiblerange)\r\n card = cardoptions[cardindex]\r\n cardsout.append(card)\r\n cardoptions.pop(cardoptions.index(card))\r\n PlayerHands[i].append(card)\r\n numcards += 1\r\n PlayerHands[i] = sorted(PlayerHands[i]) #putting into ascending order\r\n if i == 0 or i == 2:\r\n PlayerHands[i].append(\"RedTeam\")\r\n else: \r\n PlayerHands[i].append(\"BlackTeam\")\r\n \r\n PlayerHands[0].append(PLAYER1)\r\n PlayerHands[1].append(PLAYER2)\r\n PlayerHands[2].append(PLAYER3)\r\n PlayerHands[3].append(PLAYER4)\r\n #PlayerHand format = [card1,card2,card3,card4,card5,Team,Name]\r\n\r\n return topcard",
"def sum_hand(self, cards):\n self.totalValue = 0\n for card in cards:\n self.totalValue += DeckOfCards.value(self, card)\n\n for card in cards:\n if self.totalValue > 21 and 'A' in card:\n self.totalValue -= 10\n \n if self.totalValue > 21:\n self.keepGoing = False\n print(f\"{self.name} busted!\")",
"def test_cards_get(self):\n pass",
"def deal( self ):\n \n return self.__deck.pop() if len(self.__deck) else None # Use ternary expression to guard against empty deck.",
"def event_house_blackjack(self) -> None:\n if 21 in self.user.hand.value:\n self.event_player_push()\n else:\n print(\"The house has blackjack\")\n self.event_house_wins()",
"async def blackjack(self, ctx, arg: int): \n db = sqlite3.connect('main.sqlite')\n cursor = db.cursor()\n cursor.execute(f'SELECT user_id, jacks FROM main WHERE user_id = {ctx.author.id}')\n result = cursor.fetchone()\n embed = discord.Embed(color=0x228b22, title=\"Blackjack\")\n if result is not None:\n if arg > result[1]:\n embed.add_field(name=\"Error\", value=f\"You can't bid more chips than you have!\", inline=False)\n embed.set_footer(text=\"You can check your balance using the *profile* command\")\n else:\n player, house = [],[]\n deck.deal(player,2)\n deck.deal(house, 2)\n embed.add_field(name=\"Your Hand:\", value=f\"```{deck.display_hand(player)}``` \\n Value: {deck.hand_value(player)}\")\n embed.add_field(name=\"Dealer's Hand:\", value=f\"```['{deck.display_hand(house)[1]}', '?'] ``` \\n Value: ?\")\n embed.set_footer(text=\"Type `hit` or `stay` to take your turn!\")\n await ctx.send(content=None, embed=embed)\n if deck.hand_value(house) != 21 and deck.hand_value(player) != 21:\n msg = await self.client.wait_for('message', check=lambda message: message.author == ctx.author)\n while msg.content.startswith(\"hit\") or msg.content.startswith(\"Hit\"):\n embed.remove_field(0)\n deck.deal(player)\n embed.insert_field_at(0, name=\"Your Hand:\", value=f\"```{deck.display_hand(player)}``` \\n Value: {deck.hand_value(player)}\")\n await ctx.send(content=None, embed=embed)\n if deck.hand_value(player) > 21:\n break\n msg = await self.client.wait_for('message', check=lambda message: message.author == ctx.author)\n embed.remove_field(1)\n embed.set_footer(text=\"\")\n deck.house_turn(house)\n embed.add_field(name=\"Dealer's Hand:\", value=f\"```{deck.display_hand(house)}``` \\n Value: {deck.hand_value(house)}\")\n if deck.hand_value(player) == 21:\n outcome = \"Blackjack!\"\n bal = \"won\"\n chips = int(result[1] + arg*1.5)\n elif deck.hand_value(player) > 21:\n outcome = \"Player bust, you lose\"\n bal = \"lost\"\n chips = int(result[1] - arg)\n elif deck.hand_value(house) > 21:\n outcome = \"Dealer bust, you win!\"\n bal = \"won\"\n chips = int(result[1] + arg)\n elif deck.hand_value(player) > deck.hand_value(house):\n outcome = \"Win!\"\n bal = \"won\"\n chips = int(result[1] + arg)\n elif deck.hand_value(player) == deck.hand_value(house):\n outcome = \"Push, chips back\"\n bal = \"gotten back your\"\n chips = int(result[1])\n else:\n outcome = \"Loss\"\n bal = \"lost\"\n chips = int(result[1] - arg)\n sql = (\"UPDATE main SET jacks = ? WHERE user_id = ?\")\n val = (chips, ctx.author.id)\n cursor.execute(sql, val)\n db.commit()\n cursor.close()\n db.close()\n if chips == int(result[1]):\n chips += arg\n embed.add_field(name=outcome, value=f\"You have {bal} <:chip:657253017262751767> **{abs(int(result[1] - chips))}** chips\", inline=False)\n await ctx.send(content=None, embed=embed)\n else:\n await ctx.send(\"You must register before you can play blackjack!\")",
"def action_house_reveal(self) -> None:\n self.house.hand.reveal_hand()\n print(f\"\\nThe house reveals their hand containing: {self.house.hand}, totalling to {self.house.hand.value}\")",
"def deal_cards(self):\r\n\t\tself.player.double = False\r\n\t\tif self.cardstack.reshuffle:\r\n\t\t\tself.cardstack.shuffle(self.decks)\r\n\t\t\tself.cardstack.reshuffle = False\r\n\t\tself.hands.append(Hand())\r\n\t\tfor i in range(2):\r\n\t\t\tself.hands[0].add_card(self.cardstack.draw())\r\n\t\t\tself.dealer.add_card(self.cardstack.draw())",
"def deal(this_deck):\n dealt_card = this_deck.popleft()\n\n return dealt_card",
"def deal(self):\n if len(self) == 0:\n return None\n else:\n #removes top card and deals it\n return self._cards.pop()",
"def reveal_card(self):\n self.hand[1] = self.hidden_card_value\n self.hidden_card_value = Card()"
]
| [
"0.7425127",
"0.6940771",
"0.6936745",
"0.6591779",
"0.6573968",
"0.65444267",
"0.65152925",
"0.6493008",
"0.6459767",
"0.6452606",
"0.6400211",
"0.63715595",
"0.63715595",
"0.6359715",
"0.63436705",
"0.63021743",
"0.6300474",
"0.6289987",
"0.62397206",
"0.6235566",
"0.6222352",
"0.61983615",
"0.61896706",
"0.6189028",
"0.61508864",
"0.61463606",
"0.6129571",
"0.610324",
"0.6100197",
"0.60899585"
]
| 0.7155595 | 1 |
Checks for the different kinds of hands the player and house has, returns bool. | def get_game_ending_hands(self) -> bool:
end = False
if 10 in self.house.hand.value: # Check if house's first card is a 10
if self.action_peek_cards() == 1: # Peek the card to check for and ace. CardValue.ACE has a value of 1
self.event_house_blackjack()
end = True
elif 11 in self.house.hand.value: # Check if house's first card is an ace
if self.action_peek_cards() in (10, 11, 12, 13): # TEN, JACK, QUEEN, KING in respective order
self.event_house_blackjack()
end = True
elif min(self.house.hand.value) > 21: # Check if house has gone bust
self.event_house_bust()
end = True
elif max(self.user.hand.value) == 21: # Check for player blackjack
self.event_player_blackjack()
end = True
elif min(self.user.hand.value) > 21: # Check if player has gone bust
self.event_player_bust()
end = True
self.has_game_ending_hand = end
return end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def has_fullhouse(self):\n if self.has_pair() & self.has_three_of_a_kind():\n self.rank_per_hand['5'] = \"full house\"\n return True\n return False",
"def is_full_house(hand):\n\tis_a_full_house = False\n\tnum_three_kind = 0\n\tnum_pair = 0\n\ti = 0\n\twhile i < 13:\n\t\tif hand[i] == 3:\n\t\t\tnum_three_kind += 1\n\t\telif hand[i] == 2:\n\t\t\tnum_pair += 1\n\t\ti += 1\n\tif num_three_kind ==1 and num_pair == 1:\n\t\tis_a_full_house = True\n\t\n\thigh_card = 0\n\tj = 0\n\twhile j < 13 and is_a_full_house == True:\n\t\tif (hand[j] == 2 or hand[j] == 3) and j >= high_card:\n\t\t\thigh_card = j\n\t\tj += 1\n\tif is_a_full_house:\n\t\treturn True, high_card\n\telse:\n\t\treturn False",
"def is_game_win(self):\n return not self.deck and not self.hand",
"def is_three_of_a_kind(hand):\n\tis_a_three_of_a_kind = False\n\ti = 0\n\twhile i < 13:\n\t\tif hand[i] == 3:\n\t\t\tis_a_three_of_a_kind = True\n\t\ti += 1 \n\t\t\n\thigh_card = 0\n\tj = 0\n\twhile j < 13 and is_a_three_of_a_kind == True:\n\t\tif hand[j] == 3 and j >= high_card:\n\t\t\thigh_card = j\n\t\tj += 1\n\tif is_a_three_of_a_kind:\n\t\treturn True, high_card\n\telse:\n\t\treturn False",
"def is_card_in_other_hands(self, own_hand_index, card):\n for i, hand in enumerate(self.hands):\n if i == own_hand_index:\n continue\n if card in hand:\n return True\n return False",
"def is_four_of_a_kind(hand):\n\tis_a_four_of_a_kind = False\n\ti = 0\n\twhile i < 13:\n\t\tif hand[i] == 4:\n\t\t\tis_a_four_of_a_kind = True\n\t\ti += 1 \n\t\t\n\thigh_card = 0\n\tj = 0\n\twhile j < 13 and is_a_four_of_a_kind == True:\n\t\tif hand[j] == 4 and j >= high_card:\n\t\t\thigh_card = j\n\t\tj += 1\n\tif is_a_four_of_a_kind:\n\t\treturn True, high_card\n\telse:\n\t\treturn False",
"def is_three_of_a_kind(hand):\n count = {c:0 for c in cards.keys()}\n for card in hand:\n count[card[0]] += 1\n for c in count:\n if count[c] == 3:\n return (True, cards[c])\n return None",
"def does_player_have_card(self, player, card):\n return card in self.hands[player]",
"def is_four_of_a_kind(hand):\n count = {c:0 for c in cards.keys()}\n for card in hand:\n count[card[0]] += 1\n for c in count:\n if count[c] == 4:\n return (True, cards[c])\n return None",
"def has_three_of_a_kind(self):\n self.suit_hist()\n for val in self.ranks.values():\n if val >= 3:\n self.rank_per_hand['2'] = \"three of a kind\"\n return True\n return False",
"def is_full_house(hand):\n count = {c:0 for c in cards.keys()}\n for card in hand:\n count[card[0]] += 1\n for c in count:\n if count[c] != 0 and count[c] != 2 and count[c] != 3:\n return None\n triple = 0\n for k in count:\n if count[k] == 3:\n triple = cards[k]\n return (True, triple)",
"def has_four_of_a_kind(self):\n self.suit_hist()\n for val in self.ranks.values():\n if val >= 4:\n self.rank_per_hand['6'] = \"four of a kind\"\n return True\n return False",
"def has_cards(self):\n return self.hand.len() > 0",
"def isOpen(self):\n what = self.checkapf(\"WHATSOPN\").read()\n if \"DomeShutter\" in what or \"MirrorCover\" in what or \"Vents\" in what:\n return True, what\n else:\n return False, ''",
"async def should_handle(self, iteration):\n self.queens = self.ai.queens\n self.hatchery = self.ai.townhalls\n self.enemies = self.ai.known_enemy_units.not_structure\n\n if not self.queens:\n return False\n\n if not self.hatchery:\n return False\n\n return True",
"def testHand():\n\n failure = False\n print(\"\\n ---------- Test Hand ---------\")\n\n h = Hand(8, {'a':3, 'b':2, 'd':3})\n\n print(\"Mano actual:\", h)\n h.update('bad')\n print(\"Palabra dada: bad\")\n print(\"Mano actual:\", h)\n\n if h.containsLetters('aabdd') and not h.isEmpty():\n failure = True\n else:\n failure = False\n print(\"FAILURE: Debería estar la letras 'aabdd' y además no estar vacío\")\n\n h.update('dad')\n print(\"Palabra dada: dad\")\n print(\"Mano actual:\", h)\n if h.containsLetters('ab') and not h.isEmpty():\n failure = True\n else:\n failure = False\n print(\"FAILURE: Debería estar la letras 'aabdd' y además no estar vacío\")\n\n h.update('ab')\n print(\"Palabra dada: ab\")\n print(\"Mano actual:\", h)\n\n if h.isEmpty():\n failure = True\n else:\n failure = False\n print(\"FAILURE: Debería estar vacío\")\n\n print(\"Comparación de jugadas: \")\n print(\"h = Hand(8, {'a':3, 'b':2, 'd':3})\")\n h = Hand(8, {'a':3, 'b':2, 'd':3})\n print(\"g = Hand(8, {'a':3, 'b':2, 'd':3})\")\n g = Hand(8, {'a':3, 'b':2, 'd':3})\n print(\"j = Hand(8, {'a':3, 'b':2, 'd':3})\")\n j = Hand(7, {'a':2, 't':2, 'p':3})\n print(\"¿h = g?\", h == g)\n print(\"¿h,g = j?\", h == j or g == j)\n\n if failure:\n print(\"SUCCESS: testHand()\")\n else:\n print(\"FAILURE: testHand()\")",
"def compare_hands(self):\r\n\r\n # Slows down the pace of the game with pauses\r\n self.loading(0.25)\r\n\r\n # If the round ends in a tie, the try_again will be set to true so that the program knows\r\n # to restart the round without incrementing the round number or changing the win/lose record\r\n if (self.player_rock is True and self.opp_rock is True) or (\r\n self.player_paper is True and self.opp_paper is True) or (\r\n self.player_scissors is True and self.opp_scissors is True):\r\n\r\n self.try_again = True\r\n\r\n self.player_tie()\r\n\r\n else:\r\n\r\n # If there is no draw, then the code proceeds to determine the winner and the loser.\r\n self.try_again = False\r\n\r\n if self.player_rock is True and self.opp_scissors is True:\r\n\r\n self.player_win()\r\n\r\n elif self.player_rock is True and self.opp_paper is True:\r\n\r\n self.player_lose()\r\n\r\n elif self.player_paper is True and self.opp_rock is True:\r\n\r\n self.player_win()\r\n\r\n elif self.player_paper is True and self.opp_scissors is True:\r\n\r\n self.player_lose()\r\n\r\n elif self.player_scissors is True and self.opp_paper is True:\r\n\r\n self.player_win()\r\n\r\n elif self.player_scissors is True and self. opp_rock is True:\r\n\r\n self.player_lose()\r\n\r\n # Clear the summary entry box\r\n self.summary_entry.delete(0, \"end\")\r\n\r\n # Insert a new value which lets the player know if they won that round\r\n self.summary_entry.insert(0, self.summary)",
"def test_privatize_hands(self):\n g = Game()\n g.add_player(uuid4(), 'p0')\n g.add_player(uuid4(), 'p1')\n gs = g\n\n p0, p1 = gs.players\n\n latrine, insula, jack, road = cm.get_cards(['Latrine', 'Insula', 'Jack', 'Road'])\n p0.hand.set_content([latrine, insula])\n p1.hand.set_content([jack, road])\n\n gs_private = g.privatized_game_state_copy('p0')\n p0, p1 = gs_private.players\n\n self.assertIn(jack, p1.hand)\n self.assertIn(Card(-1), p1.hand)\n self.assertNotIn(road, p1.hand)\n\n self.assertIn(latrine, p0.hand)\n self.assertIn(insula, p0.hand)\n\n self.assertEqual(len(p0.hand), 2)\n self.assertEqual(len(p1.hand), 2)",
"def determineWinner(self) -> bool:\n\n # Saving the board's rows, columns and diagonals in variables\n rows: List[List[str]] = self.board.getRows()\n columns: List[List[str]] = self.board.getColumns()\n diagonals: List[List[str]] = self.board.getDiagonals()\n\n # saving the board's rows, columns and diagonals in one list\n lines: List[List[str]] = [row for row in rows]\n for column in columns:\n lines.append(column)\n for diagonal in diagonals:\n lines.append(diagonal)\n\n # checking if either the AI or the human has three in a row, column or diagonal\n for symbol in [self.getPlayerSymbol(), self.getAiSymbol()]:\n for line in lines:\n if line.count(symbol) == 3:\n # human player wins\n if symbol == self.getPlayerSymbol():\n winner: Player = self.player\n\n # AI wins\n else:\n winner: Ai = self.ai\n print(f\"{winner.getName()} wins!\")\n return True\n return False",
"def is_royal_flush(hand):\n\n # same suit\n suite = hand[0][1]\n count = {c:0 for c in cards.keys()}\n for c in hand:\n if suite != c[1]:\n return False\n count[c[0]] += 1\n # all in same suit\n for c in 'T J Q K A'.split():\n if count[c] != 1:\n return False\n return True",
"def check_win(self, player):\n def check_row_win(player):\n for row in self.game_state:\n if player == row[0] == row[1] == row[2]:\n return True\n return False\n\n def check_column_win(player):\n # For doing a column check, transpose the grid and do a row check\n trans_game_state = numpy.transpose(self.game_state)\n for row in trans_game_state:\n if player == row[0] == row[1] == row[2]:\n return True\n return False\n\n def check_diag_win(player):\n # Left to right diagonal\n if player == self.game_state[0][0] == self.game_state[1][1] == self.game_state[2][2]:\n return True\n # Right to left diagonal\n if player == self.game_state[0][2] == self.game_state[1][1] == self.game_state[2][0]:\n return True\n return False\n\n if check_column_win(player) or check_diag_win(player) or check_row_win(player):\n return True\n return False",
"def still_in_hand(self):\n return len(self.hand.cards)!=0",
"def is_blackjack(self):\n if self.hand == 21 and len(list(self)) ==2:\n print '%s = Blackjack'%self\n return True",
"def check_win(self):\n return UNEXPOSED not in self.get_game() and self.get_game().count(FLAG) == len(self.get_pokemon_location)",
"def won_game(self):\n for player in self.players:\n if len(player.cards) == 0:\n\n return True\n return False",
"def check_win_condition(board) -> bool:\n if _check_vertical_win_condition(board) or _check_horizontal_win_condition(board) or _check_diagonal_win_condition(\n board):\n return True\n else:\n board.alternate_current_player()\n return False",
"def is_high_card(hand):\n\tis_a_high_card = True\n\ti = 0\n\twhile i < 13:\n\t\tif hand[i] > 1:\n\t\t\tis_high_card = False\n\t\ti += 1\n\t\t\n\thigh_card = 0\n\tj = 0\n\twhile j < 13 and is_a_high_card == True:\n\t\tif hand[j] == 1 and j >= high_card:\n\t\t\thigh_card = j\n\t\tj += 1\n\tif is_a_high_card:\n\t\treturn True, high_card\n\telse:\n\t\treturn False",
"def has_3_spades(self):\n if Card('3', 'spades') in self.hand:\n return True\n return False",
"def is_game_over(self):\n if self.just_cheated_a or self.just_cheated_b:\n return False\n if self.game_stage == 3:\n return (self.die_a.current_value == \"5\" and self.die_b.current_value == \"6\" or\n self.die_a.current_value == \"6\" and self.die_b.current_value == \"5\")\n else:\n return False",
"def find_hands(self):\n # hands = []\n if self.cards:\n all_cards = (self.cards+self.cards_on_table)\n\n self.pairs_threes_fours(self.hands_list, all_cards)\n self.find_flush(self.hands_list, all_cards)\n self.find_straight(self.hands_list, all_cards)\n\n hand_name_list = list(map(lambda h: h.hand_name, self.hands_list))\n\n hands_count = Counter(hand_name_list)\n for key, value in hands_count.items():\n if value == 2 and key == 'Pair':\n self.hands_list.append(HandDescription('Two pairs', None, None))\n\n if \"Pair\" in hand_name_list and \"Three of a kind\" in hand_name_list:\n self.hands_list.append(HandDescription('Full house', None, None))\n\n if \"Flush\" in hand_name_list and 'Straight' in hand_name_list:\n self.hands_list.append(HandDescription('Pokier', None, None))\n\n self.sort_my_hands()"
]
| [
"0.6710579",
"0.664217",
"0.6553142",
"0.64558953",
"0.6409192",
"0.6303049",
"0.630163",
"0.61734915",
"0.614371",
"0.6137094",
"0.6111759",
"0.60144633",
"0.59642214",
"0.59610355",
"0.5945365",
"0.59393865",
"0.59295696",
"0.59169227",
"0.5908294",
"0.59029585",
"0.59004796",
"0.5875606",
"0.58725667",
"0.5858142",
"0.5836609",
"0.58261406",
"0.5824969",
"0.5799817",
"0.5785375",
"0.578365"
]
| 0.6740741 | 0 |
Asks user if the game should be ended or not, returns None. | def round_end(self) -> None:
input_ = self.validate_input("\nDo you want to play another round?[y/n]", ("y", "n"))
if input_ == "n":
self.has_ended = True
else:
self.user.bet = 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def end_of_game(self):\n end_game = pyip.inputYesNo(f'\\nDo you want to play again?: ')\n\n if end_game == 'no':\n print('\\n-- GAME OVER --')\n sys.exit()\n elif end_game == 'yes':\n self.game_counter += 1",
"def EndGame(self):\n check_endgame = not self.player.getPlayer().isGeneralExist()\n\n return check_endgame",
"def end_of_game(self):\n try:\n play_again = input(\"Would you like to play again?[y]es/[n]o: \").lower()\n except ValueError:\n print(\"That is is not a valid value please use either y or n.\")\n self.end_of_game()\n if play_again == \"y\":\n # Phrase(self.player_guess, new_game=True, run_extend=True)\n # Character(self.player_guess, self.selected_phrase, life_check=True, new_game=True)\n Game()\n elif play_again == \"n\":\n print(\"\\n\"\"Thank you for playing, see y'all next time.\"\"\\n\")\n sys.exit()\n else:\n print(\"That is is not a valid value please use either y or n.\")\n self.end_of_game()",
"def check_game_end(self):\r\n\r\n if np.all(self.remaining == -1): # end of game\r\n self.show_results() # show the final results\r\n sys.exit() # exit the program\r",
"def endGame(self):\n pass",
"def is_end_game(self):\n win = self.is_game_won()\n tie = self.game_is_tied()\n return win or tie",
"def endgame(winner):",
"def is_endgame_state(self) :\n raise NotImplementedError",
"def end_game(self, game_state: str):\n if game_state == \"win\":\n end_message = \"{0} wins! Would you like to play again?\".format(self.players[self.game.whose_turn])\n else:\n end_message = \"Cat's game! Would you like to play again?\"\n play_again = messagebox.askyesno(title='Game over', message=end_message)\n if play_again:\n self.game.reset_game_data()\n self.reset_game_ui()\n else:\n self.window.destroy()",
"def quit_game(self):\n self.done = True",
"def handleEnd(winner):\n if winner != 0:\n if winner == 1: print(\"human win\")\n if winner == 2: print(\"cpu win\")\n if winner == 3: print(\"draw game\")\n return True\n return False",
"def isDone(self, game):\n from pygame.locals import K_ESCAPE, QUIT \n if game.keystate[K_ESCAPE] or pygame.event.peek(QUIT):\n return True, False \n else:\n return False, None",
"def ask_stop_game(self):\n return self.stop_game",
"def determineEndGame(self):\n\n print(\"noWinners: \" + str(self.noWinners) + \", noTotKids: \" + str(self.noTotKids))\n\n # TODO scegliere come determinare la fine del gioco\n # if self.noWinners == self.noTotKids - 1: # end-game test\n if self.noWinners == self.noTotKids:\n print(\"ho determinato la fine del gioco\")\n return True\n else:\n print(\"colore toccato ma la partita non e' finita\")\n return False",
"def game_end(self):\n win, winner = self.has_a_winner()\n if win:\n return True, winner\n elif not len(self.availables): #\n return True, -1\n\n return False, -1",
"def set_end_game(self):\n # For now, we just need to set a flag indicating we should end\n # the game. When we check whether we should load another story\n # or repeat a repeating script, this flag will be used to skip\n # back to the main session script, to the end of the game.\n self._end_game = True",
"def end_game(self):\n controller = self.controller\n self.end_game_running = True\n\n while self.end_game_running:\n controller.keyboard_end_game_control(self)\n controller.display_end_game()\n\n self.reset_game()\n self.run()",
"def game_end(self):\n win, winner = self.has_a_winner()\n if win:\n return True, winner\n elif not len(self.availables):\n return True, -1\n return False, -1",
"def game_end(self):\n win, winner = self.has_a_winner()\n if win:\n return True, winner\n elif not len(self.availables):\n return True, -1\n return False, -1",
"def end_game(self):\n pygame.event.clear()\n self.screen.fill(BLACK)\n self.show_on_screen(\"GAME OVER\", (500, 600), font_size=50)\n self.show_on_screen(\"Press \\\"N\\\" to start a new game\", (500, 650), font_size=30)\n self.show_on_screen(\"Press \\\"ESC\\\" to exit\", (500, 710), font_size=30)\n self.show_on_screen(\"SCORE: \" + str(self.score), (500, 560), font_size=50)\n pygame.display.flip()\n\n # clears previously pressed key\n pygame.event.wait()\n while True:\n event = pygame.event.wait()\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n pygame.quit()\n sys.exit()\n elif event.key == pygame.K_n:\n self.reset_lander()\n self.play()",
"def is_end_game(state):\n if YoteRules.is_boring(state) or YoteRules.is_player_stuck(state, state.get_next_player()):\n return True\n latest_player_score = state.score[state.get_latest_player()]\n if latest_player_score >= MAX_SCORE:\n return True\n return False",
"def endState(self):\n return not(self.state.winner() == None and len(self.state.get_actions()) > 0)",
"def end_game(self, won=False):\n self.game_over = True\n self.put()\n # Add the game to the score 'board'\n score = Score(user=self.user, \n date=date.today(), \n won=won,\n attempts_remaining=self.attempts_remaining, \n answer=self.answer\n )\n score.put()",
"def check_end_game(self):\n return False if (any(self.p1_pits()) and any(self.p2_pits())) else True",
"def end_game(self):\n self.game.stop_running()",
"def queryNewGame(self):\n print\n response = raw_input('Would you like to play again? ')\n return response.lower() in ('y', 'yes')",
"def end_of_game(self):\n self.log.info('The game has ended')\n #\n end_callout = callout.FinishCallout(\n 'callout',\n 'finish_callout',\n ['exit_button'],\n S['end-game-callout'],\n self.deaths,\n )\n end_callout.show()\n #\n self.objects.append(end_callout)\n #\n while True:\n if end_callout.dismiss_button:\n music.fadeout(2)\n yield 2\n break\n yield 0\n #\n sys.exit(0)",
"def display_end_game(self):\n game_view = self.get_view.get_game_view\n character = self.model.get_character\n\n if character.alive:\n game_view.game_win()\n else:\n game_view.game_over()\n\n game_view.update_display()",
"def endMyTurn(self):\n try:\n result = self.game.server.endEmpireTurn(self.game.authKey)\n if result == 0:\n if self.game.myEmpire['roundComplete'] == 1:\n self.modeMsgBox('You have now un-ended your turn')\n self.game.myEmpire['roundComplete'] = 0\n else:\n self.modeMsgBox('Your turn has been ended, thankyou')\n self.game.myEmpire['roundComplete'] = 1\n self.mainmenu.writeTextRoundEnds()\n elif type(result) == types.StringType:\n self.modeMsgBox(result)\n else:\n \"\"\"End Turn and wait for it to end\"\"\"\n result = self.game.server.endRound(self.game.authKey)\n self.game.server.logout(self.game.authKey)\n from anw.modes.modelogin import ModeLogin\n newMode = ModeLogin(self.game, 200)\n self.game.enterMode(newMode)\n except:\n self.modeMsgBox('endMyTurn->Connection to Server Lost')",
"def verify_ending(self):\n self._fast_forward_to_penultimate_play()\n if self.game_status.game_over:\n # Game shouldn't be over quite yet!\n self.reset()\n return False\n\n self.apply_next_event()\n game_over = self.game_status.game_over\n excess_outs = self.game_status.excess_outs\n self.reset()\n return game_over and not excess_outs"
]
| [
"0.7820257",
"0.75849676",
"0.75164115",
"0.7323227",
"0.71045566",
"0.70531404",
"0.7052095",
"0.6873565",
"0.6841535",
"0.6786962",
"0.67742175",
"0.6761526",
"0.6748283",
"0.6743815",
"0.6704593",
"0.6692921",
"0.66365916",
"0.6626896",
"0.6626896",
"0.6623149",
"0.6581739",
"0.6575373",
"0.65480316",
"0.65336835",
"0.6533486",
"0.65307826",
"0.65095747",
"0.65006137",
"0.6495655",
"0.6489819"
]
| 0.7586079 | 1 |
Event for when house has blackjack, returns None. | def event_house_blackjack(self) -> None:
if 21 in self.user.hand.value:
self.event_player_push()
else:
print("The house has blackjack")
self.event_house_wins() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_for_blackjack(self):\n if (self.dealer.hand.value + self.dealer.face_down.value) == 21:\n if self.player.hand.blackjack:\n return self.blackjack_push()\n else:\n return self.blackjack_dealer_win()\n\n if self.player.hand.blackjack():\n return self.blackjack_player_win()\n lost_insurance_bet(self.side_bet)\n return False",
"def event_player_blackjack(self) -> None:\n win_amount = self.user.bet + 1.5\n print(\"Congratulations, you win:\", win_amount)\n self.user.win_balance(win_amount)",
"def event_house_bust(self) -> None:\n print(f\"The house's hand contains {min(self.house.hand.value)}, they're bust\")\n self.event_player_wins()",
"def event_player_bust(self) -> None:\n print(f\"Your hand contains {min(self.user.hand.value)}, you're bust\")\n self.event_house_wins()",
"def hook_buy_card(self, game, player, card):\n if card.isVictory():\n player.output(\"Gaining Gold from Hoard\")\n player.add_card(game[\"Gold\"].remove())",
"def is_blackjack(self):\n if self.hand == 21 and len(list(self)) ==2:\n print '%s = Blackjack'%self\n return True",
"def event_house_wins(self) -> None:\n print(\"You lose\")\n self.user.lose_balance(self.user.bet)",
"def buy_card(self):\n\n print(f\"Hand has buying power {self.hand_buying_power}...\")\n bought_card = None\n\n # by Platinium, if possible\n # otherwise (game stage agnostic) can buy a province or colony, always buy it\n if ((self.highest_buyable_money == cards.PLATINUM) and\n (self.game_stage == GameStage.early_game)):\n bought_card = cards.PLATINUM\n elif ((self.highest_buyable_victory_points == cards.PROVINCE) or\n (self.highest_buyable_victory_points == cards.COLONY)):\n bought_card = self.highest_buyable_victory_points\n else:\n # buy the highest buyable money by default\n if (self.highest_buyable_money != cards.COPPER):\n bought_card = self.highest_buyable_money\n\n # except if in the late game stage, in which case buy the highest\n # buyable victory points instead\n if ((self.game_stage == GameStage.late_game) and\n (self.highest_buyable_victory_points) and\n (self.highest_buyable_victory_points.victory_points > 0)):\n bought_card = self.highest_buyable_victory_points\n print(f\"Late Stage Game, so buying victory points over money\")\n\n # explain the play\n self.speak_hand()\n s = f\"for total buying power of {self.hand_buying_power}\"\n self.game.speak_str(s)\n\n # gain the card bought, if any, to the discard pile:\n if bought_card:\n s = f\"I buy {bought_card.name}\"\n self.game.speak_str(s)\n\n # gain the card to the discard pile\n self.deck.discard.append(bought_card)\n self.game.buy_card(bought_card)\n else:\n s = f\"I do not buy anything\"\n self.game.speak_str(s)\n\n # the whole hand is used up buying the card, discard the hand\n self.deck.discard_hand()",
"async def blackjack(self, ctx, arg: int): \n db = sqlite3.connect('main.sqlite')\n cursor = db.cursor()\n cursor.execute(f'SELECT user_id, jacks FROM main WHERE user_id = {ctx.author.id}')\n result = cursor.fetchone()\n embed = discord.Embed(color=0x228b22, title=\"Blackjack\")\n if result is not None:\n if arg > result[1]:\n embed.add_field(name=\"Error\", value=f\"You can't bid more chips than you have!\", inline=False)\n embed.set_footer(text=\"You can check your balance using the *profile* command\")\n else:\n player, house = [],[]\n deck.deal(player,2)\n deck.deal(house, 2)\n embed.add_field(name=\"Your Hand:\", value=f\"```{deck.display_hand(player)}``` \\n Value: {deck.hand_value(player)}\")\n embed.add_field(name=\"Dealer's Hand:\", value=f\"```['{deck.display_hand(house)[1]}', '?'] ``` \\n Value: ?\")\n embed.set_footer(text=\"Type `hit` or `stay` to take your turn!\")\n await ctx.send(content=None, embed=embed)\n if deck.hand_value(house) != 21 and deck.hand_value(player) != 21:\n msg = await self.client.wait_for('message', check=lambda message: message.author == ctx.author)\n while msg.content.startswith(\"hit\") or msg.content.startswith(\"Hit\"):\n embed.remove_field(0)\n deck.deal(player)\n embed.insert_field_at(0, name=\"Your Hand:\", value=f\"```{deck.display_hand(player)}``` \\n Value: {deck.hand_value(player)}\")\n await ctx.send(content=None, embed=embed)\n if deck.hand_value(player) > 21:\n break\n msg = await self.client.wait_for('message', check=lambda message: message.author == ctx.author)\n embed.remove_field(1)\n embed.set_footer(text=\"\")\n deck.house_turn(house)\n embed.add_field(name=\"Dealer's Hand:\", value=f\"```{deck.display_hand(house)}``` \\n Value: {deck.hand_value(house)}\")\n if deck.hand_value(player) == 21:\n outcome = \"Blackjack!\"\n bal = \"won\"\n chips = int(result[1] + arg*1.5)\n elif deck.hand_value(player) > 21:\n outcome = \"Player bust, you lose\"\n bal = \"lost\"\n chips = int(result[1] - arg)\n elif deck.hand_value(house) > 21:\n outcome = \"Dealer bust, you win!\"\n bal = \"won\"\n chips = int(result[1] + arg)\n elif deck.hand_value(player) > deck.hand_value(house):\n outcome = \"Win!\"\n bal = \"won\"\n chips = int(result[1] + arg)\n elif deck.hand_value(player) == deck.hand_value(house):\n outcome = \"Push, chips back\"\n bal = \"gotten back your\"\n chips = int(result[1])\n else:\n outcome = \"Loss\"\n bal = \"lost\"\n chips = int(result[1] - arg)\n sql = (\"UPDATE main SET jacks = ? WHERE user_id = ?\")\n val = (chips, ctx.author.id)\n cursor.execute(sql, val)\n db.commit()\n cursor.close()\n db.close()\n if chips == int(result[1]):\n chips += arg\n embed.add_field(name=outcome, value=f\"You have {bal} <:chip:657253017262751767> **{abs(int(result[1] - chips))}** chips\", inline=False)\n await ctx.send(content=None, embed=embed)\n else:\n await ctx.send(\"You must register before you can play blackjack!\")",
"def blackjack():\n score_report, bank, game_deck = start_game()\n while not end_game(bank, game_deck):\n user, dealer = Player(), Player(dealer=True)\n print(\"\\n=============== BEGINNING ROUND! ===============\")\n bank.report_balance()\n bank.place_bet()\n play_round(user=user, dealer=dealer, deck=game_deck)\n if game_deck.cards:\n winner = decide_winner(user, dealer)\n end_round(winner_result=winner, bank=bank, report=score_report)\n print(score_report)\n score_report.report_rounds()\n print(f\"This concludes our game of BlackJack 21 and you get to take home ${bank.balance}, thank you for playing!\")",
"def deal(self):\n self.dealer.hit(self.deck)\n self.dealer.hit(self.deck)\n self.player.hit(self.deck)\n self.player.hit(self.deck)\n\n if self.player.sum_cards() == 21:\n self.round_winner = True\n self.print_hands()\n print(\"BLACKJACK! You win!\")",
"def hasBlackjack(self):\n return len(self.cards) == 2 and self.getPoints() == 21",
"def get_winner(self) -> None:\n if not self.get_game_ending_hands():\n if max(self.user.hand.value) > max(self.house.hand.value): # Values above 21 are omitted\n self.event_player_wins()\n elif max(self.user.hand.value) == max(self.house.hand.value):\n self.event_player_push()\n else:\n self.event_house_wins()",
"def won(self):\r\n return None",
"def get_game_ending_hands(self) -> bool:\n end = False\n if 10 in self.house.hand.value: # Check if house's first card is a 10\n if self.action_peek_cards() == 1: # Peek the card to check for and ace. CardValue.ACE has a value of 1\n self.event_house_blackjack()\n end = True\n elif 11 in self.house.hand.value: # Check if house's first card is an ace\n if self.action_peek_cards() in (10, 11, 12, 13): # TEN, JACK, QUEEN, KING in respective order\n self.event_house_blackjack()\n end = True\n elif min(self.house.hand.value) > 21: # Check if house has gone bust\n self.event_house_bust()\n end = True\n elif max(self.user.hand.value) == 21: # Check for player blackjack\n self.event_player_blackjack()\n end = True\n elif min(self.user.hand.value) > 21: # Check if player has gone bust\n self.event_player_bust()\n end = True\n self.has_game_ending_hand = end\n return end",
"def test_for_blackjack(self):\n hand = self._hand\n cards = [BjCard('clubs', '10'), BjCard('diamonds', 'A')]\n for card in cards:\n hand.add_card(card)\n self.assertEqual(hand.is_blackjack, True)",
"async def onBought( # type: ignore[override]\n self, event: Event, strategy: Optional[EventHandler]\n ) -> None:\n pass",
"def hit(self):\n global in_play, deck, player_hand, dealer_hand, outcome, lost\n \n if in_play:\n player_hand.add_card(deck.deal_card())\n \n if player_hand.get_value() > 21:\n self.outcome.set(\"You have busted! Dealer wins. New deal?\")\n self.lost += 1\n self.score.set(str(self.won) + \"/\" + str(self.lost))\n in_play = False\n draw(canvas)\n\n print \"\\nPlayer hand: \", player_hand\n print \"Dealer hand: \", dealer_hand",
"def house_deal(self) -> None:\n if not self.has_game_ending_hand:\n while max(self.house.hand.value) < 17:\n print(f\"{self.deal_card(self.house)}\")",
"def is_blackjack(self) -> bool:\n if self.score == 21 and len(self.cards) == 2:\n return True\n else:\n return False",
"def deal_self(self):\n self.cards.hit(self.get_card())\n if self.cards.hand < 17 and self.cards.hand>=0:\n self.state = 'active'\n elif self.cards.hand >= 17 and self.cards.hand <= 21:\n self.state = 'stand'\n elif self.cards.hand==-1:\n self.state = 'burst'",
"def hit():\n \n # Update messages, score and the player's \"Hand\" status\n # as global variables.\n global outcome, outcome_plus, outcome_plus_plus, in_play, score, action \n \n # If the \"Hand\" is in play, hit the \"player\". \n if in_play:\n outcome = outcome_plus = outcome_plus_plus = \"\"\n player.add_card(deck_of_cards.deal_card())\n else:\n return None\n \n # If busted, update messages, score and the player's \n # \"Hand\" status.\n if player.get_value() > 21:\n outcome = PLAYER_BUSTED\n outcome_plus = outcome_plus_plus = \"\"\n action = NEW_DEAL \n score -= SCORE_POINTS\n in_play = False\n \n return None",
"def test_for_non_blackjack(self):\n hand = self._hand\n cards = [BjCard('clubs', '8'), BjCard('diamonds', '8')]\n for card in cards:\n hand.add_card(card)\n self.assertEqual(hand.is_blackjack, False)",
"def hook_gain_this_card(self, game, player):\n empties = sum(1 for st in game.cardpiles if game[st].is_empty())\n for _ in range(empties):\n player.gain_card(\"Gold\")",
"def player_hit(self):\n self.player.hit(self.deck)\n self.print_hands()\n \n if self.player.sum_cards() > 21:\n self.round_winner = True\n self.print_hands()\n print(\"BUST! Dealer wins.\")",
"def doCheck(self):\n self.protocol.sendPacket(networkpackets.PacketPokerCheck(**self._serial_and_game_id))",
"def blackjack(self1):\n print(\"Shuffle...\")\n self.card.shuffle()\n print(\"All shuffling!\")\n print(\"Dealing...\")\n self.deal1()\n print(\"\\nLet's play!\")\n for player in self.players1:\n print(\"{}'s turn...\".format(player.name))\n self.play(player)\n else:\n print(\"Determining the winner...\")\n self.find_winner1()",
"def blackjack():\r\n\r\n #Pre-loop condition \r\n play = 'yes'\r\n # Creates a standard deck of cards.\r\n deck = create_deck()\r\n #How many games you're winning or losing by.\r\n tally = 0\r\n #Main gameplay loop\r\n while play.lower() in ('yes','y'):\r\n score, win_statement = blackjack_helper(deck)\r\n print(win_statement)\r\n #Termination, used if player entered 'quit' in input prompt.\r\n if win_statement == 'You forfeit.':\r\n return 'Exiting program'\r\n \r\n #Show the score\r\n tally += score\r\n if tally < 0:\r\n print(f\"You're losing by {-1 * tally}.\")\r\n elif tally == 0:\r\n print('You\\'re tied!')\r\n else: #tally > 0\r\n print(f\"You're winning by {tally}.\")\r\n play = input('Enter \"Yes\" to play again \\n: ') \r\n print(\"Ending game\")",
"def hitMe(hand, deck):\n if deck.cardsLeft == 0:\n return False\n hand.getCard(deck.drawCard())\n return True",
"def hit(player):\n deal_random_card(player)"
]
| [
"0.7102307",
"0.6858465",
"0.6709716",
"0.6596564",
"0.6382169",
"0.6296633",
"0.62518513",
"0.6159988",
"0.6137631",
"0.61218387",
"0.6086342",
"0.60474914",
"0.60283005",
"0.602187",
"0.59936106",
"0.5944831",
"0.59164226",
"0.5806925",
"0.57985395",
"0.5778431",
"0.5761524",
"0.5759307",
"0.57571644",
"0.5750715",
"0.5743386",
"0.56974393",
"0.56967646",
"0.558619",
"0.5555888",
"0.5546824"
]
| 0.85808146 | 0 |
Event for when user wins, returns None. | def event_player_wins(self) -> None:
win_amount = self.user.bet
print("Congratulations, you win:", win_amount)
self.user.win_balance(self.user.bet) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_winner(self) -> None:\n if not self.get_game_ending_hands():\n if max(self.user.hand.value) > max(self.house.hand.value): # Values above 21 are omitted\n self.event_player_wins()\n elif max(self.user.hand.value) == max(self.house.hand.value):\n self.event_player_push()\n else:\n self.event_house_wins()",
"def event_house_wins(self) -> None:\n print(\"You lose\")\n self.user.lose_balance(self.user.bet)",
"def player_win(self):\r\n\r\n self.summary = (\" \" * 83) + \"YOU WIN\"\r\n print(\"Player wins against opponent.\\n\")\r\n self.player_wins += 1",
"def winner(self):\n\n if self.game_ended():\n return self.winning()\n else:\n return 0",
"def notify_winner(self):\n self.is_winner = True",
"def handle_win(context: GameContext, event: pygame.event.Event) -> None:\n if event.type == WIN:\n win_text = WINNER_FONT.render(f\"{event.winner.name} wins\", True, WHITE)\n context.game_window.blit(\n win_text,\n (context.game_window.get_width() // 2 - win_text.get_width() // 2,\n context.game_window.get_height() // 2 - win_text.get_height() // 2))\n pygame.display.update()\n pygame.time.delay(WIN_TEXT_DELAY) # Wait a bit in the winner screen\n restart_game(context)",
"def checkForWin(self):\n w = self.getWinner()\n if w == PLAYER or w == AI:\n # self.printBoard()\n # print('%d'%w + ' won!')\n return\n if w == Tie:\n # print('Tie')\n return",
"def is_game_won(self):\n return True",
"def won(self):\r\n return None",
"def on_win(data):\n print(str(data))\n update_score_db(data['winner'], data['loser'])\n users, scores = calculate_scores()\n socketio.emit('leaderboard_info', {'users': users, 'scores': scores})",
"def has_won(board, player):\r\n return False",
"def handleWin(self, winningplayer):\n self.board.drawWinBoard(winningplayer, self.istournament)\n self.board = None\n self.ui.game = None\n if self.istournament:\n if winningplayer == 1:\n self.ui.tournament.setWinner(1)\n if winningplayer == -1:\n self.ui.tournament.setWinner(2)\n\n threading.Timer(3, self.ui.displayCurrentTournament).start()",
"def winning_event(self, player):\n # vertical check\n for col in range(GameData.columns):\n if self.board[0][col] == player and self.board[1][col] == player and self.board[2][col] == player:\n self.draw_vertical_winning_line(col, player)\n print(\"Player {} has won the game!\".format(player))\n self.game_over = True\n return True\n\n # horizontal check\n for row in range(GameData.rows):\n if self.board[row][0] == player and self.board[row][1] == player and self.board[row][2] == player:\n self.draw_horizontal_winning_line(row, player)\n print(\"Player {} has won the game!\".format(player))\n self.game_over = True\n return True\n\n # ascending diagonal heck\n if self.board[2][0] == player and self.board[1][1] == player and self.board[0][2] == player:\n self.draw_asc_diagonal(player)\n print(\"Player {} has won the game!\".format(player))\n self.game_over = True\n return True\n\n # descending diagonal win chek\n if self.board[0][0] == player and self.board[1][1] == player and self.board[2][2] == player:\n self.draw_desc_diagonal(player)\n print(\"Player {} has won the game!\".format(player))\n self.game_over = True\n return True\n\n return False",
"def has_won(board, player):\n return False",
"def won(self):\n if self.current_room.name == \"Victory\":\n return True\n else:\n return False",
"def win_game(self):\r\n self.board.clear_hovered_tiles_list()\r\n self.is_game_over = True\r\n self.reset_button.won_game()\r\n self.high_score.update(self.timer.seconds)",
"def check_win_lose(self):\n if self.b.get_player_i() == 7: # player got to the bank\n return 1 # win\n if self.b.get_chaser_i() == self.b.get_player_i(): # chaser catch the player\n return 2 # lose\n return 0 # nothing",
"def winner(self):\n if self.__current_player == 1:\n if self.__fields[0].winner():\n print(self.__players[0]._Player__name + \"is winner!\")\n Game.play = False\n elif self.__current_player == 2:\n if self.__fields[1].winner():\n print(self.__players[1]._Player__name + \"is winner!\")\n Game.play = False",
"def checkForWin (self):\r\n\t\tw = self.getWinner()\r\n\t\tif w:\r\n\t\t\tself.printBoard()\r\n\t\t\traise Exception(w + ' won!')",
"def is_game_over(self):\r\n\r\n if self.winner != 0:\r\n return True\r\n\r\n return False",
"def who_won(self):\n if self.scoreB >= WP: return Stone.BLACK\n if self.scoreW >= WP: return Stone.WHITE\n return self.last",
"def check_winner(self):\n pass",
"def you_won(self):\n self.end_of_level()\n self.message_holder.add_widget(self.you_win_label)\n Clock.schedule_once(self.goto_next_level, 5)",
"def event_player_blackjack(self) -> None:\n win_amount = self.user.bet + 1.5\n print(\"Congratulations, you win:\", win_amount)\n self.user.win_balance(win_amount)",
"def game_over(self, won=True):\n if won is True:\n self.game[\"game_status\"] = self.WON\n else:\n self.game[\"game_status\"] = self.DISCONNECTED\n db.save_game(self.game_id, self.game)",
"def get_winner(self) -> int:\n return self._win_state",
"def won(self):\n return self.roster.won",
"def notify_no_win(self):\n\n board_set = set()\n for row in self.board:\n board_set.update(row)\n if \"\" not in board_set:\n print \"Game is over. No one won.\"\n return True",
"def game_over(self, player_one, draw):\n if draw:\n win_string = \" No winner \"\n elif not player_one:\n win_string = \"Player 1 wins!\"\n else:\n win_string = \"Player 2 wins!\"\n text_surface, rect = self.game_font.render(win_string, (0, 0, 0))\n self.screen.blit(text_surface, (self.width/2 - 150, self.height/2 - 20))\n pygame.display.set_caption(win_string)\n pygame.display.flip()",
"def event_game_over(self):\n print('Game over!')\n self._cmd_exit()"
]
| [
"0.68993604",
"0.68964064",
"0.6843061",
"0.6753403",
"0.6743779",
"0.6651324",
"0.65241176",
"0.65032357",
"0.64356375",
"0.6432726",
"0.63998324",
"0.6389373",
"0.6332981",
"0.62867635",
"0.62409985",
"0.6231352",
"0.62251234",
"0.6204182",
"0.62012535",
"0.6200097",
"0.6181266",
"0.6164582",
"0.6162168",
"0.6160121",
"0.6149011",
"0.61317414",
"0.61299837",
"0.61086977",
"0.61071056",
"0.60972536"
]
| 0.7420098 | 0 |
Event for when house wins, returns None. | def event_house_wins(self) -> None:
print("You lose")
self.user.lose_balance(self.user.bet) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_winner(self) -> None:\n if not self.get_game_ending_hands():\n if max(self.user.hand.value) > max(self.house.hand.value): # Values above 21 are omitted\n self.event_player_wins()\n elif max(self.user.hand.value) == max(self.house.hand.value):\n self.event_player_push()\n else:\n self.event_house_wins()",
"def event_house_bust(self) -> None:\n print(f\"The house's hand contains {min(self.house.hand.value)}, they're bust\")\n self.event_player_wins()",
"def event_house_blackjack(self) -> None:\n if 21 in self.user.hand.value:\n self.event_player_push()\n else:\n print(\"The house has blackjack\")\n self.event_house_wins()",
"def won(self):\r\n return None",
"def event_player_bust(self) -> None:\n print(f\"Your hand contains {min(self.user.hand.value)}, you're bust\")\n self.event_house_wins()",
"def winner(self):\n\n if self.game_ended():\n return self.winning()\n else:\n return 0",
"def is_game_won(self):\n return True",
"def notify_winner(self):\n self.is_winner = True",
"def notify_no_win(self):\n\n board_set = set()\n for row in self.board:\n board_set.update(row)\n if \"\" not in board_set:\n print \"Game is over. No one won.\"\n return True",
"def has_won(board, player):\r\n return False",
"def event_player_wins(self) -> None:\n win_amount = self.user.bet\n print(\"Congratulations, you win:\", win_amount)\n self.user.win_balance(self.user.bet)",
"def checkForWin (self):\r\n\t\tw = self.getWinner()\r\n\t\tif w:\r\n\t\t\tself.printBoard()\r\n\t\t\traise Exception(w + ' won!')",
"def checkForWin(self):\n w = self.getWinner()\n if w == PLAYER or w == AI:\n # self.printBoard()\n # print('%d'%w + ' won!')\n return\n if w == Tie:\n # print('Tie')\n return",
"def is_game_over(self):\r\n\r\n if self.winner != 0:\r\n return True\r\n\r\n return False",
"def check_winner(self):\n pass",
"def is_game_over(cls):\n cls.record_winner()\n cls.record_tie()",
"def won(self):\n if self.current_room.name == \"Victory\":\n return True\n else:\n return False",
"def end_of_game(self, winner):\n pass",
"def has_won(board, player):\n return False",
"def game_over(self, won=True):\n if won is True:\n self.game[\"game_status\"] = self.WON\n else:\n self.game[\"game_status\"] = self.DISCONNECTED\n db.save_game(self.game_id, self.game)",
"def _determine_winner(self, room_code: str) -> None:\n game = self.read_game(room_code)\n if (len(game.players) == 1) and game.started:\n (winner,) = game.players\n self.games_table.update_item(\n Key={\"room_code\": room_code},\n UpdateExpression=(\"set winner=:p\"),\n ExpressionAttributeValues={\n \":p\": winner.dict(),\n },\n )",
"def get_winner(state):\n\n if",
"def check_if_game_over():\n # Calling check for winners.\n check_for_winner()\n # Calling check it's tie or not.\n check_if_tie()",
"def who_won(self):\n if self.scoreB >= WP: return Stone.BLACK\n if self.scoreW >= WP: return Stone.WHITE\n return self.last",
"def game_over(winner):\n global in_play, outcome, score\n \n if winner == \"Dealer\":\n score -= 1\n if Dealer.busted:\n outcome = \"Player busted! New Deal?\"\n \n else:\n outcome = \"Dealer Wins! New Deal?\"\n \n else:\n score += 1\n if Player.busted:\n outcome = \"Dealer busted! New Deal?\"\n \n else:\n outcome = \"Player Wins! New Deal?\"\n \n in_play = False",
"def check_game_over(self):\n red, blue = self.board.count_piece()\n if blue == 0:\n self.ui.show_result(\"RED WIN!\")\n self.turn = RED\n elif red == 0:\n self.ui.show_result(\"BLUE WIN!\")\n self.turn = BLUE\n elif red == blue == 1:\n self.ui.show_result(\"DRAW!\")",
"def handle_game_over(self, winner, end_state):\n #############################\n #\n #\n # YOUR CODE HERE\n #\n #\n ##############################\n print(\"Game over, these are the stats:\")\n print('Winner: ' + str(winner))\n print('End state: ' + str(end_state))",
"def winner(self):\n raise NotImplementedError",
"def winner(self):\n raise NotImplementedError",
"def check_if_game_over():\n check_for_winner()\n check_for_tie()"
]
| [
"0.7377803",
"0.7067633",
"0.7039978",
"0.6885437",
"0.6532778",
"0.64540684",
"0.6443208",
"0.63587207",
"0.6265723",
"0.6253461",
"0.625192",
"0.6251206",
"0.6244894",
"0.6225948",
"0.62121314",
"0.6206654",
"0.61854243",
"0.61658245",
"0.61578286",
"0.61563206",
"0.61334753",
"0.6123333",
"0.6121846",
"0.61019754",
"0.6093934",
"0.6061724",
"0.60566103",
"0.60370255",
"0.60370255",
"0.6030707"
]
| 0.7271658 | 1 |
Event for when house goes bust, returns None. | def event_house_bust(self) -> None:
print(f"The house's hand contains {min(self.house.hand.value)}, they're bust")
self.event_player_wins() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def event_player_bust(self) -> None:\n print(f\"Your hand contains {min(self.user.hand.value)}, you're bust\")\n self.event_house_wins()",
"def event_house_blackjack(self) -> None:\n if 21 in self.user.hand.value:\n self.event_player_push()\n else:\n print(\"The house has blackjack\")\n self.event_house_wins()",
"def event_house_wins(self) -> None:\n print(\"You lose\")\n self.user.lose_balance(self.user.bet)",
"async def onBought( # type: ignore[override]\n self, event: Event, strategy: Optional[EventHandler]\n ) -> None:\n pass",
"def event11512000():\n header(11512000)\n end_if_this_event_on()\n if_player_owns_good(0, GOOD.Lordvessel)\n flag.enable(11512000)",
"def onBounce(self):\n pass",
"def is_caught_up_well_enough_for_government_work():\n return config.CAUGHT_UP or (config.BLOCKCHAIN_SERVICE_LAST_BLOCK and config.CURRENT_BLOCK_INDEX >= config.BLOCKCHAIN_SERVICE_LAST_BLOCK - 1)",
"def lost(self):\r\n return None",
"def event11515373():\n header(11515373, 0)\n skip_if_this_event_on(3)\n if_event_flag_off(1, EVENT.JareelDead)\n if_player_inside_region(1, 1512956)\n if_condition_true(0, 1)\n skip_if_client(1)\n network.notify_boss_room_entry()\n chr.activate_npc_buffs(CHR.AbyssalPrinceJareel)",
"def unaway(self):\n self.away()",
"def handle_auction_end() -> None:\n auction_suns = game_state.get_auction_suns()\n max_sun = None\n if sum(1 for el in auction_suns if el is not None) > 0:\n max_sun = max(el for el in auction_suns if el is not None)\n\n # if no suns were bid and the auction tiles are full, clear\n # the tiles\n if max_sun is None:\n if game_state.get_num_auction_tiles() == game_state.get_max_auction_tiles():\n game_state.clear_auction_tiles()\n\n # if a sun was bid, give auction tiles to the winner\n else:\n winning_player = auction_suns.index(max_sun)\n\n # swap out winning player's auctioned sun with the center sun\n game_state.exchange_sun(\n winning_player, max_sun, game_state.get_center_sun()\n )\n game_state.set_center_sun(max_sun)\n\n # give auction tiles to the winner\n auction_tiles = game_state.get_auction_tiles()\n game_state.clear_auction_tiles()\n game_state.give_tiles_to_player(\n winning_player,\n (tile for tile in auction_tiles if gi.index_is_collectible(tile)),\n )\n\n winning_player_collection = game_state.get_player_collection(winning_player)\n\n # resolve pharoah disasters\n num_phars_to_discard = gi.NUM_DISCARDS_PER_DISASTER * sum(\n 1 for tile in auction_tiles if tile == gi.INDEX_OF_DIS_PHAR\n )\n if num_phars_to_discard > 0:\n num_phars_owned = winning_player_collection[gi.INDEX_OF_PHAR]\n num_phars_to_discard = min(num_phars_to_discard, num_phars_owned)\n game_state.remove_single_tiles_from_player(\n [gi.INDEX_OF_PHAR] * num_phars_to_discard, winning_player\n )\n\n # resolve nile disasters\n num_niles_to_discard = gi.NUM_DISCARDS_PER_DISASTER * sum(\n 1 for tile in auction_tiles if tile == gi.INDEX_OF_DIS_NILE\n )\n if num_niles_to_discard > 0:\n num_floods_owned = winning_player_collection[gi.INDEX_OF_FLOOD]\n num_niles_owned = winning_player_collection[gi.INDEX_OF_NILE]\n\n num_floods_to_discard = min(num_floods_owned, num_niles_to_discard)\n num_niles_to_discard = min(\n num_niles_to_discard - num_floods_to_discard, num_niles_owned\n )\n\n game_state.remove_single_tiles_from_player(\n [gi.INDEX_OF_FLOOD] * num_floods_to_discard\n + [gi.INDEX_OF_NILE] * num_niles_to_discard,\n winning_player,\n )\n\n # resolve civ disasters\n num_civs_to_discard = gi.NUM_DISCARDS_PER_DISASTER * sum(\n 1 for tile in auction_tiles if tile == gi.INDEX_OF_DIS_CIV\n )\n if num_civs_to_discard > 0:\n num_civs_owned = sum(\n gi.get_civs_from_collection(winning_player_collection)\n )\n if num_civs_owned <= num_civs_to_discard:\n game_state.remove_all_tiles_by_index_from_player(\n range(\n gi.STARTING_INDEX_OF_CIVS,\n gi.STARTING_INDEX_OF_CIVS + gi.NUM_CIVS,\n ),\n winning_player,\n )\n else:\n game_state.set_num_civs_to_discard(num_civs_to_discard)\n game_state.set_auction_winning_player(winning_player)\n\n # resolve monument disasters\n num_mons_to_discard = gi.NUM_DISCARDS_PER_DISASTER * sum(\n 1 for tile in auction_tiles if tile == gi.INDEX_OF_DIS_MON\n )\n if num_mons_to_discard > 0:\n num_mons_owned = sum(\n gi.get_monuments_from_collection(winning_player_collection)\n )\n if num_mons_owned <= num_mons_to_discard:\n game_state.remove_all_tiles_by_index_from_player(\n range(\n gi.STARTING_INDEX_OF_MONUMENTS,\n gi.STARTING_INDEX_OF_MONUMENTS + gi.NUM_MONUMENTS,\n ),\n winning_player,\n )\n else:\n game_state.set_num_mons_to_discard(num_mons_to_discard)\n game_state.set_auction_winning_player(winning_player)\n\n mark_player_passed_if_no_disasters(winning_player)\n\n # clear auction suns and mark auction as over\n game_state.end_auction()\n\n # if it's the final round and all playesr are passed\n if 
game_state.is_final_round() and game_state.are_all_players_passed():\n end_round(game_state)\n # else if no disasters to be resolved, advance current player\n elif not game_state.disasters_must_be_resolved():\n game_state.advance_current_player()\n # else, that means there IS a disaster to be resolved, so set current\n # player to auction winner to resolve\n else:\n game_state.set_current_player(game_state.get_auction_winning_player())",
"def bomb_defused(event_var):\r\n debug.write(\"[SourceRPG] Handling bomb_defused\", 1)\r\n if isFairForTeam(event_var['es_userteam']) or not int(unfairAdvantage):\r\n if es.isbot(event_var['userid']) and not int(botsGetXpOnEvents):\r\n return\r\n player = players[event_var['userid']]\r\n player.addXp( int(bombDefuseXp) * player['level'], 'defusing the bomb' )\r\n debug.write(\"[SourceRPG] bomb_defused handled\", 1)",
"def update(self):\n if self.bunker_health == 0:\n self.kill()",
"def event1950():\n header(1950)\n boss_dead_flag, immediate_item, delayed_item_1, delayed_item_2 = define_args('iiii')\n end_if_event_flag_on(boss_dead_flag)\n if_event_flag_on(0, boss_dead_flag)\n skip_if_equal(1, immediate_item, 0)\n item.award_item_to_host_only(immediate_item)\n network.disable_sync()\n wait(5.0)\n skip_if_equal(1, delayed_item_1, 0)\n item.award_item_to_host_only(delayed_item_1)\n skip_if_equal(1, delayed_item_2, 0)\n item.award_item_to_host_only(delayed_item_2)",
"def try_collecting(self, event):\n sun_list = [i for i in self.board[event.pos] if isinstance(i, Sun)]\n if sun_list:\n sun_list[0].collected = True\n self.player.gold += Sun.gold\n self.ev_manager.post(events.SunCollected(self.player.gold))",
"def on_death(self, state):",
"def is_burrowed(self) -> bool:\n return self.proto.is_burrowed",
"def event11512001():\n header(11512001)\n end_if_this_event_on()\n\n if_event_flag_on(1, EVENT.DarkOrnsteinKilledFirst) # Ornstein died first.\n if_entity_health_less_than_or_equal(1, CHR.DarkSmough, 0.0)\n if_condition_true(-1, 1)\n if_event_flag_on(2, EVENT.DarkSmoughKilledFirst) # Smough died first.\n if_entity_health_less_than_or_equal(2, CHR.DarkOrnsteinScion, 0.0)\n if_condition_true(-1, 2)\n if_condition_true(0, -1)\n\n chr.cancel_special_effect(CHR.DarkOrnsteinScion, 4950) # Make his death animation normal speed.\n\n item.award_item_to_host_only(ITEMLOT.DarkOrnsteinAndSmoughReward)\n skip_if_condition_false_finished(3, 2)\n item.award_item_to_host_only(ITEMLOT.DarkOrnsteinScionReward)\n boss.kill_boss(CHR.DarkOrnsteinScion)\n skip(1)\n boss.kill_boss(CHR.DarkSmough)\n sound.play_sound_effect(CHR.Player, SoundType.s_sfx, 777777777)\n boss.disable_boss_health_bar(CHR.DarkSmough, TEXT.SunEaterSmough)\n boss.disable_boss_health_bar(CHR.DarkOrnsteinGiant, TEXT.ForsakenKnightOrnstein)\n\n for fog_wall, fog_sfx in zip((1511990, 1511992, 1511988), (1511991, 1511993, 1511989)):\n obj.disable(fog_wall)\n sfx.delete_map_sfx(fog_sfx, True)\n\n flag.enable(EVENT.DarkOrnsteinAndSmoughDead)\n wait(3.0)\n sound.disable_map_sound(1513800)",
"def _baste_off(self):\n GPIO.output(self._baster_pin, GPIO.LOW)",
"def handle_event(self, event):\n if event.key == BattleActions.SELECT.value:\n prev_item = self.pokemon.held_item\n self.pokemon.held_item = self.item\n self.bag.subtract_item(self.item)\n self.bag.add_item(prev_item)\n self.is_dead = True",
"def standby() -> None:",
"def handle_event(self, event):\n if event.key == BattleActions.SELECT.value:\n self.pokemon.held_item = self.item\n self.bag.subtract_item(self.item)\n self.is_dead = True",
"def event11310201():\n header(11310201)\n if_event_flag_on(1, EVENT.SkeletonsDisturbed)\n if_not_in_world_area(1, 13, 1)\n if_condition_true(0, 1)\n flag.disable(EVENT.SkeletonsDisturbed)",
"def wash(self, car):\n yield self.env.timeout(WASHTIME)\n print(\"Washing machine removed %d%% of %s's dirt.\" % (random.randint(50, 99), car))",
"def event11512060():\n header(11512060, 1)\n chr.disable(CHR.CapriciousThrall)\n end_if_this_event_on()\n end_if_event_flag_on(EVENT.CapriciousThrallDead)\n\n if_event_flag_on(0, EVENT.CapriciousThrallActive)\n chr.disable(CHR.SilverKnightArcherNearThrall)\n\n if_event_flag_on(1, EVENT.CapriciousThrallActive)\n if_host(1)\n if_player_inside_region(1, REGION.CapriciousThrallTrigger)\n if_condition_true(0, 1)\n\n # Ambush.\n flag.enable(EVENT.ThrallAmbushOngoing) # Ambush is ongoing. Note this MUST be enabled before the flag below.\n flag.enable(11512060) # One-off ambush is done.\n flag.enable(11502003) # Thrall won't appear in Sen's.\n flag.enable(11502004) # Thrall won't appear in Sen's.\n obj.enable(1511974)\n sfx.create_map_sfx(1511975)\n obj.enable(1511976)\n sfx.create_map_sfx(1511977)\n obj.enable(1511978)\n sfx.create_map_sfx(1511979)\n chr.enable(CHR.CapriciousThrall)\n anim.force_animation(CHR.CapriciousThrall, ANIM.ThrallAmbushAttack)\n wait(0.5)\n sound.enable_map_sound(1513804)\n boss.enable_boss_health_bar(CHR.CapriciousThrall, TEXT.CapriciousThrallName)\n wait(100.0) # Battle timer.\n end_if_event_flag_on(11512061) # Already dead and handled.\n boss.disable_boss_health_bar(CHR.CapriciousThrall, TEXT.CapriciousThrallName)\n sound.play_sound_effect(CHR.CapriciousThrall, SoundType.s_sfx, 777777777) # For effect.\n wait(3.0) # so sound effect can build up and slightly mask the abrupt music stop\n sound.disable_map_sound(1513804)\n anim.force_animation(CHR.CapriciousThrall, ANIM.ThrallRetreat)\n wait(1.4)\n chr.disable(CHR.CapriciousThrall)\n obj.disable(1511974)\n sfx.delete_map_sfx(1511975)\n obj.disable(1511976)\n sfx.delete_map_sfx(1511977)\n obj.disable(1511978)\n sfx.delete_map_sfx(1511979)\n message.status_explanation(TEXT.ThrallHasFled)\n flag.enable(11512008) # Message won't appear when you come back.",
"def on_deal_dmg(self, target, friendly):\n if self.hurt:\n self.dfs -= target.atk\n if self.dfs <= 0 or target.poison:\n self.dead = True\n if target.hurt:\n target.dfs -= self.atk\n if target.dfs <= 0 or self.poison:\n target.dead = True\n\n # some special events may take place here\n # ... \n return self.atk",
"def event11512040():\n header(11512040, 1)\n chr.disable_gravity(CHR.SilverKnightHighArcher)\n if_event_flag_on(0, EVENT.DarkAnorLondo)\n if_entity_dead(1, CHR.SilverKnightLowArcher)\n if_entity_dead(1, CHR.SilverKnightHighArcher)\n end_if_condition_true(1)\n if_entity_dead(2, 1510176)\n end_if_condition_true(2)\n chr.disable_ai(CHR.SilverKnightLowArcher)\n chr.disable_ai(CHR.SilverKnightHighArcher)\n chr.disable_ai(1510176)\n if_player_inside_region(0, REGION.TriggerArcherBattle)\n chr.enable_ai(CHR.SilverKnightLowArcher)\n chr.enable_ai(CHR.SilverKnightHighArcher)\n chr.enable_ai(1510176)",
"def tick(self, game, ticks=1):\n \n self.structure.time_until_harvest -= ticks\n if self.structure.time_until_harvest <= 0:\n if ref.structure_type_dct[self.structure.structure_type]['site type'] == 'resource':\n resources_harvested = 0\n for worker in xrange(self.structure.workers):\n workload = randint(500, 1500)\n if workload <= self.harvestable:\n self.harvestable -= workload\n resources_harvested += workload\n else:\n resources_harvested += self.harvestable\n self.harvestable = 0\n self.structure.workers = 0\n self.structure.transform()\n game.action_log.append('transformation')\n break\n #Adds resource to 'available' town resources\n entities.town['object'].resources[\n ref.material_type_dct[self.resource]['class']][\n self.resource]['available'] += resources_harvested\n #Removes resource from 'harvestable' town resources\n entities.town['object'].resources[\n ref.material_type_dct[self.resource]['class']][\n self.resource]['harvestable'] -= resources_harvested\n\n self.structure.time_until_harvest = ref.structure_type_dct[\n self.structure.structure_type]['time per harvest']\n return\n\n elif ref.structure_type_dct[self.structure.structure_type]['site type'] == 'adventure':\n if len(self.structure.workers) > 0:\n for hero in [h for h in entities.heroes['object list'] if (\n h.hero_id in self.structure.workers)]:\n hero.boredom += randint(0, 100)\n try:\n monster = next(m for m in entities.monsters['object list'] if \n m.monster_id in self.structure.monsters)\n self.battle(hero, monster)\n except StopIteration:\n adventure_sites = [\n s for s in entities.sites['object list'] if ref.structure_type_dct[\n s.structure.structure_type]['site type'] == 'adventure'\n ]\n if hero.boredom < 100 and len(adventure_sites) > 0:\n hero.destination = choice(adventure_sites).location\n else:\n hero.boredom = 0\n hero.destination = entities.town['object'].location\n hero.traveling = True\n self.structure.workers.remove(hero.hero_id)\n self.structure.worker_capacity += 1",
"def wash(self, car):\n yield self.env.timeout(WASHTIME)\n print(\"Carwash removed %d%% of %s's dirt.\" %\n (random.randint(50, 99), car))",
"def wash(self, car):\n yield self.env.timeout(WASHTIME)\n print(\"Carwash removed %d%% of %s's dirt.\" %\n (random.randint(50, 99), car))"
]
| [
"0.692791",
"0.60571635",
"0.58967096",
"0.55677986",
"0.5558159",
"0.55171144",
"0.55069155",
"0.5488246",
"0.54822344",
"0.54730093",
"0.5447639",
"0.54356676",
"0.5406993",
"0.5402682",
"0.53844774",
"0.5326585",
"0.5324676",
"0.53170097",
"0.52970785",
"0.5275559",
"0.52530986",
"0.52516294",
"0.52470213",
"0.52313185",
"0.5197382",
"0.51960516",
"0.5194132",
"0.5188731",
"0.5178067",
"0.5178067"
]
| 0.7827559 | 0 |
Event for when the player goes bust; returns None. | def event_player_bust(self) -> None:
print(f"Your hand contains {min(self.user.hand.value)}, you're bust")
self.event_house_wins() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def event_house_bust(self) -> None:\n print(f\"The house's hand contains {min(self.house.hand.value)}, they're bust\")\n self.event_player_wins()",
"def update(self):\n if self.bunker_health == 0:\n self.kill()",
"def on_loop(self):\n\n if not Syringe.craftable:\n if Syringe.can_be_crafted(self.macgyver.inventory):\n Syringe.craftable = True\n self.notification.active('craft-available').set_timer(2)\n\n # Check if MacGyver threw himself against a wall...\n if sprite.spritecollide(self.macgyver, self.walls, False):\n self.macgyver.rollback()\n\n # Macgyver will collect the item and add it to it's inventory...\n for item in sprite.spritecollide(self.macgyver, self.items, False):\n item.collect(self.macgyver.inventory)\n\n # if self.macgyver.coordinates == self.finish_point:\n # self.notification.active('win')",
"def bomb_defused(event_var):\r\n debug.write(\"[SourceRPG] Handling bomb_defused\", 1)\r\n if isFairForTeam(event_var['es_userteam']) or not int(unfairAdvantage):\r\n if es.isbot(event_var['userid']) and not int(botsGetXpOnEvents):\r\n return\r\n player = players[event_var['userid']]\r\n player.addXp( int(bombDefuseXp) * player['level'], 'defusing the bomb' )\r\n debug.write(\"[SourceRPG] bomb_defused handled\", 1)",
"def event11512000():\n header(11512000)\n end_if_this_event_on()\n if_player_owns_good(0, GOOD.Lordvessel)\n flag.enable(11512000)",
"def on_hit(self, event, data):\n world, player = data\n # Ensure the top of the bounce block is being hit\n if get_collision_direction(player, self) == \"A\":\n self._active = True\n player.set_velocity((0, -3*player.get_max_velocity())) # bounce the player\n player.set_jumping(False) # player can't jump while bounced\n player.set_bounced(True)",
"def event_house_blackjack(self) -> None:\n if 21 in self.user.hand.value:\n self.event_player_push()\n else:\n print(\"The house has blackjack\")\n self.event_house_wins()",
"def bomb_planted(event_var):\r\n debug.write(\"[SourceRPG] Handling bomb_planted\", 1)\r\n if isFairForTeam(event_var['es_userteam']) or not int(unfairAdvantage):\r\n if es.isbot(event_var['userid']) and not int(botsGetXpOnEvents):\r\n return\r\n player = players[event_var['userid']]\r\n player.addXp( int(bombPlantXp) * player['level'], 'planting the bomb' )\r\n debug.write(\"[SourceRPG] bomb_planted handled\", 1)",
"def bat_update(self, player):\n if self.bounce_count > 0: #TEMP\n self.bounce()\n return\n target = player.current_tile()\n if(target != None):\n self.moveTowards(player.current_tile())",
"def on_deal_dmg(self, target, friendly):\n if self.hurt:\n self.dfs -= target.atk\n if self.dfs <= 0 or target.poison:\n self.dead = True\n if target.hurt:\n target.dfs -= self.atk\n if target.dfs <= 0 or self.poison:\n target.dead = True\n\n # some special events may take place here\n # ... \n return self.atk",
"def event_house_wins(self) -> None:\n print(\"You lose\")\n self.user.lose_balance(self.user.bet)",
"def handle_event(self, event):\n if event.key == BattleActions.SELECT.value:\n self.pokemon.held_item = self.item\n self.bag.subtract_item(self.item)\n self.is_dead = True",
"def event11515373():\n header(11515373, 0)\n skip_if_this_event_on(3)\n if_event_flag_off(1, EVENT.JareelDead)\n if_player_inside_region(1, 1512956)\n if_condition_true(0, 1)\n skip_if_client(1)\n network.notify_boss_room_entry()\n chr.activate_npc_buffs(CHR.AbyssalPrinceJareel)",
"def event_player_blackjack(self) -> None:\n win_amount = self.user.bet + 1.5\n print(\"Congratulations, you win:\", win_amount)\n self.user.win_balance(win_amount)",
"def event2546():\n header(2546)\n\n if_player_does_not_have_special_effect(0, SPEFFECT.KillTrigger)\n\n if_player_has_special_effect(1, SPEFFECT.BondToBeyond)\n if_player_has_special_effect(1, SPEFFECT.KillTrigger)\n if_condition_true(0, 1)\n\n flag.disable_chunk(11025600, 11025619)\n flag.enable_random_in_chunk(11025600, 11025619)\n skip_if_event_flag_off(1, 11025600)\n chr.set_special_effect(CHR.Player, SPEFFECT.BondToBeyondEffect)\n restart()",
"def got_hit(self, game_over_event):\n if self.invul_timer == 0:\n self.health -= 1\n self.play_impact_sound()\n if self.health == 0:\n pygame.event.post(pygame.event.Event(game_over_event))\n self.invul_timer = CST.PLAYER_INVULNERABILITY_DURATION * CST.FPS\n self.repair_timer = self.REPAIR_TIME # Resetting repair state upon hit",
"def handle_event(self, event):\n if event.key == BattleActions.SELECT.value:\n prev_item = self.pokemon.held_item\n self.pokemon.held_item = self.item\n self.bag.subtract_item(self.item)\n self.bag.add_item(prev_item)\n self.is_dead = True",
"def bumperCallback(self,data):\n if data.state == 1 :\n rospy.loginfo(\"Bumper enfonce\") \n self.soundPub.publish(0)",
"def event2542():\n header(2542)\n\n if_player_has_special_effect(1, SPEFFECT.RingOfTemptationEquipped)\n if_entity_health_less_than_or_equal(1, CHR.Player, 0.0)\n if_condition_true(0, 1)\n chr.set_special_effect(CHR.Player, SPEFFECT.RingOfTemptationDeath)\n restart()",
"def on_hit(self, event, data):\n world, player = data\n # Ensure the top of the flag block is being hit\n # and ensure player hadn't been healed before in this level\n if get_collision_direction(player, self) == \"A\" and not player.get_bonus_health():\n player.change_health(1)\n player.set_bonus_health(True) # player won't get heal twice in a single level",
"def hit(self):\n if self.is_sturdy:\n print(\"Signal.HIT\")\n for pxl in self.pixels:\n if not pxl.damaged: # if one of the pixels is not yet damaged, stop the code\n return None\n else:\n for pxl in self.pixels:\n pxl.damaged = True\n\n self.kill()",
"def stand(self):\n self.endgame()",
"def __onBanNotifyHandler(self):\n LOG_DEBUG('GameSessionController:__onBanNotifyHandler')\n banTime = time.strftime('%H:%M', time.gmtime(time.time() + self.PLAY_TIME_LEFT_NOTIFY))\n self.__lastBanMsg = (self.isPlayTimeBlock, banTime)\n self.onTimeTillBan(*self.__lastBanMsg)\n self.__banCallback = BigWorld.callback(self.DAY_DURATION, self.__onBanNotifyHandler)",
"def monster_attack(self, player):\n monster = Monster(random.randint(0, 20), random.randint(0, 20))\n print(\"{} you are being attacked by a monster!\".format(player.name))\n health = player.health\n while True:\n monster.health -= player.attack\n player.health -= monster.attack\n if monster.health < 0 or player.health < 0:\n break\n if player.health <= 0:\n print(\"You died!\")\n print(\"Game over! You collected {} pieces of gold\".format(player.gold))\n return True\n else:\n print(\"You lost {} health. Your health is now {}.\".format(health - player.health, player.health))",
"def onBounce(self):\n pass",
"def heal(self):\n self.infected = False",
"def on_hit(self, event, data):\n world, player = data\n\n # Ensure the top of the switch block is being hit\n if get_collision_direction(player, self) == \"A\" and not self._pressed:\n self._time = time.time() # save the hit time\n self._pressed = True # set the pressed status to True\n if not self._block_around: # ensure the block storage is empty\n x, y = self.get_position() # get the switch position\n self._block_around = world.get_things_in_range(x, y, 20) # put block around into storage\n for block in self._block_around: # remove block in the storage\n if not isinstance(block, Switch) and isinstance(block, Block):\n world.remove_block(block)",
"def event11510870():\n header(11510870, 1)\n npc, = define_args('i')\n skip_if_this_event_slot_off(2)\n chr.drop_mandatory_treasure(npc)\n end()\n\n if_entity_dead(0, npc)\n end()",
"def set_bankrupt(self):\n if self.status == self.PLAYER_BANKRUPT:\n return\n self.status = self.PLAYER_BANKRUPT\n self.game.player_bankrupted(self)",
"def lose(self) -> None:\n self._actual_money -= self._bet"
]
| [
"0.72710687",
"0.6107994",
"0.5895356",
"0.5872952",
"0.583328",
"0.582141",
"0.58022356",
"0.5797654",
"0.5782969",
"0.57784337",
"0.5760133",
"0.57263",
"0.56755555",
"0.5641476",
"0.56328326",
"0.5613689",
"0.5613351",
"0.55944276",
"0.55883646",
"0.55850583",
"0.55589384",
"0.555639",
"0.55395573",
"0.5501595",
"0.5490593",
"0.54749113",
"0.54648846",
"0.5461786",
"0.5455987",
"0.5389525"
]
| 0.77857053 | 0 |
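
Note on the row above: its positive snippet assumes an enclosing Blackjack game object with a self.user.hand.value collection of candidate totals and an event_house_wins handler, neither of which appears in the row. Below is a minimal stand-alone sketch of the same bust flow; the Hand, User and Game stand-ins are invented here purely so the snippet runs, they are not the dataset's original classes.

# Illustrative stand-ins; only event_player_bust mirrors the positive document.
class Hand:
    def __init__(self, values):
        self.value = values  # candidate hand totals (e.g. hard/soft counts)


class User:
    def __init__(self, hand):
        self.hand = hand


class Game:
    def __init__(self, hand_values):
        self.user = User(Hand(hand_values))

    def event_player_bust(self) -> None:
        # Same behaviour as the positive document: report the lowest total, then concede.
        print(f"Your hand contains {min(self.user.hand.value)}, you're bust")
        self.event_house_wins()

    def event_house_wins(self) -> None:
        print("You lose")


Game([23]).event_player_bust()  # -> "Your hand contains 23, you're bust" then "You lose"
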
list of urls of a user's repos | def urls(gh, user):
return [repo.url for repo in getuserrepos(gh, user)] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def repositories(self, user_name=None):\n user_name = user_name if user_name else self._auth[0]\n data = self._request('GET', 'users', user_name)\n return data.repositories\n #ret_val = []\n #for repository in data.repositories:\n # ret_val.append(repository.name)\n # #print 'repo', repository['name'] # can use as dict or as object\n #return ret_val",
"def getuserrepos(gh, user):\n repos = list()\n pages = int(math.ceil(n_public_repos(gh, user) / float(R_PAGE)))\n for i in range(pages):\n # github index their pages from 1, hence the +1\n qs = user + \"/repos?page=\" + str(i + 1)\n repos.extend(gh.users(qs).get())\n return repos",
"def user_repos(self, username: str) -> requests.Response:\n\n api_url = 'https://api.github.com/users/{username}/repos'\n url = api_url.format(username=username)\n response = requests.get(url)\n return response\n\n\n\n #user_url = self.user_info(username=username)\n #repos_url = user_url\n #pprint.pprint(repos_url)\n #url = repos_url['repos_url']\n #response = requests.get(url)\n #return response",
"def get_repos_user(user='xmonader'):\n u = ghclient.get_user(login=user)\n repos = u.get_repos()\n repos_list = []\n for i in range(20):\n page = repos.get_page(i)\n if len(page) == 0:\n break\n repos_list.extend(repos.get_page(i))\n return repos_list",
"def get_repos(self):\n\n if self.url == 'test':\n repos = ['feature', 'dev', 'int']\n else:\n repos = []\n\n return repos",
"def do_list(client, args):\n\trepos = client.repos.list(args.user)\n\tprint '%s has the following repositories:' % args.user\n\tprint 'Name - Description'\n\tfor repo in repos:\n\t\tprint '%s - %s' % (repo.name, repo.description)",
"def get_orgs():\n \n url = \"https://api.github.com/user/orgs\"\n \n org_urls = []\n orgs = utils.get_json(url)\n \n for org in orgs:\n org_urls.append(org[\"url\"])\n \n return org_urls",
"def _get_repo_list(self, *args, **kwargs): \r\n repo_list = kwargs['repositories'] if kwargs.get('repositories', None) else self.get_list(\r\n api_endpoint=settings.GITHUB_SETTINGS['GITHUB_USER_REPO_API'].format(**kwargs), **kwargs\r\n )\r\n for r in repo_list:\r\n if isinstance(r, dict):\r\n yield r['name']\r\n else:\r\n yield r",
"def list_repositories(self):\n data = self._get_all_data('/user/repos')\n return [repo['full_name'] for repo in data]",
"def list_repos(self):\n return sorted(self.user_con.list_repos())",
"def query_repos(self):\n return [self.config[\"repo\"]]",
"def getUrls(self):\n # in case you need to move from a read only Url to a writeable one, here it gets replaced\n repopath = self.repositoryUrl().replace(\"[git]\", \"\")\n repoString = utils.replaceVCSUrl(repopath)\n [repoUrl, repoBranch, repoTag] = utils.splitVCSUrl(repoString)\n if not repoBranch and not repoTag:\n repoBranch = \"master\"\n print(\"|\".join([repoUrl, repoBranch, repoTag]))\n return True",
"def n_public_repos(gh, user):\n return getuser(gh, user).public_repos",
"def get_repositories(github_user):\n\n if not github_user:\n return [1, {\"message\": \"GitHub username missing\"}]\n else:\n\n # build Request object\n request = urllib2.Request(\"https://api.github.com/users/\"\n + str(github_user) + \"/repos\")\n request.get_method = lambda: 'GET'\n try:\n '''try to send the request to the GitHub API and\n create Python dictionary from JSON response'''\n repositories = urllib2.urlopen(request)\n repositories = json.loads(\"\\n\".join(repositories.readlines()))\n\n return [0, repositories]\n\n except urllib2.HTTPError as e:\n\n # return HTTP error and the message from the API\n return [1, {\"message\": str(e) + \": \"\n + json.loads('\\n'.join(e.readlines()))['message']}]",
"def get_repos(self):\n return requests.get(\"https://api.github.com/user/repos\",\n headers=self.headers).json",
"def user_repositories(self, host: (str), user: (str)) -> Any:\n return search_api(\"user_repositories\", host, user)",
"def get_repos():\n response = requests.get('https://quay.io/api/v1/repository?public=true&namespace=ucsc_cgl')\n repo_data = json.loads(response.text)\n assert response.status_code == 200, 'Quay.io API request to view repositories failed.'\n repos = {str(x[u'name']) for x in repo_data['repositories']}\n return repos",
"def fetch_repos(self):\n for repo in self.json_repos['repos']:\n title = str(repo[\"title\"])\n repo_url = str(repo['repo'])\n self.repos[title] = repo_url",
"def getContributors(auth):\n users = []\n r = requests.get(url='https://gist.github.com/paulmillr/2657075/',\n auth=auth)\n soup = BeautifulSoup(r.text, 'html.parser')\n users = [tr.select_one('a').text for tr in soup('tbody')[0].select('tr')]\n return users",
"def _urls(*, repository, commit, mirrors):\n result_with_nulls = [\n _format_url(\n pattern = x,\n repository = repository,\n commit = commit,\n )\n for x in mirrors.get(\"github\")\n ]\n return [\n url\n for url in result_with_nulls\n if url != None\n ]",
"def get_repos():\n\n return __do_get_repos()",
"def list_public_repos():\n return Collaborator.objects.filter(user__username=settings.PUBLIC_ROLE)",
"def listRepositories(self):\n return self.mini_catalog.listRepositories()",
"def urls(self) -> list[str]:\r\n ...",
"def getRemotes(directory):\n gitRemoteOutput = subprocess.check_output(['git','remote','-v'],cwd=directory)\n remotes = []\n for line in gitRemoteOutput.splitlines():\n if '(fetch)' in line:\n splitLine = line.split();\n remotes.append({'name': splitLine[0].strip(), 'url': splitLine[1].strip()})\n return remotes",
"def reponames(gh, user):\n return [u.split('/')[-1] for u in urls(gh, user)]",
"def do_repo_list(self):\n return StringResult(self._repo_list.format_available_repos())",
"def getURLs():",
"def repo_list(self):\n\n data, _ = self.helm_client.repo_list()\n return data",
"def list_repos_cli(api_client, path_prefix, next_page_token):\n content = ReposApi(api_client).list(path_prefix, next_page_token)\n click.echo(pretty_format(content))"
]
| [
"0.7733041",
"0.77024794",
"0.7624664",
"0.7403024",
"0.73314524",
"0.7210593",
"0.7078684",
"0.70109814",
"0.6999416",
"0.69814646",
"0.6937563",
"0.6918695",
"0.6880146",
"0.6738339",
"0.67307055",
"0.6662354",
"0.65915906",
"0.65727204",
"0.65644765",
"0.65623707",
"0.65577567",
"0.6548484",
"0.64901114",
"0.64899653",
"0.6451562",
"0.64009905",
"0.63831586",
"0.6380128",
"0.6371337",
"0.63502324"
]
| 0.8964593 | 0 |
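
Note on the row above: the positive document is just a comprehension over getuserrepos(gh, user), a paginated helper that also appears among this row's negatives, with gh being some GitHub API client. The sketch below shows how the two compose; FakeRepo and the dict-based "client" are assumptions made here so the snippet runs offline, not part of the dataset.

# Offline illustration of urls() composing with a repo-listing helper.
class FakeRepo:
    def __init__(self, url):
        self.url = url


def getuserrepos(gh, user):
    # Stand-in for the paginated GitHub helper referenced by the positive document.
    return gh.get(user, [])


def urls(gh, user):
    return [repo.url for repo in getuserrepos(gh, user)]


gh = {"octocat": [FakeRepo("https://api.github.com/repos/octocat/hello-world")]}
print(urls(gh, "octocat"))  # ['https://api.github.com/repos/octocat/hello-world']
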
number of public repositories of a user | def n_public_repos(gh, user):
return getuser(gh, user).public_repos | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_count(username):\n return get_contributor(username)[\"count\"]",
"def get_public_narrative_count():\n client_workspace = MongoClient(mongoDB_metrics_connection+to_workspace)\n db_workspace = client_workspace.workspace\n public_narrative_count = db_workspace.workspaceACLs.find({\"user\" : \"*\"}).count()\n return public_narrative_count;",
"def public_count() -> int:\n if DB.count['public'] == -1:\n return DB._get_count('public')\n return DB.count['public']",
"def get_counts(self, obj: User):\n uploader = obj.id\n public_count = Image.objects.filter(uploader=uploader, is_private=False, is_profile_image=False).count()\n \n return {\n \"public\": public_count,\n }",
"def get_public_images_count(khoros_object, user_settings=None, user_id=None, login=None, email=None):\n user_settings = _process_settings_and_user_id(khoros_object, user_settings, user_id, login, email)\n return _get_count(khoros_object, user_settings['id'], 'public_images')",
"def test_repositories(self):\n\t\ttot_repos = total_repos(self)\n\t\t#self.assertEqual(tot_repos, \"6052353)",
"def count_revisions_by_user(self):\n return self.run_query(f\"count({self.r}/contributor[id = 5558])\")",
"def get_language_distribution(username):\n users_repos, api_calls = get_repos(username)\n \n language_count = {}\n \n for repo in users_repos:\n language_count[repo.language] = language_count.get(repo.language, 0) + 1\n \n return language_count, api_calls",
"def getuserrepos(gh, user):\n repos = list()\n pages = int(math.ceil(n_public_repos(gh, user) / float(R_PAGE)))\n for i in range(pages):\n # github index their pages from 1, hence the +1\n qs = user + \"/repos?page=\" + str(i + 1)\n repos.extend(gh.users(qs).get())\n return repos",
"def get_counts(self, obj: User):\n uploader = obj.id\n public_count = Image.objects.filter(uploader=uploader, is_private=False, is_profile_image=False).count()\n private_count = Image.objects.filter(uploader=uploader, is_private=True, is_profile_image=False).count()\n liked_count = Image.objects.filter(likes__id=uploader).count()\n \n return {\n \"public\": public_count,\n \"private\": private_count,\n \"liked\": liked_count,\n }",
"def get_count(owner, repo_slug, auth_tokens, endpoint):\n count_url = make_req_url(owner, repo_slug, endpoint, 0)\n response = send_bitbucket_request(count_url, auth_tokens)\n if response and 'count' in response:\n return response['count']-1\n return 0",
"def repositories(self, user_name=None):\n user_name = user_name if user_name else self._auth[0]\n data = self._request('GET', 'users', user_name)\n return data.repositories\n #ret_val = []\n #for repository in data.repositories:\n # ret_val.append(repository.name)\n # #print 'repo', repository['name'] # can use as dict or as object\n #return ret_val",
"def test_get_github_repos_count_positive(self):\n self.assertIsNotNone(app.get_github_repos_count(\"dhh\")[\"count\"])",
"def get_user_content_count(user_id, start_index=0):\n # Define the variable to track the total content count and structure the user URI\n total_count = 0\n user_uri = f\"{base_url}/people/{user_id}\"\n\n # Get the content count for the first 100 results and increment the total count accordingly\n content_count = _get_paginated_content_count(user_uri, start_index)\n total_count += content_count\n\n # Continue rolling through the user content until all assets have been identified\n while content_count > 0:\n start_index += 100\n content_count = _get_paginated_content_count(user_uri, start_index)\n total_count += content_count\n return total_count",
"def get_total_commits_per_user(commits):\n return get_total_contributions_per_user(commits, 'author')",
"def get_historic_users_count():\n return User.objects.all().count()",
"def user_repos(self, username: str) -> requests.Response:\n\n api_url = 'https://api.github.com/users/{username}/repos'\n url = api_url.format(username=username)\n response = requests.get(url)\n return response\n\n\n\n #user_url = self.user_info(username=username)\n #repos_url = user_url\n #pprint.pprint(repos_url)\n #url = repos_url['repos_url']\n #response = requests.get(url)\n #return response",
"def private_count() -> int:\n if DB.count['private'] == -1:\n return DB._get_count('private')\n return DB.count['private']",
"def people_count(self):\n return len(self.__users)",
"def get_usage_count(request, user_id):\n board_count = Member.objects.filter(user_id=user_id, is_creator=True).count()\n return Response({\"board_count\": board_count})",
"def get_user_contributions(self, project):\n user = self.context.get('user')\n if not user.is_anonymous():\n return project.observations.filter(creator=user).count()\n else:\n return 0",
"def get_all_users_count(khoros_object):\n liql_query = 'SELECT count(*) FROM users'\n api_response = liql.perform_query(khoros_object, liql_query=liql_query, verify_success=True)\n return int(api_response['data']['count'])",
"def num_links(self):\n count=0.0\n for cluster in self.clusters:\n if self.clusters[cluster] == self.clusters[cluster].antecessor:\n numberofmembers=self.clusters[cluster].number_of_members\n count+=numberofmembers\n return count",
"def _get_count(_khoros_object, _user_id, _object_type):\n _api_response = query_users_table_by_id(_khoros_object, f'{_object_type}.count(*)', _user_id)\n return int(_api_response['data']['items'][0][_object_type]['count'])",
"def get_total_issues_per_user(issues):\n return get_total_contributions_per_user(issues, 'user')",
"def GetNumberOfRepoMetas(language: scrape_repos_pb2.LanguageToClone) -> int:\n path = pathlib.Path(language.destination_directory)\n if path.is_dir():\n return len([x for x in path.iterdir() if x.suffix == '.pbtxt'])\n else:\n return 0",
"def _get_count(visibility: str) -> int:\n if visibility == 'private':\n queue = gql(count_private)\n else:\n queue = gql(count_public)\n result = DB.client.execute(queue)\n count: int = int(result[f'flickr_{visibility}_aggregate']['aggregate']['count'])\n DB.count[visibility] = count\n return count",
"def get_buildings_for_user_count(user):\n return BuildingSnapshot.objects.filter(\n super_organization__in=user.orgs.all(),\n canonicalbuilding__active=True,\n ).count()",
"def getViewPortUserCount(self):\n logger.debug('Getting map view port user count...')\n elements = get_elements_by_css(\".leaflet-marker-icon.srcCluster\")\n users = 0\n for element in elements:\n users += int(get_text(element))\n return users",
"def user_repositories(self, host: (str), user: (str)) -> Any:\n return search_api(\"user_repositories\", host, user)"
]
| [
"0.6931196",
"0.69251525",
"0.6906984",
"0.6766805",
"0.66190344",
"0.6315404",
"0.63035226",
"0.629744",
"0.6281933",
"0.62044704",
"0.6178291",
"0.61447835",
"0.6059186",
"0.6017082",
"0.59982175",
"0.5984747",
"0.59217376",
"0.5917715",
"0.5898999",
"0.5878044",
"0.58628654",
"0.5838098",
"0.58215284",
"0.5793212",
"0.57864636",
"0.5784655",
"0.5771481",
"0.57484186",
"0.574405",
"0.57356817"
]
| 0.8427448 | 0 |
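
Note on the row above: the positive document only reads the public_repos count off whatever getuser(gh, user) returns; the getuserrepos negative in the same row then uses that count to decide how many result pages to request. A small sketch of that ceiling-division step follows, assuming a page size of 30 (the real R_PAGE constant is never stated in this row).

import math

R_PAGE = 30  # assumed repos-per-page; the dataset row does not give the real value


def pages_needed(public_repo_count: int) -> int:
    # Same ceiling division the getuserrepos negative applies to n_public_repos(...).
    return int(math.ceil(public_repo_count / float(R_PAGE)))


print(pages_needed(0))   # 0
print(pages_needed(30))  # 1
print(pages_needed(31))  # 2
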
Applying the same augmentation to an image and its corresponding mask | def augment(image, masks):
# Random horizontal flipping
if random.random() > 0.5:
image = TF.hflip(image)
masks = TF.hflip(masks)
# Random vertical flipping
if random.random() > 0.5:
image = TF.vflip(image)
masks = TF.vflip(masks)
return image, masks | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def img_and_mask_augmentation(augmentation, img, mask):\n\n # img_copy = img.copy()\n image_shape = img.shape\n\n # Convert the stochastic sequence of augmenters to a deterministic one.\n # The deterministic sequence will always apply the exactly same effects to the images.\n det = augmentation.to_deterministic()\n img_aug = det.augment_image(img)\n mask_aug = det.augment_image(mask, hooks=imgaug.HooksImages(activator=hook))\n mask_aug = mask_aug.astype(np.bool)\n\n assert img_aug.shape == image_shape, \"Augmentation shouldn't change image size\"\n\n return img_aug, mask_aug",
"def image_augmentation(img):\n return np.fliplr(img)",
"def _augment(img):\r\n return flip(img, axis=2)",
"def _augment(img):\n return flip(img, axis=2)",
"def data_augmentation(image_data, mask_data, rotate=False, vertical_flip=False, horizontal_flip=False):\n aug_images = []\n aug_masks = []\n\n for _ in range(len(image_data)):\n if rotate:\n rotation = A.RandomRotate90(p=1)\n rotated_data = rotation(image=image_data[_], mask=mask_data[_])\n rotated_image = rotated_data['image']\n rotated_mask = rotated_data['mask']\n aug_images.append(rotated_image)\n aug_masks.append(rotated_mask)\n\n if vertical_flip:\n flip_v = A.VerticalFlip(p=1)\n vertical_data = flip_v(image=image_data[_], mask=mask_data[_])\n vertical_image = vertical_data['image']\n vertical_mask = vertical_data['mask']\n aug_images.append(vertical_image)\n aug_masks.append(vertical_mask)\n\n if horizontal_flip:\n flip_h = A.HorizontalFlip(p=1)\n horizontal_data = flip_h(image=image_data[_], mask=mask_data[_])\n horizontal_image = horizontal_data['image']\n horizontal_mask = horizontal_data['mask']\n aug_images.append(horizontal_image)\n aug_masks.append(horizontal_mask)\n\n nd_images = make_ndarray(aug_images)\n nd_masks = make_ndarray(aug_masks)\n #nd_images = np.zeros((len(aug_images), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.float32)\n #nd_masks = np.zeros((len(aug_masks), IMG_HEIGHT, IMG_WIDTH), dtype=np.float32)\n\n #for _ in range(len(aug_images)): # Load into ndarray\n # nd_images[_] = aug_images[_]\n # nd_masks[_] = aug_masks[_] # load mask without channel variable\n\n return nd_images, nd_masks",
"def augment():\n print(\"augmenting......\")\n path1 = '../trainp1/'\n path2 = '../trainp2/'\n # path of pair1 and pair2 similar to img & mask task for segmentation\n p = Augmentor.Pipeline(path1) # pair1\n p.ground_truth(path2) # pair2\n p.rotate(probability=0.3, max_left_rotation=3, max_right_rotation=3) \n p.flip_left_right(probability=0.2) \n p.random_distortion(0.5, 2, 2, 2)\n p.zoom(probability=0.5, min_factor=0.95, max_factor=1.05)\n p.process()",
"def augment(self, image):\n pass",
"def img_augmentation(augmentation, img, bbox):\n\n # img_copy = img.copy()\n image_shape = img.shape\n h, w = image_shape[0:2]\n\n # Convert the stochastic sequence of augmenters to a deterministic one.\n # The deterministic sequence will always apply the exactly same effects to the images.\n det = augmentation.to_deterministic()\n img_aug = det.augment_image(img)\n\n ia_bbox = list()\n for bounding_box in bbox:\n x1, y1, x2, y2 = bounding_box\n ia_bbox.append(ia.BoundingBox(x1=x1, y1=y1, x2=x2, y2=y2))\n\n bbs = ia.BoundingBoxesOnImage(ia_bbox, shape=image_shape)\n bbs_aug = det.augment_bounding_boxes([bbs])[0]\n # img = bbs_aug.draw_on_image(img)\n\n after_bbox = list()\n for bounding_box in bbs_aug.bounding_boxes:\n bbox_list = [bounding_box.x1_int, bounding_box.y1_int, bounding_box.x2_int, bounding_box.y2_int]\n\n if bbox_list[0] >= w: bbox_list[0] = w - 1\n if bbox_list[1] >= h: bbox_list[1] = h - 1\n if bbox_list[2] >= w: bbox_list[2] = w - 1\n if bbox_list[3] >= h: bbox_list[3] = h - 1\n\n if bbox_list[0] == bbox_list[2] or bbox_list[1] == bbox_list[3]:\n return img_augmentation(augmentation, img, bbox)\n\n bbox_list = list(map(lambda x: max(x, 0), bbox_list))\n after_bbox.append(bbox_list)\n\n assert img_aug.shape == image_shape, \"Augmentation shouldn't change image size\"\n\n return img_aug, after_bbox",
"def get_augmentation_sequence():\n # Macro to apply something with 50% chance\n sometimes = lambda aug: iaa.Sometimes(0.5, aug) # 50%\n rarely = lambda aug: iaa.Sometimes(0.1, aug) # 10%\n\n # Augmentation applied to every image\n # Augmentors sampled one value per channel\n aug_sequence = iaa.Sequential(\n [\n # apply the following augmenters to most images\n iaa.Fliplr(0.5), # horizontally flip 50% of all images\n iaa.Flipud(0.5), # vertically flip 50% of all images\n\n # crop images by -0.25% to 0.25% of their height/width\n # positive values crop the image, negative pad\n sometimes(iaa.CropAndPad(\n percent=(-0.25, 0.25),\n pad_mode=['constant', 'edge'], # pad with constant value of the edge value\n pad_cval=(0, 0) # if mode is constant, use a cval between 0 and 0 to ensure mask background is preserved\n )),\n sometimes(iaa.Affine(\n scale={\"x\": (0.8, 1.2), \"y\": (0.8, 1.2)}, # scale images to 80-120% of their size, individually per axis\n translate_percent={\"x\": (-0.2, 0.2), \"y\": (-0.2, 0.2)}, # translate by -20 to +20 percent (per axis)\n rotate=(-45, 45), # rotate by -45 to +45 degrees\n shear=(-16, 16), # shear by -16 to +16 degrees\n order=[0, 1], # use nearest neighbour or bilinear interpolation (fast)\n cval=(0, 0), # if mode is constant, use a cval between 0 and 0 to ensure mask background is preserved\n mode='constant' # ia.ALL # use any of scikit-image's warping modes (see 2nd image from the top for examples)\n )),\n # rarely(iaa.Superpixels(p_replace=(0, 1.0), n_segments=(20, 200))),\n iaa.GaussianBlur((0, 3.0)),\n iaa.Add((-10, 10), per_channel=0.7), # change brightness of images (by -10 to 10 of original value)\n iaa.AddToHueAndSaturation((-20, 20)),\n # sometimes(iaa.PerspectiveTransform(scale=(0.01, 0.1)))\n ],\n random_order=True\n )\n\n return aug_sequence",
"def get_augmenter():\n\n augmenter = iaa.Sequential([\n iaa.Fliplr(0.5), # horizontal flips\n iaa.Crop(percent=(0, 0.1)), # random crops\n # Small gaussian blur with random sigma between 0 and 0.5.\n # But we only blur about 50% of all images.\n iaa.Sometimes(\n 0.5,\n iaa.GaussianBlur(sigma=(0, 0.5))\n ),\n # Strengthen or weaken the contrast in each image.\n iaa.LinearContrast((0.75, 1.5)),\n # Add gaussian noise.\n # For 50% of all images, we sample the noise once per pixel.\n # For the other 50% of all images, we sample the noise per pixel AND\n # channel. This can change the color (not only brightness) of the\n # pixels.\n iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05*255), per_channel=0.5),\n # Make some images brighter and some darker.\n # In 20% of all cases, we sample the multiplier once per channel,\n # which can end up changing the color of the images.\n iaa.Multiply((0.8, 1.2), per_channel=0.2),\n # Apply affine transformations to each image.\n # Scale/zoom them, translate/move them, rotate them and shear them.\n iaa.Affine(\n scale={\"x\": (0.80, 1.2), \"y\": (0.80, 1.2)},\n translate_percent={\"x\": (-0.2, 0.2), \"y\": (-0.2, 0.2)},\n rotate=(-25, 25),\n shear=(-6, 6)\n )\n], random_order=True) # apply augmenters in random order\n\n return augmenter",
"def transform(self,image,masks,aug):\n # convert to PIL Image.\n PIL_convert = transforms.ToPILImage()\n image = PIL_convert(image)\n masks = PIL_convert(masks.astype(np.int32))\n # resize the image and masks\n resize = transforms.Resize(size=(512,512))\n image = resize(image)\n masks = resize(masks)\n # augmentation\n if aug is True:\n augment(image,masks)\n else:\n pass\n # Convert to Tensor\n image = TF.to_tensor(image)\n masks = TF.to_tensor(masks)\n\n return image,masks",
"def Adjust_Data(img,mask,feature_dict, normalize):\n ## Normalize image\n if normalize:\n img = Normalize_Image(img)\n\n ## Assume mask shape has 4 dimensions - mask is (batch, x, y, color-channel)\n ## color-channels are redundant, so just choose the first. \n mask = mask[:,:,:,0]\n \n ## Image_datagen performs interpolation when rotating, resulting in non-integer\n ## mask values. Round these back to integers before expanding the mask. \n mask = mask.round() \n mask = Expand_Mask(mask, feature_dict)\n #print(mask.shape, np.unique(mask, axis = 0))\n return (img,mask)",
"def __getitem__(self, idx):\n\n def load_image_mask(idx):\n img = cv2.imread(os.path.join(self.img_path, self.img_files[idx]))\n mask = cv2.imread(os.path.join(self.mask_path, self.img_files[idx]), cv2.IMREAD_GRAYSCALE)\n return img, mask\n\n # retrieve current image index and current augmentation index\n curr_img_idx, curr_augm_idx = self.__get_img_augm_idx__(idx)\n\n batch_img = []\n batch_mask = []\n\n img, mask = load_image_mask(curr_img_idx)\n batch_gen_iter = 0\n\n # generate AT MOST self.batch_size images\n\n while batch_gen_iter < self.batch_size:\n\n if curr_augm_idx < self.gen_count:\n\n # there are still augmentations to generate for current image\n # let's generate them\n\n if mask is None:\n print(f\"== WARNING: Image {self.img_files[curr_img_idx]}\" +\n f\"does not have corresponding mask in \\\"{self.mask_path}\\\"; skipping ==\")\n\n else:\n crop_res = self.crop_compose(image=img, mask=mask)\n augm_img, augm_mask = crop_res[\"image\"], crop_res[\"mask\"]\n\n if curr_augm_idx != 0 and self.augm:\n augm_res = self.augm_compose(image=augm_img, mask=augm_mask)\n augm_img, augm_mask = augm_res[\"image\"], augm_res[\"mask\"]\n\n # threshold and transform mask for NN model\n\n _, augm_mask = cv2.threshold(augm_mask, 127, 255, cv2.THRESH_BINARY)\n augm_mask = np.stack([(augm_mask == 255)], axis=-1).astype('float')\n\n # append augmented image and mask to batches\n\n batch_img.append(augm_img)\n batch_mask.append(augm_mask)\n\n curr_augm_idx += 1\n batch_gen_iter += 1\n\n else:\n\n # all augmentations for current images have been generated\n # move to next image\n\n curr_img_idx += 1\n curr_augm_idx = 0\n\n if curr_img_idx < len(self.img_files):\n img, mask = load_image_mask(curr_img_idx)\n else:\n break\n\n return np.array(batch_img), np.array(batch_mask)",
"def augment_img(img):\n img = random_hflip_img(img)\n img = cutout_img(img, size=12)\n img = zero_pad_and_crop_img(img)\n return img",
"def image_mask_augmentation(x, y, batch_size=4, transformations=None, seed=6):\n # Always perform some basic transformations\n if transformations is None:\n transformations = dict(\n rotation_range=10.0,\n height_shift_range=0.02,\n shear_range=5,\n horizontal_flip=True,\n vertical_flip=False,\n fill_mode=\"constant\"\n )\n\n datagen_x = ImageDataGenerator(**transformations)\n datagen_x.fit(x, augment=True, seed=seed)\n datagen_y = ImageDataGenerator(**transformations)\n datagen_y.fit(y, augment=True, seed=seed)\n\n x_aug = datagen_x.flow(x, batch_size=batch_size, seed=seed)\n y_aug = datagen_y.flow(y, batch_size=batch_size, seed=seed)\n\n generator = zip(x_aug, y_aug)\n\n return generator",
"def data_augmentation(self, img):\n new_img = img.astype(float)\n # random brightness - the mask bit keeps values from going beyond (0,255)\n value = np.random.randint(-28, 28)\n if value > 0:\n mask = (new_img[:, :, 0] + value) > 255\n if value <= 0:\n mask = (new_img[:, :, 0] + value) < 0\n new_img[:, :, 0] += np.where(mask, 0, value)\n # random shadow - full height, random left/right side, random darkening\n h, w = new_img.shape[0:2]\n mid = np.random.randint(0, w)\n factor = np.random.uniform(0.6, 0.8)\n if np.random.rand() > .5:\n new_img[:, 0:mid, 0] *= factor\n else:\n new_img[:, mid:w, 0] *= factor\n return (new_img.astype(np.uint8))",
"def data_augmentation(image, aug):\n if (aug == \"random_crop\") and (random.randint(0,1)):\n image = random_crop(image) \n if (aug == \"random_rotation\") and (random.randint(0,1)): \n image = random_rotation(image) \n if (aug == \"random_flip\") and (random.randint(0,1)): \n image = random_flip(image)\n if (aug == \"affine_transformation\") and (random.randint(0,1)): \n image = affine_transformation(image)\n if (aug == \"random_gaussian_noise\") and (random.randint(0,1)): \n image = random_gaussian_noise(image)\n if (aug == \"random_erasing\") and (random.randint(0,1)): \n image = random_erasing(image) \n return image",
"def augment_image(im):\n # First crop out the face to save reduce computation load\n bb = im.landmarks['bb'].lms\n bb_vec = bb.as_vector()\n bb_ul = (np.array([bb_vec[0], bb_vec[1]]) - bb.centre()) * 2\n bb_lr = (np.array([bb_vec[4], bb_vec[5]]) - bb.centre()) * 2\n ul = bb_ul + bb.centre()\n lr = bb_lr + bb.centre()\n im = im.crop(ul, lr, constrain_to_boundary=True)\n if im.pixels.shape[0] == 1:\n pix = np.zeros((3, im.pixels.shape[1], im.pixels.shape[2]))\n pix[:,] = im.pixels\n im.pixels = pix\n\n beta = 0.3\n cx = np.random.uniform(-beta, beta)\n cy = np.random.uniform(-beta, beta)\n fx = 1.0\n fy = np.random.uniform(0.6, 1.4)\n max_rotation = 30\n theta = np.random.uniform(-max_rotation, max_rotation)\n\n rotation = menpo.transform.Rotation.init_from_2d_ccw_angle(theta)\n shear = menpo.transform.Affine(np.array([[1, cx, 0],[cy, 1, 0], [0,0,1]]))\n scale = menpo.transform.Affine(np.array([[fx, 0, 0],[0, fy, 0], [0,0,1]]))\n T = scale.compose_after(shear).compose_after(rotation)\n\n t_im = im.transform_about_centre(T)\n\n t_im = add_color_jetting(t_im)\n t_im = add_occlusion(t_im)\n\n\n new_bb = t_im.landmarks['PTS'].lms.bounding_box()\n\n #new_bb contains the gt bounding box\n augmented_bb = add_bb_noise(new_bb)\n augmented_bb = augmented_bb.reshape((4,2))\n augmented_bb = menpo.shape.PointCloud(augmented_bb)\n t_im.landmarks['bb'] = menpo.landmark.LandmarkGroup.init_with_all_label(augmented_bb)\n\n return t_im",
"def data_augmenter(image, label, shift, rotate, scale, intensity, flip):\n image2 = np.zeros(image.shape, dtype=np.float32)\n label2 = np.zeros(label.shape, dtype=np.int32)\n for i in range(image.shape[0]):\n # For each image slice, generate random affine transformation parameters\n # using the Gaussian distribution\n shift_val = [np.clip(np.random.normal(), -3, 3) * shift,\n np.clip(np.random.normal(), -3, 3) * shift]\n rotate_val = np.clip(np.random.normal(), -3, 3) * rotate\n scale_val = 1 + np.clip(np.random.normal(), -3, 3) * scale\n intensity_val = 1 + np.clip(np.random.normal(), -3, 3) * intensity\n\n # Apply the affine transformation (rotation + scale + shift) to the image\n row, col = image.shape[1:3]\n M = cv2.getRotationMatrix2D((row / 2, col / 2), rotate_val, 1.0 / scale_val)\n M[:, 2] += shift_val\n for c in range(image.shape[3]):\n image2[i, :, :, c] = ndimage.interpolation.affine_transform(image[i, :, :, c],\n M[:, :2], M[:, 2], order=1)\n\n # Apply the affine transformation (rotation + scale + shift) to the label map\n label2[i, :, :] = ndimage.interpolation.affine_transform(label[i, :, :],\n M[:, :2], M[:, 2], order=0)\n\n # Apply intensity variation\n image2[i] *= intensity_val\n\n # Apply random horizontal or vertical flipping\n if flip:\n if np.random.uniform() >= 0.5:\n image2[i] = image2[i, ::-1, :, :]\n label2[i] = label2[i, ::-1, :]\n else:\n image2[i] = image2[i, :, ::-1, :]\n label2[i] = label2[i, :, ::-1]\n return image2, label2",
"def apply_mask(im, im_pred):\n r_channel, g_channel, b_channel = cv2.split(im_pred)\n alpha_channel = 127 * np.ones(b_channel.shape, dtype=b_channel.dtype)\n # Make background pixels fully transparent\n alpha_channel -= 127 * np.all(im_pred == np.array([0, 0, 0]), axis=2).astype(b_channel.dtype)\n im_pred = cv2.merge((r_channel, g_channel, b_channel, alpha_channel))\n mask = Image.fromarray(im_pred, mode='RGBA')\n # masked_img = Image.fromarray(im)#array to image\n masked_img=im\n masked_img.paste(mask, box=None, mask=mask)\n # return np.array(masked_img)\n return masked_img",
"def composite_scene(orig_scene, mask_seam, match_scene, dialation_mask, orig_scene1, method=\"paste\", repeat=1):\n avg_pixel = np.mean(orig_scene1[orig_scene1 != 0])\n \n output = np.zeros(orig_scene.shape)\n if method==\"seamlessclone\":\n width, height, _ = match_scene.shape\n center = (height/2, width/2)\n \n # create plain white mask\n mask = np.zeros(match_scene.shape, match_scene.dtype) + 255\n \n orig_scene_impute = orig_scene.copy()\n orig_scene_impute[mask_seam == 255] = avg_pixel\n \n \n \n #image_to_compare\n output_blend = cv2.seamlessClone(match_scene.astype(np.uint8), \n orig_scene_impute.astype(np.uint8), \n mask, center,cv2.NORMAL_CLONE)\n \n #implot(output_blend)\n # now reapply the mask with alpha blending to fix it up again.\n \n \"\"\"\n TO DO CHANGE IT FROM THE DILATION + MASK SEAM, NEED TO FIND THE INTERSECTION OF THESE TWO TO BE THE \n REAL MASK TO BLUR\n \"\"\"\n dilation_mask = mask_seam.copy()\n \n dilation_mask = cv2.GaussianBlur(dilation_mask, (101,101), 0) # blur mask and do a alpha blend... between the \n #implot(dilation_mask, 'gray')\n \n dilation_mask = dilation_mask/255.0\n \n \n \n # 0 is black, 1 is white\n #output = cv2.addWeighted(output_blend, dialation_mask, orig_scene, 1-dialation_mask)\n #print dialation_mask\n #print dialation_mask.shape\n #print output_blend.shape\n #a = cv2.multiply(output_blend.astype(np.float), dialation_mask)\n \n for _ in range(10):\n # some kind of layered alpha blend by the dilation mask values...\n orig_scene_impute = orig_scene.copy()\n orig_scene_impute[mask_seam == 255] = output_blend[mask_seam == 255]\n output_blend = cv2.add(cv2.multiply(output_blend.astype(np.float), dilation_mask),\n cv2.multiply(orig_scene_impute.astype(np.float), 1-dilation_mask), 0)\n \n \n orig_scene_impute = orig_scene.copy()\n orig_scene_impute[mask_seam == 255] = output_blend[mask_seam == 255]\n output_blend = cv2.add(cv2.multiply(output_blend.astype(np.float), dilation_mask),\n cv2.multiply(orig_scene_impute.astype(np.float), 1-dilation_mask), 0)\n \n \n \n orig_scene_impute = orig_scene.copy()\n orig_scene_impute[mask_seam == 255] = output_blend[mask_seam == 255]\n output = cv2.seamlessClone(match_scene.astype(np.uint8), \n output_blend.astype(np.uint8), \n mask, center,cv2.NORMAL_CLONE)\n \n # complete blend with seamlessclone...\n \n \n # output = np.maximum(output_blend, orig_scene_impute)\n # or just darken...\n \n \n #if repeat == 1:\n # return output_blend\n #output = composite_scene(orig_scene_impute, mask_seam, output_blend, dialation_mask, method=\"paste\")\n \n\n\n elif method==\"paste\":\n output[mask_seam == 0] = orig_scene[mask_seam == 0]\n output[mask_seam != 0] = match_scene[mask_seam != 0]\n \n elif method==\"alphablend\":\n output_blend = output.copy()\n output_blend[mask_seam == 0] = orig_scene[mask_seam == 0]\n output_blend[mask_seam != 0] = match_scene[mask_seam != 0]\n \n \n \n \n else:\n output[mask_seam == 0] = orig_scene[mask_seam == 0]\n output[mask_seam != 0] = match_scene[mask_seam != 0]\n return output",
"def overlay_mask(img, mask, transparency=0.5):\n im_over = np.ndarray(img.shape)\n im_over[:, :, 0] = (1 - mask[:, :, 0]) * img[:, :, 0] + mask[:, :, 0] * (\n 255 * transparency + (1 - transparency) * img[:, :, 0])\n im_over[:, :, 1] = (1 - mask[:, :, 1]) * img[:, :, 1] + mask[:, :, 1] * (\n 255 * transparency + (1 - transparency) * img[:, :, 1])\n im_over[:, :, 2] = (1 - mask[:, :, 2]) * img[:, :, 2] + mask[:, :, 2] * (\n 255 * transparency + (1 - transparency) * img[:, :, 2])\n return im_over",
"def image_augmentation(dataset_dict):\n dataset_dict = copy.deepcopy(dataset_dict)\n image = utils.read_image(dataset_dict[\"file_name\"], format=\"BGR\")\n\n transform_list = [\n T.RandomCrop(crop_type=\"relative_range\", crop_size=[0.95, 0.87]),\n T.RandomBrightness(0.9, 1.5),\n T.RandomContrast(0.8, 1.6),\n T.RandomSaturation(1.0, 1.6),\n T.RandomRotation(angle=[15, 0, 5, 6, 15], expand=False),\n T.RandomFlip(prob=0.5, horizontal=True, vertical=False),\n T.ResizeScale(1.0, 2.0, target_height=900, target_width=700)\n ]\n\n image, transforms = T.apply_transform_gens(transform_list, image)\n dataset_dict[\"image\"] = torch.as_tensor(image.transpose(2, 0, 1).astype(\"float32\"))\n\n annotations = [\n utils.transform_instance_annotations(obj, transforms, image.shape[:2])\n for obj in dataset_dict.pop(\"annotations\")\n if obj.get(\"iscrowd\", 0) == 0\n ]\n\n instances = utils.annotations_to_instances(annotations, image.shape[:2])\n dataset_dict[\"instances\"] = utils.filter_empty_instances(instances)\n\n return dataset_dict",
"def image_augmentations(\n image,\n data_augmentations,\n model_input_image_size,\n label=None):\n if image.get_shape() == None:\n im_size = model_input_image_size\n else:\n im_size = image.get_shape().as_list()\n im_size_check = True # np.any(\n # np.less_equal(\n # model_input_image_size[:2],\n # im_size[:2]))\n if data_augmentations is not None:\n for aug in data_augmentations:\n # Pixel/image-level augmentations\n if aug == 'image_float32':\n image = tf.cast(image, tf.float32)\n if aug == 'label_float32':\n label = tf.cast(label, tf.float32)\n if aug == 'bfloat16':\n image = tf.cast(image, tf.bfloat16)\n if aug == 'singleton':\n image = tf.expand_dims(image, axis=-1)\n print 'Adding singleton dimension to image.'\n if aug == 'sgl_label' or aug == 'singleton_label':\n label = tf.expand_dims(label, axis=-1)\n print 'Adding singleton dimension to label.'\n if aug == 'coco_labels':\n label = tf.nn.relu(label - 91)\n if aug == 'contrastive_loss':\n label = tf.stack(\n [tf.ones_like(label), tf.zeros_like(label)], -1)\n if aug == 'bsds_normalize':\n data = np.load(\n '/media/data_cifs/image_datasets/BSDS500/images/train/file_paths.npz')\n mean = data['mean'].squeeze(0)\n stds = data['stds'].squeeze(0)\n image = (image - mean) / stds\n if aug == 'bsds_crop' and im_size_check:\n assert len(image.get_shape()) == 3, '4D not implemented yet.'\n scale_choices = tf.convert_to_tensor(\n # [1. / 2., 1.1 / 2., 1.2 / 2.])\n [1., 1, 1.1, 1.2])\n samples = tf.multinomial(\n tf.log([tf.ones_like(scale_choices)]), 1)\n image_shape = image.get_shape().as_list()\n scale = scale_choices[tf.cast(samples[0][0], tf.int32)]\n scale_tf = tf.cast(\n tf.round(\n np.asarray(\n image_shape[:2]).astype(\n np.float32) * scale),\n tf.int32)\n combined = tf.concat([image, label], axis=-1)\n combo_shape = combined.get_shape().as_list()\n combined_resize = tf.squeeze(\n tf.image.resize_nearest_neighbor(\n tf.expand_dims(combined, axis=0),\n scale_tf,\n align_corners=True),\n axis=0)\n combined_crop = tf.random_crop(\n combined_resize,\n tf.concat(\n [model_input_image_size[:2], [combo_shape[-1]]], 0))\n image = combined_crop[:, :, :image_shape[-1]]\n label = combined_crop[:, :, image_shape[-1]:]\n image.set_shape(model_input_image_size)\n label.set_shape(\n model_input_image_size[:2] + [\n combo_shape[-1] - model_input_image_size[-1]])\n print 'Applying BSDS crop.'\n if aug == 'hed_resize' and im_size_check:\n assert len(image.get_shape()) == 3, '4D not implemented yet.'\n scale_choices = tf.convert_to_tensor(\n # [1. 
/ 2., 1.1 / 2., 1.2 / 2.])\n np.arange(1, 1.51, 0.1)) # 0.7, 1.5\n samples = tf.multinomial(\n tf.log([tf.ones_like(scale_choices)]), 1)\n image_shape = image.get_shape().as_list()\n scale = scale_choices[tf.cast(samples[0][0], tf.int32)]\n scale_tf = tf.cast(\n tf.round(\n np.asarray(\n image_shape[:2]).astype(\n np.float32) * scale),\n tf.int32)\n combined = tf.concat([image, label], axis=-1)\n combo_shape = combined.get_shape().as_list()\n combined_resize = tf.squeeze(\n tf.image.resize_bilinear(\n tf.expand_dims(combined, axis=0),\n scale_tf,\n align_corners=True),\n axis=0)\n print 'Applying HED resize.'\n if aug == 'uint8_rescale':\n image = tf.cast(image, tf.float32) / 255.\n print 'Applying uint8 rescale to the image.'\n if aug == 'cube_plus_rescale':\n image = tf.cast(image, tf.float32) / 13273.\n print 'Applying uint8 rescale to the image.'\n if aug == 'uint8_rescale_label':\n label = tf.cast(label, tf.float32) / 255.\n print 'Applying uint8 rescale to the label.'\n if aug == 'uint8_rescale_-1_1':\n image = 2 * (tf.cast(image, tf.float32) / 255.) - 1\n print 'Applying uint8 rescale.'\n if aug == 'image_to_bgr':\n image = tf.stack(\n [image[..., 2], image[..., 1], image[..., 0]], axis=-1)\n if aug == 'pascal_normalize':\n image = image - [123.68, 116.78, 103.94]\n if aug == 'ilsvrc12_normalize':\n MEAN_RGB = [0.485 * 255, 0.456 * 255, 0.406 * 255]\n STDDEV_RGB = [0.229 * 255, 0.224 * 255, 0.225 * 255]\n image = (image - MEAN_RGB) / STDDEV_RGB\n if aug == 'random_contrast':\n assert len(image.get_shape()) == 3, '4D not implemented yet.'\n image = tf.image.random_contrast(image, lower=0.2, upper=1.8)\n print 'Applying random contrast.'\n if aug == 'random_brightness':\n assert len(image.get_shape()) == 3, '4D not implemented yet.'\n image = tf.image.random_brightness(image, max_delta=63.)\n print 'Applying random brightness.'\n if aug == 'grayscale' and im_size_check:\n # image = tf.image.rgb_to_grayscale(image)\n if len(image.get_shape().as_list()) == 2:\n image = tf.expand_dims(image, axis=-1)\n else:\n image = tf.expand_dims(image[..., 0], axis=-1)\n print 'Converting to grayscale.'\n if aug == 'rgb2gray' and im_size_check:\n image = tf.image.rgb_to_grayscale(image)\n print 'Converting rgb2gray.'\n if aug == 'clip_uint8' and im_size_check:\n image = tf.minimum(image, 255.)\n image = tf.maximum(image, 0.)\n if aug == 'cube_plus_crop':\n image = cube_plus_crop(image, model_input_image_size)\n # Affine augmentations\n if aug == 'rotate' and im_size_check:\n max_theta = 22.\n angle_rad = (max_theta / 180.) 
* math.pi\n angles = tf.random_uniform([], -angle_rad, angle_rad)\n transform = tf.contrib.image.angles_to_projective_transforms(\n angles,\n im_size[0],\n im_size[1])\n image = tf.contrib.image.transform(\n image,\n tf.contrib.image.compose_transforms(transform),\n interpolation='BILINEAR') # or 'NEAREST'\n print 'Applying random rotate.'\n if aug == 'rotate90' and im_size_check:\n image = tf.image.rot90(\n image,\n tf.random_uniform(\n shape=[],\n minval=0,\n maxval=4,\n dtype=tf.int32))\n print 'Applying random 90 degree rotate.'\n if aug == 'rotate90_image_label' and im_size_check:\n concat = tf.image.rot90(\n tf.concat([image, label], -1),\n tf.random_uniform(\n shape=[],\n minval=0,\n maxval=4,\n dtype=tf.int32))\n image = concat[..., :im_size[-1]]\n label = concat[..., im_size[-1]:]\n print 'Applying random 90 degree rotate to images and labels.'\n if aug == 'stack3d':\n image = tf.concat([image, image, image], axis=-1)\n if aug == 'rot_image_label' and im_size_check:\n max_theta = 30.\n angle_rad = (max_theta / 180.) * math.pi\n angles = tf.random_uniform([], -angle_rad, angle_rad)\n transform = tf.contrib.image.angles_to_projective_transforms(\n angles,\n im_size[0],\n im_size[1])\n image = tf.contrib.image.transform(\n image,\n tf.contrib.image.compose_transforms(transform),\n interpolation='BILINEAR') # or 'NEAREST'\n label = tf.contrib.image.transform(\n label,\n tf.contrib.image.compose_transforms(transform),\n interpolation='BILINEAR') # or 'NEAREST'\n print 'Applying random rotate.'\n if aug == 'random_scale_crop_image_label'\\\n and im_size_check:\n scale_choices = tf.convert_to_tensor(\n [1., 1.04, 1.08, 1.12, 1.16])\n samples = tf.multinomial(\n tf.log([tf.ones_like(scale_choices)]), 1)\n image_shape = image.get_shape().as_list()\n scale = scale_choices[tf.cast(samples[0][0], tf.int32)]\n scale_tf = tf.cast(\n tf.round(\n np.asarray(\n model_input_image_size[:2]).astype(\n np.float32) * scale),\n tf.int32)\n combined = tf.concat([image, label], axis=-1)\n combo_shape = combined.get_shape().as_list()\n combined_resize = tf.squeeze(\n tf.image.resize_bicubic(\n tf.expand_dims(combined, axis=0),\n scale_tf,\n align_corners=True),\n axis=0)\n combined_crop = tf.random_crop(\n combined_resize, tf.concat(\n [model_input_image_size[:2], [combo_shape[-1]]], 0))\n image = combined_crop[:, :, :image_shape[-1]]\n label = combined_crop[:, :, image_shape[-1]:]\n image.set_shape(model_input_image_size)\n label.set_shape(\n model_input_image_size[:2] + [\n combo_shape[-1] - model_input_image_size[-1]])\n if aug == 'rc_res' and im_size_check:\n image = random_crop(image, model_input_image_size)\n if len(model_input_image_size) > 2:\n model_input_image_size = model_input_image_size[:2]\n ms = [x // 2 for x in model_input_image_size]\n image = resize_image_label(\n im=image,\n model_input_image_size=ms,\n f='bicubic')\n print 'Applying random crop and resize.'\n if aug == 'cc_res' and im_size_check:\n image = center_crop(image, model_input_image_size)\n if len(model_input_image_size) > 2:\n model_input_image_size = model_input_image_size[:2]\n ms = [x // 2 for x in model_input_image_size]\n image = resize_image_label(\n im=image,\n model_input_image_size=ms,\n f='bicubic')\n print 'Applying center crop and resize.'\n if aug == 'random_crop' and im_size_check:\n image = random_crop(image, model_input_image_size)\n print 'Applying random crop.'\n if aug == 'center_crop' and im_size_check:\n image = center_crop(image, model_input_image_size)\n print 'Applying center crop.'\n if aug == 
'rc_image_label' and im_size_check:\n assert len(image.get_shape()) == 3, '4D not implemented yet.'\n image, label = crop_image_label(\n image=image,\n label=label,\n size=model_input_image_size,\n crop='random')\n if aug == 'cc_image_label' and im_size_check:\n assert len(image.get_shape()) == 3, '4D not implemented yet.'\n image, label = crop_image_label(\n image=image,\n label=label,\n size=model_input_image_size,\n crop='center')\n if aug == 'resize' and im_size_check:\n if len(model_input_image_size) > 2:\n model_input_image_size = model_input_image_size[:2]\n image = resize_image_label(\n im=image,\n model_input_image_size=model_input_image_size,\n f='bicubic')\n print 'Applying area resize.'\n if aug == 'jk_resize' and im_size_check:\n if len(model_input_image_size) > 2:\n model_input_image_size = model_input_image_size[:2]\n image = tf.image.resize_image_with_crop_or_pad(\n image,\n model_input_image_size[0],\n model_input_image_size[1])\n print 'Applying area resize.'\n if aug == 'random_crop_and_res_cube_plus' and im_size_check:\n im_shape = image.get_shape().as_list()\n im_shape[0] /= 4\n im_shape[1] /= 4\n image = resize_image_label(\n im=image,\n model_input_image_size=im_shape[:2],\n f='bicubic')\n image = random_crop(image, model_input_image_size)\n if aug == 'center_crop_and_res_cube_plus' and im_size_check:\n im_shape = image.get_shape().as_list()\n im_shape[0] /= 4\n im_shape[1] /= 4\n image = resize_image_label(\n im=image,\n model_input_image_size=im_shape[:2],\n f='bicubic')\n image = center_crop(image, model_input_image_size)\n if aug == 'res_and_crop' and im_size_check:\n model_input_image_size_1 = np.asarray(\n model_input_image_size[:2]) + 28\n image = resize_image_label(\n im=image,\n model_input_image_size=model_input_image_size_1,\n f='area')\n image = center_crop(image, model_input_image_size)\n print 'Applying area resize.'\n if aug == 'res_nn' and im_size_check:\n assert len(image.get_shape()) == 3, '4D not implemented yet.'\n if len(model_input_image_size) > 2:\n model_input_image_size = model_input_image_size[:2]\n image = resize_image_label(\n im=image,\n model_input_image_size=model_input_image_size,\n f='nearest')\n print 'Applying nearest resize.'\n if aug == 'res_image_label' and im_size_check:\n assert len(image.get_shape()) == 3, '4D not implemented yet.'\n if len(model_input_image_size) > 2:\n model_input_image_size = model_input_image_size[:2]\n image = resize_image_label(\n im=image,\n model_input_image_size=model_input_image_size,\n f='bicubic')\n label = resize_image_label(\n im=label,\n model_input_image_size=model_input_image_size,\n f='bicubic')\n print 'Applying bilinear resize.'\n if aug == 'res_nn_image_label' and im_size_check:\n assert len(image.get_shape()) == 3, '4D not implemented yet.'\n if len(model_input_image_size) > 2:\n model_input_image_size = model_input_image_size[:2]\n image = resize_image_label(\n im=image,\n model_input_image_size=model_input_image_size,\n f='nearest')\n label = resize_image_label(\n im=label,\n model_input_image_size=model_input_image_size,\n f='nearest')\n print 'Applying nearest resize.'\n if aug == 'left_right':\n image = image_flip(image, direction='left_right')\n print 'Applying random flip left-right.'\n if aug == 'up_down':\n image = image_flip(image, direction='up_down')\n print 'Applying random flip up-down.'\n if aug == 'lr_viz_flip':\n assert len(image.get_shape()) == 3, '4D not implemented yet.'\n image, label = lr_viz_flip(image, label)\n image, label = ud_viz_flip(image, label)\n if aug == 
'lr_flip_image_label':\n assert len(image.get_shape()) == 3, '4D not implemented yet.'\n image, label = lr_flip_image_label(image, label)\n if aug == 'ud_flip_image_label':\n assert len(image.get_shape()) == 3, '4D not implemented yet.'\n image, label = ud_flip_image_label(image, label)\n if aug == 'gratings_modulate':\n modulate = 10\n image //= modulate\n offset = (255 / 2) - ((255 / modulate) / 2)\n image += offset\n if aug == 'gaussian_noise':\n im_shape = image.get_shape().as_list()\n assert len(im_shape) == 3, '4D not implemented yet.'\n sigma = 1. / 10.\n mu = 0.\n image = image + tf.random_normal(\n im_shape,\n mean=mu,\n stddev=sigma)\n print 'Applying gaussian noise.'\n if aug == 'gaussian_noise_small':\n im_shape = image.get_shape().as_list()\n assert len(im_shape) == 3, '4D not implemented yet.'\n sigma = 1. / 20.\n mu = 0.\n image = image + tf.random_normal(\n im_shape,\n mean=mu,\n stddev=sigma)\n print 'Applying gaussian noise.'\n if aug == 'mixup':\n raise RuntimeError('Mixup not properly implemented yet.')\n alpha = 0.4\n dist = tf.distributions.Beta(alpha, alpha)\n image = image * dist + (1 - dist) * tf.roll(image, 0, 1)\n label = label * dist + (1 - dist) * tf.roll(label, 0, 1)\n if aug == 'hed_brightness':\n image = tf.image.random_brightness(image, 63)\n if aug == 'hed_contrast':\n image = tf.image.random_contrast(image, lower=0.4, upper=1.5)\n if aug == 'blur_labels':\n label = tf_blur(\n image=label,\n kernel_size=3, # extent\n name='label_blur',\n normalize=True,\n sigma=1.)\n if aug == 'calculate_rate_time_crop':\n im_shape = image.get_shape().as_list()\n minval = im_shape[0] // 3\n time_crop = tf.random_uniform(\n [],\n minval=minval,\n maxval=im_shape[0],\n dtype=tf.int32)\n\n # For now always pull from the beginning\n indices = tf.range(0, time_crop, dtype=tf.int32)\n selected_image = tf.gather(image, indices)\n padded_image = tf.zeros(\n [im_shape[0] - time_crop] + im_shape[1:],\n dtype=selected_image.dtype)\n\n # Randomly concatenate pad to front or back\n image = tf.cond(\n pred=tf.greater(\n tf.random_uniform(\n [],\n minval=0,\n maxval=1,\n dtype=tf.float32),\n 0.5),\n true_fn=lambda: tf.concat(\n [selected_image, padded_image], axis=0),\n false_fn=lambda: tf.concat(\n [padded_image, selected_image], axis=0)\n )\n image.set_shape(im_shape)\n\n # Convert label to rate\n label = label / im_shape[0]\n if aug == 'calculate_rate':\n label = label / image.get_shape().as_list()[0]\n print 'Applying rate transformation.'\n if aug == 'threshold':\n image = tf.cast(tf.greater(image, 0.1), tf.float32)\n print 'Applying threshold.'\n if aug == 'nonzero_label':\n label = tf.cast(tf.greater(label, 0.2), tf.float32)\n print 'Applying threshold.'\n if aug == 'zero_one':\n image = tf.minimum(tf.maximum(image, 0.), 1.)\n print 'Applying threshold.'\n if aug == 'timestep_duplication':\n image = tf.stack([image for iid in range(7)])\n print 'Applying timestep duplication.'\n if aug == 'per_image_standardization':\n image = tf.image.per_image_standardization(image)\n print 'Applying per-image zscore.'\n if aug == 'flip_image_polarity':\n image = tf.abs(image - 1.)\n if aug == 'flip_label_polarity':\n label = tf.abs(label - 1.)\n if aug == 'NCHW':\n image = tf.transpose(image, (2, 0, 1))\n if aug == 'bfloat16_image':\n image = tf.cast(image, tf.bfloat16)\n if aug == 'bfloat16_label':\n label = tf.cast(label, tf.bfloat16)\n if aug == 'hfloat16_image':\n image = tf.cast(image, tf.float16)\n if aug == 'hfloat16_label':\n label = tf.cast(label, tf.float16)\n if aug == 
'threshold_label':\n label = tf.cast(tf.greater(label, 0.999), tf.float32)\n print 'Applying threshold of 0.999 to the label.'\n if aug == 'threshold_label_255':\n # cABC label = tf.cast(tf.greater(label, 200), tf.float32)\n label = tf.cast(tf.greater(label, 10), tf.float32)\n print 'Applying threshold of 127.5 to the label.'\n if aug == 'normalize_label':\n label = tf.cast(label, tf.float32)\n label = label / tf.reduce_max(label) # tf.cast(tf.greater(label, 25), tf.float32)\n print 'Normalizing label to [0, 1].'\n if aug == 'scale_to_255':\n image = image * 255.\n if aug == 'clip_255':\n image = tf.maximum(tf.minimum(255., image), 0.)\n # else:\n # assert len(image.get_shape()) == 3, '4D not implemented yet.'\n # image = tf.image.resize_image_with_crop_or_pad(\n # image, model_input_image_size[0], model_input_image_size[1])\n return image, label",
"def add_image_mask(image, image_mask):\n try:\n # image1 * a + image2 * b + lambda\n # image1 and image2 must be the same shape.\n summed = cv2.addWeighted(image, 0.8, image_mask, 1, 0)\n return summed\n except:\n print(\"Unable to apply mask, Check the shape of mask and image\")",
"def augment(im_path):\n # change directory to toplevel of repo (parent of augmentation)\n os.chdir(os.path.split(os.path.dirname(os.path.realpath(__file__)))[0])\n\n im_name, im_ext = os.path.splitext(im_path)\n if im_path not in os.listdir(\"data/raw\"):\n raise FileNotFoundError(f\"{im_path} could not be found in the list of raw images\")\n\n if im_name + \".json\" not in os.listdir(\"data/corrected\"):\n raise FileNotFoundError(f\"{im_name} has not been labelled yet! (no file '{im_name}.json' in corrected)\")\n\n with open(f\"data/corrected/{im_name}.json\") as read_file:\n im_label = json.loads(read_file.read(-1))\n persp = np.float32(im_label[\"perspective\"])\n\n im: Image.Image = Image.open(f\"data/raw/{im_path}\")\n # downscale image to reasonable height\n scale_factor = 500 / im.height\n persp = persp * scale_factor\n im.thumbnail([1000000, 500])\n im_cv = cv2.cvtColor(np.array(im), cv2.COLOR_RGB2BGR)\n\n # determine crop box\n crop_amount = (im.width - 500)\n left_crop = random.randint(crop_amount//4, 3 * crop_amount // 4)\n # left_crop = crop_amount//2\n right_crop = crop_amount - left_crop\n box = [\n left_crop,\n 0,\n im.width - right_crop,\n im.height\n ]\n\n # warp perspective\n # basic way: add gaussian noise to the 4 corner points\n warped_persp = persp.copy()\n for i in range(4):\n for j in range(2):\n v = warped_persp[i][j]\n v += random.gauss(0, 5)\n # ensure none of the perspective points will fall outside the cropped image\n v = max(box[j] + 5, v)\n v = min(box[j+2] - 5, v)\n warped_persp[i][j] = v\n\n matrix = cv2.getPerspectiveTransform(persp, warped_persp)\n warped_im = cv2.warpPerspective(im_cv, matrix, (im.width, im.height))\n warped_im = Image.fromarray(cv2.cvtColor(warped_im, cv2.COLOR_BGR2RGB))\n\n # run crop on warped image\n warped_im = warped_im.crop(box)\n # adjust warped coordinates according to crop\n for i in range(4):\n warped_persp[i][0] -= box[0]\n warped_persp[i][1] -= box[1]\n\n # scale down to final size\n warped_im = warped_im.resize((256, 256))\n for i in range(4):\n warped_persp[i][0] *= 256 / 500\n warped_persp[i][1] *= 256 / 500\n\n # adjust image colour balance, saturation and contrast\n warped_im = ImageEnhance.Color(warped_im).enhance(random.uniform(0.9, 1.2))\n warped_im = ImageEnhance.Contrast(warped_im).enhance(random.uniform(0.8, 1.2))\n warped_im = ImageEnhance.Brightness(warped_im).enhance(random.uniform(0.8, 1.2))\n\n # adjust image temperature\n # thanks to Mark Ransom (https://stackoverflow.com/a/11888449)\n temp_r, temp_g, temp_b = random.choice(KELVIN_TABLE)\n convert_matrix = (temp_r / 255.0, 0.0, 0.0, 0.0,\n 0.0, temp_g / 255.0, 0.0, 0.0,\n 0.0, 0.0, temp_b / 255.0, 0.0)\n warped_im = warped_im.convert(\"RGB\", convert_matrix)\n\n # add noise\n noise_strength = random.uniform(5, 10)\n warped_im_arr = np.float64(np.array(warped_im))\n warped_im_arr += np.random.normal(0, noise_strength, warped_im_arr.shape)\n warped_im_arr = np.clip(warped_im_arr, 0, 255)\n warped_im = Image.fromarray(np.uint8(warped_im_arr))\n\n fname = f\"{im_name}-{hex(random.randint(2**20, 2**24))[2:]}\"\n warped_im.save(f\"data/augmented/{fname}{im_ext}\")\n with open(f\"data/augmented/{fname}.json\", \"w\") as write_file:\n data = {\n \"darts\": im_label[\"darts\"],\n \"perspective\": warped_persp.tolist()\n }\n write_file.write(json.dumps(data))\n return warped_im, warped_persp",
"def preprocess_train(im, boxes, classes, inst_masks, mask, input_size, min_size=2,\n use_augment=False, training_scale=[0.3, 0.5, 0.7, 1.0]):\n ori_im = np.copy(im)\n target_h, target_w = input_size\n\n # ---------- old data_augmentation ----------\n if use_augment:\n if np.random.choice([0, 1]) != 0:\n scale = np.random.choice(training_scale) # adding more small objects\n im, inst_masks, mask, boxes, classes = random_scale(im, inst_masks, mask, boxes, classes, scale=scale)\n min_obj_cover = np.random.choice([0.8, 0.9, 1.0])\n # truncted examples may lead to multiple-detections..\n im, inst_masks, mask, boxes, classes = random_aspect_ratio(im, inst_masks, mask, boxes, classes,\n min_aspect_ratio=0.5, max_aspect_ratio=2.0,\n min_obj_cover=min_obj_cover)\n #\n # # r = np.random.randint(0, 3)\n # if np.random.rand() < 0.75:\n # im, inst_masks, mask, boxes, classes = fixed_scale(im, inst_masks, mask, boxes, classes, target_h, target_w)\n # else:\n # im, inst_masks, mask, boxes, classes = center_crop2fixed_pad(im, inst_masks, mask, boxes, classes, target_w, target_h,\n # min_size=min_size)\n\n # ---------- old data_augmentation ----------\n\n # ---------- none data_augmentation ----------\n im, inst_masks, mask, boxes, classes = fixed_scale(im, inst_masks, mask, boxes, classes, target_h, target_w)\n im, inst_masks, mask, boxes, classes = random_flip(im, inst_masks, mask, boxes, classes)\n # ---------- none data_augmentation ----------\n\n # ---------- old data_augmentation ----------\n im = distort_color(im)\n # ---------- old data_augmentation ----------\n\n im = imcv2_recolor(im)\n\n # add this because zeros numpy array will cause errors in torch Dataloader\n inst_masks = np.zeros([1, target_h, target_w], dtype=inst_masks.dtype) if inst_masks.size == 0 else inst_masks\n\n boxes = np.asarray(boxes, dtype=np.float32)\n return im, boxes, classes, inst_masks, mask, ori_im",
"def mask_image(image):\n pass",
"def plot_with_augmentation(image, mask, augment):\n augmented = augment(image=image, mask=mask)\n image_flipped = augmented[\"image\"]\n mask_flipped = augmented[\"mask\"]\n visualize(image_flipped, mask_flipped, original_image=image, original_mask=mask)",
"def _preprocessing(mask, mapping, image) -> np.ndarray:\n # TODO: Turn mapping into generic function.\n processed_image = cv.cvtColor(image, cv.COLOR_BGR2GRAY)\n processed_image = cv.remap(processed_image, *mapping, cv.INTER_LINEAR)\n processed_image[~mask] = 255\n return processed_image"
]
| [
"0.7971662",
"0.6987494",
"0.6844219",
"0.6818389",
"0.6804014",
"0.67105186",
"0.6666665",
"0.6547314",
"0.65413684",
"0.65087134",
"0.64880174",
"0.645089",
"0.6445864",
"0.6416765",
"0.6411415",
"0.6388375",
"0.63562083",
"0.63396513",
"0.6335781",
"0.6261895",
"0.62515956",
"0.6217558",
"0.6216206",
"0.61646235",
"0.6155791",
"0.61352986",
"0.6125493",
"0.61221105",
"0.61074775",
"0.6099615"
]
| 0.72530645 | 1 |
Function to read the image and mask and return a sample of the dataset when needed. | def __getitem__(self,image_id):
# read the image
image_path = (os.path.join(self.dataset_dir,self.list_dir[image_id],"images/{}.png".format(self.list_dir[image_id])))
image = io.imread(image_path)
# read the mask
mask_dir = os.path.join(self.dataset_dir,self.list_dir[image_id],'masks')
masks_list = []
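    # each .png file in the masks directory holds one instance mask for this image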
for i, f in enumerate (next(os.walk(mask_dir))[2]):
if f.endswith ('.png'):
m = io.imread(os.path.join(mask_dir,f)).astype(np.bool)
m = m[:,:,0]
masks_list.append(m)
    # combine all the masks corresponding to an individual sample image into a single binary mask
    masks = masks_list[0]
    for extra_mask in masks_list[1:]:
        masks = np.logical_or(masks, extra_mask)
# do the transforms..
trans_img,trans_masks = self.transform(image,masks,self.aug)
sample = {"image":trans_img,"masks":trans_masks}
return(sample) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __getitem__(self, idx):\n img_path = self.img_labels.iloc[idx, 0]\n mask_path = self.img_labels.iloc[idx, 1]\n\n image = _load_img(img_path)\n image = np.array(image)\n image = torch.from_numpy(image)\n \n mask = _load_img(mask_path)\n mask = np.array(mask)\n mask = torch.from_numpy(mask)\n \n sample = (image, mask)\n\n return sample",
"def reader(self, idx):\n # Get the path of input image and groundtruth mask.\n input_path, gtmask_path = self.imgs[idx]\n input_img, gt_img = self.loader(input_path, gtmask_path)\n return input_img, gt_img",
"def build_dataset(self):\n print(\"reading data of images currently , please wait......\")\n x_train, y_train, _ = get_images(self.train_directory)\n x_test, y_test, _ = get_images(self.test_directory)\n x_train, y_train = image_subset(self.num_classes, x_train, y_train)\n x_test, y_test = image_subset(self.num_classes, x_test, y_test)\n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n self.x_train = x_train / 255\n self.x_test = x_test / 255\n self.y_train = utils.to_categorical(y_train, self.num_classes)\n self.y_test = utils.to_categorical(y_test, self.num_classes)",
"def loader(self, input_path, mask_path):\n input_image = cv2.imread(input_path)\n # h, w = input_image.shape\n # print(\"input_image:\", h, w)\n # gt_mask = cv2.imread(mask_path)\n # bgr --> rgb\n # # input_image = input_image[:, :, ::-1]\n # gt_mask = gt_mask[:, :, ::-1]\n\n # the gt_mask should be gray image\n gt_mask = cv2.imread(mask_path, 0)\n # h, w = gt_mask.shape\n # print(\"gt_mask:\", h, w)\n\n # randomly horizontal flip\n input_image, gt_mask = horizontal_flip(input_image, gt_mask, axis=1)\n\n # randomly scale\n scale = np.random.uniform(low=0.5, high=2.0, size=1)\n input_image, gt_mask = rescale(input_image, gt_mask, scale)\n\n input_image = cv2.resize(input_image, (self.img_width, self.img_height), interpolation=cv2.INTER_LINEAR)\n gt_mask = cv2.resize(gt_mask, (self.img_width, self.img_height), interpolation=cv2.INTER_NEAREST)\n # print('input_image:', input_image.shape) # -> (512, 512, 3)\n # print('gt_mask:', gt_mask.shape) # -> (512, 512, 3)\n gt_mask = np.expand_dims(gt_mask, axis=-1)\n return input_image, gt_mask",
"def load_dataset(image_home, mask_home, patient_list, \n size = 512, \n downsample = 0.5, \n overlap = 1.5, \n verbose=False):\n\n image_list = np.concatenate([sorted(glob.glob(f'{image_home}/{p}/*')) for p in patient_list])\n mask_list = np.concatenate([sorted(glob.glob(f'{mask_home}/{p}/*')) for p in patient_list])\n\n if verbose:\n for i, (im, m) in enumerate(zip(image_list, mask_list)):\n print(i, im, m)\n\n x = []\n y = [] \n\n for im, m in zip(image_list, mask_list):\n image = cv2.imread(im)[:,:,::-1]\n mask = cv2.imread(m, -1)\n mask = squash_labels(mask)\n \n image = cv2.resize(image, dsize=(0,0), fx=downsample, fy=downsample)\n mask = cv2.resize(mask, dsize=(0,0), fx=downsample, fy=downsample,\n interpolation=cv2.INTER_NEAREST)\n\n # assert (image.shape == mask.shape).all()\n split_x , split_y = split(image, mask, int(size * downsample), overlap)\n\n x.append(split_x)\n y.append(split_y)\n\n\n x = np.concatenate(x, axis=0)\n y = np.concatenate(y, axis=0)\n y = np.eye(N=y.shape[0], M=4)[y]\n\n shuffle = np.arange(x.shape[0]).astype(np.int)\n np.random.shuffle(shuffle)\n x = x[shuffle, :]\n y = y[shuffle, :]\n\n x = (x / 255.).astype(np.float32)\n\n print('split_datasets returning x:', x.shape, x.dtype, x.min(), x.max())\n print('split_datasets returning y:', y.shape, y.dtype)\n return x, y",
"def loadData(image, mask, im_shape):\r\n X, y = [], []\r\n\r\n img = transform.resize(image, im_shape, mode='constant')\r\n img = np.expand_dims(img, -1)\r\n mask = transform.resize(mask, im_shape, mode='constant')\r\n mask = np.expand_dims(mask, -1)\r\n X.append(img)\r\n y.append(mask)\r\n X = np.array(X)\r\n y = np.array(y)\r\n X -= X.mean()\r\n X /= X.std()\r\n\r\n return X, y",
"def test_get_mask(self):\n\n spine_data_loader = SpineDataLoader(dirpath_data=self.dirpath,\n batch_size=4)\n\n for idx in range(4):\n mask = spine_data_loader.get_mask(str(idx))\n assert mask.shape == (256, 256, 1)\n assert mask.dtype == 'int64'",
"def __getitem__(self, idx):\n im = Image.open(self.data_path + self.sample_df.loc[idx,'filename'])\n mask = Image.open(self.data_path + self.sample_df.loc[idx,'mask_filename'])\n semi_label = torch.tensor(self.sample_df.loc[idx,'semi_label'])\n\n im1, _ = self.transform(im, mask)\n im2, _ = self.transform(im, mask)\n\n return im1, im2, semi_label, idx",
"def get_sample_mask(self):",
"def load_image(image_dataset: ImageDataset,\n sample_id: int) -> np.array:\n img = imread(image_dataset.image_paths[sample_id])\n mask = imread(image_dataset.roi_paths[sample_id])\n mask = (1 - (mask > 0).astype(np.uint8))\n img = img * np.repeat(np.expand_dims(mask, axis=-1), 3, axis=-1)\n img = np.transpose(img, (1, 0, 2))\n return img",
"def InitDataset(self):\n train_txt = 'ImageSets/Main/train.txt'\n val_txt = 'ImageSets/Main/val.txt'\n annotations = \"Annotations\"\n jpegimages = \"JPEGImages\"\n images_path = train_txt if (self.is_train) else val_txt \n images_path = readTxt(os.path.join(self.path, images_path))\n images_path.pop(-1)\n # rawdata format: [path_2_image, path_2_xml]\n rawData = list()\n for each in images_path:\n xml = os.path.join(self.path, annotations, each + '.xml')\n jpeg = os.path.join(self.path, jpegimages, each + '.jpg')\n rawData.append([jpeg, xml])\n return rawData",
"def load_mask(self, image_id):\n # If not a vesicle dataset image, delegate to parent class.\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != \"vesicle\":\n return super(self.__class__, self).load_mask(image_id)\n\n # Convert 16 bit mask to a bitmap mask of shape\n # [height, width, instance_count]\n info = self.image_info[image_id]\n mask_path = info['mask_path']\n mask = cv.imread(mask_path, cv.IMREAD_GRAYSCALE + cv.IMREAD_ANYDEPTH)\n bin_mask = get_bin_mask(mask)\n n_instance = bin_mask.shape[-1]\n return bin_mask, np.ones([n_instance], dtype=np.int32)",
"def get_dataset(path: str):\n image_path = os.path.join(path, \"images\")\n mask_path = os.path.join(path, \"masks\")\n image_files = []\n mask_files = []\n\n if os.path.isdir(image_path) and os.path.isdir(mask_path):\n for (dir_path, _, filenames) in os.walk(image_path):\n for f in filenames:\n image_files.append(os.path.join(dir_path, f))\n\n for (dir_path, _, filenames) in os.walk(mask_path):\n for f in filenames:\n mask_files.append(os.path.join(dir_path, f))\n x = len(image_files)\n y = len(mask_files)\n\n if x != y:\n logger.warning(\n \"Found un-even numbers of x-y for dataset. x = %i, y = %i.\", x, y\n )\n\n if x == 0:\n logger.warning(\"Found 0 existing sets.\")\n\n return image_files, mask_files\n logger.info(\"Found %i sets in existing dataset.\", x)\n\n return image_files, mask_files\n\n logger.error(\"Could not locate x and y folder.\")\n sys.exit()",
"def _init_img_dataset(self, dataset_path):\n\n # ==\n # Define the classes used in the various states\n # form: (state class : cifar label class)\n class_dict = {\n 'initial': 'automobile',\n 'choice_1': 'dog',\n 'choice_2': 'cat',\n 'corridor': 'bird',\n }\n\n # ==\n # Download / initialize dataset\n ds = CIFAR10(dataset_path, train=self.training,\n download=True)\n\n # Get the CIFAR class index for each of the state classes\n cifar_class_dict = {\n k: ds.class_to_idx[class_dict[k]] for k in class_dict\n }\n\n # Iterate over the CIFAR dataset and get the idxs to each class\n cifar_indexes = {k: [] for k in class_dict}\n for i in range(len(ds)):\n cur_cifar_class = ds[i][1]\n for k in class_dict:\n if cur_cifar_class == cifar_class_dict[k]:\n cifar_indexes[k].append(i)\n\n # Manually sub-sample choice classes\n for k in ['choice_1', 'choice_2']:\n n_imgs = min(self.num_ds_imgs, len(cifar_indexes[k]))\n rng = np.random.default_rng()\n choice_imgs = rng.choice(cifar_indexes[k], size=n_imgs,\n replace=False)\n cifar_indexes[k] = choice_imgs\n\n # Manually shuffle the corridor class\n rng = np.random.default_rng()\n corri_img_shufIdxs = rng.choice(cifar_indexes['corridor'],\n size=len(cifar_indexes['corridor']),\n replace=False)\n cifar_indexes['corridor'] = corri_img_shufIdxs\n\n # ==\n # Construct the data subset dictionary\n ds_dict = {}\n for k in class_dict:\n ds_dict[k] = Subset(ds, cifar_indexes[k])\n\n return ds_dict",
"def read_batch(self):\n imgs = []\n labels = []\n idx = np.random.choice(self.nImgs,self.batch_size)\n \tfor i in idx:\n imgs.append(cv2.imread(self.data_files[i]))\n \t labels.append(cv2.imread(self.label_files[i]))\n \timgs,labels = np.array(imgs),np.array(labels)\n imgs = (imgs - self.mean)/self.stddev\n \tlabels = (labels - self.mean)/self.stddev\n return imgs,labels",
"def __getitem__(self, idx):\n im = Image.open(self.data_path + self.sample_df.loc[idx,'filename'])\n # load label\n label = torch.tensor(self.sample_df.loc[idx,'abnormal_XR'])\n # load mask\n if self.load_mask:\n mask = Image.open(self.data_path + self.sample_df.loc[idx,'mask_filename'])\n else:\n mask = None\n\n # load semi-label\n if self.load_semilabels:\n semi_label = torch.tensor(self.sample_df.loc[idx, 'semi_label'])\n else:\n semi_label = None\n\n im, mask = self.transform(im, mask)\n\n return im, label, mask, semi_label, torch.tensor(idx)",
"def load_mask(self, image_id):\n\n # If not a balloon dataset image, delegate to parent class.\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != \"dsb\":\n return super(self.__class__, self).load_mask(image_id)\n\n path = image_info[\"dir\"]\n\n mascara = next(os.walk(path + '/masks/'))[2]\n masc = skimage.io.imread(path + '/masks/' + mascara[0])\n height, width = masc.shape\n\n mask = np.zeros((height, width, len(mascara)), dtype=np.uint8)\n\n for i, mask_file in enumerate(mascara):\n mask[:,:,i] = skimage.io.imread(path + '/masks/' + mask_file)\n\n # Return mask, and array of class IDs of each instance. Since we have\n # one class ID only, we return an array of 1s\n return mask, np.ones([mask.shape[-1]], dtype=np.int32)",
"def __data_generation(self, image_mask_dirs): # X : (n_samples, *dim, n_channels)\n # Initialization\n X = np.empty((self.batch_size, *self.dim, self.n_channels))\n y = np.empty((self.batch_size, *self.dim, 1))\n\n # Generate data\n for i, dirs in enumerate(image_mask_dirs):\n # Store image\n x_img = cv2.imread(dirs[0])\n X[i,] = cv2.cvtColor(x_img, cv2.COLOR_BGR2RGB)\n\n # Store mask\n y_img = cv2.imread(dirs[1], cv2.IMREAD_GRAYSCALE).reshape((*self.dim, 1))\n y[i,] = y_img\n\n if self.preprocessor is not None:\n X = self.preprocessor(X)\n y = self.preprocessor(y)\n\n X = X.astype('float32')\n X /= 255\n y = y.astype('float32')\n y /= 255\n\n return X, y",
"def read_data_sets_label(data_dir, label):\n train_data, test_data = read_data_sets(data_dir, one_hot=False)\n train_mask = create_mask(train_data, label)\n test_mask = create_mask(test_data, label)\n return (train_data.images[train_mask], test_data.images[test_mask])",
"def get_test(self, preprocess=False):\n return self._dataset(self._directory, 'images_background_small2', preprocess)",
"def test_read(self):\n for root, dirs, files in os.walk(os.path.join(self.test_dir, 'files')):\n for filename in files:\n if filename.endswith('.bin'):\n d = Dataset(os.path.join(root, filename))\n data = d.as_dict()\n for freq_dict in data['frequencies']:\n x = freq_dict['easting']\n y = freq_dict['northing']\n image = freq_dict['intensity']\n self.assertIsInstance(x, np.ndarray)\n self.assertIsInstance(y, np.ndarray)\n self.assertIsInstance(image, np.ndarray)",
"def load_Data(img_path, mask_path):\n image_files = glob(os.path.join(img_path, '*.*'))\n mask_files = glob(os.path.join(mask_path, '*.*'))\n image_files.sort()\n mask_files.sort()\n images_list = []\n masks_list = []\n\n for _ in range(len(image_files)):\n image = cv2.imread(image_files[_])\n mask = cv2.imread(mask_files[_])\n images_list.append(image)\n masks_list.append(mask)\n\n return images_list, masks_list",
"def test_get_image(self):\n\n spine_data_loader = SpineDataLoader(dirpath_data=self.dirpath,\n batch_size=4)\n\n for idx in range(4):\n image = spine_data_loader.get_image(str(idx))\n assert image.shape == (256, 256, 1)\n assert image.min() == 0.0\n assert image.max() == 1.0\n assert image.dtype == 'float64'",
"def test_read(self):\n for line in TESTIMAGES.split(\"\\n\"):\n vals = line.split()\n name = vals[0]\n dim1, dim2 = [int(x) for x in vals[1:3]]\n mini, maxi, mean, stddev = [float(x) for x in vals[3:]]\n obj = marccdimage()\n obj.read(self.fn[name])\n self.assertAlmostEqual(mini, obj.getmin(), 2, \"getmin\")\n self.assertAlmostEqual(maxi, obj.getmax(), 2, \"getmax\")\n self.assertAlmostEqual(mean, obj.getmean(), 2, \"getmean\")\n self.assertAlmostEqual(stddev, obj.getstddev(), 2, \"getstddev\")\n self.assertEqual(dim1, obj.dim1, \"dim1\")\n self.assertEqual(dim2, obj.dim2, \"dim2\")",
"def _dataset(filename, filter, img_count=1000000):\n try:\n # Attempt to load the dataset.\n with np.load(filename) as data:\n X = data['arr_0']\n y = data['arr_1']\n except:\n # The dataset does not exist, so we regenerate.\n\n # Set up a sample of random images:\n sample_size = (img_count, 3, 3, 3) # 3x3 windows, each containing 3 channels\n images = np.random.random(sample_size)\n\n # The correct label for each \"image\" is the color at its center\n y = images[:, 1, 1, :]\n\n # Now we apply the filter to each of our images and store the filtered image\n print(\"Generating dataset:\")\n\n X = np.zeros(images.shape)\n\n for i in range(images.shape[0]):\n thisImg = images[i]\n filtered = filter.apply(thisImg)\n X[i] = filtered\n\n if (i + 1) % (img_count / 100) == 0:\n print(\"%s: %d%% done\" % (filename, 100 * (i + 1) / img_count))\n\n print(\"Dataset generation complete.\")\n\n np.savez(filename, X, y)\n\n return X[:img_count], y[:img_count]",
"def __call__(self):\n\n dataset = TextOnlyCocoAnnotation()\n\n with open(self.path) as read_file:\n\n json_loaded = json.load(read_file)\n\n for i, value in tqdm(json_loaded['imgs'].items()):\n image_path = os.path.join(os.path.dirname(self.path), 'train2014',\n value['file_name'])\n dataset_type = value['set']\n\n if dataset_type not in self.sets:\n print(dataset_type)\n continue\n\n for annotation_id in json_loaded['imgToAnns'][i]:\n annotation_value = json_loaded['anns'][str(annotation_id)]\n word_annotation = self.parse_annotation_instance(annotation_value)\n dataset.add_bbox(image_path, imagesize.get(image_path), word_annotation)\n\n return dataset",
"def __getitem__(self, index):\n img_name = self.files[self.split][index]\n msk_name = img_name.replace(\".bmp\", \".png\")\n\n image_path = os.path.join(self.root, self.split, img_name)\n label_path = os.path.join(self.root, self.split, msk_name)\n\n assert os.path.exists(os.path.join(label_path)), \\\n \"> Corresponding Mask: {} do not exist!!!\".format(msk_name)\n\n image = misc.imread(image_path)\n image = np.array(image, dtype=np.uint8)\n\n # image = Image.fromarray(image, mode='RGB')\n\n # bright_enhancer = ImageEnhance.Brightness(image)\n # image = bright_enhancer.enhance(1.25)\n #\n # con_enhancer = ImageEnhance.Contrast(image)\n # image = con_enhancer.enhance(1.75)\n\n # sharp_enhancer = ImageEnhance.Sharpness(image)\n # image = sharp_enhancer.enhance(2.25)\n\n # image = image.filter(ImageFilter.EMBOSS)\n\n # image = np.array(image, dtype=np.uint8)\n image = image[:, :, ::-1] # From RGB to BGR\n\n # Histogram Equalization\n # image[:, :, 0] = cv2.equalizeHist(image[:, :, 0])\n # image[:, :, 1] = cv2.equalizeHist(image[:, :, 1])\n # image[:, :, 2] = cv2.equalizeHist(image[:, :, 2])\n\n label = misc.imread(label_path, mode=\"L\")\n label[label > 0] = 1\n label = np.array(label, dtype=np.uint8)\n\n # data augmentation used in training\n if self.aug_ext is not None:\n image = self.aug_ext(image)\n if self.augmentations is not None:\n image, label = self.augmentations(image, label)\n\n if self.is_transform:\n image = self.transform(image)\n\n image = image.transpose(2, 0, 1) # From HWC to CHW (For PyTorch we use N*C*H*W tensor)\n return torch.from_numpy(image).float(), torch.from_numpy(label).long()",
"def _sample_single(self):\n ss = super(SuperResolutions, self)._sample_single()\n image = ss['data']\n # Down sample\n max_down_sample = max(self.data_down_sample, self.label_down_sample)\n if self.is_down_sample:\n images = []\n images.append(image)\n for i in range(max_down_sample):\n image = self.downsample(image)\n images.append(image)\n data = images[self.data_down_sample]\n label = images[self.label_down_sample]\n return {'data': data, 'label': label}",
"def load_mask(self):\n mask_file = fetch_one_file(self.ica_dir, self._mask_fname, pat_type='re.match')\n return niimg.load_img(mask_file)",
"def load_data(self, data):\n self.image_name = data['name'] # here the name is set as the name of input patch.\n self.img = data['image']\n self.mask = data['mask']\n \n if self.mask.shape != self.img.shape:\n raise ValueError('The loaded mask shape has to be', self.img.shape)\n \n sha = tuple(range(self.img.ndim))\n re_sha = sha[-1:] + sha[:-1]\n \n self.img_ = u.np_to_torch(np.transpose(self.img, re_sha), bc_add=False).unsqueeze(0).type(self.dtype)\n self.mask_ = u.np_to_torch(np.transpose(self.mask, re_sha), bc_add=False).unsqueeze(0).type(self.dtype)\n \n # compute std on coarse data for skipping all-zeros patches\n input_std = torch.std(self.img_ * self.mask_).item()\n return input_std"
]
| [
"0.67121446",
"0.66772234",
"0.6363293",
"0.6307143",
"0.62401533",
"0.6154699",
"0.6122595",
"0.60832435",
"0.60519624",
"0.60349685",
"0.60263544",
"0.6025657",
"0.6024643",
"0.6020571",
"0.6012803",
"0.60097057",
"0.5902136",
"0.5896047",
"0.58942544",
"0.587289",
"0.5857914",
"0.585612",
"0.58551437",
"0.58207625",
"0.57973105",
"0.57876617",
"0.57824606",
"0.57779324",
"0.5775561",
"0.57543814"
]
| 0.7232249 | 0 |
finds columns in the dataframe with zero variance, i.e. those with the same value in every observation. | def find_zero_var(df):
toKeep = []
toDelete = []
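    # a column with more than one distinct value varies across rows; a single distinct value means zero variance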
for col in df:
if len(df[col].value_counts()) > 1:
toKeep.append(col)
else:
toDelete.append(col)
##
return {'toKeep':toKeep, 'toDelete':toDelete} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_zero_var(df): \n toKeep = []\n toDelete = []\n for col in df:\n if len(df[col].value_counts()) > 1:\n toKeep.append(col)\n else:\n toDelete.append(col)\n ##\n return {'toKeep':toKeep, 'toDelete':toDelete}",
"def test_drop_zero_variance_on_subset_columns(data):\n step = DropZVColumnsStep(['name', 'released'], naomit=True)\n bdf = step.prepare(data).bake(data)\n\n assert 'name' in bdf.columns\n assert 'released' in bdf.columns\n assert 'episodes' in bdf.columns",
"def test_drop_zero_variance_on_subset_columns_with_zv_removals(data):\n step = DropZVColumnsStep(['released', 'episodes'], naomit=True)\n bdf = step.prepare(data).bake(data)\n\n assert 'name' in bdf.columns\n assert 'released' in bdf.columns\n assert 'episodes' not in bdf.columns",
"def get_zeros(self):\n zero_values = self.df[self.col_name].isin([0]).sum(axis=0)\n return zero_values",
"def test_drop_zero_variance_columns_considering_NA_will_not_drop_any_column(data):\n step = DropZVColumnsStep()\n bdf = step.prepare(data).bake(data)\n\n assert 'name' in bdf.columns\n assert 'released' in bdf.columns\n assert 'episodes' in bdf.columns",
"def _non_zero_columns_search(array):\n col_num = array.shape[1]\n non_zero_col = CArray([], dtype=int)\n for c in range(col_num):\n col = array[:, c]\n if col.any() == True:\n non_zero_col = non_zero_col.append(c)\n\n return non_zero_col",
"def drop_quasi_zero(df, thresh=0.05):\n drop_list = []\n for el in df.columns.values:\n non_zero = df[el][df[el] != 0].shape[0] / df.shape[0]\n if non_zero < thresh:\n drop_list.append(el)\n print('Dropping column: {} | Non-zero values ratio: {}%'.format(\n el, round(100 * non_zero, 3)))\n return df.drop(drop_list, axis=1)",
"def test_drop_zero_variance_columns_omiting_NA_will_drop_a_column(data):\n step = DropZVColumnsStep(naomit=True)\n bdf = step.prepare(data).bake(data)\n\n assert 'name' in bdf.columns\n assert 'released' in bdf.columns\n assert 'episodes' not in bdf.columns",
"def filter_rows_by_variance(df, max_=MAX_NUM_ROWS):\n top_rows = df.var(axis=1).nlargest(max_)\n return df.ix[top_rows.index]",
"def estimateCovariance(df):\n import numpy as np\n m = df.select(df['scaledFeatures']).map(lambda x: x[0]).mean()\n dfZeroMean = df.select(df['scaledFeatures']).map(lambda x: x[0]).map(lambda x: x-m) # subtract the mean\n\n return dfZeroMean.map(lambda x: np.outer(x,x)).sum()/df.count()",
"def zero_one_card(df):\n unique_values = defaultdict()\n for col in df.columns:\n if df[col].nunique() < 2:\n unique_values[col] = df[col].nunique()\n if len(unique_values) > 0:\n printmd(str(\"* Columns: *\"+', '.join(list(unique_values.keys()))+\"* have less than two different values\"))\n for col in unique_values.keys():\n printmd(str('* *' + col + \"* has \" + str(df[col].nunique()) + ' differents values :' + str(df[col].unique())))\n else:\n printmd(\"* No columns have less than 2 different values\")",
"def remove_zero_features(df,no_zeros = 1):\n thing = df.astype(bool).sum(axis=0) # number of nonzeros in each column\n idx = pd.Index(thing) #Index format\n location = idx.get_loc(no_zeros) # Set all elements that are 1.0 to True, rest to False.\n loc_of_one = np.asarray(np.nonzero(location)) #Array of columns with only one nonzero element\n loc_of_one = loc_of_one[0]\n df_new = df.drop(df.columns[loc_of_one], axis = 1) # New reduced dataframe\n return df_new",
"def RemoveZeroVar(chain):\n return chain[:, np.invert((np.sum(np.var(chain, axis=0), axis=1)<1e-10)), :]",
"def variance(df, cols, dummy_col, generated_feature_name, params=None): \n group_cols = cols[:-1]\n calc_col = cols[-1]\n group = df[cols].groupby(by=group_cols)[[calc_col]].var().reset_index().rename(index=str, columns={calc_col: generated_feature_name}).fillna(0)\n dtype = {x: df[x].dtype for x in group_cols if x in df.columns.values}\n dtype[generated_feature_name] = utils.set_type(group[generated_feature_name], 'float')\n _df = df.merge(group.astype(dtype), on=group_cols, how='left')\n r = _df[[generated_feature_name]].copy()\n del dtype, _df, group\n gc.collect()\n module_logger.debug('feature generated: {}'.format(generated_feature_name))\n return r",
"def test_multiple(self):\n df = self.df.copy()\n out = get_full_column(df.values)\n self.assertTrue(out == 0)",
"def check_zero(col):\n return np.sum(col == 0.0)",
"def only_positive_values(df):\n\n\n only_positive_cols_bool = (df <= 0).any()\n only_positive_cols = only_positive_cols_bool[~only_positive_cols_bool].index\n positive_df = df[only_positive_cols]\n\n return positive_df",
"def RemoveZeroVar(chain):\n\treturn chain[:, np.invert((np.sum(np.var(chain, axis=0), axis=1)<1e-10)), :]",
"def get_cols_dummy():",
"def drop_one_elem_columns(self, df):\n df_ = df.copy()\n\n # Incldue columns in dataframe\n include_idx = []\n for i in df_.columns:\n len_unique = df_[i].dropna().unique().size\n if len_unique > 1:\n include_idx.append(i)\n\n df_ = df_[include_idx]\n return df_",
"def overfit_features(df):\r\n overfit = []\r\n for col in df.columns:\r\n counts = df[col].value_counts().iloc[0]\r\n if counts / len(df)*100 > 99.94:\r\n overfit.append(col)\r\n return overfit",
"def get_nan_columns(df):\n df = nan_val_summary(df)\n return df[df['fraction_missing'] > 0]['columns'].values",
"def select_columns(df):\n df = df.dropna(axis='columns', how='all') # drop columns containing only NaN\n keep_cols = [col for col in df.columns if 'normalized' not in col]\n df = df[keep_cols]\n return df",
"def check_missings(df):\n \n import numpy as np\n import pandas as pd\n \n #make a list of the variables that contain missing values\n vars_with_na = [var for var in df.columns if df[var].isnull().sum()>1]\n miss_pred = pd.isnull(df[vars_with_na]).sum().sort_values(ascending=False)\n # print the variable name and the percentage of missing values\n if not vars_with_na:\n print(\"There are no missing values\")\n\n else:\n for var in miss_pred.index:\n missing = np.round(df[var].isnull().mean(), 3)\n print(f\"{var} : {missing:.3%} missing values\")",
"def _find_Vgroups(self, X):\n na_value = X[self.V_features].isnull().sum()\n na_list = na_value.unique()\n na_value = na_value.to_dict()\n cols_same_null = []\n for i in range(len(na_list)):\n cols_same_null.append([k for k, v in na_value.items() if v == na_list[i]])\n return cols_same_null",
"def calc_variances(ds):\n if ds.size <= 1:\n print 'Fail: not enough items for calculation %d' % ds.size\n return 0,1\n obs_var = ((ds.storage - ds.storage.sum()/ds.size)**2).sum()/(ds.size-1)\n rep_var = ds.var.sum()/ds.size\n return obs_var,rep_var",
"def test_column_presence(self):\n\n columns = [\"feature_is_filtered\", \"feature_biotype\"]\n\n for component_name in [\"var\", \"raw.var\"]:\n for column in columns:\n if column == \"feature_is_filtered\" and component_name == \"raw.var\":\n continue\n with self.subTest(component_name=component_name, column=column):\n\n # Resetting validator\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n\n component = Validator.getattr_anndata(\n self.validator.adata, component_name\n )\n component.drop(column, axis=1, inplace=True)\n\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n f\"ERROR: Dataframe '{component_name}' is missing \"\n f\"column '{column}'.\"\n ],\n )",
"def col_sds(\n x: DataFrame,\n na_rm: bool = False,\n # dims: int = 1,\n # weights = None,\n # freq = None,\n # n = None\n) -> Iterable[NumericType]:\n from ..stats import sd\n return x.agg(sd, na_rm=na_rm)",
"def check_missing_data(df): \n df_lng = pd.melt(df) #Convert to long data\n null_variables = df_lng.value.isnull()\n \n return pd.crosstab(df_lng.variable, null_variables)",
"def VarianceFilter(X, data_headers, varCut=0.1):\n Xidx = np.var(X[data_headers].values, axis=1) > varCut\n return X.iloc[Xidx, :]"
]
| [
"0.74193525",
"0.6533197",
"0.62334067",
"0.62208056",
"0.61681634",
"0.6127774",
"0.60418195",
"0.59925526",
"0.597131",
"0.5882125",
"0.586426",
"0.57852906",
"0.57829046",
"0.57698643",
"0.57459974",
"0.57398176",
"0.56759185",
"0.55791897",
"0.55541277",
"0.55518734",
"0.55498666",
"0.5527349",
"0.54984325",
"0.5475967",
"0.5458889",
"0.53964067",
"0.5395861",
"0.5392873",
"0.53914595",
"0.53821266"
]
| 0.7350333 | 1 |
finds columns that are either positively or negatively perfectly correlated (with correlations of +1 or -1), and creates a dict that includes which columns to drop so that each remaining column is independent. | def find_perfect_corr(df):
corrMatrix = df.corr()
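    # keep only the strictly lower triangle so the diagonal and each mirrored pair are zeroed out and every pair is checked once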
corrMatrix.loc[:,:] = numpy.tril(corrMatrix.values, k = -1)
already_in = set()
result = []
for col in corrMatrix:
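        # rounding to 10 decimals before the abs == 1 check keeps floating-point noise from hiding exact correlations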
perfect_corr = corrMatrix[col][abs(numpy.round(corrMatrix[col],10)) == 1.00].index.tolist()
if perfect_corr and col not in already_in:
already_in.update(set(perfect_corr))
perfect_corr.append(col)
result.append(perfect_corr)
toRemove = []
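    # keep only the first column of each correlated grouping; the remaining members are marked for removal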
for item in result:
toRemove.append(item[1:(len(item)+1)])
toRemove = sum(toRemove, [])
return {'corrGroupings':result, 'toRemove':toRemove} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_perfect_corr(df): \n corrMatrix = df.corr()\n corrMatrix.loc[:,:] = numpy.tril(corrMatrix.values, k = -1)\n already_in = set()\n result = []\n for col in corrMatrix:\n perfect_corr = corrMatrix[col][abs(numpy.round(corrMatrix[col],10)) == 1.00].index.tolist()\n if perfect_corr and col not in already_in:\n already_in.update(set(perfect_corr))\n perfect_corr.append(col)\n result.append(perfect_corr)\n toRemove = []\n for item in result:\n toRemove.append(item[1:(len(item)+1)])\n toRemove = sum(toRemove, [])\n return {'corrGroupings':result, 'toRemove':toRemove}",
"def remove_highly_correlated_vars_fast(df, corr_limit=0.70):\r\n # Creating correlation matrix\r\n correlation_dataframe = df.corr().abs().astype(np.float16)\r\n # Selecting upper triangle of correlation matrix\r\n upper_tri = correlation_dataframe.where(np.triu(np.ones(correlation_dataframe.shape),\r\n k=1).astype(np.bool))\r\n # Finding index of feature columns with correlation greater than 0.95\r\n to_drop = [column for column in upper_tri.columns if any(upper_tri[column] > corr_limit)]\r\n print();\r\n print('Highly correlated columns to remove: %s' %to_drop)\r\n return to_drop",
"def filter_collinearity(c, threshold):\n\t# ensure symmetric\n\tif c.shape[0] != c.shape[1]:\n\t\traise ValueError('input dataframe should be symmetrical in dimensions')\n\n\t# init drops list\n\tdrops = []\n\tmacor = [] # mean abs corrs\n\tcorrz = [] # the correlations\n\n\t## Iterate over each feature\n\tfinished = False\n\twhile not finished:\n\n\t\t# Whenever there's a break, this loop will start over\n\t\tfor i,nm in enumerate(c.columns):\n\t\t\tthis_col = c[nm].drop(nm).sort_values(na_position='first') # gets the column, drops the index of itself, and sorts\n\t\t\tthis_col_nms = this_col.index.tolist()\n\t\t\tthis_col = np.array(this_col)\n\n\t\t\t# check if last value is over thresh\n\t\t\tmax_cor = this_col[-1]\n\t\t\tif pd.isnull(max_cor) or max_cor < threshold or this_col.shape[0] == 1:\n\t\t\t\tif i == c.columns.shape[0] - 1:\n\t\t\t\t\tfinished = True\n\n\t\t\t\t# control passes to next column name or end if finished\n\t\t\t\tcontinue\n\n\t\t\t# otherwise, we know the corr is over the threshold\n\t\t\t# gets the current col, and drops the same row, sorts asc and gets other col\n\t\t\tother_col_nm = this_col_nms[-1]\n\t\t\tthat_col = c[other_col_nm].drop(other_col_nm)\n\n\t\t\t# get the mean absolute correlations of each\n\t\t\tmn_1, mn_2 = np.nanmean(this_col), np.nanmean(that_col)\n\n\t\t\t# we might get nans?\n\t\t\t# if pd.isnull(mn_1) and pd.isnull(mn_2):\n\t\t\t\t# this condition is literally impossible, as it would\n\t\t\t\t# require every corr to be NaN, and it wouldn't have\n\t\t\t\t# even gotten here without hitting the continue block.\n\t\t\tif pd.isnull(mn_1):\n\t\t\t\tdrop_nm = other_col_nm\n\t\t\telif pd.isnull(mn_2):\n\t\t\t\tdrop_nm = nm\n\t\t\telse:\n\t\t\t\tdrop_nm = nm if mn_1 > mn_2 else other_col_nm\n\n\t\t\t# drop the bad col, row\n\t\t\tc.drop(drop_nm, axis=1, inplace=True)\n\t\t\tc.drop(drop_nm, axis=0, inplace=True)\n\n\t\t\t# add the bad col to drops\n\t\t\tdrops.append(drop_nm)\n\t\t\tmacor.append(np.maximum(mn_1, mn_2))\n\t\t\tcorrz.append(_MCFTuple(\n\t\t\t\t\tfeature_x=drop_nm,\n\t\t\t\t\tfeature_y=nm if not nm == drop_nm else other_col_nm,\n\t\t\t\t\tabs_corr=max_cor,\n\t\t\t\t\tmac=macor[-1]\n\t\t\t\t))\n\n\t\t\t# if we get here, we have to break so the loop will \n\t\t\t# start over from the first (non-popped) column\n\t\t\tbreak\n\n\t\t# if not finished, restarts loop, otherwise will exit loop\n\n\t# return\n\treturn drops, macor, corrz",
"def correlation_drop(df, threshold):\n df_copy = df.copy()\n col_corr = set()\n\n corr_matrix = df_copy.corr()\n\n for i in range(len(corr_matrix.columns)):\n for j in range(i):\n if (corr_matrix.iloc[i, j] >= threshold) and (corr_matrix.columns[j] not in col_corr):\n colname = corr_matrix.columns[i]\n col_corr.add(colname)\n if colname in df_copy.columns:\n del df_copy[colname]\n print(col_corr)\n return df_copy",
"def drop_corr_columns(df, drop_columns=True, print_columns=True, threshold=0.98):\n\n # 1. calculation\n CorrCoeff = df.corr()\n\n # 2. report\n CorrFieldsList = []\n print('Columns with correlations more than %s :' % str(threshold))\n for i in CorrCoeff:\n for j in CorrCoeff.index[CorrCoeff[i] >= threshold]:\n if i != j and j not in CorrFieldsList:\n CorrFieldsList.append(j)\n if print_columns:\n print(\"%s-->%s: r^2=%f\" % (i, j, CorrCoeff[i][CorrCoeff.index == j].values[0]))\n #print()\n #print('Correlated columns count: %', len(CorrFieldsList))\n\n # 3. dropping\n if drop_columns:\n print('%s columns total' % df.shape[1])\n df = df.drop(CorrFieldsList, 1)\n print('%s columns left' % df.shape[1])\n\n return df",
"def remove_multicollinearity_correlation(data: pd.DataFrame, threshold: Optional[float] = 0.8) -> pd.DataFrame:\n corr_data = pd.DataFrame(np.triu(np.abs(data.corr())), columns=data.columns)\n\n multicoll_columns = np.logical_and(corr_data >= threshold, corr_data < 1.0).any()\n return data.loc[:, ~multicoll_columns]",
"def correl_vars(ds,cutoff=0.65, is_cor_mat_return=True):\n cor_mat = ds.corr() # correl matrix\n \n var1 = []; var2 = []\n for i in range(len(cor_mat.columns)):\n for j in range(len(cor_mat.index)):\n if (abs(cor_mat.iloc[i,j]) > cutoff) & (i>j):\n var1.append(cor_mat.columns[i]); var2.append(cor_mat.index[j])\n \n high_cor_var = list(zip(var1,var2)) # correls vars list\n \n # Getting VIF's\n inv_corr_mat = np.linalg.inv(corr_mat)\n vif = pd.DataFrame(np.diag(inv_corr_mat), index=df.columns).reset_index().rename(columns={'index':'Parameter',0:'VIF'}).sort_values(by = ['VIF'],ascending=False, ignore_index=True)\n \n # Other way by using statsmodels package : added intercept using add_constant as statmodels doesn't include it by default\n# from statsmodels.stats.outliers_influence import variance_inflation_factor\n# from statsmodels.tools.tools import add_constant\n# vif = pd.DataFrame([variance_inflation_factor(add_constant(ds).values, i) for i in range(add_constant(ds).shape[1])], \\\n# index=add_constant(ds).columns, columns=['VIF']).reset_index().rename(columns={'index':'Parameter'}).drop(index=0).sort_values(by = ['VIF'],ascending=False, ignore_index=True)\n \n if is_cor_mat_return :\n correl_dict = {'correl_matrix':cor_mat, 'Correl_vars' : high_cor_var, 'vif':vif}\n return correl_dict\n else :\n correl_dict = {'Correl_vars' : high_cor_var, 'vif':vif}\n return correl_dict",
"def columns_to_fix(df):\n return [col for col in df.columns.values if any([k in col and v in col for k, v in symmetric_dihedrals.items()])]",
"def remove_correlated_features(x, threshold=0.9):\n x_copy = np.copy(x)\n \n corr_matrix = np.corrcoef(x_copy, rowvar=False)\n # Set to False highly correlated columns\n nb_col = len(corr_matrix)\n columns = np.full((nb_col,), True, dtype=bool)\n for i in range(nb_col):\n for j in range(i+1, nb_col):\n if corr_matrix[i, j] >= threshold:\n if columns[i]:\n columns[j] = False\n \n # Remove correlated features and concat categorical features\n return x_copy[:, columns], columns",
"def FE_remove_variables_using_SULOV_method(df, numvars, modeltype, target,\r\n corr_limit = 0.70,verbose=0):\r\n df = copy.deepcopy(df)\r\n ### for some reason, doing a mass fillna of vars doesn't work! Hence doing it individually!\r\n null_vars = np.array(numvars)[df[numvars].isnull().sum()>0]\r\n for each_num in null_vars:\r\n df[each_num].fillna(0,inplace=True)\r\n target = copy.deepcopy(target)\r\n print('Searching for highly correlated variables from %d variables using SULOV method' %len(numvars))\r\n print('##### SULOV : Searching for Uncorrelated List Of Variables (takes time...) ############')\r\n correlation_dataframe = df[numvars].corr().abs().astype(np.float16)\r\n ######### This is how you create a dictionary of which var is highly correlated to a list of vars ####\r\n corr_values = correlation_dataframe.values\r\n col_index = correlation_dataframe.columns.tolist()\r\n index_triupper = list(zip(np.triu_indices_from(corr_values,k=1)[0],np.triu_indices_from(\r\n corr_values,k=1)[1]))\r\n high_corr_index_list = [x for x in np.argwhere(abs(corr_values[np.triu_indices(len(corr_values), k = 1)])>=corr_limit)]\r\n low_corr_index_list = [x for x in np.argwhere(abs(corr_values[np.triu_indices(len(corr_values), k = 1)])<corr_limit)]\r\n tuple_list = [y for y in [index_triupper[x[0]] for x in high_corr_index_list]]\r\n correlated_pair = [(col_index[tuple[0]],col_index[tuple[1]]) for tuple in tuple_list]\r\n corr_pair_dict = dict(return_dictionary_list(correlated_pair))\r\n keys_in_dict = list(corr_pair_dict.keys())\r\n reverse_correlated_pair = [(y,x) for (x,y) in correlated_pair]\r\n reverse_corr_pair_dict = dict(return_dictionary_list(reverse_correlated_pair))\r\n for key, val in reverse_corr_pair_dict.items():\r\n if key in keys_in_dict:\r\n if len(key) > 1:\r\n corr_pair_dict[key] += val\r\n else:\r\n corr_pair_dict[key] = val\r\n #### corr_pair_dict is used later to make the network diagram to see which vars are correlated to which\r\n # Selecting upper triangle of correlation matrix ## this is a fast way to find highly correlated vars\r\n upper_tri = correlation_dataframe.where(np.triu(np.ones(correlation_dataframe.shape),\r\n k=1).astype(np.bool))\r\n empty_df = upper_tri[abs(upper_tri)>corr_limit]\r\n ### if none of the variables are highly correlated, you can skip this whole drawing\r\n if empty_df.isnull().all().all():\r\n print(' No highly correlated variables in data set to remove. All selected...')\r\n return numvars\r\n #### It's important to find the highly correlated features first #############\r\n lower_tri = correlation_dataframe.where(np.tril(np.ones(correlation_dataframe.shape),\r\n k=-1).astype(np.bool))\r\n lower_df = lower_tri[abs(lower_tri)>corr_limit]\r\n corr_list = empty_df.columns[[not(empty_df[x].isnull().all()) for x in list(empty_df)]].tolist(\r\n )+lower_df.columns[[not(lower_df[x].isnull().all()) for x in list(lower_df)]].tolist()\r\n corr_list = find_remove_duplicates(corr_list)\r\n ###### This is for ordering the variables in the highest to lowest importance to target ###\r\n if len(corr_list) == 0:\r\n final_list = list(correlation_dataframe)\r\n print('Selecting all (%d) variables since none of them are highly correlated...' 
%len(numvars))\r\n return numvars\r\n else:\r\n if isinstance(target, list):\r\n target = target[0]\r\n max_feats = len(corr_list)\r\n if modeltype == 'Regression':\r\n sel_function = mutual_info_regression\r\n fs = SelectKBest(score_func=sel_function, k=max_feats)\r\n else:\r\n sel_function = mutual_info_classif\r\n fs = SelectKBest(score_func=sel_function, k=max_feats)\r\n ##### you must ensure there are no null values in corr_list df ##\r\n try:\r\n fs.fit(df[corr_list].astype(np.float16), df[target])\r\n except:\r\n fs.fit(df[corr_list].astype(np.float32), df[target])\r\n try:\r\n mutual_info = dict(zip(corr_list,fs.scores_))\r\n #### The first variable in list has the highest correlation to the target variable ###\r\n sorted_by_mutual_info =[key for (key,val) in sorted(mutual_info.items(), key=lambda kv: kv[1],reverse=True)]\r\n ##### Now we select the final list of correlated variables ###########\r\n selected_corr_list = []\r\n #### You have to make multiple copies of this sorted list since it is iterated many times ####\r\n orig_sorted = copy.deepcopy(sorted_by_mutual_info)\r\n copy_sorted = copy.deepcopy(sorted_by_mutual_info)\r\n copy_pair = copy.deepcopy(corr_pair_dict)\r\n #### select each variable by the highest mutual info and see what vars are correlated to it\r\n for each_corr_name in copy_sorted:\r\n ### add the selected var to the selected_corr_list\r\n selected_corr_list.append(each_corr_name)\r\n for each_remove in copy_pair[each_corr_name]:\r\n #### Now remove each variable that is highly correlated to the selected variable\r\n if each_remove in copy_sorted:\r\n copy_sorted.remove(each_remove)\r\n ##### Now we combine the uncorrelated list to the selected correlated list above\r\n rem_col_list = left_subtract(list(correlation_dataframe),corr_list)\r\n final_list = rem_col_list + selected_corr_list\r\n removed_cols = left_subtract(numvars, final_list)\r\n except:\r\n print(' SULOV Method crashing due to memory error, trying alternative simpler method...')\r\n #### Dropping highly correlated Features fast using simple linear correlation ###\r\n removed_cols = remove_highly_correlated_vars_fast(train[numvars],corr_limit)\r\n final_list = left_subtract(numvars, removed_cols)\r\n if len(removed_cols) > 0:\r\n print(' Removing (%d) highly correlated variables:' %(len(removed_cols)))\r\n if len(removed_cols) <= 30:\r\n print(' %s' %removed_cols)\r\n if len(final_list) <= 30:\r\n print(' Following (%d) vars selected: %s' %(len(final_list),final_list))\r\n ############## D R A W C O R R E L A T I O N N E T W O R K ##################\r\n selected = copy.deepcopy(final_list)\r\n try:\r\n import networkx as nx\r\n except:\r\n print(' Python networkx library not installed. 
Install it for feature selection visualization.')\r\n return\r\n #### Now start building the graph ###################\r\n gf = nx.Graph()\r\n ### the mutual info score gives the size of the bubble ###\r\n multiplier = 2100\r\n for each in orig_sorted:\r\n gf.add_node(each, size=int(max(1,mutual_info[each]*multiplier)))\r\n ######### This is where you calculate the size of each node to draw\r\n sizes = [mutual_info[x]*multiplier for x in list(gf.nodes())]\r\n #### The sizes of the bubbles for each node is determined by its mutual information score value\r\n corr = df[corr_list].corr()\r\n high_corr = corr[abs(corr)>corr_limit]\r\n ## high_corr is the dataframe of a few variables that are highly correlated to each other\r\n combos = combinations(corr_list,2)\r\n ### this gives the strength of correlation between 2 nodes ##\r\n multiplier = 20\r\n for (var1, var2) in combos:\r\n if np.isnan(high_corr.loc[var1,var2]):\r\n pass\r\n else:\r\n gf.add_edge(var1, var2,weight=multiplier*high_corr.loc[var1,var2])\r\n ######## Now start building the networkx graph ##########################\r\n widths = nx.get_edge_attributes(gf, 'weight')\r\n nodelist = gf.nodes()\r\n cols = 5\r\n height_size = 5\r\n width_size = 15\r\n rows = int(len(corr_list)/cols)\r\n if rows < 1:\r\n rows = 1\r\n plt.figure(figsize=(width_size,min(20,height_size*rows)))\r\n pos = nx.shell_layout(gf)\r\n nx.draw_networkx_nodes(gf,pos,\r\n nodelist=nodelist,\r\n node_size=sizes,\r\n node_color='blue',\r\n alpha=0.5)\r\n nx.draw_networkx_edges(gf,pos,\r\n edgelist = widths.keys(),\r\n width=list(widths.values()),\r\n edge_color='lightblue',\r\n alpha=0.6)\r\n pos_higher = {}\r\n x_off = 0.04 # offset on the x axis\r\n y_off = 0.04 # offset on the y axis\r\n for k, v in pos.items():\r\n pos_higher[k] = (v[0]+x_off, v[1]+y_off)\r\n if len(selected) == 0:\r\n nx.draw_networkx_labels(gf, pos=pos_higher,\r\n labels=dict(zip(nodelist,nodelist)),\r\n font_color='black')\r\n else:\r\n nx.draw_networkx_labels(gf, pos=pos_higher,\r\n labels = dict(zip(nodelist,[x+' (selected)' if x in selected else x+' (removed)' for x in nodelist])),\r\n font_color='black')\r\n plt.box(True)\r\n plt.title(\"\"\"In SULOV, we repeatedly remove features with lower mutual info scores among highly correlated pairs (see figure),\r\n SULOV selects the feature with higher mutual info score related to target when choosing between a pair. \"\"\", fontsize=10)\r\n plt.suptitle('How SULOV Method Works by Removing Highly Correlated Features', fontsize=20,y=1.03)\r\n red_patch = mpatches.Patch(color='blue', label='Bigger circle denotes higher mutual info score with target')\r\n blue_patch = mpatches.Patch(color='lightblue', label='Thicker line denotes higher correlation between two variables')\r\n plt.legend(handles=[red_patch, blue_patch],loc='best')\r\n plt.show();\r\n ##### N E T W O R K D I A G R A M C O M P L E T E #################\r\n return final_list",
"def get_correlated_data_stats(\n data: np.array\n) -> Dict[str, float]:\n n_features = data.shape[1]\n corr = pd.DataFrame(data).corr()\n corr = np.array(corr)\n\n assert corr.shape[0] == corr.shape[1] == n_features\n\n pair_correlations = []\n for i in range(n_features):\n for j in range(n_features):\n if i > j:\n pair_correlations.append(corr[i, j])\n abs_pair_correlations = [abs(c) for c in pair_correlations]\n\n assert len(pair_correlations) == (n_features * n_features - n_features) / 2\n\n data_corr_stats = {\n \"correlation_min\": np.min(pair_correlations),\n \"correlation_max\": np.max(pair_correlations),\n \"correlation_median\": np.median(pair_correlations),\n \"correlation_mean\": np.mean(pair_correlations),\n \"correlation_std\": np.std(pair_correlations),\n\n \"abs_correlation_min\": np.min(abs_pair_correlations),\n \"abs_correlation_max\": np.max(abs_pair_correlations),\n \"abs_correlation_median\": np.median(abs_pair_correlations),\n \"abs_correlation_mean\": np.mean(abs_pair_correlations),\n \"abs_correlation_std\": np.std(abs_pair_correlations)\n }\n return data_corr_stats",
"def find_correlation(df, method=\"pearson\", threshold=0.9):\n corr_matrix = df.corr(method=method).abs()\n corr_means = {k: corr_matrix[k].mean()\n for k in corr_matrix.columns.tolist()}\n corr_matrix.loc[:, :] = np.tril(corr_matrix, k=-1)\n\n correlated = {}\n for col in corr_matrix:\n corr_cols = corr_matrix[col][corr_matrix[col]\n >= threshold].index.tolist()\n corr_cols.append(col)\n\n if len(corr_cols) > 1:\n selected_cols = {k: corr_means[k] for k in corr_cols}\n\n selected_col = max(selected_cols, key=lambda k: selected_cols[k])\n correlated_col = corr_matrix.transpose()[selected_col].idxmax()\n correlated[selected_col] = (correlated_col,\n corr_matrix[correlated_col][selected_col])\n return correlated",
"def remove_intermediate_columns(dataframe):\n\n combined_dataframe_dropped_cols = dataframe.drop(columns = ['measureland_qualifier_flag_speed',\n 'measureland_qualifier_flag_distance',\n 'measureland_qualifier_flag_acceleration',\n 'measureland_qualifier_flag_visual'])\n\n print(\"Dimensions of combined dataframe after dropping columns:\", combined_dataframe_dropped_cols.shape)\n print(\"Combined dataframe after dropping columns: \", combined_dataframe_dropped_cols.sample(10))\n\n return combined_dataframe_dropped_cols",
"def removeAllCorrelations(self, removeReImCorrel = True):\n\t\tdim = len(self.coma)/2\n#\t#\tCMwrite(\"removeAllCorrelations\")\n\t\tfor i in range(dim):\n\t\t\tfor j in range(dim):\n\t\t\t\tif not i == j:\n\t\t\t\t\tself.coma[2*i ,2*j ] = 0.\t\t\n\t\t\t\t\tself.coma[2*i+1,2*j ] = 0.\n\t\t\t\t\tself.coma[2*i ,2*j+1] = 0.\n\t\t\t\t\tself.coma[2*i+1,2*j+1] = 0.\n\t\t\t\telif removeReImCorrel:\n\t\t\t\t\tself.coma[2*i+1,2*j ] = 0.\n\t\t\t\t\tself.coma[2*i ,2*j+1] = 0.\n\t\tself.makeComaInv()\n\t\tself.specialCOMAs = {}",
"def select_columns(df):\n df = df.dropna(axis='columns', how='all') # drop columns containing only NaN\n keep_cols = [col for col in df.columns if 'normalized' not in col]\n df = df[keep_cols]\n return df",
"def fast_corr(df, col_name):\n\n if not isinstance(df, pd.DataFrame):\n raise TypeError(\"The type of the input data must be dataframe.\")\n\n if not isinstance(col_name, list):\n raise TypeError(\"The col_name must be list.\")\n\n if all(isinstance(item, str) for item in col_name) is False and all(\n isinstance(item, int) for item in col_name) is False:\n raise ValueError(\n \"The col_name must be a list of strings or a list of integers.\")\n\n if len(col_name) < 2:\n raise ValueError(\n \"At least two columns must be selected for correlation analysis.\")\n\n if all(isinstance(item, str) for item in col_name) is True and all(\n elem in df.columns.to_list() for elem in col_name) is False:\n raise ValueError(\"The column names were not found.\")\n\n if all(isinstance(item, int) for item in col_name) is True and max(\n col_name) > (df.shape[1] - 1):\n raise ValueError(\"The column indexes were out of range.\")\n\n if all(isinstance(item, str) for item in col_name):\n data = df.loc[:, col_name]\n else:\n data = df.iloc[:, col_name]\n\n data2 = data._get_numeric_data()\n rm_n = data.shape[1] - data2.shape[1]\n print(\"Removed\", rm_n, \"non-numberical columns from your selected columns\")\n\n sns.set(style=\"white\")\n corr = data2.corr()\n mask = np.triu(np.ones_like(corr, dtype=np.bool))\n f, ax = plt.subplots(figsize=(9, 11))\n ax.set_title('Correlation Matrix', size=20)\n ax.tick_params(axis='x', labelsize=15)\n ax.tick_params(axis='y', labelsize=15)\n\n cmap = sns.diverging_palette(220, 20, as_cmap=True)\n p = sns.heatmap(corr, mask=mask, cmap=cmap, vmin=-1, vmax=1, center=0,\n square=True, linewidths=.5, cbar_kws={\"shrink\": .5})\n p.set_yticklabels(p.get_yticklabels(), rotation=360)\n return p",
"def column_eliminate(values):\n solved_values = [box for box in values.keys() if len(values[box]) == 1]\n for box in solved_values:\n \n location = values[box][0] #a solved location in a column\n if location in location_dict.keys():\n \n #ensure that multiple groups can be in multiple locations using period_loc_frequency\n loc_freq = 0\n loc_freq -= 1 #subtract one for the current location usage\n loc_freq += period_loc_frequency[location]\n \n for other_col in column_dict[box]:\n if other_col in solved_values:\n if values[other_col] == location:\n loc_freq -= 1\n \n #make sure that too many locations haven't been used up yet\n if loc_freq < 0:\n print(\"error: too many groups in location\", location)\n \n #if the location is \"used up\", remove it as an option from the rest of the groups\n if loc_freq == 0:\n for other_col in column_dict[box]:\n try:\n values[other_col].remove(location) #remove the location from the other column units\n except:\n pass\n \n return values",
"def eliminateRedundantInfo(self):\n\n allEliminated = False\n edep = self.energyDependentWidths\n for colId in range(edep.nColumns)[::-1]:\n column = edep.columns[colId]\n columnData = edep.getColumn( column.name, units='eV' )\n if len(set( columnData ) ) == 1:\n setattr( self.constantWidths, column.name, PQU.PQU( PQU.pqu_float.surmiseSignificantDigits( columnData[0] ), column.units ) )\n [d.pop(colId) for d in edep.data]\n edep.columns.pop(colId)\n for idx, col in enumerate( edep.columns ): col.index = idx #re-number\n #if edep.nColumns == 1 and edep.columns[0].name == 'energy':\n # edep.columns, edep.data = [],[] # all widths are constant\n # allEliminated = True\n return allEliminated",
"def corrGroups(df:pd.DataFrame,corr_thresh:float=0.9) -> list:\n \n corrMatrix = df.corr().abs()\n corrMatrix.loc[:,:] = np.tril(corrMatrix, k=-1)\n corrMatrix = corrMatrix[corrMatrix >= corr_thresh].dropna(how='all').dropna(axis=1,how='all')\n corrMatrix['corr_groups'] = corrMatrix.apply(lambda x:sum([[x.name],x.index[x.notna()].tolist()],[]), axis=1)\n corrMatrix['max'] = corrMatrix.max(axis=1)\n \n corrVars = [i for i in corrMatrix.sort_values('max',ascending=False).corr_groups]\n \n remove=[]\n for i in corrVars:\n for j in range(0,len(corrVars)):\n if set(i).issubset(corrVars[j]) and i!=corrVars[j] and i not in remove:\n remove.append(i)\n \n for rm in remove:\n corrVars.remove(rm)\n \n return corrVars",
"def get_top_correlations(dataframe,columns,frame_type='spark'):\n if frame_type == 'spark':\n import math\n correlation_list = []\n correlations_finished = [] #hold correlatons done to prevent repitition\n for i, col_i in enumerate(columns):\n for j, col_j in enumerate(columns):\n if col_i+col_j not in correlations_finished: # don't repeat\n columns = [col_i,col_j]\n correlation = dataframe.stat.corr(col_i,col_j)\n if math.isnan(correlation):\n correlation=0.0\n correlation_list.append({\n 'columns': columns,\n 'correlation': correlation,\n 'correlation_abs':math.fabs(correlation),\n })\n # print({\n # 'columns': columns,\n # 'correlation': correlation,\n # 'correlation_abs':math.fabs(correlation),\n # })\n correlations_finished.append(col_i+col_j)\n #sort the list so highest correlations are first\n correlation_list = sorted(correlation_list, key=lambda x: x['correlation_abs'], reverse=True)\n return correlation_list\n else:\n pass",
"def _get_rank_values(self):\n \n symmetric_uncertainties = {}\n \n #caluclate info gain\n for col in self.cat_cols:\n symmetric_uncertainties[col] = self._get_symmetric_uncertainty(col)\n \n return symmetric_uncertainties",
"def get_distrib_of_correlations(num_columns, percent_indep,\n distrib=\"uniform\", indep_slack=0.05,\n percent_anti_dep=0.5, num_bins=10,\n random_seed=RANDOM_SEED):\n np_random = np.random.RandomState(random_seed)\n\n num_corrs = int(round((num_columns**2 - num_columns) / 2.0))\n num_indep_corrs = int(round(num_corrs * percent_indep))\n num_dep_corrs = num_corrs - num_indep_corrs\n indep_corrs = []\n dep_corrs = []\n anti_dep_corrs = []\n\n # 1. if uniform, use uniform distrib to fill in desired dep strs\n if distrib == \"uniform\":\n # 1a. add independent scores\n while len(indep_corrs) < num_indep_corrs:\n score = np_random.uniform(0.0 - indep_slack, 0.0 + indep_slack + 0.000001)\n if score >= 0.0 - indep_slack and score <= 0.0 + indep_slack:\n indep_corrs.append(score)\n\n # 1b. add anti-dependent scores\n anti_dep_corrs += list(np_random.uniform(\n -1.0,\n 0.0 - indep_slack,\n size=int(math.floor(num_dep_corrs * percent_anti_dep))\n ))\n\n # 1c. add dependent scores\n while len(dep_corrs) < int(math.ceil(num_dep_corrs * (1.0 - percent_anti_dep))):\n score = np_random.uniform(0.0 + indep_slack, 1.000001)\n if score > 0.0 + indep_slack and score <= 1.0:\n dep_corrs.append(score)\n\n # 2. if normal, use normal distrib to fill in desired dep strs\n elif distrib == \"normal\":\n # 2a. add independent scores\n while len(indep_corrs) < num_indep_corrs:\n scale = indep_slack / 4.0\n score = np_random.normal(loc=0.0, scale=scale)\n if score >= 0.0 - indep_slack and score <= 0.0 + indep_slack:\n indep_corrs.append(score)\n\n # 2b. add anti-dependent scores\n while len(anti_dep_corrs) < int(math.floor(num_dep_corrs * percent_anti_dep)):\n loc = (-1.0 - indep_slack) / 2.0\n scale = abs(loc / 4.0)\n score = np_random.normal(loc=loc, scale=scale)\n if score >= -1.0 and score < 0.0 - indep_slack:\n anti_dep_corrs.append(score)\n\n # 2c. add dependent scores\n while len(dep_corrs) < int(math.ceil(num_dep_corrs * (1.0 - percent_anti_dep))):\n loc = (1.0 + indep_slack) / 2.0\n scale = loc / 4.0\n score = np_random.normal(loc=loc, scale=scale)\n if score > 0.0 + indep_slack and score <= 1.0:\n dep_corrs.append(score)\n else:\n raise Exception(\"Invalid distribution specified.\")\n\n # 3. return corrs\n corrs = anti_dep_corrs + indep_corrs + dep_corrs\n print \"num_columns={}, num_corrs={}, num_dep_corrs={}, num_indep_corrs={}, len(scores)={}\"\\\n .format(num_columns, num_corrs, num_dep_corrs, num_indep_corrs, len(corrs))\n return corrs",
"def fix_neg(df: pd.DataFrame, roi: str,\n columns: list = ['cases', 'deaths', 'recover'],\n plot: bool = False) -> pd.DataFrame:\n for c in columns:\n cum = 'cum_%s' % c\n new = 'new_%s' % c\n before = df[cum].copy()\n non_zeros = df[df[new] > 0].index\n has_negs = before.diff().min() < 0\n if len(non_zeros) and has_negs:\n first_non_zero = non_zeros[0]\n maxx = df.loc[first_non_zero, cum].max()\n # Find the bad entries and null the corresponding\n # cumulative column, which are:\n # 1) Cumulative columns which are zero after previously\n # being non-zero\n bad = df.loc[first_non_zero:, cum] == 0\n df.loc[bad[bad].index, cum] = None\n # 2) New daily columns which are negative\n bad = df.loc[first_non_zero:, new] < 0\n df.loc[bad[bad].index, cum] = None\n # Protect against 0 null final value which screws up interpolator\n if np.isnan(df.loc[df.index[-1], cum]):\n df.loc[df.index[-1], cum] = maxx\n # Then run a loop which:\n while True:\n # Interpolates the cumulative column nulls to have\n # monotonic growth\n after = df[cum].interpolate('pchip')\n diff = after.diff()\n if diff.min() < 0:\n # If there are still negative first-differences at this\n # point, increase the corresponding cumulative values by 1.\n neg_index = diff[diff < 0].index\n df.loc[neg_index, cum] += 1\n else:\n break\n # Then repeat\n if plot:\n plt.figure()\n plt.plot(df.index, before, label='raw')\n plt.plot(df.index, after, label='fixed')\n r = np.corrcoef(before, after)[0, 1]\n plt.title(\"%s %s Raw vs Fixed R=%.5g\" % (roi, c, r))\n plt.legend()\n else:\n after = before\n # Make sure the first differences are now all non-negative\n assert after.diff().min() >= 0\n # Replace the values\n df[new] = df[cum].diff().fillna(0).astype(int).values\n return df",
"def compute_clf_based_correlation_score(stats, columns, col_name):\n full_col_data = columns[col_name]\n\n dt_clf = DecisionTreeClassifier()\n\n other_feature_names = []\n other_features = []\n for other_col_name in columns.columns:\n if other_col_name == col_name:\n continue\n\n other_feature_names.append(other_col_name)\n le = LabelEncoder()\n _stringified_col = list(map(str,columns[other_col_name]))\n le.fit(_stringified_col)\n other_features.append(list(le.transform(_stringified_col)))\n\n other_features_t = np.array(other_features, dtype=object).transpose()\n\n le = LabelEncoder()\n _stringified_col = list(map(str,full_col_data))\n le.fit(_stringified_col)\n y = le.transform(_stringified_col)\n dt_clf.fit(other_features_t,y)\n prediction_score = dt_clf.score(other_features_t,y)\n corr_scores = list(dt_clf.feature_importances_)\n highest_correlated_column = max(corr_scores)\n return {\n 'correlation_score': round(10 * (1 - prediction_score * highest_correlated_column))\n ,'highest_correlation': max(corr_scores)\n ,'most_correlated_column': other_feature_names[corr_scores.index(max(corr_scores))]\n ,'similarity_score_description':\"\"\"\n A high value for this score means that two of your columns are highly similar. This is done by trying to predict one column using the other via a simple DT.\n \"\"\"\n }",
"def filter_cols(df):\n comm_keys = list( set(df.keys()) & set(KEYS_FOR_ML) )\n filt_col_df = df.copy()[comm_keys]\n\n return filt_col_df",
"def find_zero_var(df): \n toKeep = []\n toDelete = []\n for col in df:\n if len(df[col].value_counts()) > 1:\n toKeep.append(col)\n else:\n toDelete.append(col)\n ##\n return {'toKeep':toKeep, 'toDelete':toDelete}",
"def fix_values(df, columns):\n df[df.loc[:, columns] > 90] -= 180\n df[df.loc[:, columns] < -90] += 180\n arg_chi5s = [col for col in df.columns.values if \"ARG\" in col and \"chi5\" in col]\n return df.drop(arg_chi5s, axis=1)",
"def checkCorr(originalDF):\n # BEGIN: from https://www.kaggle.com/pmarcelino/comprehensive-data-exploration-with-python\n # EXPLANATION: This code visualizes the correlation matrix of the data \n # using heatmap, representing different correlation coefficients by \n # different colors.\n corrmat = originalDF.corr()\n f, ax = plt.subplots(figsize=(12,9))\n sns.heatmap(corrmat, vmax=.8, square=True)\n \n #Zoom in the important variables\n #saleprice correlation matrix\n k = 10 #number of variables for heatmap\n cols = corrmat.nlargest(k, 'SalePrice')['SalePrice'].index\n cm = np.corrcoef(originalDF[cols].values.T)\n sns.set(font_scale=1.25)\n hm = sns.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 10}, yticklabels=cols.values, xticklabels=cols.values)\n plt.show()\n # END: from https://www.kaggle.com/pmarcelino/comprehensive-data-exploration-with-python\n \n \"\"\"\n It seems like 1stFlrSF and TotalBsmtSF, \n TotRmsAbvGr and GrLivArea, YearBuilt and GarageYrBlt, \n GarageArea and GarageCars are highly correlated respectively.\n Let us check the specific correlations.\n \"\"\"\n cor1 = originalDF.loc[:, \"1stFlrSF\"].corr(originalDF.loc[:, \"TotalBsmtSF\"])\n cor2 = originalDF.loc[:, \"TotRmsAbvGrd\"].corr(originalDF.loc[:, \"GrLivArea\"])\n cor3 = originalDF.loc[:, \"YearBuilt\"].corr(originalDF.loc[:, \"GarageYrBlt\"])\n cor4 = originalDF.loc[:, \"GarageArea\"].corr(originalDF.loc[:, \"GarageCars\"])\n \n print(\"1st Floor SF and Total Basement SF\")\n print(cor1)\n print(\"Total Rooms Above Ground and Ground Living Area\")\n print(cor2)\n print(\"Year Built and Garage Year Built\")\n print(cor3)\n print(\"Garage Area and Garage Cars\")\n print(cor4)\n \n # Maybe try dropping those with abs(corr) > 0.9?",
"def get_cols_drop():",
"def get_corr_subs_values(corr):\n subs_dict = {}\n n_dim = corr.shape[0]\n for irow in xrange(0, n_dim):\n for icol in xrange(irow + 1, n_dim):\n subs_dict['rho_p{}p{}'.format(irow, icol)] = corr[irow, icol]\n\n return subs_dict"
]
| [
"0.6928181",
"0.66915566",
"0.6476721",
"0.64294565",
"0.6361409",
"0.5917592",
"0.57251847",
"0.5655154",
"0.56510097",
"0.5580107",
"0.54400027",
"0.5428099",
"0.5409625",
"0.53756136",
"0.5341789",
"0.5323188",
"0.5310228",
"0.5274723",
"0.52521986",
"0.5248313",
"0.5239504",
"0.5233201",
"0.52211815",
"0.5209356",
"0.51828474",
"0.5172058",
"0.5170032",
"0.51591927",
"0.5139234",
"0.51340485"
]
| 0.69375426 | 0 |
get the text output | def get_text(self):
return self.output.getvalue() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_text(self):\n\n return self.output['text']",
"def getText():",
"def getText():",
"def getText():",
"def getText():",
"def getText():",
"def obtain_text():\n pass",
"def get_text(self):",
"def output_text(text):\n print(text)",
"def text(self) -> str:",
"def get_text(self):\n return ''.join(self.result)",
"def text(self):\n text = ''\n for run in self.runs:\n text += run.text\n return text",
"def output(self):\n text_list = self.q(css='#output').text\n\n if len(text_list) < 1:\n return None\n return text_list[0]",
"def get_text(self):\n return self.res.text",
"def getText(self):",
"def result(target_text):\n\n display_text(target_text)\n readability(target_text)",
"def GetText(self):\r\n \r\n return self._text",
"def _text_command(self, request):\n response = self._send(request)\n self._check_response(response)\n return response.text",
"def ui_output_text(morzeText: str):\n print(morzeText)",
"def get_text(self):\n return self.text",
"def get_text(self) -> str:\n return self.text",
"def text(self):\n\t\treturn ' '.join([self.write_components[x] for x in self.write_components])",
"def get_console_text(self):\n console_text_api = '/consoleText'\n return self._api_request(self.url + console_text_api)",
"def getOutput(self):\n text = \"\"\n text += \"*\"*self.getLevel() + \" \"\n if self.isTODO():\n text += \"TODO \"\n if self.isDONE():\n text += \"DONE \"\n text += self.getTitle()\n return text",
"def combine_text(evt):\n global output\n output = output + evt.result.text\n print(evt.result.text)",
"def get_text(self):\n text_complet = \"\"\n rez_dict = self.__results\n for i in range(0, len(rez_dict[\"text\"])):\n text = rez_dict[\"text\"][i]\n conf = int(rez_dict[\"conf\"][i])\n if conf > self.__min_confidence:\n text_complet += text + \" \"\n return text_complet",
"def get_text(downgrade_titles=False):",
"def cmd_get(self):\n return self.text",
"def get_text(self):\n text_element = self.page.find(id=self.text_location)\n return text_element.get_text()",
"def text(self) -> str:\n return self._impl.get_text()"
]
| [
"0.80502146",
"0.76949847",
"0.76949847",
"0.76949847",
"0.76949847",
"0.76949847",
"0.76014626",
"0.75969446",
"0.74793994",
"0.74737936",
"0.7280087",
"0.725396",
"0.7242547",
"0.7066192",
"0.7057842",
"0.69442976",
"0.6915638",
"0.6883678",
"0.6883098",
"0.685257",
"0.6839582",
"0.68232876",
"0.6807075",
"0.6802679",
"0.6775723",
"0.6772529",
"0.6756655",
"0.67215264",
"0.6716621",
"0.6701096"
]
| 0.8387408 | 0 |
Displays statistics on the most popular stations and trip. | def station_stats(data):
print('\nCalculating The Most Popular Stations and Trip...\n')
start_time = time.time()
# display most commonly used start station
popular_ss= data['Start Station'].mode()[0]
print('Most popular Start Station:', popular_ss)
# display most commonly used end station
popular_es= data['End Station'].mode()[0]
print('Most popular End Station:', popular_es)
# display most frequent combination of start station and end station trip
data['start_end']= data['Start Station'] + data['End Station']
popular_se= data['start_end'].mode()[0]
print('Most popular combination of Start and End Station:', popular_se)
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*100) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # display most commonly used start station\n print(popular_start_station(df))\n\n # display most commonly used end station\n print(popular_end_station(df))\n\n # display most frequent combination of start station and end station trip\n print(popular_trip(df))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-' * 40)",
"def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # TO DO: display most commonly used start station\n print('\\nMost popular start station: {}'.format(df['Start Station'].mode()[0]))\n\n # TO DO: display most commonly used end station\n print('\\nMost popular end station: {}'.format(df['End Station'].mode()[0]))\n\n # TO DO: display most frequent combination of start station and end station trip\n print('\\nMost popular trip from start to end: {}'.format(df['Start Station'].mode()[0] + ' to ' + df['End Station'].mode()[0]))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)",
"def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # display most commonly used start station\n print('Most Frequent Start Station:', df['Start Station'].mode().values[0])\n\n # display most commonly used end station\n print('Most Frequent End Station:', df['End Station'].mode().values[0])\n\n # display most frequent combination of start station and end station trip\n df['Trip'] = df['Start Station'] + ' -> ' + df['End Station']\n print('Most Frequent Trip:', df['Trip'].mode().values[0])\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)",
"def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # display most commonly used start station\n print('Most Common Start Station:', df['Start Station'].mode()[0])\n\n # display most commonly used end station\n print('Most Common End Station:', df['End Station'].mode()[0])\n\n # display most frequent combination of start station and end station trip\n df['trip'] = df['Start Station'] + ' to ' + df['End Station']\n print('Most Frequent Trip:', df['trip'].mode()[0])\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)",
"def station_stats(df):\n df = load_data(city, month, day)\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # TO DO: display most commonly used start station\n popular_ss = df['Start Station'].mode()[0]\n print('Most popular start station', popular_ss)\n\n # TO DO: display most commonly used end station\n popular_es = df['End Station'].mode()[0]\n print('Most popular end station', popular_es)\n\n # TO DO: display most frequent combination of start station and end station trip\n popular_trip = df.groupby(['Start Station', 'End Station']).size().nlargest(1)\n print('Most popular combination', popular_trip)\n \n return station_stats",
"def display_station_stats(self):\n\n self.station_frame = stat_display_labels(\n self.stats_frame,\n \"Station Stats\",\n [\n \"The most popular start station was:\",\n \"The most popular end station was:\",\n \"The most popular start/end station combination was:\",\n ],\n row=1,\n columnspan=5,\n )\n self.station_stats_data = tk.Label(self.station_frame, justify=\"left\")\n self.station_stats_data.grid(row=0, column=1)",
"def station_stats(df):\n\n print('\\nVerifying the most popular trip and stations..\\n')\n start_time = time.time()\n\n # display most commonly used start station\n most_common_start_station = str(df['Start Station'].mode()[0])\n print(\"The most common start station for the selected filters is: \" +\n most_common_start_station)\n\n # display most commonly used end station\n most_common_end_station = str(df['End Station'].mode()[0])\n print(\"The most common start end for the selected filters is: \" +\n most_common_end_station)\n\n # display most frequent combination of start station and\n # end station trip\n df['Start-End Combination'] = (df['Start Station'] + ' - ' +\n df['End Station'])\n most_common_start_end_combination = str(df['Start-End Combination']\n .mode()[0])\n print(\"The most common start-end combination of stations for teh selected filters is: \" + most_common_start_end_combination)\n\n print(\"\\nWe took {} seconds to complete this.\".format((time.time() - start_time)))\n print('-'*40)",
"def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # TO DO: display most commonly used start station\n popular_start_station = df['Start Station'].mode()[0]\n\n # TO DO: display most commonly used end station\n popular_end_station = df['End Station'].mode()[0]\n\n # TO DO: display most frequent combination of start station and end station trip\n df['Trip Stations'] = df['Start Station'] + ' to ' + df['End Station']\n popular_trip_stations = df['Trip Stations'].mode()[0]\n\n print(f'The most popular starting station is: {popular_start_station}')\n print(f'The most popular ending station is: {popular_end_station}')\n print(f'The most popular trip is: {popular_trip_stations}')\n\n print(f'\\nThis took {time.time() - start_time}s seconds.')\n print('-'*40)",
"def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # TO DO: display most commonly used start station\n popular_start_station = df['Start Station'].mode()[0]\n print('Most popular Start station is: ', popular_start_station)\n # TO DO: display most commonly used end station\n popular_end_station = df['End Station'].mode()[0]\n print('Most popular End Station is ', popular_end_station)\n # TO DO: display most frequent combination of start station and end station trip\n popular_trip_start_end = (df['Start Station'] + ' To ' + df['End Station']).mode()[0]\n print('Most popular Trip from star to end: ', popular_trip_start_end)\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)",
"def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # TO DO: display most commonly used start station\n most_common_ststation = df['Start Station'].mode()[0]\n print('The most common starting destination:', most_common_ststation)\n\n # TO DO: display most commonly used end station\n most_common_estation = df['End Station'].mode()[0]\n print('The most common ending destination:', most_common_estation)\n\n # TO DO: display most frequent combination of start station and end station trip\n cmb_station = (df ['Start Station'] + '&' + df['End Station']).mode()[0]\n print('Most frequently used stations combined:', cmb_station)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)",
"def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # display most commonly used start station\n most_common_start_station = str(df['Start Station'].mode()[0])\n print(\"For the selected filters, the most common start station is: \" +\n most_common_start_station)\n\n # display most commonly used end station\n most_common_end_station = str(df['End Station'].mode()[0])\n print(\"For the selected filters, the most common start end is: \" +\n most_common_end_station)\n\n # display most frequent combination of start station and\n # end station trip\n df['Start-End Combination'] = (df['Start Station'] + ' - ' +\n df['End Station'])\n most_common_start_end_combination = str(df['Start-End Combination']\n .mode()[0])\n print(\"For the selected filters, the most common start-end combination \"\n \"of stations is: \" + most_common_start_end_combination)\n\n print('-'*40)",
"def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # TO DO: display most commonly used start station\n common_start = df['Start Station'].mode()[0]\n print(\"The most common place to start: \",common_start)\n\n # TO DO: display most commonly used end station\n common_end = df['End Station'].mode()[0]\n print(\"The most common place to end:\",common_end)\n\n # TO DO: display most frequent combination of start station and end station trip\n common_combo = (df['Start Station']+\"||\"+df['End Station']).mode()[0]\n print(\"The most frequently used station combination: \",str(common_combo.split(\"||\")))\n\n print('-'*40)",
"def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # most commonly used start station\n most_start_station = df['Start Station'].mode()[0]\n print('Most trips start at {} Station'.format(most_start_station))\n\n # display most commonly used end station\n most_end_station = df['End Station'].mode()[0]\n print('Most trips end at {} Station'.format(most_end_station))\n\n # display most frequent combination of start station and end station trip\n df['journey_routes'] = df['Start Station'] + ' and ' + df['End Station']\n most_station_combination = df['journey_routes'].mode()[0]\n print('The most frequent station trips are between', most_station_combination)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)",
"def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # TO DO: display most commonly used start station\n popular_start_station = df['Start Station'].mode()[0]\n print(\"\\nThe most popular start station is: {}\".format(popular_start_station))\n\n # TO DO: display most commonly used end station\n popular_end_station = df['End Station'].mode()[0]\n print(\"\\nThe most popular end station is: {}\".format(popular_end_station))\n\n # TO DO: display most frequent combination of start station and end station trip\n most_common_combo = df.groupby(['Start Station','End Station']).size().sort_values(ascending=False).head(1)\n print(\"\\nThe most frequent trip from station to station is:\")\n print(most_common_combo)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)",
"def station_stats(df):\r\n\r\n print('\\nFetching The Most Popular Stations and Trip...\\n')\r\n start_time = time.time()\r\n\r\n # Shows the most commonly used start station\r\n common_start_station = df['Start Station'].mode()[0]\r\n print('The Most Commonly used start station is:', common_start_station)\r\n\r\n # Shows the most commonly used end station\r\n common_end_station = df['End Station'].mode()[0]\r\n print('The Most Commonly used end station is:', common_end_station)\r\n \r\n # Shows the most frequent combination of start station and end station trip\r\n comb_station = (df[\"Start Station\"] + \"-\" + df[\"End Station\"]).mode()[0]\r\n print('The Most Common Used Combination of (start + end station) is:', comb_station)\r\n \r\n print(\"\\nThis process took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)",
"def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # TO DO: display most commonly used start station\n most_common_start = df['Start Station'].mode().to_string(index = False)\n\n # TO DO: display most commonly used end station\n most_common_end = df['End Station'].mode().to_string(index = False)\n\n # TO DO: display most frequent combination of start station and end station trip\n print('The most commonly used start station is {}.'.format(most_common_start))\n print('The most commonly used end station is {}.'.format(most_common_end))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)",
"def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # display most commonly used start station\n popular_start_station = df['Start Station'].mode()[0]\n print('The most common start station is {}.'.format(popular_start_station))\n\n\n # display most commonly used end station\n popular_end_station = df['End Station'].mode()[0]\n print('The most common end station is {}.'.format(popular_end_station))\n\n # display most frequent combination of start station and end station trip\n df['Route'] = df['Start Station'] + \" to \" + df['End Station']\n popular_route = df['Route'].mode()[0]\n print('The most common route is {}.'.format(popular_route))\n\n print(\"\\nThis took %s seconds.\\n\" % (time.time() - start_time))\n print('-'*40)",
"def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # display most commonly used start station\n freq_start_station = df['Start Station'].mode()[0]\n print(\"Most common start station: \" + freq_start_station)\n\n # display most commonly used end station\n freq_end_station = df['End Station'].mode()[0]\n print(\"Most common end station: \" + freq_end_station)\n\n # display most frequent combination of start station and end station trip\n start, end = df.groupby(['Start Station', 'End Station']).size().idxmax()\n print(\"Most common trip: From \\'\" + start + \"' To \\'\" + end + \"'\")\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-' * 40)",
"def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # TO DO: display most commonly used start station\n popular_start_station = df['Start Station'].mode()[0]\n print(\"Most common start station is: \", popular_start_station)\n\n # TO DO: display most commonly used end station\n popular_end_station = df['End Station'].mode()[0]\n print(\"Most common end station is: \", popular_end_station)\n\n # TO DO: display most frequent combination of start station and end station trip\n popular_start_end_station = df['Start and End Stations'].mode()[0]\n print(\"Most popular start and end station combination: \", popular_start_end_station)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)",
"def station_stats(df, timing_off_flag):\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n if not timing_off_flag:\n start_time = time.time()\n\n show_in_rows = True # Show results in rows (better for multiples).\n\n # Display most commonly used start station.\n display_most_common('The most common start stations(s):', df,\n 'Start Station', show_in_rows)\n\n # Display most commonly used end station.\n display_most_common('The most common end stations(s):', df,\n 'End Station', show_in_rows)\n\n # Display most frequent combination of start and end stations.\n display_most_common('The most common start => end combination(s):', df,\n 'Path', show_in_rows)\n\n print('') # Blank line after final output improves format.\n if not timing_off_flag:\n print('This took {0:6f} seconds.'.format(time.time() - start_time))\n print('-' * 40)",
"def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # display most commonly used start station\n popular_start_station = df['Start Station'].mode()[0]\n print('The most popular start station: ', popular_start_station)\n\n # display most commonly used end station\n popular_end_station = df['End Station'].mode()[0]\n print('The most popular end station: ', popular_end_station)\n\n # display most frequent combination of start station and end station trip\n combined_trip = (df['Start Station'] + ' - ' + df['End Station']).mode()[0]\n print('The most frequent combination of start station and end station trip: ', combined_trip)\n\n (\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)",
"def station_stats(df):\r\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\r\n start_time = time.time()\r\n # TO DO: display most commonly used start station\r\n most_start_station = df['start station'].mode(0)\r\n print('most coommon start station is: [most_start_station]')\r\n # TO DO: display most frequent combination of start station and end station trip\r\n most_trip = df['start station'] + ' , ' + df['end station'].mode(0)\r\n print('most frequent combination of start station and end station trip is: [most_trip]')\r\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)",
"def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # display most commonly used start station\n popular_station = df['Start Station'].mode()[0]\n print('The most popular station to start a trip at is {}'.format(popular_station))\n\n # display most commonly used end station\n popular_end_station = df['End Station'].mode()[0]\n print('The most popular station to end a trip at is {}'.format(popular_end_station))\n\n # display most frequent combination of start station and end station trip\n df['Start and End'] = df['Start Station'] + ' to ' + df['End Station']\n popular_trip = df['Start and End'].mode()[0]\n print('The most frequent start and stop station combination is {}'.format(popular_trip))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)",
"def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n \n start_time = time.time()\n\n # TO DO: display most commonly used start station\n print('The Most used start station was:', df['Start Station'].value_counts().idxmax())\n\n \n # TO DO: display most commonly used end station\n print('The Most Commonly used end station was:', df['End Station'].value_counts().idxmax())\n \n # TO DO: display most frequent combination of start station and end station trip\n Combination_Station = (df['Start Station'].astype(str) + \" to \" + df['End Station'].astype(str)).value_counts().idxmax()\n \n print('\\n The Most popular trip was from {}\\n'.format(Combination_Station))\n \n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n \n print('-'*40)",
"def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # TO DO: display most commonly used start station\n popular_start = df['Start Station'].mode()[0]\n print('Most Popular Start Station:', popular_start)\n\n # TO DO: display most commonly used end station\n popular_end = df['End Station'].mode()[0]\n print('Most Popular End Station:', popular_end)\n\n # TO DO: display most frequent combination of start station and end station trip\n df['Start End'] = df['Start Station'] + ' to ' + df['End Station']\n popular_start_end = df['Start End'].mode()[0]\n print('Most Popular Trip:', popular_start_end)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)",
"def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # display most commonly used start station\n most_common_start_station = df['Start Station'].value_counts().idxmax()\n print(\"\\nThe most commonly used start station is {}.\".format(most_common_start_station))\n\n # display most commonly used end station\n most_common_end_station = df['End Station'].value_counts().idxmax() # display the most common start station\n print(\"\\nThe most commonly used end station is {}.\".format(most_common_start_station))\n\n # display most frequent combination of start station and end station trip\n df['Route'] = df['Start Station'] + ' to ' + df['End Station']\n most_common_route = df['Route'].value_counts().idxmax()\n print(\"\\nThe most frequent route is from {}.\".format(most_common_route))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)",
"def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # TO DO: display most commonly used start station\n \n print(\"most commonly used start station \",df['Start Station'].mode()[0])\n # TO DO: display most commonly used end station\n\n print(\"most commonly used end station \",df['End Station'].mode()[0])\n # TO DO: display most frequent combination of start station and end station trip\n df['trip road']=df['Start Station']+\" to \"+df['End Station']\n print(\"most frequent combination of start station and end station trip \",df['trip road'].mode()[0])\n\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)",
"def station_stats(df):\n\n print(\"\\nNext up, let's find the Most Popular Stations and Trip...\\n\")\n start_time = time.time()\n\n # display most commonly used start station\n print('Most common start station:', df['Start Station'].value_counts().idxmax())\n\n # display most commonly used end station\n print('Most common end station:', df['End Station'].value_counts().idxmax())\n\n # display most frequent combination of start station and end station trip\n frequent_combination = (df['Start Station'] + '~' + df['End Station']).mode()[0]\n print(\"The most frequent combination of stations: \",\n frequent_combination.split('~'))\n\n print(\"\\nTotal time taken: %s seconds.\" % (round(time.time() - start_time, 2)))\n print('-' * 40)",
"def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # TO DO: display most commonly used start station\n popular_start_station = df['Start Station'].mode().values[0]\n print('Popular start station: {} '.format(popular_start_station))\n\n # TO DO: display most commonly used end station\n popular_end_station = df['End Station'].mode().values[0]\n print('\\npopular end station: {} '.format(popular_end_station))\n\n # TO DO: display most frequent combination of start station and end station trip\n counts = df.groupby(['Start Station','End Station']).size().idxmax()\n print('\\nMost frequent combination {}'.format(str(counts)))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)",
"def station_stats(df):\r\n\r\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\r\n start_time = time.time()\r\n\r\n # display most commonly used start station\r\n start_station_counts = df.groupby('Start Station')['Start Station'].count()\r\n sorted_start_stations = start_station_counts.sort_values(ascending=False)\r\n most_popular_start_station = \"\\nMost popular start station: \" + sorted_start_stations.index[0]\r\n\r\n # display most commonly used end station\r\n end_station_counts = df.groupby('End Station')['End Station'].count()\r\n sorted_end_stations = end_station_counts.sort_values(ascending=False)\r\n most_popular_end_station = \"Most popular end station: \" + sorted_end_stations.index[0]\r\n\r\n # display most frequent combination of start station and end station trip\r\n trip_counts = df.groupby(['Start Station', 'End Station'])['Start Time'].count()\r\n sorted_trip_stations = trip_counts.sort_values(ascending=False)\r\n total_trips = df['Start Station'].count()\r\n print(\"Most popular trip: \" + \"\\n Start station: \" + str(sorted_trip_stations.index[0][0]) + \"\\n End station: \" + str(\r\n sorted_trip_stations.index[0][1]))\r\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)"
]
| [
"0.7815675",
"0.7553417",
"0.74914914",
"0.74748665",
"0.7446617",
"0.7428588",
"0.74237514",
"0.74177027",
"0.7400495",
"0.7387161",
"0.7385634",
"0.73647535",
"0.735934",
"0.73566747",
"0.73548365",
"0.7351854",
"0.7351274",
"0.735057",
"0.73421645",
"0.7340198",
"0.7337384",
"0.731165",
"0.73101383",
"0.7309774",
"0.7302735",
"0.7302693",
"0.7299954",
"0.72912693",
"0.72894394",
"0.72891724"
]
| 0.7662213 | 1 |
Displays statistics on the total and average trip duration. | def trip_duration_stats(data):
print('\nCalculating Trip Duration...\n')
start_time = time.time()
# display total travel time
total_trip_time= data['Trip Duration'].sum()
print('The Total Travel Time is {} Hours'. format(total_trip_time/3600))
# display mean travel time
avg_trip= data['Trip Duration'].mean()
print('The Average Travel Time is {} Minutes'. format(avg_trip/60))
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*100) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # TO DO: display total travel time\n total_time = df['Trip Duration'].sum()\n print(\"The total travel time was:\",str(total_time))\n\n # TO DO: display mean travel time\n mean_time = df['Trip Duration'].mean()\n print(\"The average travel time was:\",str(mean_time))\n\n print('-'*40)",
"def trip_duration_stats(df):\r\n print('\\nCalculating Trip Duration...\\n')\r\n start_time = time.time()\r\n # TO DO: display total travel time\r\n total_time = df['Trip Duration'].sum()\r\n print('total trave time:',total_time,'seconds, or',total_time/3600,'hour')\r\n # TO DO: display mean travel time\r\n mean_time = df['Trip Duration'].mean()\r\n print('mean trave time:',mean_time,'seconds, or',mean_time/3600,'hour')\r\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)",
"def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...')\n start_time = time.time()\n\n # TO DO: display total travel time\n trip_duration_total = df['Trip Duration'].sum()\n print('\\nThe total travel time:', trip_duration_total)\n\n # TO DO: display mean travel time\n trip_duration_mean = df['Trip Duration'].mean()\n print('\\nThe mean travel time:', trip_duration_mean)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)",
"def trip_duration_stats(df):\r\n\r\n print('\\nCalculating Trip Duration...\\n')\r\n start_time = time.time()\r\n\r\n # display total travel time\r\n print('Total travel time: ')\r\n print(df['Trip Duration'].sum())\r\n\r\n # display mean travel time\r\n print('Average travel time: ')\r\n print(df['Trip Duration'].mean())\r\n\r\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)",
"def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = t.time()\n\n #display total travel time\n total_travel_time = df['Trip Duration'].sum()\n\n print('Total Travel Time:', total_travel_time)\n print('')\n\n #display mean travel time\n average = df['Trip Duration'].mean()\n\n print('Mean/Average Travel Time:', average)\n print('')\n\n print(\"\\nThis took %s seconds.\" % (t.time() - start_time))\n print('-'*40)",
"def trip_duration_stats(df):\r\n\r\n print('\\nCalculating Trip Duration...\\n')\r\n start_time = time.time()\r\n\r\n # TO DO: display total travel time\r\n\r\n #displaying total travel time using sum() method\r\n print('\\nTotal travel duration is: ',df['Trip Duration'].sum())\r\n\r\n # TO DO: display mean travel time\r\n\r\n #displaying average travel time using mean() method\r\n print('\\nAverage travel duration is: ',df['Trip Duration'].mean())\r\n\r\n #extra statistics\r\n #what is the largest and smallest duration of travel time\r\n\r\n print('\\nLargest travel duration is: ',df['Trip Duration'].max())\r\n print('\\nSmallest travel duration is: ',df['Trip Duration'].min())\r\n\r\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)",
"def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n print(\"Total travel time is {} minutes.\".format(df[\"Trip Duration\"].sum()))\n\n # display mean travel time\n print(\"Mean travel time is {} minutes.\".format(df[\"Trip Duration\"].mean()))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-' * 40)",
"def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n print('Total Travel Time: {:.2f} minutes ({:.2f} hours)'.format((df['Trip Duration'].sum() / 60), (df['Trip Duration'].sum() / 3600)))\n\n # display mean travel time\n print('Average Travel Time: {:.2f} minutes'.format((df['Trip Duration'].mean() / 60)))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)",
"def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # TO DO: display total travel time\n print('total travel time : {}'.format(df['Trip Duration'].sum()))\n\n # TO DO: display mean travel time\n print('total travel time : {}'.format(df['Trip Duration'].mean()))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)",
"def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n print(\"Total travel time:\", df['Trip Duration'].sum())\n\n # display mean travel time\n print(\"Mean travel time:\", df['Trip Duration'].mean())\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)",
"def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n total = df['Trip Duration'].sum()\n print('The total travel time is {} seconds'.format(total))\n\n # display mean travel time\n avg = df['Trip Duration'].mean()\n print('The mean travel time is {} seconds'.format(avg))\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)",
"def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n tot_time=df['Trip Duration'].sum()\n print('The total travel time is ', tot_time)\n\n # display mean travel time\n mean_time=df['Trip Duration'].mean()\n print('The mean travel time is ', mean_time)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)",
"def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n print(\"Total travel time: \" + str(df['Trip Duration'].sum()) + \" seconds\")\n\n # display mean travel time\n print(\"Mean travel time: \" + str(df['Trip Duration'].mean()) + \" seconds\")\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-' * 40)",
"def trip_duration_stats(df):\n\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # TO DO: display total travel time\n total_travel = df['Trip Duration'].sum()\n print(\"Total Time traveled is: \", total_travel)\n\n # TO DO: display mean travel time\n avg_travel = df['Trip Duration'].mean()\n print(\"Average Time traveled is: \", avg_travel)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)",
"def trip_duration_stats(df):\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n # display total travel time\n total_travel_time = df['Trip Duration'].sum()\n print('Total Travel Time:', total_travel_time)\n # display mean travel time\n mean_travel_time = df['Trip Duration'].mean()\n print('Mean Travel Time:', mean_travel_time)\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)",
"def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n total_duration=df['Trip Duration'].sum()\n print(total_duration)\n # display mean travel time\n mean_duration=df['Trip Duration'].mean()\n print(mean_duration)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)",
"def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time in minutes\n tot_tt = df[\"Trip Duration\"].sum()/60\n print(\"The total travel time is\", tot_tt, \"minutes\")\n\n # display mean travel time in minutes\n avg_tt = df[\"Trip Duration\"].mean()/60\n print(\"The average (mean) travel time is\", avg_tt, \"minutes\")\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)",
"def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n print('The total time people spent on the trip are', df['Trip Duration'].sum()/(3600*24),\n 'days')\n\n # display mean travel time\n print('The average time people spent on the trip are', df['Trip Duration'].mean()/60,\n 'minutes')\n\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)",
"def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n total_time = df['Trip Duration'].sum()\n print(\"Total travel time is {}\".format(total_time))\n # display mean travel time\n avg_time = df['Trip Duration'].mean()\n print(\"Average travel time is {}\".format(avg_time))\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)",
"def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # TO DO: display total travel time\n total_travel_time=df['Trip Duration'].sum()\n print('Total Travel Time:',total_travel_time)\n\n # TO DO: display mean travel time\n mean_travel_time=df['Trip Duration'].mean()\n print('Mean Travel Time:',mean_travel_time)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)",
"def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # TO DO: display total travel time\n total_time = df['Trip Duration'].sum()\n total_time = sec2time(total_time)\n print(\"\\nThe total travel time: {}\".format(total_time))\n\n # TO DO: display mean travel time\n average_time = df['Trip Duration'].mean()\n average_time = sec2time(average_time)\n print(\"\\nThe average travel time: {}\".format(average_time))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)",
"def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # TO DO: display total travel time\n total_travel_time = df['Trip Duration'].sum()\n print('\\n The total travel time is {}'.format(total_travel_time) +' Seconds')\n\n # TO DO: display mean travel time\n mean_travel_time = df['Trip Duration'].mean()\n print('\\nThe mean travel time is {}'.format(mean_travel_time) +' Seconds')\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)",
"def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # TO DO: display total travel time\n g = df['Trip Duration'].sum()\n h = df['Trip Duration'].count()\n print('Total travel time: ',g)\n print('Count: ', h)\n # TO DO: display mean travel time\n i = df['Trip Duration'].mean()\n print('Average travel time: ', i)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)",
"def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # TO DO: display total travel time\n totalTravelTime=df['Trip Duration'].sum()\n print(f\" the total travel time: {totalTravelTime}\")\n\n # TO DO: display mean travel time\n average_duration = df['Trip Duration'].mean()\n minutes, seconds = divmod(average_duration, 60)\n if minutes >= 60:\n hours, minutes = divmod(minutes, 60)\n print(f\"\\nThe average trip duration is {hours}:{minutes}:{seconds}\")\n else:\n print(f\"\\nThe average trip duration is {minutes}:{seconds}\")\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)",
"def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n val = df['Trip Duration'].sum()/3600\n print(\"Total trip duration was {:.2f} hours\".format(val))\n\n # display mean travel time\n val = df['Trip Duration'].mean()/60\n print(\"Average trip duration was {:.2f} minutes\".format(val))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)",
"def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # TO DO: display total travel time\n total_travel_time = df['Trip Duration'].sum()\n print('Total travel time in seconds is : ', total_travel_time)\n # TO DO: display mean travel time\n mean_travel_time = df['Trip Duration'].mean()\n print('Average travel time in seconds is : ', mean_travel_time)\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)",
"def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n# TO DO: display total travel time\n print(\"Total travel time:\", df['Trip Duration'].sum())\n\n # TO DO: display mean travel time\n print(\"Total mean travel time: \", df['Trip Duration'].mean())\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-' * 40)",
"def trip_duration_statistics(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # TO DO: display total travel time\n total_travel_time=df['Trip Duration'].sum()\n print('The total travel time: ',total_travel_time)\n\n # TO DO: display mean travel time\n mean_travel_time=df['Trip Duration'].mean()\n print('The mean travel_time is :',mean_travel_time)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)",
"def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n total_trip_time = str(df['Travel Time'].sum())\n print('\\tThe total time for all trips made is : ' + total_trip_time)\n\n # display mean travel time\n trip_time_mean = str(df['Travel Time'].mean())\n print('\\n\\tThe average travel time is : ' + trip_time_mean)\n\n # Print the time taken to process statistics.\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)",
"def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n print(\"The total travel time is:\", df['Trip Duration'].sum() ,'min \\n')\n\n # display mean travel time\n print(\"The mean travel time is:\", df['Trip Duration'].mean(),'min \\n')\n \n print('-'*40)\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)"
]
| [
"0.802504",
"0.8003995",
"0.7949535",
"0.7937111",
"0.793114",
"0.7926624",
"0.79147255",
"0.7913882",
"0.7907699",
"0.79052943",
"0.7900545",
"0.78914416",
"0.7889074",
"0.7886493",
"0.7884836",
"0.7884105",
"0.78822976",
"0.78735834",
"0.7868467",
"0.7866015",
"0.786337",
"0.78618985",
"0.7861244",
"0.7860548",
"0.7859361",
"0.7855678",
"0.7854813",
"0.78497094",
"0.7849028",
"0.7845677"
]
| 0.8142488 | 0 |
Returns a list containing the names of all installed FSLeyes plugins. | def listPlugins():
plugins = []
for dist in pkg_resources.working_set:
if dist.project_name.startswith('fsleyes-plugin-'):
plugins.append(dist.project_name)
return list(sorted(plugins)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def names() -> Tuple[str, ...]:\n return plugins.list_all(package_name=__name__)",
"def get_available_plugin_names():\n mgr = stevedore.EnabledExtensionManager(namespace=PLUGIN_NAMESPACE,\n check_func=_auth_plugin_available,\n invoke_on_load=True,\n propagate_map_exceptions=True)\n return frozenset(mgr.names())",
"def _list_plugins_on_fs(cls):\n return os.listdir(settings.PLUGINS_PATH)",
"def list_plugins():\n l = list()\n for dir in os.listdir(PLUGIN_DIRECTORY):\n l.append(dir)\n return l",
"def available_plugins():\n return PluginConnector.available_plugins()",
"def list(self):\n return self.rpc.call(MsfRpcMethod.PluginLoaded)['plugins']",
"def listShREEKPlugins(self):\n return self._ShREEKConfig.listPluginModules()",
"def plugin_names(self):\n return self.__plugin_names",
"def get_plugins(self):\n return []",
"def get_plugin_files(self):\n \n names = []\n \n for dirname in self._plugin_dirs:\n basenames = [x for x in os.listdir(dirname) \n if x.endswith(\"_plugin.py\")]\n logging.debug(\"Plugin modules in %s: %s\" % \n (dirname, \" \".join(basenames)))\n names += [os.path.join(dirname, x) for x in basenames]\n \n return names",
"def plugin_list(self):\r\n return get_module_list()",
"def getLocalPluginNames():\r\n return [os.path.basename(f) for f in glob(buildPath('*.dll'))]",
"def find_plugins():\n return list(straight.plugin.load('csbot.plugins', subclasses=Plugin))",
"def get_plugins(self) -> \"list[str]\":\n if self.plugins is None:\n return []\n return self.plugins.split(\",\")",
"def getAvailablePlugins(self):\n return self._plugins",
"def list_plugins(request):\n plugins = plugin.get_plugins()\n return plugins",
"def cli_list_plugins(self) -> str:\n resp = \"\"\n for name in sorted(self.name_to_plugin_class):\n enabled = self.name_to_enabled[name]\n process = self.name_to_process[name] if name in self.name_to_process else \"N/A\"\n process_pid = process.pid if process != \"N/A\" else \"N/A\"\n exit_event = self.name_to_exit_event[name].is_set() if name in self.name_to_exit_event else \"N/A\"\n\n process_str = \"\\033[1;32m\" + str(process) + \"\\033[0;0m\" if \"started\" in str(\n process) else \"\\033[1;31m\" + str(process) + \"\\033[0;0m\"\n enabled_str = \"\\033[1;32mYes\\033[0;0m\" if enabled else \"\\033[1;31mNo\\033[0;0m\"\n resp += \"name:{:<30} enabled:{:<3} process:{} pid:{} exit_event:{}\\n\".format(name, enabled_str, process_str,\n process_pid,\n exit_event)\n\n return resp",
"def list_plugins(directory=None):\n repo = require_repo(directory)\n plugins = get_value(repo, 'plugins')\n if not plugins or not isinstance(plugins, dict):\n return None\n return plugins.keys()",
"def GetNamesOfParsersWithPlugins(cls):\n parser_names = []\n\n for parser_name, parser_class in cls.GetParsers():\n if parser_class.SupportsPlugins():\n parser_names.append(parser_name)\n\n return sorted(parser_names)",
"def get_plugins():\n all_modules = _load_modules(PYJEN_PLUGIN_FOLDER)\n retval = []\n for module in all_modules:\n retval.extend(_get_plugin_classes(module))\n return retval",
"def get_all_plugins(self):\r\n unfiltered_plugins = self.plugmanc.getAllPlugins()\r\n return self._get_supported_plugins(unfiltered_plugins)",
"def get_available_plugin_loaders():\n mgr = stevedore.EnabledExtensionManager(namespace=PLUGIN_NAMESPACE,\n check_func=_auth_plugin_available,\n invoke_on_load=True,\n propagate_map_exceptions=True)\n\n return dict(mgr.map(lambda ext: (ext.entry_point.name, ext.obj)))",
"def get_plugins():\n return [cls() for cls in get_plugin_classes()]",
"def get_startup_extensions(self):\n final_list = []\n for entry in self.bot_data_file[\"startup_extensions\"]:\n final_list.append(str(entry[\"name\"]))\n return final_list",
"def list(self):\n for dir in subdirs('plugins'):\n print dir.replace('plugins/', '')",
"def available_services(cls) -> List[str]:\n ret = []\n for (_, name, _) in pkgutil.iter_modules([str(SERVICES_PATH)]):\n ret.append(name)\n return ret",
"def list_plugins(self):\n if self.info is None:\n print(\"Currently no plugin is available.\\n\")\n else:\n print(self.info)\n print('\\n')\n if self.current_analyzer:\n self.check_analyzer()",
"def get_available_plugins() -> Dict[str, BasePlugin]:\n if not INITIALIZED:\n _load_and_register_plugins()\n\n return REGISTERED_PLUGINS",
"def getTrackingPluginNames(context):\n\n gsm = getGlobalSiteManager()\n global_plugins = set([p.name for p in gsm.registeredAdapters()\n if p.provided == IAnalyticsTrackingPlugin])\n\n lsm = getSite().getSiteManager()\n local_plugins = set([p.name for p in lsm.registeredAdapters()\n if p.provided == IAnalyticsTrackingPlugin])\n\n values = sorted(list(global_plugins | local_plugins))\n return SimpleVocabulary.fromValues(values)",
"def get_plugins(namespace, names=None):\n return list(get_manager(namespace, names))"
]
| [
"0.77920425",
"0.7777894",
"0.76436985",
"0.7446259",
"0.735109",
"0.733451",
"0.7286112",
"0.7230579",
"0.71848685",
"0.70858264",
"0.7053823",
"0.70029324",
"0.68295395",
"0.6769856",
"0.6700735",
"0.6697945",
"0.66847473",
"0.6670819",
"0.6656807",
"0.6656699",
"0.6639548",
"0.65943563",
"0.6577995",
"0.65401435",
"0.653343",
"0.6510833",
"0.65035355",
"0.64704466",
"0.64546156",
"0.63977784"
]
| 0.8093447 | 0 |
Loads the given Python file as a FSLeyes plugin. | def loadPlugin(filename):
name = op.splitext(op.basename(filename))[0]
modname = 'fsleyes_plugin_{}'.format(name)
distname = 'fsleyes-plugin-{}'.format(name)
if distname in listPlugins():
log.debug('Plugin %s is already in environment - skipping', distname)
return
log.debug('Loading plugin %s [dist name %s]', filename, distname)
dist = pkg_resources.Distribution(
project_name=distname,
location=op.dirname(filename),
version='0.0.0')
entryPoints = _findEntryPoints(filename, modname)
# Here I'm relying on the fact that
# Distribution.get_entry_map returns
# the actual dict that it uses to
# store entry points.
entryMap = dist.get_entry_map()
for group, entries in entryPoints.items():
entryMap[group] = {}
for name, item in entries.items():
ep = '{} = {}:{}'.format(name, modname, name)
ep = pkg_resources.EntryPoint.parse(ep, dist=dist)
entryMap[group][name] = ep
pkg_resources.working_set.add(dist) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_plugin(self, name, path):\n\t\ttry:\n\t\t\t# Plugins are just python modules.\n\t\t\tloader = importlib.machinery.SourceFileLoader(name, path)\n\t\t\tmodule = loader.load_module()\n\t\t\treturn module\n\t\texcept:\n\t\t\t# Upon error, alert the user and return None.\n\t\t\tbot.error(\"Failure loading plugin: {}, Exception: {} {}\".format(name, sys.exc_info()[0].__name__, sys.exc_info()[1]), fatal = True)\n\t\t\treturn None",
"def load_file(*args, **kwargs): # real signature unknown\n pass",
"def cli_load_plugin(self, args) -> str:\n plugin_name = args.plugin_name\n current_dir = os.path.dirname(os.path.realpath(__file__))\n if not os.path.isfile(\"{}/{}.py\".format(current_dir, plugin_name)):\n return error(\"Plugin {} DNE\".format(plugin_name))\n\n # First, let's see if this is already imported\n module_name = \"plugins.{}\".format(plugin_name)\n if module_name in sys.modules:\n self.cli_unload_plugin(plugin_name)\n mod = sys.modules[module_name]\n importlib.reload(mod)\n self.register_plugin(get_class(mod, plugin_name))\n return ok(\"Plugin {} reloaded\".format(plugin_name))\n\n importlib.invalidate_caches()\n mod = importlib.import_module(module_name)\n self.register_plugin(get_class(mod, plugin_name))\n return ok(\"Plugin {} loaded\".format(plugin_name))",
"def load(self, plugin):\n self.rpc.call(MsfRpcMethod.PluginLoad, [plugin])",
"def load_kv_from_py(f):\n filename = os.path.basename(os.path.splitext(f)[0])\n Builder.load_file(\n os.path.join(\n os.path.dirname(os.path.abspath(f)),\n filename + '.kv'\n )\n )",
"def _load_plugin(self, file_name):\n if file_name.endswith(\".py\"):\n file_name = file_name[:-3]\n\n base_name = file_name.split(\".\")[-1]\n\n print(\"[bot] Loading plugin\", base_name)\n\n try:\n plugin_file = importlib.import_module(file_name)\n except ModuleNotFoundError:\n print(\"Plugin not found:\", file_name.replace(\".\", \"/\") + \".py\")\n return False\n plugin = plugin_file.Plugin(bot=self,\n config=self.config,\n name=base_name)\n\n try:\n self.plugin_inboxes[base_name] = plugin.inbox\n except AttributeError:\n # This plugin has no inbox; that's fine. It's obviously not\n # an interface plugin.\n pass\n\n return plugin",
"def _LoadPlugin(self, registry, plugin_file, path):\n\n plugin_path = \"\"\n\n if os.path.isdir(path):\n\n plugin_path = os.path.join(path, plugin_file)\n\n \n if not os.path.isfile( plugin_path ):\n\n raise Exception(\"No %s descriptor file found in the plugin directory\" % plugin_file)\n\n elif os.path.isfile(path): \n\n dirrectory, filename = os.path.split(path)\n\n if filename != plugin_file:\n\n raise Exception(\"Invalid descriptor file found, expected %s, found %s\" % (plugin_file, filename))\n\n else:\n \n plugin_path = path\n \n else:\n\n raise Exception(\"%s is not a valid file or directory\" % path)\n\n registry.LoadPlugin(plugin_path)",
"def load(self, path):\n\t\tmname = os.path.splitext(os.path.split(path)[-1])[0]\n\t\tmodule = imp.load_source(mname, path)\n\t\tif hasattr(module, \"Plugin\"):\n\t\t\tinst = module.Plugin(self)\n\t\telse:\n\t\t\treturn None\n\t\t\n\t\tinst.set_info()\n\t\t\n\t\tfor c in inst.name:\n\t\t\tif not c in string.ascii_lowercase+\"_\":\n\t\t\t\treturn None\n\t\tfor c in inst.require:\n\t\t\tif not c in string.ascii_lowercase+string.digits+\"_:,\":\n\t\t\t\treturn None\n\t\tif not type(inst.version) == int:\n\t\t\treturn None\n\t\t\n\t\tself.__plugins[inst.name] = inst\n\t\tself.__plugins[inst.name].path = path\n\t\t\n\t\tif not self.__plugins[inst.name].start():\n\t\t\treturn None\n\t\t\n\t\treturn inst.name",
"def import_plugin(plugin_file):\n from . import api\n code, mod = load_module(plugin_file, api)\n exec(code, mod.__dict__)\n mod.__dict__['FILE'] = plugin_file\n return mod",
"def load(path):\n pass",
"def load(self, filename):\n pass",
"def loadCustom(plugin, path):\n global loaded\n logging.getLogger('maskgen').info(\"Loading plugin \" + plugin)\n with open(path) as jfile:\n data = json.load(jfile)\n loaded[plugin] = {}\n loaded[plugin]['function'] = 'custom'\n loaded[plugin]['operation'] = data['operation']\n loaded[plugin]['command'] = data['command']\n loaded[plugin]['group'] = None\n loaded[plugin]['mapping'] = data['mapping'] if 'mapping' in data else None\n loaded[plugin]['suffix'] = data['suffix'] if 'suffix' in data else None",
"def import_file(name: Text, file_path: Text):\n\n spec = spec_from_file_location(f\"luh3417.{name}\", file_path)\n module = module_from_spec(spec)\n spec.loader.exec_module(module)\n\n return module",
"def load(cls, name):\n try:\n return importlib.import_module(cls._plugins[name])\n except Exception as err:\n print(\"** could not load command [%s]:\\n%s\" % (name, err))",
"def _load_module(self, filename):\n logging.debug(\"Loading module %s\" % filename)\n module_name, dummy = os.path.splitext(os.path.basename(filename))\n f = file(filename, \"r\")\n try:\n module = imp.load_module(module_name, f, filename,\n (\".py\", \"r\", imp.PY_SOURCE))\n except Exception, e: # pragma: no cover\n logging.warning(\"Failed to load plugin '%s' (%s)\" % \n (module_name, e))\n return None\n f.close()\n return module",
"def _load_logging_file(plugin_name, name):\n import pkg_resources\n\n utf8_reader = codecs.getreader('utf8')\n log_conf_file = utf8_reader(\n pkg_resources.resource_stream(plugin_name, name)\n )\n return json.load(log_conf_file)",
"def loadPlugin(*args, addCallback: Script=None, allPlugins: bool=True, name: AnyStr=\"\", quiet:\n bool=True, removeCallback: Script=None, **kwargs)->List[AnyStr]:\n pass",
"def loadFromFile(self,filename):\n path = os.path.dirname(__file__)+\"/\"+filename\n if os.path.exists(path) and os.path.isfile(path):\n self.load(yaml.load(open(path, 'r')))",
"def LoadInputPlugin(self, path):\n\n self._LoadPlugin(self._input_registry, 'input.yml', path)",
"def load(self, filename=None):\n importer = aspecd.io.AdfImporter()\n importer.source = filename\n importer.import_into(self)",
"def load_plugin(self, plugin):\n return imp.load_module(self._main_module, *plugin[\"info\"])",
"def load(source_file):\n return loads(source_file.read())",
"def _import(attr, name, disable_logging):\n name = name.replace(\"-\", \"_\")\n ttl = attr.split(\".\")[-1].title()\n if not disable_logging:\n logger.info(\"Loading %s from %s plugin...\", ttl, name.title())\n attr = \"model\" if attr == \"Trainer\" else attr.lower()\n mod = \".\".join((\"plugins\", attr, name))\n module = import_module(mod)\n return getattr(module, ttl)",
"def load_plugin():\n return HostTestPluginCopyMethod_Shell()",
"def importfile(path):\n path = getpath(path, custom=True)\n assert _os.path.isfile(path) == True\n\n file_handler = _SourceFileLoader(*path.splitpath())\n return file_handler",
"def quickLoad(pluginManager, filename=None):\r\n from lauescript.laueio.loader import Loader\r\n loader = Loader(pluginManager.get_active_printer())\r\n loader.create(filename)\r\n mol = loader.load('quickloadedMolecule')\r\n return mol",
"def load():\n return TwitterPlugin()",
"def plugin_import(plugin):\n\n return importlib.import_module(plugin, package=\"directord\")",
"def load_plugin_data(self, data):\n return",
"async def load_plugin(self, name: str, reload_plugin: bool = False) -> None:\n _LOG.debug(_LOG_STR, f\"Importing {name}\")\n _IMPORTED.append(\n importlib.import_module(f\"gaganrobot.plugins.{name}\"))\n if reload_plugin:\n _IMPORTED[-1] = importlib.reload(_IMPORTED[-1])\n plg = _IMPORTED[-1]\n self.manager.update_plugin(plg.__name__, plg.__doc__)\n if hasattr(plg, '_init'):\n # pylint: disable=protected-access\n if asyncio.iscoroutinefunction(plg._init):\n _INIT_TASKS.append(self.loop.create_task(plg._init()))\n _LOG.debug(_LOG_STR, f\"Imported {_IMPORTED[-1].__name__} Plugin Successfully\")"
]
| [
"0.6108969",
"0.6028346",
"0.5957791",
"0.5890156",
"0.5883845",
"0.5814956",
"0.581036",
"0.58080155",
"0.5806757",
"0.5711296",
"0.5694454",
"0.56307757",
"0.55246925",
"0.55135196",
"0.550469",
"0.54819465",
"0.5479231",
"0.5416481",
"0.53727555",
"0.5365991",
"0.5356906",
"0.53458905",
"0.5344871",
"0.5320155",
"0.5294686",
"0.52686864",
"0.5246051",
"0.5244731",
"0.5237805",
"0.52212405"
]
| 0.6279767 | 0 |
Copies the given Python file into the FSLeyes settings directory, within a subdirectory called ``plugins``. After the file has been copied, the plugin is loaded with ``loadPlugin``. | def installPlugin(filename):
basename = op.splitext(op.basename(filename))[0]
dest = 'plugins/{}.py'.format(basename)
log.debug('Installing plugin %s', filename)
with open(filename, 'rt') as inf, \
fslsettings.writeFile(dest) as outf:
outf.write(inf.read())
dest = fslsettings.filePath(dest)
try:
loadPlugin(dest)
except Exception:
fslsettings.deleteFile(dest)
raise | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def python_plugin(path, name):\n require.directory(_uwsgi.UWSGI_PLUGINS_LOCATION, use_sudo=True)\n if not _uwsgi.plugin_exists(name):\n with cd(_uwsgi.UWSGI_INSTALLATION_DIR):\n cmd = \"PYTHON={path} ./uwsgi --build-plugin \\\"plugins/python {name}\\\"\"\n sudo(cmd.format(**locals()))\n files.move(\n \"{0}_plugin.so\".format(name), _uwsgi.UWSGI_PLUGINS_LOCATION,\n use_sudo=True\n )",
"def copy_settings():\n new_filename = 'settings.ini'\n if os.path.isfile(new_filename):\n error_msg = '{} already exists'.format(new_filename)\n raise Exception(error_msg)\n\n # determine the path of the example settings in the package\n pkgdir = os.path.dirname(thief_snapshot.__file__)\n example_ini_path = os.path.join(pkgdir, 'example_settings.ini')\n\n copy_path = os.path.join(os.getcwd(), new_filename)\n shutil.copy(example_ini_path, copy_path)",
"def move_file_to_config(path):\n destination = str(os.path.expanduser('~')) +'/.config/hackerjobs/'\n shutil.copy(path,destination)",
"def do_install(self, args):\n if args:\n try:\n plugin_name, file_path = args.split()[0], args.split()[1]\n except Exception as e:\n return print(display_messages(\"the argument is invalid please type ?install for more information\", error=True))\n if not path.isfile(file_path):\n return print(\n display_messages(\n \"the file {} not found \".format(file_path), error=True\n )\n )\n head, tail = os.path.split(file_path)\n dest = copyfile(file_path, \"{}/{}\".format(self.temp_path, tail))\n print(display_messages(\"copy content file .zip to {}\".format(dest), info=True))\n \n path_to_zip_file = tempfile.gettempdir() + \"/{}\".format(tail)\n with ZipFile(path_to_zip_file, \"r\") as zip_ref:\n zip_ref.extractall(tempfile.gettempdir())\n temp_path_file_extracted = \"{}/{}.py\".format(self.temp_path, plugin_name)\n print(\n display_messages(\n \"extracted files on : {}\".format(temp_path_file_extracted), info=True\n )\n )\n if not path.isfile(temp_path_file_extracted):\n return print(\n display_messages(\n \"the file {} not found \".format(temp_path_file_extracted), error=True\n )\n )\n temp_templates_path = \"{}/{}\".format(self.temp_path, plugin_name)\n if not path.isdir(temp_templates_path):\n return print(\n display_messages(\n \"the directory template {} not found \".format(temp_templates_path), error=True\n )\n )\n source = temp_path_file_extracted\n destination = \"{}/{}.py\".format(self.captiveflask_setup_path, plugin_name)\n dest = copyfile(source, destination)\n print(display_messages(\"copy content file to {}\".format(dest), info=True))\n\n copy_tree(\n temp_templates_path, C.user_config_dir + \"/config/templates/{}\".format(plugin_name)\n )\n print(\n display_messages(\n \"plugin {} install {}\".format( plugin_name,setcolor(\"sucessful\", color=\"green\")),\n info=True,\n )\n )\n return \n print(\n display_messages(\"unknown command: {} \".format(args), error=True)\n )",
"def prepare_plugin(self, plugin_path):\n # This method does not exist in MOS 7.0\n if not hasattr(self.env.admin_actions, 'upload_plugin'):\n mos7_upload_plugin(plugin=plugin_path, source=self.env.d_env)\n mos7_install_plugin(\n plugin_file_name=os.path.basename(plugin_path),\n source=self.env.d_env)\n else:\n self.env.admin_actions.upload_plugin(plugin=plugin_path)\n self.env.admin_actions.install_plugin(\n plugin_file_name=os.path.basename(plugin_path))",
"def setup_plugins(self, cfg, path):\n\n if cfg:\n with open(path, \"w\") as f:\n print(\"DOCUMENTATION='''\", file=f)\n print(\"---\", file=f)\n for key in cfg:\n print(f\"{key}: {cfg[key]}\", file=f)\n print(\"'''\", file=f)",
"def inject_sitecustomize(target: pathlib.Path):\n hook = textwrap.dedent(\n f\"\"\"\n import site\n site.addsitedir({os.fspath(target)!r})\n \"\"\"\n ).lstrip()\n target.joinpath('sitecustomize.py').write_text(hook, encoding='utf-8')",
"def setup(self):\n Utils.check_dir(os.path.join(expanduser('~'), '.drupdates', 'plugins'))",
"def add_local_settings():\n put('/Users/peter/Dropbox/Projects/ChromeFiddle/Local\\ Settings/prod/local_settings.py', \n '/home/django/web/chromefiddle/chromefiddle/settings')",
"def add_to_preset_path(self, path):\n if os.path.isfile(path):\n path = os.path.dirname(path)\n self.preset_path.append(path)",
"def write_plugin(config: Config) -> Config:\n with open(config.path, \"r\") as input_file:\n lines = input_file.read().split(\"\\n\")\n\n lines_computed = []\n\n ignore_lines = False\n id_section = False\n\n for line in lines:\n # id section\n if line.startswith(\"#----begin_id_section----\"):\n id_section = True\n\n lines_computed.append(COMMENT_PYTHON + PREFIX)\n\n continue\n\n if line.startswith(\"#----end_id_section----\"):\n id_section = False\n\n continue\n\n if id_section:\n if line.startswith(\"#\"):\n continue\n\n if not line:\n continue\n\n variables = [x.strip() for x in line.split(\"=\")]\n\n if variables:\n variable_name = variables[0]\n\n lines_computed.append(\n \"{} = {}\".format(\n variable_name,\n getattr(config.module, variable_name)\n )\n )\n\n continue\n\n # skip bootstrap4c4d lines\n if line.startswith(\"#----begin\"):\n ignore_lines = True\n\n continue\n\n if line.startswith(\"#----end\"):\n ignore_lines = False\n\n continue\n\n if not ignore_lines:\n lines_computed.append(line)\n\n compiled_plugin_file = os.path.join(\n config.destination, \"{}.pyp\".format(config.name)\n )\n\n assert_directories(compiled_plugin_file, True)\n\n with open(compiled_plugin_file, \"w\") as output_file:\n output_file.write(\"\\n\".join(lines_computed))\n\n return config",
"def set_lite_plugin_settings(\n self, config_path: Path, plugin_id: str, settings: Dict[str, Any]\n ) -> None:\n whole_file = config = json.loads(config_path.read_text(**UTF8))\n if config_path.name == JUPYTERLITE_IPYNB:\n config = whole_file[\"metadata\"][JUPYTERLITE_METADATA]\n\n config.setdefault(JUPYTER_CONFIG_DATA, {}).setdefault(\n LITE_PLUGIN_SETTINGS, {}\n ).update({plugin_id: settings})\n\n config_path.write_text(json.dumps(whole_file, **JSON_FMT), **UTF8)\n self.log.debug(\"%s wrote settings in %s: %s\", plugin_id, config_path, settings)\n self.maybe_timestamp(config_path)",
"def plugin_loaded():\n # Required for sublime.packages_path().\n # ST only \"loads resources\" from the Packages dir.\n global _temp_path\n packages_path = sublime.packages_path()\n _temp_path = os.path.join(packages_path, _temp_dir_name)\n\n _remove_temp_path()",
"def test_override_plugin(self):\n plugin_name = 'Stdout'\n source = os.path.join(self.current_dir, 'classes', plugin_name)\n target = os.path.join(expanduser('~'), '.drupdates', 'plugins', plugin_name)\n shutil.copytree(source, target)\n plugins = Plugin.get_plugins()\n assert plugins[plugin_name]['info'][1] == os.path.join(target, '__init__.py')",
"def user_plugin_dir() -> str:\n return os.path.join(user_data_dir(), 'plugins')",
"def _copy_asoundconf(asoundconf_file):\n this_dir, this_filename = os.path.split(__file__)\n asoundconf_path = os.path.join(this_dir, MicrophoneSetup.ASOUNDCONF_PATH, asoundconf_file)\n shutil.copy2(asoundconf_path, ASOUNDCONF_DEST_PATH)",
"def copy_supervisor_file():\n\n # check if the supervisor file exists\n if not os.path.isfile(\"./text_embeddings.conf\"):\n return Exception(\"File text_embeddings.conf does not exist\")\n\n # otherwise check if the supervisor folder exists\n if not os.path.exists(\"/etc/supervisor/conf.d\"):\n return Exception(\"Supervisor is not installed or folder /etc/supervisor/conf.d does not exist\")\n\n # copy the file to the final destination\n copyfile(\"./text_embeddings.conf\", \"/etc/supervisor/conf.d/text_embeddings.conf\")",
"def copy_to_plugin_step(self, xaf, plugin_name, step_name):\n target_path = os.path.join(get_plugin_step_directory_path(plugin_name,\n step_name),\n get_unique_hexa_identifier())\n self._set_after_tags(xaf, True)\n result = xaf.copy_or_nothing(target_path)\n return result",
"def configure():\n\n configuration_file = '{}/templates/tmux.conf'.format(ROOT_FOLDER)\n destination_file = '.tmux.conf'\n\n print(green('Uploading configuration file...'))\n put(configuration_file, destination_file)",
"def backup_config(context):\n context.copy_from(DNF_PLUGIN_DATA_PATH, DNF_PLUGIN_DATA_LOG_PATH)",
"def setup_local_config(self, file_path):\n try:\n shutil.copy(file_path, os.path.join(self.rundir, const.LOCAL_CONFIG_FILE))\n except OSError as e:\n raise ContainerError(\"Local config file provided errored out: {}\".format(e))",
"def update_settings(self):\n\n param = \"settings.py\"\n self._check_path_availability([\"get_settings_dir\", \"get_settings_dir_to\"])\n self.updater.update_files(\n self.analizer.get_settings_dir(),\n self.analizer.get_settings_dir_to(),\n param,\n )\n return self.write_debug_message(\"Settings upgrade is done!\\n\")",
"def update_from_file(self):\n config_path = os.environ.get('MINDINSIGHT_CONFIG', '')\n if not config_path:\n return\n\n config_module = None\n\n # python:full.path.for.config.module\n if config_path.startswith('python:'):\n config_module = import_module(config_path[len('python:'):])\n\n # file:full/path/for/config.py\n elif config_path.startswith('file:'):\n config_path = config_path[len('file:'):]\n module_name = '__mindinsightconfig__'\n config_module = types.ModuleType(module_name)\n machinery = import_module('importlib.machinery')\n loader = machinery.SourceFileLoader(module_name, config_path)\n loader.exec_module(config_module)\n\n if config_module is None:\n return\n\n for setting in dir(config_module):\n if setting.isupper() and setting in self._default_settings:\n setting_value = getattr(config_module, setting)\n setattr(self, setting, setting_value)\n self._explicit_settings.add(setting)",
"def set_plugin_path(self, path):\n ckresult(_dll.FMOD_System_SetPluginPath(self._ptr, path))",
"def setup(self, **kwargs):\n if self.bash_script:\n src = os.fspath(FILES / self.bash_script)\n dst = os.fspath(self.project_dir / self.bash_script)\n shutil.copy(src, dst)",
"def initialize_settings(tool_name, source_path, dest_file_name=None):\n settings_dir = os.path.join(SETTINGS_DIRECTORY, tool_name)\n if not os.path.exists(settings_dir):\n os.mkdir(settings_dir)\n if not dest_file_name:\n dest_file_name = os.path.basename(source_path)\n settings_path = os.path.join(settings_dir, dest_file_name)\n if not os.path.exists(settings_path):\n shutil.copy(source_path, settings_path)\n else:\n try:\n SettingsMigrator(source_path, settings_path).migrate()\n except ConfigObjError, parsing_error:\n print 'WARNING! corrupted configuration file replaced with defaults'\n print parsing_error\n shutil.copy(source_path, settings_path)\n return os.path.abspath(settings_path)",
"def put_settings_files(env='development'):\n projects = build_projects_vars()\n project = projects[env]\n if exists('%(dir)s/%(inner_dir)s' % project):\n put(project['settings_path'], '%(dir)s/%(inner_dir)s/local_settings.py' % project)\n if env == 'production':\n with cd('%(dir)s/%(inner_dir)s' % project):\n sed('local_settings.py', '^DEBUG = True$', 'DEBUG = False')",
"def add_modulepath_to_env(plugin_path, env_path):\n plugin_mods = os.path.join(plugin_path, \"modules\")\n open_format = 'a' if os.path.exists(env_path) else 'w'\n try:\n with open(env_path, open_format) as modfile:\n if open_format == 'a' and modfile.tell() != 0:\n modfile.seek(-1, os.SEEK_END)\n next_char = modfile.read(1)\n if next_char != '\\n':\n modfile.write('\\n')\n if os.pathsep == ';':\n modfile.write(\n \"MAYA_MODULE_PATH=%MAYA_MODULE_PATH%;{0}\".format(\n plugin_mods))\n else:\n modfile.write(\n \"MAYA_MODULE_PATH=$MAYA_MODULE_PATH:{0}\".format(\n plugin_mods))\n return AzureBatchSetup.create_modfile(plugin_mods, plugin_path)\n except Exception as exp:\n print(\"Couldn't create new maya env file: %s\" % env_path)\n return False",
"def loadPlugin(filename):\n\n name = op.splitext(op.basename(filename))[0]\n modname = 'fsleyes_plugin_{}'.format(name)\n distname = 'fsleyes-plugin-{}'.format(name)\n\n if distname in listPlugins():\n log.debug('Plugin %s is already in environment - skipping', distname)\n return\n\n log.debug('Loading plugin %s [dist name %s]', filename, distname)\n\n dist = pkg_resources.Distribution(\n project_name=distname,\n location=op.dirname(filename),\n version='0.0.0')\n\n entryPoints = _findEntryPoints(filename, modname)\n\n # Here I'm relying on the fact that\n # Distribution.get_entry_map returns\n # the actual dict that it uses to\n # store entry points.\n entryMap = dist.get_entry_map()\n\n for group, entries in entryPoints.items():\n entryMap[group] = {}\n\n for name, item in entries.items():\n ep = '{} = {}:{}'.format(name, modname, name)\n ep = pkg_resources.EntryPoint.parse(ep, dist=dist)\n entryMap[group][name] = ep\n\n pkg_resources.working_set.add(dist)",
"def populateScript(self):\n filePath = pm.fileDialog2(fileMode=1,\n startingDirectory=self.startDir,\n fileFilter=' Post Script .py (*%s)' % \".py\")\n if not filePath:\n return\n if not isinstance(filePath, string_types):\n filePath = filePath[0]\n self.gtUIInst.script_lineEdit.setText(filePath)"
]
| [
"0.60705787",
"0.5803295",
"0.5789761",
"0.5697095",
"0.56689143",
"0.55733675",
"0.5551937",
"0.5447961",
"0.54258406",
"0.53851646",
"0.53426594",
"0.53335357",
"0.5332662",
"0.53268933",
"0.53224117",
"0.5322032",
"0.52970606",
"0.52946967",
"0.5293114",
"0.52759516",
"0.5265463",
"0.5230512",
"0.5208937",
"0.5188499",
"0.5145572",
"0.50795037",
"0.50696516",
"0.50459045",
"0.5033644",
"0.5002109"
]
| 0.6132073 | 0 |
Handles a filesystem delete based on UUID. | def handle_delete(uuid):
location = os.path.join(app.config['UPLOAD_DIRECTORY'], uuid)
print(uuid)
print(location)
shutil.rmtree(location) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete(self, uuid):\n try:\n handle_delete(uuid)\n return make_response(200, {\"success\": True})\n except Exception, e:\n return make_response(400, {\"success\": False, \"error\": e.message})",
"def delete(self, uuid):\n try:\n handle_delete(uuid)\n return make_response(200, {\"success\": True})\n except Exception, e:\n return make_response(400, {\"success\": False, \"error\": e.message})",
"def delete_record(uuid):\n\n collection[uuid].delete()\n return redirect('/')",
"async def delete_file(location_id: LocationID, file_id: StorageFileID, user_id: UserID):",
"def delete(self, uuid):\n\n\t\treturn self._delete(\"/tag/%s\" % base.getid(uuid), \"tag\")",
"def delete(self, uuid: str) -> None:\n\n if not isinstance(uuid, str):\n raise TypeError(\"UUID must be type str\")\n if not validators.uuid(uuid):\n raise ValueError(\"UUID does not have proper form\")\n\n try:\n response = self._connection.delete(\n path=\"/objects/\" + uuid,\n )\n except RequestsConnectionError as conn_err:\n raise RequestsConnectionError('Object could not be deleted.') from conn_err\n if response.status_code == 204:\n # Successfully deleted\n return\n raise UnexpectedStatusCodeException(\"Delete object\", response)",
"def database_volume_delete(volume_uuid):\n db = database_get()\n session = db.session()\n query = session.query(model.Volume)\n query.filter(model.Volume.uuid == volume_uuid).delete()\n session.commit()",
"def delete(self, store, uuid):\n\n session = get_session()\n session.begin()\n\n stored_file = self._retrieve(store.object_type, uuid)\n\n try:\n session.delete(stored_file)\n session.commit()\n finally:\n session.close()",
"def volume_delete_by_storage(context, storage_id):\n _volume_get_query(context).filter_by(storage_id=storage_id).delete()",
"def delete(self, *route, **req_data):\n # Read the file ID from the request, with safety.\n try:\n file_id = UUID(req_data['file_id']).hex\n except ValueError:\n return Response(status='400 Bad Request')\n\n # Retrieve and delete the file.\n stored_files = StoredFile.collection()\n to_delete = stored_files.first(id=file_id)\n\n log_activity('%s deleted file %s'%(\n context.user.link, to_delete.filename\n ))\n\n stored_files.delete(to_delete)\n get_bucket().delete(to_delete.data_id)\n\n return Response(status='200 OK')",
"def delete_file(_oid, attachmentId=None):\n md = Metadata.objects.get_or_404(pk=_oid)\n attachment = ''\n test_upload_path_prefix = \"uploadedfiles\"\n test_environment = False\n\n username = _authenticate_user_from_session(request)\n\n if username:\n try:\n try:\n md = Metadata.objects.get(id=_oid)\n \n try:\n # if developing locally we'll also want to remove file\n url = filter(\n lambda a: str(a.id) == attachmentId, md.attachments\n ).pop().url\n if str(os.environ['FLASKCONFIG']) == 'testing' or str(os.environ['FLASKCONFIG']) == 'development':\n test_environment = True\n os.remove(\n os.path.join(\n app.config['UPLOADS_DEFAULT_DEST'],\n test_upload_path_prefix,\n _oid,\n os.path.basename(url)\n )\n )\n else:\n os.remove(\n os.path.join(\n app.config['UPLOADS_DEFAULT_DEST'],\n _oid,\n os.path.basename(url)\n )\n )\n except Exception:\n #Throw exception specific for test or non-test enviroment\n if test_environment:\n file_path = app.config['UPLOADS_DEFAULT_DEST'] + \"/\" + test_upload_path_prefix + \"/\" + _oid + \"/\" + os.path.basename(url)\n else:\n file_path = app.config['UPLOADS_DEFAULT_DEST'] + \"/\" + _oid + \"/\" + os.path.basename(url)\n\n print \"There was a problem deleting the file! Tried to reach path: \" + file_path \n \n # don't need to save after this since we're updating existing\n Metadata.objects(id=_oid).update_one(\n pull__attachments__id=attachmentId\n )\n \n md = Metadata.objects.get(id=_oid)\n \n # we'll just go ahead and not care if it doesn't exist\n except ValueError:\n pass\n\n\n except KeyError:\n try:\n keys = request.json.keys()\n keys_str = ', '.join(keys)\n except Exception as e:\n print \"Error: \" + str(e)\n return Response(\"Server error deleting file...\", status=500)\n\n return jsonify(\n {\n 'message':\n 'Key(s) ' + keys_str + ' not recognized. ' +\n 'Must contain \\'attachment\\''\n },\n status=400\n )\n\n return jsonify(dict(message=attachment + ' successfully (at/de)tached!', record=md))\n \n else:\n return Response('Bad or missing session id.', status=401)",
"def delete(self, uuid):\n order = db.session.query(Order).filter_by(uuid=uuid).first()\n if not order:\n return \"\", 404\n db.session.delete(order)\n db.session.commit()\n logging.info(f'Order with uuid {uuid} was deleted')\n return '', 204",
"def delete(self, _id):",
"def remove_data(uuid: str) -> None:\n\n filename = os.path.join(DATA_DIR, uuid)\n if os.path.exists(filename):\n logger.info('removed %s', filename)\n os.unlink(filename)\n else:\n logger.warning('%s does not exist to remove', filename)",
"def delete_device(cls, device_uuid):\n cls.dbdriver.delete_device(device_uuid)",
"def delete_file(filename):\n\tprint client.file_delete(filename)",
"def handle_delete(self, api, command):\n return self._make_request_from_command('DELETE', command)",
"def test_6d_delete_file(self):\n if (not GST.logged_in) or (not GST.data_testing_swift_mounted):\n raise unittest.SkipTest(\"Skipped for failed login or failed mounting container.\")\n elif not GST.deleting_data_test_ready:\n raise unittest.SkipTest(\"Skipped for failed to prepare deleting test.\")\n self.dismiss_dialogs()\n function = js_func[\"delete\"] % GST.gs_file_paths[\"file_to_delete_path\"]\n try:\n self.send_request(function, \"delete_data()\")\n except Exception as e:\n raise DeleteException(e.__str__()) \n try:\n response = self.get_response()\n assert \"Success\" in response\n self.refresh_page()\n except AssertionError:\n raise DeleteException(response)",
"def delete():",
"def delete_filesystem(self, filesystem_identifier, headers=None, **kwargs):\n logger.debug('Deleting filesystem %s ...', filesystem_identifier)\n resource = 'filesystem'\n params = get_params(parameters=locals(), exclusions=['self', 'filesystem_identifier', 'headers'])\n response = self._delete(endpoint=filesystem_identifier, params=params, headers=headers)\n return Command(self, response)",
"def delete(self, uid):\n raise NotImplementedError",
"def delete(self, uuid: str) -> bool: # dead: disable\n if uuid not in self._data:\n return False\n\n del self._data[uuid]\n return True",
"def delete_file(va_bucket, uuid):\n key = va_bucket.get_key(uuid)\n print(\" {0} deleted from VA\".format(uuid))\n va_bucket.delete_key(uuid)",
"def _delete(self, uuid):\n path = self.router.roles_by_uuid.format(uuid=uuid)\n return self.request(method=\"delete\", path=path, error_json_invalid=False)",
"def delete(self, audit_uuid):\n audit_query = AuditTable.delete().where(AuditTable.uuid == audit_uuid)\n if audit_query.execute() == 0:\n abort(404, \"Not Found\")\n else:\n return {}",
"def _delete(self, remote_filename):\n\n file_id = self.get_file_id(remote_filename)\n if file_id is None:\n raise BackendException(\n 'File \"%s\" cannot be deleted: it does not exist' % (\n remote_filename))\n response = self.http_client.put(self.metadata_url + 'trash/' + file_id)\n response.raise_for_status()\n del self.names_to_ids[remote_filename]",
"def delete_fid(self, tsuid):\n\n # Checks inputs\n check_type(value=tsuid, allowed_types=str, var_name=\"tsuid\", raise_exception=True)\n\n if tsuid == \"\":\n self.session.log.error(\"tsuid must not be empty\")\n raise ValueError(\"tsuid must not be empty\")\n\n response = self.send(root_url=self.session.dm_url + self.root_url,\n verb=GenericClient.VERB.DELETE,\n template=TEMPLATES['delete_fid'],\n uri_params={\n 'tsuid': tsuid\n })\n\n # in case of success, web app returns 2XX\n if response.status_code == 200:\n self.session.log.info(\"TSUID:%s - FID deleted\", tsuid)\n else:\n self.session.log.warning(\"TSUID [%s] - FID not deleted. Received status_code:%s\", tsuid,\n response.status_code)\n raise ValueError",
"def unmanaged_delete(task_id, url):\n\n PoolManager.db.query('DELETE FROM `unmanaged_deletions` WHERE `id` = %s', task_id)\n\n try:\n stat_result = gfal_exec('stat', (url,), return_value = True)\n except:\n return 0, None, None, 'stat error', ''\n\n if stat.S_ISDIR(stat_result.st_mode):\n # this is a directory\n result = gfal_exec('rmdir', (url,))\n else:\n result = gfal_exec('unlink', (url,))\n\n return (0,) + rmdir_result[1:]",
"def test_delete(client: FlaskClient):\n file = get_example_file(ExampleFileType.Txt)\n response_upload = util.upload_file(client, DEFAULT_USER, file)\n response_delete = util.delete_file(client, DEFAULT_USER, response_upload.json[\"id\"])\n assert response_delete.status == \"204 NO CONTENT\"\n response_download = util.download_file(\n client, DEFAULT_USER, response_upload.json[\"id\"]\n )\n assert response_download.status == \"404 NOT FOUND\"",
"def delete_volume(self, uid):\n try:\n volInfo = self.get_volume_info(uid)\n except SVCVolumeNotFound as ex:\n LOG.warn(_(\"No volume with UID %s found.\") % uid)\n # assume deleted if not found\n return\n\n volID = volInfo.get(SVC_KEY_VDISK_ID)\n self.remove_fcmapping(uid)\n cmd = \"svctask rmvdisk -force %s\" % (volID)\n self._svc_command(cmd)"
]
| [
"0.70867515",
"0.70867515",
"0.660941",
"0.6598334",
"0.65764475",
"0.6433648",
"0.6430364",
"0.63916093",
"0.6335144",
"0.6332568",
"0.6312674",
"0.6248714",
"0.623831",
"0.61949",
"0.6183798",
"0.6121713",
"0.61208045",
"0.6118135",
"0.6065186",
"0.6054746",
"0.6049135",
"0.6036227",
"0.60316753",
"0.6022776",
"0.6007599",
"0.60069054",
"0.5997636",
"0.5989282",
"0.5988218",
"0.5967816"
]
| 0.80184585 | 1 |
Handle a chunked or nonchunked upload. | def handle_upload(f, attrs):
# chunked = False
dest_folder = os.path.join(app.config['UPLOAD_DIRECTORY'], attrs['qquuid'])
dest = os.path.join(dest_folder, attrs['qqfilename'])
save_upload(f, dest) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _upload_chunk(self, final=False):\n out = self.fs.session.post(\n self.location,\n data=self.buffer.getvalue(),\n headers={\"content-type\": \"application/octet-stream\"},\n )\n out.raise_for_status()\n return True",
"def upload_chunk(self, request, **kwargs):\n import uuid\n\n self.method_check(request, allowed=[\"post\"])\n self.is_authenticated(request)\n\n if not self.check_dfo(request, kwargs[\"dfo_id\"]):\n return self.handle_error(\"Invalid object or access denied.\")\n\n checksum = request.headers.get(\"Checksum\", None)\n if checksum is None:\n checksum = request.META.get(\"Checksum\", None)\n if checksum is None:\n return self.handle_error(\"Missing 'Checksum' in header.\")\n\n content_range = request.headers.get(\"Content-Range\", None)\n if content_range is None:\n content_range = request.META.get(\"Content-Range\", None)\n if content_range is None:\n return self.handle_error(\"Missing 'Content-Range' in header.\")\n\n m = re.search(r\"^(\\d+)\\-(\\d+)\\/(\\d+)$\", content_range).groups()\n content_start = int(m[0])\n content_end = int(m[1])\n content_length = content_end-content_start\n if content_length > settings.CHUNK_MAX_SIZE:\n return self.handle_error(\"Chunk size is larger than max allowed.\")\n\n check = Chunk.objects.filter(\n dfo_id=kwargs[\"dfo_id\"],\n offset=content_start\n )\n if len(check) != 0:\n return self.handle_error(\"Chunk already uploaded.\")\n\n content_checksum = calc_checksum(settings.CHUNK_CHECKSUM, request.body)\n if content_checksum is None or content_checksum != checksum:\n return self.handle_error(\n \"Checksum does not match. {}:{}\".format(settings.CHUNK_CHECKSUM, content_checksum))\n\n if not os.path.exists(settings.CHUNK_STORAGE):\n try:\n os.mkdir(settings.CHUNK_STORAGE)\n except Exception as e:\n return self.handle_error(str(e))\n\n data_path = os.path.join(settings.CHUNK_STORAGE, kwargs[\"dfo_id\"])\n if not os.path.exists(data_path):\n try:\n os.makedirs(data_path, mode=0o770, exist_ok=True)\n os.chmod(data_path, 0o770)\n except Exception as e:\n return self.handle_error(str(e))\n\n chunk_id = str(uuid.uuid4())\n file_path = os.path.join(data_path, chunk_id)\n\n try:\n file = open(file_path, \"wb\")\n file.write(request.body)\n file.close()\n except Exception as e:\n return self.handle_error(str(e))\n\n dfo = DataFileObject.objects.get(id=kwargs[\"dfo_id\"])\n\n instrument = dfo.datafile.dataset.instrument\n if instrument is not None:\n instrument_id = instrument.id\n else:\n instrument_id = None\n\n try:\n chunk = Chunk.objects.create(\n chunk_id=chunk_id,\n dfo_id=kwargs[\"dfo_id\"],\n offset=content_start,\n size=content_length,\n instrument_id=instrument_id,\n user_id=request.user.id\n )\n except Exception as e:\n try:\n os.remove(file_path)\n except Exception as e:\n pass\n return self.handle_error(str(e))\n\n data = {\n \"success\": True,\n \"id\": chunk.id\n }\n\n return JsonResponse(data, status=200)",
"def handle_upload(f, attrs):\n\n # chunked = False\n print 'UPLOAD DIRECTORY:', UPLOAD_DIRECTORY\n dest_folder = os.path.join(UPLOAD_DIRECTORY, attrs['qquuid'])\n dest = os.path.join(dest_folder, attrs['qqfilename'])\n save_upload(f, dest)",
"def upload_chunked(self, chunk_size = 4 * 1024 * 1024):\n\n while self.offset < self.target_length:\n next_chunk_size = min(chunk_size, self.target_length - self.offset)\n if self.last_block == None:\n self.last_block = self.file_obj.read(next_chunk_size)\n\n try:\n (self.offset, self.upload_id) = self.client.upload_chunk(\n StringIO(self.last_block), next_chunk_size, self.offset, self.upload_id)\n self.last_block = None\n except ErrorResponse as e:\n reply = e.body\n if \"offset\" in reply and reply['offset'] != 0:\n if reply['offset'] > self.offset:\n self.last_block = None\n self.offset = reply['offset']",
"def upload_all_parts(self):\n if not self.upload_id:\n raise RuntimeError(\"Attempting to use a multipart upload that has not been initiated.\")\n\n if self.file.name != \"<stdin>\":\n size_left = file_size = os.stat(self.file.name)[ST_SIZE]\n nr_parts = file_size / self.chunk_size + (file_size % self.chunk_size and 1)\n debug(\"MultiPart: Uploading %s in %d parts\" % (self.file.name, nr_parts))\n else:\n debug(\"MultiPart: Uploading from %s\" % (self.file.name))\n\n\tself.chunk_size = self.s3.config.multipart_chunk_size_mb * 1024 * 1024\n\n seq = 1\n\tif self.file.name != \"<stdin>\":\n while size_left > 0:\n offset = self.chunk_size * (seq - 1)\n current_chunk_size = min(file_size - offset, self.chunk_size)\n size_left -= current_chunk_size\n labels = {\n 'source' : unicodise(self.file.name),\n 'destination' : unicodise(self.uri.uri()),\n 'extra' : \"[part %d of %d, %s]\" % (seq, nr_parts, \"%d%sB\" % formatSize(current_chunk_size, human_readable = True))\n }\n try:\n self.upload_part(seq, offset, current_chunk_size, labels)\n except:\n error(u\"Upload of '%s' part %d failed. Aborting multipart upload.\" % (self.file.name, seq))\n self.abort_upload()\n raise\n seq += 1\n else:\n while True:\n buffer = self.file.read(self.chunk_size)\n offset = self.chunk_size * (seq - 1)\n current_chunk_size = len(buffer)\n labels = {\n 'source' : unicodise(self.file.name),\n 'destination' : unicodise(self.uri.uri()),\n 'extra' : \"[part %d, %s]\" % (seq, \"%d%sB\" % formatSize(current_chunk_size, human_readable = True))\n }\n if len(buffer) == 0: # EOF\n break\n try:\n self.upload_part(seq, offset, current_chunk_size, labels, buffer)\n except:\n error(u\"Upload of '%s' part %d failed. Aborting multipart upload.\" % (self.file.name, seq))\n self.abort_upload()\n raise\n seq += 1\n\n debug(\"MultiPart: Upload finished: %d parts\", seq - 1)",
"def parse(self, stream, media_type=None, parser_context=None):\n\n parser_context = parser_context or {}\n request = parser_context['request']\n encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)\n meta = request.META\n upload_handlers = request.upload_handlers\n filename = self.get_filename(stream, media_type, parser_context)\n\n # Note that this code is extracted from Django's handling of\n # file uploads in MultiPartParser.\n content_type = meta.get('HTTP_CONTENT_TYPE',\n meta.get('CONTENT_TYPE', ''))\n try:\n content_length = int(meta.get('HTTP_CONTENT_LENGTH',\n meta.get('CONTENT_LENGTH', 0)))\n except (ValueError, TypeError):\n content_length = None\n\n if not filename:\n filename = 'autosave.zip'\n\n # See if the handler will want to take care of the parsing.\n for handler in upload_handlers:\n result = handler.handle_raw_input(None,\n meta,\n content_length,\n None,\n encoding)\n if result is not None:\n return DataAndFiles(None, {'file': result[1]})\n\n # This is the standard case.\n possible_sizes = [x.chunk_size for x in upload_handlers if x.chunk_size]\n chunk_size = min([2 ** 31 - 4] + possible_sizes)\n chunks = ChunkIter(stream, chunk_size)\n counters = [0] * len(upload_handlers)\n\n for handler in upload_handlers:\n try:\n handler.new_file(None, filename, content_type,\n content_length, encoding)\n except StopFutureHandlers:\n break\n\n for chunk in chunks:\n for i, handler in enumerate(upload_handlers):\n chunk_length = len(chunk)\n chunk = handler.receive_data_chunk(chunk, counters[i])\n counters[i] += chunk_length\n if chunk is None:\n break\n\n for i, handler in enumerate(upload_handlers):\n file_obj = handler.file_complete(counters[i])\n if file_obj:\n return DataAndFiles(None, {'file': file_obj})\n raise ParseError(\"FileUpload parse error - \"\n \"none of upload handlers can handle the stream\")",
"def handleContentChunk(data):",
"def handle_request_upload(self, msg):\n\n\t\tdirect_response = not msg.arguments or msg.arguments[0] in ('', '/')\n\t\tresult = []\n\t\tfor file_obj in msg.options:\n\t\t\ttmpfilename, filename, name = file_obj['tmpfile'], file_obj['filename'], file_obj['name']\n\n\t\t\t# limit files to tmpdir\n\t\t\tif not os.path.realpath(tmpfilename).startswith(TEMPUPLOADDIR):\n\t\t\t\traise BadRequest('invalid file: invalid path')\n\n\t\t\t# check if file exists\n\t\t\tif not os.path.isfile(tmpfilename):\n\t\t\t\traise BadRequest('invalid file: file does not exists')\n\n\t\t\t# don't accept files bigger than umc/server/upload/max\n\t\t\tst = os.stat(tmpfilename)\n\t\t\tmax_size = int(ucr.get('umc/server/upload/max', 64)) * 1024\n\t\t\tif st.st_size > max_size:\n\t\t\t\tos.remove(tmpfilename)\n\t\t\t\traise BadRequest('filesize is too large, maximum allowed filesize is %d' % (max_size,))\n\n\t\t\tif direct_response:\n\t\t\t\twith open(tmpfilename) as buf:\n\t\t\t\t\tb64buf = base64.b64encode(buf.read())\n\t\t\t\tresult.append({'filename': filename, 'name': name, 'content': b64buf})\n\n\t\tif direct_response:\n\t\t\tself.finished(msg.id, result)\n\t\telse:\n\t\t\tself.handle_request_command(msg)",
"def upload_chunk(self, file_obj, length, offset=0, upload_id=None):\n\n params = dict()\n\n if upload_id:\n params['upload_id'] = upload_id\n params['offset'] = offset\n\n url, ignored_params, headers = self.request(\"/chunked_upload\", params,\n method='PUT', content_server=True)\n\n try:\n reply = self.rest_client.PUT(url, file_obj, headers)\n return reply['offset'], reply['upload_id']\n except ErrorResponse as e:\n raise e",
"def handle_chunk_wrapper(self, status, name, content, file_info):\n out = self.output\n if out is not None:\n with out:\n print(\"handling chunk \" + repr(type(content)))\n self.handle_chunk(status, name, content, file_info)\n else:\n self.handle_chunk(status, name, content, file_info)",
"def chunkFileUpload(self, fp, chunksize=1024 * 4096):\n parts = int(math.ceil(fp.stat().st_size / float(chunksize)))\n err = False\n maxchunksize = 1024 * 1024 * 100\n if chunksize >= maxchunksize:\n print(\n 'not uploaded: defined chunksize {0} is bigger than the allowed maximum {1}'.format(chunksize, maxchunksize))\n return None\n\n part = 0\n for part, chunk in enumerate(self.chunkedread(fp, chunksize),1):\n logger.info('({2})uploading part {0} of {1}'.format(part, parts, fp.name))\n files = {'file': (str(fp.name), chunk)}\n res = self._post(self.fullUrl('/chunked_upload?chunk={0}').format(part), files=files)\n\n print('finish, uploaded part {0} of {1} '.format(part, parts))\n res = self._post(self.fullUrl('chunked_upload/commit?filename={0}'.format(fp.name)))\n return self.getFile(res['file']['selfUrl']), self.getObject(res['relatedObject']['selfUrl'])\n\n # relObj = res['relatedObject']\n # obj = self.getObject(relObj['selfUrl'])\n # return obj",
"def upload():\n return handle_upload(app, request)",
"def handle_uploaded_file(f, fname):\n with open(fname, 'wb+') as destination:\n for chunk in f.chunks():\n destination.write(chunk)",
"def upload(request):\n if request.method != \"POST\":\n return probe(request)\n\n md5chunk = request.args.get('md5chunk', False)\n md5total = request.args.get('md5total', False)\n\n chunk = int(request.args.get('chunk', 0))\n chunks = int(request.args.get('chunks', 0))\n\n if md5chunk and md5total:\n filename = upload_with_checksum(request, md5chunk, md5total, chunk, chunks)\n else:\n filename = upload_simple(request, chunk)\n\n return Response('%s uploaded' % filename)",
"def handle_read(self):\n try:\n chunk = self.recv(self.ac_in_buffer_size)\n except socket.error:\n self.handle_error()\n else:\n self.tot_bytes_received += len(chunk)\n if not chunk:\n self.transfer_finished = True\n #self.close() # <-- asyncore.recv() already do that...\n return\n # while we're writing on the file an exception could occur\n # in case that filesystem gets full; if this happens we\n # let handle_error() method handle this exception, providing\n # a detailed error message.\n self.file_obj.write(self.data_wrapper(chunk))",
"def _attempt_resumable_upload(self, key, fp, file_length, headers, cb,\r\n num_cb):\r\n (server_start, server_end) = self.SERVER_HAS_NOTHING\r\n conn = key.bucket.connection\r\n if self.tracker_uri:\r\n # Try to resume existing resumable upload.\r\n try:\r\n (server_start, server_end) = (\r\n self._query_server_pos(conn, file_length))\r\n self.server_has_bytes = server_start\r\n key=key\r\n if conn.debug >= 1:\r\n print 'Resuming transfer.'\r\n except ResumableUploadException, e:\r\n if conn.debug >= 1:\r\n print 'Unable to resume transfer (%s).' % e.message\r\n self._start_new_resumable_upload(key, headers)\r\n else:\r\n self._start_new_resumable_upload(key, headers)\r\n\r\n # upload_start_point allows the code that instantiated the\r\n # ResumableUploadHandler to find out the point from which it started\r\n # uploading (e.g., so it can correctly compute throughput).\r\n if self.upload_start_point is None:\r\n self.upload_start_point = server_end\r\n\r\n if server_end == file_length:\r\n # Boundary condition: complete file was already uploaded (e.g.,\r\n # user interrupted a previous upload attempt after the upload\r\n # completed but before the gsutil tracker file was deleted). Set\r\n # total_bytes_uploaded to server_end so we'll attempt to upload\r\n # no more bytes but will still make final HTTP request and get\r\n # back the response (which contains the etag we need to compare\r\n # at the end).\r\n total_bytes_uploaded = server_end\r\n else:\r\n total_bytes_uploaded = server_end + 1\r\n fp.seek(total_bytes_uploaded)\r\n conn = key.bucket.connection\r\n\r\n # Get a new HTTP connection (vs conn.get_http_connection(), which reuses\r\n # pool connections) because httplib requires a new HTTP connection per\r\n # transaction. (Without this, calling http_conn.getresponse() would get\r\n # \"ResponseNotReady\".)\r\n http_conn = conn.new_http_connection(self.tracker_uri_host,\r\n conn.is_secure)\r\n http_conn.set_debuglevel(conn.debug)\r\n\r\n # Make sure to close http_conn at end so if a local file read\r\n # failure occurs partway through server will terminate current upload\r\n # and can report that progress on next attempt.\r\n try:\r\n return self._upload_file_bytes(conn, http_conn, fp, file_length,\r\n total_bytes_uploaded, cb, num_cb)\r\n except (ResumableUploadException, socket.error):\r\n resp = self._query_server_state(conn, file_length)\r\n if resp.status == 400:\r\n raise ResumableUploadException('Got 400 response from server '\r\n 'state query after failed resumable upload attempt. This '\r\n 'can happen if the file size changed between upload '\r\n 'attempts', ResumableTransferDisposition.ABORT)\r\n else:\r\n raise\r\n finally:\r\n http_conn.close()",
"def upload_file_handle(\n self,\n bucket: str,\n object_name: str,\n src_file_handle: typing.BinaryIO):\n raise NotImplementedError()",
"def _upload_file_bytes(self, conn, http_conn, fp, file_length,\r\n total_bytes_uploaded, cb, num_cb):\r\n buf = fp.read(self.BUFFER_SIZE)\r\n if cb:\r\n if num_cb > 2:\r\n cb_count = file_length / self.BUFFER_SIZE / (num_cb-2)\r\n elif num_cb < 0:\r\n cb_count = -1\r\n else:\r\n cb_count = 0\r\n i = 0\r\n cb(total_bytes_uploaded, file_length)\r\n\r\n # Build resumable upload headers for the transfer. Don't send a\r\n # Content-Range header if the file is 0 bytes long, because the\r\n # resumable upload protocol uses an *inclusive* end-range (so, sending\r\n # 'bytes 0-0/1' would actually mean you're sending a 1-byte file).\r\n put_headers = {}\r\n if file_length:\r\n range_header = self._build_content_range_header(\r\n '%d-%d' % (total_bytes_uploaded, file_length - 1),\r\n file_length)\r\n put_headers['Content-Range'] = range_header\r\n # Set Content-Length to the total bytes we'll send with this PUT.\r\n put_headers['Content-Length'] = str(file_length - total_bytes_uploaded)\r\n http_request = AWSAuthConnection.build_base_http_request(\r\n conn, 'PUT', path=self.tracker_uri_path, auth_path=None,\r\n headers=put_headers, host=self.tracker_uri_host)\r\n http_conn.putrequest('PUT', http_request.path)\r\n for k in put_headers:\r\n http_conn.putheader(k, put_headers[k])\r\n http_conn.endheaders()\r\n\r\n # Turn off debug on http connection so upload content isn't included\r\n # in debug stream.\r\n http_conn.set_debuglevel(0)\r\n while buf:\r\n http_conn.send(buf)\r\n total_bytes_uploaded += len(buf)\r\n if cb:\r\n i += 1\r\n if i == cb_count or cb_count == -1:\r\n cb(total_bytes_uploaded, file_length)\r\n i = 0\r\n buf = fp.read(self.BUFFER_SIZE)\r\n if cb:\r\n cb(total_bytes_uploaded, file_length)\r\n if total_bytes_uploaded != file_length:\r\n # Abort (and delete the tracker file) so if the user retries\r\n # they'll start a new resumable upload rather than potentially\r\n # attempting to pick back up later where we left off.\r\n raise ResumableUploadException(\r\n 'File changed during upload: EOF at %d bytes of %d byte file.' %\r\n (total_bytes_uploaded, file_length),\r\n ResumableTransferDisposition.ABORT)\r\n resp = http_conn.getresponse()\r\n body = resp.read()\r\n # Restore http connection debug level.\r\n http_conn.set_debuglevel(conn.debug)\r\n\r\n if resp.status == 200:\r\n return resp.getheader('etag') # Success\r\n # Retry timeout (408) and status 500 and 503 errors after a delay.\r\n elif resp.status in [408, 500, 503]:\r\n disposition = ResumableTransferDisposition.WAIT_BEFORE_RETRY\r\n else:\r\n # Catch all for any other error codes.\r\n disposition = ResumableTransferDisposition.ABORT\r\n raise ResumableUploadException('Got response code %d while attempting '\r\n 'upload (%s)' %\r\n (resp.status, resp.reason), disposition)",
"def handle_potential_upload(self):\n try:\n self.fileobj = FileGenerator(\n self.current_upload, event_handler=self.handle_upload_event)\n return self.maybe_upload()\n\n except exceptions.UploaderMissingFile as err:\n self.log(\"LOCAL_FILE_MISSING\", level=WARNING)\n if not self.current_upload.get(\"id\"):\n Backend.fail_unsigned(\n self.current_upload, location=self.location)\n else:\n Backend.fail(\n self.current_upload,\n bytes_downloaded=0,\n location=self.location)\n\n except exceptions.UploaderFileModified as err:\n self.log(\"LOCAL_FILE_CHANGED msg=%s\" % err, level=WARNING)\n Backend.fail(\n self.current_upload,\n bytes_downloaded=0,\n location=self.location)",
"def upload(request):\n # We pass the 'file_id' in the query string as a GET parameter. If\n # we read it from the POSTed data, WebOb would read all POSTed\n # data, which has various features and traps (like setting the\n # \"Content-Length\" header to 0) that we do not need since we are\n # going to read the data ourselves anyway.\n file_id = request.GET['X-Progress-ID']\n input_file, file_size, filename = get_file_from_request(request)\n session = DBSession()\n u = session.query(Upload).filter_by(id=file_id).one()\n upload_dir = request.registry.settings['poulda.upload_dir']\n user_id = authenticated_userid(request)\n # We use a temporary path to detect unfinished uploads (post\n # mortem, not in the application itself).\n path = os.path.join(upload_dir, '_'.join((user_id, file_id)))\n u.tmp_path = path\n u.started = int(time.time())\n u.size = file_size\n u.state = u'uploading'\n session.flush()\n # We need to commit the transaction so that changes to the Upload\n # object can be seen by the other threads (which will serve the\n # 'progress' JSON view called by the upload page).\n transaction.commit()\n with open(path, 'w') as output:\n # We must read only 'file_size' bytes from the 'input_file',\n # not all of it since it also contains the MIME boundary.\n copy_to_file(input_file, file_size, output)\n final_path = filename[1 + filename.rfind(os.sep):]\n final_path = os.path.join(upload_dir, final_path)\n os.rename(path, final_path)\n session = DBSession()\n u = session.query(Upload).filter_by(id=file_id).one()\n u.state = u'done'\n u.final_path = unicode(final_path, 'utf-8')\n return HTTPFound(location='success')",
"def handle_request(self,host,path,data=b''):\n\t\tif data:\n\t\t\tself.response_code(4,\"Uploads are not accepted.\")\n\t\t\treturn\n\t\tif not hasattr(self,\"root\"):\n\t\t\tself.response_code(5,\"Server is unable to handle requests at this time due to misconfiguration.\")\n\t\t\treturn\n\t\tself.root = os.path.abspath(self.root)\n\t\tif not (prefix:=os.path.abspath(os.path.join(self.root,host))).startswith(self.root):\n\t\t\tself.response_code(4,\"Cowardly refusing to serve file outside of root.\")\n\t\t\treturn\n\t\tif not (filepath:=os.path.abspath(os.path.join(prefix,unquote(path.lstrip(\"/\"))))).startswith(prefix):\n\t\t\tself.response_code(4,\"Cowardly refusing to serve file outside of root.\")\n\t\t\treturn\n\t\tif not os.path.exists(filepath):\n\t\t\tself.response_code(4,\"Not Found\")\n\t\t\treturn\n\t\tif os.path.isdir(filepath):\n\t\t\tif os.path.exists(os.path.join(filepath,\"index.gmi\")):\n\t\t\t\tfilepath = os.path.join(filepath,\"index.gmi\")\n\t\t\telse:\n\t\t\t\tself.response_code(5,\"Cowardly refusing to generate folder listing.\")\n\t\t\t\treturn\n\t\text = os.path.splitext(filepath)[1]\n\t\tmimetype = mimetypes.guess_type(filepath,False)\n\t\tif ext in self.OVERRIDE_MIMETYPES:\n\t\t\tmimetype = self.OVERRIDE_MIMETYPES[ext]\n\t\tmimetype = mimetype or \"application/octet-stream\"\n\t\twith open(filepath,\"rb\") as f:\n\t\t\tself.response_code(2,mimetype)\n\t\t\tshutil.copyfileobj(f,self.wfile)",
"def ischunked() :",
"def ischunked() :",
"def do_part_upload(args):\r\n # Multiprocessing args lameness\r\n bucket_name, mpu_id, fname, i, start, size, secure, max_tries, current_tries = args\r\n logger.debug(\"do_part_upload got args: %s\" % (args,))\r\n\r\n # Connect to S3, get the MultiPartUpload\r\n s3 = boto.connect_s3(calling_format=OrdinaryCallingFormat())\r\n s3.is_secure = secure\r\n bucket = s3.lookup(bucket_name)\r\n mpu = None\r\n for mp in bucket.list_multipart_uploads():\r\n if mp.id == mpu_id:\r\n mpu = mp\r\n break\r\n if mpu is None:\r\n raise Exception(\"Could not find MultiPartUpload %s\" % mpu_id)\r\n\r\n # Read the chunk from the file\r\n fp = open(fname, 'rb')\r\n fp.seek(start)\r\n data = fp.read(size)\r\n fp.close()\r\n if not data:\r\n raise Exception(\"Unexpectedly tried to read an empty chunk\")\r\n\r\n def progress(x,y):\r\n logger.debug(\"Part %d: %0.2f%%\" % (i+1, 100.*x/y))\r\n\r\n try:\r\n # Do the upload\r\n t1 = time.time()\r\n mpu.upload_part_from_file(StringIO(data), i+1, cb=progress)\r\n\r\n # Print some timings\r\n t2 = time.time() - t1\r\n s = len(data)/1024./1024.\r\n logger.info(\"Uploaded part %s (%0.2fM) in %0.2fs at %0.2fMBps\" % (i+1, s, t2, s/t2))\r\n except Exception, err:\r\n logger.debug(\"Retry request %d of max %d times\" % (current_tries, max_tries))\r\n if (current_tries > max_tries):\r\n logger.error(err)\r\n else:\r\n time.sleep(3)\r\n current_tries += 1\r\n do_part_download(bucket_name, mpu_id, fname, i, start, size, secure, max_tries, current_tries)",
"def handle_put_progress(self, filegen):\n # print \"bytes so-far: \", filegen.bytes_read\n\n if self.maybe_touch():\n self.log(\"UPLOAD_PROGRESS\", level=INFO)\n self.touch()\n Backend.touch(\n self.current_upload,\n bytes_downloaded=filegen.bytes_read,\n location=self.location)",
"async def tus_upload_part(request: web.Request) -> web.Response:\n ctx: Context = request.app[\"ctx\"]\n secret = ctx.local_config[\"storage-proxy\"][\"secret\"]\n async with check_params(\n request,\n t.Dict(\n {\n t.Key(\"token\"): tx.JsonWebToken(\n secret=secret, inner_iv=upload_token_data_iv\n ),\n }\n ),\n read_from=CheckParamSource.QUERY,\n ) as params:\n token_data = params[\"token\"]\n async with ctx.get_volume(token_data[\"volume\"]) as volume:\n headers = await prepare_tus_session_headers(request, token_data, volume)\n vfpath = volume.mangle_vfpath(token_data[\"vfid\"])\n upload_temp_path = vfpath / \".upload\" / token_data[\"session\"]\n\n async with AsyncFileWriter(\n target_filename=upload_temp_path,\n access_mode=\"ab\",\n max_chunks=DEFAULT_INFLIGHT_CHUNKS,\n ) as writer:\n while not request.content.at_eof():\n chunk = await request.content.read(DEFAULT_CHUNK_SIZE)\n await writer.write(chunk)\n\n current_size = Path(upload_temp_path).stat().st_size\n if current_size >= int(token_data[\"size\"]):\n target_path = vfpath / token_data[\"relpath\"]\n upload_temp_path.rename(target_path)\n try:\n loop = asyncio.get_running_loop()\n await loop.run_in_executor(\n None, lambda: upload_temp_path.parent.rmdir()\n )\n except OSError:\n pass\n headers[\"Upload-Offset\"] = str(current_size)\n return web.Response(status=204, headers=headers)",
"def complete_upload(self, request, **kwargs):\n\n self.method_check(request, allowed=[\"get\"])\n self.is_authenticated(request)\n\n if not self.check_dfo(request, kwargs[\"dfo_id\"]):\n return self.handle_error(\"Invalid object or access denied.\")\n\n dfo = DataFileObject.objects.get(id=kwargs[\"dfo_id\"])\n\n if not dfo.verified:\n # Async task as we can't wait until file is ready\n tasks.complete_chunked_upload.apply_async(args=[dfo.id])\n\n data = {\n \"success\": True,\n \"verified\": dfo.verified\n }\n\n return JsonResponse(data, status=200)",
"def upload(conn, localpath, remotepath, filter = None, ignore_invalid = False, chunk_size = 16000):\n if os.path.isdir(localpath):\n upload_dir(conn, localpath, remotepath, filter, chunk_size)\n elif os.path.isfile(localpath):\n upload_file(conn, localpath, remotepath, chunk_size)\n else:\n if not ignore_invalid:\n raise ValueError(\"cannot upload %r\" % (localpath,))",
"async def _multipart_upload_from_buffer(self):\n # check to see if bucket needs to be created\n if self._create_bucket:\n # check whether the bucket exists\n bucket_list = await self._get_bucket_list()\n if not self._bucket in bucket_list:\n await self._conn_obj.conn.create_bucket(Bucket=self._bucket)\n\n # if the current part is 1 we have to create the multipart upload\n if self._current_part == 1:\n response = await self._conn_obj.conn.create_multipart_upload(\n Bucket = self._bucket,\n Key = self._path\n )\n self._upload_id = response['UploadId']\n # we need to keep a track of the multipart info\n self._multipart_info = {'Parts' : []}\n\n # upload from a buffer - do we need to split into more than one\n # multiparts?\n new_buffer = []\n for buffer_part in range(0, len(self._buffer)):\n # is the current part of the buffer larger than the maximum\n # upload size? split if it is\n data_buf = self._buffer[buffer_part]\n data_len = data_buf.tell()\n if data_len >= self._part_size:\n data_buf.seek(0)\n data_pos = 0\n # split the file up\n while data_pos < data_len:\n new_buffer.append(io.BytesIO())\n # copy the data - don't overstep the buffer\n if data_pos + self._part_size >= data_len:\n sub_data = data_buf.read(data_len-data_pos)\n else:\n sub_data = data_buf.read(\n self._part_size\n )\n new_buffer[-1].write(sub_data)\n # increment to next\n data_pos += self._part_size\n\n # free the old memory\n self._buffer[buffer_part].close()\n else:\n # copy the old buffer into a new one\n self._buffer[buffer_part].seek(0)\n new_buffer.append(io.BytesIO(self._buffer[buffer_part].read()))\n\n # close other buffers first\n for b in self._buffer:\n b.close()\n self._buffer = new_buffer\n\n tasks = []\n\n for buffer_part in range(0, len(self._buffer)):\n # seek in the BytesIO buffer to get to the beginning after the\n # writing\n self._buffer[buffer_part].seek(0)\n # upload here\n # schedule the uploads\n event_loop = asyncio.get_event_loop()\n task = event_loop.create_task(self._conn_obj.conn.upload_part(\n Bucket=self._bucket,\n Key=self._path,\n UploadId=self._upload_id,\n PartNumber=self._current_part + buffer_part,\n Body=self._buffer[buffer_part]\n ))\n tasks.append(task)\n\n # await the completion of the uploads\n res = await asyncio.gather(*tasks)\n for buffer_part in range(0, len(self._buffer)):\n # insert into the multipart info list of dictionaries\n part = res[buffer_part]\n self._multipart_info['Parts'].append(\n {\n 'PartNumber' : self._current_part + buffer_part,\n 'ETag' : part['ETag']\n }\n )\n\n # add the total number of uploads to the current part\n self._current_part += len(self._buffer)\n\n # reset all the byte buffers and their positions\n for buffer_part in range(0, len(self._buffer)):\n self._buffer[buffer_part].close()\n self._buffer = [io.BytesIO()]\n self._seek_pos = 0",
"def handle(self):\n\t\ttry:\n\t\t\trequest_line = self.rfile.readline().decode(\"ascii\")\n\t\t\tassert request_line.endswith(\"\\r\\n\"), \"Request line must end in CRLF\"\n\t\t\tparts = request_line.strip().split()\n\t\t\tassert len(parts)==3, \"Invalid request line\"\n\t\t\thost, path, content_length = parts\n\t\t\tif (content_length:=int(content_length))>0:\n\t\t\t\tdata = self.rfile.read(content_length)\n\t\t\telse:\n\t\t\t\tdata = b''\n\t\t\tself.handle_request(host,path,data)\n\t\texcept AssertionError as e:\n\t\t\tself.response_code(4,e.args[0])"
]
| [
"0.6564833",
"0.65228426",
"0.6468114",
"0.6433403",
"0.64023376",
"0.63730204",
"0.6338926",
"0.6337131",
"0.6185726",
"0.61249465",
"0.6094075",
"0.6055695",
"0.5978826",
"0.59438723",
"0.58829886",
"0.5869195",
"0.58663183",
"0.5866303",
"0.5789348",
"0.5735959",
"0.5715981",
"0.5700395",
"0.5700395",
"0.56830883",
"0.56605417",
"0.5634351",
"0.5615459",
"0.56122446",
"0.56018746",
"0.55851626"
]
| 0.6543689 | 1 |
Save an upload. Uploads are stored in media/uploads | def save_upload(f, path):
if not os.path.exists(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
with open(path, 'wb+') as destination:
destination.write(f.read()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _save(self, name, content):\n if not self.oauth_token:\n raise AuthenticationError(\"You must be authenticated with oAuth \"\n \"for upload files.\")\n params = {\n 'title': name,\n # 'description': description,\n # 'tags': tags,\n # 'is_public': is_public,\n 'is_public': 1,\n # 'is_friend': is_friend,\n # 'is_family': is_family,\n # 'safety_level': safety_level,\n # 'content_type': content_type,\n # 'hidden': hidden\n }\n response = self.oauth_session.post(self.API_POST_URL, params=params,\n files={'photo': content.file})\n xmldoc = minidom.parseString(response.content)\n rsp = xmldoc.getElementsByTagName('rsp')[0]\n if rsp.getAttribute('stat') == 'fail':\n msg = xmldoc.getElementsByTagName('err')[0].getAttribute('msg')\n raise FileSaveError(msg)\n photo_id = xmldoc.getElementsByTagName('photoid')[0].firstChild.nodeValue\n return photo_id",
"def write_upload_to_file(photo_file, upload_path):\n fss = FileSystemStorage()\n filename = fss.save(upload_path + photo_file.name, photo_file)\n uploaded_file_url = fss.path(filename)\n return uploaded_file_url",
"def api_upload():\n return make_response(file_manager.save_uploaded_file(), 200)",
"def upload(ctx: click.Context, **kwargs):\n root_commands.cmd_upload(ctx.obj, **kwargs)",
"def upload(self, filename, file_path):\n return",
"def save_uploaded_file(uploaded_file_object):\n client = _get_client()\n key = client.key(_FILE_ENTITY, uploaded_file_object.filename)\n entity = datastore.Entity(key)\n entity['url'] = uploaded_file_object.url\n client.put(entity)",
"def put_upload(self):\n # print \"starting upload...\", self.current_upload['filepath']\n self.touch()\n self.log(\"STARTING_UPLOAD\", level=INFO)\n try:\n Backend.put_file(self.fileobj, self.current_upload[\"gcs_url\"])\n except exceptions.FilePutError as err:\n self.handle_put_error(err, self.fileobj)\n raise",
"def upload(self):\n if not self.prepare():\n Settings.err_print(\"unable to upload file - {}\".format(self.get_title()))\n return False\n self.backup()\n self.delete()\n return True",
"def file_upload():\n\n click.secho('*** Uploading image...', fg='green')\n uploaded = _uploaded_file('cover.jpg')\n click.secho(json.dumps(uploaded, indent=2, sort_keys=True), fg='yellow')\n\n click.secho('*** Creating a Picture document for it...', fg='green')\n picture = _make_document('picture', title='cover image', sys_filename=uploaded['path'])\n click.secho(json.dumps(picture, indent=2, sort_keys=True), fg='yellow')\n\n click.secho('*** Attaching it to a Blueray as cover...', fg='green')\n slp = _make_document('movie', title='Silver Linings Playbook')\n blueray = _make_document('blueray', movie_id=slp['_id'], cover_id=picture['_id'])\n click.secho(json.dumps(blueray, indent=2, sort_keys=True), fg='yellow')",
"def upload():\n file = None\n if 'file' in request.files:\n file = request.files['file']\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return json_response(\n message=\"Upload successful\",\n result=\"/v/{}\".format(filename)\n )\n return json_response(\n message=\"Invalid filename or extension (jpg, png, gif)\",\n status_code=500\n )",
"def upload_file(self, file_path, file_name, output_path):",
"def upload_image(request):\n image_uploaded = request.FILES[\"image_uploaded\"]\n # Should check if the file exists already before saving it\n destination = open('/home/tanas/' + image_uploaded.name, \"wb+\")\n for chunk in image_uploaded.chunks():\n destination.write(chunk)\n destination.close()\n return Response({'received request': \"File saved\"})",
"def save(self) -> None:\n self.save_metadata()\n self.save_files()",
"def upload():\r\n\r\n if not os.path.isdir(TO_SEGMENT):\r\n os.mkdir(TO_SEGMENT)\r\n else:\r\n print(\"could not create upload directory: {}\".format(TO_SEGMENT))\r\n print(request.files.getlist(\"file\"))\r\n\r\n for upload in request.files.getlist(\"file\"):\r\n filename = upload.filename\r\n destination = \"/\".join([TO_SEGMENT, filename])\r\n upload.save(destination)\r\n\r\n return redirect(url_for('get_gallery'))",
"def save_image_to_media(image, image_name):\n print('u1')\n # Set image name saved\n image_name_save = image_name + '.' + str(imghdr.what(image))\n # Dir save\n print('u2')\n\n fs = FileSystemStorage(location=settings.IMAGE_USER)\n # Save image\n print('u3')\n\n filename = fs.save(image_name_save, image)\n # Url dir save\n print('u4')\n uploaded_file_url = fs.url(filename)\n print('u5')\n full_path_image = settings.IMAGE_PATH_STATIC + uploaded_file_url\n return full_path_image",
"def save(self, path):\n pass",
"def save(self, path):\n pass",
"def save(self, path):\n pass",
"def upload():\n return handle_upload(app, request)",
"def file_upload(form_instance, **kwargs):\n cleaned_data = form_instance.cleaned_data\n\n file_objects = [\n f for f in cleaned_data.values() if isinstance(f, InMemoryUploadedFile)\n ]\n\n try:\n upload_path = cleaned_data.pop(kwargs[\"upload_path_field\"])\n except KeyError:\n raise exceptions.MissingActionParam(\"file_upload\", \"upload_path_field\")\n\n full_upload_path = os.path.join(settings.MEDIA_ROOT, upload_path)\n\n # Creates the dir path if it does not already exist\n if not os.path.exists(full_upload_path):\n os.makedirs(full_upload_path)\n\n for file_object in file_objects:\n file_path = increment_file_name(\n os.path.join(full_upload_path, file_object.name)\n )\n with open(file_path, \"wb+\") as destination:\n for chunk in file_object.chunks():\n destination.write(chunk)",
"def upload():\n form = request.form\n\n # Create a unique \"session ID\" for this particular batch of uploads.\n upload_key = str(uuid4())\n\n # Is the upload using Ajax, or a direct POST by the form?\n is_ajax = False\n if form.get(\"__ajax\", None) == \"true\":\n is_ajax = True\n\n # Target folder for these uploads.\n target = app.config['UPLOAD_FOLDER'] + \"/{}\".format(upload_key)\n try:\n os.mkdir(target)\n except:\n if is_ajax:\n return ajax_response(False, \"Couldn't create upload directory: {}\".format(target))\n else:\n return \"Couldn't create upload directory: {}\".format(target)\n\n for image_upload in request.files.getlist(\"file\"):\n filename = secure_filename(image_upload.filename)\n destination = \"/\".join([target, filename])\n print(\"Accept incoming file:\", filename)\n print(\"Save it to:\", destination)\n image_upload.save(destination)\n upload_image.delay(destination)\n\n if is_ajax:\n return ajax_response(True, upload_key)\n else:\n return redirect(\"/\")",
"def _save(self, name, content):\n if not self.file_overwrite:\n return super(OverwriteFileSystemStorage, self)._save(name, content)\n\n full_path = self.path(name)\n\n # Create any intermediate directories that do not exist.\n # Note that there is a race between os.path.exists and os.makedirs:\n # if os.makedirs fails with EEXIST, the directory was created\n # concurrently, and we can continue normally. Refs #16082.\n directory = os.path.dirname(full_path)\n if not os.path.exists(directory):\n try:\n os.makedirs(directory)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n if not os.path.isdir(directory):\n raise IOError(\"%s exists and is not a directory.\" % directory)\n\n # This file has a file path that we can move.\n if hasattr(content, 'temporary_file_path'):\n file_move_safe(content.temporary_file_path(), full_path)\n content.close()\n\n # This is a normal uploadedfile that we can stream.\n else:\n flags = (os.O_WRONLY | os.O_CREAT | getattr(os, 'O_BINARY', 0))\n # The current umask value is masked out by os.open!\n fd = os.open(full_path, flags, 0o666)\n _file = None\n try:\n locks.lock(fd, locks.LOCK_EX)\n for chunk in content.chunks():\n if _file is None:\n mode = 'wb' if isinstance(chunk, bytes) else 'wt'\n _file = os.fdopen(fd, mode)\n _file.write(chunk)\n finally:\n locks.unlock(fd)\n if _file is not None:\n _file.close()\n else:\n os.close(fd)\n\n if settings.FILE_UPLOAD_PERMISSIONS is not None:\n os.chmod(full_path, settings.FILE_UPLOAD_PERMISSIONS)\n\n return name",
"def upload(self, upload_request):\n raise NotImplementedError",
"def handle_upload(f, attrs):\n\n # chunked = False\n print 'UPLOAD DIRECTORY:', UPLOAD_DIRECTORY\n dest_folder = os.path.join(UPLOAD_DIRECTORY, attrs['qquuid'])\n dest = os.path.join(dest_folder, attrs['qqfilename'])\n save_upload(f, dest)",
"def save(self, filename):\n pass",
"def upload():\n global FILE_NAME\n target = os.path.join(APP_ROOT, \"images\")\n print(target)\n\n if not os.path.isdir(target):\n os.mkdir(target)\n\n for file in request.files.getlist(\"file\"):\n print(file)\n filename = file.filename\n destination = \"/\".join([target, filename])\n FILE_NAME = destination\n file.save(destination)\n return render_template(\"complete.html\")",
"def saveAs(self):\n self.saveFile()",
"def save():",
"def save(self):\n im = Image.open(self.picture)\n output = BytesIO()\n im.thumbnail((350, 350))\n im.save(output, format='JPEG', quality=100)\n output.seek(0)\n self.picture = InMemoryUploadedFile(output, 'ImageField', \"%s.jpg\" % self.picture.name.split('.')[0],\n 'image/jpeg', sys.getsizeof(output), None)\n super(Tire, self).save()",
"def save(self, path: str):\n pass"
]
| [
"0.66148895",
"0.65704787",
"0.6502347",
"0.6467279",
"0.6402686",
"0.6367206",
"0.6337269",
"0.62911373",
"0.6290135",
"0.6253598",
"0.6241659",
"0.6228346",
"0.61645436",
"0.61511374",
"0.6148766",
"0.6096052",
"0.6096052",
"0.6096052",
"0.6071183",
"0.5982553",
"0.5977057",
"0.59676355",
"0.59602267",
"0.5955679",
"0.5926908",
"0.59249943",
"0.5922541",
"0.5915296",
"0.59131455",
"0.5912109"
]
| 0.7024745 | 1 |
A POST request. Validate the form and then handle the upload based on the POSTed data. Does not handle extra parameters yet. | def post(self):
if validate(request.form):
handle_upload(request.files['qqfile'], request.form)
return make_response(200, {"success": True})
else:
return make_response(400, {"error": "Invalid request"}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def post(self, request, *args, **kwargs):\n form = self.get_form()\n if form.validate():\n return self.form_valid(form)\n else:\n return self.form_invalid(form)",
"def post(self, request, *args, **kwargs):\n form = self.get_form(self.form_class)\n if form.is_valid():\n return self.form_valid(form)\n else:\n return self.form_invalid(form)",
"def _handle_post_request(self):\n form = cgi.FieldStorage(\n fp=self.rfile,\n headers=self.headers,\n environ={'REQUEST_METHOD': 'POST'})\n\n if self.path == '/URLRequest':\n # First we check, whether the formular has been filled by\n # something behaving like a bot\n if form.has_key('URL'):\n self._send_homepage('<p class=\"warning\">Please check your input</p>')\n return\n else:\n url = form['real_URL'].value if form.has_key('real_URL') else None\n tmp = self._insert_url_to_db(url)\n if tmp:\n try:\n blocked = self._db.is_hash_blocked(tmp)\n if tmp < 0:\n self._send_database_problem()\n return\n elif blocked:\n self._send_blocked_page(blocked[3])\n return\n else:\n self._send_return_page(tmp)\n return\n except YuDatabaseError:\n self._send_database_problem()\n return\n else:\n # There was a general issue with URL\n self._send_homepage('''<p class=\"warning\">Please check your input.</p>''')\n return\n elif self.path == '/ContactUs':\n if form.has_key('URL'):\n # Here we might have a bot who likes to send the webmaster some spam\n # who most likely will be not amused about.\n template_filename = self._get_config_template('contactUsResult')\n text = read_template(\n template_filename,\n title='',\n header='Mail NOT sent',\n msg='There was an issue with your request. Are you a bot? '\n '<a href=\"/ContactUs\">Please try again</a>.')\n else:\n try:\n email = form['email'].value\n subj = form['subject'].value\n descr = form['request'].value\n if self._send_mail(subj, descr, email):\n template_filename = self._get_config_template('contactUsResult')\n text = read_template(\n template_filename,\n title='',\n header='Mail sent',\n msg=\"Your request has been sent. You will receive an answer soon.\")\n else:\n self._send_internal_server_error()\n return\n except KeyError:\n template_filename = self._get_config_template('contactUsResult')\n text = read_template(\n template_filename,\n title='',\n header='Mail NOT sent',\n msg='It appers you did not fill out all needed fields.\\\n <a href=\"/ContactUs\">Please try again</a>.')\n\n elif self.path == '/Show':\n short_url = form['ShortURL'].value if form.has_key('ShortURL') else None\n if short_url != None and short_url.find(\"yaturl.net\") > -1:\n tmp = short_url.rfind(\"/\")\n if tmp > -1 and short_url != \"\":\n tmp = tmp + 1\n short_url = short_url[tmp:]\n if short_url != None and short_url.isalnum():\n try:\n result = self._db.get_link_from_db(short_url)\n except YuDatabaseError:\n self._send_database_problem()\n return\n template_filename = self._get_config_template('showpage')\n if result:\n new_url = '<p><a href=\"%(result)s\">%(result)s</a></p>' % \\\n {'result': result}\n else:\n new_url = '<p class=\"warning\">No URL found for this string. Please double check your\\\n <a href=\"/ShowURL\">input and try again</a></p>'\n\n stats = self._db.get_statistics_for_hash(short_url)\n\n text = read_template(\n template_filename,\n title=SERVER_NAME,\n header=SERVER_NAME,\n msg=new_url,\n stat=stats,\n statspage=\"/stats/\" + short_url)\n else:\n self._send_404()\n return\n\n else:\n self._send_404()\n return\n\n self._send_response(text, 200)",
"def post(self, request, *args, **kwargs):\n form = self.get_form()\n if form.is_valid():\n return self.form_valid(form)\n else:\n self.form_invalid_init(form=form)\n self.form_invalid_add_global_errormessages(form=form)\n return self.form_invalid(form)",
"def _post_request(self):\n # check if input file size was not exceeded\n maxsize = configuration.get_config_value('server', 'maxrequestsize')\n maxsize = configuration.get_size_mb(maxsize) * 1024 * 1024\n if self.http_request.content_length > maxsize:\n raise FileSizeExceeded('File size for input exceeded.'\n ' Maximum request size allowed: %i megabytes' % maxsize / 1024 / 1024)\n\n try:\n doc = lxml.etree.fromstring(self.http_request.get_data())\n except Exception as e:\n if PY2:\n raise NoApplicableCode(e.message)\n else:\n raise NoApplicableCode(e.msg)\n\n operation = doc.tag\n request_parser = self._post_request_parser(operation)\n request_parser(doc)",
"def post(self):\n if validate(request.form):\n handle_upload(request.files['qqfile'], request.form)\n filepath = 'static/images/{}/{}'.format(request.form['qquuid'], request.form['qqfilename'])\n session['img_upload_filepath'] = filepath\n return make_response(200, {\"success\": True})\n else:\n return make_response(400, {\"error\": \"Invalid request\"})",
"def action_POST(self):\n\n # Use the content-length header, though being user-defined input it's not really trustworthy.\n try:\n l = int(self.headers.get('content-length', 0))\n if l < 0:\n # Parsed properly, but some joker put in a negative number.\n raise ValueError()\n except ValueError:\n return self.serve_content(\"Illegal Content-Length header value: %s\" % self.headers.get('content-length', 0), 400)\n\n m = args[TITLE_MAX_LENGTH]\n if m and l > m:\n return self.serve_content('Maximum length: %d' % m, code = 413)\n\n form = cgi.FieldStorage(\n fp=self.rfile,\n headers=self.headers,\n environ={\n 'REQUEST_METHOD':'POST',\n 'CONTENT_TYPE':self.headers['Content-Type'],\n }\n )\n\n if 'file' not in form:\n return self.serve_content('No file provided.', 400)\n\n filename = form['file'].filename\n if not filename:\n # No FileName provided\n return self.serve_content('No file name.', 400)\n elif not re.match(r'^[^/\\\\]+$', filename) or filename in ['.', '..']:\n # Validate filename\n return self.serve_content('Invalid file name.', 400)\n\n if not os.path.isdir(self.file_path):\n return self.send_error(404)\n\n path_save = os.path.join(self.file_path, filename)\n\n if os.path.exists(path_save) and not os.path.isfile(path_save):\n return self.serve_content('Destination exists as a non-file', code = 406)\n\n if args[TITLE_UPLOAD_NO_CLOBBER] and os.path.isfile(path_save):\n return self.serve_content('File already exists.', code = 302)\n\n try:\n with open(path_save, 'wb') as output_file:\n # TODO: How to handle a user lying in their Content-Length header?\n self.copyobj(form['file'].file, output_file, False)\n except IOError:\n if os.path.isfile(path_save):\n os.remove(path_save)\n return self.serve_content('Failed to save file.', code = 500)\n\n return self.serve_content(self.render_file_table(self.file_path), code = 200)",
"def do_POST(self): # pylint: disable=invalid-name\n self.handle_request()",
"def do_POST(self):\n self._try_to_process_request(self._handle_post_request)",
"def _DoFormProcessing(self, request, mr):\n self._DoCommonRequestProcessing(request, mr)\n\n if self.CHECK_SECURITY_TOKEN:\n xsrf.ValidateToken(\n request.POST.get('token'), mr.auth.user_id, request.path)\n\n redirect_url = self.ProcessFormData(mr, request.POST)\n\n # Most forms redirect the user to a new URL on success. If no\n # redirect_url was returned, the form handler must have already\n # sent a response. E.g., bounced the user back to the form with\n # invalid form fields higlighted.\n if redirect_url:\n self.redirect(redirect_url, abort=True)\n else:\n assert self.response.body",
"def post(self):\n data = self.post_parser.parse_args()\n\n try:\n LOGGER.debug('Trying to upload file to storage')\n self.storage.upload(data.file)\n LOGGER.debug('The file was uploaded with success')\n return {\n 'filename': data.file.filename,\n 'message': 'The file was uploaded with success'\n }\n except BaseException:\n abort(500, message='The file was not uploaded')\n LOGGER.error('A generic exception has occurred.', exc_info=True)",
"def post(self):\n folder_path = \"{0}/user_uploads/{1}/{2}/\".format(self.__APP_PATH__, current_user.net_id, request.headers[\"folder_name\"])\n\n request_submitted = path.exists(\"{0}request.submitted\".format(folder_path))\n request_processed = path.exists(\"{0}request.processed\".format(folder_path))\n request_voided = path.exists(\"{0}request.voided\".format(folder_path))\n\n if not request_submitted and not request_processed and not request_voided:\n if 'file' not in request.files or \"folder_name\" not in request.headers:\n return jsonify({\"success\": False, \"type\": \"error\", \"message\": \"Invalid request format.\"})\n\n file = request.files['file']\n\n if file and allowed_file(file.filename, self.__ALLOWED_EXTENSIONS__):\n try:\n Path(folder_path).mkdir(parents=True, exist_ok=True)\n filename = secure_filename(file.filename)\n file.save(path.join(folder_path, filename))\n\n return jsonify({\"success\": True, \"type\": \"success\", \"message\": \"File successfully uploaded.\", \"filename\": filename})\n except Exception as e:\n print(e)\n\n return jsonify({\"success\": False, \"type\": \"error\", \"message\": \"An error occurred while saving the file.\"})\n\n return jsonify({\"success\": False, \"type\": \"error\", \"message\": \"Invalid file or file extension.\"})\n return jsonify({\"success\": False, \"type\": \"error\", \"message\": \"Status of the request has changed.\"})",
"def post(self, request, *args, **kwargs):\n self.object = None\n form_class = self.get_form_class()\n form = self.get_form(form_class)\n if form.is_valid():\n return self.form_valid(form, request)\n else:\n return self.form_invalid(form, request)",
"def post_config_upload(self, req, **_kwargs):\n if req.POST:\n meters = req.json.get('meters', None)\n groups = req.json.get('groups', None)\n flows = req.json.get('flows', None)\n\n rm = self.api.process_meter_upload(meters) if meters else ''\n gm = self.api.process_group_upload(groups) if groups else ''\n fm = self.api.process_flow_upload(flows) if flows else ''\n res = Response()\n s = \"{}, {}, {}\".format(rm, gm, fm)\n res.text = s if PYTHON3 else unicode(s, \"utf-8\")\n return res\n\n return Response(status=400) # bad request",
"def do_POST(self):\n ctype, pdict = cgi.parse_header(self.headers.getheader('content-type'))\n self.body = cgi.FieldStorage(fp=self.rfile,\n headers=self.headers, environ = {'REQUEST_METHOD':'POST'},\n keep_blank_values = 1, strict_parsing = 1)\n # throw away additional data [see bug #427345]\n while select.select([self.rfile._sock], [], [], 0)[0]:\n if not self.rfile._sock.recv(1):\n break\n self.handle_data()",
"def post_flow_form(self, req, **_kwargs):\n if req.POST:\n res = Response()\n s = self.api.process_flow_message(req.json)\n res.text = s if PYTHON3 else unicode(s, \"utf-8\")\n return res\n return Response(status=400) # bad request",
"def post(self):\n file_ = self.verify_param('file', cgi.FieldStorage)\n data, filemask = self.build_post_data(file_)\n return data, filemask",
"def post(cls, flow_name: str):\n data = file_schema.load(request.files) # {\"file\": FileStorage}\n try:\n file_path = uploads.save_file(data[\"file\"], folder=flow_name)\n basename = uploads.get_basename(file_path)\n return {\"message\": gettext(\"file_uploaded\").format(basename)}, 200\n \n except UploadNotAllowed:\n extension = uploads.get_extension(data[\"file\"])\n return {\"message\": gettext(\"file_illegal_extension\").format(extension)}, 400",
"def post(self):\n send_slack_log('Entered /slack/submit')\n send_slack_log('Request info:')\n send_slack_log(str(request.form))\n if request.form.get('payload') is None:\n send_slack_log('Invalid request: no payload')\n return\n else:\n return handle_interaction(json.loads(request.form['payload']))",
"def do_POST(self):\n try:\n if self.path.endswith(\"/restaurant/new\"):\n ctype, pdict = cgi.parse_header(self.headers.getheader('Content-type'))\n if ctype == 'multipart/form-data':\n fields = cgi.parse_multipart(self.rfile, pdict)\n restaurantArray = fields.get('restaurant')\n\n # create a new Restaurant\n newRestaurantObject = Restaurant()\n newRestaurantObject.save(restaurantArray[0])\n\n self.send_response(301)\n self.send_header('Content-Type', 'text/html')\n self.send_header('Location', '/restaurants')\n self.end_headers()\n return\n except:\n pass",
"def post(self, request, *args, **kw):\n logger.debug(\"POST request on UploadHandler\")\n try:\n if request.data is None or 'site' not in request.data or 'data' not in request.data:\n raise BadRequestException(\"Empty data or site\")\n site = get_object_or_404(Site, pk=request.data.get('site', None))\n logger.error('POST request at UploadHandler for site {}'.format(site))\n if request.user is None or not request.user.profile.canUpload or not site.isActive:\n raise BadRequestException(\"Unauthorized\")\n Timer(0, lambda: UploadHandler._save_to_database(request)).start()\n return Response(None, status=status.HTTP_204_NO_CONTENT)\n except BadRequestException as ex:\n logger.debug(ex)\n return Response(status=status.HTTP_400_BAD_REQUEST, data=ex.strerror)\n except Exception as ex:\n logger.error(ex)\n return Response(status=status.HTTP_500_INTERNAL_SERVER_ERROR, data=ex.strerror)",
"def do_POST(self):\r\n self._send_handler_response('POST')",
"def post(self, request, *args, **kwargs):\n\n\t\tself.object = None\n\t\tform_class = self.get_form_class()\n\t\tform = self.get_form(form_class)\n\n\t\tClaimStatusFormSet = claim_forms.get_claim_status_formset(extra=1)\n\t\tclaim_status_formset = ClaimStatusFormSet(self.request.POST, self.request.FILES, prefix=\"status\")\n\n\t\tClaimPhotoFormSet = claim_forms.get_claim_photos_formset(extra=1)\n\t\tclaim_photos_formset = ClaimPhotoFormSet(self.request.POST, self.request.FILES, prefix=\"photos\")\n\n\t\textra_forms = {\n\t\t\t'form': form,\n\t\t\t'status_formset': claim_status_formset,\n\t\t\t'photos_formset': claim_photos_formset,\n\t\t}\n\n\t\tif form.is_valid() and claim_status_formset.is_valid() and claim_photos_formset.is_valid():\n\t\t\treturn self.form_valid(**extra_forms)\n\t\telse:\n\t\t\tmessages.add_message(self.request, messages.ERROR, \"Error saving Claim information\",\n\t\t\t\t\t\t\t\t extra_tags=\"alert alert-danger\")\n\t\t\treturn self.form_invalid(**extra_forms)",
"def form_valid(self, form, request):\n self.object = form.save()\n if (self.object.filetype == FileUpload.HY3_FILE):\n process_hy3_upload.delay(self.object.id)\n if request.is_ajax():\n return HttpResponse('OK')\n else:\n return HttpResponseRedirect(self.get_success_url())",
"def post(self, request, *args, **kwargs):\n\n\t\tself.object = self.get_object()\n\t\tform_class = self.get_form_class()\n\t\tform = self.get_form(form_class)\n\n\t\tClaimStatusFormSet = claim_forms.get_claim_status_formset(extra=1)\n\t\tclaim_status_formset = ClaimStatusFormSet(self.request.POST, self.request.FILES, instance=self.object,\n\t\t\t\t\t\t\t\t\t\t\t\t prefix=\"status\")\n\n\t\tClaimPhotoFormSet = claim_forms.get_claim_photos_formset(extra=1)\n\t\tclaim_photos_formset = ClaimPhotoFormSet(self.request.POST, self.request.FILES, instance=self.object,\n\t\t\t\t\t\t\t\t\t\t\t\t prefix=\"photos\")\n\n\t\textra_forms = {\n\t\t\t'form': form,\n\t\t\t'status_formset': claim_status_formset,\n\t\t\t'photos_formset': claim_photos_formset,\n\t\t}\n\n\t\tif form.is_valid() and claim_status_formset.is_valid() and claim_photos_formset.is_valid():\n\t\t\treturn self.form_valid(**extra_forms)\n\t\telse:\n\t\t\tmessages.add_message(self.request, messages.ERROR, \"Error saving Claim information\",\n\t\t\t\t\t\t\t\t extra_tags=\"alert alert-danger\")\n\t\t\treturn self.form_invalid(**extra_forms)",
"def post(self, request):\n # GET REQUEST DATA\n fid = request.POST.get('fid', False)\n uuid = request.POST.get('uuid', False)\n title_text = request.POST.get('title', False)\n body = request.POST.get('body', False)\n photo = request.FILES.get('photo', False) # FOR STORAGE\n wfsxml = request.POST.get('wfsxml', False) # FOR GEOSERVER\n data = {\n 'uuid': uuid,\n 'title_text': title_text,\n 'body': body,\n 'wfsxml': wfsxml\n }\n # VALIDATE FORM\n form = GeoPostForm(data, request.FILES)\n logger.info(\"\\ninstantiate Geopost form\\n\")\n # IF FORM VALIDATION ERROR\n if not form.is_valid():\n return server_error(request.body)\n #context = self.getContext(form)\n #return render(request, 'geopost/entry.html', context)\n else:\n pass\n # GET CLEAN VALUES\n uuid = form.cleaned_data['uuid']\n wfsxml = form.cleaned_data['wfsxml']\n # UPLOAD PHOTO TO BUCKET\n # if editing existing entry, first delete existing photo\n if fid:\n delete_from_bucket(uuid, self.imageBucket)\n else:\n pass\n photo.open('rb')\n error = upload_to_bucket(\n photo, self.imageBucket, photo.content_type, uuid)\n photo.close()\n # IF ERROR UPLOADING IMAGE\n if error:\n return server_error(error)\n else:\n pass\n # MAKE GEOSERVER WFS TRANSACTION\n error = post_to_geoserver(wfsxml, self.wfsURL)\n # ALL GOOD\n if not error:\n return HttpResponseRedirect(reverse('geopost_home'))\n # IF WFS TRANSACTION ERROR\n else:\n delete_from_bucket(uuid, self.imageBucket)\n return server_error(error)",
"def post(self) :\n\n self.msg = \"\"\n error = True\n importer = Importer(DataAccessor(self.addErrorMessage))\n\n try :\n target = self.request.POST.get('newFile').file.read()\n importer.parse(StringIO(target))\n\n except IOError :\n self.msg = \"Please select a valid file to import\"\n\n except Usage, err : \n self.msg = err.msg\n\n except AttributeError:\n self.msg = \"Please select a valid file to import\"\n\n if not self.msg : \n self.msg = 'Import was successful'\n error = False\n\n if len(self.msg) > 512 : \n self.msg = self.msg[0:512] + \"...\"\n \n setSessionMessageByRequest(self, self.msg, error)\n self.redirect('/admin')",
"def upload(context, request):\n if request.method == 'POST':\n if not hasattr(request.POST['content'], 'file'):\n raise RuntimeError('No file attached')\n\n fieldstorage = request.POST['content']\n filename = fieldstorage.filename\n logger.info(\"%s posted\", filename)\n\n with bm(\"%s released\" %filename):\n dest = path(request.file_root) / request.namer(filename)\n dest.write_bytes(fieldstorage.file.read())\n try:\n request.registry.notify(event.PackageAdded(request.index, path=dest))\n request.response.headers['X-Swalow-Status'] = 'SUCCESS'\n try:\n for ep in pkg_resources.iter_entry_points('cheeseprism.on_upload'):\n func = ep.load()\n func(context, request, dest)\n except Exception as e:\n logger.exception('Entry point %r failed', ep)\n return request.response\n except :\n logger.exception(\"Processing of %s failed\", filename)\n raise\n return {}",
"def upload_validated(request):\n if 'file' not in request.files:\n flash('No file part')\n return False \n if not request.form.get('username', None):\n flash('No username part')\n return False \n torrent_file = request.files['file']\n if torrent_file.filename == '':\n flash('No selected file')\n return False \n if torrent_file and check_allowed_extension(torrent_file.filename):\n return True",
"def do_POST(self):\n logger.info(\"Received a POST request: {}\".format(self.path))\n path = self.strip_path()\n if path == SET_OP:\n self.send_response(self.process_set_request())\n elif path == COMMIT_OP:\n self.send_response(self.process_commit_request())\n else:\n logger.error(\"Invalid POST operation {} was received.\".format(path))\n self.send_response(404)"
]
| [
"0.6935135",
"0.6721188",
"0.67184585",
"0.67082894",
"0.6695506",
"0.66680175",
"0.665402",
"0.6633827",
"0.6602961",
"0.65494746",
"0.6471828",
"0.6467104",
"0.64652175",
"0.6428505",
"0.636845",
"0.63206375",
"0.62829965",
"0.62138766",
"0.6189452",
"0.61750424",
"0.6154288",
"0.6129773",
"0.60944694",
"0.6072724",
"0.6072217",
"0.6071217",
"0.6057382",
"0.60534173",
"0.602684",
"0.60191345"
]
| 0.7618754 | 0 |
Handles a filesystem delete based on UUID. | def handle_delete(uuid):
location = os.path.join(app.config['UPLOAD_DIRECTORY'], uuid)
print(uuid)
print(location)
shutil.rmtree(location) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete(self, uuid):\n try:\n handle_delete(uuid)\n return make_response(200, {\"success\": True})\n except Exception, e:\n return make_response(400, {\"success\": False, \"error\": e.message})",
"def delete(self, uuid):\n try:\n handle_delete(uuid)\n return make_response(200, {\"success\": True})\n except Exception, e:\n return make_response(400, {\"success\": False, \"error\": e.message})",
"def delete_record(uuid):\n\n collection[uuid].delete()\n return redirect('/')",
"async def delete_file(location_id: LocationID, file_id: StorageFileID, user_id: UserID):",
"def delete(self, uuid):\n\n\t\treturn self._delete(\"/tag/%s\" % base.getid(uuid), \"tag\")",
"def delete(self, uuid: str) -> None:\n\n if not isinstance(uuid, str):\n raise TypeError(\"UUID must be type str\")\n if not validators.uuid(uuid):\n raise ValueError(\"UUID does not have proper form\")\n\n try:\n response = self._connection.delete(\n path=\"/objects/\" + uuid,\n )\n except RequestsConnectionError as conn_err:\n raise RequestsConnectionError('Object could not be deleted.') from conn_err\n if response.status_code == 204:\n # Successfully deleted\n return\n raise UnexpectedStatusCodeException(\"Delete object\", response)",
"def database_volume_delete(volume_uuid):\n db = database_get()\n session = db.session()\n query = session.query(model.Volume)\n query.filter(model.Volume.uuid == volume_uuid).delete()\n session.commit()",
"def delete(self, store, uuid):\n\n session = get_session()\n session.begin()\n\n stored_file = self._retrieve(store.object_type, uuid)\n\n try:\n session.delete(stored_file)\n session.commit()\n finally:\n session.close()",
"def volume_delete_by_storage(context, storage_id):\n _volume_get_query(context).filter_by(storage_id=storage_id).delete()",
"def delete(self, *route, **req_data):\n # Read the file ID from the request, with safety.\n try:\n file_id = UUID(req_data['file_id']).hex\n except ValueError:\n return Response(status='400 Bad Request')\n\n # Retrieve and delete the file.\n stored_files = StoredFile.collection()\n to_delete = stored_files.first(id=file_id)\n\n log_activity('%s deleted file %s'%(\n context.user.link, to_delete.filename\n ))\n\n stored_files.delete(to_delete)\n get_bucket().delete(to_delete.data_id)\n\n return Response(status='200 OK')",
"def delete_file(_oid, attachmentId=None):\n md = Metadata.objects.get_or_404(pk=_oid)\n attachment = ''\n test_upload_path_prefix = \"uploadedfiles\"\n test_environment = False\n\n username = _authenticate_user_from_session(request)\n\n if username:\n try:\n try:\n md = Metadata.objects.get(id=_oid)\n \n try:\n # if developing locally we'll also want to remove file\n url = filter(\n lambda a: str(a.id) == attachmentId, md.attachments\n ).pop().url\n if str(os.environ['FLASKCONFIG']) == 'testing' or str(os.environ['FLASKCONFIG']) == 'development':\n test_environment = True\n os.remove(\n os.path.join(\n app.config['UPLOADS_DEFAULT_DEST'],\n test_upload_path_prefix,\n _oid,\n os.path.basename(url)\n )\n )\n else:\n os.remove(\n os.path.join(\n app.config['UPLOADS_DEFAULT_DEST'],\n _oid,\n os.path.basename(url)\n )\n )\n except Exception:\n #Throw exception specific for test or non-test enviroment\n if test_environment:\n file_path = app.config['UPLOADS_DEFAULT_DEST'] + \"/\" + test_upload_path_prefix + \"/\" + _oid + \"/\" + os.path.basename(url)\n else:\n file_path = app.config['UPLOADS_DEFAULT_DEST'] + \"/\" + _oid + \"/\" + os.path.basename(url)\n\n print \"There was a problem deleting the file! Tried to reach path: \" + file_path \n \n # don't need to save after this since we're updating existing\n Metadata.objects(id=_oid).update_one(\n pull__attachments__id=attachmentId\n )\n \n md = Metadata.objects.get(id=_oid)\n \n # we'll just go ahead and not care if it doesn't exist\n except ValueError:\n pass\n\n\n except KeyError:\n try:\n keys = request.json.keys()\n keys_str = ', '.join(keys)\n except Exception as e:\n print \"Error: \" + str(e)\n return Response(\"Server error deleting file...\", status=500)\n\n return jsonify(\n {\n 'message':\n 'Key(s) ' + keys_str + ' not recognized. ' +\n 'Must contain \\'attachment\\''\n },\n status=400\n )\n\n return jsonify(dict(message=attachment + ' successfully (at/de)tached!', record=md))\n \n else:\n return Response('Bad or missing session id.', status=401)",
"def delete(self, uuid):\n order = db.session.query(Order).filter_by(uuid=uuid).first()\n if not order:\n return \"\", 404\n db.session.delete(order)\n db.session.commit()\n logging.info(f'Order with uuid {uuid} was deleted')\n return '', 204",
"def delete(self, _id):",
"def remove_data(uuid: str) -> None:\n\n filename = os.path.join(DATA_DIR, uuid)\n if os.path.exists(filename):\n logger.info('removed %s', filename)\n os.unlink(filename)\n else:\n logger.warning('%s does not exist to remove', filename)",
"def delete_device(cls, device_uuid):\n cls.dbdriver.delete_device(device_uuid)",
"def delete_file(filename):\n\tprint client.file_delete(filename)",
"def handle_delete(self, api, command):\n return self._make_request_from_command('DELETE', command)",
"def test_6d_delete_file(self):\n if (not GST.logged_in) or (not GST.data_testing_swift_mounted):\n raise unittest.SkipTest(\"Skipped for failed login or failed mounting container.\")\n elif not GST.deleting_data_test_ready:\n raise unittest.SkipTest(\"Skipped for failed to prepare deleting test.\")\n self.dismiss_dialogs()\n function = js_func[\"delete\"] % GST.gs_file_paths[\"file_to_delete_path\"]\n try:\n self.send_request(function, \"delete_data()\")\n except Exception as e:\n raise DeleteException(e.__str__()) \n try:\n response = self.get_response()\n assert \"Success\" in response\n self.refresh_page()\n except AssertionError:\n raise DeleteException(response)",
"def delete():",
"def delete_filesystem(self, filesystem_identifier, headers=None, **kwargs):\n logger.debug('Deleting filesystem %s ...', filesystem_identifier)\n resource = 'filesystem'\n params = get_params(parameters=locals(), exclusions=['self', 'filesystem_identifier', 'headers'])\n response = self._delete(endpoint=filesystem_identifier, params=params, headers=headers)\n return Command(self, response)",
"def delete(self, uid):\n raise NotImplementedError",
"def delete(self, uuid: str) -> bool: # dead: disable\n if uuid not in self._data:\n return False\n\n del self._data[uuid]\n return True",
"def delete_file(va_bucket, uuid):\n key = va_bucket.get_key(uuid)\n print(\" {0} deleted from VA\".format(uuid))\n va_bucket.delete_key(uuid)",
"def _delete(self, uuid):\n path = self.router.roles_by_uuid.format(uuid=uuid)\n return self.request(method=\"delete\", path=path, error_json_invalid=False)",
"def delete(self, audit_uuid):\n audit_query = AuditTable.delete().where(AuditTable.uuid == audit_uuid)\n if audit_query.execute() == 0:\n abort(404, \"Not Found\")\n else:\n return {}",
"def _delete(self, remote_filename):\n\n file_id = self.get_file_id(remote_filename)\n if file_id is None:\n raise BackendException(\n 'File \"%s\" cannot be deleted: it does not exist' % (\n remote_filename))\n response = self.http_client.put(self.metadata_url + 'trash/' + file_id)\n response.raise_for_status()\n del self.names_to_ids[remote_filename]",
"def delete_fid(self, tsuid):\n\n # Checks inputs\n check_type(value=tsuid, allowed_types=str, var_name=\"tsuid\", raise_exception=True)\n\n if tsuid == \"\":\n self.session.log.error(\"tsuid must not be empty\")\n raise ValueError(\"tsuid must not be empty\")\n\n response = self.send(root_url=self.session.dm_url + self.root_url,\n verb=GenericClient.VERB.DELETE,\n template=TEMPLATES['delete_fid'],\n uri_params={\n 'tsuid': tsuid\n })\n\n # in case of success, web app returns 2XX\n if response.status_code == 200:\n self.session.log.info(\"TSUID:%s - FID deleted\", tsuid)\n else:\n self.session.log.warning(\"TSUID [%s] - FID not deleted. Received status_code:%s\", tsuid,\n response.status_code)\n raise ValueError",
"def unmanaged_delete(task_id, url):\n\n PoolManager.db.query('DELETE FROM `unmanaged_deletions` WHERE `id` = %s', task_id)\n\n try:\n stat_result = gfal_exec('stat', (url,), return_value = True)\n except:\n return 0, None, None, 'stat error', ''\n\n if stat.S_ISDIR(stat_result.st_mode):\n # this is a directory\n result = gfal_exec('rmdir', (url,))\n else:\n result = gfal_exec('unlink', (url,))\n\n return (0,) + rmdir_result[1:]",
"def test_delete(client: FlaskClient):\n file = get_example_file(ExampleFileType.Txt)\n response_upload = util.upload_file(client, DEFAULT_USER, file)\n response_delete = util.delete_file(client, DEFAULT_USER, response_upload.json[\"id\"])\n assert response_delete.status == \"204 NO CONTENT\"\n response_download = util.download_file(\n client, DEFAULT_USER, response_upload.json[\"id\"]\n )\n assert response_download.status == \"404 NOT FOUND\"",
"def delete_volume(self, uid):\n try:\n volInfo = self.get_volume_info(uid)\n except SVCVolumeNotFound as ex:\n LOG.warn(_(\"No volume with UID %s found.\") % uid)\n # assume deleted if not found\n return\n\n volID = volInfo.get(SVC_KEY_VDISK_ID)\n self.remove_fcmapping(uid)\n cmd = \"svctask rmvdisk -force %s\" % (volID)\n self._svc_command(cmd)"
]
| [
"0.70867515",
"0.70867515",
"0.660941",
"0.6598334",
"0.65764475",
"0.6433648",
"0.6430364",
"0.63916093",
"0.6335144",
"0.6332568",
"0.6312674",
"0.6248714",
"0.623831",
"0.61949",
"0.6183798",
"0.6121713",
"0.61208045",
"0.6118135",
"0.6065186",
"0.6054746",
"0.6049135",
"0.6036227",
"0.60316753",
"0.6022776",
"0.6007599",
"0.60069054",
"0.5997636",
"0.5989282",
"0.5988218",
"0.5967816"
]
| 0.80184585 | 0 |
If we downscale the image, the intrinsic matrix also needs to be changed. | def rescale_intrinsic(self):
        # scale focal length and principal points wrt image resizing
if self.downscale > 1:
self.K = self.K_orig.copy()
self.K[0, 0] /= float(self.downscale)
self.K[1, 1] /= float(self.downscale)
self.K[0, 2] /= float(self.downscale)
self.K[1, 2] /= float(self.downscale)
self.intrinsic = self.K
else:
self.K = self.intrinsic = self.K_orig.copy() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def rescale(self, img):\n\n if self.scale != 1:\n return imutils.resize(img, width=int(img.shape[1] * self.scale))\n else:\n return img",
"def _scale(self, image):\n\n if image.GetWidth() != self._width or image.GetHeight()!= self._height:\n image.Rescale(self._width, self._height)\n \n return image",
"def Rescale(self):\r\n picWidth,picHeight = self.oldSize = self.GetSizeTuple()\r\n bitmap = self.scaled = self.bitmap\r\n if not bitmap: return\r\n imgWidth,imgHeight = bitmap.GetWidth(),bitmap.GetHeight()\r\n if self.scaling == 2 or (self.scaling == 1 and (imgWidth > picWidth or imgHeight > picHeight)):\r\n image = bitmap.ConvertToImage()\r\n factor = min(1.0*picWidth/imgWidth,1.0*picHeight/imgHeight)\r\n newWidth,newHeight = int(factor*imgWidth),int(factor*imgHeight)\r\n self.scaled = image.Scale(newWidth,newHeight).ConvertToBitmap()\r\n #self.scaled = image.Scale(newWidth,newHeight,wx.IMAGE_QUALITY_HIGH ).ConvertToBitmap()\r",
"def adjust(self, image):\n ...",
"def rescale_image(image, scale=0.50):\r\n \r\n wi = int(image.shape[1]*scale)\r\n hei = int(image.shape[0]*scale)\r\n dimension = (wi, hei)\r\n return cv.resize(image, dimension, interpolation = cv.INTER_AREA)",
"def rescale_image(image: np.ndarray, scale: float) -> np.ndarray:\n (height, width) = image.shape[:2]\n new_dims = (int(width * scale), int(height * scale))\n return cv2.resize(image, new_dims, interpolation=cv2.INTER_CUBIC)",
"def scale_down(image:np.array)->np.array:\n src = image\n scale_percent = 25\n width = int(src.shape[1] * scale_percent / 100)\n height = int(src.shape[0] * scale_percent / 100)\n dsize = (width, height)\n output = cv2.resize(src, dsize)\n return output",
"def imgProcessing(self):\n if (self.image_width > 320):\n self.cv_image = imutils.resize(self.cv_image, width = 320)\n else:\n pass\n\n \"\"\" optional -- image-mirrored \"\"\"\n # self.cv_image = cv2.flip(self.cv_image, 1)",
"def rescale(self):\n # forecast on real data, don't need this anymore\n pass",
"def transform(self, previousimage):",
"def resize_img(self,scale=1):\n reduced = self.image.reduce((scale,scale))\n reduced.save(\"../edited/{}\".format(self.image.filename))\n\n reduced = Image.open(\"../edited/{}\".format(self.image.filename))\n return reduced",
"def transform(self, mat: TxMatrix) -> None:\n super().transform(mat)\n self.scale = self.scale * mat.scale",
"def scale(self):",
"def _preprocessing(self, input_image):\n if self.resize:\n input_image = self._np_resize_image(input_image,\n self.input_size,\n dtype='int')\n image = self._np_transpose(input_image)\n image = self._np_normalize(image)\n image = self._np_flip_n_cat(image)\n return image",
"def _do_adaptive_shrinking(self, im):\n im_sz = list(im.shape)\n dim = len(im_sz)\n dim_to_pad = [dim_sz%self.adaptive_padding!=0 and dim_sz>3 for dim_sz in im_sz]\n dim_rem = [dim_sz//self.adaptive_padding for dim_sz in im_sz]\n new_dim_sz = [(dim_rem[i])*self.adaptive_padding if dim_to_pad[i] else im_sz[i] for i in range(dim)]\n before_id = [(-new_dim_sz[i] +im_sz[i]+1)//2 for i in range(dim)]\n after_id = [new_dim_sz[i] + before_id[i] for i in range(dim)]\n new_img = im[before_id[0]:after_id[0],before_id[1]:after_id[1],before_id[2]:after_id[2]].copy()\n return new_img",
"def _calc_matrix(self):\n\t\tz = self.zoom\n\t\talloc = self.allocation\n\t\tif self.image:\n\t\t\tiw, ih = self.image.get_width(), self.image.get_height()\n\t\telse:\n\t\t\tiw, ih = 0, 0\n#\t\tif __debug__: print self._vadj.lower, self._vadj.value, self._vadj.upper\n\t\t\n\t\ti2w = cairo.Matrix(\n\t\t\tz,0,\n\t\t\t0,z,\n\t\t\t-self._hadj.value if alloc.width < iw*z else (alloc.width - iw*z)/2, \n\t\t\t-self._vadj.value if alloc.height < ih*z else (alloc.height - ih*z)/2,\n\t\t\t)\n\t\t\n\t\tself._i2w_matrix = i2w\n\t\t\n\t\tw2i = cairo.Matrix(*i2w) #copy\n\t\tw2i.invert()\n\t\tself._w2i_matrix = w2i",
"def modified(self):\n if self._allow_rescale:\n return super(ImageScaling, self).modified()\n else:\n return 1",
"def __scale_image(image, scale: float):\r\n height, width, _ = image.shape\r\n\r\n width_scaled = int(np.ceil(width * scale))\r\n height_scaled = int(np.ceil(height * scale))\r\n\r\n im_data = cv2.resize(image, (width_scaled, height_scaled), interpolation=cv2.INTER_AREA)\r\n\r\n # Normalize the image's pixels\r\n im_data_normalized = (im_data - 127.5) * 0.0078125\r\n\r\n return im_data_normalized",
"def _apply_transform(self, img: np.ndarray): \n img = self.transform(image=img)[\"image\"]\n return img",
"def _apply_transform(self, img: np.ndarray): \n img = self.transform(image=img)[\"image\"]\n return img",
"def scale_invert(self):",
"def reScaleLandsat(self,img):\n \n\t\tthermalBand = ee.List(['thermal'])\n\t\tthermal = ee.Image(img).select(thermalBand).multiply(10)\n \n\t\totherBands = ee.Image(img).bandNames().removeAll(thermalBand)\n\t\tscaled = ee.Image(img).select(otherBands).divide(0.0001)\n \n\t\timage = ee.Image(scaled.addBands(thermal)).int16()\n \n\t\treturn image.copyProperties(img)",
"def rescale_image(self, img_file, new_width, new_height, model_path, file_description):\n cwd = os.getcwd()\n self.new_width = new_width\n self.new_height = new_height\n self.extract_file_name(img_file)\n shutil.copy(img_file, os.path.join('utils_dfn/temp', self.file_name_with_ext))\n self.run_padding()\n self.run_dfn(model_path)\n self.restore_to_correct_size(file_description)\n clean()",
"def update_img(self):\n self.img = np.array(self.image)",
"def mold_image(image, config=None):\n if np.max(image) <= 1 and np.min(image) >= 0:\n image[:,:,:3] = image[:,:,:3]*2.0 - 1.0\n elif np.min(image) >= 0:\n image[:, :, :3] = image[:, :, :3] * (1.0/127.5) - 1.0\n return image.astype(np.float32)",
"def rescale_image_01(image):\n # scale image to from [0, 255] to [0.0, 1.0]\n image = image.astype(np.float32)\n return image / 255",
"def transform_image(self):\n im = cv2.imread(\"result.png\", 0)\n im2 = cv2.resize(im, (28, 28))\n im = im2.reshape(28, 28, -1)\n im = im.reshape(1, 1, 28, 28)\n im = cv2.bitwise_not(im)\n im = im.reshape(28,28)\n \n with out:\n clear_output()\n \n # resize\n img = np.array(im)\n img = img.reshape(28*28,)\n \n #img = img/255.0\n \n return img",
"def force_rescale(self,rescaleFactor):\n if not self.built:\n raise Exception(\"model should be built before calling this function\")\n for l in self.layerList:\n l.rescale(rescaleFactor)\n self.rescaleFactor.assign(rescaleFactor)",
"def normalise(image):",
"def rescale(input_image, shift=None, scale=None):\n if scale is None and shift is None:\n return input_image\n\n output_image = sitk.ShiftScale(input_image, float(shift), float(scale))\n return output_image"
]
| [
"0.6996561",
"0.68154985",
"0.67812395",
"0.66067845",
"0.65932626",
"0.657384",
"0.64108557",
"0.63874376",
"0.6248599",
"0.6233107",
"0.6231488",
"0.62114924",
"0.6186186",
"0.6160583",
"0.61534476",
"0.61527234",
"0.61435246",
"0.6095281",
"0.6093892",
"0.6093892",
"0.60875374",
"0.6006399",
"0.5984781",
"0.5983318",
"0.5956389",
"0.595629",
"0.59494424",
"0.593675",
"0.593524",
"0.5923429"
]
| 0.78269804 | 0 |
Loads a set of images to self.imgs list | def load_images(self):
self.img_paths = sorted(glob(self.img_pattern))
self.imgs = []
for idx, this_path in enumerate(self.img_paths):
try:
this_img = cv2.imread(this_path)
if self.downscale > 1:
this_img = cv2.resize(this_img, (0, 0),
fx=1/float(self.downscale),
fy=1/float(self.downscale),
interpolation=cv2.INTER_LINEAR)
except Exception as e:
print("error loading img: %s" % (this_path))
if this_img is not None:
self.imgs.append(this_img)
print("loaded img %d size=(%d,%d): %s" %
(idx, this_img.shape[0], this_img.shape[1], this_path))
print("loaded %d images" % (len(self.imgs))) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_images(self):\n for image in self.gltf.images:\n self.images.append(image.load(self.path.parent))",
"def load_images(self):\n images_list = [os.path.join(self.root, image['file_name'])\n for image in self.data['images']]\n\n if self.shuffle:\n random.shuffle(images_list)\n images_list = images_list[:self.max_samples] if self.max_samples is not None and self.max_samples <= len(\n images_list) else images_list\n\n return images_list",
"def load_images(self, files, sub_dir):\n\n for f in files:\n self.images.append(Image(f, sub_dir))",
"def load_images(self):\r\n self.standing_frame = [load_image(\"cat1.png\")]\r\n self.walk_frames_r = [load_image(\"cat2.png\"), load_image(\"cat3.png\"),\r\n load_image(\"cat4.png\")]",
"def load_images(self, tmx):\n for image_data in tmx.images:\n if image_data:\n image, _, _ = image_data\n self.load_image(image)",
"def loadImgs(self, ids=[]):\n if _isArrayLike(ids):\n return [self.imgs[id] for id in ids]\n elif type(ids) == int:\n return [self.imgs[ids]]",
"def loadImgs(self, ids=[]):\r\n if isinstance(ids, tuple) or isinstance(ids, list):\r\n return [self.imgs[id] for id in ids]\r\n elif type(ids) == int:\r\n return [self.imgs[ids]]",
"def load(self):\n\n # get files in folder\n files = [f for f in listdir(self.data_path)]\n print(\"loading images from folder: %s\" % self.data_path)\n\n images = []\n image_targets = []\n for f in files:\n filepath = path.join(self.data_path, f)\n images.append(io.imread(filepath, as_grey=True))\n image_targets.append(self.target)\n\n # define new size and resize images\n new_size = (2 ** self.size_exponent, 2 ** self.size_exponent)\n for i in range(0, len(images)):\n # images[i] = transform.resize(images[i], new_size)\n images[i] = misc.imresize(images[i], new_size) / 16\n\n self.images = images\n self.targets = image_targets",
"def _load_images(paths):\n assert isinstance(paths, list)\n _R_MEAN = 123.68\n _G_MEAN = 116.78\n _B_MEAN = 103.94\n\n # allocate memory\n images = np.zeros([len(paths), FLAGS.target_height, FLAGS.target_width, 3],\n dtype=np.float32)\n\n # load all images\n pbar = ProgressBar(max_value=len(paths))\n for i in range(len(paths)):\n img = sio.imread(paths[i])\n\n # resize images\n img = sresize(img, (FLAGS.target_height, FLAGS.target_width, 3),\n mode='constant', preserve_range=True)\n\n # store images\n images[i] = img.astype(np.float32)\n pbar.update(i)\n\n # mean removal\n images -= [_R_MEAN, _G_MEAN, _B_MEAN]\n return images",
"def getimgs():",
"def load_images(self, folder):\n cwd = os.getcwd()\n dir = cwd + '/' + folder\n files = os.listdir(dir)\n for file in files:\n img = pygame.image.load(dir + '/' + file)\n self.images.append(img)",
"def load_images(self, list_indices=None, start=None, end=None):\n if start is None and list_indices is None:\n start = 0\n if end is None and list_indices is None:\n end = len(self._image_names)\n if list_indices is None:\n assert start >= 0\n assert start < end\n assert end <= len(self._image_names)\n list_indices = np.arange(start, end)\n\n self.image_indices = []\n self.images = []\n self.image_names = []\n for i, image_name in enumerate(self._image_names):\n if i in list_indices:\n image_filename = os.path.join(self.directory, image_name)\n image = skio.imread(image_filename)\n self.image_indices.append(i)\n self.images.append(image)\n self.image_names.append(image_name)\n print(len(self.images), 'images loaded!')",
"def load_images(filename):\n images = _load(filename)\n #_info_image(image, title=os.path.basename(filename))\n return images",
"def get_images(self):\n \n return self.img_lst",
"def load_set(directName, n = np.inf):\n # Loaded a set of images\n\n files = os.listdir(directName)\n n = min(n, len(files))\n #n = len(files)\n print(\"Loading \" + str(n) + \" images\")\n imgs = [mpimg.imread(directName + files[i]) for i in range(n)]\n\n return imgs",
"def load_images(self, image_paths):\n \n fill_list = []\n \n for idx in tqdm(range(len(image_paths))):\n path = image_paths[idx]\n yield cv2.imread(path)",
"def populateImagesList(self):\n \n self._gui_server.getImagesList(self._populateImagesList)",
"def load_images(self, filename):\n\n self.images = self.load(filename)\n self.length = len(self.images)\n self.create_teacher()",
"def preload_pathimgs(self, pathimgs):\n self.pathimgs = pathimgs\n print('build list images :' + self.pathimgs)\n listfiles = self.get_list_files()\n listfiles.sort(key=lambda v: v.upper())\n for imgpath in listfiles:\n if imgpath.endswith('gif'):\n listgif = self.build_list_gif(imgpath)\n self.listimages += listgif * self.passgif\n self.tempo += [self.durationgif] * len(listgif) * self.passgif\n else:\n img = Image.open(imgpath)\n img = img.resize((self.matrix.width, self.matrix.height), Image.ANTIALIAS)\n self.listimages.append(img.convert('RGB'))\n self.tempo += [self.durationimg]\n print(\" duration: {}s, {} Images\".format(int(sum(self.tempo, 0)), len(self.listimages)))",
"def load_images():\n print(\"[+] UPDATE - Begin loading images\")\n\n colors = [\"w\", \"b\"]\n piece_types = [\"p\", \"R\", \"N\", \"B\", \"K\", \"Q\"]\n for color in colors:\n for type in piece_types:\n piece = color + type\n IMAGES[piece] = p.transform.scale(p.image.load(\"images/\" + piece + \".png\"), (SQ_SIZE, SQ_SIZE))\n\n print(\"[+] UPDATE - Images loaded\")",
"def load_images(pool, entries):\n start = time.perf_counter()\n images = pool.map(ski.io.imread, [x.path for x in entries])\n logger.info(\"Loaded %i images:\", len(images))\n util.pprint_log([x.name for x in entries], logger.info)\n logger.info(util.elapsed(start))\n logger.info(\"\\n\")\n return images",
"def images(self, images):\n\n self._images = images",
"def images(self, images):\n\n self._images = images",
"def images(self, images):\n\n self._images = images",
"def images(self, images):\n\n self._images = images",
"def load_images(self,im_paths,imlist,im_index):\n\n\t\timlist_arr = []\n\t\tj = 0\n\t\tfor im_path in im_paths:\n\t\t\tim = None\n\n\t\t\ttry:\n\t\t\t\tim = Image.open(im_path)\n\t\t\t\t#im = imread(im_path)\n\t\t\t\t#print im.shape\n\t\t\texcept Exception, e:\n\t\t\t\tprint e\n\t\t\t\n\t\t\tif im != None:\n\t\t\t\ttry:\n\t\t\t\t\tim_aux = np.array(im,dtype=theano.config.floatX)\n\t\t\t\t\tim_converted = True\n\t\t\t\texcept TypeError, e:\n\t\t\t\t\tim_converted = False\n\t\t\t\t\tprint e\n\t\t\t\t\n\t\t\t\tif im_converted == True:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tif im_aux.shape[2] == 4:\n\t\t\t\t\t\t\tbackground = Image.new(\"RGB\", im.size, (255, 255, 255))\n\t\t\t\t\t\t\tbackground.paste(im, mask=im.split()[3]) # 3 is the alpha channel\n\t\t\t\t\t\t\tim = background\n\t\t\t\t\t\t\tim_aux = np.array(background,dtype=theano.config.floatX)\n\t\t\t\t\texcept Exception, e:\n\t\t\t\t\t\tprint e\n\t\t\t\t\t\n\t\t\t\t\ttry:\n\n\t\t\t\t\t\tif im_aux.shape[2] == 3:\n\t\t\t\t\t\t\tbn_parsed = os.path.basename(im_path).split(\"_\")\n\t\t\t\t\t\t\tim_id = int(bn_parsed[0])\n\t\t\t\t\t\t\t#print im_id\n\t\t\t\t\t\t\t#Ignore potential duplicates\n\t\t\t\t\t\t\t#if im_id not in self.im_index:\n\t\t\t\t\t\t\tif im_id not in im_index:\n\t\t\t\t\t\t\t\tim_aux = self.scale_and_crop_img(im)\n\t\t\t\t\t\t\t\t# This is for multiprocessing\n\t\t\t\t\t\t\t\tim_index.append(im_id)\n\t\t\t\t\t\t\t\timlist.append(np.asarray(im_aux))\n\n\t\t\t\t\t\t\t\t# Uncomment this if you are not using multiprocessing\n\t\t\t\t\t\t\t\t# self.im_index.append(im_id)\n\t\t\t\t\t\t\t\t# self.imlist.append(np.asarray(im_aux))\n\t\t\t\t\t\t\t\t#self.imlist.append(im_aux)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tprint \"invalid image: {} size:{}\".format(im.filename, im_aux.shape)\n\t\t\n\t\t\t\t\texcept Exception, e:\n\t\t\t\t\t\t#raise e\n\t\t\t\t\t\tprint e\n\t\n\t\t\t# if self.verbose:\n\t\t\t# \tsys.stdout.write(\"\\r Process: {0}/{1}\".format(j, len(im_paths)))\n\t\t\t# \tsys.stdout.flush()\n\n\t\t\tj += 1",
"def loadImages(files, targets):\n images = []\n for file in files:\n targets.append(file)\n images.append(snd.imread(file))\n return images, targets",
"def get_images(self, file_path: str) -> Iterable[Image]:\n return []",
"def load_from_images(self):\n logging.debug(\"load_from_images called\")\n return True",
"def load_images(folder_path, num_images):\n imgs = np.zeros(shape=[num_images, 400, 400, 3])\n for i in range(1, num_images + 1):\n image_name = \"satImage_%.3d\" % i\n image_path = folder_path + image_name + \".png\"\n if os.path.isfile(image_path):\n print('Loading ' + image_path)\n img = mpimg.imread(image_path)\n\n #imgs[i - 1] = np.asarray(img).reshape(400, 400, 3)\n imgs[i - 1] = img.reshape(400, 400, 3)\n else:\n print('File ' + image_path + ' does not exist')\n return imgs"
]
| [
"0.8590295",
"0.77870566",
"0.7699553",
"0.76801467",
"0.7673592",
"0.7590096",
"0.75795263",
"0.74018186",
"0.72978824",
"0.7295722",
"0.725373",
"0.72240764",
"0.7198496",
"0.7194455",
"0.7186009",
"0.7184365",
"0.718259",
"0.71660835",
"0.71409017",
"0.7089566",
"0.7082811",
"0.7058165",
"0.7058165",
"0.7058165",
"0.7058165",
"0.7013404",
"0.6987545",
"0.69654894",
"0.69573843",
"0.69284457"
]
| 0.7847628 | 1 |
This function visualizes the epipolar lines | def visualize_epipolar_lines(self, img1, img2, p1, p2, E, save_path):
# get fundamental matrix
F, mask_fdm = cv2.findFundamentalMat(p1, p2, cv2.RANSAC)
p1_selected = p1[mask_fdm.ravel() == 1]
p2_selected = p2[mask_fdm.ravel() == 1]
# draw lines
lines1 = cv2.computeCorrespondEpilines(
p2_selected.reshape(-1, 1, 2), 2, F).reshape(-1, 3)
img5, _ = self.drawlines(
img1, img2, lines1, p1_selected, p2_selected, 100)
lines2 = cv2.computeCorrespondEpilines(
p1_selected.reshape(-1, 1, 2), 1, F).reshape(-1, 3)
img3, _ = self.drawlines(
img2, img1, lines2, p2_selected, p1_selected, 100)
canvas = np.concatenate((img5, img3), axis=1)
cv2.imwrite(save_path, canvas) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def plot_lines(self):\n self.plot(3)",
"def plot(self): \n\t\txandy = sep_xy(self.start, self.end)\n\t\tplt.plot(xandy[0], xandy[1], 'k-', lw=1, color='red')",
"def plot(self): \n\t\txandy = sep_xy(self.start, self.end)\n\t\tplt.plot(xandy[0], xandy[1], 'k-', lw=1, color='green')",
"def plot(self): \n\t\txandy = sep_xy(self.start, self.end)\n\t\tplt.plot(xandy[0], xandy[1], 'k-', lw=1, color='blue')",
"def _plot(self, rewards, losses, epsilons):\n plt.figure(figsize=(20,5))\n plt.subplot(131)\n plt.title('Episodic Reward')\n plt.plot(rewards)\n plt.subplot(132)\n plt.title('TD Loss')\n plt.plot(losses)\n plt.subplot(133)\n plt.title('Epsilon')\n plt.plot(epsilons)\n plt.tight_layout()\n plt.show()",
"def plot(self):\n pass",
"def plotArt(self):\n self.isArt=True\n warr=self.ws.value(self.xarr)\n asfarr=st.interpolate(warr, self.swarr, self.sfarr, left=0.0, right=0.0)\n asfarr=asfarr*self.farr.max()/asfarr.max()\n self.fpcurve,=self.axes.plot(self.xarr,asfarr,linewidth=0.5,linestyle='-',\n marker='None',color='r')",
"def plotLine(self):\n minc = 0\n maxc = 500\n num = 500\n levels = np.linspace(minc,maxc,num+1)\n title = textwrap.dedent(\"\"\"\\\n Orography difference between LGM and Modern ICE-5G data\n using {0} meter contour interval\"\"\").format((maxc-minc)/num)\n plt.figure()\n plt.contour(self.difference_in_ice_5g_orography,levels=levels)\n plt.title(title)\n pts.set_ticks_to_zero()\n #if self.save:\n #plt.savefig('something')\n print(\"Line contour plot created\")",
"def plotERP(self, ep):\n import os \n import matplotlib.pyplot as plt\n \n try:\n filename = ep.filename.split('\\\\')[-1].split('.fif')[0]\n filename = 'plotsEEG_'+filename.split('_')[0] \n except Exception as err: \n filename = 'plots_eeg_file' \n print(err) \n finally:\n print('Saving ERP plots at >>>>', os.getcwd())\n \n try:\n os.mkdir(os.path.join(os.getcwd(), filename)) \n os.chdir(os.path.join(os.getcwd(), filename)) \n except Exception as err:\n print(err) \n \n \n ep = ep.interpolate_bads(reset_bads='True', mode = 'accurate')\n ep.info['bads'] = []\n \n ep.plot_psd(area_mode='range',fmin=0, fmax=40, tmax=10.0).savefig(filename + '_psd')\n\n# picks = ['FC2', 'C4', 'Cz', 'C5', 'FC1'] \n \n ep.plot_image(picks = None, cmap='interactive', sigma=1) \n \n plt.savefig(filename + '_image') \n \n bands = [(0, 4, 'Delta'), (4, 8, 'Theta'), (8, 12, 'Alpha'),\n (12, 30, 'Beta'), (30, 45, 'Gamma')] \n \n ep.plot_psd_topomap(bands=bands, vmin=None, vmax=None, \n tmin=0, tmax=0.5).savefig(filename + '_psd_topo')\n \n ep.plot_sensors().savefig(filename + '_sensors_') \n \n ep.plot_topo_image(vmin=-25, vmax=25, title='ERF images', sigma=3.,\n fig_facecolor='w', font_color='k').savefig(filename + '_image_topo') \n \n ep.average().plot().savefig(filename + 'erp_average_')\n ep.average().plot_image().savefig(filename + '_erp_average_image')\n print('Saving ERP plots at >>>>', os.getcwd())",
"def display(self):\r\n \r\n plt.rcParams['font.size'] = 14\r\n plt.rcParams['axes.linewidth'] = 1.2 # 1.2 for single plot, 0.5 for all 6\r\n plt.rcParams['lines.linewidth'] = 20.0 # Aah, this doesn't work because line width is changed later on\r\n\r\n cwd = os.getcwd() # Gets current working directory.\r\n cwd = cwd.replace('\\\\', '/')\r\n path = cwd + directory # This is the folder all the results are stored in.\r\n \r\n if type(array_element) == str:\r\n dataframes = [file + array_element] # This is to pass a single csv file\r\n else:\r\n dataframes = [file + i for i in array_element] # This is a list so you can pass multiple csv files to be overlayed on the same plot.\r\n\r\n colours = ['black', 'darkred', 'darkmagenta', 'darkturquoise', 'saddlebrown'] # Array of colours for the lines.\r\n\r\n dfE = pd.read_csv(cwd + \"/experimental_data.csv\") # Reads in the experimental data as a pandas dataframe.\r\n\r\n # Rescale the x-axis of the experimental data.\r\n ratio_of_capacities = 272.4 / 338.313338 # experimental maximum capacity / theoretical maximum capacity\r\n dfE[\"x_theo\"] = ratio_of_capacities * dfE[\"x\"]\r\n # 'x' is the experimental x and 'x_theo' is the theoretical x.\r\n\r\n # Second derivative of enthalpy for experimental data. One w/ respect to the experimental x and one w/ respect to theoretical x.\r\n secder_enthalpy_experimental_x = np.gradient(np.array(dfE['Enthalpy dH/dx']), np.array(dfE['x']))\r\n secder_enthalpy_experimental_x_theo = np.gradient(np.array(dfE['Enthalpy dH/dx']), np.array(dfE['x_theo']))\r\n dfE['secder enthalpy x'] = secder_enthalpy_experimental_x\r\n dfE['secder enthalpy x theo'] = secder_enthalpy_experimental_x_theo\r\n\r\n # vertical shift on p.m. entropy for vibrational effect\r\n vibrational_shift = 0.0108 # eV K this includes being multiplied by the ratio of capacities.\r\n dfE[\"Entropy dS/dx\"] = (dfE[\"Entropy dS/dx\"]) - vibrational_shift\r\n\r\n # Integrates the p.m. entropy\r\n entropy_list_experimental = integrate.cumtrapz(dfE['Entropy dS/dx'], dfE['x'],\r\n initial=0) # Contains the entropy values\r\n dfE['Entropy'] = entropy_list_experimental\r\n\r\n dfE['x_new'] = ((dfE['x_theo'] - dfE['x_theo'].iloc[0]) * dfE['x_theo'][73]) / (dfE['x_theo'][73] - dfE['x_theo'].iloc[0]) # Rescales the line so that the experimental data starts at 0.\r\n dfE['x'] = ((dfE['x'] - dfE['x'].iloc[0]) * dfE['x'][73]) / (dfE['x'][73] - dfE['x'].iloc[0]) # Same as above but for experimental x axis.\r\n\r\n # Calculates the analytical solution\r\n points = 1000\r\n x_pos = np.linspace(0, 1, points) # x for p.m. entropy\r\n y_pos = np.linspace(0, 1, points) # y for p.m. 
etropy\r\n s_x = np.linspace(0, 1, points) # x for entropy\r\n s_y = np.linspace(0, 1, points) # y for entropy\r\n l = 0.329217689 # This must be the same as what was used in the main script\r\n R = -0.0000862 # eV/K.Site\r\n T = 288 # K\r\n for index, x in enumerate(x_pos):\r\n if x < l:\r\n s_y[index] = (R * (x * np.log(x / l) - (x - l) * np.log((l - x) / l))) * T\r\n y_pos[index] = T * R * (np.log(x / l) - np.log((l - x) / l))\r\n else:\r\n s_y[index] = (R * l * (\r\n (x / l - 1) * np.log(x / l - 1) + (1 - x) / l * np.log((1 - x) / l) - (1 - l) / l * np.log(\r\n (1 - l) / l))) * T\r\n y_pos[index] = T * R * (np.log(x / l - 1) - np.log(1 / l - x / l))\r\n\r\n # Calculates the single solid state entropy\r\n x_ent = np.linspace(0, 1, points)\r\n y_ent = np.linspace(0, 1, points)\r\n for index, x in enumerate(x_ent):\r\n y_ent[index] = T * R * (x * np.log(x) + (1-x) * np.log(1-x))\r\n \r\n \"\"\"\r\n #\r\n #\r\n # Create plot and formats\r\n #\r\n #\r\n \"\"\"\r\n \r\n fig, axes = plt.subplots(nrows=num_row, ncols=num_col, constrained_layout=True, squeeze=False)\r\n # squeeze=False is needed to prevent errors when plotting a single subplot\r\n plt.rc('legend', fontsize=13, handlelength=1)\r\n plt.rc('tick')\r\n lw = 1.5 # Line width\r\n \r\n plt.tick_params(bottom=True, top=True, left=True, right=True)\r\n plt.tick_params(labelbottom=True, labeltop=False, labelleft=True, labelright=False)\r\n plt.tick_params(direction='in', width=1.2, length=4.5, pad=3) # For single plot\r\n # plt.tick_params(direction='in', width=1, length=4.5, pad=3) # For multiple plots\r\n\r\n marker_list = ['v', '^', 'p', 'o']\r\n mark_size = 3 #0.7 for 6 plots\r\n \r\n colours = ['#176ba0', '#af4bce', 'orangered', '#48a11b', '#3caea3'] #'#af4bce'\r\n common_legend = ['400 Averaging Steps', '800 Averaging Steps', '2000 Averaging Steps']\r\n \r\n if num_col==2 and num_row==3: # This will work when using the original axes dimensions (3 rows, 2 columns)\r\n placement = dict([\r\n ('voltage', axes[0, 0]),\r\n ('dS/dx', axes[0, 1]),\r\n ('dQ/dV', axes[1, 0]),\r\n ('dH/dx', axes[1, 1]),\r\n ('S', axes[2, 0]),\r\n ('d/dx(dH/dx)', axes[2, 1])\r\n ])\r\n else: # If axes dimensions are different, I'm probably trying to plot one graph\r\n \"\"\"\r\n If plotting more than one graph, the position on the plot in the subplot can be adjusted\r\n by appropriately altering the axes[] parameter. 
For the graphs that are not being plotted, \r\n leave their position as axes[0, 0].\r\n \"\"\"\r\n placement = dict([\r\n ('voltage', axes[0, 0]),\r\n ('dS/dx', axes[0, 0]),\r\n ('dQ/dV', axes[0, 0]),\r\n ('dH/dx', axes[0, 0]),\r\n ('S', axes[0, 0]),\r\n ('d/dx(dH/dx)', axes[0, 0])\r\n ])\r\n \r\n # Plots all of the experimental data\r\n if experimental_plot == True:\r\n if pick_plot['voltage'] == True:\r\n dfE.plot(linestyle='-', color='darkgreen', lw=lw, ax=placement['voltage'], x='x_new', y='OCV')\r\n dfE.plot(linestyle='-', color='darkblue', lw=lw, ax=placement['voltage'], x='x', y='OCV')\r\n \r\n if pick_plot['dS/dx'] == True:\r\n ax2 = dfE.plot(linestyle='-', color='darkgreen', lw=lw, ax=placement['dS/dx'], x='x_new', y='Entropy dS/dx')\r\n dfE.plot(linestyle='-', color='darkblue', lw=lw, ax=placement['dS/dx'], x='x', y='Entropy dS/dx')\r\n \r\n if pick_plot['dQ/dV'] == True:\r\n dfE.plot(linestyle='-', color='darkgreen', lw=lw, ax=placement['dQ/dV'], x='OCV', y='dQdV') \r\n \r\n if pick_plot['dH/dx'] == True:\r\n dfE.plot(linestyle='-', color='darkgreen', lw=lw, ax=placement['dH/dx'], x='x_new', y='Enthalpy dH/dx')\r\n dfE.plot(linestyle='-', color='darkblue', lw=lw, ax=placement['dH/dx'], x='x', y='Enthalpy dH/dx')\r\n \r\n if pick_plot['S'] == True:\r\n ax5 = dfE.plot(linestyle='-', color='darkgreen', lw=lw, ax=placement['S'], x='x_new', y='Entropy')\r\n \r\n if pick_plot['d/dx(dH/dx)'] == True:\r\n dfE.plot(linestyle='-', color='darkgreen', lw=lw, ax=placement['d/dx(dH/dx)'], x='x_new', y='secder enthalpy x theo')\r\n dfE.plot(linestyle='-', color='darkblue', lw=lw, ax=placement['d/dx(dH/dx)'], x='x', y='secder enthalpy x')\r\n\r\n # Iterate through all the data to be plotted\r\n if simulation_plot == True:\r\n for count, df in enumerate(dataframes):\r\n df1 = pd.read_csv(path + df) # reads file into a dataframe.\r\n \r\n df1 = df1.replace(0, np.nan).dropna(axis=0, how='all') # For the rows with all '0' entries they are replaced with 'nan' and then these rows are dropped.\r\n df1 = df1.replace(np.nan, 0) # As some legitimate 0 entries such as 0 volts we flip back the remaining from 'nan' to 0.\r\n \r\n # Integrates the p.m. entropy\r\n entropy_list = integrate.cumtrapz(df1['Partial molar entropy'], df1['Total mole fraction'],\r\n initial=0) # Contains the entropy values\r\n df1['Entropy'] = entropy_list\r\n \r\n # Rescale voltage profile and p.m. enthalpy by the chain rule.\r\n df1[\"adjusted voltage\"] = df1[\"Chemical potential\"] * ratio_of_capacities\r\n df1[\"adjusted enthalpy\"] = df1[\"Partial molar enthalpy\"] * ratio_of_capacities\r\n df1[\"adjusted entropy\"] = df1[\"Partial molar entropy\"] * ratio_of_capacities\r\n df1[\"adjusted dq/de\"] = df1[\"dq/de\"] * (1/ratio_of_capacities)**2\r\n \r\n # Differentiate the p.m. 
enthalpy to get the second derivative.\r\n pm_enthalpy = np.array(df1['adjusted enthalpy'])\r\n mole_fraction = np.array(df1['Total mole fraction'])\r\n secder_enthalpy = np.gradient(pm_enthalpy, mole_fraction)\r\n df1['secder enthalpy'] = secder_enthalpy\r\n \r\n if pick_plot['voltage'] == True:\r\n ax1 = df1.plot(linestyle='-', color=colours[count], lw=lw, marker=marker_list[count], markeredgecolor=colours[count],\r\n markersize=mark_size, ax=placement['voltage'], x='Total mole fraction', y='adjusted voltage')\r\n ax1.set_xlim([0, 1])\r\n ax1.set_xlabel('Na content $[x]$')\r\n ax1.set_ylabel('Voltage $[V]$')\r\n ax1.legend(common_legend) \r\n # ax1.legend(['Experimental data (Adjusted x)', 'Raw experimental data', 'Monte Carlo data'])\r\n \r\n if pick_plot['dS/dx'] == True:\r\n ax2 = df1.plot(linestyle='-', color=colours[count], lw=lw, marker=marker_list[count], markeredgecolor=colours[count],\r\n markersize=mark_size, ax=placement['dS/dx'], x='Total mole fraction', y='adjusted entropy')\r\n # ax2.plot(x_pos, y_pos, linewidth=lw, color='red') # Plots the ideal p.m. entropy\r\n ax2.set_xlim([0, 1])\r\n ax2.set_xlabel('Na content $[x]$')\r\n ax2.set_ylabel('$\\\\frac{dS}{dx}$ $[eV K/site]$')\r\n ax2.legend(common_legend) \r\n # ax2.legend(['Experimental data (Adjusted x)', 'Raw experimental data', 'Monte Carlo data', 'Analytical solution'])\r\n \r\n if pick_plot['dQ/dV'] == True:\r\n ax3 = df1.plot(linestyle='-', color=colours[count], lw=lw, marker=marker_list[count], markeredgecolor=colours[count],\r\n markersize=mark_size, ax=placement['dQ/dV'], x='Chemical potential', y='adjusted dq/de') \r\n ax3.set_xlim([-0.1, 1])\r\n ax3.set_xlabel('Voltage $[V]$')\r\n ax3.set_ylabel('$\\\\frac{dQ}{dV}$ [$\\mathregular{eV^{-1}}$]')\r\n ax3.legend(common_legend)\r\n # ax3.legend(['Experimental data', 'Monte Carlo Data'])\r\n \r\n if pick_plot['dH/dx'] == True:\r\n ax4 = df1.plot(linestyle='-', color=colours[count], lw=lw, marker=marker_list[count], markeredgecolor=colours[count],\r\n markersize=mark_size, ax=placement['dH/dx'], x='Total mole fraction', y='adjusted enthalpy')\r\n ax4.set_xlim([0, 1])\r\n ax4.set_xlabel('Na content $[x]$')\r\n ax4.set_ylabel('$\\\\frac{dH}{dx}$ $[eV/site]$')\r\n ax4.legend(common_legend) \r\n # ax4.legend(['Experimental data (Adjusted x)', 'Raw experimental data', 'Monte Carlo data'])\r\n \r\n if pick_plot['d/dx(dH/dx)'] == True:\r\n ax5 = df1.plot(linestyle='-', color=colours[count], lw=lw, marker=marker_list[count], markeredgecolor=colours[count],\r\n markersize=mark_size, ax=placement['d/dx(dH/dx)'], x='Total mole fraction', y='secder enthalpy')\r\n ax5.set_xlim([0, 1])\r\n ax5.set_ylim([0, 6])\r\n ax5.set_xlabel('Na content $[x]$')\r\n ax5.set_ylabel('$\\\\frac{d^2H}{dx^2}$ $[eV/site]$')\r\n ax5.legend(common_legend)\r\n \r\n # ax5.legend(['Experimental data (Adjusted x)', 'Raw experimental data', 'Monte Carlo data'])\r\n \r\n if pick_plot['S'] == True:\r\n ax6 = df1.plot(linestyle='-', color=colours[count], lw=lw, marker=marker_list[count], markeredgecolor=colours[count],\r\n markersize=mark_size, ax=placement['S'], x='Total mole fraction', y='Entropy')\r\n \r\n # ax6.plot(s_x, s_y, linewidth=lw, color='red') # Plots the entropy for l=0.32...\r\n # ax6.plot(x_ent, y_ent, linewidth=lw, color='grey') # Plots the entropy for solid state solution.\r\n ax6.set_xlim([0, 1])\r\n ax6.set_xlabel('Na content $[x]$')\r\n ax6.set_ylabel('S $[eV K/site]$')\r\n ax6.legend(common_legend)\r\n # ax6.legend(['Experimental data', 'Monte Carlo data', 'Analytical solution', 'Solid 
state solution'], loc='upper right', bbox_to_anchor=(0.75, 0.5))\r\n \r\n \r\n\r\n # parameter_file = open(path + \"/Input_arguments_\" + uid + \".txt\", \"w\")\r\n # parameter_file.write(str(self.args))\r\n # parameter_file.close()\r\n\r\n # manager = plt.get_current_fig_manager()\r\n # # manager.resize(*manager.window.maxsize())\r\n # # fig_path = cwd + \"/Na_plot_results.png\"\r\n # # plt.savefig(path + \"/Na_monte_carlo_plot_\" + uid + \".png\")\r\n # plt.show()\r\n \r\n plt.savefig(\"Varying sps Overlaid Plots - dQ_dV\", dpi = 300)\r\n\r\n plt.show()",
"def plt_spec_lines():\n\n for i in range(0, Molecule.species_count):\n mid_line = (Molecule.right_endpt[i] + Molecule.left_endpt[i]) / 2\n shift1 = Molecule.energy[i] - PlotParameter.energy_vshift\n shift2 = Molecule.energy[i] + PlotParameter.name_vshift\n\n en = '{0:5.2f}'.format(Molecule.energy[i])\n\n plt.plot([Molecule.left_endpt[i], Molecule.right_endpt[i]], [Molecule.energy[i], Molecule.energy[i]],\n color=PlotParameter.species_line_color, lw=PlotParameter.species_line_width, linestyle='-')\n plt.text(mid_line, shift1, en, weight='bold', horizontalalignment='center',\n fontsize=PlotParameter.energy_font_size, color='black')\n plt.text(mid_line, shift2, Molecule.name[i], weight='bold', horizontalalignment='center',\n fontsize=PlotParameter.name_font_size, color='black')",
"def test_line_plot(self):\n clf()\n filename = 'lines_plot.png'\n N = 10\n lines = GeoSeries([LineString([(0, i), (9, i)]) for i in xrange(N)])\n ax = lines.plot()\n self._compare_images(ax=ax, filename=filename)",
"def draw_lines(asr,ax):\n r = asr.value\n y = 475.\n x = (r**2-y**2)**(.5)\n xs = np.linspace(-x,x,10)\n yt = np.zeros(xs.size)+y\n yb = np.zeros(xs.size)-y\n ax.plot(xs,yt,'-.',color='red',alpha=1.,linewidth=2,zorder=5000)\n ax.plot(xs,yb,'-.',color='red',alpha=1.,linewidth=2,zorder=5000)\n return ax",
"def linePlot(self):\n clf()\n plot(self.x,self.averages)\n xlabel('X Label (units)')\n ylabel('Y Label (units)')\n savefig('line.png')",
"def visualizations():\r\n raise NotImplementedError\r\n # df = pandas.read_csv('accidents_by_hour.csv', index_col=0, header=0)\r\n # plt.plot(0, 0, data=df)\r\n # plt.show()\r",
"def plot(self):\n\t\tself.plotOfHeatingCurrent().plot()",
"def plotTI():\n min_dl = dlam[dlam != 0].min()\n S = int(0.4/min_dl)\n fig = pl.figure(figsize = (8,6))\n ax = fig.add_subplot(1,1,1)\n ax.spines['bottom'].set_position('zero')\n ax.spines['top'].set_color('none')\n ax.spines['right'].set_color('none')\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n\n for k, spine in ax.spines.items():\n spine.set_zorder(12.2)\n\n xs, ndx, dx = [0], 0, 0.001\n colors = ['r', 'g', '#7F38EC', '#9F000F', 'b', 'y']\n min_y, max_y = 0, 0\n\n lines = tuple()\n ## lv_names2 = [r'$Coulomb$', r'$vdWaals$'] ## for the paper\n lv_names2 = []\n for j in range(n_components):\n y = ave_dhdl[:,j]\n if not (y == 0).all():\n lv_names2.append(r'$%s$' % P.lv_names[j].capitalize())\n\n for j in range(n_components):\n\n y = ave_dhdl[:,j]\n if not (y == 0).all():\n\n # Get the coordinates.\n lj = lchange[:,j]\n x = lv[:,j][lj]\n y = y[lj]/P.beta_report\n\n if 'TI' in P.methods:\n # Plot the TI integration area.\n ss = 'TI'\n for i in range(len(x)-1):\n min_y = min(y.min(), min_y)\n max_y = max(y.max(), max_y)\n #pl.plot(x,y)\n if i%2==0:\n pl.fill_between(x[i:i+2]+ndx, 0, y[i:i+2], color=colors[ndx], alpha=1.0)\n else:\n pl.fill_between(x[i:i+2]+ndx, 0, y[i:i+2], color=colors[ndx], alpha=0.5)\n xlegend = [-100*wnum for wnum in range(len(lv_names2))]\n pl.plot(xlegend, [0*wnum for wnum in xlegend], ls='-', color=colors[ndx], label=lv_names2[ndx]) ## for the paper\n\n if 'TI-CUBIC' in P.methods and not cubspl[j]==0:\n # Plot the TI-CUBIC interpolation curve.\n ss += ' and TI-CUBIC'\n xnew = numpy.arange(0, 1+dx, dx)\n ynew = cubspl[j].interpolate(y, xnew)\n min_y = min(ynew.min(), min_y)\n max_y = max(ynew.max(), max_y)\n pl.plot(xnew+ndx, ynew, color='#B6B6B4', ls ='-', solid_capstyle='round', lw=3.0)\n\n else:\n # Plot the TI-CUBIC integration area.\n ss = 'TI-CUBIC'\n for i in range(len(x)-1):\n xnew = numpy.arange(x[i], x[i+1]+dx, dx)\n ynew = cubspl[j].interpolate(y, xnew)\n ynew[0], ynew[-1] = y[i], y[i+1]\n min_y = min(ynew.min(), min_y)\n max_y = max(ynew.max(), max_y)\n if i%2==0:\n pl.fill_between(xnew+ndx, 0, ynew, color=colors[ndx], alpha=1.0)\n else:\n pl.fill_between(xnew+ndx, 0, ynew, color=colors[ndx], alpha=0.5)\n\n # Store the abscissa values and update the subplot index.\n xs += (x+ndx).tolist()[1:]\n ndx += 1\n\n # Make sure the tick labels are not overcrowded.\n xs = numpy.array(xs)\n dl_mat = numpy.array([xs-i for i in xs])\n ri = range(len(xs))\n\n def getInd(r=ri, z=[0]):\n primo = r[0]\n min_dl=ndx*0.02*2**(primo>10)\n if dl_mat[primo].max()<min_dl:\n return z\n for i in r:\n for j in range(len(xs)):\n if dl_mat[i,j]>min_dl:\n z.append(j)\n return getInd(ri[j:], z)\n\n xt = [i if (i in getInd()) else '' for i in range(K)]\n pl.xticks(xs[1:], xt[1:], fontsize=10)\n pl.yticks(fontsize=10)\n #ax = pl.gca()\n #for label in ax.get_xticklabels():\n # label.set_bbox(dict(fc='w', ec='None', alpha=0.5))\n\n # Remove the abscissa ticks and set up the axes limits.\n for tick in ax.get_xticklines():\n tick.set_visible(False)\n pl.xlim(0, ndx)\n min_y *= 1.01\n max_y *= 1.01\n pl.ylim(min_y, max_y)\n\n for i,j in zip(xs[1:], xt[1:]):\n pl.annotate(('%.2f' % (i-1.0 if i>1.0 else i) if not j=='' else ''), xy=(i, 0), xytext=(i, 0.01), size=10, rotation=90, textcoords=('data', 'axes fraction'), va='bottom', ha='center', color='#151B54')\n if ndx>1:\n lenticks = len(ax.get_ymajorticklabels()) - 1\n if min_y<0: lenticks -= 1\n if lenticks < 5:\n from matplotlib.ticker import AutoMinorLocator as AML\n ax.yaxis.set_minor_locator(AML())\n 
pl.grid(which='both', color='w', lw=0.25, axis='y', zorder=12)\n pl.ylabel(r'$\\mathrm{\\langle{\\frac{ \\partial U } { \\partial \\lambda }}\\rangle_{\\lambda}\\/%s}$' % P.units, fontsize=20, color='#151B54')\n pl.annotate('$\\mathit{\\lambda}$', xy=(0, 0), xytext=(0.5, -0.05), size=18, textcoords='axes fraction', va='top', ha='center', color='#151B54')\n if not P.software.title()=='Sire':\n lege = ax.legend(prop=FP(size=14), frameon=False, loc=1)\n for l in lege.legendHandles:\n l.set_linewidth(10)\n pl.savefig(os.path.join(P.output_directory, 'dhdl_TI.pdf'))\n pl.close(fig)\n return",
"def plot_visual_abstract():\n # Which generations to plot\n GENERATIONS = [100, 230, 350]\n\n # LunarLander CMA-ES\n experiment_path = glob(\"experiments/wann_LunarLander-v2_CMAES*\")\n assert len(experiment_path) == 1, \"There should be only one CMA-ES experiment with LunarLander-v2\"\n experiment_path = experiment_path[0]\n\n pivector_paths = glob(os.path.join(experiment_path, \"pivectors\", \"*\"))\n\n tsnes = []\n rewards = []\n for generation in GENERATIONS:\n # Find pivector files for specific generation, load them and store points\n generation_paths = [path for path in pivector_paths if \"gen_{}_\".format(generation) in path]\n\n population = [np.load(path) for path in generation_paths]\n population_tsnes = np.array([x[\"tsne\"] for x in population])\n population_rewards = np.array([x[\"average_episodic_reward\"] for x in population])\n tsnes.append(population_tsnes)\n rewards.append(population_rewards)\n\n figure, axs = pyplot.subplots(\n figsize=[2.5 * 3, 2.5],\n nrows=1,\n ncols=len(GENERATIONS),\n sharex=\"all\",\n sharey=\"all\"\n )\n\n min_reward = min(x.min() for x in rewards)\n max_reward = max(x.max() for x in rewards)\n scatter = None\n\n for idx in range(len(GENERATIONS)):\n population_tsne = tsnes[idx]\n population_rewards = rewards[idx]\n generation = GENERATIONS[idx]\n ax = axs[idx]\n\n scatter = ax.scatter(\n population_tsne[:, 0],\n population_tsne[:, 1],\n c=population_rewards,\n vmin=min_reward,\n vmax=max_reward,\n cmap=\"plasma\"\n )\n ax.set_title(\"Generation {}\".format(generation))\n ax.set_xticks([])\n ax.set_yticks([])\n ax.axis(\"off\")\n\n # Making room for colorbar\n # Stackoverflow #13784201\n figure.subplots_adjust(right=1.0)\n cbar = figure.colorbar(scatter)\n cbar.set_ticks([])\n cbar.ax.set_ylabel(\"Reward $\\\\rightarrow$\", rotation=90, fontsize=\"large\")\n\n figure.tight_layout()\n figure.savefig(\"figures/visual_abstract.pdf\", bbox_inches=\"tight\", pad_inches=0.05)",
"def main():\n colors = {\n 0: 'w',\n 1: 'g',\n 2: 'r',\n 3: 'c',\n 4: 'm',\n 5: 'y',\n 6: 'k',\n 7: 'b',\n UNKNOWN_EMOTION: '0.1'\n }\n\n plot_data = { emotion: ([], []) for emotion in EMOTIONS }\n\n subjects = get_subjects()\n for subject in subjects:\n image_sequences = get_image_sequences(subject)\n for image_sequence in image_sequences:\n emotion = read_emotion(subject, image_sequence)\n X, Y = read_peak_landmarks(subject, image_sequence)\n\n plot_data[emotion][0].append(X)\n plot_data[emotion][1].append(Y)\n\n for emotion in EMOTIONS:\n if emotion == UNKNOWN_EMOTION or len(plot_data[emotion][0]) == 0:\n continue\n\n X = np.concatenate(plot_data[emotion][0])\n Y = np.concatenate(plot_data[emotion][1])\n plt.scatter(X, Y, color=colors[emotion], alpha=0.5, s=20, lw=0, label=EMOTIONS[emotion])\n\n plt.xlabel('X pixel position of landmark.')\n plt.ylabel('Y pixel position of landmark.')\n plt.legend()\n plt.grid(True)\n plt.show()",
"def plot_vanHove_dt(comp,conn,start,step_size,steps):\n \n (fin,) = conn.execute(\"select fout from comps where comp_key = ?\",comp).fetchone()\n (max_step,) = conn.execute(\"select max_step from vanHove_prams where comp_key = ?\",comp).fetchone()\n Fin = h5py.File(fin,'r')\n g = Fin[fd('vanHove',comp[0])]\n\n temp = g.attrs['temperature']\n dtime = g.attrs['dtime']\n\n\n # istatus = plots.non_i_plot_start()\n \n fig = mplt.figure()\n fig.suptitle(r'van Hove dist temp: %.2f dtime: %d'% (temp,dtime))\n dims = figure_out_grid(steps)\n \n plt_count = 1\n outs = []\n tmps = []\n for j in range(start,start+step_size*steps, step_size):\n (edges,count,x_lim) = _extract_vanHove(g,j+1,1,5)\n if len(count) < 50:\n plt_count += 1\n continue\n #count = count/np.sum(count)\n \n sp_arg = dims +(plt_count,)\n ax = fig.add_subplot(*sp_arg)\n ax.grid(True)\n\n \n alpha = _alpha2(edges,count)\n \n ax.set_ylabel(r'$\\log{P(N)}$')\n ax.step(edges,np.log((count/np.sum(count))),lw=2)\n ax.set_title(r'$\\alpha_2 = %.2f$'%alpha + ' j:%d '%j )\n ax.set_xlim(x_lim)\n plt_count += 1\n\n mplt.draw()\n\n # plots.non_i_plot_start(istatus)\n\n del g\n Fin.close()\n del Fin",
"def plot_endpoints( polylines, mymap ):\n map( \\\n lambda start : mymap.addpoint( start[-1][0], start[-1][1], \"#0000FF\") if start != [] else [],\n polylines)",
"def plotFeatures(self):\n fl=np.array(self.xp)*0.0+0.25*self.farr.max()\n self.splines=self.axes.plot(self.xp, fl , ls='', marker='|', ms=20, color='#00FF00')\n #set up the text position\n tsize=0.83\n self.ymin, self.ymax = self.axes.get_ylim()\n ppp=(self.ymax-self.ymin)/(self.arcfigure.figure.get_figheight()*self.arcfigure.figure.get_dpi())\n f=self.ymax-10*tsize*ppp\n for x,w in zip(self.xp, self.wp):\n w='%6.2f' % float(w)\n self.axes.text(x, f, w, size='small', rotation='vertical', color='#00FF00')",
"def drawLines(self):\n\t\tintersections = [[], []]\n\t\tfor l in self.lines:\n\t\t\tif l.direction == 'v':\n\t\t\t\tif l.rtc:\n\t\t\t\t\tposition = l.coordinate + int((self.width - 1) / 2)\n\t\t\t\telse:\n\t\t\t\t\tposition = int((l.coordinate * self.width / 100) if type(l.coordinate) == float else l.coordinate)\n\t\t\t\tintersections[0].append(position)\n\t\t\t\tfor yPos in range(1, self.height - 2):\n\t\t\t\t\tself.wts(yPos, position, '│', self._borderColor)\n\t\t\t\t# endpoints\n\t\t\t\tself.wts(0, position, '┬',self._borderColor)\n\t\t\t\tself.wts(self.height - 2, position, '┴', self._borderColor)\n\t\t\telif l.direction == 'h':\n\t\t\t\tif l.rtc:\n\t\t\t\t\tposition = l.coordinate + ((self.height - 1) / 2)\n\t\t\t\telse:\n\t\t\t\t\tposition = int((l.coordinate * self.height / 100) - 1 if type(l.coordinate) == float else l.coordinate)\n\t\t\t\tintersections[1].append(position)\n\t\t\t\tself.wts(position, 1, '─' * (self.width - 2), self._borderColor)\n\t\t\t\t# endpoints\n\t\t\t\tself.wts(position, 0, '├', self._borderColor)\n\t\t\t\tself.wts(position, self.width - 1, '┤', self._borderColor)\n\t\t# draw intersections\n\t\tfor x in intersections[1]:\n\t\t\tfor y in intersections[0]:\n\t\t\t\tself.wts(x, y, '┼', self._borderColor)\n\t\tself.verticalBoundaries = intersections[0]\n\t\tif self.screenBorder:\n\t\t\tself.verticalBoundaries.append(self.width)",
"def plt_connecting_lines():\n\n for i in range(0, Molecule.connection_count):\n tmp1 = Molecule.right_endpt[Molecule.left_connection[i] - 1]\n tmp2 = Molecule.left_endpt[Molecule.right_connection[i] - 1]\n tmp3 = Molecule.energy[Molecule.left_connection[i] - 1]\n tmp4 = Molecule.energy[Molecule.right_connection[i] - 1]\n\n plt.plot([tmp1, tmp2], [tmp3, tmp4], color=PlotParameter.connection_line_color,\n lw=PlotParameter.connection_line_width, linestyle='--')\n\n return None",
"def draw_edges(self):\n pass",
"def plot_graph(self) -> None:",
"def make_line_plot(the_sets, params):\n bed_filename = params['bed_filename']\n\n coords = get_read_coordinates(bed_filename, normalize=True)\n gapset, spanset, preset, postset = the_sets\n colors = node_set_colors(coords.keys(), gapset, spanset, preset, postset)\n\n y_increment = (1. / float(len(coords)))\n y_values = [float(i) * y_increment for i in range(0, len(coords))]\n for i, (coord, y_value) in enumerate(zip(coords.values(), y_values)):\n plt.plot(list(coord), [y_value, y_value], color=colors[i], linestyle='-', linewidth=1.5)\n plt.axis('off')\n plt.title(\"IGV style line plot\")",
"def _draw_ephemeris_info(self) -> None:\n\n ephemerides = self.ephemerides\n basic_annotations = self.basic_annotations\n center_ra, center_dec = EphemerisService.center_position(ephemerides)\n\n # is the target moving much?\n ra_min = min(ephemerides, key=lambda e: e.ra).ra\n ra_max = max(ephemerides, key=lambda e: e.ra).ra\n dec_min = min(ephemerides, key=lambda e: e.dec).dec\n dec_max = max(ephemerides, key=lambda e: e.dec).dec\n\n ra_width = ra_max - ra_min\n dec_width = dec_max - dec_min\n if (\n ra_width > FinderChart.MINIMUM_PATH_BOX_WIDTH\n or dec_width > FinderChart.MINIMUM_PATH_BOX_WIDTH\n ):\n significant_movement = True\n else:\n significant_movement = False\n\n # we have to convert angles to floats, as the vstack function does not accept\n # Quantity values\n right_ascensions = [e.ra for e in ephemerides]\n declinations = [e.dec for e in ephemerides]\n epochs = [e.epoch for e in ephemerides]\n start_time = epochs[0]\n end_time = epochs[-1]\n\n dra_start_to_end = right_ascensions[-1] - right_ascensions[0]\n ddec_start_to_end = declinations[-1] - declinations[0]\n\n # plot the target's path\n if significant_movement and not basic_annotations:\n # we have to convert angles to floats, as the vstack function does not accept\n # Quantity values\n right_ascensions_deg = [e.ra.to_value(u.deg) for e in ephemerides]\n declinations_deg = [e.dec.to_value(u.deg) for e in ephemerides]\n lv = np.vstack([right_ascensions_deg, declinations_deg])\n self.plot.show_lines(\n [lv], layer=\"object_path_lines\", color=\"b\", linewidth=1, alpha=1\n )\n\n # direction at the start and end\n ddec_start = declinations[1] - declinations[0]\n dra_end = right_ascensions[-1] - right_ascensions[-2]\n ddec_end = declinations[-1] - declinations[-2]\n\n if not basic_annotations:\n if significant_movement:\n # plot the arrow at the end time to show the direction\n self._draw_arrow_head(\n right_ascensions[-1], declinations[-1], dra_end, ddec_end\n )\n else:\n ra_correction = abs(np.cos(center_dec))\n v_x, v_y = dra_start_to_end * ra_correction, ddec_start_to_end\n length = np.sqrt(v_x ** 2 + v_y ** 2)\n v_x, v_y = (\n v_x.to_value(u.deg) / length.to_value(u.deg),\n v_y.to_value(u.deg) / length.to_value(u.deg),\n )\n self._draw_arrow_head(\n center_ra + 0.0013 * u.deg * v_x / ra_correction,\n center_dec + 0.0013 * u.deg * v_y,\n dra_start_to_end,\n ddec_start_to_end,\n )\n\n # the labels shouldn't overlap with the path\n abs_vertical_shift = 0.002 * u.deg\n if significant_movement:\n label_position_start = {\n \"horizontal_alignment\": \"center\",\n \"horizontal_position\": right_ascensions[0],\n \"vertical_alignment\": \"top\" if ddec_start > 0 else \"bottom\",\n \"vertical_position\": declinations[0],\n \"vertical_shift\": (-1 if ddec_start > 0 else 1)\n * abs_vertical_shift,\n }\n label_position_end = {\n \"horizontal_alignment\": \"center\",\n \"horizontal_position\": right_ascensions[-1],\n \"vertical_alignment\": \"bottom\" if ddec_start > 0 else \"top\",\n \"vertical_position\": declinations[-1],\n \"vertical_shift\": (1 if ddec_end > 0 else -1) * abs_vertical_shift,\n }\n else:\n radius = 0.5 * FinderChart.MINIMUM_PATH_BOX_WIDTH\n abs_vertical_position_offset = radius\n label_position_start = {\n \"horizontal_alignment\": \"center\",\n \"horizontal_position\": center_ra,\n \"vertical_alignment\": \"top\" if ddec_start_to_end > 0 else \"bottom\",\n \"vertical_position\": center_dec\n + (-1 if ddec_start_to_end > 0 else 1)\n * abs_vertical_position_offset,\n \"vertical_shift\": (-1 if ddec_start_to_end > 0 
else 1)\n * abs_vertical_shift,\n }\n label_position_end = {\n \"horizontal_alignment\": \"center\",\n \"horizontal_position\": center_ra,\n \"vertical_alignment\": \"bottom\" if ddec_start_to_end > 0 else \"top\",\n \"vertical_position\": center_dec\n + (1 if ddec_start_to_end > 0 else -1)\n * abs_vertical_position_offset,\n \"vertical_shift\": (1 if ddec_start_to_end > 0 else -1)\n * abs_vertical_shift,\n }\n\n # add the start time label\n self.draw_label(\n label_position_start[\"horizontal_position\"],\n label_position_start[\"vertical_position\"]\n + label_position_start[\"vertical_shift\"],\n start_time.strftime(\"%Y-%m-%d %H:%M UT\"),\n size=\"8\",\n horizontalalignment=label_position_start[\"horizontal_alignment\"],\n verticalalignment=label_position_start[\"vertical_alignment\"],\n color=(0, 0, 1),\n )\n\n # add the end time label\n self.draw_label(\n label_position_end[\"horizontal_position\"],\n label_position_end[\"vertical_position\"]\n + label_position_end[\"vertical_shift\"],\n end_time.strftime(\"%Y-%m-%d %H:%M UT\"),\n size=\"8\",\n horizontalalignment=label_position_end[\"horizontal_alignment\"],\n verticalalignment=label_position_end[\"vertical_alignment\"],\n color=(0, 0, 1),\n )\n\n # add a \"target circle\" if the movement isn't significant\n if not significant_movement:\n self.draw_circle(\n center_ra, center_dec, FinderChart.MINIMUM_PATH_BOX_WIDTH / 2.0, \"b\"\n )\n else:\n # output the time range\n self.draw_label(\n center_ra,\n center_dec - 4 * u.arcmin,\n start_time.strftime(\"%Y-%m-%d %H:%M UT\")\n + \" - \"\n + end_time.strftime(\"%Y-%m-%d %H:%M UT\"),\n size=\"large\",\n horizontalalignment=\"center\",\n verticalalignment=\"bottom\",\n color=(0, 0.5, 1),\n )",
"def generate_plot(self):\r\n\t\tx, y = zip(*[p.p for p in self.universe])\r\n\t\tself.ax.cla()\r\n\t\tself.ax.plot(x, y, '.')\r\n\t\tself.ax.set_title('Universe at time: %d' % self.universe.time)\r\n\t\tself.ax.set_xlim([P_MU-4*P_STD, P_MU+4*P_STD])\r\n\t\tself.ax.set_ylim([P_MU-4*P_STD, P_MU+4*P_STD])",
"def plotPsCurve(mcoolsPath:list,celltypeNames:list,chroms:list,resolution=100000,title=\"P(s) curve\",plotType=\"interaction\",base=1.1,log_x=True,log_y=True):\n import plotly.express as px\n from IPython.display import Image\n\n #Calculate P(s) data, get a 3 column pd.DataFrame with (bin,resolution,celltype)\n psDataAll = []\n for i in range(len(mcoolsPath)):\n psDataAll.append(compartment.getPsData(mcoolsPath[i],[\"chr\"+str(i+1) for i in range(len(chroms))],resolution=resolution,celltype=celltypeNames[i],base=base)) \n merged = pd.concat(psDataAll)\n\n data = pd.merge(merged,merged.groupby(\"celltype\").sum(),how=\"left\",on=\"celltype\").assign(prob= lambda df: df.aveCount_x/df.aveCount_y)\n\n fig = px.line(x=data[\"bin_x\"]*resolution,y=data[\"prob\"],color=data[\"celltype\"],title=title,log_x=log_x,log_y=log_y).update_layout(template='simple_white')\n fig.update_layout(width=800,height=600)\n fig.update_layout(xaxis_title=\"Genomic Distance(bp)\",\n yaxis_title=\"Contact Probability\")\n if(plotType == \"interaction\"):\n return fig\n else : return Image(fig.to_image(format=\"png\", engine=\"kaleido\"))"
]
| [
"0.65509456",
"0.65389705",
"0.65010214",
"0.64777714",
"0.6332598",
"0.63312864",
"0.6290788",
"0.6257814",
"0.61941946",
"0.61722386",
"0.61518055",
"0.6132958",
"0.6102459",
"0.6102354",
"0.6092172",
"0.60891706",
"0.6056098",
"0.60538334",
"0.6052147",
"0.6045826",
"0.60191655",
"0.6006337",
"0.60010976",
"0.5979625",
"0.59661853",
"0.59615034",
"0.59600174",
"0.59524304",
"0.59360725",
"0.5935583"
]
| 0.6725889 | 0 |
Evaluates the baseline predictor. | def evalBaseline(self, df = None):
if (df is None):
self.r_b = self.df.merge(self.df_user[["user ind", "b_u"]], on = "user ind")
self.r_b = self.r_b.merge(self.df_item[["item ind", "b_i"]], on = "item ind")
self.r_b["baseline"] = self.r_mean + self.r_b["b_u"] + self.r_b["b_i"]
return self.r_b[["user id", "item id", "baseline"]]
else:
df = df.merge(self.df_user, on = "user id").merge(self.df_item, on = "item id")
df["baseline"] = self.r_mean + df["b_u"] + df["b_i"]
# clip the score to the interval [1, 5]
df["baseline"] = np.minimum(np.maximum(df["baseline"], 1), 5)
return df[["user id", "item id", "baseline"]] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def evaluate_prediction(self):\n\n # ratio_train = self.evaluate_data(self.train_x, self.train_y)\n ratio_test = self.evaluate_data(self.test_x, self.test_y)\n\n print(\"\\n*NAIVE BAYES:\")\n # print(\"Test1: {}%\".format(ratio_dev*100))\n print(\"Test: {} %\".format(ratio_test*100))",
"def baseline(dataset: Dataset, conf: Namespace) -> np.ndarray:\n\n print(\"Calibrating global scores...\")\n # Copy dataset dataframe and set up the score and test columns\n df = dataset.df.copy()\n df['test'] = df['fold'] == dataset.fold\n df['score'] = df[dataset.feature]\n\n # Extract score and ground truth of train set\n score = df[ df['test'] == False ][ 'score' ].to_numpy()\n ground_truth = df[ df['test'] == False ][ 'same' ].to_numpy(dtype=int)\n\n # Set up calibrator on train set\n calibrator = BetaCalibration(scores=score,\n ground_truth=ground_truth,\n score_min=-1,\n score_max=1,\n )\n\n # Run calibrator on all data\n return calibrator.predict(df['score'])",
"def evaluate(self):\n RV = -self.predict()\n RV += self.Ystar()\n return RV",
"def evaluate(self):\n # define start index test set\n start_test_set = int(len(self.X) * 2 / 3)\n\n # Different methods for cummulativa vs day-ahead forecasting\n if self.forecast_horizon == 1:\n # In sample\n lin_residuals_in_sample = self.y - (self.betas[0] + np.dot(self.X, self.betas[1]))\n self.rmse_in_sample = np.mean(lin_residuals_in_sample ** 2) ** 0.5\n self.var_in_sample = np.var(self.y)\n\n # Out of sample\n # Calculate MSE of wls-ev prediction\n self.mse_wlsev = np.mean((self.y[start_test_set:] - self.ols_predict()) ** 2)\n # Calculate MSE of benchmark prediction\n self.mse_benchmark = np.mean((self.y[start_test_set:] - self.benchmark_predict()) ** 2)\n else:\n # In Sample with betas estimated on full time series\n lin_residuals_in_sample = rolling_sum(self.y, self.forecast_horizon) - (\n self.betas[0] + np.dot(self.X[:-(self.forecast_horizon-1)], self.betas[1]))\n self.rmse_in_sample = np.mean(lin_residuals_in_sample ** 2) ** 0.5\n self.var_in_sample = np.var(rolling_sum(self.y, self.forecast_horizon))\n\n # Out of sample\n # calculate realized cummulative returns over forecast horizon sequences\n cum_rets_realized = rolling_sum(self.y[start_test_set:], self.forecast_horizon)\n # Calculate MSE of wls-ev prediction, only where realized values are available\n self.mse_wlsev = np.mean((cum_rets_realized - self.ols_predict()[:-(self.forecast_horizon-1)]) ** 2)\n # Calculate MSE of benchmark prediction, only where realized values are available\n self.mse_benchmark = np.mean(\n (cum_rets_realized - self.benchmark_predict()[:-(self.forecast_horizon-1)]) ** 2)\n\n # Calculate out of sample r-squared\n self.oos_r_squared = 1 - (self.mse_wlsev / self.mse_benchmark)\n # Calculate in sample r-squared\n self.in_sample_r_squared = 1.0 - (self.rmse_in_sample ** 2) / self.var_in_sample",
"def evaluate(self):\n\n\t\t## We should be evaluating on dev dataset as well, so commenting x_test\n\t\t#self.model_score = self.model.evaluate(self.x_test, self.y_test_oh, batch_size=2048)\n\t\tself.model_score = self.model.evaluate(self.x_dev, self.y_dev_oh, batch_size=2048)\n\t\tprint(\"%s score = %f\\n\" %(self.modelName, self.model_score[1]))\n\n\t\t##Saving atucal vs predicted predictions\n\t\t##np.argmax returns the index where it see's 1 in the row\n\t\t#y_pred = np.argmax(self.model.predict(self.x_test, batch_size=2048), axis=1)\n\t\ty_pred = np.argmax(self.model.predict(self.x_dev, batch_size=2048), axis=1)\n\n\t\t## vstack will stack them in 2 rows, so we use Trasnpose to get them in column stack\n\t\t#output_predict = np.vstack((np.argmax(self.y_test_oh, axis=1), y_pred)).T\n\t\toutput_predict = np.vstack((np.argmax(self.y_dev_oh, axis=1), y_pred)).T\n\t\toutputFile = self.resultDir + \"/outputPredict.csv\" \n\t\tnp.savetxt(outputFile, output_predict, fmt=\"%5.0f\", delimiter=\",\")\n\n\t\t##Error Analysis of the prediction\n\t\terrorAnalysis(outputFile)\n\n\t\treturn self.model_score",
"def evaluate(self):\n predictions = self.model.predict(self.test[0])\n accuracy = accuracy_score(self.test[1], predictions)\n print(\"Accuracy:\", str(accuracy * 100) + \"%\")\n self.plot_results(predictions)",
"def baseline(x_data, y_data, stra = \"uniform\"):\r\n x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, test_size=0.2)\r\n dummy = DummyClassifier(strategy= stra)\r\n dummy.fit(x_train, y_train)\r\n y_pred = dummy.predict(x_test)\r\n accu = accuracy_score(y_test, y_pred)\r\n return accu",
"def baseline(self):\n return self.data[self.data['treatment'] == 'Baseline']",
"def baseline_test(n_iter, input_dim, X_train, y_train, X_valid, y_valid, X_test, y_test, scaler):\n \n baseline_MSE=[]\n baseline_MAE=[]\n \n \n for i in range(n_iter):\n model = build_baseline_model(input_dim)\n # train the network (i.e., no hyperparameter tuning)\n print(\"[INFO] training model...\")\n H = model.fit(x=X_train, y=y_train,\n validation_data=(X_valid, y_valid),\n batch_size=128,\n epochs=100, verbose=0, shuffle=False)\n # mke predictions on the test set and evaluate it\n \n \n baseline_pred = model.predict(X_test)\n baseline_pred_rescaled = scaler.inverse_transform(baseline_pred)\n \n \n #plot_comparison_graph(y_test, baseline_pred_rescaled)\n #plot_loss(H)\n \n measures = calculate_error_measures(y_test, baseline_pred_rescaled)\n baseline_MSE.append(measures[0])\n baseline_MAE.append(measures[1])\n \n K.clear_session()\n \n \n\n return sum(baseline_MSE)/n_iter, sum(baseline_MAE)/n_iter",
"def supervised_baseline(self, supervised_decoder):\r\n supervised_decoder_pred, _, baseline_reward = sm.validationMetricPerformance(\r\n input_pairs=[(self.source_sentence, self.target_sentence)], encoder=self.supervised_encoder,\r\n decoder=supervised_decoder, similarity_model=self.similarity_model, fluency_model=self.fluency_model,\r\n ESIM_model=self.ESIM_model, logr_model=self.logr_model, std_scaler=self.std_scaler,\r\n similarity_dist=self.similarity_dist, fluency_dist=self.fluency_dist, ESIM_dist=self.ESIM_dist,\r\n vocab_index=vocab_index, verbose=False, metric=self.reward_function)\r\n \r\n supervised_decoder_pred = supervised_decoder_pred[0][1]\r\n \r\n return supervised_decoder_pred, np.around(baseline_reward, 3)",
"def baseline(self) -> List[PredictionsDatapoints]:\n return self._baseline",
"def baseline(*args):\n XTrain, XTest, yTrain, yTest = args\n clf = DecisionTreeClassifier(random_state=42)\n clf.fit(XTrain, yTrain)\n return clf.score(XTest, yTest), clf.feature_importances_",
"def _evaluate(self, y_true, y_pred):\n pass",
"def _eval_classifier(self):\n\n y_pred_baseline = self.df_baseline[self.score_column]\n y_pred_sample = self.df_sample[self.score_column]\n\n y_label_baseline = self.df_baseline[self.label_column]\n y_label_sample = self.df_sample[self.label_column]\n\n precision_baseline = precision_score(y_label_baseline, y_pred_baseline)\n recall_baseline = recall_score(y_label_baseline, y_pred_baseline)\n acc_baseline = accuracy_score(y_label_baseline, y_pred_baseline)\n f1_baseline = f1_score(y_label_baseline, y_pred_baseline)\n try:\n auc_baseline = roc_auc_score(y_label_baseline, y_pred_baseline)\n except ValueError:\n auc_baseline = \"NA\"\n\n precision_sample = precision_score(y_label_sample, y_pred_sample)\n recall_sample = recall_score(y_label_sample, y_pred_sample)\n acc_sample = accuracy_score(y_label_sample, y_pred_sample)\n f1_sample = f1_score(y_label_sample, y_pred_sample)\n try:\n auc_sample = roc_auc_score(y_label_sample, y_pred_sample)\n except ValueError:\n auc_sample = \"NA\"\n\n metrics_df = pd.DataFrame(\n {\n \"Accuracy\": [acc_baseline, acc_sample],\n \"Precision\": [precision_baseline, precision_sample],\n \"Recall\": [recall_baseline, recall_sample],\n \"F1\": [f1_baseline, f1_sample],\n \"AUC\": [auc_baseline, auc_sample],\n },\n index=[\"baseline\", \"sample\"],\n )\n\n self.performance_comparison = metrics_df",
"def evaluate(self, X_test, y_test):\n self.run(self)\n self.y_pred = self.pipeline.predict(X_test)\n self.rmse = compute_rmse(self.y_pred, y_test)",
"def fit_predict(self, train_x: pd.DataFrame, train_y: pd.Series, test_x: pd.DataFrame, test_y: pd.Series) -> dict:\n self.evaluator.fit(train_x, train_y, test_x, test_y)\n predictions = self.evaluator.predict(test_x)\n print(predictions)\n metrics = metrics_stat(predictions, test_y)\n return metrics",
"def evaluate(self, prediction_fn):\n pass",
"def baseline_predictor(self, user, movie):\n return self.user_biases[user] + self.movie_biases[movie] + self.global_mean",
"def predict_and_eval_in_val(self, sess, tst_reader, metrics):\n raise NotImplementedError(\"\"\"please customize predict_and_eval_in_val\"\"\")",
"def evaluate(self) -> Dict[str, float]:\n eval_dataloader = self.get_eval_dataloader()\n\n output = self._prediction_loop(eval_dataloader, description=\"Evaluation\")\n return output.metrics",
"def mean_baseline(self):\n train_mean = np.mean(self.data.loc[self.train_index, self.target_name])\n rmse = np.sqrt(\n np.mean(np.square(self.data.loc[self.test_index, self.target_name] - train_mean)))\n print 'mean baseline RMSE: {}'.format(rmse)",
"def baseline(data):\n weights = weighting(data)\n return np.inner(weights,data['clicks'])/weights.sum()",
"def eval(self):\n target_truth_labels = self.get_target_labels()\n for key in self.id_uncertainty_measures.keys():\n # deep copy needed as we mutate confidence values later on\n decision_fn_value = np.concatenate((copy.deepcopy(self.id_uncertainty_measures[key]),\n copy.deepcopy(self.ood_uncertainty_measures[key])),\n axis=0)\n # negation needed for confidence, as confidence is indicator of label=0 samples\n # i.e for correct classified samples.\n # But we need scores for label=1 samples i.e misclassified samples\n # to be higher, so we negate.\n if key == UncertaintyMeasuresEnum.CONFIDENCE or key == UncertaintyMeasuresEnum.PRECISION:\n decision_fn_value *= -1.0\n\n aupr, auroc = ClassifierPredictionEvaluator.compute_pr_roc_curves(\n decision_fn_value, target_truth_labels, self.result_dir, key._value_)\n\n with open(os.path.join(self.result_dir, 'results.txt'), 'a') as f:\n f.write('AUPR using ' + key._value_ + \": \" +\n str(np.round(aupr * 100.0, 1)) + '\\n')\n f.write('AUROC using ' + key._value_ + \": \" +\n str(np.round(auroc * 100.0, 1)) + '\\n')",
"def __call__(self, y_true: np.ndarray, y_pred: np.ndarray) -> float:",
"def run_analyses(y_predict_train, y_train, y_predict, y_test):\n # calculate metrics\n _, training_error = output_error(y_predict_train, y_train)\n (precision, recall, f1, _), testing_error = output_error(y_predict, y_test)\n \n # print out metrics\n print 'Average Precision:', np.average(precision)\n print 'Average Recall:', np.average(recall)\n print 'Average F1:', np.average(f1)\n print 'Training Error:', training_error\n print 'Testing Error:', testing_error",
"def test(self) -> None:\n\n self._predictions = self._lr.predict(self._X_test)",
"def baseline(self, baseline: List[PredictionsDatapoints]):\n\n self._baseline = baseline",
"def predict(self) :\n y_pred = np.dot(self.W.T,self.X_test) + self.b \n if self.thr!=-1 :\n y_pred[y_pred <= self.thr] = -1\n y_pred[y_pred > self.thr] = 1\n y_pred = y_pred.astype(\"int\")\n corr = 0\n for i in range(y_pred.shape[1]) :\n if y_pred[:,i]==self.y_test[:,i] :\n corr += 1\n accu = (corr / y_pred.shape[1])*100\n print(\"ACCURACY : {}\".format(accu))\n else :\n rmse = np.sqrt(np.sum(np.square(self.y_test - y_pred)) / y_pred.shape[1])\n print(\"RMSE : {}\".format(rmse))",
"def test_calc_baseline_error_to_observed_error(self):\r\n exp_ratio = calc_baseline_error_to_observed_error(\r\n self.baseline_error_input,\r\n self.obs_error_input)\r\n self.assertEqual(self.ratio_result, exp_ratio)",
"def evaluate(self, test_x, test_y):\n score = self._model.evaluate(test_x, test_y, verbose=self._verbose)\n print(\"Test score: \", score[0])\n print(\"Test accuracy: \", score[1])"
]
| [
"0.6852386",
"0.6681168",
"0.66578424",
"0.6598341",
"0.6555525",
"0.65538764",
"0.65262705",
"0.65068984",
"0.65044415",
"0.64675784",
"0.6452903",
"0.63666344",
"0.6350837",
"0.6343575",
"0.6316385",
"0.630979",
"0.62960446",
"0.6286752",
"0.62639403",
"0.625002",
"0.62463796",
"0.6227708",
"0.6222456",
"0.622223",
"0.6217671",
"0.6216862",
"0.62109715",
"0.6202867",
"0.6171752",
"0.61443704"
]
| 0.7059672 | 0 |
Estimates model coefficients from calculated shifts. | def initialize_model(self, positions, shifts_y, shifts_x):
shifts_y = list(map(lambda x: x*-1, shifts_y))
shifts_x = list(map(lambda x: x*-1, shifts_x))
def list_shift(pos, c):
return np.array([DeformationModel.calculate_shifts_from_coeffs(p[0],
p[1], p[2], c) for p in pos])
def residuals(c, shift, pos):
return shift - list_shift(pos, c)
c0y = [1] * 9
res_y = optimize.leastsq(residuals, c0y, args=(shifts_y, positions))[0]
c0x = [1] * 9
res_x = optimize.leastsq(residuals, c0x, args=(shifts_x, positions))[0]
result = np.concatenate((res_y, res_x), axis=0).reshape(2, 9)
self.coeffs = result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def coefficients(self) :\n raise NotImplementedError",
"def coef_(self):\n assert self.sess is not None, \"Model has not been fitted yet!\"\n return self.sess.run(self.W_lst)[0]",
"def coefficients(self):\r\n return self.coef_['x']",
"def apply_const_shift_operator(model, operator):\n for term in operator:\n dif = take_derivative_shift_op(model, term)\n try:\n total += dif\n except NameError:\n total = dif\n return total",
"def find_coefficients(self):\n self.make_matrix()\n self.coeffs = np.linalg.solve(self.global_matrix,self.global_vector)\n self.coeffs = np.append(self.coeffs, self.D) #Initial condition",
"def getModel(self):\n m,n = self.m,self.n\n A = np.zeros((m,m))\n B = np.zeros((m,n))\n C = np.zeros(m)\n Apattern,Bpattern,Cpattern = self.coeffPattern\n for i,e in enumerate(self.estimators):\n aofs = 0\n bofs = m\n cofs = m+n\n if Apattern==None:\n ai = e.x[aofs:m+aofs]\n else:\n bofs=aofs\n ai = []\n for j,pj in enumerate(Apattern[i]):\n if pj == None:\n ai.append(e.x[bofs])\n bofs += 1\n else:\n ai.append(pj)\n if Bpattern==None:\n bi = e.x[bofs:n+bofs]\n else:\n cofs=bofs\n bi = []\n for j,pj in enumerate(Bpattern[i]):\n if pj == None:\n bi.append(e.x[cofs])\n cofs += 1\n else:\n bi.append(pj)\n if Cpattern==None:\n ci = e.x[cofs]\n cofs+=1\n else:\n if Cpattern[i] == None:\n ci = e.x[cofs]\n cofs+=1\n else:\n ci = Cpattern[i]\n assert(cofs == e.n)\n assert len(ai)==m\n assert len(bi)==n\n A[i,:] = ai\n B[i,:] = bi\n C[i] = ci\n return (A,B,C)",
"def set_coefficients(cls):\r\n \"\"\" EXECUTE THIS FUNCTION IN THE FARM CLASS! \"\"\"\r\n\r\n # select file names according to airfoil section:\r\n if cls.SEC == 1:\r\n clfile, cdfile = 'naca0012cl.csv', 'naca0012cd.csv'\r\n elif cls.SEC == 2:\r\n clfile, cdfile = 'naca0015cl.csv', 'naca0015cd.csv'\r\n elif cls.SEC == 3:\r\n clfile, cdfile = 'naca0018cl.csv', 'naca0018cd.csv'\r\n elif cls.SEC == 4:\r\n clfile, cdfile = 'naca0021cl.csv', 'naca0021cd.csv'\r\n elif cls.SEC == 5:\r\n clfile, cdfile = 'du06w200cl.csv', 'du06w200cd.csv'\r\n else:\r\n raise Exception('Input error: invalid airfoil section number!')\r\n \r\n # load arrays of coefficients:\r\n CL = np.loadtxt(clfile, delimiter=',')\r\n CD = np.loadtxt(cdfile, delimiter=',')\r\n\r\n # angle of attack and reynolds tables:\r\n if cls.SEC != 5:\r\n AA = np.loadtxt('nacaaa.csv', unpack=True)\r\n RE = np.loadtxt('nacare.csv', unpack=True)\r\n else:\r\n AA = np.loadtxt('du06w200aa.csv', unpack=True)\r\n RE = np.loadtxt('du06w200re.csv', unpack=True) \r\n \r\n # create functions for lift and drag coefficients:\r\n fCL = interp2d(RE, AA, CL, kind='cubic')\r\n fCD = interp2d(RE, AA, CD, kind='cubic')\r\n \r\n # vectorize lift and drag functions:\r\n cls.v_fCL, cls.v_fCD = np.vectorize(fCL), np.vectorize(fCD)",
"def coefficients(self) :\n return self.__coefficients",
"def _set_coefficients(self, user_defined_coefficients=None):\n # Check to ensure that if there any NaNs, a different basis must be used and solver must be changed\n # to least squares!\n if user_defined_coefficients is not None:\n self.coefficients = user_defined_coefficients\n return\n indices_with_nans = np.argwhere(np.isnan(self._model_evaluations))[:,0]\n if len(indices_with_nans) != 0:\n print('WARNING: One or more of your model evaluations have resulted in an NaN. We found '+str(len(indices_with_nans))+' NaNs out of '+str(len(self._model_evaluations))+'.')\n print('The code will now use a least-squares technique that will ignore input-output pairs of your model that have NaNs. This will likely compromise computed statistics.')\n self.inputs = np.delete(self._quadrature_points, indices_with_nans, axis=0)\n self.outputs = np.delete(self._model_evaluations, indices_with_nans, axis=0)\n self.subsampling_algorithm_name = None\n number_of_basis_to_prune_down = self.basis.cardinality - len(self.outputs)\n if number_of_basis_to_prune_down > 0:\n self.basis.prune(number_of_basis_to_prune_down + self.dimensions) # To make it an over-determined system!\n self.method = 'least-squares'\n self.mesh = 'user-defined'\n self._set_solver()\n self._set_points_and_weights()\n self.set_model()\n if self.mesh == 'sparse-grid':\n counter = 0\n multi_index = []\n coefficients = np.empty([1])\n multindices = np.empty([1, self.dimensions])\n for tensor in self.quadrature.list:\n P = self.get_poly(tensor.points, tensor.basis.elements)\n W = np.diag(np.sqrt(tensor.weights))\n A = np.dot(W , P.T)\n _, _ , counts = np.unique( np.vstack( [tensor.points, self._quadrature_points]), axis=0, return_index=True, return_counts=True)\n indices = [i for i in range(0, len(counts)) if counts[i] == 2]\n b = np.dot(W , self._model_evaluations[indices])\n del counts, indices\n coefficients_i = self.solver.get_coefficients(A, b) * self.quadrature.sparse_weights[counter]\n multindices_i = tensor.basis.elements\n coefficients = np.vstack([coefficients_i, coefficients])\n multindices = np.vstack([multindices_i, multindices])\n counter = counter + 1\n multindices = np.delete(multindices, multindices.shape[0]-1, 0)\n coefficients = np.delete(coefficients, coefficients.shape[0]-1)\n unique_indices, indices , counts = np.unique(multindices, axis=0, return_index=True, return_counts=True)\n coefficients_final = np.zeros((unique_indices.shape[0], 1))\n for i in range(0, unique_indices.shape[0]):\n for j in range(0, multindices.shape[0]):\n if np.array_equiv( unique_indices[i,:] , multindices[j,:]):\n coefficients_final[i] = coefficients_final[i] + coefficients[j]\n self.coefficients = coefficients_final\n self.basis.elements = unique_indices\n else:\n P = self.get_poly(self._quadrature_points)\n W = np.diag(np.sqrt(self._quadrature_weights))\n A = np.dot(W , P.T)\n b = np.dot(W , self._model_evaluations)\n if self.gradient_flag:\n # Now, we can reduce the number of rows!\n dP = self.get_poly_grad(self._quadrature_points)\n C = cell2matrix(dP, W)\n G = np.vstack([A, C])\n r = np.linalg.matrix_rank(G)\n m, n = A. shape\n print('Gradient computation: The rank of the stacked matrix is '+str(r)+'.')\n print('The number of unknown basis terms is '+str(n))\n if n > r:\n print('WARNING: Please increase the number of samples; one way to do this would be to increase the sampling-ratio.')\n self.coefficients = self.solver.get_coefficients(A, b, C, self._gradient_evaluations)\n else:\n self.coefficients = self.solver.get_coefficients(A, b)",
"def forc_model(self):\n lag1_loc = self.X[self.model_mask_cols].columns.get_loc('shrink_value_per_day_lag1_by_store')\n lag2_loc = self.X[self.model_mask_cols].columns.get_loc('shrink_value_per_day_lag2_by_store')\n for add in self.X.address1.unique():\n add_mask = self.X.address1 == add\n foo = self.X[ add_mask ].sort_values('visit_date', ascending=False)\n top_index = foo.index[0]\n clust = int(foo.cluster.values[0])\n # get values from last visit for store\n base_input = foo[self.model_mask_cols].values[0]\n base_actual = self.y[top_index]\n lag2_val = base_input[lag1_loc]\n lag1_val = base_actual\n\n for i in range(1, self.num_periods + 1):\n model = self.model_list[clust]\n inputs = base_input\n inputs[lag1_loc] = lag1_val\n inputs[lag2_loc] = lag2_val\n \n pred = model.predict(inputs.reshape(1, -1))\n self._update_cust_table(add, i, pred)\n \n lag2_val = lag1_val\n lag1_val = pred",
"def get_coefficients(self):\n return self.coefficients",
"def get_coefficients(self):\n return self.coefficients",
"def fit(self):\n\t\tfor i in range(self.n_iter):\n\t\t\tself.weights = self.weights - (self.learning_rate / self.n_samples) \\\n\t\t\t\t* self.x_data.T @ (self.x_data @ self.weights - self.y_data)\n\n\t\tself.intercept_ = self.weights[0]\n\t\tself.coef_ = self.weights[1:]\n\n\t\treturn self",
"def reconstruct(self, coefs, *args):\n return self._waverec(\n wt.unravel_coeffs(coefs, *args, output_format=self._of), self.wname\n )",
"def generateCoefficients (self):\n\t\tself.ws = []\n\t\tif not self.sine:\n\t\t\tself.bs = []\n\t\tmean = np.zeros(self.dim)\n\t\tcov = np.eye(self.dim)*(2*self.gammak)\n\n\t\tif self.sine:\n\t\t\tfor _ in range(self.rn):\n\t\t\t\tself.ws.append(nr.multivariate_normal(mean, cov))\n\t\telse:\n\t\t\tfor _ in range(self.rn):\n\t\t\t\tself.ws.append(nr.multivariate_normal(mean, cov))\n\t\t\t\tself.bs.append(nr.uniform(0.0, 2*np.pi))",
"def calculate_shift(self, y, x, t, axis):\n return DeformationModel.calculate_shifts_from_coeffs(y, x, t,\n self.coeffs[axis])",
"def coefficients(self):\n return self._coefficients",
"def coefficients(self):\n return self._coefficients",
"def calculate_coefficients(self, start, end):\n A = np.array([\n [self.deltaT**3, self.deltaT**4, self.deltaT**5],\n [3 * self.deltaT**2, 4 * self.deltaT**3, 5 * self.deltaT**4],\n [6 * self.deltaT, 12 * self.deltaT**2, 20 * self.deltaT**3],\n ])\n\n a_0, a_1, a_2 = start[0], start[1], start[2] / 2.0\n c_0 = a_0 + a_1 * self.deltaT + a_2 * self.deltaT**2\n c_1 = a_1 + 2 * a_2 * self.deltaT\n c_2 = 2 * a_2\n\n B = np.array([\n end[0] - c_0,\n end[1] - c_1,\n end[2] - c_2\n ])\n\n a_3_4_5 = np.linalg.solve(A, B)\n coeff = np.concatenate((np.array([a_0, a_1, a_2]), a_3_4_5))\n\n return coeff",
"def _get_coeffs(self):\n # lift (Clmax) and parasitic drag (Cd0max)\n self.cl = 0.0\n self.cd = 0.0\n kpp = 0.0\n\n for sail in self.sails:\n\n self.cl += sail.cl(self.awa) * sail.area * sail.bk\n self.cd += sail.cd(self.awa) * sail.area * sail.bk\n kpp += sail.cl(self.awa) ** 2 * sail.area * sail.bk * sail.kp\n\n self.cl /= self.area\n self.cd /= self.area\n\n # viscous quadratic parasitic drag and induced drag\n devisor_1 = self.area * self.cl ** 2\n devisor_2 = np.pi * self._heff(self.awa) ** 2\n self.CE = (kpp / devisor_1 if devisor_1 else 0.0) + (self.area / devisor_2 if devisor_2 else 0.0)\n\n # fraction of parasitic drag due to jib\n self.fcdj = 0.0\n for sail in self.sails:\n if sail.type == \"jib\":\n self.fcdj = (\n sail.bk * sail.cd(self.awa) * sail.area / (self.cd * self.area)\n )\n\n # final lift and drag\n self.cd = self.cd * (\n self.flat * self.fcdmult(self.flat) * self.fcdj + (1 - self.fcdj)\n ) + self.CE * self.cl ** 2 * self.flat ** 2 * self.fcdmult(self.flat)\n self.cl = self.flat * self.cl",
"def smooth_wcoeffs(coeffs,order=1):\n from copy import deepcopy\n coeffs_new=deepcopy(coeffs)\n\n for j in range(1,order+1):\n # create a list of three zero arrays\n czero=tuple(zeros(coeffs[-j][i].shape) for i in range(3))\n # to replace detailed coeffs with zeros\n coeffs_new[-j]=czero\n\n return coeffs_new",
"def smooth_wcoeffs(coeffs,order=1):\n from copy import deepcopy\n coeffs_new=deepcopy(coeffs)\n\n for j in range(1,order+1):\n # create a list of three zero arrays\n czero=tuple(np.zeros(coeffs[-j][i].shape) for i in range(3))\n # to replace detailed coeffs with zeros\n coeffs_new[-j]=czero\n\n return coeffs_new",
"def hexapodZernikeMultiLinearModel_hexapodcoordinate():\n Tfile='/home/jghao/research/decamFocus/psf_withseeing/finerGrid_coeff_matrix/zernike_coeff_finerGrid_training.cp'\n Vfile = '/home/jghao/research/decamFocus/psf_withseeing/finerGrid_coeff_matrix/zernike_coeff_finerGrid_validate.cp'\n b=p.load(open(Tfile))\n vb=p.load(open(Vfile))\n nobs = len(b)\n x = b[:,0]\n y = b[:,1]\n z = b[:,2]\n theta = b[:,3]\n phi = b[:,4]\n fwhm = b[:,5]\n e1 = b[:,6]\n e2 = b[:,7]\n thetax = theta*np.cos(np.deg2rad(phi))\n thetay = theta*np.sin(np.deg2rad(phi))\n xh = x*1000 # convert to hexapod coordinate\n yh = -y*1000\n zh = -z*1000\n xtilth = - thetay\n ytilth = - thetax\n dataX = b[:,8:68]\n coeff_xh = sm.WLS(xh,dataX).fit().params\n coeff_yh = sm.WLS(yh,dataX).fit().params\n coeff_zh = sm.WLS(zh,dataX).fit().params\n coeff_xtilth = sm.WLS(xtilth,dataX).fit().params\n coeff_ytilth = sm.WLS(ytilth,dataX).fit().params\n coeff = np.array([coeff_xh,coeff_yh,coeff_zh,coeff_xtilth,coeff_ytilth])\n vx = vb[:,0]\n vy = vb[:,1]\n vz = vb[:,2]\n vtheta = vb[:,3]\n vphi = vb[:,4]\n vfwhm = vb[:,5]\n ve1 = vb[:,6]\n ve2 = vb[:,7]\n vthetax = vtheta*np.cos(np.deg2rad(vphi))\n vthetay = vtheta*np.sin(np.deg2rad(vphi))\n vxh = vx*1000 # convert to hexapod coordinate\n vyh = -vy*1000\n vzh = -vz*1000\n vxtilth = - vthetay\n vytilth = - vthetax\n vdataX = vb[:,8:68]\n fit = np.dot(vdataX,coeff.T)\n bp.bin_scatter(vxh,fit[:,0],nbins=20,fmt='bo',scatter=True)\n bp.bin_scatter(vyh,fit[:,1],nbins=20,fmt='bo',scatter=True)\n bp.bin_scatter(vzh,fit[:,2],nbins=20,fmt='bo',scatter=True)\n bp.bin_scatter(vxtilth,fit[:,3],nbins=20,fmt='bo',scatter=True)\n bp.bin_scatter(vytilth,fit[:,4],nbins=20,fmt='bo',scatter=True)",
"def adjust_cost(self) -> None:\n\n n_iterations = self.array.shape[-1]\n n_year = len(self.array.year.values)\n\n # If uncertainty is not considered, the cost factor equals 1.\n # Otherwise, a variability of +/-30% is added.\n\n if n_iterations == 1:\n cost_factor = 1\n else:\n if \"reference\" in self.array.value.values.tolist():\n cost_factor = np.ones((n_iterations, 1))\n else:\n cost_factor = np.random.triangular(0.7, 1, 1.3, (n_iterations, 1))\n\n # Correction of hydrogen tank cost, per kg\n # Correction of fuel cell stack cost, per kW\n if \"FCEV\" in self.array.powertrain:\n self.array.loc[\n dict(powertrain=\"FCEV\", parameter=\"fuel tank cost per kg\")\n ] = np.reshape(\n (1.078e58 * np.exp(-6.32e-2 * self.array.year.values) + 3.43e2)\n * cost_factor,\n (1, n_year, n_iterations),\n )\n\n self.array.loc[\n dict(powertrain=\"FCEV\", parameter=\"fuel tank cost per kg\")\n ] = np.reshape(\n (3.15e66 * np.exp(-7.35e-2 * self.array.year.values) + 2.39e1)\n * cost_factor,\n (1, n_year, n_iterations),\n )\n\n # Correction of energy battery system cost, per kWh\n list_batt = [\n i\n for i in [\"BEV\", \"PHEV-e\", \"PHEV-c-p\", \"PHEV-c-d\"]\n if i in self.array.powertrain\n ]\n if len(list_batt) > 0:\n self.array.loc[\n dict(powertrain=list_batt, parameter=\"energy battery cost per kWh\")\n ] = np.reshape(\n (2.75e86 * np.exp(-9.61e-2 * self.array.year.values) + 5.059e1)\n * cost_factor,\n (1, 1, n_year, n_iterations),\n )\n\n # Correction of power battery system cost, per kW\n list_pwt = [\n i\n for i in [\n \"ICEV-p\",\n \"ICEV-d\",\n \"ICEV-g\",\n \"PHEV-c-p\",\n \"PHEV-c-d\",\n \"FCEV\",\n \"HEV-p\",\n \"HEV-d\",\n ]\n if i in self.array.powertrain\n ]\n\n if len(list_pwt) > 0:\n self.array.loc[\n dict(powertrain=list_pwt, parameter=\"power battery cost per kW\")\n ] = np.reshape(\n (8.337e40 * np.exp(-4.49e-2 * self.array.year.values) + 11.17)\n * cost_factor,\n (1, 1, n_year, n_iterations),\n )\n\n # Correction of combustion powertrain cost for ICEV-g\n if \"ICEV-g\" in self.array.powertrain:\n self.array.loc[\n dict(powertrain=\"ICEV-g\", parameter=\"combustion powertrain cost per kW\")\n ] = np.clip(\n np.reshape(\n (5.92e160 * np.exp(-0.1819 * self.array.year.values) + 26.76)\n * cost_factor,\n (1, n_year, n_iterations),\n ),\n None,\n 100,\n )",
"def __update(self):\n\n # Make sure loads have been assigned to group\n if type(self.appliedLoad) == Load:\n self.appliedLoad = LoadSet(self.appliedLoad)\n elif type(self.appliedLoad) != LoadSet:\n raise TypeError(\"Applied load must be a Load or LoadSet\")\n\n # Begin Calculations\n _cg = self.cg # calculate the cg once to save computation time\n _appLoad = self.appliedLoad.totalForce\n _appMoment = self.appliedLoad.totalMoment\n\n coef_mat = np.zeros((len(self) * 3, len(self) * 3)) # coeff matrix\n soln_mat = np.zeros(len(self) * 3) # solution matrix\n\n cSet = [[i, i+1, i+2] for i in range(0, 3 * len(self), 3)]\n rSet = [[i+6, i+7, i+8] for i in range(0, 3 * (len(self) - 2), 3)]\n\n for i, j in enumerate(cSet):\n # i = column fastener ID\n # j = column fastener set\n # Mx = yFz - zFy\n # My = zFx - xFz\n # Mz = xFy - yFx\n\n Fx = j[0]\n Fy = j[1]\n Fz = j[2]\n\n # fill in first three rows\n coef_mat[0][Fx] = 1 # sum of Fx\n coef_mat[1][Fy] = 1 # sum of Fy\n coef_mat[2][Fz] = 1 # sum of Fz\n\n # fill in fourth row (sum of Mx at CG)\n coef_mat[3][Fy] = -(F[i].xyz[2] - _cg[2]) # -zFy\n coef_mat[3][Fz] = +(F[i].xyz[1] - _cg[1]) # +yFz\n\n # fill in fifth row (sum of My at CG)\n coef_mat[4][Fx] = +(F[i].xyz[2] - _cg[2]) # +zFx\n coef_mat[4][Fz] = -(F[i].xyz[0] - _cg[0]) # -xFz\n\n # fill in sixth row (sum of Mz at CG)\n coef_mat[5][Fx] = -(F[i].xyz[1] - _cg[1]) # -yFx\n coef_mat[5][Fy] = +(F[i].xyz[0] - _cg[0]) # +xFy\n\n for u, w in enumerate(rSet):\n # u = row fastener ID\n # w = row fastener set\n\n rX = w[0]\n rY = w[1]\n rZ = w[2]\n\n coef_mat[rX][Fy] = -(F[i].xyz[2] - F[u].xyz[2]) # -zFy\n coef_mat[rX][Fz] = +(F[i].xyz[1] - F[u].xyz[1]) # +yFz\n\n coef_mat[rY][Fx] = +(F[i].xyz[2] - F[u].xyz[2]) # +zFx\n coef_mat[rY][Fz] = -(F[i].xyz[0] - F[u].xyz[0]) # -xFz\n\n coef_mat[rZ][Fx] = -(F[i].xyz[1] - F[u].xyz[1]) # -yFx\n coef_mat[rZ][Fy] = +(F[i].xyz[0] - F[u].xyz[0]) # +xFy\n\n # fill in the solution matrix (soln_mat)\n for i in range(3):\n soln_mat[i] = -_netLoad.force[i]\n soln_mat[i+3] = -_netLoad.moment[i]\n\n # fill in the remaining rows\n for i, j in enumerate(rSet):\n # i = fastener\n # j = row\n\n rX = j[0]\n rY = j[1]\n rZ = j[2]\n\n # Mx = (y_cg - y_i)F_znet - (z_cg - z_i)F_ynet + M_xnet\n soln_mat[rX] = - ((_cg[1] - F[i].xyz[1]) * _netLoad.force[2]\n - (_cg[2] - F[i].xyz[2]) * _netLoad.force[1]\n + _netLoad.moment[0])\n\n # My = (z_cg - z_i)F_xnet - (x_cg - x_i)F_znet + M_ynet\n soln_mat[rY] = -((_cg[2] - F[i].xyz[2]) * _netLoad.force[0]\n - (_cg[0] - F[i].xyz[0]) * _netLoad.force[2]\n + _netLoad.moment[1])\n\n # Mz = (x_cg - x_i)F_ynet - (y_cg - y_i)F_xnet + M_znet\n soln_mat[rZ] = -((_cg[0] - F[i].xyz[0]) * _netLoad.force[1]\n - (_cg[1] - F[i].xyz[1]) * _netLoad.force[0]\n + _netLoad.moment[2])\n\n # Solve system of equations\n matSol = np.linalg.lstsq(coef_mat, soln_mat)[0]\n\n # Add resulting fastener loads to fastener objects\n for i, j in enumerate(cSet):\n rX = j[0]\n rY = j[1]\n rZ = j[2]\n\n F[i].force[0] = matSol[rX]\n F[i].force[1] = matSol[rY]\n F[i].force[2] = matSol[rZ]",
"def _tf_model_coeffs(self):\n tf = self.tf\n with self._tf_graph.as_default():\n with tf.variable_scope(\"model\", reuse=tf.AUTO_REUSE):\n return tf.get_variable(\"R\", [self.n_nodes, self.n_nodes],\n dtype=tf.float64)",
"def coefficients(self) -> np.ndarray:\n return self._coefficients",
"def calculate_coefficients(self):\n for i in range(0, self.nz):\n zno = i * self.dz\n self.z[0][i] = zno\n plot_eccentricity_error = False\n position = -1\n for j in range(0, self.ntheta):\n # fmt: off\n self.gama[i][j] = j * self.dtheta + (np.pi - self.beta)\n [radius_external, self.xre[i][j], self.yre[i][j]] = \\\n self.external_radius_function(self.gama[i][j])\n [radius_internal, self.xri[i][j], self.yri[i][j]] = \\\n self.internal_radius_function(zno, self.gama[i][j])\n self.re[i][j] = radius_external\n self.ri[i][j] = radius_internal\n\n w = self.omega * self.ri[i][j]\n\n k = (self.re[i][j] ** 2 * (np.log(self.re[i][j]) - 1 / 2) - self.ri[i][j] ** 2 *\n (np.log(self.ri[i][j]) - 1 / 2)) / (self.ri[i][j] ** 2 - self.re[i][j] ** 2)\n\n self.c1[i][j] = (1 / (4 * self.viscosity)) * ((self.re[i][j] ** 2 * np.log(self.re[i][j]) -\n self.ri[i][j] ** 2 * np.log(self.ri[i][j]) +\n (self.re[i][j] ** 2 - self.ri[i][j] ** 2) *\n (k - 1)) - 2 * self.re[i][j] ** 2 * (\n (np.log(self.re[i][j]) + k - 1 / 2) * np.log(\n self.re[i][j] / self.ri[i][j])))\n\n self.c2[i][j] = (- self.ri[i][j] ** 2) / (8 * self.viscosity) * \\\n ((self.re[i][j] ** 2 - self.ri[i][j] ** 2 -\n (self.re[i][j] ** 4 - self.ri[i][j] ** 4) /\n (2 * self.ri[i][j] ** 2)) +\n ((self.re[i][j] ** 2 - self.ri[i][j] ** 2) /\n (self.ri[i][j] ** 2 *\n np.log(self.re[i][j] / self.ri[i][j]))) *\n (self.re[i][j] ** 2 * np.log(self.re[i][j] / self.ri[i][j]) -\n (self.re[i][j] ** 2 - self.ri[i][j] ** 2) / 2))\n\n self.c0w[i][j] = (- w * self.ri[i][j] *\n (np.log(self.re[i][j] / self.ri[i][j]) *\n (1 + (self.ri[i][j] ** 2) / (self.re[i][j] ** 2 - self.ri[i][j] ** 2)) - 1 / 2))\n # fmt: on\n if not plot_eccentricity_error:\n if abs(self.xri[i][j]) > abs(self.xre[i][j]) or abs(\n self.yri[i][j]\n ) > abs(self.yre[i][j]):\n plot_eccentricity_error = True\n position = i\n if plot_eccentricity_error:\n self.plot_eccentricity(position)\n sys.exit(\n \"Error: The given parameters create a rotor that is not inside the stator. \"\n \"Check the plotted figure and fix accordingly.\"\n )",
"def coefficient(self) -> float:\n ...",
"def coefficients(self):\n if self._coefficients is None:\n return np.hstack([c.coefficients for c in self._traces])\n return self._coefficients"
]
| [
"0.5734117",
"0.5412557",
"0.536942",
"0.5354503",
"0.5295235",
"0.52839845",
"0.52596015",
"0.52424043",
"0.51920736",
"0.5175553",
"0.5158605",
"0.5158605",
"0.5134293",
"0.5107185",
"0.5097566",
"0.5093408",
"0.5078818",
"0.5078818",
"0.50681216",
"0.50326604",
"0.5027408",
"0.5018645",
"0.50094384",
"0.50093144",
"0.50073624",
"0.49972388",
"0.4997139",
"0.49969512",
"0.49964082",
"0.49772525"
]
| 0.693465 | 0 |
Randomly generates a model with reasonable coefficients. | def initialize_model_randomly(self, shape=(2048, 2048), tn=50):
self.coeffs = self.generate_random_coeffs(shape, tn) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_model (d):\n return np.random.rand (d+1, 1)",
"def initializeWeights(mlModel):\n\n randInitRange = mlModel.randInitRange\n\n numFeatures = mlModel.features.shape[1]\n mlModel.weights = np.random.rand(numFeatures+1) * 2 * randInitRange - randInitRange\n\n return mlModel",
"def random_coefficients(self, n=3, max_range = 10):\n return np.random.uniform(-1*max_range, max_range, n)",
"def coef_random(an,bn,random_trun_start=0,random_start=1,random_end= 32, halfwidth0=1,pow=-1):\n\n an=np.asarray(an)\n bn=np.asarray(bn)\n half=halfcube(random_start,random_end,halfwidth0,pow)\n an_random=half*np.random.uniform(-1,1,(random_end-random_start,))\n bn_random=half*np.random.uniform(-1,1,(random_end-random_start,))\n\n an_random=np.append(np.zeros(random_trun_start-0),an_random)\n bn_random=np.append(np.zeros(random_trun_start-0),bn_random)\n\n if an.shape[0]>an_random.shape[0]:\n an_random.resize(an.shape)\n bn_random.resize(bn.shape)\n else:\n an.resize(an_random.shape)\n bn.resize(bn_random.shape)\n an_random=an+an_random\n bn_random=bn+bn_random\n\n return an_random,bn_random",
"def expected_model():\n model = cobra.Model(id_or_model=\"expected_model\", name=\"expected_model\")\n rxn_1 = cobra.Reaction(\"BIOMASS_TEST\")\n rxn_2 = cobra.Reaction(\"RXN2\")\n rxn_3 = cobra.Reaction(\"RXN3\")\n rxn_4 = cobra.Reaction(\"RXN4\")\n model.add_reactions([rxn_1, rxn_2, rxn_3, rxn_4])\n model.objective = rxn_1\n return model",
"def test_custom_models(model):\n atom = ATOMRegressor(X_reg, y_reg, random_state=1)\n atom.run(models=model, n_calls=2, n_initial_points=1)\n assert atom.rfr.fullname == \"RandomForestRegressor\"\n assert atom.rfr.estimator.get_params()[\"random_state\"] == 1",
"def random_weight_init(_p: Perceptron):\n\n _p.weights = [rd.choice([1-rd.random(), -1+rd.random()]) for _ in range(_p.input_size)]",
"def get_model():\n model = ecole.scip.Model.from_file(str(DATA_DIR / \"bppc8-02.mps\"))\n model.disable_cuts()\n model.disable_presolve()\n model.set_param(\"randomization/permuteconss\", True)\n model.set_param(\"randomization/permutevars\", True)\n model.set_param(\"randomization/permutationseed\", 784)\n model.set_param(\"randomization/randomseedshift\", 784)\n model.set_param(\"randomization/lpseed\", 784)\n return model",
"def generate_limittedmodel():\r\n print('Loading model')\r\n model = KeyedVectors.load_word2vec_format(BIN_NAME, binary=True)\r\n print('Model loaded!')\r\n\r\n print('Loading dot products')\r\n dp = np.load(DP_NAME)\r\n print('Dot products loaded')\r\n\r\n print('Filtering vocab')\r\n for name, vocab in list(model.vocab.items()):\r\n if dp[vocab.index] < MAX_DEGREE:\r\n del model.vocab[name]\r\n\r\n il = list(model.vocab.items())\r\n print('Sorting vocab')\r\n il.sort(key=lambda x: x[1].index)\r\n\r\n # Find the indexes of the words that are being kept\r\n print('Generating indexes')\r\n indexes = []\r\n for i in range(0, len(il)):\r\n name, vocab = il[i]\r\n indexes.append(vocab.index)\r\n model.vocab[name].index = i\r\n\r\n print('Modifying model weights')\r\n model.syn0 = model.syn0[indexes]\r\n\r\n print('Saving file')\r\n model.save_word2vec_format(SAVE_NAME, binary=True)",
"def continuous_model():\n return {\"x\": np.random.beta(2, 5, size=100), \"y\": np.random.beta(2, 5, size=100)}",
"def random_models(batch0, source, n_models, n_epochs, ref_source, kgrid, ref_mcmc_version,\n constant=None, epoch_independent=('x', 'z', 'mass'),\n epoch_dependent=('accrate', 'qb'), epoch_chosen=None,\n scratch_file_sys=False):\n aliases = {'mass': 'm_nw', 'accrate': 'mdot'}\n if constant is None:\n constant = {'tshift': 0.0, 'acc_mult': 1.0, 'qnuc': 5.0, 'qb_delay': 0.0,\n 'accmass': 1e16, 'accdepth': 1e19}\n if epoch_chosen is None:\n epoch_chosen = {}\n\n mv = mcmc_versions.McmcVersion(source=ref_source, version=ref_mcmc_version)\n params_full = {}\n\n # ===== fill model params =====\n for key in epoch_independent:\n mv_key = aliases.get(key, key)\n params_full[key] = mcmc_tools.get_random_params(mv_key, n_models=n_models, mv=mv)\n\n # ===== fill constant params =====\n for key, val in constant.items():\n params_full[key] = np.full(n_models, val)\n\n for i in range(n_epochs):\n for key in epoch_dependent:\n if key in epoch_chosen:\n val = epoch_chosen[key][i]\n params_full[key] = np.full(n_models, val)\n else:\n mv_key = aliases.get(key, key)\n mv_key = f'{mv_key}{i+1}'\n params_full[key] = mcmc_tools.get_random_params(mv_key,\n n_models=n_models, mv=mv)\n\n create_batch(batch0+i, dv={}, params={}, source=source, nbursts=30, kgrid=kgrid,\n walltime=96, setup_test=False, nuc_heat=True,\n auto_qnuc=False, grid_version=0, substrate_off=True,\n params_full=params_full, scratch_file_sys=scratch_file_sys)",
"def randomize(self):\n #first take care of all parameters (from N(0,1))\n x = self._get_params_transformed()\n x = np.random.randn(x.size)\n self._set_params_transformed(x)\n #now draw from prior where possible\n x = self._get_params()\n [np.put(x,i,p.rvs(1)) for i,p in enumerate(self.priors) if not p is None]\n self._set_params(x)\n self._set_params_transformed(self._get_params_transformed())#makes sure all of the tied parameters get the same init (since there's only one prior object...)",
"def test_1():\n constr = dict()\n constr['maxfun'] = np.random.randint(1, 5 + 1)\n\n get_random_init(constr)\n simulate('test.trempy.ini')\n estimate('test.trempy.ini')",
"def __init__(self, lm, temp=1.0):\r\n self.lm = lm\r\n self.rnd = random.Random()\r\n self.temp = temp",
"def my_model_random(point=None, size=None):\n return my_model((point[\"H\"], point[\"φ\"], point[\"K\"], point[\"ct\"], point[\"Q\"], point[\"cs\"]), x)\n # return my_model((point[\"m\"], point[\"c\"]), x)",
"def nbc_model(params):\n if (params['random']):\n params['alpha'] = random.randrange(1, 10, step=1) * 0.1\n model = MultinomialNB(\n alpha=params['alpha']\n )\n\n return model",
"def simulate_lm(\n num_obs,\n num_coef,\n coef_vals=None,\n corrs=None,\n mus=0.0,\n sigmas=1.0,\n noise_params=(0, 1),\n family=\"gaussian\",\n seed=None,\n):\n\n if seed is not None:\n np.random.seed(seed)\n\n if coef_vals is not None:\n if len(coef_vals) - num_coef == 0:\n raise ValueError(\n \"Missing one coefficient value. Did you provide a value for the intercept term?\"\n )\n else:\n assert (\n len(coef_vals) == num_coef + 1\n ), \"Number of coefficient values should be num_coef + 1 (for intercept)\"\n\n b = coef_vals\n else:\n b = np.random.rand(num_coef + 1)\n\n if isinstance(mus, list) or isinstance(mus, np.ndarray):\n assert len(mus) == len(b) - 1, \"mus must match number of num_coef\"\n if isinstance(sigmas, list) or isinstance(sigmas, np.ndarray):\n assert len(sigmas) == len(b) - 1, \"sigmas must match number of num_coef\"\n assert (\n isinstance(noise_params, tuple) and len(noise_params) == 2\n ), \"noise_params should be a tuple of (mean,std)\"\n\n # Generate random design matrix\n if corrs is not None:\n X = easy_multivariate_normal(num_obs, num_coef, corrs, mus, sigmas, seed)\n else:\n X = np.random.normal(mus, sigmas, size=(num_obs, num_coef))\n # Add intercept\n X = np.column_stack([np.ones((num_obs, 1)), X])\n # Generate data\n Y = np.dot(X, b) + np.random.normal(*noise_params, size=num_obs)\n # Apply transform if not linear model\n if family == \"binomial\":\n Y = discrete_inverse_logit(Y)\n dat = pd.DataFrame(\n np.column_stack([Y, X[:, 1:]]),\n columns=[\"DV\"] + [\"IV\" + str(elem + 1) for elem in range(X.shape[1] - 1)],\n )\n\n return dat, b",
"def generate_model(**kwargs):\n model = ResNet3D(Bottleneck, [3, 4, 6, 3], [64, 128, 256, 512], **kwargs)\n return model",
"def randomize(self):\r\n # first take care of all parameters (from N(0,1))\r\n x = self._get_params_transformed()\r\n x = np.random.randn(x.size)\r\n self._set_params_transformed(x)\r\n # now draw from prior where possible\r\n x = self._get_params()\r\n if self.priors is not None:\r\n [np.put(x, i, p.rvs(1)) for i, p in enumerate(self.priors) if not p is None]\r\n self._set_params(x)\r\n self._set_params_transformed(self._get_params_transformed()) # makes sure all of the tied parameters get the same init (since there's only one prior object...)\r",
"def raw_model():\n model = cobra.Model(id_or_model=\"raw_model\", name=\"raw_model\")\n rxn_1 = cobra.Reaction(\"BIOMASS_TEST\")\n rxn_2 = cobra.Reaction(\"RXN2\")\n rxn_3 = cobra.Reaction(\"RXN3\")\n rxn_4 = cobra.Reaction(\"RXN4\")\n model.add_reactions([rxn_1, rxn_2, rxn_3, rxn_4])\n model.objective = rxn_3\n return model",
"def __init__(self, reg_penalty='l2', reg=0.001, k_fold=5, random_state=0):\n print(\"Initialize model Perceptron\")\n self.reg_penalty = reg_penalty\n self.reg = reg\n self.k_fold = k_fold\n self.random_state = random_state\n self.model = sklearn.linear_model.Perceptron(penalty=reg_penalty,\n alpha=self.reg,\n max_iter=1000,\n random_state=self.random_state)",
"def init_benchmark_model(\n input_size, hidden_size, num_classes, rand_seed=None,\n **kwargs\n):\n rs = np.random.RandomState(seed=rand_seed)\n\n model = {}\n D, H, C = input_size, hidden_size, num_classes\n model['W1'] = rs.rand(D, H)\n model['b1'] = rs.rand(H)\n model['W2'] = rs.rand(H, C)\n model['b2'] = rs.rand(C)\n\n return model",
"def init_model(self):\n model = Sequential()\n model.add(Dense(units=24, input_dim=self.input_shape[0],\n activation='relu'))\n model.add(Dense(units=24, activation='relu'))\n # We want rewards instead of probability, so use linear here\n model.add(Dense(units=self.output_num, activation='linear'))\n model.compile(loss='mse', optimizer=Adam(lr=self.eta))\n return model",
"def __init__(self, reg_penalty='l2', reg_inv=1.0, k_fold=5, random_state=0):\n print(\"Initialize model Logistic Regression\")\n self.reg_penalty = reg_penalty\n self.reg_inv = reg_inv\n self.k_fold = k_fold\n self.random_state = random_state\n self.model = sklearn.linear_model.LogisticRegression(penalty=self.reg_penalty,\n C=self.reg_inv,\n max_iter=1000, \n random_state=self.random_state)",
"def weightGenerate(self):\n\t\tfor i in range(0, self.numberOfInput):\n\t\t\tself.weight.append(random.random()-0.5)",
"def randomize(self):\n self.weights = np.random.rand(*self.weights.shape) - 0.5",
"def __init__(self, base_model='LogisticRegression', number_model=50, \n hidden_layer_sizes=(100,), activation='relu',\n kernel='poly', degree=3, gamma='auto',\n criterion='gini', reg_penalty='l2', reg=0.001, random_state=0):\n self.number_model = number_model\n r = random_state\n # Initialise all_model list\n self.all_model = []\n for i in range(number_model):\n if base_model=='Perceptron':\n curr_model = Perceptron(reg_penalty=reg_penalty, reg=reg,\n random_state=i+r*100)\n self.all_model.append(curr_model.model)\n elif base_model=='MLPerceptron':\n curr_model = MLPerceptron(hidden_layer_sizes=hidden_layer_sizes,\n activation=activation, reg=reg, random_state=i+r*100)\n self.all_model.append(curr_model.model)\n elif base_model=='LogisticRegression':\n curr_model = LogisticRegression(reg_penalty=reg_penalty,\n reg_inv=reg, random_state=i+r*100)\n self.all_model.append(curr_model.model)\n elif base_model=='ModelSVM':\n curr_model = ModelSVM(kernel=kernel, degree=degree,\n gamma=gamma, reg=reg, random_state=i+r*100)\n self.all_model.append(curr_model.model)\n elif base_model=='ModelDecisionTree':\n curr_model = ModelDecisionTree(criterion=criterion, random_state=i+r*100)\n self.all_model.append(curr_model.model)",
"def sample_model(model, x, y, params_init, model_loss='multi_class_linear_output' ,num_samples=10, num_steps_per_sample=10, step_size=0.1, burn=0, inv_mass=None, jitter=None, normalizing_const=1., softabs_const=None, explicit_binding_const=100, fixed_point_threshold=1e-5, fixed_point_max_iterations=1000, jitter_max_tries=10, sampler=Sampler.HMC, integrator=Integrator.IMPLICIT, metric=Metric.HESSIAN, debug=False, tau_out=1.,tau_list=None, store_on_GPU = True, desired_accept_rate=0.8, verbose = False):\n\n device = params_init.device\n params_shape_list = []\n params_flattened_list = []\n build_tau = False\n if tau_list is None:\n tau_list = []\n build_tau = True\n for weights in model.parameters():\n params_shape_list.append(weights.shape)\n params_flattened_list.append(weights.nelement())\n if build_tau:\n tau_list.append(torch.tensor(1.))\n\n log_prob_func = define_model_log_prob(model, model_loss, x, y, params_flattened_list, params_shape_list, tau_list, tau_out, normalizing_const=normalizing_const, device = device)\n\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n\n return sample(log_prob_func, params_init, num_samples=num_samples, num_steps_per_sample=num_steps_per_sample, step_size=step_size, burn=burn, jitter=jitter, inv_mass=inv_mass, normalizing_const=normalizing_const, softabs_const=softabs_const, explicit_binding_const=explicit_binding_const, fixed_point_threshold=fixed_point_threshold, fixed_point_max_iterations=fixed_point_max_iterations, jitter_max_tries=jitter_max_tries, sampler=sampler, integrator=integrator, metric=metric, debug=debug, desired_accept_rate=desired_accept_rate, store_on_GPU = store_on_GPU, verbose = verbose)",
"def randomize(self, rand_gen=None, *args, **kwargs):\n if rand_gen is None:\n rand_gen = np.random.normal\n # first take care of all parameters (from N(0,1))\n x = rand_gen(size=self._size_transformed(), *args, **kwargs)\n updates = self.update_model()\n self.update_model(False) # Switch off the updates\n self.optimizer_array = x # makes sure all of the tied parameters get the same init (since there's only one prior object...)\n # now draw from prior where possible\n x = self.param_array.copy()\n #Py3 fix\n #[np.put(x, ind, p.rvs(ind.size)) for p, ind in self.priors.iteritems() if not p is None]\n [np.put(x, ind, p.rvs(ind.size)) for p, ind in self.priors.items() if not p is None]\n unfixlist = np.ones((self.size,),dtype=np.bool)\n unfixlist[self.constraints[__fixed__]] = False\n self.param_array.flat[unfixlist] = x.view(np.ndarray).ravel()[unfixlist]\n self.update_model(updates)",
"def generate_random_tropical_poly(max_degree, min_coefficient, max_coefficient):\n coefficients = []\n for d in range(0, random.randint(1, max_degree) + 1):\n coefficients.append(random.randint(min_coefficient, max_coefficient))\n return coefficients"
]
| [
"0.64208275",
"0.5918288",
"0.58846515",
"0.5881533",
"0.5875465",
"0.58405405",
"0.5833292",
"0.5814761",
"0.5787209",
"0.5702006",
"0.5701312",
"0.5696805",
"0.5688786",
"0.5681196",
"0.56801695",
"0.56607825",
"0.56421375",
"0.5633048",
"0.5573776",
"0.55682325",
"0.5542598",
"0.55370194",
"0.5531629",
"0.55298334",
"0.55201584",
"0.5511293",
"0.5480659",
"0.54695755",
"0.54544294",
"0.54515475"
]
| 0.68959796 | 0 |
Generates a vector of reasonable random model coefficients a_i. shape is a (height, width) tuple. Generated coefficients are in interval with c_0 in . | def generate_random_coeffs(shape, tn):
res = np.zeros((2, 9))
# reasonable space-dependent part
width = shape[1]
height = shape[0]
min_val = 1e-4
for i in range(2):
c = res[i]
# generate quadratic coefficients
c[2] = np.random.uniform(min_val, (0.05*width) / (width*width))
longer = max(width, height)
        # c[4] is chosen so that the ratio between it and c[2] is in <1/3, 3>
# and so that the combined effect of c[2] and c[4] is at most 5% of
# the longest image side
lower_bound = min(c[2] / 3.0, (0.1 * longer - c[2] * width) / \
(height*height))
upper_bound = min(c[2] * 3.0, (0.1 * longer - c[2] * width) / \
(height*height))
c[4] = np.random.uniform(lower_bound, upper_bound)
# rotation
rotation = np.random.uniform(-math.pi, math.pi)
cs = math.cos(rotation)
sn = math.sin(rotation)
c[2] = c[2]*cs*cs + c[4]*sn*sn
c[4] = c[2]*sn*sn + c[4]*cs*cs
c[5] = 2*c[4]*sn*cs - 2*c[2]*sn*cs
# translation of the origin
originx = -1*np.random.randint(-int(0.1 * width), int(width + \
0.1 * width))
originy = -1*np.random.randint(-int(0.1 * height), int(height + \
0.1 * height))
c[0] = c[2] * originx * originx + c[4] * originy * originy + \
c[5] * originy * originx
c[1] = 2 * c[2] * originx + c[5] * originy
c[3] = 2 * c[4] * originy + c[5] * originx
#time-dependent part
xn = np.random.uniform(0.001, 2) # max scaling value
c[6] = (3 * xn) / tn
c[7] = (-6*xn) / (tn*tn)
c[8] = (4*xn) / (tn*tn*tn)
return res | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def coef_random(an,bn,random_trun_start=0,random_start=1,random_end= 32, halfwidth0=1,pow=-1):\n\n an=np.asarray(an)\n bn=np.asarray(bn)\n half=halfcube(random_start,random_end,halfwidth0,pow)\n an_random=half*np.random.uniform(-1,1,(random_end-random_start,))\n bn_random=half*np.random.uniform(-1,1,(random_end-random_start,))\n\n an_random=np.append(np.zeros(random_trun_start-0),an_random)\n bn_random=np.append(np.zeros(random_trun_start-0),bn_random)\n\n if an.shape[0]>an_random.shape[0]:\n an_random.resize(an.shape)\n bn_random.resize(bn.shape)\n else:\n an.resize(an_random.shape)\n bn.resize(bn_random.shape)\n an_random=an+an_random\n bn_random=bn+bn_random\n\n return an_random,bn_random",
"def random_coefficients(self, n=3, max_range = 10):\n return np.random.uniform(-1*max_range, max_range, n)",
"def make_coefficients(r, a, num_terms):\n\n\tnum_vars = 4\n\tcoeffs = np.zeros((num_vars, num_terms))\n\tfor i in range(num_vars):\n\t\tcoeffs[i, i+1] = r[i]\n\tcoeffs[0, [5, 6, 7, 8]] = a[0]\n\tcoeffs[1, [6, 9, 10, 11]] = a[1]\n\tcoeffs[2, [7, 10, 12, 13]] = a[2]\n\tcoeffs[3, [8, 11, 13, 14]] = a[3]\n\t\n\treturn coeffs.ravel()",
"def __init_cr(self,i,conv,C):\n self.params['W'+i]=np.random.randn(conv[0],C,conv[1],conv[1])*self.weight_scale\n self.params['b'+i]=np.zeros(conv[0]) \n if self.use_batchnorm:\n self.params['gamma'+i]=np.ones(conv[0])\n self.params['beta'+i]=np.zeros(conv[0])",
"def WeightInitializer():\n return np.random.uniform(-1, 1)",
"def generate_examples_from_coefficients(a2, a3, a4, number_of_observations):\n x = np.linspace(-1, 1, num=number_of_observations)\n examples = x + (a2 * (x ** 2)) + (a3 * (x ** 3)) + (a4 * (x ** 4))\n examples = examples.reshape(examples.shape[0], number_of_observations * irrelevant_data_multiplier)\n return examples.astype(dtype=np.float32)",
"def initialize_model_randomly(self, shape=(2048, 2048), tn=50):\n self.coeffs = self.generate_random_coeffs(shape, tn)",
"def generateCoefficients (self):\n\t\tself.ws = []\n\t\tif not self.sine:\n\t\t\tself.bs = []\n\t\tmean = np.zeros(self.dim)\n\t\tcov = np.eye(self.dim)*(2*self.gammak)\n\n\t\tif self.sine:\n\t\t\tfor _ in range(self.rn):\n\t\t\t\tself.ws.append(nr.multivariate_normal(mean, cov))\n\t\telse:\n\t\t\tfor _ in range(self.rn):\n\t\t\t\tself.ws.append(nr.multivariate_normal(mean, cov))\n\t\t\t\tself.bs.append(nr.uniform(0.0, 2*np.pi))",
"def generate_random_tropical_poly(max_degree, min_coefficient, max_coefficient):\n coefficients = []\n for d in range(0, random.randint(1, max_degree) + 1):\n coefficients.append(random.randint(min_coefficient, max_coefficient))\n return coefficients",
"def coefficients(k, xi, x):\n\n import pyweno.cnonuniform\n\n x = np.asarray(x, np.float64)\n xi = np.asarray(xi, np.float64)\n\n nc = len(x) - 1\n n = len(xi)\n c = np.zeros((nc, n, k, k), np.float64)\n beta = np.zeros((nc, k, k, k), np.float64)\n varpi = np.zeros((nc, n, k), np.float64)\n\n pyweno.cnonuniform.nonuniform_coeffs(k, xi, x, c, beta, varpi)\n\n return c, beta, varpi",
"def generate_model (d):\n return np.random.rand (d+1, 1)",
"def model(r, p0, n=1):\n# print \"oi\"\n Pt = zeros(n, float) # initialize the output vector\n P = p0\n for i in xrange(n):\n Pt[i] = r*P\n P = Pt[i]\n \n return Pt",
"def sample_utility(n, model, alpha, beta, bmax):\n A, b = matrix(0.0, (n,n)), matrix(ra.uniform(0,bmax,(n,1)))\n \n if model == 1: A = matrix(ra.uniform(0,beta,(n,n)))\n \n if model == 2:\n for i in range(n):\n for j in range(n/2):\n A[i, int(np.mod(i+j+1,n))] = beta**(j+1)\n A[i, int(np.mod(i-(j+1),n))] = beta**(j+1)\n \n if model == 3: A = 0.5*matrix(ra.binomial(1,beta,(n,n)))\n \n for i in range(n): A[i,i] = 1.0\n \n return Utility((alpha*A,b), 'sqrt')",
"def rand_cov():\n c = uniform(-1, 1)\n return [[uniform(0, 1), c], [c, uniform(0, 1)]]",
"def generation_model(self, z, c): # P(x|z, c)\n z_c = torch.cat((z, c), dim=1)\n x_hat = self.decoder(z_c)\n return x_hat",
"def generation_model(self, z, c): # P(x|z, c)\n z_c = torch.cat((z, c), dim=1)\n x_hat = self.decoder(z_c)\n return x_hat",
"def GenerateInitialSolution():\n c = random.random()*C\n count = 0\n while np.count_nonzero(alpha) < gamma:\n rand = random.randint(0, len(x_train)-1)\n if y_train[rand] == 1:\n alpha[rand] = c\n L[rand, 1] = c\n # L[count, 0] = rand\n # L[count, 1] = alpha[rand]\n SVs[count] = rand\n count += 1\n while np.count_nonzero(alpha) < 2*gamma:\n rand = random.randint(0, len(x_train)-1)\n if y_train[rand] == -1:\n alpha[rand] = c\n L[rand, 1] = c\n # L[count, 0] = rand\n # L[count, 1] = alpha[rand]\n SVs[count] = rand\n count += 1\n return alpha",
"def gen_small(s, n):\n\tdeg = n\n\tcoeff_vector = deg*[_sage_const_0 ]\n\tcoeff_vector[deg-_sage_const_1 ] = _sage_const_1 \n\tcoeff_vector[_sage_const_0 ] = _sage_const_1 \n\tindex_set = set({_sage_const_0 ,deg-_sage_const_1 })\n\tfor i in range(s-_sage_const_2 ):\n\t# add 1's\n\t\twhile True:\n\t\t\tindex1 = ZZ.random_element(_sage_const_1 ,deg-_sage_const_1 )\n\t\t\tif not index1 in index_set:\n\t\t\t\tcoeff_vector[index1] = _sage_const_1 \n\t\t\t\tindex_set = index_set.union({index1})\n\t\t\t\tbreak\n\t# add -1's\n\tfor i in range(s):\n\t\twhile True:\n\t\t\tindex2 = ZZ.random_element(_sage_const_1 ,deg-_sage_const_1 )\n\t\t\tif not index2 in index_set:\n\t\t\t\tcoeff_vector[index2] = -_sage_const_1 \n\t\t\t\tindex_set = index_set.union({index2})\n\t\t\t\tbreak\n\treturn coeff_vector",
"def _generate_poly_array(self, nchan, coeff=[]):\n if nchan < 0:\n raise ValueError, \"nchan should be >=0\"\n if len(coeff)==0:\n if nchan ==0: return []\n else: raise ValueError, \"No valid coefficient given.\"\n polyarr = numpy.zeros(nchan)\n for iorder in range(len(coeff)):\n polyarr += coeff[iorder]*numpy.array(xrange(nchan))**iorder\n return polyarr",
"def linear_params(key, o, u, ifactor=1.0):\n keys = random.split(key, 2)\n ifactor = ifactor / np.sqrt(u)\n return {'w' : random.normal(keys[0], (o, u)) * ifactor}",
"def continuous_model():\n return {\"x\": np.random.beta(2, 5, size=100), \"y\": np.random.beta(2, 5, size=100)}",
"def generate_polynomial_examples(number_of_examples, number_of_observations):\n a2, a3, a4 = generate_double_a2_a3_a4_coefficients(number_of_examples)\n examples = generate_examples_from_coefficients(a2, a3, a4, number_of_observations)\n examples += np.random.normal(0, 0.1, examples.shape)\n labels = np.squeeze(a3[:, 0], axis=-1)\n return examples, labels",
"def coefficients(self) :\n raise NotImplementedError",
"def coeff_b(nrows, ncols) -> np.ndarray:\n coeff_array = np.zeros((nrows, ncols), dtype=\"complex_\")\n for idx, _ in np.ndenumerate(coeff_array):\n coeff_array[idx] = 1j * (idx[0] - idx[1])\n return coeff_array",
"def generate_double_a2_a3_a4_coefficients(number_of_examples):\n a2_distribution = MixtureModel([uniform(-2, 1), uniform(1, 1)])\n a2 = a2_distribution.rvs(size=[number_of_examples, irrelevant_data_multiplier, 1]).astype(dtype=np.float32)\n a3_distribution = MixtureModel([uniform(-2, 1), uniform(1, 1)])\n a3 = a3_distribution.rvs(size=[number_of_examples, irrelevant_data_multiplier, 1]).astype(dtype=np.float32)\n a4_distribution = MixtureModel([uniform(-2, 1), uniform(1, 1)])\n a4 = a4_distribution.rvs(size=[number_of_examples, irrelevant_data_multiplier, 1]).astype(dtype=np.float32)\n return a2, a3, a4",
"def generate_data(params, sigma):\n rng = random.PRNGKey(0)\n k = len(params) // 2\n a_array = params[:k]\n b_array = params[k:]\n n = 20 * k\n xs = sample_our_uniform(n, 1, rng).reshape((n,))\n ys = onp.zeros(n)\n all_indices = set(onp.arange(n))\n for i in range(k):\n i_idxs = onp.random.choice(list(all_indices), 20, replace=False)\n all_indices = set(all_indices) - set(i_idxs)\n ys[i_idxs] = xs[i_idxs] * a_array[i] + b_array[i] + onp.random.normal(0, sigma, size=(20,))\n return xs, ys",
"def getModel(self):\n m,n = self.m,self.n\n A = np.zeros((m,m))\n B = np.zeros((m,n))\n C = np.zeros(m)\n Apattern,Bpattern,Cpattern = self.coeffPattern\n for i,e in enumerate(self.estimators):\n aofs = 0\n bofs = m\n cofs = m+n\n if Apattern==None:\n ai = e.x[aofs:m+aofs]\n else:\n bofs=aofs\n ai = []\n for j,pj in enumerate(Apattern[i]):\n if pj == None:\n ai.append(e.x[bofs])\n bofs += 1\n else:\n ai.append(pj)\n if Bpattern==None:\n bi = e.x[bofs:n+bofs]\n else:\n cofs=bofs\n bi = []\n for j,pj in enumerate(Bpattern[i]):\n if pj == None:\n bi.append(e.x[cofs])\n cofs += 1\n else:\n bi.append(pj)\n if Cpattern==None:\n ci = e.x[cofs]\n cofs+=1\n else:\n if Cpattern[i] == None:\n ci = e.x[cofs]\n cofs+=1\n else:\n ci = Cpattern[i]\n assert(cofs == e.n)\n assert len(ai)==m\n assert len(bi)==n\n A[i,:] = ai\n B[i,:] = bi\n C[i] = ci\n return (A,B,C)",
"def get_B100():\n m = 100\n random.seed(1111*m)\n A = random.randn(m, m) + 1j*random.randn(m, m)\n A[np.tril_indices(m, -2)] = 0\n return A",
"def beta_model(r, s0, rc, beta, c):\n return s0 * np.power((1.0+(r/rc)**2), 0.5-3*beta) + c",
"def _like4(init_par, alpha, delta, plx_obs, mualpha_obs, mudelta_obs, vrad_obs, sigma_obs, sigma_vrad, ccoef, i):\r\n\t\r\n\tplx_mod, v, sigma_v = init_par[i], init_par[-4:-1], init_par[-1] \r\n\tp, q, r = normalTriad(alpha, delta)\r\n\tmualpha_mod = np.dot(np.transpose(p),v)*plx_mod/_A ### [mas/yr]\r\n\tmudelta_mod = np.dot(np.transpose(q),v)*plx_mod/_A ### [mas/yr]\r\n\t### Add the model vector for the radial velocities:\r\n\tvrad_mod = np.dot(np.transpose(r),v) ### [km/s]\r\n \t\r\n\tsigma_plx, sigma_mualpha, sigma_mudelta = np.transpose(sigma_obs)\r\n\tC = np.zeros((4,4),dtype=np.float64) ### This is a 4x4 matrix \r\n\t### Diagonal terms:\r\n\tC[0,0],C[1,1],C[2,2] = sigma_plx**2.,sigma_mualpha**2., sigma_mudelta**2.\r\n\tC[3,3] = sigma_vrad**2.\r\n\t\r\n\tr_plx_muRa, r_plx_muDec, r_muRa_muDec = ccoef[0], ccoef[1], ccoef[2] \r\n \r\n\t### Correlation terms:\r\n\tC[0,1], C[0,2] = r_plx_muRa*sigma_plx*sigma_mualpha, r_plx_muDec*sigma_plx*sigma_mudelta\r\n\tC[1,0], C[1,2] = r_plx_muRa*sigma_plx*sigma_mualpha, r_muRa_muDec*sigma_mualpha*sigma_mudelta\r\n\tC[2,0], C[2,1] = r_plx_muDec*sigma_plx*sigma_mudelta, r_muRa_muDec*sigma_mualpha*sigma_mudelta\r\n\r\n\tE = np.zeros((4,4),dtype=np.float64) ### 4x4 matrix \r\n\tE[1,1],E[2,2] = (sigma_v**2.)*(plx_mod/_A)**2., (sigma_v**2.)*(plx_mod/_A)**2. ### [mas/yr]\r\n\tE[3,3] = sigma_v**2.\t\t\t\t\t\t\t\t ### [km/s]\r\n\r\n\t\r\n\tD = np.add(E,C)\r\n\tdetD = det(D) \r\n\tinvD = inv(D)\r\n\t\t\r\n\ta_c = np.array([plx_obs - plx_mod, mualpha_obs - mualpha_mod, mudelta_obs-mudelta_mod, vrad_obs - vrad_mod])\r\n\tg_func = row_matrix_col_4d(a_c, a_c, invD) \r\n\t\r\n\t\r\n\treturn detD, g_func"
]
| [
"0.6511364",
"0.6201331",
"0.6073942",
"0.6068727",
"0.5951372",
"0.5904999",
"0.5790906",
"0.5731309",
"0.5686012",
"0.5672112",
"0.5663319",
"0.5612603",
"0.56111264",
"0.55973434",
"0.5548478",
"0.5548478",
"0.5527172",
"0.55149466",
"0.5494699",
"0.548396",
"0.54814345",
"0.54628193",
"0.546049",
"0.543064",
"0.54168373",
"0.53972507",
"0.5390374",
"0.5368718",
"0.5360898",
"0.53479135"
]
| 0.6319211 | 1 |
Defines the base unit of this sensor | def base_unit() -> ureg:
return ureg.meter | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_base_unit(self, obj: Dimension) -> Unit:\n return obj.base_unit",
"def unit_of_measurement(self):\n return self.sensor_type[\"unit\"]",
"def unit_of_measurement(self):\r\n return self._sensor_cfg[1]",
"def unit_of_measurement(self):\r\n return self._sensor_cfg[1]",
"def _unit_min(self):\n return self.time_base",
"def unit_of_measurement(self):\n return SENSOR_TYPES[self.sensor][1]",
"def unit_of_measurement(self):\n\n if self._sensor_class == DEVICE_CLASS_TEMPERATURE:\n return TEMP_CELSIUS\n\n elif self._sensor_class == DEVICE_CLASS_HUMIDITY:\n return '%'",
"def unit_of_measurement(self):\n return self._sensor_type.unit",
"def unit_min(self):\n return self.time_base",
"def bus_kv_base(self) -> float:\n return self.dss_obj.BUSF(0, 0)",
"def unit_of_measurement(self):\n return SENSOR_TYPES[self.variable][1]",
"def unit_of_measurement(self):\n return SENSOR_TYPES[self.variable][1]",
"def unit_of_measurement(self) -> Any:\n return TEMP_CELSIUS",
"def unit_of_measurement(self):\n return SENSOR_TYPES[self.type][1]",
"def unit_of_measurement(self):\n if self.api_unit in TEMPERATURE_UNITS:\n return self.hass.config.units.temperature_unit\n\n if self.api_unit in LENGTH_UNITS:\n return self.hass.config.units.length_unit\n\n if self.api_unit in PRESSURE_UNITS:\n if self.hass.config.units == IMPERIAL_SYSTEM:\n return self.hass.config.units.pressure_unit\n return PRESSURE_HPA\n\n if self.api_unit in FUEL_CONSUMPTION_UNITS:\n if self.hass.config.units == IMPERIAL_SYSTEM:\n return FUEL_CONSUMPTION_MPG\n return FUEL_CONSUMPTION_L_PER_100KM\n\n return self.api_unit",
"def _base_unit(self, df, colname):\n\n # This is not generic, but today we have to deal only with nanoseconds, so this is good\n # enough.\n if self._refdefs.info[colname].get(\"unit\") != \"nanosecond\":\n return df[colname]\n\n base_colname = f\"{colname}_base\"\n if base_colname not in df:\n df[base_colname] = df[colname] / 1000000000\n return df[base_colname]",
"def to_base_unit(val: float, unit: str = \"\") -> float:\n unit_scalar = UNITS.get(unit, None)\n if not unit_scalar:\n raise UnknownUnitError(f\"Unit '{unit}' is not supported\")\n\n return val * unit_scalar",
"def unit_of_measurement(self):\n return SENSOR_TYPES[self._sensor][0]",
"def unit_of_measurement(self):\n return SENSOR_TYPES[self._type][2]",
"def unit_of_measurement(self):\n return SENSOR_TYPES[self._type][2]",
"def unit_of_measurement(self):\n return self._sensor.unit",
"def unit_of_measurement(self):\n return SENSOR_TYPES[self._type][1]",
"def unit_of_measurement(self) -> str | None:\n set_req = self.gateway.const.SetReq\n if (\n AwesomeVersion(self.gateway.protocol_version) >= AwesomeVersion(\"1.5\")\n and set_req.V_UNIT_PREFIX in self._values\n ):\n custom_unit: str = self._values[set_req.V_UNIT_PREFIX]\n return custom_unit\n\n if set_req(self.value_type) == set_req.V_TEMP:\n if self.hass.config.units.is_metric:\n return TEMP_CELSIUS\n return TEMP_FAHRENHEIT\n\n unit = self._get_sensor_type()[0]\n return unit",
"def unit(self):\n return self.__unit",
"def set_unit(self,unit):\n self.unit = unit",
"def unit(cls, val):\n raise NotImplementedError",
"def get_unit(self):\n return self.unit",
"def unit_of_measurement(self):\n return SENSOR_TYPES.get(self._sensor_type)[1]",
"def unit_of_measurement(self):\n return SENSOR_TYPES.get(self._sensor_type)[1]",
"def unit_of_measurement(self) -> str:\n raw_units = self.raw_units\n\n if raw_units in [TEMP_CELSIUS, TEMP_FAHRENHEIT]:\n return self.hass.config.units.temperature_unit\n return raw_units"
]
| [
"0.7427461",
"0.66860276",
"0.6682726",
"0.6682726",
"0.65782034",
"0.6503735",
"0.64724845",
"0.6439907",
"0.64389485",
"0.6436502",
"0.6423663",
"0.6423663",
"0.6419997",
"0.6406627",
"0.63824344",
"0.63757634",
"0.6370861",
"0.63593864",
"0.63399184",
"0.63399184",
"0.63355315",
"0.6325889",
"0.6325311",
"0.6316108",
"0.63156193",
"0.63119215",
"0.6291137",
"0.6283384",
"0.6283384",
"0.62628084"
]
| 0.7912085 | 0 |
Test that the byteps_push_pull correctly sums 1D, 2D, 3D tensors. | def test_byteps_push_pull(self):
dtypes = ['float16', 'float32', 'float64']
dims = [1, 2, 3]
count = 0
ctx = self._current_context()
shapes = [(), (17), (17, 17), (17, 17, 17)]
for dtype, dim in itertools.product(dtypes, dims):
# MXNet uses gpu_id as part of the seed, so to get identical seeds
# we must set a context.
mx.random.seed(10 + 10 * bps.rank(), ctx=ctx)
tensor = mx.nd.random.uniform(-100, 100, shape=shapes[dim],
ctx=ctx)
tensor = tensor.astype(dtype)
input = tensor.asnumpy()
bps.byteps_declare_tensor("tensor_" + str(count))
bps.byteps_push_pull(tensor, name="tensor_"+str(count))
tensor.wait_to_read()
output = tensor.asnumpy()
assert np.allclose(input, output)
count += 1
print('test_byteps_push_pull passed') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_byteps_push_pull_inplace(self):\n size = bps.size()\n dtypes = ['float16', 'float32', 'float64']\n dims = [1, 2, 3]\n count = 0\n ctx = self._current_context()\n shapes = [(), (17), (17, 17), (17, 17, 17)]\n for dtype, dim in itertools.product(dtypes, dims):\n mx.random.seed(1234, ctx=ctx)\n tensor = mx.nd.random.uniform(-100, 100, shape=shapes[dim],\n ctx=ctx)\n tensor = tensor.astype(dtype)\n multiplied = tensor.copy()\n bps.byteps_declare_tensor(\"tensor_\" + str(count))\n bps.byteps_push_pull(tensor, name=\"tensor_\" + str(count))\n max_difference = mx.nd.max(mx.nd.subtract(tensor, multiplied))\n count += 1\n\n # Threshold for floating point equality depends on number of\n # ranks, since we're comparing against precise multiplication.\n if size <= 3 or dtype in ['int32', 'int64']:\n threshold = 0\n elif size < 10:\n threshold = 1e-4\n elif size < 15:\n threshold = 5e-4\n else:\n break\n\n if max_difference > threshold:\n print(\"self\", count, dtype, dim, max_difference, threshold)\n print(\"tensor\", bps.rank(), tensor)\n print(\"multiplied\", bps.rank(), multiplied)\n assert max_difference <= threshold, 'bps.byteps_push_pull produces \\\n incorrect results for self'\n\n print('test_byteps_push_pull_inplace passed')",
"def test_unstack3():\n x = np.arange(1, 49).reshape((4, 3, 2, 2)).astype(np.float64)\n axis = 1\n x_tensor = paddle.to_tensor(x)\n out_list = paddle.unstack(x_tensor, axis=axis)\n length = len(out_list)\n for i in range(length):\n ept = x[:, i, :, :]\n npt.assert_allclose(out_list[i].numpy(), ept)",
"def test_unstack2():\n x = np.arange(1, 25).reshape((4, 2, 3)).astype(np.float32)\n axis = 2\n x_tensor = paddle.to_tensor(x)\n out_list = paddle.unstack(x_tensor, axis=axis)\n length = len(out_list)\n for i in range(length):\n ept = x[:, :, i]\n npt.assert_allclose(out_list[i].numpy(), ept)",
"def test_unstack1():\n x = np.arange(1, 25).reshape((4, 2, 3)).astype(np.float32)\n axis = -1\n x_tensor = paddle.to_tensor(x)\n out_list = paddle.unstack(x_tensor, axis=axis)\n length = len(out_list)\n for i in range(length):\n ept = x[:, :, i]\n npt.assert_allclose(out_list[i].numpy(), ept)",
"def test_unstack():\n x = np.arange(1, 13).reshape((3, 2, 2)).astype(np.int32)\n axis = 0\n x_tensor = paddle.to_tensor(x)\n out_list = paddle.unstack(x_tensor, axis=axis)\n length = len(out_list)\n for i in range(length):\n ept = x[i, :, :]\n npt.assert_allclose(out_list[i].numpy(), ept)",
"def push_pull(tensor, scope='', average=None, device_dense='', device_sparse='',\n compression=Compression.none, op=None, enable_async=False):\n op = handle_average_backwards_compatibility(op, average).value\n # Averaging happens in framework code, so translate that to Sum for the actual call\n true_op = Sum if op == Average else op\n\n with tf.device(device_dense):\n byteps_size = tf.cast(size(), dtype=tensor.dtype)\n tensor_compressed, ctx = compression.compress(tensor)\n summed_tensor_compressed = _push_pull(tensor_compressed, scope)\n summed_tensor = compression.decompress(summed_tensor_compressed, ctx)\n if not enable_async:\n _div = tf.div if hasattr(tf, 'div') else tf.math.divide\n new_tensor = (_div(summed_tensor, byteps_size)\n if op == Average else summed_tensor)\n else: # no need to average for async training\n new_tensor = summed_tensor\n return new_tensor",
"def test_remote_buffer() -> None:\n # Prepare the input and output data\n shape_1 = (1, 3, 5)\n shape_2 = (7, 11)\n d_type_1 = np.dtype(\"float32\")\n d_type_2 = np.dtype(\"float16\")\n\n data: Dict[str, np.ndarray] = {}\n\n # Store and load data for the first tensor\n data[\"store_in_1\"] = np.random.rand(*shape_1).astype(d_type_1)\n data[\"load_in_1\"] = np.zeros(shape_1).astype(d_type_1)\n data[\"load_in_1_inplace\"] = np.zeros(shape_1).astype(d_type_1)\n # Store and load data for the second tensor\n data[\"store_in_2\"] = np.random.rand(*shape_2).astype(d_type_2)\n data[\"load_in_2\"] = np.zeros(shape_2).astype(d_type_2)\n # Store and load data for the third tensor\n data[\"store_in_3\"] = np.random.rand(*shape_2).astype(d_type_2)\n data[\"load_in_3_inplace\"] = np.zeros(shape_2).astype(d_type_2)\n\n ir, d2h_streams = build_model(data)\n\n # Get the tensor_ids\n labels = (\n \"load_in_1\",\n \"load_in_1_inplace\",\n \"load_out_1\",\n \"load_out_1_inplace\",\n \"load_in_2\",\n \"load_in_3_inplace\",\n \"load_out_2\",\n \"load_out_3_inplace\",\n )\n tensor_d2h = {label: d2h_streams[label] for label in labels}\n\n session = popxl.Session(ir, \"ipu_model\")\n with session:\n outputs = session.run()\n\n # Assert that the tensors are correct\n remote_load_scenarios = (\n \"1\",\n \"1_inplace\",\n \"2\",\n \"3_inplace\",\n )\n for scenario in remote_load_scenarios:\n print(f\"Now asserting remote load scenario {scenario}\")\n # Get data to assert\n store_in_data = data[f\"store_in_{scenario.replace('_inplace', '')}\"]\n load_in_data_before_op_call = data[f\"load_in_{scenario}\"]\n load_in_data_after_op_call = outputs[tensor_d2h[f\"load_in_{scenario}\"]]\n load_out_data = outputs[tensor_d2h[f\"load_out_{scenario}\"]]\n shape = shape_1 if \"1\" in scenario else shape_2\n d_type = d_type_1 if \"1\" in scenario else d_type_2\n inplace = True if \"inplace\" in scenario else False\n # Assert shape and type\n assert load_in_data_after_op_call.shape == shape\n assert load_in_data_after_op_call.dtype == d_type\n assert load_out_data.shape == shape\n assert load_out_data.dtype == d_type\n\n # Assert that the data has been loaded\n assert np.allclose(store_in_data, load_out_data)\n if inplace:\n # Assert that the load in data has been overwritten\n assert np.allclose(load_in_data_after_op_call, store_in_data)\n else:\n # Assert that the load in data has not been overwritten\n assert np.allclose(load_in_data_after_op_call, load_in_data_before_op_call)",
"def test_radd(self):\n tensor = Tensor([2, 4, 6, 8])\n result = 1 + tensor\n result_np = np.array(1) + tensor\n result_arr = [1, 1, 1, 1] + tensor\n\n assert result.data.tolist() == [3, 5, 7, 9]\n assert result_np.data.tolist() == [3, 5, 7, 9]\n assert result_arr.data.tolist() == [3, 5, 7, 9]",
"def test_add_op_jit():\n x = np.array([1, 2, 3, 4, 5, 6, 7])\n paddle_x = paddle.to_tensor(x).astype(\"float32\")\n paddle_x.stop_gradient = False\n print(paddle_x)\n a = 1\n b = 5\n out = custom_ops.slice_test(paddle_x, a, b)\n print(\"out: \", out)\n print(\"numpy out: \", x[a:b])\n assert np.allclose(out.numpy(), x[a:b])\n print(\"run success\")",
"def test13(self):\n a = bcolz.ones((self.N, 1))\n b = bcolz.zeros(a.shape)\n b = bcolz.eval('a + b')\n self.assertEqual(b.sum(), self.N)",
"def test_op_add_offload_array_int(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1, 4711 * 1024, dtype=int)\n o = a + 1\n\n old_a = numpy.empty_like(a)\n old_o = numpy.empty_like(o)\n old_a[:] = a[:]\n old_o[:] = o[:]\n expect = a + o\n\n offl_a = stream.bind(a)\n offl_o = stream.bind(o)\n offl_r = offl_a + offl_o\n offl_a.update_host()\n r = offl_r.update_host().array\n stream.sync()\n\n self.assertEqual(r.shape, a.shape)\n self.assertEqual(r.dtype, a.dtype)\n self.assertTrue((a == old_a).all(),\n \"Input array operand 1 must not be modified: \"\n \"{0} should be {1}\".format(a, old_a))\n self.assertTrue((o == old_o).all(),\n \"Input array operand 2 must not be modified: \"\n \"{0} should be {1}\".format(o, old_o))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))",
"def test_op_add_offload_array_float(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1.0, 4711.0 * 1024, dtype=float)\n o = a + 1.0\n\n old_a = numpy.empty_like(a)\n old_o = numpy.empty_like(o)\n old_a[:] = a[:]\n old_o[:] = o[:]\n expect = a + o\n\n offl_a = stream.bind(a)\n offl_o = stream.bind(o)\n offl_r = offl_a + offl_o\n offl_a.update_host()\n r = offl_r.update_host().array\n stream.sync()\n\n self.assertEqual(r.shape, a.shape)\n self.assertEqual(r.dtype, a.dtype)\n self.assertTrue((a == old_a).all(),\n \"Input array operand 1 must not be modified: \"\n \"{0} should be {1}\".format(a, old_a))\n self.assertTrue((o == old_o).all(),\n \"Input array operand 2 must not be modified: \"\n \"{0} should be {1}\".format(o, old_o))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))",
"def test_network_unflow_loss(self):\n height = 128\n width = 128\n num_features = 3\n batch_size = 2\n\n # Create the graph.\n input_image_a = tf.placeholder(shape=[None, height, width, num_features], dtype=tf.float32)\n input_image_b = tf.placeholder(shape=[None, height, width, num_features], dtype=tf.float32)\n forward_flow, backward_flow, forward_flows, backward_flows = self.pwc_net.get_bidirectional(\n input_image_a, input_image_b)\n self.assertEqual(6, len(forward_flows))\n self.assertEqual(6, len(backward_flows))\n\n image_a = np.zeros(shape=[batch_size, height, width, num_features], dtype=np.float32)\n image_a[:, 10:height - 10, 10:width - 10, :] = 1.0\n image_b = np.zeros(shape=[batch_size, height, width, num_features], dtype=np.float32)\n image_b[:, 5:height - 5, 5:width - 5, :] = 1.0\n\n self.sess.run(tf.global_variables_initializer())\n trainable_vars = tf.trainable_variables(scope='pwc_net')\n\n # Test output shapes.\n outputs = self.sess.run([forward_flow, backward_flow] + forward_flows + backward_flows,\n feed_dict={input_image_a: image_a, input_image_b: image_b})\n self.assertTupleEqual((batch_size, height, width, 2), outputs[0].shape)\n self.assertTupleEqual((batch_size, height, width, 2), outputs[1].shape)\n self.assertTupleEqual((batch_size, height / 64, width / 64, 2), outputs[2].shape)\n self.assertTupleEqual((batch_size, height / 32, width / 32, 2), outputs[3].shape)\n self.assertTupleEqual((batch_size, height / 16, width / 16, 2), outputs[4].shape)\n self.assertTupleEqual((batch_size, height / 8, width / 8, 2), outputs[5].shape)\n self.assertTupleEqual((batch_size, height / 4, width / 4, 2), outputs[6].shape)\n self.assertTupleEqual((batch_size, height / 4, width / 4, 2), outputs[7].shape)\n self.assertTupleEqual((batch_size, height / 64, width / 64, 2), outputs[8].shape)\n self.assertTupleEqual((batch_size, height / 32, width / 32, 2), outputs[9].shape)\n self.assertTupleEqual((batch_size, height / 16, width / 16, 2), outputs[10].shape)\n self.assertTupleEqual((batch_size, height / 8, width / 8, 2), outputs[11].shape)\n self.assertTupleEqual((batch_size, height / 4, width / 4, 2), outputs[12].shape)\n self.assertTupleEqual((batch_size, height / 4, width / 4, 2), outputs[13].shape)\n\n # Check that the gradients are flowing.\n grad_op = tf.gradients(forward_flow, trainable_vars + [input_image_a, input_image_b])\n for grad in grad_op:\n self.assertNotEqual(grad, None)\n grad_op = tf.gradients(backward_flow, trainable_vars + [input_image_a, input_image_b])\n for grad in grad_op:\n self.assertNotEqual(grad, None)\n\n # Get the loss.\n training_loss, _, _, _, _ = self.pwc_net.get_unflow_training_loss(input_image_a, input_image_b,\n forward_flows, backward_flows)\n # Check the loss.\n loss_value = self.sess.run(training_loss, feed_dict={input_image_a: image_a, input_image_b: image_b})\n self.assertNotAlmostEqual(loss_value, 0.0)\n\n # Check the gradients.\n loss_grad_ops = tf.gradients(training_loss, trainable_vars + [input_image_a, input_image_b])\n self.assertGreater(len(loss_grad_ops), 0)\n for grad in loss_grad_ops:\n self.assertNotEqual(grad, None)\n grads = self.sess.run(loss_grad_ops, feed_dict={input_image_a: image_a, input_image_b: image_b})\n for grad in grads:\n self.assertNotAlmostEqual(0.0, np.sum(grad))",
"def sliding_window_decoding( model, X, input_shape, overlapping = 32 ) :\n patch_bboxes = get_patch_bboxes( X.shape, input_shape, overlapping )\n n_samples, n_chs, height, width = X.shape\n Z = np.zeros( X.shape, dtype = np.float32 )\n C = np.zeros( X.shape, dtype = np.float32 )\n pad_before, pad_after = min( input_shape ) // 4, min( input_shape ) // 4\n for top, bot, left, right in patch_bboxes :\n x = X[ :, :, top:bot, left:right ]\n z = model.predict( x )\n if ( top == 0 ) and ( bot == height ) :\n if ( left == 0 ) and ( right == width ):\n Z[:,:,top:bot,left:right] += z\n C[:,:,top:bot,left:right] += 1. \n elif ( left == 0 ) :\n Z[:,:,top:bot,left:right-pad_after] += z[:,:,:,:-pad_after]\n C[:,:,top:bot,left:right-pad_after] += 1.\n elif ( right == width ) :\n Z[:,:,top:bot,left+pad_before:right] += z[:,:,:,pad_before:]\n C[:,:,top:bot,left+pad_before:right] += 1.\n else :\n Z[:,:,top:bot,left+pad_before:right-pad_after] += z[:,:,:,pad_before:-pad_after]\n C[:,:,top:bot,left+pad_before:right-pad_after] += 1. \n elif ( top == 0 ) :\n if ( left == 0 ) and ( right == width ):\n Z[:,:,top:bot-pad_after,left:right] += z[:,:,:-pad_after,:]\n C[:,:,top:bot-pad_after,left:right] += 1.\n elif ( left == 0 ) :\n Z[:,:,top:bot-pad_after,left:right-pad_after] += z[:,:,:-pad_after,:-pad_after]\n C[:,:,top:bot-pad_after,left:right-pad_after] += 1. \n elif ( right == width ) :\n Z[:,:,top:bot-pad_after,left+pad_before:right] += z[:,:,:-pad_after,pad_before:]\n C[:,:,top:bot-pad_after,left+pad_before:right] += 1.\n else :\n Z[:,:,top:bot-pad_after,left+pad_before:right-pad_after] += z[:,:,:-pad_after,pad_before:-pad_after]\n C[:,:,top:bot-pad_after,left+pad_before:right-pad_after] += 1.\n elif ( bot == height ) :\n if ( left == 0 ) and ( right == width ):\n Z[:,:,top+pad_before:bot,left:right] += z[:,:,pad_before:,:]\n C[:,:,top+pad_before:bot,left:right] += 1.\n elif ( left == 0 ) :\n Z[:,:,top+pad_before:bot,left:right-pad_after] += z[:,:,pad_before:,:-pad_after]\n C[:,:,top+pad_before:bot,left:right-pad_after] += 1.\n elif ( right == width ) :\n Z[:,:,top+pad_before:bot,left+pad_before:right] += z[:,:,pad_before:,pad_before:]\n C[:,:,top+pad_before:bot,left+pad_before:right] += 1.\n else :\n Z[:,:,top+pad_before:bot,left+pad_before:right-pad_after] += z[:,:,pad_before:,pad_before:-pad_after]\n C[:,:,top+pad_before:bot,left+pad_before:right-pad_after] += 1.\n else :\n if ( left == 0 ) and ( right == width ) :\n Z[:,:,top+pad_before:bot-pad_after,left:right] += z[:,:,pad_before:-pad_after,:]\n C[:,:,top+pad_before:bot-pad_after,left:right] += 1.\n elif ( left == 0 ) :\n Z[:,:,top+pad_before:bot-pad_after,left:right-pad_after] += z[:,:,pad_before:-pad_after,:-pad_after]\n C[:,:,top+pad_before:bot-pad_after,left:right-pad_after] += 1. \n elif ( right == width ) :\n Z[:,:,top+pad_before:bot-pad_after,left+pad_before:right] += z[:,:,pad_before:-pad_after,pad_before:]\n C[:,:,top+pad_before:bot-pad_after,left+pad_before:right] += 1.\n else :\n Z[:,:,top+pad_before:bot-pad_after,left+pad_before:right-pad_after] += z[:,:,pad_before:-pad_after,pad_before:-pad_after]\n C[:,:,top+pad_before:bot-pad_after,left+pad_before:right-pad_after] += 1.\n return Z / C",
"def test_op_isub_offload_array_float(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1, 4711 * 1024, dtype=float)\n o = a + 1.3\n\n old_a = numpy.empty_like(a)\n old_o = numpy.empty_like(o)\n old_a[:] = a[:]\n old_o[:] = o[:]\n expect = a - o\n\n offl_a = stream.bind(a)\n offl_o = stream.bind(o)\n offl_a -= offl_o\n offl_a.update_host()\n r = offl_a.array\n stream.sync()\n\n self.assertTrue((r != old_a).all(),\n \"Array operand must be modified: \"\n \"{0} should be {1}\".format(a, old_a))\n self.assertTrue((o == old_o).all(),\n \"Input array operand 2 must not be modified: \"\n \"{0} should be {1}\".format(o, old_o))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))",
"def test_add_get_tensor_3D(mock_data):\n dataset = Dataset(\"test-dataset\")\n\n # 3D tensors of all datatypes\n data_3D = mock_data.create_data((10, 10, 10))\n add_get_arrays(dataset, data_3D)",
"def test_np_memory_layout_add_input_tensor_pystepiocallback():\n\n def _test(transposedInput, transposedOutput):\n builder = popart.Builder()\n\n # Create a random constant and transpose it\n np.random.seed(1)\n input1 = builder.addInputTensor(\"INT32\", [2, 2])\n\n # Run a session to prove this\n output1 = builder.aiOnnx.identity([input1])\n builder.addOutputTensor(output1)\n anchorConfig = {output1: popart.AnchorReturnType(\"ALL\")}\n\n dataFlow = popart.DataFlow(1, anchorConfig)\n deviceConfig = {\"numIPUs\": 1}\n dm = popart.DeviceManager()\n device = dm.createIpuModelDevice(deviceConfig)\n session = popart.InferenceSession(\n fnModel=builder.getModelProto(), dataFlow=dataFlow, deviceInfo=device\n )\n\n # Compile graph and place weights onto it\n session.prepareDevice()\n session.weightsFromHost()\n\n # Feed the session with a transposed (non-contiguous) tensor.\n input1Value = np.random.randint(0, 100, size=(2, 2), dtype=\"int32\")\n if transposedInput:\n input1Value = np.transpose(input1Value, [1, 0])\n output1Value = np.random.randint(0, 100, size=(2, 2), dtype=\"int32\")\n if transposedOutput:\n output1Value = np.transpose(output1Value, [1, 0])\n\n with pytest.raises(\n (Exception, RuntimeError, popart.popart_exception)\n ) as e_info:\n\n # pylint: disable=unused-argument\n def input_callback(id, prefetch):\n return input1Value\n\n def input_complete_callback(_): # id is an unused parameter\n pass\n\n def output_callback(_): # id is an unused parameter\n return output1Value\n\n def output_complete_callback(_): # id is an unused parameter\n pass\n\n stepio = popart.PyStepIOCallback(\n input_callback,\n input_complete_callback,\n output_callback,\n output_complete_callback,\n )\n\n session.run(stepio)\n\n assert \"contiguous\" in e_info.value.args[0]\n\n _test(transposedInput=True, transposedOutput=False)\n _test(transposedInput=False, transposedOutput=True)",
"def test_bit_add_multiple_bytes(self):\n ops = [bitwise_operations.bit_add(self.test_bin_zeroes, 8, 16, 65535, False, aerospike.BIT_OVERFLOW_FAIL, None)]\n\n self.as_connection.operate(self.test_key, ops)\n\n _, _, bins = self.as_connection.get(self.test_key)\n expected_result = bytearray([0] * 1 + [255] * 2 + [0] * 2)\n assert bins[self.test_bin_zeroes] == expected_result",
"def test_op_isub_offload_array_int(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1, 4711 * 1024, dtype=int)\n o = a + 1\n\n old_a = numpy.empty_like(a)\n old_o = numpy.empty_like(o)\n old_a[:] = a[:]\n old_o[:] = o[:]\n expect = a - o\n\n offl_a = stream.bind(a)\n offl_o = stream.bind(o)\n offl_a -= offl_o\n offl_a.update_host()\n r = offl_a.array\n stream.sync()\n\n self.assertTrue((r != old_a).all(),\n \"Array operand must be modified: \"\n \"{0} should be {1}\".format(a, old_a))\n self.assertTrue((o == old_o).all(),\n \"Input array operand 2 must not be modified: \"\n \"{0} should be {1}\".format(o, old_o))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))",
"def test_add_get_tensor(mock_data):\n dataset = Dataset(\"test-dataset\")\n\n # 1D tensors of all data types\n data = mock_data.create_data(10)\n add_get_arrays(dataset, data)",
"def test_np_memory_layout_add_input_tensor_pystepio():\n\n builder = popart.Builder()\n\n # Create a random constant and transpose it\n np.random.seed(1)\n input1 = builder.addInputTensor(\"INT32\", [2, 2])\n\n # Run a session to prove this\n output1 = builder.aiOnnx.identity([input1])\n builder.addOutputTensor(output1)\n anchorConfig = {output1: popart.AnchorReturnType(\"ALL\")}\n\n dataFlow = popart.DataFlow(1, anchorConfig)\n deviceConfig = {\"numIPUs\": 1}\n dm = popart.DeviceManager()\n device = dm.createIpuModelDevice(deviceConfig)\n session = popart.InferenceSession(\n fnModel=builder.getModelProto(), dataFlow=dataFlow, deviceInfo=device\n )\n\n # Compile graph and place weights onto it\n session.prepareDevice()\n session.weightsFromHost()\n anchors = session.initAnchorArrays()\n\n # Feed the session with a transposed (non-contiguous) tensor.\n input1Value = np.random.randint(0, 100, size=(2, 2), dtype=\"int32\")\n input1Value = np.transpose(input1Value, [1, 0])\n\n with pytest.raises((RuntimeError, popart.popart_exception)) as e_info:\n stepio = popart.PyStepIO({input1: input1Value}, anchors)\n session.run(stepio)\n\n assert \"contiguous\" in e_info.value.args[0]",
"def test_op_sub_offload_array_float(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1.0, 4711.0 * 1024, dtype=float)\n o = a + 1.0\n\n old_a = numpy.empty_like(a)\n old_o = numpy.empty_like(o)\n old_a[:] = a[:]\n old_o[:] = o[:]\n expect = a - o\n\n offl_a = stream.bind(a)\n offl_o = stream.bind(o)\n offl_r = offl_a - offl_o\n offl_a.update_host()\n r = offl_r.update_host().array\n stream.sync()\n\n self.assertEqual(r.shape, a.shape)\n self.assertEqual(r.dtype, a.dtype)\n self.assertTrue((a == old_a).all(),\n \"Input array operand 1 must not be modified: \"\n \"{0} should be {1}\".format(a, old_a))\n self.assertTrue((o == old_o).all(),\n \"Input array operand 2 must not be modified: \"\n \"{0} should be {1}\".format(o, old_o))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))",
"def __call__(self, sample):\n temp = sample['stack']/255.0\n totensor = transforms.ToTensor()\n sample['stack'] = totensor(temp.transpose((1, 2, 0)))\n return sample",
"def test_add_get_tensor_2D(mock_data):\n dataset = Dataset(\"test-dataset\")\n\n # 2D tensors of all data types\n data_2D = mock_data.create_data((10, 10))\n add_get_arrays(dataset, data_2D)",
"def PrePush(self, image):\n pass",
"def test_unbroadcast_addbroadcast(self):\r\n\r\n x = matrix()\r\n assert unbroadcast(x, 0) is x\r\n assert unbroadcast(x, 1) is x\r\n assert unbroadcast(x, 1, 0) is x\r\n assert unbroadcast(x, 0, 1) is x\r\n\r\n assert addbroadcast(x, 0) is not x\r\n assert addbroadcast(x, 1) is not x\r\n assert addbroadcast(x, 1, 0).owner.inputs[0] is x\r\n\r\n assert unbroadcast(addbroadcast(x, 0), 0) is x\r\n assert addbroadcast(unbroadcast(x, 0), 0) is not x\r\n x = row()\r\n assert unbroadcast(x, 0) is not x\r\n assert unbroadcast(x, 1) is x\r\n assert unbroadcast(x, 1, 0) is not x\r\n assert unbroadcast(x, 0, 1) is not x\r\n\r\n assert addbroadcast(x, 0) is x\r\n assert addbroadcast(x, 1).owner.inputs[0] is x\r\n assert addbroadcast(x, 1, 0).owner.inputs[0] is x\r\n assert addbroadcast(x, 0, 1).owner.inputs[0] is x\r\n\r\n assert unbroadcast(addbroadcast(x, 1), 1) is x\r\n assert addbroadcast(unbroadcast(x, 1), 1) is not x\r\n\r\n # The first broadcast is remove the broadcast, so the second\r\n # should not make one\r\n assert unbroadcast(unbroadcast(x, 0), 0).owner.inputs[0] is x\r\n\r\n # Test that consecutive Rebroadcast op are fused\r\n x = TensorType(dtype='float64', broadcastable=(True, True))()\r\n assert unbroadcast(unbroadcast(x, 1), 0).owner.inputs[0] is x\r\n assert addbroadcast(unbroadcast(x, 1), 0).owner.inputs[0] is x\r\n assert addbroadcast(unbroadcast(x, 0), 0) is x",
"def test_Pad3D8():\n input_shape = (1, 2, 3)\n pad = [1, 1]\n mode = \"reflect\"\n res = [[[4, 5, 6], [1, 2, 3], [4, 5, 6], [1, 2, 3]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NLC\", data=data)",
"def test_op_sub_offload_array_int(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1, 4711 * 1024, dtype=int)\n o = a + 1\n\n old_a = numpy.empty_like(a)\n old_o = numpy.empty_like(o)\n old_a[:] = a[:]\n old_o[:] = o[:]\n expect = a - o\n\n offl_a = stream.bind(a)\n offl_o = stream.bind(o)\n offl_r = offl_a - offl_o\n offl_a.update_host()\n r = offl_r.update_host().array\n stream.sync()\n\n self.assertEqual(r.shape, a.shape)\n self.assertEqual(r.dtype, a.dtype)\n self.assertTrue((a == old_a).all(),\n \"Input array operand 1 must not be modified: \"\n \"{0} should be {1}\".format(a, old_a))\n self.assertTrue((o == old_o).all(),\n \"Input array operand 2 must not be modified: \"\n \"{0} should be {1}\".format(o, old_o))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))",
"def test_op_iadd_offload_array_float(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1, 4711 * 1024, dtype=float)\n o = a + 1.3\n\n old_a = numpy.empty_like(a)\n old_o = numpy.empty_like(o)\n old_a[:] = a[:]\n old_o[:] = o[:]\n expect = a + o\n\n offl_a = stream.bind(a)\n offl_o = stream.bind(o)\n offl_a += offl_o\n offl_a.update_host()\n r = offl_a.array\n stream.sync()\n\n self.assertTrue((r != old_a).all(),\n \"Array operand must be modified: \"\n \"{0} should be {1}\".format(a, old_a))\n self.assertTrue((o == old_o).all(),\n \"Input array operand 2 must not be modified: \"\n \"{0} should be {1}\".format(o, old_o))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))",
"def test_push_pull(queue):\n assert queue.is_connected\n\n # Test without a topic\n queue.put('hello')\n assert queue.get() == ('default', 'hello')\n\n # Test with a specified topic\n queue.put('hello', topic='priority')\n assert queue.get() == ('priority', 'hello')\n\n # Test with an unspecified topic\n with pytest.raises(AssertionError):\n queue.put('hello', 'not_a_topic')"
]
| [
"0.81022996",
"0.6435133",
"0.63489884",
"0.6338958",
"0.6273451",
"0.59371763",
"0.5863565",
"0.58035696",
"0.558139",
"0.54165804",
"0.5360265",
"0.5321667",
"0.52746713",
"0.52081335",
"0.5206156",
"0.5201949",
"0.5191096",
"0.51699805",
"0.5164772",
"0.5162587",
"0.51462203",
"0.51119053",
"0.51037437",
"0.508876",
"0.5087604",
"0.50781536",
"0.50768113",
"0.50505435",
"0.5036477",
"0.5032054"
]
| 0.84918064 | 0 |
Test that the byteps_push_pull correctly sums 1D, 2D, 3D tensors. | def test_byteps_push_pull_inplace(self):
size = bps.size()
dtypes = ['float16', 'float32', 'float64']
dims = [1, 2, 3]
count = 0
ctx = self._current_context()
shapes = [(), (17), (17, 17), (17, 17, 17)]
for dtype, dim in itertools.product(dtypes, dims):
mx.random.seed(1234, ctx=ctx)
tensor = mx.nd.random.uniform(-100, 100, shape=shapes[dim],
ctx=ctx)
tensor = tensor.astype(dtype)
multiplied = tensor.copy()
bps.byteps_declare_tensor("tensor_" + str(count))
bps.byteps_push_pull(tensor, name="tensor_" + str(count))
max_difference = mx.nd.max(mx.nd.subtract(tensor, multiplied))
count += 1
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3 or dtype in ['int32', 'int64']:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
break
if max_difference > threshold:
print("self", count, dtype, dim, max_difference, threshold)
print("tensor", bps.rank(), tensor)
print("multiplied", bps.rank(), multiplied)
assert max_difference <= threshold, 'bps.byteps_push_pull produces \
incorrect results for self'
print('test_byteps_push_pull_inplace passed') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_byteps_push_pull(self):\n dtypes = ['float16', 'float32', 'float64']\n dims = [1, 2, 3]\n count = 0\n ctx = self._current_context()\n shapes = [(), (17), (17, 17), (17, 17, 17)]\n for dtype, dim in itertools.product(dtypes, dims):\n # MXNet uses gpu_id as part of the seed, so to get identical seeds\n # we must set a context.\n mx.random.seed(10 + 10 * bps.rank(), ctx=ctx)\n tensor = mx.nd.random.uniform(-100, 100, shape=shapes[dim],\n ctx=ctx)\n tensor = tensor.astype(dtype)\n input = tensor.asnumpy()\n\n bps.byteps_declare_tensor(\"tensor_\" + str(count))\n bps.byteps_push_pull(tensor, name=\"tensor_\"+str(count))\n tensor.wait_to_read()\n output = tensor.asnumpy()\n assert np.allclose(input, output)\n count += 1\n\n print('test_byteps_push_pull passed')",
"def test_unstack3():\n x = np.arange(1, 49).reshape((4, 3, 2, 2)).astype(np.float64)\n axis = 1\n x_tensor = paddle.to_tensor(x)\n out_list = paddle.unstack(x_tensor, axis=axis)\n length = len(out_list)\n for i in range(length):\n ept = x[:, i, :, :]\n npt.assert_allclose(out_list[i].numpy(), ept)",
"def test_unstack2():\n x = np.arange(1, 25).reshape((4, 2, 3)).astype(np.float32)\n axis = 2\n x_tensor = paddle.to_tensor(x)\n out_list = paddle.unstack(x_tensor, axis=axis)\n length = len(out_list)\n for i in range(length):\n ept = x[:, :, i]\n npt.assert_allclose(out_list[i].numpy(), ept)",
"def test_unstack1():\n x = np.arange(1, 25).reshape((4, 2, 3)).astype(np.float32)\n axis = -1\n x_tensor = paddle.to_tensor(x)\n out_list = paddle.unstack(x_tensor, axis=axis)\n length = len(out_list)\n for i in range(length):\n ept = x[:, :, i]\n npt.assert_allclose(out_list[i].numpy(), ept)",
"def test_unstack():\n x = np.arange(1, 13).reshape((3, 2, 2)).astype(np.int32)\n axis = 0\n x_tensor = paddle.to_tensor(x)\n out_list = paddle.unstack(x_tensor, axis=axis)\n length = len(out_list)\n for i in range(length):\n ept = x[i, :, :]\n npt.assert_allclose(out_list[i].numpy(), ept)",
"def push_pull(tensor, scope='', average=None, device_dense='', device_sparse='',\n compression=Compression.none, op=None, enable_async=False):\n op = handle_average_backwards_compatibility(op, average).value\n # Averaging happens in framework code, so translate that to Sum for the actual call\n true_op = Sum if op == Average else op\n\n with tf.device(device_dense):\n byteps_size = tf.cast(size(), dtype=tensor.dtype)\n tensor_compressed, ctx = compression.compress(tensor)\n summed_tensor_compressed = _push_pull(tensor_compressed, scope)\n summed_tensor = compression.decompress(summed_tensor_compressed, ctx)\n if not enable_async:\n _div = tf.div if hasattr(tf, 'div') else tf.math.divide\n new_tensor = (_div(summed_tensor, byteps_size)\n if op == Average else summed_tensor)\n else: # no need to average for async training\n new_tensor = summed_tensor\n return new_tensor",
"def test_remote_buffer() -> None:\n # Prepare the input and output data\n shape_1 = (1, 3, 5)\n shape_2 = (7, 11)\n d_type_1 = np.dtype(\"float32\")\n d_type_2 = np.dtype(\"float16\")\n\n data: Dict[str, np.ndarray] = {}\n\n # Store and load data for the first tensor\n data[\"store_in_1\"] = np.random.rand(*shape_1).astype(d_type_1)\n data[\"load_in_1\"] = np.zeros(shape_1).astype(d_type_1)\n data[\"load_in_1_inplace\"] = np.zeros(shape_1).astype(d_type_1)\n # Store and load data for the second tensor\n data[\"store_in_2\"] = np.random.rand(*shape_2).astype(d_type_2)\n data[\"load_in_2\"] = np.zeros(shape_2).astype(d_type_2)\n # Store and load data for the third tensor\n data[\"store_in_3\"] = np.random.rand(*shape_2).astype(d_type_2)\n data[\"load_in_3_inplace\"] = np.zeros(shape_2).astype(d_type_2)\n\n ir, d2h_streams = build_model(data)\n\n # Get the tensor_ids\n labels = (\n \"load_in_1\",\n \"load_in_1_inplace\",\n \"load_out_1\",\n \"load_out_1_inplace\",\n \"load_in_2\",\n \"load_in_3_inplace\",\n \"load_out_2\",\n \"load_out_3_inplace\",\n )\n tensor_d2h = {label: d2h_streams[label] for label in labels}\n\n session = popxl.Session(ir, \"ipu_model\")\n with session:\n outputs = session.run()\n\n # Assert that the tensors are correct\n remote_load_scenarios = (\n \"1\",\n \"1_inplace\",\n \"2\",\n \"3_inplace\",\n )\n for scenario in remote_load_scenarios:\n print(f\"Now asserting remote load scenario {scenario}\")\n # Get data to assert\n store_in_data = data[f\"store_in_{scenario.replace('_inplace', '')}\"]\n load_in_data_before_op_call = data[f\"load_in_{scenario}\"]\n load_in_data_after_op_call = outputs[tensor_d2h[f\"load_in_{scenario}\"]]\n load_out_data = outputs[tensor_d2h[f\"load_out_{scenario}\"]]\n shape = shape_1 if \"1\" in scenario else shape_2\n d_type = d_type_1 if \"1\" in scenario else d_type_2\n inplace = True if \"inplace\" in scenario else False\n # Assert shape and type\n assert load_in_data_after_op_call.shape == shape\n assert load_in_data_after_op_call.dtype == d_type\n assert load_out_data.shape == shape\n assert load_out_data.dtype == d_type\n\n # Assert that the data has been loaded\n assert np.allclose(store_in_data, load_out_data)\n if inplace:\n # Assert that the load in data has been overwritten\n assert np.allclose(load_in_data_after_op_call, store_in_data)\n else:\n # Assert that the load in data has not been overwritten\n assert np.allclose(load_in_data_after_op_call, load_in_data_before_op_call)",
"def test_radd(self):\n tensor = Tensor([2, 4, 6, 8])\n result = 1 + tensor\n result_np = np.array(1) + tensor\n result_arr = [1, 1, 1, 1] + tensor\n\n assert result.data.tolist() == [3, 5, 7, 9]\n assert result_np.data.tolist() == [3, 5, 7, 9]\n assert result_arr.data.tolist() == [3, 5, 7, 9]",
"def test_add_op_jit():\n x = np.array([1, 2, 3, 4, 5, 6, 7])\n paddle_x = paddle.to_tensor(x).astype(\"float32\")\n paddle_x.stop_gradient = False\n print(paddle_x)\n a = 1\n b = 5\n out = custom_ops.slice_test(paddle_x, a, b)\n print(\"out: \", out)\n print(\"numpy out: \", x[a:b])\n assert np.allclose(out.numpy(), x[a:b])\n print(\"run success\")",
"def test13(self):\n a = bcolz.ones((self.N, 1))\n b = bcolz.zeros(a.shape)\n b = bcolz.eval('a + b')\n self.assertEqual(b.sum(), self.N)",
"def test_op_add_offload_array_int(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1, 4711 * 1024, dtype=int)\n o = a + 1\n\n old_a = numpy.empty_like(a)\n old_o = numpy.empty_like(o)\n old_a[:] = a[:]\n old_o[:] = o[:]\n expect = a + o\n\n offl_a = stream.bind(a)\n offl_o = stream.bind(o)\n offl_r = offl_a + offl_o\n offl_a.update_host()\n r = offl_r.update_host().array\n stream.sync()\n\n self.assertEqual(r.shape, a.shape)\n self.assertEqual(r.dtype, a.dtype)\n self.assertTrue((a == old_a).all(),\n \"Input array operand 1 must not be modified: \"\n \"{0} should be {1}\".format(a, old_a))\n self.assertTrue((o == old_o).all(),\n \"Input array operand 2 must not be modified: \"\n \"{0} should be {1}\".format(o, old_o))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))",
"def test_op_add_offload_array_float(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1.0, 4711.0 * 1024, dtype=float)\n o = a + 1.0\n\n old_a = numpy.empty_like(a)\n old_o = numpy.empty_like(o)\n old_a[:] = a[:]\n old_o[:] = o[:]\n expect = a + o\n\n offl_a = stream.bind(a)\n offl_o = stream.bind(o)\n offl_r = offl_a + offl_o\n offl_a.update_host()\n r = offl_r.update_host().array\n stream.sync()\n\n self.assertEqual(r.shape, a.shape)\n self.assertEqual(r.dtype, a.dtype)\n self.assertTrue((a == old_a).all(),\n \"Input array operand 1 must not be modified: \"\n \"{0} should be {1}\".format(a, old_a))\n self.assertTrue((o == old_o).all(),\n \"Input array operand 2 must not be modified: \"\n \"{0} should be {1}\".format(o, old_o))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))",
"def test_network_unflow_loss(self):\n height = 128\n width = 128\n num_features = 3\n batch_size = 2\n\n # Create the graph.\n input_image_a = tf.placeholder(shape=[None, height, width, num_features], dtype=tf.float32)\n input_image_b = tf.placeholder(shape=[None, height, width, num_features], dtype=tf.float32)\n forward_flow, backward_flow, forward_flows, backward_flows = self.pwc_net.get_bidirectional(\n input_image_a, input_image_b)\n self.assertEqual(6, len(forward_flows))\n self.assertEqual(6, len(backward_flows))\n\n image_a = np.zeros(shape=[batch_size, height, width, num_features], dtype=np.float32)\n image_a[:, 10:height - 10, 10:width - 10, :] = 1.0\n image_b = np.zeros(shape=[batch_size, height, width, num_features], dtype=np.float32)\n image_b[:, 5:height - 5, 5:width - 5, :] = 1.0\n\n self.sess.run(tf.global_variables_initializer())\n trainable_vars = tf.trainable_variables(scope='pwc_net')\n\n # Test output shapes.\n outputs = self.sess.run([forward_flow, backward_flow] + forward_flows + backward_flows,\n feed_dict={input_image_a: image_a, input_image_b: image_b})\n self.assertTupleEqual((batch_size, height, width, 2), outputs[0].shape)\n self.assertTupleEqual((batch_size, height, width, 2), outputs[1].shape)\n self.assertTupleEqual((batch_size, height / 64, width / 64, 2), outputs[2].shape)\n self.assertTupleEqual((batch_size, height / 32, width / 32, 2), outputs[3].shape)\n self.assertTupleEqual((batch_size, height / 16, width / 16, 2), outputs[4].shape)\n self.assertTupleEqual((batch_size, height / 8, width / 8, 2), outputs[5].shape)\n self.assertTupleEqual((batch_size, height / 4, width / 4, 2), outputs[6].shape)\n self.assertTupleEqual((batch_size, height / 4, width / 4, 2), outputs[7].shape)\n self.assertTupleEqual((batch_size, height / 64, width / 64, 2), outputs[8].shape)\n self.assertTupleEqual((batch_size, height / 32, width / 32, 2), outputs[9].shape)\n self.assertTupleEqual((batch_size, height / 16, width / 16, 2), outputs[10].shape)\n self.assertTupleEqual((batch_size, height / 8, width / 8, 2), outputs[11].shape)\n self.assertTupleEqual((batch_size, height / 4, width / 4, 2), outputs[12].shape)\n self.assertTupleEqual((batch_size, height / 4, width / 4, 2), outputs[13].shape)\n\n # Check that the gradients are flowing.\n grad_op = tf.gradients(forward_flow, trainable_vars + [input_image_a, input_image_b])\n for grad in grad_op:\n self.assertNotEqual(grad, None)\n grad_op = tf.gradients(backward_flow, trainable_vars + [input_image_a, input_image_b])\n for grad in grad_op:\n self.assertNotEqual(grad, None)\n\n # Get the loss.\n training_loss, _, _, _, _ = self.pwc_net.get_unflow_training_loss(input_image_a, input_image_b,\n forward_flows, backward_flows)\n # Check the loss.\n loss_value = self.sess.run(training_loss, feed_dict={input_image_a: image_a, input_image_b: image_b})\n self.assertNotAlmostEqual(loss_value, 0.0)\n\n # Check the gradients.\n loss_grad_ops = tf.gradients(training_loss, trainable_vars + [input_image_a, input_image_b])\n self.assertGreater(len(loss_grad_ops), 0)\n for grad in loss_grad_ops:\n self.assertNotEqual(grad, None)\n grads = self.sess.run(loss_grad_ops, feed_dict={input_image_a: image_a, input_image_b: image_b})\n for grad in grads:\n self.assertNotAlmostEqual(0.0, np.sum(grad))",
"def sliding_window_decoding( model, X, input_shape, overlapping = 32 ) :\n patch_bboxes = get_patch_bboxes( X.shape, input_shape, overlapping )\n n_samples, n_chs, height, width = X.shape\n Z = np.zeros( X.shape, dtype = np.float32 )\n C = np.zeros( X.shape, dtype = np.float32 )\n pad_before, pad_after = min( input_shape ) // 4, min( input_shape ) // 4\n for top, bot, left, right in patch_bboxes :\n x = X[ :, :, top:bot, left:right ]\n z = model.predict( x )\n if ( top == 0 ) and ( bot == height ) :\n if ( left == 0 ) and ( right == width ):\n Z[:,:,top:bot,left:right] += z\n C[:,:,top:bot,left:right] += 1. \n elif ( left == 0 ) :\n Z[:,:,top:bot,left:right-pad_after] += z[:,:,:,:-pad_after]\n C[:,:,top:bot,left:right-pad_after] += 1.\n elif ( right == width ) :\n Z[:,:,top:bot,left+pad_before:right] += z[:,:,:,pad_before:]\n C[:,:,top:bot,left+pad_before:right] += 1.\n else :\n Z[:,:,top:bot,left+pad_before:right-pad_after] += z[:,:,:,pad_before:-pad_after]\n C[:,:,top:bot,left+pad_before:right-pad_after] += 1. \n elif ( top == 0 ) :\n if ( left == 0 ) and ( right == width ):\n Z[:,:,top:bot-pad_after,left:right] += z[:,:,:-pad_after,:]\n C[:,:,top:bot-pad_after,left:right] += 1.\n elif ( left == 0 ) :\n Z[:,:,top:bot-pad_after,left:right-pad_after] += z[:,:,:-pad_after,:-pad_after]\n C[:,:,top:bot-pad_after,left:right-pad_after] += 1. \n elif ( right == width ) :\n Z[:,:,top:bot-pad_after,left+pad_before:right] += z[:,:,:-pad_after,pad_before:]\n C[:,:,top:bot-pad_after,left+pad_before:right] += 1.\n else :\n Z[:,:,top:bot-pad_after,left+pad_before:right-pad_after] += z[:,:,:-pad_after,pad_before:-pad_after]\n C[:,:,top:bot-pad_after,left+pad_before:right-pad_after] += 1.\n elif ( bot == height ) :\n if ( left == 0 ) and ( right == width ):\n Z[:,:,top+pad_before:bot,left:right] += z[:,:,pad_before:,:]\n C[:,:,top+pad_before:bot,left:right] += 1.\n elif ( left == 0 ) :\n Z[:,:,top+pad_before:bot,left:right-pad_after] += z[:,:,pad_before:,:-pad_after]\n C[:,:,top+pad_before:bot,left:right-pad_after] += 1.\n elif ( right == width ) :\n Z[:,:,top+pad_before:bot,left+pad_before:right] += z[:,:,pad_before:,pad_before:]\n C[:,:,top+pad_before:bot,left+pad_before:right] += 1.\n else :\n Z[:,:,top+pad_before:bot,left+pad_before:right-pad_after] += z[:,:,pad_before:,pad_before:-pad_after]\n C[:,:,top+pad_before:bot,left+pad_before:right-pad_after] += 1.\n else :\n if ( left == 0 ) and ( right == width ) :\n Z[:,:,top+pad_before:bot-pad_after,left:right] += z[:,:,pad_before:-pad_after,:]\n C[:,:,top+pad_before:bot-pad_after,left:right] += 1.\n elif ( left == 0 ) :\n Z[:,:,top+pad_before:bot-pad_after,left:right-pad_after] += z[:,:,pad_before:-pad_after,:-pad_after]\n C[:,:,top+pad_before:bot-pad_after,left:right-pad_after] += 1. \n elif ( right == width ) :\n Z[:,:,top+pad_before:bot-pad_after,left+pad_before:right] += z[:,:,pad_before:-pad_after,pad_before:]\n C[:,:,top+pad_before:bot-pad_after,left+pad_before:right] += 1.\n else :\n Z[:,:,top+pad_before:bot-pad_after,left+pad_before:right-pad_after] += z[:,:,pad_before:-pad_after,pad_before:-pad_after]\n C[:,:,top+pad_before:bot-pad_after,left+pad_before:right-pad_after] += 1.\n return Z / C",
"def test_op_isub_offload_array_float(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1, 4711 * 1024, dtype=float)\n o = a + 1.3\n\n old_a = numpy.empty_like(a)\n old_o = numpy.empty_like(o)\n old_a[:] = a[:]\n old_o[:] = o[:]\n expect = a - o\n\n offl_a = stream.bind(a)\n offl_o = stream.bind(o)\n offl_a -= offl_o\n offl_a.update_host()\n r = offl_a.array\n stream.sync()\n\n self.assertTrue((r != old_a).all(),\n \"Array operand must be modified: \"\n \"{0} should be {1}\".format(a, old_a))\n self.assertTrue((o == old_o).all(),\n \"Input array operand 2 must not be modified: \"\n \"{0} should be {1}\".format(o, old_o))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))",
"def test_add_get_tensor_3D(mock_data):\n dataset = Dataset(\"test-dataset\")\n\n # 3D tensors of all datatypes\n data_3D = mock_data.create_data((10, 10, 10))\n add_get_arrays(dataset, data_3D)",
"def test_np_memory_layout_add_input_tensor_pystepiocallback():\n\n def _test(transposedInput, transposedOutput):\n builder = popart.Builder()\n\n # Create a random constant and transpose it\n np.random.seed(1)\n input1 = builder.addInputTensor(\"INT32\", [2, 2])\n\n # Run a session to prove this\n output1 = builder.aiOnnx.identity([input1])\n builder.addOutputTensor(output1)\n anchorConfig = {output1: popart.AnchorReturnType(\"ALL\")}\n\n dataFlow = popart.DataFlow(1, anchorConfig)\n deviceConfig = {\"numIPUs\": 1}\n dm = popart.DeviceManager()\n device = dm.createIpuModelDevice(deviceConfig)\n session = popart.InferenceSession(\n fnModel=builder.getModelProto(), dataFlow=dataFlow, deviceInfo=device\n )\n\n # Compile graph and place weights onto it\n session.prepareDevice()\n session.weightsFromHost()\n\n # Feed the session with a transposed (non-contiguous) tensor.\n input1Value = np.random.randint(0, 100, size=(2, 2), dtype=\"int32\")\n if transposedInput:\n input1Value = np.transpose(input1Value, [1, 0])\n output1Value = np.random.randint(0, 100, size=(2, 2), dtype=\"int32\")\n if transposedOutput:\n output1Value = np.transpose(output1Value, [1, 0])\n\n with pytest.raises(\n (Exception, RuntimeError, popart.popart_exception)\n ) as e_info:\n\n # pylint: disable=unused-argument\n def input_callback(id, prefetch):\n return input1Value\n\n def input_complete_callback(_): # id is an unused parameter\n pass\n\n def output_callback(_): # id is an unused parameter\n return output1Value\n\n def output_complete_callback(_): # id is an unused parameter\n pass\n\n stepio = popart.PyStepIOCallback(\n input_callback,\n input_complete_callback,\n output_callback,\n output_complete_callback,\n )\n\n session.run(stepio)\n\n assert \"contiguous\" in e_info.value.args[0]\n\n _test(transposedInput=True, transposedOutput=False)\n _test(transposedInput=False, transposedOutput=True)",
"def test_bit_add_multiple_bytes(self):\n ops = [bitwise_operations.bit_add(self.test_bin_zeroes, 8, 16, 65535, False, aerospike.BIT_OVERFLOW_FAIL, None)]\n\n self.as_connection.operate(self.test_key, ops)\n\n _, _, bins = self.as_connection.get(self.test_key)\n expected_result = bytearray([0] * 1 + [255] * 2 + [0] * 2)\n assert bins[self.test_bin_zeroes] == expected_result",
"def test_op_isub_offload_array_int(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1, 4711 * 1024, dtype=int)\n o = a + 1\n\n old_a = numpy.empty_like(a)\n old_o = numpy.empty_like(o)\n old_a[:] = a[:]\n old_o[:] = o[:]\n expect = a - o\n\n offl_a = stream.bind(a)\n offl_o = stream.bind(o)\n offl_a -= offl_o\n offl_a.update_host()\n r = offl_a.array\n stream.sync()\n\n self.assertTrue((r != old_a).all(),\n \"Array operand must be modified: \"\n \"{0} should be {1}\".format(a, old_a))\n self.assertTrue((o == old_o).all(),\n \"Input array operand 2 must not be modified: \"\n \"{0} should be {1}\".format(o, old_o))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))",
"def test_add_get_tensor(mock_data):\n dataset = Dataset(\"test-dataset\")\n\n # 1D tensors of all data types\n data = mock_data.create_data(10)\n add_get_arrays(dataset, data)",
"def test_np_memory_layout_add_input_tensor_pystepio():\n\n builder = popart.Builder()\n\n # Create a random constant and transpose it\n np.random.seed(1)\n input1 = builder.addInputTensor(\"INT32\", [2, 2])\n\n # Run a session to prove this\n output1 = builder.aiOnnx.identity([input1])\n builder.addOutputTensor(output1)\n anchorConfig = {output1: popart.AnchorReturnType(\"ALL\")}\n\n dataFlow = popart.DataFlow(1, anchorConfig)\n deviceConfig = {\"numIPUs\": 1}\n dm = popart.DeviceManager()\n device = dm.createIpuModelDevice(deviceConfig)\n session = popart.InferenceSession(\n fnModel=builder.getModelProto(), dataFlow=dataFlow, deviceInfo=device\n )\n\n # Compile graph and place weights onto it\n session.prepareDevice()\n session.weightsFromHost()\n anchors = session.initAnchorArrays()\n\n # Feed the session with a transposed (non-contiguous) tensor.\n input1Value = np.random.randint(0, 100, size=(2, 2), dtype=\"int32\")\n input1Value = np.transpose(input1Value, [1, 0])\n\n with pytest.raises((RuntimeError, popart.popart_exception)) as e_info:\n stepio = popart.PyStepIO({input1: input1Value}, anchors)\n session.run(stepio)\n\n assert \"contiguous\" in e_info.value.args[0]",
"def test_op_sub_offload_array_float(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1.0, 4711.0 * 1024, dtype=float)\n o = a + 1.0\n\n old_a = numpy.empty_like(a)\n old_o = numpy.empty_like(o)\n old_a[:] = a[:]\n old_o[:] = o[:]\n expect = a - o\n\n offl_a = stream.bind(a)\n offl_o = stream.bind(o)\n offl_r = offl_a - offl_o\n offl_a.update_host()\n r = offl_r.update_host().array\n stream.sync()\n\n self.assertEqual(r.shape, a.shape)\n self.assertEqual(r.dtype, a.dtype)\n self.assertTrue((a == old_a).all(),\n \"Input array operand 1 must not be modified: \"\n \"{0} should be {1}\".format(a, old_a))\n self.assertTrue((o == old_o).all(),\n \"Input array operand 2 must not be modified: \"\n \"{0} should be {1}\".format(o, old_o))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))",
"def __call__(self, sample):\n temp = sample['stack']/255.0\n totensor = transforms.ToTensor()\n sample['stack'] = totensor(temp.transpose((1, 2, 0)))\n return sample",
"def test_add_get_tensor_2D(mock_data):\n dataset = Dataset(\"test-dataset\")\n\n # 2D tensors of all data types\n data_2D = mock_data.create_data((10, 10))\n add_get_arrays(dataset, data_2D)",
"def PrePush(self, image):\n pass",
"def test_unbroadcast_addbroadcast(self):\r\n\r\n x = matrix()\r\n assert unbroadcast(x, 0) is x\r\n assert unbroadcast(x, 1) is x\r\n assert unbroadcast(x, 1, 0) is x\r\n assert unbroadcast(x, 0, 1) is x\r\n\r\n assert addbroadcast(x, 0) is not x\r\n assert addbroadcast(x, 1) is not x\r\n assert addbroadcast(x, 1, 0).owner.inputs[0] is x\r\n\r\n assert unbroadcast(addbroadcast(x, 0), 0) is x\r\n assert addbroadcast(unbroadcast(x, 0), 0) is not x\r\n x = row()\r\n assert unbroadcast(x, 0) is not x\r\n assert unbroadcast(x, 1) is x\r\n assert unbroadcast(x, 1, 0) is not x\r\n assert unbroadcast(x, 0, 1) is not x\r\n\r\n assert addbroadcast(x, 0) is x\r\n assert addbroadcast(x, 1).owner.inputs[0] is x\r\n assert addbroadcast(x, 1, 0).owner.inputs[0] is x\r\n assert addbroadcast(x, 0, 1).owner.inputs[0] is x\r\n\r\n assert unbroadcast(addbroadcast(x, 1), 1) is x\r\n assert addbroadcast(unbroadcast(x, 1), 1) is not x\r\n\r\n # The first broadcast is remove the broadcast, so the second\r\n # should not make one\r\n assert unbroadcast(unbroadcast(x, 0), 0).owner.inputs[0] is x\r\n\r\n # Test that consecutive Rebroadcast op are fused\r\n x = TensorType(dtype='float64', broadcastable=(True, True))()\r\n assert unbroadcast(unbroadcast(x, 1), 0).owner.inputs[0] is x\r\n assert addbroadcast(unbroadcast(x, 1), 0).owner.inputs[0] is x\r\n assert addbroadcast(unbroadcast(x, 0), 0) is x",
"def test_Pad3D8():\n input_shape = (1, 2, 3)\n pad = [1, 1]\n mode = \"reflect\"\n res = [[[4, 5, 6], [1, 2, 3], [4, 5, 6], [1, 2, 3]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NLC\", data=data)",
"def test_op_sub_offload_array_int(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1, 4711 * 1024, dtype=int)\n o = a + 1\n\n old_a = numpy.empty_like(a)\n old_o = numpy.empty_like(o)\n old_a[:] = a[:]\n old_o[:] = o[:]\n expect = a - o\n\n offl_a = stream.bind(a)\n offl_o = stream.bind(o)\n offl_r = offl_a - offl_o\n offl_a.update_host()\n r = offl_r.update_host().array\n stream.sync()\n\n self.assertEqual(r.shape, a.shape)\n self.assertEqual(r.dtype, a.dtype)\n self.assertTrue((a == old_a).all(),\n \"Input array operand 1 must not be modified: \"\n \"{0} should be {1}\".format(a, old_a))\n self.assertTrue((o == old_o).all(),\n \"Input array operand 2 must not be modified: \"\n \"{0} should be {1}\".format(o, old_o))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))",
"def test_op_iadd_offload_array_float(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1, 4711 * 1024, dtype=float)\n o = a + 1.3\n\n old_a = numpy.empty_like(a)\n old_o = numpy.empty_like(o)\n old_a[:] = a[:]\n old_o[:] = o[:]\n expect = a + o\n\n offl_a = stream.bind(a)\n offl_o = stream.bind(o)\n offl_a += offl_o\n offl_a.update_host()\n r = offl_a.array\n stream.sync()\n\n self.assertTrue((r != old_a).all(),\n \"Array operand must be modified: \"\n \"{0} should be {1}\".format(a, old_a))\n self.assertTrue((o == old_o).all(),\n \"Input array operand 2 must not be modified: \"\n \"{0} should be {1}\".format(o, old_o))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))",
"def test_push_pull(queue):\n assert queue.is_connected\n\n # Test without a topic\n queue.put('hello')\n assert queue.get() == ('default', 'hello')\n\n # Test with a specified topic\n queue.put('hello', topic='priority')\n assert queue.get() == ('priority', 'hello')\n\n # Test with an unspecified topic\n with pytest.raises(AssertionError):\n queue.put('hello', 'not_a_topic')"
]
| [
"0.848981",
"0.6435212",
"0.63493204",
"0.63393897",
"0.627382",
"0.59382117",
"0.5861871",
"0.5806715",
"0.55826056",
"0.5418833",
"0.53611726",
"0.5323135",
"0.52746826",
"0.5208869",
"0.52074116",
"0.52009845",
"0.5188586",
"0.5170058",
"0.51654714",
"0.5162977",
"0.51436746",
"0.51134205",
"0.5105027",
"0.50885403",
"0.50852334",
"0.507998",
"0.50754684",
"0.5051485",
"0.50383884",
"0.50295734"
]
| 0.80997765 | 1 |
scan the pzt_motor (e.g., pzt_dcm_th2), detectors can be any signal or motor (e.g., Andor, dcm.th2) | def pzt_scan(pzt_motor, start, stop, steps, detectors=[Vout2], sleep_time=1, md=None):
if Andor in detectors:
exposure_time = yield from bps.rd(Andor.cam.acquire_time)
yield from mv(Andor.cam.acquire, 0)
yield from mv(Andor.cam.image_mode, 0)
yield from mv(Andor.cam.num_images, 1)
Andor.cam.acquire_period.put(exposure_time)
motor = pzt_motor.setpos
motor_readback = pzt_motor.pos
motor_ini_pos = motor_readback.get()
detector_set_read = [motor, motor_readback]
detector_all = detector_set_read + detectors
_md = {
"detectors": [det.name for det in detector_all],
"detector_set_read": [det.name for det in detector_set_read],
"motors": [motor.name],
"XEng": XEng.position,
"plan_args": {
"pzt_motor": pzt_motor.name,
"start": start,
"stop": stop,
"steps": steps,
"detectors": "detectors",
"sleep_time": sleep_time,
},
"plan_name": "pzt_scan",
"hints": {},
"motor_pos": wh_pos(print_on_screen=0),
"operator": "FXI",
}
_md.update(md or {})
try:
dimensions = [(pzt_motor.hints["fields"], "primary")]
except (AttributeError, KeyError):
pass
else:
_md["hints"].setdefault("dimensions", dimensions)
@stage_decorator(list(detector_all))
@run_decorator(md=_md)
def pzt_inner_scan():
my_var = np.linspace(start, stop, steps)
print(my_var)
for x in my_var:
print(x)
yield from mv(motor, x)
yield from bps.sleep(sleep_time)
yield from trigger_and_read(list(detector_all))
yield from mv(motor, motor_ini_pos)
uid = yield from pzt_inner_scan()
h = db[-1]
scan_id = h.start["scan_id"]
det = [det.name for det in detectors]
det_name = ""
for i in range(len(det)):
det_name += det[i]
det_name += ", "
det_name = "[" + det_name[:-2] + "]"
txt1 = get_scan_parameter()
txt2 = f"detectors = {det_name}"
txt = txt1 + "\n" + txt2
insert_text(txt)
print(txt)
return uid
# def pzt_scan(moving_pzt, start, stop, steps, read_back_dev, record_dev, delay_time=5, print_flag=1, overlay_flag=0):
"""
Input:
-------
moving_pzt: pv name of the pzt device, e.g. 'XF:18IDA-OP{Mir:DCM-Ax:Th2Fine}SET_POSITION.A'
read_back_dev: device (encoder) that changes with moving_pzt, e.g., dcm.th2
record_dev: signal you want to record, e.g. Vout2
    delay_time: waiting time for the device to respond
""" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pzt_scan_multiple(\n moving_pzt,\n start,\n stop,\n steps,\n detectors=[Vout2],\n repeat_num=2,\n sleep_time=1,\n fn=\"/home/xf18id/Documents/FXI_commision/DCM_scan/\",\n):\n\n det = [det.name for det in detectors]\n det_name = \"\"\n for i in range(len(det)):\n det_name += det[i]\n det_name += \", \"\n det_name = \"[\" + det_name[:-2] + \"]\"\n txt = f\"pzt_scan_multiple(moving_pzt={moving_pzt.name}, start={start}, stop={stop}, steps={steps}, detectors={det_name}, repeat_num={repeat_num}, sleep_time={sleep_time}, fn={fn})\\n Consisting of:\\n\"\n insert_text(txt)\n\n current_eng = XEng.position\n df = pd.DataFrame(data=[])\n\n for num in range(repeat_num):\n yield from pzt_scan(\n moving_pzt, start, stop, steps, detectors=detectors, sleep_time=sleep_time\n )\n yield from abs_set(XEng, current_eng, wait=True)\n print(\"\\nscan finished, ploting and saving data...\")\n fig = plt.figure()\n for num in reversed(range(repeat_num)):\n h = db[-1 - num]\n scan_id = h.start[\"scan_id\"]\n detector_set_read = h.start[\"detector_set_read\"]\n col_x_prefix = detector_set_read[1]\n col_x = col_x_prefix + \" #\" + \"{}\".format(scan_id)\n\n motor_readout = np.array(list(h.data(col_x_prefix)))\n df[col_x] = pd.Series(motor_readout)\n\n detector_signal = h.start[\"detectors\"]\n\n for i in range(len(detector_signal)):\n det = detector_signal[i]\n\n if (det == \"Andor\") or (det == \"detA1\"):\n det = det + \"_stats1_total\"\n det_readout = np.array(list(h.data(det)))\n col_y_prefix = det\n col_y = col_y_prefix + \" #\" + \"{}\".format(scan_id)\n df[col_y] = pd.Series(det_readout)\n plt.subplot(len(detector_signal), 1, i + 1)\n plt.plot(df[col_x], df[col_y])\n plt.ylabel(det)\n\n plt.subplot(len(detector_signal), 1, len(detector_signal))\n plt.xlabel(col_x_prefix)\n plt.subplot(len(detector_signal), 1, 1)\n plt.title(\"X-ray Energy: {:2.1f}keV\".format(current_eng))\n\n now = datetime.now()\n year = np.str(now.year)\n mon = \"{:02d}\".format(now.month)\n day = \"{:02d}\".format(now.day)\n hour = \"{:02d}\".format(now.hour)\n minu = \"{:02d}\".format(now.minute)\n current_date = year + \"-\" + mon + \"-\" + day\n fn = (\n save_file_dir\n + \"pzt_scan_\"\n + \"{:2.1f}keV_\".format(current_eng)\n + current_date\n + \"_\"\n + hour\n + \"-\"\n + minu\n )\n fn_fig = fn + \".tiff\"\n fn_file = fn + \".csv\"\n df.to_csv(fn_file, sep=\"\\t\")\n fig.savefig(fn_fig)\n print(\"save to: \" + fn_file)\n txt_finish = '## \"pzt_scan_multiple()\" finished'\n insert_text(txt_finish)",
"def __find_motors(self):\n # Start with Motor Info declaration\n # rospy.loginfo('%s: Pinging motor IDs %d through %d...' % (self.port_namespace, self.min_motor_id, self.max_motor_id))\n print('%s: Pinging motor IDs %d through %d...' % (self.port_namespace, self.min_motor_id, self.max_motor_id))\n self.motors = []\n self.motor_static_info = {}\n self.motor_info = {}\n\n # Getting Motor IDs\n for motor_id in range(self.min_motor_id, self.max_motor_id + 1):\n for trial in range(self.num_ping_retries):\n try:\n result = self.packet_handler.ping(self.port_handler, motor_id)\n\n except Exception as ex:\n rospy.logerr('Exception thrown while pinging motor %d - %s' % (motor_id, ex))\n continue\n\n if not result[1]: # If no error was returned, add motor ID\n self.motors.append(motor_id)\n break\n\n # Failure to find any motors :-(\n if not self.motors:\n rospy.logfatal('%s: No motors found.' % self.port_namespace)\n sys.exit(1)\n \n counts = defaultdict(int)\n\n to_delete_if_error = []\n # rospy.loginfo(\"Getting motor numbers.......\")\n print(\"Getting motor numbers.......\")\n\n # Find the model numbers asscoiated with the Motor IDs\n for motor_id in self.motors:\n for trial in range(self.num_ping_retries):\n model_number = self.packet_handler.read2ByteTxRx(self.port_handler, motor_id, 0)\n # rospy.logwarn(\"MOTOR_ID: \" + str(motor_id))\n # rospy.logwarn(\"MODEL_NUMBER: \" + str(model_number[0]))\n # rospy.logwarn(\"ERROR: \" + str(model_number[1]))\n print(\"MOTOR_ID: \" + str(motor_id))\n print(\"MODEL_NUMBER: \" + str(model_number[0]))\n print(\"ERROR_NUMBER: \" + str(model_number[1]))\n\n # Fill Motor Parameters\n self.__fill_motor_parameters(motor_id, model_number[0])\n counts[model_number[0]] += 1\n self.motor_info[str(motor_id)] = {\"model_number\": model_number[0]} # IRVIN\n break\n \n # If any errors happen, remove the motor from the list\n for motor_id in to_delete_if_error:\n self.motors.remove(motor_id)\n\n # rospy.set_param('dynamixel/%s/connected_ids' % self.port_namespace, self.motors)\n\n # rospy.set_param('dynamixel/%s/motor_info' % self.port_namespace, self.motor_info) # IRVIN\n\n status_str = '%s: Found %d motors - ' % (self.port_namespace, len(self.motors))\n\n # Get status of each motor\n for model_number,count in counts.items():\n if count:\n if model_number in MODEL_NUMBER_2_MOTOR_NAME:\n model_name = MODEL_NUMBER_2_MOTOR_NAME[model_number]['name']\n status_str += '%d %s [' % (count, model_name)\n \n for motor_id in self.motors:\n if motor_id in self.motor_static_info and self.motor_static_info[motor_id]['model'] == model_name:\n status_str += '%d, ' % motor_id \n \n status_str = status_str[:-2] + '], '\n\n # rospy.loginfo('%s, initialization complete.' % status_str[:-2])\n print('%s, initialization complete.' % status_str[:-2])",
"def infinite_scan(detectors, motor, points, duration=None,\n per_step=None, md=None):\n if per_step is None:\n per_step = bps.one_nd_step\n\n if md is None:\n md = {}\n\n md.update(motors=[motor.name])\n start = time.time()\n\n #@bpp.stage_decorator(list(detectors) + [motor])\n @bpp.reset_positions_decorator()\n @bpp.run_decorator(md=md)\n def inner():\n # Where last position is stored\n pos_cache = defaultdict(lambda: None)\n while duration is None or time.time() - start < duration:\n for pt in points:\n step = {motor: pt}\n yield from per_step(detectors, step, pos_cache)\n\n return (yield from inner())",
"def get_detector_par(self, det, hdu=None):\n # Binning\n binning = '1,1' if hdu is None else self.get_meta_value(self.get_headarr(hdu), 'binning')\n\n # Detector 1\n\n detector_dict1 = dict(\n binning = binning,\n det = 1,\n dataext = 1,\n specaxis = 0,\n specflip = False,\n spatflip = False,\n platescale = 0.135,\n darkcurr = 0.0,\n saturation = 65535.,\n nonlinear = 0.7, # Website says 0.6, but we'll push it a bit\n mincounts = -1e10,\n numamplifiers = 1,\n ronoise = np.atleast_1d([2.8]),\n )\n\n # Detector 2. \n detector_dict2 = detector_dict1.copy()\n detector_dict2.update(dict(\n det=2,\n dataext=2,\n ronoise=np.atleast_1d([3.1])\n ))\n\n\n # Detector 3,. \n detector_dict3 = detector_dict1.copy()\n detector_dict3.update(dict(\n det=3,\n dataext=3,\n ronoise=np.atleast_1d([3.1])\n ))\n\n # Set gain \n # https://www2.keck.hawaii.edu/inst/hires/instrument_specifications.html\n if hdu is None or hdu[0].header['CCDGAIN'].strip() == 'low':\n detector_dict1['gain'] = np.atleast_1d([1.9])\n detector_dict2['gain'] = np.atleast_1d([2.1])\n detector_dict3['gain'] = np.atleast_1d([2.1])\n elif hdu[0].header['CCDGAIN'].strip() == 'high':\n detector_dict1['gain'] = np.atleast_1d([0.78])\n detector_dict2['gain'] = np.atleast_1d([0.86])\n detector_dict3['gain'] = np.atleast_1d([0.84])\n else:\n msgs.error(\"Bad CCDGAIN mode for HIRES\")\n \n # Instantiate\n detector_dicts = [detector_dict1, detector_dict2, detector_dict3]\n return detector_container.DetectorContainer( **detector_dicts[det-1])",
"def get_detector_par(self, det, hdu=None):\n binning = '1,1' if hdu is None else self.get_meta_value(self.get_headarr(hdu), 'binning')\n #detwin2 = '[1:4102,300:600]' if self.par['rdx']['quicklook'] else '[1:4102,52:1920]'\n\n # Detector 1\n detector_dict1 = dict(\n binning = binning,\n det = 1,\n dataext = 1, # Not sure this is used\n specaxis = 0,\n specflip = False,\n spatflip = False,\n platescale = 0.127, # arcsec per pixel\n darkcurr = 0.0,\n saturation = 65535., # ADU\n nonlinear = 0.95,\n mincounts = 0,\n numamplifiers = 1,\n gain = np.atleast_1d([0.95]),\n ronoise = np.atleast_1d([4.5]),\n datasec = np.atleast_1d('[1:4102,280:2048]'),\n oscansec = np.atleast_1d('[1:4102,6:44]')\n )\n # Detector 2\n detector_dict2 = dict(\n binning = binning,\n det = 2,\n dataext = 2, # Not sure this is used\n specaxis = 0,\n specflip = False,\n spatflip = False,\n platescale = 0.127,\n darkcurr = 0.0,\n saturation = 65535., # ADU\n nonlinear = 0.95,\n mincounts = 0,\n numamplifiers = 1,\n gain = np.atleast_1d([0.95]),\n ronoise = np.atleast_1d([4.5]),\n datasec = np.atleast_1d('[1:4102,52:1920]'),\n oscansec = np.atleast_1d('[1:4102,6:40]')\n )\n\n detectors = [detector_dict1, detector_dict2]\n # Return\n return detector_container.DetectorContainer(**detectors[det-1])",
"def init_motors(self):\n # self.maxVelocity = 576# -> 5 m/s\n # self.maxTorque = 30\n\n # motor init\n for m in self.motors:\n m.setPosition(float('inf'))\n m.setVelocity(1.)\n\n # Propeller PID control params tunned with Ziegler–Nichols PID\n K_u = 150.\n T_u = 342.857 / 1000. # ms\n # no overshoot\n params_roll = {'P': K_u / 5., 'I': (2. / 5.) * K_u / T_u,\n 'D': K_u * T_u / 15., 'sp': 0.}\n self.rollPID = PID(params_roll['P'], params_roll['I'],\n params_roll['D'], setpoint=params_roll['sp'],\n output_limits=(-2., 2.), sample_time=self.deltaT)\n\n K_u = 150.\n T_u = 682.66 / 1000. # ms\n # no overshoot\n params_pitch = {'P': K_u/5.,\n 'I': (2. / 5.) * K_u / T_u,\n 'D': K_u*T_u/15.,\n 'sp': 0.}\n self.pitchPID = PID(params_pitch['P'], params_pitch['I'],\n params_pitch['D'], setpoint=params_pitch['sp'],\n output_limits=(-2., 2.), sample_time=self.deltaT)\n K_u = 20.\n T_u = 1621.33 / 1000. # ms\n # PD\n params_yaw = {'P': 0.8 * K_u,\n 'I': 0.,\n 'D': K_u * T_u / 10.,\n 'sp': self.target_yaw}\n self.yawPID = PID(params_yaw['P'], params_yaw['I'], params_yaw['D'],\n setpoint=params_yaw['sp'], output_limits=(-2., 2.),\n sample_time=self.deltaT, error_map=pi_clip)\n\n K_u = 20.\n T_u = 2668.8 / 1000. # ms\n # PD\n params_vert = {'P': 0.8 * K_u,\n 'I': 0.,\n 'D': K_u * T_u / 10.,\n 'sp': self.target_altitude}\n self.vertPID = PID(params_vert['P'], params_vert['I'],\n params_vert['D'], setpoint=params_vert['sp'],\n output_limits=(-5., 5.), sample_time=self.deltaT)\n\n return True",
"def pzt_overnight_scan(\n moving_pzt,\n start,\n stop,\n steps,\n detectors=[dcm.th2, Vout2],\n repeat_num=10,\n sleep_time=1,\n night_sleep_time=3600,\n scan_num=12,\n fn=\"/home/xf18id/Documents/FXI_commision/DCM_scan/\",\n):\n\n eng_ini = XEng.position\n print(\"current X-ray Energy: {:2.1f}keV\".format(current_def))\n print(\"run {0:d} times at {1:d} seconds interval\".format(repeat_num, scan_num))\n for i in range(scan_num):\n print(\"scan num: {:d}\".format(i))\n yield from pzt_scan_multiple(\n pzt_dcm_th2,\n start,\n stop,\n steps,\n detectors,\n repeat_num=repeat_num,\n sleep_time=sleep_time,\n fn=save_file_dir,\n )\n yield from bps.sleep(night_sleep_time)",
"def get_detector_par(self, det, hdu=None):\n binning = '1,1' if hdu is None else self.get_meta_value(self.get_headarr(hdu), 'binning')\n gain = 1.90 if hdu is None else self.get_headarr(hdu)[0]['GAIN']\n ronoise = 4.3 if hdu is None else self.get_headarr(hdu)[0]['RDNOISE']\n\n # Detector 1\n detector_dict1 = dict(\n binning = binning,\n det = 1,\n dataext = 0,\n specaxis = 1,\n specflip = True,\n spatflip = False,\n platescale = 0.125, # arcsec per pixel\n darkcurr = 5.0,\n saturation = 65535., # ADU\n nonlinear = 0.95,\n mincounts = 0,\n numamplifiers = 1,\n gain = np.atleast_1d([gain]),\n ronoise = np.atleast_1d([ronoise]),\n datasec = np.atleast_1d('[180:4112,50:4096]'),\n oscansec = np.atleast_1d('[180:4112,8:46]') # Trim down the oscansec - looks like some bad pixels\n )\n\n detectors = [detector_dict1]\n # Return\n return detector_container.DetectorContainer(**detectors[det-1])",
"def pzt_energy_scan(\n moving_pzt,\n start,\n stop,\n steps,\n eng_list,\n detectors=[dcm.th2, Vout2],\n repeat_num=1,\n sleep_time=1,\n fn=\"/home/xf18id/Documents/FXI_commision/DCM_scan/\",\n):\n det = [det.name for det in detectors]\n det_name = \"\"\n for i in range(len(det)):\n det_name += det[i]\n det_name += \", \"\n det_name = \"[\" + det_name[:-2] + \"]\"\n txt = f\"pzt_energy_scan(moving_pzt={moving_pzt.name}, start={start}, stop={stop}, steps={steps}, eng_list, detectors={det_name}, repeat_num={repeat_num}, sleep_time={sleep_time}, fn={fn})\\neng+list={eng_list}\\n Consisting of:\\n\"\n insert_text(txt)\n eng_ini = XEng.position\n yield from abs_set(shutter_open, 1)\n yield from bps.sleep(1)\n yield from abs_set(shutter_open, 1)\n print(\"shutter open\")\n for eng in eng_list:\n yield from abs_set(XEng, eng, wait=True)\n current_eng = XEng.position\n yield from bps.sleep(1)\n print(\"current X-ray Energy: {:2.1f}keV\".format(current_eng))\n yield from pzt_scan_multiple(\n pzt_dcm_th2,\n start,\n stop,\n steps,\n detectors,\n repeat_num=repeat_num,\n sleep_time=sleep_time,\n fn=fn,\n )\n yield from abs_set(XEng, eng_ini, wait=True)\n yield from abs_set(shutter_close, 1)\n yield from bps.sleep(1)\n yield from abs_set(shutter_close, 1)\n txt_finish = '## \"pzt_energy_scan()\" finished'\n insert_text(txt_finish)",
"def DriveMotor():\n\n # cnt overflows at 25KHz (approximately)\n cnt = intbv(0, min = 0, max = CNT_MAX + 1)\n\n # 10-bit duty cycle\n duty_cycle = intbv(0)[10:]\n\n while True:\n yield clk25.posedge, rst_n.negedge\n if rst_n == LOW:\n cnt[:] = 0\n duty_cycle[:] = 0\n dir.next = HIGH_OPTO\n pwm.next = LOW_OPTO\n en_n.next = LOW_OPTO\n else:\n # accept new consign at the beginning of a period\n if cnt == 0:\n # extract duty cycle and direction\n if speed >= 0:\n duty_cycle[:] = speed\n dir.next = HIGH_OPTO\n elif -speed >= CNT_MAX: # handle -1024 case\n duty_cycle[:] = CNT_MAX\n dir.next = LOW_OPTO\n else:\n duty_cycle[:] = -speed\n dir.next = LOW_OPTO\n\n # reached consign?\n if cnt >= duty_cycle:\n pwm.next = LOW_OPTO\n else:\n pwm.next = HIGH_OPTO\n\n if cnt == CNT_MAX:\n cnt[:] = 0\n else:\n cnt += 1\n\n en_n.next = LOW_OPTO",
"def scan(self):\n VIDS = '_tivo-videos._tcp.local.'\n names = []\n\n self.logger.info('Scanning for TiVos...\\n')\n\n # Get the names of servers offering TiVo videos\n browser = zeroconf.ServiceBrowser(self.rz, VIDS, None, ZCListener(names, logger=self.logger))\n\n # Give them a second (or more if no one has responded in the 1st second) to respond\n time.sleep(1)\n max_sec_to_wait = 10\n sec_waited = 0\n while not names and sec_waited < max_sec_to_wait:\n sec_waited += 1\n time.sleep(1)\n\n # Any results?\n if names:\n config.tivos_found = True\n\n # Now get the addresses -- this is the slow part\n for name in names:\n info = self.rz.get_service_info(VIDS, name + '.' + VIDS)\n log_serviceinfo(self.logger, info)\n\n if info:\n # zeroconf v2.7 removed ServiceInfo address member says use addresses instead.\n # Some debug logging to see if there is always at least the currently assumed 1 address (and maybe more?)\n self.logger.debug(f'Found zeroconf.ServiceInfo with {len(info.addresses)} IP addresses\\n')\n\n tsn = info.properties.get(b'TSN')\n if config.get_togo('all'):\n tsn = info.properties.get(b'tsn', tsn)\n if tsn:\n if isinstance(tsn, bytes):\n tsn = tsn.decode('utf-8')\n address = socket.inet_ntoa(info.addresses[0])\n port = info.port\n config.tivos[tsn] = {'name': name, 'address': address,\n 'port': port}\n # info.properties has bytes keys and values, but we'd rather\n # deal with str keys and values, so convert them before adding\n # them to our tivos dict.\n config.tivos[tsn].update(bytes2str(info.properties))\n\n# Debugging information on what services have been found:\n# try:\n# all_services = zeroconf.ZeroconfServiceTypes.find(self.rz)\n# self.logger.info(\"All services found\")\n# for s in all_services:\n# self.logger.info(\" {}\".format(s))\n# except Exception as e:\n# self.logger.error(e)\n\n\n return names",
"def get_detector_par(self, hdu, det):\n\n # http://www.not.iac.es/instruments/detectors/CCD14/\n\n # Detector 1\n detector_dict = dict(\n binning =self.get_meta_value(self.get_headarr(hdu), 'binning'),\n det = 1,\n dataext = 1,\n specaxis = 0,\n specflip = True,\n spatflip = False,\n xgap = 0.,\n ygap = 0.,\n ysize = 1.,\n platescale = 0.2138,\n mincounts = -1e10,\n darkcurr = 1.3, # e-/pix/hr\n saturation = 700000., # ADU\n nonlinear = 0.86,\n datasec = np.atleast_1d('[:,{}:{}]'.format(1, 2062)), # Unbinned\n oscansec = None,\n numamplifiers = 1,\n )\n\n # Parse datasec, oscancsec from the header\n head1 = hdu[1].header\n detector_dict['gain'] = np.atleast_1d(head1['GAIN']) # e-/ADU\n detector_dict['ronoise'] = np.atleast_1d(head1['RDNOISE']) # e-\n\n # Return\n return detector_container.DetectorContainer(**detector_dict)",
"def multi_calib_scan(detectors: list, *args, num: int, exposure: float = None, wait_per_step: float = 0.,\n calib_map: list = None, md: dict = None):\n if not md:\n md = {}\n if not calib_map:\n print(\"No calib_map. This is a calibration run.\")\n elif len(calib_map) != num:\n raise ValueError(\"The length of calib_map must be equal to num: {} != {}\".format(len(calib_map), num))\n if exposure:\n yield from configure_area_det_expo(exposure)\n # get the motors and points\n if len(args) < 3:\n raise ValueError(\"There must be at least 3 arguments: motor, start, end.\")\n motors = list(args[::3])\n # add hints\n x_fields = []\n for motor in motors:\n x_fields.extend(getattr(motor, 'hints', {}).get('fields', []))\n default_dimensions = [(x_fields, 'primary')]\n default_hints = {}\n if len(x_fields) > 0:\n default_hints.update(dimensions=default_dimensions)\n md['hints'] = default_hints\n # calculate the positions\n if num <= 0:\n raise ValueError(\"Number of points must be positive.\")\n starts, ends = args[1::3], args[2::3]\n lines = [np.linspace(start, end, num).tolist() for start, end in zip(starts, ends)]\n\n # start run\n\n def get_positions(j):\n lst = []\n for m, l in zip(motors, lines):\n lst.append(m)\n lst.append(l[j])\n return lst\n\n all_detectors = detectors + motors\n for i in range(num):\n yield from bps.mv(*get_positions(i))\n yield from bps.checkpoint()\n if calib_map:\n calib_md = calib_map[i]\n md[\"calibration_md\"] = calib_md\n plan = bp.count(all_detectors, md=md)\n plan = bpp.subs_wrapper(plan, LiveTable(all_detectors))\n yield from bps.sleep(wait_per_step)\n yield from open_shutter_stub()\n yield from plan\n yield from close_shutter_stub()\n yield from bps.mv(*get_positions(0))\n yield from bps.checkpoint()",
"def analyze(self, event):\n\t\tJets = Collection(event, \"Jet\")\n\t\tjets = [j for j in Jets if j.pt >= 20]\n\t\tgenpart = Collection(event, \"GenPart\")\n\t\tgenParts = [l for l in genpart]\n\t\t# get the particles when they have a mother ---> getting the daughters only \n\t\tdaughters = [l for l in genpart if l.genPartIdxMother>= 0 ]\n\t\tevent.nIsr = 0\n\t\tfor jet in jets:\n\t\t\tif jet.pt <30.0: continue\n\t\t\tif abs(jet.eta )>2.4: continue\n\t\t\tmatched = False\n\t\t\tfor i,mc in enumerate(genParts):\n\t\t\t\t# if it's matched doesn't make sence to correct it\n\t\t\t\tif matched: break\n\t\t\t\t# check if it's quark from top or not\n\t\t\t\tif (mc.status!=23 or abs(mc.pdgId)>5): continue\n\t\t\t\tmomid = abs(genParts[mc.genPartIdxMother].pdgId)\n\t\t\t\tif not (momid==6 or momid==23 or momid==24 or momid==25 or momid>1e6): continue\n\t\t\t\tfor idau in range(len(daughters)) :\n\t\t\t\t\t# look for the products of the jet and match jet with gen daughters of the quark \n\t\t\t\t\tif i == daughters[idau].genPartIdxMother:\n\t\t\t\t\t\tdR = math.sqrt(deltaR2(jet.eta,jet.phi, daughters[idau].eta,daughters[idau].phi))\n\t\t\t\t\t\tif dR<0.3:\n\t\t\t\t\t\t\t# if matched escape\n\t\t\t\t\t\t\tmatched = True\n\t\t\t\t\t\t\tbreak\n\t\t\t# if not matched correct it \n\t\t\tif not matched:\n\t\t\t\tevent.nIsr+=1\n\t\t# fill the output with nisr\n\t\tself.out.fillBranch(\"nIsr\",event.nIsr)\n\t\tnISRweight = 1\n\t\t#https://indico.cern.ch/event/592621/contributions/2398559/attachments/1383909/2105089/16-12-05_ana_manuelf_isr.pdf\n\t\tISRweights_Mar17 = { 0: 1, 1 : 0.920, 2 : 0.821, 3 : 0.715, 4 : 0.662, 5 : 0.561, 6 : 0.511}\n\t\tISRweights_ICHEP16 = { 0: 1, 1 : 0.882, 2 : 0.792, 3 : 0.702, 4 : 0.648, 5 : 0.601, 6 : 0.515}\n\t\tISRweightssyst_Mar17 = { 0: 0.0, 1 : 0.040, 2 : 0.090, 3 : 0.143, 4 : 0.169, 5 : 0.219, 6 : 0.244}\n\t\tISRweightssyst_ICHEP16 = { 0: 0.0, 1 : 0.059, 2 : 0.104, 3 : 0.149, 4 : 0.176, 5 : 0.199, 6 : 0.242}\n\t\t\n\t\tif self.ICHEP16 == True and self.Mar17 == False:\n\t\t\tISRweights = ISRweights_ICHEP16\n\t\t\tISRweightssyst = ISRweightssyst_ICHEP16\n\t\t\t\n\t\telif self.ICHEP16 == False and self.Mar17 == True: \n\t\t\tISRweights = ISRweights_Mar17\n\t\t\tISRweightssyst = ISRweightssyst_Mar17\n\t\t\t\n\t\tnISRforWeights = int(event.nIsr)\n\t\tif event.nIsr > 6:\n\t\t\tnISRforWeights = 6\n\t\tC_ISR = 1.090\n\t\tC_ISR_up = 1.043\n\t\tC_ISR_down = 1.141\n\t\tnISRweight = C_ISR * ISRweights[nISRforWeights]\n\t\tnISRweightsyst_up = C_ISR_up * (ISRweights[nISRforWeights] + ISRweightssyst[nISRforWeights])\n\t\tnISRweightsyst_down = C_ISR_down * (ISRweights[nISRforWeights] - ISRweightssyst[nISRforWeights])\n\t\t\n\t\tself.out.fillBranch(\"nISRweight\",nISRweight)\n\t\tself.out.fillBranch(\"nISRttweightsyst_up\",nISRweightsyst_up)\n\t\tself.out.fillBranch(\"nISRttweightsyst_down\",nISRweightsyst_down)\n\n\n # ------ Forwarded Message --------\n # Subject: Re: question for ttbar ISR reweighting\n # Date: Sat, 14 Jan 2017 20:24:14 +0100\n # From: Manuel Franco Sevilla <[email protected]>\n #The [Nom, Up, Down] values we find for the events with Nisr = 0 are:\n #[1.090, 1.043, 1.141]: TTJets_Tune\n #[1.096, 1.046, 1.151]: TTJets_SingleLeptFromT\n #[1.116, 1.055, 1.185]: TTJets_DiLept\n\t\t\n\t\t\n\t\treturn True",
"def setup_det_trigger(motor, det, motion_calc, trigger_calc, increment=2.5):\n motion_calc.reset()\n motion_calc.desc.put(\"motion increment\")\n motion_calc.channels.A.input_pv.put(motor.user_readback.pvname)\n motion_calc.channels.B.value.put(increment)\n motion_calc.calc.put(\"floor(A/B)\")\n motion_calc.oopt.put(\"Every Time\")\n motion_calc.scan.put(\"I/O Intr\")\n\n trigger_calc.reset()\n trigger_calc.desc.put(\"detector trigger\")\n trigger_calc.channels.A.input_pv.put(trigger_calc.channels.B.value.pvname)\n trigger_calc.channels.B.input_pv.put(motion_calc.val.pvname)\n trigger_calc.channels.C.input_pv.put(motor.direction_of_travel.pvname)\n trigger_calc.calc.put(\"C&&(A!=B)\")\n trigger_calc.oopt.put(\"Transition To Non-zero\")\n trigger_calc.outn.put(det.cam.prefix + \"Acquire\")\n trigger_calc.scan.put(\"I/O Intr\")\n \n det.cam.image_mode.put(\"Single\")\n det.hdf1.enable.put(\"Disable\")\n \"\"\"\n typical acquisition sequence:\n \n det_pre_acquire(det)\n det.cam.acquire.put() # as many frames as needed\n det_post_acquire(det)\n \"\"\"",
"def __init__(self, prefix, n_detectors=16, bad=None):\r\n class pvs:\r\n pass\r\n self.pvs = pvs()\r\n t = Med.Med.__init__(self, n_detectors) # Invoke base class initialization\r\n self.pvs.start = epicsPV.epicsPV(prefix + 'StartAll', wait=0)\r\n self.pvs.erasestart = epicsPV.epicsPV(prefix + 'EraseStart', wait=0)\r\n self.pvs.stop = epicsPV.epicsPV(prefix + 'StopAll', wait=0)\r\n self.pvs.erase = epicsPV.epicsPV(prefix + 'EraseAll', wait=0)\r\n self.pvs.read = epicsPV.epicsPV(prefix + 'ReadAll', wait=0)\r\n self.pvs.elive = epicsPV.epicsPV(prefix + 'ElapsedLive', wait=0)\r\n self.pvs.ereal = epicsPV.epicsPV(prefix + 'ElapsedReal', wait=0)\r\n self.pvs.plive = epicsPV.epicsPV(prefix + 'PresetLive', wait=0)\r\n self.pvs.preal = epicsPV.epicsPV(prefix + 'PresetReal', wait=0)\r\n self.pvs.dwell = epicsPV.epicsPV(prefix + 'Dwell', wait=0)\r\n self.pvs.channel_advance = epicsPV.epicsPV(prefix + 'ChannelAdvance', wait=0)\r\n self.pvs.prescale = epicsPV.epicsPV(prefix + 'Prescale', wait=0)\r\n self.pvs.acquiring = epicsPV.epicsPV(prefix + 'Acquiring', wait=0)\r\n self.pvs.client_wait = epicsPV.epicsPV(prefix + 'ClientWait', wait=0)\r\n self.pvs.enable_client_wait = epicsPV.epicsPV(prefix + 'EnableClientWait', wait=0)\r\n good_detectors = range(1, self.n_detectors+1)\r\n if (bad != None):\r\n for b in bad:\r\n del good_detectors[b-1]\r\n self.n_detectors = len(good_detectors)\r\n self.good_detectors = good_detectors\r\n for i in range(self.n_detectors):\r\n pv = prefix + 'mca' + str(self.good_detectors[i])\r\n self.mcas[i] = epicsMca.epicsMca(pv)\r\n self.pvs.elive.setMonitor()\r\n self.pvs.ereal.setMonitor()\r\n self.pvs.acquiring.setMonitor()\r\n self.pvs.client_wait.setMonitor()\r\n # Wait for all PVs to connect\r\n self.pvs.client_wait.pend_io(30.)\r\n # Read the first MCA to get the number of channels\r\n t = self.mcas[0].get_data()\r\n # Read the environment from the first MCA\r\n self.environment = self.mcas[0].get_environment()",
"def scanChecks(motor, start, stop, step, param1, param2=-1, param3=-1):\n\tgenericScanChecks(True, False, motor, start, stop, step, param1, param2, param3)",
"def fillDetInfo():\n print('here i am')\n # 1. maps of analysis channel to cpd, and pulser monitor channels\n detCH, pMons = {}, {}\n for ds in [0,1,2,3,4,5,6]:\n f = np.load(\"%s/data/ds%d_detChans.npz\" % (os.environ['LATDIR'], ds))\n detCH[ds] = f['arr_0'].item()\n pMons[ds] = f['arr_1'].item()\n\n # 2. maps of HV and TRAP threshold settings are stored in the DB.\n # make them global, and move them to the runSettings file.\n # FORMAT: {ds : {'det' : [(run1,val1),(run2,val2)...]} }\n detHV, detTH = {}, {}\n\n # load all possible values, as in settingsMgr\n detDB = db.TinyDB(\"%s/calDB-v2.json\" % dsi.latSWDir)\n detPars = db.Query()\n cal = dsi.CalInfo()\n for ds in [0,1,2,3,4,5,6]:\n # for ds in [0]:\n print(\"scanning ds\",ds)\n detTH[ds] = {}\n detHV[ds] = {}\n for key in cal.GetKeys(ds):\n mod = -1\n if \"m1\" in key: mod = 1\n if \"m2\" in key: mod = 2\n for cIdx in range(cal.GetIdxs(key)):\n\n # load the DB records\n dbKeyTH = \"trapThr_%s_c%d\" % (key, cIdx)\n dbValTH = dsi.getDBRecord(dbKeyTH,calDB=detDB,pars=detPars)\n\n dbKeyHV = \"hvBias_%s_c%d\" % (key, cIdx)\n dbValHV = dsi.getDBRecord(dbKeyHV,calDB=detDB,pars=detPars)\n\n # debug: print the record\n # for val in sorted(dbValTH):\n # if len(dbValTH[val])>0:\n # print(val, dbValTH[val])\n # return\n\n # fill the first value\n if len(detTH[ds])==0:\n detTH[ds] = dbValTH\n detHV[ds] = dbValHV\n continue\n\n # check for new threshold values.\n for cpd in detTH[ds]:\n nOld, nNew = len(detTH[ds][cpd]), len(dbValTH[cpd])\n\n # detector just came online\n if nOld==0 and nNew>0:\n detTH[ds][cpd] = dbValTH[cpd]\n continue\n # detector still offline\n if nOld==0 and nNew==0:\n continue\n # detector just went offline\n if nOld>0 and nNew==0:\n continue\n\n # check last run/trap pair against each new one\n prevRun, prevTH = detTH[ds][cpd][-1][0], detTH[ds][cpd][-1][1]\n for val in dbValTH[cpd]:\n thisRun, thisTH = val[0], val[1]\n if thisTH != prevTH:\n detTH[ds][cpd].append([thisRun,thisTH])\n prevTH = thisTH\n\n # check for new HV values.\n for cpd in detHV[ds]:\n\n nOld, nNew = len(detHV[ds][cpd]), len(dbValHV[cpd])\n\n # detector just came online\n if nOld==0 and nNew>0:\n detHV[ds][cpd] = dbValHV[cpd]\n continue\n # detector still offline\n if nOld==0 and nNew==0:\n continue\n # detector just went offline\n if nOld>0 and nNew==0:\n continue\n\n # check last run/trap pair against each new one\n prevRun, prevHV = detHV[ds][cpd][-1][0], detHV[ds][cpd][-1][1]\n for val in dbValHV[cpd]:\n thisRun, thisHV = val[0], val[1]\n if thisHV != prevHV:\n print(\"found HV diff. cpd %d prev %dV (run %d) new %dV (run %d)\" % (cpd, prevHV, prevRun, thisHV, thisRun))\n detHV[ds][cpd].append([thisRun,thisHV])\n prevHV = thisHV\n\n # return\n\n # # load the old file and compare\n # # GOAL: improve on this file.\n # # f = np.load(\"%s/data/runSettings.npz\" % dsi.latSWDir)\n # # detHVOld = f['arr_0'].item()\n # # detTHOld = f['arr_1'].item()\n # # detCHOld = f['arr_2'].item()\n # # pMonsOld = f['arr_3'].item()\n #\n # ds = 3\n # print(\"old results, ds\",ds)\n # for cpd in sorted(detTHOld[ds]):\n # if cpd!=\"122\":continue\n # if len(detTHOld[ds][cpd]) > 0:\n # print(cpd, detTHOld[ds][cpd])\n #\n # # for ds in [0,1,2,3,4,5,6]:\n # print(\"thresh results, ds:\",ds)\n # for cpd in sorted(detTH[ds]):\n # # if cpd!=122:continue\n # if len(detTH[ds][cpd]) > 0:\n # print(cpd, detTH[ds][cpd])\n\n\n np.savez(\"%s/data/runSettings-v2.npz\" % dsi.latSWDir,detHV,detTH,detCH,pMons)",
"def get_all_motors():\n return mc.get('motor_values')",
"def motor_inferencia(x):\n\n # Defino mis operaciones borrosas\n AND = min # Tambien se llama conjuncion o interseccion\n OR = max # Tambien se llama disyuncion o union\n # FUERZA = min # Elijo la conjuncion. Tambien se pueden usar la disyuncion\n\n # --------------------------------------------------------\n # - CALCULO DEL VALOR DE PERTENENCIA DE LOS ANTECEDENTES -\n # --------------------------------------------------------\n\n # Guardo los antecedentes en las variables\n A_MN = []\n A_N = []\n A_Z = []\n A_P = []\n A_MP = []\n\n # Fila 0: P is MN and\n A_MP.append(AND(x[0], x[5])) # V is MN # then F is MP\n A_MP.append(AND(x[0], x[6])) # V is N # then F is MP\n A_MP.append(AND(x[0], x[7])) # V is Z # then F is MP\n A_MP.append(AND(x[0], x[8])) # V is P # then F is MP\n A_MP.append(AND(x[0], x[9])) # V is MP # then F is MP\n\n # Fila 1: P is N and\n A_MN.append(AND(x[1], x[5])) # V is MN # then F is MN\n A_MN.append(AND(x[1], x[6])) # V is N # then F is MN\n A_N.append(AND(x[1], x[7])) # V is Z # then F is N\n A_N.append(AND(x[1], x[8])) # V is P # then F is N\n A_N.append(AND(x[1], x[9])) # V is MP # then F is N\n\n # Fila 2: P is Z and\n A_MN.append(AND(x[2], x[5])) # V is MN # then F is MN\n A_N.append(AND(x[2], x[6])) # V is N # then F is N\n A_Z.append(AND(x[2], x[7])) # V is Z # then F is Z\n A_P.append(AND(x[2], x[8])) # V is P # then F is P\n A_MP.append(AND(x[2], x[9])) # V is MP # then F is MP\n\n # Fila 3: P is P and\n A_P.append(AND(x[3], x[5])) # V is MN # then F is P\n A_P.append(AND(x[3], x[6])) # V is N # then F is P\n A_P.append(AND(x[3], x[7])) # V is Z # then F is P\n A_MP.append(AND(x[3], x[8])) # V is P # then F is MP\n A_MP.append(AND(x[3], x[9])) # V is MP # then F is MP\n\n # Fila 4: P is MP and\n A_MN.append(AND(x[4], x[5])) # V is MN # then F is MN\n A_MN.append(AND(x[4], x[6])) # V is N # then F is MN\n A_MN.append(AND(x[4], x[7])) # V is Z # then F is MN\n A_MN.append(AND(x[4], x[8])) # V is P # then F is MN\n A_MN.append(AND(x[4], x[9])) # V is MP # then F is MN\n\n # ------------------------------------------------------------------------------------------\n # - COMBINACION DE LOS ANTECEDENTES Y RESOLUCION DE LA IMPLICACION -\n # ------------------------------------------------------------------------------------------\n\n # [ F_MN, F_N, F_Z, F_P, F_MP ]\n F = [OR(A_MN), OR(A_N), OR(A_Z), OR(A_P), OR(A_MP)]\n\n return F",
"def cmd_motors(self, motor1, motor2, motor3, motor4):\n pass",
"def ultrasonicChecker() -> None:\n ...",
"def scan(self):\n for angle in range(self.MIDPOINT-350, self.MIDPOINT+350, 35):\n self.servo(angle)\n self.scan_data[angle] = self.read_distance()\n #sort the scan data for easier analysis\n self.scan_data = OrderedDict(sorted(self.scan_data.items()))",
"def scan(self):\n for angle in range(self.MIDPOINT-350, self.MIDPOINT+350, 50):\n self.servo(angle)\n self.scan_data[angle] = self.read_distance()",
"def __init__(self, detector, detectors, nodename, viewname):\n\n self.detector_type = detector\n # set detect_state function to detector_type (e.g., light or motion)\n if detector == 'light':\n self.detect_state = self.detect_light\n if 'threshold' in detectors[detector]:\n self.threshold = detectors[detector]['threshold']\n else:\n self.threshold = 100 # 100 is a default for testing\n if 'min_frames' in detectors[detector]:\n self.min_frames = detectors[detector]['min_frames']\n else:\n self.min_frames = 5 # 5 is default\n # need to remember min_frames of state history to calculate state\n self.state_history_q = deque(maxlen=self.min_frames)\n\n elif detector == 'motion':\n self.detect_state = self.detect_motion\n self.moving_frames = 0\n self.still_frames = 0\n self.total_frames = 0\n if 'delta_threshold' in detectors[detector]:\n self.delta_threshold = detectors[detector]['delta_threshold']\n else:\n self.delta_threshold = 5 # 5 is a default for testing\n if 'min_area' in detectors[detector]:\n self.min_area = detectors[detector]['min_area']\n else:\n self.min_area = 3 # 3 is default percent of ROI\n if 'min_motion_frames' in detectors[detector]:\n self.min_motion_frames = detectors[detector]['min_motion_frames']\n else:\n self.min_motion_frames = 3 # 3 is default\n if 'min_still_frames' in detectors[detector]:\n self.min_still_frames = detectors[detector]['min_still_frames']\n else:\n self.min_still_frames = 3 # 3 is default\n self.min_frames = max(self.min_motion_frames, self.min_still_frames)\n if 'blur_kernel_size' in detectors[detector]:\n self.blur_kernel_size = detectors[detector]['blur_kernel_size']\n else:\n self.blur_kernel_size = 15 # 15 is default blur_kernel_size\n if 'print_still_frames' in detectors[detector]:\n self.print_still_frames = detectors[detector]['print_still_frames']\n else:\n self.print_still_frames = True # True is default print_still_frames\n\n if 'ROI' in detectors[detector]:\n self.roi_pct = literal_eval(detectors[detector]['ROI'])\n else:\n self.roi_pct = ((0, 0), (100, 100))\n if 'draw_roi' in detectors[detector]:\n self.draw_roi = literal_eval(detectors[detector]['draw_roi'])\n self.draw_color = self.draw_roi[0]\n self.draw_line_width = self.draw_roi[1]\n else:\n self.draw_roi = None\n # name of the ROI detector section\n if 'roi_name' in detectors[detector]:\n self.roi_name = detectors[detector]['roi_name']\n else:\n self.roi_name = ''\n # include ROI name in log events\n if 'log_roi_name' in detectors[detector]:\n self.log_roi_name = detectors[detector]['log_roi_name']\n else:\n self.log_roi_name = False\n # draw timestamp on image\n if 'draw_time' in detectors[detector]:\n self.draw_time = literal_eval(detectors[detector]['draw_time'])\n self.draw_time_color = self.draw_time[0]\n self.draw_time_width = self.draw_time[1]\n if 'draw_time_org' in detectors[detector]:\n self.draw_time_org = literal_eval(detectors[detector]['draw_time_org'])\n else:\n self.draw_time_org = (0, 0)\n if 'draw_time_fontScale' in detectors[detector]:\n self.draw_time_fontScale = detectors[detector]['draw_time_fontScale']\n else:\n self.draw_time_fontScale = 1\n else:\n self.draw_time = None\n send_frames = 'None Set'\n self.frame_count = 0\n # send_frames option can be 'continuous', 'detected event', 'none'\n if 'send_frames' in detectors[detector]:\n send_frames = detectors[detector]['send_frames']\n if not send_frames: # None was specified; send 0 frames\n self.frame_count = 0\n if 'detect' in send_frames:\n self.frame_count = 10 # detected events default; adjusted later\n 
elif 'continuous' in send_frames:\n self.frame_count = -1 # send continuous flag\n elif 'none' in send_frames: # don't send any frames\n self.frame_count = 0\n else:\n self.frame_count = -1 # send continuous flag\n # send_count option is an integer of how many frames to send if event\n if 'send_count' in detectors[detector]:\n self.send_count = detectors[detector]['send_count']\n else:\n self.send_count = 5 # default number of frames to send per event\n # send_test_images option: if True, send test images like ROI, Gray\n if 'send_test_images' in detectors[detector]:\n self.send_test_images = detectors[detector]['send_test_images']\n else:\n self.send_test_images = False # default is NOT to send test images\n\n # self.event_text is the text message for this detector that is\n # sent when the detector state changes\n # example: JeffOffice Window|light|dark\n # example: JeffOffice Window|light|lighted\n # self.event_text will have self.current_state appended when events are sent\n node_and_view = ' '.join([nodename, viewname]).strip()\n self.event_text = '|'.join([node_and_view, self.detector_type])\n\n # An event is a change of state (e.g., 'dark' to 'lighted')\n # Every detector is instantiated with all states = 'unknown'\n self.current_state = 'unknown'\n self.last_state = 'unknown'\n\n self.msg_image = np.zeros((2, 2), dtype=\"uint8\") # blank image tiny\n if self.send_test_images:\n # set the blank image wide enough to hold message of send_test_images\n self.msg_image = np.zeros((5, 320), dtype=\"uint8\") # blank image wide",
"def camera_scan(h, cam, direction):\n if direction == \"right\":\n offset = -1.0\n elif direction == \"left\":\n offset = 1.0\n else:\n offset = 0.0\n\n\n cam.set_pan_tilt(offset * 1.3, 0.5)\n time.sleep(1)\n h.rotate(\"head_pan_joint\", offset * 1.3)\n time.sleep(4)\n h.rotate(\"head_tilt_joint\", 0.5)\n time.sleep(2)\n cam.set_pan_tilt(offset * 1.3, 0.3)\n time.sleep(1)\n h.rotate(\"head_tilt_joint\", 0.3)\n cam.set_pan_tilt(offset * 1.3, 0.0)\n time.sleep(1)\n h.rotate(\"head_tilt_joint\", 0.0)\n\n\n cam.set_pan_tilt(offset * 1.3 + 0.4, 0.5)\n time.sleep(1)\n h.rotate(\"head_pan_joint\", offset * 1.3 + 0.4)\n time.sleep(2)\n h.rotate(\"head_tilt_joint\", 0.5)\n time.sleep(2)\n cam.set_pan_tilt(offset * 1.3 + 0.4, 0.3)\n time.sleep(1)\n h.rotate(\"head_tilt_joint\", 0.3)\n cam.set_pan_tilt(offset * 1.3 + 0.4, 0.0)\n time.sleep(1)\n h.rotate(\"head_tilt_joint\", 0.0)\n \n cam.set_pan_tilt(offset * 1.3 - 0.4, 0.5)\n time.sleep(1)\n h.rotate(\"head_pan_joint\", offset * 1.3 - 0.4)\n time.sleep(2)\n h.rotate(\"head_tilt_joint\", 0.5)\n time.sleep(2)\n cam.set_pan_tilt(offset * 1.3 - 0.4, 0.3)\n time.sleep(1)\n h.rotate(\"head_tilt_joint\", 0.3)\n cam.set_pan_tilt(offset * 1.3 - 0.4, 0.0)\n time.sleep(1)\n h.rotate(\"head_tilt_joint\", 0.0)",
"def scan(self):\n for angle in range(self.MIDPOINT-400, self.MIDPOINT+401, 100):\n self.servo(angle)\n self.scan_data[angle] = self.read_distance()\n #sort the scan data for easier analysis\n self.scan_data = OrderedDict(sorted(self.scan_data.items()))",
"def generate_detections(encoder, mot_dir, output_dir, detection_dir=None):\n if detection_dir is None:\n detection_dir = mot_dir\n try:\n os.makedirs(output_dir)\n except OSError as exception:\n if exception.errno == errno.EEXIST and os.path.isdir(output_dir):\n pass\n else:\n raise ValueError(\n \"Failed to created output directory '%s'\" % output_dir)\n\n for sequence in os.listdir(mot_dir):\n print(\"Processing %s\" % sequence)\n sequence_dir = os.path.join(mot_dir, sequence)\n\n # image_dir = os.path.join(sequence_dir, \"img1\")\n image_dir = sequence_dir\n image_filenames = {\n int(f[6:10]): os.path.join(image_dir, f) \n for f in os.listdir(image_dir) if os.path.isfile(os.path.join(image_dir, f))}\n\n detection_file = os.path.join(\n detection_dir, sequence, \"det/det.txt\")\n detections_in = np.loadtxt(detection_file, delimiter=' ')\n detections_out = []\n\n frame_indices = detections_in[:, 0].astype(np.int)\n min_frame_idx = frame_indices.astype(np.int).min()\n max_frame_idx = frame_indices.astype(np.int).max()\n for frame_idx in range(min_frame_idx, max_frame_idx + 1):\n print(\"Frame %05d/%05d\" % (frame_idx, max_frame_idx))\n mask = frame_indices == frame_idx\n rows = detections_in[mask]\n\n if frame_idx not in image_filenames:\n print(\"WARNING could not find image for frame %d\" % frame_idx)\n continue\n bgr_image = cv2.imread(\n image_filenames[frame_idx], cv2.IMREAD_COLOR)\n features = encoder(bgr_image, rows[:, 2:6].copy())\n detections_out += [np.r_[(row, feature)] for row, feature\n in zip(rows, features)]\n\n output_filename = os.path.join(output_dir, \"%s.npy\" % sequence)\n np.save(\n output_filename, np.asarray(detections_out), allow_pickle=False)",
"def updatePWM(self):\n v_dc = self.dcmotorSpeed * self.dcmotor_sgn # changed \"vr\" to \"v_dc\", \"rightSpeed\" to \"dcmotorSpeed\" and \"right_sgn\" to dcmotor_sgn\", RFMH_2019_02_26\n pwm_dc = self.PWMvalue(v_dc, self.DC_MOTOR_MIN_PWM,\n self.DC_MOTOR_MAX_PWM) # changed \"pwmr\" to \"pwm_dc\" and \"vr\" to \"v_dc\" and adjusted both orange constants to \"DC_MOTOR_MIN_PWM\" AND \"DC_MOTOR_MAX_PWM\", RFMH_2019_02_26\n\n # TODO: Fix this debug message. I am trying to port this code over from an old version, and I do not know\n # what v and u are supposed to be here. Timothy Scott, 5.11.2019\n # if self.debug: # where the duck does the \"u\" come from?!?, RFMH_2019_02_26\n # print(\"v = %5.3f, u = %5.3f, v_dc = %5.3f, pwm_dc = %3d\" % (\n # v, u, v_dc, pwm_dc)) # deleted \"vl\" and \"pwml\" and adjust \"vr\" to \"v_dc\" to \"pwm_dc\"\n\n if math.fabs(v_dc) < self.SPEED_TOLERANCE: # changed v_r to v_dc in if loop , RFMH_2019_02_28\n DcMotorMode = Adafruit_MotorHAT.RELEASE\n pwm_dc = 0\n elif v_dc > 0:\n DcMotorMode = Adafruit_MotorHAT.FORWARD\n elif v_dc < 0:\n DcMotorMode = Adafruit_MotorHAT.BACKWARD\n\n if not self.old_pwm_dc == pwm_dc:\n self.DcMotor.setSpeed(pwm_dc) # changed rightMotor to DcMotor and pwmr to pwm_dc , RFMH_2019_02_28\n self.DcMotor.run(DcMotorMode)\n\n self.old_pwm_dc = pwm_dc",
"def compute_thermo(E,dos,TT):\n if (len(dos)<3):\n print (\"Not enough points in the phonon DOS!\")\n return None\n \n ZPE = 0.5*dos_integral(E,dos,1)\n modes = dos_integral(E,dos)\n \n EvibT = np.zeros(len(TT))\n SvibT = np.zeros(len(TT))\n CvibT = np.zeros(len(TT))\n FvibT = np.zeros(len(TT))\n for i in range(0,len(TT)):\n h = 0.5*(E[2]-E[0])\n arg = K_BOLTZMANN_RY*TT[i]\n arg2 = 2.0 * arg\n Evib = 0.0\n Svib = 0.0\n Cvib = 0.0\n for j in range(0,len(dos)-3,3):\n\n Evib += 3.0*E[j]/tanh(E[j]/(arg2))*dos[j]+\\\n 3.0*E[j+1]/tanh(E[j+1]/(arg2))*dos[j+1]+\\\n 2.0*E[j+2]/tanh(E[j+2]/(arg2))*dos[j+2]\n \n Svib += 3.0*(E[j]/arg2/tanh(E[j]/arg2)-log(2.0*sinh(E[j]/arg2)))*dos[j]+\\\n 3.0*(E[j+1]/arg2/tanh(E[j+1]/arg2)-log(2.0*sinh(E[j+1]/arg2)))*dos[j+1]+\\\n 2.0*(E[j+2]/arg2/tanh(E[j+2]/arg2)-log(2.0*sinh(E[j+2]/arg2)))*dos[j+2]\n\n try: # avoid overflow error for arg very small\n Cvib += 3.0*pow(E[j]/arg,2)/( 4.0*pow(sinh(E[j]/(arg2)),2) )*dos[j]+\\\n 3.0*pow(E[j+1]/arg,2)/( 4.0*pow(sinh(E[j+1]/(arg2)),2) )*dos[j+1]+\\\n 2.0*pow(E[j+2]/arg,2)/( 4.0*pow(sinh(E[j+2]/(arg2)),2) )*dos[j+2]\n except:\n Cvib += 0.0\n\n EvibT[i] = h*0.5*Evib*3.0/8.0 # h is the integration step, 0.5 comes from the equation for E,\n # the factor 3.0/8.0 comes from the Simpson 3/8 rule\n SvibT[i] = h*K_BOLTZMANN_RY*Svib*3.0/8.0\n CvibT[i] = h*K_BOLTZMANN_RY*Cvib*3.0/8.0\n FvibT = EvibT - SvibT * TT\n\n print ()\n return TT, EvibT, SvibT, CvibT, FvibT, ZPE, modes"
]
| [
"0.5739672",
"0.55752635",
"0.5421364",
"0.5333477",
"0.5325447",
"0.5255563",
"0.51745707",
"0.51531947",
"0.51229215",
"0.5022708",
"0.50104445",
"0.49869722",
"0.4959905",
"0.49559098",
"0.49386626",
"0.49105185",
"0.49098516",
"0.4906843",
"0.48975998",
"0.48923635",
"0.48900166",
"0.48352626",
"0.48164323",
"0.48137537",
"0.47678602",
"0.4741256",
"0.4722799",
"0.46749383",
"0.46664324",
"0.46650916"
]
| 0.70551413 | 0 |
Repeat scanning the pzt (e.g. pzt_dcm_ch2, pzt_dcm_th2) and read the detector outputs. Images and a .csv data file will be saved | def pzt_scan_multiple(
moving_pzt,
start,
stop,
steps,
detectors=[Vout2],
repeat_num=2,
sleep_time=1,
fn="/home/xf18id/Documents/FXI_commision/DCM_scan/",
):
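    """
    Repeat pzt_scan `repeat_num` times, restoring the X-ray energy after each pass,
    then read the last `repeat_num` scans back from the databroker, plot every
    detector readout against the pzt readback, and save a .csv table and .tiff figure.

    Note: the output files are written under `save_file_dir`; the `fn` argument is
    overwritten below.
    """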
det = [det.name for det in detectors]
det_name = ""
for i in range(len(det)):
det_name += det[i]
det_name += ", "
det_name = "[" + det_name[:-2] + "]"
txt = f"pzt_scan_multiple(moving_pzt={moving_pzt.name}, start={start}, stop={stop}, steps={steps}, detectors={det_name}, repeat_num={repeat_num}, sleep_time={sleep_time}, fn={fn})\n Consisting of:\n"
insert_text(txt)
current_eng = XEng.position
df = pd.DataFrame(data=[])
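    # repeat the inner pzt scan, restoring the X-ray energy after each pass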
for num in range(repeat_num):
yield from pzt_scan(
moving_pzt, start, stop, steps, detectors=detectors, sleep_time=sleep_time
)
yield from abs_set(XEng, current_eng, wait=True)
print("\nscan finished, ploting and saving data...")
fig = plt.figure()
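    # walk back through the last repeat_num scans in the databroker and collect the readback and detector data in df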
for num in reversed(range(repeat_num)):
h = db[-1 - num]
scan_id = h.start["scan_id"]
detector_set_read = h.start["detector_set_read"]
col_x_prefix = detector_set_read[1]
col_x = col_x_prefix + " #" + "{}".format(scan_id)
motor_readout = np.array(list(h.data(col_x_prefix)))
df[col_x] = pd.Series(motor_readout)
detector_signal = h.start["detectors"]
for i in range(len(detector_signal)):
det = detector_signal[i]
if (det == "Andor") or (det == "detA1"):
det = det + "_stats1_total"
det_readout = np.array(list(h.data(det)))
col_y_prefix = det
col_y = col_y_prefix + " #" + "{}".format(scan_id)
df[col_y] = pd.Series(det_readout)
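            # one subplot per detector signal, plotted against the pzt readback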
plt.subplot(len(detector_signal), 1, i + 1)
plt.plot(df[col_x], df[col_y])
plt.ylabel(det)
plt.subplot(len(detector_signal), 1, len(detector_signal))
plt.xlabel(col_x_prefix)
plt.subplot(len(detector_signal), 1, 1)
plt.title("X-ray Energy: {:2.1f}keV".format(current_eng))
now = datetime.now()
    year = str(now.year)
mon = "{:02d}".format(now.month)
day = "{:02d}".format(now.day)
hour = "{:02d}".format(now.hour)
minu = "{:02d}".format(now.minute)
current_date = year + "-" + mon + "-" + day
fn = (
save_file_dir
+ "pzt_scan_"
+ "{:2.1f}keV_".format(current_eng)
+ current_date
+ "_"
+ hour
+ "-"
+ minu
)
fn_fig = fn + ".tiff"
fn_file = fn + ".csv"
df.to_csv(fn_file, sep="\t")
fig.savefig(fn_fig)
print("save to: " + fn_file)
txt_finish = '## "pzt_scan_multiple()" finished'
insert_text(txt_finish) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run(self):\r\n self.create_output_dirs()\r\n data = self.read_input()\r\n while (data):\r\n # Initiate ORB detector\r\n orb = cv2.ORB_create()\r\n\r\n if (self.continu):\r\n current_frame_nr = data[\"frameNr\"]\r\n if(current_frame_nr > 1):\r\n self.determine_flow(orb, current_frame_nr)\r\n else:\r\n #Read first image\r\n self.previous_frame_path = os.path.join(self.frames_dir, '%05d.png' % 1)\r\n self.previous_frame = cv2.imread(self.previous_frame_path, 0) # queryImage\r\n # Find the keypoints and descriptors with ORB\r\n self.kp_previous_frame, self.des_previous_frame = orb.detectAndCompute(self.previous_frame, None)\r\n elif(self.stitch_completed):\r\n self.write_stitched_image()\r\n self.continu = False\r\n self.stitch_completed = True\r\n return\r\n data = self.read_input()",
"def main(args):\n gt_path = args.ground_truth\n djdd_path = args.djdd\n bjdd_path = args.bjdd\n\n mse_fn = th.nn.MSELoss()\n psnr_fn = PSNR()\n\n device = \"cpu\"\n # if th.cuda.is_available():\n # device = \"cuda\"\n\n pdf = pd.DataFrame(columns=[\"filename\",\"imgid\", \"PSNR_for_DJDD\", \"MSE_for_DJDD\", \"PSNR_for_BJDD\", \"MSE_for_BJDD\"])\n\n count = 0\n msedjdd = 0.0\n psnrdjdd = 0.0\n\n msebjdd = 0.0\n psnrbjdd = 0.0\n\n for root, _, files in os.walk(gt_path):\n for idx, name in enumerate(files):\n \n # djdd image\n output_djdd = np.array(imread(os.path.join(djdd_path, name+\"_0_output.png\"))).astype(np.float32) / (2**8-1)\n output_djdd = th.from_numpy(np.transpose(output_djdd, [2,0,1])).to(device).unsqueeze(0)\n\n #bjdd image\n output_bjdd = np.array(imread(os.path.join(bjdd_path, name.split('.')[0]+\"_sigma_0_bayer_PIPNet.png\"))).astype(np.float32) / (2**8-1)\n output_bjdd = th.from_numpy(np.transpose(output_bjdd, [2,0,1])).to(device).unsqueeze(0)\n\n # gt image\n target = np.array(imread(os.path.join(root, name))).astype(np.float32) / (2**8-1)\n target = th.from_numpy(np.transpose(target, [2, 0, 1])).to(device).unsqueeze(0)\n\n\n target_djdd = crop_like(target, output_djdd)\n target_bjdd = crop_like(target, output_bjdd)\n\n psnr_djdd = psnr_fn(output_djdd, target_djdd).item()\n mse_djdd = mse_fn(output_djdd, target_djdd).item()\n\n psnr_bjdd = psnr_fn(output_bjdd, target_bjdd).item()\n mse_bjdd = mse_fn(output_bjdd, target_bjdd).item()\n\n psnrdjdd += psnr_djdd\n msedjdd += mse_djdd\n psnrbjdd += psnr_bjdd\n msebjdd += mse_bjdd\n\n count += 1\n\n LOG.info(f\"imgid: {idx}, PSNR_BJDD: {psnr_bjdd}, MSE_BJDD: {mse_bjdd}, PSNR_DJDD: {psnr_djdd}, MSE_DJDD: {mse_djdd}\")\n pdf = pdf.append({\n \"filename\": name,\n \"imgid\": idx,\n \"PSNR_for_DJDD\": psnr_djdd,\n \"MSE_for_DJDD\": mse_djdd,\n \"PSNR_for_BJDD\": psnr_bjdd,\n \"MSE_for_BJDD\": mse_bjdd\n }, ignore_index=True)\n # pdb.set_trace()\n\n msebjdd /= count\n psnrbjdd /= count\n\n msedjdd /= count\n psnrdjdd /= count\n\n LOG.info(\"--------------BJDD---------------------\")\n LOG.info(\"Average, PSNR = %.1f dB, MSE = %.5f\", psnrbjdd, msebjdd)\n\n LOG.info(\"--------------DJDD---------------------\")\n LOG.info(\"Average, PSNR = %.1f dB, MSE = %.5f\", psnrdjdd, msedjdd)\n pdb.set_trace()\n pdf.to_csv(\"/workspace/presentation_compare.csv\")",
"def run(self):\n\n for file_cnt, file_path in enumerate(self.files_found):\n video_timer = SimbaTimer()\n video_timer.start_timer()\n _, self.video_name, _ = get_fn_ext(file_path)\n self.video_info, self.px_per_mm, self.fps = self.read_video_info(\n video_name=self.video_name\n )\n self.width, self.height = int(\n self.video_info[\"Resolution_width\"].values[0]\n ), int(self.video_info[\"Resolution_height\"].values[0])\n if self.video_setting:\n self.fourcc = cv2.VideoWriter_fourcc(*Formats.MP4_CODEC.value)\n self.video_save_path = os.path.join(\n self.heatmap_clf_location_dir, self.video_name + \".mp4\"\n )\n self.writer = cv2.VideoWriter(\n self.video_save_path,\n self.fourcc,\n self.fps,\n (self.width, self.height),\n )\n if self.frame_setting:\n self.save_video_folder = os.path.join(\n self.heatmap_clf_location_dir, self.video_name\n )\n if not os.path.exists(self.save_video_folder):\n os.makedirs(self.save_video_folder)\n self.data_df = read_df(file_path=file_path, file_type=self.file_type)\n clf_array, aspect_ratio = self.__calculate_bin_attr(\n data_df=self.data_df,\n clf_name=self.clf_name,\n bp_lst=self.bp_lst,\n px_per_mm=self.px_per_mm,\n img_width=self.width,\n img_height=self.height,\n bin_size=self.bin_size,\n fps=self.fps,\n )\n\n if self.max_scale == \"auto\":\n self.max_scale = self.__calculate_max_scale(clf_array=clf_array)\n if self.max_scale == 0:\n self.max_scale = 1\n\n if self.final_img_setting:\n self.make_clf_heatmap_plot(\n frm_data=clf_array[-1, :, :],\n max_scale=self.max_scale,\n palette=self.palette,\n aspect_ratio=aspect_ratio,\n file_name=os.path.join(\n self.heatmap_clf_location_dir,\n self.video_name + \"_final_frm.png\",\n ),\n shading=self.shading,\n clf_name=self.clf_name,\n img_size=(self.width, self.height),\n final_img=True,\n )\n\n if self.video_setting or self.frame_setting:\n for frm_cnt, cumulative_frm_idx in enumerate(range(clf_array.shape[0])):\n frm_data = clf_array[cumulative_frm_idx, :, :]\n cum_df = pd.DataFrame(frm_data).reset_index()\n cum_df = cum_df.melt(\n id_vars=\"index\",\n value_vars=None,\n var_name=None,\n value_name=\"seconds\",\n col_level=None,\n ).rename(\n columns={\"index\": \"vertical_idx\", \"variable\": \"horizontal_idx\"}\n )\n cum_df[\"color\"] = (\n (cum_df[\"seconds\"].astype(float) / float(self.max_scale))\n .round(2)\n .clip(upper=100)\n )\n color_array = np.zeros(\n (\n len(cum_df[\"vertical_idx\"].unique()),\n len(cum_df[\"horizontal_idx\"].unique()),\n )\n )\n for i in range(color_array.shape[0]):\n for j in range(color_array.shape[1]):\n value = cum_df[\"color\"][\n (cum_df[\"horizontal_idx\"] == j)\n & (cum_df[\"vertical_idx\"] == i)\n ].values[0]\n color_array[i, j] = value\n\n fig = plt.figure()\n im_ratio = color_array.shape[0] / color_array.shape[1]\n plt.pcolormesh(\n color_array,\n shading=self.shading,\n cmap=self.palette,\n rasterized=True,\n alpha=1,\n vmin=0.0,\n vmax=float(self.max_scale),\n )\n plt.gca().invert_yaxis()\n plt.xticks([])\n plt.yticks([])\n plt.axis(\"off\")\n plt.tick_params(axis=\"both\", which=\"both\", length=0)\n cb = plt.colorbar(pad=0.0, fraction=0.023 * im_ratio)\n cb.ax.tick_params(size=0)\n cb.outline.set_visible(False)\n cb.set_label(\n \"{} (seconds)\".format(self.clf_name), rotation=270, labelpad=10\n )\n plt.tight_layout()\n plt.gca().set_aspect(aspect_ratio)\n canvas = FigureCanvas(fig)\n canvas.draw()\n mat = np.array(canvas.renderer._renderer)\n image = cv2.cvtColor(mat, cv2.COLOR_RGB2BGR)\n image = cv2.resize(image, (self.width, self.height))\n image = 
np.uint8(image)\n plt.close()\n\n if self.video_setting:\n self.writer.write(image)\n if self.frame_setting:\n frame_save_path = os.path.join(\n self.save_video_folder, str(frm_cnt) + \".png\"\n )\n cv2.imwrite(frame_save_path, image)\n print(\n \"Created heatmap frame: {} / {}. Video: {} ({}/{})\".format(\n str(frm_cnt + 1),\n str(len(self.data_df)),\n self.video_name,\n str(file_cnt + 1),\n len(self.files_found),\n )\n )\n\n if self.video_setting:\n self.writer.release()\n\n video_timer.stop_timer()\n print(\n \"Heatmap plot for video {} saved (elapsed time: {}s) ... \".format(\n self.video_name, video_timer.elapsed_time_str\n )\n )\n\n self.timer.stop_timer()\n stdout_success(\n msg=\"All heatmap visualizations created in project_folder/frames/output/heatmaps_classifier_locations directory\",\n elapsed_time=\"self.timer.elapsed_time_str\",\n )",
"def drr(input_filename,output_directory,calibration_files,output_extension,threshold,min_out,max_out,default_pixel_value,rot,t,cor,n_cam,res,size,transformed_vol,verbose):\n \n click.echo(' inputFileName : {}'.format(input_filename))\n click.echo(' out_directory : {}'.format(output_directory))\n click.echo(' outputExtension : {}'.format(output_extension))\n click.echo(' Verbose Status : {}'.format(verbose))\n click.echo(' Pixel Size : {}'.format(res))\n click.echo(' Output image Size : {}'.format(size))\n click.echo(' Translation : {}'.format(t))\n click.echo(' Rotation : {}'.format(rot))\n click.echo(' Centre of Rotation : {}'.format(cor))\n click.echo(' Threshold : {}'.format(threshold))\n click.echo(' Number of Cameras : {}'.format(n_cam)) \n click.echo(' Minimum Out : {}'.format(min_out))\n click.echo(' Maximum Out : {}'.format(max_out))\n click.echo(' Calibration Files : {}'.format(calibration_files)) \n \n if len(calibration_files) != n_cam :\n raise Exception('Number of Calibration files', len(calibration_files),'do not correspond with the number of Cameras',n_cam)\n \n #%%------------------ Starting the main body of the code ---------------- \n # -------------------- Reader -------------------------\n InputPixelType = itk.ctype(\"short\")\n OutputPixelType = itk.ctype(\"short\")\n ScalarType = itk.D\n DimensionIn = 3\n DimensionOut = 3\n \n InputImageType = itk.Image[InputPixelType , DimensionIn ]\n OutputImageType = itk.Image[OutputPixelType, DimensionOut]\n \n \n ReaderType = itk.ImageFileReader[InputImageType]\n reader = ReaderType.New()\n reader.SetFileName(input_filename)\n \n try:\n print(\"Reading image: \" + input_filename)\n reader.Update()\n print(\"Image Read Successfully\")\n except ValueError: \n print(\"ERROR: ExceptionObject cauth! \\n\")\n print(ValueError)\n sys.exit()\n \n inputImage = reader.GetOutput()\n \n if verbose :\n print(inputImage)\n \n \n #%% ------------------ Transformation \n # This part is inevitable since the interpolator (Ray-cast) and resample Image\n # image filter uses a Transformation -- Here we set it to identity. \n TransformType = itk.CenteredEuler3DTransform[itk.D]\n transform = TransformType.New()\n \n transform.SetRotation(numpy.deg2rad(rot[0]),numpy.deg2rad(rot[1]),numpy.deg2rad(rot[2])) # Setting the rotation of the transform\n transform.SetTranslation(itk.Vector.D3(t)) # Setting the translation of the transform\n transform.SetComputeZYX(True) # The order of rotation will be ZYX. \n \n imOrigin = inputImage.GetOrigin() # Get the origin of the image.\n inRes = inputImage.GetSpacing() # Get the resolution of the input image.\n inSiz = inputImage.GetBufferedRegion().GetSize() # Get the size of the input image.\n \n center = itk.Point.D3(imOrigin) + numpy.multiply(inRes,inSiz)/2. # Setting the center of rotation as center of 3D object + offset determined by cor. \n \n transform.SetCenter(center) # Setting the center of rotation. \n \n if verbose :\n print(transform)\n \n #%% \n for ii in range(n_cam):\n imageCalibrationInfo = CalibrationUsingJointTrack.CalibrationTool() # Setting up the image calibration info class. \n imageCalibrationInfo.SetCalibrationInfo(calibration_files[ii]) # Assign the information from the calibration file to the imageCalibrationInfo class. \n \n spaceOutput= imageCalibrationInfo.GetPixelSize() # The resolution (spacing) along x,y,z directions of output image\n \n imageCalibrationInfo.SetOutputImageSize(size[0],size[1],1) # Setting the size of the output image. 
\n imageCalibrationInfo.SetGlobalOriginForImagePlane() # Setting the global origin of the output image. \n \n originOutput = imageCalibrationInfo.GetGlobalOriginForImagePlane() # Setting the output origin. \n \n directionOutput = imageCalibrationInfo.GetDirectionMatrix() # Direction of Image plane 3x3 matrix. \n focalPoint = imageCalibrationInfo.GetFocalPoint() # Position of the x-ray source. \n \n #%% ----------------- Ray Cast Interpolator \n # In this part the Ray Cast interpolator is defined and applied to the input\n # image data. \n \n InterpolatorType = itk.RayCastInterpolateImageFunction[InputImageType,ScalarType] # Defining the interpolator type from the template. \n interpolator = InterpolatorType.New() # Pointer to the interpolator\n \n interpolator.SetInputImage(inputImage) # Setting the input image data\n interpolator.SetThreshold(threshold) # Setting the output threshold\n interpolator.SetFocalPoint(itk.Point.D3(focalPoint)) # Setting the focal point (x-ray source location)\n interpolator.SetTransform(transform) # Setting the transform (here identity)\n \n if verbose:\n print(interpolator)\n #%%----------------- Resample Image Filter ------------------------\n # In this part the resample image filter to map a 3D image to 2D image plane with desired specs is designed\n \n FilterType = itk.ResampleImageFilter[InputImageType,OutputImageType] # Defining the resample image filter type. \n resamplefilter = FilterType.New() # Pointer to the filter\n resamplefilter.SetInput(inputImage) # Setting the input image data \n resamplefilter.SetDefaultPixelValue(default_pixel_value) # Setting the default Pixel value\n resamplefilter.SetInterpolator(interpolator) # Setting the interpolator\n resamplefilter.SetTransform(transform) # Setting the transform\n resamplefilter.SetSize([size[0],size[1],1]) # Setting the size of the output image. \n resamplefilter.SetOutputSpacing(itk.Vector.D3([spaceOutput[0],spaceOutput[1],1])) # Setting the spacing(resolution) of the output image. \n resamplefilter.SetOutputOrigin(originOutput) # Setting the output origin of the image\n Functions.ChangeImageDirection(oldDirection=resamplefilter.GetOutputDirection(),newDirection=directionOutput,DimensionOut=3) # Setting the output direction of the image --- resamplefilter.SetImageDirection(args) was not working properly\n \n resamplefilter.Update() # Updating the resample image filter.\n \n if verbose:\n print(resamplefilter)\n #%%---------------- Rescaler Image Filter --------------------------\n RescalerFilterType = itk.RescaleIntensityImageFilter[InputImageType,OutputImageType] # Defining the rescale image filter. \n rescaler = RescalerFilterType.New() # Pointer to the rescale filter\n rescaler.SetOutputMinimum(min_out) # Minimum output\n rescaler.SetOutputMaximum(max_out) # Maximum output \n rescaler.SetInput(resamplefilter.GetOutput()) # Setting the input to the image filter. 
\n rescaler.Update() \n \n if verbose:\n print(rescaler)\n \n #%% ------------------ Writer ------------------------------------\n # The output of the resample filter can then be passed to a writer to\n # save the DRR image to a file.\n WriterType = itk.ImageFileWriter[OutputImageType]\n writer = WriterType.New()\n \n outputPath = os.path.join(output_directory,'Cam')+str(ii+1)\n \n if not os.path.exists(outputPath):\n os.mkdir(outputPath)\n \n if ii == 0:\n time = datetime.datetime.now() \n dummy = ('rx'+str(int(rot[0]))+'ry'+str(int(rot[1]))+'rz'+str(int(rot[2]))+'tx'\n + str(int(t[0]))+'ty'+str(int(t[1]))+'tz'+str(int(t[2]))+'y'+str(time.year)+'m'+str(time.month)\n +'d'+str(time.day)+'hr'+str(time.hour)+'m'+str(time.minute)+'s'+str(time.second)+ output_extension)\n \n outputName = 'Cam'+str(ii+1)+dummy\n output_filename = str(os.path.join(outputPath,outputName))\n \n writer.SetFileName(output_filename)\n # writer.SetFileName('/Volumes/Storage/Payam/Desktop/output.nii') \n writer.SetInput(rescaler.GetOutput())\n \n try:\n print(\"Writing image: \" + output_filename)\n writer.Update()\n print(\"Image Printed Successfully\")\n except ValueError: \n print(\"ERROR: ExceptionObject cauth! \\n\")\n print(ValueError)\n sys.exit()\n \n \n # Writing the transformed volume\n if transformed_vol:\n WriterType=itk.ImageFileWriter[InputImageType]\n writer3d=WriterType.New()\n \n output_filename3d = os.path.join(output_directory,'TransformedVolume'+output_extension)\n writer3d.SetFileName(output_filename3d)\n writer3d.SetInput(resamplefilter.GetOutput())\n \n try:\n print(\"Writing the transformed Volume at : \" + output_filename3d)\n writer.Update()\n print(\"Volume Printed Successfully\")\n except ValueError: \n print(\"ERROR: ExceptionObject cauth! \\n\")\n print(ValueError)\n sys.exit()",
"def extract(self, files):\n for i in range(len(files)):\n print(files[i])\n img = cv2.imread('{}/{}'.format('{}/{}/{}'.format(DIR_2DST_Mask, self.patient, self.plan), files[i]), 0)\n\n \"\"\"\n Find the indices of array elements that are non-zero, i.e,\n find the pixels' positions that represents the respiratory\n functions (pixels in the respiratory function are brighter).\n \"\"\"\n color_pts = np.argwhere(img > 70)\n\n \"\"\"\n Sorts the pixels according to their x coordenate.\n Obs: np.argwhere inverts x and y, it's like (y, x), because of it,\n the parameter of itemgetter is 1 (to get x coordinate)\n \"\"\"\n lcolor_pts = sorted(color_pts.tolist(), key=itemgetter(1))\n\n \"\"\"\n If there is no pixel representing the respiratory function\n (i.e., lighter pixel) it creates an empty image (without any\n respiratory function)\n \"\"\"\n if len(lcolor_pts) == 0:\n diaphragmatic_lvl = np.zeros((256, 50, 3), np.uint8)\n\n cv2.imwrite('{}/{}/{}/{}'.format(\n DIR_2DST_Diaphragm, patient, plan, files[i]), diaphragmatic_lvl)\n\n # file = open(\n # '{}/{}/{}/points.txt'.format(DIR_2DST_Diaphragm, self.patient, self.plan), 'a')\n # file.write(\"{}:{}\\n\".format(files[i], []))\n # file.close()\n\n continue\n\n # Reverse the coordinates and store the result in lordered_pts list\n lordered_pts = []\n for j in range(len(lcolor_pts)):\n lordered_pts.append(lcolor_pts[j][::-1])\n\n \"\"\"\n Convert pixels coordinates into a tuples and check which column\n has pixels that corresponding to diaphragmatic level\n Obs. There are some columns that doesnt have any pixel that\n correpond to diaphragmatic level.\n \"\"\"\n # Columns that have a pixel corresponding diaphragmatic level\n lcolumn_available = []\n for j in range(len(lordered_pts)):\n lordered_pts[j] = tuple(lordered_pts[j])\n lcolumn_available.append(lordered_pts[j][0])\n lcolumn_available = list(set(lcolumn_available))\n # print(\"Ordered points: \", lordered_pts)\n # print(\"Columns available: \", lcolumn_available)\n\n \"\"\"\n If there is not enough columns to build a respiratory pattern,\n create a blank image\n \"\"\"\n if len(lcolumn_available) < 20:\n diaphragmatic_lvl = np.zeros((256, 50, 3), np.uint8)\n cv2.imwrite('{}/{}/{}/{}'.format(\n DIR_2DST_Diaphragm, patient, plan, files[i]), diaphragmatic_lvl)\n continue\n\n \"\"\"\n If there are no pixel that corresponding diaphragmatic level in the\n first column, assign to it the value of the second y coordinate\n \"\"\"\n if lcolumn_available[0] is not 0:\n y = max(\n [x for x in lordered_pts if x[0] == lcolumn_available[0]],\n key=itemgetter(1))[1]\n lordered_pts.insert(0, (0, y))\n lcolumn_available.insert(0, 0)\n\n \"\"\"\n If there are no pixel that corresponding diaphragmatic level in the\n last column, assign to it the value of the penultimate y coordinate\n available\n \"\"\"\n if lcolumn_available[-1] is not 49:\n lordered_pts.append(\n (49, lordered_pts[len(lcolumn_available)][1]))\n lcolumn_available.append(49)\n\n \"\"\"\n Get the biggest y value in each column that represents the\n diaphragmatic level\n \"\"\"\n column = 0\n lcolumn = []\n ldiaphragm_pts = []\n for j in range(50):\n # Get the column's points\n lcolumn = [x for x in lordered_pts if x[0] == column]\n # print('{}: {}'.format(j, lcolumn))\n\n if len(lcolumn) > 0:\n ldiaphragm_pts.append(\n max(lcolumn, key=itemgetter(1))) # Get the biggest y\n else:\n # Get the y value from the previous column\n lcolumn_available.insert(column, column)\n ldiaphragm_pts.append((column, ldiaphragm_pts[-1][1]))\n column += 1\n lcolumn 
= []\n\n # Draw diaphragmatic level\n diaphragmatic_lvl = np.zeros((256, 50, 3), np.uint8)\n j = 0\n while(j < len(lcolumn_available) - 1):\n cv2.line(\n diaphragmatic_lvl,\n ldiaphragm_pts[j], ldiaphragm_pts[j + 1],\n (0, 0, 255), 1)\n j = j + 1\n\n lcolumn_available = []\n\n print(\"Diaphragmatic's points: \", ldiaphragm_pts)\n cv2.imshow('Diaphragmatic level', diaphragmatic_lvl)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n cv2.imwrite('{}/{}/{}/{}'.format(\n DIR_2DST_Diaphragm, patient, plan, files[i]), diaphragmatic_lvl)\n\n # file = open('{}/{}/{}/points.txt'.format(DIR_2DST_Diaphragm, self.patient, self.plan), 'a')\n # file.write(\"{}:{}\\n\".format(files[i], ldiaphragm_pts))\n # file.close()\n\n # return ldiaphragm_pts",
"def _run_unlabelled_extraction(self, dataset_type: DatasetType, device: str) -> None:\n dataset = self.image_datasets.get_dataset(dataset_type)\n self.extractor_model = self.extractor_model.to(device)\n\n for image, file_id in tqdm(\n dataset, desc=\"Extracting features - competition\",\n ):\n feature_tensor = self.extractor_model(image.unsqueeze(0).to(device))\n self._save_tensor(DatasetType.Competition, feature_tensor, file_id)",
"def test(one=True, training=True, detector_debug=False):\n total = 0\n imgs = []\n if one:\n\n print(\"Detecting:\")\n file_sign = \"./Data/Reglamentarias/STC-RG-3.jpg\"\n sign = cv2.imread(file_sign, 1)\n d = Detector(sign, show=True, debug=detector_debug)\n s, th = d.detect()\n seg = Segmenter(s)\n\n seg.keypoints()\n seg.descriptors()\n res = np.concatenate((seg.origi, seg.th, seg.img, seg.kpimg), axis=1)\n cv2.imshow(\"res\", res)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n if training:\n print (\"Training\")\n for imagePath in paths.list_images(\"./training/PV/\"):\n print (imagePath)\n sign = cv2.imread(imagePath, 1)\n\n seg = Segmenter(sign)\n seg.watershed()\n seg.keypoints()\n res = np.concatenate((seg.origi, seg.th, seg.img, seg.kpimg), axis=1)\n cv2.imshow(\"res\", res)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n else:\n if not one:\n for i in range(1, 90):\n # file = \"./Data/Preventivas/STC-PV-\"+str(i)+\".jpg\"\n file_sign = \"./Data/Reglamentarias/STC-RG-\" + str(i) + \".jpg\"\n # file = \"./Data/Mixtas/STC-MX-\"+ str(i) +\".jpg\"\n sign = cv2.imread(file_sign, 1)\n d = Detector(sign, show=False)\n s, th = d.detect()\n if s is not None:\n total += 1\n imgs.append((i, s, th))\n\n print (\"Detected:\", str(total))\n\n for i in range(1, len(imgs)-1):\n seg = Segmenter(imgs[i][1])\n seg.watershed()\n seg.keypoints()\n res = np.concatenate((seg.origi, seg.th, seg.img, seg.kpimg), axis=1)\n cv2.imshow(\"img\"+str(imgs[i][0]), res)\n print (str(imgs[i][0]))\n\n cv2.waitKey(0)\n cv2.destroyAllWindows()",
"def PETLoadScans(IMG_PATH,MAX_IMAGE_NUM,IMAGE_SIZE,MASK_FLAG,ORIENTATION):\n\n files = [f for f in listdir(IMG_PATH) if isfile(join(IMG_PATH,f))]\n cnt = 0\n if MASK_FLAG == 0:\n print(\"Reading the following PET scans:\")\n else:\n print(\"Reading the following PET masks:\")\n for file in files:\n if cnt == 0:\n SITK_IMG = sitk.ReadImage(os.path.join(IMG_PATH,file))\n SITK_ARR = sitk.GetArrayFromImage(SITK_IMG)\n SITK_ARR = SITK_ARR.astype(np.float32)\n if ORIENTATION == \"RL_AP\":\n pass\n elif ORIENTATION == \"AP_SI\":\n SITK_ARR = np.swapaxes(SITK_ARR,1,0)\n SITK_ARR = np.swapaxes(SITK_ARR,2,0)\n elif ORIENTATION == \"RL_SI\":\n SITK_ARR = np.swapaxes(SITK_ARR,1,0)\n ORIG_SIZE = SITK_ARR.shape\n if MASK_FLAG == 0:\n SITK_ARR = PETImageProcess(SITK_ARR)\n SITK_ARR = resizeResliceImage(SITK_ARR,IMAGE_SIZE,IMAGE_SIZE) \n if MASK_FLAG == 1:\n #Note to self, after interpolation values change from 1. This resets those values to 1.\n SITK_ARR[SITK_ARR > 0.8] = 1\n SITK_ARR[SITK_ARR == 0.8] = 1\n SITK_ARR[SITK_ARR < 0.8] = 0\n IMGS_LIST = SITK_ARR\n cnt += 1\n print(file)\n elif cnt == MAX_IMAGE_NUM:\n print(\"\\n\")\n break\n else:\n SITK_IMG = sitk.ReadImage(os.path.join(IMG_PATH, file))\n SITK_ARR = sitk.GetArrayFromImage(SITK_IMG)\n SITK_ARR = SITK_ARR.astype(np.float32)\n if ORIENTATION == \"RL_AP\":\n pass \n elif ORIENTATION == \"AP_SI\":\n SITK_ARR = np.swapaxes(SITK_ARR,1,0)\n SITK_ARR = np.swapaxes(SITK_ARR,2,0)\n elif ORIENTATION == \"RL_SI\":\n SITK_ARR = np.swapaxes(SITK_ARR,1,0)\n if MASK_FLAG == 0:\n SITK_ARR = PETImageProcess(SITK_ARR)\n SITK_ARR = resizeResliceImage(SITK_ARR,IMAGE_SIZE,IMAGE_SIZE) \n if MASK_FLAG == 1: \n SITK_ARR[SITK_ARR > 0.8] = 1\n SITK_ARR[SITK_ARR == 0.8] = 1\n SITK_ARR[SITK_ARR < 0.8] = 0\n IMGS_LIST = np.concatenate((IMGS_LIST, SITK_ARR))\n cnt += 1\n print(file)\n return IMGS_LIST, ORIG_SIZE",
"def main():\n print(\"Program version: 1.5\")\n StartTime = datetime.now()\n args = parseArguments()\n\n verbose = args.verbose\n images = args.images\n ignore_warnings = args.ignore_warnings\n if(args.silent):\n verbose = False\n images = False\n ignore_warnings = True\n\n if(args.images):\n plt.ioff()\n\n if(args.ignore_warnings):\n warnings.simplefilter('ignore', UserWarning)\n\n #sample header keywords\n # OBJECT = 'P016+03_P1_JKdeep' / Original target\n # RA = ' 01:06:37.759' / 01:06:37.7 RA (J2000) pointing\n # DEC = ' 03:32:36.096' / 03:32:36.0 DEC (J2000) pointing\n # EQUINOX = 2000. / Standard FK5 (years)\n # RADECSYS= 'FK5 ' / Coordinate reference frame\n # CRVAL1 = 16.65733 / 01:06:37.7, RA at ref pixel\n # CRVAL2 = 3.54336 / 03:32:36.0, DEC at ref pixel\n # CRPIX1 = 447. /Ref pixel in X\n # CRPIX2 = 452. / Ref pixel in Y\n # CDELT1 = -8.0000000000000E-5 / SS arcsec per pixel in RA\n # CDELT2 = 8.00000000000003E-5 / SS arcsec per pixel in DEC\n # CTYPE1 = 'RA---TAN' / pixel coordinate system\n # CTYPE2 = 'DEC--TAN' / pixel coordinate system\n # PC1_1 = 0.000000 / Translation matrix element\n # PC1_2 = 1.000000 / Translation matrix element\n # PC2_1 = -1.000000 / Translation matrix element\n # PC2_2 = 0.000000 / Translation matrix element\n\n fits_image_filenames = args.input\n\n #if directory given search for appropriate fits files\n\n if(os.path.isdir(fits_image_filenames[0])):\n print(\"detected a directory. Will search for fits files in it\")\n path = fits_image_filenames[0]\n fits_image_filenames = []\n for file in os.listdir(path):\n if file.endswith(\".fits\") and \"_astro\" not in file:\n fits_image_filenames.append(path+\"/\"+file)\n print(fits_image_filenames)\n\n multiple = False\n if(len(fits_image_filenames)>1):\n multiple = True\n not_converged = []\n converged_counter = 0\n for fits_image_filename in fits_image_filenames:\n\n result,_ = astrometry_script(fits_image_filename, catalog=args.catalog, rotation_scaling=0, xy_transformation=args.xy_transformation, fine_transformation=args.fine_transformation,\n images=images, vignette=args.vignette,vignette_rectangular=args.vignette_rectangular, cutouts=args.cutout, ra=args.ra, dec=args.dec, projection_ra=args.projection_ra, projection_dec=args.projection_dec, verbose=verbose, save_images=args.save_images, ignore_header_rot=args.ignore_header_rot, radius = args.radius, save_bad_result=args.save_bad_result, silent =args.silent, sigma_threshold_for_source_detection= args.sigma_threshold_for_source_detection, high_res=args.high_resolution, hdul_idx=args.hdul_idx, filename_for_sources=args.filename_for_sources, FWHM=args.seeing)\n\n if((not result) and args.rotation_scaling):\n print(\"Did not converge. 
Will try again with full rotation and scaling\")\n result, _ = astrometry_script(fits_image_filename, catalog=args.catalog, rotation_scaling=args.rotation_scaling, xy_transformation=args.xy_transformation, fine_transformation=args.fine_transformation,\n images=images, vignette=args.vignette,vignette_rectangular=args.vignette_rectangular, cutouts=args.cutout, ra=args.ra, dec=args.dec, projection_ra=args.projection_ra, projection_dec=args.projection_dec, verbose=verbose, save_images=args.save_images, ignore_header_rot=args.ignore_header_rot, radius = args.radius, save_bad_result=args.save_bad_result, silent=args.silent, sigma_threshold_for_source_detection=args.sigma_threshold_for_source_detection, high_res=args.high_resolution, hdul_idx=args.hdul_idx, filename_for_sources=args.filename_for_sources, FWHM=args.seeing)\n\n if(result):\n print(\"Astrometry was determined to be good.\")\n converged_counter = converged_counter+1\n else:\n print(\"Astrometry was determined to be bad.\")\n not_converged.append(fits_image_filename)\n if(args.save_bad_result):\n print(\"Result was saved anyway\")\n else:\n print(\"Result was not saved.\")\n # print(\"\")\n # print(\">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\")\n # print(\"> Astrometry for {} \".format(fits_image_filename))\n #\n # with fits.open(fits_image_filename) as hdul:\n # #print(hdul.info())\n # if(args.verbose):\n # print(\"if image is not at first position in the fits file the program will break later on\")\n # #print(hdul[0].header)\n #\n # hdu = hdul[0]\n # #hdu.verify('fix')\n # hdr = hdu.header\n #\n #\n # image_or = hdul[0].data.astype(float)\n # median = np.nanmedian(image_or)\n # image_or[np.isnan(image_or)]=median\n # image = image_or - median\n #\n # observation = find_sources(image, args.vignette)\n # #print(observation)\n #\n # positions = (observation['xcenter'], observation['ycenter'])\n # apertures = CircularAperture(positions, r=4.)\n #\n #\n # #world coordinates\n # print(\">Info found in the file -- (CRVAl: position of central pixel (CRPIX) on the sky)\")\n # print(WCS(hdr))\n #\n # hdr[\"NAXIS1\"] = image.shape[0]\n # hdr[\"NAXIS2\"] = image.shape[1]\n #\n # #wcsprm = Wcsprm(hdr.tostring().encode('utf-8')) #everything else gave me errors with python 3, seemed to make problems with pc conversios, so i wwitched to the form below\n # wcsprm = WCS(hdr).wcs\n # wcsprm_original = WCS(hdr).wcs\n # if(args.verbose):\n # print(WCS(wcsprm.to_header()))\n # wcsprm, fov_radius, INCREASE_FOV_FLAG, PIXSCALE_UNCLEAR = read_additional_info_from_header(wcsprm, hdr, args.ra, args.dec, args.projection_ra, args.projection_dec)\n # if(args.verbose):\n # print(WCS(wcsprm.to_header()))\n #\n # #print(wcsprm)\n # #wcsprm.pc = [[2, 0],[0,1]]\n #\n #\n # #Possibly usefull examples of how to use wcsprm:\n # #print(wcsprm.set())\n # #print(wcsprm.get_pc())\n # #pc = wcsprm.get_pc()\n # #print(np.linalg.det(pc))\n # #print(wcsprm.get_cdelt())\n # #wcs.fix()\n # #print(wcsprm.print_contents())\n # #print(repr(hdr.update(wcsprm.to_header().encode('utf-8')))) #not working\n #\n # #hdu.verify(\"fix\")\n # #print(repr(hdr))\n # #wcs.wcs_pix2world(pixcrd, 1)\n # #wcs.wcs_world2pix(world, 1)\n # #wcs.wcs.crpix = [-234.75, 8.3393]\n # # wcs.wcs.cdelt = np.array([-0.066667, 0.066667])\n # # wcs.wcs.crval = [0, -90]\n # # wcs.wcs.ctype = [\"RA---AIR\", \"DEC--AIR\"]\n # # wcs.wcs.set_pv([(2, 1, 45.0)])\n # # For historical compatibility, three alternate specifications of the linear transformations\n # # are available in wcslib. 
The canonical PCi_ja with CDELTia, CDi_ja, and the deprecated CROTAia\n # # keywords. Although the latter may not formally co-exist with PCi_ja,\n # # the approach here is simply to ignore them if given in conjunction with PCi_ja.\n # # has_pc, has_cd and has_crota can be used to determine which of these alternatives are present in the header.\n # # These alternate specifications of the linear transformation matrix are translated immediately to PCi_ja by set\n # # and are nowhere visible to the lower-level routines. In particular, set resets cdelt to unity if CDi_ja is present\n # # (and no PCi_ja). If no CROTAia is associated with the latitude axis, set reverts to a unity PCi_ja matrix.\n #\n #\n #\n #\n #\n # #get rough coordinates\n # #print(hdr[\"RA\"])\n # #coord = SkyCoord(hdr[\"RA\"], hdr[\"DEC\"], unit=(u.hourangle, u.deg), frame=\"icrs\")\n # coord = SkyCoord(wcsprm.crval[0], wcsprm.crval[1], unit=(u.deg, u.deg), frame=\"icrs\")\n # if(not PIXSCALE_UNCLEAR):\n # if(wcsprm.crpix[0] < 0 or wcsprm.crpix[1] < 0 or wcsprm.crpix[0] > image.shape[0] or wcsprm.crpix[1] > image.shape[1] ):\n # print(\"central value outside of the image, moving it to the center\")\n # coord_radec = wcsprm.p2s([[image.shape[0]/2, image.shape[1]/2]], 0)[\"world\"][0]\n # coord = SkyCoord(coord_radec[0], coord_radec[1], unit=(u.deg, u.deg), frame=\"icrs\")\n # #print(wcsprm)\n #\n #\n #\n # #better: put in nice wrapper! with repeated tries and maybe try synchron!\n # print(\">Dowloading catalog data\")\n # radius = u.Quantity(fov_radius, u.arcmin)#will prob need more\n # catalog_data = query.get_data(coord, radius, args.catalog)\n # #reference = reference.query(\"mag <20\")\n # max_sources = 500\n # if(INCREASE_FOV_FLAG):\n # max_sources= max_sources*2.25 #1.5 times the radius, so 2.25 the area\n # if(catalog_data.shape[0]>max_sources):\n # catalog_data = catalog_data.nsmallest(400, \"mag\")\n #\n # if(args.catalog == \"GAIA\" and catalog_data.shape[0] < 5):\n # print(\"GAIA seems to not have enough objects, will enhance with PS1\")\n # catalog_data2 = query.get_data(coord, radius, \"PS\")\n # catalog_data = pd.concat([catalog_data, catalog_data2])\n # #apertures_catalog = CircularAperture(wcs.wcs_world2pix(catalog_data[[\"ra\", \"dec\"]], 1), r=5.)\n # print(\"Now we have a total of {} sources. Keep in mind that there might be duplicates now since we combined 2 catalogs\".format(catalog_data.shape[0]))\n # elif(args.catalog == \"PS\" and (catalog_data is None or catalog_data.shape[0] < 5)):\n # print(\"We seem to be outside the PS footprint, enhance with GAIA data\")\n # catalog_data2 = query.get_data(coord, radius, \"GAIA\")\n # catalog_data = pd.concat([catalog_data, catalog_data2])\n # #apertures_catalog = CircularAperture(wcs.wcs_world2pix(catalog_data[[\"ra\", \"dec\"]], 1), r=5.)\n # print(\"Now we have a total of {} sources. 
Keep in mind that there might be duplicates now since we combined 2 catalogs\".format(catalog_data.shape[0]))\n #\n # #remove duplicates in catalog?\n #\n # apertures_catalog = CircularAperture(wcsprm.s2p(catalog_data[[\"ra\", \"dec\"]], 1)['pixcrd'], r=5.)\n #\n #\n # #plotting what we have, I keep it in the detector field, world coordinates are more painfull to plot\n # if(args.images):\n # fig = plt.figure()\n # fig.canvas.set_window_title('Input for {}'.format(fits_image_filename))\n # plt.xlabel(\"pixel x direction\")\n # plt.ylabel(\"pixel y direction\")\n # plt.title(\"Input - red: catalog sources, blue: detected sources in img\")\n # plt.imshow(image,cmap='Greys', origin='lower', norm=LogNorm())\n # apertures.plot(color='blue', lw=1.5, alpha=0.5)\n # apertures_catalog.plot(color='red', lw=1.5, alpha=0.5)\n #\n # plt.xlim(-200,image.shape[0]+200)\n # plt.ylim(-200,image.shape[1]+200)\n # if(args.save_images):\n # name_parts = fits_image_filename.rsplit('.', 1)\n # plt.savefig(name_parts[0]+\"_image_before.pdf\")\n #\n # ###tranforming to match the sources\n # print(\"---------------------------------\")\n # print(\">Finding the transformation\")\n # if(args.rotation_scaling):\n # print(\"Finding scaling and rotation\")\n # wcsprm = register.get_scaling_and_rotation(observation, catalog_data, wcsprm, scale_guessed=PIXSCALE_UNCLEAR, verbose=args.verbose)\n # if(args.xy_transformation):\n # print(\"Finding offset\")\n # wcsprm,_,_ = register.offset_with_orientation(observation, catalog_data, wcsprm, fast=False , INCREASE_FOV_FLAG=INCREASE_FOV_FLAG, verbose= args.verbose)\n #\n # #correct subpixel error\n # obs_x, obs_y, cat_x, cat_y, distances = register.find_matches(observation, catalog_data, wcsprm, threshold=3)\n # rms = np.sqrt(np.mean(np.square(distances)))\n # best_score = len(obs_x)/(rms+10) #start with current best score\n # fine_transformation = False\n # if(args.fine_transformation):\n # for i in [2,3,5,8,10,6,4, 20,2,1,0.5]:\n # wcsprm_new, score = register.fine_transformation(observation, catalog_data, wcsprm, threshold=i)\n # if(score> best_score):\n # wcsprm = wcsprm_new\n # best_score = score\n # fine_transformation = True\n # if not fine_transformation:\n # print(\"Fine transformation did not improve result so will be discarded.\")\n # else:\n # print(\"Fine transformation applied to improve result\")\n # #register.calculate_rms(observation, catalog_data,wcs)\n #\n # #make wcsprim more physical by moving scaling to cdelt, out of the pc matrix\n # wcs =WCS(wcsprm.to_header())\n # if(args.verbose):\n # print(wcs)\n #\n # from astropy.wcs import utils\n # scales = utils.proj_plane_pixel_scales(wcs)\n # print(scales)\n # cdelt = wcsprm.get_cdelt()\n # print(cdelt)\n # scale_ratio = scales/cdelt\n # #print(scale_ratio)\n # pc = np.array(wcsprm.get_pc())\n # pc[0,0] = pc[0,0]/scale_ratio[0]\n # pc[1,0] = pc[1,0]/scale_ratio[1]\n # pc[0,1] = pc[0,1]/scale_ratio[0]\n # pc[1,1] = pc[1,1]/scale_ratio[1]\n # wcsprm.pc = pc\n # wcsprm.cdelt = scales\n # if(args.verbose):\n # print(\"moved scaling info to CDelt\")\n # print(WCS(wcsprm.to_header()))\n #\n # #WCS difference before and after\n # print(\"> Compared to the input the Wcs was changed by: \")\n # scales_original = utils.proj_plane_pixel_scales(WCS(hdr))\n # print(\"WCS got scaled by {} in x direction and {} in y direction\".format(scales[0]/scales_original[0], scales[1]/scales_original[1]))\n # #sources:\n # #https://math.stackexchange.com/questions/2113634/comparing-two-rotation-matrices\n # 
#https://stackoverflow.com/questions/2827393/angles-between-two-n-dimensional-vectors-in-python/13849249#13849249\n # def unit_vector(vector):\n # \"\"\" Returns the unit vector of the vector. \"\"\"\n # return vector / max(np.linalg.norm(vector), 1e-10)\n # def matrix_angle( B, A ):\n # \"\"\" comment cos between vectors or matrices \"\"\"\n # Aflat = A.reshape(-1)\n # Aflat = unit_vector(Aflat)\n # Bflat = B.reshape(-1)\n # Bflat = unit_vector(Bflat)\n # #return np.arccos((np.dot( Aflat, Bflat ) / max( np.linalg.norm(Aflat) * np.linalg.norm(Bflat), 1e-10 )))\n # return np.arccos(np.clip(np.dot(Aflat, Bflat), -1.0, 1.0))\n # #print(matrix_angle(wcsprm.get_pc(), wcsprm_original.get_pc()) /2/np.pi*360)\n # rotation_angle = matrix_angle(wcsprm.get_pc(), wcsprm_original.get_pc()) /2/np.pi*360\n # if((wcsprm.get_pc() @ wcsprm_original.get_pc() )[0,1] > 0):\n # text = \"counterclockwise\"\n # else:\n # text = \"clockwise\"\n # print(\"Rotation of WCS by an angle of {} deg \".format(rotation_angle)+text)\n # old_central_pixel = wcsprm_original.s2p([wcsprm.crval], 0)[\"pixcrd\"][0]\n # print(\"x offset: {} px, y offset: {} px \".format(wcsprm.crpix[0]- old_central_pixel[0], wcsprm.crpix[1]- old_central_pixel[1]))\n #\n #\n # #check final figure\n # if(args.images):\n # fig = plt.figure()\n # fig.canvas.set_window_title('Result for {}'.format(fits_image_filename))\n # plt.xlabel(\"pixel x direction\")\n # plt.ylabel(\"pixel y direction\")\n # plt.title(\"Result - red: catalog sources, blue: detected sources in img\")\n # plt.imshow(image,cmap='Greys', origin='lower', norm=LogNorm())\n # apertures.plot(color='blue', lw=1.5, alpha=0.5)\n # #apertures_catalog = CircularAperture(wcs.wcs_world2pix(catalog_data[[\"ra\", \"dec\"]], 1), r=5.)\n # apertures_catalog = CircularAperture(wcsprm.s2p(catalog_data[[\"ra\", \"dec\"]], 1)['pixcrd'], r=5.)\n #\n # apertures_catalog.plot(color='red', lw=1.5, alpha=0.5)\n # if(args.save_images):\n # name_parts = fits_image_filename.rsplit('.', 1)\n # plt.savefig(name_parts[0]+\"_image_after.pdf\")\n #\n # print(\"--- Evaluate how good the transformation is ----\")\n # register.calculate_rms(observation, catalog_data,wcsprm)\n #\n #\n # #updating file\n # write_wcs_to_hdr(fits_image_filename, wcsprm)\n #\n #\n # print(\"overall time taken\")\n # print(datetime.now()-StartTime)\n # if(args.images):\n # plt.show()\n if(multiple):\n print(\">> Final report:\")\n print(\"Processed {} files, {} of them did converge. The following files failed:\".format(len(fits_image_filenames), converged_counter))\n print(not_converged)\n print(\"-- finished --\")",
"def main():\n stats = []\n start = timer()\n\n for file_name in get_dataset():\n\n # load image and ground truth detection mask\n img = cv2.imread(settings.PATH + file_name)\n ground_truth_mask = cv2.imread(settings.PATH_GT_MASKS + file_name)\n\n # Find list of barcode regions (rotated rectangle) within image\n barcode_regions, debug_img = find_barcodes(img)\n barcode_regions_mask = np.zeros(img.shape, np.uint8)\n barcode_images = None\n result = []\n\n # Decode barcode regions\n for barcode_region in barcode_regions:\n\n # Decode barcode image\n barcode_img = barcode_region.extract_from(img)\n barcode_mask = barcode_region.get_mask(img)\n debug_img = barcode_region.draw(debug_img)\n\n # Combine masks from multiple detected regions\n barcode_regions_mask += barcode_mask\n\n # Decode barcode\n decoded = pyzbar.decode(barcode_img)\n\n # Keep result for logging\n data = \", \".join([d.data.decode(\"utf-8\") for d in decoded])\n result.append({\"data\": data, \"region\": barcode_region.json()})\n\n if settings.SHOW_IMAGE:\n barcode_images = img_concat(barcode_images, barcode_img)\n\n # Jaccard_accuracy = intersection over union of the two binary masks\n jaccard_accuracy = 0\n if ground_truth_mask is not None:\n r = barcode_regions_mask.max(axis=-1).astype(bool)\n u = ground_truth_mask.max(axis=-1).astype(bool)\n jaccard_accuracy = float((r & u).sum()) / (r | u).sum()\n stats.append(jaccard_accuracy)\n\n # Log result\n logger.info(\n \"Image processed\",\n file_name=file_name,\n jaccard_accuracy=jaccard_accuracy,\n success=jaccard_accuracy > 0.5,\n result=result,\n )\n\n # In debug mode show visualization of detection algorithm\n if settings.SHOW_IMAGE:\n\n # Add alpha channel\n debug_img = cv2.cvtColor(debug_img, cv2.COLOR_BGR2BGRA)\n if barcode_images is not None:\n barcode_images = cv2.cvtColor(barcode_images, cv2.COLOR_BGR2BGRA)\n\n # Overlay error mask\n # Pixel-wise difference between ground truth and detected barcodes\n if ground_truth_mask is not None:\n error_img = np.zeros(debug_img.shape, np.uint8)\n error_img[r & u] = np.array([0, 0, 0, 0], dtype=np.uint8)\n error_img[np.logical_xor(r, u)] = np.array(\n [0, 0, 255, 1], dtype=np.uint8\n )\n debug_img = cv2.addWeighted(debug_img, 1, error_img, 0.5, 0)\n\n # Append barcode pictures to the right\n debug_img = img_concat(debug_img, barcode_images, axis=1)\n\n # Show visualization\n cv2.namedWindow(\"img\", cv2.WINDOW_NORMAL)\n cv2.imshow(\"img\", debug_img)\n cv2.waitKey(0)\n\n # Calculate final stats\n end = timer()\n accuracy = np.array(stats).mean()\n successes = np.where(np.array(stats) > 0.5)[0]\n logger.info(\n \"Final stats\",\n accuracy=accuracy,\n detection_rate=float(len(successes)) / len(stats),\n fps=len(stats) / (end - start),\n )",
"def highpass_filter(display):\r\n for trainOrTest in trainTest:\r\n resultPath = os.path.join('hpf_data', trainOrTest)\r\n originalPath = 'original_data'\r\n for pokemon in pokemons:\r\n pokeData = os.path.join(originalPath, trainOrTest, pokemon)\r\n files = os.listdir(pokeData)\r\n for picture in files:\r\n # Setting path\r\n path = os.path.join(pokeData, picture)\r\n\r\n # Reading image\r\n Img = dip.im_to_float(cv2.imread(path, 1))\r\n\r\n # Splitting the image into blue, green, red portions\r\n b, g, r = cv2.split(Img)\r\n\r\n # Splitting image, taking mean\r\n avg = np.mean([np.mean(b.flatten()), np.mean(g.flatten()), np.mean(r.flatten())])\r\n\r\n # Finding acceptable frequency\r\n precision = 0.002\r\n target = avg / 12\r\n _, j = hpf(b, target, precision)\r\n\r\n # Running hpf\r\n b_out, _ = hpf(b, target, precision, j)\r\n g_out, _ = hpf(g, target, precision, j)\r\n r_out, _ = hpf(r, target, precision, j)\r\n\r\n # Normalizing mean to 1\r\n b_out = b_out * (1 / np.max(b_out))\r\n g_out = g_out * (1 / np.max(g_out))\r\n r_out = r_out * (1 / np.max(r_out))\r\n\r\n # Combiner (Logic)\r\n std = 100 # how many standard deviations above mean for rgb parts\r\n sigmas = [np.var(b_out) ** 0.5, np.var(g_out) ** 0.5, np.var(r_out) ** 0.5]\r\n means = [np.mean(b_out), np.mean(g_out), np.mean(r_out)]\r\n output = combiner(b_out, g_out, r_out, means + sigmas * std)\r\n\r\n output = dip.float_to_im(output)\r\n\r\n if display:\r\n plt.subplot(1, 2, 1)\r\n plt.title('Original Image')\r\n plt.imshow(Img)\r\n plt.subplot(1, 2, 2)\r\n plt.title(\"High pass filter result\")\r\n plt.imshow(output)\r\n\r\n resultPic = os.path.join(resultPath, pokemon, picture)\r\n # Saving resultant image\r\n dip.im_write(output, resultPic)",
"def read_localthermo(paths):\n total_offset = 0\n frames = []\n for path in paths:\n print(H + \"Processing\", path)\n try:\n cut = np.loadtxt(path + CUTFILE)\n except OSError:\n cut = np.inf\n imglist = sorted(glob(path + IMGPATH),\n key=lambda s: int(s.split('_')[-1][:-4]))\n last = cv2.imread(imglist[0], cv2.IMREAD_ANYDEPTH)\n h, w = last.shape\n try:\n mask = cv2.imread(path + MASKFILE, 0).astype(float) / 255\n except AttributeError:\n print(H + W + \"Mask not found! Using default\")\n margin = .2 # 20% margin on the default mask\n mask = np.zeros((h, w))\n mask[int(margin * h):int((1 - margin) * h),\n int(margin * w):int((1 - margin) * w)] = 1\n tg = TimeGetter(path)\n if len(tg.tlist) != len(imglist):\n print(H + W + \"There are {} Ximea images and {} IR images 🤔\".format(\n len(tg.tlist), len(imglist)))\n imglist = imglist[:min(len(tg.tlist), len(imglist))]\n try:\n irthresh = int(np.loadtxt(IRTHRESH))\n except OSError:\n print(H + W + f\"{IRTHRESH} not found, using default value\")\n irthresh = 30\n r = []\n for imgname in imglist[1:]:\n t = tg.get(imgname)\n if t >= cut:\n break\n img = cv2.imread(imgname, cv2.IMREAD_ANYDEPTH).astype(float)\n diff = img - last\n last = img\n # r.append((t,np.sum(mask*diff**2)))\n mdiff = mask * (diff - diff[np.where(mask)].mean())\n r.append((t + total_offset, np.count_nonzero(mdiff > irthresh)))\n\n total_offset += min(cut, t)\n data = pd.DataFrame(r, columns=['t(s)', 'localthermo'])\n data['t(s)'] = pd.to_timedelta(data['t(s)'], unit='s')\n frames.append(data.set_index('t(s)'))\n return pd.concat(frames)",
"def main(filename, iterations, save_diagnostics, output_dir, burnin):\n #data = []\n #with open(filename,'rb') as json_data:\n #skip header\n #jsondata = json.load(json_data)\n #j=0\n #while j<271:\n #eruption_time = jsondata[j]['FIELD1']\n #waiting_time = jsondata[j]['FIELD2']\n #data.append([float(eruption_time), float(waiting_time)])\n #j=j+1\n\n #generate ida images\n data = np.array([[131,3,1],[49,1,1],[17,7,1],[55,7,19],[80,5,1],[40,2,2],[91,21,6],[19,16,1],[27,7,1],[15,50,2],[37,1,7],[17,3,1],[22,32,2],[68,2,1],[26,2,3],[15,2,3],[246,2,1],[25,2,1],[19,1,1],[98,1,2],[54,13,1],[168,2,4],[20,102,5],[40,2,1],[41,1,1],[44,19,16],[17,6,1],[92,12,1],[17,2,1],[16,5,3],[45,11,1],[20,10,1],[26,1,2],[21,9,9],[26,10,1],[187,4,2],[65,28,4],[17,9,33],[23,39,1],[58,4,4],[41,107,3],[28,3,1],[16,1,1],[17,16,4],[17,16,1],[17,5,1],[83,2,2],[17,1,2],[26,4,2],[22,7,2],[16,1,1],[15,2,1],[15,2,1],[111,8,1],[25,6,1],[112,4,1],[19,10,2],[38,25,4],[29,1,5],[17,2,1],[111,9,8],[53,5,4],[29,7,1],[25,8,2],[23,2,134],[32,6,1],[27,1,1],[61,4,2],[41,163,4],[57,11,2],[24,2,1],[16,18,1],[81,7,14],[169,5,1],[19,4,1],[412,5,1],[32,2,7],[19,28,3],[17,11,1],[44,4,5],[27,2,2],[18,1,7],[15,3,3],[18,10,1],[19,6,10],[46,2,5],[20,12,3],[25,6,4],[18,4,1],[15,40,8],[16,11,16],[237,1,1],[26,13,2],[26,4,1],[101,5,5],[50,2,1],[22,45,5],[16,7,2],[17,4,2],[19,2,3],[22,1,1],[260,6,1],[20,15,1],[24,5,1],[33,2,1],[16,1,5],[21,18,1],[22,1,1],[18,13,2],[124,3,1],[16,6,1],[19,6,2],[71,2,1],[232,2,2],[21,2,1],[231,11,1],[201,49,2],[28,12,1],[68,5,1],[56,26,7],[17,1,8],[19,10,2],[120,13,2],[218,3,1],[46,5,6],[57,4,1],[30,5,2],[17,8,4],[17,22,1],[15,5,1],[16,7,1],[26,13,1],[28,22,2],[100,1,2],[58,12,2],[52,9,11],[21,4,2],[18,4,1],[699,1,1],[401,6,3],[20,7,1],[20,3,13],[27,1,1],[35,2,2],[27,6,1],[15,13,1],[17,6,1],[26,28,4],[89,2,3],[36,11,2],[17,11,2],[15,1,1],[59,3,1],[15,3,1],[20,11,1],[49,1,1],[24,3,1],[25,7,1],[29,1,1],[61,2,2],[28,3,13],[82,2,8],[22,2,1],[21,25,3],[73,3,2],[22,8,1],[51,3,12],[16,6,1],[64,2,4],[22,2,2],[19,7,1],[69,2,1],[17,8,9],[19,1,13],[28,35,3],[134,2,1],[19,12,1],[27,13,1],[17,10,1],[16,17,4],[46,2,3],[15,1,2],[35,15,2],[20,6,1],[16,10,3],[33,11,1],[20,8,4],[15,5,1],[33,5,2],[460,6,1],[132,2,1],[73,14,3],[34,5,1],[123,1,2],[15,8,1],[30,1,1],[16,1,1],[73,3,1],[54,4,1],[17,1,9],[17,17,3],[22,1,3],[46,16,8],[18,1,1],[22,3,2],[21,4,1],[40,5,1],[19,2,1],[16,11,1],[19,4,1],[26,4,1],[87,1,3],[75,1,8],[25,1,1],[16,1,1],[17,10,3],[15,44,2],[79,3,1],[21,19,1],[292,5,13],[27,4,1],[25,2,1],[23,34,1],[36,2,1],[15,2,7],[18,3,3],[62,1,7],[16,61,5],[15,5,1],[36,5,1],[67,8,3],[18,4,1],[23,2,1],[16,21,3],[32,7,1],[22,6,1],[88,5,1],[19,2,4],[38,2,1],[47,6,28],[18,35,3],[159,15,1],[25,3,5],[295,9,4],[26,2,1],[27,8,3],[86,6,1],[24,25,4],[18,1,2],[16,6,1],[64,16,1],[39,1,2],[30,1,4],[44,1,3],[82,11,4],[28,13,2],[46,19,1],[15,26,1],[30,6,11],[51,3,6],[19,20,1],[940,6,4],[21,6,1],[29,2,1],[20,2,1],[31,2,1],[21,2,3],[25,27,1],[26,2,1],[17,4,1],[64,7,1],[126,7,15],[18,8,1],[20,13,2],[16,7,2],[18,2,1],[19,4,5],[29,1,1],[80,12,2],[42,14,6],[107,2,1],[15,4,1],[48,16,1],[62,3,2],[15,13,1],[29,48,7],[25,4,1],[17,5,20],[19,7,3],[22,10,3],[58,15,3],[17,14,1],[121,2,2],[33,64,11],[16,15,2],[39,6,2],[25,69,7],[69,2,1],[41,6,2],[20,5,1],[42,22,4],[18,17,4],[16,14,3],[27,14,1],[20,1,1],[44,1,101],[33,9,1],[26,2,8],[30,24,3],[27,24,2],[34,7,1],[39,6,3],[20,2,3],[55,5,1],[22,22,2],[17,2,1],[55,3,1],[29,10,5],[60,12,2],[18,13,3],[93,3,2],[15,3,1],[26,5,5],[18,1,1],[17,16,2],[15,13,3],[22,12,1],[256,19,27],[18,7,8],[22,3,1],[35,3,4],[16,2,1],[19,6,2],[24,1
,1],[29,3,2],[36,21,8],[24,1,1],[18,6,2],[26,24,11],[19,15,2],[16,1,1],[28,4,1],[60,11,1],[62,4,2],[70,2,1],[75,1,2],[125,3,1],[21,6,1],[165,23,2],[108,1,1],[35,5,1],[251,19,12],[137,4,1],[81,11,4],[104,19,4],[18,18,3],[19,13,1],[18,112,5],[19,6,2],[28,7,2],[23,9,1],[20,15,7],[34,1,1],[24,12,3],[15,5,1],[40,9,4],[24,41,6],[35,1,1],[17,3,1],[17,3,4],[46,7,2],[21,8,10],[17,7,4],[36,6,1],[32,6,2],[31,1,1],[17,32,5],[26,3,4],[16,4,1],[21,2,1],[19,4,1],[33,4,1],[46,7,1],[28,9,1],[169,9,24],[24,18,2],[103,6,1],[93,1,1],[156,2,1],[58,7,1],[55,30,3],[15,5,1],[20,9,1],[19,20,1],[44,1,3],[16,2,1],[23,4,1],[22,10,1],[16,138,5],[17,2,1],[17,1,2],[70,8,5],[15,3,6],[22,6,1],[20,1,1],[35,2,4],[15,3,1],[26,119,46],[390,18,2],[22,4,1],[175,5,2],[23,4,1],[26,2,21],[17,1,2],[112,4,1],[18,22,5],[22,2,1],[122,13,1],[18,1,1],[27,7,1],[26,18,5],[18,1,3],[28,1,15],[35,11,1],[15,2,1],[55,6,5],[67,3,1],[30,5,7],[31,12,1],[16,9,12],[43,7,1],[23,21,1],[43,2,7],[53,40,1],[58,6,1],[29,27,11],[65,6,2],[27,4,2],[15,7,2],[17,26,13],[48,4,79],[30,2,6],[25,1,1],[20,20,6],[59,2,5],[15,14,4],[18,7,1],[18,2,1],[28,7,1],[35,1,1],[15,12,4],[52,2,2],[16,25,1],[91,1,1],[27,7,3],[62,4,1],[29,11,1],[25,4,3],[15,1,1],[40,6,2],[19,2,2],[24,14,2],[33,5,1],[58,3,3],[23,1,4],[15,2,2],[92,5,1],[17,2,1],[16,10,1],[50,8,1],[24,2,1],[73,1,1],[30,33,55],[18,15,1],[15,9,4],[23,1,3],[17,5,1],[43,3,1],[15,9,2],[19,4,2],[20,20,4],[31,1,2],[21,3,1],[79,9,13],[20,3,24],[56,2,1],[26,1,2],[15,3,1],[30,12,1],[64,6,1],[327,8,47],[39,2,1],[22,17,5],[18,6,3],[74,14,2],[17,4,1],[39,1,3],[520,9,3],[65,9,1],[36,1,4],[264,3,3],[16,1,1],[18,5,3],[22,16,3],[21,2,1],[15,3,3],[49,5,1],[37,19,2],[19,13,2],[30,1,1],[44,4,1],[19,9,31],[22,4,2],[21,4,5],[16,4,1],[40,17,1],[15,12,4],[43,4,3],[21,30,1],[60,16,3],[28,2,1],[38,16,2],[19,3,1],[68,18,4],[1,4,3],[1,9,1],[1,2,2],[1,1,4],[1,148,4],[1,6,1],[1,16,1],[1,4,1],[1,19,3],[1,7,3],[1,2,2],[1,4,2],[1,47,5],[1,2,2],[1,1,4],[1,1,2],[1,1,2],[1,1,1],[1,4,2],[1,7,1],[1,4,6],[1,2,1],[1,5,4],[1,9,3],[1,9,2],[1,7,1],[1,4,1],[1,10,2],[1,1,1],[1,5,1],[1,5,1],[1,2,16],[1,2,1],[1,1,1],[1,3,2],[1,8,3],[1,1,18],[1,5,1],[1,14,3],[1,6,6],[1,7,1],[1,1,1],[1,16,1],[1,2,1],[1,2,1],[1,1,2],[1,4,4],[1,4,1],[1,9,1],[1,25,7],[1,1,1],[1,8,2],[1,1,4],[1,77,8],[1,1,3],[1,6,3],[1,4,2],[1,2,2],[1,2,1],[1,40,1],[1,26,3],[1,1,4],[1,1,1],[1,2,2],[1,1,2],[1,15,1],[1,35,86],[1,3,2],[1,4,1],[1,2,1],[1,4,3],[1,30,1],[1,2,1],[1,4,2],[1,2,1],[1,1,1],[1,2,1],[1,3,1],[1,2,3],[1,3,1],[1,14,1],[1,3,2],[1,7,4],[1,6,2],[1,2,1],[1,23,2],[1,4,1],[1,4,3],[1,26,3],[1,47,15],[1,3,5],[1,5,1],[1,3,1],[1,2,1],[1,2,1],[1,3,1],[1,36,1],[1,2,1],[1,1,9],[1,6,1],[1,2,1],[1,8,3],[1,7,1],[1,33,2],[1,14,4],[1,13,3],[1,2,1],[1,5,1],[1,7,2],[1,9,3],[1,6,1],[1,3,1],[1,9,1],[1,2,2],[1,2,1],[1,6,3],[1,4,2],[1,2,1],[1,1,1],[1,13,4],[1,9,2],[1,4,2],[1,7,14],[1,8,1],[1,3,1],[1,25,2],[1,2,1],[1,11,1],[1,2,1],[1,1,1],[1,3,3],[1,3,2],[1,2,1],[1,2,1],[1,2,8],[1,9,1],[1,13,9],[1,3,1],[1,8,1],[1,102,71],[1,22,1],[1,2,3],[1,22,2],[1,1,1],[1,3,1],[1,12,1],[1,3,2],[1,1,1],[1,5,2],[1,30,6],[1,14,1],[1,2,1],[1,1,1],[1,5,1],[1,8,1],[1,4,2],[1,3,1],[1,2,1],[1,1,1],[1,1,1],[1,12,1],[1,14,1],[1,10,2],[1,22,3],[1,15,2],[1,4,2],[1,5,1],[1,10,2],[1,10,26],[1,1,2],[1,1,2],[1,17,1],[1,1,1],[1,7,1],[1,1,1],[1,8,2],[1,5,2],[1,15,1],[1,16,2],[1,7,1],[1,26,1],[1,16,2],[1,13,6],[1,3,3],[1,2,1],[1,2,1],[1,5,3],[1,1,1],[1,4,1],[1,1,1],[1,2,2],[1,13,4],[1,50,2],[1,12,3],[1,2,1],[1,16,5],[1,2,8],[1,3,5],[1,1,1],[1,25,1],[1,5,1],[1,13,2],[1,1,2],[1,8,1],[1,13,1],[1,4,4],[1,2,3],[1,7,2],[1,2,4],[1,2,1],[1,1,2],[1,4
,1],[1,3,2],[1,8,4],[1,4,1],[1,2,2],[1,2,1],[1,3,1],[1,7,1],[1,8,5],[1,34,4],[1,2,3],[1,1,1],[1,8,3],[1,3,1],[1,26,2],[1,3,1],[1,1,6],[1,2,4],[1,7,1],[1,9,2],[1,3,93],[1,2,1],[1,3,2],[1,3,3],[1,15,3],[1,12,1],[1,1,1],[1,1,5],[1,4,1],[1,1,4],[1,2,1],[1,6,4],[1,9,1],[1,1,9],[1,11,1],[1,68,2],[1,7,1],[1,11,1],[1,6,1],[1,5,2],[1,2,1],[1,19,1],[1,3,1],[1,1,2],[1,37,1],[1,19,1],[1,4,5],[1,8,1],[1,1,1],[1,7,1],[1,3,1],[1,4,1],[1,6,7],[1,2,1],[1,14,3],[1,4,1],[1,6,5],[1,1,1],[1,1,1],[1,2,1],[1,1,2],[1,7,2],[1,8,1],[1,17,136],[1,6,1],[1,3,2],[1,9,12],[1,7,2],[1,2,9],[1,1,4],[1,3,1],[1,10,1],[1,6,16],[1,8,1],[1,2,2],[1,2,2],[1,4,3],[1,3,3],[1,24,3],[1,68,28],[1,16,1],[1,9,2],[1,1,2],[1,18,7],[1,3,1],[1,5,2],[1,1,3],[1,3,1],[1,3,8],[1,73,5],[1,6,3],[1,5,1],[1,2,1],[1,15,7],[1,80,2],[1,3,1],[1,12,3],[1,8,1],[1,2,1],[1,9,5],[1,3,2],[1,319,20],[1,2,1],[1,4,6],[1,5,4],[1,25,1],[1,8,1],[1,6,5],[1,18,1],[1,2,2],[1,5,2],[1,10,1],[1,10,1],[1,2,1],[1,6,2],[1,7,2],[1,39,1],[1,7,79],[1,28,4],[1,2,1],[1,4,1],[1,25,5],[1,23,3],[1,10,3],[1,2,1],[1,13,1],[1,2,2],[1,6,1],[1,6,4],[1,12,1],[1,4,1],[1,3,1],[1,10,1],[1,4,2],[1,7,1],[1,11,1],[1,6,1],[1,4,2],[1,3,3],[1,1,1],[1,1,1],[1,3,3],[1,3,2],[1,15,1],[1,1,1],[1,1,4],[1,26,2],[1,1,1],[1,7,1],[1,4,63],[1,1,19],[1,96,7],[1,7,2],[1,6,1],[1,4,1],[1,18,2],[1,1,2],[1,4,1],[1,3,3],[1,18,1],[1,3,1],[1,14,1],[1,6,2],[1,13,1],[1,1,5],[1,13,2],[1,1,1],[1,4,4],[1,10,1],[1,2,1],[1,12,3],[1,7,1],[1,8,1],[1,3,1],[1,2,2],[1,4,5],[1,9,1],[1,2,1],[1,2,1],[1,6,8],[1,32,3],[1,3,2],[1,6,1],[1,5,1],[1,7,1],[1,4,2],[1,2,1],[1,5,4],[1,1,2],[1,9,1],[1,2,1],[1,11,1],[1,5,2],[1,2,1],[1,1,1],[1,3,1],[1,7,13],[1,4,4],[1,1,1],[1,6,1],[1,1,3],[1,6,6],[1,6,1],[1,4,4],[1,10,1],[1,15,1],[1,3,7],[1,6,1],[1,9,1],[1,14,23],[1,14,2],[1,6,3],[1,2,1],[1,9,1],[1,1,3],[1,6,4],[1,15,2],[1,8,1],[1,6,6],[1,16,10],[1,5,4],[1,30,3],[1,7,1],[1,4,1],[1,3,1],[1,6,6],[1,1,2],[1,3,2],[1,1,1],[1,1,1],[1,1,1],[1,2,5],[1,2,1],[1,2,5],[1,24,1],[1,3,1],[1,6,1],[1,2,1],[1,4,1],[1,2,2],[1,4,1],[1,1,1],[1,3,1],[1,8,2],[1,4,2],[1,2,2],[1,2,1],[1,12,6],[1,2,1],[1,32,42],[1,7,1],[1,7,1],[1,12,1],[1,2,1],[1,6,1],[1,42,1],[1,2,1],[1,1,2],[1,2,1],[1,6,1],[1,2,2],[1,8,1],[1,22,4],[1,1,1],[1,11,20],[1,6,2],[1,2,1],[1,4,2],[1,9,1],[1,10,1],[1,16,5],[1,3,2],[1,8,1],[1,6,3],[1,1,2],[1,6,1],[1,2,1],[1,28,1],[1,18,1],[1,17,8],[1,4,1],[1,2,2],[1,13,1],[1,25,3],[1,7,4],[1,3,1],[1,1,1],[1,3,3],[1,4,1],[1,7,5],[1,2,2],[1,5,1],[1,2,2],[1,2,2],[1,14,1],[1,3,3],[1,4,1],[1,1,2],[1,11,1],[1,2,1],[1,6,1],[1,7,6],[1,7,1],[1,2,2],[1,2,1],[1,31,4],[1,4,3],[1,14,6],[1,4,4],[1,1,1],[1,2,1],[1,12,5],[1,4,1],[1,7,1],[1,3,1],[1,4,1],[1,11,1],[1,12,1],[1,3,2],[1,9,1],[1,17,2],[1,9,5],[1,6,1],[1,13,2],[1,5,1],[1,4,3],[1,3,1],[1,1,4],[1,7,1],[1,4,1],[1,3,1],[1,56,3],[1,1,1],[1,9,1],[1,4,1],[1,15,1],[1,2,1],[1,12,1],[1,4,2],[1,1,1],[1,1,1],[1,149,2],[1,56,1],[1,4,5],[1,2,2],[1,11,3],[1,2,3],[1,1,2],[1,2,1],[1,15,4],[1,2,2],[1,4,1],[1,17,2],[1,10,5],[1,14,2],[1,8,2],[1,4,2],[1,4,1],[1,6,1],[1,5,1],[1,7,2],[1,20,5],[1,3,1],[1,4,1],[1,11,1],[1,2,1],[1,1,3],[1,5,2],[1,6,1],[1,4,3],[1,4,3],[1,4,2],[1,7,3],[1,5,1],[1,1,1],[1,2,1],[1,8,1],[1,7,1],[1,2,1],[1,1,1],[1,1,1],[1,4,3],[1,11,1],[1,43,1],[1,7,8],[1,8,1],[1,1,1],[1,8,6],[1,9,3],[1,19,1],[1,2,1],[1,43,3],[1,4,5],[1,2,3],[1,4,1],[1,17,1],[1,9,1],[1,8,72],[1,2,1],[1,4,2],[1,16,1],[1,15,1],[1,8,1],[1,3,1],[1,7,8],[1,4,1],[1,23,2],[1,1,2],[1,1,1],[1,15,7],[1,7,4],[1,3,4],[1,5,1],[1,1,1],[1,6,83],[1,1,1],[1,4,3],[1,2,1],[1,3,2],[1,9,2],[1,5,1],[1,22,1],[1,3,6],[1,6,4],[1,4,1],[1,1,4],[1,1,1],[1,5,3],[1,1,2],[1,15,2],
[1,8,1],[1,5,2],[1,1,1],[1,4,10],[1,63,1],[1,2,2],[1,2,1],[1,9,1],[1,4,3],[1,2,1],[1,24,1],[1,2,2],[1,2,2],[1,6,2],[1,13,5],[1,34,5],[1,10,1],[1,3,1],[1,22,9],[1,41,1],[1,1,4],[1,13,2],[1,18,1],[1,4,4],[1,7,1],[1,4,3],[1,14,4],[1,3,2],[1,2,1],[1,7,10],[1,15,3],[1,6,1],[1,1,1],[1,2,5],[1,4,10],[1,5,2],[1,12,6],[1,6,1],[1,19,134],[1,11,1],[1,233,9],[1,4,2],[1,40,1],[1,2,1],[1,10,1],[1,3,1],[1,3,1],[1,3,1],[1,35,1],[1,2,7],[1,1,3],[1,3,1],[1,14,2],[1,1,1],[1,7,1],[1,6,5],[1,10,1],[1,5,3],[1,8,1],[1,11,1],[1,13,1],[1,8,9],[1,5,1],[1,3,1],[1,11,1],[1,2,1],[1,5,1],[1,7,1],[1,9,3],[1,2,3],[1,2,2],[1,29,2],[1,2,1],[1,4,3],[1,1,2],[1,2,2],[1,3,6],[1,11,1],[1,1,1],[1,11,1],[1,4,1],[1,6,1],[1,3,5],[1,4,1],[1,4,3],[1,34,1],[1,4,2],[1,1,9],[1,18,1],[1,9,3],[1,15,1],[1,4,4],[1,4,2],[1,9,1],[1,4,1],[1,10,1],[1,2,1],[1,2,4],[1,4,1],[1,1,2],[1,3,3],[1,2,1],[1,47,14],[1,3,1],[1,2,1],[1,3,1],[1,1,1],[1,20,1],[1,14,6],[1,2,2],[1,16,2],[1,2,1],[1,1,31],[1,5,9],[1,10,2],[1,10,3],[1,19,1],[1,1,1],[1,13,2],[1,5,1],[1,1,2],[1,1,2],[1,24,1],[1,9,2],[1,4,1],[1,10,3],[1,35,6],[1,1,1],[1,2,1],[1,1,1],[1,3,1],[1,4,5],[1,4,1],[1,1,1],[1,4,1],[1,10,2],[1,55,6],[1,3,22],[1,28,4],[1,6,3],[1,10,1],[1,6,187],[1,3,2],[1,12,5],[1,7,1],[1,4,1],[1,2,2],[1,2,1],[1,31,9],[1,2,8],[1,20,2],[1,36,2],[1,2,2],[1,15,5],[1,5,2],[1,3,2],[1,8,1],[1,1,1],[1,2,1],[1,37,1],[1,17,4],[1,8,1],[1,19,2],[1,7,1],[1,1,1],[1,1,1],[1,2,1],[1,9,1],[1,2,1],[1,2,1],[1,2,1],[1,19,1],[1,33,3],[1,4,1],[1,7,1],[1,3,1],[1,46,4],[1,2,1],[1,3,2],[1,1,2],[1,2,2],[1,14,1],[1,3,1],[1,11,2],[1,2,2],[1,21,2],[1,34,2],[1,4,1],[1,1,1],[1,2,1],[1,22,1],[1,64,9],[1,21,10],[1,3,3],[1,6,1],[1,16,2],[1,3,1],[1,31,4],[1,1,1],[1,1,2],[1,1,1],[1,3,1],[1,5,4],[1,27,1],[1,1,1],[1,2,2],[1,17,10],[1,4,1],[1,25,1],[1,41,1],[1,18,4],[1,17,40],[1,9,1],[1,2,1],[1,7,1],[1,21,2],[1,2,3],[1,3,1],[1,14,1],[1,8,2],[1,2,1],[1,2,2],[1,5,1],[1,1,2],[1,4,1],[1,6,5],[1,9,17],[1,5,1],[1,6,1],[1,4,1],[1,1,1],[1,3,1],[1,61,9],[1,6,1],[1,9,2],[1,2,2],[1,9,1],[1,7,4],[1,12,1],[1,2,2],[1,40,1],[1,17,13],[1,1,7],[1,11,2],[1,20,2],[1,2,1],[1,1,1],[1,12,10],[1,5,3],[1,2,1],[1,1,1],[1,23,2],[1,9,3],[1,4,1],[1,5,2],[1,4,1],[1,19,5],[1,5,1],[1,1,4],[1,5,1],[1,8,1],[1,9,1],[1,5,3],[1,43,3],[1,1,2],[1,3,1],[1,2,2],[1,15,38],[1,3,1],[1,25,1],[1,1,4],[1,5,6],[1,2,1],[1,4,3],[1,4,2],[1,3,1],[1,9,1],[1,4,1],[1,13,2],[1,7,4],[1,2,6],[1,12,1],[1,8,3],[1,1,4],[1,13,1],[1,3,4],[1,3,2],[1,2,2],[1,4,1],[1,6,1],[1,14,3],[1,7,1],[1,8,1],[1,8,1],[1,3,1],[1,32,5],[1,16,2],[1,2,3],[1,38,1],[1,5,4],[1,10,2],[1,2,7],[1,3,1],[1,8,1],[1,3,2],[1,1,3],[1,4,2],[1,71,12],[1,8,4],[1,2,12],[1,3,1],[1,12,2],[1,2,1],[1,5,1],[1,2,28],[1,19,5],[1,10,1],[1,9,2],[1,3,1],[1,7,6],[1,11,1],[1,2,1],[1,27,2],[1,7,4],[1,4,2],[1,12,8],[1,8,96],[1,12,1],[1,2,4],[1,7,5],[1,15,3],[1,3,2],[1,18,2],[1,25,3],[1,7,2],[1,18,2],[1,6,1],[1,10,2],[1,4,1],[1,1,3],[1,5,1],[1,19,2],[1,8,1],[1,50,4],[1,8,1],[1,11,1],[1,9,1],[1,2,1],[1,2,5],[1,3,1],[1,6,2],[1,1,1],[1,13,5],[1,19,1],[1,7,2],[1,17,1],[1,6,1],[1,4,1],[1,7,3],[1,13,3],[1,7,4],[1,5,2],[1,4,1],[1,11,16],[1,7,1],[1,1,1],[1,2,1],[1,2,1],[1,14,3],[1,30,1],[1,2,6],[1,6,2],[1,3,1],[1,4,1],[1,9,11],[1,6,1],[1,35,1],[1,2,8],[1,1,2],[1,3,2],[1,1,1],[1,9,1],[1,2,57],[1,2,1],[1,5,1],[1,4,2],[1,15,1],[1,12,3],[1,4,3],[1,17,1],[1,12,2],[1,21,12],[1,2,1],[1,9,1],[1,9,47],[1,49,4],[1,5,1],[1,4,1],[1,24,1],[1,2,2],[1,64,2],[1,48,7],[1,2,2],[1,10,2],[1,3,1],[1,11,1],[1,5,1],[1,1,2],[1,2,4],[1,6,1],[1,19,6],[1,6,2],[1,3,2],[1,1,1],[1,22,2],[1,3,2],[1,5,14],[1,2,1],[1,11,1],[1,4,2],[1,6,1],[1,24,10],[1,7,1],[1,2,74],[
1,6,1],[1,28,1],[1,1,1],[1,1,1],[1,10,1],[1,88,4],[1,9,4],[1,26,1],[1,3,1],[1,4,1],[1,4,1],[1,6,1],[1,23,1],[1,2,7],[1,1,3],[1,7,1],[1,1,1],[1,5,2],[1,4,1],[1,2,1],[1,1,1],[1,15,5],[1,22,1],[1,6,3],[1,12,2],[1,48,14],[1,7,1],[1,5,1],[1,10,5],[1,5,1],[1,6,5],[1,2,3],[1,14,3],[1,3,1],[1,8,4],[1,2,5],[1,34,3],[1,2,1],[1,4,1],[1,6,7],[1,3,1],[1,3,3],[1,32,2],[1,3,1],[1,3,1],[1,2,1],[1,3,1],[1,39,8],[1,1,1],[1,15,8],[1,3,4],[1,2,3],[1,1,3],[1,38,18],[1,6,1],[1,25,4],[1,2,1],[1,8,1],[1,3,1],[1,24,1],[1,5,5],[1,5,4],[1,2,3],[1,2,1],[1,5,4],[1,51,1],[1,23,3],[1,2,1],[1,2,1],[1,1,2],[1,7,2],[1,3,1],[1,1,1],[1,4,1],[1,2,1],[1,7,6],[1,8,1],[1,11,1],[1,2,6],[1,2,1],[1,2,1],[1,1,1],[1,26,1],[1,3,1],[1,2,1],[1,2,1],[1,2,1],[1,12,2],[1,1,3],[1,3,1],[1,2,4],[1,19,3],[1,3,1],[1,3,2],[1,49,3],[1,2,1],[1,21,3],[1,1,1],[1,5,1],[1,4,1],[1,2,2],[1,2,1],[1,1,1],[1,7,4],[1,2,1],[1,2,1],[1,2,1],[1,3,2],[1,26,2],[1,9,1],[1,2,2],[1,12,1],[1,4,32],[1,4,1],[1,17,1],[1,1,2],[1,77,4],[1,2,1],[1,12,1],[1,2,1],[1,2,4],[1,5,2],[1,10,3],[1,4,3],[1,2,1],[1,1,3],[1,16,4],[1,3,1],[1,40,2],[1,13,1],[1,2,1],[1,6,2],[1,12,2],[1,6,11],[1,6,1],[1,1,1],[1,10,6],[1,1,1],[1,6,5],[1,38,4],[1,2,7],[1,9,1],[1,5,2],[1,3,1],[1,2,1],[1,5,2],[1,4,1],[1,1,1],[1,1,1],[1,4,2],[1,4,3],[1,5,2],[1,1,4],[1,11,4],[1,14,4],[1,4,1],[1,17,2],[1,2,2],[1,39,1],[1,9,21],[1,14,2],[1,4,4],[1,4,3],[1,9,2],[1,1,1],[1,3,2],[1,1,1],[1,1,7],[1,16,4],[1,5,1],[1,2,1],[1,2,1],[1,2,1],[1,98,19],[1,4,1],[1,1,1],[1,5,1],[1,7,1],[1,1,3],[1,9,1],[1,4,2],[1,2,1],[1,7,2],[1,2,1],[1,1,2],[1,1,1],[1,5,2],[1,6,1],[1,11,6],[1,5,4],[1,40,5],[1,1,2],[1,9,1],[1,2,1],[1,6,1],[1,5,1],[1,11,2],[1,4,1],[1,3,17],[1,1,1],[1,1,5],[1,9,5],[1,60,1],[1,3,7],[1,3,4],[1,5,1],[1,3,10],[1,5,2],[1,7,1],[1,2,1],[1,14,14],[1,4,3],[1,1,2],[1,2,4],[1,5,1],[1,11,7],[1,3,1],[1,29,3],[1,2,4],[1,8,1],[1,53,1],[1,10,1],[1,7,2],[1,2,13],[1,58,1],[1,5,6],[1,2,1],[1,4,2],[1,4,2],[1,4,2],[1,5,2],[1,2,3],[1,12,2],[1,4,6],[1,34,1],[1,1,1],[1,8,1],[1,4,1],[1,2,1],[1,2,2],[1,16,1],[1,4,2],[1,3,13],[1,2,2],[1,46,2],[1,4,1],[1,6,1],[1,1,2],[1,2,1],[1,3,6],[1,3,1],[1,19,1],[1,2,1],[1,23,1],[1,3,1],[1,1,1],[1,7,2],[1,4,4],[1,18,3],[1,1,1],[1,7,2],[1,2,2],[1,7,1],[1,2,1],[1,2,1],[1,6,1],[1,9,4],[1,3,1],[1,5,1],[1,13,1],[1,2,2],[1,33,1],[1,12,1],[1,9,3],[1,2,1],[1,1,1],[1,18,1],[1,1,3],[1,3,15],[1,2,4],[1,17,1],[1,1,1],[1,1,1],[1,4,8],[1,1,2],[1,31,19],[1,1,5],[1,7,6],[1,12,4],[1,2,4],[1,7,8],[1,4,2],[1,13,2],[1,19,18],[1,42,4],[1,3,1],[1,17,1],[1,3,3],[1,4,2],[1,12,1],[1,1,6],[1,23,2],[1,3,1],[1,20,1],[1,21,4],[1,1,1],[1,3,2],[1,10,1],[1,9,1],[1,8,6],[1,21,3],[1,5,1],[1,7,6],[1,2,1],[1,5,1],[1,1,2],[1,11,1],[1,8,212],[1,9,3],[1,6,1],[1,1,2],[1,25,12],[1,4,1],[1,14,15],[1,4,1],[1,13,1],[1,2,2],[1,3,1],[1,4,1],[1,3,1],[1,1,1],[1,3,1],[1,9,7],[1,1,1],[1,6,1],[1,8,2],[1,8,1],[1,2,3],[1,3,1],[1,2,3],[1,1,2],[1,10,1],[1,6,1],[1,12,3],[1,12,1],[1,1,1],[1,2,1],[1,2,4],[1,4,1],[1,2,1],[1,1,1],[1,4,1],[1,23,2],[1,4,2],[1,20,1],[1,17,4],[1,8,2],[1,4,6],[1,4,1],[1,6,1],[1,10,1],[1,6,2],[1,1,1],[1,3,1],[1,4,1],[1,4,1],[1,16,143],[1,7,1],[1,10,1],[1,7,2],[1,3,3],[1,8,3],[1,2,1],[1,49,1],[1,2,7],[1,14,4],[1,31,3],[1,29,1],[1,31,8],[1,5,2],[1,7,1],[1,1,1],[1,4,5],[1,1,1],[1,7,3],[1,1,2],[1,5,3],[1,3,1],[1,7,4],[1,129,9],[1,13,1],[1,11,4],[1,6,28],[1,6,1],[1,6,1],[1,20,1],[1,2,1],[1,16,3],[1,3,3],[1,5,1],[1,64,1],[1,4,2],[1,7,1],[1,21,3],[1,2,2],[1,9,1],[1,2,1],[1,5,6],[1,6,6],[1,3,1],[1,5,1],[1,3,1],[1,3,1],[1,6,2],[1,2,3],[1,4,1],[1,1,1],[1,12,37],[1,6,1],[1,1,1],[1,4,2],[1,4,8],[1,6,2],[1,2,2],[1,19,1],[1,1,1],[1,1,3],[1,3,1],[1,
4,5],[1,15,2],[1,8,3],[1,1,1],[1,2,2],[1,3,1],[1,10,1],[1,4,1],[1,1,2],[1,19,1],[1,5,2],[1,4,4],[1,3,2],[1,3,17],[1,1,1],[1,1,1],[1,2,1],[1,18,3],[1,3,1],[1,16,4],[1,5,1],[1,11,2],[1,19,8],[1,2,1],[1,2,1],[1,1,6],[1,3,1],[1,2,1],[1,1,1],[1,2,1],[1,11,3],[1,17,4],[1,4,1],[1,4,4],[1,5,2],[1,1,1],[1,1,2],[1,10,12],[1,2,2],[1,8,1],[1,1,2],[1,8,1],[1,17,2],[1,2,1],[1,4,1],[1,6,1],[1,20,21],[1,5,7],[1,3,1],[1,13,2],[1,3,6],[1,8,3],[1,12,1],[1,12,2],[1,3,2],[1,15,2],[1,6,1],[1,9,5],[1,5,3],[1,4,1],[1,7,4],[1,4,4],[1,9,4],[1,11,1],[1,3,1],[1,17,1],[1,71,5],[1,7,1],[1,3,1],[1,5,1],[1,1,1],[1,1,2],[1,2,1],[1,1,2],[1,10,2],[1,3,1],[1,2,2],[1,5,1],[1,28,4],[1,2,1],[1,1,1],[1,9,1],[1,3,2],[1,8,2],[1,13,1],[1,2,1],[1,6,1],[1,25,79],[1,30,24],[1,10,31],[1,5,1],[1,9,1],[1,1,1],[1,4,1],[1,118,14],[1,18,3],[1,30,1],[1,10,3],[1,5,1],[1,5,1],[1,1,1],[1,6,1],[1,9,3],[1,6,2],[1,5,1],[1,2,2],[1,3,1],[1,7,4],[1,8,2],[1,10,2],[1,1,8],[1,41,1],[1,21,4],[1,6,1],[1,13,3],[1,5,1],[1,34,7],[1,22,1],[1,9,8],[1,5,3],[1,11,1],[1,2,1],[1,6,1],[1,4,1],[1,72,1],[1,44,3],[1,2,1],[1,1,1],[1,3,1],[1,8,2],[1,1,3],[1,14,1],[1,3,2],[1,1,1],[1,9,2],[1,17,1],[1,9,35],[1,3,1],[1,6,1],[1,2,11],[1,5,3],[1,1,1],[1,2,1],[1,14,7],[1,51,44],[1,3,6],[1,1,1],[1,6,2],[1,2,1],[1,11,2],[1,8,3],[1,3,2],[1,3,3],[1,4,1],[1,2,1],[1,5,1],[1,8,5],[1,60,1],[1,6,3],[1,36,2],[1,1,1],[1,2,1],[1,10,2],[1,26,2],[1,7,3],[1,6,1],[1,6,2],[1,3,3],[1,2,3],[1,6,2],[1,2,2],[1,2,2],[1,5,2],[1,2,1],[1,15,5],[1,1,2],[1,1,3],[1,37,24],[1,8,2],[1,17,2],[1,31,1],[1,14,2],[1,2,1],[1,16,2],[1,3,1],[1,2,2],[1,1,2],[1,2,3],[1,4,2],[1,1,1],[1,9,5],[1,1,2],[1,1,4],[1,4,18],[1,6,1],[1,12,1],[1,3,85],[1,17,2],[1,4,1],[1,7,1],[1,4,1],[1,3,1],[1,22,2],[1,1,1],[1,15,27],[1,4,1],[1,1,1],[1,1,3],[1,3,1],[1,35,2],[1,1,1],[1,33,4],[1,2,1],[1,3,3],[1,6,1],[1,9,1],[1,8,1],[1,6,1],[1,16,2],[1,20,2],[1,5,1],[1,1,5],[1,2,2],[1,12,25],[1,6,1],[1,13,1],[1,2,1],[1,2,1],[1,10,1],[1,2,1],[1,37,3],[1,2,1],[1,58,11],[1,14,3],[1,6,1],[1,6,1],[1,1,3],[1,1,1],[1,9,2],[1,1,502],[1,45,5],[1,5,1],[1,4,1],[1,2,8],[1,5,1],[1,1,1],[1,7,1],[1,4,1],[1,3,4],[1,1,1],[1,10,1],[1,9,1],[1,13,1],[1,10,8],[1,4,4],[1,7,1],[1,1,2],[1,2,2],[1,9,2],[1,13,2],[1,8,1],[1,1,1],[1,2,4],[1,29,1],[1,8,2],[1,7,3],[1,30,7],[1,1,1],[1,10,10],[1,3,1],[1,1,1],[1,5,1],[1,4,3],[1,7,1],[1,43,8],[1,1,2],[1,9,1],[1,1,1],[1,3,6],[1,9,1],[1,1,1],[1,7,1],[1,6,1],[1,2,2],[1,13,4],[1,13,3],[1,2,3],[1,8,1],[1,11,2],[1,9,53],[1,2,1],[1,16,1],[1,6,3],[1,48,3],[1,4,1],[1,7,3],[1,2,2],[1,8,1],[1,8,1],[1,26,2],[1,3,1],[1,8,2],[1,121,2],[1,2,2],[1,8,1],[1,2,2],[1,4,2],[1,8,1],[1,1,1],[1,4,1],[1,3,3],[1,7,1],[1,7,2],[1,2,1],[1,8,2],[1,34,28],[1,3,2],[1,3,1],[1,5,1],[1,9,1],[1,7,1],[1,14,4],[1,1,1],[1,34,4],[1,1,1],[1,6,1],[1,3,1],[1,2,1],[1,4,1],[1,5,2],[1,10,1],[1,41,5],[1,7,2],[1,19,4],[1,3,3],[1,12,3],[1,7,1],[1,4,2],[1,16,1],[1,3,1],[1,8,4],[1,9,2],[1,8,2],[1,2,1],[1,10,2],[1,8,1],[1,16,2],[1,7,2],[1,5,1],[1,2,3],[1,15,4],[1,3,5],[1,4,4],[1,1,1],[1,3,2],[1,5,1],[1,8,4],[1,4,1],[1,41,7],[1,2,1],[1,1,3],[1,1,6],[1,2,1],[1,10,2],[1,10,2],[1,3,3],[1,39,4],[1,1,2],[1,5,7],[1,12,2],[1,15,5],[1,4,1],[1,13,1],[1,3,1],[1,44,3],[1,1,2],[1,1,1],[1,6,1],[1,3,1],[1,3,2],[1,7,15],[1,1,1],[1,11,4],[1,3,1],[1,1,3],[1,1,1],[1,2,1],[1,9,4],[1,22,1],[1,46,2],[1,3,18],[1,22,8],[1,3,1],[1,4,10],[1,12,16],[1,2,1],[1,8,3],[1,1,1],[1,2,4],[1,1,1],[1,6,4],[1,7,1],[1,7,4],[1,14,4],[1,1,1],[1,13,2],[1,61,1],[1,6,2],[1,16,1],[1,14,7],[1,9,2],[1,18,2],[1,9,3],[1,1,2],[1,4,1],[1,6,1],[1,6,4],[1,10,1],[1,5,2],[1,7,1],[1,3,1],[1,11,2],[1,53,1],[1,10,2],[1,17,1],[1,2,2],[1,5,
14],[1,17,1],[1,2,1],[1,5,1],[1,28,2],[1,8,2],[1,4,1],[1,4,2],[1,21,1],[1,3,1],[1,3,2],[1,5,2],[1,5,1],[1,3,13],[1,13,2],[1,124,753],[1,2,2],[1,43,1],[1,6,1],[1,2,2],[1,11,1],[1,22,1],[1,5,2],[1,5,1],[1,8,1],[1,2,4],[1,2,2],[1,9,1],[1,6,1],[1,2,1],[1,6,1],[1,14,3],[1,21,1],[1,3,4],[1,3,3],[1,3,1],[1,2,2],[1,2,2],[1,5,2],[1,11,1],[1,6,1],[1,3,1],[1,64,1],[1,6,1],[1,2,12],[1,5,1],[1,6,4],[1,10,1],[1,14,1],[1,14,1],[1,2,1],[1,2,1],[1,8,4],[1,17,2],[1,5,3],[1,64,1],[1,33,3],[1,18,2],[1,1,1],[1,42,9],[1,20,2],[1,10,2],[1,2,2],[1,3,1],[1,13,1],[1,5,1],[1,39,5],[1,8,2],[1,6,1],[1,3,2],[1,12,1],[1,2,4],[1,8,1],[1,2,1],[1,4,5],[1,7,1],[1,2,1],[1,2,1],[1,5,2],[1,15,3],[1,6,1],[1,1,1],[1,11,2],[1,4,2],[1,1,1],[1,7,3],[1,7,2],[1,3,1],[1,3,1],[1,2,1],[1,8,3],[1,3,1],[1,7,12],[1,8,1],[1,4,2],[1,6,2],[1,9,1],[1,3,30],[1,8,3],[1,8,2],[1,8,1],[1,11,1],[1,13,1],[1,2,1],[1,16,1],[1,10,1],[1,3,1],[1,6,4],[1,29,2],[1,4,2],[1,4,1],[1,1,1],[1,7,1],[1,1,1],[1,4,11],[1,1,1],[1,6,1],[1,26,1],[1,3,1],[1,2,1],[1,10,1],[1,4,1],[1,14,2],[1,10,1],[1,5,2],[1,5,1],[1,2,1],[1,26,33],[1,1,1],[1,11,2],[1,8,5],[1,18,1],[1,2,1],[1,5,1],[1,4,2],[1,5,1],[1,11,2],[1,1,2],[1,2,2],[1,6,6],[1,10,1],[1,14,1],[1,2,1],[1,13,1],[1,14,1],[1,8,2],[1,21,2],[1,1,2],[1,1,1],[1,14,1],[1,2,1],[1,15,2],[1,4,1],[1,3,1],[1,10,2],[1,4,2],[1,5,1],[1,11,22],[1,8,3],[1,4,1],[1,3,2],[1,1,2],[1,25,3],[1,2,1],[1,11,2],[1,5,2],[1,39,1],[1,1,1],[1,415,128],[1,6,1],[1,5,1],[1,8,5],[1,2,3],[1,1,1],[1,1,1],[1,4,1],[1,2,4],[1,4,1],[1,2,9],[1,4,2],[1,23,3],[1,6,9],[1,5,4],[1,2,5],[1,1,1],[1,7,1],[1,3,7],[1,1,2],[1,2,16],[1,5,2],[1,1,3],[1,4,1],[1,11,1],[1,2,2],[1,2,1],[1,10,1],[1,6,2],[1,11,1],[1,28,1],[1,21,3],[1,3,2],[1,3,1],[1,4,1],[1,1,2],[1,7,1],[1,11,4],[1,4,2],[1,22,4],[1,1,1],[1,1,1],[1,12,7],[1,1,1],[1,4,2],[1,2,1],[1,6,4],[1,14,3],[1,8,2],[1,1,11],[1,13,2],[1,4,1],[1,3,2],[1,95,10],[1,1,2],[1,4,2],[1,27,2],[1,2,1],[1,19,1],[1,13,4],[1,1,1],[1,37,1],[1,4,1],[1,5,1],[1,7,5],[1,1,1],[1,4,5],[1,5,1],[1,1,1],[1,16,2],[1,22,1],[1,4,2],[1,24,4],[1,10,1],[1,77,6],[1,21,1],[1,11,1],[1,2,1],[1,1,1],[1,4,5],[1,2,4],[1,55,4],[1,17,1],[1,1,3],[1,2,2],[1,7,1],[1,17,1],[1,34,2],[1,4,1],[1,2,2],[1,1,2],[1,100,1],[1,17,2],[1,8,6],[1,11,2],[1,11,2],[1,3,1],[1,5,2],[1,1,1],[1,6,7],[1,15,5],[1,7,1],[1,4,1],[1,5,1],[1,6,2],[1,7,1],[1,2,2],[1,10,2],[1,17,1],[1,10,2],[1,6,3],[1,21,1],[1,2,1],[1,78,4],[1,6,1],[1,1,2],[1,5,1],[1,186,9],[1,16,3],[1,15,13],[1,30,4],[1,2,1],[1,15,3],[1,13,1],[1,3,1],[1,1,1],[1,2,2],[1,5,5],[1,7,1],[1,16,1],[1,2,1],[1,14,2],[1,11,5],[1,9,1],[1,13,2],[1,2,1],[1,4,64],[1,4,1],[1,18,4],[1,3,1],[1,1,1],[1,16,2],[1,4,1],[1,11,4],[1,9,3],[1,3,1],[1,4,1],[1,1,1],[1,10,3],[1,7,1],[1,13,1],[1,16,4],[1,1,16],[1,2,2],[1,18,6],[1,42,2],[1,1,3],[1,15,1],[1,3,1],[1,43,1],[1,1,1],[1,27,2],[1,1,3],[1,1,5],[1,13,1],[1,1,1],[1,10,11],[1,8,1],[1,9,1],[1,13,1],[1,1,2],[1,13,3],[1,1,1],[1,5,1],[1,14,2],[1,14,1],[1,13,1],[1,4,3],[1,25,1],[1,1,3],[1,3,3],[1,4,1],[1,1,1],[1,4,4],[1,15,1],[1,2,1],[1,1,1],[1,7,12],[1,68,2],[1,13,2],[1,2,1],[1,6,4],[1,46,6],[1,1,1],[1,2,2],[1,4,1],[1,2,1],[1,11,5],[1,1,1],[1,9,1],[1,9,1],[1,13,1],[1,4,1],[1,14,1],[1,42,9],[1,5,1],[1,4,1],[1,24,7],[1,7,1],[1,17,1],[1,2,1],[1,2,5],[1,3,6],[1,2,1],[1,15,4],[1,3,2],[1,33,2],[1,30,4],[1,27,4],[1,1,1],[1,14,4],[1,2,3],[1,26,7],[1,22,1],[1,2,2],[1,2,2],[1,166,3],[1,4,4],[1,9,1],[1,12,15],[1,2,6],[1,13,2],[1,4,3],[1,9,2],[1,2,3],[1,3,3],[1,9,2],[1,22,1],[1,5,3],[1,3,4],[1,2,3],[1,3,1],[1,23,1],[1,18,1],[1,6,1],[1,4,1],[1,9,3],[1,35,1],[1,73,2],[1,1,3],[1,31,5],[1,25,1],[1,3,4],[1,11,1],[1,9,4],[1,
2,1],[1,27,36],[1,23,5],[1,4,2],[1,1,2],[1,29,2],[1,3,2],[1,1,1],[1,4,1],[1,12,1],[1,36,16],[1,5,14],[1,19,1],[1,6,1],[1,6,1],[1,4,1],[1,6,1],[1,4,2],[1,9,7],[1,7,1],[1,30,4],[1,4,1],[1,18,3],[1,2,2],[1,3,1],[1,9,2],[1,2,2],[1,1,2],[1,1,2],[1,14,1],[1,3,1],[1,5,2],[1,10,1],[1,9,1],[1,10,3],[1,4,1],[1,2,1],[1,4,4],[1,2,1],[1,3,3],[1,39,2],[1,3,1],[1,1,3],[1,14,1],[1,2,4],[1,13,1],[1,4,6],[1,3,5],[1,5,4],[1,8,1],[1,131,1],[1,28,1],[1,5,1],[1,8,5],[1,2,9],[1,4,2],[1,5,1],[1,46,3],[1,7,3],[1,1,1],[1,7,3],[1,2,1],[1,4,1],[1,2,1],[1,2,1],[1,2,1],[1,4,6],[1,5,1],[1,9,3],[1,2,2],[1,9,1],[1,42,3],[1,11,3],[1,5,1],[1,1,2],[1,6,1],[1,37,51],[1,2,1],[1,4,3],[1,23,2],[1,1,15],[1,5,4],[1,1,4],[1,18,3],[1,12,3],[1,4,2],[1,4,1],[1,2,7],[1,2,6],[1,3,6],[1,6,1],[1,10,3],[1,4,2],[1,1,2],[1,4,1],[1,4,3],[1,1,3],[1,3,1],[1,6,2],[1,10,2],[1,6,4],[1,4,3],[1,7,2],[1,2,2],[1,4,1],[1,1,1],[1,4,5],[1,14,1],[1,20,4],[1,7,15],[1,18,2],[1,6,1],[1,1,1],[1,7,1],[1,5,2],[1,6,2],[1,4,1],[1,6,3],[1,2,1],[1,6,1],[1,4,1],[1,7,1],[1,7,4],[1,7,1],[1,1,1],[1,24,4],[1,2,2],[1,3,5],[1,8,1],[1,15,2],[1,5,1],[1,2,3],[1,2,2],[1,4,1],[1,6,1],[1,2,3],[1,11,1],[1,23,5],[1,2,2],[1,1,1],[1,8,1],[1,17,6],[1,1,1],[1,9,2],[1,1,1],[1,10,1],[1,5,1],[1,6,1],[1,6,1],[1,5,1],[1,2,6],[1,2,1],[1,9,1],[1,14,1],[1,18,8],[1,39,2],[1,13,1],[1,6,1],[1,6,2],[1,9,1],[1,14,1],[1,5,4],[1,26,2],[1,4,1],[1,7,2],[1,5,5],[1,2,1],[1,20,2],[1,14,1],[1,10,1],[1,4,1],[1,3,1],[1,10,2],[1,9,12],[1,4,4],[1,2,1],[1,4,1],[1,4,1],[1,2,1],[1,8,1],[1,2,4],[1,1,1],[1,33,2],[1,4,1],[1,5,1],[1,205,1],[1,2,1],[1,15,3],[1,5,1],[1,1,1],[1,1,1],[1,1,1],[1,13,1],[1,14,5],[1,6,4],[1,3,1],[1,7,5],[1,42,2],[1,11,1],[1,24,2],[1,11,2],[1,11,2],[1,12,1],[1,7,1],[1,1,1],[1,3,2],[1,21,1],[1,13,1],[1,2,1],[1,37,6],[1,8,4],[1,2,2],[1,2,2],[1,36,1],[1,8,1],[1,19,11],[1,19,7],[1,8,1],[1,18,2],[1,7,2],[1,8,1],[1,1,1],[1,4,1],[1,3,3],[1,10,1],[1,6,1],[1,4,1],[1,10,1],[1,25,1],[1,14,1],[1,14,3],[1,4,1],[1,2,1],[1,2,2],[1,4,2],[1,3,4],[1,62,11],[1,4,1],[1,39,3],[1,65,2],[1,3,1],[1,11,2],[1,4,1],[1,2,2],[1,1,1],[1,2,3],[1,2,1],[1,17,7],[1,7,4],[1,1,4],[1,62,3],[1,17,3],[1,26,3],[1,15,1],[1,2,1],[1,4,6],[1,1,2],[1,8,2],[1,16,2],[1,1,1],[1,7,2],[1,4,1],[1,1,1],[1,7,2],[1,8,2],[1,12,1],[1,1,2],[1,2,1],[1,2,1],[1,26,7],[1,2,1],[1,5,1],[1,5,1],[1,5,1],[1,1,1],[1,6,27],[1,5,4],[1,6,1],[1,8,1],[1,38,2],[1,26,2],[1,13,1],[1,20,2],[1,6,6],[1,2,2],[1,2,1],[1,16,2],[1,88,1],[1,4,1],[1,5,3],[1,1,4],[1,1,4],[1,12,2],[1,3,1],[1,3,1],[1,3,1],[1,2,3],[1,6,1],[1,2,4],[1,28,2],[1,17,3],[1,10,1],[1,51,3],[1,1,1],[1,15,4],[1,10,14],[1,1,3],[1,3,3],[1,1,1],[1,5,1],[1,3,1],[1,23,3],[1,10,1],[1,1,1],[1,21,6],[1,11,1],[1,8,1],[1,1,1],[1,2,1],[1,1,3],[1,26,1],[1,1,2],[1,4,1],[1,4,1],[1,6,1],[1,6,1],[1,2,2],[1,11,5],[1,15,2],[1,13,1],[1,2,2],[1,4,1],[1,4,1],[1,2,6],[1,13,3],[1,23,2],[1,18,2],[1,8,2],[1,1,1],[1,4,1],[1,7,1],[1,2,1],[1,8,6],[1,12,1],[1,23,4],[1,9,4],[1,2,2],[1,8,1],[1,7,2],[1,2,2],[1,2,4],[1,8,16],[1,22,3],[1,2,1],[1,2,4],[1,2,1],[1,9,2],[1,3,3],[1,4,1],[1,3,9],[1,3,1],[1,2,2],[1,2,3],[1,11,1],[1,5,1],[1,5,1],[1,2,2],[1,10,20],[1,2,2],[1,2,1],[1,3,3],[1,10,1],[1,2,3],[1,2,1],[1,5,1],[1,4,2],[1,8,1],[1,2,2],[1,6,1],[1,5,1],[1,9,1],[1,3,2],[1,1,1],[1,2,6],[1,1,1],[1,5,1],[1,2,1],[1,16,1],[1,6,1],[1,2,1],[1,2,1],[1,5,1],[1,9,1],[1,10,16],[1,4,1],[1,4,2],[1,5,2],[1,8,1],[1,16,2],[1,2,1],[1,5,1],[1,1,2],[1,55,2],[1,20,1],[1,11,1],[1,5,2],[1,13,1],[1,1,1],[1,10,6],[1,5,2],[1,21,1],[1,7,3],[1,5,1],[1,7,1],[1,3,1],[1,6,1],[1,46,3],[1,8,5],[1,5,1],[1,2,1],[1,2,6],[1,22,1],[1,42,1],[1,1,1],[1,4,2],[1,13,1],[1,3,3],[1,2
,2],[1,4,2],[1,1,3],[1,88,1],[1,24,4],[1,4,1],[1,3,1],[1,5,1],[1,17,6],[1,6,2],[1,20,3],[1,47,2],[1,2,7],[1,13,1],[1,1,3],[1,1,2],[1,2,2],[1,2,2],[1,4,3],[1,7,1],[1,3,1],[1,10,1],[1,2,1],[1,2,5],[1,1,2],[1,17,2],[1,12,4],[1,24,1],[1,3,1],[1,1,3],[1,6,1],[1,2,5],[1,3,1],[1,1,1],[1,13,2],[1,6,1],[1,2,1],[1,10,2],[1,4,1],[1,1,1],[1,18,7],[1,7,2],[1,8,1],[1,5,1],[1,2,1],[1,4,1],[1,2,2],[1,14,1],[1,13,1],[1,10,4],[1,4,4],[1,6,4],[1,4,1],[1,16,2],[1,8,2],[1,3,3],[1,3,1],[1,21,2],[1,7,1],[1,2,1],[1,2,1],[1,2,3],[1,4,1],[1,6,1],[1,28,1],[1,2,7],[1,3,1],[1,23,4],[1,2,1],[1,6,1],[1,2,1],[1,4,1],[1,3,2],[1,1,1],[1,9,2],[1,9,2],[1,2,1],[1,4,2],[1,10,1],[1,12,1],[1,4,2],[1,7,1],[1,2,2],[1,9,1],[1,16,5],[1,31,2],[1,16,2],[1,22,3],[1,2,1],[1,6,1],[1,1,1],[1,6,3],[1,14,2],[1,5,3],[1,81,3],[1,8,2],[1,1,1],[1,61,9],[1,1,4],[1,2,1],[1,11,3],[1,3,5],[1,3,6],[1,4,7],[1,1,2],[1,5,2],[1,2,1],[1,3,2],[1,9,5],[1,9,1],[1,1,3],[1,3,2],[1,13,3],[1,14,1],[1,15,6],[1,6,1],[1,2,1],[1,7,1],[1,2,1],[1,10,2],[1,2,2],[1,14,1],[1,2,2],[1,3,3],[1,3,1],[1,4,1],[1,59,2],[1,5,2],[1,4,2],[1,1,1],[1,2,1],[1,4,1],[1,2,2],[1,5,4],[1,4,1],[1,4,1],[1,10,3],[1,2,2],[1,2,3],[1,8,1],[1,2,1],[1,1,1],[1,18,1],[1,6,1],[1,12,3],[1,5,3],[1,3,1],[1,7,3],[1,10,2],[1,2,23],[1,1,12],[1,1,1],[1,32,3],[1,2,1],[1,4,1],[1,12,2],[1,4,1],[1,3,1],[1,5,1],[1,4,2],[1,4,1],[1,16,2],[1,1,1],[1,4,1],[1,7,1],[1,2,4],[1,8,1],[1,4,4],[1,1,1],[1,1,2],[1,6,3],[1,8,2],[1,23,15],[1,2,2],[1,2,1],[1,2,1],[1,11,1],[1,3,2],[1,9,2],[1,4,2],[1,2,3],[1,34,1],[1,7,1],[1,2,4],[1,65,2],[1,41,3],[1,1,2],[1,1,1],[1,6,1],[1,6,1],[1,7,1],[1,3,1],[1,14,9],[1,6,1],[1,6,5],[1,2,13],[1,5,2],[1,2,1],[1,4,1],[1,17,1],[1,5,1],[1,1,1],[1,3,2],[1,9,1],[1,1,4],[1,48,2],[1,7,1],[1,4,1],[1,3,1],[1,4,2],[1,118,3],[1,2,1],[1,2,4],[1,2,1],[1,12,13],[1,2,1],[1,4,2],[1,4,1],[1,6,1],[1,1,1],[1,7,2],[1,10,1],[1,21,5],[1,5,2],[1,9,1],[1,2,2],[1,1,1],[1,1,1],[1,1,1],[1,3,1],[1,1,1],[1,7,1],[1,83,9],[1,6,2],[1,7,2],[1,13,1],[1,4,2],[1,3,1],[1,8,2],[1,2,1],[1,10,3],[1,2,1],[1,2,1],[1,9,11],[1,2,1],[1,3,1],[1,17,1],[1,7,2],[1,8,2],[1,20,1],[1,2,1],[1,1,2],[1,8,1],[1,2,1],[1,6,1],[1,21,3],[1,1,2],[1,5,5],[1,2,1],[1,2,3],[1,2,1],[1,2,2],[1,16,1],[1,2,1],[1,2,1],[1,3,1],[1,17,1],[1,6,1],[1,4,15],[1,1,1],[1,11,1],[1,84,15],[1,31,3],[1,2,2],[1,8,1],[1,9,1],[1,2,3],[1,15,2],[1,4,1],[1,18,1],[1,3,1],[1,1,1],[1,2,4],[1,2,2],[1,2,1],[1,2,1],[1,25,1],[1,3,1],[1,141,13],[1,4,2],[1,2,2],[1,14,2],[1,7,1],[1,30,9],[1,17,1],[1,1,2],[1,6,1],[1,2,1],[1,2,1],[1,8,1],[1,2,1],[1,10,1],[1,6,3],[1,12,1],[1,68,1],[1,2,1],[1,10,2],[1,14,2],[1,26,9],[1,7,3],[1,3,3],[1,6,6],[1,3,1],[1,18,4],[1,3,1],[1,4,4],[1,2,1],[1,1,1],[1,37,8],[1,8,6],[1,2,1],[1,9,6],[1,5,2],[1,3,1],[1,3,2],[1,2,1],[1,3,1],[1,13,7],[1,9,1],[1,122,2],[1,2,1],[1,22,6],[1,11,2],[1,16,2],[1,28,46],[1,2,4],[1,7,1],[1,2,3],[1,2,6],[1,2,2],[1,1,2],[1,1,1],[1,5,1],[1,1,2],[1,3,2],[1,7,6],[1,11,1],[1,21,1],[1,40,6],[1,14,2],[1,21,1],[1,1,1],[1,14,2],[1,21,1],[1,2,1],[1,1,1],[1,1,2],[1,40,2],[1,4,2],[1,1,3],[1,1,1],[1,107,2],[1,4,6],[1,136,6],[1,5,1],[1,9,1],[1,24,3],[1,7,1],[1,10,5],[1,29,3],[1,12,2],[1,10,3],[1,5,3],[1,2,1],[1,59,1],[1,5,2],[1,13,2],[1,1,2],[1,50,2],[1,1,3],[1,2,3],[1,6,1],[1,4,2],[1,5,4],[1,3,2],[1,8,1],[1,4,2],[1,1,1],[1,17,1],[1,13,3],[1,2,1],[1,7,1],[1,3,1],[1,8,1],[1,1,1],[1,20,1],[1,4,4],[1,1,2],[1,2,1],[1,2,1],[1,2,2],[1,1,2],[1,13,2],[1,4,1],[1,4,1],[1,3,1],[1,2,1],[1,4,4],[1,13,5],[1,9,1],[1,8,1],[1,12,1],[1,15,3],[1,2,1],[1,2,2],[1,4,1],[1,2,2],[1,1,1],[1,3,1],[1,13,1],[1,4,1],[1,9,4],[1,3,2],[1,2,1],[1,4,4],[1,1,3],[1,15,1],[1,4,1],[1,2,1
],[1,3,1],[1,2,1],[1,3,6],[1,5,1],[1,7,10],[1,1,2],[1,6,2],[1,7,2],[1,3,1],[1,3,3],[1,6,1],[1,13,1],[1,22,3],[1,6,5],[1,6,1],[1,3,1],[1,3,1],[1,21,5],[1,11,2],[1,6,3],[1,38,4],[1,6,4],[1,4,1],[1,2,1],[1,5,5],[1,5,3],[1,40,1],[1,4,3],[1,8,1],[1,13,2],[1,4,2],[1,1,1],[1,9,9],[1,1,1],[1,12,2],[1,36,1],[1,2,1],[1,18,3],[1,28,1],[1,5,1],[1,20,4],[1,40,3],[1,3,1],[1,5,3],[1,2,1],[1,31,3],[1,6,1],[1,3,1],[1,1,5],[1,3,3],[1,36,1],[1,1,1],[1,22,2],[1,9,2],[1,2,4],[1,2,2],[1,4,4],[1,2,1],[1,6,1],[1,3,3],[1,5,1],[1,13,2],[1,4,1],[1,1,3],[1,1,1],[1,11,5],[1,4,1],[1,2,3],[1,26,1],[1,9,1],[1,6,1],[1,15,1],[1,23,5],[1,3,5],[1,4,3],[1,8,1],[1,9,4],[1,2,1],[1,7,1],[1,1,6],[1,4,1],[1,43,1],[1,2,3],[1,1,1],[1,15,4],[1,3,1],[1,1,1],[1,10,1],[1,79,1],[1,1,14],[1,2,1],[1,6,1],[1,1,1],[1,24,1],[1,2,3],[1,9,2],[1,2,3],[1,8,1],[1,115,15],[1,1,1],[1,1,2],[1,3,1],[1,9,24],[1,6,1],[1,3,6],[1,10,3],[1,3,1],[1,1,1],[1,3,2],[1,2,1],[1,11,1],[1,5,1],[1,1,1],[1,2,1],[1,3,1],[1,5,1],[1,11,1],[1,2,1],[1,7,7],[1,15,1],[1,6,2],[1,51,7],[1,2,1],[1,54,1],[1,5,1],[1,1,1],[1,7,5],[1,1,1],[1,4,1],[1,3,1],[1,22,4],[1,5,3],[1,5,1],[1,64,9],[1,6,1],[1,28,6],[1,5,1],[1,11,1],[1,2,2],[1,4,2],[1,1,4],[1,8,1],[1,1,5],[1,7,1],[1,2,1],[1,2,2],[1,8,1],[1,11,3],[1,8,3],[1,7,1],[1,10,5],[1,5,1],[1,98,5],[1,18,1],[1,1,1],[1,5,1],[1,2,2],[1,14,2],[1,3,1],[1,1,1],[1,11,3],[1,7,9],[1,5,3],[1,3,1],[1,3,3],[1,125,34],[1,1,1],[1,2,1],[1,6,2],[1,2,2],[1,11,7],[1,5,2],[1,5,5],[1,6,1],[1,10,2],[1,14,2],[1,4,3],[1,8,7],[1,2,3],[1,2,2],[1,13,1],[1,6,1],[1,10,5],[1,11,1],[1,4,2],[1,14,1],[1,1,6],[1,15,1],[1,1,3],[1,5,3],[1,7,1],[1,2,1],[1,1,3],[1,2,4],[1,3,1],[1,8,3],[1,2,3],[1,2,1],[1,2,2],[1,2,1],[1,4,1],[1,16,2],[1,1,2],[1,1,5],[1,7,1],[1,3,1],[1,2,1],[1,16,3],[1,4,1],[1,8,2],[1,16,6],[1,12,2],[1,84,26],[1,10,2],[1,2,2],[1,5,1],[1,1,1],[1,8,1],[1,4,1],[1,4,1],[1,4,2],[1,4,1],[1,4,10],[1,14,2],[1,4,2],[1,5,2],[1,19,1],[1,4,3],[1,8,2],[1,6,1],[1,2,5],[1,2,1],[1,16,4],[1,4,1],[1,2,2],[1,7,1],[1,4,2],[1,4,1],[1,8,1],[1,10,2],[1,3,2],[1,3,1],[1,10,2],[1,1,1],[1,12,3],[1,37,1],[1,10,1],[1,16,4],[1,1,1],[1,11,1],[1,4,1],[1,8,6],[1,3,2],[1,66,2],[1,14,1],[1,2,4],[1,2,2],[1,7,2],[1,24,2],[1,5,1],[1,1,1],[1,1,1],[1,3,1],[1,31,2],[1,24,1],[1,8,5],[1,8,2],[1,3,4],[1,64,1],[1,1,4],[1,4,47],[1,8,4],[1,25,1],[1,19,2],[1,4,1],[1,33,4],[1,16,2],[1,4,1],[1,1,1],[1,2,3],[1,27,1],[1,20,1],[1,10,3],[1,2,1],[1,2,1],[1,76,1],[1,2,1],[1,5,1],[1,2,2],[1,15,3],[1,40,2],[1,4,22],[1,2,2],[1,2,2],[1,10,1],[1,3,1],[1,55,4],[1,2,7],[1,7,1],[1,4,6],[1,2,1],[1,2,1],[1,28,1],[1,2,2],[1,6,2],[1,6,2],[1,4,15],[1,3,2],[1,1,1],[1,29,1],[1,13,1],[1,16,1],[1,4,1],[1,7,7],[1,3,3],[1,16,4],[1,12,11],[1,1,1],[1,2,4],[1,54,2],[1,1,2],[1,6,2],[1,1,3],[1,2,2],[1,1,1],[1,2,1],[1,11,4],[1,9,1],[1,20,1],[1,1,1],[1,17,3],[1,1,1],[1,9,2],[1,2,2],[1,3,1],[1,29,19],[1,28,1],[1,8,3],[1,21,8],[1,7,3],[1,6,2],[1,5,2],[1,11,1],[1,1,2],[1,7,1],[1,22,1],[1,9,1],[1,3,3],[1,8,2],[1,5,1],[1,23,2],[1,11,5],[1,17,2],[1,5,5],[1,4,3],[1,33,1],[1,2,3],[1,6,1],[1,32,1],[1,6,2],[1,64,2],[1,3,1],[1,7,1],[1,3,6],[1,12,1],[1,1,1],[1,9,1],[1,38,3],[1,1,1],[1,3,1],[1,3,5],[1,78,16],[1,3,1],[1,7,1],[1,26,1],[1,9,2],[1,113,2],[1,9,1],[1,5,9],[1,3,2],[1,4,1],[1,2,1],[1,5,1],[1,24,3],[1,11,4],[1,38,2],[1,13,3],[1,7,3],[1,1,1],[1,1,2],[1,3,3],[1,5,3],[1,6,1],[1,7,1],[1,3,1],[1,4,2],[1,3,1],[1,3,1],[1,1,2],[1,2,1],[1,18,8],[1,1,3],[1,1,1],[1,2,5],[1,13,9],[1,2,2],[1,6,1],[1,5,1],[1,13,3],[1,7,1],[1,3,2],[1,2,1],[1,4,1],[1,2,2],[1,6,2],[1,4,3],[1,1,3],[1,3,2],[1,12,8],[1,6,1],[1,7,1],[1,6,3],[1,9,4],[1,16,17],[1,1,2],[1,4,1],[1,2,1]
,[1,2,1],[1,2,1],[1,1,1],[1,4,2],[1,4,1],[1,8,1],[1,14,17],[1,7,1],[1,7,6],[1,5,1],[1,4,2],[1,80,2],[1,13,1],[1,11,1],[1,9,1],[1,2,4],[1,3,1],[1,2,1],[1,5,2],[1,3,1],[1,1,2],[1,12,1],[1,8,5],[1,6,3],[1,17,1],[1,3,4],[1,1,2],[1,5,2],[1,1,3],[1,2,2],[1,2,3],[1,2,1],[1,4,1],[1,1,1],[1,14,1],[1,2,1],[1,16,4],[1,15,2],[1,3,3],[1,8,8],[1,6,1],[1,25,4],[1,6,1],[1,7,3],[1,36,2],[1,2,1],[1,32,2],[1,1,1],[1,7,1],[1,14,2],[1,21,1],[1,3,1],[1,27,7],[1,6,3],[1,1,5],[1,5,4],[1,12,2],[1,2,1],[1,2,1],[1,8,7],[1,8,8],[1,7,1],[1,2,1],[1,4,1],[1,1,7],[1,10,3],[1,17,1],[1,1,1],[1,8,6],[1,29,5],[1,12,2],[1,7,2],[1,7,1],[1,2,2],[1,2,1],[1,2,1],[1,54,9],[1,1,1],[1,12,2],[1,8,1],[1,8,4],[1,39,1],[1,3,3],[1,9,4],[1,6,5],[1,2,1],[1,15,2],[1,18,1],[1,2,2],[1,1,1],[1,1,1],[1,2,4],[1,3,1],[1,6,1],[1,3,3],[1,4,3],[1,3,2],[1,1,1],[1,2,2],[1,16,12],[1,4,2],[1,15,2],[1,6,1],[1,7,1],[1,9,8],[1,70,2],[1,5,1],[1,4,3],[1,24,4],[1,8,6],[1,18,43],[1,23,3],[1,10,1],[1,14,8],[1,6,4],[1,2,1],[1,2,1],[1,1,1],[1,2,1],[1,9,3],[1,6,4],[1,5,3],[1,43,2],[1,5,1],[1,11,1],[1,1,2],[1,5,3],[1,4,2],[1,16,2],[1,16,10],[1,5,1],[1,2,2],[1,2,1],[1,2,3],[1,4,6],[1,3,12],[1,6,1],[1,10,1],[1,1,2],[1,13,1],[1,3,1],[1,5,2],[1,6,1],[1,3,1],[1,2,1],[1,1,1],[1,13,1],[1,20,1],[1,20,2],[1,8,1],[1,5,2],[1,2,2],[1,10,5],[1,1,3],[1,7,2],[1,4,1],[1,15,18],[1,1,4],[1,5,2],[1,4,1],[1,1,11],[1,1,3],[1,4,1],[1,1,1],[1,2,1],[1,2,12],[1,5,1],[1,3,1],[1,25,2],[1,16,1],[1,10,1],[1,18,1],[1,28,3],[1,5,6],[1,4,2],[1,2,2],[1,51,124],[1,4,2],[1,5,1],[1,28,1],[1,4,5],[1,6,2],[1,20,1],[1,7,1],[1,5,3],[1,11,1],[1,4,3],[1,1,1],[1,6,3],[1,5,1],[1,3,1],[1,10,2],[1,64,5],[1,12,12],[1,5,2],[1,6,1],[1,8,2],[1,28,8],[1,19,1],[1,2,1],[1,1,1],[2,6,1],[2,2,2],[2,4,5],[2,11,1],[2,4,1],[2,4,1],[2,14,1],[2,19,2],[2,2,1],[2,6,4],[2,2,1],[2,6,2],[2,4,1],[2,12,2],[2,15,2],[2,5,1],[2,11,1],[2,11,1],[2,2,2],[2,3,3],[2,5,9],[2,2,1],[2,1,1],[2,1,4],[2,2,1],[2,4,1],[2,11,1],[2,6,1],[2,2,2],[2,8,1],[2,81,7],[2,8,1],[2,5,1],[2,6,3],[2,2,2],[2,39,1],[2,5,2],[2,5,2],[2,2,4],[2,10,2],[2,4,2],[2,2,1],[2,6,6],[2,8,2],[2,56,1],[2,9,1],[2,1,1],[2,16,3],[2,5,2],[2,3,2],[2,12,25],[2,4,4],[2,6,2],[2,7,1],[2,30,11],[2,4,1],[2,16,5],[2,8,2],[2,7,2],[2,11,1],[2,7,1],[2,2,1],[2,1,1],[2,2,9],[2,39,6],[2,2,1],[2,2,1],[2,7,1],[2,19,1],[2,11,2],[2,8,2],[2,4,7],[2,2,1],[2,7,1],[2,1,1],[2,4,1],[2,6,1],[2,6,1],[2,2,4],[2,26,37],[2,2,1],[2,13,2],[2,35,10],[2,13,1],[2,6,1],[2,10,2],[2,19,9],[2,7,1],[2,7,1],[2,2,2],[2,1,1],[2,5,2],[2,10,2],[2,6,1],[2,6,1],[2,6,1],[2,2,2],[2,1,1],[2,6,60],[2,8,1],[2,18,1],[2,4,2],[2,1,1],[2,1,1],[2,2,3],[2,21,2],[2,7,2],[2,11,3],[2,14,2],[2,3,2],[2,12,1],[2,1,2],[2,34,1],[2,1,1],[2,16,1],[2,1,1],[2,11,1],[2,14,1],[2,8,1],[2,9,1],[2,8,1],[2,3,1],[2,4,4],[2,4,1],[2,44,3],[2,4,1],[2,19,6],[2,19,2],[2,3,2],[2,17,2],[2,17,4],[2,1,6],[2,5,3],[2,27,6],[2,5,3],[2,6,3],[2,22,2],[2,22,3],[2,13,19],[2,8,1],[2,2,2],[2,7,1],[2,9,3],[2,2,1],[2,11,1],[2,8,1],[2,4,1],[2,8,2],[2,4,1],[2,1,1],[2,16,1],[2,2,1],[2,4,1],[2,9,11],[2,3,3],[2,3,1],[2,1,2],[2,3,1],[2,28,1],[2,8,5],[2,6,2],[2,8,1],[2,1,1],[2,10,1],[2,6,1],[2,55,1],[2,1,1],[2,4,2],[2,3,2],[2,16,4],[2,11,1],[2,2,3],[2,15,1],[2,1,10],[2,8,2],[2,15,1],[2,1,1],[2,7,114],[2,10,3],[2,1,1],[2,5,1],[2,3,3],[2,2,1],[2,1,1],[2,8,1],[2,96,1],[2,10,3],[2,3,2],[2,2,1],[2,1,1],[2,3,1],[2,25,2],[2,3,1],[2,12,4],[2,2,9],[2,3,1],[2,2,1],[2,9,1],[2,12,1],[2,18,1],[2,23,6],[2,9,85],[2,2,8],[2,1,2],[2,26,1],[2,8,2],[2,6,3],[2,1,4],[2,6,1],[2,8,3],[2,9,2],[2,1,1],[2,7,1],[2,1,3],[2,7,1],[2,3,2],[2,10,1],[2,2,2],[2,8,2],[2,4,4],[2,23,2],[2,8,5],[2,1,1],[2,3,3],[2,7,2],[
2,1,1],[2,2,1],[2,1,7],[2,10,1],[2,18,1],[2,39,5],[2,13,2],[2,7,2],[2,6,2],[2,9,1],[2,5,1],[2,7,1],[2,35,2],[2,2,2],[2,5,2],[2,1,1],[2,9,2],[2,18,1],[2,2,3],[2,35,1],[2,6,5],[2,2,2],[2,2,1],[2,12,2],[2,1,1],[2,10,1],[2,6,1],[2,2,1],[2,15,2],[2,7,1],[2,5,4],[2,4,1],[2,2,14],[2,2,1],[2,5,3],[2,21,2],[2,10,1],[2,2,1],[2,8,1],[2,16,1],[2,9,2],[2,11,2],[2,1,6],[2,12,2],[2,18,2],[2,2,4],[2,4,3],[2,7,11],[2,3,1],[2,28,5],[2,1,4],[2,8,1],[2,2,5],[2,2,1],[2,3,1],[2,10,2],[2,3,3],[2,2,1],[2,17,1],[2,6,1],[2,16,1],[2,10,16],[2,17,1],[2,4,2],[2,1,1],[2,3,3],[2,7,3],[2,5,1],[2,11,1],[2,13,1],[2,3,1],[2,6,1],[2,5,2],[2,17,2],[2,33,13],[2,2,10],[2,3,5],[2,4,3],[2,5,1],[2,2,4],[2,8,2],[2,14,1],[2,16,1],[2,2,3],[2,19,6],[2,5,1],[2,8,2],[2,7,1],[2,1,1],[2,11,1],[2,2,2],[2,11,10],[2,10,1],[2,14,1],[2,1,7],[2,10,1],[2,34,1],[2,2,1],[2,2,4],[2,9,2],[2,16,1],[2,2,4],[2,8,3],[2,1,2],[2,3,5],[2,13,5],[2,20,1],[2,25,8],[2,9,1],[2,1,1],[2,15,3],[2,6,2],[2,394,278],[2,11,2],[2,1,1],[2,3,15],[2,4,2],[2,3,6],[2,6,3],[2,1,12],[2,2,1],[2,1,3],[2,11,2],[2,20,3],[2,31,9],[2,25,7],[2,15,2],[2,11,31],[2,17,2],[2,5,1],[2,2,2],[2,4,1],[2,6,2],[2,27,2],[2,10,2],[2,1,2],[2,26,5],[2,5,14],[2,12,2],[2,5,2],[2,2,1],[2,2,3],[2,6,1],[2,1,3],[2,9,3],[2,18,1],[2,5,5],[2,29,13],[2,14,1],[2,1,4],[2,3,1],[2,5,1],[2,19,4],[2,11,7],[2,8,3],[2,18,1],[2,3,5],[2,11,1],[2,4,1],[2,10,4],[2,19,2],[2,10,3],[2,12,2],[2,19,9],[2,73,3],[2,13,3],[2,12,1],[2,4,5],[2,55,1],[2,6,6],[2,27,2],[2,2,1],[2,20,1],[2,8,1],[2,1,1],[2,29,2],[2,10,8],[2,5,2],[2,10,2],[2,14,1],[2,10,1],[2,1,1],[2,4,2],[2,5,1],[2,1,4],[2,4,2],[2,9,1],[2,9,4],[2,2,1],[2,4,1],[2,6,2],[2,2,2],[2,10,15],[2,17,1],[2,9,1],[2,9,1],[2,8,2],[2,4,1],[2,4,1],[2,243,2],[2,9,3],[2,12,2],[2,4,3],[2,2,1],[2,1,2],[2,57,4],[2,7,2],[2,8,2],[2,14,2],[2,2,1],[2,6,1],[2,7,2],[2,8,1],[2,4,3],[2,36,5],[2,3,1],[2,1,1],[2,45,8],[2,1,1],[2,2,3],[2,9,1],[2,1,1],[2,13,2],[2,44,6],[2,2,1],[2,36,1],[2,4,1],[2,5,1],[2,3,2],[2,1,1],[2,28,2],[2,9,1],[2,3,3],[2,10,2],[2,16,1],[2,1,1],[2,1,1],[2,13,1],[2,14,3],[2,65,1],[2,7,1],[2,2,1],[2,11,8],[2,4,1],[2,17,1],[2,6,1],[2,15,5],[2,15,1],[2,17,2],[2,8,1],[2,8,1],[2,1,2],[2,5,7],[2,1,1],[2,3,2],[2,2,1],[2,4,1],[2,32,1],[2,3,1],[2,1,1],[2,1,1],[2,2,2],[2,2,1],[2,8,2],[2,11,3],[2,2,3],[2,42,3],[2,5,1],[2,6,2],[2,1,1],[2,9,1],[2,2,2],[2,5,1],[2,2,1],[2,7,1],[2,7,6],[2,6,2],[2,3,1],[2,1,3],[2,15,1],[2,23,1],[2,1,1],[2,3,1],[2,4,2],[2,8,1],[2,2,7],[2,3,4],[2,6,5],[2,4,1],[2,5,3],[2,16,5],[2,11,1],[2,13,1],[2,22,3],[2,10,5],[2,2,2],[2,2,2],[2,6,1],[2,7,1],[2,4,2],[2,4,3],[2,7,3],[2,7,4],[2,1,1],[2,71,9],[2,4,8],[2,33,4],[2,16,2],[2,1,18],[2,15,1],[2,3,1],[2,8,1],[2,6,3],[2,4,2],[2,1,1],[2,7,2],[2,2,8],[2,2,1],[2,8,1],[2,1,3],[2,5,1],[2,2,2],[2,11,1],[2,17,3],[2,118,1],[2,8,4],[2,14,1],[2,3,4],[2,14,1],[2,2,2],[2,4,3],[2,2,1],[2,11,1],[2,8,10],[2,1,2],[2,3,3],[2,2,2],[2,12,1],[2,2,2],[2,26,3],[2,3,2],[2,3,3],[2,19,1],[2,1,13],[2,23,2],[2,3,1],[2,7,4],[2,10,4],[2,2,3],[2,71,3],[2,3,3],[2,23,1],[2,1,1],[2,34,3],[2,62,1],[2,4,1],[2,7,2],[2,2,8],[2,6,1],[2,20,3],[2,26,2],[2,5,2],[2,2,1],[2,7,1],[2,1,1],[2,7,2],[2,28,7],[2,4,1],[2,2,2],[2,4,1],[2,7,1],[2,2,3],[2,3,1],[2,8,3],[2,43,1],[2,2,1],[2,1,4],[2,2,1],[2,13,3],[2,4,2],[2,6,1],[2,17,1],[2,2,8],[2,32,1],[2,11,2],[2,5,2],[2,45,3],[2,9,1],[2,14,2],[2,9,1],[2,2,1],[2,10,5],[2,2,1],[2,13,1],[2,2,2],[2,3,5],[2,2,1],[2,17,3],[2,11,1],[2,15,1],[2,13,4],[2,7,7],[2,10,2],[2,6,4],[2,2,3],[2,1,3],[2,27,2],[2,2,3],[2,2,1],[2,3,1],[2,3,9],[2,3,46],[2,11,1],[2,30,1],[2,5,1],[2,8,8],[2,2,1],[2,1,1],[2,2,1],[2,6,7],[2,1,1],[2,4,1],[2,4,2],[2,1
5,2],[2,6,7],[2,4,2],[2,5,1],[2,1,4],[2,2,3],[2,1,2],[2,2,2],[2,1,7],[2,15,2],[2,18,3],[2,2,1],[2,6,1],[2,8,1],[2,134,20],[2,26,1],[2,2,2],[2,8,4],[2,1,1],[2,3,1],[2,14,1],[2,3,1],[2,26,1],[2,19,1],[2,1,1],[2,1,1],[2,7,1],[2,5,2],[2,5,8],[2,3,4],[2,1,1],[2,2,2],[2,16,1],[2,7,2],[2,6,1],[2,1,6],[2,4,3],[2,2,2],[2,2,2],[2,2,1],[2,2,1],[2,1,2],[2,8,3],[2,4,1],[2,9,1],[2,18,33],[2,14,1],[2,1,1],[2,3,2],[2,7,1],[2,14,4],[2,4,2],[2,31,7],[2,19,2],[2,11,4],[2,2,1],[2,7,2],[2,2,1],[2,2,3],[2,52,4],[2,4,1],[2,1,1],[2,4,3],[2,11,1],[2,3,2],[2,6,1],[2,10,3],[2,6,1],[2,12,1],[2,10,2],[2,4,2],[2,23,2],[2,3,3],[2,8,1],[2,21,6],[2,2,2],[2,1,1],[2,1,1],[2,16,3],[2,9,2],[2,5,1],[2,2,2],[2,1,4],[2,4,1],[2,1,25],[2,24,2],[2,6,1],[2,3,4],[2,10,4],[2,6,2],[2,35,2],[2,2,2],[2,1,1],[2,25,10],[2,8,1],[2,1,2],[2,1,1],[2,2,1],[2,3,8],[2,2,1],[2,2,1],[2,5,2],[2,4,3],[2,2,8],[2,1,1],[2,4,2],[2,3,3],[2,12,1],[2,3,2],[2,4,1],[2,2,4],[2,7,2],[2,1,1],[2,73,14],[2,90,1],[2,4,1],[2,2,1],[2,1,1],[2,6,3],[2,1,1],[2,4,1],[2,10,3],[2,2,3],[2,1,1],[2,6,1],[2,37,2],[2,10,1],[2,2,2],[2,60,2],[2,16,3],[2,6,1],[2,1,1],[2,3,4],[2,38,5],[2,6,2],[2,2,1],[2,2,1],[2,9,2],[2,11,1],[2,6,1],[2,9,1],[2,2,2],[2,4,3],[2,8,1],[2,3,2],[2,1,9],[2,14,2],[2,8,1],[2,30,4],[2,2,1],[2,31,2],[2,31,1],[2,21,23],[2,1,5],[2,4,1],[2,2,1],[2,5,3],[2,4,2],[2,10,2],[2,2,2],[2,18,1],[2,15,1],[2,2,1],[2,1,2],[2,5,1],[2,13,1],[2,14,4],[2,1,4],[2,5,1],[2,109,3],[2,18,2],[2,1,2],[2,164,114],[2,8,1],[2,2,3],[2,4,1],[2,1,1],[2,10,1],[2,9,2],[2,4,3],[2,1,75],[2,6,1],[2,17,2],[2,3,1],[2,9,1],[2,2,1],[2,21,1],[2,30,3],[2,7,2],[2,2,2],[2,63,5],[2,16,3],[2,6,1],[2,2,8],[2,25,2],[2,31,3],[2,126,21],[2,10,1],[2,2,2],[2,14,7],[2,6,10],[2,4,3],[2,7,1],[2,12,1],[2,2,1],[2,3,2],[2,2,15],[2,1,4],[2,4,1],[2,3,1],[2,4,1],[2,6,2],[2,7,3],[2,2,3],[2,9,2],[2,6,1],[2,2,1],[2,16,1],[2,22,2],[2,10,1],[2,10,4],[2,7,2],[2,13,1],[2,3,1],[2,7,2],[2,23,12],[2,3,1],[2,6,1],[2,4,2],[2,29,2],[2,5,3],[2,8,1],[2,1,1],[2,6,1],[2,3,1],[2,17,2],[2,15,1],[2,2,1],[2,6,1],[2,2,2],[2,30,1],[2,3,1],[2,2,2],[2,2,5],[2,2,1],[2,37,5],[2,6,2],[2,7,6],[2,2,3],[2,3,3],[2,2,5],[2,75,6],[2,2,3],[2,10,1],[2,2,3],[2,7,2],[2,30,1],[2,12,33],[2,1,1],[2,3,4],[2,14,1],[2,9,2],[2,8,1],[2,1,1],[2,9,1],[2,4,1],[2,2,1],[2,7,1],[2,4,1],[2,3,1],[2,4,3],[2,1,1],[2,5,2],[2,3,4],[2,4,2],[2,6,3],[2,13,5],[2,4,2],[2,6,1],[2,2,5],[2,2,3],[2,1,1],[2,14,1],[2,5,1],[2,4,2],[2,9,1],[2,7,6],[2,4,1],[2,19,2],[2,23,1],[2,20,7],[2,9,1],[2,4,1],[2,12,2],[2,9,4],[2,3,2],[2,3,7],[2,3,1],[2,10,2],[2,6,1],[2,7,1],[2,1,1],[2,9,1],[2,6,1],[2,1,1],[2,17,2],[2,9,1],[2,5,2],[2,1,1],[2,11,2],[2,9,1],[2,1,1],[2,3,6],[2,2,1],[2,5,9],[2,12,2],[2,2,1],[2,6,2],[2,17,4],[2,2,2],[2,7,1],[2,596,5],[2,6,1],[2,2,1],[2,58,125],[2,6,1],[2,8,1],[2,2,1],[2,3,1],[2,1,2],[2,11,4],[2,1,1],[2,9,6],[2,2,8],[2,1,1],[2,6,2],[2,1,1],[2,2,1],[2,7,2],[2,7,3],[2,14,2],[2,1,1],[2,18,9],[2,2,5],[2,2,12],[2,8,4],[2,6,4],[2,3,1],[2,19,2],[2,4,1],[2,2,1],[2,4,3],[2,3,1],[2,13,1],[2,1,1],[2,7,1],[2,1,1],[2,8,1],[2,13,14],[2,11,1],[2,31,1],[2,4,1],[2,6,1],[2,3,2],[2,26,1],[2,4,2],[2,1,1],[2,2,2],[2,1,2],[2,1,1],[2,7,1],[2,8,1],[2,6,2],[2,19,13],[2,2,3],[2,8,3],[2,1,6],[2,5,1],[2,1,1],[2,6,1],[2,9,1],[2,2,2],[2,35,1],[2,1,1],[2,27,2],[2,54,2],[2,6,2],[2,5,1],[2,2,1],[2,2,4],[2,2,1],[2,2,1],[2,14,1],[2,9,1],[2,53,17],[2,2,1],[2,10,1],[2,9,1],[2,23,1],[2,7,1],[2,12,4],[2,1,2],[2,8,1],[2,7,4],[2,2,1],[2,2,1],[2,3,1],[2,11,1],[2,2,2],[2,6,1],[2,2,1],[2,18,4],[2,3,4],[2,8,2],[2,13,1],[2,2,1],[2,1,2],[2,14,4],[2,8,11],[2,1,1],[2,8,3],[2,7,3],[2,90,1],[2,20,2],[2,16,1],[2,20,2],[2,3,1],
[2,8,10],[2,10,1],[2,10,1],[2,1,1],[2,3,1],[2,5,1],[2,37,3],[2,24,3],[2,10,1],[2,3,1],[2,2,4],[2,4,1],[2,19,2],[2,1,1],[2,5,1],[2,8,1],[2,3,1],[2,1,1],[2,2,1],[2,2,32],[2,2,1],[2,4,1],[2,1,1],[2,2,2],[2,5,1],[2,2,3],[2,25,9],[2,2,1],[2,4,4],[2,2,1],[2,15,1],[2,59,1],[2,3,2],[2,4,1],[2,9,2],[2,3,10],[2,6,1],[2,5,5],[2,8,2],[2,2,2],[2,4,2],[2,10,1],[2,126,1],[2,3,1],[2,8,1],[2,9,2],[2,1,30],[2,25,1],[2,7,3],[2,2,2],[2,1,3],[2,21,1],[2,38,1],[2,48,1],[2,22,1],[2,4,2],[2,55,2],[2,5,1],[2,15,1],[2,14,44],[2,4,1],[2,1,2],[2,2,3],[2,2,1],[2,3,3],[2,6,1],[2,2,1],[2,26,7],[2,4,1],[2,1,2],[2,3,2],[2,6,2],[2,10,1],[2,18,3],[2,2,1],[2,38,2],[2,1,1],[2,8,1],[2,8,1],[2,3,1],[2,4,1],[2,1,1],[2,1,2],[2,4,1],[2,26,2],[2,3,3],[2,2,1],[2,6,1],[2,19,1],[2,3,4],[2,2,1],[2,4,1],[2,11,1],[2,9,1],[2,9,1],[2,9,1],[2,1,1],[2,1,1],[2,7,1],[2,2,1],[2,11,4],[2,10,2],[2,4,1],[2,6,1],[2,4,1],[2,8,1],[2,11,1],[2,1,1],[2,7,1],[2,8,2],[2,9,1],[2,8,1],[2,41,2],[2,2,4],[2,1,6],[2,2,1],[2,6,3],[2,128,5],[2,2,1],[2,13,13],[2,6,1],[2,1,3],[2,3,3],[2,7,2],[2,10,12],[2,2,1],[2,8,1],[2,1,1],[2,7,1],[2,2,1],[2,10,2],[2,11,10],[2,1,1],[2,8,3],[2,4,5],[2,2,1],[2,14,2],[2,4,1],[2,4,1],[2,7,1],[2,6,1],[2,7,3],[2,1,1],[2,2,1],[2,7,2],[2,2,1],[2,6,1],[2,8,1],[2,2,4],[2,6,1],[2,43,1],[2,108,3],[2,8,1],[2,13,1],[2,4,1],[2,10,3],[2,2,1],[2,24,2],[2,1,2],[2,4,2],[2,2,2],[2,40,6],[2,6,2],[2,6,2],[2,4,3],[2,28,5],[2,4,1],[2,15,1],[2,12,1],[2,1,1],[2,27,1],[3,1,1],[3,5,2],[3,16,2],[3,16,3],[3,1,2],[3,98,2],[3,91,7],[3,6,37],[3,4,1],[3,9,1],[3,97,2],[3,6,1],[3,23,3],[3,115,1],[3,2,1],[3,1,1],[3,1,1],[3,14,4],[3,1,1],[3,28,1],[3,1,1],[3,6,1],[3,15,5],[3,3,1],[3,52,1],[3,2,3],[3,3,1],[3,4,5],[3,13,1],[3,16,3],[3,13,1],[3,17,1],[3,4,4],[3,6,7],[3,14,1],[3,32,1],[3,3,3],[3,11,4],[3,1,1],[3,8,6],[3,9,7],[3,2,1],[3,9,2],[3,5,2],[3,26,12],[3,11,3],[3,12,2],[3,4,2],[3,6,2],[3,30,6],[3,1,2],[3,10,1],[3,1,1],[3,4,1],[3,7,1],[3,30,29],[3,2,3],[3,2,2],[3,2,1],[3,11,1],[3,2,3],[3,3,1],[3,9,1],[3,2,2],[3,5,1],[3,1,2],[3,1,13],[3,6,9],[3,1,1],[3,6,2],[3,1,3],[3,4,1],[3,6,1],[3,9,3],[3,1,1],[3,9,2],[3,19,45],[3,2,1],[3,7,8],[3,21,3],[3,6,2],[3,2,1],[3,6,1],[3,5,1],[3,2,1],[3,15,7],[3,2,1],[3,9,3],[3,11,1],[3,4,1],[3,7,1],[3,2,1],[3,19,1],[3,5,1],[3,2,1],[3,1,1],[3,22,3],[3,21,5],[3,13,1],[3,2,1],[3,4,1],[3,23,1],[3,8,1],[3,3,2],[3,2,2],[3,4,1],[3,12,2],[3,5,2],[3,16,8],[3,6,1],[3,1,2],[3,2,1],[3,7,1],[3,6,1],[3,6,3],[3,45,1],[3,4,5],[3,1,2],[3,3,1],[3,2,1],[3,1,1],[3,12,1],[3,8,1],[3,3,1],[3,6,1],[3,2,2],[3,9,2],[3,5,2],[3,2,1],[3,3,1],[3,15,1],[3,11,1],[3,4,1],[3,9,2],[3,3,1],[3,4,1],[3,1,3],[3,6,15],[3,6,3],[3,2,6],[3,1,3],[3,3,2],[3,15,1],[3,6,1],[3,7,1],[3,5,1],[3,9,1],[3,49,2],[3,5,2],[3,9,4],[3,39,1],[3,4,3],[3,1,5],[3,1,2],[3,2,1],[3,14,2],[3,4,3],[3,18,1],[3,5,4],[3,19,3],[3,3,1],[3,2,1],[3,3,2],[3,48,10],[3,1,1],[3,5,6],[3,12,3],[3,1,2],[3,5,4],[3,4,1],[3,4,1],[3,5,1],[3,1,1],[3,10,1],[3,10,2],[3,6,3],[3,2,7],[3,4,1],[3,9,2],[3,1,1],[3,2,1],[3,4,6],[3,1,1],[3,25,9],[3,11,1],[3,2,1],[3,8,2],[3,1,1],[3,9,3],[3,4,6],[3,1,7],[3,1,1],[3,4,1],[3,11,2],[3,14,1],[3,65,2],[3,6,1],[3,5,2],[3,2,2],[3,13,1],[3,2,5],[3,2,1],[3,4,2],[3,25,1],[3,2,1],[3,2,3],[3,9,1],[3,5,5],[3,46,1],[3,6,2],[3,12,9],[3,4,4],[3,2,3],[3,13,5],[3,39,16],[3,3,1],[3,1,2],[3,68,14],[3,5,1],[3,11,1],[3,7,1],[3,4,1],[3,53,11],[3,4,3],[3,4,1],[3,2,1],[3,4,1],[3,1,1],[3,1,2],[3,8,4],[3,5,1],[3,6,5],[3,6,13],[3,403,3],[3,23,1],[3,3,3],[3,14,1],[3,10,1],[3,3,2],[3,46,11],[3,4,3],[3,29,1],[3,41,2],[3,11,1],[3,15,3],[3,11,2],[3,6,1],[3,3,1],[3,17,2],[3,14,3],[3,5,4],[3,2,1],[3,2,1],[3,5,6],[3,6,1],
[3,54,2],[3,2,1],[3,4,2],[3,1,1],[3,7,1],[3,8,34],[3,7,1],[3,1,2],[3,3,2],[3,2,5],[3,1,1],[3,15,12],[3,13,1],[3,5,1],[3,1,1],[3,5,1],[3,39,1],[3,26,9],[3,11,1],[3,6,1],[3,2,1],[3,19,4],[3,4,5],[3,10,1],[3,11,6],[3,4,1],[3,38,1],[3,1,1],[3,1,3],[3,2,1],[3,5,10],[3,4,1],[3,18,2],[3,4,1],[3,19,1],[3,1,1],[3,8,6],[3,1,1],[3,9,1],[3,8,3],[3,15,4],[3,9,3],[3,13,1],[3,10,1],[3,1,2],[3,5,4],[3,4,2],[3,4,1],[3,28,1],[3,6,2],[3,9,1],[3,1,2],[3,2,2],[3,25,1],[3,5,8],[3,5,3],[3,8,2],[3,2,1],[3,14,5],[3,2,1],[3,11,3],[3,10,1],[3,2,2],[3,1,1],[3,3,1],[3,9,1],[3,39,9],[3,27,2],[3,1,1],[3,1,3],[3,12,3],[3,6,1],[3,14,2],[3,17,3],[3,198,1],[3,3,1],[3,5,1],[3,1,1],[3,2,4],[3,12,1],[3,31,1],[3,8,14],[3,25,2],[3,16,2],[3,18,2],[3,2,3],[3,2,3],[3,6,28],[3,22,3],[3,6,1],[3,8,2],[3,4,3],[3,3,3],[3,8,1],[3,1,1],[3,1,2],[3,1,1],[3,1,1],[3,1,2],[3,6,2],[3,2,3],[3,4,1],[3,3,1],[3,1,1],[3,3,2],[3,8,10],[3,6,1],[3,2,1],[3,2,1],[3,5,1],[3,29,6],[3,10,1],[3,3,8],[3,1,3],[3,2,2],[3,3,1],[3,3,4],[3,5,19],[3,15,1],[3,65,1],[3,2,2],[3,60,3],[3,52,1],[3,1,1],[3,4,2],[3,4,1],[3,6,1],[3,7,4],[3,1,1],[3,13,1],[3,8,3],[3,13,1],[3,6,1],[3,3,2],[3,14,1],[3,2,2],[3,4,1],[3,1,1],[3,11,29],[3,7,1],[3,21,6],[3,4,1],[3,1,1],[3,2,1],[3,9,1],[3,2,4],[3,3,1],[3,2,3],[3,1,2],[3,3,2],[3,3,4],[3,16,2],[3,9,2],[3,2,1],[3,17,8],[3,9,4],[3,7,1],[3,6,4],[3,1,2],[3,2,1],[3,4,4],[3,2,1],[3,3,1],[3,3,1],[3,11,1],[3,2,2],[3,2,1],[3,2,3],[3,2,2],[3,10,6],[3,10,4],[3,1,1],[3,8,3],[3,29,2],[3,7,1],[3,2,1],[3,4,1],[3,11,1],[3,2,1],[3,2,2],[3,13,3],[3,4,1],[3,3,1],[3,2,4],[3,18,1],[3,12,1],[3,6,3],[3,3,1],[3,5,1],[3,3,2],[3,9,2],[3,5,1],[3,5,1],[3,11,1],[3,1,1],[3,39,18],[3,3,2],[3,4,1],[3,17,2],[3,14,2],[3,10,6],[3,1,1],[3,4,5],[3,2,1],[3,4,6],[3,12,1],[3,106,80],[3,32,1],[3,7,1],[3,8,1],[3,2,1],[3,33,2],[3,33,7],[3,10,1],[3,3,2],[3,4,3],[3,16,3],[3,7,1],[3,8,1],[3,16,1],[3,8,1],[3,8,1],[3,30,1],[3,7,1],[3,2,1],[3,3,10],[3,27,1],[3,2,1],[3,1,3],[3,2,1],[3,23,1],[3,1,1],[3,5,2],[3,6,1],[3,2,1],[3,2,13],[3,1,3],[3,6,2],[3,5,1],[3,26,1],[3,4,5],[3,2,1],[3,9,1],[3,6,1],[3,2,1],[3,21,2],[3,15,1],[3,4,2],[3,2,1],[3,30,1],[3,4,2],[3,2,1],[3,2,58],[3,8,2],[3,13,1],[3,16,2],[3,10,6],[3,6,1],[3,6,1],[3,2,6],[3,1,1],[3,2,4],[3,11,9],[3,25,2],[3,4,2],[3,1,1],[3,9,9],[3,1,9],[3,3,3],[3,4,1],[3,2,3],[3,5,2],[3,2,7],[3,2,1],[3,2,1],[3,6,3],[3,3,4],[3,1,2],[3,4,3],[3,7,118],[3,7,1],[3,6,1],[3,3,1],[3,1,15],[3,1,2],[3,4,2],[3,2,1],[3,4,1],[3,6,1],[3,23,1],[3,1,1],[3,3,1],[3,4,1],[3,10,3],[3,2,2],[3,6,5],[3,8,1],[3,3,1],[3,4,1],[3,20,2],[3,14,2],[3,7,1],[3,21,29],[3,10,2],[3,10,2],[3,3,3],[3,2,1],[3,3,2],[3,24,3],[3,3,1],[3,9,1],[3,6,1],[3,22,1],[3,13,1],[3,5,2],[3,1,1],[3,9,1],[3,10,2],[3,4,1],[3,7,1],[3,2,1],[3,12,4],[3,48,2],[3,43,1],[3,6,1],[3,1,1],[3,4,1],[3,14,10],[3,2,1],[3,1,1],[3,1,1],[3,3,1],[3,11,5],[3,36,1],[3,4,49],[3,11,1],[3,8,1],[3,2,2],[3,3,1],[3,3,1],[3,8,3],[3,15,8],[3,30,9],[3,23,5],[3,10,1],[3,7,6],[3,1,1],[3,9,2],[3,6,1],[3,3,1],[3,3,1],[3,2,1],[3,21,1],[3,13,2],[3,4,2],[3,9,2],[3,8,1],[3,2,2],[3,4,2],[3,1,1],[3,9,2],[3,32,2],[3,2,2],[3,10,1],[3,1,4],[3,4,3],[3,14,3],[3,5,2],[3,2,1],[3,3,1],[3,5,3],[3,14,3],[3,2,3],[3,6,1],[3,4,1],[3,1,1],[3,16,1],[3,3,1],[3,2,1],[3,5,1],[3,33,1],[3,3,1],[3,14,4],[3,8,3],[3,12,2],[3,14,1],[3,2,1],[3,1,1],[3,13,2],[3,8,1],[3,9,1],[3,17,1],[3,14,2],[3,16,1],[3,12,4],[3,2,1],[3,2,2],[3,20,1],[3,2,2],[3,8,4],[3,7,3],[3,8,1],[3,1,2],[3,5,5],[3,29,1],[3,1,1],[3,2,1],[3,8,2],[3,2,1],[3,7,9],[3,3,2],[3,7,1],[3,6,1],[3,6,2],[3,1,26],[3,3,3],[3,7,1],[3,2,2],[3,8,2],[3,7,1],[3,3,1],[3,4,4],[3,11,1],[3,5,15],[3,28,1],[3,3,8],[3,3
,3],[3,2,4],[3,6,4],[3,3,2],[3,2,2],[3,5,1],[3,12,2],[3,10,2],[3,1,1],[3,6,1],[3,2,1],[3,3,2],[4,8,1],[4,3,1],[4,23,1],[4,4,9],[4,6,2],[4,9,1],[4,9,6],[4,5,9],[4,8,1],[4,2,1],[4,2,3],[4,8,1],[4,1,1],[4,4,1],[4,8,1],[4,2,1],[4,16,1],[4,1,8],[4,4,1],[4,1,3],[4,18,1],[4,2,1],[4,4,9],[4,2,1],[4,3,1],[4,9,2],[4,2,1],[4,7,3],[4,5,4],[4,27,2],[4,1,1],[4,8,2],[4,7,1],[4,8,1],[4,9,4],[4,3,2],[4,6,4],[4,2,2],[4,13,5],[4,8,1],[4,10,2],[4,1,1],[4,2,1],[4,1,2],[4,6,2],[4,5,2],[4,8,2],[4,16,2],[4,7,2],[4,102,5],[4,2,2],[4,1,1],[4,2,1],[4,1,2],[4,2,1],[4,29,4],[4,2,1],[4,1,1],[4,1,4],[4,3,2],[4,6,1],[4,19,2],[4,4,3],[4,1,12],[4,1,1],[4,62,3],[4,14,1],[4,1,1],[4,1,1],[4,7,4],[4,9,1],[4,15,1],[4,16,15],[4,2,2],[4,2,1],[4,41,3],[4,7,8],[4,7,3],[4,5,1],[4,9,1],[4,6,1],[4,1,3],[4,15,1],[4,5,4],[4,28,2],[4,11,3],[4,15,1],[4,1,1],[4,1,1],[4,12,1],[4,16,4],[4,12,5],[4,5,2],[4,8,4],[4,124,115],[4,11,3],[4,46,10],[4,4,1],[4,3,1],[4,2,1],[4,27,1],[4,1,1],[4,20,1],[4,2,1],[4,4,1],[4,53,1],[4,18,1],[4,1,1],[4,8,2],[4,3,1],[4,2,1],[4,5,1],[4,2,3],[4,2,5],[4,3,1],[4,8,1],[4,2,5],[4,8,2],[4,9,2],[4,48,1],[4,9,1],[4,20,2],[4,4,4],[4,3,2],[4,8,2],[4,6,2],[4,12,6],[4,9,1],[4,3,1],[4,4,1],[4,5,3],[4,5,1],[4,8,4],[4,3,1],[4,7,1],[4,6,2],[4,15,16],[4,6,1],[4,50,4],[4,23,4],[4,9,7],[4,8,2],[4,1,1],[4,2,1],[4,9,1],[4,12,1],[4,4,3],[4,2,2],[4,42,4],[4,1,1],[4,6,1],[4,11,10],[4,6,11],[4,7,1],[4,4,2],[4,4,2],[4,6,1],[4,59,4],[4,1,1],[4,2,7],[4,12,20],[4,11,3],[4,4,1],[4,12,3],[4,6,3],[4,7,2],[4,17,4],[4,106,8],[4,6,2],[4,7,1],[4,1,1],[4,8,1],[4,4,6],[4,3,1],[4,4,3],[4,14,3],[4,15,2],[4,4,1],[4,44,91],[4,7,2],[4,3,2],[4,2,1],[4,23,2],[4,30,1],[4,2,2],[4,10,1],[4,6,9],[4,6,2],[4,3,2],[4,3,2],[4,20,1],[4,4,1],[4,18,2],[4,12,1],[4,20,14],[4,10,1],[4,3,1],[4,2,1],[4,3,2],[4,3,3],[4,6,3],[4,2,4],[4,8,1],[4,8,5],[4,3,1],[4,10,2],[4,2,1],[4,1,1],[4,10,1],[4,25,2],[4,1,1],[4,4,1],[4,63,2],[4,1,1],[4,4,1],[4,6,7],[4,2,3],[4,8,1],[4,19,2],[4,11,1],[4,30,10],[4,4,4],[4,2,3],[4,2,1],[4,43,29],[4,2,1],[4,1,1],[4,17,1],[4,14,1],[4,13,1],[4,6,4],[4,2,2],[4,1,2],[4,3,1],[4,7,3],[4,4,1],[4,4,1],[4,1,1],[4,13,5],[4,2,1],[4,1,1],[4,5,1],[4,4,2],[4,13,2],[4,10,4],[4,8,1],[4,3,1],[4,2,2],[4,8,3],[4,4,2],[4,6,1],[4,7,1],[4,14,29],[4,19,1],[4,7,1],[4,19,1],[4,24,2],[4,2,1],[4,1,1],[4,28,1],[4,1,1],[4,2,1],[4,3,1],[4,2,1],[4,1,7],[4,2,4],[4,3,1],[4,29,1],[4,2,1],[4,14,1],[4,2,1],[4,28,3],[4,11,3],[4,1,2],[4,21,2],[4,1,1],[4,15,1],[4,17,1],[4,16,1],[4,13,1],[4,2,1],[4,15,5],[4,19,1],[4,17,1],[4,5,3],[4,12,2],[4,33,1],[4,8,1],[4,15,4],[4,2,11],[4,4,1],[4,1,10],[4,39,1],[4,28,1],[4,25,2],[4,1,1],[4,14,2],[4,8,32],[4,9,1],[4,7,1],[4,6,2],[4,1,2],[4,3,1],[4,6,2],[4,12,2],[4,2,2],[4,5,2],[4,18,1],[4,5,3],[4,6,2],[4,25,1],[4,3,16],[4,14,4],[4,2,6],[4,14,2],[4,3,1],[4,4,1],[4,9,3],[4,28,2],[4,9,1],[4,2,1],[4,7,1],[4,2,1],[4,1,4],[4,4,3],[4,1,1],[4,16,6],[4,3,1],[4,10,1],[4,12,3],[4,8,1],[4,4,1],[4,15,2],[4,4,1],[4,2,3],[4,2,9],[4,4,1],[4,7,2],[4,14,1],[4,31,3],[4,13,1],[4,19,2],[4,8,3],[4,2,1],[4,12,1],[4,5,1],[4,45,3],[4,6,1],[4,1,1],[4,12,6],[4,4,3],[4,3,1],[4,5,2],[4,4,4],[4,19,2],[4,8,1],[4,2,1],[4,27,2],[4,73,3],[4,22,2],[4,1,2],[4,7,46],[4,9,2],[4,2,1],[4,524,305],[4,7,1],[4,26,1],[4,2,1],[4,6,1],[4,30,2],[4,6,1],[4,25,92],[4,2,1],[4,13,1],[4,1,4],[4,1,7],[4,6,1],[4,8,2],[4,6,1],[4,4,2],[4,2,6],[4,12,2],[4,2,2],[4,5,2],[4,3,2],[4,13,1],[4,4,1],[4,6,3],[4,14,1],[4,15,1],[4,25,1],[4,3,1],[4,9,4],[4,94,3],[4,11,2],[4,12,4],[4,7,3],[4,3,1],[4,9,2],[4,3,1],[4,2,1],[4,8,3],[4,7,5],[4,2,45],[4,10,1],[4,10,4],[4,5,3],[4,6,6],[5,5,1],[5,2,1],[5,3,3],[5,11,2],[5,28,1]
,[5,8,1],[5,4,1],[5,4,1],[5,12,1],[5,7,1],[5,1,1],[5,38,7],[5,6,2],[5,4,2],[5,5,1],[5,2,2],[5,2,7],[5,1,4],[5,4,1],[5,4,1],[5,1,2],[5,3,1],[5,7,1],[5,2,1],[5,10,2],[5,4,1],[5,2,1],[5,2,2],[5,3,1],[5,15,78],[5,2,1],[5,1,5],[5,10,1],[5,6,4],[5,10,2],[5,5,1],[5,1,1],[5,1,1],[5,2,2],[5,6,1],[5,2,2],[5,6,2],[5,10,2],[5,3,1],[5,6,2],[5,4,3],[5,16,5],[5,47,48],[5,2,5],[5,6,7],[5,4,2],[5,3,1],[5,2,1],[5,8,1],[5,7,1],[5,2,2],[5,2,1],[5,3,1],[5,7,4],[5,1,1],[5,1,1],[5,8,6],[5,1,4],[5,9,3],[5,11,4],[5,6,1],[5,6,1],[5,2,1],[5,5,1],[5,84,1],[5,2,33],[5,8,1],[5,6,3],[5,5,3],[5,2,1],[5,10,2],[5,3,1],[5,68,9],[5,6,2],[5,21,11],[5,3,4],[5,3,1],[5,16,3],[5,2,2],[5,2,1],[5,14,2],[5,24,2],[5,19,1],[5,1,4],[5,1,1],[5,3,1],[5,6,1],[5,2,1],[5,5,2],[5,4,3],[5,26,3],[5,2,1],[5,6,4],[5,2,1],[5,6,3],[5,5,1],[5,8,3],[5,1,3],[5,9,1],[5,1,2],[5,11,2],[5,23,1],[5,7,1],[5,2,2],[5,3,2],[5,2,1],[5,11,2],[5,8,2],[5,1,1],[5,4,1],[5,2,1],[5,7,1],[5,11,1],[5,1,1],[5,33,1],[5,4,1],[5,5,1],[5,17,3],[5,1,2],[5,18,2],[5,1,2],[5,1,1],[5,2,3],[5,4,2],[5,2,1],[5,13,7],[5,5,1],[5,19,4],[5,23,9],[5,11,6],[5,7,2],[5,10,1],[5,2,1],[5,26,1],[5,3,3],[5,3,2],[5,3,2],[5,15,3],[5,2,1],[5,3,1],[5,4,1],[5,8,1],[5,4,1],[5,23,1],[5,6,1],[5,1,3],[5,124,17],[5,1,1],[5,1,1],[5,15,1],[5,11,2],[5,2,1],[5,2,2],[5,3,2],[5,1,1],[5,6,4],[5,6,1],[5,3,3],[5,6,5],[5,17,1],[5,7,2],[5,5,1],[5,11,1],[5,3,2],[5,36,2],[5,17,7],[5,4,1],[5,7,2],[5,2,1],[5,2,1],[5,2,1],[5,7,10],[5,4,1],[5,1,3],[5,19,2],[5,2,2],[5,3,1],[5,8,3],[5,4,1],[5,15,1],[5,2,3],[5,13,2],[5,1,3],[5,7,1],[5,23,48],[5,9,1],[5,12,10],[5,16,1],[5,10,1],[5,7,5],[5,2,1],[5,3,1],[5,23,2],[5,4,1],[5,18,1],[5,13,2],[5,54,136],[5,6,2],[5,2,2],[5,5,1],[5,6,1],[5,15,8],[5,14,9],[5,4,1],[5,7,2],[5,3,3],[5,117,5],[5,25,8],[5,14,4],[5,25,3],[5,7,1],[5,7,1],[5,15,3],[5,3,2],[5,4,1],[5,6,4],[5,14,4],[5,7,1],[5,20,1],[5,6,5],[5,12,1],[5,9,3],[5,2,1],[5,4,20],[5,4,3],[5,1,1],[5,1,1],[5,8,1],[5,4,1],[5,1,1],[5,6,3],[5,19,1],[5,14,1],[5,22,2],[5,2,1],[5,11,2],[5,1,1],[5,10,1],[5,4,1],[5,23,3],[5,3,1],[5,15,1],[5,8,4],[5,11,4],[5,4,1],[5,2,1],[5,8,6],[5,2,4],[5,2,7],[5,3,2],[5,2,1],[5,1,1],[5,1,1],[5,11,2],[5,4,10],[5,11,4],[5,110,4],[5,6,1],[5,2,1],[5,96,34],[6,4,1],[6,7,3],[6,2,1],[6,6,2],[6,10,1],[6,2,1],[6,10,1],[6,59,2],[6,7,4],[6,4,2],[6,3,1],[6,6,1],[6,1,4],[6,7,3],[6,2,3],[6,1,1],[6,12,1],[6,1,39],[6,28,1],[6,3,4],[6,8,3],[6,4,4],[6,9,2],[6,15,1],[6,10,1],[6,1,1],[6,2,1],[6,7,1],[6,2,1],[6,93,1],[6,14,6],[6,2,2],[6,55,39],[6,15,2],[6,23,3],[6,3,3],[6,35,2],[6,5,15],[6,1,7],[6,8,19],[6,10,10],[6,3,2],[6,6,3],[6,1,2],[6,6,1],[6,2,1],[6,4,1],[6,127,20],[6,20,18],[6,3,1],[6,9,2],[6,2,3],[6,10,1],[6,27,1],[6,9,1],[6,9,1],[6,28,1],[6,1,1],[6,10,1],[6,11,1],[6,5,1],[6,4,1],[6,82,35],[6,2,1],[6,1,1],[6,3,1],[6,2,1],[6,2,11],[6,2,8],[6,3,2],[6,12,3],[6,5,6],[6,42,4],[6,8,1],[6,2,1],[6,2,2],[6,10,3],[6,6,2],[6,48,2],[6,2,3],[6,2,2],[6,2,1],[6,4,1],[6,10,1],[6,1,1],[6,7,1],[6,35,1],[6,17,1],[6,21,2],[6,1,1],[6,4,2],[6,25,1],[6,7,2],[6,12,4],[6,2,6],[6,24,4],[6,2,1],[6,5,1],[6,2,1],[6,2,1],[6,3,2],[6,4,2],[6,2,1],[6,2,1],[6,2,9],[6,2,2],[6,5,1],[6,8,10],[6,1,1],[6,12,2],[6,10,1],[6,4,2],[6,12,4],[6,1,3],[6,3,2],[6,8,1],[6,4,4],[6,12,5],[6,4,2],[6,10,1],[6,1,1],[6,12,1],[6,6,4],[6,2,1],[6,3,2],[6,1,1],[6,3,5],[6,6,1],[6,32,1],[6,10,1],[6,6,5],[6,27,2],[6,7,1],[6,2,1],[6,10,2],[6,5,1],[6,8,2],[6,3,2],[6,9,2],[6,22,1],[6,2,2],[6,10,1],[6,3,4],[6,1,1],[6,3,6],[6,8,2],[6,44,1],[6,1,1],[6,9,7],[6,9,5],[6,19,4],[6,7,1],[6,1,1],[6,10,1],[6,14,2],[6,4,3],[6,4,1],[6,6,1],[6,3,1],[6,4,1],[6,6,3],[6,6,2],[6,6,1],[6,1,3],[6,12,13],[6
,3,2],[6,1,4],[6,15,1],[6,39,4],[6,5,1],[6,1,5],[6,11,3],[6,5,7],[6,9,2],[6,1,1],[6,12,1],[6,12,1],[6,1,4],[6,11,1],[6,3,1],[6,6,2],[6,5,2],[6,2,1],[6,1,2],[6,2,1],[6,41,23],[6,3,1],[6,15,1],[6,1,1],[6,1,1],[6,2,2],[6,3,1],[6,10,1],[6,17,6],[6,5,2],[6,30,1],[7,2,2],[7,10,2],[7,8,3],[7,9,4],[7,4,1],[7,8,1],[7,2,1],[7,7,134],[7,16,1],[7,5,3],[7,3,1],[7,6,2],[7,1,1],[7,5,1],[7,5,1],[7,2,1],[7,24,1],[7,8,4],[7,9,2],[7,1,1],[7,6,2],[7,9,2],[7,1,1],[7,5,28],[7,1,1],[7,2,2],[7,7,2],[7,11,1],[7,2,1],[7,17,32],[7,5,1],[7,2,1],[7,3,2],[7,7,4],[7,15,3],[7,3,1],[7,6,2],[7,1,1],[7,2,1],[7,1,1],[7,1,11],[7,2,1],[7,8,1],[7,6,1],[7,2,1],[7,57,1],[7,20,46],[7,6,2],[7,6,1],[7,1,2],[7,28,7],[7,3,5],[7,4,1],[7,4,6],[7,2,2],[7,3,3],[7,2,3],[7,2,1],[7,1,1],[7,2,6],[7,4,1],[7,3,1],[7,23,1],[7,7,2],[7,7,1],[7,4,3],[7,2,1],[7,1,1],[7,4,2],[7,15,2],[7,6,1],[7,2,1],[7,14,1],[7,1,1],[7,1,1],[7,4,2],[7,2,1],[7,4,1],[7,2,1],[7,4,3],[7,22,1],[7,10,1],[7,2,1],[7,1,2],[7,7,2],[7,1,2],[7,12,1],[7,3,1],[7,2,4],[7,3,8],[7,2,1],[7,6,1],[7,5,3],[7,8,2],[7,5,1],[7,6,1],[7,6,1],[7,5,1],[7,9,5],[7,3,1],[7,3,2],[7,3,19],[7,28,3],[7,2,2],[7,3,1],[7,51,4],[7,2,1],[7,2,1],[7,22,2],[7,5,1],[7,2,1],[7,4,2],[7,2,1],[7,6,2],[7,6,1],[7,3,1],[7,37,1],[7,9,1],[7,8,2],[7,2,1],[7,4,1],[7,2,1],[7,18,1],[7,9,2],[7,1,1],[7,5,1],[7,2,1],[7,13,1],[7,45,1],[7,1,3],[7,7,5],[7,16,1],[7,7,1],[7,1,1],[7,3,1],[7,8,1],[7,1,1],[7,1,4],[7,2,2],[7,6,1],[7,6,1],[7,2,1],[7,16,1],[7,11,1],[7,1,1],[7,2,1],[7,3,2],[7,8,8],[7,33,1],[7,2,8],[7,4,1],[7,6,7],[7,12,3],[7,17,1],[7,9,5],[7,3,2],[7,3,2],[7,4,1],[7,1,1],[7,2,2],[7,6,1],[8,9,1],[8,79,3],[8,3,1],[8,14,4],[8,2,4],[8,10,5],[8,7,3],[8,8,1],[8,6,1],[8,7,1],[8,8,2],[8,9,1],[8,30,2],[8,1,1],[8,1,5],[8,15,2],[8,10,3],[8,5,3],[8,1,2],[8,3,1],[8,16,1],[8,3,1],[8,3,3],[8,3,4],[8,2,1],[8,6,2],[8,4,4],[8,5,3],[8,8,4],[8,8,3],[8,4,3],[8,13,7],[8,2,1],[8,2,1],[8,1,1],[8,4,1],[8,10,3],[8,16,9],[8,3,2],[8,1,2],[8,2,5],[8,5,2],[8,156,14],[8,1,1],[8,5,1],[8,252,690],[8,5,1],[8,25,21],[8,1,1],[8,39,12],[8,1,4],[8,6,1],[8,25,7],[8,1,1],[8,7,1],[8,46,11],[8,3,1],[8,1,1],[8,14,1],[8,24,1],[8,16,3],[8,6,3],[8,5,1],[8,1,2],[8,12,2],[8,2,1],[8,2,5],[8,6,1],[8,6,1],[8,14,1],[8,7,1],[8,6,1],[8,4,6],[8,1,2],[8,3,1],[8,2,14],[8,7,12],[8,2,2],[8,25,15],[8,8,3],[8,6,6],[8,5,1],[8,1,1],[8,2,3],[8,18,3],[8,2,2],[8,3,1],[8,4,1],[8,3,3],[8,4,2],[8,12,2],[8,1,1],[8,4,1],[8,18,1],[8,2,2],[8,11,3],[8,5,1],[8,6,1],[8,13,1],[8,6,1],[8,23,1],[8,18,3],[8,13,2],[8,4,1],[8,38,4],[8,1,1],[8,6,1],[8,10,2],[8,2,7],[8,10,7],[8,1,1],[8,4,7],[8,2,1],[8,2,2],[8,7,1],[8,17,1],[8,10,5],[8,4,4],[8,8,4],[8,3,2],[8,2,1],[8,33,1],[8,8,6],[8,15,1],[8,2,1],[8,7,4],[8,6,3],[8,2,1],[8,1,2],[8,3,1],[8,4,1],[8,4,2],[8,27,1],[8,10,1],[9,8,2],[9,2,2],[9,7,1],[9,11,1],[9,35,5],[9,3,1],[9,2,2],[9,6,7],[9,16,2],[9,7,15],[9,3,1],[9,9,1],[9,5,1],[9,3,1],[9,3,1],[9,4,1],[9,2,5],[9,1,1],[9,5,4],[9,1,1],[9,13,1],[9,14,4],[9,3,1],[9,35,3],[9,41,1],[9,8,3],[9,2,5],[9,8,2],[9,13,3],[9,10,1],[9,4,1],[9,35,12],[9,9,1],[9,12,1],[9,4,1],[9,2,4],[9,1,2],[9,6,4],[9,1,4],[9,20,3],[9,4,3],[9,3,3],[9,1,4],[9,2,11],[9,11,2],[9,19,1],[9,5,1],[9,6,2],[9,1,1],[9,3,1],[9,15,3],[9,2,1],[9,6,1],[9,13,1],[9,2,1],[9,11,2],[9,3,5],[9,6,1],[9,16,1],[9,4,1],[9,3,2],[9,3,1],[9,2,5],[9,13,1],[9,3,1],[9,2,2],[9,7,1],[9,2,3],[9,3,4],[9,5,1],[9,4,1],[9,10,2],[9,36,1],[9,7,2],[9,3,1],[9,4,2],[9,5,5],[9,12,1],[9,4,1],[9,2,2],[9,12,1],[9,13,1],[9,12,1],[9,2,4],[9,1,1],[9,1,2],[9,6,6],[9,1,2],[9,8,4],[9,7,2],[9,15,4],[10,3,25],[10,2,1],[10,4,2],[10,8,1],[10,2,1],[10,1,1],[10,21,1],[10,21,19],[10,4,4],[10,4,8],[
10,2,1],[10,1,3],[10,3,5],[10,6,1],[10,8,5],[10,4,1],[10,24,5],[10,2,2],[10,24,1],[10,6,4],[10,1,2],[10,25,1],[10,14,1],[10,6,3],[10,2,3],[10,6,1],[10,15,2],[10,54,3],[10,12,1],[10,21,1],[10,7,1],[10,4,4],[10,5,1],[10,10,3],[10,37,1],[10,8,3],[10,11,1],[10,2,4],[10,6,1],[10,30,1],[10,35,1],[10,4,2],[10,2,1],[10,5,2],[10,6,1],[10,4,4],[10,12,1],[10,12,1],[10,44,4],[10,16,3],[10,1,64],[10,27,1],[10,9,3],[10,17,2],[10,25,2],[10,2,2],[10,7,3],[10,89,1],[10,7,30],[10,2,4],[10,2,3],[10,2,1],[10,3,3],[10,11,1],[10,7,1],[10,2,1],[10,4,2],[10,1,1],[10,1,1],[10,6,2],[10,7,3],[10,4,1],[10,2,2],[10,18,1],[10,4,1],[10,19,1],[10,14,6],[10,5,1],[10,5,6],[10,12,1],[11,5,6],[11,15,8],[11,9,1],[11,3,2],[11,6,3],[11,24,4],[11,27,3],[11,2,2],[11,5,9],[11,13,1],[11,3,1],[11,2,25],[11,10,1],[11,4,11],[11,7,2],[11,49,1],[11,4,1],[11,12,1],[11,7,1],[11,1,2],[11,10,6],[11,2,1],[11,4,2],[11,1,2],[11,2,1],[11,5,1],[11,4,3],[11,1,1],[11,6,1],[11,4,3],[11,95,2],[11,8,1],[11,18,1],[11,5,1],[11,16,12],[11,13,2],[11,7,6],[11,56,1],[11,6,1],[11,8,1],[11,21,14],[11,2,7],[11,5,1],[11,1,1],[11,5,2],[11,2,1],[11,15,1],[11,3,3],[11,26,1],[11,6,6],[11,1,1],[11,10,7],[11,6,3],[11,6,1],[11,8,2],[11,1,2],[11,35,2],[11,19,2],[11,8,2],[11,4,1],[11,7,2],[11,4,5],[11,3,5],[11,17,1],[11,3,3],[11,2,1],[11,12,1],[11,2,8],[11,85,1],[11,4,1],[11,9,1],[11,2,2],[11,2,1],[11,6,2],[11,6,3],[11,18,3],[11,1,1],[11,8,1],[11,22,1],[11,7,1],[11,4,2],[11,4,1],[11,8,3],[11,10,4],[11,24,1],[11,10,19],[11,12,8],[12,5,1],[12,1,7],[12,4,1],[12,21,6],[12,12,2],[12,16,1],[12,1,1],[12,2,1],[12,3,1],[12,8,9],[12,1,1],[12,17,2],[12,16,6],[12,14,1],[12,3,3],[12,27,3],[12,2,1],[12,3,3],[12,14,4],[12,1,3],[12,10,1],[12,5,7],[12,7,3],[12,13,5],[12,4,1],[12,47,4],[12,18,1],[12,31,2],[12,8,1],[12,5,4],[12,1,1],[12,26,1],[12,13,2],[12,5,2],[12,4,3],[12,15,5],[12,2,1],[12,2,1],[12,3,1],[12,5,1],[12,11,1],[12,4,3],[12,1,1],[12,7,2],[12,6,1],[12,14,6],[12,32,4],[12,14,1],[12,31,1],[12,7,3],[12,9,7],[12,5,1],[12,6,1],[12,6,6],[12,7,8],[12,2,1],[12,3,1],[12,4,3],[12,1,1],[12,19,2],[12,11,1],[12,7,2],[12,8,1],[12,15,4],[12,5,1],[12,9,3],[12,2,1],[12,1,1],[12,8,9],[12,3,6],[12,15,1],[13,1,11],[13,7,2],[13,10,1],[13,13,4],[13,3,2],[13,1,2],[13,2,1],[13,3,4],[13,3,1],[13,4,3],[13,5,1],[13,10,13],[13,5,4],[13,2,3],[13,3,2],[13,72,2],[13,7,3],[13,19,2],[13,4,1],[13,5,6],[13,4,2],[13,2,1],[13,2,1],[13,34,11],[13,5,2],[13,9,5],[13,6,2],[13,5,5],[13,9,5],[13,9,1],[13,19,3],[13,4,1],[13,3,1],[13,7,2],[13,1,1],[13,11,7],[13,4,7],[13,6,1],[13,2,1],[13,1,1],[13,21,1],[13,6,15],[13,5,2],[13,1,1],[13,1,2],[14,2,1],[14,18,1],[14,8,2],[14,5,1],[14,2,2],[14,5,2],[14,2,1],[14,8,2],[14,4,1],[14,8,5],[14,14,1],[14,9,6],[14,18,2],[14,4,1],[14,6,1],[14,18,1],[14,6,6],[14,4,1],[14,6,2],[14,6,8],[14,3,1],[14,2,3],[14,1,1],[14,17,4],[14,4,3],[14,15,3],[14,4,8],[14,15,2],[14,6,1],[14,9,22],[14,7,3],[14,7,6],[14,2,2],[14,1,1],[14,7,4],[14,10,1],[14,1,1]])\n #data = 
np.array([[131,3,1],[49,1,1],[17,7,1],[55,7,19],[80,5,1],[40,2,2],[91,21,6],[19,16,1],[27,7,1],[15,50,2],[37,1,7],[17,3,1],[22,32,2],[68,2,1],[26,2,3],[15,2,3],[246,2,1],[25,2,1],[19,1,1],[98,1,2],[54,13,1],[168,2,4],[20,102,5],[40,2,1],[41,1,1],[44,19,16],[17,6,1],[92,12,1],[17,2,1],[16,5,3],[45,11,1],[20,10,1],[26,1,2],[21,9,9],[26,10,1],[187,4,2],[65,28,4],[17,9,33],[23,39,1],[58,4,4],[41,107,3],[28,3,1],[16,1,1],[17,16,4],[17,16,1],[17,5,1],[83,2,2],[17,1,2],[26,4,2],[22,7,2],[16,1,1],[15,2,1],[15,2,1],[111,8,1],[25,6,1],[112,4,1],[19,10,2],[38,25,4],[29,1,5],[17,2,1],[111,9,8],[53,5,4],[29,7,1],[25,8,2],[23,2,134],[32,6,1],[27,1,1],[61,4,2],[41,163,4],[57,11,2],[24,2,1],[16,18,1],[81,7,14],[169,5,1],[19,4,1],[412,5,1],[32,2,7],[19,28,3],[17,11,1],[44,4,5],[27,2,2],[18,1,7],[15,3,3],[18,10,1],[19,6,10],[46,2,5],[20,12,3],[25,6,4],[18,4,1],[15,40,8],[16,11,16],[237,1,1],[26,13,2],[26,4,1],[101,5,5],[50,2,1],[22,45,5],[16,7,2],[17,4,2],[19,2,3],[22,1,1],[260,6,1],[20,15,1],[24,5,1],[33,2,1],[16,1,5],[21,18,1],[22,1,1],[18,13,2],[124,3,1],[16,6,1],[19,6,2],[71,2,1],[232,2,2],[21,2,1],[231,11,1],[201,49,2],[28,12,1],[68,5,1],[56,26,7],[17,1,8],[19,10,2],[120,13,2],[218,3,1],[46,5,6],[57,4,1],[30,5,2],[17,8,4],[17,22,1],[15,5,1],[16,7,1],[26,13,1],[28,22,2],[100,1,2],[58,12,2],[52,9,11],[21,4,2],[18,4,1],[699,1,1],[401,6,3],[20,7,1],[20,3,13],[27,1,1],[35,2,2],[27,6,1],[15,13,1],[17,6,1],[26,28,4],[89,2,3],[36,11,2],[17,11,2],[15,1,1],[59,3,1],[15,3,1],[20,11,1],[49,1,1],[24,3,1],[25,7,1],[29,1,1],[61,2,2],[28,3,13],[82,2,8],[22,2,1],[21,25,3],[73,3,2],[22,8,1],[51,3,12],[16,6,1],[64,2,4],[22,2,2],[19,7,1],[69,2,1],[17,8,9],[19,1,13],[28,35,3],[134,2,1],[19,12,1],[27,13,1],[17,10,1],[16,17,4],[46,2,3],[15,1,2],[35,15,2],[20,6,1],[16,10,3],[33,11,1],[20,8,4],[15,5,1],[33,5,2],[460,6,1],[132,2,1],[73,14,3],[34,5,1],[123,1,2],[15,8,1],[30,1,1],[16,1,1],[73,3,1],[54,4,1],[17,1,9],[17,17,3],[22,1,3],[46,16,8],[18,1,1],[22,3,2],[21,4,1],[40,5,1],[19,2,1],[16,11,1],[19,4,1],[26,4,1],[87,1,3],[75,1,8],[25,1,1],[2230,5,1],[16,1,1],[17,10,3],[15,44,2],[79,3,1],[21,19,1],[292,5,13],[27,4,1],[25,2,1],[23,34,1],[36,2,1],[15,2,7],[18,3,3],[62,1,7],[16,61,5],[15,5,1],[36,5,1],[67,8,3],[18,4,1],[23,2,1],[16,21,3],[32,7,1],[22,6,1],[88,5,1],[19,2,4],[38,2,1],[47,6,28],[18,35,3],[159,15,1],[25,3,5],[295,9,4],[26,2,1],[27,8,3],[86,6,1],[24,25,4],[18,1,2],[16,6,1],[64,16,1],[39,1,2],[30,1,4],[44,1,3],[82,11,4],[28,13,2],[46,19,1],[15,26,1],[30,6,11],[51,3,6],[19,20,1],[940,6,4],[21,6,1],[29,2,1],[20,2,1],[31,2,1],[21,2,3],[25,27,1],[26,2,1],[17,4,1],[64,7,1],[126,7,15],[18,8,1],[20,13,2],[16,7,2],[18,2,1],[19,4,5],[29,1,1],[80,12,2],[42,14,6],[107,2,1],[15,4,1],[48,16,1],[62,3,2],[15,13,1],[29,48,7],[25,4,1],[17,5,20],[19,7,3],[22,10,3],[58,15,3],[17,14,1],[121,2,2],[33,64,11],[16,15,2],[39,6,2],[25,69,7],[69,2,1],[41,6,2],[20,5,1],[42,22,4],[18,17,4],[16,14,3],[27,14,1],[20,1,1],[44,1,101],[33,9,1],[26,2,8],[30,24,3],[27,24,2],[34,7,1],[39,6,3],[20,2,3],[55,5,1],[22,22,2],[17,2,1],[55,3,1],[29,10,5],[60,12,2],[18,13,3],[93,3,2],[15,3,1],[26,5,5],[18,1,1],[17,16,2],[15,13,3],[22,12,1],[256,19,27],[18,7,8],[22,3,1],[35,3,4],[16,2,1],[19,6,2],[24,1,1],[29,3,2],[36,21,8],[24,1,1],[18,6,2],[26,24,11],[19,15,2],[16,1,1],[28,4,1],[60,11,1],[62,4,2],[70,2,1],[75,1,2],[125,3,1],[21,6,1],[165,23,2],[108,1,1],[35,5,1],[251,19,12],[137,4,1],[81,11,4],[104,19,4],[18,18,3],[19,13,1],[18,112,5],[19,6,2],[28,7,2],[23,9,1],[20,15,7],[34,1,1],[24,12,3],[15,5,1],[40,9,4],[24,41,6],[35,1,1],[17,3,1],[17,3,4],[46,7,2],[21,8,10],[1
7,7,4],[36,6,1],[32,6,2],[31,1,1],[17,32,5],[26,3,4],[16,4,1],[21,2,1],[19,4,1],[33,4,1],[46,7,1],[28,9,1],[169,9,24],[24,18,2],[103,6,1],[93,1,1],[156,2,1],[58,7,1],[55,30,3],[15,5,1],[20,9,1],[19,20,1],[44,1,3],[16,2,1],[23,4,1],[22,10,1],[16,138,5],[17,2,1],[17,1,2],[70,8,5],[15,3,6],[22,6,1],[20,1,1],[35,2,4],[15,3,1],[26,119,46],[390,18,2],[22,4,1],[175,5,2],[23,4,1],[26,2,21],[17,1,2],[112,4,1],[18,22,5],[22,2,1],[122,13,1],[18,1,1],[27,7,1],[26,18,5],[18,1,3],[28,1,15],[35,11,1],[15,2,1],[55,6,5],[67,3,1],[30,5,7],[31,12,1],[16,9,12],[43,7,1],[23,21,1],[43,2,7],[53,40,1],[58,6,1],[29,27,11],[65,6,2],[27,4,2],[15,7,2],[17,26,13],[48,4,79],[30,2,6],[25,1,1],[20,20,6],[59,2,5],[15,14,4],[18,7,1],[18,2,1],[28,7,1],[35,1,1],[15,12,4],[52,2,2],[16,25,1],[91,1,1],[27,7,3],[62,4,1],[29,11,1],[25,4,3],[15,1,1],[40,6,2],[19,2,2],[24,14,2],[33,5,1],[58,3,3],[23,1,4],[15,2,2],[1263,4,1],[92,5,1],[17,2,1],[16,10,1],[50,8,1],[24,2,1],[73,1,1],[30,33,55],[18,15,1],[15,9,4],[23,1,3],[17,5,1],[43,3,1],[15,9,2],[19,4,2],[20,20,4],[31,1,2],[21,3,1],[79,9,13],[20,3,24],[56,2,1],[26,1,2],[15,3,1],[30,12,1],[64,6,1],[327,8,47],[39,2,1],[22,17,5],[18,6,3],[74,14,2],[17,4,1],[39,1,3],[520,9,3],[65,9,1],[36,1,4],[264,3,3],[16,1,1],[18,5,3],[22,16,3],[21,2,1],[15,3,3],[49,5,1],[37,19,2],[19,13,2],[30,1,1],[44,4,1],[19,9,31],[22,4,2],[21,4,5],[16,4,1],[40,17,1],[15,12,4],[43,4,3],[21,30,1],[60,16,3],[28,2,1],[38,16,2],[19,3,1],[68,18,4],[1,4,3],[1,9,1],[1,2,2],[1,1,4],[1,148,4],[1,6,1],[1,16,1],[1,4,1],[1,19,3],[1,7,3],[1,2,2],[1,4,2],[1,47,5],[1,2,2],[1,1,4],[1,1,2],[1,1,2],[1,1,1],[1,4,2],[1,7,1],[1,4,6],[1,2,1],[1,5,4],[1,9,3],[1,9,2],[1,7,1],[1,4,1],[1,10,2],[1,1,1],[1,5,1],[1,5,1],[1,2,16],[1,2,1],[1,1,1],[1,3,2],[1,8,3],[1,1,18],[1,5,1],[1,14,3],[1,6,6],[1,7,1],[1,1,1],[1,16,1],[1,2,1],[1,2,1],[1,1,2],[1,4,4],[1,4,1],[1,9,1],[1,25,7],[1,1,1],[1,8,2],[1,1,4],[1,77,8],[1,1,3],[1,6,3],[1,4,2],[1,2,2],[1,2,1],[1,40,1],[1,26,3],[1,1,4],[1,1,1],[1,2,2],[1,1,2],[1,15,1],[1,35,86],[1,3,2],[1,4,1],[1,2,1],[1,4,3],[1,30,1],[1,2,1],[1,4,2],[1,2,1],[1,1,1],[1,2,1],[1,3,1],[1,2,3],[1,3,1],[1,14,1],[1,3,2],[1,7,4],[1,6,2],[1,2,1],[1,23,2],[1,4,1],[1,4,3],[1,26,3],[1,47,15],[1,3,5],[1,5,1],[1,3,1],[1,2,1],[1,2,1],[1,3,1],[1,36,1],[1,2,1],[1,1,9],[1,6,1],[1,2,1],[1,8,3],[1,7,1],[1,33,2],[1,14,4],[1,13,3],[1,2,1],[1,5,1],[1,7,2],[1,9,3],[1,6,1],[1,3,1],[1,9,1],[1,2,2],[1,2,1],[1,6,3],[1,4,2],[1,2,1],[1,1,1],[1,13,4],[1,9,2],[1,4,2],[1,7,14],[1,8,1],[1,3,1],[1,25,2],[1,2,1],[1,11,1],[1,2,1],[1,1,1],[1,3,3],[1,3,2],[1,2,1],[1,2,1],[1,2,8],[1,9,1],[1,13,9],[1,3,1],[1,8,1],[1,102,71],[1,22,1],[1,2,3],[1,22,2],[1,1,1],[1,3,1],[1,12,1],[1,3,2],[1,1,1],[1,5,2],[1,30,6],[1,14,1],[1,2,1],[1,1,1],[1,5,1],[1,8,1],[1,4,2],[1,3,1],[1,2,1],[1,1,1],[1,1,1],[1,12,1],[1,14,1],[1,10,2],[1,22,3],[1,15,2],[1,4,2],[1,5,1],[1,10,2],[1,10,26],[1,1,2],[1,1,2],[1,17,1],[1,1,1],[1,7,1],[1,1,1],[1,8,2],[1,5,2],[1,15,1],[1,16,2],[1,7,1],[1,26,1],[1,16,2],[1,13,6],[1,3,3],[1,2,1],[1,2,1],[1,5,3],[1,1,1],[1,4,1],[1,1,1],[1,2,2],[1,13,4],[1,50,2],[1,12,3],[1,2,1],[1,16,5],[1,2,8],[1,3,5],[1,1,1],[1,25,1],[1,5,1],[1,13,2],[1,1,2],[1,8,1],[1,13,1],[1,4,4],[1,2,3],[1,7,2],[1,2,4],[1,2,1],[1,1,2],[1,4,1],[1,3,2],[1,8,4],[1,4,1],[1,2,2],[1,2,1],[1,3,1],[1,7,1],[1,8,5],[1,34,4],[1,2,3],[1,1,1],[1,8,3],[1,3,1],[1,26,2],[1,3,1],[1,1,6],[1,2,4],[1,7,1],[1,9,2],[1,3,93],[1,2,1],[1,3,2],[1,3,3],[1,15,3],[1,12,1],[1,1,1],[1,1,5],[1,4,1],[1,1,4],[1,2,1],[1,6,4],[1,9,1],[1,1,9],[1,11,1],[1,68,2],[1,7,1],[1,11,1],[1,6,1],[1,5,2],[1,2,1],[1,19,1],[1,3,1],[1,1,2],[1,3
7,1],[1,19,1],[1,4,5],[1,8,1],[1,1,1],[1,7,1],[1,3,1],[1,4,1],[1,6,7],[1,2,1],[1,14,3],[1,4,1],[1,6,5],[1,1,1],[1,1,1],[1,2,1],[1,1,2],[1,7,2],[1,8,1],[1,17,136],[1,6,1],[1,3,2],[1,9,12],[1,7,2],[1,2,9],[1,1,4],[1,3,1],[1,10,1],[1,6,16],[1,8,1],[1,2,2],[1,2,2],[1,4,3],[1,3,3],[1,24,3],[1,68,28],[1,16,1],[1,9,2],[1,1,2],[1,18,7],[1,3,1],[1,5,2],[1,1,3],[1,3,1],[1,3,8],[1,73,5],[1,6,3],[1,5,1],[1,2,1],[1,15,7],[1,80,2],[1,3,1],[1,12,3],[1,8,1],[1,2,1],[1,9,5],[1,3,2],[1,319,20],[1,2,1],[1,4,6],[1,5,4],[1,25,1],[1,8,1],[1,6,5],[1,18,1],[1,2,2],[1,5,2],[1,10,1],[1,10,1],[1,2,1],[1,6,2],[1,7,2],[1,39,1],[1,7,79],[1,28,4],[1,2,1],[1,4,1],[1,25,5],[1,23,3],[1,10,3],[1,2,1],[1,13,1],[1,2,2],[1,6,1],[1,6,4],[1,12,1],[1,4,1],[1,3,1],[1,10,1],[1,4,2],[1,7,1],[1,11,1],[1,6,1],[1,4,2],[1,3,3],[1,1,1],[1,1,1],[1,3,3],[1,3,2],[1,15,1],[1,1,1],[1,1,4],[1,26,2],[1,1,1],[1,7,1],[1,4,63],[1,1,19],[1,96,7],[1,7,2],[1,6,1],[1,4,1],[1,18,2],[1,1,2],[1,4,1],[1,3,3],[1,18,1],[1,3,1],[1,14,1],[1,6,2],[1,13,1],[1,1,5],[1,13,2],[1,1,1],[1,4,4],[1,10,1],[1,2,1],[1,12,3],[1,7,1],[1,8,1],[1,3,1],[1,2,2],[1,4,5],[1,9,1],[1,2,1],[1,2,1],[1,6,8],[1,32,3],[1,3,2],[1,6,1],[1,5,1],[1,7,1],[1,4,2],[1,2,1],[1,5,4],[1,1,2],[1,9,1],[1,2,1],[1,11,1],[1,5,2],[1,2,1],[1,1,1],[1,3,1],[1,7,13],[1,4,4],[1,1,1],[1,6,1],[1,1,3],[1,6,6],[1,6,1],[1,4,4],[1,10,1],[1,15,1],[1,3,7],[1,6,1],[1,9,1],[1,14,23],[1,14,2],[1,6,3],[1,2,1],[1,9,1],[1,1,3],[1,6,4],[1,15,2],[1,8,1],[1,6,6],[1,16,10],[1,5,4],[1,30,3],[1,7,1],[1,4,1],[1,3,1],[1,6,6],[1,1,2],[1,3,2],[1,1,1],[1,1,1],[1,1,1],[1,2,5],[1,2,1],[1,2,5],[1,24,1],[1,3,1],[1,6,1],[1,2,1],[1,4,1],[1,2,2],[1,4,1],[1,1,1],[1,3,1],[1,8,2],[1,4,2],[1,2,2],[1,2,1],[1,12,6],[1,2,1],[1,32,42],[1,7,1],[1,7,1],[1,12,1],[1,2,1],[1,6,1],[1,42,1],[1,2,1],[1,1,2],[1,2,1],[1,6,1],[1,2,2],[1,8,1],[1,22,4],[1,1,1],[1,11,20],[1,6,2],[1,2,1],[1,4,2],[1,9,1],[1,10,1],[1,16,5],[1,3,2],[1,8,1],[1,6,3],[1,1,2],[1,6,1],[1,2,1],[1,28,1],[1,18,1],[1,17,8],[1,4,1],[1,2,2],[1,13,1],[1,25,3],[1,7,4],[1,3,1],[1,1,1],[1,3,3],[1,4,1],[1,7,5],[1,2,2],[1,5,1],[1,2,2],[1,2,2],[1,14,1],[1,3,3],[1,4,1],[1,1,2],[1,11,1],[1,2,1],[1,6,1],[1,7,6],[1,7,1],[1,2,2],[1,2,1],[1,31,4],[1,4,3],[1,14,6],[1,4,4],[1,1,1],[1,2,1],[1,12,5],[1,4,1],[1,7,1],[1,3,1],[1,4,1],[1,11,1],[1,12,1],[1,3,2],[1,9,1],[1,17,2],[1,9,5],[1,6,1],[1,13,2],[1,5,1],[1,4,3],[1,3,1],[1,1,4],[1,7,1],[1,4,1],[1,3,1],[1,56,3],[1,1,1],[1,9,1],[1,4,1],[1,15,1],[1,2,1],[1,12,1],[1,4,2],[1,1,1],[1,1,1],[1,149,2],[1,56,1],[1,4,5],[1,2,2],[1,11,3],[1,2,3],[1,1,2],[1,2,1],[1,15,4],[1,2,2],[1,4,1],[1,17,2],[1,10,5],[1,14,2],[1,8,2],[1,4,2],[1,4,1],[1,6,1],[1,5,1],[1,7,2],[1,20,5],[1,3,1],[1,4,1],[1,11,1],[1,2,1],[1,1,3],[1,5,2],[1,6,1],[1,4,3],[1,4,3],[1,4,2],[1,7,3],[1,5,1],[1,1,1],[1,2,1],[1,8,1],[1,7,1],[1,2,1],[1,1,1],[1,1,1],[1,4,3],[1,11,1],[1,43,1],[1,7,8],[1,8,1],[1,1,1],[1,8,6],[1,9,3],[1,19,1],[1,2,1],[1,43,3],[1,4,5],[1,2,3],[1,4,1],[1,17,1],[1,9,1],[1,8,72],[1,2,1],[1,4,2],[1,16,1],[1,15,1],[1,8,1],[1,3,1],[1,7,8],[1,4,1],[1,23,2],[1,1,2],[1,1,1],[1,15,7],[1,7,4],[1,3,4],[1,5,1],[1,1,1],[1,6,83],[1,1,1],[1,4,3],[1,2,1],[1,3,2],[1,9,2],[1,5,1],[1,22,1],[1,3,6],[1,6,4],[1,4,1],[1,1,4],[1,1,1],[1,5,3],[1,1,2],[1,15,2],[1,8,1],[1,5,2],[1,1,1],[1,4,10],[1,63,1],[1,2,2],[1,2,1],[1,9,1],[1,4,3],[1,2,1],[1,24,1],[1,2,2],[1,2,2],[1,6,2],[1,13,5],[1,34,5],[1,10,1],[1,3,1],[1,22,9],[1,41,1],[1,1,4],[1,13,2],[1,18,1],[1,4,4],[1,7,1],[1,4,3],[1,14,4],[1,3,2],[1,2,1],[1,7,10],[1,15,3],[1,6,1],[1,1,1],[1,2,5],[1,4,10],[1,5,2],[1,12,6],[1,6,1],[1,19,134],[1,11,1],[1,233,9],[1,4,2],[1,4
0,1],[1,2,1],[1,10,1],[1,3,1],[1,3,1],[1,3,1],[1,35,1],[1,2,7],[1,1,3],[1,3,1],[1,14,2],[1,1,1],[1,7,1],[1,6,5],[1,10,1],[1,5,3],[1,8,1],[1,11,1],[1,13,1],[1,8,9],[1,5,1],[1,3,1],[1,11,1],[1,2,1],[1,5,1],[1,7,1],[1,9,3],[1,2,3],[1,2,2],[1,29,2],[1,2,1],[1,4,3],[1,1,2],[1,2,2],[1,3,6],[1,11,1],[1,1,1],[1,11,1],[1,4,1],[1,6,1],[1,3,5],[1,4,1],[1,4,3],[1,34,1],[1,4,2],[1,1,9],[1,18,1],[1,9,3],[1,15,1],[1,4,4],[1,4,2],[1,9,1],[1,4,1],[1,10,1],[1,2,1],[1,2,4],[1,4,1],[1,1,2],[1,3,3],[1,2,1],[1,47,14],[1,3,1],[1,2,1],[1,3,1],[1,1,1],[1,20,1],[1,14,6],[1,2,2],[1,16,2],[1,2,1],[1,1,31],[1,5,9],[1,10,2],[1,10,3],[1,19,1],[1,1,1],[1,13,2],[1,5,1],[1,1,2],[1,1,2],[1,24,1],[1,9,2],[1,4,1],[1,10,3],[1,35,6],[1,1,1],[1,2,1],[1,1,1],[1,3,1],[1,4,5],[1,4,1],[1,1,1],[1,4,1],[1,10,2],[1,55,6],[1,3,22],[1,28,4],[1,6,3],[1,10,1],[1,6,187],[1,3,2],[1,12,5],[1,7,1],[1,4,1],[1,2,2],[1,2,1],[1,31,9],[1,2,8],[1,20,2],[1,36,2],[1,2,2],[1,15,5],[1,5,2],[1,3,2],[1,8,1],[1,1,1],[1,2,1],[1,37,1],[1,17,4],[1,8,1],[1,19,2],[1,7,1],[1,1,1],[1,1,1],[1,2,1],[1,9,1],[1,2,1],[1,2,1],[1,2,1],[1,19,1],[1,33,3],[1,4,1],[1,7,1],[1,3,1],[1,46,4],[1,2,1],[1,3,2],[1,1,2],[1,2,2],[1,14,1],[1,3,1],[1,11,2],[1,2,2],[1,21,2],[1,34,2],[1,4,1],[1,1,1],[1,2,1],[1,22,1],[1,64,9],[1,21,10],[1,3,3],[1,6,1],[1,16,2],[1,3,1],[1,31,4],[1,1,1],[1,1,2],[1,1,1],[1,3,1],[1,5,4],[1,27,1],[1,1,1],[1,2,2],[1,17,10],[1,4,1],[1,25,1],[1,41,1],[1,18,4],[1,17,40],[1,9,1],[1,2,1],[1,7,1],[1,21,2],[1,2,3],[1,3,1],[1,14,1],[1,8,2],[1,2,1],[1,2,2],[1,5,1],[1,1,2],[1,4,1],[1,6,5],[1,9,17],[1,5,1],[1,6,1],[1,4,1],[1,1,1],[1,3,1],[1,61,9],[1,6,1],[1,9,2],[1,2,2],[1,9,1],[1,7,4],[1,12,1],[1,2,2],[1,40,1],[1,17,13],[1,1,7],[1,11,2],[1,20,2],[1,2,1],[1,1,1],[1,12,10],[1,5,3],[1,2,1],[1,1,1],[1,23,2],[1,9,3],[1,4,1],[1,5,2],[1,4,1],[1,19,5],[1,5,1],[1,1,4],[1,5,1],[1,8,1],[1,9,1],[1,5,3],[1,43,3],[1,1,2],[1,3,1],[1,2,2],[1,15,38],[1,3,1],[1,25,1],[1,1,4],[1,5,6],[1,2,1],[1,4,3],[1,4,2],[1,3,1],[1,9,1],[1,4,1],[1,13,2],[1,7,4],[1,2,6],[1,12,1],[1,8,3],[1,1,4],[1,13,1],[1,3,4],[1,3,2],[1,2,2],[1,4,1],[1,6,1],[1,14,3],[1,7,1],[1,8,1],[1,8,1],[1,3,1],[1,32,5],[1,16,2],[1,2,3],[1,38,1],[1,5,4],[1,10,2],[1,2,7],[1,3,1],[1,8,1],[1,3,2],[1,1,3],[1,4,2],[1,71,12],[1,8,4],[1,2,12],[1,3,1],[1,12,2],[1,2,1],[1,5,1],[1,2,28],[1,19,5],[1,10,1],[1,9,2],[1,3,1],[1,7,6],[1,11,1],[1,2,1],[1,27,2],[1,7,4],[1,4,2],[1,12,8],[1,8,96],[1,12,1],[1,2,4],[1,965,1303],[1,7,5],[1,15,3],[1,3,2],[1,18,2],[1,25,3],[1,7,2],[1,18,2],[1,6,1],[1,10,2],[1,4,1],[1,1,3],[1,5,1],[1,19,2],[1,8,1],[1,50,4],[1,8,1],[1,11,1],[1,9,1],[1,2,1],[1,2,5],[1,3,1],[1,6,2],[1,1,1],[1,13,5],[1,19,1],[1,7,2],[1,17,1],[1,6,1],[1,4,1],[1,7,3],[1,13,3],[1,7,4],[1,5,2],[1,4,1],[1,11,16],[1,7,1],[1,1,1],[1,2,1],[1,2,1],[1,14,3],[1,30,1],[1,2,6],[1,6,2],[1,3,1],[1,4,1],[1,9,11],[1,6,1],[1,35,1],[1,2,8],[1,1,2],[1,3,2],[1,1,1],[1,9,1],[1,2,57],[1,2,1],[1,5,1],[1,4,2],[1,15,1],[1,12,3],[1,4,3],[1,17,1],[1,12,2],[1,21,12],[1,2,1],[1,9,1],[1,9,47],[1,49,4],[1,5,1],[1,4,1],[1,24,1],[1,2,2],[1,64,2],[1,48,7],[1,2,2],[1,10,2],[1,3,1],[1,11,1],[1,5,1],[1,1,2],[1,2,4],[1,6,1],[1,19,6],[1,6,2],[1,3,2],[1,1,1],[1,22,2],[1,3,2],[1,5,14],[1,2,1],[1,11,1],[1,4,2],[1,6,1],[1,24,10],[1,7,1],[1,2,74],[1,6,1],[1,28,1],[1,1,1],[1,1,1],[1,10,1],[1,88,4],[1,9,4],[1,26,1],[1,3,1],[1,4,1],[1,4,1],[1,6,1],[1,23,1],[1,2,7],[1,1,3],[1,7,1],[1,1,1],[1,5,2],[1,4,1],[1,2,1],[1,1,1],[1,15,5],[1,22,1],[1,6,3],[1,12,2],[1,48,14],[1,7,1],[1,5,1],[1,10,5],[1,5,1],[1,6,5],[1,2,3],[1,14,3],[1,3,1],[1,8,4],[1,2,5],[1,34,3],[1,2,1],[1,4,1],[1,6,7],[1,3,1],[1,3,3],
[1,32,2],[1,3,1],[1,3,1],[1,2,1],[1,3,1],[1,39,8],[1,1,1],[1,15,8],[1,3,4],[1,2,3],[1,1,3],[1,38,18],[1,6,1],[1,25,4],[1,2,1],[1,8,1],[1,3,1],[1,24,1],[1,5,5],[1,5,4],[1,2,3],[1,2,1],[1,5,4],[1,51,1],[1,23,3],[1,2,1],[1,2,1],[1,1,2],[1,7,2],[1,3,1],[1,1,1],[1,4,1],[1,2,1],[1,7,6],[1,8,1],[1,11,1],[1,2,6],[1,2,1],[1,2,1],[1,1,1],[1,26,1],[1,3,1],[1,2,1],[1,2,1],[1,2,1],[1,12,2],[1,1,3],[1,3,1],[1,2,4],[1,19,3],[1,3,1],[1,3,2],[1,49,3],[1,2,1],[1,21,3],[1,1,1],[1,5,1],[1,4,1],[1,2,2],[1,2,1],[1,1,1],[1,7,4],[1,2,1],[1,2,1],[1,2,1],[1,3,2],[1,26,2],[1,9,1],[1,2,2],[1,12,1],[1,4,32],[1,4,1],[1,17,1],[1,1,2],[1,77,4],[1,2,1],[1,12,1],[1,2,1],[1,2,4],[1,5,2],[1,10,3],[1,4,3],[1,2,1],[1,1,3],[1,16,4],[1,3,1],[1,40,2],[1,13,1],[1,2,1],[1,6,2],[1,12,2],[1,6,11],[1,6,1],[1,1,1],[1,10,6],[1,1,1],[1,6,5],[1,38,4],[1,2,7],[1,9,1],[1,5,2],[1,3,1],[1,2,1],[1,5,2],[1,4,1],[1,1,1],[1,1,1],[1,4,2],[1,4,3],[1,5,2],[1,1,4],[1,11,4],[1,14,4],[1,4,1],[1,17,2],[1,2,2],[1,39,1],[1,9,21],[1,14,2],[1,4,4],[1,4,3],[1,9,2],[1,1,1],[1,3,2],[1,1,1],[1,1,7],[1,16,4],[1,5,1],[1,2,1],[1,2,1],[1,2,1],[1,98,19],[1,4,1],[1,1,1],[1,5,1],[1,7,1],[1,1,3],[1,9,1],[1,4,2],[1,2,1],[1,7,2],[1,2,1],[1,1,2],[1,1,1],[1,5,2],[1,6,1],[1,11,6],[1,5,4],[1,40,5],[1,1,2],[1,9,1],[1,2,1],[1,6,1],[1,5,1],[1,11,2],[1,4,1],[1,3,17],[1,1,1],[1,1,5],[1,9,5],[1,60,1],[1,3,7],[1,3,4],[1,5,1],[1,3,10],[1,5,2],[1,7,1],[1,2,1],[1,14,14],[1,4,3],[1,1,2],[1,2,4],[1,5,1],[1,11,7],[1,3,1],[1,29,3],[1,2,4],[1,8,1],[1,53,1],[1,10,1],[1,7,2],[1,2,13],[1,58,1],[1,5,6],[1,2,1],[1,4,2],[1,4,2],[1,4,2],[1,5,2],[1,2,3],[1,12,2],[1,4,6],[1,34,1],[1,1,1],[1,8,1],[1,4,1],[1,2,1],[1,2,2],[1,16,1],[1,4,2],[1,3,13],[1,2,2],[1,46,2],[1,4,1],[1,6,1],[1,1,2],[1,2,1],[1,3,6],[1,3,1],[1,19,1],[1,2,1],[1,23,1],[1,3,1],[1,1,1],[1,7,2],[1,4,4],[1,18,3],[1,1,1],[1,7,2],[1,2,2],[1,7,1],[1,2,1],[1,2,1],[1,6,1],[1,9,4],[1,3,1],[1,5,1],[1,13,1],[1,2,2],[1,33,1],[1,12,1],[1,9,3],[1,2,1],[1,1,1],[1,18,1],[1,1,3],[1,3,15],[1,2,4],[1,17,1],[1,1,1],[1,1,1],[1,4,8],[1,1,2],[1,31,19],[1,1,5],[1,7,6],[1,12,4],[1,2,4],[1,7,8],[1,4,2],[1,13,2],[1,19,18],[1,42,4],[1,3,1],[1,17,1],[1,3,3],[1,4,2],[1,12,1],[1,1,6],[1,23,2],[1,3,1],[1,20,1],[1,21,4],[1,1,1],[1,3,2],[1,10,1],[1,9,1],[1,8,6],[1,21,3],[1,5,1],[1,7,6],[1,2,1],[1,5,1],[1,1,2],[1,11,1],[1,8,212],[1,9,3],[1,6,1],[1,1,2],[1,25,12],[1,4,1],[1,14,15],[1,4,1],[1,13,1],[1,2,2],[1,3,1],[1,4,1],[1,3,1],[1,1,1],[1,3,1],[1,9,7],[1,1,1],[1,6,1],[1,8,2],[1,8,1],[1,2,3],[1,3,1],[1,2,3],[1,1,2],[1,10,1],[1,6,1],[1,12,3],[1,12,1],[1,1,1],[1,2,1],[1,2,4],[1,4,1],[1,2,1],[1,1,1],[1,4,1],[1,23,2],[1,4,2],[1,20,1],[1,17,4],[1,8,2],[1,4,6],[1,4,1],[1,6,1],[1,10,1],[1,6,2],[1,1,1],[1,3,1],[1,4,1],[1,4,1],[1,16,143],[1,7,1],[1,10,1],[1,7,2],[1,3,3],[1,8,3],[1,2,1],[1,49,1],[1,2,7],[1,14,4],[1,31,3],[1,29,1],[1,31,8],[1,5,2],[1,7,1],[1,1,1],[1,4,5],[1,1,1],[1,7,3],[1,1,2],[1,5,3],[1,3,1],[1,7,4],[1,129,9],[1,13,1],[1,11,4],[1,6,28],[1,6,1],[1,6,1],[1,20,1],[1,2,1],[1,16,3],[1,3,3],[1,5,1],[1,64,1],[1,4,2],[1,7,1],[1,21,3],[1,2,2],[1,9,1],[1,2,1],[1,5,6],[1,6,6],[1,3,1],[1,5,1],[1,3,1],[1,3,1],[1,6,2],[1,2,3],[1,4,1],[1,1,1],[1,12,37],[1,6,1],[1,1,1],[1,4,2],[1,4,8],[1,6,2],[1,2,2],[1,19,1],[1,1,1],[1,1,3],[1,3,1],[1,4,5],[1,15,2],[1,8,3],[1,1,1],[1,2,2],[1,3,1],[1,10,1],[1,4,1],[1,1,2],[1,19,1],[1,5,2],[1,4,4],[1,3,2],[1,3,17],[1,1,1],[1,1,1],[1,2,1],[1,18,3],[1,3,1],[1,16,4],[1,5,1],[1,11,2],[1,19,8],[1,2,1],[1,2,1],[1,1,6],[1,3,1],[1,2,1],[1,1,1],[1,2,1],[1,11,3],[1,17,4],[1,4,1],[1,4,4],[1,5,2],[1,1,1],[1,1,2],[1,10,12],[1,2,2],[1,8,1],[1,1,2],[1,8,1],[1,
17,2],[1,2,1],[1,4,1],[1,6,1],[1,20,21],[1,5,7],[1,3,1],[1,13,2],[1,3,6],[1,8,3],[1,12,1],[1,12,2],[1,3,2],[1,15,2],[1,6,1],[1,9,5],[1,5,3],[1,4,1],[1,7,4],[1,4,4],[1,9,4],[1,11,1],[1,3,1],[1,17,1],[1,71,5],[1,7,1],[1,3,1],[1,5,1],[1,1,1],[1,1,2],[1,2,1],[1,1,2],[1,10,2],[1,3,1],[1,2,2],[1,5,1],[1,28,4],[1,2,1],[1,1,1],[1,9,1],[1,3,2],[1,8,2],[1,13,1],[1,2,1],[1,6,1],[1,25,79],[1,30,24],[1,10,31],[1,5,1],[1,9,1],[1,1,1],[1,4,1],[1,118,14],[1,18,3],[1,30,1],[1,10,3],[1,5,1],[1,5,1],[1,1,1],[1,6,1],[1,9,3],[1,6,2],[1,5,1],[1,2,2],[1,3,1],[1,7,4],[1,8,2],[1,10,2],[1,1,8],[1,41,1],[1,21,4],[1,6,1],[1,13,3],[1,5,1],[1,34,7],[1,22,1],[1,9,8],[1,5,3],[1,11,1],[1,2,1],[1,6,1],[1,4,1],[1,72,1],[1,44,3],[1,2,1],[1,1,1],[1,3,1],[1,8,2],[1,1,3],[1,14,1],[1,3,2],[1,1,1],[1,9,2],[1,17,1],[1,9,35],[1,3,1],[1,6,1],[1,2,11],[1,5,3],[1,1257,55],[1,1,1],[1,2,1],[1,14,7],[1,51,44],[1,3,6],[1,1,1],[1,6,2],[1,2,1],[1,11,2],[1,8,3],[1,3,2],[1,3,3],[1,4,1],[1,2,1],[1,5,1],[1,8,5],[1,60,1],[1,6,3],[1,36,2],[1,1,1],[1,2,1],[1,10,2],[1,26,2],[1,7,3],[1,6,1],[1,6,2],[1,3,3],[1,2,3],[1,6,2],[1,2,2],[1,2,2],[1,5,2],[1,2,1],[1,15,5],[1,1,2],[1,1,3],[1,37,24],[1,8,2],[1,17,2],[1,31,1],[1,14,2],[1,2,1],[1,16,2],[1,3,1],[1,2,2],[1,1,2],[1,2,3],[1,4,2],[1,1,1],[1,9,5],[1,1,2],[1,1,4],[1,4,18],[1,6,1],[1,12,1],[1,3,85],[1,17,2],[1,4,1],[1,7,1],[1,4,1],[1,3,1],[1,22,2],[1,1,1],[1,15,27],[1,4,1],[1,1,1],[1,1,3],[1,3,1],[1,35,2],[1,1,1],[1,33,4],[1,2,1],[1,3,3],[1,6,1],[1,9,1],[1,8,1],[1,6,1],[1,16,2],[1,20,2],[1,5,1],[1,1,5],[1,2,2],[1,12,25],[1,6,1],[1,13,1],[1,2,1],[1,2,1],[1,10,1],[1,2,1],[1,37,3],[1,2,1],[1,58,11],[1,14,3],[1,6,1],[1,6,1],[1,1,3],[1,1,1],[1,9,2],[1,1,502],[1,45,5],[1,5,1],[1,4,1],[1,2,8],[1,5,1],[1,1,1],[1,7,1],[1,4,1],[1,3,4],[1,1,1],[1,10,1],[1,9,1],[1,13,1],[1,10,8],[1,4,4],[1,7,1],[1,1,2],[1,2,2],[1,9,2],[1,13,2],[1,8,1],[1,1,1],[1,2,4],[1,29,1],[1,8,2],[1,7,3],[1,30,7],[1,1,1],[1,10,10],[1,3,1],[1,1,1],[1,5,1],[1,4,3],[1,7,1],[1,43,8],[1,1,2],[1,9,1],[1,1,1],[1,3,6],[1,9,1],[1,1,1],[1,7,1],[1,6,1],[1,2,2],[1,13,4],[1,13,3],[1,2,3],[1,8,1],[1,11,2],[1,9,53],[1,2,1],[1,16,1],[1,6,3],[1,48,3],[1,4,1],[1,7,3],[1,2,2],[1,8,1],[1,8,1],[1,26,2],[1,3,1],[1,8,2],[1,121,2],[1,2,2],[1,8,1],[1,2,2],[1,4,2],[1,8,1],[1,1,1],[1,4,1],[1,3,3],[1,7,1],[1,7,2],[1,2,1],[1,8,2],[1,34,28],[1,3,2],[1,3,1],[1,5,1],[1,9,1],[1,7,1],[1,14,4],[1,1,1],[1,34,4],[1,1,1],[1,6,1],[1,3,1],[1,2,1],[1,4,1],[1,5,2],[1,10,1],[1,41,5],[1,7,2],[1,19,4],[1,3,3],[1,12,3],[1,7,1],[1,4,2],[1,16,1],[1,3,1],[1,8,4],[1,9,2],[1,8,2],[1,2,1],[1,10,2],[1,8,1],[1,16,2],[1,7,2],[1,5,1],[1,2,3],[1,15,4],[1,3,5],[1,4,4],[1,1,1],[1,3,2],[1,5,1],[1,8,4],[1,4,1],[1,41,7],[1,2,1],[1,1,3],[1,1,6],[1,2,1],[1,10,2],[1,10,2],[1,3,3],[1,39,4],[1,1,2],[1,5,7],[1,12,2],[1,15,5],[1,4,1],[1,13,1],[1,3,1],[1,44,3],[1,1,2],[1,1,1],[1,6,1],[1,3,1],[1,3,2],[1,7,15],[1,1,1],[1,11,4],[1,3,1],[1,1,3],[1,1,1],[1,2,1],[1,9,4],[1,22,1],[1,46,2],[1,3,18],[1,22,8],[1,3,1],[1,4,10],[1,12,16],[1,2,1],[1,8,3],[1,1,1],[1,2,4],[1,1,1],[1,6,4],[1,7,1],[1,7,4],[1,14,4],[1,1,1],[1,13,2],[1,61,1],[1,6,2],[1,16,1],[1,14,7],[1,9,2],[1,18,2],[1,9,3],[1,1,2],[1,4,1],[1,6,1],[1,6,4],[1,10,1],[1,5,2],[1,7,1],[1,3,1],[1,11,2],[1,53,1],[1,10,2],[1,17,1],[1,2,2],[1,5,14],[1,17,1],[1,2,1],[1,5,1],[1,28,2],[1,8,2],[1,4,1],[1,4,2],[1,21,1],[1,3,1],[1,3,2],[1,5,2],[1,5,1],[1,3,13],[1,13,2],[1,124,753],[1,2,2],[1,43,1],[1,6,1],[1,2,2],[1,11,1],[1,22,1],[1,5,2],[1,5,1],[1,8,1],[1,2,4],[1,2,2],[1,9,1],[1,6,1],[1,2,1],[1,6,1],[1,14,3],[1,21,1],[1,3,4],[1,3,3],[1,3,1],[1,2,2],[1,2,2],[1,5,2],[1,11,1],[1,6,
1],[1,3,1],[1,64,1],[1,6,1],[1,2,12],[1,5,1],[1,6,4],[1,10,1],[1,14,1],[1,14,1],[1,2,1],[1,2,1],[1,8,4],[1,17,2],[1,5,3],[1,64,1],[1,33,3],[1,18,2],[1,1,1],[1,42,9],[1,20,2],[1,10,2],[1,2,2],[1,3,1],[1,13,1],[1,5,1],[1,39,5],[1,8,2],[1,6,1],[1,3,2],[1,12,1],[1,2,4],[1,8,1],[1,2,1],[1,4,5],[1,7,1],[1,2,1],[1,2,1],[1,5,2],[1,15,3],[1,6,1],[1,1,1],[1,11,2],[1,4,2],[1,1,1],[1,7,3],[1,7,2],[1,3,1],[1,3,1],[1,2,1],[1,8,3],[1,3,1],[1,7,12],[1,8,1],[1,4,2],[1,6,2],[1,9,1],[1,3,30],[1,8,3],[1,8,2],[1,8,1],[1,11,1],[1,13,1],[1,2,1],[1,16,1],[1,10,1],[1,3,1],[1,6,4],[1,29,2],[1,4,2],[1,4,1],[1,1,1],[1,7,1],[1,1,1],[1,4,11],[1,1,1],[1,6,1],[1,26,1],[1,3,1],[1,2,1],[1,10,1],[1,4,1],[1,14,2],[1,10,1],[1,5,2],[1,5,1],[1,2,1],[1,26,33],[1,1,1],[1,11,2],[1,8,5],[1,18,1],[1,2,1],[1,5,1],[1,4,2],[1,5,1],[1,11,2],[1,1,2],[1,2,2],[1,6,6],[1,10,1],[1,14,1],[1,2,1],[1,13,1],[1,14,1],[1,8,2],[1,21,2],[1,1,2],[1,1,1],[1,14,1],[1,2,1],[1,15,2],[1,4,1],[1,3,1],[1,10,2],[1,4,2],[1,5,1],[1,11,22],[1,8,3],[1,4,1],[1,3,2],[1,1,2],[1,25,3],[1,2,1],[1,11,2],[1,5,2],[1,39,1],[1,1,1],[1,415,128],[1,6,1],[1,5,1],[1,8,5],[1,2,3],[1,1,1],[1,1,1],[1,4,1],[1,2,4],[1,4,1],[1,2,9],[1,4,2],[1,23,3],[1,6,9],[1,5,4],[1,2,5],[1,1,1],[1,7,1],[1,3,7],[1,1,2],[1,2,16],[1,5,2],[1,1,3],[1,4,1],[1,11,1],[1,2,2],[1,2,1],[1,10,1],[1,6,2],[1,11,1],[1,28,1],[1,21,3],[1,3,2],[1,3,1],[1,4,1],[1,1,2],[1,7,1],[1,11,4],[1,4,2],[1,22,4],[1,1,1],[1,1,1],[1,12,7],[1,1,1],[1,4,2],[1,2,1],[1,6,4],[1,14,3],[1,8,2],[1,1,11],[1,13,2],[1,4,1],[1,3,2],[1,95,10],[1,1,2],[1,4,2],[1,27,2],[1,2,1],[1,19,1],[1,13,4],[1,1,1],[1,37,1],[1,4,1],[1,5,1],[1,7,5],[1,1,1],[1,4,5],[1,5,1],[1,1,1],[1,16,2],[1,22,1],[1,4,2],[1,24,4],[1,10,1],[1,77,6],[1,21,1],[1,11,1],[1,2,1],[1,1,1],[1,4,5],[1,2,4],[1,55,4],[1,17,1],[1,1,3],[1,2,2],[1,7,1],[1,17,1],[1,34,2],[1,4,1],[1,2,2],[1,1,2],[1,100,1],[1,17,2],[1,8,6],[1,11,2],[1,11,2],[1,3,1],[1,5,2],[1,1,1],[1,6,7],[1,15,5],[1,7,1],[1,4,1],[1,5,1],[1,6,2],[1,7,1],[1,2,2],[1,10,2],[1,17,1],[1,10,2],[1,6,3],[1,21,1],[1,2,1],[1,78,4],[1,6,1],[1,1,2],[1,5,1],[1,186,9],[1,16,3],[1,15,13],[1,30,4],[1,2,1],[1,15,3],[1,13,1],[1,3,1],[1,1,1],[1,2,2],[1,5,5],[1,7,1],[1,16,1],[1,2,1],[1,14,2],[1,11,5],[1,9,1],[1,13,2],[1,2,1],[1,4,64],[1,4,1],[1,18,4],[1,3,1],[1,1,1],[1,16,2],[1,4,1],[1,11,4],[1,9,3],[1,3,1],[1,4,1],[1,1,1],[1,10,3],[1,7,1],[1,13,1],[1,16,4],[1,1,16],[1,2,2],[1,18,6],[1,42,2],[1,1,3],[1,15,1],[1,3,1],[1,43,1],[1,1,1],[1,27,2],[1,1,3],[1,1,5],[1,13,1],[1,1,1],[1,10,11],[1,8,1],[1,9,1],[1,13,1],[1,1,2],[1,13,3],[1,1,1],[1,5,1],[1,14,2],[1,14,1],[1,13,1],[1,4,3],[1,25,1],[1,1,3],[1,3,3],[1,4,1],[1,1,1],[1,4,4],[1,15,1],[1,2,1],[1,1,1],[1,7,12],[1,68,2],[1,13,2],[1,2,1],[1,6,4],[1,46,6],[1,1,1],[1,2,2],[1,4,1],[1,2,1],[1,11,5],[1,1,1],[1,9,1],[1,9,1],[1,13,1],[1,4,1],[1,14,1],[1,42,9],[1,5,1],[1,4,1],[1,24,7],[1,7,1],[1,17,1],[1,2,1],[1,2,5],[1,3,6],[1,2,1],[1,15,4],[1,3,2],[1,33,2],[1,30,4],[1,27,4],[1,1,1],[1,14,4],[1,2,3],[1,26,7],[1,22,1],[1,2,2],[1,2,2],[1,166,3],[1,4,4],[1,9,1],[1,12,15],[1,2,6],[1,13,2],[1,4,3],[1,9,2],[1,2,3],[1,3,3],[1,9,2],[1,22,1],[1,5,3],[1,3,4],[1,2,3],[1,3,1],[1,23,1],[1,18,1],[1,6,1],[1,4,1],[1,9,3],[1,35,1],[1,73,2],[1,1,3],[1,31,5],[1,25,1],[1,3,4],[1,11,1],[1,9,4],[1,2,1],[1,27,36],[1,23,5],[1,4,2],[1,1,2],[1,29,2],[1,3,2],[1,1,1],[1,4,1],[1,12,1],[1,36,16],[1,5,14],[1,19,1],[1,6,1],[1,6,1],[1,4,1],[1,6,1],[1,4,2],[1,9,7],[1,7,1],[1,30,4],[1,4,1],[1,18,3],[1,2,2],[1,3,1],[1,9,2],[1,2,2],[1,1,2],[1,1,2],[1,14,1],[1,3,1],[1,5,2],[1,10,1],[1,9,1],[1,10,3],[1,4,1],[1,2,1],[1,4,4],[1,2,1],[1,3,3],[1,39
,2],[1,3,1],[1,1,3],[1,14,1],[1,2,4],[1,13,1],[1,4,6],[1,3,5],[1,5,4],[1,8,1],[1,131,1],[1,28,1],[1,5,1],[1,965,1303],[1,8,5],[1,2,9],[1,4,2],[1,5,1],[1,46,3],[1,7,3],[1,1,1],[1,7,3],[1,2,1],[1,4,1],[1,2,1],[1,2,1],[1,2,1],[1,4,6],[1,5,1],[1,9,3],[1,2,2],[1,9,1],[1,42,3],[1,11,3],[1,5,1],[1,1,2],[1,6,1],[1,37,51],[1,2,1],[1,4,3],[1,23,2],[1,1,15],[1,5,4],[1,1,4],[1,18,3],[1,12,3],[1,4,2],[1,4,1],[1,2,7],[1,2,6],[1,3,6],[1,6,1],[1,10,3],[1,4,2],[1,1,2],[1,4,1],[1,4,3],[1,1,3],[1,3,1],[1,6,2],[1,10,2],[1,6,4],[1,4,3],[1,7,2],[1,2,2],[1,4,1],[1,1,1],[1,4,5],[1,14,1],[1,20,4],[1,7,15],[1,18,2],[1,6,1],[1,1,1],[1,7,1],[1,5,2],[1,6,2],[1,4,1],[1,6,3],[1,2,1],[1,6,1],[1,4,1],[1,7,1],[1,7,4],[1,7,1],[1,1,1],[1,24,4],[1,2,2],[1,3,5],[1,8,1],[1,15,2],[1,5,1],[1,2,3],[1,2,2],[1,4,1],[1,6,1],[1,2,3],[1,11,1],[1,23,5],[1,2,2],[1,1,1],[1,8,1],[1,17,6],[1,1,1],[1,9,2],[1,1,1],[1,10,1],[1,5,1],[1,6,1],[1,6,1],[1,5,1],[1,2,6],[1,2,1],[1,9,1],[1,14,1],[1,18,8],[1,39,2],[1,13,1],[1,6,1],[1,6,2],[1,9,1],[1,14,1],[1,5,4],[1,26,2],[1,4,1],[1,7,2],[1,5,5],[1,2,1],[1,20,2],[1,14,1],[1,10,1],[1,4,1],[1,3,1],[1,10,2],[1,9,12],[1,4,4],[1,2,1],[1,4,1],[1,4,1],[1,2,1],[1,8,1],[1,2,4],[1,1,1],[1,33,2],[1,4,1],[1,5,1],[1,205,1],[1,2,1],[1,15,3],[1,5,1],[1,1,1],[1,1,1],[1,1,1],[1,13,1],[1,14,5],[1,6,4],[1,3,1],[1,7,5],[1,42,2],[1,11,1],[1,24,2],[1,11,2],[1,11,2],[1,12,1],[1,7,1],[1,1,1],[1,3,2],[1,21,1],[1,13,1],[1,2,1],[1,37,6],[1,8,4],[1,2,2],[1,2,2],[1,36,1],[1,8,1],[1,19,11],[1,19,7],[1,8,1],[1,18,2],[1,7,2],[1,8,1],[1,1,1],[1,4,1],[1,3,3],[1,10,1],[1,6,1],[1,4,1],[1,10,1],[1,25,1],[1,14,1],[1,14,3],[1,4,1],[1,2,1],[1,2,2],[1,4,2],[1,3,4],[1,62,11],[1,4,1],[1,39,3],[1,65,2],[1,3,1],[1,11,2],[1,4,1],[1,2,2],[1,1,1],[1,2,3],[1,2,1],[1,17,7],[1,7,4],[1,1,4],[1,62,3],[1,17,3],[1,26,3],[1,15,1],[1,2,1],[1,4,6],[1,1,2],[1,8,2],[1,16,2],[1,1,1],[1,7,2],[1,4,1],[1,1,1],[1,7,2],[1,8,2],[1,12,1],[1,1,2],[1,2,1],[1,2,1],[1,26,7],[1,2,1],[1,5,1],[1,5,1],[1,5,1],[1,1,1],[1,6,27],[1,5,4],[1,6,1],[1,8,1],[1,38,2],[1,26,2],[1,13,1],[1,20,2],[1,6,6],[1,2,2],[1,2,1],[1,16,2],[1,88,1],[1,4,1],[1,5,3],[1,1,4],[1,1,4],[1,12,2],[1,3,1],[1,3,1],[1,3,1],[1,2,3],[1,6,1],[1,2,4],[1,28,2],[1,17,3],[1,10,1],[1,51,3],[1,1,1],[1,15,4],[1,10,14],[1,1,3],[1,3,3],[1,1,1],[1,5,1],[1,3,1],[1,23,3],[1,10,1],[1,1,1],[1,21,6],[1,11,1],[1,8,1],[1,1,1],[1,2,1],[1,1,3],[1,26,1],[1,1,2],[1,4,1],[1,4,1],[1,6,1],[1,6,1],[1,2,2],[1,11,5],[1,15,2],[1,13,1],[1,2,2],[1,4,1],[1,4,1],[1,2,6],[1,13,3],[1,23,2],[1,18,2],[1,8,2],[1,1,1],[1,4,1],[1,7,1],[1,2,1],[1,8,6],[1,12,1],[1,23,4],[1,9,4],[1,2,2],[1,8,1],[1,7,2],[1,2,2],[1,2,4],[1,8,16],[1,22,3],[1,2,1],[1,2,4],[1,2,1],[1,9,2],[1,3,3],[1,4,1],[1,3,9],[1,3,1],[1,2,2],[1,2,3],[1,11,1],[1,5,1],[1,5,1],[1,2,2],[1,10,20],[1,2,2],[1,2,1],[1,3,3],[1,10,1],[1,2,3],[1,2,1],[1,5,1],[1,4,2],[1,8,1],[1,2,2],[1,6,1],[1,5,1],[1,9,1],[1,3,2],[1,1,1],[1,2,6],[1,1,1],[1,5,1],[1,2,1],[1,16,1],[1,6,1],[1,2,1],[1,2,1],[1,5,1],[1,9,1],[1,10,16],[1,4,1],[1,4,2],[1,5,2],[1,8,1],[1,16,2],[1,2,1],[1,5,1],[1,1,2],[1,55,2],[1,20,1],[1,11,1],[1,5,2],[1,13,1],[1,1,1],[1,10,6],[1,5,2],[1,21,1],[1,7,3],[1,5,1],[1,7,1],[1,3,1],[1,6,1],[1,46,3],[1,8,5],[1,5,1],[1,2,1],[1,2,6],[1,22,1],[1,42,1],[1,1,1],[1,4,2],[1,13,1],[1,3,3],[1,2,2],[1,4,2],[1,1,3],[1,88,1],[1,24,4],[1,4,1],[1,3,1],[1,5,1],[1,17,6],[1,6,2],[1,20,3],[1,47,2],[1,2,7],[1,13,1],[1,1,3],[1,1,2],[1,2,2],[1,2,2],[1,4,3],[1,7,1],[1,3,1],[1,10,1],[1,2,1],[1,2,5],[1,1,2],[1,17,2],[1,12,4],[1,24,1],[1,3,1],[1,1,3],[1,6,1],[1,2,5],[1,3,1],[1,1,1],[1,13,2],[1,6,1],[1,2,1],[1,10,2],[1,4,1],[1,
1,1],[1,18,7],[1,7,2],[1,8,1],[1,5,1],[1,2,1],[1,4,1],[1,2,2],[1,14,1],[1,13,1],[1,10,4],[1,4,4],[1,6,4],[1,4,1],[1,16,2],[1,8,2],[1,3,3],[1,3,1],[1,21,2],[1,7,1],[1,2,1],[1,2,1],[1,2,3],[1,4,1],[1,6,1],[1,28,1],[1,2,7],[1,3,1],[1,23,4],[1,2,1],[1,6,1],[1,2,1],[1,4,1],[1,3,2],[1,1,1],[1,9,2],[1,9,2],[1,2,1],[1,4,2],[1,10,1],[1,12,1],[1,4,2],[1,7,1],[1,2,2],[1,9,1],[1,16,5],[1,31,2],[1,16,2],[1,22,3],[1,2,1],[1,6,1],[1,1,1],[1,6,3],[1,14,2],[1,5,3],[1,81,3],[1,8,2],[1,1,1],[1,61,9],[1,1,4],[1,2,1],[1,11,3],[1,3,5],[1,3,6],[1,4,7],[1,1,2],[1,5,2],[1,2,1],[1,3,2],[1,9,5],[1,9,1],[1,1,3],[1,3,2],[1,13,3],[1,14,1],[1,15,6],[1,6,1],[1,2,1],[1,7,1],[1,2,1],[1,10,2],[1,2,2],[1,14,1],[1,2,2],[1,3,3],[1,3,1],[1,4,1],[1,59,2],[1,5,2],[1,4,2],[1,1,1],[1,2,1],[1,4,1],[1,2,2],[1,5,4],[1,4,1],[1,4,1],[1,10,3],[1,2,2],[1,2,3],[1,8,1],[1,2,1],[1,1,1],[1,18,1],[1,6,1],[1,12,3],[1,5,3],[1,3,1],[1,7,3],[1,10,2],[1,2,23],[1,1,12],[1,1,1],[1,32,3],[1,2,1],[1,4,1],[1,12,2],[1,4,1],[1,3,1],[1,5,1],[1,4,2],[1,4,1],[1,16,2],[1,1,1],[1,4,1],[1,7,1],[1,2,4],[1,8,1],[1,4,4],[1,1,1],[1,1,2],[1,6,3],[1,8,2],[1,23,15],[1,2,2],[1,2,1],[1,2,1],[1,11,1],[1,3,2],[1,9,2],[1,4,2],[1,2,3],[1,34,1],[1,7,1],[1,2,4],[1,65,2],[1,41,3],[1,1,2],[1,1,1],[1,6,1],[1,6,1],[1,7,1],[1,3,1],[1,14,9],[1,6,1],[1,6,5],[1,2,13],[1,5,2],[1,2,1],[1,4,1],[1,17,1],[1,5,1],[1,1,1],[1,3,2],[1,9,1],[1,1,4],[1,48,2],[1,7,1],[1,4,1],[1,3,1],[1,4,2],[1,118,3],[1,2,1],[1,2,4],[1,2,1],[1,12,13],[1,2,1],[1,4,2],[1,4,1],[1,6,1],[1,1,1],[1,7,2],[1,10,1],[1,21,5],[1,5,2],[1,9,1],[1,2,2],[1,1,1],[1,1,1],[1,1,1],[1,3,1],[1,1,1],[1,7,1],[1,83,9],[1,6,2],[1,7,2],[1,13,1],[1,4,2],[1,3,1],[1,8,2],[1,2,1],[1,10,3],[1,2,1],[1,2,1],[1,9,11],[1,2,1],[1,3,1],[1,17,1],[1,7,2],[1,8,2],[1,20,1],[1,2,1],[1,1,2],[1,8,1],[1,2,1],[1,6,1],[1,21,3],[1,1,2],[1,5,5],[1,2,1],[1,2,3],[1,2,1],[1,2,2],[1,16,1],[1,2,1],[1,2,1],[1,3,1],[1,17,1],[1,6,1],[1,4,15],[1,1,1],[1,11,1],[1,84,15],[1,31,3],[1,2,2],[1,8,1],[1,9,1],[1,2,3],[1,15,2],[1,4,1],[1,18,1],[1,3,1],[1,1,1],[1,2,4],[1,2,2],[1,2,1],[1,2,1],[1,25,1],[1,3,1],[1,141,13],[1,4,2],[1,2,2],[1,14,2],[1,7,1],[1,30,9],[1,17,1],[1,1,2],[1,6,1],[1,2,1],[1,2,1],[1,8,1],[1,2,1],[1,10,1],[1,6,3],[1,12,1],[1,68,1],[1,2,1],[1,10,2],[1,14,2],[1,26,9],[1,7,3],[1,3,3],[1,6,6],[1,3,1],[1,18,4],[1,3,1],[1,4,4],[1,2,1],[1,1,1],[1,37,8],[1,8,6],[1,2,1],[1,9,6],[1,5,2],[1,3,1],[1,3,2],[1,2,1],[1,3,1],[1,13,7],[1,9,1],[1,122,2],[1,2,1],[1,22,6],[1,11,2],[1,16,2],[1,28,46],[1,2,4],[1,7,1],[1,2,3],[1,2,6],[1,2,2],[1,1,2],[1,1,1],[1,5,1],[1,1,2],[1,3,2],[1,7,6],[1,11,1],[1,21,1],[1,40,6],[1,14,2],[1,21,1],[1,1,1],[1,14,2],[1,21,1],[1,2,1],[1,1,1],[1,1,2],[1,40,2],[1,4,2],[1,1,3],[1,1,1],[1,107,2],[1,4,6],[1,136,6],[1,5,1],[1,9,1],[1,24,3],[1,7,1],[1,10,5],[1,29,3],[1,12,2],[1,10,3],[1,5,3],[1,2,1],[1,59,1],[1,5,2],[1,13,2],[1,1,2],[1,50,2],[1,1,3],[1,2,3],[1,6,1],[1,4,2],[1,5,4],[1,3,2],[1,8,1],[1,4,2],[1,1,1],[1,17,1],[1,13,3],[1,2,1],[1,7,1],[1,3,1],[1,8,1],[1,1,1],[1,20,1],[1,4,4],[1,1,2],[1,2,1],[1,2,1],[1,2,2],[1,1,2],[1,13,2],[1,4,1],[1,4,1],[1,3,1],[1,2,1],[1,4,4],[1,13,5],[1,9,1],[1,8,1],[1,12,1],[1,15,3],[1,2,1],[1,2,2],[1,4,1],[1,2,2],[1,1,1],[1,3,1],[1,13,1],[1,4,1],[1,9,4],[1,3,2],[1,2,1],[1,4,4],[1,1,3],[1,15,1],[1,4,1],[1,2,1],[1,3,1],[1,2,1],[1,3,6],[1,5,1],[1,7,10],[1,1,2],[1,6,2],[1,7,2],[1,3,1],[1,3,3],[1,6,1],[1,13,1],[1,22,3],[1,6,5],[1,6,1],[1,3,1],[1,3,1],[1,21,5],[1,11,2],[1,6,3],[1,38,4],[1,6,4],[1,4,1],[1,2,1],[1,5,5],[1,5,3],[1,40,1],[1,4,3],[1,8,1],[1,13,2],[1,4,2],[1,1,1],[1,9,9],[1,1,1],[1,12,2],[1,36,1],[1,2,1],[1,18,3],[1,28,
1],[1,5,1],[1,20,4],[1,40,3],[1,3,1],[1,5,3],[1,2,1],[1,31,3],[1,6,1],[1,3,1],[1,1,5],[1,3,3],[1,36,1],[1,1,1],[1,22,2],[1,9,2],[1,2,4],[1,2,2],[1,4,4],[1,2,1],[1,6,1],[1,3,3],[1,5,1],[1,13,2],[1,4,1],[1,1,3],[1,1,1],[1,11,5],[1,4,1],[1,2,3],[1,26,1],[1,9,1],[1,6,1],[1,15,1],[1,23,5],[1,3,5],[1,4,3],[1,8,1],[1,9,4],[1,2,1],[1,7,1],[1,1,6],[1,4,1],[1,43,1],[1,2,3],[1,1,1],[1,15,4],[1,3,1],[1,1,1],[1,10,1],[1,79,1],[1,1,14],[1,2,1],[1,6,1],[1,1,1],[1,24,1],[1,2,3],[1,9,2],[1,2,3],[1,8,1],[1,115,15],[1,1,1],[1,1,2],[1,3,1],[1,9,24],[1,6,1],[1,3,6],[1,10,3],[1,3,1],[1,1,1],[1,3,2],[1,2,1],[1,11,1],[1,5,1],[1,1,1],[1,2,1],[1,3,1],[1,5,1],[1,11,1],[1,2,1],[1,7,7],[1,15,1],[1,6,2],[1,51,7],[1,2,1],[1,54,1],[1,5,1],[1,1,1],[1,7,5],[1,1,1],[1,4,1],[1,3,1],[1,22,4],[1,5,3],[1,5,1],[1,64,9],[1,6,1],[1,28,6],[1,5,1],[1,11,1],[1,2,2],[1,4,2],[1,1,4],[1,8,1],[1,1,5],[1,7,1],[1,2,1],[1,2,2],[1,8,1],[1,11,3],[1,8,3],[1,7,1],[1,10,5],[1,5,1],[1,98,5],[1,18,1],[1,1,1],[1,5,1],[1,2,2],[1,14,2],[1,3,1],[1,1,1],[1,11,3],[1,7,9],[1,5,3],[1,3,1],[1,3,3],[1,125,34],[1,1,1],[1,2,1],[1,6,2],[1,2,2],[1,11,7],[1,5,2],[1,5,5],[1,6,1],[1,10,2],[1,14,2],[1,4,3],[1,8,7],[1,2,3],[1,2,2],[1,13,1],[1,6,1],[1,10,5],[1,11,1],[1,4,2],[1,14,1],[1,1,6],[1,15,1],[1,1,3],[1,5,3],[1,7,1],[1,2,1],[1,1,3],[1,2,4],[1,3,1],[1,8,3],[1,2,3],[1,2,1],[1,2,2],[1,2,1],[1,4,1],[1,16,2],[1,1,2],[1,1,5],[1,7,1],[1,3,1],[1,2,1],[1,16,3],[1,4,1],[1,8,2],[1,16,6],[1,12,2],[1,84,26],[1,10,2],[1,2,2],[1,5,1],[1,1,1],[1,8,1],[1,4,1],[1,4,1],[1,4,2],[1,4,1],[1,4,10],[1,14,2],[1,4,2],[1,5,2],[1,19,1],[1,4,3],[1,8,2],[1,6,1],[1,2,5],[1,2,1],[1,16,4],[1,4,1],[1,2,2],[1,7,1],[1,4,2],[1,4,1],[1,8,1],[1,10,2],[1,3,2],[1,3,1],[1,10,2],[1,1,1],[1,12,3],[1,37,1],[1,10,1],[1,16,4],[1,1,1],[1,11,1],[1,4,1],[1,8,6],[1,3,2],[1,66,2],[1,14,1],[1,2,4],[1,2,2],[1,7,2],[1,24,2],[1,5,1],[1,1,1],[1,1,1],[1,3,1],[1,31,2],[1,24,1],[1,8,5],[1,8,2],[1,3,4],[1,64,1],[1,1,4],[1,4,47],[1,8,4],[1,25,1],[1,19,2],[1,4,1],[1,33,4],[1,16,2],[1,4,1],[1,1,1],[1,2,3],[1,27,1],[1,20,1],[1,10,3],[1,2,1],[1,2,1],[1,76,1],[1,2,1],[1,5,1],[1,2,2],[1,15,3],[1,40,2],[1,4,22],[1,2,2],[1,2,2],[1,10,1],[1,3,1],[1,55,4],[1,2,7],[1,7,1],[1,4,6],[1,2,1],[1,2,1],[1,28,1],[1,2,2],[1,6,2],[1,6,2],[1,4,15],[1,3,2],[1,1,1],[1,29,1],[1,13,1],[1,16,1],[1,4,1],[1,7,7],[1,3,3],[1,16,4],[1,12,11],[1,1,1],[1,2,4],[1,54,2],[1,1,2],[1,6,2],[1,1,3],[1,2,2],[1,1,1],[1,2,1],[1,11,4],[1,9,1],[1,20,1],[1,1,1],[1,17,3],[1,1,1],[1,9,2],[1,2,2],[1,3,1],[1,29,19],[1,28,1],[1,8,3],[1,21,8],[1,7,3],[1,6,2],[1,5,2],[1,11,1],[1,1,2],[1,7,1],[1,22,1],[1,9,1],[1,3,3],[1,8,2],[1,5,1],[1,23,2],[1,11,5],[1,17,2],[1,5,5],[1,4,3],[1,33,1],[1,2,3],[1,6,1],[1,32,1],[1,6,2],[1,64,2],[1,3,1],[1,7,1],[1,3,6],[1,12,1],[1,1,1],[1,9,1],[1,38,3],[1,1,1],[1,3,1],[1,3,5],[1,78,16],[1,3,1],[1,7,1],[1,26,1],[1,9,2],[1,113,2],[1,9,1],[1,5,9],[1,3,2],[1,4,1],[1,2,1],[1,5,1],[1,24,3],[1,11,4],[1,38,2],[1,13,3],[1,7,3],[1,1,1],[1,1,2],[1,3,3],[1,5,3],[1,6,1],[1,7,1],[1,3,1],[1,4,2],[1,3,1],[1,3,1],[1,1,2],[1,2,1],[1,18,8],[1,1,3],[1,1,1],[1,2,5],[1,13,9],[1,2,2],[1,6,1],[1,5,1],[1,13,3],[1,7,1],[1,3,2],[1,2,1],[1,4,1],[1,2,2],[1,6,2],[1,4,3],[1,1,3],[1,3,2],[1,12,8],[1,6,1],[1,7,1],[1,6,3],[1,9,4],[1,16,17],[1,1,2],[1,4,1],[1,2,1],[1,2,1],[1,2,1],[1,1,1],[1,4,2],[1,4,1],[1,8,1],[1,14,17],[1,7,1],[1,7,6],[1,5,1],[1,4,2],[1,80,2],[1,13,1],[1,11,1],[1,9,1],[1,2,4],[1,3,1],[1,2,1],[1,5,2],[1,3,1],[1,1,2],[1,12,1],[1,8,5],[1,6,3],[1,17,1],[1,3,4],[1,1,2],[1,5,2],[1,1,3],[1,2,2],[1,2,3],[1,2,1],[1,4,1],[1,1,1],[1,14,1],[1,2,1],[1,16,4],[1,15,2],[1,3,3],
[1,8,8],[1,6,1],[1,25,4],[1,6,1],[1,7,3],[1,36,2],[1,2,1],[1,32,2],[1,1,1],[1,7,1],[1,14,2],[1,21,1],[1,3,1],[1,27,7],[1,6,3],[1,1,5],[1,5,4],[1,12,2],[1,2,1],[1,2,1],[1,8,7],[1,8,8],[1,7,1],[1,2,1],[1,4,1],[1,1,7],[1,10,3],[1,17,1],[1,1,1],[1,8,6],[1,29,5],[1,12,2],[1,7,2],[1,7,1],[1,2,2],[1,2,1],[1,2,1],[1,54,9],[1,1,1],[1,12,2],[1,8,1],[1,8,4],[1,39,1],[1,3,3],[1,9,4],[1,6,5],[1,2,1],[1,15,2],[1,18,1],[1,2,2],[1,1,1],[1,1,1],[1,2,4],[1,3,1],[1,6,1],[1,3,3],[1,4,3],[1,3,2],[1,1,1],[1,2,2],[1,16,12],[1,4,2],[1,15,2],[1,6,1],[1,7,1],[1,9,8],[1,70,2],[1,5,1],[1,4,3],[1,24,4],[1,8,6],[1,18,43],[1,23,3],[1,10,1],[1,14,8],[1,6,4],[1,2,1],[1,2,1],[1,1,1],[1,2,1],[1,9,3],[1,6,4],[1,5,3],[1,43,2],[1,5,1],[1,11,1],[1,1,2],[1,5,3],[1,4,2],[1,16,2],[1,16,10],[1,5,1],[1,2,2],[1,2,1],[1,2,3],[1,4,6],[1,3,12],[1,6,1],[1,10,1],[1,1,2],[1,13,1],[1,3,1],[1,5,2],[1,6,1],[1,3,1],[1,2,1],[1,1,1],[1,13,1],[1,20,1],[1,20,2],[1,8,1],[1,5,2],[1,2,2],[1,10,5],[1,1,3],[1,7,2],[1,4,1],[1,15,18],[1,1,4],[1,5,2],[1,4,1],[1,1,11],[1,1,3],[1,4,1],[1,1,1],[1,2,1],[1,2,12],[1,5,1],[1,3,1],[1,25,2],[1,16,1],[1,10,1],[1,18,1],[1,28,3],[1,5,6],[1,4,2],[1,2,2],[1,51,124],[1,4,2],[1,5,1],[1,28,1],[1,4,5],[1,6,2],[1,20,1],[1,7,1],[1,5,3],[1,11,1],[1,4,3],[1,1,1],[1,6,3],[1,5,1],[1,3,1],[1,10,2],[1,64,5],[1,12,12],[1,5,2],[1,6,1],[1,8,2],[1,28,8],[1,19,1],[1,2,1],[1,1,1],[2,6,1],[2,2,2],[2,4,5],[2,11,1],[2,4,1],[2,4,1],[2,14,1],[2,19,2],[2,2,1],[2,6,4],[2,2,1],[2,6,2],[2,4,1],[2,12,2],[2,15,2],[2,5,1],[2,11,1],[2,11,1],[2,2,2],[2,3,3],[2,5,9],[2,2,1],[2,1,1],[2,1,4],[2,2,1],[2,4,1],[2,11,1],[2,6,1],[2,2,2],[2,8,1],[2,81,7],[2,8,1],[2,5,1],[2,6,3],[2,2,2],[2,39,1],[2,5,2],[2,5,2],[2,2,4],[2,10,2],[2,4,2],[2,2,1],[2,6,6],[2,8,2],[2,56,1],[2,9,1],[2,1,1],[2,16,3],[2,5,2],[2,3,2],[2,12,25],[2,4,4],[2,6,2],[2,7,1],[2,30,11],[2,4,1],[2,16,5],[2,8,2],[2,7,2],[2,11,1],[2,7,1],[2,2,1],[2,1,1],[2,2,9],[2,39,6],[2,2,1],[2,2,1],[2,7,1],[2,19,1],[2,11,2],[2,8,2],[2,4,7],[2,2,1],[2,7,1],[2,1,1],[2,4,1],[2,6,1],[2,6,1],[2,2,4],[2,26,37],[2,2,1],[2,13,2],[2,35,10],[2,13,1],[2,6,1],[2,10,2],[2,19,9],[2,7,1],[2,7,1],[2,2,2],[2,1,1],[2,5,2],[2,10,2],[2,6,1],[2,6,1],[2,6,1],[2,2,2],[2,1,1],[2,6,60],[2,8,1],[2,18,1],[2,4,2],[2,1,1],[2,1,1],[2,2,3],[2,21,2],[2,7,2],[2,11,3],[2,14,2],[2,3,2],[2,12,1],[2,1,2],[2,34,1],[2,1,1],[2,16,1],[2,1,1],[2,11,1],[2,14,1],[2,8,1],[2,9,1],[2,8,1],[2,3,1],[2,4,4],[2,4,1],[2,44,3],[2,4,1],[2,19,6],[2,19,2],[2,3,2],[2,17,2],[2,17,4],[2,1,6],[2,5,3],[2,27,6],[2,5,3],[2,6,3],[2,22,2],[2,22,3],[2,13,19],[2,8,1],[2,2,2],[2,7,1],[2,9,3],[2,2,1],[2,11,1],[2,8,1],[2,4,1],[2,8,2],[2,4,1],[2,1,1],[2,16,1],[2,2,1],[2,4,1],[2,9,11],[2,3,3],[2,3,1],[2,1,2],[2,3,1],[2,28,1],[2,8,5],[2,6,2],[2,8,1],[2,1,1],[2,10,1],[2,6,1],[2,55,1],[2,1,1],[2,4,2],[2,3,2],[2,16,4],[2,11,1],[2,2,3],[2,15,1],[2,1,10],[2,8,2],[2,15,1],[2,1,1],[2,7,114],[2,10,3],[2,1,1],[2,5,1],[2,3,3],[2,2,1],[2,1,1],[2,8,1],[2,96,1],[2,10,3],[2,3,2],[2,2,1],[2,1,1],[2,3,1],[2,25,2],[2,3,1],[2,12,4],[2,2,9],[2,3,1],[2,2,1],[2,9,1],[2,12,1],[2,18,1],[2,23,6],[2,9,85],[2,2,8],[2,1,2],[2,26,1],[2,8,2],[2,6,3],[2,1,4],[2,6,1],[2,8,3],[2,9,2],[2,1,1],[2,7,1],[2,1,3],[2,7,1],[2,3,2],[2,10,1],[2,2,2],[2,8,2],[2,4,4],[2,23,2],[2,8,5],[2,1,1],[2,3,3],[2,7,2],[2,1,1],[2,2,1],[2,1,7],[2,10,1],[2,18,1],[2,39,5],[2,13,2],[2,7,2],[2,6,2],[2,9,1],[2,5,1],[2,7,1],[2,35,2],[2,2,2],[2,5,2],[2,1,1],[2,9,2],[2,18,1],[2,2,3],[2,35,1],[2,6,5],[2,2,2],[2,2,1],[2,12,2],[2,1,1],[2,10,1],[2,6,1],[2,2,1],[2,15,2],[2,7,1],[2,5,4],[2,4,1],[2,2,14],[2,2,1],[2,5,3],[2,21,2],[2,10,1],[2,2,1],[2,8,1]
,[2,16,1],[2,9,2],[2,11,2],[2,1,6],[2,12,2],[2,18,2],[2,2,4],[2,4,3],[2,7,11],[2,3,1],[2,28,5],[2,1,4],[2,8,1],[2,2,5],[2,2,1],[2,3,1],[2,10,2],[2,3,3],[2,2,1],[2,17,1],[2,6,1],[2,16,1],[2,10,16],[2,17,1],[2,4,2],[2,1,1],[2,3,3],[2,7,3],[2,5,1],[2,11,1],[2,13,1],[2,3,1],[2,6,1],[2,5,2],[2,17,2],[2,33,13],[2,2,10],[2,3,5],[2,4,3],[2,5,1],[2,2,4],[2,8,2],[2,14,1],[2,16,1],[2,2,3],[2,19,6],[2,5,1],[2,8,2],[2,7,1],[2,1,1],[2,11,1],[2,2,2],[2,11,10],[2,10,1],[2,14,1],[2,1,7],[2,10,1],[2,34,1],[2,2,1],[2,2,4],[2,9,2],[2,16,1],[2,2,4],[2,8,3],[2,1,2],[2,3,5],[2,13,5],[2,20,1],[2,25,8],[2,9,1],[2,1,1],[2,15,3],[2,6,2],[2,394,278],[2,11,2],[2,1,1],[2,3,15],[2,4,2],[2,3,6],[2,6,3],[2,1,12],[2,2,1],[2,1,3],[2,11,2],[2,20,3],[2,31,9],[2,25,7],[2,15,2],[2,11,31],[2,17,2],[2,5,1],[2,2,2],[2,4,1],[2,6,2],[2,27,2],[2,10,2],[2,1,2],[2,26,5],[2,5,14],[2,12,2],[2,5,2],[2,2,1],[2,2,3],[2,6,1],[2,1,3],[2,9,3],[2,18,1],[2,5,5],[2,29,13],[2,14,1],[2,1,4],[2,3,1],[2,5,1],[2,19,4],[2,11,7],[2,8,3],[2,18,1],[2,3,5],[2,11,1],[2,4,1],[2,10,4],[2,19,2],[2,10,3],[2,12,2],[2,19,9],[2,73,3],[2,13,3],[2,12,1],[2,4,5],[2,55,1],[2,6,6],[2,27,2],[2,2,1],[2,20,1],[2,8,1],[2,1,1],[2,29,2],[2,10,8],[2,5,2],[2,10,2],[2,14,1],[2,10,1],[2,1,1],[2,4,2],[2,5,1],[2,1,4],[2,4,2],[2,9,1],[2,9,4],[2,2,1],[2,4,1],[2,6,2],[2,2,2],[2,10,15],[2,17,1],[2,9,1],[2,9,1],[2,8,2],[2,4,1],[2,4,1],[2,243,2],[2,9,3],[2,12,2],[2,4,3],[2,2,1],[2,1,2],[2,57,4],[2,7,2],[2,8,2],[2,14,2],[2,2,1],[2,6,1],[2,7,2],[2,8,1],[2,4,3],[2,36,5],[2,3,1],[2,1,1],[2,45,8],[2,1,1],[2,2,3],[2,9,1],[2,1,1],[2,13,2],[2,44,6],[2,2,1],[2,36,1],[2,4,1],[2,5,1],[2,3,2],[2,1,1],[2,28,2],[2,9,1],[2,3,3],[2,10,2],[2,16,1],[2,1,1],[2,1,1],[2,13,1],[2,14,3],[2,65,1],[2,7,1],[2,2,1],[2,11,8],[2,4,1],[2,17,1],[2,6,1],[2,15,5],[2,15,1],[2,17,2],[2,8,1],[2,8,1],[2,1,2],[2,5,7],[2,1,1],[2,3,2],[2,2,1],[2,4,1],[2,32,1],[2,3,1],[2,1,1],[2,1,1],[2,2,2],[2,2,1],[2,8,2],[2,11,3],[2,2,3],[2,42,3],[2,5,1],[2,6,2],[2,1,1],[2,9,1],[2,2,2],[2,5,1],[2,2,1],[2,7,1],[2,7,6],[2,6,2],[2,3,1],[2,1,3],[2,15,1],[2,23,1],[2,1,1],[2,3,1],[2,4,2],[2,8,1],[2,2,7],[2,3,4],[2,6,5],[2,4,1],[2,5,3],[2,16,5],[2,11,1],[2,13,1],[2,22,3],[2,10,5],[2,2,2],[2,2,2],[2,6,1],[2,7,1],[2,4,2],[2,4,3],[2,7,3],[2,7,4],[2,1,1],[2,71,9],[2,4,8],[2,33,4],[2,16,2],[2,1,18],[2,15,1],[2,3,1],[2,8,1],[2,6,3],[2,4,2],[2,1,1],[2,7,2],[2,2,8],[2,2,1],[2,8,1],[2,1,3],[2,5,1],[2,2,2],[2,11,1],[2,17,3],[2,118,1],[2,8,4],[2,14,1],[2,3,4],[2,14,1],[2,2,2],[2,4,3],[2,2,1],[2,11,1],[2,8,10],[2,1,2],[2,3,3],[2,2,2],[2,12,1],[2,2,2],[2,26,3],[2,3,2],[2,3,3],[2,19,1],[2,1,13],[2,23,2],[2,3,1],[2,7,4],[2,10,4],[2,2,3],[2,71,3],[2,3,3],[2,23,1],[2,1,1],[2,34,3],[2,62,1],[2,4,1],[2,7,2],[2,2,8],[2,6,1],[2,20,3],[2,26,2],[2,5,2],[2,2,1],[2,7,1],[2,1,1],[2,7,2],[2,28,7],[2,4,1],[2,2,2],[2,4,1],[2,7,1],[2,2,3],[2,3,1],[2,8,3],[2,43,1],[2,2,1],[2,1,4],[2,2,1],[2,13,3],[2,4,2],[2,6,1],[2,17,1],[2,2,8],[2,32,1],[2,11,2],[2,5,2],[2,45,3],[2,9,1],[2,14,2],[2,9,1],[2,2,1],[2,10,5],[2,2,1],[2,13,1],[2,2,2],[2,3,5],[2,2,1],[2,17,3],[2,11,1],[2,15,1],[2,13,4],[2,7,7],[2,10,2],[2,6,4],[2,2,3],[2,1,3],[2,27,2],[2,2,3],[2,2,1],[2,3,1],[2,3,9],[2,3,46],[2,11,1],[2,30,1],[2,5,1],[2,8,8],[2,2,1],[2,1,1],[2,2,1],[2,6,7],[2,1,1],[2,4,1],[2,4,2],[2,15,2],[2,6,7],[2,4,2],[2,5,1],[2,1,4],[2,2,3],[2,1,2],[2,2,2],[2,1,7],[2,15,2],[2,18,3],[2,2,1],[2,6,1],[2,8,1],[2,134,20],[2,26,1],[2,2,2],[2,8,4],[2,1,1],[2,3,1],[2,14,1],[2,3,1],[2,26,1],[2,19,1],[2,1,1],[2,1,1],[2,7,1],[2,5,2],[2,5,8],[2,3,4],[2,1,1],[2,2,2],[2,16,1],[2,7,2],[2,6,1],[2,1,6],[2,4,3],[2,2,2],[2,2,2],[2,2
,1],[2,2,1],[2,1,2],[2,8,3],[2,4,1],[2,9,1],[2,18,33],[2,14,1],[2,1,1],[2,3,2],[2,7,1],[2,14,4],[2,4,2],[2,31,7],[2,19,2],[2,11,4],[2,2,1],[2,7,2],[2,2,1],[2,2,3],[2,52,4],[2,4,1],[2,1,1],[2,4,3],[2,11,1],[2,3,2],[2,6,1],[2,10,3],[2,6,1],[2,12,1],[2,10,2],[2,4,2],[2,23,2],[2,3,3],[2,8,1],[2,21,6],[2,2,2],[2,1,1],[2,1,1],[2,16,3],[2,9,2],[2,5,1],[2,2,2],[2,1,4],[2,4,1],[2,1,25],[2,24,2],[2,6,1],[2,3,4],[2,10,4],[2,6,2],[2,35,2],[2,2,2],[2,1,1],[2,25,10],[2,8,1],[2,1,2],[2,1,1],[2,2,1],[2,3,8],[2,2,1],[2,2,1],[2,5,2],[2,4,3],[2,2,8],[2,1,1],[2,4,2],[2,3,3],[2,12,1],[2,3,2],[2,4,1],[2,2,4],[2,7,2],[2,1,1],[2,73,14],[2,90,1],[2,4,1],[2,2,1],[2,1,1],[2,6,3],[2,1,1],[2,4,1],[2,10,3],[2,2,3],[2,1,1],[2,6,1],[2,37,2],[2,10,1],[2,2,2],[2,60,2],[2,16,3],[2,6,1],[2,1,1],[2,3,4],[2,38,5],[2,6,2],[2,2,1],[2,2,1],[2,9,2],[2,11,1],[2,6,1],[2,9,1],[2,2,2],[2,4,3],[2,8,1],[2,3,2],[2,1,9],[2,14,2],[2,8,1],[2,30,4],[2,2,1],[2,31,2],[2,31,1],[2,21,23],[2,1,5],[2,4,1],[2,2,1],[2,5,3],[2,4,2],[2,10,2],[2,2,2],[2,18,1],[2,15,1],[2,2,1],[2,1,2],[2,5,1],[2,13,1],[2,14,4],[2,1,4],[2,5,1],[2,109,3],[2,18,2],[2,1,2],[2,164,114],[2,8,1],[2,2,3],[2,4,1],[2,1,1],[2,10,1],[2,9,2],[2,4,3],[2,1,75],[2,6,1],[2,17,2],[2,3,1],[2,9,1],[2,2,1],[2,21,1],[2,30,3],[2,7,2],[2,2,2],[2,63,5],[2,16,3],[2,6,1],[2,2,8],[2,25,2],[2,31,3],[2,126,21],[2,10,1],[2,2,2],[2,14,7],[2,6,10],[2,4,3],[2,7,1],[2,12,1],[2,2,1],[2,3,2],[2,2,15],[2,1,4],[2,4,1],[2,3,1],[2,4,1],[2,6,2],[2,7,3],[2,2,3],[2,9,2],[2,6,1],[2,2,1],[2,16,1],[2,22,2],[2,10,1],[2,10,4],[2,7,2],[2,13,1],[2,3,1],[2,7,2],[2,23,12],[2,3,1],[2,6,1],[2,4,2],[2,29,2],[2,5,3],[2,8,1],[2,1,1],[2,6,1],[2,3,1],[2,17,2],[2,15,1],[2,2,1],[2,6,1],[2,2,2],[2,30,1],[2,3,1],[2,2,2],[2,2,5],[2,2,1],[2,37,5],[2,6,2],[2,7,6],[2,2,3],[2,3,3],[2,2,5],[2,75,6],[2,2,3],[2,10,1],[2,2,3],[2,7,2],[2,30,1],[2,12,33],[2,1,1],[2,3,4],[2,14,1],[2,9,2],[2,8,1],[2,1,1],[2,9,1],[2,4,1],[2,2,1],[2,7,1],[2,4,1],[2,3,1],[2,4,3],[2,1,1],[2,5,2],[2,3,4],[2,4,2],[2,6,3],[2,13,5],[2,4,2],[2,6,1],[2,2,5],[2,2,3],[2,1,1],[2,14,1],[2,5,1],[2,4,2],[2,9,1],[2,7,6],[2,4,1],[2,19,2],[2,23,1],[2,20,7],[2,9,1],[2,4,1],[2,12,2],[2,9,4],[2,3,2],[2,3,7],[2,3,1],[2,10,2],[2,6,1],[2,7,1],[2,1,1],[2,9,1],[2,6,1],[2,1,1],[2,17,2],[2,9,1],[2,5,2],[2,1,1],[2,11,2],[2,9,1],[2,1,1],[2,3,6],[2,2,1],[2,5,9],[2,12,2],[2,2,1],[2,6,2],[2,17,4],[2,2,2],[2,7,1],[2,596,5],[2,6,1],[2,2,1],[2,58,125],[2,6,1],[2,8,1],[2,2,1],[2,3,1],[2,1,2],[2,11,4],[2,1,1],[2,9,6],[2,2,8],[2,1,1],[2,6,2],[2,1,1],[2,2,1],[2,7,2],[2,7,3],[2,14,2],[2,1,1],[2,18,9],[2,2,5],[2,2,12],[2,8,4],[2,6,4],[2,3,1],[2,19,2],[2,4,1],[2,2,1],[2,4,3],[2,3,1],[2,13,1],[2,1,1],[2,7,1],[2,1,1],[2,8,1],[2,13,14],[2,11,1],[2,31,1],[2,4,1],[2,6,1],[2,3,2],[2,26,1],[2,4,2],[2,1,1],[2,2,2],[2,1,2],[2,1,1],[2,7,1],[2,8,1],[2,6,2],[2,19,13],[2,2,3],[2,8,3],[2,1,6],[2,5,1],[2,1,1],[2,6,1],[2,9,1],[2,2,2],[2,35,1],[2,1,1],[2,27,2],[2,54,2],[2,6,2],[2,5,1],[2,2,1],[2,2,4],[2,2,1],[2,2,1],[2,14,1],[2,9,1],[2,53,17],[2,2,1],[2,10,1],[2,9,1],[2,23,1],[2,7,1],[2,12,4],[2,1,2],[2,8,1],[2,7,4],[2,2,1],[2,2,1],[2,3,1],[2,11,1],[2,2,2],[2,6,1],[2,2,1],[2,18,4],[2,3,4],[2,8,2],[2,13,1],[2,2,1],[2,1,2],[2,14,4],[2,8,11],[2,1,1],[2,8,3],[2,7,3],[2,90,1],[2,20,2],[2,16,1],[2,20,2],[2,3,1],[2,8,10],[2,10,1],[2,10,1],[2,1,1],[2,3,1],[2,5,1],[2,37,3],[2,24,3],[2,10,1],[2,3,1],[2,2,4],[2,4,1],[2,19,2],[2,1,1],[2,5,1],[2,8,1],[2,3,1],[2,1,1],[2,2,1],[2,2,32],[2,2,1],[2,4,1],[2,1,1],[2,2,2],[2,5,1],[2,2,3],[2,25,9],[2,2,1],[2,4,4],[2,2,1],[2,15,1],[2,59,1],[2,3,2],[2,4,1],[2,9,2],[2,3,10],[2,6,1],[2,5,5],[2,8,2]
,[2,2,2],[2,4,2],[2,10,1],[2,126,1],[2,3,1],[2,8,1],[2,9,2],[2,1,30],[2,25,1],[2,7,3],[2,2,2],[2,1,3],[2,21,1],[2,38,1],[2,48,1],[2,22,1],[2,4,2],[2,55,2],[2,5,1],[2,15,1],[2,14,44],[2,4,1],[2,1,2],[2,2,3],[2,2,1],[2,3,3],[2,6,1],[2,2,1],[2,26,7],[2,4,1],[2,1,2],[2,3,2],[2,6,2],[2,10,1],[2,18,3],[2,2,1],[2,38,2],[2,1,1],[2,8,1],[2,8,1],[2,3,1],[2,4,1],[2,1,1],[2,1,2],[2,4,1],[2,26,2],[2,3,3],[2,2,1],[2,6,1],[2,19,1],[2,3,4],[2,2,1],[2,4,1],[2,11,1],[2,9,1],[2,9,1],[2,9,1],[2,1,1],[2,1,1],[2,7,1],[2,2,1],[2,11,4],[2,10,2],[2,4,1],[2,6,1],[2,4,1],[2,8,1],[2,11,1],[2,1,1],[2,7,1],[2,8,2],[2,9,1],[2,8,1],[2,41,2],[2,2,4],[2,1,6],[2,2,1],[2,6,3],[2,128,5],[2,2,1],[2,13,13],[2,6,1],[2,1,3],[2,3,3],[2,7,2],[2,10,12],[2,2,1],[2,8,1],[2,1,1],[2,7,1],[2,2,1],[2,10,2],[2,11,10],[2,1,1],[2,8,3],[2,4,5],[2,2,1],[2,14,2],[2,4,1],[2,4,1],[2,7,1],[2,6,1],[2,7,3],[2,1,1],[2,2,1],[2,7,2],[2,2,1],[2,6,1],[2,8,1],[2,2,4],[2,6,1],[2,43,1],[2,108,3],[2,8,1],[2,13,1],[2,4,1],[2,10,3],[2,2,1],[2,24,2],[2,1,2],[2,4,2],[2,2,2],[2,40,6],[2,6,2],[2,6,2],[2,4,3],[2,28,5],[2,4,1],[2,15,1],[2,12,1],[2,1,1],[2,27,1],[3,1,1],[3,5,2],[3,16,2],[3,16,3],[3,1,2],[3,98,2],[3,91,7],[3,6,37],[3,4,1],[3,9,1],[3,97,2],[3,6,1],[3,23,3],[3,115,1],[3,2,1],[3,1,1],[3,1,1],[3,14,4],[3,1,1],[3,28,1],[3,1,1],[3,6,1],[3,15,5],[3,3,1],[3,52,1],[3,2,3],[3,3,1],[3,4,5],[3,13,1],[3,16,3],[3,13,1],[3,17,1],[3,4,4],[3,6,7],[3,14,1],[3,32,1],[3,3,3],[3,11,4],[3,1,1],[3,8,6],[3,9,7],[3,2,1],[3,9,2],[3,5,2],[3,26,12],[3,11,3],[3,12,2],[3,4,2],[3,6,2],[3,30,6],[3,1,2],[3,10,1],[3,1,1],[3,4,1],[3,7,1],[3,30,29],[3,2,3],[3,2,2],[3,2,1],[3,11,1],[3,2,3],[3,3,1],[3,9,1],[3,2,2],[3,5,1],[3,1,2],[3,1,13],[3,6,9],[3,1,1],[3,6,2],[3,1,3],[3,4,1],[3,6,1],[3,9,3],[3,1,1],[3,9,2],[3,19,45],[3,2,1],[3,7,8],[3,21,3],[3,6,2],[3,2,1],[3,6,1],[3,5,1],[3,2,1],[3,15,7],[3,2,1],[3,9,3],[3,11,1],[3,4,1],[3,7,1],[3,2,1],[3,19,1],[3,5,1],[3,2,1],[3,1,1],[3,22,3],[3,21,5],[3,13,1],[3,2,1],[3,4,1],[3,23,1],[3,8,1],[3,3,2],[3,2,2],[3,4,1],[3,12,2],[3,5,2],[3,16,8],[3,6,1],[3,1,2],[3,2,1],[3,7,1],[3,6,1],[3,6,3],[3,45,1],[3,4,5],[3,1,2],[3,3,1],[3,2,1],[3,1,1],[3,12,1],[3,8,1],[3,3,1],[3,6,1],[3,2,2],[3,9,2],[3,5,2],[3,2,1],[3,3,1],[3,15,1],[3,11,1],[3,4,1],[3,9,2],[3,3,1],[3,4,1],[3,1,3],[3,6,15],[3,6,3],[3,2,6],[3,1,3],[3,3,2],[3,15,1],[3,6,1],[3,7,1],[3,5,1],[3,9,1],[3,49,2],[3,5,2],[3,9,4],[3,39,1],[3,4,3],[3,1,5],[3,1,2],[3,2,1],[3,14,2],[3,4,3],[3,18,1],[3,5,4],[3,19,3],[3,3,1],[3,2,1],[3,3,2],[3,48,10],[3,1,1],[3,5,6],[3,12,3],[3,1,2],[3,5,4],[3,4,1],[3,4,1],[3,5,1],[3,1,1],[3,10,1],[3,10,2],[3,6,3],[3,2,7],[3,4,1],[3,9,2],[3,1,1],[3,2,1],[3,4,6],[3,1,1],[3,25,9],[3,11,1],[3,2,1],[3,8,2],[3,1,1],[3,9,3],[3,4,6],[3,1,7],[3,1,1],[3,4,1],[3,11,2],[3,14,1],[3,65,2],[3,6,1],[3,5,2],[3,2,2],[3,13,1],[3,2,5],[3,2,1],[3,4,2],[3,25,1],[3,2,1],[3,2,3],[3,9,1],[3,5,5],[3,46,1],[3,6,2],[3,12,9],[3,4,4],[3,2,3],[3,13,5],[3,39,16],[3,3,1],[3,1,2],[3,68,14],[3,5,1],[3,11,1],[3,7,1],[3,4,1],[3,53,11],[3,4,3],[3,4,1],[3,2,1],[3,4,1],[3,1,1],[3,1,2],[3,8,4],[3,5,1],[3,6,5],[3,6,13],[3,403,3],[3,23,1],[3,3,3],[3,14,1],[3,10,1],[3,3,2],[3,46,11],[3,4,3],[3,29,1],[3,41,2],[3,11,1],[3,15,3],[3,11,2],[3,6,1],[3,3,1],[3,17,2],[3,14,3],[3,5,4],[3,2,1],[3,2,1],[3,5,6],[3,6,1],[3,54,2],[3,2,1],[3,4,2],[3,1,1],[3,7,1],[3,8,34],[3,7,1],[3,1,2],[3,3,2],[3,2,5],[3,1,1],[3,15,12],[3,13,1],[3,5,1],[3,1,1],[3,5,1],[3,39,1],[3,26,9],[3,11,1],[3,6,1],[3,2,1],[3,19,4],[3,4,5],[3,10,1],[3,11,6],[3,4,1],[3,38,1],[3,1,1],[3,1,3],[3,2,1],[3,5,10],[3,4,1],[3,18,2],[3,4,1],[3,19,1],[3,1,1],[3,8,6],[3,1,1],[3,9
,1],[3,8,3],[3,15,4],[3,9,3],[3,13,1],[3,10,1],[3,1,2],[3,5,4],[3,4,2],[3,4,1],[3,28,1],[3,6,2],[3,9,1],[3,1,2],[3,2,2],[3,25,1],[3,5,8],[3,5,3],[3,8,2],[3,2,1],[3,14,5],[3,2,1],[3,11,3],[3,10,1],[3,2,2],[3,1,1],[3,3,1],[3,9,1],[3,39,9],[3,27,2],[3,1,1],[3,1,3],[3,12,3],[3,6,1],[3,14,2],[3,17,3],[3,198,1],[3,3,1],[3,5,1],[3,1,1],[3,2,4],[3,12,1],[3,31,1],[3,8,14],[3,25,2],[3,16,2],[3,18,2],[3,2,3],[3,2,3],[3,6,28],[3,22,3],[3,6,1],[3,8,2],[3,4,3],[3,3,3],[3,8,1],[3,1,1],[3,1,2],[3,1,1],[3,1,1],[3,1,2],[3,6,2],[3,2,3],[3,4,1],[3,3,1],[3,1,1],[3,3,2],[3,8,10],[3,6,1],[3,2,1],[3,2,1],[3,5,1],[3,29,6],[3,10,1],[3,3,8],[3,1,3],[3,2,2],[3,3,1],[3,3,4],[3,5,19],[3,15,1],[3,65,1],[3,2,2],[3,60,3],[3,52,1],[3,1,1],[3,4,2],[3,4,1],[3,6,1],[3,7,4],[3,1,1],[3,13,1],[3,8,3],[3,13,1],[3,6,1],[3,3,2],[3,14,1],[3,2,2],[3,4,1],[3,1,1],[3,11,29],[3,7,1],[3,21,6],[3,4,1],[3,1,1],[3,2,1],[3,9,1],[3,2,4],[3,3,1],[3,2,3],[3,1,2],[3,3,2],[3,3,4],[3,16,2],[3,9,2],[3,2,1],[3,17,8],[3,9,4],[3,7,1],[3,6,4],[3,1,2],[3,2,1],[3,4,4],[3,2,1],[3,3,1],[3,3,1],[3,11,1],[3,2,2],[3,2,1],[3,2,3],[3,2,2],[3,10,6],[3,10,4],[3,1,1],[3,8,3],[3,29,2],[3,7,1],[3,2,1],[3,4,1],[3,11,1],[3,2,1],[3,2,2],[3,13,3],[3,4,1],[3,3,1],[3,2,4],[3,18,1],[3,12,1],[3,6,3],[3,3,1],[3,5,1],[3,3,2],[3,9,2],[3,5,1],[3,5,1],[3,11,1],[3,1,1],[3,39,18],[3,3,2],[3,4,1],[3,17,2],[3,14,2],[3,10,6],[3,1,1],[3,4,5],[3,2,1],[3,4,6],[3,12,1],[3,106,80],[3,32,1],[3,7,1],[3,8,1],[3,2,1],[3,33,2],[3,33,7],[3,10,1],[3,3,2],[3,4,3],[3,16,3],[3,7,1],[3,8,1],[3,16,1],[3,8,1],[3,8,1],[3,30,1],[3,7,1],[3,2,1],[3,3,10],[3,27,1],[3,2,1],[3,1,3],[3,2,1],[3,23,1],[3,1,1],[3,5,2],[3,6,1],[3,2,1],[3,2,13],[3,1,3],[3,6,2],[3,5,1],[3,26,1],[3,4,5],[3,2,1],[3,9,1],[3,6,1],[3,2,1],[3,21,2],[3,15,1],[3,4,2],[3,2,1],[3,30,1],[3,4,2],[3,2,1],[3,2,58],[3,8,2],[3,13,1],[3,16,2],[3,10,6],[3,6,1],[3,6,1],[3,2,6],[3,1,1],[3,2,4],[3,11,9],[3,25,2],[3,4,2],[3,1,1],[3,9,9],[3,1,9],[3,3,3],[3,4,1],[3,2,3],[3,5,2],[3,2,7],[3,2,1],[3,2,1],[3,6,3],[3,3,4],[3,1,2],[3,4,3],[3,7,118],[3,7,1],[3,6,1],[3,3,1],[3,1,15],[3,1,2],[3,4,2],[3,2,1],[3,4,1],[3,6,1],[3,23,1],[3,1,1],[3,3,1],[3,4,1],[3,10,3],[3,2,2],[3,6,5],[3,8,1],[3,3,1],[3,4,1],[3,20,2],[3,14,2],[3,7,1],[3,21,29],[3,10,2],[3,10,2],[3,3,3],[3,2,1],[3,3,2],[3,24,3],[3,3,1],[3,9,1],[3,6,1],[3,22,1],[3,13,1],[3,5,2],[3,1,1],[3,9,1],[3,10,2],[3,4,1],[3,7,1],[3,2,1],[3,12,4],[3,48,2],[3,43,1],[3,6,1],[3,1,1],[3,4,1],[3,14,10],[3,2,1],[3,1,1],[3,1,1],[3,3,1],[3,11,5],[3,36,1],[3,4,49],[3,11,1],[3,8,1],[3,2,2],[3,3,1],[3,3,1],[3,8,3],[3,15,8],[3,30,9],[3,23,5],[3,10,1],[3,7,6],[3,1,1],[3,9,2],[3,6,1],[3,3,1],[3,3,1],[3,2,1],[3,21,1],[3,13,2],[3,4,2],[3,9,2],[3,8,1],[3,2,2],[3,4,2],[3,1,1],[3,9,2],[3,32,2],[3,2,2],[3,10,1],[3,1,4],[3,4,3],[3,14,3],[3,5,2],[3,2,1],[3,3,1],[3,5,3],[3,14,3],[3,2,3],[3,6,1],[3,4,1],[3,1,1],[3,16,1],[3,3,1],[3,2,1],[3,5,1],[3,33,1],[3,3,1],[3,14,4],[3,8,3],[3,12,2],[3,14,1],[3,2,1],[3,1,1],[3,13,2],[3,8,1],[3,9,1],[3,17,1],[3,14,2],[3,16,1],[3,12,4],[3,2,1],[3,2,2],[3,20,1],[3,2,2],[3,8,4],[3,7,3],[3,8,1],[3,1,2],[3,5,5],[3,29,1],[3,1,1],[3,2,1],[3,8,2],[3,2,1],[3,7,9],[3,3,2],[3,7,1],[3,6,1],[3,6,2],[3,1,26],[3,3,3],[3,7,1],[3,2,2],[3,8,2],[3,7,1],[3,3,1],[3,4,4],[3,11,1],[3,5,15],[3,28,1],[3,3,8],[3,3,3],[3,2,4],[3,6,4],[3,3,2],[3,2,2],[3,5,1],[3,12,2],[3,10,2],[3,1,1],[3,6,1],[3,2,1],[3,3,2],[4,8,1],[4,3,1],[4,23,1],[4,4,9],[4,6,2],[4,9,1],[4,9,6],[4,5,9],[4,8,1],[4,2,1],[4,2,3],[4,8,1],[4,1,1],[4,4,1],[4,8,1],[4,2,1],[4,16,1],[4,1,8],[4,4,1],[4,1,3],[4,18,1],[4,2,1],[4,4,9],[4,2,1],[4,3,1],[4,9,2],[4,2,1],[4,7,3],[4
,5,4],[4,27,2],[4,1,1],[4,8,2],[4,7,1],[4,8,1],[4,9,4],[4,3,2],[4,6,4],[4,2,2],[4,13,5],[4,8,1],[4,10,2],[4,1,1],[4,2,1],[4,1,2],[4,6,2],[4,5,2],[4,8,2],[4,16,2],[4,7,2],[4,102,5],[4,2,2],[4,1,1],[4,2,1],[4,1,2],[4,2,1],[4,29,4],[4,2,1],[4,1,1],[4,1,4],[4,3,2],[4,6,1],[4,19,2],[4,4,3],[4,1,12],[4,1,1],[4,62,3],[4,14,1],[4,1,1],[4,1,1],[4,7,4],[4,9,1],[4,15,1],[4,16,15],[4,2,2],[4,2,1],[4,41,3],[4,7,8],[4,7,3],[4,5,1],[4,9,1],[4,6,1],[4,1,3],[4,15,1],[4,5,4],[4,28,2],[4,11,3],[4,15,1],[4,1,1],[4,1,1],[4,12,1],[4,16,4],[4,12,5],[4,5,2],[4,8,4],[4,124,115],[4,11,3],[4,46,10],[4,4,1],[4,3,1],[4,2,1],[4,27,1],[4,1,1],[4,20,1],[4,2,1],[4,4,1],[4,53,1],[4,18,1],[4,1,1],[4,8,2],[4,3,1],[4,2,1],[4,5,1],[4,2,3],[4,2,5],[4,3,1],[4,8,1],[4,2,5],[4,8,2],[4,9,2],[4,48,1],[4,9,1],[4,20,2],[4,4,4],[4,3,2],[4,8,2],[4,6,2],[4,12,6],[4,9,1],[4,3,1],[4,4,1],[4,5,3],[4,5,1],[4,8,4],[4,3,1],[4,7,1],[4,6,2],[4,15,16],[4,6,1],[4,50,4],[4,23,4],[4,9,7],[4,8,2],[4,1,1],[4,2,1],[4,9,1],[4,12,1],[4,4,3],[4,2,2],[4,42,4],[4,1,1],[4,6,1],[4,11,10],[4,6,11],[4,7,1],[4,4,2],[4,4,2],[4,6,1],[4,59,4],[4,1,1],[4,2,7],[4,12,20],[4,11,3],[4,4,1],[4,12,3],[4,6,3],[4,7,2],[4,17,4],[4,106,8],[4,6,2],[4,7,1],[4,1,1],[4,8,1],[4,4,6],[4,3,1],[4,4,3],[4,14,3],[4,15,2],[4,4,1],[4,44,91],[4,7,2],[4,3,2],[4,2,1],[4,23,2],[4,30,1],[4,2,2],[4,10,1],[4,6,9],[4,6,2],[4,3,2],[4,3,2],[4,20,1],[4,4,1],[4,18,2],[4,12,1],[4,20,14],[4,10,1],[4,3,1],[4,2,1],[4,3,2],[4,3,3],[4,6,3],[4,2,4],[4,8,1],[4,8,5],[4,3,1],[4,10,2],[4,2,1],[4,1,1],[4,10,1],[4,25,2],[4,1,1],[4,4,1],[4,63,2],[4,1,1],[4,4,1],[4,6,7],[4,2,3],[4,8,1],[4,19,2],[4,11,1],[4,30,10],[4,4,4],[4,2,3],[4,2,1],[4,43,29],[4,2,1],[4,1,1],[4,17,1],[4,14,1],[4,13,1],[4,6,4],[4,2,2],[4,1,2],[4,3,1],[4,7,3],[4,4,1],[4,4,1],[4,1,1],[4,13,5],[4,2,1],[4,1,1],[4,5,1],[4,4,2],[4,13,2],[4,10,4],[4,8,1],[4,3,1],[4,2,2],[4,8,3],[4,4,2],[4,6,1],[4,7,1],[4,14,29],[4,19,1],[4,7,1],[4,19,1],[4,24,2],[4,2,1],[4,1,1],[4,28,1],[4,1,1],[4,2,1],[4,3,1],[4,2,1],[4,1,7],[4,2,4],[4,3,1],[4,29,1],[4,2,1],[4,14,1],[4,2,1],[4,28,3],[4,11,3],[4,1,2],[4,21,2],[4,1,1],[4,15,1],[4,17,1],[4,16,1],[4,13,1],[4,2,1],[4,15,5],[4,19,1],[4,17,1],[4,5,3],[4,12,2],[4,33,1],[4,8,1],[4,15,4],[4,2,11],[4,4,1],[4,1,10],[4,39,1],[4,28,1],[4,25,2],[4,1,1],[4,14,2],[4,8,32],[4,9,1],[4,7,1],[4,6,2],[4,1,2],[4,3,1],[4,6,2],[4,12,2],[4,2,2],[4,5,2],[4,18,1],[4,5,3],[4,6,2],[4,25,1],[4,3,16],[4,14,4],[4,2,6],[4,14,2],[4,3,1],[4,4,1],[4,9,3],[4,28,2],[4,9,1],[4,2,1],[4,7,1],[4,2,1],[4,1,4],[4,4,3],[4,1,1],[4,16,6],[4,3,1],[4,10,1],[4,12,3],[4,8,1],[4,4,1],[4,15,2],[4,4,1],[4,2,3],[4,2,9],[4,4,1],[4,7,2],[4,14,1],[4,31,3],[4,13,1],[4,19,2],[4,8,3],[4,2,1],[4,12,1],[4,5,1],[4,45,3],[4,6,1],[4,1,1],[4,12,6],[4,4,3],[4,3,1],[4,5,2],[4,4,4],[4,19,2],[4,8,1],[4,2,1],[4,27,2],[4,73,3],[4,22,2],[4,1,2],[4,7,46],[4,9,2],[4,2,1],[4,524,305],[4,7,1],[4,26,1],[4,2,1],[4,6,1],[4,30,2],[4,6,1],[4,25,92],[4,2,1],[4,13,1],[4,1,4],[4,1,7],[4,6,1],[4,8,2],[4,6,1],[4,4,2],[4,2,6],[4,12,2],[4,2,2],[4,5,2],[4,3,2],[4,13,1],[4,4,1],[4,6,3],[4,14,1],[4,15,1],[4,25,1],[4,3,1],[4,9,4],[4,94,3],[4,11,2],[4,12,4],[4,7,3],[4,3,1],[4,9,2],[4,3,1],[4,2,1],[4,8,3],[4,7,5],[4,2,45],[4,10,1],[4,10,4],[4,5,3],[4,6,6],[5,5,1],[5,2,1],[5,3,3],[5,11,2],[5,28,1],[5,8,1],[5,4,1],[5,4,1],[5,12,1],[5,7,1],[5,1,1],[5,38,7],[5,6,2],[5,4,2],[5,5,1],[5,2,2],[5,2,7],[5,1,4],[5,4,1],[5,4,1],[5,1,2],[5,3,1],[5,7,1],[5,2,1],[5,10,2],[5,4,1],[5,2,1],[5,2,2],[5,3,1],[5,15,78],[5,2,1],[5,1,5],[5,10,1],[5,6,4],[5,10,2],[5,5,1],[5,1,1],[5,1,1],[5,2,2],[5,6,1],[5,2,2],[5,6,2],[5,10,2],[5,3,1],[5
,6,2],[5,4,3],[5,16,5],[5,47,48],[5,2,5],[5,6,7],[5,4,2],[5,3,1],[5,2,1],[5,8,1],[5,7,1],[5,2,2],[5,2,1],[5,3,1],[5,7,4],[5,1,1],[5,1,1],[5,8,6],[5,1,4],[5,9,3],[5,11,4],[5,6,1],[5,6,1],[5,2,1],[5,5,1],[5,84,1],[5,2,33],[5,8,1],[5,6,3],[5,5,3],[5,2,1],[5,10,2],[5,3,1],[5,68,9],[5,6,2],[5,21,11],[5,3,4],[5,3,1],[5,16,3],[5,2,2],[5,2,1],[5,14,2],[5,24,2],[5,19,1],[5,1,4],[5,1,1],[5,3,1],[5,6,1],[5,2,1],[5,5,2],[5,4,3],[5,26,3],[5,2,1],[5,6,4],[5,2,1],[5,6,3],[5,5,1],[5,8,3],[5,1,3],[5,9,1],[5,1,2],[5,11,2],[5,23,1],[5,7,1],[5,2,2],[5,3,2],[5,2,1],[5,11,2],[5,8,2],[5,1,1],[5,4,1],[5,2,1],[5,7,1],[5,11,1],[5,1,1],[5,33,1],[5,4,1],[5,5,1],[5,17,3],[5,1,2],[5,18,2],[5,1,2],[5,1,1],[5,2,3],[5,4,2],[5,2,1],[5,13,7],[5,5,1],[5,19,4],[5,23,9],[5,11,6],[5,7,2],[5,10,1],[5,2,1],[5,26,1],[5,3,3],[5,3,2],[5,3,2],[5,15,3],[5,2,1],[5,3,1],[5,4,1],[5,8,1],[5,4,1],[5,23,1],[5,6,1],[5,1,3],[5,124,17],[5,1,1],[5,1,1],[5,15,1],[5,11,2],[5,2,1],[5,2,2],[5,3,2],[5,1,1],[5,6,4],[5,6,1],[5,3,3],[5,6,5],[5,17,1],[5,7,2],[5,5,1],[5,11,1],[5,3,2],[5,36,2],[5,17,7],[5,4,1],[5,7,2],[5,2,1],[5,2,1],[5,2,1],[5,7,10],[5,4,1],[5,1,3],[5,19,2],[5,2,2],[5,3,1],[5,8,3],[5,4,1],[5,15,1],[5,2,3],[5,13,2],[5,1,3],[5,7,1],[5,23,48],[5,9,1],[5,12,10],[5,16,1],[5,10,1],[5,7,5],[5,2,1],[5,3,1],[5,23,2],[5,4,1],[5,18,1],[5,13,2],[5,54,136],[5,6,2],[5,2,2],[5,5,1],[5,6,1],[5,15,8],[5,14,9],[5,4,1],[5,7,2],[5,3,3],[5,117,5],[5,25,8],[5,14,4],[5,25,3],[5,7,1],[5,7,1],[5,15,3],[5,3,2],[5,4,1],[5,6,4],[5,14,4],[5,7,1],[5,20,1],[5,6,5],[5,12,1],[5,9,3],[5,2,1],[5,4,20],[5,4,3],[5,1,1],[5,1,1],[5,8,1],[5,4,1],[5,1,1],[5,6,3],[5,19,1],[5,14,1],[5,22,2],[5,2,1],[5,11,2],[5,1,1],[5,10,1],[5,4,1],[5,23,3],[5,3,1],[5,15,1],[5,8,4],[5,11,4],[5,4,1],[5,2,1],[5,8,6],[5,2,4],[5,2,7],[5,3,2],[5,2,1],[5,1,1],[5,1,1],[5,11,2],[5,4,10],[5,11,4],[5,110,4],[5,6,1],[5,2,1],[5,96,34],[6,4,1],[6,7,3],[6,2,1],[6,6,2],[6,10,1],[6,2,1],[6,10,1],[6,59,2],[6,7,4],[6,4,2],[6,3,1],[6,6,1],[6,1,4],[6,7,3],[6,2,3],[6,1,1],[6,12,1],[6,1,39],[6,28,1],[6,3,4],[6,8,3],[6,4,4],[6,9,2],[6,15,1],[6,10,1],[6,1,1],[6,2,1],[6,7,1],[6,2,1],[6,93,1],[6,14,6],[6,2,2],[6,55,39],[6,15,2],[6,23,3],[6,3,3],[6,35,2],[6,5,15],[6,1,7],[6,8,19],[6,10,10],[6,3,2],[6,6,3],[6,1,2],[6,6,1],[6,2,1],[6,4,1],[6,127,20],[6,20,18],[6,3,1],[6,9,2],[6,2,3],[6,10,1],[6,27,1],[6,9,1],[6,9,1],[6,28,1],[6,1,1],[6,10,1],[6,11,1],[6,5,1],[6,4,1],[6,82,35],[6,2,1],[6,1,1],[6,3,1],[6,2,1],[6,2,11],[6,2,8],[6,3,2],[6,12,3],[6,5,6],[6,42,4],[6,8,1],[6,2,1],[6,2,2],[6,10,3],[6,6,2],[6,48,2],[6,2,3],[6,2,2],[6,2,1],[6,4,1],[6,10,1],[6,1,1],[6,7,1],[6,35,1],[6,17,1],[6,21,2],[6,1,1],[6,4,2],[6,25,1],[6,7,2],[6,12,4],[6,2,6],[6,24,4],[6,2,1],[6,5,1],[6,2,1],[6,2,1],[6,3,2],[6,4,2],[6,2,1],[6,2,1],[6,2,9],[6,2,2],[6,5,1],[6,8,10],[6,1,1],[6,12,2],[6,10,1],[6,4,2],[6,12,4],[6,1,3],[6,3,2],[6,8,1],[6,4,4],[6,12,5],[6,4,2],[6,10,1],[6,1,1],[6,12,1],[6,6,4],[6,2,1],[6,3,2],[6,1,1],[6,3,5],[6,6,1],[6,32,1],[6,10,1],[6,6,5],[6,27,2],[6,7,1],[6,2,1],[6,10,2],[6,5,1],[6,8,2],[6,3,2],[6,9,2],[6,22,1],[6,2,2],[6,10,1],[6,3,4],[6,1,1],[6,3,6],[6,8,2],[6,44,1],[6,1,1],[6,9,7],[6,9,5],[6,19,4],[6,7,1],[6,1,1],[6,10,1],[6,14,2],[6,4,3],[6,4,1],[6,6,1],[6,3,1],[6,4,1],[6,6,3],[6,6,2],[6,6,1],[6,1,3],[6,12,13],[6,3,2],[6,1,4],[6,15,1],[6,39,4],[6,5,1],[6,1,5],[6,11,3],[6,5,7],[6,9,2],[6,1,1],[6,12,1],[6,12,1],[6,1,4],[6,11,1],[6,3,1],[6,6,2],[6,5,2],[6,2,1],[6,1,2],[6,2,1],[6,41,23],[6,3,1],[6,15,1],[6,1,1],[6,1,1],[6,2,2],[6,3,1],[6,10,1],[6,17,6],[6,5,2],[6,30,1],[7,2,2],[7,10,2],[7,8,3],[7,9,4],[7,4,1],[7,8,1],[7,2,1],[7,7,134
],[7,16,1],[7,5,3],[7,3,1],[7,6,2],[7,1,1],[7,5,1],[7,5,1],[7,2,1],[7,24,1],[7,8,4],[7,9,2],[7,1,1],[7,6,2],[7,9,2],[7,1,1],[7,5,28],[7,1,1],[7,2,2],[7,7,2],[7,11,1],[7,2,1],[7,17,32],[7,5,1],[7,2,1],[7,3,2],[7,7,4],[7,15,3],[7,3,1],[7,6,2],[7,1,1],[7,2,1],[7,1,1],[7,1,11],[7,2,1],[7,8,1],[7,6,1],[7,2,1],[7,57,1],[7,20,46],[7,6,2],[7,6,1],[7,1,2],[7,28,7],[7,3,5],[7,4,1],[7,4,6],[7,2,2],[7,3,3],[7,2,3],[7,2,1],[7,1,1],[7,2,6],[7,4,1],[7,3,1],[7,23,1],[7,7,2],[7,7,1],[7,4,3],[7,2,1],[7,1,1],[7,4,2],[7,15,2],[7,6,1],[7,2,1],[7,14,1],[7,1,1],[7,1,1],[7,4,2],[7,2,1],[7,4,1],[7,2,1],[7,4,3],[7,22,1],[7,10,1],[7,2,1],[7,1,2],[7,7,2],[7,1,2],[7,12,1],[7,3,1],[7,2,4],[7,3,8],[7,2,1],[7,6,1],[7,5,3],[7,8,2],[7,5,1],[7,6,1],[7,6,1],[7,5,1],[7,9,5],[7,3,1],[7,3,2],[7,3,19],[7,28,3],[7,2,2],[7,3,1],[7,51,4],[7,2,1],[7,2,1],[7,22,2],[7,5,1],[7,2,1],[7,4,2],[7,2,1],[7,6,2],[7,6,1],[7,3,1],[7,37,1],[7,9,1],[7,8,2],[7,2,1],[7,4,1],[7,2,1],[7,18,1],[7,9,2],[7,1,1],[7,5,1],[7,2,1],[7,13,1],[7,45,1],[7,1,3],[7,7,5],[7,16,1],[7,7,1],[7,1,1],[7,3,1],[7,8,1],[7,1,1],[7,1,4],[7,2,2],[7,6,1],[7,6,1],[7,2,1],[7,16,1],[7,11,1],[7,1,1],[7,2,1],[7,3,2],[7,8,8],[7,33,1],[7,2,8],[7,4,1],[7,6,7],[7,12,3],[7,17,1],[7,9,5],[7,3,2],[7,3,2],[7,4,1],[7,1,1],[7,2,2],[7,6,1],[8,9,1],[8,79,3],[8,3,1],[8,14,4],[8,2,4],[8,10,5],[8,7,3],[8,8,1],[8,6,1],[8,7,1],[8,8,2],[8,9,1],[8,30,2],[8,1,1],[8,1,5],[8,15,2],[8,10,3],[8,5,3],[8,1,2],[8,3,1],[8,16,1],[8,3,1],[8,3,3],[8,3,4],[8,2,1],[8,6,2],[8,4,4],[8,5,3],[8,8,4],[8,8,3],[8,4,3],[8,13,7],[8,2,1],[8,2,1],[8,1,1],[8,4,1],[8,10,3],[8,16,9],[8,3,2],[8,1,2],[8,2,5],[8,5,2],[8,156,14],[8,1,1],[8,5,1],[8,252,690],[8,5,1],[8,25,21],[8,1,1],[8,39,12],[8,1,4],[8,6,1],[8,25,7],[8,1,1],[8,7,1],[8,46,11],[8,3,1],[8,1,1],[8,14,1],[8,24,1],[8,16,3],[8,6,3],[8,5,1],[8,1,2],[8,12,2],[8,2,1],[8,2,5],[8,6,1],[8,6,1],[8,14,1],[8,7,1],[8,6,1],[8,4,6],[8,1,2],[8,3,1],[8,2,14],[8,7,12],[8,2,2],[8,25,15],[8,8,3],[8,6,6],[8,5,1],[8,1,1],[8,2,3],[8,18,3],[8,2,2],[8,3,1],[8,4,1],[8,3,3],[8,4,2],[8,12,2],[8,1,1],[8,4,1],[8,18,1],[8,2,2],[8,11,3],[8,5,1],[8,6,1],[8,13,1],[8,6,1],[8,23,1],[8,18,3],[8,13,2],[8,4,1],[8,38,4],[8,1,1],[8,6,1],[8,10,2],[8,2,7],[8,10,7],[8,1,1],[8,4,7],[8,2,1],[8,2,2],[8,7,1],[8,17,1],[8,10,5],[8,4,4],[8,8,4],[8,3,2],[8,2,1],[8,33,1],[8,8,6],[8,15,1],[8,2,1],[8,7,4],[8,6,3],[8,2,1],[8,1,2],[8,3,1],[8,4,1],[8,4,2],[8,27,1],[8,10,1],[9,8,2],[9,2,2],[9,7,1],[9,11,1],[9,35,5],[9,3,1],[9,2,2],[9,6,7],[9,16,2],[9,7,15],[9,3,1],[9,9,1],[9,5,1],[9,3,1],[9,3,1],[9,4,1],[9,2,5],[9,1,1],[9,5,4],[9,1,1],[9,13,1],[9,14,4],[9,3,1],[9,35,3],[9,41,1],[9,8,3],[9,2,5],[9,8,2],[9,13,3],[9,10,1],[9,4,1],[9,35,12],[9,9,1],[9,12,1],[9,4,1],[9,2,4],[9,1,2],[9,6,4],[9,1,4],[9,20,3],[9,4,3],[9,3,3],[9,1,4],[9,2,11],[9,11,2],[9,19,1],[9,5,1],[9,6,2],[9,1,1],[9,3,1],[9,15,3],[9,2,1],[9,6,1],[9,13,1],[9,2,1],[9,11,2],[9,3,5],[9,6,1],[9,16,1],[9,4,1],[9,3,2],[9,3,1],[9,2,5],[9,13,1],[9,3,1],[9,2,2],[9,7,1],[9,2,3],[9,3,4],[9,5,1],[9,4,1],[9,10,2],[9,36,1],[9,7,2],[9,3,1],[9,4,2],[9,5,5],[9,12,1],[9,4,1],[9,2,2],[9,12,1],[9,13,1],[9,12,1],[9,2,4],[9,1,1],[9,1,2],[9,6,6],[9,1,2],[9,8,4],[9,7,2],[9,15,4],[10,3,25],[10,2,1],[10,4,2],[10,8,1],[10,2,1],[10,1,1],[10,21,1],[10,21,19],[10,4,4],[10,4,8],[10,2,1],[10,1,3],[10,3,5],[10,6,1],[10,8,5],[10,4,1],[10,24,5],[10,2,2],[10,24,1],[10,6,4],[10,1,2],[10,25,1],[10,14,1],[10,6,3],[10,2,3],[10,6,1],[10,15,2],[10,54,3],[10,12,1],[10,21,1],[10,7,1],[10,4,4],[10,5,1],[10,10,3],[10,37,1],[10,8,3],[10,11,1],[10,2,4],[10,6,1],[10,30,1],[10,35,1],[10,4,2],[10,2,1],[10,5,2],[10,6
,1],[10,4,4],[10,12,1],[10,12,1],[10,44,4],[10,16,3],[10,1,64],[10,27,1],[10,9,3],[10,17,2],[10,25,2],[10,2,2],[10,7,3],[10,89,1],[10,7,30],[10,2,4],[10,2,3],[10,2,1],[10,3,3],[10,11,1],[10,7,1],[10,2,1],[10,4,2],[10,1,1],[10,1,1],[10,6,2],[10,7,3],[10,4,1],[10,2,2],[10,18,1],[10,4,1],[10,19,1],[10,14,6],[10,5,1],[10,5,6],[10,12,1],[11,5,6],[11,15,8],[11,9,1],[11,3,2],[11,6,3],[11,24,4],[11,27,3],[11,2,2],[11,5,9],[11,13,1],[11,3,1],[11,2,25],[11,10,1],[11,4,11],[11,7,2],[11,49,1],[11,4,1],[11,12,1],[11,7,1],[11,1,2],[11,10,6],[11,2,1],[11,4,2],[11,1,2],[11,2,1],[11,5,1],[11,4,3],[11,1,1],[11,6,1],[11,4,3],[11,95,2],[11,8,1],[11,18,1],[11,5,1],[11,16,12],[11,13,2],[11,7,6],[11,56,1],[11,6,1],[11,8,1],[11,21,14],[11,2,7],[11,5,1],[11,1,1],[11,5,2],[11,2,1],[11,15,1],[11,3,3],[11,26,1],[11,6,6],[11,1,1],[11,10,7],[11,6,3],[11,6,1],[11,8,2],[11,1,2],[11,35,2],[11,19,2],[11,8,2],[11,4,1],[11,7,2],[11,4,5],[11,3,5],[11,17,1],[11,3,3],[11,2,1],[11,12,1],[11,2,8],[11,85,1],[11,4,1],[11,9,1],[11,2,2],[11,2,1],[11,6,2],[11,6,3],[11,18,3],[11,1,1],[11,8,1],[11,22,1],[11,7,1],[11,4,2],[11,4,1],[11,8,3],[11,10,4],[11,24,1],[11,10,19],[11,12,8],[12,5,1],[12,1,7],[12,4,1],[12,21,6],[12,12,2],[12,16,1],[12,1,1],[12,2,1],[12,3,1],[12,8,9],[12,1,1],[12,17,2],[12,16,6],[12,14,1],[12,3,3],[12,27,3],[12,2,1],[12,3,3],[12,14,4],[12,1,3],[12,10,1],[12,5,7],[12,7,3],[12,13,5],[12,4,1],[12,47,4],[12,18,1],[12,31,2],[12,8,1],[12,5,4],[12,1,1],[12,26,1],[12,13,2],[12,5,2],[12,4,3],[12,15,5],[12,2,1],[12,2,1],[12,3,1],[12,5,1],[12,11,1],[12,4,3],[12,1,1],[12,7,2],[12,6,1],[12,14,6],[12,32,4],[12,14,1],[12,31,1],[12,7,3],[12,9,7],[12,5,1],[12,6,1],[12,6,6],[12,7,8],[12,2,1],[12,3,1],[12,4,3],[12,1,1],[12,19,2],[12,11,1],[12,7,2],[12,8,1],[12,15,4],[12,5,1],[12,9,3],[12,2,1],[12,1,1],[12,8,9],[12,3,6],[12,15,1],[13,1,11],[13,7,2],[13,10,1],[13,13,4],[13,3,2],[13,1,2],[13,2,1],[13,3,4],[13,3,1],[13,4,3],[13,5,1],[13,10,13],[13,5,4],[13,2,3],[13,3,2],[13,72,2],[13,7,3],[13,19,2],[13,4,1],[13,5,6],[13,4,2],[13,2,1],[13,2,1],[13,34,11],[13,5,2],[13,9,5],[13,6,2],[13,5,5],[13,9,5],[13,9,1],[13,19,3],[13,4,1],[13,3,1],[13,7,2],[13,1,1],[13,11,7],[13,4,7],[13,6,1],[13,2,1],[13,1,1],[13,21,1],[13,6,15],[13,5,2],[13,1,1],[13,1,2],[14,2,1],[14,18,1],[14,8,2],[14,5,1],[14,2,2],[14,5,2],[14,2,1],[14,8,2],[14,4,1],[14,8,5],[14,14,1],[14,9,6],[14,18,2],[14,4,1],[14,6,1],[14,18,1],[14,6,6],[14,4,1],[14,6,2],[14,6,8],[14,3,1],[14,2,3],[14,1,1],[14,17,4],[14,4,3],[14,15,3],[14,4,8],[14,15,2],[14,6,1],[14,9,22],[14,7,3],[14,7,6],[14,2,2],[14,1,1],[14,7,4],[14,10,1],[14,1,1]])\n #data = 
np.array([[26,2],[18,3],[30,4],[19,2],[21,1],[40,1],[17,3],[20,3],[19,3],[15,4],[246,1],[57,2],[16,2],[44,101],[31,1],[19,2],[35,2],[25,1],[28,1],[82,1],[52,11],[19,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,6],[1,1],[1,4],[1,1],[1,7],[1,9],[1,1],[1,2],[1,4],[1,2],[1,2],[1,2],[1,1],[1,1],[1,1],[1,9],[1,1],[1,1],[1,1],[1,2],[1,6],[1,1],[1,2],[1,4],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,13],[1,1],[1,4],[1,1],[1,2],[1,1],[1,3],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,7],[1,2],[1,1],[1,5],[1,1],[1,1],[1,1],[1,2],[1,4],[1,2],[1,1],[1,1],[1,3],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,3],[1,1],[1,4],[1,3],[1,1],[1,1],[1,2],[1,1],[1,4],[1,3],[1,2],[1,3],[1,2],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,2],[1,1],[1,3],[1,2],[1,1],[1,1],[1,2],[1,3],[1,1],[1,2],[1,1],[1,1],[1,3],[1,37],[1,1],[1,2],[1,1],[1,1],[1,50],[1,1],[1,1],[1,1],[1,8],[1,1],[1,1],[1,1],[1,6],[1,2],[1,3],[1,3],[1,1],[1,3],[1,2],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,2],[1,15],[1,2],[1,1],[1,1],[1,1],[1,3],[1,2],[1,1],[1,1],[1,1],[1,2],[1,2],[1,2],[1,9],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,12],[2,3],[2,3],[2,1],[2,1],[2,1],[2,4],[2,1],[2,5],[2,1],[2,1],[2,1],[2,1],[2,2],[2,2],[2,1],[2,3],[2,2],[2,1],[2,13],[2,1],[2,2],[2,1],[2,2],[2,1],[2,1],[2,1],[2,8],[2,3],[2,1],[2,1],[2,13],[2,2],[2,1],[2,2],[2,3],[2,1],[2,1],[3,1],[3,2],[3,5],[3,1],[3,1],[3,11],[3,3],[3,1],[3,1],[3,6],[3,1],[3,3],[3,1],[3,2],[3,4],[3,2],[3,2],[3,1],[3,2],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[4,1],[4,2],[4,2],[4,9],[4,1],[4,1],[4,5],[4,1],[4,16],[4,1],[4,2],[4,1],[4,1],[4,1],[4,6],[4,2],[4,2],[5,2],[5,2],[5,2],[5,2],[5,3],[5,1],[6,3],[6,1],[6,4],[6,1],[7,1],[7,1],[7,2],[7,1],[7,1],[8,7],[8,1],[8,1],[9,1],[9,3],[9,2],[9,1],[10,1],[10,11],[11,1],[11,2],[12,4],[13,11],[13,2],[14,3],[22,1],[39,3],[107,1],[46,6],[22,1],[15,1],[29,45],[29,1],[35,1],[23,2],[21,1],[17,1],[57,1],[20,1],[19,4],[24,1],[18,2],[61,2],[51,12],[41,3],[1,1],[1,1],[1,3],[1,1],[1,1],[1,4],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,6],[1,2],[1,1],[1,4],[1,2],[1,2],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,2],[1,4],[1,3],[1,1],[1,1],[1,1],[1,1],[1,3],[1,3],[1,1],[1,1],[1,1],[1,3],[1,3],[1,2],[1,4],[1,7],[1,3],[1,1],[1,15],[1,2],[1,1],[1,2],[1,2],[1,2],[1,1],[1,2],[1,1],[1,2],[1,1],[1,1],[1,3],[1,1],[1,1],[1,4],[1,4],[1,2],[1,2],[1,1],[1,4],[1,2],[1,5],[1,1],[1,1],[1,1],[1,1],[1,5],[1,8],[1,1],[1,1],[1,2],[1,2],[1,134],[1,45],[1,1],[1,1],[1,5],[1,1],[1,1],[1,1],[1,1],[1,4],[1,6],[1,1],[1,4],[1,1],[1,1],[1,1],[1,1],[1,6],[1,1],[1,19],[1,4],[1,2],[1,1],[1,1],[1,2],[1,4],[1,1],[1,1],[1,19],[1,3],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,4],[1,1],[1,5],[1,3],[1,6],[1,2],[1,1],[1,3],[1,2],[1,2],[1,1],[1,2],[1,1],[1,26],[1,4],[1,1],[1,3],[1,1],[1,1],[1,2],[1,2],[1,2],[1,1],[1,1],[1,1],[1,5],[1,4],[1,1],[1,27],[1,1],[1,1],[1,1],[1,11],[1,2],[1,4],[1,1],[1,1],[1,24],[1,2],[1,1],[2,1],[2,2],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[2,15],[2,1],[2,1],[2,1],[2,3],[2,1],[2,5],[2,1],[2,4],[2,1],[2,1],[2,5],[2,2],[2,1],[2,1],[2,2],[2,1],[2,3],[2,4],[2,1],[2,3],[2,1],[2,2],[2,17],[2,4],[2,2],[2,7],[2,2],[2,1],[3,1],[3,3],[3,1],[3,1],[3,1],[3,1],[3,2],[3,1],[3,1],[3,3],[3,1],[3,18],[3,1],[3,1],[3,1],[3,6],[3,8],[3,1],[3,1],[3,2],[3,2],[3,1],[4,1],[4,3],[4,1],[4,1],[4,1],[4,4],[4,1],[4,20],[4,2],[4,4],[4,2],[4,1],[4,3],[4,1],[4,1],[4,1],[4,1],[4,3],[4,4],[4,2],[4,2],[4,1],[4,1],[5,3],[5,1],[5,1],[6,1],[6,8],[7,1],[7,1],[7,5],[8,21],[8,1],[8,1],[8,2],[9,1],[10,30],[10,2],[10,3],[10,1],[11,1],
[11,2],[11,1],[11,1],[12,1],[12,3],[12,6],[13,1],[13,2],[13,1],[14,1],[14,2],[17,1],[52,1],[64,1],[190,2],[25,3],[19,3],[22,1],[15,2],[25,1],[25,2],[38,1],[69,1],[1,1],[1,4],[1,1],[1,21],[1,1],[1,3],[1,11],[1,31],[1,1],[1,4],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,6],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,5],[1,2],[1,2],[1,212],[1,6],[1,4],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,3],[1,1],[1,3],[1,4],[1,1],[1,2],[1,1],[1,1],[1,5],[1,1],[1,1],[1,1],[1,2],[1,4],[1,1],[1,1],[1,1],[1,1],[1,3],[1,3],[1,1],[1,2],[1,2],[1,2],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,6],[1,1],[1,3],[1,7],[1,2],[1,5],[1,3],[1,4],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,9],[1,1],[1,2],[1,2],[1,3],[1,1],[1,2],[1,1],[1,3],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,78],[1,3],[1,7],[1,1],[1,1],[1,1],[1,3],[1,1],[1,2],[1,1],[1,2],[1,1],[1,2],[1,2],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,8],[1,3],[1,2],[1,1],[1,4],[1,2],[1,1],[1,1],[1,1],[1,1],[2,1],[2,4],[2,1],[2,1],[2,1],[2,3],[2,2],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,3],[2,8],[2,1],[2,1],[2,5],[2,2],[2,1],[2,6],[2,1],[2,4],[2,2],[2,2],[2,1],[2,2],[2,1],[2,1],[2,30],[2,3],[2,5],[2,4],[2,3],[2,1],[2,1],[3,1],[3,2],[3,1],[3,11],[3,1],[3,1],[3,8],[3,2],[3,1],[3,4],[3,3],[3,2],[3,3],[3,1],[3,3],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[4,8],[4,1],[4,2],[4,1],[4,2],[4,1],[4,3],[4,1],[4,2],[4,7],[4,1],[4,1],[4,1],[4,1],[4,7],[5,1],[5,1],[5,2],[5,2],[5,1],[5,11],[5,1],[5,1],[5,1],[5,1],[5,2],[5,1],[5,2],[5,8],[5,1],[6,2],[6,8],[6,1],[6,1],[6,1],[6,2],[6,1],[6,2],[6,1],[7,1],[7,3],[7,1],[7,2],[7,6],[7,2],[8,1],[8,6],[8,15],[9,2],[10,3],[10,1],[10,1],[10,2],[10,5],[10,2],[10,64],[11,1],[11,1],[11,1],[12,1],[12,6],[12,1],[12,2],[14,4],[14,1],[17,1],[21,1],[17,1],[32,1],[16,1],[18,5],[17,1],[16,1],[17,2],[262,1],[22,1],[227,5],[82,4],[28,3],[56,7],[42,2],[26,1],[137,1],[55,19],[29,1],[42,2],[1,5],[1,1],[1,2],[1,22],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,4],[1,2],[1,3],[1,1],[1,4],[1,1],[1,2],[1,4],[1,1],[1,2],[1,2],[1,1],[1,2],[1,2],[1,5],[1,7],[1,2],[1,2],[1,1],[1,1],[1,7],[1,1],[1,1],[1,1],[1,2],[1,3],[1,16],[1,1],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,5],[1,1],[1,1],[1,6],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,4],[1,28],[1,6],[1,1],[1,2],[1,2],[1,2],[1,1],[1,4],[1,1],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,2],[1,1],[1,1],[1,16],[1,1],[1,2],[1,3],[1,1],[1,1],[1,3],[1,4],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,5],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,7],[1,1],[1,1],[1,2],[1,2],[1,4],[1,3],[1,4],[1,1],[1,1],[1,2],[1,5],[1,1],[1,1],[1,5],[1,2],[1,2],[1,1],[1,4],[1,1],[1,1],[1,1],[1,1],[2,5],[2,5],[2,4],[2,2],[2,32],[2,1],[2,1],[2,4],[2,3],[2,1],[2,1],[2,1],[2,45],[2,3],[2,11],[2,1],[2,1],[2,2],[2,1],[2,4],[2,2],[2,1],[2,2],[2,2],[2,2],[2,1],[2,2],[2,3],[2,1],[2,8],[2,2],[2,2],[2,1],[2,2],[2,2],[2,1],[2,7],[2,4],[2,2],[2,4],[2,1],[2,8],[3,1],[3,1],[3,1],[3,3],[3,4],[3,1],[3,10],[3,6],[3,1],[3,1],[3,1],[3,2],[3,4],[3,4],[3,1],[3,1],[3,7],[3,2],[3,5],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,2],[3,3],[3,1],[3,1],[3,19],[4,1],[4,1],[4,1],[4,1],[4,1],[4,3],[4,1],[4,1],[4,2],[4,1],[4,9],[4,4],[4,5],[4,3],[4,2],[4,3],[5,1],[5,2],[5,20],[5,1],[5,2],[5,2],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,4],[5,1],[6,2],[6,2],[6,1],[6,1],[6,1],[6,1],[6,1],[6,6],[6,2],[7,1],[7,1],[7,1],[7,4],[8,1],[8,5],[8,14],[9,1],[9,4],[10,1],[10,1],[10,1],[10,1],[11,6],[11,4],
[12,1],[12,2],[13,2],[13,1],[13,6],[14,2],[42,4],[264,3],[22,3],[15,6],[19,1],[46,2],[193,1],[15,1],[127,5],[47,1],[16,2],[27,1],[25,1],[19,5],[73,1],[60,1],[27,1],[19,2],[1,2],[1,1],[1,2],[1,2],[1,4],[1,2],[1,1],[1,1],[1,2],[1,1],[1,2],[1,16],[1,2],[1,3],[1,2],[1,1],[1,4],[1,20],[1,3],[1,3],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,2],[1,2],[1,2],[1,2],[1,3],[1,4],[1,1],[1,1],[1,2],[1,6],[1,1],[1,1],[1,1],[1,47],[1,2],[1,2],[1,5],[1,2],[1,2],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,4],[1,1],[1,1],[1,16],[1,1],[1,1],[1,6],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,2],[1,1],[1,2],[1,5],[1,2],[1,7],[1,1],[1,1],[1,4],[1,3],[1,1],[1,1],[1,2],[1,14],[1,1],[1,2],[1,1],[1,1],[1,1],[1,3],[1,3],[1,4],[1,5],[1,1],[1,1],[1,1],[1,17],[1,71],[1,1],[1,1],[1,1],[1,79],[1,1],[1,2],[1,4],[1,2],[1,1],[1,1],[1,3],[1,4],[1,1],[1,1],[1,7],[1,1],[1,3],[1,2],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,3],[1,4],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,2],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[2,1],[2,1],[2,1],[2,4],[2,13],[2,1],[2,2],[2,2],[2,1],[2,1],[2,1],[2,2],[2,3],[2,6],[2,3],[2,1],[2,1],[2,1],[2,2],[2,17],[2,2],[2,2],[2,8],[2,1],[2,3],[2,2],[2,11],[2,1],[2,2],[2,5],[2,1],[2,1],[2,2],[2,1],[2,2],[2,2],[2,1],[2,1],[2,3],[2,4],[2,1],[2,6],[2,25],[2,1],[2,1],[2,1],[2,1],[2,2],[2,3],[2,2],[2,2],[2,1],[2,1],[3,2],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,3],[3,8],[3,5],[3,3],[3,7],[3,1],[3,1],[3,9],[3,6],[3,3],[3,2],[3,8],[3,4],[3,3],[4,1],[4,1],[4,1],[4,1],[4,1],[4,6],[4,1],[4,3],[4,2],[4,1],[4,3],[4,1],[4,2],[4,1],[4,1],[4,1],[4,1],[5,1],[5,5],[5,3],[5,2],[5,3],[5,1],[5,3],[6,1],[6,1],[6,1],[6,1],[7,1],[7,1],[7,1],[7,1],[7,32],[7,2],[7,1],[7,4],[7,1],[7,1],[7,4],[8,2],[8,2],[8,1],[8,2],[8,1],[9,1],[9,3],[9,1],[9,1],[9,1],[10,3],[11,4],[11,1],[11,1],[11,3],[11,3],[11,1],[12,1],[12,1],[12,1],[13,2],[13,1],[13,2],[14,5],[26,2],[49,1],[26,1],[18,1],[27,1],[15,1],[23,1],[58,3],[36,2],[19,3],[62,2],[72,2],[90,1],[124,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,3],[1,1],[1,2],[1,3],[1,1],[1,4],[1,2],[1,1],[1,1],[1,18],[1,1],[1,2],[1,4],[1,24],[1,1],[1,2],[1,1],[1,1],[1,4],[1,1],[1,3],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,5],[1,1],[1,1],[1,1],[1,4],[1,3],[1,1],[1,3],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,5],[1,2],[1,1],[1,1],[1,1],[1,1],[1,8],[1,10],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,4],[1,1],[1,1],[1,1],[1,2],[1,2],[1,3],[1,1],[1,1],[1,1],[1,1],[1,17],[1,4],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,4],[1,1],[1,1],[1,1],[1,2],[1,4],[1,1],[1,1],[1,1],[1,3],[1,2],[1,1],[1,1],[1,3],[1,2],[1,1],[1,4],[1,2],[1,1],[1,2],[1,25],[1,2],[1,7],[1,1],[1,1],[1,6],[1,1],[1,3],[1,2],[1,4],[1,1],[1,1],[1,6],[1,1],[1,2],[1,3],[1,1],[1,4],[1,2],[1,3],[1,2],[1,3],[1,1],[1,1],[1,3],[1,2],[1,2],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,2],[1,3],[1,2],[1,1],[1,1],[2,1],[2,5],[2,1],[2,2],[2,5],[2,1],[2,1],[2,1],[2,2],[2,3],[2,2],[2,2],[2,1],[2,2],[2,6],[2,1],[2,2],[2,1],[2,3],[2,1],[2,2],[2,3],[2,13],[2,1],[2,2],[2,1],[2,3],[2,1],[2,4],[2,1],[2,2],[2,1],[2,1],[2,1],[2,3],[2,2],[2,1],[2,2],[2,3],[2,2],[2,2],[2,1],[2,1],[2,3],[2,1],[2,1],[2,5],[3,2],[3,2],[3,2],[3,5],[3,1],[3,1],[3,1],[3,1],[3,3],[3,2],[3,2],[3,1],[3,1],[3,1],[3,1],[3,5],[3,1],[3,4],[3,2],[3,1],[3,1],[3,3],[3,1],[3,1],[3,3],[4,3],[4,1],[4,2],[4,1],[4,1],[4,1],[4,1],[4,1],[5,1],[5,2],[5,9],[5,2],[5,1],[5,7],[5,2],[5,1],[5,2],[5,2],[5,1],[6,3],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,29],[6,2],[7,3],[7,2],[7,1],[7,1],[7,2],[7,2],[7,2],[7,3]
… (several thousand additional [int, int] data pairs in the hard-coded array elided for readability) …]])
 #data = np.array([[26,2], … (commented-out alternative hard-coded array of [int, int] pairs, likewise elided) …
[12,1],[12,2],[13,2],[13,1],[13,6],[14,2],[42,4],[264,3],[22,3],[15,6],[19,1],[46,2],[193,1],[15,1],[127,5],[47,1],[16,2],[27,1],[25,1],[19,5],[73,1],[60,1],[27,1],[19,2],[1,2],[1,1],[1,2],[1,2],[1,4],[1,2],[1,1],[1,1],[1,2],[1,1],[1,2],[1,16],[1,2],[1,3],[1,2],[1,1],[1,4],[1,20],[1,3],[1,3],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,2],[1,2],[1,2],[1,2],[1,3],[1,4],[1,1],[1,1],[1,2],[1,6],[1,1],[1,1],[1,1],[1,47],[1,2],[1,2],[1,5],[1,2],[1,2],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,4],[1,1],[1,1],[1,16],[1,1],[1,1],[1,6],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,2],[1,1],[1,2],[1,5],[1,2],[1,7],[1,1],[1,1],[1,4],[1,3],[1,1],[1,1],[1,2],[1,14],[1,1],[1,2],[1,1],[1,1],[1,1],[1,3],[1,3],[1,4],[1,5],[1,1],[1,1],[1,1],[1,17],[1,71],[1,1],[1,1],[1,1],[1,79],[1,1],[1,2],[1,4],[1,2],[1,1],[1,1],[1,3],[1,4],[1,1],[1,1],[1,7],[1,1],[1,3],[1,2],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,3],[1,4],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,2],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[2,1],[2,1],[2,1],[2,4],[2,13],[2,1],[2,2],[2,2],[2,1],[2,1],[2,1],[2,2],[2,3],[2,6],[2,3],[2,1],[2,1],[2,1],[2,2],[2,17],[2,2],[2,2],[2,8],[2,1],[2,3],[2,2],[2,11],[2,1],[2,2],[2,5],[2,1],[2,1],[2,2],[2,1],[2,2],[2,2],[2,1],[2,1],[2,3],[2,4],[2,1],[2,6],[2,25],[2,1],[2,1],[2,1],[2,1],[2,2],[2,3],[2,2],[2,2],[2,1],[2,1],[3,2],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,3],[3,8],[3,5],[3,3],[3,7],[3,1],[3,1],[3,9],[3,6],[3,3],[3,2],[3,8],[3,4],[3,3],[4,1],[4,1],[4,1],[4,1],[4,1],[4,6],[4,1],[4,3],[4,2],[4,1],[4,3],[4,1],[4,2],[4,1],[4,1],[4,1],[4,1],[5,1],[5,5],[5,3],[5,2],[5,3],[5,1],[5,3],[6,1],[6,1],[6,1],[6,1],[7,1],[7,1],[7,1],[7,1],[7,32],[7,2],[7,1],[7,4],[7,1],[7,1],[7,4],[8,2],[8,2],[8,1],[8,2],[8,1],[9,1],[9,3],[9,1],[9,1],[9,1],[10,3],[11,4],[11,1],[11,1],[11,3],[11,3],[11,1],[12,1],[12,1],[12,1],[13,2],[13,1],[13,2],[14,5],[26,2],[49,1],[26,1],[18,1],[27,1],[15,1],[23,1],[58,3],[36,2],[19,3],[62,2],[72,2],[90,1],[124,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,3],[1,1],[1,2],[1,3],[1,1],[1,4],[1,2],[1,1],[1,1],[1,18],[1,1],[1,2],[1,4],[1,24],[1,1],[1,2],[1,1],[1,1],[1,4],[1,1],[1,3],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,5],[1,1],[1,1],[1,1],[1,4],[1,3],[1,1],[1,3],[1,1303],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,5],[1,2],[1,1],[1,1],[1,1],[1,1],[1,8],[1,10],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,4],[1,1],[1,1],[1,1],[1,2],[1,2],[1,3],[1,1],[1,1],[1,1],[1,1],[1,17],[1,4],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,4],[1,1],[1,1],[1,1],[1,2],[1,4],[1,1],[1,1],[1,1],[1,3],[1,2],[1,1],[1,1],[1,3],[1,2],[1,1],[1,4],[1,2],[1,1],[1,2],[1,25],[1,2],[1,7],[1,1],[1,1],[1,6],[1,1],[1,3],[1,2],[1,4],[1,1],[1,1],[1,6],[1,1],[1,2],[1,3],[1,1],[1,4],[1,2],[1,3],[1,2],[1,3],[1,1],[1,1],[1,3],[1,2],[1,2],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,2],[1,3],[1,2],[1,1],[1,1],[2,1],[2,5],[2,1],[2,2],[2,5],[2,1],[2,1],[2,1],[2,2],[2,3],[2,2],[2,2],[2,1],[2,2],[2,6],[2,1],[2,2],[2,1],[2,3],[2,1],[2,2],[2,3],[2,13],[2,1],[2,2],[2,1],[2,3],[2,1],[2,4],[2,1],[2,2],[2,1],[2,1],[2,1],[2,3],[2,2],[2,1],[2,2],[2,3],[2,2],[2,2],[2,1],[2,1],[2,3],[2,1],[2,1],[2,5],[3,2],[3,2],[3,2],[3,5],[3,1],[3,1],[3,1],[3,1],[3,3],[3,2],[3,2],[3,1],[3,1],[3,1],[3,1],[3,5],[3,1],[3,4],[3,2],[3,1],[3,1],[3,3],[3,1],[3,1],[3,3],[4,3],[4,1],[4,2],[4,1],[4,1],[4,1],[4,1],[4,1],[5,1],[5,2],[5,9],[5,2],[5,1],[5,7],[5,2],[5,1],[5,2],[5,2],[5,1],[6,3],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,29],[6,2],[7,3],[7,2],[7,1],[7,1],[7,2],[7,2],[7
,2],[7,3],[7,2],[8,5],[8,1],[8,1],[8,3],[8,2],[8,1],[8,2],[9,1],[9,1],[10,1],[10,14],[10,3],[10,4],[10,3],[10,4],[11,1],[11,5],[11,2],[11,3],[11,1],[11,1],[11,2],[12,1],[12,1],[13,5],[13,1],[13,1],[14,1],[14,3],[14,1],[24,1],[15,1],[19,2],[15,5],[131,1],[28,13],[33,1],[24,1],[17,1],[15,1],[44,2],[16,2],[16,3],[29,7],[29,1],[82,8],[16,1],[17,2],[16,2],[45,1],[159,1],[100,2],[23,1],[15,1],[15,1],[22,1],[48,1],[25,5],[15,1],[1,1],[1,3],[1,1],[1,3],[1,1],[1,1],[1,2],[1,2],[1,3],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,3],[1,2],[1,2],[1,6],[1,1],[1,2],[1,1],[1,2],[1,4],[1,44],[1,1],[1,2],[1,40],[1,1],[1,9],[1,1],[1,17],[1,1],[1,1],[1,1],[1,5],[1,1],[1,1],[1,1],[1,1],[1,2],[1,2],[1,25],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,12],[1,1],[1,2],[1,12],[1,2],[1,2],[1,5],[1,2],[1,3],[1,7],[1,5],[1,72],[1,2],[1,8],[1,1],[1,1],[1,1],[1,1],[1,2],[1,3],[1,3],[1,1],[1,2],[1,2],[1,5],[1,3],[1,2],[1,3],[1,382],[1,1],[1,3],[1,1],[1,1],[1,6],[1,4],[1,1],[1,2],[1,2],[1,1],[1,1],[1,2],[1,4],[1,1],[1,2],[1,6],[1,1],[1,3],[1,3],[1,1],[1,6],[1,3],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,7],[1,1],[1,1],[1,2],[2,1],[2,1],[2,1],[2,1],[2,12],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,3],[2,1],[2,52],[2,2],[2,1],[2,1],[2,2],[2,1],[2,2],[2,9],[2,1],[2,1],[2,18],[2,3],[2,2],[2,1],[2,1],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[2,1],[2,2],[2,3],[2,2],[2,2],[2,2],[2,1],[2,1],[2,1],[2,2],[2,3],[2,2],[2,1],[2,1],[2,1],[2,1],[3,6],[3,3],[3,4],[3,1],[3,1],[3,1],[3,1],[3,1],[3,4],[3,1],[3,3],[3,1],[3,1],[3,2],[3,1],[3,1],[3,80],[3,1],[3,2],[3,1],[3,1],[4,2],[4,1],[4,1],[4,1],[4,1],[4,1],[4,3],[4,1],[4,2],[4,1],[4,4],[4,4],[4,1],[4,2],[4,2],[4,1],[4,2],[4,1],[4,1],[5,1],[5,1],[5,3],[5,3],[5,1],[5,1],[5,1],[5,2],[5,1],[6,4],[6,3],[6,1],[6,6],[6,1],[6,1],[7,2],[7,1],[7,1],[7,2],[7,1],[7,2],[7,1],[7,1],[8,1],[8,4],[8,1],[8,2],[8,3],[9,2],[9,3],[9,3],[9,6],[10,1],[10,1],[10,1],[10,1],[11,8],[11,1],[11,1],[12,2],[13,5],[15,1],[35,7],[16,1],[24,2],[16,1],[25,1],[65,4],[36,1],[16,5],[21,10],[18,1],[16,12],[29,1],[1,2],[1,1],[1,2],[1,1],[1,1],[1,5],[1,3],[1,3],[1,3],[1,1],[1,4],[1,3],[1,3],[1,3],[1,1],[1,1],[1,1],[1,2],[1,5],[1,3],[1,1],[1,1],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,4],[1,1],[1,3],[1,1],[1,1],[1,2],[1,1],[1,1],[1,2],[1,1],[1,8],[1,1],[1,1],[1,1],[1,1],[1,1],[1,8],[1,2],[1,4],[1,2],[1,7],[1,1],[1,1],[1,1],[1,2],[1,1],[1,4],[1,1],[1,1],[1,1],[1,2],[1,5],[1,1],[1,4],[1,8],[1,6],[1,1],[1,4],[1,1],[1,1],[1,3],[1,1],[1,3],[1,2],[1,7],[1,2],[1,5],[1,2],[1,1],[1,2],[1,1],[1,3],[1,1],[1,1],[1,1],[1,3],[1,3],[1,1],[1,2],[1,1],[1,3],[1,1],[1,1],[1,5],[1,1],[1,13],[1,3],[1,2],[1,1],[1,1],[1,10],[1,1],[1,2],[1,1],[1,3],[1,12],[1,2],[1,2],[1,4],[1,1],[1,1],[2,3],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,2],[2,4],[2,3],[2,1],[2,1],[2,1],[2,6],[2,1],[2,6],[2,1],[2,2],[2,6],[2,1],[2,10],[2,1],[2,1],[2,4],[2,1],[2,3],[2,3],[2,1],[2,1],[2,3],[2,5],[2,3],[2,10],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[2,3],[2,1],[2,4],[2,1],[2,1],[2,2],[2,1],[2,3],[2,3],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[3,2],[3,1],[3,1],[3,1],[3,5],[3,34],[3,2],[3,3],[3,1],[3,1],[3,2],[3,1],[3,5],[3,1],[3,1],[3,2],[3,4],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,2],[3,1],[3,2],[3,25],[3,1],[3,1],[4,1],[4,6],[4,3],[4,1],[4,6],[4,1],[4,1],[4,4],[4,1],[4,1],[4,1],[4,1],[4,1],[4,2],[4,1],[4,1],[4,3],[4,4],[5,1],[5,2],[5,3],[5,1],[5,1],[5,1],[5,4],[5,1],[5,2],[5,4],[5,1],[5,1],[6,1],[6,4],[6,2],[6,1],[6,1],[6,2],[6,3],[7,11],[7,1],[7,5],[8,2],[8,1],[8,1],[
9,2],[9,5],[9,4],[9,3],[9,1],[9,2],[9,2],[10,1],[10,2],[11,1],[12,3],[12,1],[13,11],[13,1],[17,1],[201,2],[16,2],[104,4],[123,2],[15,1],[26,5],[74,1],[15,3],[15,7],[16,1],[39,2],[27,1],[32,1],[53,4],[28,1],[25,3],[1,1],[1,3],[1,1],[1,2],[1,2],[1,2],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,7],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,2],[1,1],[1,2],[1,16],[1,3],[1,2],[1,2],[1,3],[1,1],[1,1],[1,3],[1,11],[1,4],[1,3],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,4],[1,4],[1,1],[1,4],[1,1],[1,2],[1,1],[1,1],[1,1],[1,32],[1,2],[1,1],[1,1],[1,6],[1,1],[1,7],[1,3],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,4],[1,2],[1,2],[1,1],[1,1],[1,2],[1,2],[1,2],[1,2],[1,1],[1,1],[1,55],[1,2],[1,4],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,2],[1,2],[1,1],[1,5],[1,4],[1,7],[1,1],[1,1],[1,6],[1,2],[1,2],[1,6],[1,3],[1,2],[1,1],[1,6],[1,3],[1,1],[1,1],[1,2],[1,1],[1,1],[1,2],[1,1],[1,4],[1,9],[1,2],[1,3],[1,1],[2,1],[2,1],[2,11],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,2],[2,2],[2,1],[2,4],[2,1],[2,2],[2,2],[2,2],[2,3],[2,4],[2,2],[2,5],[2,1],[2,1],[2,3],[2,1],[2,2],[2,1],[2,2],[2,1],[2,1],[2,3],[2,3],[2,2],[2,3],[2,1],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[2,1],[2,1],[2,4],[2,2],[3,2],[3,1],[3,1],[3,3],[3,1],[3,1],[3,1],[3,2],[3,1],[3,2],[3,6],[3,2],[3,1],[3,1],[3,3],[3,1],[3,1],[3,2],[3,1],[3,1],[3,1],[3,1],[3,1],[3,5],[3,1],[3,1],[3,2],[3,2],[3,2],[3,1],[3,1],[3,2],[3,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,3],[4,1],[4,2],[4,3],[4,3],[4,1],[4,4],[4,1],[4,2],[4,1],[4,3],[4,1],[5,1],[5,2],[5,1],[5,3],[5,3],[5,1],[5,2],[5,9],[5,1],[5,1],[5,2],[5,1],[5,2],[6,2],[6,3],[6,1],[6,1],[6,2],[6,1],[6,2],[6,2],[6,1],[6,4],[6,2],[7,7],[7,2],[7,4],[7,1],[7,2],[7,19],[7,1],[7,1],[7,1],[8,1],[8,12],[8,1],[8,3],[8,1],[9,1],[9,1],[9,1],[9,1],[9,1],[10,1],[10,1],[10,4],[10,2],[12,3],[12,1],[12,1],[13,1],[13,1],[14,1],[14,1],[14,3],[30,7],[32,1],[40,2],[16,1],[91,6],[122,1],[15,1],[17,1],[20,3],[19,2],[19,1],[98,2],[81,14],[47,4],[38,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,6],[1,1],[1,2],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,2],[1,1],[1,3],[1,1],[1,1],[1,1],[1,83],[1,1],[1,1],[1,1],[1,1],[1,4],[1,1],[1,5],[1,2],[1,2],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,3],[1,1],[1,1],[1,4],[1,2],[1,2],[1,2],[1,1],[1,1],[1,2],[1,1],[1,7],[1,1],[1,2],[1,4],[1,1],[1,1],[1,88],[1,2],[1,2],[1,2],[1,2],[1,3],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,57],[1,2],[1,6],[1,4],[1,1],[1,2],[1,1],[1,1],[1,1],[1,3],[1,5],[1,5],[1,1],[1,1],[1,9],[1,1],[1,1],[1,3],[1,4],[1,1],[1,2],[1,5],[1,2],[1,3],[1,1],[1,2],[1,4],[1,4],[1,2],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,3],[1,2],[1,3],[1,1],[1,1],[1,2],[1,1],[1,3],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,2],[1,6],[1,3],[1,2],[1,1],[1,1],[1,2],[1,1],[1,3],[1,1],[2,1],[2,1],[2,2],[2,2],[2,2],[2,2],[2,2],[2,15],[2,4],[2,1],[2,1],[2,2],[2,1],[2,2],[2,3],[2,3],[2,3],[2,1],[2,3],[2,1],[2,1],[2,1],[2,1],[2,1],[2,3],[2,1],[2,2],[2,1],[2,2],[2,1],[2,7],[2,1],[2,4],[2,3],[2,2],[2,3],[2,1],[2,1],[2,2],[3,4],[3,1],[3,1],[3,2],[3,3],[3,6],[3,2],[3,9],[3,9],[3,2],[3,2],[3,1],[3,15],[3,1],[3,1],[3,1],[3,3],[4,1],[4,1],[4,2],[4,3],[4,1],[4,2],[4,1],[4,6],[4,2],[4,8],[4,9],[4,1],[4,1],[4,1],[5,1],[5,1],[5,78],[5,1],[5,1],[5,1],[5,17],[5,1],[5,3],[5,2],[5,1],[6,1],[6,1],[6,5],[6,19],[6,1],[6,6],[6,1],[6,1],[6,2],[6,1],[6,1],[6,1],[6,2],[6,1],[7,2],[7,1],[7,1],[7,4],[7,1],[7,28],[7,1],[8,1],[8,1],[8,1],[9,3],[9,1],[9,11],[9,4],[10,1],[10,2],[11,1],[11,1],[11,1],[11,1],[12,1],[
14,2],[14,2],[14,2],[18,2],[31,1],[29,2],[16,1],[17,20],[25,1],[20,3],[59,1],[25,1],[27,2],[26,1],[44,1],[17,4],[16,4],[20,6],[67,2],[15,1],[65,1],[17,1],[33,1],[61,2],[1,2],[1,2],[1,2],[1,4],[1,1],[1,1],[1,1],[1,2],[1,2],[1,4],[1,4],[1,5],[1,2],[1,1],[1,1],[1,18],[1,1],[1,3],[1,1],[1,2],[1,1],[1,2],[1,2],[1,5],[1,4],[1,1],[1,4],[1,1],[1,1],[1,1],[1,56],[1,1],[1,4],[1,1],[1,9],[1,6],[1,9],[1,1],[1,2],[1,1],[1,1],[1,1],[1,18],[1,10],[1,1],[1,5],[1,1],[1,1],[1,2],[1,5],[1,1],[1,3],[1,1],[1,1],[1,4],[1,1],[1,2],[1,1],[1,8],[1,3],[1,3],[1,1],[1,1],[1,2],[1,1],[1,1],[1,2],[1,1],[1,3],[1,2],[1,1],[1,1],[1,5],[1,2],[1,1],[1,1],[1,4],[1,2],[1,1],[1,1],[1,5],[1,2],[1,27],[1,3],[1,1],[1,2],[1,9],[1,2],[1,2],[1,6],[1,1],[1,2],[1,1],[1,15],[1,1],[1,2],[1,1],[1,1],[1,2],[1,4],[1,1],[1,1],[1,2],[1,3],[1,1],[1,1],[1,2],[1,1],[1,17],[1,1],[1,4],[1,1],[1,1],[1,2],[1,2],[1,4],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,18],[1,1],[1,2],[1,46],[1,1],[1,1],[1,1],[1,6],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,7],[1,8],[1,1],[1,3],[1,6],[2,1],[2,1],[2,1],[2,1],[2,5],[2,4],[2,1],[2,2],[2,2],[2,4],[2,2],[2,1],[2,2],[2,1],[2,3],[2,5],[2,1],[2,2],[2,2],[2,3],[2,1],[2,1],[2,1],[2,1],[2,1],[2,12],[2,2],[2,1],[2,1],[2,1],[2,1],[2,1],[2,3],[2,2],[2,3],[2,1],[2,2],[2,1],[2,10],[2,2],[2,8],[2,2],[2,2],[2,1],[2,5],[2,5],[2,4],[2,1],[2,1],[2,1],[2,1],[3,2],[3,6],[3,2],[3,1],[3,58],[3,1],[3,3],[3,1],[3,1],[3,2],[3,1],[3,1],[3,2],[3,1],[3,1],[3,6],[3,10],[3,1],[3,4],[3,1],[3,1],[3,6],[3,1],[3,29],[3,2],[3,2],[3,6],[3,1],[4,1],[4,4],[4,2],[4,1],[4,46],[4,2],[4,1],[4,2],[4,2],[4,3],[4,11],[4,3],[4,1],[4,2],[4,1],[4,15],[4,2],[5,5],[5,9],[5,1],[5,2],[5,136],[5,48],[5,5],[5,1],[5,1],[5,1],[5,1],[5,1],[6,1],[6,1],[6,10],[6,1],[6,2],[6,1],[7,2],[7,1],[7,3],[7,2],[7,11],[7,6],[7,1],[8,1],[8,3],[8,2],[8,1],[8,12],[8,2],[8,2],[9,1],[9,1],[9,1],[9,4],[10,1],[10,2],[11,2],[12,9],[13,1],[14,2],[21,1],[26,1],[16,2],[2230,1],[29,1],[16,5],[401,3],[33,1],[19,31],[15,4],[28,2],[23,1],[42,4],[40,1],[70,1],[15,3],[15,2],[22,1],[103,1],[256,27],[41,1],[86,1],[17,1],[31,1],[26,1],[105,2],[28,1],[1,4],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,3],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,6],[1,6],[1,4],[1,1],[1,4],[1,7],[1,3],[1,1],[1,1],[1,2],[1,1],[1,1],[1,3],[1,3],[1,2],[1,1],[1,2],[1,2],[1,8],[1,1],[1,2],[1,1],[1,5],[1,2],[1,1],[1,1],[1,2],[1,2],[1,2],[1,2],[1,1],[1,9],[1,1],[1,2],[1,2],[1,3],[1,2],[1,1],[1,2],[1,2],[1,1],[1,4],[1,1],[1,1],[1,2],[1,1],[1,2],[1,2],[1,1],[1,1],[1,5],[1,1],[1,29],[1,1],[1,4],[1,2],[1,3],[1,3],[1,17],[1,6],[1,2],[1,1],[1,2],[1,2],[1,3],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,2],[1,9],[1,3],[1,1],[1,1],[1,1],[1,2],[1,3],[1,3],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,8],[1,1],[1,7],[1,1],[1,5],[1,1],[1,1],[1,4],[1,1],[1,2],[1,6],[1,2],[1,1],[1,2],[1,1],[1,4],[1,1],[1,1],[1,3],[1,3],[1,2],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,16],[1,5],[2,2],[2,1],[2,2],[2,2],[2,2],[2,1],[2,1],[2,8],[2,3],[2,1],[2,2],[2,4],[2,2],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[2,9],[2,1],[2,23],[2,1],[2,1],[2,1],[2,2],[2,3],[2,1],[2,1],[2,3],[2,1],[2,1],[2,2],[2,1],[2,25],[2,2],[2,3],[2,2],[2,1],[2,1],[2,3],[2,1],[2,3],[2,1],[2,3],[2,1],[2,2],[2,1],[2,1],[2,1],[3,1],[3,2],[3,2],[3,3],[3,2],[3,1],[3,1],[3,5],[3,9],[3,1],[3,3],[3,1],[3,2],[3,1],[3,1],[3,1],[3,1],[3,1],[3,9],[3,1],[3,2],[3,7],[3,3],[3,4],[3,2],[3,1],[3,37],[3,1],[3,1],[3,1],[3,1],[4,1],[4,2],[4,305],[4,4],[4,1],[4,1],[4,1],[4,4],[4,3],[4,1],[4,6],[4,7],[4,1],[4,1],[4,1],[4,1],[4,
29],[4,1],[5,10],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[6,2],[6,1],[6,1],[6,2],[7,1],[7,1],[7,2],[7,1],[7,1],[7,1],[7,2],[8,1],[8,3],[8,2],[9,1],[9,1],[10,1],[10,3],[10,1],[11,6],[11,2],[11,1],[11,1],[12,5],[12,4],[12,1],[14,1],[14,1],[23,1],[26,2],[15,2],[16,16],[31,7],[18,3],[22,3],[87,1],[17,2],[17,9],[30,1],[58,4],[24,2],[28,5],[53,1],[23,1],[28,2],[44,1],[60,3],[17,2],[17,1],[1,1],[1,2],[1,1],[1,11],[1,1],[1,1],[1,2],[1,2],[1,3],[1,2],[1,6],[1,3],[1,1],[1,4],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,3],[1,2],[1,1],[1,1],[1,3],[1,2],[1,4],[1,1],[1,1],[1,2],[1,3],[1,1],[1,1],[1,7],[1,2],[1,1],[1,1],[1,4],[1,2],[1,1],[1,3],[1,1],[1,5],[1,3],[1,3],[1,3],[1,1],[1,1],[1,4],[1,1],[1,2],[1,1],[1,2],[1,1],[1,2],[1,3],[1,5],[1,1],[1,2],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,5],[1,1],[1,1],[1,1],[1,1],[1,1],[1,8],[1,15],[1,1],[1,8],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,3],[1,1],[1,3],[1,15],[1,1],[1,2],[1,1],[1,1],[1,4],[1,1],[1,5],[1,3],[1,1],[1,1],[1,14],[1,1],[1,2],[1,2],[1,3],[1,2],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,4],[1,1],[1,1],[1,1],[1,2],[1,1],[1,3],[1,1],[1,1],[1,1],[1,4],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,3],[1,1],[1,5],[1,2],[1,3],[1,1],[1,2],[1,9],[1,1],[1,4],[1,1],[1,2],[1,8],[1,1],[1,3],[1,1],[1,1],[1,4],[1,4],[1,3],[1,1],[1,1],[1,9],[1,2],[1,4],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,4],[1,2],[1,1],[1,1],[1,2],[1,3],[1,2],[1,6],[1,1],[1,18],[2,1],[2,3],[2,3],[2,1],[2,6],[2,1],[2,2],[2,2],[2,5],[2,1],[2,1],[2,1],[2,3],[2,2],[2,6],[2,1],[2,3],[2,3],[2,1],[2,3],[2,2],[2,2],[2,1],[2,1],[2,9],[2,5],[2,1],[2,1],[2,1],[2,2],[2,85],[2,60],[2,2],[2,1],[2,12],[2,1],[2,1],[2,1],[2,8],[2,1],[2,21],[2,1],[2,3],[2,1],[2,1],[2,8],[2,1],[2,1],[3,3],[3,3],[3,1],[3,3],[3,3],[3,1],[3,2],[3,2],[3,1],[3,1],[3,14],[3,1],[3,6],[3,1],[3,2],[3,1],[3,3],[3,2],[3,1],[3,1],[3,1],[3,1],[3,2],[3,3],[3,2],[4,3],[4,2],[4,1],[4,3],[4,1],[4,1],[4,2],[4,2],[4,1],[4,1],[4,1],[4,1],[4,1],[4,4],[5,1],[5,1],[5,1],[5,3],[5,2],[5,1],[5,4],[6,6],[6,1],[6,18],[6,1],[6,1],[6,1],[6,5],[6,2],[6,3],[6,2],[7,3],[7,5],[7,2],[7,1],[7,3],[7,5],[7,1],[7,1],[7,1],[7,1],[8,1],[8,1],[8,3],[8,1],[8,1],[8,4],[9,1],[9,2],[9,4],[10,2],[10,1],[11,2],[11,1],[11,1],[12,3],[13,1],[14,2],[32,7],[26,2],[22,2],[15,1],[26,46],[15,2],[16,1],[19,1],[36,1],[16,2],[24,1],[20,5],[1,1],[1,1],[1,1],[1,7],[1,1],[1,1],[1,2],[1,4],[1,2],[1,1],[1,1],[1,1],[1,10],[1,5],[1,13],[1,2],[1,3],[1,1],[1,3],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,4],[1,3],[1,1],[1,1],[1,2],[1,8],[1,1],[1,3],[1,5],[1,1],[1,2],[1,2],[1,2],[1,4],[1,2],[1,3],[1,1],[1,1],[1,1],[1,2],[1,8],[1,2],[1,4],[1,1],[1,1],[1,1],[1,1],[1,1],[1,4],[1,2],[1,4],[1,3],[1,2],[1,9],[1,19],[1,1],[1,1],[1,1],[1,1],[1,14],[1,3],[1,2],[1,4],[1,2],[1,1],[1,4],[1,1],[1,1],[1,5],[1,2],[1,1],[1,1],[1,2],[1,4],[1,2],[1,1],[1,11],[1,1],[1,3],[1,2],[1,2],[1,1],[1,1],[1,3],[1,9],[1,2],[1,6],[1,9],[1,3],[1,1],[1,1],[1,5],[1,1],[1,3],[1,2],[1,9],[1,1],[1,3],[1,5],[1,1],[1,1],[1,1],[1,2],[1,4],[1,1],[1,4],[1,2],[1,1],[1,3],[1,2],[1,1],[1,12],[1,1],[1,1],[1,1],[1,1],[2,5],[2,2],[2,5],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,3],[2,3],[2,3],[2,114],[2,1],[2,2],[2,1],[2,1],[2,3],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[2,1],[2,9],[2,1],[2,1],[2,2],[2,1],[2,3],[2,1],[2,1],[2,2],[2,1],[2,3],[2,19],[2,1],[2,8],[2,2],[2,2],[2,7],[2,1],[2,1],[3,2],[3,1],[3,5],[3,3],[3,1],[3,5],[3,1],[3,1],[3,1],[3,1],[3,1],[3,30],[3,1],[3,1],[3,1],[3,2],[3,1],[3,1],[3,1],[3,1],[3,2],[3,2],[3,1],[3,2],[3,2],[3,1],[3,2],[3,1],[3,2],[4,1],[4,3],[4,1],[4,1],[4,7],[4,2],[4,2],[4,3],[4,3],[4,2],
[4,2],[4,1],[4,1],[4,2],[4,1],[4,2],[4,1],[4,1],[4,6],[5,2],[5,1],[5,2],[5,1],[5,7],[5,7],[5,1],[5,2],[5,1],[6,1],[6,1],[6,1],[6,2],[6,1],[6,1],[6,4],[6,1],[7,1],[7,1],[7,1],[7,3],[7,1],[7,1],[7,1],[8,1],[8,2],[8,3],[8,1],[8,1],[8,9],[8,6],[9,1],[9,3],[9,4],[10,4],[10,1],[10,3],[10,1],[10,19],[11,3],[11,2],[11,5],[11,5],[11,1],[12,7],[13,3],[13,4],[13,2],[13,4],[14,2],[16,1],[93,1],[22,2],[42,6],[15,1],[16,3],[36,8],[34,1],[30,3],[43,7],[46,8],[40,1],[22,1],[1,3],[1,1],[1,13],[1,2],[1,3],[1,2],[1,3],[1,1],[1,2],[1,2],[1,1],[1,2],[1,3],[1,1],[1,2],[1,1],[1,2],[1,1],[1,3],[1,2],[1,1],[1,2],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,2],[1,2],[1,2],[1,1],[1,5],[1,13],[1,3],[1,1],[1,2],[1,1],[1,2],[1,1],[1,2],[1,2],[1,6],[1,4],[1,1],[1,4],[1,1],[1,2],[1,3],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,6],[1,1],[1,1],[1,1],[1,1],[1,3],[1,2],[1,3],[1,2],[1,3],[1,1],[1,1],[1,3],[1,2],[1,3],[1,3],[1,2],[1,1],[1,3],[1,4],[1,1],[1,2],[1,1],[1,1],[1,3],[1,1],[1,3],[1,4],[1,2],[1,2],[1,3],[1,7],[1,3],[1,1],[1,1],[1,3],[1,2],[1,1],[1,4],[1,5],[1,2],[1,2],[1,1],[1,4],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,7],[1,6],[1,1],[1,2],[1,3],[1,3],[1,1],[1,4],[1,2],[1,7],[1,2],[1,5],[1,1],[1,2],[1,3],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,2],[1,3],[1,6],[1,2],[1,2],[1,1],[1,1],[2,1],[2,1],[2,3],[2,1],[2,2],[2,1],[2,3],[2,1],[2,2],[2,12],[2,1],[2,1],[2,3],[2,3],[2,1],[2,2],[2,3],[2,3],[2,1],[2,1],[2,1],[2,3],[2,1],[2,1],[2,1],[2,1],[2,8],[2,2],[2,1],[2,2],[2,1],[2,1],[2,7],[2,1],[2,1],[2,1],[2,7],[2,2],[2,1],[2,18],[2,1],[2,1],[2,1],[2,2],[2,2],[2,1],[2,1],[2,5],[2,1],[2,1],[2,6],[2,3],[2,1],[3,3],[3,1],[3,1],[3,3],[3,1],[3,1],[3,3],[3,1],[3,2],[3,3],[3,1],[3,1],[3,1],[4,6],[4,1],[4,1],[4,3],[4,1],[4,1],[4,1],[4,2],[4,2],[4,5],[4,2],[4,2],[4,2],[4,2],[4,1],[4,3],[4,2],[4,1],[5,1],[5,3],[5,2],[5,2],[5,1],[5,1],[5,3],[5,1],[5,1],[5,2],[5,4],[5,4],[5,1],[6,2],[6,2],[6,2],[6,1],[6,1],[6,1],[6,1],[6,4],[6,1],[7,2],[7,1],[7,2],[7,1],[7,1],[7,1],[8,2],[8,2],[8,3],[8,14],[9,5],[9,2],[9,1],[9,1],[10,8],[10,2],[11,1],[11,1],[12,1],[12,1],[12,1],[12,7],[12,3],[48,1],[73,3],[22,2],[19,1],[20,1],[40,2],[15,2],[34,1],[22,5],[31,2],[47,28],[51,1],[19,2],[231,1],[15,3],[18,2],[18,3],[101,5],[65,2],[30,11],[18,3],[1,1],[1,2],[1,2],[1,1],[1,3],[1,5],[1,2],[1,2],[1,1],[1,2],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,64],[1,2],[1,3],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,2],[1,1],[1,3],[1,2],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,2],[1,4],[1,2],[1,1],[1,4],[1,5],[1,1],[1,1],[1,1],[1,1],[1,3],[1,4],[1,3],[1,2],[1,1],[1,1],[1,2],[1,1],[1,1],[1,3],[1,1],[1,6],[1,1],[1,3],[1,4],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,3],[1,1],[1,1],[1,2],[1,3],[1,2],[1,1],[1,1],[1,1],[1,3],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,4],[1,1],[1,3],[1,1],[1,1],[1,2],[1,1],[1,2],[1,3],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,2],[1,1],[1,4],[1,3],[1,1],[1,1],[1,1],[1,1],[1,14],[1,1],[1,1],[1,1],[1,1],[1,2],[1,12],[1,2],[1,2],[1,1],[1,1],[1,3],[1,2],[1,3],[1,2],[1,1],[1,5],[1,1],[1,7],[1,2],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,2],[1,2],[1,3],[1,1],[2,2],[2,1],[2,3],[2,2],[2,1],[2,1],[2,2],[2,1],[2,2],[2,2],[2,1],[2,1],[2,10],[2,2],[2,1],[2,2],[2,3],[2,1],[2,2],[2,1],[2,2],[2,1],[2,1],[2,3],[2,1],[2,6],[2,2],[2,4],[2,9],[2,2],[2,1],[2,3],[2,2],[2,10],[2,3],[2,1],[2,37],[2,2],[2,2],[2,2],[3,9],[3,4],[3,3],[3,2],[3,2],[3,1],[3,19],[3,1],[3,1],[3,1],[3,1],[3,1],[3,2],[3,1],[3,1],[3,1],[3,2],[3,2]
,[3,10],[3,1],[3,1],[3,1],[3,1],[3,3],[3,6],[4,2],[4,5],[4,1],[4,3],[4,10],[4,1],[4,1],[4,1],[4,1],[4,4],[4,5],[4,1],[4,1],[4,2],[5,2],[5,2],[5,1],[5,2],[5,1],[5,3],[5,2],[5,1],[5,1],[6,3],[6,1],[6,1],[6,6],[6,1],[6,3],[7,2],[7,1],[7,1],[7,1],[7,1],[7,1],[8,1],[8,2],[8,1],[8,3],[8,1],[9,1],[9,1],[9,2],[10,3],[10,4],[10,1],[11,1],[12,1],[12,1],[13,1],[13,3],[13,1],[14,1],[35,2],[15,7],[32,1],[80,1],[22,2],[16,1],[25,1],[156,1],[175,2],[460,1],[63,1],[74,3],[121,2],[16,3],[49,5],[29,1],[16,1],[1,5],[1,4],[1,3],[1,5],[1,1],[1,1],[1,2],[1,2],[1,1],[1,3],[1,1],[1,2],[1,1],[1,3],[1,4],[1,12],[1,1],[1,3],[1,1],[1,2],[1,3],[1,3],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,3],[1,1],[1,1],[1,1],[1,12],[1,1],[1,1],[1,3],[1,1],[1,2],[1,38],[1,1],[1,1],[1,1],[1,2],[1,5],[1,1],[1,1],[1,10],[1,3],[1,3],[1,4],[1,2],[1,2],[1,3],[1,1],[1,1],[1,1],[1,6],[1,1],[1,4],[1,2],[1,2],[1,1],[1,1],[1,9],[1,1],[1,1],[1,4],[1,4],[1,3],[1,3],[1,2],[1,1],[1,6],[1,2],[1,3],[1,1],[1,5],[1,2],[1,2],[1,1],[1,1],[1,5],[1,2],[1,1],[1,3],[1,1],[1,6],[1,1],[1,2],[1,2],[1,1],[1,4],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,4],[1,1],[1,1],[1,1],[1,1],[1,1],[1,5],[1,2],[1,2],[1,8],[1,1],[1,3],[1,3],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,2],[1,1],[1,1],[1,4],[1,3],[1,1],[1,2],[1,2],[1,1],[1,3],[1,1],[1,1],[2,1],[2,1],[2,4],[2,7],[2,1],[2,3],[2,2],[2,3],[2,2],[2,10],[2,2],[2,6],[2,4],[2,2],[2,2],[2,1],[2,2],[2,1],[2,1],[2,1],[2,3],[2,4],[2,1],[2,1],[2,2],[2,2],[2,1],[2,2],[2,3],[2,1],[2,10],[2,2],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,4],[2,1],[2,1],[2,1],[2,3],[2,2],[2,2],[3,5],[3,3],[3,26],[3,1],[3,4],[3,2],[3,5],[3,1],[3,3],[3,2],[3,1],[3,1],[3,2],[3,1],[3,2],[3,2],[3,1],[3,4],[3,2],[4,8],[4,1],[4,1],[4,1],[4,1],[4,2],[4,1],[4,2],[4,1],[4,5],[4,1],[4,2],[4,2],[4,2],[4,3],[4,2],[5,2],[5,1],[5,2],[5,3],[5,1],[5,1],[5,3],[5,1],[5,1],[5,1],[6,4],[6,2],[6,1],[6,1],[6,7],[6,2],[7,1],[7,1],[7,1],[7,3],[7,3],[7,3],[8,2],[8,1],[8,3],[9,3],[9,2],[9,1],[9,3],[9,2],[10,1],[10,1],[10,4],[11,2],[11,1],[11,1],[12,1],[12,55],[12,1],[13,1],[35,4],[21,9],[26,1],[165,7],[21,1],[55,5],[19,10],[18,5],[17,1],[67,1],[68,4],[19,1],[24,6],[89,3],[21,1],[40,1],[52,2],[16,1],[1,3],[1,4],[1,1],[1,4],[1,2],[1,3],[1,1],[1,3],[1,1],[1,4],[1,1],[1,1],[1,14],[1,5],[1,3],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,22],[1,1],[1,2],[1,1],[1,1],[1,2],[1,1],[1,1],[1,3],[1,4],[1,2],[1,3],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,5],[1,1],[1,2],[1,2],[1,5],[1,1],[1,4],[1,2],[1,1],[1,1],[1,1],[1,3],[1,1],[1,4],[1,1],[1,2],[1,37],[1,1],[1,2],[1,1],[1,2],[1,2],[1,5],[1,1],[1,1],[1,11],[1,2],[1,1],[1,1],[1,1],[1,7],[1,3],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,6],[1,2],[1,1],[1,1],[1,4],[1,1],[1,1],[1,1],[1,1],[1,3],[1,2],[1,2],[1,1],[1,1],[1,2],[1,3],[1,1],[1,4],[1,1],[1,1],[1,1],[1,3],[1,1],[1,2],[1,3],[1,1],[1,1],[1,1],[1,3],[1,2],[1,1],[1,1],[1,2],[1,1],[1,11],[1,2],[1,1],[1,6],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,8],[1,1],[1,2],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,4],[1,1],[1,5],[1,2],[1,2],[1,1],[1,3],[1,1],[1,1],[1,1],[2,1],[2,1],[2,1],[2,1],[2,2],[2,3],[2,1],[2,1],[2,3],[2,1],[2,2],[2,1],[2,1],[2,1],[2,19],[2,6],[2,3],[2,1],[2,2],[2,3],[2,2],[2,6],[2,1],[2,1],[2,4],[2,1],[2,2],[2,1],[2,1],[2,1],[2,3],[2,2],[2,1],[2,7],[2,1],[2,1],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[2,2],[2,7],[2,1],[2,3],[2,3],[2,1],[3,6],[3,2],[3,2],[3,1],[3,1],[3,2],[3,1],[3,1],[3,1],[3,3],[3,1],[3,1],[3,29],[3,1],[3,2],[3,3],[3,1],[3,1],[3,1],[3,15],[3,2],[3,1],[3,1],[3,2],[3,1],[3,2],[3,2],[3,7],[3,3],[3,4],[3,1],[4,2],[4,10],[4,1],[4,1],[4,1],[4,1],[4,1],[4,6],[5,3],
[5,2],[5,1],[5,4],[5,1],[5,2],[5,1],[6,13],[6,2],[6,2],[6,2],[6,1],[6,1],[6,1],[7,1],[7,1],[7,2],[8,1],[8,1],[8,1],[9,2],[9,1],[9,1],[9,1],[9,1],[9,1],[10,1],[10,1],[10,112],[10,1],[11,1],[11,3],[11,11],[12,1],[13,2],[13,1],[13,2],[14,1],[78,1],[43,1],[20,1],[15,1],[26,5],[17,2],[32,2],[93,2],[57,2],[25,1],[112,4],[18,1],[73,1],[30,55],[24,1],[699,1],[17,1],[1,1],[1,1],[1,3],[1,5],[1,1],[1,2],[1,1],[1,3],[1,2],[1,1],[1,1],[1,2],[1,3],[1,3],[1,1],[1,2],[1,2],[1,3],[1,1],[1,4],[1,5],[1,3],[1,1],[1,1],[1,1],[1,1],[1,4],[1,1],[1,2],[1,2],[1,1],[1,2],[1,4],[1,1],[1,2],[1,1],[1,1],[1,6],[1,3],[1,4],[1,1],[1,2],[1,1],[1,1],[1,2],[1,4],[1,2],[1,1],[1,1],[1,1],[1,1],[1,3],[1,4],[1,1],[1,1],[1,4],[1,4],[1,1],[1,3],[1,1],[1,1],[1,1],[1,9],[1,1],[1,2],[1,1],[1,1],[1,4],[1,1],[1,2],[1,1],[1,1],[1,3],[1,1],[1,1],[1,2],[1,13],[1,2],[1,1],[1,1],[1,1],[1,7],[1,3],[1,3],[1,1],[1,1],[1,1],[1,2],[1,15],[1,1],[1,2],[1,1],[1,1],[1,2],[1,1],[1,7],[1,3],[1,1],[1,1],[1,1],[1,5],[1,1],[1,3],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,6],[1,2],[1,4],[1,15],[1,2],[1,1],[1,1],[1,1],[1,2],[1,2],[1,2],[1,1],[1,2],[1,1],[2,1],[2,10],[2,3],[2,1],[2,1],[2,1],[2,3],[2,3],[2,1],[2,1],[2,1],[2,1],[2,1],[2,4],[2,1],[2,1],[2,1],[2,2],[2,1],[2,24],[2,1],[2,2],[2,1],[2,3],[2,1],[2,1],[2,1],[2,1],[2,2],[2,2],[2,5],[2,3],[2,2],[2,1],[2,2],[2,1],[2,1],[2,3],[2,4],[2,1],[3,2],[3,2],[3,1],[3,2],[3,1],[3,3],[3,1],[3,1],[3,1],[3,3],[3,13],[3,10],[3,7],[3,1],[3,1],[3,1],[3,9],[3,9],[3,1],[3,2],[3,11],[3,1],[3,4],[3,1],[3,1],[4,2],[4,1],[4,2],[4,1],[4,115],[4,1],[4,1],[4,1],[4,1],[4,2],[4,2],[4,1],[4,2],[4,4],[4,9],[4,1],[4,1],[5,1],[5,2],[5,3],[5,2],[5,1],[5,4],[5,1],[5,2],[5,1],[5,1],[5,1],[5,7],[5,1],[5,1],[6,39],[6,2],[6,3],[6,1],[7,1],[7,2],[7,3],[7,1],[7,2],[7,8],[7,1],[8,3],[8,1],[8,1],[8,1],[8,1],[9,3],[9,2],[9,1],[10,3],[10,25],[10,1],[10,1],[11,6],[11,1],[11,1],[11,1],[11,7],[12,1],[12,1],[12,1],[13,1],[13,1],[14,8],[14,1],[14,1],[74,2],[26,11],[69,1],[108,1],[20,5],[1263,1],[21,1],[16,1],[16,3],[32,2],[62,2],[50,1],[16,1],[15,1],[22,5],[1,2],[1,1],[1,2],[1,2],[1,1],[1,2],[1,1],[1,1],[1,6],[1,3],[1,1],[1,1],[1,3],[1,1],[1,1],[1,5],[1,10],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,4],[1,1],[1,1],[1,1],[1,7],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,2],[1,2],[1,4],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,9],[1,7],[1,9],[1,1],[1,1],[1,2],[1,2],[1,2],[1,1],[1,1],[1,15],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,4],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,42],[1,12],[1,3],[1,3],[1,5],[1,2],[1,1],[1,5],[1,4],[1,3],[1,3],[1,4],[1,1],[1,2],[1,1],[1,2],[1,1],[1,2],[1,1],[1,3],[1,1],[1,12],[1,1],[1,2],[1,3],[1,1],[1,1],[1,1],[1,1],[1,2],[1,4],[1,1],[1,1],[1,2],[1,1],[1,3],[1,1],[1,5],[1,1],[1,16],[1,1],[1,7],[1,1],[1,1],[1,3],[1,1],[1,7],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,7],[1,1],[2,1],[2,3],[2,1],[2,1],[2,9],[2,2],[2,1],[2,1],[2,1],[2,1],[2,3],[2,1],[2,3],[2,2],[2,3],[2,1],[2,1],[2,1],[2,2],[2,1],[2,4],[2,2],[2,1],[2,10],[2,2],[2,1],[2,4],[2,1],[2,4],[2,3],[2,1],[2,1],[2,1],[2,1],[2,5],[2,1],[2,1],[2,1],[2,1],[2,2],[2,2],[2,1],[2,1],[2,4],[2,1],[2,2],[2,1],[3,1],[3,3],[3,135],[3,1],[3,10],[3,1],[3,1],[3,3],[3,2],[3,2],[3,2],[3,5],[3,1],[3,2],[3,7],[3,2],[3,1],[3,1],[3,3],[3,3],[3,1],[3,1],[3,1],[3,1],[3,3],[3,1],[4,91],[4,2],[4,2],[4,3],[4,10],[4,3],[4,2],[4,3],[4,1],[4,1],[4,32],[4,2],[4,2],[5,1],[5,1],[5,3],[5,1],[5,3],[5,2],[5,1],[5,34],[5,2],[5,7],[5,2],[5,1],[6,2],[6,1],[6,5],[6,2],[6,1],[6,1],[7,2],[7,2],[7,1],[7,1],[7,6],[7,1],[8,1],[8,2],[8,1],[8,5],[8,4],[8,1],[8,3],[8,1],[9,4],[9,7],[9,1],[11,2],[11,2]
,[11,1],[11,1],[11,2],[11,19],[11,6],[12,6],[13,2],[13,1],[13,1],[14,1],[76,1],[65,1],[15,2],[19,1],[15,1],[32,1],[33,1],[19,4],[27,3],[62,7],[36,2],[39,3],[44,3],[17,1],[940,4],[20,1],[16,5],[17,4],[21,1],[46,1],[55,1],[251,12],[27,2],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,12],[1,8],[1,1],[1,1],[1,5],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,2],[1,1],[1,1],[1,9],[1,2],[1,5],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,3],[1,1],[1,3],[1,2],[1,1],[1,3],[1,2],[1,3],[1,1],[1,4],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,2],[1,32],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,1],[1,5],[1,1],[1,11],[1,4],[1,15],[1,3],[1,2],[1,1],[1,1],[1,1],[1,6],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,11],[1,9],[1,1],[1,2],[1,6],[1,1],[1,1],[1,3],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,128],[1,3],[1,1],[1,1],[1,1],[1,2],[1,3],[1,1],[1,2],[1,3],[1,2],[1,3],[1,1],[1,1],[1,1],[1,3],[1,2],[1,2],[1,2],[1,2],[1,1],[1,1],[1,2],[1,1],[1,17],[1,1],[1,1],[1,1],[1,3],[1,8],[2,1],[2,1],[2,3],[2,1],[2,3],[2,2],[2,4],[2,2],[2,1],[2,3],[2,1],[2,2],[2,1],[2,2],[2,2],[2,5],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,10],[2,1],[2,2],[2,1],[2,3],[2,1],[2,1],[2,2],[2,1],[2,1],[2,4],[2,1],[2,1],[2,2],[2,1],[2,3],[2,1],[2,1],[2,1],[3,1],[3,2],[3,1],[3,8],[3,1],[3,1],[3,1],[3,1],[3,1],[3,2],[3,1],[3,2],[3,3],[3,2],[3,3],[3,1],[3,1],[3,2],[3,1],[3,1],[4,1],[4,1],[4,1],[4,1],[4,2],[4,1],[4,1],[4,3],[4,1],[4,2],[4,2],[4,1],[4,1],[5,33],[5,5],[5,2],[5,1],[5,5],[5,48],[6,2],[6,3],[6,2],[6,1],[6,1],[6,2],[6,3],[6,1],[6,3],[7,8],[7,1],[7,1],[7,2],[8,1],[8,1],[8,1],[8,1],[8,2],[8,1],[9,1],[9,1],[9,1],[10,1],[10,1],[10,1],[11,2],[11,5],[12,1],[12,2],[12,2],[17,4],[17,1],[15,2],[29,5],[38,1],[20,1],[16,2],[24,1],[42,1],[29,1],[60,2],[20,1],[168,4],[17,33],[83,2],[71,1],[16,1],[18,3],[54,1],[15,8],[22,1],[36,1],[1,1],[1,1],[1,2],[1,4],[1,1],[1,1],[1,1],[1,2],[1,7],[1,5],[1,1],[1,9],[1,1],[1,5],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,7],[1,3],[1,4],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,15],[1,1],[1,3],[1,2],[1,2],[1,5],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,5],[1,3],[1,2],[1,1],[1,143],[1,1],[1,1],[1,2],[1,4],[1,4],[1,2],[1,2],[1,96],[1,1],[1,4],[1,16],[1,2],[1,1],[1,3],[1,2],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,4],[1,1],[1,1],[1,5],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,8],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,4],[1,2],[1,1],[1,5],[1,2],[1,1],[1,1],[1,6],[1,1],[1,15],[1,1],[1,1],[1,3],[1,1],[1,2],[1,1],[1,1],[1,7],[1,1],[1,2],[1,4],[1,1],[1,6],[1,5],[1,6],[1,1],[1,1],[1,1303],[1,2],[1,2],[1,1],[1,5],[1,2],[1,2],[1,12],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,2],[1,1],[1,4],[1,1],[1,3],[1,8],[2,1],[2,1],[2,2],[2,3],[2,1],[2,3],[2,1],[2,1],[2,1],[2,5],[2,1],[2,2],[2,1],[2,1],[2,3],[2,1],[2,14],[2,1],[2,1],[2,1],[2,5],[2,1],[2,7],[2,3],[2,1],[2,3],[2,2],[2,3],[2,1],[2,1],[2,33],[2,1],[2,1],[2,1],[2,2],[2,3],[2,5],[2,1],[2,2],[2,8],[2,5],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,2],[3,1],[3,2],[3,1],[3,1],[3,1],[3,3],[3,16],[3,1],[3,4],[3,1],[3,1],[3,8],[3,2],[3,1],[3,1],[3,2],[3,1],[3,1],[3,1],[3,1],[3,3],[3,1],[3,2],[3,1],[3,1],[3,2],[3,5],[3,6],[3,1],[3,1],[3,2],[3,3],[3,1],[3,1],[3,4],[3,1],[4,1],[4,2],[4,1],[4,1],[4,2],[4,1],[4,4],[4,2],[4,3],[4,1],[4,2],[4,2],[4,3],[4,1],[4,1],[4,1],[4,1],[4,45],[5,2],[5,1],[5,4],[5,2],[5,1],[5,1],[5,1],[5,1],[5,3],[5,1],[5,3],[6,5],[6,13],[6,4],[6,1],[6,2],[6,1],[6,2],[7,3],[7,1],[7,2],[7,1],[7,1],[8,1],[8,1],[8,1],[8,11],[8,4],[8,1],[8,1],[9,2],[9,1],[10,1],[10,1],[10,2],[11,25],[11,1],[11,1],[11,7],
[11,1],[12,3],[12,1],[12,1],[26,3],[29,11],[18,1],[20,1],[15,1],[16,1],[35,4],[15,1],[63,2],[39,1],[64,4],[15,1],[15,1],[26,1],[64,1],[40,1],[1,1],[1,1],[1,2],[1,2],[1,1],[1,12],[1,1],[1,1],[1,2],[1,2],[1,3],[1,2],[1,2],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,3],[1,3],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,10],[1,1],[1,1],[1,16],[1,1],[1,2],[1,47],[1,3],[1,1],[1,1],[1,1],[1,4],[1,1],[1,170],[1,2],[1,2],[1,1],[1,1],[1,3],[1,3],[1,1],[1,5],[1,1],[1,1],[1,1],[1,2],[1,2],[1,1],[1,2],[1,1],[1,2],[1,1],[1,3],[1,1],[1,14],[1,35],[1,1],[1,3],[1,4],[1,2],[1,1],[1,1],[1,3],[1,2],[1,1],[1,1],[1,1],[1,2],[1,2],[1,4],[1,1],[1,2],[1,1],[1,1],[1,3],[1,2],[1,3],[1,2],[1,1],[1,1],[1,2],[1,1],[1,15],[1,13],[1,2],[1,1],[1,1],[1,8],[1,3],[1,1],[1,1],[1,1],[1,1],[1,2],[1,5],[1,3],[1,1],[1,53],[1,1],[1,4],[1,3],[1,1],[1,5],[1,1],[1,1],[1,1],[1,1],[1,4],[1,1],[1,1],[1,1],[1,2],[1,2],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,14],[2,3],[2,1],[2,2],[2,3],[2,9],[2,1],[2,2],[2,1],[2,1],[2,1],[2,1],[2,4],[2,8],[2,3],[2,1],[2,1],[2,3],[2,2],[2,1],[2,1],[2,1],[2,2],[2,4],[2,2],[2,1],[2,1],[2,2],[2,1],[2,1],[2,1],[2,3],[2,1],[2,1],[2,1],[2,1],[2,1],[2,2],[2,2],[2,2],[2,3],[2,1],[2,1],[2,4],[2,2],[2,161],[2,1],[3,1],[3,1],[3,2],[3,1],[3,1],[3,1],[3,1],[3,51],[3,1],[3,1],[3,3],[3,1],[3,3],[3,2],[3,1],[3,1],[3,2],[3,3],[3,4],[3,2],[3,2],[3,1],[3,1],[3,10],[3,1],[4,1],[4,1],[4,1],[4,4],[4,1],[4,1],[4,4],[4,1],[4,5],[4,9],[4,1],[4,3],[4,1],[5,4],[5,3],[5,1],[5,1],[5,1],[5,1],[5,1],[5,2],[5,1],[5,1],[5,1],[6,7],[6,1],[6,1],[6,1],[6,1],[6,1],[6,3],[6,2],[7,1],[7,2],[7,1],[7,1],[8,1],[8,2],[8,2],[9,1],[9,1],[10,3],[10,1],[10,1],[10,3],[11,9],[11,1],[11,1],[11,1],[11,1],[11,2],[11,2],[12,1],[12,4],[13,2],[13,2],[13,15],[14,1],[14,1],[17,3],[185,1],[51,1],[21,3],[19,3],[17,1],[29,1],[38,4],[169,24],[41,4],[15,1],[59,5],[87,3],[169,1],[29,5],[28,1],[25,4],[48,1],[15,3],[18,1],[22,2],[36,4],[134,1],[19,1],[15,1],[17,3],[56,1],[24,1],[17,1],[1,1],[1,3],[1,4],[1,3],[1,2],[1,3],[1,6],[1,4],[1,6],[1,1],[1,4],[1,1],[1,1],[1,1],[1,2],[1,1],[1,4],[1,9],[1,79],[1,1],[1,4],[1,1],[1,3],[1,2],[1,1],[1,2],[1,1],[1,1],[1,7],[1,1],[1,3],[1,3],[1,2],[1,4],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,2],[1,3],[1,5],[1,4],[1,1],[1,2],[1,5],[1,2],[1,1],[1,10],[1,1],[1,1],[1,1],[1,2],[1,2],[1,1],[1,7],[1,2],[1,1],[1,1],[1,1],[1,2],[1,4],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,24],[1,2],[1,1],[1,11],[1,2],[1,8],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,5],[1,4],[1,2],[1,2],[1,1],[1,3],[1,2],[1,1],[1,3],[1,1],[1,2],[1,1],[1,4],[1,1],[1,1],[1,1],[1,1],[1,1],[1,31],[1,1],[1,1],[1,6],[1,1],[1,1],[1,1],[1,1],[1,7],[1,1],[1,5],[1,1],[1,1],[1,2],[1,1],[1,3],[1,2],[1,1],[1,13],[1,5],[1,3],[1,2],[1,4],[1,2],[1,1],[1,2],[1,1],[1,1],[1,4],[1,3],[1,3],[1,1],[1,2],[1,1],[2,2],[2,1],[2,2],[2,1],[2,1],[2,1],[2,2],[2,5],[2,2],[2,8],[2,1],[2,1],[2,1],[2,3],[2,13],[2,6],[2,1],[2,4],[2,1],[2,2],[2,2],[2,1],[2,1],[2,4],[2,1],[2,1],[2,1],[2,6],[2,1],[2,1],[2,4],[2,1],[2,2],[2,1],[2,2],[2,1],[2,1],[2,2],[2,1],[2,1],[2,1],[2,4],[2,6],[2,1],[2,1],[2,1],[2,1],[2,6],[2,1],[2,1],[2,1],[2,2],[2,2],[2,4],[3,1],[3,1],[3,2],[3,1],[3,5],[3,1],[3,1],[3,2],[3,1],[3,1],[3,1],[3,6],[3,1],[3,8],[3,1],[3,1],[3,1],[3,1],[3,13],[3,3],[3,1],[3,2],[3,2],[3,1],[4,4],[4,1],[4,1],[4,3],[4,1],[4,1],[4,1],[4,2],[5,4],[5,1],[5,2],[5,3],[5,1],[5,1],[5,1],[5,1],[5,2],[6,8],[7,1],[7,1],[7,2],[8,2],[8,2],[8,2],[8,3],[8,3],[8,1],[8,1],[9,1],[9,1],[10,1],[10,3],[10,1],[12,3],[12,2],[12,2],[12,1],[12,1],[12,1],[13,3],[13,1],[13,1],[14,1],[17,1]
,[25,7],[15,6],[111,8],[92,1],[26,21],[328,1],[16,1],[752,1],[16,1],[22,2],[1,3],[1,1],[1,1],[1,2],[1,1],[1,1],[1,3],[1,2],[1,2],[1,3],[1,6],[1,1],[1,1],[1,7],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,2],[1,2],[1,7],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,7],[1,2],[1,1],[1,1],[1,1],[1,3],[1,2],[1,5],[1,1],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,1],[1,6],[1,1],[1,1],[1,4],[1,2],[1,3],[1,1],[1,3],[1,1],[1,2],[1,2],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,8],[1,2],[1,2],[1,3],[1,2],[1,2],[1,3],[1,1],[1,3],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,4],[1,1],[1,1],[1,6],[1,1],[1,1],[1,2],[1,2],[1,6],[1,1],[1,1],[1,8],[1,5],[1,1],[1,2],[1,4],[1,21],[1,1],[1,2],[1,2],[1,1],[1,1],[1,2],[1,2],[1,4],[1,1],[1,5],[1,1],[1,1],[1,1],[1,1],[1,4],[1,2],[2,5],[2,1],[2,1],[2,4],[2,2],[2,1],[2,3],[2,1],[2,2],[2,8],[2,1],[2,2],[2,12],[2,2],[2,2],[2,1],[2,5],[2,2],[2,2],[2,1],[2,2],[2,1],[2,3],[2,4],[2,1],[2,2],[2,1],[2,2],[2,1],[2,1],[2,1],[2,2],[2,4],[2,1],[2,1],[2,1],[2,1],[2,2],[2,1],[2,4],[2,5],[2,1],[2,2],[2,2],[2,9],[2,1],[2,1],[3,3],[3,1],[3,1],[3,5],[3,1],[3,2],[3,3],[3,1],[3,12],[3,2],[3,1],[3,1],[3,3],[3,3],[3,1],[3,1],[3,1],[3,2],[3,1],[3,2],[3,1],[3,1],[3,7],[4,2],[4,2],[4,1],[4,3],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,3],[4,1],[4,3],[5,1],[5,2],[5,1],[5,1],[5,1],[5,1],[6,1],[6,5],[6,11],[6,1],[6,1],[6,2],[6,1],[6,4],[6,1],[6,1],[7,5],[7,1],[7,1],[8,1],[8,3],[9,2],[9,1],[10,1],[11,1],[11,1],[11,2],[11,1],[12,4],[12,2],[13,1],[13,1],[13,2],[14,6],[14,1],[68,4],[113,4],[22,1],[48,79],[28,2],[88,1],[232,2],[23,1],[32,1],[72,2],[26,1],[20,1],[53,1],[16,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,8],[1,1],[1,1],[1,2],[1,2],[1,1],[1,2],[1,1],[1,4],[1,1],[1,1],[1,2],[1,1],[1,6],[1,1],[1,3],[1,1],[1,3],[1,4],[1,3],[1,1],[1,2],[1,1],[1,4],[1,1],[1,1],[1,1],[1,2],[1,2],[1,2],[1,1],[1,5],[1,2],[1,1],[1,2],[1,1],[1,1],[1,3],[1,1],[1,3],[1,1],[1,2],[1,2],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,5],[1,4],[1,1],[1,1],[1,9],[1,6],[1,5],[1,1],[1,1],[1,3],[1,2],[1,9],[1,2],[1,3],[1,1],[1,4],[1,1],[1,2],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,6],[1,1],[1,1],[1,2],[1,1],[1,16],[1,3],[1,1],[1,86],[1,1],[1,2],[1,4],[1,2],[1,16],[1,9],[1,4],[1,2],[1,9],[1,2],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,7],[1,10],[1,5],[1,1],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,12],[1,2],[1,4],[1,1],[1,1],[1,2],[1,2],[1,4],[2,6],[2,3],[2,2],[2,1],[2,3],[2,2],[2,2],[2,2],[2,6],[2,1],[2,4],[2,2],[2,3],[2,1],[2,1],[2,1],[2,1],[2,1],[2,2],[2,1],[2,3],[2,1],[2,1],[2,1],[2,3],[2,1],[2,2],[2,2],[2,1],[2,2],[2,9],[2,10],[2,1],[2,1],[2,1],[2,1],[2,1],[2,4],[2,3],[2,2],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,8],[2,2],[2,1],[2,3],[2,1],[3,1],[3,1],[3,1],[3,2],[3,7],[3,5],[3,2],[3,1],[3,1],[3,1],[3,1],[3,1],[3,2],[3,1],[3,2],[3,2],[3,1],[3,1],[3,2],[3,1],[3,2],[3,5],[3,2],[4,1],[4,2],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,6],[4,2],[5,5],[5,2],[5,9],[5,5],[5,1],[5,2],[5,1],[5,2],[6,7],[6,7],[7,3],[7,8],[7,1],[7,1],[7,2],[7,7],[8,1],[8,1],[8,1],[9,6],[9,4],[10,2],[10,1],[10,1],[10,3],[10,2],[11,1],[12,5],[12,3],[12,1],[13,1],[14,2],[14,3],[14,4],[30,1],[19,1],[27,1],[24,12],[20,24],[20,1],[80,1],[26,1],[25,1],[35,1],[150,1],[22,1],[28,1],[187,2],[15,2],[21,1],[22,1],[17,8],[27,1],[1,4],[1,1],[1,1],[1,2],[1,1],[1,1],[1,4],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,6],[1,4],[1,1],[1,1],[1,2],[1,1],[1,2],[1,4],[1,4],[1,1],[1,3],[1,5],[1,1],[1,10],[1,8],[1,1],[1,3],[1,1],[1,2],[1,1],[1,1],[1,1],[1,3],[1,2],[1,1],[1,1],[1
,1],[1,4],[1,1],[1,1],[1,5],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,4],[1,3],[1,7],[1,3],[1,1],[1,10],[1,1],[1,4],[1,1],[1,1],[1,2],[1,7],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,4],[1,1],[1,2],[1,3],[1,1],[1,2],[1,2],[1,7],[1,1],[1,1],[1,1],[1,1],[1,5],[1,2],[1,1],[1,5],[1,1],[1,1],[1,5],[1,2],[1,2],[1,1],[1,3],[1,1],[1,1],[1,4],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,17],[1,4],[1,2],[1,6],[1,1],[1,2],[1,1],[1,2],[1,1],[1,6],[1,2],[1,1],[1,28],[1,3],[1,1],[1,3],[1,1],[1,2],[1,2],[1,2],[1,1],[1,3],[1,1],[2,1],[2,3],[2,1],[2,4],[2,1],[2,3],[2,2],[2,1],[2,1],[2,2],[2,1],[2,2],[2,1],[2,1],[2,3],[2,1],[2,9],[2,1],[2,1],[2,7],[2,3],[2,1],[2,1],[2,3],[2,4],[2,2],[2,2],[2,2],[2,1],[2,3],[2,2],[2,3],[2,2],[2,1],[2,1],[2,2],[3,10],[3,1],[3,3],[3,4],[3,4],[3,398],[3,1],[3,1],[3,3],[3,1],[3,3],[3,1],[3,1],[3,3],[3,1],[3,1],[3,4],[3,3],[3,2],[3,1],[4,2],[4,16],[4,3],[4,2],[4,1],[4,4],[4,1],[4,1],[4,4],[4,1],[4,1],[4,1],[4,21],[4,5],[4,1],[4,3],[4,2],[4,2],[4,1],[4,2],[4,1],[4,2],[5,3],[5,1],[5,3],[5,1],[5,5],[5,7],[5,1],[5,1],[5,1],[5,7],[5,4],[5,6],[5,1],[6,1],[6,2],[6,3],[6,2],[6,1],[6,3],[7,8],[7,6],[7,1],[7,2],[7,1],[7,1],[8,4],[8,1],[8,4],[8,1],[8,1],[8,8],[8,3],[9,1],[9,1],[9,2],[10,6],[11,1],[11,1],[11,1],[12,1],[12,4],[12,6],[13,3],[13,1],[520,3],[292,13],[16,1],[20,1],[44,3],[22,1],[17,2],[18,1],[46,5],[19,1],[15,3],[28,1],[23,1],[19,13],[25,2],[23,134],[68,1],[79,13],[1,1],[1,1],[1,1],[1,2],[1,3],[1,1],[1,1],[1,5],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,5],[1,1],[1,1],[1,3],[1,1],[1,2],[1,6],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,5],[1,12],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,2],[1,2],[1,6],[1,1],[1,1],[1,36],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,5],[1,1],[1,5],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,6],[1,3],[1,2],[1,2],[1,3],[1,1],[1,1],[1,3],[1,1],[1,1],[1,4],[1,2],[1,1],[1,22],[1,1],[1,1],[1,1],[1,187],[1,2],[1,2],[1,1],[1,1],[1,1],[1,2],[1,2],[1,5],[1,4],[1,1],[1,2],[1,1],[1,20],[1,4],[1,2],[1,1],[1,1],[1,3],[1,1],[1,3],[1,1],[1,1],[2,1],[2,5],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,3],[2,2],[2,1],[2,2],[2,1],[2,1],[2,1],[2,5],[2,1],[2,2],[2,1],[2,1],[2,6],[2,6],[2,9],[2,1],[2,2],[2,1],[2,2],[2,2],[2,3],[2,6],[2,2],[2,1],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[2,44],[2,1],[2,1],[2,1],[2,3],[2,1],[2,1],[2,1],[2,1],[2,1],[3,9],[3,4],[3,1],[3,2],[3,1],[3,1],[3,1],[3,4],[3,2],[3,1],[3,1],[3,21],[3,6],[3,1],[3,2],[3,1],[3,1],[3,1],[3,2],[3,1],[3,2],[3,3],[3,1],[3,3],[3,5],[3,1],[3,1],[3,5],[3,1],[3,2],[3,2],[3,1],[3,1],[3,1],[4,92],[4,1],[4,1],[4,1],[4,13],[4,4],[4,1],[4,1],[4,2],[4,1],[4,1],[5,1],[5,1],[5,1],[5,2],[5,1],[5,3],[5,3],[5,1],[5,1],[5,1],[5,4],[5,1],[6,1],[6,3],[6,2],[6,23],[6,2],[6,3],[6,35],[7,1],[7,1],[7,1],[8,690],[8,1],[8,3],[9,2],[9,5],[9,1],[10,4],[11,6],[12,4],[12,1],[14,15],[14,1],[18,1],[46,1],[16,1],[24,4],[27,2],[21,1],[98,1],[107,3],[44,16],[16,1],[28,1],[1,1],[1,2],[1,7],[1,3],[1,1],[1,1],[1,2],[1,2],[1,14],[1,1],[1,1],[1,1],[1,36],[1,1],[1,3],[1,4],[1,1],[1,3],[1,2],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,13],[1,51],[1,1],[1,1],[1,3],[1,1],[1,3],[1,1],[1,6],[1,2],[1,2],[1,1],[1,3],[1,1],[1,5],[1,3],[1,1],[1,2],[1,1],[1,2],[1,1],[1,1],[1,2],[1,4],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,94],[1,6],[1,1],[1,1],[1,1],[1,2],[1,4],[1,5],[1,1],[1,1],[1,2],[1,2],[1,2],[1,1],[1,5],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,3],[1,2],[1,2],[1,1],[1,2],[1,3],[1,1],[1,1],[1,1],[1,2],[1
,1],[1,1],[1,5],[1,2],[1,1],[1,2],[1,2],[1,5],[1,1],[1,2],[1,1],[1,2],[1,2],[1,2],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,2],[1,2],[1,4],[1,4],[1,1],[1,28],[1,1],[1,2],[1,3],[1,2],[1,1],[1,1],[1,10],[1,4],[1,4],[1,2],[1,1],[1,3],[1,3],[1,1],[1,2],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,3],[1,5],[1,7],[2,1],[2,5],[2,1],[2,3],[2,2],[2,1],[2,2],[2,2],[2,2],[2,1],[2,1],[2,1],[2,2],[2,2],[2,1],[2,1],[2,2],[2,2],[2,1],[2,1],[2,1],[2,1],[2,1],[2,3],[2,1],[2,1],[2,7],[2,7],[2,2],[2,4],[2,3],[2,1],[2,2],[2,2],[2,1],[2,1],[2,1],[2,4],[2,1],[2,1],[2,2],[2,5],[2,1],[2,1],[2,1],[2,2],[2,2],[2,2],[2,1],[2,1],[2,1],[2,1],[3,1],[3,1],[3,2],[3,2],[3,1],[3,1],[3,5],[3,5],[3,1],[3,1],[3,10],[3,30],[3,1],[3,1],[3,1],[3,3],[3,1],[3,4],[3,3],[3,3],[3,1],[3,1],[3,2],[3,1],[3,92],[3,1],[4,4],[4,1],[4,2],[4,5],[4,1],[4,2],[4,2],[4,1],[4,4],[4,1],[4,1],[4,1],[5,1],[5,2],[5,1],[5,1],[5,1],[5,4],[5,2],[5,1],[5,10],[6,2],[6,1],[6,1],[6,1],[6,4],[6,2],[6,1],[6,1],[6,2],[7,1],[7,1],[7,1],[7,1],[7,2],[7,1],[7,1],[8,5],[8,1],[8,1],[8,5],[8,5],[8,1],[9,2],[9,1],[9,4],[9,4],[10,1],[10,1],[10,5],[10,5],[10,1],[10,1],[11,1],[11,1],[11,1],[11,2],[12,1],[12,2],[12,2],[12,1],[13,1],[13,1],[13,3],[14,1],[14,22],[14,1],[14,1],[14,2],[20,4],[27,1],[18,2],[49,1],[16,3],[15,1],[18,1],[15,1],[18,1],[15,1],[27,2],[21,1],[23,1],[54,1],[22,1],[46,1],[17,1],[37,7],[17,1],[19,1],[33,2],[62,1],[18,4],[18,1],[24,1],[18,1],[36,1],[20,1],[125,1],[18,13],[36,1],[1,1],[1,2],[1,2],[1,2],[1,1],[1,1],[1,3],[1,4],[1,3],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,10],[1,6],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,2],[1,2],[1,4],[1,1],[1,3],[1,8],[1,2],[1,4],[1,10],[1,1],[1,71],[1,1],[1,2],[1,18],[1,1],[1,3],[1,2],[1,1],[1,1],[1,2],[1,2],[1,1],[1,34],[1,9],[1,2],[1,7],[1,3],[1,3],[1,3],[1,3],[1,4],[1,1],[1,1],[1,1],[1,2],[1,1],[1,4],[1,5],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,6],[1,1],[1,1],[1,8],[1,4],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,4],[1,1],[1,6],[1,3],[1,1],[1,1],[1,4],[1,1],[1,1],[1,1],[1,1],[1,2],[1,9],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,6],[1,1],[1,10],[1,1],[1,10],[1,1],[1,2],[1,2],[1,2],[1,3],[1,1],[1,2],[1,3],[1,2],[1,2],[1,20],[1,2],[1,3],[1,2],[1,1],[1,1],[1,5],[1,1],[1,5],[1,1],[1,1],[1,1],[1,4],[1,1],[1,2],[2,1],[2,1],[2,3],[2,3],[2,2],[2,2],[2,1],[2,2],[2,3],[2,1],[2,1],[2,1],[2,1],[2,2],[2,1],[2,10],[2,1],[2,1],[2,6],[2,3],[2,5],[2,3],[2,1],[2,1],[2,11],[2,2],[2,3],[2,2],[2,1],[2,7],[2,1],[2,1],[2,2],[2,1],[2,1],[2,2],[2,2],[2,1],[2,3],[2,1],[2,3],[2,2],[2,1],[2,6],[2,3],[2,1],[2,1],[2,1],[3,4],[3,2],[3,1],[3,8],[3,1],[3,49],[3,2],[3,2],[3,3],[3,1],[3,2],[3,5],[3,3],[3,2],[3,1],[3,3],[3,1],[3,2],[3,13],[3,7],[3,2],[3,1],[4,2],[4,4],[4,1],[4,2],[4,1],[4,1],[4,1],[4,2],[5,1],[5,4],[5,1],[5,1],[5,1],[5,1],[5,1],[5,4],[5,1],[5,2],[6,1],[6,7],[6,1],[6,1],[6,4],[6,2],[6,3],[6,1],[6,9],[7,1],[7,1],[8,3],[8,7],[8,1],[8,2],[8,2],[8,2],[8,8],[8,1],[9,1],[9,1],[9,1],[9,2],[10,1],[11,3],[12,1],[12,1],[12,2],[12,1],[12,3],[13,1],[14,1],[58,1],[21,1],[36,15],[218,1],[34,1],[20,2],[16,2],[28,1],[38,1],[38,3],[16,1],[165,2],[132,1],[19,2],[260,1],[39,2],[64,1],[18,1],[1,1],[1,1],[1,1],[1,12],[1,1],[1,2],[1,1],[1,5],[1,2],[1,2],[1,1],[1,2],[1,1],[1,13],[1,1],[1,2],[1,3],[1,1],[1,1],[1,2],[1,1],[1,2],[1,4],[1,2],[1,5],[1,1],[1,3],[1,2],[1,1],[1,2],[1,6],[1,1],[1,2],[1,2],[1,7],[1,1],[1,1],[1,1],[1,1],[1,1],[1,6],[1,1],[1,1],[1,1],[1,3],[1,6],[1,1],[1,1],[1,1],[1,6],[1,3],[1,2],[1,6],[1,2],[1,1],[1,3],[1,1],[1,2],[1,1],[1,1],[1,2],[1,3]
,[1,1],[1,3],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,6],[1,1],[1,2],[1,63],[1,1],[1,1],[1,1],[1,2],[1,3],[1,1],[1,2],[1,2],[1,1],[1,2],[1,1],[1,1],[1,4],[1,1],[1,2],[1,3],[1,9],[1,2],[1,1],[1,2],[1,4],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,10],[1,1],[1,2],[1,1],[1,2],[1,2],[1,7],[1,1],[1,8],[1,1],[1,3],[1,5],[1,1],[1,1],[1,1],[1,1],[1,15],[1,6],[1,1],[1,1],[1,422],[1,2],[1,2],[1,4],[1,2],[1,2],[1,3],[1,2],[1,3],[1,1],[1,5],[1,1],[1,1],[1,2],[1,2],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[2,4],[2,3],[2,1],[2,2],[2,2],[2,3],[2,1],[2,1],[2,1],[2,1],[2,2],[2,1],[2,2],[2,2],[2,2],[2,13],[2,11],[2,4],[2,1],[2,2],[2,10],[2,5],[2,2],[2,75],[2,3],[2,1],[2,8],[2,4],[2,1],[2,1],[2,2],[2,1],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[2,14],[2,2],[2,15],[2,1],[2,2],[2,4],[2,1],[2,1],[2,2],[2,33],[2,2],[2,1],[2,1],[2,3],[2,2],[2,2],[2,1],[3,1],[3,13],[3,2],[3,1],[3,1],[3,1],[3,1],[3,1],[3,6],[3,7],[3,2],[3,1],[3,3],[3,1],[3,1],[3,1],[3,1],[3,1],[3,2],[3,3],[3,3],[3,2],[3,1],[3,6],[3,2],[3,4],[3,2],[4,4],[4,4],[4,4],[4,4],[4,6],[4,1],[4,1],[4,1],[4,3],[4,1],[4,2],[4,5],[4,1],[5,4],[5,1],[5,2],[5,8],[5,3],[5,1],[5,1],[5,1],[5,1],[5,3],[6,1],[6,3],[6,2],[6,4],[6,1],[6,3],[6,1],[6,1],[7,1],[7,1],[7,1],[7,1],[7,1],[7,1],[7,1],[7,1],[7,3],[8,1],[8,1],[8,1],[8,7],[9,2],[10,2],[10,1],[10,6],[11,1],[11,3],[11,2],[12,1],[12,1],[14,2],[14,6],[17,2],[19,1],[15,1],[112,1],[16,1],[30,6],[19,3],[15,4],[19,2],[25,1],[17,4],[49,1],[48,1],[26,1],[17,9],[43,3],[51,6],[17,1],[21,3],[26,4],[31,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,9],[1,1],[1,753],[1,1],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,3],[1,5],[1,1],[1,1],[1,1],[1,1],[1,1],[1,7],[1,2],[1,6],[1,3],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,2],[1,3],[1,4],[1,3],[1,4],[1,1],[1,2],[1,1],[1,6],[1,1],[1,1],[1,2],[1,2],[1,1],[1,1],[1,2],[1,1],[1,3],[1,3],[1,3],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,2],[1,1],[1,2],[1,1],[1,2],[1,2],[1,1],[1,1],[1,2],[1,2],[1,1],[1,1],[1,2],[1,1],[1,26],[1,3],[1,1],[1,1],[1,4],[1,1],[1,1],[1,5],[1,2],[1,3],[1,1],[1,5],[1,2],[1,2],[1,2],[1,2],[1,1],[1,3],[1,1],[1,1],[1,3],[1,2],[1,1],[1,1],[1,3],[1,1],[1,4],[1,8],[1,10],[1,1],[1,2],[1,6],[1,1],[1,2],[1,2],[1,2],[1,6],[1,1],[1,1],[1,15],[1,2],[2,1],[2,12],[2,1],[2,8],[2,1],[2,2],[2,1],[2,1],[2,1],[2,1],[2,20],[2,2],[2,2],[2,1],[2,1],[2,2],[2,2],[2,1],[2,2],[2,1],[2,1],[2,1],[2,3],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[2,2],[2,1],[2,1],[2,1],[2,14],[2,2],[2,1],[2,5],[2,5],[2,1],[2,2],[2,2],[2,6],[2,1],[2,2],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[3,2],[3,3],[3,3],[3,1],[3,1],[3,1],[3,3],[3,1],[3,1],[3,6],[3,8],[3,1],[3,1],[3,1],[3,3],[3,12],[3,1],[3,1],[3,1],[3,1],[3,6],[3,1],[3,2],[3,1],[3,1],[4,5],[4,1],[4,5],[4,5],[4,29],[4,11],[4,1],[4,1],[4,2],[4,1],[4,1],[5,2],[5,4],[5,1],[5,6],[5,1],[5,1],[5,1],[5,1],[6,1],[6,4],[6,1],[6,4],[6,2],[6,2],[6,1],[6,1],[6,2],[6,1],[7,1],[7,2],[7,1],[7,1],[7,2],[8,3],[8,4],[8,5],[8,7],[8,5],[9,5],[9,1],[9,1],[10,2],[10,2],[10,4],[11,1],[11,1],[12,8],[12,1],[12,1],[13,1],[13,1],[13,2],[14,2],[20,4],[18,3],[65,1],[23,1],[20,3],[237,1],[70,5],[80,2],[71,1],[15,4],[18,8],[54,1],[30,1],[15,2],[26,2],[20,1],[17,1],[26,4],[20,13],[1,2],[1,1],[1,3],[1,1],[1,3],[1,5],[1,3],[1,1],[1,5],[1,1],[1,3],[1,7],[1,2],[1,1],[1,1],[1,1],[1,4],[1,3],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,2],[1,11],[1,1],[1,6],[1,4],[1,3],[1,3],[1,2],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,4],[1,1],[1,1],[1,2],[1,3],[1,1],[1,2],[1,7],[1
,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,5],[1,2],[1,1],[1,1],[1,4],[1,1],[1,10],[1,4],[1,1],[1,2],[1,1],[1,2],[1,1],[1,1],[1,3],[1,2],[1,2],[1,1],[1,4],[1,1],[1,1],[1,1],[1,3],[1,2],[1,1],[1,2],[1,3],[1,1],[1,2],[1,1],[1,4],[1,1],[1,8],[1,1],[1,1],[1,2],[1,4],[1,1],[1,34],[1,2],[1,2],[1,1],[1,1],[1,4],[1,1],[1,3],[1,7],[1,4],[1,7],[1,7],[1,1],[1,3],[1,1],[1,1],[1,3],[1,4],[1,2],[1,1],[1,1],[1,1],[1,1],[1,14],[1,6],[1,6],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[2,2],[2,1],[2,1],[2,4],[2,2],[2,2],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[2,2],[2,1],[2,4],[2,1],[2,1],[2,1],[2,1],[2,4],[2,2],[2,1],[2,1],[2,1],[2,1],[2,2],[2,1],[2,2],[2,1],[2,2],[2,6],[2,1],[2,1],[2,1],[2,2],[2,2],[3,3],[3,7],[3,4],[3,2],[3,3],[3,1],[3,1],[3,4],[3,1],[3,14],[3,2],[3,5],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,9],[3,25],[3,1],[3,1],[4,1],[4,9],[4,1],[4,3],[4,1],[4,1],[4,12],[4,1],[4,3],[4,7],[4,2],[4,1],[4,1],[4,1],[4,1],[4,1],[5,5],[5,2],[5,1],[5,1],[5,2],[5,5],[5,1],[5,1],[5,1],[5,1],[5,1],[6,5],[6,1],[6,3],[6,1],[6,4],[6,1],[6,1],[6,3],[6,2],[6,1],[7,1],[7,1],[7,1],[7,1],[7,1],[8,2],[8,1],[8,1],[8,1],[8,1],[9,2],[10,374],[10,3],[11,1],[11,1],[11,3],[11,8],[11,4],[12,1],[13,3],[13,2],[13,4],[58,1],[43,1],[38,1],[196,1],[55,3],[15,1],[79,1],[16,5],[20,1],[32,1],[111,1],[68,1],[50,17],[327,47],[46,3],[24,3],[41,2],[65,1],[1,2],[1,14],[1,4],[1,1],[1,2],[1,1],[1,1],[1,3],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,1],[1,7],[1,4],[1,5],[1,8],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,2],[1,1],[1,1],[1,6],[1,2],[1,1],[1,5],[1,1],[1,3],[1,29],[1,4],[1,2],[1,1],[1,1],[1,4],[1,2],[1,9],[1,5],[1,1],[1,2],[1,1],[1,1],[1,2],[1,1],[1,4],[1,2],[1,1],[1,8],[1,2],[1,13],[1,1],[1,1],[1,1],[1,2],[1,2],[1,2],[1,4],[1,6],[1,1],[1,1],[1,3],[1,2],[1,4],[1,2],[1,10],[1,2],[1,2],[1,2],[1,1],[1,4],[1,2],[1,1],[1,5],[1,93],[1,1],[1,1],[1,3],[1,22],[1,1],[1,1],[1,4],[1,2],[1,2],[1,1],[1,1],[1,4],[1,1],[1,6],[1,1],[1,3],[1,4],[1,1],[1,1],[1,2],[1,2],[1,8],[1,3],[1,1],[1,5],[1,6],[1,2],[1,2],[1,1],[1,1],[1,3],[1,1],[1,3],[1,2],[1,1],[1,2],[1,2],[1,2],[1,28],[1,1],[1,6],[1,6],[1,2],[2,1],[2,2],[2,1],[2,2],[2,1],[2,2],[2,6],[2,1],[2,1],[2,2],[2,6],[2,2],[2,2],[2,1],[2,2],[2,2],[2,2],[2,1],[2,2],[2,2],[2,6],[2,3],[2,3],[2,1],[2,2],[2,2],[2,1],[2,1],[2,14],[2,1],[2,2],[2,1],[2,1],[2,1],[2,1],[2,1],[2,2],[2,1],[2,9],[2,2],[2,1],[2,5],[2,1],[2,1],[2,3],[2,2],[2,2],[2,7],[2,16],[2,6],[2,2],[2,2],[2,1],[2,2],[3,1],[3,26],[3,1],[3,2],[3,1],[3,1],[3,3],[3,1],[3,3],[3,1],[3,1],[3,4],[3,1],[3,3],[3,3],[3,1],[3,1],[3,1],[3,1],[3,1],[3,12],[3,2],[3,2],[3,4],[3,1],[3,2],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[4,1],[4,1],[4,2],[4,1],[4,1],[4,2],[4,1],[4,1],[4,1],[4,2],[4,1],[4,8],[4,3],[4,1],[4,4],[5,2],[5,2],[5,1],[5,1],[5,1],[5,9],[6,1],[6,2],[6,2],[6,1],[6,1],[6,1],[6,10],[6,1],[7,1],[7,11],[7,4],[7,1],[7,2],[8,2],[8,1],[8,1],[8,1],[8,1],[8,4],[8,7],[9,1],[9,1],[10,2],[10,4],[10,1],[10,1],[11,6],[12,1],[12,1],[12,6],[13,1],[13,5],[13,2],[13,11],[14,8],[14,3],[16,1],[55,1],[17,1],[91,1],[27,1],[16,1],[17,1],[37,1],[54,3],[73,2],[50,1],[19,3],[20,2],[26,1],[55,3],[54,1],[31,1],[68,2],[75,8],[412,1],[21,2],[1,6],[1,1],[1,2],[1,2],[1,4],[1,4],[1,2],[1,6],[1,5],[1,2],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,9],[1,4],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,1],[1,6],[1,3],[1,1],[1,2],[1,3],[1,12],[1,16],[1,3],[1,1],[1,1],[1,3],[1,3],[1,502],[1,3],[1,1],[1,1],[1,5],[1,2],[1,2],[1,3],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,2],[1,6],[1,3],[1,2],[1,1],[1,5],[1,1],[1,6],[1,4],[1,1]
,[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,3],[1,2],[1,1],[1,2],[1,2],[1,2],[1,1],[1,1],[1,1],[1,17],[1,3],[1,1],[1,1],[1,1],[1,2],[1,1],[1,2],[1,2],[1,1],[1,4],[1,6],[1,1],[1,1],[1,11],[1,1],[1,4],[1,2],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,4],[1,3],[1,1],[1,1],[1,2],[1,3],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,5],[1,2],[2,1],[2,1],[2,3],[2,3],[2,2],[2,2],[2,9],[2,2],[2,1],[2,9],[2,1],[2,2],[2,2],[2,2],[2,5],[2,5],[2,2],[2,1],[2,2],[2,1],[2,1],[2,13],[2,5],[2,2],[2,1],[2,4],[2,1],[2,1],[2,2],[2,1],[2,2],[2,3],[2,3],[2,5],[2,3],[2,3],[2,10],[2,2],[2,2],[2,2],[2,4],[2,1],[2,2],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[2,1],[2,1],[2,3],[3,2],[3,2],[3,1],[3,7],[3,2],[3,2],[3,1],[3,5],[3,2],[3,3],[3,1],[3,8],[3,1],[3,1],[3,2],[3,14],[3,2],[4,2],[4,1],[4,2],[4,3],[4,2],[4,7],[4,1],[4,5],[4,1],[4,3],[4,10],[4,1],[4,2],[4,4],[4,4],[4,1],[5,1],[5,4],[5,2],[5,1],[5,1],[5,2],[5,8],[5,3],[5,1],[5,1],[6,2],[6,2],[6,1],[6,1],[6,1],[6,2],[6,15],[6,39],[6,3],[7,2],[7,1],[7,3],[7,1],[7,1],[8,1],[8,1],[9,2],[9,2],[9,1],[9,1],[10,1],[10,1],[10,1],[11,14],[11,1],[11,3],[11,1],[12,1],[12,1],[13,2],[13,2],[14,8],[16,1],[27,1],[21,5],[18,2],[36,1],[36,3],[28,15],[17,13],[18,7],[17,9],[28,2],[19,2],[27,1],[33,11],[40,2],[17,3],[120,2],[136,4],[21,1],[64,1],[23,3],[81,4],[27,1],[126,15],[17,1],[37,2],[21,1],[22,1],[58,1],[1,85],[1,1],[1,2],[1,1],[1,1],[1,2],[1,1],[1,1],[1,2],[1,2],[1,1],[1,2],[1,3],[1,9],[1,2],[1,3],[1,7],[1,3],[1,2],[1,5],[1,2],[1,1],[1,3],[1,1],[1,1],[1,4],[1,13],[1,74],[1,14],[1,1],[1,1],[1,2],[1,1],[1,2],[1,4],[1,2],[1,5],[1,1],[1,4],[1,1],[1,4],[1,1],[1,1],[1,3],[1,2],[1,79],[1,1],[1,1],[1,6],[1,1],[1,2],[1,7],[1,2],[1,1],[1,2],[1,1],[1,7],[1,1],[1,2],[1,1],[1,4],[1,4],[1,3],[1,1],[1,2],[1,1],[1,3],[1,1],[1,1],[1,2],[1,2],[1,1],[1,1],[1,2],[1,1],[1,2],[1,2],[1,1],[1,2],[1,6],[1,1],[1,8],[1,2],[1,2],[1,1],[1,9],[1,1],[1,2],[1,1],[1,2],[1,3],[1,1],[1,1],[1,1],[1,1],[1,11],[1,1],[1,5],[1,1],[1,4],[1,3],[1,8],[1,4],[1,1],[1,9],[1,1],[1,3],[1,1],[1,4],[1,1],[1,2],[1,3],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,2],[1,3],[1,8],[1,4],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,11],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[2,6],[2,1],[2,3],[2,1],[2,3],[2,7],[2,6],[2,1],[2,2],[2,1],[2,1],[2,3],[2,1],[2,1],[2,1],[2,1],[2,1],[2,3],[2,2],[2,1],[2,2],[2,2],[2,2],[2,1],[2,1],[2,1],[2,1],[2,1],[2,2],[2,2],[2,2],[2,1],[2,4],[2,3],[2,2],[2,1],[2,6],[2,1],[2,3],[2,2],[2,2],[2,1],[2,3],[2,1],[2,2],[2,1],[2,1],[2,1],[2,3],[2,2],[2,1],[2,4],[2,5],[2,1],[2,1],[3,1],[3,57],[3,2],[3,1],[3,1],[3,2],[3,3],[3,15],[3,4],[3,1],[3,1],[3,9],[3,10],[3,5],[3,1],[3,4],[3,4],[3,1],[3,1],[3,6],[3,1],[4,2],[4,1],[4,1],[4,2],[4,1],[4,14],[4,3],[4,1],[4,1],[4,3],[4,10],[4,1],[4,2],[5,10],[5,1],[5,1],[5,3],[5,1],[5,5],[5,1],[6,5],[6,4],[6,2],[6,2],[6,3],[6,1],[7,1],[7,1],[7,4],[7,1],[7,2],[7,2],[7,2],[7,2],[8,2],[8,1],[8,4],[8,2],[8,4],[8,1],[9,1],[9,1],[10,3],[10,1],[11,1],[11,1],[12,9],[12,4],[12,2],[13,7],[13,4],[13,2],[13,7],[13,1],[14,1],[14,1],[23,1],[19,2],[16,1],[36,4],[15,4],[22,3],[17,1],[17,2],[38,2],[15,1],[34,1],[29,2],[20,7],[23,4],[44,5],[22,2],[18,1],[1,2],[1,1],[1,1],[1,4],[1,1],[1,1],[1,1],[1,1],[1,3],[1,4],[1,2],[1,1],[1,1],[1,3],[1,1],[1,1],[1,9],[1,1],[1,4],[1,2],[1,2],[1,1],[1,5],[1,1],[1,2],[1,1],[1,4],[1,2],[1,2],[1,1],[1,3],[1,3],[1,3],[1,2],[1,3],[1,1],[1,2],[1,5],[1,3],[1,1],[1,4],[1,1],[1,6],[1,4],[1,3],[1,1],[1,2],[1,1],[1,2],[1,2],[1,6],[1,1],[1,2],[1,1],[1,3],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,3],[1,8],[1,1],[1,
2],[1,5],[1,1],[1,6],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,3],[1,10],[1,3],[1,7],[1,1],[1,1],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,3],[1,2],[1,2],[1,1],[1,3],[1,2],[1,1],[1,1],[1,1],[1,4],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,1],[1,2],[1,1],[1,43],[1,23],[1,2],[1,4],[1,33],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,7],[1,2],[1,4],[1,6],[1,1],[1,1],[1,1],[1,2],[1,7],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,136],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,3],[1,2],[1,1],[1,1],[1,1],[1,20],[2,1],[2,1],[2,16],[2,3],[2,1],[2,1],[2,1],[2,1],[2,1],[2,2],[2,3],[2,2],[2,1],[2,1],[2,2],[2,7],[2,2],[2,1],[2,2],[2,114],[2,1],[2,3],[2,4],[2,1],[2,4],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[2,6],[2,2],[2,1],[2,1],[2,1],[2,4],[2,1],[2,1],[2,1],[2,4],[2,1],[2,1],[2,1],[2,4],[2,2],[2,4],[2,3],[2,2],[2,1],[3,2],[3,1],[3,1],[3,5],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,8],[3,2],[3,1],[3,2],[3,28],[3,1],[3,118],[3,1],[3,1],[3,2],[3,2],[3,3],[3,8],[3,3],[4,1],[4,2],[4,4],[4,1],[4,1],[4,1],[4,1],[4,1],[4,2],[4,2],[4,1],[4,1],[4,3],[4,1],[4,3],[4,1],[4,1],[4,1],[5,2],[5,1],[5,6],[5,1],[5,4],[5,2],[5,4],[5,1],[5,4],[6,4],[6,1],[6,3],[6,1],[6,2],[6,1],[7,1],[7,3],[7,1],[7,46],[7,2],[7,1],[8,3],[8,6],[8,1],[8,5],[9,12],[9,1],[9,5],[10,3],[10,3],[11,3],[11,7],[12,3],[12,1],[12,1],[13,1],[13,1],[13,2],[13,13],[13,1],[14,1],[14,1],[58,2],[112,1],[18,3],[19,1],[20,1],[18,1],[15,2],[92,1],[50,1],[40,1],[57,5],[19,2],[19,1],[15,4],[16,5],[54,1],[15,1],[1,2],[1,6],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,6],[1,7],[1,1],[1,2],[1,2],[1,3],[1,1],[1,1],[1,1],[1,1],[1,11],[1,3],[1,6],[1,1],[1,1],[1,6],[1,4],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,4],[1,1],[1,12],[1,1],[1,1],[1,1],[1,4],[1,1],[1,4],[1,1],[1,1],[1,1],[1,2],[1,1],[1,2],[1,5],[1,2],[1,1],[1,1],[1,2],[1,8],[1,2],[1,1],[1,1],[1,2],[1,1],[1,19],[1,1],[1,1],[1,4],[1,1],[1,4],[1,2],[1,1],[1,4],[1,1],[1,1],[1,2],[1,1],[1,4],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,3],[1,3],[1,5],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,3],[1,1],[1,2],[1,4],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,3],[1,5],[1,1],[1,2],[1,3],[1,9],[1,26],[1,3],[1,17],[1,1],[1,2],[1,1],[1,5],[1,4],[1,1],[1,1],[1,2],[1,1],[1,3],[1,2],[1,8],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,4],[1,30],[2,1],[2,4],[2,1],[2,2],[2,1],[2,1],[2,2],[2,3],[2,4],[2,2],[2,1],[2,1],[2,2],[2,1],[2,1],[2,1],[2,1],[2,2],[2,7],[2,2],[2,1],[2,1],[2,1],[2,1],[2,1],[2,2],[2,10],[2,4],[2,1],[2,1],[2,1],[2,3],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,3],[2,3],[2,7],[2,1],[2,1],[2,2],[2,5],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,4],[2,2],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,3],[3,1],[3,3],[3,1],[3,1],[3,1],[3,2],[3,29],[3,2],[4,2],[4,1],[4,3],[4,1],[4,1],[4,1],[4,1],[4,1],[4,2],[4,1],[4,3],[4,1],[5,2],[5,1],[5,1],[5,4],[5,1],[5,1],[5,2],[5,1],[5,1],[5,3],[6,4],[6,1],[6,1],[6,3],[6,2],[6,2],[6,1],[6,1],[6,1],[6,2],[7,2],[7,3],[7,2],[7,1],[7,2],[8,1],[8,1],[8,4],[8,1],[8,3],[9,1],[9,5],[9,1],[9,1],[9,1],[11,1],[11,2],[11,2],[11,3],[12,7],[12,1],[13,1],[14,2],[16,1],[78,3],[17,3],[27,3],[19,2],[67,3],[16,3],[58,3],[17,1],[29,2],[29,1],[23,1],[390,2],[75,2],[26,8],[20,3],[19,2],[16,4],[33,1],[66,2],[20,1],[17,5],[1,1],[1,2],[1,1],[1,1],[1,9],[1,4],[1,2],[1,3],[1,2],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,3],[1,1],[1,1],[1,2],[1,1],[1,4],[1,2],[1,1],[1,1],[1,1],[1,4],[1,5],[1,11],[1,1],[1,4],[1,2],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,3],[1,4],[1,1],[1,2],[1
,3],[1,1],[1,1],[1,3],[1,1],[1,7],[1,1],[1,1],[1,1],[1,2],[1,2],[1,1],[1,1],[1,8],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,2],[1,6],[1,1],[1,1],[1,6],[1,2],[1,1],[1,11],[1,3],[1,1],[1,2],[1,4],[1,4],[1,1],[1,11],[1,7],[1,3],[1,1],[1,1],[1,3],[1,1],[1,1],[1,2],[1,2],[1,1],[1,1],[1,14],[1,1],[1,1],[1,1],[1,4],[1,1],[1,2],[1,3],[1,6],[1,1],[1,1],[1,3],[1,3],[1,2],[1,2],[1,7],[1,5],[1,2],[1,7],[1,7],[1,1],[1,3],[1,2],[1,4],[1,4],[1,3],[1,1],[1,1],[1,4],[1,2],[1,1],[1,1],[1,5],[1,3],[1,1],[1,124],[1,2],[1,6],[1,1],[1,1],[2,1],[2,4],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,2],[2,5],[2,21],[2,2],[2,1],[2,2],[2,1],[2,2],[2,1],[2,1],[2,7],[2,31],[2,1],[2,2],[2,4],[2,1],[2,3],[2,125],[2,1],[2,8],[2,1],[2,4],[2,2],[2,2],[2,1],[2,1],[2,1],[2,4],[2,5],[2,1],[2,2],[2,2],[2,1],[2,1],[2,1],[2,8],[2,1],[2,12],[2,278],[2,1],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[3,1],[3,2],[3,1],[3,1],[3,1],[3,2],[3,3],[3,1],[3,1],[3,1],[3,1],[3,3],[3,2],[3,1],[3,1],[3,3],[3,1],[3,3],[3,1],[3,3],[3,1],[3,2],[3,3],[3,1],[4,2],[4,8],[4,1],[4,3],[4,3],[4,1],[4,3],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,2],[4,1],[4,3],[5,1],[5,1],[5,1],[5,2],[5,2],[5,2],[5,1],[6,2],[6,2],[6,24],[6,2],[6,2],[6,20],[6,1],[6,1],[6,3],[6,1],[6,4],[6,5],[6,3],[7,2],[7,1],[7,4],[7,1],[7,1],[7,1],[7,1],[7,1],[7,1],[7,134],[8,1],[8,1],[8,5],[8,1],[8,6],[9,3],[9,15],[10,4],[10,3],[10,1],[11,12],[11,2],[12,2],[12,2],[14,1],[14,6],[15,3],[30,2],[35,1],[28,1],[111,1],[22,1],[25,1],[18,1],[40,4],[58,1],[295,4],[18,3],[35,1],[16,1],[1,1],[1,1],[1,2],[1,1],[1,6],[1,6],[1,2],[1,1],[1,301],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,3],[1,1],[1,5],[1,1],[1,2],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,3],[1,5],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,3],[1,2],[1,1],[1,7],[1,1],[1,2],[1,1],[1,2],[1,1],[1,2],[1,5],[1,1],[1,2],[1,1],[1,3],[1,1],[1,1],[1,17],[1,1],[1,1],[1,2],[1,2],[1,4],[1,3],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,4],[1,1],[1,1],[1,1],[1,1],[1,3],[1,3],[1,2],[1,1],[1,23],[1,1],[1,1],[1,1],[1,1],[1,3],[1,4],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,4],[1,4],[1,1],[1,2],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,4],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,3],[1,4],[1,1],[1,1],[1,1],[1,2],[1,1],[1,3],[1,2],[1,2],[1,1],[1,1],[1,3],[1,15],[1,4],[1,1],[1,1],[1,3],[1,3],[1,1],[1,2],[1,2],[1,6],[1,1],[1,2],[1,1],[1,2],[1,2],[1,2],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[2,2],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,10],[2,3],[2,1],[2,1],[2,2],[2,7],[2,1],[2,1],[2,4],[2,1],[2,2],[2,1],[2,2],[2,2],[2,1],[2,1],[2,3],[2,6],[2,1],[2,1],[2,46],[2,1],[2,3],[2,1],[2,4],[2,1],[2,1],[2,1],[2,1],[2,2],[2,4],[2,4],[2,3],[3,11],[3,1],[3,1],[3,1],[3,1],[3,2],[3,1],[3,2],[3,4],[3,1],[3,1],[3,1],[3,3],[3,2],[3,1],[3,2],[3,2],[3,2],[3,1],[3,3],[3,1],[3,2],[3,2],[3,4],[3,1],[3,45],[3,2],[4,11],[4,2],[4,1],[4,2],[4,4],[4,14],[4,4],[4,2],[4,2],[4,1],[5,3],[5,1],[5,1],[5,2],[5,1],[5,2],[5,3],[5,2],[5,1],[5,2],[5,2],[6,1],[6,1],[6,3],[6,2],[6,1],[6,3],[6,1],[6,6],[7,1],[7,2],[7,1],[8,1],[8,2],[8,1],[8,1],[8,1],[8,2],[8,2],[8,2],[9,5],[9,2],[10,1],[10,1],[10,3],[11,8],[11,1],[12,5],[12,1],[14,1]])\n \n ida.scatter_plot(data, '{0}/faithful_ida_scatter.png'.format(output_dir))\n ida.histogram(data, '{0}/faithful_ida_hist.png'.format(output_dir))\n ida.linear_regression(data, '{0}/faithful_ida_regression.png'.format(output_dir))\n\n #clustering\n km2 = __run_clustering(data, output_dir)\n\n #expectation-maximization\n __run_em(data, output_dir, km2)\n\n #build bayes fmm model\n __run_bayesfmm(data, iterations, 
save_diagnostics, output_dir, burnin, km2)",
"def __update_tesseract__(self):\n if self.row_bitmaps != []:\n self.__write_out_row__()\n cv2.imwrite(\"active_weather.basic.exp\" + str(self.box_count) + \".tiff\", self.training_page)\n # call([\"convert\", \"-density 300\", \"-depth 4\", \"active_weather.basic.exp0.tiff\",\"active_weather.basic.exp0.tiff\"])\n call([\"/usr/bin/tesseract\", \"active_weather.basic.exp0.tiff\", \"active_weather.basic.exp0\", \"nobatch\", \"box.train\"])\n\n with open(\"font_properties\",\"w\") as f:\n f.write(\"basic 0 0 0 0 0\\n\")\n\n call([\"unicharset_extractor\", \"active_weather.basic.exp0.box\"])\n os.system(\"/home/ggdhines/github/tesseract/training/set_unicharset_properties -F font_properties -U unicharset -O unicharset --script_dir=/home/ggdhines/langdata\")\n # os.system(\"shapeclustering -F font_properties -U unicharset active_weather.basic.exp0.tr\")\n # os.system(\"shapeclustering -F font_properties active_weather.basic.exp0.tr\")\n os.system(\"mftraining -F font_properties -U unicharset -O active_weather.unicharset active_weather.basic.exp0.tr\")\n os.system(\"cntraining active_weather.basic.exp0.tr\")\n\n os.system(\"mv inttemp active_weather.inttemp\")\n os.system(\"mv normproto active_weather.normproto\")\n os.system(\"mv pffmtable active_weather.pffmtable\")\n os.system(\"mv shapetable active_weather.shapetable\")\n os.system(\"combine_tessdata active_weather.\")\n\n os.system(\"mv active_weather.basic.* /tmp/tessdata/\")\n os.system(\"mv active_weather.inttemp /tmp/tessdata/\")\n os.system(\"mv active_weather.normproto /tmp/tessdata/\")\n os.system(\"mv active_weather.pffmtable /tmp/tessdata/\")\n os.system(\"mv active_weather.shapetable /tmp/tessdata/\")\n os.system(\"mv active_weather.traineddata /tmp/tessdata/\")\n os.system(\"mv active_weather.unicharset /tmp/tessdata/\")\n os.system(\"mv font_properties /tmp/tessdata/\")",
"def pzt_scan(pzt_motor, start, stop, steps, detectors=[Vout2], sleep_time=1, md=None):\n if Andor in detectors:\n exposure_time = yield from bps.rd(Andor.cam.acquire_time)\n yield from mv(Andor.cam.acquire, 0)\n yield from mv(Andor.cam.image_mode, 0)\n yield from mv(Andor.cam.num_images, 1)\n Andor.cam.acquire_period.put(exposure_time)\n\n motor = pzt_motor.setpos\n motor_readback = pzt_motor.pos\n motor_ini_pos = motor_readback.get()\n detector_set_read = [motor, motor_readback]\n detector_all = detector_set_read + detectors\n\n _md = {\n \"detectors\": [det.name for det in detector_all],\n \"detector_set_read\": [det.name for det in detector_set_read],\n \"motors\": [motor.name],\n \"XEng\": XEng.position,\n \"plan_args\": {\n \"pzt_motor\": pzt_motor.name,\n \"start\": start,\n \"stop\": stop,\n \"steps\": steps,\n \"detectors\": \"detectors\",\n \"sleep_time\": sleep_time,\n },\n \"plan_name\": \"pzt_scan\",\n \"hints\": {},\n \"motor_pos\": wh_pos(print_on_screen=0),\n \"operator\": \"FXI\",\n }\n _md.update(md or {})\n try:\n dimensions = [(pzt_motor.hints[\"fields\"], \"primary\")]\n except (AttributeError, KeyError):\n pass\n else:\n _md[\"hints\"].setdefault(\"dimensions\", dimensions)\n\n @stage_decorator(list(detector_all))\n @run_decorator(md=_md)\n def pzt_inner_scan():\n my_var = np.linspace(start, stop, steps)\n print(my_var)\n for x in my_var:\n print(x)\n yield from mv(motor, x)\n yield from bps.sleep(sleep_time)\n yield from trigger_and_read(list(detector_all))\n yield from mv(motor, motor_ini_pos)\n\n uid = yield from pzt_inner_scan()\n\n h = db[-1]\n scan_id = h.start[\"scan_id\"]\n det = [det.name for det in detectors]\n det_name = \"\"\n for i in range(len(det)):\n det_name += det[i]\n det_name += \", \"\n det_name = \"[\" + det_name[:-2] + \"]\"\n txt1 = get_scan_parameter()\n txt2 = f\"detectors = {det_name}\"\n txt = txt1 + \"\\n\" + txt2\n insert_text(txt)\n print(txt)\n return uid\n\n # def pzt_scan(moving_pzt, start, stop, steps, read_back_dev, record_dev, delay_time=5, print_flag=1, overlay_flag=0):\n \"\"\"\n Input:\n -------\n moving_pzt: pv name of the pzt device, e.g. 'XF:18IDA-OP{Mir:DCM-Ax:Th2Fine}SET_POSITION.A'\n\n read_back_dev: device (encoder) that changes with moving_pzt, e.g., dcm.th2\n\n record_dev: signal you want to record, e.g. Vout2\n\n delay_time: waiting time for device to response\n \"\"\"",
"def explore_data():\n labels = [\"vehicles\", \"non-vehicles\"]\n labelmap = {0: \"vehicles\", 1: \"non-vehicles\"}\n vehicles_glob = os.path.join(data_dir, \"vehicles\", \"**\", \"*.png\")\n nonvehicles_glob = os.path.join(data_dir, \"non-vehicles\", \"**\", \"*.png\")\n class_fnames = [\n glob.glob(vehicles_glob, recursive = True),\n glob.glob(nonvehicles_glob, recursive = True)]\n n_samples = [len(fnames) for fnames in class_fnames]\n shapes = []\n samples = []\n print(table_format([\"label\", \"size\", \"shape\"], header = True))\n for label, fnames in enumerate(class_fnames):\n indices = np.random.choice(len(fnames), 4*10, replace = False)\n for i in indices:\n fname = fnames[i]\n img = cv2.imread(fname)\n samples.append(img)\n shape = img.shape\n shapes.append(shape)\n print(table_format([labels[label], n_samples[label], shapes[label]]))\n\n samples = np.stack(samples)\n samples = tile(samples, 2*4, 10)\n cv2.imwrite(os.path.join(out_dir, \"datasamples.png\"), samples)\n\n return class_fnames, labelmap",
"def process_data(output_folder):\n # select imgs\n img_folder = join(output_folder, 'img')\n select_img(output_folder, img_folder, 'HE-green')\n\n mask_folder = join(output_folder, 'mask')\n select_img(output_folder, mask_folder, '_EF5')",
"def phot_aperture(input_file):\n #set the original directory\n original_path = os.getcwd()\n save_path = input_file['save_path']\n planet = input_file['exoplanet']\n #radii = np.arange(input_file['apertures'][0],input_file['apertures'][1],0.1)\n radii = np.array(input_file['apertures'])\n #change to save data reduction directory\n os.chdir(save_path)\n if not os.path.exists('phot_results'):\n os.makedirs('phot_results')\n tempo = time.time()\n print 'Starting aperture photometry'\n print 'Saving results on: '+save_path+'/phot_results/'\n \n #check the number of objects to make the photometry\n N_obj = len(input_file['pxpositions'])/2.\n print 'Number of objects = ',N_obj\n positions = [] #create the positions variable (X,Y) in pixels unit on the CCD\n for i in range(len(input_file['pxpositions'])):\n if i % 2 == 0: #if the number is a even (or not a odd), the turple is created\n positions.append((input_file['pxpositions'][i],input_file['pxpositions'][i+1]))\n print 'Radius from ',radii[0],' to ',radii[-1],'\\n'\n \n skysection = input_file['skysection']\n skysection[0] = int(skysection[0])\n skysection[1] = int(skysection[1])\n \n images = sorted(glob.glob('AB'+planet+'*.fits'))\n for radius in radii:\n flux_data = []\n for i in range(len(images)):\n im = fits.getdata(images[i],header=False)\n im = array(im,dtype='Float64')\n \n # ERROR\n #Traceback (most recent call last):\n # File \"ExoTRed.py\", line 105, in <module>\n # exotred.phot_aperture(input_file)\n # File \"./sources/ExoTRed_core.py\", line 637, in phot_aperture \n # File \"/home/walter/bin/anaconda3/envs/iraf27/lib/python2.7/site-packages/photutils/background/background_2d.py\", line 329, in __init__\n # self._calc_bkg_bkgrms()\n # File \"/home/walter/bin/anaconda3/envs/iraf27/lib/python2.7/site-packages/photutils/background/background_2d.py\", line 686, in _calc_bkg_bkgrms\n # bkg = self._interpolate_meshes(self._bkg1d)\n # File \"/home/walter/bin/anaconda3/envs/iraf27/lib/python2.7/site-packages/photutils/background/background_2d.py\", line 575, in _interpolate_meshes\n # f = ShepardIDWInterpolator(yx, data)\n # File \"/home/walter/bin/anaconda3/envs/iraf27/lib/python2.7/site-packages/photutils/utils/interpolation.py\", line 138, in __init__\n # raise ValueError('The number of values must match the number '\n # ValueError: The number of values must match the number of coordinates.\n\n # bkg = background.background_2d.Background2D(im,tuple(skysection))\n # bkg_data = bkg.background\n # bkg_rms = bkg.background_rms\n\n # phot_table = aperture_photometry(im - bkg_data, CircularAperture(positions, radius),\n # error=bkg_rms, method ='center')#,effective_gain=float(input_file['gain']))\n ####### SUBSTITUTE ROUTINE\n window = 100\n sky_size = im.shape\n sky_mean = float(np.median(im[int(skysection[1]-window):int(skysection[1]+window),int(skysection[0]-window):int(skysection[0]+window)]))\n bkg = np.random.poisson(sky_mean,sky_size)\n apertures = CircularAperture(positions, radius)\n phot_table = aperture_photometry(im, apertures, error=bkg)\n #######\n phot_table_flux = np.array([]) #saving results of aperture photometry\n for j in range(len(phot_table['aperture_sum'])):\n phot_table_flux = np.concatenate((phot_table_flux,np.array([phot_table['aperture_sum'][j]])),axis=0)\n phot_table_flux = np.concatenate((phot_table_flux,np.array([phot_table['aperture_sum_err'][j]])),axis=0)\n flux = np.concatenate((phot_table_flux,np.array([images[i]])),axis=0)\n # flux = [phot_table['aperture_sum'][0], 
phot_table['aperture_sum'][1],phot_table['aperture_sum_err'][0],\n # phot_table['aperture_sum_err'][1],images[i]]\n flux_data.append(flux)\n flux_data = DataFrame(flux_data)#,columns=['hoststar','refstar','hoststar_err','refstar_err','image'])\n flux_data.to_csv('./phot_results/'+planet+'_flux_radius_'+str(radius)+'.csv',index=False)\n use.update_progress((float(np.where(radii == radius)[0])+1.)/len(radii))\n print 'Time total = ',abs(time.time()-tempo)/60.,' minutes'\n os.chdir(original_path)",
"def collect_dust(self,print_iterations = False):\n \n input = self.construct_input(print_iterations)\n \n self.ic = Intcode(self.fileName,input=input,verbose = False, reset = False)\n self.ic.memory[0] = 2\n\n if not print_iterations:\n while True:\n cond, output = self.ic()\n if cond : return output\n else:\n return self.print_video_feed()",
"def extract_data(filename, images_dir, output_dir, trials_idx, block_nums, goal_dict):\n num_images = len(trials_idx) * len(block_nums)\n f = h5py.File(os.path.join(output_dir, filename), 'w')\n X = f.create_dataset('X', (num_images, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS*NUM_FRAMES), dtype=TYPE)\n Y = f.create_dataset('Y', (num_images, 2), dtype=TYPE)\n\n image_count = 0\n for trial_num in trials_idx:\n for block_num in block_nums:\n print('Blocks ' + str(block_num) + ' Trial ' + str(trial_num))\n for frame_num in xrange(0, NUM_FRAMES):\n temp = imread(images_dir+'RTr_Bl'+str(block_num)+'_'+str(trial_num)+'_'+str(frame_num)+IMAGE_FORMAT)\n temp = imresize(temp, [temp.shape[0]//DOWN_SAMPLE, temp.shape[1]//DOWN_SAMPLE, temp.shape[2]])\n X[image_count, 0:temp.shape[0], 0:temp.shape[1], frame_num*NUM_CHANNELS:(frame_num+1)*NUM_CHANNELS] = temp\n label = goal_dict['RTr_Bl'+str(block_num)+'_'+str(trial_num)]\n Y[image_count, :] = [label, 1-label]\n image_count += 1\n\n f.close()\n\n # TODO Use pixel depth normalization???\n #data = (data - (PIXEL_DEPTH / 2.0)) / PIXEL_DEPTH",
"def main():\n\n # Experiment Start\n start_time = datetime.now()\n logger.info(\n '################ Bergson Team Experiment Start #################')\n logger.info(\n f'Starting Bergson Astro Pi team experiment at {start_time.strftime(\"%Y-%m-%d %H:%M:%S\")}')\n\n '''\n # Load simple Conv2D AI Model\n logger.info(\"Loading AI Convolutional Model\")\n conv2D_model = load_model(\"Conv2D_TF114\")\n '''\n\n # Load TFLite Model\n logger.info(\"Loading TFLite Mobilenetv2 Model\")\n mobilenetv2_interpreter = load_tflite_model(\"./Mobilenetv2_TF114.tflite\")\n\n # Create Log File\n logger.info(f'Creating Log file at {str(data_file)}')\n with open(data_file, 'w') as f:\n writer = csv.writer(f)\n header = (\"Date/time\", \"Location\", \"Picture Name\", \"Predicted NO2\")\n writer.writerow(header)\n\n # Start Loop over 3 hours\n\n now_time = datetime.now()\n i = 0\n # run a loop for 2 minutes\n while (now_time < start_time + timedelta(minutes=175)):\n\n # Take Earth Picture\n timestamp = datetime.now().strftime(\"%Y-%m-%d_%H:%M:%S\")\n pic_name = f'bergson_img_{timestamp}.jpg'\n capture(rpi_cam, str(dir_path/pic_name))\n logger.info(f'Experiment Pipeline {i} on picture {pic_name}')\n\n # NDVI Preprocessing\n ndvi_image = get_ndvi(str(dir_path/pic_name))\n ndvi_image = np.expand_dims(ndvi_image, axis=2)\n\n # RGB Prepprocessing for expected shape by Mobilenetv2 - comment below line when using simple Conv2D model\n ndvi_rgb_image = get_ndvi_rgb(ndvi_image)\n\n '''\n # Do Inference with simple Conv2D AI Model\n prediction = make_inference(ndvi_image,conv2D_model)\n '''\n \n # Do Inference with TFLite Model\n ndvi_rgb_image = ndvi_rgb_image.astype('float32')\n prediction = make_tflite_inference(\n ndvi_rgb_image, mobilenetv2_interpreter)\n\n # Get Decoded Inference results\n decoded_prediction = decode_prediction(prediction)\n\n # Write Prediction as CSV to disk\n logger.info(\n f'Logging NO2 prediction \\\"{decoded_prediction}\\\" for {pic_name}')\n exif_data = get_img_exif(pic_name, iss, decoded_prediction)\n row = (exif_data['Date/Time'], exif_data['Location'],\n pic_name, exif_data['NO2'])\n with open(data_file, mode='a') as f:\n writer = csv.writer(f)\n writer.writerow(row)\n\n # update the current time\n now_time = datetime.now()\n i = i+1\n\n # End Loop over 3 hours\n\n # Experiment End\n end_time = datetime.now()\n logger.info(\n f'Finishing Bergson Astro Pi team experiment at {end_time.strftime(\"%Y-%m-%d %H:%M:%S\")}')\n experiment_time = end_time - start_time\n logger.info(f'Bergson Astro Pi team experiment run time {experiment_time}')\n logger.info('################ Bergson Team Experiment End #################')",
"def visualize_data(filename, width=72, height=48, depth=3, cnn_model=None):\n data = pd.DataFrame.from_csv(filename) \n\n for i in range(30):\n cur_img = data['image'][i]\n cur_steer = int(data['servo'][i])\n cur_throttle = int(data['motor'][i])\n \n # [1:-1] is used to remove '[' and ']' from string \n cur_img_array = deserialize_image(cur_img)\n # cur_img_array = cv2.resize(cur_img_array, (480, 320), interpolation=cv2.INTER_CUBIC)\n image = cv2.cvtColor(cur_img_array, cv2.COLOR_RGB2BGR)\n print(i)\n cv2.imwrite('test'+str(i)+'.jpg', image)",
"def detect(self):\n # process the input video and get the attributes:\n self.process_video()\n\n # build a rcnn/ yolov5 predictor:\n self.build_predictor()\n\n \n # assert not os.path.isfile(args.output_file), \"File with the name %s already exists\"%args.output_file\n # build the writer with same attributes:\n self.vid_writer = cv2.VideoWriter(self.output, self.fourcc, self.fps, (self.w, self.h))\n\n # inference time:\n start = time.time()\n print(\"Started inference\\n\")\n \n # progress bar using tqdm:\n pbar = tqdm(total=self.nframes)\n\n while(self.cap.isOpened()):\n ret, frame = self.cap.read()\n if ret == False:\n break # when the last frame is read \n\n # different formats of results:\n if self.library == \"yolov5\":\n # predict and bring the outputs to cpu:\n results = self.predictor(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)) # convert to RGB\n predictions = results.xyxy[0].cpu()\n # find the instance indices with person:\n person_idx = predictions[:,5] == self.label_dict[\"person\"]\n # extract the corresponding boxes and scores:\n boxes = predictions[person_idx,:4].numpy()\n probs = predictions[person_idx,4].numpy()\n\n if self.library == \"detectron2\":\n # predict and bring the outputs to cpu:\n results = self.predictor(frame) # RGB conversion done automatically in detectron\n predictions = results[\"instances\"].to(\"cpu\")\n # find the instance indices with person:\n person_idx = [predictions.pred_classes == self.label_dict[\"person\"]]\n # extract the corresponding boxes and scores:\n boxes = predictions.pred_boxes[person_idx].tensor.numpy()\n probs = predictions.scores[person_idx].numpy()\n\n # draw boxes and write the frame to the video:\n if len(boxes): # check whether there are predictions\n box_frame = self.draw_person_boxes(frame, boxes, probs)\n else:\n box_frame = frame\n self.vid_writer.write(box_frame)\n\n pbar.update(1)\n pbar.close()\n\n # release the video capture object and write object:\n self.cap.release()\n self.vid_writer.release()\n\n print(\"Inferene on the video file took %0.3f seconds\"%(time.time()-start))",
"def pzt_energy_scan(\n moving_pzt,\n start,\n stop,\n steps,\n eng_list,\n detectors=[dcm.th2, Vout2],\n repeat_num=1,\n sleep_time=1,\n fn=\"/home/xf18id/Documents/FXI_commision/DCM_scan/\",\n):\n det = [det.name for det in detectors]\n det_name = \"\"\n for i in range(len(det)):\n det_name += det[i]\n det_name += \", \"\n det_name = \"[\" + det_name[:-2] + \"]\"\n txt = f\"pzt_energy_scan(moving_pzt={moving_pzt.name}, start={start}, stop={stop}, steps={steps}, eng_list, detectors={det_name}, repeat_num={repeat_num}, sleep_time={sleep_time}, fn={fn})\\neng+list={eng_list}\\n Consisting of:\\n\"\n insert_text(txt)\n eng_ini = XEng.position\n yield from abs_set(shutter_open, 1)\n yield from bps.sleep(1)\n yield from abs_set(shutter_open, 1)\n print(\"shutter open\")\n for eng in eng_list:\n yield from abs_set(XEng, eng, wait=True)\n current_eng = XEng.position\n yield from bps.sleep(1)\n print(\"current X-ray Energy: {:2.1f}keV\".format(current_eng))\n yield from pzt_scan_multiple(\n pzt_dcm_th2,\n start,\n stop,\n steps,\n detectors,\n repeat_num=repeat_num,\n sleep_time=sleep_time,\n fn=fn,\n )\n yield from abs_set(XEng, eng_ini, wait=True)\n yield from abs_set(shutter_close, 1)\n yield from bps.sleep(1)\n yield from abs_set(shutter_close, 1)\n txt_finish = '## \"pzt_energy_scan()\" finished'\n insert_text(txt_finish)",
"def run(self):\n\n # Setup hdf5 file and datasets\n self.vw_f = h5py.File(self.name,'w')\n self.vw,self.vwts = [],[]\n for i in range(self.n_cams):\n x,y = self.resolution[i]\n vw = self.vw_f.create_dataset('mov{}'.format(i), (self.hdf_resize, y, x), maxshape=(None, y, x), dtype='uint8', compression='lzf') \n vwts = self.vw_f.create_dataset('ts{}'.format(i), (self.hdf_resize,2), maxshape=(None,2), dtype=np.float64, compression='lzf')\n self.vw.append(vw)\n self.vwts.append(vwts)\n \n # Counters and buffers\n _sav_idx = [0]*self.n_cams # index within hdf5 dataset\n _buf_idx = [0]*self.n_cams # index of in-memory buffer that is periodicially dumped to hdf5 dataset\n _saving_buf,_saving_ts_buf = [],[]\n for i in range(self.n_cams):\n x,y = self.resolution[i]\n sb = np.empty((self.buffer_size,y,x), dtype=np.uint8)\n stb = np.empty((self.buffer_size,2), dtype=np.float64)\n _saving_buf.append(sb)\n _saving_ts_buf.append(stb)\n\n cams_running = [True for i in range(self.n_cams)]\n # Main loop\n while any(cams_running):\n # For all datasets: if there's not enough room to dump another buffer's worth into dataset, extend it\n # Then read new frames, and save/query as desired\n for di in range(self.n_cams):\n if not cams_running[di]:\n continue\n \n if self.vw[di].shape[0]-_sav_idx[di] <= self.buffer_size:\n assert self.vw[di].shape[0] == self.vwts[di].shape[0], 'Frame and timestamp dataset lengths are mismatched.'\n self.vw[di].resize((self.vw[di].shape[0]+self.hdf_resize, self.vw[di].shape[1], self.vw[di].shape[2]))\n self.vwts[di].resize((self.vwts[di].shape[0]+self.hdf_resize,self.vwts[di].shape[1]))\n \n # Get new frames from buffer, breaking out if empty and kill flag has been raised\n ts=temp=bsave=None\n try:\n ts,temp,bsave = self.frame_buffer[di].get(block=False)\n except Queue.Empty:\n if self.kill_flag.value:\n cams_running[di] = False\n continue\n\n if self.kill_flag.value==True:\n logging.info('Final flush for camera {}: {} frames remain.'.format(di, self.frame_buffer[di].qsize()))\n \n if di==self.query_idx and self.query_flag.value:\n self.query_queue[:] = temp.copy()\n self.query_queue_ts.value = ts[1]\n self.query_flag.value = False\n \n if bsave: # flag that this frame was added to queue during a saving period\n\n # add new data to in-memory buffer\n x,y = self.resolution[di]\n _saving_buf[di][_buf_idx[di]] = temp.reshape([y,x])\n _saving_ts_buf[di][_buf_idx[di]] = ts\n _buf_idx[di] += 1\n # if necessary, flush out buffer to hdf dataset\n if (self.flushing.value and _buf_idx[di]>=self.min_flush) or _buf_idx[di] >= self.buffer_size:\n if _buf_idx[di] >= self.buffer_size:\n logging.warning('Dumping camera b/c reached max buffer (buffer={}, current idx={})'.format(self.buffer_size, _buf_idx[di]))\n self.vw[di][_sav_idx[di]:_sav_idx[di]+_buf_idx[di],:,:] = _saving_buf[di][:_buf_idx[di]]\n self.vwts[di][_sav_idx[di]:_sav_idx[di]+_buf_idx[di],:] = _saving_ts_buf[di][:_buf_idx[di]]\n _sav_idx[di] += _buf_idx[di]\n _buf_idx[di] = 0\n\n # final flush:\n for di in range(self.n_cams):\n self.vw[di][_sav_idx[di]:_sav_idx[di]+_buf_idx[di],:,:] = _saving_buf[di][:_buf_idx[di]]\n self.vwts[di][_sav_idx[di]:_sav_idx[di]+_buf_idx[di]] = _saving_ts_buf[di][:_buf_idx[di]]\n _sav_idx[di] += _buf_idx[di]\n # cut off all unused allocated space \n self.vw[di].resize([_sav_idx[di],self.vw[di].shape[1],self.vw[di].shape[2]])\n self.vwts[di].resize([_sav_idx[di],2])\n\n self.vw_f.close()\n self.saving_complete.value = True",
"def main(dataset, n, ms=False, out=sys.stdout):\n # build lists of paths previews files, tif, rpc and dzi files\n prv_paths = ' '.join([os.path.join(dataset, 'prv_%02d.jpg' % (i+1)) for i in xrange(n)])\n tif_paths = ' '.join([os.path.join(dataset, 'im_panchro_%02d.tif' % (i+1)) for i in xrange(n)])\n rpc_paths = ' '.join([os.path.join(dataset, 'rpc_%02d.xml' % (i+1)) for i in xrange(n)])\n dzi8_paths, dzi16_paths = None, None\n if ms:\n ms_paths = ' '.join([os.path.join(dataset, 'im_ms_%02d.tif' % (i+1)) for i in xrange(n)])\n if os.path.isfile(os.path.abspath(os.path.join(dataset,\n 'im_panchro_8BITS_01.dzi'))):\n dzi8_paths = ' '.join([os.path.join('input', dataset,\n 'im_panchro_8BITS_%02d.dzi' %\n (i+1)) for i in xrange(n)])\n if os.path.isfile(os.path.abspath(os.path.join(dataset,\n 'im_panchro_16BITS_01.dzi'))):\n dzi16_paths = ' '.join([os.path.join('input', dataset,\n 'im_panchro_16BITS_%02d.dzi' %\n (i+1)) for i in xrange(n)])\n\n # read infos in DIM*.XML file\n dim_xml_file = os.path.join(dataset, 'dim_01.xml')\n tif_file = os.path.join(dataset, 'im_panchro_01.tif')\n if os.path.isfile(dim_xml_file): # check if the link points to an existing file\n date = grep_xml(dim_xml_file, \"IMAGING_DATE\")\n satellite = grep_xml(dim_xml_file, \"INSTRUMENT_INDEX\")\n elif os.path.isfile(tif_file):\n date = extract_date_from_pleiades_filename(os.readlink(tif_file))\n satellite = extract_satellite_from_pleiades_filename(os.readlink(tif_file))\n else:\n date = 'DD-MM-YYYY'\n satellite = 'Pleiades 1X'\n\n # print to stdout\n if dzi8_paths or dzi16_paths:\n print('[%s]' % dataset, file=out)\n print('files = ', prv_paths, file=out)\n print('tif = ', tif_paths, file=out)\n print('rpc = ', rpc_paths, file=out)\n if ms:\n print('clr = ', ms_paths, file=out)\n if dzi8_paths:\n print('dzi8 = ', dzi8_paths, file=out)\n if dzi16_paths:\n print('dzi16 = ', dzi16_paths, file=out)\n s = dataset.split(os.path.sep)\n if len(s) == 3: # ie the path is of the kind 'pleiades/reunion/dataset_1'\n print('title = %s (%s)' % (s[1].capitalize(), s[2][-1]), file=out) # ie 'Reunion (1)'\n elif len(s) == 2: # ie the path is of the kind 'pleiades/reunion'\n print('title = %s' % s[1].capitalize(), file=out) # ie 'Reunion'\n else:\n print('path %s not expected by the author of the script: ' % dataset, s, file=sys.stderr)\n print('date = %s' % date, file=out)\n print('satellite = Pleiades %s' % satellite, file=out)\n print('nb_img = %d' % n, file=out)\n if ms:\n print('color = panchro_xs', file=out)\n else:\n print('color = panchro', file=out)",
"def enumerate_detector(det, thresholds, shot_ok=None, tiles=None, nimages=np.inf, startimg=0, stopimg=np.inf, correction=False, progress=True):\n global terminated\n Ncorrect = 64\n correctionphotonthres = 3000\n if not isinstance(det, h5py.Group):\n raise TypeError('det should be a h5 group')\n if tiles is None:\n tiles = [k for k in det.keys() if 'tile' in k]\n else:\n newtiles = []\n for t in tiles:\n if t in det:\n newtiles.append(t)\n elif f'tile{t}' in det:\n newtiles.append(f'tile{t}')\n else:\n raise KeyError(f'tile {t} not found')\n tiles = newtiles\n datanames = [(f'{det.name}/{t}/data') for t in tiles]\n filename = det.file.filename\n\n nshots = det[f'{tiles[0]}/data'].shape[0]\n startimg = int(np.clip(startimg, 0, nshots))\n stopimg = int(np.clip(stopimg, startimg, nshots))\n tileshape = det[f'{tiles[0]}/data'].shape[1:]\n correctmask = [correctionmask(det[t]['absfft0/mean'], Ncorrect) for t in tiles]\n if shot_ok is None:\n shot_ok = np.ones(nshots, np.bool)\n ind_filtered = 0\n data = np.zeros((len(tiles), *tileshape))\n willread = np.copy(shot_ok)\n willread[:startimg] = False\n willread[stopimg:] = False\n with datasetreader(datanames, filename, sizecache=10, willread=willread) as reader:\n\n for ind_orig in range(startimg, stopimg):\n if not shot_ok[ind_orig]:\n continue\n if ind_filtered >= nimages or terminated != 0:\n return\n if progress and ind_filtered % 10 == 0:\n print(ind_filtered, end=' ', flush=True)\n\n for it, t in enumerate(tiles):\n cdat = np.array(reader[ind_orig, it], dtype=np.float, order='C')\n if correction:\n correct(cdat, correctionphotonthres, Ncorrect, correctmask[it])\n data[it, ...] = cdat\n ev, number, scatter = getstats(data, thresholds)\n\n yield (ind_filtered, ind_orig, data, ev, number, scatter)\n\n ind_filtered += 1",
"def makeDataPupilFn(settings, dither):\n index = 1\n\n for [bg, photons] in settings.photons:\n\n wdir = \"test_{0:02d}\".format(index)\n print(wdir)\n if not os.path.exists(wdir):\n os.makedirs(wdir)\n\n bg_f = lambda s, x, y, i3 : background.UniformBackground(s, x, y, i3, photons = bg)\n cam_f = lambda s, x, y, i3 : camera.Ideal(s, x, y, i3, settings.camera_offset)\n pp_f = lambda s, x, y, i3 : photophysics.AlwaysOn(s, x, y, i3, photons)\n psf_f = lambda s, x, y, i3 : psf.PupilFunction(s, x, y, i3, settings.pixel_size, settings.zmn)\n \n sim = simulate.Simulate(background_factory = bg_f,\n camera_factory = cam_f,\n photophysics_factory = pp_f,\n psf_factory = psf_f,\n dither = dither,\n x_size = settings.x_size,\n y_size = settings.y_size)\n \n sim.simulate(wdir + \"/test.tif\", \"grid_list.hdf5\", settings.n_frames)\n\n index += 1\n\n makePeakFile(settings)",
"def detect_report(day, json_conf):\n rpt = mk_css()\n rpt += \"<h1>Detection Report for \" + STATION_ID + \" on \" + day + \"</h1>\"; \n data_dir = \"/mnt/ams2/SD/proc2/\" + day + \"/data/\"\n image_dir = \"/mnt/ams2/SD/proc2/\" + day + \"/images/\"\n video_dir = \"/mnt/ams2/SD/proc2/\" + day + \"/hd_save/\"\n meteor_files = glob.glob(data_dir + \"*-meteor.json\")\n non_meteor_files = glob.glob(data_dir + \"*-nometeor.json\")\n detect_files = glob.glob(data_dir + \"*-detect.json\")\n rpt += \"<h2>\" + str(len(meteor_files)) + \" Meteors detected </h2>\" \n for mf in sorted(meteor_files):\n print(mf)\n fn = mf.split(\"/\")[-1]\n img = mf.replace(\"data/\", \"images/\")\n vid = mf.replace(\"data/\", \"\")\n img = img.replace(\"-meteor.json\", \"-stacked-tn.png\")\n vid = vid.replace(\"-meteor.json\", \".mp4\")\n if cfe(img) == 0:\n if cfe(vid) == 1:\n print(\"Image missing stack vid\")\n stack = quick_video_stack(vid)\n thumb = thumb = cv2.resize(stack, (PREVIEW_W, PREVIEW_H)) \n cv2.imwrite(img, thumb)\n print(\"THUMB:\", thumb)\n \n rpt += \"<div class='float_div'>\"\n rpt += \"<img src=\" + img + \">\"\n rpt += \"<br><label style='text-align: center'>\" + fn + \" <br>\" \n rpt += \"</label></div>\"\n\n table = det_table(non_meteor_files, \"nometeor\")\n rpt += \"<div style='clear: both'></div>\"\n rpt += \"<h2>\" + str(len(non_meteor_files)) + \" Auto Rejected Meteor Detections</h2>\"\n rpt += table\n\n if False:\n table = det_table(detect_files, \"detect\")\n rpt += \"<div style='clear: both'></div>\"\n rpt += \"<h2>\" + str(len(detect_files)) + \" Non Meteor Detections</h2>\"\n rpt += table\n\n\n out = open(data_dir + \"report.html\", \"w\")\n out.write(rpt)\n out.close()\n print(data_dir + \"report.html\")",
"def generate_detections(encoder, mot_dir, output_dir, detection_dir=None):\n if detection_dir is None:\n detection_dir = mot_dir\n try:\n os.makedirs(output_dir)\n except OSError as exception:\n if exception.errno == errno.EEXIST and os.path.isdir(output_dir):\n pass\n else:\n raise ValueError(\n \"Failed to created output directory '%s'\" % output_dir)\n\n for sequence in os.listdir(mot_dir):\n print(\"Processing %s\" % sequence)\n sequence_dir = os.path.join(mot_dir, sequence)\n\n # image_dir = os.path.join(sequence_dir, \"img1\")\n image_dir = sequence_dir\n image_filenames = {\n int(f[6:10]): os.path.join(image_dir, f) \n for f in os.listdir(image_dir) if os.path.isfile(os.path.join(image_dir, f))}\n\n detection_file = os.path.join(\n detection_dir, sequence, \"det/det.txt\")\n detections_in = np.loadtxt(detection_file, delimiter=' ')\n detections_out = []\n\n frame_indices = detections_in[:, 0].astype(np.int)\n min_frame_idx = frame_indices.astype(np.int).min()\n max_frame_idx = frame_indices.astype(np.int).max()\n for frame_idx in range(min_frame_idx, max_frame_idx + 1):\n print(\"Frame %05d/%05d\" % (frame_idx, max_frame_idx))\n mask = frame_indices == frame_idx\n rows = detections_in[mask]\n\n if frame_idx not in image_filenames:\n print(\"WARNING could not find image for frame %d\" % frame_idx)\n continue\n bgr_image = cv2.imread(\n image_filenames[frame_idx], cv2.IMREAD_COLOR)\n features = encoder(bgr_image, rows[:, 2:6].copy())\n detections_out += [np.r_[(row, feature)] for row, feature\n in zip(rows, features)]\n\n output_filename = os.path.join(output_dir, \"%s.npy\" % sequence)\n np.save(\n output_filename, np.asarray(detections_out), allow_pickle=False)"
]
| [
"0.6212296",
"0.58695394",
"0.5823236",
"0.57866335",
"0.5777293",
"0.5715863",
"0.5676956",
"0.5636035",
"0.5631047",
"0.55860215",
"0.5562953",
"0.55607384",
"0.554145",
"0.55249727",
"0.54934704",
"0.549295",
"0.54726213",
"0.5466943",
"0.54605633",
"0.5457797",
"0.5455398",
"0.543526",
"0.54323906",
"0.5428814",
"0.54150593",
"0.54068226",
"0.5401754",
"0.54013747",
"0.5394853",
"0.53905106"
]
| 0.6958013 | 0 |
With the given energy list, scan the pzt multiple times and record the signal from various detectors; the file will be saved to a local folder. | def pzt_energy_scan(
moving_pzt,
start,
stop,
steps,
eng_list,
detectors=[dcm.th2, Vout2],
repeat_num=1,
sleep_time=1,
fn="/home/xf18id/Documents/FXI_commision/DCM_scan/",
):
det = [det.name for det in detectors]
det_name = ""
for i in range(len(det)):
det_name += det[i]
det_name += ", "
det_name = "[" + det_name[:-2] + "]"
    txt = f"pzt_energy_scan(moving_pzt={moving_pzt.name}, start={start}, stop={stop}, steps={steps}, eng_list, detectors={det_name}, repeat_num={repeat_num}, sleep_time={sleep_time}, fn={fn})\neng_list={eng_list}\n Consisting of:\n"
insert_text(txt)
eng_ini = XEng.position
yield from abs_set(shutter_open, 1)
yield from bps.sleep(1)
yield from abs_set(shutter_open, 1)
print("shutter open")
for eng in eng_list:
yield from abs_set(XEng, eng, wait=True)
current_eng = XEng.position
yield from bps.sleep(1)
print("current X-ray Energy: {:2.1f}keV".format(current_eng))
yield from pzt_scan_multiple(
pzt_dcm_th2,
start,
stop,
steps,
detectors,
repeat_num=repeat_num,
sleep_time=sleep_time,
fn=fn,
)
yield from abs_set(XEng, eng_ini, wait=True)
yield from abs_set(shutter_close, 1)
yield from bps.sleep(1)
yield from abs_set(shutter_close, 1)
txt_finish = '## "pzt_energy_scan()" finished'
insert_text(txt_finish) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def readenergy(self, filelist):\r\n \r\n energy=[]\r\n tmpenergy=[]\r\n for filename in filelist:\r\n if not(os.path.exists(filename)):\r\n if self._resultfile: self._resultfile.write('Output file: \"'+filename+'\" does not exist. Restart your calculation. \\n')\r\n else: print 'Output file: \"'+filename+'\" does not exist. Restart your calculation. \\n'\r\n sys.exit()\r\n else:\r\n tmpdat=[]\r\n for key in self._keydict:\r\n infile=open(filename)\r\n lenstart=len(key['start'])\r\n lenblock=len(key['startblock'])\r\n if lenblock:\r\n tmp=''\r\n readlist=[]\r\n readout=0\r\n startblock=0\r\n startcol=[]\r\n lcount=0\r\n count=0\r\n for tmpc in infile:\r\n if tmp>'':\r\n # We are in the area that we should read.\r\n if tmpc.count(key['stop']):\r\n tmp=''\r\n else:\r\n # We are currently in a data block.\r\n if readout:\r\n if (ischar and tmpc.count(key['stopblock'])) or (not(ischar) and len(tmpc)<=1):\r\n readout=readout-1\r\n else:\r\n # Read the columns.\r\n if lcount==0:\r\n for i in range(0, len(startcol)):\r\n tmpenergy.append([float(key['factor'])*float(tmpc[startcol[i][0]:startcol[i][0]+startcol[i][1]]),''])\r\n ilast=startlen\r\n count=0\r\n # Get the labeling information form the block.\r\n if key['type']=='label' or key['type']=='': \r\n # For type=label we read the symmetry of the orbitals. And put it at the end of the list containing the energy values.\r\n for i in range(ilast, len(tmpc)-1):\r\n if tmpc[i:i+1]==' ':\r\n if i-ilast>1:\r\n if int(tmpc[ilast:i]): \r\n if len(tmpenergy[ecount][1]): tmpenergy[ecount][1]=tmpenergy[ecount][1]+'<->'+readlist[lcount+count]\r\n else: tmpenergy[ecount][1]=readlist[lcount+count]\r\n count=count+1\r\n ilast=i\r\n ecount=ecount+1\r\n else:\r\n if self._resultfile: self._resultfile.write('ERROR: (parsedirac.dat). Type of data block is not implemented. '+key['type']+' key= '+key['key']+' \\n')\r\n else: print 'ERROR: (parsedirac.dat). Type of data block is not implemented. type=\"'+key['type']+'\" key= '+key['key']+' \\n'\r\n sys.exit()\r\n elif tmpc.count(key['startblock']):\r\n try:\r\n readout=float(key['stopblock'])\r\n ischar=0\r\n except:\r\n readout=1\r\n ischar=1\r\n # If readout=0 we only need to read to the end of this line.\r\n if readout==0:\r\n ilast=tmpc.index(key['startblock'])+len(key['startblock'])\r\n for i in range(ilast, len(tmpc)):\r\n if tmpc[i:i+1] in (' ', '\\n'):\r\n if (i-ilast)>1:\r\n try:\r\n tmpdat.append(key['factor']*float(tmpc[ilast+1:i]))\r\n except:\r\n if self._resultfile: self.resultfile.write('ERROR (parsedirac.py): Numirical data expected. File= '+filename+'\\n'+tmpc+'\\n')\r\n else: print 'ERROR (parsedirac.py): Numirical data expected. 
File= '+filename+'\\n'+tmpc+'\\n'\r\n ilast=i\r\n else:\r\n # Read the positions of the columns containing energy information.\r\n for col in key['columns']:\r\n if tmpc.count(col):\r\n startcol.append((tmpc.index(col),len(col))) \r\n startlen=tmpc.index(key['startblock'])+len(key['startblock'])\r\n ilast=startlen\r\n lcount=lcount+count\r\n count=0\r\n ecount=0\r\n # Read the labels after the block start commando.\r\n for i in range(ilast, len(tmpc)-1):\r\n if tmpc[i:i+1]==' ':\r\n if i-ilast>1:\r\n readlist.append(tmpc[ilast+1:i])\r\n count=count+1\r\n ilast=i\r\n elif tmpc.count(key['start']):\r\n tmp=key['start']\r\n tmpenergy=[]\r\n tmpdat.append(tmpenergy)\r\n else:\r\n # If we read only one line, find it and read all numbers on it.\r\n for tmpc in infile: \r\n tmpn=tmpc.count(key['start'])\r\n if tmpn:\r\n ilast=tmpc.index(key['start'])+len(key['start'])\r\n for i in range(ilast, len(tmpc)):\r\n if tmpc[i:i+1] in (' ', '\\n'):\r\n if (i-ilast)>1:\r\n try:\r\n tmpdat.append(key['factor']*float(tmpc[ilast+1:i]))\r\n except:\r\n if self._resultfile: self.resultfile.write('ERROR (parsedirac.py): Numirical data expected. File= '+filename+'\\n'+tmpc+'\\n')\r\n else: print 'ERROR (parsedirac.py): Numirical data expected. File= '+filename+'\\n'+tmpc+'\\n'\r\n ilast=i\r\n infile.close()\r\n energy.append(tmpdat)\r\n return energy",
"def save_band_for_path(self, path, filename):\n with open(filename, 'ab') as file_hander:\n for point in path:\n energies = self.problem.energy_eigenvalues(point[0], point[1])\n np.savetxt(file_hander, energies)",
"def addfiles(self, filelist):\r\n for tmpc in filelist:\r\n self._filelist.append(tmpc)\r\n tmp_energy=self.readenergy(filelist)\r\n for tmpdat in tmp_energy:\r\n self._energy.append(tmpdat)\r\n return tmp_energy",
"def setfiles(self, filelist):\r\n self._filelist=filelist\r\n self._energy=self.readenergy(filelist)",
"def write_input_file(y,z,fname):\n file = open('c:/4nec2/out/' + fname + '.nec', 'w')\n file.write('CM Seeddesign \\n')\n file.write('CM Zigzag Antenna \\n')\n file.write('CE File generated by python \\n')\n seg = 1\n\n #write the antenna\n for i in range(0,len(y)-1):\n file.write('GW %3i %3i %8.4f %8.4f %8.4f %8.4f %8.4f %8.4f %8.4f\\n' % (1,seg,0,y[i],z[i],0,y[i+1],z[i+1],1))\n\n file.write('GE 0 \\n')\n file.write('EK \\n')\n file.write('EX %3i %3i %3i %3i %3i %3i %3i\\n' % (0,1,1,1,1,0,0))\n file.write('GN -1 \\n')\n \n file.write('FR %3i %3i %3i %3i %8.4f %8.4f\\n' % (0,1,0,0,900,0))\n file.write('FR %3i %3i %3i %3i %8.4f %8.4f\\n' % (0,11,0,0,850,10))\n\n file.write('LD %3i %3i %3i %3i %8.4f %8.4f\\n' % (5,1,0,0,58000000,2))\n file.write('RP %3i %3i %3i %3i %8.4f %8.4f %8.4f %8.4f\\n' % (0,1,1,1000,90,0,0,0))\n\n file.write('EN \\n')\n file.close()",
"def use_dimer_files(numb_files, filenameEs, fname_save_interpE, fname_savewfns):\n for i in range(1, numb_files + 1):\n # parse_ob = ParseGaussian() # need to add this\n\n engy_file = np.load(file=filenameEs + str(i) + \".npy\")\n interp_ob = Interpolate1D(grid_arr, engy_file, 2000) # grid_arr is hard coded\n new_xOH, new_yE = interp_ob.get_interp()\n # np.save(file=fname_save_interpE + str(i), arr=new_yE)\n # np.save(file=\"xOH5\", arr=new_xOH) # saved in bohr # EDIT EACH TIME\n\n dvr_ob = DVR(new_xOH, new_yE, 1728.3085005881399)\n wfn_data, eval_data = dvr_ob.run_dvr()\n np.save(file=fname_savewfns + str(i), arr=wfn_data)\n\n\n return None",
"def pzt_scan_multiple(\n moving_pzt,\n start,\n stop,\n steps,\n detectors=[Vout2],\n repeat_num=2,\n sleep_time=1,\n fn=\"/home/xf18id/Documents/FXI_commision/DCM_scan/\",\n):\n\n det = [det.name for det in detectors]\n det_name = \"\"\n for i in range(len(det)):\n det_name += det[i]\n det_name += \", \"\n det_name = \"[\" + det_name[:-2] + \"]\"\n txt = f\"pzt_scan_multiple(moving_pzt={moving_pzt.name}, start={start}, stop={stop}, steps={steps}, detectors={det_name}, repeat_num={repeat_num}, sleep_time={sleep_time}, fn={fn})\\n Consisting of:\\n\"\n insert_text(txt)\n\n current_eng = XEng.position\n df = pd.DataFrame(data=[])\n\n for num in range(repeat_num):\n yield from pzt_scan(\n moving_pzt, start, stop, steps, detectors=detectors, sleep_time=sleep_time\n )\n yield from abs_set(XEng, current_eng, wait=True)\n print(\"\\nscan finished, ploting and saving data...\")\n fig = plt.figure()\n for num in reversed(range(repeat_num)):\n h = db[-1 - num]\n scan_id = h.start[\"scan_id\"]\n detector_set_read = h.start[\"detector_set_read\"]\n col_x_prefix = detector_set_read[1]\n col_x = col_x_prefix + \" #\" + \"{}\".format(scan_id)\n\n motor_readout = np.array(list(h.data(col_x_prefix)))\n df[col_x] = pd.Series(motor_readout)\n\n detector_signal = h.start[\"detectors\"]\n\n for i in range(len(detector_signal)):\n det = detector_signal[i]\n\n if (det == \"Andor\") or (det == \"detA1\"):\n det = det + \"_stats1_total\"\n det_readout = np.array(list(h.data(det)))\n col_y_prefix = det\n col_y = col_y_prefix + \" #\" + \"{}\".format(scan_id)\n df[col_y] = pd.Series(det_readout)\n plt.subplot(len(detector_signal), 1, i + 1)\n plt.plot(df[col_x], df[col_y])\n plt.ylabel(det)\n\n plt.subplot(len(detector_signal), 1, len(detector_signal))\n plt.xlabel(col_x_prefix)\n plt.subplot(len(detector_signal), 1, 1)\n plt.title(\"X-ray Energy: {:2.1f}keV\".format(current_eng))\n\n now = datetime.now()\n year = np.str(now.year)\n mon = \"{:02d}\".format(now.month)\n day = \"{:02d}\".format(now.day)\n hour = \"{:02d}\".format(now.hour)\n minu = \"{:02d}\".format(now.minute)\n current_date = year + \"-\" + mon + \"-\" + day\n fn = (\n save_file_dir\n + \"pzt_scan_\"\n + \"{:2.1f}keV_\".format(current_eng)\n + current_date\n + \"_\"\n + hour\n + \"-\"\n + minu\n )\n fn_fig = fn + \".tiff\"\n fn_file = fn + \".csv\"\n df.to_csv(fn_file, sep=\"\\t\")\n fig.savefig(fn_fig)\n print(\"save to: \" + fn_file)\n txt_finish = '## \"pzt_scan_multiple()\" finished'\n insert_text(txt_finish)",
"def import_raw(self,read_teff=False,**kwargs):\n \n #build dirs\n self._build_em_dirs()\n\n for tn in self.Tn:\n #initialize counter and flag\n counter=0\n #build wait-time specific path\n tn_path = self.root_path%tn\n #create em results dir if needed\n em_res_tn_dir=os.path.join(self.em_res_top_dir,'tn%d'%tn)\n if not os.path.exists(em_res_tn_dir):\n os.makedirs(em_res_tn_dir)\n for pfile in os.listdir(tn_path):\n if 'heat_amp' not in pfile and 'dem' not in pfile:\n self.logger.debug(\"Reading %s\"%pfile)\n data = np.loadtxt(os.path.join(tn_path,pfile))\n n_index = 2\n t_index = 1\n if 'electron' in tn_path or 'ion' in tn_path: n_index += 1\n if read_teff:\n n_index = 3\n t_index = 2\n t,T,n = data[:,0],data[:,t_index],data[:,n_index]\n #calculate emission measure distribution\n self.binner.set_data(t,T,n)\n self.binner.build_em_dist()\n #save data\n with open(os.path.join(em_res_tn_dir,os.path.splitext(pfile)[0]+'.pickle'),'wb') as f:\n pickle.dump({'T':self.binner.T_em_flat, 'em':self.binner.em_flat/self.aspect_ratio_factor, 'bins':self.binner.T_em_histo_bins},f)\n #increment counter\n counter += 1\n else:\n continue\n\n #Estimate percentage of files read\n self.logger.info(\"Tn = %d s finished, Estimated total # of events simulated: %.2f %%\"%(tn, counter*int(np.ceil(t[-1]/(self.tpulse+tn)))))",
"def collect_amber_energies(top,crd):\n input_files = glob.glob(crd)\n dict_energy = {}\n\n for f in input_files:\n phi =int( f.split(\"/\")[-2]) # to be more consistent, we know that in -2 there's phi\n psi =int( f.split(\"/\")[-1].split(\".crd\")[0].split(\"structure_\")[1])\n #first fix phi and psi values:\n if phi > 180.0:\n phi = phi - 360.0\n if psi > 180.0 :\n psi = psi - 360.0\n #Amber energies\n #Here we have to create a dummy.dat file as a quantum input for Paramfit\n cmd =\"\"\" echo \"0.000\" > dummy.dat \"\"\"\n os.system(cmd)\n #Then we create a input file for Paramfit, where we say:\n #job.in is the name of the file\n #Do not use any algorithm\n #The number of structures we want the energy to be evaluated is 1\n #the coordinate are given in restart format\n cmd =\"\"\" cat> job.in << EOF\nALGORITHM=NONE\nNSTRUCTURES=1\nCOORDINATE_FORMAT=RESTART\nEOF\"\"\"\n os.system(cmd)\n #store the energy in this file tmp.dat\n amber_energy = open(\"tmp.dat\",\"w\")\n #evaluation of the potential energy with paramfit:\n cmd = \"\"\"paramfit -i job.in -p %s -c %s -q dummy.dat | grep \"Calculated energy with initial parameters\" | awk '{print $10'} > tmp.dat\"\"\" %(top,f)\n os.system(cmd)\n os.system(\"wait\")\n amber_energy.close()\n #now read the tmp.dat file and save the energy in the dictionary\n read_energy = open(\"tmp.dat\",\"r\").readlines()\n for val in read_energy:\n dict_energy[phi,psi]=float(val)\n print(\"Apparently amber energies were correctly extracted\")\n print(\"Cleaning directory\")\n cmd =\"rm tmp.dat job.in dummy.dat\"\n os.system(cmd)\n return dict_energy",
"def pulsEphem(self):\n\n hduMain = fits.open(self.ft1)\n\n # --------------------------------------------------------------------------------------------- #\n # Split the FT1 file every 4000 events\n noEv = 0\n deltEv = 5000\n count = 0\n wfil = open(os.path.dirname(self.ft1) + os.path.basename('tmpFT1.lis'), 'w')\n while noEv <= self.nevents:\n hduCols = []\n for colname, form, uni in zip(hduMain['EVENTS'].columns.names, hduMain['EVENTS'].columns.formats, hduMain['EVENTS'].columns.units):\n hduCols.append( fits.Column(name=colname, array=hduMain['EVENTS'].data[colname][noEv:noEv+deltEv], format=form, unit=uni) )\n # Updte the tstart and tstop in the header in order for tempo2 to work...\n hduMain['EVENTS'].header.set('TSTART', hduMain['EVENTS'].data['TIME'][noEv:noEv+deltEv][0])\n hduMain['EVENTS'].header.set('TSTOP', hduMain['EVENTS'].data['TIME'][noEv:noEv+deltEv][-1])\n newHDU = fits.BinTableHDU.from_columns(hduCols, name='EVENTS', header=hduMain['EVENTS'].header) \n hdulist = fits.HDUList([hduMain['PRIMARY'], newHDU, hduMain['GTI']])\n tmpName = os.path.dirname(self.ft1)+os.path.basename('tempFT1_'+str(count)+'.fits')\n hdulist.writeto(tmpName, clobber=True)\n wfil.write(tmpName + '\\n')\n noEv += deltEv\n count += 1\n if noEv != self.nevents:\n hduCols = []\n noEv -= deltEv\n for colname, form, uni in zip(hduMain['EVENTS'].columns.names, hduMain['EVENTS'].columns.formats, hduMain['EVENTS'].columns.units):\n hduCols.append( fits.Column(name=colname, array=hduMain['EVENTS'].data[colname][noEv:self.nevents], format=form, unit=uni) )\n hduMain['EVENTS'].header.set('TSTART', hduMain['EVENTS'].data['TIME'][noEv:self.nevents][0])\n hduMain['EVENTS'].header.set('TSTOP', hduMain['EVENTS'].data['TIME'][noEv:self.nevents][-1])\n newHDU = fits.BinTableHDU.from_columns(hduCols, name='EVENTS', header=hduMain['EVENTS'].header)\n hdulist = fits.HDUList([hduMain['PRIMARY'], newHDU, hduMain['GTI']])\n tmpName = os.path.dirname(self.ft1)+os.path.basename('tempFT1_'+str(count)+'.fits')\n hdulist.writeto(tmpName, clobber=True)\n wfil.write(tmpName + '\\n')\n wfil.close()\n\n hduMain.close()\n\n # --------------------------------------------------------------------------------------------- #\n # Run tempo2 for each piece of the FT1\n rfil = open(os.path.dirname(self.ft1) + 'tmpFT1.lis', 'r')\n percent = 0\n nbFiles = sum(1 for line in open(os.path.dirname(self.ft1) + 'tmpFT1.lis', 'r'))\n count = 0\n for tmpFil in rfil:\n # Print a progression bar every 5%\n if ( count / np.floor(nbFiles) * 100 ) >= percent:\n self._progressBar(percent, printEvery=5)\n percent += 5\n with open(os.devnull, 'wb') as devnull:\n subprocess.check_call(['/dsm/fermi/fermifast/glast/tempo2-2013.9.1/tempo2',\n '-gr', 'fermi', '-ft1', tmpFil[:-1], '-ft2', self.ft2, '-f', self.ephem,\n '-phase'], stdout=devnull, stderr=subprocess.STDOUT)\n count += 1\n # Replace the old ft1 by the new one with the PULSE_PHASE column\n #os.remove()\n self._gtSelect(data = os.path.dirname(self.ft1) + os.path.basename('tmpFT1.lis'))\n\n\n\n\n #self.nevents\n #J2032+4127_54683_57791_chol_pos.par\n #os.popen(\"tempo2 -gr fermi -ft1 {} -ft2 {} -f {} -phase\".format(self.ft1, self.ft2, self.ephem))",
"def collect_quantum_energies(quantum_outputs):\n #here we will cycle throught the outputs in order to detect SCF enery\n input_files = glob.glob(quantum_outputs)\n dict_energy = {}\n #now cycle through all the output gaussian files\n for f in input_files:\n #to be sure we take the last indexes\n phi =int( f.split(\"/\")[-2]) # to be more consistent, we know that in -2 there's phi\n psi =int( f.split(\"/\")[-1].split(\".out\")[0].split(\"structure_\")[1])\n #first fix phi and psi values:\n #plot from -180 to 180 so we can compare with Ramachandran\n if phi > 180.0:\n phi = phi - 360.0\n if psi > 180.0 :\n psi = psi - 360.0\n #open the output file\n gout = open(f,\"r\").readlines()\n #Extract energies\n scf = []\n for line in gout:\n if \"SCF Done\" in line:\n scf.append(line.split()[4])\n dict_energy[phi,psi] = float(scf[-1])*627.50\n print(\"Apparently quantum energies were correctly extracted\")\n\n return dict_energy",
"def set_gsenergy(self, gsenergy=None):\n self.status()\n if not gsenergy:\n if self.__cod == 'vasp': \n #getData = VASP()\n getData = vasp.Energy()\n outfile = 'vasprun.xml'\n elif self.__cod == 'espresso':\n getData = espresso.Energy()\n outfile = 'espresso.out'\n elif self.__cod == 'wien':\n getData = wien.Energy()\n \n elif self.__cod == 'exciting':\n getData = exciting.Energy()\n outfile = 'INFO.OUT'\n elif self.__cod == 'emto':\n getData = emto.Energy(funct=self.__funct)\n outfile = '%s/prn/%s'%(self.__pname,self.__emtoout)\n gsenergy=[] \n for atoms in sorted(self.__structures.items()):\n \n if self.__cod == 'wien': \n outfile = atoms[1].path.split('/')[-1] + '.scf'\n \n if not atoms[1].status:\n #print atoms[1].status\n atoms[1].gsenergy = 0\n continue\n if atoms[1].exclude:\n atoms[1].gsenergy_ignored = getData.get_gsenergy()\n atoms[1].gsenergy = 0\n continue\n if os.path.exists(atoms[1].path+'/exclude'):\n atoms[1].gsenergy_ignored = getData.get_gsenergy()\n atoms[1].gsenergy = 0\n continue\n \n \n #getData.set_outfile('%s/%s/'%atoms[0] + outfile)\n #getData.set_gsEnergy()\n #print atoms[1].path, self.__workdir + '%s/%s'%(atoms[1].path.split('/')[-2],atoms[1].path.split('/')[-1])+'/' + outfile\n #getData.set_fname(self.__workdir + '%s/'%atoms[1].path.lstrip('.') + outfile)\n if 'eta' in atoms[1].path.split('/')[-1] and self.__thermodyn:getData.set_fname(self.__workdir + '%s/%s/%s'%(atoms[1].path.split('/')[-3],atoms[1].path.split('/')[-2],atoms[1].path.split('/')[-1])+'/' + outfile)\n elif 'eta' in atoms[1].path.split('/')[-1] and not self.__thermodyn:getData.set_fname(self.__workdir + '%s/%s'%(atoms[1].path.split('/')[-2],atoms[1].path.split('/')[-1])+'/' + outfile)\n else: getData.set_fname(self.__workdir + '%s'%(atoms[1].path.split('/')[-1])+'/' + outfile)\n print getData.get_fname()\n getData.set_gsenergy()\n if self.__thermodyn and self.__mod!='structures_phonons':\n outfile_ph = 'F_TV'\n #getData.set_fname(self.__workdir + '%s/'%atoms[1].path.lstrip('.') + outfile_ph)\n #getData.T = self.__T\n \n getData.set_phenergy(self.__workdir + '%s/'%atoms[1].path.lstrip('.') + outfile_ph)\n atoms[1].phenergy = getData.get_phenergy()\n atoms[1].T = getData.T\n #atoms[1].gsenergy = getData.get_gsEnergy()\n atoms[1].gsenergy = getData.get_gsenergy()/125.\n else:\n atoms[1].gsenergy = getData.get_gsenergy()\n gsenergy.append(atoms[1].gsenergy)\n \n if self.delPoints:\n for atoms in sorted(self.__structures.items()):\n \n #print [atoms[1].eta for atoms in sorted(self.__structures.items())], gsenergy\n coeff = np.polyfit([atoms[1].eta for atoms in self.__structures.items()], gsenergy, 2)\n p = np.poly1d(coeff)\n k=0\n for (etas,energy) in zip(self.__structures.items(),gsenergy):\n #print (energy-p(etas[1].eta))**2.\n if (energy-p(etas[1].eta))**2. > 0.0004: \n gsenergy[k]=0.\n atoms[1].gsenergy = 0. \n k+=1\n self.__gsenergy = gsenergy",
"def dailyanalysis(experiment):\n import os\n for fn in os.listdir('/network/aopp/hera/mad/bakerh/fms_tmp/' +\n experiment):\n if fn.find('exe.fms') == -1 and fn.find('mppnccombine.ifc') == -1:\n storedaily('/network/aopp/hera/mad/bakerh/fms_tmp/' + experiment +\n '/' + fn + '/combine/',\n '/network/aopp/hera/mad/bakerh/data/FMS/output/' +\n experiment + '/' + fn + '/history/')\n print('Completed ' + fn)",
"def main():\n\n\t# eesAmplitudes = range(200,321,10)\n\teesAmplitudes = [\"%\"+\"%.2f_0_0\"%(i) for i in np.arange(0,1.01,.05)]\n\t# eesFrequencies = range(10,1001,20)\n\teesFrequencies = np.logspace(1,3,50)\n\t# nrnStructureFile = \"fsSFrFfMnArtMod.txt\"\n\t# nrnStructureFile = \"fsSFrFfMnArtModHuman.txt\"\n\tnrnStructureFile = \"fsMnArtModHuman.txt\"\n\t# name = \"FreqAmpModHuman_0367S\"\n\tname = \"FreqAmpModHuman_ArtmodHuman_10msBurst\"\n\n\tnSim = len(eesFrequencies)*len(eesAmplitudes)\n\tcount=0.\n\tpercLastPrint=0.\n\tprintPeriod = 0.05\n\t# simTime = 250\n\tsimTime = 15\n\tspecies = \"human\"\n\n\tfor eesAmplitude in eesAmplitudes:\n\t\tfor eesFrequency in eesFrequencies:\n\t\t\tfilName = name+\"_amp_\"+str(eesAmplitude)+\"_freq_\"+str(eesFrequency)\n\t\t\tresultFile = gt.find(\"*\"+filName+\".p\",pathToResults)\n\t\t\tif not resultFile:\n\t\t\t\treturnCode = None\n\t\t\t\twhile not returnCode==0:\n\t\t\t\t\tprogram = ['python','scripts/computeAfferentsEfferentsModulation.py',\n\t\t\t\t\t\tstr(eesFrequency),str(eesAmplitude),species,nrnStructureFile,name,\"--simTime\",str(simTime)]\n\t\t\t\t\tprint \" \".join(program)\n\t\t\t\t\tforwardSimulation = subprocess.Popen(program, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\t\t\t\t\treturnCode = None\n\t\t\t\t\twhile returnCode is None:\n\t\t\t\t\t\tmessage = forwardSimulation.stdout.readline().rstrip(\"\\n\").split()\n\t\t\t\t\t\tif message != None:print \"\\t\\t\"+\" \".join(message)+\"\\t\\t\"\n\t\t\t\t\t\treturnCode = forwardSimulation.poll()\n\t\t\t\t\tif returnCode != 0: print \"\\t\\t\\t\\t Error n: \",forwardSimulation.poll(),\" resetting simulation...\"\n\t\t\tcount+=1\n\t\t\tif count/nSim-percLastPrint>=printPeriod:\n\t\t\t\tpercLastPrint=count/nSim\n\t\t\t\tprint str(round(count/nSim*100))+\"% of simulations performed...\"\n\tplot_stats(eesAmplitudes,eesFrequencies,simTime,name)",
"def get_energies(self):\n # Total energy is the sum of kinetic plus potential energy.\n energy = self.potential_energies() + self.kinetic_energies()\n # self.energies will be used for plotting.\n self.energies[0].append(self.time)\n # e_str = str(energy)\n # # Getting 10 significant figures as energies are not exact due to the\n # # numerical approach.\n # self.energies[1].append(float(e_str[:10] + e_str[e_str.find('e'):]))\n self.energies[1].append(energy)\n\n # If it's the first iteration overwrite the file, else add to it.\n if self.time == 0:\n mode = \"w\"\n else:\n mode = \"a\"\n # Write the energy to the file.\n text = (\"Time: \" + str(self.time) + \"s. Energy: \"\n + str(energy) + \"J.\\n\")\n with open(\"energies.txt\", mode) as energies_file:\n energies_file.write(text)",
"def beforeLoop(self):\n\t\tself.loadInputFiles()\n\t\tself.loadOutputFile()\n\t\tself.addHistogram1D(\"True_Enu\", \"True Neutrino Energy [GeV]\", 100, 0.0, 10.0)#Histogram of neutrino energy\n\t\tself.addHistogram1D(\"True_Enu_Delta\", \"True Neutrino Energy from Delta producing events [GeV]\", 100, 0.0, 10.0)#Histogram of neutrino energy from Delta producing events\n\t\tself.addHistogram1D(\"Interaction_Mode_Delta\", \"NEUT interaction codes of Delta producing events\", 53, 0, 53)#Histogram of interaction modes of Delta producing events\n\t\tself.addHistogram2D(\"Vertex_Location_XY\", \"Location of interaction vertices in the X-Y plane of the detector\",100,-3000,3000,100,-3000,3000)#Histogram of vertex location in XY plane\n\t\tself.addHistogram2D(\"Vertex_Location_YZ\", \"Location of interaction vertices in the Y-Z plane of the detector\",100,-3000,3000,100,-3000,3000)#Histogram of vertex location in YZ plane\n\t\tself.addHistogram1D(\"True_Enu_Delta_inFGD\", \"Neutrino energies of FGD Delta producing events (GeV)\",100,0,10)#Histogram of neutrino energy of Deltas produced in the FGD\n\t\tself.addHistogram1D(\"Delta_Momentum\", \"Momentum of Delta baryons (GeV/c)\",100,0,5)#Histogram of neutrino energy of Deltas produced in the FGD\n\t\tself.addHistogram1D(\"Proton_Momentum\", \"Momentum of Protons from Delta decays (GeV/c)\",100,0,4)#Histogram of proton momentum from Delta decays\n\t\tself.addHistogram1D(\"Pion_Momentum\", \"Momentum of Pions from Delta decays (GeV/c)\",100,0,4)#Histogram of pion momentum from Delta decays",
"def Format_and_Merge(temperaturefile, energyfile, outfile = None, writeout = \"a\"):\n\n if not outfile:\n fout = sys.stdout #sets default output as stdout\n else:\n fout = open(outfile, writeout)\n\n with open(temperaturefile,\"r\") as fh: #open water temperature file as a file object \n temperature = list(filter(None, (fh.read().splitlines()[3:])))\n #read each line into a list and strip the newline character, omitting the header and removing empty strings\n\n temptimes = []\n for record in temperature:\n assert type(record)==str, \"temperature variable must be list of strings\" #checks for proper data type\n temptimes.append(re.sub(r'(\\d+)/(\\d+)/(\\d+)', r'20\\3-\\1-\\2', record)) \n #modify dates to match date format in energy file and append to a new list\n\n with open(energyfile,\"r\") as fh: #open energy file as a file object \n energy = fh.read().splitlines()[1:-2] # read each line and strip the newline character, omitting the header and footer\n assert type(energy[0])==str, \"energy variable must be list of strings\" #checks for proper data type\n energytimes = list(filter(None, ([times.split(',')[0] for times in energy]))) \n #split strings in $energy list at \",\" and keep time values; remove empty strings; shortened with list comprehension\n energyWh = list(filter(None, ([Wh.split(',')[-1] for Wh in energy]))) \n #split strings in $energy at \",\" and keep Wh values; remove empty strings\n\n t=0 #set value that will be used to index lists\n fout.write(\"RecordNum,Date Time,Temp(°F),Energy(Wh)\\n\") #write header in $outfile\n for time in temptimes: \n energydate = (re.search(r'\\d+-\\d+-\\d+', energytimes[t]).group(0)) #define dates in $energytimes list as $energydate\n tempdate = (re.search(r'\\d+-\\d+-\\d+', time).group(0)) #define dates in temptimes list as $tempdate\n assert tempdate >= energydate, \"data are not sorted or do not overlap\" \n #checks that dates in $energytimes and $temptimes overlap\n if tempdate == energydate: \n fout.write(\"\\n\") \n #ensures that different $temptimes strings will appear on separate lines when $tempdate == $energydate is true consecutively\n fout.write(time) #write current $temptime string\n fout.write(\",\") # write comma to ensure that $energyWh appears in new column when $tempdate > $energydate becomes Ture\n elif tempdate > energydate: #$tempdate changes when this becomes True\n fout.write(energyWh[t+1]) #write $energyWh string at index[t+1] to the existing line\n fout.write(\"/1000\") #divides $energyWh string by 1000; division is not actually resolved since $energyWh is a string\n fout.write(\"\\n\") #writes a newline\n fout.write(time) #writes the first $temptimes string at the new $tempdate\n t = t+1 #changes $energydate so that $tempdate == $energydate is true; changes the index of $energyWh\n return",
"def file_setup(self):\n #output a .edf file if the input is txt\n if self.args.input_format == 'txt':\n signals = []\n headers = []\n \n #read sample frequency from a .xml file\n if self.args.is_test:\n self.sample_rate = 1024\n else:\n xml_file = open(self.args.input_path + self.args.input_name + '.xml', \"r\")\n xml_content = xml_file.read()\n my_ordered_dict = xmltodict.parse(xml_content)\n dict = json.loads(json.dumps(my_ordered_dict))\n self.sample_rate = eval(dict['RECORD_INFO']['Record']['SamplesFreq'])\n \n #define header, needed for .edf file\n# header = {'label':'ch_name', \n# 'dimension': 'uV',\n# 'sample_rate': self.sample_rate,\n# 'physical_max': 5000,\n# \"physical_min\": -5000,\n# 'digital_max': 5000,\n# 'digital_min': -5000,\n# 'transducer': 'None',\n# 'prefilter': 'None'}\n\n# j = 0\n for i in self.files:\n if i[-3:] != 'xml' and i[-4:] != 'xysw':\n raw = np.loadtxt(self.args.input_path + i)\n self.physical_max.append(np.max(raw))\n self.physical_min.append(np.min(raw))\n \n \n signals.append(raw)\n# new_header = header.copy()\n# new_header['label'] = 'ch' + str(j)\n# new_header['physical_max'] = np.max(raw)\n# new_header['physical_min'] = np.min(raw)\n\n# j = j+1\n# headers.append(new_header)\n self.ch_num = self.ch_num+1\n \n #write edf\n with open(self.output_edf_original, 'w') as output:\n flag = pyedflib.highlevel.write_edf_quick(output.name, signals, self.sample_rate, digital=False)\n if flag == False:\n print('unable to save file into .edf')\n exit()\n else:\n print('txt data loaded into edf, edf saved at ./output_edf as: ' + self.output_edf_original)\n self.raw=mne.io.read_raw_edf(self.output_edf_original,preload=True)\n self.ch_names = self.raw.ch_names\n \n #if already a .edf\n elif self.args.input_format == 'bdf':\n self.raw = mne.io.read_raw_bdf(self.args.input_path + self.files[0], preload = True)\n self.ch_num = len(self.raw.ch_names)\n self.ch_names = self.raw.ch_names\n self.sample_rate = self.raw.info['sfreq']\n elif self.args.input_format == 'edf':\n self.raw = mne.io.read_raw_edf(self.args.input_path + self.files[0], preload = True)\n self.ch_num = len(self.raw.ch_names)\n self.ch_names = self.raw.ch_names\n self.sample_rate = self.raw.info['sfreq']\n elif self.args.input_format =='mne':\n mne_exp = mne.datasets.eegbci.load_data(1, 2, path=None, force_update=False, update_path=None, base_url='https://physionet.org/files/eegmmidb/1.0.0/', verbose=None)[0]\n self.raw = mne.io.read_raw_edf(mne_exp, preload = True)\n self.ch_num = len(self.raw.ch_names)\n self.ch_names = self.raw.ch_names\n self.sample_rate = self.raw.info['sfreq']\n \n \n return self.raw",
"def main():\n args = get_args()\n\n entries = []\n\n noe_dim = \"h1\" if args.hch else \"c1\" # save the name of the noe dimension\n\n with open(args.sparkylist) as lines:\n\n lines = lines.readlines()\n # lines = set(lines) # remove duplicate lines\n peak = 1\n for idx, line in enumerate(lines):\n idx = idx + 1\n\n try:\n label, c1, c2, h2, intensity, *rest = line.split()\n\n c1 = float(c1) # convert these to floats\n c2 = float(c2)\n h2 = float(h2)\n intensity = float(intensity)\n\n label = f\"peak{peak}\"\n peak += 1\n\n except ValueError:\n print(f\"invalid NOE definition on line {idx}\")\n continue\n\n dic = {\"label\": label, noe_dim: c1,\n \"c2\": c2, \"h2\": h2, \"intensity\": intensity}\n\n entries.append(dic)\n\n # create dataframe and write out\n csv = pd.DataFrame(entries)\n order = [\"label\", noe_dim, \"c2\", \"h2\", \"intensity\"]\n csv.to_csv(args.output, columns=order, index=False)",
"def find_echo(file_name): #define a function\r\n\r\n data_list = [] #variable for storing data from a single file\r\n \r\n with open(file_name, 'r') as file: #read the file \r\n text = file.read().replace('\\n','')\r\n items = text.split(',')\r\n items_array = np.array(items) #put the data into a numpy array\r\n items_clean = np.delete(items_array,-1)\r\n numbers = items_clean.astype('int32') #convert the data from string to integer\r\n \r\n data_list.append(numbers) #apppend all amplitdue values to a list\r\n \r\n amp_array = np.vstack(data_list) #transform a list into numpy array\r\n \r\n peaks = scipy.signal.find_peaks((amp_array[0]), height = 200) #find all peaks in the amplitude data\r\n\r\n peaks_high_tof = peaks[0]>3000 #find the peaks above 7.5 microseconds (that is where the first echo peak appears for this battery)\r\n \r\n amp_value = peaks[1]['peak_heights'][peaks_high_tof].max() #find the 1st echo peak amplitude value\r\n\r\n tof_index = np.where(peaks[1]['peak_heights'] == amp_value)[0][-1] #find the index of ToF of 1st echo peak\r\n tof_value = tof[peaks[0][tof_index]] #find the corresponding ToF value\r\n \r\n return tof_value, amp_value #this spits out two values for every file\r",
"def extract_energies(self):\n path2save = 'Analysis/energies.pkl'\n #check, if I have to extract them, or they are already extracted. This the latter case, load them.\n if os.path.exists(path2save):\n print(\"extraction of the polarizaion has already been done. Loading polarizations from from pkl\")\n # TODO delete to check if exists above and do load without doing\n with open('Analysis/energies.pkl', 'rb') as fid:\n [self.E0_plus, self.E0_0, self.E0_minus,\n self.V0_plus, self.V0_0, self.V0_minus,\n self.V_env_plus, self.V_env_0, self.V_env_minus,\n self.E_env_plus, self.E_env_0, self.E_env_minus,\n self.n_mols] \\\n = pickle.load(fid)\n else:\n print('Energies are being extracting and will be saved to pkl')\n for i, radius in enumerate(self.radii):\n self.E_sd_plus[radius] = {}\n self.E_sd_0[radius] = {}\n self.E_sd_minus[radius] = {}\n\n self.E_sum_env_plus[radius] = {}\n self.E_sum_env_0[radius] = {}\n self.E_sum_env_minus[radius] = {}\n\n self.V0_plus[radius] = {}\n self.V0_0[radius] = {}\n self.V0_minus[radius] = {}\n\n self.E_env_plus[radius] = {}\n self.E_env_0[radius] = {}\n self.E_env_minus[radius] = {}\n\n self.V_env_plus[radius] = {}\n self.V_env_0[radius] = {}\n self.V_env_minus[radius] = {}\n\n self.n_mols[radius] = {}\n\n for j, core_id in enumerate(self.core_ids):\n #path2file_ip = \\\n # 'Analysis/' + self.dict_radii_folder_IP[radius] + '/Matrix-analysis-IP_' \\\n # + self.mol_name + '-Mol_' + str(core_id) + '_C_1.yml'\n\n path2file_ip = \\\n 'Analysis/IP_by_radius/' + self.dict_radii_folder_IP[radius]\\\n + '/Matrix-analysis-IP_' + self.mol_name + '.yml' # new\n path2file_ea = \\\n 'Analysis/EA_by_radius/' + self.dict_radii_folder_EA[radius]\\\n + '/Matrix-analysis-EA_' + self.mol_name + '.yml'\n\n # IP. Charged states: \"+\" and \"0\"\n with open(path2file_ip) as fid:\n ip_dict = yaml.load(fid, Loader=yaml.SafeLoader)\n with open(path2file_ea) as fid:\n ea_dict = yaml.load(fid, Loader=yaml.SafeLoader)\n\n\n # number of mols extraction\n self.n_mols[radius][core_id] = len(ip_dict[int(core_id)]['energies'])\n\n # sd extraction. 
E_sd = E_0 + V_0\n self.E_sd_plus[radius][core_id] = ip_dict[int(core_id)]['energies'][int(core_id)]['total_e_charged'] #new\n self.E_sd_0[radius][core_id] = ip_dict[core_id]['energies'][int(core_id)]['total_e_uncharged']\n self.E_sd_minus[radius][core_id] = ea_dict[int(core_id)]['energies'][int(core_id)]['total_e_charged']\n # E_0\n self.E0_plus[core_id] = ip_dict[int(core_id)]['energies'][int(core_id)]['total_e_charged_vacuum']\n self.E0_0[core_id] = ip_dict[int(core_id)]['energies'][int(core_id)]['total_e_uncharged_vacuum']\n self.E0_minus[core_id] = ea_dict[int(core_id)]['energies'][int(core_id)]['total_e_charged_vacuum']\n # # E_0_vacuum\n # self.E0_plus_vacuum[core_id] =\n # self.E0_0_vacuum[core_id] =\n # self.E0_minus_vacuum[core_id] =\n\n\n # V_0\n self.V0_plus[radius][core_id] = self.E_sd_plus[radius][core_id] - self.E0_plus[core_id]\n self.V0_0[radius][core_id] = self.E_sd_0[radius][core_id] - self.E0_0[core_id]\n self.V0_minus[radius][core_id] = self.E_sd_minus[radius][core_id] - self.E0_minus[core_id]\n\n # E_sum_env = \\sum_i\\ne 0 E_i \\sum_{j=0}^{N} V_{ij}\n ip_env_sub_dict = ip_dict[int(core_id)]['energies']#new\n del ip_env_sub_dict[int(core_id)]\n # del ip_env_sub_dict['info'] # TODO: do I need to dlt this?\n\n\n ea_env_sub_dict = ea_dict[int(core_id)]['energies'] # new\n del ea_env_sub_dict[int(core_id)]\n # del ea_env_sub_dict['info'] # TODO: do I need to dlt this?\n\n # tmp = ip_env_sub_dict['energies'][]\n\n list_total_e_env_plus = [ip_env_sub_dict[env_id]['total_e_charged'] for env_id in ip_env_sub_dict]\n self.E_sum_env_plus[radius][int(core_id)] = np.sum(list_total_e_env_plus) if not list_total_e_env_plus == [] else 0.0\n list_total_e_env_0 = [ip_env_sub_dict[env_id]['total_e_uncharged'] for env_id in ip_env_sub_dict]\n self.E_sum_env_0[radius][int(core_id)] = np.sum(list_total_e_env_0) if not list_total_e_env_0 == [] else 0.0\n list_total_e_env_minus = [ea_env_sub_dict[env_id]['total_e_charged'] for env_id in ea_env_sub_dict]\n self.E_sum_env_minus[radius][int(core_id)] = np.sum(list_total_e_env_minus) if not list_total_e_env_minus == [] else 0.0\n\n # E_env = \\sum_i \\ne 0 E_i. sum of DFT env energies.\n list_vacuum_env_e_plus = [ip_env_sub_dict[env_id]['total_e_charged_vacuum'] for env_id in ip_env_sub_dict]\n self.E_env_plus[radius][int(core_id)] = np.sum(list_vacuum_env_e_plus) if not list_vacuum_env_e_plus == [] else 0.0\n list_vacuum_env_e_0 = [ip_env_sub_dict[env_id]['total_e_uncharged_vacuum'] for env_id in ip_env_sub_dict]\n self.E_env_0[radius][int(core_id)] = np.sum(list_vacuum_env_e_0) if not list_vacuum_env_e_0 == [] else 0.0\n list_vacuum_env_e_minus = [ea_env_sub_dict[env_id]['total_e_charged_vacuum'] for env_id in ea_env_sub_dict]\n self.E_env_minus[radius][int(core_id)] = np.sum(list_vacuum_env_e_minus) if not list_vacuum_env_e_minus == [] else 0.0\n\n # V_env = 0.5 (\\sum_{i=1} \\sum_{j=1} V_{ij}). classical interaction of env. 
mols\n self.V_env_plus[radius][core_id] = 0.5 * (self.E_sum_env_plus[radius][core_id]\n - self.E_env_plus[radius][core_id]\n - self.V0_plus[radius][core_id])\n\n self.V_env_0[radius][core_id] = 0.5 * (self.E_sum_env_0[radius][core_id]\n - self.E_env_0[radius][core_id]\n - self.V0_0[radius][core_id])\n\n self.V_env_minus[radius][core_id] = 0.5 * (self.E_sum_env_minus[radius][core_id]\n - self.E_env_minus[radius][core_id]\n - self.V0_minus[radius][core_id])\n\n\n append_dict_with_mean(self.V0_plus, self.V0_0, self.V0_minus,\n self.V_env_plus, self.V_env_0, self.V_env_minus,\n self.E_env_plus, self.E_env_0, self.E_env_minus,\n self.E0_plus, self.E0_0, self.E0_minus,\n self.n_mols) # compute and add \"mean\" to all mentioned dicts\n\n with open('Analysis/energies.pkl', 'wb') as fid:\n pickle.dump([self.E0_plus, self.E0_0, self.E0_minus,\n self.V0_plus, self.V0_0, self.V0_minus,\n self.V_env_plus, self.V_env_0, self.V_env_minus,\n self.E_env_plus, self.E_env_0, self.E_env_minus,\n self.n_mols],\n fid)\n print(\"Energies are extracted and dumped to pkl\")",
"def exp_scan(self, exposure_time_list):\n self.generic_scan(self.exp, exposure_time_list)",
"def main(file_list):\n data_store = {}\n \n for file in file_list:\n sample_id = get_sample_id(file)\n data_store[sample_id] = {}\n data_store[sample_id][\"sample_type\"], data_store[sample_id][\"out_filename\"], data_store[sample_id][\"out_location\"] = check_name(file, sample_id)\n data_store[sample_id][\"df_parameters\"], data_store[sample_id][\"df_values\"], data_store[sample_id][\"df_parameters_for_values\"] = data_in(file, sample_id)\n if data_store[sample_id][\"sample_type\"] == \"EFC\":\n binder_mass = efc_calcs(data_store[sample_id][\"df_parameters\"])\n elif data_store[sample_id][\"sample_type\"] == \"OPC\":\n binder_mass = opc_calcs(data_store[sample_id][\"df_parameters\"])\n data_store[sample_id][\"df_values\"] = tidy_val_df(data_store[sample_id][\"df_values\"], binder_mass)\n data_store[sample_id][\"df_parameters\"] = tidy_param_df(sample_id, data_store[sample_id][\"df_parameters\"], data_store[sample_id][\"out_filename\"])\n for key, value in data_store.items():\n write_to_excel(key, value[\"df_parameters\"], value[\"df_values\"], value[\"df_parameters_for_values\"], value[\"out_location\"])",
"def simulated_dph(grbdir,typ,t_src,alpha,beta,E0,A):\n\tfilenames = glob.glob(grbdir + \"/MM_out/*\")\n\tbadpixfile = glob.glob(grbdir + \"/*badpix.fits\")[0]\n\tfilenames.sort()\n\tpix_cnts = np.zeros((16384,len(filenames)))\n\terr_pix_cnts = np.zeros((16384,len(filenames)))\n\ten = np.arange(5, 261., .5)\n\tsel = (en>=100) & (en <= 150)\n\ten_range = np.zeros(len(filenames))\n\tfor f in range(len(filenames)):\n\t\ten_range[f] = filenames[f][20:26]\n\terr_100_500 = (100.0 <= en_range.astype(np.float)) & (en_range.astype(np.float) <= 500.0)\n\terr_500_1000 = (500.0 < en_range.astype(np.float)) & (en_range.astype(np.float) <= 1000.0)\n\terr_1000_2000 = (1000.0 < en_range.astype(np.float)) & (en_range.astype(np.float) <= 2000.0)\n\texist_1000_2000 = np.where(err_1000_2000 == True)\n\tE = np.array([])\n\t\n\tprint \"Indices where energy is in between 1000 and 2000 :\",exist_1000_2000[0]\n\t\n\tfor i,f in enumerate(filenames):\n\t\t\tdata = fits.getdata(f + \"/SingleEventFile.fits\")\n\t\t\tE = np.append(E, float(f[20:26]))\n\t\t\terror = np.sqrt(data) \n\t\t\tdata[:,~sel] = 0.\n\t\t\terror[:,~sel] = 0.\n\t\t\tpix_cnts[:,i] = data.sum(1)*model(E[i], alpha, beta, E0, A,typ)/55.5\n\t\t\terr_pix_cnts[:,i] = np.sqrt(((error*model(E[i], alpha, beta, E0, A,typ)/55.5)**2).sum(1))\t\t\n\t\t\t\n\tpix_cnts_total = np.zeros((16384,))\n\terr_100_500_total = np.sqrt((err_pix_cnts[:,err_100_500]**2).sum(1))*(E[err_100_500][1]-E[err_100_500][0])\n\terr_500_1000_total = np.sqrt((err_pix_cnts[:,err_500_1000]**2).sum(1))*(E[err_500_1000][1]-E[err_500_1000][0])\n\n\tif (len(exist_1000_2000[0]) != 0):\n\t\terr_1000_2000_total = np.sqrt((err_pix_cnts[:,err_1000_2000]**2).sum(1))*(E[err_1000_2000][1]-E[err_1000_2000][0])\n\telse :\n\t\terr_1000_2000_total = 0\n\t\n\terr_pix_cnts_total = np.sqrt(err_100_500_total**2 + err_500_1000_total**2 + err_1000_2000_total**2) # dE is 5 from 100-500, 10 from 500-1000, 20 from 1000-2000\n\n\tfor i in range(16384):\n\t\t\tpix_cnts_total[i] = simps(pix_cnts[i,:], E)\t\t\t\n\n\tquad0pix = pix_cnts_total[:4096]\n\tquad1pix = pix_cnts_total[4096:2*4096]\n\tquad2pix = pix_cnts_total[2*4096:3*4096]\n\tquad3pix = pix_cnts_total[3*4096:]\n\t\t\n\terr_quad0pix = err_pix_cnts_total[:4096]\n\terr_quad1pix = err_pix_cnts_total[4096:2*4096]\n\terr_quad2pix = err_pix_cnts_total[2*4096:3*4096]\n\terr_quad3pix = err_pix_cnts_total[3*4096:]\n\t\n\tquad0 = np.reshape(quad0pix, (64,64), 'F')\n\tquad1 = np.reshape(quad1pix, (64,64), 'F')\n\tquad2 = np.reshape(quad2pix, (64,64), 'F')\n\tquad3 = np.reshape(quad3pix, (64,64), 'F')\n\t\t\n\terr_quad0 = np.reshape(err_quad0pix, (64,64), 'F')\n\terr_quad1 = np.reshape(err_quad1pix, (64,64), 'F')\n\terr_quad2 = np.reshape(err_quad2pix, (64,64), 'F')\n\terr_quad3 = np.reshape(err_quad3pix, (64,64), 'F')\n\t\n\tsim_DPH = np.zeros((128,128), float)\n\tsim_err_DPH = np.zeros((128,128), float)\n\t\n\tsim_DPH[:64,:64] = np.flip(quad0, 0)\n\tsim_DPH[:64,64:] = np.flip(quad1, 0)\n\tsim_DPH[64:,64:] = np.flip(quad2, 0)\n\tsim_DPH[64:,:64] = np.flip(quad3, 0)\n\t\n\tsim_err_DPH[:64,:64] = np.flip(err_quad0, 0)\n\tsim_err_DPH[:64,64:] = np.flip(err_quad1, 0)\n\tsim_err_DPH[64:,64:] = np.flip(err_quad2, 0)\n\tsim_err_DPH[64:,:64] = np.flip(err_quad3, 0)\n\n\tbadpix = fits.open(badpixfile)\n\tdphmask = np.ones((128,128))\n\t\n\tbadq0 = badpix[1].data # Quadrant 0\n\tbadpixmask = (badq0['PIX_FLAG']!=0)\n\tdphmask[(63 - badq0['PixY'][badpixmask]) ,badq0['PixX'][badpixmask]] = 0\n\n\tbadq1 = badpix[2].data # Quadrant 1\n\tbadpixmask = 
(badq1['PIX_FLAG']!=0)\n\tdphmask[(63 - badq1['PixY'][badpixmask]), (badq1['PixX'][badpixmask]+64)] = 0\n\n\tbadq2 = badpix[3].data # Quadrant 2\n\tbadpixmask = (badq2['PIX_FLAG']!=0)\n\tdphmask[(127 - badq2['PixY'][badpixmask]), (badq2['PixX'][badpixmask]+64)] = 0\n\n\tbadq3 = badpix[4].data # Quadrant 3\n\tbadpixmask = (badq3['PIX_FLAG']!=0)\n\tdphmask[(127 - badq3['PixY'][badpixmask]), badq3['PixX'][badpixmask]] = 0\n\t\t\t\n\toneD_sim = (sim_DPH*dphmask).flatten()\n\n\treturn oneD_sim*t_src,sim_DPH*t_src,dphmask,sim_err_DPH*t_src",
"def __init__(self,files_atte,files_emis,states,lifetime):\n self.files_atte = files_atte #!\n self.files_emis = files_emis #!\n self.beam_emis = [] #!\n self.beam_atte = [] #!\n print('The wavelength assumes an hydrogen atom')\n self.n_low = states[0] #!\n self.n_high = states[1] #!\n self.E0 = -13.6\n self.lifetime = lifetime\n self.read_adas()\n\n # compute the interpolant\n self.atte_tck_dens = [] #!\n self.emis_tck_dens = [] #!\n self.atte_tck_temp = [] #!\n self.emis_tck_temp = [] #!\n for i in range(len(self.beam_atte)):\n # get data\n ldensities = np.log(self.get_list_density('atte',i))\n lbeams = np.log(self.get_list_beams('atte',i))\n coef_dens = self.get_coef_density('atte',i)\n lbeams, ldens = np.meshgrid(lbeams, ldensities)\n \n # interpolation over beam and density\n self.atte_tck_dens.append(interpolate.bisplrep(\n lbeams,ldens,coef_dens,kx=1,ky=1))\n \n # get data for the interpolation in temperature\n T = np.log(self.get_list_temperature('atte',i))\n coef_T = self.get_coef_T('atte',i)\n Tref = np.log(self.get_Tref('atte',i))\n index = abs((Tref-T)/Tref) < 1e-4\n \n #interpolation over the temperature\n self.atte_tck_temp.append(interpolate.splrep(\n T,coef_T/coef_T[index],k=1))\n\n for i in range(len(self.beam_emis)):\n # get data\n ldensities = np.log(self.get_list_density('emis',i))\n lbeams = np.log(self.get_list_beams('emis',i))\n coef_dens = self.get_coef_density('emis',i)\n lbeams, ldens = np.meshgrid(lbeams, ldensities)\n \n # interpolation over beam and density\n self.emis_tck_dens.append(interpolate.bisplrep(\n lbeams,ldens,coef_dens,kx=1,ky=1))\n\n # Get data for the interpolation in temperature\n T = np.log(self.get_list_temperature('emis',i))\n coef_T = self.get_coef_T('emis',i)\n Tref = np.log(self.get_Tref('emis',i))\n index = abs((Tref-T)/Tref) < 1e-4\n \n #interpolation over the temperature\n self.emis_tck_temp.append(interpolate.splrep(\n T,coef_T/coef_T[index],k=1))",
"def main(args):\n\tif args.verbose:\n\t\tprint(args)\n\n\tenergy = []\n\tprev_t = 0\n\n\tif args.filename:\n\t\tif args.verbose:\n\t\t\tprint(\"Opening \"+args.filename)\n\t\tsys.stdin = open(args.filename)\n\n\tfor line in sys.stdin:\n\t\titems = line.split(args.separator)\n\t\tt_str = items.pop(0)\n\t\tif t_str == \"timestamp\":\n\t\t\tcontinue\n\t\ttime = float(t_str)\n\t\tpower = [0 if i.strip() == \"NULL\" else float(i) for i in items]\n\n\t\tif prev_t > 0:\n\t\t\tdelta_t = time - prev_t\n\t\t\tif not energy:\n\t\t\t\tenergy = [p * delta_t for p in power]\n\t\t\telse:\n\t\t\t\tenergy = [e + p * delta_t for e,p in zip(energy, power)]\n\n\t\t# update time\n\t\tprev_t = time\n\n\tsys.stdin.close()\n\tif args.kwh:\n\t\tenergy = [e / 3600000 for e in energy]\n\tprint(\"\\t\".join(str(e) for e in energy))",
"def _OpenOutputFiles(self):\n self.gfile = open(self.geomout, \"w\")\n self.efile = open(self.energyout, \"w\")\n self.PrintEnergyHeader()",
"def write_hot_start_file(file_name, hot_start_list):\n with open(file_name, 'w') as mesh_file:\n for ht in hot_start_list:\n mesh_file.write('DATASET\\nOBJTYPE \"mesh2d\"\\n')\n if len(ht.values.columns) > 1:\n mesh_file.write('BEGVEC\\n')\n else:\n mesh_file.write('BEGSCL\\n')\n mesh_file.write('ND {}\\n'.format(len(ht.values)))\n mesh_file.write('NC {}\\n'.format(ht.number_of_cells))\n mesh_file.write('NAME \"{}\"\\n'.format(ht.name))\n mesh_file.write('TS 0 0\\n')\n mesh_file.write(ht.values.to_csv(sep=' ', index=False, header=False).replace('\\r\\n', '\\n'))\n mesh_file.write('ENDDS\\n')",
"def haperfluxMany(inputlist, maplist, radius, rinner, router, galactic=True, decimal=True, noise_model=0):\n\n ## Names and frequencies of the sample maps included in this repo.\n\n freqlist = ['30','44','70','100','143','217','353','545','857','1249','1874','2141','2998','3331','4612','4997','11992','16655','24983','24983','24983','33310']\n freqval = [28.405889, 44.072241,70.421396,100.,143.,217.,353.,545.,857.,1249.,1874.,2141.,2141.,2998.,2998.,3331.,4612.,4997.,11992.,16655.,24983.,24983.,24983.,33310.]\n band_names = [\"akari9\", \"dirbe12\",\"iras12\",\"wise12\",\"akari18\",\"iras25\",\"iras60\",\"akari65\",\"akari90\",\"dirbe100\",\"iras100\",\"akari140\",\"dirbe140\",\"akari160\",\"dirbe240\",\"planck857\", \"planck545\"]\n\n k0 = 1.0\n k1 = rinner/radius\n k2 = router/radius\n apcor = ((1 - (0.5)**(4*k0**2))-((0.5)**(4*k1**2) - (0.5)**(4*k2**2)))**(-1)\n\n # 'galactic' overrules 'decimal'\n if (galactic==True):\n dt=[('sname',np.dtype('S13')),('glon',np.float32),('glat',np.float32)]\n targets = np.genfromtxt(inputlist, delimiter=\",\",dtype=dt)\n\n ns = len(targets['glat'])\n\n fd3 = -1\n fd_err3 = -1\n\n fn = np.genfromtxt(maplist, delimiter=\" \", dtype='str')\n nmaps = len(fn)\n ## Initialize the arrays which will hold the results\n fd_all = np.zeros((ns,nmaps))\n fd_err_all = np.zeros((ns,nmaps))\n fd_bg_all = np.zeros((ns,nmaps))\n\n # Start the actual processing: Read-in the maps.\n for ct2 in range(0,nmaps):\n xtmp_data, xtmp_head = hp.read_map(fn[ct2], h=True, verbose=False, nest=False)\n freq = dict(xtmp_head)['FREQ']\n units = dict(xtmp_head)['TUNIT1']\n freq_str = str(freq)\n idx = freqlist.index(str(freq))\n currfreq = int(freq)\n\n if (radius == None):\n radval = fwhmlist[idx]\n else:\n radval = radius\n\n\n for ct in range(0,ns):\n\n glon = targets['glon'][ct]\n glat = targets['glat'][ct]\n\n fd_all[ct,ct2], fd_err_all[ct,ct2], fd_bg_all[ct,ct2] = \\\n haperflux(inmap= xtmp_data, freq= currfreq, lon=glon, lat=glat, aper_inner_radius=radius, aper_outer_radius1=rinner, \\\n aper_outer_radius2=router,units=units, noise_model=noise_model)\n\n if (np.isfinite(fd_err_all[ct,ct2]) == False):\n fd_all[ct,ct2] = -1\n fd_err_all[ct,ct2] = -1\n else:\n if radius==None:\n fd_all[ct,ct2] = fd_all[ct,ct2]*apcor\n fd_err_all[ct,ct2] = fd_err_all[ct,ct2]*apcor\n\n return fd_all, fd_err_all, fd_bg_all",
"def write_to_file_z(path):\n path1 = path + \"/z_Macros\"\n if not os.path.exists(path1):\n os.mkdir(path1)\n for e in range(int(e_steps)+1):\n filename = \"x0y0z%ske%s.mac\" %(dz*z + z_min, e*de + e_min)\n path = path1\n fullpath = os.path.join(path, filename)\n f = open(fullpath, \"w\")\n f.write('/rat/physics_list/OmitMuonicProcesses true\\n')\n f.write(\"/rat/physics_list/OmitHadronicProcesses true \\n\")\n f.write(\"\\n\")\n f.write(\"\\n\")\n f.write('/rat/db/set DETECTOR geo_file \"geo/snoplus.geo\"\\n')\n f.write('/rat/db/set GEO[scint] material \"labppo_scintillator\"\\n')\n f.write('/rat/db/set DAQ dqxx_info 0 \\n')\n f.write(\"/run/initialize \\n\")\n f.write(\"\\n\")\n f.write(\"\\n\")\n f.write(\"/rat/proc frontend\\n\")\n f.write(\"/rat/proc trigger\\n\")\n f.write(\"/rat/proc eventbuilder\\n\")\n f.write(\"/rat/proc count\\n\")\n f.write(\"/rat/procset update 100\\n\")\n f.write(\"/rat/proc calibratePMT\\n\")\n f.write(\"/rat/proc scintFitter\\n\")\n f.write(\"/rat/proclast outroot\\n\")\n f.write('/rat/procset file \"x0y0z%ske%s.root\"\\n' %(dz*z + z_min, e*de + e_min))\n f.write(\"\\n\")\n f.write(\"\\n\")\n f.write(\"/generator/add combo gun:point:poisson\\n\")\n f.write(\"# want random, isotropic momentum distribution; energy given in MeV\\n\")\n f.write(\"/generator/vtx/set e- 0 0 0 %s\\n\" %(e*de + e_min))\n f.write(\"# position given in Cartesians, relative to detector center, in mm\\n\")\n f.write(\"/generator/pos/set 0 0 %s\\n\" % (dz*z + z_min))\n f.write(\"/generator/rate/set 1\\n\")\n f.write(\"\\n\")\n f.write(\"\\n\")\n f.write(\"/rat/run/start %s\\n\" %(n))\n f.write(\"exit\")"
]
| [
"0.6509747",
"0.57093614",
"0.5667372",
"0.5663888",
"0.56243795",
"0.56148547",
"0.55704665",
"0.5513959",
"0.54973865",
"0.5366151",
"0.5344379",
"0.53401536",
"0.5294211",
"0.5282262",
"0.52793986",
"0.5203054",
"0.51673126",
"0.5163976",
"0.5157231",
"0.51282185",
"0.51024437",
"0.5095682",
"0.50886965",
"0.5088294",
"0.50807935",
"0.5076617",
"0.50682694",
"0.5067229",
"0.5050476",
"0.5041815"
]
| 0.620671 | 1 |
Calculate which row to update, factoring in the header row placed every $hdr_span years. | def year_span(target_year:int, base_year:int, yr_span:int, hdr_span:int, logger:lg.Logger = None) -> int:
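    # each year occupies yr_span rows below base_year; an extra header row appears every hdr_span years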
if logger:
logger.debug(F"target year = {target_year}; base year = {base_year}; year span = {yr_span}; header span = {hdr_span}")
year_diff = target_year - base_year
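    # add one row for every header row inserted between base_year and target_year (none if hdr_span <= 0)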
hdr_adjustment = 0 if hdr_span <= 0 else (year_diff // hdr_span)
return (year_diff * yr_span) + hdr_adjustment | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def handle_irregular(rows):\n \n for data in rows:\n if len(data.findChildren([\"th\"])) < 1:\n row = Row(\"2006\", data, 6, 7)\n if any(r.year == \"2006\" and r.county == row.county for r in csv_rows) and row.county is not None:\n matching_row = next((match for match in csv_rows if match.year == \"2006\" and match.county == row.county ), None)\n matching_row.population = matching_row.population + row.population\n else:\n csv_rows.append(row)",
"def write_header(worksheet, curr_row, cols, data_cols, header_format, stages):\n\n ### Merge range function takes the locations of the cells to merge, the data\n ### to write and the cell format. A sample input would look like:\n ### worksheet.merge_range(\"A0:B1\", \"Location\", cell_format_obj)\n ### The above call will merge 4 cells: A0, A1, B0, B1 and fill it with the\n ### value \"Location\". \n \n end_row = curr_row + CELL_HT[\"location\"]\n row_range = cols[0] + str(curr_row) + \":\" + cols[0] + str(end_row)\n worksheet.merge_range(row_range, \"Location\", header_format)\n \n num_pop_cols = sum(map(lambda i: \"pop\" in i, data_cols)) - 1\n num_tfr_cols = sum(map(lambda i: \"tfr\" in i, data_cols)) - 1\n\n col_end = 0\n for i, stage in enumerate(stages):\n \n if stage == \"pop\":\n unit_txt = \" (in millions)\"\n stage_txt = \"Population\"\n col_range = num_pop_cols\n else:\n unit_txt = \"\"\n stage_txt = \"Total Fertility Rate\"\n col_range = num_tfr_cols\n \n col_st = col_end + 1\n col_end = col_st + col_range\n \n curr_row_copy = curr_row\n end_row = curr_row_copy + CELL_HT[\"stage\"]\n\n row_range = (\n cols[col_st] + str(curr_row_copy) + \":\" +\n cols[col_end] + str(end_row)\n )\n \n col_txt = stage_txt + unit_txt\n worksheet.merge_range(row_range, col_txt, header_format)\n\n curr_row_copy = end_row + 1\n end_row = curr_row_copy + CELL_HT[\"stage\"]\n \n col_st_copy = col_st\n \n for column in data_cols:\n if stage in column: \n row_range = cols[col_st_copy] + str(curr_row_copy)\n worksheet.write(row_range, COL_NAME_MAP[column], header_format)\n col_st_copy += 1\n \n\n return end_row + 1",
"def chainwork_of_header_at_height(self, height: int) -> int:\n chunk_idx = height // 2016 - 1\n target = self.get_target(chunk_idx)\n # Todo-Calvin: What are these values from?\n work = ((2 ** 256 - target - 1) // (target + 1)) + 1\n return work",
"def generate_header_row(self):\n weekday_abbrev = (\"M\",\"T\",\"W\",\"T\",\"F\",\"S\",\"S\")\n weeknames = self._create_week_dates_text()\n self.add_element('<thead class=\"fixedHeader\" style=\"width: 986px;\">',True,0)\n self.add_element('<tr id=\"datesRow\">', True, 2)\n self.add_element('<th class=\"attTblHeaderName attTblHeaderDateFill\" id=\"id_tablehdr_fill\" ></th>', True, 4)\n th = '<th class=\"attTblHeaderDate\" id=\"id_tablehdr_firstweek\" colspan=\"7\">%s</th>' %weeknames[0]\n self.add_element(th, True, 4)\n th = '<th class=\"attTblHeaderDate\" id=\"id_tablehdr_firstweek\" colspan=\"7\">%s</th>' %weeknames[1]\n self.add_element(th, True, 4)\n self.add_element('</tr>' , True, 2)\n self.add_element('<tr id=\"daysRow\">', True, 2)\n th_name = \\\n '<th class=\"attTblHeaderBase attTblHeaderName ui-widget-header\" name=\"headerTdName\" id=\"id_table_header\">Name</td>'\n self.add_element(th_name, True, 4)\n for column_index in xrange(0, self.total_days_count):\n day = self.start_date + timedelta(column_index)\n day_text = weekday_abbrev[day.weekday()]\n week = self._compute_week_index(column_index)\n date_ordinal = day.toordinal()\n if (not SchoolDB.models.StudentAttendanceRecord.is_valid(\n self.dayperiod_type[column_index* 2])):\n title_text = day.strftime(\"%a, %b %d %Y\") + \\\n \" Future day so it cannot be set.\"\n th_type = \"headerTdNed ui-state-disabled\"\n elif (not SchoolDB.models.StudentAttendanceRecord.is_schoolday(\n self.dayperiod_type[column_index * 2])):\n title_text = day.strftime(\"%a, %b %d %Y\") + \\\n \" \" + self.day_description[column_index] + \\\n \" so it cannot be set.\"\n th_type = \"headerTdNsd ui-state-disabled\"\n else:\n title_text = day.strftime(\"%a, %b %d %Y\") + \\\n \" \" + self.day_description[column_index]\n th_type = \"headerTdSd\"\n #fix for last to clean up border\n if (column_index == self.total_days_count - 1):\n modifier = \"attTblHeaderRight\"\n else:\n modifier = \"\"\n th_text = \\\n '<th id=\"attDay-%s\" class=\"attTblHeaderBase attSelectable ui-widget-header %s %s\" title=\"%s\" >%s</th>' \\\n %(column_index, th_type, modifier, title_text, day_text)\n self.add_element(th_text, True, 4)\n #th_text = '<th class=\"attTblHeaderBase attTblHeaderFiller\" name=\"headerTdFiller\" id=\"headerTdFiller\" colspan=\"2\"></th>'\n #self.add_element(th_text, True, 4)\n self.add_element('</tr>', True, 2, True)\n self.add_element('</thead>', True, 0)\n self.add_element('</table>')\n return self.html_table",
"def fix_years(self, row):\n raise NotImplementedError",
"def build_modify_row(self, row_gen, p, source, row):\n from xlrd import xldate_as_tuple\n from datetime import date\n\n row['year'] = int(source.time)\n row['facility_status_date'] = date(*xldate_as_tuple(row['facility_status_date'],row_gen.workbook.datemode)[:3])\n row.update(source.row_data.dict)",
"def _append_row_header_to_subsequent_rows(row_headers, numerical_subtable):\n empty_flag = (numerical_subtable == '').mean(1) == 1\n empty_rows = list(np.where(empty_flag)[0])\n non_empty_rows = np.where(~empty_flag)[0]\n if len(empty_rows) > 0:\n if empty_rows[-1] != len(row_headers):\n empty_rows.append(len(row_headers))\n all_append_rows = [list(range(empty_rows[i] + 1, empty_rows[i + 1])) for i in range(len(empty_rows) - 1)]\n for i, append_rows in zip(empty_rows, all_append_rows):\n for append_row in append_rows:\n row_headers[append_row] = row_headers[i] + ' - ' + row_headers[append_row]\n row_headers = [row_headers[i] for i in non_empty_rows]\n numerical_subtable = numerical_subtable[non_empty_rows]\n return row_headers, numerical_subtable",
"def update_header(arr_imgs,obj,filter_i):\n \n for img in arr_imgs:\n warnings.simplefilter('ignore', category=AstropyUserWarning)\n try:\n hdulist = fits.open(img,ignore_missing_end=True)\n #if there is only a primary header get the data from it\n if len(hdulist) == 1:\n data = getdata(img, 0, header=False)\n #if there is more than one header get data from the 'SCI' extension\n else:\n data = getdata(img, 1, header=False)\n #Get value of EXPTIME and PHOTZPT keyword from primary header and \n #set CCDGAIN to a default value of 1\n EXPTIME = hdulist[0].header['EXPTIME']\n PHOTFLAM = hdulist[1].header['PHOTFLAM']\n PHOTZPT = hdulist[1].header['PHOTZPT']\n CCDGAIN = 1.0\n #First pass locating value for gain\n for i in range(2):\n if len(hdulist) == 1:\n break\n #Go through primary and secondary header and ignore the \n #BinTable formatted header\n if not isinstance(hdulist[i],astropy.io.fits.hdu.table.\\\n BinTableHDU):\n if 'CCDGAIN' in hdulist[i].header:\n CCDGAIN = hdulist[i].header['CCDGAIN']\n break\n if 'GAIN' in hdulist[i].header:\n CCDGAIN = hdulist[i].header['GAIN']\n break\n if 'ATODGAIN' in hdulist[i].header:\n CCDGAIN = hdulist[i].header['ATODGAIN']\n break\n \n #Locating units of image\n print('Doing BUNIT check')\n for i in range(2):\n #If there is only one header then this is the only place to \n #check\n if len(hdulist) == 1:\n bunit = hdulist[0].header['D001OUUN']\n print('BUNIT was {0}'.format(bunit))\n if bunit == 'counts':\n ### Rescaling zeropoint\n ZPT_NEW = 30.0\n ZPT_OLD = -2.5*np.log10(PHOTFLAM*EXPTIME) + PHOTZPT\n pixmod = 10**(-0.4*(ZPT_OLD-ZPT_NEW))\n data = (data/EXPTIME)*pixmod\n hdulist[0].header.set('BUNIT','COUNTS/S')\n hdulist[0].header.set('MAGZPT',ZPT_NEW)\n print('BUNIT is {0}'.format(hdulist[0].\\\n header['BUNIT']))\n \n #If there are multiple headers then they all have to be checked\n else:\n if 'BUNIT' in hdulist[i].header:\n bunit = hdulist[i].header['BUNIT']\n print('BUNIT was {0}'.format(bunit))\n if bunit == 'COUNTS':\n ZPT_NEW = 30.0\n ZPT_OLD = -2.5*np.log10(PHOTFLAM*EXPTIME) + PHOTZPT\n pixmod = 10**(-0.4*(ZPT_OLD-ZPT_NEW))\n data = (data/EXPTIME)*pixmod\n if bunit == 'ELECTRONS':\n ZPT_NEW = 30.0\n ZPT_OLD = -2.5*np.log10(PHOTFLAM*CCDGAIN*EXPTIME) \\\n + PHOTZPT\n pixmod = 10**(-0.4*(ZPT_OLD-ZPT_NEW))\n data = (data/(CCDGAIN*EXPTIME))*pixmod\n if bunit == 'ELECTRONS/S':\n ZPT_NEW = 30.0\n ZPT_OLD = -2.5*np.log10(PHOTFLAM*CCDGAIN) + PHOTZPT\n pixmod = 10**(-0.4*(ZPT_OLD-ZPT_NEW))\n data = (data/CCDGAIN)*pixmod\n if bunit == 'ELECTRONS/SEC':\n ZPT_NEW = 30.0\n ZPT_OLD = -2.5*np.log10(PHOTFLAM*CCDGAIN) + PHOTZPT\n pixmod = 10**(-0.4*(ZPT_OLD-ZPT_NEW))\n data = (data/CCDGAIN)*pixmod\n hdulist[i].header['BUNIT'] = 'COUNTS/S'\n hdulist[i].header['MAGZPT'] = ZPT_NEW\n ###\n print('BUNIT is {0}'.format(hdulist[i].\\\n header['BUNIT']))\n print('PHOTZPT is {0}'.format(hdulist[i].\\\n header['MAGZPT']))\n print('Done changing BUNIT')\n \n #Second pass to assign gain and exptime to headers\n for i in range(2):\n if len(hdulist) == 1:\n break\n if not isinstance(hdulist[i],astropy.io.fits.hdu.table.\\\n BinTableHDU):\n if 'CCDGAIN' not in hdulist[i].header:\n hdulist[i].header.set('CCDGAIN',CCDGAIN)\n if 'EXPTIME' not in hdulist[i].header:\n hdulist[i].header.set('EXPTIME',EXPTIME)\n \n #Make new versions of images in interim/obj1 folder\n os.chdir(path_to_interim + obj)\n #Remove .fits extension\n img = os.path.splitext(img)[0]\n #If there was only one header write that header's data to new\n #version of fits image\n if len(hdulist) == 1:\n 
fits.writeto(img+'_test_'+filter_i+'.fits',data,hdulist[0].\\\n header,output_verify='ignore')\n #Else write the 'SCI' header's data to new version of fits image\n else:\n fits.writeto(img+'_test_'+filter_i+'.fits',data,hdulist[1].\\\n header,output_verify='ignore')\n hdulist.close()\n os.chdir(path_to_raw + obj)\n \n #This is to catch 'empty or corrupt FITS file' or any other IOError\n #and write it to a text file along with the object name and the \n #filter name\n except IOError as e:\n os.chdir('..')\n dir_path = os.getcwd()\n if os.path.basename(dir_path) == 'raw':\n os.chdir(path_to_interim)\n with open('Error_swarp.txt','a') as newfile: \n newfile.write('Object {0} and image {1} raises {2}'.\\\n format(obj,img,e))\n newfile.write('\\n')\n newfile.close()\n os.chdir(path_to_raw + obj)\n \n os.chdir(path_to_interim + obj)\n #For this object and filter combination grab all the new versions made\n arr = glob('*test_'+filter_i+'.fits')\n print(len(arr))\n if len(arr) >= 1: #avoid empty cases where files have been removed earlier\n #or don't exist at all since the dictionary also contains\n #pairs of objects and filters that didn't meet the swarp\n #requirements (didn't pass preliminary exptime or filter\n #checks so those folders/images don't exist)\n \n #If new versions exist then write their names to a text file \n with open(filter_i+'_img_list_testfil.txt','wb') as newfile2:\n for obj in arr:\n newfile2.write(obj)\n newfile2.write('\\n')\n newfile2.close()\n #If text file exists return the name\n return filter_i+'_img_list_testfil.txt'\n #If text file doesn't exist return this string\n return 'error'",
"def _get_hdr_dist_for_crispor(row):\n if 'hdr_dist' not in row.dtype.names:\n return None\n else:\n return int(row['hdr_dist']) * (\n 1 if row['target_loc'].strand == '+' else -1)",
"def yearlystats(self, table):\n # Drop unneeded header row\n tmp = table.iloc[1:,]\n # Create a header row removing spaces, /'s, and x's\n tmp.columns = [x.replace(\" \", \"\").replace(\"/\",\"\").replace(\".\",\"\") for x in tmp.iloc[0]]\n # Drop the row used to create header row\n tmp = tmp.drop(tmp.index[0])\n # Forward fill the year for analysis later \n tmp['Year'].fillna(method='ffill', inplace = True)\n # Create a new offense/defense variable \n tmp['OffenseDefense'] = tmp['Team']\n # Figure out which team we are working with \n curr_team = tmp.iloc[:1,1:3].values[0][0]\n # Create a new team variable\n tmp['Team'] = curr_team\n # In the offense defense variable, fill in the offense defense variable \n tmp['OffenseDefense'] = tmp['OffenseDefense'].apply(lambda x: 'Offense' if x == curr_team else x)\n return tmp",
"def update_header():\n print_debug_info()\n if not should_do_write():\n debug(\"should not write this buffer.\")\n return\n\n if not (has_header() or suffix_is_supported()):\n # This file do not have a header, or it's format is unknown, quit.\n debug(\"cannot add header to a script of unknown format.\")\n return\n\n # if current buffer is not modified, do not bother to update it's date.\n if not modified():\n debug(\"Buffer not modified, just quit\")\n return\n\n row, column = vim.current.window.cursor\n header_template = globals().get(\"%s_header\" % SUFFIX).rstrip()\n\n # if line has the keyword, find the current for the keyword, get the line, re-render it and fill it in.\n head = CURRENT_BUFFER[:10]\n\n more_updates = vim.eval(\"g:BHUpdates\")\n\n update = {\n 'Maintained by': AUTHOR,\n 'Modified by': AUTHOR,\n 'Last modified': datetime.now().strftime(\"%Y-%m-%d %H:%M\"),\n 'Filename': FILENAME,\n }\n update.update(more_updates)\n for index, line in enumerate(head):\n for keyword in update:\n if line.find(keyword) != -1:\n original_line = [_line for _line in header_template.splitlines() if _line.find(keyword) != -1]\n if original_line:\n original_line = original_line[0]\n else:\n continue\n debug(\"original line: %s\" % original_line)\n debug(\"line to be replaced: %s\" % line)\n rendered_line = original_line % {KEYWORDS[keyword]: update[keyword]}\n debug(\"rendered line: %s\" % rendered_line)\n CURRENT_BUFFER[index] = rendered_line\n\n vim.current.window.cursor = (row, column)",
"def combine_headers(hdr0, hdr1, dem_hdr):\n if not all([isinstance(a, dict) for a in [hdr0, hdr1, dem_hdr]]):\n raise GammaException('Header args need to be dicts')\n\n date0, date1 = hdr0[ifc.FIRST_DATE], hdr1[ifc.FIRST_DATE]\n if date0 == date1:\n raise GammaException(\"Can't combine headers for the same day\")\n elif date1 < date0:\n raise GammaException(\"Wrong date order\")\n\n chdr = {ifc.PYRATE_TIME_SPAN: (date1 - date0).days / ifc.DAYS_PER_YEAR,\n ifc.FIRST_DATE: date0,\n ifc.FIRST_TIME: hdr0[ifc.FIRST_TIME],\n ifc.SECOND_DATE: date1,\n ifc.SECOND_TIME: hdr1[ifc.FIRST_TIME],\n ifc.DATA_UNITS: RADIANS,\n ifc.PYRATE_INSAR_PROCESSOR: GAMMA}\n\n # set incidence angle to mean of first amd second image values\n inc_ang = hdr0[ifc.PYRATE_INCIDENCE_DEGREES]\n if np.isclose(inc_ang, hdr1[ifc.PYRATE_INCIDENCE_DEGREES], atol=1e-1):\n chdr[ifc.PYRATE_INCIDENCE_DEGREES] = (hdr0[ifc.PYRATE_INCIDENCE_DEGREES] + hdr1[\n ifc.PYRATE_INCIDENCE_DEGREES]) / 2\n else:\n msg = \"Incidence angles differ by more than 1e-1\"\n raise GammaException(msg)\n\n wavelen = hdr0[ifc.PYRATE_WAVELENGTH_METRES]\n if np.isclose(wavelen, hdr1[ifc.PYRATE_WAVELENGTH_METRES], atol=1e-6):\n chdr[ifc.PYRATE_WAVELENGTH_METRES] = wavelen\n else:\n args = (chdr[ifc.FIRST_DATE], chdr[ifc.SECOND_DATE])\n msg = \"Wavelength mismatch, check both header files for %s & %s\"\n raise GammaException(msg % args)\n # non-cropped, non-multilooked geotif process step information added\n chdr[ifc.DATA_TYPE] = ifc.ORIG\n\n chdr.update(dem_hdr) # add geographic data\n return chdr",
"def rowSpan(self, p_int, p_int_1): # real signature unknown; restored from __doc__\r\n return 0",
"def update_header(self) -> None:\n self.header.partial_reset()\n self.header.point_format_id = self.points.point_format.id\n self.header.point_data_record_length = self.points.point_size\n\n if len(self.points) > 0:\n self.header.update(self.points)\n\n if self.header.version.minor >= 4:\n if self.evlrs is not None:\n self.header.number_of_evlrs = len(self.evlrs)\n self.header.start_of_waveform_data_packet_record = 0\n # TODO\n # if len(self.vlrs.get(\"WktCoordinateSystemVlr\")) == 1:\n # self.header.global_encoding.wkt = 1\n else:\n self.header.number_of_evlrs = 0",
"def get_header_au(row):\n rules = [\"Time\", \"Smile\", \"AU\"]\n #header = row[0:2]\n header=row\n #print row\n result = []\n i = 0\n #for all values in the header\n for h in header:\n print h\n if h in rules or h[0:2] in rules or 'AU' in h:\n result.append([h, i])\n i = i + 1\n # print result\n return result",
"def headers_processor(headers):\n def apply_headers(row_set, row):\n _row = []\n pairs = izip_longest(row, headers)\n for i, (cell, header) in enumerate(pairs):\n if cell is None:\n cell = Cell(None)\n cell.column = header\n if not cell.column:\n cell.column = \"column_%d\" % i\n cell.column_autogenerated = True\n _row.append(cell)\n return _row\n return apply_headers",
"def find_header_row(regexp_str):\n regexp = re.compile(regexp_str, re.IGNORECASE|re.DOTALL)\n global HEADER_ROW, HEADER_STARTCOL\n HEADER_ROW = HEADER_STARTCOL = None\n for row in range(20):\n if HEADER_ROW:\n break\n for col in range(5):\n val = cellval(row, col)\n if (val and re.search(regexp, val)):\n HEADER_ROW = row\n HEADER_STARTCOL = col\n break\n if HEADER_ROW == None or HEADER_STARTCOL == None:\n parser_error(\"failed to parse this as a footprint spreadsheet. \"+\n \"No header row found: looked for \"+regexp_str)",
"def get_header(\n self,\n source: TestCaseReport,\n depth: int,\n row_idx: int,\n ) -> RowData:\n row_data = RowData(\n start=row_idx,\n content=const.EMPTY_ROW,\n style=RowStyle(line_below=(1, colors.black)),\n )\n\n row_data += super(MultiTestRowBuilder, self).get_header(\n source, depth, row_data.end\n )\n\n summary = \", \".join(\n [\n \"{} {}\".format(count, status)\n for count, status in source.counter.items()\n if status != \"total\"\n ]\n )\n\n if \"run\" in source.timer:\n summary += \", total run time: {}.\".format(\n format_duration(source.timer[\"run\"].elapsed)\n )\n\n row_data.append(\n content=[summary, \"\", \"\", \"\"],\n style=[\n RowStyle(\n font=(const.FONT, const.FONT_SIZE_SMALL),\n left_padding=const.INDENT * depth,\n end_column=0,\n ),\n RowStyle(bottom_padding=0, top_padding=0, valign=\"TOP\"),\n ],\n )\n\n return row_data",
"def linear_interpolate(df, offset, final_year=\"2050\", harmonize_year=\"2015\"):\n df = df.copy()\n x1, x2 = harmonize_year, final_year\n y1, y2 = offset + df[x1], df[x2]\n m = (y2 - y1) / (float(x2) - float(x1))\n b = y1 - m * float(x1)\n\n cols = [x for x in utils.numcols(df) if int(x) < int(final_year)]\n for c in cols:\n df[c] = m * float(c) + b\n return df",
"def calcUpdateByRow(self, row):\n\n # a) positive phase\n poshp = self.rbm.activate(row)\t# compute the posterior probability\n pos = outer(row, poshp) \t# fraction from the positive phase\n poshb = poshp\n posvb = row\n\n # b) the sampling & reconstruction\n sampled = self.sampler(poshp)\n recon = self.invRbm.activate(sampled)\t# the re-construction of data\n\n # c) negative phase\n neghp = self.rbm.activate(recon)\n neg = outer(recon, neghp)\n neghb = neghp\n negvb = recon\n\n # compute the raw delta\n # !!! note that this delta is only the 'theoretical' delta\n return self.updater(pos, neg, poshb, neghb, posvb, negvb)",
"def _main_header(self, hdr):\n d = {}\n # Called readDefAnalysis in OpenMIMS\n d['sample type'], d['data included'], d['sample x'], d['sample y'], \\\n d['analysis type'], d['user name'], d['sample z'], date, time = \\\n unpack(self._bo + '4i 32s 16s i 12x 16s 16s', hdr.read(112))\n\n d['data included'] = bool(d['data included'])\n d['user name'] = self._cleanup_string(d['user name'])\n d['analysis type'] = self._cleanup_string(d['analysis type']).lower()\n date = self._cleanup_string(date)\n time = self._cleanup_string(time)\n d['date'] = self._cleanup_date(date + ' ' + time)\n\n if self.header['file type'] in (27, 29, 39):\n # Called MaskImage/readMaskIm in OpenMIMS\n d['original filename'], d['analysis duration'], d['frames'], \\\n d['scan type'], d['magnification'], d['size type'], \\\n d['size detector'], d['beam blanking'], d['presputtering'], \\\n d['presputtering duration'] = \\\n unpack(self._bo + '16s 3i 3h 2x 3i', hdr.read(48))\n\n d['AutoCal'] = self._autocal(hdr)\n d['HVControl'] = {}\n d['HVControl']['hvcontrol enabled'] = False\n\n elif self.header['file type'] in (22, 41):\n # Called MaskSampleStageImage/readMaskIss in OpenMIMS\n d['original filename'], d['analysis duration'], d['scan type'], \\\n d['steps'], d['step size x'], d['step size y'], d['step size?'], \\\n d['step waittime'], d['frames'], d['beam blanking'], \\\n d['presputtering'], d['presputtering duration'] = \\\n unpack(self._bo + '16s 6i d 4i', hdr.read(64))\n\n d['scan type'] = _stage_scan_types.get(d['scan type'], str(d['scan type']))\n\n d['AutoCal'] = self._autocal(hdr)\n d['HVControl'] = self._hvcontrol(hdr)\n # Don't know if this unused byte needs to go after HVControl or after SigRef.\n hdr.seek(4, 1)\n\n elif self.header['file type'] in (21, 26):\n # Not in OpenMIMS\n # this bit same as image, 1 extra unused/unknown\n d['original filename'], d['analysis duration'], d['frames'], \\\n d['scan type'], d['magnification'], d['size type'], \\\n d['size detector'], d['beam blanking'], d['presputtering'], \\\n d['presputtering duration'] = \\\n unpack(self._bo + '16s 4x 3i 3h 2x 3i', hdr.read(52))\n\n # this bit same as stage scan\n d['AutoCal'] = self._autocal(hdr)\n d['HVControl'] = self._hvcontrol(hdr)\n\n # 24 bytes unknown, not sure if they go here or before AutoCal\n hdr.seek(24, 1)\n\n elif self.header['file type'] == 31:\n # Don't know if this is correct, all 0s anyway\n d['original filename'], d['scan type'], \\\n d['beam blanking'], d['presputtering'] = \\\n unpack(self._bo + '16s 3i 4x', hdr.read(32))\n\n elif self.header['file type'] == 35:\n d['original filename'], d['scan type'], d['analysis duration'], \\\n d['frames'], d['beam blanking'], d['presputtering'] = \\\n unpack(self._bo + '16s 5i 40x', hdr.read(76))\n\n d['AutoCal'] = self._autocal(hdr)\n d['HVControl'] = self._hvcontrol(hdr)\n\n else:\n raise TypeError('What type of image are you? 
{}'.format(self.header['file type']))\n\n # Continue main header for all types\n d['SigRef'] = self._sigref(hdr)\n d['masses'] = unpack(self._bo + 'i', hdr.read(4))[0]\n\n # scan type is set for stage scan analysis, set others\n if isinstance(d['scan type'], int):\n if d['scan type'] == 0:\n d['scan type'] = ''\n else:\n d['scan type'] = str(d['scan type'])\n\n d['beam blanking'] = bool(d['beam blanking'])\n d['presputtering'] = bool(d['presputtering'])\n d['original filename'] = self._cleanup_string(d['original filename'])\n\n if self.header['file type'] in (21, 26, 27, 29, 35, 39):\n if self.header['file version'] >= 4108:\n n = 60\n else:\n n = 10\n elif self.header['file type'] in (22, 31, 40, 41):\n n = 20\n else:\n n = 0\n\n # Not sure what this is, memory pointers? Not needed.\n # d['mass table ptr'] = unpack(self._bo + 2*n*'h', hdr.read(n*4))\n hdr.seek(n*4, 1)\n\n if self.header['file type'] in (21, 22, 26, 40, 41, 35):\n hdr.seek(4, 1) # 4 bytes unused\n\n # Mass table, dict by species label.\n d['MassTable'] = collections.OrderedDict()\n for m in range(d['masses']):\n mi = {}\n mi['trolley index'], unknown, mi['mass'], mi['matrix or trace'], \\\n mi['detector'], mi['wait time'], mi['frame count time'] = \\\n unpack(self._bo + '2i d 2i 2d', hdr.read(40))\n\n if self.header['file type'] == 31:\n if d['analysis type'].endswith('trolley step scan'):\n # start and end are in mm, step is in μm; convert to mm\n mi['radius start'], mi['radius end'], \\\n mi['radius step'], mi['b field bits'] = \\\n unpack(self._bo + '3d i', hdr.read(28))\n mi['radius step'] /= 1000\n else:\n mi['voltage start'], mi['voltage end'], \\\n mi['voltage step'], mi['b field bits'] = \\\n unpack(self._bo + '3d i', hdr.read(28))\n else:\n mi['offset'], mi['b field bits'] = unpack(self._bo + '2i', hdr.read(8))\n\n mi.update(self._species(hdr))\n\n if self.header['file type'] == 31:\n hdr.seek(4, 1)\n\n # Add correction controls, my own addition.\n mi['background corrected'] = False\n mi['deadtime corrected'] = False\n mi['yield corrected'] = False\n\n label = mi.pop('label')\n # This is true for NS50L and file version 4108.\n # Anywhere else different?\n # Maybe confirm this with the Trolleys dict,\n # there is an Esi trolley.\n if mi['trolley index'] == 8:\n label = 'SE'\n\n d['MassTable'][label] = mi\n\n # Create a few convenient lists\n d['label list'] = tuple(d['MassTable'].keys())\n d['label list fmt'] = tuple(format_species(m) for m in d['label list'])\n d['mass list'] = tuple(d['MassTable'][m]['mass'] for m in d['label list'])\n\n return d",
"def parseRowHeader(self, i, j) :\n rowHeaderValue = \"\"\n\n # Don't attach the cell value to the namespace if it's already a URI\n isURI = urlparse(str(self.source_cell.value))\n if isURI.scheme and isURI.netloc:\n rowHeaderValue = URIRef(self.source_cell.value)\n else:\n self.source_cell_value_qname = self.source_cell.value\n rowHeaderValue = Literal(self.source_cell_value_qname)\n \n # Get the properties to use for the row headers\n prop = self.property_dimensions[j]\n self.row_dimensions.setdefault(i,{})\n self.row_dimensions[i][self.namespaces['scope'][prop]]= rowHeaderValue\n \n return",
"def get_header_table(self , dt, ds = '' , all_ds = '', length = ''):\n index_low = self.unique_dates[ds]['indices'][dt]['low']\n #index_up = self.unique_dates[best_ds]['indices'][dt]['up'] \n hd = self.data[ds]['header_table'][index_low:index_low+length] \n hd['duplicates'] = all_ds \n \n return hd",
"def get_header(header_row):\n header = {}\n header['station'], c1, c2, c3, date, time, tz = header_row.split()\n header['short_model'] = c1\n header['model'] = f'{c1} {c2} {c3}' \n header['runtime'] = dateutil.parser.parse(f'{date} {time} {tz}')\n return header",
"def _combine_headers(headers, constant_only=False):\n\n # Allowing the function to gracefully handle being given a single header\n if len(headers) == 1:\n return headers[0]\n \n uniform_cards = []\n varying_keywords = []\n n_vk = 0\n \n for kwd in headers[0]:\n \n # Skip checksums etc\n if kwd in ('S_REGION', 'CHECKSUM', 'DATASUM'):\n continue\n \n if (np.array([x[kwd] for x in headers[1:]]) == headers[0][kwd]).all():\n uniform_cards.append(headers[0].cards[kwd])\n else:\n if constant_only: # Removing non-constant kewords in this case\n continue\n \n n_vk += 1\n for i, hdr in enumerate(headers):\n varying_keywords.append((f\"F{i+1:02}_K{n_vk:02}\", kwd, \"Keyword\"))\n varying_keywords.append((f\"F{i+1:02}_V{n_vk:02}\", hdr[kwd], \"Value\"))\n varying_keywords.append((f\"F{i+1:02}_C{n_vk:02}\", hdr.comments[kwd], \"Comment\"))\n\n # TODO: Add wcs checking? How?\n return fits.Header(uniform_cards+varying_keywords)",
"def check_header(self, magmap):\n for i in range(len(self.mag_map_list)):\n if magmap.fits_header['DATE-OBS'] == self.mag_map_list[i].fits_header['DATE-OBS']:\n return i",
"def _read_header(self):\n f = self._open(self.filename, 'rb')\n idx = 0\n header = b''\n # reading the header \n while idx < 13: \n header += f.readline().rstrip() # removes the \"\\n\\r\" at the end\n idx += 1\n # \"magically\" compute the data offset\n try:\n self._offset_auto = ord(header[2]) + 1856\n except:\n self._offset_auto = header[2] + 1856\n\n\n\n header = header[:self._offset_auto+300] # add an extra random header for offset\n header = re.sub(r'(?P<section>\\[[^\\]]+\\])', '\\n\\g<section>', header.decode('latin1'))\n header = header.splitlines()[1:]\n self.header = dict([self._header_sect2dict(line) for line in header])\n self.shape = np.array(self.header['Acquisition']['areGRBScan'].split(',')[-2:]).astype(np.int)\n f.close()\n\n offset_list = {'auto': self._offset_auto,\n 'from_end': -np.prod(self.shape)*self._nbytes,\n 'from_end_4k': - np.prod(self.shape)*self._nbytes - 4092}\n\n if self._offset_input in offset_list:\n\n self._offset_data = offset_list[self._offset_input]\n if self._offset_input.startswith('from_end'):\n # set the flag to seek from the end of the file.\n self._offset_whence = 2\n elif type(self._offset_input) is int:\n self._offset_data = self._offset_input\n else:\n raise ValueError\n\n \n\n return self.header",
"def increment_year(self):",
"def get_value( self, header, rowIndex ):\n return self.data[rowIndex, self.header2col[header]]",
"def buildStatsTableHeader(self, table):\n heading = table.thead.tr\n heading.th('No')\n heading.th('Begin probe')\n heading.th('End probe')\n heading.th('Min')\n heading.th('Max')\n heading.th('Median')\n heading.th('Mean')\n heading.th('{}%'.format(self.percentile1))\n heading.th('{}%'.format(self.percentile2))\n heading.th('Standard Deviation')"
]
| [
"0.5644471",
"0.53534263",
"0.5230461",
"0.5187199",
"0.51704454",
"0.51178247",
"0.5092591",
"0.5007937",
"0.5000446",
"0.48899803",
"0.48895955",
"0.4858776",
"0.48426914",
"0.4825437",
"0.48032025",
"0.47941107",
"0.47811443",
"0.47728986",
"0.47709316",
"0.476204",
"0.4761511",
"0.47304717",
"0.4706379",
"0.46898216",
"0.46407717",
"0.4635786",
"0.46322954",
"0.4626491",
"0.4621572",
"0.4611204"
]
| 0.6459912 | 0 |
Convert the string representation of a quarter to an int. | def get_int_quarter(p_qtr:str, logger:lg.Logger = None) -> int:
if logger:
logger.debug(F"quarter to convert = {p_qtr}")
msg = "Input MUST be a String of 0..4!"
if not p_qtr.isnumeric() or len(p_qtr) != 1:
if logger:
c_frame = inspect.currentframe().f_back
logger.error(msg, c_frame)
raise Exception(msg)
int_qtr = int( float(p_qtr) )
if int_qtr > MAX_QUARTER or int_qtr < MIN_QUARTER:
if logger:
c_frame = inspect.currentframe().f_back
logger.error(msg, c_frame)
raise Exception(msg)
return int_qtr | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _quarter_number(self, dategiven):\n return RetailDate(dategiven).quarter",
"def dec2int(r: str) -> int:",
"def quarter(self) -> Index:\n warnings.warn(\n \"`quarter` will return int32 index instead of int 64 index in 4.0.0.\",\n FutureWarning,\n )\n return Index(self.to_series().dt.quarter)",
"def __rank_from_str_to_int(rank: str) -> int:\n return int(rank) - 1",
"def quarter_from_month(month: int) -> int:\n return ((month - 1) // 3) + 1",
"def get_quarter(self):\n\n self.quarter_nums = cal_data.calc_quarter(self.active_date[2],\n self.active_date[1])\n self.quarter = cal_data.get_quarter(self.active_date[2],\n self.active_date[1])",
"def _converter(self,string_representation):\n assert len(string_representation) == 1\n\n hash_dic = {'T':10,'J':11,'Q':12,'K':13,'A':14}\n\n try:\n integer_representation=int(string_representation)\n except:\n integer_representation=hash_dic[string_representation]\n\n return integer_representation",
"def _(primitive_type: PrimitiveType, value_str: str) -> int:\n _, _, exponent = Decimal(value_str).as_tuple()\n if exponent != 0: # Raise if there are digits to the right of the decimal\n raise ValueError(f\"Cannot convert partition value, value cannot have fractional digits for {primitive_type} partition\")\n return int(float(value_str))",
"def _str_to_int(in_str):\n if in_str == '':\n return 0\n return int(in_str, 10)",
"def get_fiscal_quarter(fiscal_reporting_period):\n if fiscal_reporting_period in [1, 2, 3]:\n return 1\n elif fiscal_reporting_period in [4, 5, 6]:\n return 2\n elif fiscal_reporting_period in [7, 8, 9]:\n return 3\n elif fiscal_reporting_period in [10, 11, 12]:\n return 4",
"def __int__(self):\n return int(self.q[0])",
"def __file_from_str_to_int(rank: str) -> int:\n # Warning, my own, not very well tested implementation of base26 converter\n values = []\n for letter in rank:\n values.append(ascii_lowercase.index(letter.lower()))\n index_value = 0\n counter = 0\n for value in reversed(values):\n if counter < 1:\n index_value += value\n else:\n index_value += (value * 26) ** counter\n counter += 1\n return index_value",
"def string_id_to_integer(front_type_string):\n\n check_front_type(front_type_string)\n if front_type_string == WARM_FRONT_STRING_ID:\n return WARM_FRONT_INTEGER_ID\n\n return COLD_FRONT_INTEGER_ID",
"def quote2int(self, float_number):\r\n return int(round(float_number * self.mult_quote))",
"def getInt(string, radix, needHexPrefix):\n return (0)",
"def to_int(s: str) -> int:\n try:\n return int(s.replace('_', ''))\n except ValueError:\n return int(ast.literal_eval(s))",
"def get_year(string): \n return int(string[11:15])",
"def perc_str_to_int(string: str) -> int:\n match = re.search(r\"\\((\\d+)%\\)$\", string)\n if match:\n return int(match.group(1))\n raise ValueError(\"Cannot find percentage in table\")",
"def get_int_year(target_year:str, base_year:int, logger:lg.Logger = None) -> int:\n if logger:\n logger.debug(F\"year = {target_year}; base year = {base_year}\")\n\n if not( target_year.isnumeric() and len(target_year) == 4 ):\n msg = \"Input MUST be the String representation of a RECENT year, e.g. '2013'!\"\n if logger:\n c_frame = inspect.currentframe().f_back\n logger.error(msg, c_frame)\n raise Exception(msg)\n\n int_year = int( float(target_year) )\n if int_year > now_dt.year or int_year < base_year:\n msg = F\"Input MUST be a Year between {base_year} and {now_dt.year}!\"\n if logger:\n c_frame = inspect.currentframe().f_back\n logger.error(msg, c_frame)\n raise Exception(msg)\n\n return int_year",
"def getNumFromString(self, string):\n \n m = re.search(r'\\d+$', string)\n if m is not None:\n return int(m.group())\n else:\n return 0",
"def _to_int(self, int_or_card):\r\n if isinstance(int_or_card, Card):\r\n return int_or_card.rank\r\n return int_or_card",
"def getResolution(s) -> int:\n unit = getDurationUnit(s)\n #number of ticks is 1 / unit (if that is an integer)\n ticksPerQuarter = unit.denominator / unit.numerator\n if ticksPerQuarter.is_integer():\n return int(unit.denominator / unit.numerator)\n else:\n print(s.filePath, ' non integer number of ticks per Quarter')\n return 0",
"def _intTime(tStr):\n return int(float(tStr))",
"def value_from_str(self, s):\n try:\n return int(s)\n except ValueError:\n return super().value_from_str(s)",
"def hex2int(r: str) -> int:",
"def to_int(param, in_str):\n try:\n return int(in_str)\n except ValueError:\n return exit_msg(f\"Bad Request: Wrong type, expected 'int' for parameter '{param}'\")",
"def iris_quarter_name(self):\n return self._iris_quarter_name",
"def next_quarter_start(start_year:int, start_month:int, logger:lg.Logger = None) -> (int, int):\n if logger:\n logger.debug(F\"start year = {start_year}; start month = {start_month}\")\n # add number of months for a Quarter\n next_month = start_month + QTR_MONTHS\n # use integer division to find out if the new end month is in a different year,\n # what year it is, and what the end month number should be changed to.\n next_year = start_year + ( (next_month - 1) // YEAR_MONTHS )\n next_month = ( (next_month - 1) % YEAR_MONTHS ) + 1\n\n return next_year, next_month",
"def _convert_to_integer(srs, d):\n return srs.map(lambda x: d[x])",
"def as_int(self):\n number = 0\n n = 1\n for i in reversed(self.qubit_values):\n number += n*i\n n = n << 1\n return number"
]
| [
"0.64421225",
"0.6198249",
"0.56812286",
"0.5672642",
"0.5546351",
"0.5473638",
"0.54137665",
"0.5393882",
"0.5266237",
"0.52620816",
"0.51355624",
"0.5131029",
"0.50740594",
"0.50709116",
"0.5026049",
"0.49898073",
"0.49762765",
"0.49730146",
"0.49620914",
"0.4958292",
"0.49431697",
"0.49395707",
"0.49368998",
"0.49268803",
"0.4905148",
"0.49047375",
"0.4894815",
"0.48540112",
"0.48496035",
"0.48036414"
]
| 0.81801647 | 0 |
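A minimal usage sketch for the get_int_quarter document above. It assumes the module-level constants MIN_QUARTER = 0 and MAX_QUARTER = 4 and passes no logger; those assumed values are illustrative, not part of the record.
# Illustrative only: assumes MIN_QUARTER = 0 and MAX_QUARTER = 4 in the module.
qtr = get_int_quarter("3")        # returns 3
try:
    get_int_quarter("7")          # 7 > MAX_QUARTER -> raises Exception
except Exception as err:
    print(err)                    # "Input MUST be a String of 0..4!"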
Get the year and month that start the FOLLOWING quarter. | def next_quarter_start(start_year:int, start_month:int, logger:lg.Logger = None) -> (int, int):
if logger:
logger.debug(F"start year = {start_year}; start month = {start_month}")
# add number of months for a Quarter
next_month = start_month + QTR_MONTHS
# use integer division to find out if the new end month is in a different year,
# what year it is, and what the end month number should be changed to.
next_year = start_year + ( (next_month - 1) // YEAR_MONTHS )
next_month = ( (next_month - 1) % YEAR_MONTHS ) + 1
return next_year, next_month | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def prev_quarter_boundaries(now):\n first_of_month = datetime.datetime(now.year, now.month, 1)\n\n # 75 days before the 1st is always in the previous quarter\n date_in_prev_q = first_of_month - datetime.timedelta(days=75)\n\n q_y = date_in_prev_q.year\n q_start_m = int((date_in_prev_q.month-1) / 3)*3 + 1\n q_end_m = q_start_m + 2\n q_end_d = calendar.monthrange(q_y, q_end_m)[1]\n\n s_d = datetime.datetime(q_y, q_start_m, 1)\n e_d = datetime.datetime(q_y, q_end_m, q_end_d)\n\n return s_d, e_d",
"def get_quarter_start(x: Optional[Date] = None) -> Date:\n asof = x or get_today()\n return asof.replace(month=(asof.month - 1) // 3 * 3 + 1, day=1)",
"def current_quarter_end(start_year:int, start_month:int, logger:lg.Logger = None) -> date:\n if logger:\n logger.info(F\"start year = {start_year}; start month = {start_month}\")\n end_year, end_month = next_quarter_start(start_year, start_month)\n # end date is one day back from the start of the next period\n return date(end_year, end_month, 1) - ONE_DAY",
"def last_quarter(today):\n quarter_date = today - relativedelta(months=1)\n while quarter_date.month % 3 != 0:\n quarter_date = quarter_date - relativedelta(months=1)\n return quarter_date.year, int(quarter_date.month / 3)",
"def quarter_from_month(month: int) -> int:\n return ((month - 1) // 3) + 1",
"def get_quarter_end(x: Optional[Date] = None) -> Date:\n return get_quarter_start(x or get_today()) + relativedelta(months=+3, days=-1)",
"def get_quarter(d, offset=0):\n\n # Get the first day of the following quarter minus one day\n month = (((d.month - 1) / 3 + 1) * 3 + 1) % 12\n # Wrap around the year if necessary\n year = d.year + 1 if month == 1 else d.year\n return (datetime.date(year, month, 1)\n + relativedelta(months=3*offset) # Add quarter offset\n - datetime.timedelta(days=1)) # Subtract one day",
"def get_quarter(self):\n\n self.quarter_nums = cal_data.calc_quarter(self.active_date[2],\n self.active_date[1])\n self.quarter = cal_data.get_quarter(self.active_date[2],\n self.active_date[1])",
"def generate_quarter_boundaries(start_year:int, start_month:int, num_qtrs:int, logger:lg.Logger = None) -> (date, date):\n if logger:\n logger.debug(F\"start year = {start_year}; start month = {start_month}; num quarters = {num_qtrs}\")\n for i in range(num_qtrs):\n yield date(start_year, start_month, 1), current_quarter_end(start_year, start_month)\n start_year, start_month = next_quarter_start(start_year, start_month)",
"def _quarter_number(self, dategiven):\n return RetailDate(dategiven).quarter",
"def test_fourth(self):\n start_date, end_date = get_quarterspan(datetime.date(1980, 10, 1))\n self.assertTrue(start_date < end_date)\n self.assertEqual(start_date, datetime.date(1980, 10, 1))\n self.assertEqual(end_date, datetime.date(1980, 12, 31))\n\n start_date, end_date = get_quarterspan(datetime.date(1980, 10, 1))\n self.assertTrue(start_date < end_date)\n self.assertEqual(start_date, datetime.date(1980, 10, 1))\n self.assertEqual(end_date, datetime.date(1980, 12, 31))\n\n start_date, end_date = get_quarterspan(datetime.date(1980, 12, 31))\n self.assertTrue(start_date < end_date)\n self.assertEqual(start_date, datetime.date(1980, 10, 1))\n self.assertEqual(end_date, datetime.date(1980, 12, 31))",
"def get_quarter_end_stream(x: Optional[Date] = None) -> Iterator[Date]:\n ## Get asof date:\n asof = x or get_today()\n\n ## Get the last quarter end:\n e = get_quarter_start(asof) - TimeDelta(days=1)\n\n ## Yield this:\n yield e\n\n ## Forever:\n while True:\n e = get_month_end(e - relativedelta(months=3))\n yield e",
"def quarter_frame(self) -> None:\n pass",
"def quarter_frame(self) -> None:\n pass",
"def SearchMoonQuarter(startTime):\n angle = MoonPhase(startTime)\n quarter = int(1 + math.floor(angle / 90.0)) % 4\n time = SearchMoonPhase(90.0 * quarter, startTime, 10.0)\n if time is None:\n # The search should never fail. We should always find another lunar quarter.\n raise InternalError()\n return MoonQuarter(quarter, time)",
"def test_third(self):\n start_date, end_date = get_quarterspan(datetime.date(1980, 7, 1))\n self.assertTrue(start_date < end_date)\n self.assertEqual(start_date, datetime.date(1980, 7, 1))\n self.assertEqual(end_date, datetime.date(1980, 9, 30))\n\n start_date, end_date = get_quarterspan(datetime.date(1980, 8, 4))\n self.assertTrue(start_date < end_date)\n self.assertEqual(start_date, datetime.date(1980, 7, 1))\n self.assertEqual(end_date, datetime.date(1980, 9, 30))\n\n start_date, end_date = get_quarterspan(datetime.date(1980, 9, 30))\n self.assertTrue(start_date < end_date)\n self.assertEqual(start_date, datetime.date(1980, 7, 1))\n self.assertEqual(end_date, datetime.date(1980, 9, 30))",
"def quarter(self) -> Index:\n warnings.warn(\n \"`quarter` will return int32 index instead of int 64 index in 4.0.0.\",\n FutureWarning,\n )\n return Index(self.to_series().dt.quarter)",
"def NextMoonQuarter(mq):\n # Skip 6 days past the previous found moon quarter to find the next one.\n # This is less than the minimum possible increment.\n # So far I have seen the interval well contained by the range (6.5, 8.3) days.\n time = mq.time.AddDays(6.0)\n next_mq = SearchMoonQuarter(time)\n # Verify that we found the expected moon quarter.\n if next_mq.quarter != (1 + mq.quarter) % 4:\n raise InternalError()\n return next_mq",
"def is_quarter_start(self) -> Index:\n return Index(self.to_series().dt.is_quarter_start)",
"def test_first(self):\n start_date, end_date = get_quarterspan(datetime.date(1980, 1, 1))\n self.assertTrue(start_date < end_date)\n self.assertEqual(start_date, datetime.date(1980, 1, 1))\n self.assertEqual(end_date, datetime.date(1980, 3, 31))\n\n start_date, end_date = get_quarterspan(datetime.date(1980, 2, 29))\n self.assertTrue(start_date < end_date)\n self.assertEqual(start_date, datetime.date(1980, 1, 1))\n self.assertEqual(end_date, datetime.date(1980, 3, 31))\n\n start_date, end_date = get_quarterspan(datetime.date(1980, 3, 31))\n self.assertTrue(start_date < end_date)\n self.assertEqual(start_date, datetime.date(1980, 1, 1))\n self.assertEqual(end_date, datetime.date(1980, 3, 31))",
"def get_fiscal_quarter(fiscal_reporting_period):\n if fiscal_reporting_period in [1, 2, 3]:\n return 1\n elif fiscal_reporting_period in [4, 5, 6]:\n return 2\n elif fiscal_reporting_period in [7, 8, 9]:\n return 3\n elif fiscal_reporting_period in [10, 11, 12]:\n return 4",
"def get_year_half_end(x: Optional[Date] = None) -> Date:\n return get_year_half_start(x or get_today()) + relativedelta(months=+6, days=-1)",
"def get_year_half_start(x: Optional[Date] = None) -> Date:\n asof = x or get_today()\n return asof.replace(month=((asof.month - 1) // 6) * 6 + 1, day=1)",
"def get_steps_per_quarter():\n return FLAGS.steps_per_quarter",
"def extract_and_append_year_month_quarter(self, data_frame, column_name):\n columns = self.extract_year_month_quarter(data_frame[column_name])\n periods_df = pd.DataFrame({\"Year\":columns[0],\"Monthly\": columns[1], \"Quarterly\": columns[2]})\n return pd.concat([data_frame.reset_index(), periods_df], axis='columns', join=\"inner\")",
"def test_fix_year_month_next(self):\n # 13 - 10 = next query of 3\n year, month, error = clean_year_month(2014, 13, 10)\n self.assertEqual(year, 2015)\n self.assertEqual(month, 1)\n self.assertEqual(error, False)",
"def test_all(self):\n\n # year = 1980 #unused\n date = datetime.date(1980, 1, 1)\n while date < datetime.date(1981, 1, 1):\n if date.month <= 3:\n mindate, maxdate = datetime.date(1980, 1, 1), datetime.date(1980, 3, 31)\n elif date.month <= 6:\n mindate, maxdate = datetime.date(1980, 4, 1), datetime.date(1980, 6, 30)\n elif date.month <= 9:\n mindate, maxdate = datetime.date(1980, 7, 1), datetime.date(1980, 9, 30)\n else:\n mindate, maxdate = datetime.date(1980, 10, 1), datetime.date(1980, 12, 31)\n\n startdate, enddate = get_quarterspan(date)\n self.assertTrue(startdate >= mindate)\n self.assertTrue(startdate <= maxdate)\n self.assertTrue(enddate >= mindate)\n self.assertTrue(enddate <= maxdate)\n\n date += datetime.timedelta(days=1)",
"def quarter_to_months(when):\n today = datetime.datetime.utcnow().date()\n if when.isdigit() and 2008 <= int(when) <= today.year:\n # Select the whole year.\n year = int(when)\n if year == today.year:\n out = ['%04d-%02d' % (year, i + 1) for i in range(today.month)]\n else:\n out = ['%04d-%02d' % (year, i + 1) for i in range(12)]\n else:\n quarter = re.match(r'^(\\d\\d\\d\\d-)[qQ]([1-4])$', when)\n if not quarter:\n return None\n prefix = quarter.group(1)\n # Convert the quarter into 3 months group.\n base = (int(quarter.group(2)) - 1) * 3 + 1\n out = ['%s%02d' % (prefix, i) for i in range(base, base+3)]\n\n logging.info('Expanded to %s' % ', '.join(out))\n return out",
"def is_quarter_end(self) -> Index:\n return Index(self.to_series().dt.is_quarter_end)",
"def get_current_period(self):\n if not self.next_billing:\n return None\n assert self.type == \"N\", _(\"Subscription must be normal to use this method\")\n start = self.next_billing - relativedelta(months=self.frequency)\n end = self.next_billing\n return start, end"
]
| [
"0.7338803",
"0.7215223",
"0.7130429",
"0.7078758",
"0.6997591",
"0.690662",
"0.6612257",
"0.6464504",
"0.6396317",
"0.6276966",
"0.6186094",
"0.6084192",
"0.60228586",
"0.60228586",
"0.59833634",
"0.5969586",
"0.5969524",
"0.5858372",
"0.5796026",
"0.57202613",
"0.5689967",
"0.5675721",
"0.56057197",
"0.56031215",
"0.55891424",
"0.55702984",
"0.543084",
"0.5421336",
"0.539858",
"0.5374906"
]
| 0.73292184 | 1 |
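A quick hedged check of next_quarter_start from the record above, assuming the module constants QTR_MONTHS = 3 and YEAR_MONTHS = 12; the printed tuples follow from the arithmetic in the document, not from the dataset.
# Assumes QTR_MONTHS = 3 and YEAR_MONTHS = 12.
print(next_quarter_start(2019, 4))     # (2019, 7)  - stays in the same year
print(next_quarter_start(2019, 11))    # (2020, 2)  - rolls over the year boundary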
Get the date that ends the CURRENT quarter. | def current_quarter_end(start_year:int, start_month:int, logger:lg.Logger = None) -> date:
if logger:
logger.info(F"start year = {start_year}; start month = {start_month}")
end_year, end_month = next_quarter_start(start_year, start_month)
# end date is one day back from the start of the next period
return date(end_year, end_month, 1) - ONE_DAY | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_quarter_end(x: Optional[Date] = None) -> Date:\n return get_quarter_start(x or get_today()) + relativedelta(months=+3, days=-1)",
"def last_quarter(today):\n quarter_date = today - relativedelta(months=1)\n while quarter_date.month % 3 != 0:\n quarter_date = quarter_date - relativedelta(months=1)\n return quarter_date.year, int(quarter_date.month / 3)",
"def is_quarter_end(self) -> Index:\n return Index(self.to_series().dt.is_quarter_end)",
"def get_quarter_start(x: Optional[Date] = None) -> Date:\n asof = x or get_today()\n return asof.replace(month=(asof.month - 1) // 3 * 3 + 1, day=1)",
"def get_quarter(self):\n\n self.quarter_nums = cal_data.calc_quarter(self.active_date[2],\n self.active_date[1])\n self.quarter = cal_data.get_quarter(self.active_date[2],\n self.active_date[1])",
"def get_quarter_end_stream(x: Optional[Date] = None) -> Iterator[Date]:\n ## Get asof date:\n asof = x or get_today()\n\n ## Get the last quarter end:\n e = get_quarter_start(asof) - TimeDelta(days=1)\n\n ## Yield this:\n yield e\n\n ## Forever:\n while True:\n e = get_month_end(e - relativedelta(months=3))\n yield e",
"def prev_quarter_boundaries(now):\n first_of_month = datetime.datetime(now.year, now.month, 1)\n\n # 75 days before the 1st is always in the previous quarter\n date_in_prev_q = first_of_month - datetime.timedelta(days=75)\n\n q_y = date_in_prev_q.year\n q_start_m = int((date_in_prev_q.month-1) / 3)*3 + 1\n q_end_m = q_start_m + 2\n q_end_d = calendar.monthrange(q_y, q_end_m)[1]\n\n s_d = datetime.datetime(q_y, q_start_m, 1)\n e_d = datetime.datetime(q_y, q_end_m, q_end_d)\n\n return s_d, e_d",
"def get_quarter(d, offset=0):\n\n # Get the first day of the following quarter minus one day\n month = (((d.month - 1) / 3 + 1) * 3 + 1) % 12\n # Wrap around the year if necessary\n year = d.year + 1 if month == 1 else d.year\n return (datetime.date(year, month, 1)\n + relativedelta(months=3*offset) # Add quarter offset\n - datetime.timedelta(days=1)) # Subtract one day",
"def _quarter_number(self, dategiven):\n return RetailDate(dategiven).quarter",
"def quarter(self) -> Index:\n warnings.warn(\n \"`quarter` will return int32 index instead of int 64 index in 4.0.0.\",\n FutureWarning,\n )\n return Index(self.to_series().dt.quarter)",
"def qToday():\n \n return _qDate.todaysDate().ISO()",
"def quarter_frame(self) -> None:\n pass",
"def quarter_frame(self) -> None:\n pass",
"def end_date(self) -> str:\n return pulumi.get(self, \"end_date\")",
"def end_date(self) -> str:\n return pulumi.get(self, \"end_date\")",
"def computed_enddate(self):\n if self.enddate:\n # you need to add a day to enddate if your dates are meant to be inclusive\n offset = datetime.timedelta(days=1 if self.inclusive else 0)\n return (self.enddate + offset)",
"def is_quarter_start(self) -> Index:\n return Index(self.to_series().dt.is_quarter_start)",
"def get_today_end():\n return datetime.combine(datetime.today(), time.max)",
"def end_date(self) -> Optional[str]:\n return pulumi.get(self, \"end_date\")",
"def end_date(self) -> Optional[str]:\n return pulumi.get(self, \"end_date\")",
"def get_current_period(self):\n if not self.next_billing:\n return None\n assert self.type == \"N\", _(\"Subscription must be normal to use this method\")\n start = self.next_billing - relativedelta(months=self.frequency)\n end = self.next_billing\n return start, end",
"def end1(self):\n return self.ddmmyyyy(self.rowTime.end)",
"def end_date(self):\n return self.__end_date",
"def test_fourth(self):\n start_date, end_date = get_quarterspan(datetime.date(1980, 10, 1))\n self.assertTrue(start_date < end_date)\n self.assertEqual(start_date, datetime.date(1980, 10, 1))\n self.assertEqual(end_date, datetime.date(1980, 12, 31))\n\n start_date, end_date = get_quarterspan(datetime.date(1980, 10, 1))\n self.assertTrue(start_date < end_date)\n self.assertEqual(start_date, datetime.date(1980, 10, 1))\n self.assertEqual(end_date, datetime.date(1980, 12, 31))\n\n start_date, end_date = get_quarterspan(datetime.date(1980, 12, 31))\n self.assertTrue(start_date < end_date)\n self.assertEqual(start_date, datetime.date(1980, 10, 1))\n self.assertEqual(end_date, datetime.date(1980, 12, 31))",
"def get_end_date(self, milestone):\n\n if milestone.due and milestone.due.date() < date.today():\n return milestone.due.date()\n # else we take yesterday to be the end date point for the x-axis\n return date.today() - timedelta(days=1)",
"def end_date(self):\n return self._end_date",
"def end_date(self):\n return self._end_date",
"def end_date(self):\n return self._end_date",
"def get_year_half_end(x: Optional[Date] = None) -> Date:\n return get_year_half_start(x or get_today()) + relativedelta(months=+6, days=-1)",
"def NextMoonQuarter(mq):\n # Skip 6 days past the previous found moon quarter to find the next one.\n # This is less than the minimum possible increment.\n # So far I have seen the interval well contained by the range (6.5, 8.3) days.\n time = mq.time.AddDays(6.0)\n next_mq = SearchMoonQuarter(time)\n # Verify that we found the expected moon quarter.\n if next_mq.quarter != (1 + mq.quarter) % 4:\n raise InternalError()\n return next_mq"
]
| [
"0.8170659",
"0.7075324",
"0.6908958",
"0.6836984",
"0.66845727",
"0.6497959",
"0.64169747",
"0.62825865",
"0.62558955",
"0.6109868",
"0.6003702",
"0.60028976",
"0.60028976",
"0.5731196",
"0.5731196",
"0.57094854",
"0.57011384",
"0.5657141",
"0.5642791",
"0.5642791",
"0.55984735",
"0.55678755",
"0.5563913",
"0.55081457",
"0.54892117",
"0.54786634",
"0.54786634",
"0.54786634",
"0.5462743",
"0.5445202"
]
| 0.81834036 | 0 |
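A short illustrative call of current_quarter_end from the record above, assuming ONE_DAY = timedelta(days=1) plus the same QTR_MONTHS/YEAR_MONTHS constants used by next_quarter_start; the dates shown are derived from the code, not taken from the record.
from datetime import timedelta
ONE_DAY = timedelta(days=1)                  # assumed module constant
print(current_quarter_end(2019, 1))          # 2019-03-31
print(current_quarter_end(2019, 11))         # 2020-01-31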
Generate the start and end dates for the quarters in the submitted range. | def generate_quarter_boundaries(start_year:int, start_month:int, num_qtrs:int, logger:lg.Logger = None) -> (date, date):
if logger:
logger.debug(F"start year = {start_year}; start month = {start_month}; num quarters = {num_qtrs}")
for i in range(num_qtrs):
yield date(start_year, start_month, 1), current_quarter_end(start_year, start_month)
start_year, start_month = next_quarter_start(start_year, start_month) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _recompute(self):\n current_date = self.start_date\n self.quarterly_date_list = []\n self.daily_date_list = []\n while current_date <= self.end_date:\n current_quarter = get_quarter(current_date)\n current_year = current_date.year\n next_year, next_quarter = add_quarter(current_year, current_quarter)\n next_start_quarter_date = date(next_year, get_month(next_quarter),\n 1)\n\n days_till_next_quarter = (next_start_quarter_date -\n current_date).days\n days_till_end = (self.end_date - current_date).days\n if days_till_next_quarter <= days_till_end:\n current_start_quarter_date = date(current_year,\n get_month(current_quarter), 1)\n if current_start_quarter_date == current_date:\n self.quarterly_date_list.append(\n (current_year, current_quarter, lambda x: True))\n current_date = next_start_quarter_date\n elif days_till_next_quarter > self.balancing_point:\n self.quarterly_date_list.append(\n (current_year, current_quarter,\n lambda x: date(x['date_filed']) >= self.start_date))\n current_date = next_start_quarter_date\n else:\n while current_date < next_start_quarter_date:\n self.daily_date_list.append(current_date)\n current_date += timedelta(days=1)\n else:\n if days_till_end > self.balancing_point:\n if days_till_next_quarter - 1 == days_till_end:\n self.quarterly_date_list.append(\n (current_year, current_quarter, lambda x: True))\n current_date = next_start_quarter_date\n else:\n self.quarterly_date_list.append(\n (current_year, current_quarter,\n lambda x: date(x['date_filed']) <= self.end_date))\n current_date = self.end_date\n else:\n while current_date <= self.end_date:\n self.daily_date_list.append(current_date)\n current_date += timedelta(days=1)",
"def test_third(self):\n start_date, end_date = get_quarterspan(datetime.date(1980, 7, 1))\n self.assertTrue(start_date < end_date)\n self.assertEqual(start_date, datetime.date(1980, 7, 1))\n self.assertEqual(end_date, datetime.date(1980, 9, 30))\n\n start_date, end_date = get_quarterspan(datetime.date(1980, 8, 4))\n self.assertTrue(start_date < end_date)\n self.assertEqual(start_date, datetime.date(1980, 7, 1))\n self.assertEqual(end_date, datetime.date(1980, 9, 30))\n\n start_date, end_date = get_quarterspan(datetime.date(1980, 9, 30))\n self.assertTrue(start_date < end_date)\n self.assertEqual(start_date, datetime.date(1980, 7, 1))\n self.assertEqual(end_date, datetime.date(1980, 9, 30))",
"def test_fourth(self):\n start_date, end_date = get_quarterspan(datetime.date(1980, 10, 1))\n self.assertTrue(start_date < end_date)\n self.assertEqual(start_date, datetime.date(1980, 10, 1))\n self.assertEqual(end_date, datetime.date(1980, 12, 31))\n\n start_date, end_date = get_quarterspan(datetime.date(1980, 10, 1))\n self.assertTrue(start_date < end_date)\n self.assertEqual(start_date, datetime.date(1980, 10, 1))\n self.assertEqual(end_date, datetime.date(1980, 12, 31))\n\n start_date, end_date = get_quarterspan(datetime.date(1980, 12, 31))\n self.assertTrue(start_date < end_date)\n self.assertEqual(start_date, datetime.date(1980, 10, 1))\n self.assertEqual(end_date, datetime.date(1980, 12, 31))",
"def test_all(self):\n\n # year = 1980 #unused\n date = datetime.date(1980, 1, 1)\n while date < datetime.date(1981, 1, 1):\n if date.month <= 3:\n mindate, maxdate = datetime.date(1980, 1, 1), datetime.date(1980, 3, 31)\n elif date.month <= 6:\n mindate, maxdate = datetime.date(1980, 4, 1), datetime.date(1980, 6, 30)\n elif date.month <= 9:\n mindate, maxdate = datetime.date(1980, 7, 1), datetime.date(1980, 9, 30)\n else:\n mindate, maxdate = datetime.date(1980, 10, 1), datetime.date(1980, 12, 31)\n\n startdate, enddate = get_quarterspan(date)\n self.assertTrue(startdate >= mindate)\n self.assertTrue(startdate <= maxdate)\n self.assertTrue(enddate >= mindate)\n self.assertTrue(enddate <= maxdate)\n\n date += datetime.timedelta(days=1)",
"def prev_quarter_boundaries(now):\n first_of_month = datetime.datetime(now.year, now.month, 1)\n\n # 75 days before the 1st is always in the previous quarter\n date_in_prev_q = first_of_month - datetime.timedelta(days=75)\n\n q_y = date_in_prev_q.year\n q_start_m = int((date_in_prev_q.month-1) / 3)*3 + 1\n q_end_m = q_start_m + 2\n q_end_d = calendar.monthrange(q_y, q_end_m)[1]\n\n s_d = datetime.datetime(q_y, q_start_m, 1)\n e_d = datetime.datetime(q_y, q_end_m, q_end_d)\n\n return s_d, e_d",
"def update_dates(start_date, end_date, freq):\n if (freq == \"MS\") or (freq == \"M\"):\n try:\n start_date = start_date.split(\"/\")\n end_date = end_date.split(\"/\")\n except AttributeError:\n start_date = [start_date.month, start_date.day, start_date.year]\n end_date = [end_date.month, end_date.day, end_date.year]\n if int(end_date[1]) < 22:\n\n if int(end_date[0]) == 1:\n end_month = 12\n end_year = int(end_date[2]) - 1\n else:\n end_month = int(end_date[0]) - 1\n end_year = end_date[2]\n\n end_date[0] = end_month\n end_date[2] = end_year\n\n start_date = pd.to_datetime(f\"{start_date[0]}/01/{start_date[2]}\")\n\n end_date = pd.to_datetime(\n f\"{end_date[0]}/{calendar.monthrange(int(end_date[2]),int(end_date[0]))[1]}/{end_date[2]}\"\n )\n\n if (freq == \"QS\") or (freq == \"Q\"):\n start_date = (pd.to_datetime(start_date) + pd.tseries.offsets.DateOffset(days=1)) - pd.offsets.QuarterBegin(\n startingMonth=1\n )\n end_date = (pd.to_datetime(end_date) + pd.tseries.offsets.DateOffset(days=1)) - pd.offsets.QuarterEnd()\n\n return (start_date, end_date)",
"def get_quarter(self):\n\n self.quarter_nums = cal_data.calc_quarter(self.active_date[2],\n self.active_date[1])\n self.quarter = cal_data.get_quarter(self.active_date[2],\n self.active_date[1])",
"def test_first(self):\n start_date, end_date = get_quarterspan(datetime.date(1980, 1, 1))\n self.assertTrue(start_date < end_date)\n self.assertEqual(start_date, datetime.date(1980, 1, 1))\n self.assertEqual(end_date, datetime.date(1980, 3, 31))\n\n start_date, end_date = get_quarterspan(datetime.date(1980, 2, 29))\n self.assertTrue(start_date < end_date)\n self.assertEqual(start_date, datetime.date(1980, 1, 1))\n self.assertEqual(end_date, datetime.date(1980, 3, 31))\n\n start_date, end_date = get_quarterspan(datetime.date(1980, 3, 31))\n self.assertTrue(start_date < end_date)\n self.assertEqual(start_date, datetime.date(1980, 1, 1))\n self.assertEqual(end_date, datetime.date(1980, 3, 31))",
"def quarters(self) -> localedata.LocaleDataDict:\n return self._data['quarters']",
"def create_date_list(start_date = start_date, end_date = end_date):",
"def to_stock_dataframe_range(self, start_date=None, end_date=None):\n if end_date is None:\n end_date = self.dates[-2]\n if type(end_date) is pd.tslib.Timestamp:\n end_date = end_date.strftime(\"%Y-%m-%d\")\n if type(end_date) is not datetime.datetime and type(end_date) is not pd.tslib.Timestamp:\n end_date = datetime.datetime.strptime(end_date, \"%Y-%m-%d\")\n try:\n end_date = self.dates[list(self.dates).index(end_date) + 1]\n except:\n end_date = \"Last\"\n if start_date is None:\n start_date = self.dates[0]\n\n if type(start_date) is not datetime.datetime and type(start_date) is not pd.tslib.Timestamp:\n start_date = datetime.datetime.strptime(start_date, \"%Y-%m-%d\")\n\n class_data = [i for i in dir(self) if not callable(getattr(self, i)) and not i.startswith(\"__\") and\n type(getattr(self, i)) is pd.DataFrame]\n df = pd.DataFrame()\n for i in class_data:\n df = join_features(df, getattr(self, i), fill_method=FillMethod.FUTURE_KROGH)\n if end_date is \"Last\":\n print(df.ix[start_date:, :])\n return df.ix[start_date:, :]\n return df.ix[start_date:end_date, :]",
"def to_stock_data_range(self, start_date=None, end_date=None):\n # standardize dates\n if end_date is None:\n end_date = self.dates[-2]\n if type(end_date) is pd.tslib.Timestamp:\n end_date = end_date.strftime(\"%Y-%m-%d\")\n if type(end_date) is not datetime.datetime and type(end_date) is not pd.tslib.Timestamp:\n end_date = datetime.datetime.strptime(end_date, \"%Y-%m-%d\")\n try:\n end_date = self.dates[list(self.dates).index(end_date) + 1]\n except:\n end_date = \"Last\"\n\n if start_date is None:\n start_date = self.dates[0]\n if type(start_date) is not datetime.datetime and type(start_date) is not pd.tslib.Timestamp:\n start_date = datetime.datetime.strptime(start_date, \"%Y-%m-%d\")\n\n if end_date is \"Last\":\n dates = list(self.dates)[list(self.dates).index(start_date):]\n else:\n dates = list(self.dates)[list(self.dates).index(start_date):list(self.dates).index(end_date)]\n\n # find functions to set\n dataframes = [i for i in dir(self) if not callable(getattr(self, i)) and not i.startswith(\"__\")\n and type(getattr(self, i)) is pd.DataFrame]\n dictionaries = [i for i in dir(self) if not callable(getattr(self, i)) and not i.startswith(\"__\")\n and type(getattr(self, i)) is dict]\n constant_values = [i for i in dir(self) if not callable(getattr(self, i)) and not i.startswith(\"__\")\n and getattr(self, i) is not None and i not in dataframes and i not in dictionaries]\n\n # transfer new data\n new_stock_data = StockData()\n\n for i in constant_values:\n setattr(new_stock_data, i, getattr(self, i))\n\n for i in dataframes:\n if end_date is not \"Last\":\n setattr(new_stock_data, i, getattr(self, i).ix[start_date:end_date])\n else:\n setattr(new_stock_data, i, getattr(self, i).ix[start_date:])\n\n for i in dictionaries:\n new_dict = {}\n for d in dates:\n new_dict[d] = getattr(self, i)[d]\n setattr(new_stock_data, i, new_dict)\n\n new_stock_data.dates = dates\n new_stock_data.str_dates = [str(d)[:USEFUL_TIMESTAMP_CHARS] for d in dates]\n\n return new_stock_data",
"def _phase_range(self, change_dates):\n start_dates = [self.dates[0], *change_dates]\n end_dates_without_last = [\n (\n datetime.strptime(date, self.DATE_FORMAT) - timedelta(days=1)\n ).strftime(self.DATE_FORMAT)\n for date in change_dates\n ]\n end_dates = [*end_dates_without_last, self.dates[-1]]\n return (start_dates, end_dates)",
"def date_range(start, end):\n session = Session(engine)\n \n sel = [func.min(measurement.tobs),\n func.max(measurement.tobs),\n func.avg(measurement.tobs)]\n \n range_data = session.query(*sel).\\\n filter(measurement.date >= start).\\\n filter(measurement.date <= end).all()\n \n session.close()\n \n range_x = list(np.ravel(range_data))\n\n return jsonify(range_x)",
"def get_quarterly_report_dates(self, symbol):\n return self.sec_file_ops.get_dates_of_saved_raw_quarterly_financial_statements(symbol)",
"def get_quarter_end_stream(x: Optional[Date] = None) -> Iterator[Date]:\n ## Get asof date:\n asof = x or get_today()\n\n ## Get the last quarter end:\n e = get_quarter_start(asof) - TimeDelta(days=1)\n\n ## Yield this:\n yield e\n\n ## Forever:\n while True:\n e = get_month_end(e - relativedelta(months=3))\n yield e",
"def date_range(self):\n start_date = input(\"Enter a start date in the format DD/MM/YYYY> \")\n end_date = input(\"Enter an end date in the format DD/MM/YYYY> \")\n return start_date, end_date",
"def current_quarter_end(start_year:int, start_month:int, logger:lg.Logger = None) -> date:\n if logger:\n logger.info(F\"start year = {start_year}; start month = {start_month}\")\n end_year, end_month = next_quarter_start(start_year, start_month)\n # end date is one day back from the start of the next period\n return date(end_year, end_month, 1) - ONE_DAY",
"def quandl_stocks(symbol, start_date=(2000, 1, 1), end_date=None):\n\n query_list = ['WIKI' + '/' + symbol + '.' + str(k) for k in range(1, 13)]\n\n start_date = datetime.date(*start_date)\n\n if end_date:\n end_date = datetime.date(*end_date)\n else:\n end_date = datetime.date.today()\n\n return quandl.get(query_list,\n returns='pandas',\n start_date=start_date,\n end_date=end_date,\n collapse='daily',\n order='asc'\n )",
"def _quarter_number(self, dategiven):\n return RetailDate(dategiven).quarter",
"def test_second(self):\n start_date, end_date = get_quarterspan(datetime.date(1980, 4, 1))\n self.assertTrue(start_date < end_date)\n self.assertEqual(start_date, datetime.date(1980, 4, 1))\n self.assertEqual(end_date, datetime.date(1980, 6, 30))\n\n start_date, end_date = get_quarterspan(datetime.date(1980, 5, 4))\n self.assertTrue(start_date < end_date)\n self.assertEqual(start_date, datetime.date(1980, 4, 1))\n self.assertEqual(end_date, datetime.date(1980, 6, 30))\n\n start_date, end_date = get_quarterspan(datetime.date(1980, 6, 30))\n self.assertTrue(start_date < end_date)\n self.assertEqual(start_date, datetime.date(1980, 4, 1))\n self.assertEqual(end_date, datetime.date(1980, 6, 30))",
"def _DateRangeQuery(self, start_date='2007-01-01', end_date='2007-07-01'):\n\n print 'Date range query for events on Primary Calendar: %s to %s' % (\n start_date, end_date,)\n query = gdata.calendar.client.CalendarEventQuery(start_min=start_date, start_max=end_date)\n feed = self.cal_client.GetCalendarEventFeed(q=query)\n for i, an_event in zip(xrange(len(feed.entry)), feed.entry):\n print '\\t%s. %s' % (i, an_event.title.text,)\n for a_when in an_event.when:\n print '\\t\\tStart time: %s' % (a_when.start,)\n print '\\t\\tEnd time: %s' % (a_when.end,)",
"def _split_date_range(start, end, intv):\n previous = start\n diff = (end - start) / intv\n for i in range(1, intv):\n current = start + diff * i\n yield (previous, current)\n previous = current\n yield (previous, end)",
"def date_range(start_date, end_date):\n for ordinal in range(start_date.toordinal(), end_date.toordinal() + 1):\n yield datetime.date.fromordinal(ordinal)",
"def get_stock_data(company, start_date_inc, stop_date_inc):\n\n api_key = 'Bo9P_cJnmf5EsQPp1Bdp'\n desired_cols = 'date,close'\n\n# ticker = 'FB'\n# start_date_inc = '20170801'\n# end_date_inc = '20170831'\n\n # format and send the request\n payload = {\n 'date.gte': start_date_inc,\n 'date.lte': stop_date_inc,\n 'ticker': company,\n 'qopts.columns': desired_cols,\n 'api_key': api_key\n }\n meta_url = r'https://www.quandl.com/api/v3/datatables/WIKI/PRICES'\n r = requests.get(meta_url, params=payload)\n\n # convert to a pandas dataframe\n df = pd.DataFrame(r.json()['datatable']['data'])\n if not df.empty:\n df.columns = ['date', 'price']\n df['date'] = pd.to_datetime(df['date'])\n\n return df",
"def test_range():\n begin_date = datetime.datetime(2000, 1, 1)\n end_date = datetime.datetime.today()\n\n if os.path.isfile(\"spy_price_cache_\" + str(datetime.date.today()) + \".csv\"):\n dates_available = pickle.load(open(\"spy_price_cache_\" + str(datetime.date.today()) + \".csv\", \"r\"))\n else:\n prices_available = yahoo.webload_symbol_price(\"SPY\", begin_date, end_date)\n dates_available = set(timestamp.to_pydatetime() for timestamp in prices_available.index.tolist())\n pickle.dump(dates_available, open(\"spy_price_cache_\" + str(datetime.date.today()) + \".csv\", \"w\"))\n\n dates_expected = set([day for day in itertools.takewhile(\n lambda d: d <= end_date,\n CALENDAR.every_nth_between(begin_date, end_date, 1)\n )])\n\n dates_misaligned = dates_available.symmetric_difference(dates_expected)\n\n assert len(dates_misaligned) == 0",
"def prepareQuery(self, qid):\r\n \r\n connection = self.getConnection()\r\n cursor = connection.cursor()\r\n\r\n if self.granularity == 'day':\r\n extractTime = \"TO_CHAR(t.START_DATE, 'yyyy,mm,dd'), TO_CHAR(t.END_DATE, 'yyyy,mm,dd')\"\r\n elif self.granularity == 'year':\r\n extractTime = \"EXTRACT(YEAR FROM t.START_DATE), EXTRACT(YEAR FROM t.END_DATE)\"\r\n \r\n cursor.execute(\"SELECT t.TYPE, t.GEOMETRY.Get_WKT(), \" + extractTime + \",\" + \\\r\n\"t.DATE_TYPE, t.Z_MIN, t.Z_MAX FROM \" + self.queriesTable + \"\"\" t \r\nWHERE id = \"\"\" + qid + \"\"\" AND dataset = '\"\"\" + self.dataset.lower() + \"'\")\r\n\r\n self.qtype, self.wkt, self.start_date, self.end_date, self.timeType, self.ozmin, self.ozmax = cursor.fetchall()[0]\r\n\r\n if self.wkt is not None:\r\n self.wkt = str(self.wkt)\r\n connection.close()\r\n \r\n # Setting up the missing variables along with transformations to the time encoding. \r\n if self.granularity == 'day':\r\n if self.start_date is None and self.end_date is None:\r\n times = [[self.mint * self.scale, self.maxt * self.scale]]\r\n elif self.start_date is not None and self.end_date is not None:\r\n self.start_date = map(int, self.start_date.split(','))\r\n self.end_date = map(int, self.end_date.split(','))\r\n times = [[reader.daySinceEpoch(self.start_date[0], \r\n self.start_date[1], self.start_date[2]) * self.scale, \r\n reader.daySinceEpoch(self.end_date[0], \r\n self.end_date[1], self.end_date[2]) * self.scale]]\r\n elif self.end_date is None:\r\n self.start_date = map(int, self.start_date.split(','))\r\n times = [[reader.daySinceEpoch(self.start_date[0], self.start_date[1], self.start_date[2]) * self.scale, None]]\r\n else:\r\n if self.start_date is None and self.end_date is None:\r\n times = [[self.mint * self.scale, self.maxt * self.scale]]\r\n elif self.start_date is not None and self.end_date is not None:\r\n times = [[self.start_date * self.scale, self.end_date * self.scale]]\r\n elif self.end_date is None:\r\n times = [[self.start_date * self.scale, None]]\r\n\r\n if self.ozmin is None or self.ozmax is None: #no selectivity on z\r\n zmin = int(round((self.minz - self.offz)/self.scalez, 0))\r\n zmax = int(round((self.maxz - self.offz)/self.scalez, 0))\r\n else:\r\n zmin = int(round((self.ozmin - self.offz)/self.scalez, 0))\r\n zmax = int(round((self.ozmax - self.offz)/self.scalez, 0))\r\n\r\n # Preparing the different types of queries: Space and space - time\r\n continuous = True\r\n if self.wkt:\r\n if self.qtype.replace(' ', '').lower() != 'nn-search':\r\n ordinates = list(loads(self.wkt).exterior.coords)\r\n else:\r\n ordinates = list(loads(self.wkt).coords)\r\n \r\n if self.case == 1: #lxyt\r\n geometry = Polygon(self.list2ScaleOffset(ordinates)).wkt\r\n if self.qtype.lower() == 'space':\r\n coarser = self.params[0] #0, 0\r\n else:\r\n coarser = self.params[1] #4, 4\r\n \r\n elif self.case == 2: #lxyzt\r\n geometry = Polygon3D(Polygon(self.list2ScaleOffset(ordinates)), zmin, zmax)\r\n\r\n if self.qtype.lower() == 'space':\r\n coarser = self.params[2] #4, 4\r\n else:\r\n coarser = self.params[3] #3, 3\r\n\r\n elif self.case == 3: #dxyt\r\n geom = Polygon(self.list2ScaleOffset(ordinates)) \r\n if times[0][1] is None:\r\n continuous = False\r\n times[0][1] = times[0][0]\r\n coarser = self.params[4] #1, 8\r\n elif self.qtype.lower() == 'space':\r\n if times[0][0] == times[0][1]:\r\n continuous = False\r\n coarser = self.params[5] #-2, 1\r\n else:\r\n coarser = self.params[5] - 7\r\n elif self.timeType == 'continuous':\r\n coarser = self.params[6] #0, 
2\r\n elif self.timeType == 'discrete':\r\n coarser = self.params[7] #3, 8\r\n \r\n if self.timeType == 'discrete' and (self.start_date is not None) and (self.end_date is not None):\r\n geometry = [dynamicPolygon(geom, times[0][0], times[0][0]),\r\n dynamicPolygon(geom, times[0][1], times[0][1])]\r\n else:\r\n geometry = dynamicPolygon(geom, times[0][0], times[0][1]) \r\n \r\n elif self.case == 4: #dxyzt\r\n geom = Polygon(self.list2ScaleOffset(ordinates))\r\n if times[0][1] == None:\r\n continuous = False\r\n coarser = self.params[8] #4, 9\r\n times[0][1] = times[0][0]\r\n elif self.qtype.lower() == 'space':\r\n if times[0][0] == times[0][1]:\r\n coarser = self.params[9] #0, 2\r\n else:\r\n coarser = self.params[9] - 4\r\n elif self.timeType == 'continuous':\r\n coarser = self.params[10] #0, 2\r\n elif self.timeType == 'discrete':\r\n coarser = self.params[11] #4, 9\r\n \r\n if self.timeType == 'discrete' and self.start_date is not None and self.end_date is not None:\r\n geometry = [Polygon4D(geom, zmin, zmax, times[0][0], times[0][0]),\r\n Polygon4D(geom, zmin, zmax, times[0][1], times[0][1])]\r\n else:\r\n geometry = Polygon4D(geom, zmin, zmax, times[0][0], times[0][1])\r\n \r\n else: #time queries\r\n if self.case == 1:\r\n geometry = []\r\n \r\n elif self.case == 2:\r\n geometry = []\r\n \r\n elif self.case == 3:\r\n temp_geom = self.list2ScaleOffset([(self.minx, self.miny), (self.maxx, self.maxy)])\r\n geom = box(temp_geom[0][0], temp_geom[0][1], temp_geom[1][0], temp_geom[1][1])\r\n \r\n if times[0][1] is None:\r\n times[0][1] = times[0][0]\r\n coarser = self.params[12] #3, 7\r\n continuous = False\r\n elif self.timeType == 'continuous':\r\n coarser = self.params[13] #0, 3\r\n else:\r\n coarser = self.params[14] #3, 8\r\n \r\n if self.timeType == 'discrete' and self.start_date is not None and self.end_date is not None:\r\n geometry = [dynamicPolygon(geom, times[0][0], times[0][0]),\r\n dynamicPolygon(geom, times[0][1], times[0][1])]\r\n else:\r\n geometry = dynamicPolygon(geom, times[0][0], times[0][1])\r\n\r\n elif self.case == 4:\r\n temp_geom = self.list2ScaleOffset([(self.minx, self.miny),(self.maxx, self.maxy)])\r\n geom = box(temp_geom[0][0], temp_geom[0][1], temp_geom[1][0], temp_geom[1][1])\r\n if times[0][1] is None:\r\n times[0][1] = times[0][0]\r\n coarser = self.params[15] #4, 12\r\n continuous = False\r\n elif self.timeType == 'continuous':\r\n coarser = self.params[16] #1, 3\r\n elif self.timeType == 'discrete':\r\n coarser = self.params[17] #4, 11\r\n \r\n if self.timeType == 'discrete' and self.start_date is not None and self.end_date is not None:\r\n geometry = [Polygon4D(geom, zmin, zmax, times[0][0], times[0][0]),\r\n Polygon4D(geom, zmin, zmax, times[0][1], times[0][1])]\r\n else: \r\n geometry = Polygon4D(geom, zmin, zmax, times[0][0], times[0][1])\r\n \r\n\r\n \"\"\"The final lines have to do with the way of posing the query to the \r\n database. Two options are possible:\r\n (a) sql: A SQL query is posed to the database. 
The number of ranges is\r\n limited by a maximum number.\r\n (b) join: The table is joined explicitly with a table containing the \r\n ranges.\"\"\"\r\n if geometry == []:\r\n mortonWhere, self.mortonJoinWhere, ranges, rangeTab, morPrep, insert, Levels = ('', '', 0, None, 0, 0, 0)\r\n else:\r\n if self.method == 'join':\r\n rangeTab = (self.rangeTable + qid).upper()\r\n ranges, morPrep, insert, Levels = self.join(geometry, coarser, rangeTab, continuous)\r\n mortonWhere = self.mortonJoinWhere\r\n elif self.method == 'sql':\r\n rangeTab, insert = None, 0\r\n mortonWhere, ranges, morPrep, Levels = self.sql(geometry, coarser, continuous)\r\n \r\n # if deep the time is in the morton code\r\n if self.integration == 'deep' or (self.start_date is None and self.end_date is None and self.integration == 'loose'): \r\n timeWhere = ''\r\n elif self.integration == 'loose': \r\n timeWhere = whereClause.addTimeCondition(times, 'time', self.timeType)\r\n \r\n return whereClause.getWhereStatement([timeWhere, mortonWhere]), ranges, morPrep, insert, Levels, rangeTab",
"def get_period_range(self, period, start, end, inclusive_start=True, inclusive_end=True):\n if not isinstance(start, datetime.datetime):\n start = self.get_date_from_string(start, '%Y-%m-%d')\n if not isinstance(end, datetime.datetime):\n end = self.get_date_from_string(end, '%Y-%m-%d')\n\n if period == 'month':\n get_period = self.get_current_month_range\n get_next_period = self.get_next_month\n get_previous_period = self.get_previous_month\n if period == 'week':\n get_period = self.get_current_week_range\n get_next_period = self.get_next_week\n get_previous_period = self.get_previous_week\n\n #####################\n # inclusive_start means that the result set will include the whole period\n # containing the start date. Likewise for inclusive_end.\n #\n # If you are, say, reporting on a 'last completed month' or something,\n # but your report date (and end date) is mid-month or something, then setting 'inclusive_end'\n # to False will insure that the report ends with the month prior to the\n # end date.\n #\n # If you're doing projections starting with the month following the one\n # you're in, setting inclusive_start to False will insure that the first\n # period in the range is the one *after* the period you're in now.\n #######################\n if not inclusive_start:\n start = get_next_period(start)[0]\n if not inclusive_end:\n end = get_previous_period(end)[1]\n\n returnvals = []\n\n\n firstper = get_period(start)\n returnvals.append(firstper)\n per = firstper\n while per[1] < end:\n # goes as long as the *end* of the period is < our end date.\n # the intent is that if end is 2010-10-04, the last period will be\n # (2010-10-01, 2010-10-31)\n per = get_next_period(per[1])\n returnvals.append(per)\n\n return returnvals",
"def construct_quar_index_mapping(df):\n quarters = (df[\"year\"].astype(\"str\") + \" q\" + df[\"quarter\"].astype(\"str\")).unique()\n quarter_to_index = {}\n for i in range(df.shape[0]):\n row = df.iloc[i, :]\n quarter = row[\"year\"].astype(\"str\") + \" q\" + row[\"quarter\"].astype(\"str\")\n quarter_to_index[quarter] = quarter_to_index.get(quarter, set())\n quarter_to_index[quarter].add(i)\n return quarter_to_index",
"def price_generator(self, start, end, periods):\r\n tickers = [self.SelectedTicker]\r\n tick_yahoo = YahooFinancials(tickers)\r\n data = tick_yahoo.get_historical_price_data(start, \r\n end, \r\n periods)\r\n \r\n df = pd.DataFrame({\r\n a: {x['formatted_date']: x['adjclose'] for x in data[a]['prices']} for a in tickers})\r\n \r\n self.prices = df.dropna()\r\n self.returns = self.prices.pct_change().dropna()\r\n try:\r\n self.div_yield = tick_yahoo.get_dividend_yield()\r\n #print(self.div_yield[self.SelectedTicker])\r\n if self.div_yield[self.SelectedTicker] == None:\r\n self.div_yield = 0.00\r\n else:\r\n self.div_yield = self.div_yield[self.SelectedTicker]\r\n except:\r\n print(\"no dividend yield\")"
]
| [
"0.6163751",
"0.61617553",
"0.6156018",
"0.6087009",
"0.59538853",
"0.5751105",
"0.5747524",
"0.56566805",
"0.56298023",
"0.5623435",
"0.56176776",
"0.56133264",
"0.5609766",
"0.55975634",
"0.55484426",
"0.5538029",
"0.5492789",
"0.5463719",
"0.54399437",
"0.541501",
"0.5380435",
"0.5361859",
"0.5345183",
"0.52653164",
"0.5258237",
"0.5247482",
"0.5243801",
"0.52394754",
"0.5230193",
"0.52258277"
]
| 0.73519164 | 0 |
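A hedged iteration example for generate_quarter_boundaries from the record above, reusing the same assumed constants; it asks for two quarters starting at October 2019.
for q_start, q_end in generate_quarter_boundaries(2019, 10, 2):
    print(q_start, q_end)
# 2019-10-01 2019-12-31   (expected, given the assumptions above)
# 2020-01-01 2020-03-31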
Sum values over the given axis. | def sum(self, axis: int = 0):
self.values = self.values.sum(axis=axis)
self.layers = [None]
return self.copy() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sum(tensor, axis=None):\n raise NotImplementedError",
"def sum(self, axis=None, keepdims=False):\n return F.Sum.apply(self, axis, keepdims)",
"def val_sum(self, axis = None):\n f = self.to_Poly()\n return f.val_sum(axis).to_PolyMesh(self.params)",
"def val_sum(self, axis = None):\n f = self\n if axis is None:\n axis = - np.arange(f.val_ndim) - 1\n axis = tuple(axis)\n else:\n ## make `axis` into a list\n try:\n axis = list(tuple(axis))\n except TypeError:\n axis = (axis, )\n \n ## force the `axis` to be positive\n axis = [i if i >= 0 else i + f.val_ndim for i in axis]\n assert all(0 <= i < f.val_ndim for i in axis )\n \n ##\n axis = np.array(axis) + f.batch_ndim + f.var_ndim\n axis = tuple(axis)\n \n #print(\"axis =\", axis)\n return Poly(\n coef = nptf.reduce_sum(\n f.coef, \n axis = axis\n ),\n batch_ndim = f.batch_ndim,\n var_ndim = f.var_ndim\n )",
"def val_sum(self, axis = None):\n f = self.to_Poly()\n return f.val_sum(axis).to_TaylorGrid(self.params)",
"def sum(self, axis=None):\n if axis is None:\n return numpy.ma.sum(self.data)\n\n new_data = numpy.ma.sum(self.data, axis=axis)\n remaining_axes = numpy.setdiff1d(range(self.ndim), axis)\n remaining_edges = [self.bset.edges[ax] for ax in remaining_axes]\n\n # This is kind of a hack that breaks good OO design, but is there\n # a better solution?\n if len(remaining_edges) == 2:\n return IntensityMap2D(new_data, (remaining_edges,))\n else:\n return IntensityMap(new_data, (remaining_edges,))",
"def sum(self, axis=None, keepdims=False, dtype=None, out=None):\n return np.add.reduce(self, out=out, axis=axis, keepdims=keepdims, dtype=dtype)",
"def cumsum(tensor, axis=None):\n raise NotImplementedError",
"def sumAxisPoints(self, var):\n varID = var.id\n var = cdutil.averager(var, axis=\"(%s)\" % self.axis.id, weight='equal',\n action='sum') \n var.id = varID\n return var",
"def sum(input, axis=None, dtype=None, keepdims=False, acc_dtype=None):\r\n\r\n out = elemwise.Sum(axis=axis, dtype=dtype, acc_dtype=acc_dtype)(input)\r\n\r\n if keepdims:\r\n out = makeKeepDims(input, out, axis)\r\n return out",
"def my_sum(a, axis, count):\n if a.shape[axis] == count:\n return a.sum(axis)\n elif a.shape[axis] == 1:\n return count * a.sum(axis)\n else:\n raise IndexError('Cannot be broadcast: a.shape=%s, axis=%d, count=%d' % (a.shape, axis, count))",
"def ordersum(self, values, axis=None):\n values = np.asarray(values)\n if axis is None:\n for axis in range(values.ndim):\n if values.shape[axis] == len(self):\n break\n else:\n raise ValueError('Cannot find axis of length {} in the given values!'.format(len(self)))\n\n values = np.moveaxis(values, axis, 0)\n output = np.zeros((self.max_order - self.min_order + 1, ) + values.shape[1:], dtype=values.dtype)\n for idx, order in enumerate(self.orders):\n output[idx] = np.sum(values[self(order, -order):self(order, order) + 1], axis=0)\n return np.moveaxis(output, 0, axis)",
"def rwb_psum(x, axis_name):\n return _rwb_psum_fwd(x, axis_name)[0]",
"def cumsum(x, axis=None):\r\n return CumsumOp(axis=axis)(x)",
"def sum(x, reduce_instance_dims=True, name=None): # pylint: disable=redefined-builtin\n return _numeric_combine(x, np.sum, reduce_instance_dims, name)",
"def reduce_sum_d(x, y, axis=None, keepdims=None, kernel_name=\"reduce_sum_d\"):\n\n dtype = x[\"dtype\"]\n dtype_lower = dtype.lower()\n check_list = (\"float16\", \"float32\")\n check_dtype(dtype_lower, check_list, param_name=\"x\")\n\n with te.op.compute():\n shape = x[\"shape\"]\n shape_range = x[\"range\"]\n\n axes = []\n shape_len = len(shape)\n if not axis:\n for i, _ in enumerate(shape):\n axes.append(i)\n else:\n axes = list(axis)\n axes = cce_util.axis_check(shape_len, axes)\n\n shape_new, shape_range_new, axes_new, fused_rel_dic = \\\n fused_reduce_axis(shape, shape_range, axes)\n\n add_compile_info(\"fused_rel_dic\", fused_rel_dic)\n x[\"shape\"] = shape_new\n x[\"range\"] = shape_range_new\n shape_var_new = variable_shape([x])[0]\n\n data_input = tvm.placeholder(shape_var_new, name=\"data_input\",\n dtype=dtype_lower)\n res = reduce_sum_d_compute(data_input, y, axes_new, keepdims)\n\n with tvm.target.cce():\n sch = generic.auto_schedule(res)\n\n # build\n config = {\"name\": kernel_name,\n \"tensor_list\": [data_input, res]}\n te.lang.dynamic.build(sch, config)",
"def _sum_on_axis(self, M, undirected=True):\n\n if undirected:\n M = (M + M.T).astype('bool')\n colsum = M.sum(axis=0) # , dtype=np.int64)\n rowsum = M.sum(axis=1) # , dtype=np.int64) # already np.int64\n return rowsum.T, colsum",
"def rowsums (self):\n return self.values.sum (axis=0)",
"def colsums (self):\n return self.values.sum (axis=1)",
"def _gu_sum(a, **kwds):\n return np.sum(np.ascontiguousarray(a), axis=-1, **kwds)",
"def assignment_by_sum(x, values, indices, axis=0):\n x_new = copy(x)\n values = array(values)\n use_vectorization = hasattr(indices, \"__len__\") and len(indices) < ndim(x)\n if _is_boolean(indices):\n x_new[indices] += values\n return x_new\n zip_indices = _is_iterable(indices) and _is_iterable(indices[0])\n if zip_indices:\n indices = list(zip(*indices))\n if not use_vectorization:\n len_indices = len(indices) if _is_iterable(indices) else 1\n len_values = len(values) if _is_iterable(values) else 1\n if len_values > 1 and len_values != len_indices:\n raise ValueError(\"Either one value or as many values as indices\")\n x_new[indices] += values\n else:\n indices = tuple(list(indices[:axis]) + [slice(None)] + list(indices[axis:]))\n x_new[indices] += values\n return x_new",
"def acumsum (a,dimension=None):\r\n if dimension == None:\r\n a = N.ravel(a)\r\n dimension = 0\r\n if type(dimension) in [ListType, TupleType, N.ndarray]:\r\n dimension = list(dimension)\r\n dimension.sort()\r\n dimension.reverse()\r\n for d in dimension:\r\n a = N.add.accumulate(a,d)\r\n return a\r\n else:\r\n return N.add.accumulate(a,dimension)",
"def _sum_grad(x, axis, dout):\n # input_shape = [2, 3] axis = [1]\n input_shape = shape_op(x)\n # output_shape_kept_dims = [2, 1]\n output_shape_kept_dims = reduced_shape(input_shape, axis)\n # tile_scaling = [1, 3]\n tile_scaling = tuple_div(input_shape, output_shape_kept_dims)\n grad = reshape(dout, output_shape_kept_dims)\n return tile(grad, tile_scaling)",
"def broadcast_labeled_sum(x, labels, nlabels, axis=0, roll=None):\n if axis != 0:\n x = np.rollaxis(x, axis, 0)\n\n output = _broadcast_labeled_sum(x, labels, nlabels)\n\n if roll is not None:\n output = np.rollaxis(output, 0, roll)\n return output",
"def sum_to_0d(x):\n assert_equal(x.ndim, 1)\n return np.squeeze(np.sum(x, keepdims=True))",
"def Sum2d(a):\n return(np.sum(np.sum(a,-1),-1))",
"def kahan_sum(a, axis=0):\n s = np.zeros(a.shape[: axis] + a.shape[axis + 1:])\n c = np.zeros(s.shape)\n for i in range(a.shape[axis]):\n # https://stackoverflow.com/a/42817610/353337\n y = a[(slice(None),) * axis + (i,)] - c\n t = s + y\n c = (t - s) - y\n s = t.copy()\n return s",
"def integral(self, axis=None):\n if axis is None:\n return (self * self.bset.area).sum()\n\n try:\n measure = numpy.prod([self.bset.binwidths[ax] for ax in axis],\n axis=0)\n except TypeError:\n measure = self.bset.binwidths[axis]\n (self * measure).sum(axis=axis)",
"def convert_sum(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n mx_axis = attrs.get(\"axis\", None)\n axes = convert_string_to_list(str(mx_axis)) if mx_axis is not None else None\n\n keepdims = get_boolean_attribute_value(attrs, \"keepdims\")\n\n if axes:\n node = onnx.helper.make_node(\n 'ReduceSum',\n inputs=input_nodes,\n outputs=[name],\n axes=axes,\n keepdims=keepdims,\n name=name\n )\n else:\n node = onnx.helper.make_node(\n 'ReduceSum',\n inputs=input_nodes,\n outputs=[name],\n keepdims=keepdims,\n name=name\n )\n return [node]",
"def sum(self, elim=None, out=None):\n if (elim is None):\n elim = self.v\n return self.__opReduce2(self.v & elim,np.sum, out=out)"
]
| [
"0.7622369",
"0.7435083",
"0.742321",
"0.73752266",
"0.73362845",
"0.71223015",
"0.69976205",
"0.69614166",
"0.6825869",
"0.6780794",
"0.67299175",
"0.66423965",
"0.6566993",
"0.6565615",
"0.6412074",
"0.63147205",
"0.6298999",
"0.6296889",
"0.6282736",
"0.61980706",
"0.61980224",
"0.6194785",
"0.6141181",
"0.6111568",
"0.6099031",
"0.6073319",
"0.6040165",
"0.6015121",
"0.6013401",
"0.59944385"
]
| 0.8020687 | 0 |
Config file for the main trainer factory (a scheduler to train multiple models in a row, i.e. for a specified set of horizons). To ensure that we store the model settings in only one place, the configs Trainer will take the default values given in the model configs file (which must be specified in the file_name_model_configs argument of the trainer configs yaml file). Attributes that are also specified in the trainer configs will replace the ones in the model configs | def __init__(self, config_file_name: str):
configs_trainer = io.read_yaml(PATH_CONFIG, config_file_name)
configs_model = configs_trainer[configs_trainer['model']]
# Add trainer configs attributes
horizons = configs_trainer['forecasting_horizons_trainer']
self.forecasting_horizons_trainer = range(horizons['smallest_horizon'], horizons['largest_horizon'] + 1)
for name, value in configs_trainer.items():
            if name in ['train_date_when_predicting_min', 'train_date_to_predict_max']:
self.__setattr__(name, value)
# Initiate individual model configs object (replace attributes that were specified in configs_model).
configs = io.read_yaml(PATH_CONFIG, configs_trainer['file_name_model_configs'])
configs = configs[configs_trainer['model']]
Logger.info('Loaded model configs from file',
os.path.join(PATH_CONFIG, configs_trainer['file_name_model_configs']), self.__class__.__name__)
configs.update(configs_model)
def update_train_scope(attr, limit, fct):
if configs.get(attr) is not None and limit in vars(self):
date = fct(configs.get(attr), self.__getattribute__(limit))
configs.update({attr: date})
update_train_scope('train_start', 'train_date_when_predicting_min', max)
update_train_scope('train_end', 'train_date_to_predict_max', min)
self.configs_individual_model = Configs(configs={k: v for k, v in configs.items()
if k in Configs.__dict__.keys()})
# Update maximum date to predict train to ensure that we don't overlap with the evaluation period
if self.configs_individual_model.evaluation_start is not None and self.train_date_to_predict_max is not None:
max_date_to_predict = substract_period(
self.configs_individual_model.evaluation_start, 1,
highest_period=52 if self.configs_individual_model.is_weekly_forecast else 12
)
self.train_date_to_predict_max = min(self.train_date_to_predict_max, max_date_to_predict)
Logger.info('Loaded trainer configs from file',
os.path.join(PATH_CONFIG, config_file_name), self.__class__.__name__) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, config_file_name: str=None, configs: dict=None, is_sell_in_model: bool=None):\n\n if config_file_name is not None:\n configs = io.read_yaml(PATH_CONFIG, config_file_name)\n\n if is_sell_in_model is None:\n self.is_sell_in_model = configs['model'] == 'sell_in'\n\n configs = configs['sell_in' if self.is_sell_in_model else 'sell_out']\n Logger.info('Loaded configs from file', os.path.join(PATH_CONFIG, config_file_name), self.__class__.__name__)\n\n for name, value in configs.items():\n if not name.endswith('_parameters'):\n self.__setattr__(name, value)\n\n # Add model parameters from configs file\n model_parameters = configs.get('_'.join(['lightgbm' if self.use_light_regressor else 'xgboost', 'parameters']))\n self.__setattr__('model_parameters', model_parameters)\n\n # Add features controller object to configs (ability to switch on & off features in model trainer)\n if self.features is not None:\n self.features = FeatureController(self.is_sell_in_model, self.features)\n\n if self.forecasting_horizons:\n self.update_train_test_windows()",
"def config():\n experiment_dir = './experiments'\n simulation_steps = 1000\n device = 'cpu'\n path_to_molecules = os.path.join(experiment_dir, 'data/ethanol.xyz')\n simulation_dir = os.path.join(experiment_dir, 'simulation')\n training_dir = os.path.join(experiment_dir, 'training')\n model_path = os.path.join(training_dir, 'best_model')\n overwrite = True",
"def get_model_config(model_name, args):\n if model_name == 'Tacotron2':\n model_config = dict(\n # optimization\n mask_padding=args.mask_padding,\n # audio\n n_mel_channels=args.n_mel_channels,\n # symbols\n n_symbols=args.n_symbols,\n symbols_embedding_dim=args.symbols_embedding_dim,\n # encoder\n encoder_kernel_size=args.encoder_kernel_size,\n encoder_n_convolutions=args.encoder_n_convolutions,\n encoder_embedding_dim=args.encoder_embedding_dim,\n # attention\n attention_rnn_dim=args.attention_rnn_dim,\n attention_dim=args.attention_dim,\n # attention location\n attention_location_n_filters=args.attention_location_n_filters,\n attention_location_kernel_size=args.attention_location_kernel_size,\n # decoder\n n_frames_per_step=args.n_frames_per_step,\n decoder_rnn_dim=args.decoder_rnn_dim,\n prenet_dim=args.prenet_dim,\n max_decoder_steps=args.max_decoder_steps,\n gate_threshold=args.gate_threshold,\n p_attention_dropout=args.p_attention_dropout,\n p_decoder_dropout=args.p_decoder_dropout,\n # postnet\n postnet_embedding_dim=args.postnet_embedding_dim,\n postnet_kernel_size=args.postnet_kernel_size,\n postnet_n_convolutions=args.postnet_n_convolutions,\n decoder_no_early_stopping=args.decoder_no_early_stopping\n )\n return model_config\n elif model_name == 'WaveGlow':\n model_config = dict(\n n_mel_channels=args.n_mel_channels,\n n_flows=args.flows,\n n_group=args.groups,\n n_early_every=args.early_every,\n n_early_size=args.early_size,\n WN_config=dict(\n n_layers=args.wn_layers,\n kernel_size=args.wn_kernel_size,\n n_channels=args.wn_channels\n )\n )\n return model_config\n else:\n raise NotImplementedError(model_name)",
"def update_config_external_template(config):\r\n\r\n # best parameters from the paper\r\n config['train_batch_size'] = 16384\r\n config['lr'] = 3e-4\r\n config['sgd_minibatch_size'] = 4096\r\n config['num_sgd_iter'] = 4\r\n config['rollout_fragment_length'] = 100\r\n\r\n # run ID to communicate to the http trainer\r\n config['run_uid'] = '_setme'\r\n\r\n # stable baselines accepts full episodes\r\n config[\"batch_mode\"] = \"complete_episodes\"\r\n\r\n # stable baselines server address\r\n config[\"http_remote_port\"] = \"http://127.0.0.1:50001\"\r\n\r\n # no gpus, stable baselines might use them\r\n config['num_gpus'] = 0\r\n\r\n # set trainer class\r\n config['_trainer'] = \"External\"\r\n config['_policy'] = \"PPO\"\r\n\r\n # tuned\r\n config['num_envs_per_worker'] = 10\r\n config['num_workers'] = 3\r\n return config",
"def config_and_train(self, sys_args):\n \n self.run_config_function(sys_args)\n self.set_model_name('vgg_16')\n self.set_trainable_and_exclude_scopes(constants.checkpoint_exclude_scopes,\n constants.trainable_scopes)\n self.set_optimizer('sgd')\n self.set_max_number_of_steps(6000)\n self.train_or_eval_net(sys_args)",
"def _config_set(self):\n p = self._params\n self._config = tf.estimator.RunConfig(save_checkpoints_steps = p.save_checkpoints_steps,\n keep_checkpoint_max = p.keep_checkpoint_max,\n save_summary_steps = p.save_summary_steps\n )",
"def train_config(parser, input_argv=None):\n\n data(parser)\n token(parser)\n model(parser)\n if nsml.IS_ON_NSML:\n nsml_for_internal(parser)\n trainer(parser)\n\n # Use from config file\n base_config(parser)\n\n config = parser.parse_args(input_argv, namespace=NestedNamespace())\n\n use_base_config = config.base_config\n # use pre-defined base_config\n if use_base_config:\n base_config_path = os.path.join(\"base_config\", config.base_config)\n base_config_path = utils.add_config_extension(base_config_path)\n defined_config = utils.read_config()\n # config.overwrite(defined_config)\n\n config = NestedNamespace()\n config.load_from_json(defined_config)\n\n # overwrite input argument when base_config and arguments are provided.\n # (eg. --base_config bidaf --learning_rate 2) -> set bidaf.json then overwrite learning_rate 2)\n input_args = get_input_arguments(parser, input_argv)\n for k, v in input_args.items():\n setattr(config, k, v)\n\n if not use_base_config:\n config = optimize_config(config)\n\n set_gpu_env(config)\n set_batch_size(config)\n return config",
"def get_configs_from_multiple_files(model_config_path=\"\",\n train_config_path=\"\",\n train_input_config_path=\"\",\n eval_config_path=\"\",\n eval_input_config_path=\"\",\n graph_rewriter_config_path=\"\"):\n configs = {}\n if model_config_path:\n model_config = model_pb2.DetectionModel()\n with tf.gfile.GFile(model_config_path, \"r\") as f:\n text_format.Merge(f.read(), model_config)\n configs[\"model\"] = model_config\n\n if train_config_path:\n train_config = train_pb2.TrainConfig()\n with tf.gfile.GFile(train_config_path, \"r\") as f:\n text_format.Merge(f.read(), train_config)\n configs[\"train_config\"] = train_config\n\n if train_input_config_path:\n train_input_config = input_reader_pb2.InputReader()\n with tf.gfile.GFile(train_input_config_path, \"r\") as f:\n text_format.Merge(f.read(), train_input_config)\n configs[\"train_input_config\"] = train_input_config\n\n if eval_config_path:\n eval_config = eval_pb2.EvalConfig()\n with tf.gfile.GFile(eval_config_path, \"r\") as f:\n text_format.Merge(f.read(), eval_config)\n configs[\"eval_config\"] = eval_config\n\n if eval_input_config_path:\n eval_input_config = input_reader_pb2.InputReader()\n with tf.gfile.GFile(eval_input_config_path, \"r\") as f:\n text_format.Merge(f.read(), eval_input_config)\n configs[\"eval_input_configs\"] = [eval_input_config]\n\n if graph_rewriter_config_path:\n configs[\"graph_rewriter_config\"] = get_graph_rewriter_config_from_file(\n graph_rewriter_config_path)\n\n return configs",
"def config(self):\n\n train_dataset = RandomClassificationDataset()\n eval_dataset = RandomClassificationDataset()\n\n return {\n 'model':\n SimpleModel(),\n 'train_dataloader':\n DataLoader(\n dataset=train_dataset,\n batch_size=4,\n sampler=dist.get_sampler(train_dataset),\n ),\n 'eval_dataloader':\n DataLoader(\n dataset=eval_dataset,\n sampler=dist.get_sampler(eval_dataset),\n ),\n 'max_duration':\n '2ep',\n 'autoresume':\n True,\n 'loggers': [],\n }",
"def settings(args):\n data = {}\n data['train_x'] = load_pkl(os.path.join(args.data_dir, 'train_images.pkl'))\n data['train_y'] = load_pkl(os.path.join(args.data_dir, 'train_labels.pkl'))\n data['valid_x'] = load_pkl(os.path.join(args.data_dir, 'valid_images.pkl'))\n data['valid_y'] = load_pkl(os.path.join(args.data_dir, 'valid_labels.pkl'))\n if args.combine_train_val:\n data['train_x'].update(data['valid_x'])\n data['train_y'].update(data['valid_y'])\n data['valid_x'] = load_pkl(os.path.join(args.data_dir, 'test_images.pkl'))\n data['valid_y'] = load_pkl(os.path.join(args.data_dir, './data/bsd_pkl_float/test_labels.pkl'))\n args.display_step = len(data['train_x']) / 46\n # Default configuration\n if args.default_settings:\n args.n_epochs = 250\n args.batch_size = 10\n args.learning_rate = 3e-2\n args.std_mult = 0.8\n args.delay = 8\n args.filter_gain = 2\n args.filter_size = 5\n args.n_rings = 4\n args.n_filters = 7\n args.save_step = 5\n args.height = 321\n args.width = 481\n\n args.n_channels = 3\n args.lr_div = 10.\n args.augment = True\n args.sparsity = True\n\n args.test_path = args.save_name\n args.log_path = './logs'\n args.checkpoint_path = './checkpoints'\n\n make_dirs(args, args.test_path)\n make_dirs(args, args.log_path)\n make_dirs(args, args.checkpoint_path)\n\n return args, data",
"def base_training_config():\n return {\n # Name of the gradient optimizer. See ops/training.py.\n \"optimizer\": \"adam\",\n\n # Optimizer-specific parameters.\n \"momentum\": 0.9, # For momentum optimizer.\n \"adam_beta1\": 0.9, # For adam optimizer.\n \"adam_beta2\": 0.999, # For adam optimizer.\n \"adam_epsilon\": 1e-08, # For adam optimizer.\n\n # Initial learning rate.\n \"learning_rate\": 0.0008,\n\n # If > 0, the learning rate decay factor.\n \"learning_rate_decay_factor\": 0.5,\n\n # The number of steps before the learning rate decays by\n # learning_rate_decay_factor.\n \"learning_rate_decay_steps\": 400000,\n\n # If True, decay the learning rate at discrete intervals.\n \"learning_rate_decay_staircase\": False,\n\n # The minimum value to decay the learning rate to.\n \"learning_rate_decay_floor\": 0,\n\n # If > 0, the number of training steps.\n \"number_of_steps\": 0,\n\n # If > 0, clip gradients to this value.\n \"clip_gradient_norm\": 5.0,\n\n # How often (in seconds) to save model checkpoints.\n \"save_model_secs\": 60 * 10,\n\n # How often (in hours) checkpoints should be kept.\n \"keep_checkpoint_every_n_hours\": 2,\n\n # How often (in seconds) to save model summaries.\n \"save_summaries_secs\": 60 * 10,\n\n # How many model checkpoints to keep.\n \"max_checkpoints_to_keep\": 5,\n\n # Startup delay between worker replicas and chief. Only applies for async\n # multi-worker training.\n \"startup_delay_steps\": 100,\n }",
"def customize_experiment_config(self, config):\n # TODO: use ConfigList from Coach launcher, and share customization code.\n hyperparams_dict = json.loads(os.environ.get(\"SM_HPS\", \"{}\"))\n\n # Set output dir to intermediate\n # TODO: move this to before customer-specified so they can override\n hyperparams_dict[\"rl.training.local_dir\"] = \"/opt/ml/output/intermediate\"\n\n self.hyperparameters = ConfigurationList() # TODO: move to shared\n for name, value in hyperparams_dict.items():\n # self.map_hyperparameter(name, val) #TODO\n if name.startswith(\"rl.\"):\n # self.apply_hyperparameter(name, value) #TODO\n self.hyperparameters.store(name, value)\n # else:\n # raise ValueError(\"Unknown hyperparameter %s\" % name)\n\n self.hyperparameters.apply_subset(config, \"rl.\")\n return config",
"def default_config(cls):\n\n config = {\n \"checkpoint_path\": \"\", # path to model checkpoint\n \"separated_audio_folder\": \"\" # path to folder where to save the separated audio tracks.\n }\n return config",
"def run(self, *args, **kwargs) -> None:\n loop = tqdm(self.configs, desc='Configurations')\n for cfg in loop:\n loop.set_postfix_str(cfg.experiment_cfg['name'])\n for i in range(cfg.num_models):\n filename = None\n run_id = None\n if cfg.filenames is not None:\n if isinstance(cfg.filenames, str):\n filename = cfg.filenames\n else:\n filename = cfg.filenames[i]\n elif cfg.run_ids is not None:\n run_id = cfg.run_ids[i]\n\n run_cfg = modelgen_cfg_to_runner_cfg(cfg, run_id=run_id, filename=filename)\n runner = Runner(run_cfg, persist_metadata=cfg.experiment_cfg)\n runner.run()\n\n # clear up memory between runs\n torch.cuda.empty_cache()",
"def build_trainer_config(restore_state=None, train_policies=None, config=None):\n obs_space = env_cls(env_config).observation_space\n act_space = env_cls(env_config).action_space\n\n agent_config = (PPOTFPolicy, obs_space, act_space, {\n \"model\": {\n \"use_lstm\": True,\n \"fcnet_hiddens\": [config['fc_units'], config['fc_units']],\n \"lstm_cell_size\": config['lstm_units'],\n },\n \"framework\": \"tfe\",\n })\n\n # N_POLICIES = 2\n policies_keys = ['victim', 'adversary']\n\n #policies = {policy_template % i: agent_config for i in range(N_POLICIES)}\n policies = {name: agent_config for name in policies_keys}\n\n def select_policy(agent_id):\n assert agent_id in [\"player1\", \"player2\"]\n agent_ids = [\"player1\", \"player2\"]\n \n # selecting the corresponding policy (only for 2 policies)\n return policies_keys[agent_ids.index(agent_id)]\n\n # randomly choosing an opponent\n # return np.random.choice(list(policies.keys()))\n \n if train_policies is None:\n train_policies = list(policies.keys())\n \n for k in train_policies:\n assert k in policies.keys()\n\n config = {\n \"env\": env_cls,\n # \"gamma\": 0.9,\n \"num_workers\": 0,\n # \"num_envs_per_worker\": 10,\n # \"rollout_fragment_length\": 10,\n \"train_batch_size\": config['train_batch_size'],\n \"multiagent\": {\n \"policies_to_train\": train_policies,\n \"policies\": policies,\n \"policy_mapping_fn\": select_policy,\n },\n \"framework\": \"tfe\",\n #\"train_batch_size\": 512\n #\"num_cpus_per_worker\": 2\n }\n return config",
"def model_config(**overrides):\n config = base_model_config()\n _override(config, overrides)\n return tf.contrib.training.HParams(**config)",
"def get_config():\n\n parser = argparse.ArgumentParser(\n description='ZoomingSloMo or only Slo-Mo training argument parser')\n parser.add_argument('--cfg', default=\"./config.yaml\")\n args, _ = parser.parse_known_args()\n conf = read_yaml(args.cfg)\n\n parser.add_argument('--lmdb-data-gt', type=str, default=\"datasets/\",\n help='Path to HR frames lmdb for training')\n\n parser.add_argument('--lmdb-data-lq', type=str, default=\"datasets/\",\n help='Path to LR frames lmdb for training')\n\n parser.add_argument('--output-dir', type=str, default=\"models/\",\n help='Path to store trained models')\n\n parser.add_argument('--batch-size', type=int, default=\"12\",\n help='Maximum number of iterations for training')\n\n parser.add_argument('--gt-size', type=int, default=128,\n help='Ground truth frame size')\n\n parser.add_argument('--only-slomo', action='store_true', default=False,\n help='If True, network will train for Slo-Mo only (No Zooming)')\n\n args = parser.parse_args()\n\n # Refine config file variables\n conf.data.lmdb_data_gt = args.lmdb_data_gt\n conf.data.lmdb_data_lq = args.lmdb_data_lq\n conf.data.output_dir = args.output_dir\n conf.train.batch_size = args.batch_size\n conf.train.only_slomo = args.only_slomo\n conf.data.gt_size = args.gt_size if not args.only_slomo else args.gt_size // 4\n conf.data.lr_size = args.gt_size // 4\n\n return conf",
"def get_model_config(self, model_num=0):\n return [], resources.get_file(\n \"config/tests/methods/unsupervised/train_test.gin\")",
"def get_model_config(model_name, args):\n if model_name == 'WaveGlow':\n model_config = dict(\n n_mel_channels=args.n_mel_channels,\n n_flows=args.flows,\n n_group=args.groups,\n n_early_every=args.early_every,\n n_early_size=args.early_size,\n WN_config=dict(\n n_layers=args.wn_layers,\n kernel_size=args.wn_kernel_size,\n n_channels=args.wn_channels\n )\n )\n return model_config\n elif model_name == 'FastPitch':\n model_config = dict(\n # io\n n_mel_channels=args.n_mel_channels,\n # symbols\n n_symbols=len(get_symbols(args.symbol_set)),\n padding_idx=get_pad_idx(args.symbol_set),\n symbols_embedding_dim=args.symbols_embedding_dim,\n # input FFT\n in_fft_n_layers=args.in_fft_n_layers,\n in_fft_n_heads=args.in_fft_n_heads,\n in_fft_d_head=args.in_fft_d_head,\n in_fft_conv1d_kernel_size=args.in_fft_conv1d_kernel_size,\n in_fft_conv1d_filter_size=args.in_fft_conv1d_filter_size,\n in_fft_output_size=args.in_fft_output_size,\n p_in_fft_dropout=args.p_in_fft_dropout,\n p_in_fft_dropatt=args.p_in_fft_dropatt,\n p_in_fft_dropemb=args.p_in_fft_dropemb,\n # output FFT\n out_fft_n_layers=args.out_fft_n_layers,\n out_fft_n_heads=args.out_fft_n_heads,\n out_fft_d_head=args.out_fft_d_head,\n out_fft_conv1d_kernel_size=args.out_fft_conv1d_kernel_size,\n out_fft_conv1d_filter_size=args.out_fft_conv1d_filter_size,\n out_fft_output_size=args.out_fft_output_size,\n p_out_fft_dropout=args.p_out_fft_dropout,\n p_out_fft_dropatt=args.p_out_fft_dropatt,\n p_out_fft_dropemb=args.p_out_fft_dropemb,\n # duration predictor\n dur_predictor_kernel_size=args.dur_predictor_kernel_size,\n dur_predictor_filter_size=args.dur_predictor_filter_size,\n p_dur_predictor_dropout=args.p_dur_predictor_dropout,\n dur_predictor_n_layers=args.dur_predictor_n_layers,\n # pitch predictor\n pitch_predictor_kernel_size=args.pitch_predictor_kernel_size,\n pitch_predictor_filter_size=args.pitch_predictor_filter_size,\n p_pitch_predictor_dropout=args.p_pitch_predictor_dropout,\n pitch_predictor_n_layers=args.pitch_predictor_n_layers,\n # pitch conditioning\n pitch_embedding_kernel_size=args.pitch_embedding_kernel_size,\n # speakers parameters\n n_speakers=args.n_speakers,\n speaker_emb_weight=args.speaker_emb_weight,\n # energy predictor\n energy_predictor_kernel_size=args.energy_predictor_kernel_size,\n energy_predictor_filter_size=args.energy_predictor_filter_size,\n p_energy_predictor_dropout=args.p_energy_predictor_dropout,\n energy_predictor_n_layers=args.energy_predictor_n_layers,\n # energy conditioning\n energy_conditioning=args.energy_conditioning,\n energy_embedding_kernel_size=args.energy_embedding_kernel_size,\n )\n return model_config\n\n else:\n raise NotImplementedError(model_name)",
"def setup(args):\n cfg = get_cfg()\n\n cfg.merge_from_file(model_zoo.get_config_file(args.model_zoo))\n cfg.DATASETS.TRAIN = (args.train_dataset, )\n cfg.DATASETS.TEST = (args.test_dataset, )\n cfg.DATALOADER.NUM_WORKERS = args.num_workers\n cfg.OUTPUT_DIR = args.output_dir\n os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)\n\n cfg.image_w = args.size[0]\n cfg.image_h = args.size[1]\n\n cfg.MODEL.WEIGHTS = args.model_zoo_weights\n cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = args.roi_thresh # set a custom testing threshold\n\n default_setup(cfg, args)\n return cfg",
"def build_trainer_config(config):\r\n # determining environment parameters\r\n env_fcn = config['_env_fcn']\r\n env = env_fcn(config['_env'])\r\n obs_space, act_space, n_policies = env.observation_space, env.action_space, env.n_policies\r\n env.close()\r\n\r\n policies = config['_get_policies'](config=config, n_policies=n_policies, obs_space=obs_space, act_space=act_space)\r\n select_policy = config['_select_policy']\r\n\r\n config = deepcopy(config)\r\n config['_all_policies'] = sorted(policies.keys())\r\n\r\n if config['_update_withpolicies'] and '_iteration' in config:\r\n config = config['_update_withpolicies'](config, iteration=config['_iteration'])\r\n\r\n config1 = deepcopy(config)\r\n config1['multiagent'] = {}\r\n config1['multiagent']['policies'] = policies\r\n\r\n for k in config['_train_policies']:\r\n assert k in policies.keys(), f\"Unknown policy {k} [range {policies.keys()}]\"\r\n\r\n rl_config = {\r\n \"env\": config['_env_name_rllib'],\r\n \"env_config\": config['_env'],\r\n \"multiagent\": {\r\n \"policies_to_train\": config['_train_policies'],\r\n \"policies\": policies,\r\n \"policy_mapping_fn\": partial(select_policy, config=config1),\r\n },\r\n 'tf_session_args': {'intra_op_parallelism_threads': config['_num_workers_tf'],\r\n 'inter_op_parallelism_threads': config['_num_workers_tf'],\r\n 'gpu_options': {'allow_growth': True},\r\n 'log_device_placement': True,\r\n 'device_count': {'CPU': config['_num_workers_tf']},\r\n 'allow_soft_placement': True\r\n },\r\n \"local_tf_session_args\": {\r\n \"intra_op_parallelism_threads\": config['_num_workers_tf'],\r\n \"inter_op_parallelism_threads\": config['_num_workers_tf'],\r\n },\r\n }\r\n\r\n # filling in the rest of variables\r\n for k, v in config.items():\r\n if k.startswith('_'): continue\r\n rl_config[k] = v\r\n\r\n if config.get('_verbose', True):\r\n print(\"Config:\")\r\n print(pretty_print(rl_config))\r\n\r\n if config['_trainer'] == 'External' and '_tmp_dir' in config:\r\n rl_config['tmp_dir'] = config['_tmp_dir']\r\n \r\n for key, val in rl_config.items():\r\n if isinstance(val, Domain):\r\n sampled_val = val.sample()\r\n rl_config[key] = sampled_val\r\n logging.warning(f\"Trainer got a ray.tune.sample for parameter {key}: {type(val)}({val}). Replacing it with a sampled value {sampled_val}\")\r\n\r\n return rl_config",
"def init_config(self):\n if self.is_client():\n return\n\n node_name = \"valnode%d\" % self.my_id\n\n # Read the keys\n keys = []\n with open(\"/home/martijn/stellar-core/keys.txt\", \"r\") as keys_file:\n for line in keys_file.readlines():\n line = line.strip()\n seed, pub_key = line.split(\" \")\n keys.append((seed, pub_key))\n\n # Make the validators info\n k = int(os.getenv('QUORUM', \"11\"))\n full_list = list(range(self.num_validators))\n quorum = random.sample(full_list, min(k, len(full_list)))\n\n # Make the validators info\n validators_string = \"\"\n for validator_index in quorum:\n if validator_index + 1 == self.my_id:\n continue\n validator_host, _ = self.experiment.get_peer_ip_port_by_id(validator_index + 1)\n validators_string += \"\"\"[[VALIDATORS]]\nNAME=\"valnode%d\"\nHOME_DOMAIN=\"dev\"\nPUBLIC_KEY=\"%s\"\nADDRESS=\"%s:%d\"\n\n\"\"\" % (validator_index + 1, keys[validator_index][1], validator_host, 14000 + validator_index + 1)\n\n with open(\"/home/martijn/stellar-core/stellar-core-template.cfg\", \"r\") as template_file:\n template_content = template_file.read()\n\n template_content = template_content.replace(\"<HTTP_PORT>\", str(11000 + self.my_id))\n template_content = template_content.replace(\"<NODE_SEED>\", keys[self.my_id - 1][0])\n template_content = template_content.replace(\"<NODE_NAME>\", node_name)\n template_content = template_content.replace(\"<DB_NAME>\", \"stellar_%d_db\" % self.my_id)\n template_content = template_content.replace(\"<PEER_PORT>\", str(14000 + self.my_id))\n template_content = template_content.replace(\"<VALIDATORS>\", validators_string)\n\n with open(\"stellar-core.cfg\", \"w\") as config_file:\n config_file.write(template_content)",
"def setup(args):\n cfg = get_cfg()\n cfg.merge_from_file(args.config_file)\n cfg.merge_from_list(args.opts)\n # customize reszied parameters\n # cfg['INPUT']['MIN_SIZE_TRAIN'] = (20,)\n # cfg['INPUT']['MAX_SIZE_TRAIN'] = 50\n cfg.freeze()\n default_setup(\n cfg, args\n ) # if you don't like any of the default setup, write your own setup code\n return cfg",
"def cook_config(ext_config_filename):\n mc = base_model_config()\n with open(ext_config_filename, \"r\") as fp:\n ext_mc = edict(json.load(fp, encoding=\"utf8\"))\n for s in ext_mc.keys():\n mc[s] = ext_mc[s]\n # mc.ANCHOR_BOX = set_anchors(mc)\n # print(np.max(np.square(np.array(set_anchors_testing(mc)) - np.array(set_anchors(mc)))))\n # mc.ANCHORS = len(mc.ANCHOR_BOX)\n # H, W, C = _get_output_shape(mc)\n # mc.MODEL_OUTPUT_SHAPE = [H, W, mc.ANCHOR_PER_GRID]\n return mc",
"def load_config():\n model_type, run_name, run_comment, epoch, verbose = get_args()\n name = run_name + '-' + run_comment\n if model_type == \"s2s\": \n run_title = \"seq2seq\"\n else:\n run_title = \"def2vec\"\n path = \"outputs/{}/logs/{}/config.json\".format(run_title, name)\n config = None\n with open(path) as f:\n config = dict(json.load(f))\n config = load_config(eval=True)\n return (config, name, model_type)",
"def _setup_training(self, params, **kwargs):\n model_params = params.permute_training_on_top().model\n\n model_kwargs = {**model_params.fixed, **model_params.variable}\n\n model = self.model_cls(**model_kwargs)\n\n training_params = params.permute_training_on_top().training\n losses = training_params.nested_get(\"losses\")\n optimizer_cls = training_params.nested_get(\"optimizer_cls\")\n optimizer_params = training_params.nested_get(\"optimizer_params\")\n train_metrics = training_params.nested_get(\"train_metrics\", {})\n lr_scheduler_cls = training_params.nested_get(\"lr_sched_cls\", None)\n lr_scheduler_params = training_params.nested_get(\"lr_sched_params\",\n {})\n val_metrics = training_params.nested_get(\"val_metrics\", {})\n\n # necessary for resuming training from a given path\n save_path = kwargs.pop(\"save_path\", os.path.join(\n self.save_path,\n \"checkpoints\",\n \"run_%02d\" % self._run))\n\n return self.trainer_cls(\n network=model,\n save_path=save_path,\n losses=losses,\n key_mapping=self.key_mapping,\n optimizer_cls=optimizer_cls,\n optimizer_params=optimizer_params,\n train_metrics=train_metrics,\n val_metrics=val_metrics,\n lr_scheduler_cls=lr_scheduler_cls,\n lr_scheduler_params=lr_scheduler_params,\n optim_fn=self._optim_builder,\n save_freq=self.checkpoint_freq,\n **kwargs\n )",
"def __init__(self, model_type, model_cfg, training_cfg):\n super().__init__()\n self.save_hyperparameters()\n\n self.model_cfg = model_cfg\n self.training_cfg = training_cfg\n \n if model_type == \"ConvLSTM\":\n self.model = Conv_LSTM(input_dim=self.model_cfg[\"input_channels\"],\n output_dim=self.model_cfg[\"output_channels\"],\n hidden_dims=self.model_cfg[\"hidden_channels\"],\n big_mem=self.model_cfg[\"big_mem\"],\n num_layers=self.model_cfg[\"n_layers\"],\n kernel_size=self.model_cfg[\"kernel\"],\n memory_kernel_size=self.model_cfg[\"memory_kernel\"],\n dilation_rate=self.model_cfg[\"dilation_rate\"],\n baseline=self.training_cfg[\"baseline\"],\n layer_norm_flag=self.model_cfg[\"layer_norm\"],\n img_width=self.model_cfg[\"img_width\"],\n img_height=self.model_cfg[\"img_height\"],\n peephole=self.model_cfg[\"peephole\"])\n elif model_type == \"AutoencLSTM\":\n self.model = AutoencLSTM(input_dim=self.model_cfg[\"input_channels\"],\n output_dim=self.model_cfg[\"output_channels\"],\n hidden_dims=self.model_cfg[\"hidden_channels\"],\n big_mem=self.model_cfg[\"big_mem\"],\n num_layers=self.model_cfg[\"n_layers\"],\n kernel_size=self.model_cfg[\"kernel\"],\n memory_kernel_size=self.model_cfg[\"memory_kernel\"],\n dilation_rate=self.model_cfg[\"dilation_rate\"],\n baseline=self.training_cfg[\"baseline\"],\n layer_norm_flag=self.model_cfg[\"layer_norm\"],\n img_width=self.model_cfg[\"img_width\"],\n img_height=self.model_cfg[\"img_height\"],\n peephole=self.model_cfg[\"peephole\"])\n elif model_type == \"ConvTransformer\":\n self.model = ENS_Conv_Transformer(num_hidden=self.model_cfg[\"num_hidden\"],\n output_dim=self.model_cfg[\"output_channels\"],\n depth=self.model_cfg[\"depth\"],\n dilation_rate=self.model_cfg[\"dilation_rate\"],\n num_conv_layers=self.model_cfg[\"num_conv_layers\"],\n kernel_size=self.model_cfg[\"kernel_size\"],\n img_width=self.model_cfg[\"img_width\"],\n non_pred_channels=self.model_cfg[\"non_pred_channels\"],\n num_layers_query_feat=self.model_cfg[\"num_layers_query_feat\"],\n in_channels=self.model_cfg[\"in_channels\"],\n baseline=self.training_cfg[\"baseline\"])\n self.baseline = self.training_cfg[\"baseline\"]\n self.future_training = self.training_cfg[\"future_training\"]\n self.learning_rate = self.training_cfg[\"start_learn_rate\"]\n self.training_loss = get_loss_from_name(self.training_cfg[\"training_loss\"])\n self.test_loss = get_loss_from_name(self.training_cfg[\"test_loss\"])",
"def defaultconfig(self):\r\n\r\n config_data = {\r\n \"path_to_database\": \"FUDB/FOLLOWUP.DB\",\r\n \"path_to_frontend\": \"FUDB/\",\r\n \"path_to_dcs_info\": \"FUDB/\",\r\n \"path_to_bin\": \"bin/\",\r\n \"path_to_excels_exported_from_database\": \"excels exported/\",\r\n \"path_to_excels_to_be_imported_in_database\": \"excels to be imported/\",\r\n \"path_to_new_opfiles\": \"DC BATCHES IN WORK/0 NEW/\",\r\n \"path_to_batches_unassigned\": \"DC BATCHES IN WORK/1 UNASSIGNED/\",\r\n \"path_to_batches_prepfiles\": \"DC BATCHES IN WORK/2 PREPARED FILES/\",\r\n \"path_to_batches_assigned\": \"DC BATCHES IN WORK/3 ASSIGNED/\",\r\n \"path_to_batches_tobechecked\": \"DC BATCHES IN WORK/4 TO BE CHECKED/\",\r\n \"path_to_batches_tbimported\": \"DC BATCHES IN WORK/5 TO BE IMPORTED/\",\r\n \"path_to_batches_finished\": \"DC BATCHES IN WORK/6 FINISHED/\",\r\n \"path_to_batches_instandby\": \"DC BATCHES IN WORK/7 IN STANDBY/\",\r\n \"path_to_batches_unrecordable\": \"DC BATCHES IN WORK/8 UNRECORDABLE/\",\r\n \"batch_status_options_responsible\": \"PREP. OP FILE, IMPORTATION & SPLIT FILE, RELIABILITY & DATA UPGRADE, CHECK OP FILE, CHECK SPLIT FILE, CHECK FRONT END, **TO BE CHECKED\",\r\n \"batch_status_options_proofreader\": \"OP FILE OK, SPLIT FILE OK, FRONT END OK, **TO BE IMPORTED, **FINISHED, **REWORK, **STANDBY, **UNRECORDABLE\",\r\n \"batch_status_options_overall\": \"ONGOING, STANDBY, FINISHED, UNRECORDABLE\",\r\n \"aircrafts\": \"A300, A300-600, A310, A320, A330, A340, A350, A380\",\r\n \"split_batch_factor\": \"2, 3, 4, 5, 6, 7, 8, 9\",\r\n \"IDlentgh\": \"6\",\r\n \"port\": \"5000\"\r\n }\r\n \r\n if not os.path.isfile(os.path.join(self.cwd, \"config.json\")):\r\n self.func.write_json(config_data, self.cwd, fname=\"config.json\")",
"def manage_config() -> dict:\n required_args = {\"embedding_size\", \"hidden_size\", \"num_layers\", \"corpus_dir\"}\n arg_groups = {\n \"general\": {\"recoding_type\"},\n \"model\": {\"embedding_size\", \"hidden_size\", \"num_layers\", \"dropout\"},\n \"train\": {\"weight_decay\", \"learning_rate\", \"batch_size\", \"num_epochs\", \"clip\", \"print_every\", \"eval_every\",\n \"model_save_path\", \"device\", \"model_name\"},\n \"logging\": {\"log_dir\"},\n \"corpus\": {\"corpus_dir\", \"max_seq_len\"},\n \"recoding\": {\"step_type\", \"num_samples\", \"mc_dropout\", \"prior_scale\", \"hidden_size\", \"weight_decay\",\n \"data_noise\", \"share_anchor\", \"use_cross_entropy\"},\n \"step\": {\"predictor_layers\", \"window_size\", \"step_size\", \"hidden_size\"}\n }\n argparser = init_argparser()\n config_object = ConfigSetup(argparser, required_args, arg_groups)\n config_dict = config_object.config_dict\n\n return config_dict",
"def default_configs(cls):\n config = super().default_configs()\n config.update({\"model\": \"openie\"})\n return config"
]
| [
"0.6581065",
"0.65535516",
"0.64014393",
"0.6348182",
"0.6270936",
"0.6252991",
"0.6247476",
"0.62378055",
"0.62012976",
"0.6048395",
"0.59550464",
"0.594158",
"0.59358466",
"0.5920022",
"0.5893728",
"0.5885183",
"0.58825177",
"0.58792585",
"0.58395",
"0.5823655",
"0.5821777",
"0.58170974",
"0.57939774",
"0.57821196",
"0.5780668",
"0.57729834",
"0.5761243",
"0.5760356",
"0.57545877",
"0.5731129"
]
| 0.7245064 | 0 |
Looks in build_dir for log_file in a folder that also includes the junit file. | def find_log_junit(build_dir, junit, log_file):
tmps = [f.filename for f in view_base.gcs_ls('%s/artifacts' % build_dir)
if '/tmp-node' in f.filename]
for folder in tmps:
filenames = [f.filename for f in view_base.gcs_ls(folder)]
if folder + junit in filenames:
path = folder + log_file
if path in filenames:
return path | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pytest_logger_logsdir(self, config):",
"def test_log_dir(self):\n false_dir = '/tmp/any'\n self.test_config['LOG_DIR'] = false_dir\n self.write_config_to_file()\n self.log = nginx_log_generator()\n self.generate_report()\n # Check our log\n path_to_log = '{}/assets/{}'.format(self.test_dir, 'analyzer.log')\n with open(path_to_log) as f:\n log_content = f.read()\n self.assertTrue(\"Sorry, directory {} wasn't found\".format(false_dir) in log_content)",
"def test_passing_log_fname(self):\n\n log_env_file = \"test.log\"\n log_file = \"test_2.log\"\n whole_env_log_file = os.path.join(LOG_FOLDER, log_env_file)\n whole_log_file = os.path.join(LOG_FOLDER, log_file)\n\n # remove both files if they exist\n for file in (whole_env_log_file, whole_log_file):\n if os.path.exists(file):\n os.remove(file)\n\n os.environ[ENV_WORK_DIR] = TMP_DIR\n os.environ[ENV_LOG_FNAME] = log_env_file\n\n logger = pgo_logger.get_logger(log_file_name=log_file)\n assert logger is not None\n\n logger.info(\"test\")\n\n assert os.path.exists(whole_log_file) is True\n assert os.path.isfile(whole_log_file) is True\n assert os.path.exists(whole_env_log_file) is False",
"def test_missing_dir_in_custom_log_path(set_tempdir, mock_settings_env_vars):\n temp_dir = set_tempdir\n custom_log_path = Path(temp_dir) / \"another_dir\" / \"main.log\"\n log_path_matcher = LogPathCorrectnessMatcher(custom_log_path)\n os.environ[LOG_ENV_VARIABLE_NAME] = str(custom_log_path)\n tasks = run_n_simple_tasks(1)\n\n log_path = UsedLogPath(tasks[0])\n assert log_path == log_path_matcher",
"def test_custom_log_path_points_at_file(set_tempdir, mock_settings_env_vars):\n temp_dir = set_tempdir\n custom_log_path = Path(temp_dir) / \"main.log\"\n log_path_matcher = LogPathCorrectnessMatcher(custom_log_path)\n os.environ[LOG_ENV_VARIABLE_NAME] = str(custom_log_path)\n\n tasks = run_n_simple_tasks(1)\n\n log_path = UsedLogPath(tasks[0])\n assert log_path == log_path_matcher",
"def test_same_logging_file_custom_log_path(set_tempdir, mock_settings_env_vars):\n temp_dir = set_tempdir\n custom_log_path = Path(temp_dir) / \"main.log\"\n log_path_matcher = LogPathCorrectnessMatcher(custom_log_path)\n os.environ[LOG_ENV_VARIABLE_NAME] = str(custom_log_path)\n tasks = run_n_simple_tasks(5)\n\n for task in tasks:\n log_path = UsedLogPath(task)\n assert log_path == log_path_matcher",
"def test_logs(self):\n # Purge all logs\n log_dir = self.test_config['LOG_DIR']\n pattern = re.compile('^nginx-access-ui.log-(?P<day_of_log>\\d{8})(\\.gz)?$')\n logs = [f for f in os.listdir(log_dir) if re.search(pattern, f)]\n map(os.remove, logs)\n\n # Try to make report without logs\n self.generate_report()\n self.assertTrue(self.check_in_log(\"Not found logs in directory {}\".format(self.test_config['LOG_DIR'])))",
"def get_log_dir():\n base_dir = os.path.realpath(cfg.CONF.ruiner.log_dir.rstrip('/'))\n return os.path.join(base_dir, test_start_time_tag())",
"def get_log_folder(cls, test_suite_name):\n if not test_suite_name:\n test_suite_name = os.path.splitext(os.path.basename(sys.modules['__main__'].__file__))[0]\n sdk_path = cls.get_sdk_path()\n log_folder = os.path.join(sdk_path, \"TEST_LOGS\",\n test_suite_name +\n time.strftime(\"_%m%d_%H_%M_%S\", time.localtime(LOG_FOLDER_TIMESTAMP)))\n if not os.path.exists(log_folder):\n os.makedirs(log_folder)\n return log_folder",
"def test_different_custom_logging_file(set_tempdir, mock_settings_env_vars):\n temp_dir = set_tempdir\n task_creator = lambda: generate_root_task(task_class=TestTask, x=\"Test\")\n\n log_path_1 = use_specific_log_file(task_creator, temp_dir, \"first\")\n log_path_2 = use_specific_log_file(task_creator, temp_dir, \"second\")\n\n for log_path in [log_path_1, log_path_2]:\n assert log_path.exists()\n with open(log_path, \"r\") as f:\n log_content = f.read()\n assert f\"Logging: Test\" in log_content",
"def test_preexisting_custom_log_file(set_tempdir, mock_settings_env_vars):\n temp_dir = set_tempdir\n custom_log_path = Path(temp_dir) / \"main.log\"\n log_path_matcher = LogPathCorrectnessMatcher(custom_log_path)\n os.environ[LOG_ENV_VARIABLE_NAME] = str(custom_log_path)\n file_content = \"This existing file has content.\"\n with open(custom_log_path, \"a\") as f:\n f.write(file_content)\n\n tasks = run_n_simple_tasks(1)\n\n log_path = UsedLogPath(tasks[0])\n assert log_path == log_path_matcher\n\n with open(custom_log_path, \"r\") as f:\n log_content = f.read()\n assert file_content in log_content",
"def init_log_files(self): \n \n dir_path = self.init_logs_directory()\n log_files = self.join_path(dir_path, PATH_FOR_LOG_FILES)\n \n return log_files",
"def kstest_logdir(tmpdir, test):\n logfile = test[\"logfile\"]\n for e in logfile.split(os.path.sep):\n if e.startswith(\"kstest-\"):\n return os.path.join(tmpdir, e)\n\n raise RuntimeError(f\"No kstest-* directory found in {logfile}\")",
"def getLogPath(self, folder):\n path = join(self.folder,'experimentLog_0001.txt')\n for f_path in os.listdir(folder):\n if re.search('experimentLog_[0-9]*.txt', f_path):\n path = join(self.folder,f_path)\n break\n\n return path",
"def pytest_logger_logdirlink(self, config):",
"def test_02_log_something(self):\n logger = get_logger(self)\n logger.info('Info in test_02')\n logger.debug('Debug in test_02')\n logger.warn('Warn in test_02')\n logfiles = glob.glob(os.path.join(self.LOG_FOLDER,\n '{}*.log'.format(self.scenario)))\n assert logfiles\n print(logfiles)\n for logfile in logfiles:\n with open(logfile) as f:\n for line in f:\n print(line.strip())",
"def test_passing_env(self):\n\n log_file = \"test.log\"\n whole_log_file = os.path.join(LOG_FOLDER, log_file)\n if os.path.exists(whole_log_file):\n os.remove(whole_log_file)\n\n os.environ[ENV_WORK_DIR] = TMP_DIR\n os.environ[ENV_LOG_FNAME] = log_file\n\n logger = pgo_logger.get_logger()\n\n assert logger is not None\n\n logger.info(\"test\")\n assert os.path.exists(whole_log_file) is True\n assert os.path.isfile(whole_log_file) is True",
"def test_config_system_logger_fpath(get_config, default_config):\n cfg = get_config(SystemConfig, default_config('sys'))\n real_root = os.path.abspath(os.path.dirname(__file__))\n logger = cfg.logger(name='test', level=logging.ERROR)\n\n assert not cfg.DEBUG, 'sudden debug'\n assert logger.level == logging.ERROR, f\"wrong logging level: {logger}\"\n for handler in logger.handlers:\n if handler.name == 'file':\n file_path = os.path.join(real_root, 'res', 'messages.log')\n elif handler.name == 'errors':\n file_path = os.path.join(real_root, 'res', 'errors.log')\n else:\n continue\n assert getattr(handler, 'baseFilename') == file_path, f'wrong file path passed to logger handler'",
"def test_file_logger_all_values_parsed(self, mkdir):\n expected_file_logger = self.__create_file_logger(\n filename=\"/folder/log_file1\",\n format=\"('%(asctime)s [%(threadName)18s][%(levelname)8s] %(message)s')\",\n max_bytes=1024,\n backup_count=1,\n min_level=logging.INFO,\n max_level=logging.ERROR,\n )\n\n # parse config and get first logger\n parsed_config = self._get_parsed_config(\"file_loggers_config.yml\")\n parsed_file_logger = parsed_config.loggers[0]\n\n # make sure file was opened\n mkdir.assert_called_with(\"/folder\")\n\n result, msg = self.__compare_file_loggers(\n expected_file_logger, parsed_file_logger\n )\n self.assertTrue(\n result,\n msg=f\"Full config is not as expected, following comparison failed: {msg}\",\n )",
"def logs_directory(self):",
"def test_custom_log_path_points_at_dir(set_tempdir, mock_settings_env_vars):\n temp_dir = set_tempdir\n custom_log_path = Path(temp_dir)\n os.environ[LOG_ENV_VARIABLE_NAME] = str(custom_log_path)\n\n with pytest.raises(IsADirectoryError):\n run_n_simple_tasks(1)",
"def find_logs():\n dirname = os.path.normpath('./logs')\n d = 1\n\n while d < 5:\n if os.path.exists(dirname):\n return os.path.normpath(dirname)\n d += 1\n dirname = os.path.join('../', dirname)\n\n return dirname",
"def clean_file_before_test():\n\n if os.path.exists(LOG_FOLDER):\n for file in os.listdir(LOG_FOLDER):\n os.remove(LOG_FOLDER + \"/\" + file)",
"def getLogPath():\n pwd = os.path.dirname(os.path.abspath(__file__))\n log_file = os.path.join(pwd, 'log.txt')\n\n return log_file",
"def init_logs_directory(self):\n \n return self.join_and_init_path(self.get_data_general_directory, PATH_FOR_LOGS)",
"def test_build_dir(self):\n build_dir = local.path(str(CFG['build_dir']))\n self.assertTrue(build_dir.exists())",
"def find_log_files(all_logs, log_file):\n log_files = []\n for folder in all_logs.itervalues():\n for log in folder:\n if log_file in log:\n log_files.append(log)\n\n return log_files",
"def test_log(self):\r\n # expected result when no result_path is provided\r\n self.default_app(\r\n seq_path=self.tmp_seq_filepath,\r\n result_path=None,\r\n log_path=self.tmp_log_filepath,\r\n )\r\n\r\n # open the actual log file and the expected file, and pass into lists\r\n with open(self.tmp_log_filepath) as f:\r\n obs = [l.strip() for l in list(f)]\r\n exp = rdp_test1_log_file_contents.split('\\n')\r\n # sort the lists as the entries are written from a dict,\r\n # so order may vary\r\n obs.sort()\r\n exp.sort()\r\n self.assertEqual(obs, exp)",
"def test_download_build_log_file_log_not_file(self, mock_test, mock_os):\n from mod_test.controllers import (TestNotFoundException,\n download_build_log_file)\n\n mock_os.path.isfile.side_effect = TestNotFoundException('msg')\n\n with self.assertRaises(TestNotFoundException):\n download_build_log_file('1')\n\n mock_test.query.filter.assert_called_once()\n mock_os.path.isfile.assert_called_once()",
"def get_log_file_path(self):\n dir_path = self._get_log_file_dir()\n self._check_make_dirs(dir_path)\n return join(dir_path, self.LOG_FILE_NAME)"
]
| [
"0.66856474",
"0.6602517",
"0.63428384",
"0.6296752",
"0.62367845",
"0.62194216",
"0.6125659",
"0.61027193",
"0.60568506",
"0.60384834",
"0.5944967",
"0.58859736",
"0.5884929",
"0.58814096",
"0.5857451",
"0.5755718",
"0.57389444",
"0.5735726",
"0.5714073",
"0.57060564",
"0.562928",
"0.56234455",
"0.5622455",
"0.55970496",
"0.5588696",
"0.5542629",
"0.55414987",
"0.5499667",
"0.54849416",
"0.5480021"
]
| 0.78429896 | 0 |
Returns list of files named log_file from values in all_logs | def find_log_files(all_logs, log_file):
log_files = []
for folder in all_logs.itervalues():
for log in folder:
if log_file in log:
log_files.append(log)
return log_files | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def list_log_files():\n for filename in os.listdir(\"/home/malyhass/log-parser\"):\n if filename.startswith(\"access.log\"):\n yield filename",
"def find_logs():\n\n file_list_targets = [r'/Program Files/IDEMIA/MFace Flex IA/first/log/*.log*',\n r'/Program Files/IDEMIA/MFace Flex IA/first/log/archive/*.log*',\n r'/Program Files/IDEMIA/MFace Flex IA/second/log/*.log*',\n r'/Program Files/IDEMIA/MFace Flex IA/second/log/archive/*.log*',\n r'/Program Files/IDEMIA/MFace Flex IPS/log/*.log*',\n r'/Program Files/IDEMIA/MFace Flex IPS/log/archive/*.log*',\n r'/Program Files/IDEMIA/MFace Flex MS/logs/*.log*',\n r'/Program Files (x86)/IDEMIA/DocAuth/logs/*.log*',\n r'/Temp/*.log*',\n r'/Temp/*.csv*',\n r'/STIP/*.log*',\n r'/ECAT/BioFDRS/*.xml*',\n r'/ECAT/FDRS/*.xml*',\n r'/Program Files/IDEMIA/Cameras/First/*.log*',\n r'/Program Files/IDEMIA/Cameras/Second/*.log*']\n\n file_lists_of_lists = [glob.glob(i, recursive=False) for i in file_list_targets]\n\n # Flatten out the list of lists into one list\n file_list = []\n for i in file_lists_of_lists:\n file_list.extend(i)\n\n return file_list",
"def _get_daemon_logs_files(self):\n for fname in os.listdir('/tmp/'):\n fname = os.path.join('/tmp/', fname)\n if fname.lower().endswith('.log'):\n yield fname",
"def get_access_logs(file_dir=log_dir):\n \n file_list = []\n for myfile in glob.glob1(file_dir, 'access_log*'):\n file_list.append('%s/%s' % (file_dir, myfile))\n# print file_list\n return file_list",
"def get_all_logs(directory, artifacts):\n log_files = {}\n if artifacts:\n dirs = [f.filename for f in view_base.gcs_ls('%s/artifacts' % directory)\n if f.is_dir]\n else:\n dirs = [directory]\n for d in dirs:\n log_files[d] = []\n for f in view_base.gcs_ls(d):\n log_name = regex.log_re.search(f.filename)\n if log_name:\n log_files[d].append(f.filename)\n return log_files",
"def find_logs(self, log_format):\n # print(self.path)\n r, d, files = next(os.walk(self.path))\n # TODO use regex to find logs\n files = list(filter(lambda x: log_format in x, files))\n files = [os.path.join(r, f) for f in files]\n ctimes = [os.path.getctime(os.path.join(self.path, f)) for f in files]\n # print(self.path, files)\n return list(zip(ctimes, files))",
"def all_logs(self):\n return os.listdir(LOGS_BASE_PATH)",
"def _most_recent_event_files(self):\n regex = re.compile(r\"\\w*events.log\")\n return [\n os.path.join(self._output_dir, x)\n for x in os.listdir(self._output_dir)\n if regex.search(x)\n ]",
"def init_log_files(self): \n \n dir_path = self.init_logs_directory()\n log_files = self.join_path(dir_path, PATH_FOR_LOG_FILES)\n \n return log_files",
"def FindLogFiles(base_dir):\n logcat_filter = re.compile(r'^logcat_(\\S+)_(\\d+)$')\n # list of tuples (<device_id>, <seq num>, <full file path>)\n filtered_list = []\n for cur_file in os.listdir(base_dir):\n matcher = logcat_filter.match(cur_file)\n if matcher:\n filtered_list += [(matcher.group(1), int(matcher.group(2)),\n os.path.join(base_dir, cur_file))]\n filtered_list.sort()\n file_map = {}\n for device_id, _, cur_file in filtered_list:\n if device_id not in file_map:\n file_map[device_id] = []\n\n file_map[device_id] += [cur_file]\n return file_map",
"def GetAllLogFilePaths(ssh):\n ssh_cmd = [ssh.GetBaseCmd(constants.SSH_BIN), _FIND_LOG_FILE_CMD]\n log_files = []\n try:\n files_output = utils.CheckOutput(\" \".join(ssh_cmd), shell=True)\n log_files = FilterLogfiles(files_output.splitlines())\n except subprocess.CalledProcessError:\n logger.debug(\"The folder(%s) that running launch_cvd doesn't exist.\",\n constants.REMOTE_LOG_FOLDER)\n return log_files",
"def collect_logs(self):\n logs = glob.glob(f\"{self.production.rundir}/*.err\") #+ glob.glob(f\"{self.production.rundir}/*/logs/*\")\n logs += glob.glob(f\"{self.production.rundir}/*.out\")\n messages = {}\n for log in logs:\n with open(log, \"r\") as log_f:\n message = log_f.read()\n messages[log.split(\"/\")[-1]] = message\n return messages",
"def list_logs():\n resource_route = \"/static/log/\"\n file_request_path = request.base_url[:request.base_url.rfind('/')] + resource_route\n path_to_current_file = os.path.dirname(os.path.abspath(__file__))\n logs_path = os.path.join(path_to_current_file, 'static', 'log')\n directory_list = os.listdir(logs_path)\n log_files = [f for f in directory_list if os.path.isfile(os.path.join(logs_path, f))]\n log_files.sort()\n if '.gitignore' in log_files:\n log_files.remove('.gitignore')\n full_log_paths = [file_request_path + f for f in log_files]\n response_code = 200\n return make_response(jsonify({'files': full_log_paths}), response_code)",
"def process_log_files(source_name, log_file_list):\n\n result_list = []\n out_fname = create_out_fname(source_name, suffix='_sum', ext=\".csv\")\n\n for log_file in log_file_list:\n result_list += process_log(log_file)\n\n if len(result_list) == 0:\n warning(\"Found no lammps log data to process from: {}\".format(source_name))\n else:\n write_csv(result_list, out_fname, LOG_FIELDNAMES, extrasaction=\"ignore\")",
"def collect_filterstats_from_logfiles(*args):\n all_stats = {}\n for path in args:\n with path.open(\"r\") as fp:\n all_stats[path.name] = collect_filterstats_from_log(fp)\n return pandas.DataFrame(all_stats.values(), index=all_stats.keys())",
"def get_all_metrics(dir):\r\n file_lst = os.listdir(dir)\r\n file_lst = list(filter(lambda x: re.findall(r'\\.csv$',x), file_lst))\r\n return file_lst",
"def getLogFileNames():\r\n return [\"Server1.txt\", \"Server2.txt\", \"Client1.txt\", \"Client2.txt\"]",
"def find_legacy_log_files(xcresult_path):\n\n result = []\n\n for root, dirs, files in os.walk(xcresult_path, topdown=True):\n for file in files:\n if file.endswith('.txt'):\n file = os.path.join(root, file)\n result.append(file)\n\n # Sort the files by creation time.\n result.sort(key=lambda f: os.stat(f).st_ctime)\n return result",
"def find_legacy_log_files(xcresult_path):\n\n result = []\n\n for root, dirs, files in os.walk(xcresult_path, topdown=True):\n for file in files:\n if file.endswith('.txt'):\n file = os.path.join(root, file)\n result.append(file)\n\n # Sort the files by creation time.\n result.sort(key=lambda f: os.stat(f).st_ctime)\n return result",
"def get_filenames_from_loggers(loggers=None, _loggingmodule=None):\n _loggingmodule = _loggingmodule or _logging\n if loggers is None:\n loggers = [_loggingmodule.root]\n # noinspection PyUnresolvedReferences\n loggers.extend(_loggingmodule.Logger.manager.loggerDict.values())\n allfilenames = set()\n # Placeholders can be in the logger so limit it to\n # only loggers who have handlers.\n for logger in filter(lambda lo: hasattr(lo, 'handlers'), loggers):\n #Get all loghandler's baseFilename attr (or None), filter out\n # those that don't have it.\n filenames = [getattr(h, 'baseFilename', None) for h in logger.handlers]\n for f in filter(None, filenames):\n allfilenames.add(_os.path.abspath(f))\n return tuple(allfilenames)",
"def GetDeviceLogs(log_filenames, logger):\n device_logs = []\n\n for device, device_files in log_filenames.items():\n logger.debug('%s: %s', device, str(device_files))\n device_file_lines = []\n for cur_file in device_files:\n with open(cur_file) as f:\n device_file_lines += [(cur_file, f.read().splitlines())]\n combined_lines = CombineLogFiles(device_file_lines, logger)\n # Prepend each line with a short unique ID so it's easy to see\n # when the device changes. We don't use the start of the device\n # ID because it can be the same among devices. Example lines:\n # AB324: foo\n # AB324: blah\n device_logs += [('\\n' + device[-5:] + ': ').join(combined_lines)]\n return device_logs",
"def get_log_files_to_delete(self):\n dir_name, base_name = os.path.split(self.baseFilename)\n file_names = os.listdir(dir_name)\n result = []\n n, e = os.path.splitext(base_name)\n prefix = n + \".\"\n plen = len(prefix)\n for file_name in file_names:\n if self.namer is None:\n if not file_name.startswith(base_name):\n continue\n else:\n if (\n not file_name.startswith(base_name)\n and file_name.endswith(e)\n and len(file_name) > (plen + 1)\n and not file_name[plen + 1].isdigit()\n ):\n continue\n if file_name[:plen] == prefix:\n suffix = file_name[plen:]\n parts = suffix.split(\".\")\n for part in parts:\n if self.extMatch.match(part):\n result.append(os.path.join(dir_name, file_name))\n break\n if len(result) < self.backupCount:\n result = []\n else:\n result.sort()\n result = result[: len(result) - self.backupCount]\n return result",
"def getAllEntries(self):\n \n log_entries_dict = collections.defaultdict(list)\n for logfile in os.listdir(self.log_folder):\n log = os.path.join(self.log_folder, logfile)\n with open(log, 'rb') as l:\n logCSVreader = csv.reader(l, delimiter=\"|\")\n logCSVreader.next() # skip header\n try:\n for row in logCSVreader:\n zip_file = row[0]\n log_entries_dict[zip_file].append(row)\n except:\n pass\n return log_entries_dict",
"def _filenames(self, dir_or_file):\n if os.path.isdir(dir_or_file):\n return glob(os.path.join(dir_or_file, \"*.txt\"))\n else:\n return [dir_or_file]",
"def get_logs(logs_dir,useful_columns):\n logs_df_all=None\n for file_name in os.listdir(logs_dir):\n log_path=os.path.join(logs_dir,file_name)\n if log_path.endswith('.csv'):\n curr_logs_df=spark.read.csv(log_path,header=True)\n curr_logs_df=curr_logs_df.select(useful_columns)\n if logs_df_all is None:\n logs_df_all=curr_logs_df\n else:\n logs_df_all=logs_df_all.union(curr_logs_df)\n return logs_df_all",
"def list_log_files(fs, devices, start_times, verbose=True, passwords={}):\n import canedge_browser\n\n log_files = []\n\n if len(start_times):\n for idx, device in enumerate(devices):\n start = start_times[idx]\n log_files_device = canedge_browser.get_log_files(fs, [device], start_date=start, passwords=passwords)\n log_files.extend(log_files_device)\n\n if verbose:\n print(f\"Found {len(log_files)} log files\\n\")\n\n return log_files",
"def getLogs():",
"def getLogs():",
"def filelist(basedir,interval_period_date,channel_list):\n files = []\n files_list = glob.glob(basedir+'/*')\n files_list_ch = []\n for s in files_list:\n if any(day_s in s for day_s in channel_list):\n files_list_ch.append(s)\n day_files = []\n for ch_folder in files_list_ch:\n files = glob.glob(ch_folder+'/*')\n date_file = [file for file in files if interval_period_date in file]\n if date_file != []:\n day_files.append(date_file[0])\n return sorted(day_files)",
"def test_result_group_logs_all_results(self, logs: List[pathlib.Path]):\n assert len(logs) == 2"
]
| [
"0.7600446",
"0.7588688",
"0.7271703",
"0.7102535",
"0.6964148",
"0.6879073",
"0.6856349",
"0.6664205",
"0.6594064",
"0.6556262",
"0.6484734",
"0.6444405",
"0.63837224",
"0.63441855",
"0.6228864",
"0.6220997",
"0.62146014",
"0.61931854",
"0.61931854",
"0.6190094",
"0.61443925",
"0.61102957",
"0.60494435",
"0.60267115",
"0.59615034",
"0.5961155",
"0.59544",
"0.59544",
"0.5948381",
"0.59473246"
]
| 0.8292149 | 0 |
Returns a dictionary, given the artifacts folder, with the keys being the folders and the values being the log files within the corresponding folder
log_files = {}
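    # with artifacts=True, list each artifacts/ subdirectory of the build; otherwise scan only the given directory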
if artifacts:
dirs = [f.filename for f in view_base.gcs_ls('%s/artifacts' % directory)
if f.is_dir]
else:
dirs = [directory]
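    # keep every file whose name matches the log filename regex, grouped by the directory it came from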
for d in dirs:
log_files[d] = []
for f in view_base.gcs_ls(d):
log_name = regex.log_re.search(f.filename)
if log_name:
log_files[d].append(f.filename)
return log_files | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def collect_logs(self):\n logs = glob.glob(f\"{self.production.rundir}/*.err\") #+ glob.glob(f\"{self.production.rundir}/*/logs/*\")\n logs += glob.glob(f\"{self.production.rundir}/*.out\")\n messages = {}\n for log in logs:\n with open(log, \"r\") as log_f:\n message = log_f.read()\n messages[log.split(\"/\")[-1]] = message\n return messages",
"def getAllEntries(self):\n \n log_entries_dict = collections.defaultdict(list)\n for logfile in os.listdir(self.log_folder):\n log = os.path.join(self.log_folder, logfile)\n with open(log, 'rb') as l:\n logCSVreader = csv.reader(l, delimiter=\"|\")\n logCSVreader.next() # skip header\n try:\n for row in logCSVreader:\n zip_file = row[0]\n log_entries_dict[zip_file].append(row)\n except:\n pass\n return log_entries_dict",
"def get_history_files(save_folder):\n for dirpath, _, filenames in os.walk(save_folder):\n result = {}\n for history_file in filenames:\n channel_id, extension = os.path.splitext(os.path.basename(history_file))\n if extension != \".json\": continue\n result[channel_id] = os.path.join(dirpath, history_file)\n return result\n return {}",
"def get_logs(build_dir, log_files, pod_name, filters, objref_dict):\n all_logs = {}\n results = {}\n old_dict_len = len(objref_dict)\n\n all_logs = get_all_logs(build_dir, True)\n apiserver_filename = find_log_files(all_logs, \"kube-apiserver.log\")\n kubelet_filenames = find_log_files(all_logs, \"kubelet.log\")\n if not pod_name and not objref_dict:\n return get_logs_no_pod(apiserver_filename, kubelet_filenames, filters,\n objref_dict, all_logs)\n for kubelet_log in kubelet_filenames:\n if pod_name:\n parsed_dict, pod_in_file = parse_log_file(kubelet_log, pod_name, make_dict=True,\n objref_dict=objref_dict)\n objref_dict.update(parsed_dict)\n if len(objref_dict) > old_dict_len or not pod_name or pod_in_file or not objref_dict:\n if log_files == []:\n log_files = [kubelet_log]\n if apiserver_filename:\n log_files.extend(apiserver_filename)\n for log_file in log_files:\n parsed_file = parse_log_file(log_file, pod_name, filters,\n objref_dict=objref_dict)\n if parsed_file:\n results[log_file] = parsed_file\n break\n\n return all_logs, results, objref_dict, log_files",
"def artifacts(self) -> dict:\n return {}",
"def find_log_files(all_logs, log_file):\n log_files = []\n for folder in all_logs.itervalues():\n for log in folder:\n if log_file in log:\n log_files.append(log)\n\n return log_files",
"def init_log_files(self): \n \n dir_path = self.init_logs_directory()\n log_files = self.join_path(dir_path, PATH_FOR_LOG_FILES)\n \n return log_files",
"def _parse_logs_path(path):\n tokens = [token for token in path.split('/') if len(token) > 0]\n artifact = {}\n job = {}\n\n if tokens[0].startswith('periodic'):\n if len(tokens) >= 3:\n artifact['pipeline'] = tokens[0]\n job['name'] = tokens[1]\n else:\n if len(tokens) >= 2:\n artifact['change_id'] = int(tokens[1])\n\n if len(tokens) >= 3:\n artifact['revision'] = int(tokens[2])\n\n if len(tokens) >= 4:\n artifact['pipeline'] = tokens[3]\n\n if len(tokens) >= 5:\n job['name'] = tokens[4]\n\n return artifact, job",
"def logs_directory(self):",
"def FindLogFiles(base_dir):\n logcat_filter = re.compile(r'^logcat_(\\S+)_(\\d+)$')\n # list of tuples (<device_id>, <seq num>, <full file path>)\n filtered_list = []\n for cur_file in os.listdir(base_dir):\n matcher = logcat_filter.match(cur_file)\n if matcher:\n filtered_list += [(matcher.group(1), int(matcher.group(2)),\n os.path.join(base_dir, cur_file))]\n filtered_list.sort()\n file_map = {}\n for device_id, _, cur_file in filtered_list:\n if device_id not in file_map:\n file_map[device_id] = []\n\n file_map[device_id] += [cur_file]\n return file_map",
"def filelist(folder):\n file_dict={}\n folderlist = glob.glob(os.getcwd()+\"/\"+folder+\"/*\")\n for i in tqdm(folderlist):\n filelist = glob.glob(i+\"/*\")\n filename = i.rsplit(\"/\")[-1]\n file_dict[filename]= filelist\n\n return file_dict",
"def get_folders_content(path):\n folder_contents = {}\n for folder_name, folder in zip(json_names, experiment_folders(path)):\n if not folder.exists():\n raise RuntimeError(\n f\"The {folder_name} folder doesn' exist in the\" \" specified path\"\n )\n\n folder_contents[folder_name] = folder.glob(\"*\")\n return folder_contents",
"def process_logs(logs):\n all_data = {}\n for log in logs:\n with open(log) as f:\n data = json.load(f)\n scenario = data[0].get(\"scenario\", None)\n if scenario is None:\n # No scenario name, no way to organize the data\n continue\n\n # Use the log's date as the run identifier\n # This assumes the format is SCENARIO-YYYY-MM-DD.json\n # NOTE: This may not match the GitHub Action run dates due to tests taking\n # a very long time.\n day = datetime.strptime(log[1+len(scenario):-5], \"%Y-%m-%d\").strftime(\"%Y%m%d\")\n if day not in all_data:\n all_data[day] = {}\n\n # Group them by scenario, assume each file is from one scenario per day\n all_data[day][scenario] = data\n return all_data",
"def createLogFolders():\n os.chdir(\"ARCHIVES\")\n logFolder = datetime.datetime.now().strftime(\"ARCHIVE_%d_%b_%Y_%H_%M_%S_0\")\n while logFolder in os.listdir():\n split = logFolder.split('_')\n curIndex = int(split[7])\n nextIndex = curIndex + 1\n split[7] = str(nextIndex)\n logFolder = '_'.join(split)\n os.mkdir(logFolder)\n os.chdir(logFolder)\n os.mkdir(\"Premigration\")\n os.mkdir(\"Migration\")\n os.mkdir(\"Postmigration\")\n os.mkdir(\"Other\")\n print(\"Storing All Logs in ARCHIVES/%s\"%logFolder)\n globs.ARCHIVEFOLDER = os.getcwd()\n os.chdir(globs.PROGDIR)",
"def extract_folder_file_structure() -> Dict[str, List[str]]:\n folders_and_files = {}\n for path_to_folder in glob.glob(f\"{ZULIPTERMINAL}/**/\", recursive=True):\n complete_directory_path = Path(path_to_folder)\n if complete_directory_path.name in FOLDERS_TO_EXCLUDE:\n continue\n relative_directory_path = complete_directory_path.relative_to(ROOT_DIRECTORY)\n if str(relative_directory_path) not in DESC_FOR_NO_FILE_FOLDERS:\n files_in_directory = [\n file.name\n for file in complete_directory_path.glob(\"*.py\")\n if file.name != \"__init__.py\"\n ]\n folders_and_files[str(relative_directory_path)] = files_in_directory\n return folders_and_files",
"def find_logs():\n\n file_list_targets = [r'/Program Files/IDEMIA/MFace Flex IA/first/log/*.log*',\n r'/Program Files/IDEMIA/MFace Flex IA/first/log/archive/*.log*',\n r'/Program Files/IDEMIA/MFace Flex IA/second/log/*.log*',\n r'/Program Files/IDEMIA/MFace Flex IA/second/log/archive/*.log*',\n r'/Program Files/IDEMIA/MFace Flex IPS/log/*.log*',\n r'/Program Files/IDEMIA/MFace Flex IPS/log/archive/*.log*',\n r'/Program Files/IDEMIA/MFace Flex MS/logs/*.log*',\n r'/Program Files (x86)/IDEMIA/DocAuth/logs/*.log*',\n r'/Temp/*.log*',\n r'/Temp/*.csv*',\n r'/STIP/*.log*',\n r'/ECAT/BioFDRS/*.xml*',\n r'/ECAT/FDRS/*.xml*',\n r'/Program Files/IDEMIA/Cameras/First/*.log*',\n r'/Program Files/IDEMIA/Cameras/Second/*.log*']\n\n file_lists_of_lists = [glob.glob(i, recursive=False) for i in file_list_targets]\n\n # Flatten out the list of lists into one list\n file_list = []\n for i in file_lists_of_lists:\n file_list.extend(i)\n\n return file_list",
"def _get_daemon_logs_files(self):\n for fname in os.listdir('/tmp/'):\n fname = os.path.join('/tmp/', fname)\n if fname.lower().endswith('.log'):\n yield fname",
"def get_files_dict(folder_path, filter_term, recursive):\n if recursive:\n query = folder_path + '**/' + filter_term\n files_list = glob.glob(query, recursive=True)\n else:\n query = folder_path + filter_term\n files_list = glob.glob(query, recursive=False)\n files_list = [f for f in files_list if os.path.isfile(f)]\n files_dict = {f: get_timestamp(f) for f in files_list}\n return files_dict",
"def _get_log_file(self, _action):\n prefix = \"work/{mapper}.{{library_name}}/log/{mapper}.{{library_name}}\".format(\n mapper=self.__class__.name\n )\n key_ext = (\n (\"log\", \".log\"),\n (\"conda_info\", \".conda_info.txt\"),\n (\"conda_list\", \".conda_list.txt\"),\n )\n for key, ext in key_ext:\n yield key, prefix + ext\n yield key + \"_md5\", prefix + ext + \".md5\"",
"def _get_logs(self):\n contents = dict()\n contents[\"Scheduler\"] = self._parse_log_content(\n self.scheduler.client.get_scheduler_logs()\n )\n log_workers = self.scheduler.client.get_worker_logs()\n for i, (_, worker_content) in enumerate(log_workers.items()):\n contents[f\"Worker-{i}\"] = self._parse_log_content(worker_content)\n return contents",
"def read_logs(self):\n for system, filenames in SmokeTests.INPUT_FILES.items():\n input_file = filenames[\"logs\"]\n with open(input_file) as fin:\n self._logs[system] = fin.read()",
"def get_logs():\n callback = bottle.request.query.get('callback')\n folder = os.path.dirname(os.path.abspath(__file__))\n test_run_title = bottle.request.query.test_run_id\n results = {'logs': {'monitor': '', 'testrun': ''}, 'host': bottle.request.headers.get('host')}\n try:\n with open(os.path.join(folder, 'monitor.log'), 'r+') as _f:\n results['logs'].update({'monitor': tools.get_last_logs(_f.readlines())})\n with open(os.path.join(folder, '%s-testrun.log' % test_run_title), 'r+') as _f:\n results['logs'].update({'testrun': tools.get_last_logs(_f.readlines())})\n except IOError as err:\n key = 'monitor' if 'monitor' in str(err) else 'testrun'\n results['logs'].update({key: 'Could not find logs: %s' % err})\n return '{0}({1})'.format(callback, [results])",
"def create_file_dict():\n import os\n file_dict = {}\n for root, dirs, files in os.walk('.'):\n dirs[:] = [ # add any extra dirs to ignore #\n d for d in dirs\n if '.' not in d\n and 'ENV' not in d\n and '__' not in d\n and 'build' not in d\n ]\n for f in files:\n try:\n with open(f, 'r') as thing:\n res = thing.readline()\n except:\n res = ''\n file_name = os.path.join(root, f).lstrip('./')\n file_dict[file_name] = res\n return file_dict",
"def filenames(self) -> dict[str, str]:\r\n ...",
"def getLogs():",
"def getLogs():",
"def init_logs_directory(self):\n \n return self.join_and_init_path(self.get_data_general_directory, PATH_FOR_LOGS)",
"def find_logs(self, log_format):\n # print(self.path)\n r, d, files = next(os.walk(self.path))\n # TODO use regex to find logs\n files = list(filter(lambda x: log_format in x, files))\n files = [os.path.join(r, f) for f in files]\n ctimes = [os.path.getctime(os.path.join(self.path, f)) for f in files]\n # print(self.path, files)\n return list(zip(ctimes, files))",
"def getLogPath(self, folder):\n path = join(self.folder,'experimentLog_0001.txt')\n for f_path in os.listdir(folder):\n if re.search('experimentLog_[0-9]*.txt', f_path):\n path = join(self.folder,f_path)\n break\n\n return path",
"def survey_logs(results_dir, log_filenames=log_filenames,\n recursively=True, verbose=1):\n\n if verbose > 1:\n print(f\"\\nLooking for log files in:\\n '{results_dir}'\")\n\n log_dirs = []\n log_dir_paths = []\n dir_contents = []\n\n for root, directories, filenames in os.walk(results_dir):\n\n # Go through all files except hidden ones\n selected_files = [f for f in filenames if not f.startswith('.')]\n checks = pd.Series(\n [f in selected_files for f in log_filenames.values()],\n index=log_filenames.keys()\n )\n if checks.sum() > 0:\n log_dirs.append(os.path.split(root)[-1])\n log_dir_paths.append(root)\n dir_contents.append(checks)\n index = pd.Index(log_dirs, name='Folder name')\n log_inventory = pd.DataFrame.from_records(dir_contents, index=index)\n log_inventory['Path'] = log_dir_paths\n\n if verbose > 0:\n print(f\"{len(log_inventory)} log directories inventoried\")\n\n return log_inventory"
]
| [
"0.7088755",
"0.65402335",
"0.61850625",
"0.6157875",
"0.61373955",
"0.6120842",
"0.6115297",
"0.6083896",
"0.60730094",
"0.60391843",
"0.6012872",
"0.59854996",
"0.59497964",
"0.58434486",
"0.5785914",
"0.57677436",
"0.5703347",
"0.5692961",
"0.5668204",
"0.5629328",
"0.5591093",
"0.5557526",
"0.55445707",
"0.55150527",
"0.55036926",
"0.55036926",
"0.54540616",
"0.54316044",
"0.54189765",
"0.54173756"
]
| 0.74142367 | 0 |
Based on make_dict, either returns the objref_dict or the parsed log file | def parse_log_file(log_filename, pod, filters=None, make_dict=False, objref_dict=None):
log = gcs_async.read(log_filename).get_result()
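    # a missing or unreadable log comes back as None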
if log is None:
        return ({}, False) if make_dict else None
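    # highlight occurrences of the pod name when one is given; otherwise highlight error lines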
if pod:
bold_re = regex.wordRE(pod)
else:
bold_re = regex.error_re
if objref_dict is None:
objref_dict = {}
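    # make_dict mode extracts an object-reference dict from the kubelet log; otherwise the log is digested for display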
if make_dict and pod:
return kubelet_parser.make_dict(log.decode('utf8', 'replace'), bold_re, objref_dict)
else:
return log_parser.digest(log.decode('utf8', 'replace'),
error_re=bold_re, filters=filters, objref_dict=objref_dict) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def object_decoder(obj):\n\t\tif 'logfile' in obj:\n\t\t\treturn logfile(obj['logfile']['name'], obj['logfile']['lines'], obj['logfile']['type'], obj['logfile']['content'], obj['logfile']['sources'])\n\t\tif 'logfile_entry' in obj:\n\t\t\tif len(obj['logfile_entry']['timestamp']['datetime']) >= 20 :\n\t\t\t\tdate = datetime.datetime.strptime(obj['logfile_entry']['timestamp']['datetime'],\"%Y-%m-%dT%H:%M:%S.%f\")\n\t\t\telif obj['logfile_entry']['timestamp']['datetime'][-6:-5] != '+':\n\t\t\t\tdate = datetime.datetime.strptime(obj['logfile_entry']['timestamp']['datetime'],\"%Y-%m-%dT%H:%M:%S\")\n\t\t\telse:\n\t\t\t\tunformatted_date = obj['logfile_entry']['timestamp']['datetime']\n\t\t\t\tunformatted_date = unformatted_date[:-3]+unformatted_date[-2:]\n\t\t\t\t# once again, related to missing features in Python 3.6\n\t\t\t\tdate = datetime.datetime.strptime(unformatted_date,\"%Y-%m-%dT%H:%M:%S.%f%z\")\n\t\t\treturn logfile_entry(obj['logfile_entry']['id'], file, obj['logfile_entry']['message'], obj['logfile_entry']['structured_data'], date,obj['logfile_entry']['hostname'],obj['logfile_entry']['source'])\n\t\treturn obj",
"def attributesFromDict(d):\n self = d.pop('self')\n for name, value in d.items():\n setattr(self, name, value)\n \n \"\"\"Manage a log file\"\"\"\n \n def __init__(self, logfile):\n \"\"\"logfile is the file name or None\"\"\"\n\n self.logfile = logfile\n if self. logfile:\n self.file = open(logfile, \"w\")\n self.starttime = time.time()\n self.file.write(\"%.2f %s Starting log\\n\" % (time.time() - self.starttime, time.asctime()))\n \n def __enter__(self):\n return self\n \n def write(self, text):\n if self.logfile:\n self.file.write(\"%.2f: %s\\n\" % (time.time() - self.starttime, text))\n self.file.flush()\n \n def close(self):\n if self.logfile:\n self.write(\"Closing log\")\n self.file.close()",
"def _readin_JSON(file):\n\tdef object_decoder(obj):\n\t\t\"\"\"This function is used to properly load the JSON elements into the corresponding classes.\"\"\"\n\t\tif 'logfile' in obj:\n\t\t\treturn logfile(obj['logfile']['name'], obj['logfile']['lines'], obj['logfile']['type'], obj['logfile']['content'], obj['logfile']['sources'])\n\t\tif 'logfile_entry' in obj:\n\t\t\tif len(obj['logfile_entry']['timestamp']['datetime']) >= 20 :\n\t\t\t\tdate = datetime.datetime.strptime(obj['logfile_entry']['timestamp']['datetime'],\"%Y-%m-%dT%H:%M:%S.%f\")\n\t\t\telif obj['logfile_entry']['timestamp']['datetime'][-6:-5] != '+':\n\t\t\t\tdate = datetime.datetime.strptime(obj['logfile_entry']['timestamp']['datetime'],\"%Y-%m-%dT%H:%M:%S\")\n\t\t\telse:\n\t\t\t\tunformatted_date = obj['logfile_entry']['timestamp']['datetime']\n\t\t\t\tunformatted_date = unformatted_date[:-3]+unformatted_date[-2:]\n\t\t\t\t# once again, related to missing features in Python 3.6\n\t\t\t\tdate = datetime.datetime.strptime(unformatted_date,\"%Y-%m-%dT%H:%M:%S.%f%z\")\n\t\t\treturn logfile_entry(obj['logfile_entry']['id'], file, obj['logfile_entry']['message'], obj['logfile_entry']['structured_data'], date,obj['logfile_entry']['hostname'],obj['logfile_entry']['source'])\n\t\treturn obj\n\n\tfp = open(file,'r')\n\tlf = json.load(fp, object_hook=object_decoder)\n\tfp.close()\n\treturn lf",
"def _return_context_dict_from_log(self, log_ids: list) -> dict:\n if not self.debug_path:\n for log_id in log_ids:\n result = self.client.download_file(log_id)\n with open(result, \"r+\") as log_info:\n for line in log_info:\n if self.RAW_RESPONSE_HEADER.match(line):\n try:\n return json.loads(log_info.readline())\n except Exception:\n pass\n if self.CONTEXT_HEADER.match(line) and not self.raw_response:\n context = \"\"\n line = log_info.readline()\n while not self.HUMAN_READABLE_HEADER.match(line):\n context = context + line\n line = log_info.readline()\n context = re.sub(r\"\\(val\\..+\\)\", \"\", context)\n try:\n temp_dict = json.loads(context)\n if temp_dict:\n return temp_dict\n except Exception:\n pass\n return dict()\n else:\n temp_dict = dict()\n with open(self.debug_path, \"w+b\") as output_file:\n for log_id in log_ids:\n result = self.client.download_file(log_id)\n with open(result, \"r+\") as log_info:\n for line in log_info:\n if self.RAW_RESPONSE_HEADER.match(line) and not temp_dict:\n output_file.write(line.encode(\"utf-8\"))\n line = log_info.readline()\n try:\n temp_dict = json.loads(line)\n except Exception:\n pass\n if (\n self.CONTEXT_HEADER.match(line)\n and not self.raw_response\n ):\n context = \"\"\n output_file.write(line.encode(\"utf-8\"))\n line = log_info.readline()\n while not self.HUMAN_READABLE_HEADER.match(line):\n output_file.write(line.encode(\"utf-8\"))\n context = context + line\n line = log_info.readline()\n context = re.sub(r\"\\(val\\..+\\)\", \"\", context)\n try:\n temp_dict = json.loads(context)\n except Exception:\n pass\n output_file.write(line.encode(\"utf-8\"))\n logger.info(\n f\"[green]Debug Log successfully exported to {self.debug_path}[/green]\"\n )\n return temp_dict",
"def fileobject_to_dict(fo):\n if fo.allocated():\n # proc = subprocess.Popen(['./extract_strings', fo.inode()], stdout=subprocess.PIPE)\n # contents = proc.stdout.read()\n return {\n 'atime_dt': epoch_to_dt(fo.atime()),\n 'compressed_b': fo.compressed(),\n 'contents_t': string.translate(fo.contents(), filter),\n 'contents_display': string.translate(fo.contents(), filter),\n 'crtime_dt': epoch_to_dt(fo.crtime()),\n 'ctime_dt': epoch_to_dt(fo.ctime()),\n 'dtime_dt': epoch_to_dt(fo.dtime()),\n 'encrypted_b': fo.encrypted(),\n 'extension_facet': fo.ext(),\n 'fileid_i': int(fo._tags['id']),\n 'filename_display': fo.filename(),\n 'filename_t': fo.filename(),\n 'filesize_l': long(fo.filesize()),\n 'fragments_i': int(fo.fragments()),\n 'gid_i': int(fo._tags['gid']),\n #'id': uuid.uuid4(),\n 'id': hashlib.sha1(os.path.basename(IMAGE) + '_' + fo.inode()).hexdigest(),\n #'imagefile': fo._tags['imagefile'],\n 'inode_i': int(fo.inode()),\n 'libmagic_display': fo.libmagic(),\n 'libmagic_facet': fo.libmagic(),\n 'md5_s': fo.md5(),\n 'meta_type_i': fo._tags['meta_type'],\n 'mode_facet': int(fo._tags['mode']),\n 'mode_i': int(fo._tags['mode']),\n 'mtime_dt': epoch_to_dt(fo.mtime()),\n 'nlink_i': fo._tags['nlink'],\n 'name_type_s': fo.name_type(),\n 'partition_i': int(fo.partition()),\n 'sha1_s': fo.sha1(),\n 'uid_i': int(fo._tags['uid']),\n 'volume_display': IMAGE,\n 'volume_facet': os.path.basename(IMAGE)\n }\n else:\n return None",
"def _resolve_dict_entry(self, doc_uri, main_doc, obj):\n # Interpret '$ref' key if present in obj\n if '$ref' in obj:\n result = self._load_ref(doc_uri, main_doc, obj['$ref'])\n else:\n result = self.dict_class()\n # Merge values from obj with result\n for k, v in obj.items():\n if k != '$ref':\n result[k] = self._resolve(doc_uri, main_doc, v)\n return result",
"def __init__(self, fname):\n self.format = 2\n self.target = {}\n self.filters = {}\n self.comment = {}\n\n try:\n rec = re.compile('file\\s+object\\s+filter', re.I)\n rre = re.compile('(run\\d\\d\\d\\d?)(.*)')\n old = re.compile('\\s*(\\S+)\\s+(\\S+)\\s+(.*)$')\n oldii = re.compile('\\s*(\\S+)\\s*$')\n with open(fname) as f:\n for line in f:\n m = rec.search(line)\n if m:\n self.format = 1\n if len(self.comment):\n raise Exception('Error in night log = ' + fname + ', line = ' + line)\n\n mr = rre.match(line)\n if mr:\n run = mr.group(1)\n if self.format == 2:\n self.comment[run] = mr.group(2).strip()\n else:\n m = old.search(mr.group(2))\n if m:\n self.target[run] = m.group(1)\n self.filters[run] = m.group(2)\n self.comment[run] = m.group(3)\n else:\n m = oldii.search(mr.group(2))\n if m:\n self.target[run] = m.group(1)\n except FileNotFoundError:\n sys.stdout.write(f'Night log = {fname} does not exist\\n')\n except Exception as err:\n sys.stdout.write(f'Problem on night log = {fname}:' + str(err) + '\\n')",
"def ProcessEntryFile(fd):\n\tglobal reference\n\n\tname = ''\n\tfilename = ''\n\tdd = {}\n\teof = False\n\twhile not eof:\n\t\tline = fd.readline()\n\t\tif len(line) == 0:\n\t\t\teof = True\n\t\t\tif name in reference.keys():\n\t\t\t\treference[name] = dd\n\t\t\telif name != '':\n\t\t\t\treference[name] = dd\n\t\t\t#if verbose: print reference\n\t\telse:\n\t\t\tline = line.strip()\n\t\t\tif line.startswith('name'):\n\t\t\t\tif name in reference.keys() or name != '':\n\t\t\t\t\treference[name] = dd\n\t\t\t\ttokens = line.split()\n\t\t\t\tnn = tokens[0].split('=')\n\t\t\t\tname = nn[1]\n\t\t\t\tdd = {}\n\t\t\telif line.startswith('file'):\n\t\t\t\tfilename = line[len('file='):]\n\t\t\t\tif name in reference.keys():\n\t\t\t\t\tdd \t= reference[name]\n\t\t\t\t\tif dd.has_key(filename):\n\t\t\t\t\t\tfilename = ''\n\t\t\telse:\n\t\t\t\tif filename != '':\n\t\t\t\t\ttokens = line.split()\n\t\t\t\t\tlength = len(tokens)\n\t\t\t\t\t#print tokens\n\t\t\t\t\tfirst = True\n\t\t\t\t\tfor t in range(0,length,2):\n\t\t\t\t\t\tpos = tokens[t].find('=')\n\t\t\t\t\t\tcountline = int(tokens[t][pos+1:])\n\t\t\t\t\t\tpos = tokens[t+1].find('=')\n\t\t\t\t\t\tref = tokens[t+1][pos+1:]\n\t\t\t\t\t\ttline = (countline,ref)\n\t\t\t\t\t\tif first:\n\t\t\t\t\t\t\tdd[filename] = [tline]\n\t\t\t\t\t\t\tfirst = False\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tff = dd[filename] #list of tuples (line,ref)\t\t\t\t\n\t\t\t\t\t\t\tff.append(tline)\n\t\t\t\t\t\t\tdd[filename] = ff",
"def read_logs(self) -> Dict[str, Any]:\n return self.maps[0]",
"def get_logs(build_dir, log_files, pod_name, filters, objref_dict):\n all_logs = {}\n results = {}\n old_dict_len = len(objref_dict)\n\n all_logs = get_all_logs(build_dir, True)\n apiserver_filename = find_log_files(all_logs, \"kube-apiserver.log\")\n kubelet_filenames = find_log_files(all_logs, \"kubelet.log\")\n if not pod_name and not objref_dict:\n return get_logs_no_pod(apiserver_filename, kubelet_filenames, filters,\n objref_dict, all_logs)\n for kubelet_log in kubelet_filenames:\n if pod_name:\n parsed_dict, pod_in_file = parse_log_file(kubelet_log, pod_name, make_dict=True,\n objref_dict=objref_dict)\n objref_dict.update(parsed_dict)\n if len(objref_dict) > old_dict_len or not pod_name or pod_in_file or not objref_dict:\n if log_files == []:\n log_files = [kubelet_log]\n if apiserver_filename:\n log_files.extend(apiserver_filename)\n for log_file in log_files:\n parsed_file = parse_log_file(log_file, pod_name, filters,\n objref_dict=objref_dict)\n if parsed_file:\n results[log_file] = parsed_file\n break\n\n return all_logs, results, objref_dict, log_files",
"def parse_record(dom, record_dict, log):\n return parser(dom, record_dict, log)",
"def recover_objects(self, log, flat_log):\n pass",
"def __init__(self, file='logs.json'):\n self.file = file\n self.log_data = {}",
"def __init__(self, cliDict):\r\n\r\n print(\"initializing Model\")\r\n #self.filename = filename\r\n #print(\"filename is \",self.filename)\r\n self.cliDict=cliDict\r\n self.objdict = {'HOSTNAME':'','IPADDRESS':'','LOG':'','VERBOSE':'','ORIGUSERNAME':'','TESTUSERNAME':'', \\\r\n 'PASSWORD7':'','PLAINTEXT':'','SECRET8':'','CHANGE':'','VERIFIED':'','NOTES-AND-ERRORS':''} \r\n #testing\r\n #self.path = 'e:/dougsprogs/convert7to8/convert728/'\r\n \r\n\r\n ##Main checks to see if Filename is blank\r\n #if filename :#filename is not blank.\r\n #self.checkFilename()\r\n #if filename is blank, create the default dict\r\n #else: #filename is blank \"\"\r\n self.loadDictRow()\r\n #loadDictValue(key=\"IPADDRESS\", value=str(ipaddress))\r\n #now check to create the default empty file\r\n #checkFilename()\r",
"def parseDocObjectsToStrings(records, obj_type):\n for doc in records:\n for key, value in doc.items():\n # all dates should look the same\n if isinstance(value, datetime.datetime):\n doc[key] = datetime.datetime.strftime(value,\n \"%Y-%m-%d %H:%M:%S\")\n if key == \"_id\" or key == \"id\":\n doc[\"recid\"] = str(value)\n doc[\"details\"] = \"<a href='\"+getHREFLink(doc, obj_type)+\"'>\"\\\n \"<div class='icon-container'>\"\\\n \"<span class='ui-icon ui-icon-document'></span>\"\\\n \"</div>\"\\\n \"</a>\"\n elif key == \"password_reset\":\n doc['password_reset'] = None\n elif key == \"campaign\":\n camps = []\n for campdict in value:\n camps.append(campdict['name'])\n doc[key] = \"|||\".join(camps)\n elif key == \"source\":\n srcs = []\n for srcdict in doc[key]:\n srcs.append(srcdict['name'])\n doc[key] = \"|||\".join(srcs)\n elif key == \"tags\":\n tags = []\n for tag in doc[key]:\n tags.append(tag)\n doc[key] = \"|||\".join(tags)\n elif key == \"is_active\":\n if value:\n doc[key] = \"True\"\n else:\n doc[key] = \"False\"\n elif key == \"tickets\":\n tickets = []\n for ticketdict in value:\n tickets.append(ticketdict['ticket_number'])\n doc[key] = \"|||\".join(tickets)\n elif key == \"datatype\":\n doc[key] = value.keys()[0]\n elif key == \"to\":\n doc[key] = len(value)\n elif key == \"thumb\":\n doc['url'] = reverse(\"crits.screenshots.views.render_screenshot\",\n args=(unicode(doc[\"_id\"]),))\n elif key==\"results\" and obj_type == \"AnalysisResult\":\n doc[key] = len(value)\n elif isinstance(value, list):\n if value:\n for item in value:\n if not isinstance(item, basestring):\n break\n else:\n doc[key] = \",\".join(value)\n else:\n doc[key] = \"\"\n doc[key] = html_escape(doc[key])\n value = doc[key].strip()\n if isinstance(value, unicode) or isinstance(value, str):\n val = ' '.join(value.split())\n val = val.replace('\"',\"'\")\n doc[key] = val\n return records",
"def get_hash_log_curr(self):\n if not os.path.exists(self.log_path):\n os.makedirs(self.log_path)\n try:\n log = open(self.log_path + r'\\hash_log.txt', 'U')\n #first line is header, skip\n log.readline()\n for line in log:\n try:\n line = line.replace('\\n','')\n # log maintenance. only keep number of days designated\n line = line.split('|')\n if len(line) != 6:\n raise Exception\n if line[4] > self.log_cut_off_date:\n self.hash_log_curr[line[2]] = line\n except:\n self.print_to_log('Bad log Line: ' + str(line))\n self.print_to_log('Hash Log read Successfully')\n except IOError:\n self.print_to_log('No log found')\n self.hash_log_curr = None\n except IndexError:\n self.print_to_log('Bad Log File')\n raise\n except:\n self.print_to_log('Unknown Error, Exiting ')\n raise",
"def init(self, obj):\n obj_dict = {'name': obj.get_obj_name(),\n 'properties': obj.get_obj_properties(),\n 'actions': []}\n\n self.log_data[obj.get_obj_id()] = obj_dict",
"def __build_info(self, obj: Object, record: TNSRecord) -> dict:\n type_id = self.__get_type_id(record)\n redshift = record.redshift\n type_changed = type_id != obj.type_id\n redshift_changed = redshift != obj.redshift\n if type_changed or redshift_changed: # keep history of previous values\n return {\n 'type_id': type_id, 'redshift': redshift, 'aliases': {**obj.aliases, 'iau': record.name},\n 'history': self.__build_history(obj),\n 'data': {**obj.data, 'tns': record.to_json()}\n }\n elif 'iau' not in obj.aliases:\n return {\n 'aliases': {**obj.aliases, 'iau': record.name},\n 'data': {**obj.data, 'tns': record.to_json()}\n }\n else:\n return {}",
"def load(self, reffile):\n with open(reffile, 'r') as rfh:\n self._seqdict = json.load(rfh)\n\n # Check format\n keys = ['name','subtype','accessions']\n if self._nloci > 1:\n keys.append('loci')\n for seqkey,seqs in self._seqdict.iteritems():\n for seq,seqentry in seqs.iteritems():\n self._genenum += 1\n\n for k in keys:\n if not k in seqentry:\n raise Exception('Improperly formated SeqDict object')\n\n if self._nloci > 1:\n if len(seqentry['loci']) != self._nloci:\n raise Exception('Improperly formated SeqDict object')\n\n return None",
"def preprocess_log(self, log_file_full_path: str) -> Union[Dict, None]:\n\n try:\n with open(log_file_full_path, 'r') as log_file:\n\n switcher, coords = {}, {}\n\n self.monitor.info('-> Started to parse log file...')\n for line in log_file:\n try:\n if 'control_switch_on' in line:\n switch, ts = json.loads(line).values()\n switcher[ts] = int(switch)\n elif 'geo' in line:\n geo, ts = json.loads(line).values()\n coords[ts] = geo\n else:\n self.monitor.warning('-> Unknown happened on line while parsing:\\n', line)\n continue\n except Exception as e:\n self.monitor.exception(\" Something bad happened\", repr(e))\n self.monitor.info(f'-> Parsed log with {len(switcher)} switcher marks and {len(coords)} coords.')\n log_file.close()\n\n merged_log = {**switcher, **coords}\n sequence = {key: merged_log[key] for key in sorted(merged_log.keys())}\n self.monitor.info(f' -> Merged signal types and sorted by ts. Got a sequenced log with {len(sequence)} records.')\n\n return sequence\n\n except Exception as e:\n self.monitor.exception(f'-> Something bad happened. Details:\\n {repr(e)}')\n\n return None",
"def log_extract(log_info):\n \n #Handle file names, strings and open file-like objects equivalently\n with uber_open_rmode(log_info) as log_info:\n \n headers = []\n footers = []\n i = 0\n \n #for all lines in file/output\n for line in log_info:\n \n #skip blank lines\n if len(line.split()) == 0:\n continue\n \n #This is listed before both run and minimize simulations \n if 'Memory usage per processor =' in line:\n headers.append(i+1)\n \n #This follows both run and minimize simulations\n elif 'Loop time of' in line:\n footers.append(i-1)\n \n i += 1\n \n #Add last line to footers for incomplete logs\n footers.append(i)\n \n log_info.seek(0)\n \n #Create DataModelDict root\n log_dict = DM()\n log_dict['LAMMPS-log-thermo-data'] = DM()\n \n #for all lines in file/output\n for header, footer in zip(headers, footers):\n\n #Read thermo data\n df = pd.read_csv(log_info, header=header, nrows=footer-header, sep='\\s+', engine='python', skip_blank_lines=True)\n log_info.seek(0) \n\n #Convert to DataModelDict\n thermo = DM()\n for j in df:\n thermo[str(j)] = df[j].values.tolist()\n \n #Append simulation results to DataModelDict root\n simulation = DM([('thermo', thermo)])\n log_dict['LAMMPS-log-thermo-data'].append('simulation', simulation)\n \n return log_dict",
"def parse_file_content(content_of_log_file):\n \n search_regex = re.compile('^=== \\[(?P<process_time>.+)\\] :(?P<process_state>.+): (?P<process_name>[^\\s]+)\\s*(?P<process_status>[0-9])*$')\n jobs_dict = {}\n for log_line in content_of_log_file:\n parsed_log_line = search_regex.search(log_line)\n if parsed_log_line is not None:\n process = parsed_log_line.group('process_name')\n if parsed_log_line.group('process_state') == \"START\" : \n start_time = (parsed_log_line.group('process_time'))\n jobs_dict[process] = Job(name = process, start_time = start_time, end_time = None, status = None)\n elif parsed_log_line.group('process_state') == \"END\":\n end_time = parsed_log_line.group('process_time')\n if process in jobs_dict.keys():\n jobs_dict[process].end_time = end_time\n else:\n print(\"Start of the project is not present in the log file. Could not record end\")\n if parsed_log_line.group('process_state') == \"STATUS\":\n if process in jobs_dict.keys():\n jobs_dict[process].status = parsed_log_line.group('process_status')\n else:\n print(\"Start of the project is not present in the log file. Could not record status\")\n return jobs_dict",
"def __init__(self, fileName):\n self.recordDict = {}\n for line in open(fileName, 'r') :\n sipRecord = json.loads(line)\n self.recordDict[sipRecord['addressOfRecord']] = line",
"def create_data_record(self, data_dict):\n source_dict = deepcopy(data_dict)\n assert not self.is_conflicting_keys(data_dict,\n self.default_values), \"Conflicting keys between default_values and extra_values\"\n source_dict.update(self.default_values)\n return {\n '_index': self.get_full_index(),\n '_type': 'python_log',\n '_source': source_dict\n }",
"def pre_lookup(self, file):\n return {}",
"def map_file_data(file_obj, file_events):\n file_as_dict = {\n \"premis:originalName\": file_obj.currentlocation,\n \"original_name\": escape(file_obj.originallocation),\n # needs investigation\n \"sanitized_file_name\": get_sanitized_file_name(\n get_file_name_cleanup(file_events)\n ),\n \"prov:generatedAtTime\": file_obj.modificationtime.strftime(\n \"%Y-%m-%dT%H:%M:%SZ\"\n ),\n \"premis:fixity\": {\n \"checksum_type\": convert_to_premis_hash_function(file_obj.checksumtype),\n \"Checksum\": file_obj.checksum,\n },\n \"premis:identifier\": file_obj.uuid,\n \"premis:size\": file_obj.size,\n \"file_name\": file_obj.label,\n # not sure if this is the file name or if we should stick with\n \"dct:FileFormat\": map_file_format_info(\n get_file_format_event(file_events), get_file_validation_event(file_events)\n ),\n \"file_validation\": map_file_validation_info(\n get_file_validation_event(file_events)\n ),\n \"file_normalization\": map_file_normalization_info(\n get_file_normalization_event(file_events)\n ),\n \"events\": list_file_events(file_events),\n }\n return file_as_dict",
"async def line_to_obj(raw_line: bytearray, ref: Ref) -> Optional[ObjectRec]:\n # secondary_update = None\n if raw_line[0:1] == b\"0\":\n return None\n\n if raw_line[0:1] == b'-':\n rec = ref.obj_store[int(raw_line[1:], 16)]\n rec.alive = 0\n await mark_dead(rec.id)\n\n if 'Weapon' in rec.Type:\n impacted = await determine_contact(rec, type='impacted', ref=ref)\n if impacted:\n rec.impacted = impacted[0]\n rec.impacted_dist = impacted[1]\n sql = create_impact_stmt()\n vals = (ref.session_id, rec.parent, rec.impacted, rec.id,\n ref.time_offset, rec.impacted_dist)\n await DB.execute(sql, *vals)\n return rec\n\n comma = raw_line.find(b',')\n rec_id = int(raw_line[0:comma], 16)\n try:\n rec = ref.obj_store[rec_id]\n rec.update_last_seen(ref.time_offset)\n rec.updates += 1\n\n except KeyError:\n # Object not yet seen...create new record...\n rec = ObjectRec(id_=rec_id,\n session_id=ref.session_id,\n first_seen=ref.time_offset,\n last_seen=ref.time_offset)\n ref.obj_store[rec_id] = rec\n\n while True:\n last_comma = comma + 1\n comma = raw_line.find(b',', last_comma)\n if comma == -1:\n break\n\n chunk = raw_line[last_comma:comma]\n eq_loc = chunk.find(b\"=\")\n key = chunk[0:eq_loc]\n val = chunk[eq_loc + 1:]\n\n if key == b\"T\":\n i = 0\n pipe_pos_end = -1\n while i < COORD_KEY_LEN:\n pipe_pos_start = pipe_pos_end + 1\n pipe_pos_end = chunk[eq_loc + 1:].find(b'|', pipe_pos_start)\n if pipe_pos_start == -1:\n break\n\n coord = chunk[eq_loc + 1:][pipe_pos_start:pipe_pos_end]\n if coord != b'':\n c_key = COORD_KEYS[i]\n if c_key == \"lat\":\n rec.lat = float(coord) + ref.lat\n elif c_key == \"lon\":\n rec.lon = float(coord) + ref.lon\n else:\n rec.update_val(c_key, float(coord))\n i += 1\n else:\n rec.update_val(\n key.decode('UTF-8') if key != b'Group' else 'grp', val.decode('UTF-8'))\n\n rec.compute_velocity(ref.time_since_last)\n\n if rec.updates == 1 and rec.should_have_parent():\n parent_info = await determine_contact(rec, type='parent', ref=ref)\n if parent_info:\n rec.parent = parent_info[0]\n rec.parent_dist = parent_info[1]\n\n return rec",
"def parse(self):\n i = 0\n while i < len(self.__lines):\n line = self.__lines[i]\n dt = re.match(r\"(\\d{4}-\\d{1,2}-\\d{1,2}\\s\\d{1,2}:\\d{1,2}:\\d{1,2})\", line)\n if not dt:\n i += 1\n continue\n log = {\n \"datetime\": dt.group()\n }\n line = line[dt.end()+1:].rstrip(\"\\n\")[::-1]\n qq_flag = line.find(\"(\")\n log[\"qq\"] = line[qq_flag-1:0:-1]\n log[\"name\"] = line[:qq_flag:-1].strip(\" \")\n i += 1\n log[\"content\"] = self.__lines[i].rstrip(\"\\n\")\n while self.__lines[i+1] != \"\\n\":\n i += 1\n log[\"content\"] += \" \" + self.__lines[i].rstrip(\"\\n\")\n self.__logs.append(log)\n i += 2",
"def dosomething(file_dict2, path, buf):\n file_dict2[path] = {\"Extension\": 1, \"Hash value\": 1}\n return file_dict2",
"def read(cls, filename: str) -> Objdict:\n obj = config.read(filename)\n return cls.from_obj(obj)"
]
| [
"0.6649974",
"0.59932905",
"0.58671886",
"0.5755533",
"0.5745892",
"0.55871373",
"0.55416346",
"0.5481624",
"0.5440245",
"0.5378912",
"0.5357203",
"0.5282771",
"0.52630085",
"0.5223232",
"0.5209461",
"0.51650983",
"0.5125105",
"0.51221484",
"0.5097486",
"0.50874823",
"0.5078812",
"0.5064908",
"0.504213",
"0.49924782",
"0.49851075",
"0.49628675",
"0.49573916",
"0.49559858",
"0.49521935",
"0.49496767"
]
| 0.67737544 | 0 |
Load Disaster Declarations and County information into database, as two separate tables that are being created simultaneously. Database information is pulled from the FEMA API Disaster Declarations Summaries data set. Data populates both the disasters and counties tables. API returns 1k records max ($top set to variable result_count in payload); the while loop continues to make the API call until records_returned is a number less than 1k (which signals no further API calls need to be made). To avoid making a call that returns duplicate records, a skip is made ($skip, set to result_count * iteration).
print "Disasters"
#deletes any data within the table before seeding
Disaster.query.delete()
result_count = 1000
iteration = 0
records_returned = 1000
# makes payload requests from FEMA API
while records_returned == 1000:
payload = {'$top': result_count,
'$skip': result_count * iteration,
'$inlinecount': 'allpages',
'$filter': 'declarationDate ge \'1990-01-01T04:00:00.000z\'',
'$select': 'disasterNumber,declarationDate,state,incidentType,title,incidentBeginDate,incidentEndDate,placeCode,declaredCountyArea'}
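        # $filter keeps declarations from 1990 onward; $select trims the response to the fields stored below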
r = requests.get(
"http://www.fema.gov/api/open/v1/DisasterDeclarationsSummaries",
params=payload)
# iteration counter, starts at zero, for every iteration add 1
iteration += 1
disaster_info = r.json()
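        # fewer than 1000 records returned means the last page has been reached and the while loop ends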
metadata = disaster_info['metadata']
record_count = metadata['count']
records_returned = len(disaster_info['DisasterDeclarationsSummaries'])
# parsing through the information returned from API
for incident_dict in disaster_info['DisasterDeclarationsSummaries']:
disasterNumber = incident_dict.get('disasterNumber')
declarationDate = incident_dict.get('declarationDate')
state = incident_dict.get('state')
incidentType = incident_dict.get('incidentType')
title = incident_dict.get('title')
incidentBeginDate = incident_dict.get('incidentBeginDate')
incidentEndDate = incident_dict.get('incidentEndDate')
placeCode = incident_dict.get('placeCode')
declaredCountyArea = incident_dict.get('declaredCountyArea')
"""Try/Except does two things: the try is doing a check to see if the county is already in the counties tables and if it is, then setting the Disaster.countyArea_id in the disasters table. The except is occuring only when the NoResultFound occurs and is creating the county and adding it to the counties table."""
try:
                # variable county_check set to the result of the query
county_check = County.query.filter(County.county_name==declaredCountyArea, County.state_code==state).one()
countyArea_id = county_check.county_id
# creating a county when NoResultFound error occurs
except NoResultFound:
county = County(state_code=state,
county_name=declaredCountyArea)
db.session.add(county)
                # flush so the new county is assigned its primary key before the commit below
                db.session.flush()
                countyArea_id = county.county_id
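            # build the disaster record and link it to its county via countyArea_id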
disaster = Disaster(disasterNumber=disasterNumber,
state=state,
declarationDate=declarationDate,
incidentType=incidentType,
title=title,
incidentBeginDate=incidentBeginDate,
incidentEndDate=incidentEndDate,
placeCode=placeCode,
declaredCountyArea=declaredCountyArea,
countyArea_id=countyArea_id)
db.session.add(disaster)
db.session.commit()
print "Disasters and Counties seeded" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def Collect_data(clo, cla, FD, LD, ZC, idx_time, cntry, TZ):\r\n #initiate the console response\r\n rep = \"\"\r\n global ddf\r\n \r\n #define the noaa server access\r\n server_noaa = \"https://www.ncei.noaa.gov/data/global-hourly/access/\"\r\n \r\n #Initate dateframe\r\n data = pd.DataFrame()\r\n \r\n #Convert date from specified timezone to UTC\r\n FDc = Date_calibration(FD, 1, TZ)\r\n LDc = Date_calibration(LD, 1, TZ)\r\n \r\n #define timestep\r\n timestep = \"1 H\"\r\n \r\n #Loop on the range time by each year\r\n for y in range(FDc.year, (LDc.year + 1)):\r\n rep += '--- Collect Year ['+str(y) + '] --- \\n' \r\n \r\n #Loop on each weather station while the ouput data is good\r\n #weather station in the year instancied dataframe \r\n for i in range(len(ddf[y])) :\r\n \r\n #Define the memory key : year_zipcode\r\n key_d_z = str(y)+'_'+str(ZC)\r\n \r\n #Verify if the data is already in the memory\r\n if(key_d_z in memory_df.keys()) :\r\n #The data is already in the memory :\r\n #Collect the data and go next (save compute time and server solicitation)\r\n ext_data = memory_df[key_d_z]\r\n NS, DO = memory_NS_DO[key_d_z].split('_')[0], memory_NS_DO[key_d_z].split('_')[1]\r\n break\r\n else:\r\n \r\n #The data isn't in the memory :\r\n \r\n #Collect information about the nearest weather station from the zip code\r\n rs, NS, DO = Fetch_station(clo, cla, y)\r\n \r\n #Generate the ftp key weather station\r\n code_station = str(ddf[y]['USAF'][rs])+str(ddf[y]['WBAN'][rs])\r\n \r\n #Define the server access\r\n url = server_noaa+str(y)+'/'+code_station+'.csv'\r\n \r\n #Get the data\r\n req = requests.get(url)\r\n \r\n #The server answer\r\n if(req.status_code == 200):\r\n \r\n #Extract the data (only date and temperature)\r\n ext_data = pd.read_csv(url, usecols=['DATE','TMP'])\r\n \r\n #Check if the data isn't empty (1000 is arbitrary)\r\n if(len(ext_data) > 1000):\r\n \r\n #Format data\r\n ext_data, qual = Data_formatting(ext_data, y, timestep, TZ)\r\n \r\n #Check if the data quality respect the quality rule\r\n if(qual > quality_rule):\r\n \r\n #Save the date in the memory\r\n memory_df[key_d_z] = ext_data\r\n memory_NS_DO[key_d_z] = str(NS)+'_'+str(DO)\r\n \r\n #Response for the GUI\r\n rep += \"# Station [\"+str(NS)+\"] valid : \\n\"\r\n rep += \"- Quality density data : \"+str(round(qual, 2))+\"% \\n\"\r\n rep += \"- Great circle distance : \"+str(round(DO,2))+\"km \\n\"\r\n break\r\n else:\r\n #The data quality is too bad\r\n #Response for the GUI\r\n rep += \"# Station [\"+str(NS)+\"] invalid : \\n\"\r\n rep += \"- Quality density data : \"+str(round(qual,2))+\"% \\n\"\r\n rep += \"> Quality criterion unsatisfied \\n\"\r\n \r\n #Delete the weather station in the dataframe (instancied by year)\r\n ddf[y] = ddf[y].drop(rs).reset_index(drop=True)\r\n continue\r\n \r\n else:\r\n #The data density is too low\r\n \r\n #Response for the GUI\r\n rep += \"# Station [\"+str(NS)+\"] invalid : \\n\"\r\n rep += \"> Low data volume \\n\"\r\n \r\n #Delete the weather station in the dataframe (instancied by year)\r\n ddf[y] = ddf[y].drop(rs).reset_index(drop=True)\r\n\r\n continue\r\n else:\r\n #The NOAA doesn't answer for the code station\r\n rep += \"# Station [\"+str(NS)+\"] invalid : \\n\"\r\n rep += \"> Server doesn't answer\\n\"\r\n \r\n #Delete the weather station in the dataframe (instancied by year)\r\n ddf[y] = ddf[y].drop(rs).reset_index(drop=True)\r\n\r\n continue\r\n #Add data in the Dataframe\r\n data = data.append(ext_data)\r\n \r\n #Define a new dataframe mark out by the specified time 
range\r\n range_time = pd.DataFrame(index=pd.DatetimeIndex(start=FD, end=LD, freq=timestep))\r\n \r\n #Paste data in the time marked out Dataframe\r\n range_time['Temp'] = data['TMP']\r\n \r\n #Calculate the amount of NaN (global)\r\n nb_nan = range_time['Temp'].isnull().sum()\r\n \r\n #Calculate the global quality\r\n quality = (1 - (nb_nan) / len(range_time)) * 100\r\n \r\n #Fill the gap data by temporal interpolation\r\n data = range_time.interpolate(method='time')\r\n data = data.ffill().bfill()\r\n data['Temp'] = round(data['Temp'], 2)\r\n \r\n \r\n #If specified index time, cut the data for it\r\n if(len(idx_time)> 2):\r\n final_data = pd.DataFrame(index = pd.to_datetime(idx_time))\r\n final_data['Temp'] = round(data['Temp'], 2)\r\n #final_data = final_data.resample(rule = str(timestep)).mean()\r\n #final_data = final_data.dropna()\r\n else:\r\n final_data = data\r\n \r\n return final_data, quality, NS, DO, rep",
"def pull_usafacts_data(base_url: str, metric: str, logger: Logger, cache: str=None) -> pd.DataFrame:\n # Read data\n df = fetch(base_url.format(metric=metric), cache)\n date_cols = [i for i in df.columns if i.startswith(\"2\")]\n logger.info(\"data retrieved from source\",\n metric=metric,\n num_rows=df.shape[0],\n num_cols=df.shape[1],\n min_date=min(date_cols),\n max_date=max(date_cols),\n checksum=hashlib.sha256(pd.util.hash_pandas_object(df).values).hexdigest())\n df.columns = [i.lower() for i in df.columns]\n # Clean commas in count fields in case the input file included them\n df[df.columns[4:]] = df[df.columns[4:]].applymap(\n lambda x: int(x.replace(\",\", \"\")) if isinstance(x, str) else x)\n # Check missing FIPS\n null_mask = pd.isnull(df[\"countyfips\"])\n assert null_mask.sum() == 0\n\n unexpected_columns = [x for x in df.columns if \"Unnamed\" in x]\n unexpected_columns.extend(DROP_COLUMNS)\n\n # Assign Grand Princess Cruise Ship a special FIPS 90000\n # df.loc[df[\"FIPS\"] == 6000, \"FIPS\"] = 90000\n # df.loc[df[\"FIPS\"] == 6000, \"stateFIPS\"] = 90\n\n # Ignore Grand Princess Cruise Ship and Wade Hampton Census Area in AK\n df = df[\n (df[\"countyfips\"] != 6000)\n & (df[\"countyfips\"] != 2270)\n ]\n\n # Change FIPS from 0 to XX000 for statewise unallocated cases/deaths\n unassigned_index = (df[\"countyfips\"] == 0)\n df.loc[unassigned_index, \"countyfips\"] = df[\"statefips\"].loc[unassigned_index].values * 1000\n\n # Conform FIPS\n df[\"fips\"] = df[\"countyfips\"].apply(lambda x: f\"{int(x):05d}\")\n\n\n\n # Drop unnecessary columns (state is pre-encoded in fips)\n try:\n df.drop(DROP_COLUMNS, axis=1, inplace=True)\n except KeyError as e:\n raise ValueError(\n \"Tried to drop non-existent columns. The dataset \"\n \"schema may have changed. Please investigate and \"\n \"amend DROP_COLUMNS.\"\n ) from e\n # Check that columns are either FIPS or dates\n try:\n columns = list(df.columns)\n columns.remove(\"fips\")\n # Detects whether there is a non-date string column -- not perfect\n # USAFacts has used both / and -, so account for both cases.\n _ = [int(x.replace(\"/\", \"\").replace(\"-\", \"\")) for x in columns]\n except ValueError as e:\n raise ValueError(\n \"Detected unexpected column(s) \"\n \"after dropping DROP_COLUMNS. The dataset \"\n \"schema may have changed. 
Please investigate and \"\n \"amend DROP_COLUMNS.\"\n ) from e\n # Reshape dataframe\n df = df.melt(\n id_vars=[\"fips\"],\n var_name=\"timestamp\",\n value_name=\"cumulative_counts\",\n )\n # timestamp: str -> datetime\n df[\"timestamp\"] = pd.to_datetime(df[\"timestamp\"])\n # Add a dummy first row here on day before first day\n min_ts = min(df[\"timestamp\"])\n df_dummy = df.loc[df[\"timestamp\"] == min_ts].copy()\n df_dummy.loc[:, \"timestamp\"] = min_ts - pd.Timedelta(days=1)\n df_dummy.loc[:, \"cumulative_counts\"] = 0\n df = pd.concat([df_dummy, df])\n # Obtain new_counts\n df.sort_values([\"fips\", \"timestamp\"], inplace=True)\n df[\"new_counts\"] = df[\"cumulative_counts\"].diff() # 1st discrete difference\n # Handle edge cases where we diffed across fips\n mask = df[\"fips\"] != df[\"fips\"].shift(1)\n df.loc[mask, \"new_counts\"] = np.nan\n df.reset_index(inplace=True, drop=True)\n\n # Final sanity checks\n days_by_fips = df.groupby(\"fips\").count()[\"cumulative_counts\"].unique()\n unique_days = df[\"timestamp\"].unique()\n # each FIPS has same number of rows\n if (len(days_by_fips) > 1) or (days_by_fips[0] != len(unique_days)):\n raise ValueError(\"Differing number of days by fips\")\n min_timestamp = min(unique_days)\n max_timestamp = max(unique_days)\n n_days = (max_timestamp - min_timestamp) / np.timedelta64(1, \"D\") + 1\n if n_days != len(unique_days):\n raise ValueError(\n f\"Not every day between {min_timestamp} and \"\n \"{max_timestamp} is represented.\"\n )\n return df.loc[\n df[\"timestamp\"] >= min_ts,\n [ # Reorder\n \"fips\",\n \"timestamp\",\n \"new_counts\",\n \"cumulative_counts\",\n ],\n ]",
"def fetch(url, header_path, id, ip, dbase, targets_table):\n # url = 'http://esimbad/testGSAV7/reslabo?FENID=resLaboPatDitep&NIP={}' \\\n # '&STARTDATE={}&ENDDATE={}'\n\n # header_path = '~/workspace/data/biology/header.csv'\n # constant names specific to our database\n KEY1 = 'id'\n KEY2 = 'NIP'\n C1J1 = 'C1J1'\n\n header = pd.read_csv(header_path, sep=';', encoding='latin1').columns\n\n\n engine = get_engine(id, ip, dbase)\n\n df_ids = sql2df(engine, targets_table)[[KEY1, 'nip', C1J1]]\n df_ids.rename({'nip': KEY2}, inplace=True, axis=1)\n df_ids['patient_id'] = df_ids[KEY1]\n\n cols = [KEY2, 'Analyse', 'Resultat', 'Date prelvt']\n df_res = pd.DataFrame(data=None, columns=cols)\n\n for index, row in df_ids.iterrows():\n nip = row[KEY2].replace(' ', '')\n patient_id = row['patient_id']\n c1j1_date = row[C1J1].date()\n start_date = c1j1_date - timedelta(weeks=8)\n\n c1j1 = str(c1j1_date).replace('-', '')\n start = str(start_date).replace('-', '')\n\n req = requests.get(url.format(nip, start, c1j1))\n values = BeautifulSoup(req.content, 'html.parser').body.text\n\n new_df = pd.read_csv(StringIO(values), sep=';', header=None,\n index_col=False, names=header)\n new_df = new_df.loc[:, cols + ['LC']] # remove LC\n\n # normalize nip\n new_df[KEY2] = row[KEY2]\n # new_df[KEY2] = new_df[KEY2].map(str)\n # new_df[KEY2] = [nip[:4] + '-' + nip[4:] for nip in new_df[KEY2]]\n\n new_df.drop('LC', axis=1, inplace=True)\n\n df_res = pd.concat([df_res, new_df], axis=0,\n sort=False, ignore_index=True)\n\n return df_res",
"def load_restaurants(city):\n session = connect_db()\n # Start offset at 0 to return the first 20 results from Yelp API request\n offset = 0\n\n # Get total number of restaurants for this city\n bearer_token = obtain_bearer_token(API_HOST, TOKEN_PATH)\n result_len = 20\n \n # Get all restaurants for a city and load each restaurant into the database\n # Note: Yelp has a limitation of 1000 for accessible results, so get total results\n # if less than 1000 or get only 1000 results back even if there should be more\n while (1000 > offset) and (result_len==20):\n results = search(bearer_token, 'restaurant', city, offset)\n result_len = len(results['businesses'])\n\n # API response returns a SearchResponse object with accessible attributes\n # response.businesses returns a list of business objects with further attributes\n for business in results['businesses']:\n biz = get_business(bearer_token, business['id'])\n try:\n table.insert(biz)\n except DuplicateKeyError:\n print 'DUPS!'\n\n hour_start_monday = None\n hour_end_monday = None \n hour_start_tuesday = None\n hour_end_tuesday = None\n hour_start_wednesday = None\n hour_end_wednesday = None \n hour_start_thursday = None\n hour_end_thursday = None \n hour_start_friday = None\n hour_end_friday = None \n hour_start_saturday = None\n hour_end_saturday = None \n hour_start_sunday = None\n hour_end_sunday = None\n try:\n yelp_price_level = biz['price']\n except:\n yelp_price_level = None\n try:\n hours_type = biz['hours'][0]['hours_type']\n is_open_now = biz['hours'][0]['is_open_now']\n for item in biz['hours'][0]['open']:\n if item['day'] == 1:\n hour_start_tuesday = item['start']\n hour_end_tuesday = item['end']\n elif item['day'] == 0:\n hour_start_monday = item['start']\n hour_end_monday = item['end']\n elif item['day'] == 2:\n hour_start_wednesday = item['start']\n hour_end_wednesday = item['end']\n elif item['day'] == 3:\n hour_start_thursday = item['start']\n hour_end_thursday = item['end']\n elif item['day'] == 4:\n hour_start_friday = item['start']\n hour_end_friday = item['end']\n elif item['day'] == 5:\n hour_start_saturday = item['start']\n hour_end_saturday = item['end']\n elif item['day'] == 6:\n hour_start_sunday = item['start']\n hour_end_sunday = item['end']\n except:\n hours_type = None\n is_open_now = None\n hour_start_monday = None\n hour_end_monday = None \n hour_start_tuesday = None\n hour_end_tuesday = None\n hour_start_wednesday = None\n hour_end_wednesday = None \n hour_start_thursday = None\n hour_end_thursday = None \n hour_start_friday = None\n hour_end_friday = None \n hour_start_saturday = None\n hour_end_saturday = None \n hour_start_sunday = None\n hour_end_sunday = None\n restaurant = Restaurant(\n yelp_id = business['id'],\n yelp_rating = biz['rating'],\n yelp_review_count = biz['review_count'],\n name = biz['name'],\n phone = biz['phone'],\n yelp_url = biz['url'],\n yelp_price_level = yelp_price_level,\n latitude = biz['coordinates']['latitude'],\n longitude = biz['coordinates']['longitude'],\n hours_type = hours_type,\n is_open_now = is_open_now,\n hour_start_monday = hour_start_monday,\n hour_end_monday = hour_end_monday,\n hour_start_tuesday = hour_start_tuesday,\n hour_end_tuesday = hour_end_tuesday,\n hour_start_wednesday = hour_start_wednesday,\n hour_end_wednesday = hour_end_wednesday, \n hour_start_thursday = hour_start_thursday,\n hour_end_thursday = hour_end_thursday, \n hour_start_friday = hour_start_friday,\n hour_end_friday = hour_end_friday, \n hour_start_saturday = hour_start_saturday,\n 
hour_end_saturday = hour_end_saturday, \n hour_start_sunday = hour_start_sunday,\n hour_end_sunday = hour_end_sunday, \n is_closed = biz['is_closed'],\n categories = biz['categories'][0]['alias'],\n display_phone = biz['display_phone'],\n location = ' '.join(biz['location']['display_address']),\n location_city = biz['location']['city'],\n location_state = biz['location']['state'],\n location_zip_code = biz['location']['zip_code'],\n location_city_id = biz['location']['city'] + ', ' + biz['location']['state'])\n session.merge(restaurant)\n # Yelp returns only 20 results each time, so need to offset by 20 while iterating\n offset += 20\n print('current offset: ', offset)\n session.commit()",
"def load_fhwa_records():\n print('--- Importing FHWA DFLTD v.2 records ---')\n for i in tqdm(range(len(tbl_project))):\n prj_id = tbl_project['lng_KeyProject'][i]\n\n expl_in_project = tbl_exploration[\n tbl_exploration.lng_KeyProject == prj_id].index\n for i_exp in expl_in_project:\n expl_id = tbl_exploration['txt_KeyExplorationName'][i_exp]\n\n piles_in_project = tbl_deepfoundation[\n tbl_deepfoundation.lng_KeyProject == prj_id].index\n for i_pile in piles_in_project:\n pile_id = tbl_deepfoundation['lng_KeyDeepFoundation'][i_pile]\n\n tests_for_pile = tbl_loadtest[\n (tbl_loadtest.lng_KeyProject == prj_id) &\n (tbl_loadtest.lng_KeyDeepFoundation == pile_id)\n ].index\n for i_lt in tests_for_pile:\n test_id = tbl_loadtest['lng_KeyLoadTest'][i_lt]\n\n # -- Adding Project Data -------------------------------- #\n if len(piles_in_project) > 1 and len(expl_in_project) < 2:\n wrn = 'Expanded from a project with multiple piles '\\\n 'and/or retests'\n prj = add_loc_proj(i, wrn)\n elif len(piles_in_project) < 2 and len(expl_in_project) > 1:\n wrn = 'Expanded from a project with multiple '\\\n 'explorations'\n prj = add_loc_proj(i, wrn)\n elif len(piles_in_project) > 1 and len(expl_in_project) > 1:\n wrn = 'Expanded from a project with multiple '\\\n 'explorations and multiple piles/retests'\n prj = add_loc_proj(i, wrn)\n else:\n prj = add_loc_proj(i)\n db.session.add(prj)\n\n # -- Adding Exploration Data ---------------------------- #\n exploration = add_expl_data(i_exp, expl_id, prj)\n db.session.add(exploration)\n\n # -- Adding Layer Data ---------------------------------- #\n add_layer_data(prj_id, expl_id, exploration)\n\n # -- Adding Pile Data ----------------------------------- #\n pile = add_pile_data(i_pile, prj_id, pile_id, prj)\n db.session.add(pile)\n\n # -- Adding Load Test Data ------------------------------ #\n load_test = add_load_test_data(i_lt, pile)\n db.session.add(load_test)\n\n # -- Adding Static Test Data ---------------------------- #\n add_static_test_data(prj_id, pile_id, test_id, load_test)\n\n # -- Adding Interpreted Data ---------------------------- #\n add_interp_data(prj_id, pile_id, test_id, load_test)\n\n db.session.commit()",
"def dataLoader(stationDict, startDate, endDate):\n\n # Generate a URL\n url = ('https://waterservices.usgs.gov/nwis/dv/?format=json' +\n # Specify the sites to download\n '&sites=' + stationDict['DatasetExternalID'] +\n # Specify the start date\n '&startDT=' + datetime.strftime( startDate, '%Y-%m-%d' ) +\n #Specify the end data\n '&endDT=' + datetime.strftime( endDate, '%Y-%m-%d' ) +\n # Specify that we want streamflow\n '¶meterCd=00060' +\n # Specify that we want daily means\n '&statCd=00003' +\n # Allow all sites\n '&siteStatus=all' )\n \n # Get the data\n response = requests.get(url)\n\n # Check the status code\n if response.status_code != 200:\n return \n else:\n response = response.json()\n \n # Create a dataframe from the data\n df = pd.DataFrame(response['value']['timeSeries'][0]['values'][0]['value'])\n\n # Set the index to the dateTime index\n df.set_index(pd.DatetimeIndex(pd.to_datetime(df['dateTime'])), inplace = True)\n del df['dateTime'] # Delete the redundant column\n\n # Replace missing data with NaN's\n df['value'].replace(to_replace = '-999999', value = np.nan, inplace = True)\n\n # Convert to numeric\n df['value'] = pd.to_numeric(df['value'])\n \n # Remove any duplicate data in the dataset\n df = df[~df.index.duplicated(keep='last')] # Remove duplicates from the dataset\n df = df[~df.index.isnull()]\n\n # Rename the columns\n df.columns = ['USGS | ' + stationDict['DatasetExternalID'] + ' | Flag', 'USGS | ' + stationDict['DatasetExternalID'] + ' | Streamflow | CFS']\n del df['USGS | ' + stationDict['DatasetExternalID'] + ' | Flag']\n\n # Return the data frame\n return df",
"def example_data():\n disaster1 = Disaster(id=1, disasterNumber=null,ihProgramDeclared=null,iaProgramDeclared=null,paProgramDeclared=null,hmProgramDeclared=null,state='California',declarationDate=null,fyDeclared=1998,disasterType='DR',incidentType='Tornado',title=null,incidentBeginDate=null,incidentEndDate=null,disasterCloseOutDate=null,declaredCountyArea=null,placeCode=null,hash_=null,lastRefresh=null)\n disaster2 = Disaster(id=2, disasterNumber=null,ihProgramDeclared=null,iaProgramDeclared=null,paProgramDeclared=null,hmProgramDeclared=null,state='California',declarationDate=null,fyDeclared=2004,disasterType='DR',incidentType='Flood',title=null,incidentBeginDate=null,incidentEndDate=null,disasterCloseOutDate=null,declaredCountyArea=null,placeCode=null,hash_=null,lastRefresh=null)\n disaster3 = Disaster(id=3, disasterNumber=null,ihProgramDeclared=null,iaProgramDeclared=null,paProgramDeclared=null,hmProgramDeclared=null,state='California',declarationDate=null,fyDeclared=1994,disasterType='DR',incidentType='Earthquake',title=null,incidentBeginDate=null,incidentEndDate=null,disasterCloseOutDate=null,declaredCountyArea=null,placeCode=null,hash_=null,lastRefresh=null)\n\n db.session.add_all([disaster1, disaster2, disaster3])\n db.session.commit()",
"def parser():\n with open('misc/DisasterDeclarationsSummaries.csv') as csvfile:\n reader = csv.DictReader(csvfile)\n\n counter = 0\n\n for row in reader:\n disaster = Disaster()\n disaster.disasterNumber = make_intger(row.get('disasterNumber'))\n disaster.ihProgramDeclared = make_boolean(row.get('ihProgramDeclared'))\n disaster.iaProgramDeclared = make_boolean(row.get('iaProgramDeclared'))\n disaster.paProgramDeclared = make_boolean(row.get('paProgramDeclared'))\n disaster.hmProgramDeclared = make_boolean(row.get('hmProgramDeclared'))\n disaster.state = make_string(row.get('state'))\n disaster.declarationDate = make_datetime(row.get('declarationDate'))\n disaster.fyDeclared = make_intger(row.get('fyDeclared'))\n disaster.disasterType = make_string(row.get('disasterType'))\n disaster.incidentType = make_string(row.get('incidentType'))\n disaster.title = make_string(row.get('title'))\n disaster.incidentBeginDate = make_datetime(row.get('incidentBeginDate'))\n disaster.incidentEndDate = make_datetime(row.get('incidentEndDate'))\n disaster.disasterCloseOutDate = make_datetime(row.get('disasterCloseOutDate'))\n disaster.declaredCountyArea = make_string(row.get('declaredCountyArea'))\n disaster.placeCode = make_intger(row.get('placeCode', None))\n disaster.hash_ = make_string(row.get('hash'))\n disaster.lastRefresh = make_datetime(row.get('lastRefresh'))\n\n db.session.add(disaster)\n db.session.commit()\n\n counter += 1\n print ('Inserted {} items'.format(counter))",
"def get_data_from_db(district):\n data_frames = get_athena_dataframes()\n df_result = copy.copy(data_frames['new_covid_case_summary'])\n df_result = df_result[df_result['district'] == district.lower()]\n df_result = df_result.loc[:, :'deceased']\n df_result.dropna(axis=0, how='any', inplace=True)\n df_result['date'] = pd.to_datetime(df_result['date'])\n df_result['date'] = df_result['date'].apply(lambda x: x.strftime(\"%-m/%-d/%y\"))\n df_result.rename({'total': 'confirmed', 'active': 'hospitalized'}, axis='columns', inplace=True)\n for col in df_result.columns:\n if col in ['hospitalized', 'confirmed', 'recovered', 'deceased']:\n df_result[col] = df_result[col].astype('int64')\n df_result = df_result.fillna(0)\n df_result = df_result.rename(columns={'date': 'index'})\n df_result.drop(['state', 'district'], axis=1, inplace=True)\n df_result = df_result.set_index('index').transpose().reset_index().rename(columns={'index': \"observation\"})\n df_result.insert(0, column=\"region_name\", value=district.lower().replace(',', ''))\n df_result.insert(1, column=\"region_type\", value=\"district\")\n\n return df_result",
"def import_counties():\n\n query = 'INSERT INTO texas_counties(county, region) VALUES(%s,%s)'\n with persistence() as db:\n # create new cursor instance\n cursor = db.cursor(cursor_factory=psycopg2.extras.RealDictCursor)\n\n for council, counties in COUNCIL_DATA.items():\n for county in counties:\n cursor.execute(query, (county, council))\n db.commit()",
"def get_usda_food_data (connection):\n\n tables = ['usda_food_access_feb2014', 'usda_food_assistance_feb2014',\n 'usda_food_health_feb2014', 'usda_food_insecurity_feb2014',\n 'usda_food_stores_feb2014']\n\n for table in tables:\n if table == tables[0]:\n sql_query = \"\"\"\nSELECT \"FIPS\",\n\"PCT_LACCESS_POP10\" AS \"low_access_food_pct10\",\n\"PCT_LACCESS_LOWI10\" AS \"low_access_food_low_inc_pct10\",\n\"PCT_LACCESS_SENIORS10\" AS \"low_access_food_snr_pct10\",\n\"PCT_LACCESS_HHNV10\" AS \"low_access_food_no_car_pct10\"\nFROM {0};\n \"\"\".format (table)\n # print (sql_query)\n elif table == tables[1]:\n sql_query = \"\"\"\nSELECT \"FIPS\",\n\"REDEMP_SNAPS12\" AS \"snap_redemp_per_store_2012\"\nFROM {0};\n \"\"\".format (table)\n # print (sql_query)\n elif table == tables[2]:\n sql_query = \"\"\"\nSELECT \"FIPS\",\n\"PCT_DIABETES_ADULTS10\" AS \"pct_diabetes_adults_2010\",\n\"PCT_OBESE_ADULTS13\" AS \"pct_obese_adults_2013\",\n\"RECFACPTH12\" AS \"rec_fac_2012\",\n\"NATAMEN\" AS \"ers_nat_amenity_index_1999\"\nFROM {0};\n \"\"\".format (table)\n # print (sql_query)\n elif table == tables[3]:\n sql_query = \"\"\"\nSELECT \"FIPS\",\n\"FOODINSEC_10_12\" AS \"food_insec_house_pct_10_12\",\n\"VLFOODSEC_10_12\" AS \"very_low_food_insec_house_pct_10_12\"\nFROM {0};\n \"\"\".format (table)\n # print (sql_query)\n elif table == tables[4]:\n sql_query = \"\"\"\nSELECT \"FIPS\",\n\"GROCPTH12\" AS \"grocery_pct10\"\nFROM {0};\n \"\"\".format (table)\n # print (sql_query)\n\n if table == tables[0]:\n data = pd.read_sql_query(sql_query, con)\n data.where ((pd.notnull (data)), other=np.nan, inplace=True)\n data = data.dropna (subset=['FIPS'])\n data['FIPS'] = data['FIPS'].apply (lambda x: str(x).zfill (5))\n else:\n data_tmp = pd.read_sql_query(sql_query, con)\n data_tmp.where ((pd.notnull (data_tmp)), other=np.nan, inplace=True)\n data_tmp = data_tmp.dropna (subset=['FIPS'])\n data_tmp['FIPS'] = data_tmp['FIPS'].apply (lambda x: str(x).zfill (5))\n data = pd.merge (data, data_tmp, on=\"FIPS\", how=\"left\")\n\n return (data)",
"def get_rainfall_data(zone):\n zone = zone[1:len(zone)-1]\n rain_response = {}\n conn = sqlite3.connect(os.path.abspath('database.db'))\n\n # get rainfall data\n query = \"Select rain_date, rain_rainfall From rainfall Left join fire_danger_zone on rainfall.rain_station=fire_danger_zone.fdz_station Where fire_danger_zone.fdz_station == '\" + zone + \"' and rainfall.rain_date >= date('2010-01-01') Order by rainfall.rain_date;\"\n dataframe = pd.read_sql_query(query, conn) \n rainfall = dataframe['rain_rainfall'].values.tolist()\n\n # get dates\n dates = dataframe['rain_date'].values.tolist()\n \n # add data in dictionary \n data_name = 'rain_'+zone\n rain_response[data_name] = rainfall\n rain_response['labels'] = dates\n \n # return data\n response = jsonify(rain_response)\n response.headers.add('Access-Control-Allow-Origin', '*')\n \n # close database connection\n conn.close()\n return response",
"def DataLoader():\n #importing data\n House_Prices_Uncleaned = pd.read_csv(\"zillow_data/Zip_zhvi_uc_sfrcondo_tier_0.33_0.67_sm_sa_mon.csv\")\n #Cleaning house prices data\n\n House_Prices=pd.DataFrame(House_Prices_Uncleaned[\"RegionName\"][House_Prices_Uncleaned[\"CountyName\"]==\"New York County\"])\n\n House_Prices[\"Price\"]=pd.DataFrame(House_Prices_Uncleaned[\"2020-09-30\"])\n\n House_Rent_Uncleaned= pd.read_csv(\"zillow_data/Zip_ZORI_AllHomesPlusMultifamily_SSA.csv\")\n\n #Cleaning house rent data\n House_Rent=pd.DataFrame(House_Rent_Uncleaned[\"RegionName\"])\n House_Rent[\"Rent\"]=pd.DataFrame(House_Rent_Uncleaned[\"2020-09\"])\n\n return House_Prices, House_Rent",
"def fetch_census_data(self, states):\n print('Fetching census data')\n for table in CensusTable.objects.all():\n api = self.get_series(table.series)\n for variable in table.variables.all():\n estimate = '{}_{}'.format(\n table.code,\n variable.code\n )\n print('>> Fetching {} {} {}'.format(\n table.year,\n table.series,\n estimate\n ))\n for state in tqdm(states):\n self.get_county_estimates_by_state(\n api=api,\n table=table,\n variable=variable,\n estimate=estimate,\n state=state,\n )",
"def import_stations(time_res='hourly',time_format='%Y%m%d%H',\r\n campaign_time=[datetime(2018,12,9), datetime(2018,12,12)],\r\n data_category='air_temperature', station_ids=['00044','00091'],\r\n dbase_dir='dbase', table_dir='tables',Output=True,\r\n memory_save=True):\r\n timeranges=['recent','historical']\r\n #%%load the datasets available at each timestep\r\n dwd_datasets_meta=dwd_datasets_meta=json.load(open(table_dir+\"\\\\dwd_station_meta.txt\"))\r\n #try to get a variable from the category, otherwise use interpolation of higher frequency data\r\n resample_frequency=None\r\n time_res_dbase=time_res\r\n try:\r\n dwd_datasets_meta[time_res][data_category]\r\n except Exception:\r\n if time_res=='daily':\r\n try:\r\n dwd_datasets_meta['hourly'][data_category]\r\n print(data_category,' is not provided at the required resolution, daily_mean of hourly data used instead')\r\n resample_frequency='D'\r\n time_res_dbase='hourly'\r\n except Exception:\r\n try: \r\n dwd_datasets_meta['10_minutes'][data_category]\r\n print(data_category,' is not provided at the required resolution, daily_mean of 10_minutes data used instead')\r\n resample_frequency='D'\r\n time_res_dbase='10_minutes'\r\n except Exception:\r\n print(data_category, 'not available')\r\n sys.exit(1)\r\n if time_res=='hourly':\r\n try: \r\n dwd_datasets_meta['10_minutes'][data_category]\r\n print(data_category,' is not provided at the required resolution, hourly_mean of 10_minutes data used instead')\r\n resample_frequency='H'\r\n time_res_dbase='10_minutes'\r\n except Exception:\r\n print(data_category, 'not available')\r\n sys.exit(1)\r\n \r\n \r\n #%% download from dwd if necessary\r\n #connect to server\r\n server='opendata.dwd.de'\r\n ftp=connect_ftp(server = server,connected = False)\r\n #get the mean time of the campaign\r\n date_mean=campaign_time[0]+(campaign_time[1]-campaign_time[0])/2 \r\n # load the inititial ds\r\n dbase_path=dbase_dir+'\\\\db_stations_'+time_res+'_'+data_category+'.nc'\r\n if os.path.exists(dbase_path):\r\n with xr.open_dataset(dbase_path) as dwd_dbase:\r\n dwd_dbase.load()\r\n print('Existing database imported')\r\n #get the non_nans stations\r\n current_stations=np.array(dwd_dbase[list(dwd_dbase.keys())[0]].sel(time=date_mean,method='nearest').dropna('STATIONS_ID').coords['STATIONS_ID'])\r\n else:\r\n print(dbase_path, 'does not exist, we create a new netcdf_file')\r\n dwd_dbase=xr.Dataset()\r\n current_stations=np.array((-9999)).reshape(1)\r\n #change directory on server\r\n for timerange in timeranges:\r\n archive_url='/climate_environment/CDC/observations_germany/climate/'+time_res_dbase+'/'+data_category+'/'+timerange \r\n ftp.cwd(archive_url)\r\n #get the archive\r\n for station_id in station_ids:\r\n #we check whether the station is in the database with this parameter already\r\n if int(station_id) in current_stations:\r\n print('Station', station_id, 'with category', data_category,'in ',timerange,'dbase already')\r\n continue\r\n try:\r\n archive_name=[s for s in ftp.nlst() if station_id in s][0]\r\n except:\r\n print('No ',timerange,'data for station',station_id)\r\n continue\r\n print('Retrieving {}...'.format(archive_name))\r\n retrieved = False\r\n archive = io.BytesIO()\r\n # try to retrieve file\r\n while not retrieved:\r\n try:\r\n ftp.retrbinary(\"RETR \" + archive_name, archive.write)\r\n retrieved = True\r\n except:\r\n ftp=connect_ftp(server = server,connected = False)\r\n ftp.cwd(archive_url)\r\n archive.seek(0)\r\n with ZipFile(archive) as myzip:\r\n for f in myzip.infolist():\r\n 
# This is the data file\r\n #print('zip content:', f.filename)\r\n if f.filename.startswith('produkt_'):\r\n product = io.StringIO(str(myzip.read(f.filename),'utf-8'))\r\n #get dataframe from product \r\n dwd_product=pd.read_csv(product,sep=';',skipinitialspace=True)\r\n #get datetime\r\n dwd_product['time']=pd.to_datetime(dwd_product['MESS_DATUM'],format=time_format) \r\n dwd_product=dwd_product.rename(columns=dwd_datasets_meta[time_res_dbase][data_category])\r\n dwd_product=dwd_product.reset_index()\r\n dwd_product=dwd_product.set_index(['time','STATIONS_ID'])\r\n dwd_product=dwd_product.drop(columns=['MESS_DATUM','quality_level_of_next_columns','end_of_record','index'])\r\n #append to database\r\n dwd_xr=dwd_product.to_xarray()\r\n #replace all values equal to -999 to nan\r\n for data_var in dwd_xr.data_vars:\r\n dwd_xr[data_var]=dwd_xr[data_var].where(dwd_xr[data_var]>-999)\r\n if station_id=='05009':\r\n print('ok') \r\n #only add relevant dates if available memoryis rather small\r\n \r\n if memory_save and timerange=='historical':\r\n dwd_xr=dwd_xr.sel(time=slice(campaign_time[0]-timedelta(days=1),campaign_time[1]+timedelta(days=1)))\r\n #dwd_xr=dwd_xr.squeeze()\r\n \r\n try:\r\n dwd_dbase=xr.merge([dwd_dbase,dwd_xr])\r\n except Exception as e:\r\n print(e)\r\n print('try merging with compat=override')\r\n dwd_dbase=xr.merge([dwd_dbase,dwd_xr],compat='override')\r\n print(archive_name,' added to database')\r\n #upscale to required temporal resolution\r\n if resample_frequency is not None:\r\n dwd_dbase=dwd_dbase.resample(time=resample_frequency).mean(skipna=True)\r\n print('DWD data upscaled to',time_res,'averages')\r\n if Output==True:\r\n dwd_dbase.to_netcdf(dbase_path)\r\n print('Updated database' ,dbase_path)\r\n return dwd_dbase",
"def import_temp_data(counties):\n for index, row in counties.iterrows():\n station = row[2]\n url = f'https://wrcc.dri.edu/WRCCWrappers.py?sodxtrmts+0{station}+por+por+maxt+none+mave+5+01+F'\n result = requests.get(url)\n soup = BeautifulSoup(result.text, 'html.parser')\n table = soup.find('table')\n data = pd.read_html(str(table))\n df = data[0]\n df.columns = df.iloc[0]\n df = df.drop([0])\n df = df.iloc[-65:-8, :]\n df = df.rename(columns={'YEAR(S)': 'Year'})\n df['Year'] = pd.to_datetime(df['Year'], format='%Y')\n df = df.set_index('Year')\n df = df.dropna(axis=1)\n df = df.replace(to_replace='-----', value=np.nan)\n df = df.astype('float64')\n df = df.fillna(df.mean().round(2))\n df = df.add_suffix('_t')\n name = row[0]\n df['County'] = name\n df.to_csv(f'{name}_avgmaxtemp.csv')\n print(f'Avg. max. temp. data from {name} saved')\n time.sleep(3.14)\n print('Done')",
"def ingest(self):\n self.log.debug('starting the ``get`` method')\n\n dictList = self._create_dictionary_of_ned_d()\n self.primaryIdColumnName = \"primaryId\"\n self.raColName = \"raDeg\"\n self.declColName = \"decDeg\"\n\n tableName = self.dbTableName\n createStatement = u\"\"\"\n CREATE TABLE `%(tableName)s` (\n `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',\n `Method` varchar(150) DEFAULT NULL,\n `dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,\n `dateLastModified` datetime DEFAULT CURRENT_TIMESTAMP,\n `updated` varchar(45) DEFAULT '0',\n `dist_derived_from_sn` varchar(150) DEFAULT NULL,\n `dist_in_ned_flag` varchar(10) DEFAULT NULL,\n `dist_index_id` mediumint(9) DEFAULT NULL,\n `dist_mod` double DEFAULT NULL,\n `dist_mod_err` double DEFAULT NULL,\n `dist_mpc` double DEFAULT NULL,\n `galaxy_index_id` mediumint(9) DEFAULT NULL,\n `hubble_const` double DEFAULT NULL,\n `lmc_mod` double DEFAULT NULL,\n `notes` varchar(500) DEFAULT NULL,\n `primary_ned_id` varchar(150) DEFAULT NULL,\n `redshift` double DEFAULT NULL,\n `ref` varchar(150) DEFAULT NULL,\n `ref_date` int(11) DEFAULT NULL,\n `master_row` tinyint(4) DEFAULT '0',\n `major_diameter_arcmin` double DEFAULT NULL,\n `ned_notes` varchar(700) DEFAULT NULL,\n `object_type` varchar(100) DEFAULT NULL,\n `redshift_err` double DEFAULT NULL,\n `redshift_quality` varchar(100) DEFAULT NULL,\n `magnitude_filter` varchar(10) DEFAULT NULL,\n `minor_diameter_arcmin` double DEFAULT NULL,\n `morphology` varchar(50) DEFAULT NULL,\n `hierarchy` varchar(50) DEFAULT NULL,\n `galaxy_morphology` varchar(50) DEFAULT NULL,\n `radio_morphology` varchar(50) DEFAULT NULL,\n `activity_type` varchar(50) DEFAULT NULL,\n `in_ned` tinyint(4) DEFAULT NULL,\n `raDeg` double DEFAULT NULL,\n `decDeg` double DEFAULT NULL,\n `eb_v` double DEFAULT NULL,\n `sdss_coverage` TINYINT DEFAULT NULL,\n PRIMARY KEY (`primaryId`),\n UNIQUE KEY `galaxy_index_id_dist_index_id` (`galaxy_index_id`,`dist_index_id`)\n ) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;\n DROP VIEW IF EXISTS `view_%(tableName)s_master_recorders`;\n CREATE\n VIEW `view_%(tableName)s_master_recorders` AS\n (SELECT \n `%(tableName)s`.`primary_ned_id` AS `primary_ned_id`,\n `%(tableName)s`.`object_type` AS `object_type`,\n `%(tableName)s`.`raDeg` AS `raDeg`,\n `%(tableName)s`.`decDeg` AS `decDeg`,\n `%(tableName)s`.`dist_mpc` AS `dist_mpc`,\n `%(tableName)s`.`dist_mod` AS `dist_mod`,\n `%(tableName)s`.`dist_mod_err` AS `dist_mod_err`,\n `%(tableName)s`.`Method` AS `dist_measurement_method`,\n `%(tableName)s`.`redshift` AS `redshift`,\n `%(tableName)s`.`redshift_err` AS `redshift_err`,\n `%(tableName)s`.`redshift_quality` AS `redshift_quality`,\n `%(tableName)s`.`major_diameter_arcmin` AS `major_diameter_arcmin`,\n `%(tableName)s`.`minor_diameter_arcmin` AS `minor_diameter_arcmin`,\n `%(tableName)s`.`magnitude_filter` AS `magnitude_filter`,\n `%(tableName)s`.`eb_v` AS `gal_eb_v`,\n `%(tableName)s`.`hierarchy` AS `hierarchy`,\n `%(tableName)s`.`morphology` AS `morphology`,\n `%(tableName)s`.`radio_morphology` AS `radio_morphology`,\n `%(tableName)s`.`activity_type` AS `activity_type`,\n `%(tableName)s`.`ned_notes` AS `ned_notes`,\n `%(tableName)s`.`in_ned` AS `in_ned`,\n `%(tableName)s`.`primaryId` AS `primaryId`\n FROM\n `%(tableName)s`\n WHERE\n (`%(tableName)s`.`master_row` = 1));\n \"\"\" % locals()\n\n self.add_data_to_database_table(\n dictList=dictList,\n createStatement=createStatement\n )\n\n self._clean_up_columns()\n self._get_metadata_for_galaxies()\n 
self._update_sdss_coverage()\n\n self.log.debug('completed the ``get`` method')\n return None",
"def download_data(self) -> None: # coverage: ignore\n\n navaids = []\n c = requests.get(f\"{base_url}/earth_fix.dat\")\n\n for line in c.iter_lines():\n\n line = line.decode(encoding=\"ascii\", errors=\"ignore\").strip()\n\n # Skip empty lines or comments\n if len(line) < 3 or line[0] == \"#\":\n continue\n\n # Start with valid 2 digit latitude -45. or 52.\n if not ((line[0] == \"-\" and line[3] == \".\") or line[2] == \".\"):\n continue\n\n # Data line => Process fields of this record, separated by a comma\n # Example line:\n # 30.580372 -094.384169 FAREL\n fields = line.split()\n navaids.append(\n Navaid(\n fields[2],\n \"FIX\",\n float(fields[0]),\n float(fields[1]),\n None,\n None,\n None,\n None,\n )\n )\n\n c = requests.get(f\"{base_url}/earth_nav.dat\")\n\n for line in c.iter_lines():\n\n line = line.decode(encoding=\"ascii\", errors=\"ignore\").strip()\n\n # Skip empty lines or comments\n if len(line) == 0 or line[0] == \"#\":\n continue\n\n # Data line => Process fields of this record, separated by a comma\n # Example lines:\n # 2 58.61466599 125.42666626 451 522 30 0.0 A Aldan NDB\n # 3 31.26894444 -085.72630556 334 11120 40 -3.0 OZR CAIRNS VOR-DME\n # type lat lon elev freq ? var id desc\n # 0 1 2 3 4 5 6 7 8\n\n fields = line.split()\n\n # Valid line starts with integers\n if not fields[0].isdigit():\n continue # Next line\n\n # Get code for type of navaid\n itype = int(fields[0])\n\n # Type names\n wptypedict = {\n 2: \"NDB\",\n 3: \"VOR\",\n 4: \"ILS\",\n 5: \"LOC\",\n 6: \"GS\",\n 7: \"OM\",\n 8: \"MM\",\n 9: \"IM\",\n 12: \"DME\",\n 13: \"TACAN\",\n }\n\n # Type code never larger than 20\n if itype not in list(wptypedict.keys()):\n continue # Next line\n\n wptype = wptypedict[itype]\n\n # Select types to read\n if wptype not in [\"NDB\", \"VOR\", \"ILS\", \"GS\", \"DME\", \"TACAN\"]:\n continue # Next line\n\n # Find description\n try:\n idesc = line.index(fields[7]) + len(fields[7])\n description: Optional[str] = line[idesc:].strip().upper()\n except Exception:\n description = None\n\n navaids.append(\n Navaid(\n fields[7],\n wptype,\n float(fields[1]),\n float(fields[2]),\n float(fields[3][1:])\n if fields[3].startswith(\"0-\")\n else float(fields[3]),\n float(fields[4])\n if wptype == \"NDB\"\n else float(fields[4]) / 100,\n float(fields[6])\n if wptype in [\"VOR\", \"NDB\", \"ILS\", \"GS\"]\n else None,\n description,\n )\n )\n\n self._data = pd.DataFrame.from_records(\n navaids, columns=NavaidTuple._fields\n )\n\n self._data.to_pickle(self.cache_dir / \"traffic_navaid.pkl\")",
"async def fetch_resulting_datasets(db_pool, query_parameters, misses=False, accessible_missing=None):\n async with db_pool.acquire(timeout=180) as connection:\n datasets = []\n try: \n if misses:\n if accessible_missing:\n query = f\"\"\"SELECT id as \"datasetId\", access_type as \"accessType\", stable_id as \"stableId\"\n FROM beacon_dataset\n WHERE id IN ({create_prepstmt_variables(len(accessible_missing))});\n \"\"\"\n # LOG.debug(f\"QUERY to fetch accessible missing info: {query}\")\n statement = await connection.prepare(query)\n db_response = await statement.fetch(*accessible_missing)\n else:\n return []\n else:\n query = f\"\"\"SELECT * FROM {DB_SCHEMA}.query_data_summary_response({create_prepstmt_variables(13)});\"\"\"\n LOG.debug(f\"QUERY to fetch hits: {query}\")\n statement = await connection.prepare(query)\n db_response = await statement.fetch(*query_parameters) \n\n for record in list(db_response):\n processed = transform_misses(record) if misses else await transform_record(db_pool, record)\n datasets.append(processed)\n return datasets\n except Exception as e:\n raise BeaconServerError(f'Query resulting datasets DB error: {e}')",
"def pull_data_from_geodb(sids, verbose=True):\n api_key = \"f125f4c130e61d9f4ad5874aadfe07ff\"\n geodb = dpf.data(api_key).geo\n\n df_a = pd.DataFrame() # for annual features\n df_m = pd.DataFrame() # for monthly features\n\n # gets meta data for each feature from its series id, \n # then uses meta data to pull all of the feature's regional data\n for sid in sids:\n print(f\"Series ID: {sid}\")\n\n print(\" > Collecting meta data...\")\n meta = geodb['meta'](series_id=sid).iloc[0, :]\n\n for col in ['min_date', 'max_date']:\n meta[col] = pd.to_datetime(meta[col]).date()\n\n print(\" > Pulling dataframe...\")\n data = geodb['data'](\n series_group=meta['series_group'],\n date=meta['max_date'],\n start_date=meta['min_date'],\n region_type=meta['region_type'],\n units=meta['units'],\n frequency=meta['frequency'],\n season=meta['season']\n )\n # swap series id for its feature name (change colname later)\n data['series_id'] = meta['title']\n\n if verbose:\n print(f\" > Title: {meta['title']}\")\n print(f\" > Min date: {meta['min_date']}\")\n print(f\" > Max date: {meta['max_date']}\")\n print(f\" > Units: {meta['units']}\")\n print(f\" > Season: {meta['season']}\")\n print(f\" > Frequency: {meta['frequency']}\")\n print(f\" > Region type: {meta['region_type']}\")\n\n print(f\" > Appending to df_{meta['frequency']}...\")\n if meta['frequency'] == 'a':\n df_a = df_a.append(data)\n else:\n df_m = df_m.append(data)\n continue\n\n df_a = Wrangler.transform_fresh_columns(df_a)\n df_m = Wrangler.transform_fresh_columns(df_m)\n\n return df_a, df_m",
"def ingest(self,check=True):\n\t\tdata=self.data_all\n\t\tpubdate=time_utils.parseISO(self.api.last_update).date()\n\t\t\n\t\tcounter=0\n\t\tfor item in data:\n\t\t\tareacode=item['areaCode']\n\t\t\tdatestring=item['specimenDate']\n\t\t\t_date=fetchdate(datestring)\n\t\t\trow,created=DailyCases.objects.get_or_create(specimenDate=_date,areacode=areacode)\n\t\t\trow.areaname=item['areaName']\n\t\t\tdaily=item['newCasesBySpecimenDate']\n\t\t\ttotal=item['cumCasesBySpecimenDate']\n\t\t\t\n\t\t\t#log.debug(f'{row.areaname}: {datestring}')\t\t\t\n\t\t\tif created:\n\t\t\t\trow.dailyLabConfirmedCases=daily\n\t\t\t\trow.totalLabConfirmedCases=total\n\t\t\t\trow.save()\n\t\t\t\t\n\t\t\t\tif daily:\n\t\t\t\t\tlag=(pubdate-_date.date()).days\n\t\t\t\t\tlog.debug(f'date:{_date} lag: {lag} daily:{daily}')\n\t\t\t\t\tdrow,dcreated=DailyReport.objects.get_or_create(specimenDate=_date,areacode=areacode,publag=lag)\n\t\t\t\t\tdrow.dailycases=daily\n\t\t\t\t\tdrow.add_cases=daily #if a new daily case, assume no prior report\n\t\t\t\t\tdrow.save()\n\t\t\t\n\t\t\tif not created:\n\t\t\t\texisting_daily=row.dailyLabConfirmedCases\n\t\t\t\texisting_total=row.totalLabConfirmedCases\n\t\t\t\tif daily is not None:\n\t\t\t\t\tif existing_daily !=daily or existing_total!=total:\n\t\t\t\t\t\trow.dailyLabConfirmedCases=daily\n\t\t\t\t\t\trow.totalLabConfirmedCases=total\n\t\t\t\t\t\trow.save()\n\t\t\t\t\t\tif existing_daily !=daily:\n\t\t\t\t\t\t\tlog.debug(f'Updating {row.areaname} on {datestring}: Daily: {existing_daily} to {daily} Total: {existing_total} to {total}')\n\t\t\t\t\t\t\tif existing_daily:\n\t\t\t\t\t\t\t\t_increase=daily-existing_daily\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t_increase=daily\n\t\t\t\t\t\t\tlag=(pubdate-_date.date()).days\n\t\t\t\t\t\t\tdrow,dcreated=DailyReport.objects.get_or_create(specimenDate=_date,areacode=areacode,publag=lag)\n\t\t\t\t\t\t\tdrow.dailycases=daily\n\t\t\t\t\t\t\tdrow.add_cases=_increase\n\t\t\t\t\t\t\tdrow.save()\n\t\t\t\t\t\n\t\t\tcounter+=1\n\t\t\tif counter%1000==0:\n\t\t\t\tlog.info(f'Processing row {counter}')\n\t\tlog.info(f'Processed: {counter} rows')\n\n\t\tif self.edition:\n\t\t\tconfigs.userconfig.update('PHE','latest_update',self.edition)\n\t\telse:\n\t\t\tlog.info('Latest update not updated')",
"def preprocessing(self):\n print(\"This may take a while, please grab a coffee. Average wait time: 2 - 6 mins.\")\n print(\"Loading data... \")\n df = ExternalDataRetrieval().get_data()\n\n print(\"Preprocessing data... \")\n\n amino_df = pd.DataFrame()\n # Set column names for zinc content dataframe\n zcolumns = ['value', 'group']\n # Set column names for food groups dataframe\n fcolumns = ['ID', 'food', 'group', 'manufacturer']\n # Declare zinc content dataframe\n zinc_df = pd.DataFrame(columns=zcolumns)\n # Declare food group dataframe\n food_df = pd.DataFrame(columns=fcolumns)\n # Doing this one amino acids type at a time.\n for n in AMINO_LIST:\n food = []\n # nutrients components of the food type is further nested in 'nutrients', which its components are further\n # nested\n for i, items in enumerate(df['nutrients']):\n # Iterate through the nutrient type to obtain necessary info.\n # For this project, there are many redundant data in there.\n f_flag = False\n # Only need to set the flag to activate the zinc check for one amino acid loop\n if n == AMINO_LIST[0]:\n z_flag = False\n for item in items:\n # Check to see if this nutrient type is one of the amino acids\n if item.get(\"name\") == n and item.get(\"value\") > 0:\n # If so, add the food type to the amino acid type array\n food.append(df['name'][i]['long'])\n f_flag = True\n # Check to see if this nutrient type is Zinc, only need to do this for one amino acid loop.\n if item.get(\"name\") == Z and n == AMINO_LIST[0]:\n # If so, gets its zinc content value and the food group it is in.\n zinc_df.loc[i] = [item.get(\"value\"), df['group'][i]]\n z_flag = True\n if f_flag and z_flag:\n break\n\n # Build the food group data dataframe one food at a time, only need to do this for one amino acid loop.\n if n == AMINO_LIST[0]:\n food_df.loc[i] = [df['meta'][i]['ndb_no'], df['name']\n [i]['long'], df['group'][i], df['manufacturer'][i]]\n\n # Assemble the amino acid type array in to nutrient dataframe\n fd = pd.DataFrame({n: food})\n # Since the length of each columns varies (amino acid food types appearance in food types varies),\n # there are many NaN in the dataframe as a result. We need to drop the NaN\n fd = fd.dropna()\n amino_df = pd.concat([amino_df, fd], axis=1, ignore_index=True)\n # Add column names to the nutrient dataframe\n amino_df.columns = AMINO_LIST\n print(\"Good news, preprocessing completed successfully! \")\n return amino_df, zinc_df, food_df",
"def get_data():\n log = common.LogFile('', LOGFILE)\n settings = load_settings()\n keywords = settings[\"keywords\"]\n api_key = settings[\"api_key\"]\n for keyword in keywords:\n print(\"[{}] : fetching data.\".format(keyword))\n filename = \"results_{0}.json\".format(keyword)\n results = {}\n hits_limit = 500\n start_at = 1\n counter = 0\n while True:\n url = create_url(keyword, hits_limit, start_at, api_key)\n records = get_records_from_url(url)\n total_results = get_total_hits(records)\n records = split_records(records)\n records_on_page = len(records)\n if records_on_page == 0:\n break\n else:\n for record in records:\n counter += 1\n id_no = extract_id_number(record)\n processed_dict = {'ID': id_no, 'problem': []}\n processed_record = parse_record(\n record, processed_dict, log)\n if id_no not in results:\n results[id_no] = processed_record\n if counter % 100 == 0:\n print(\"Processed {} out of {}\".format(\n counter, total_results))\n start_at += hits_limit\n time.sleep(THROTTLE)\n print(\"[{}] : fetched {} records to {}.\".format(\n keyword, len(results), filename))\n save_data(results, filename)",
"def setup_sample_data(no_of_records):\n rows_in_database = [{'id': counter, 'name': get_random_string(string.ascii_lowercase, 20), 'dt': '2017-05-03'}\n for counter in range(0, no_of_records)]\n return rows_in_database",
"def make_dataframe(self):\n logging.info('*** Creating the dataframes from the source files ' )\n \n for k in self.datasets_keys:\n #for k in ['igra2' , 'ncar']:\n \n logging.info('*** Creating the dataframe for the dataset: %s ' , k ) \n \n p_levels = self.data[k]['df']['observations_table']['z_coordinate'][:]\n logging.debug(' Loaded the z_coordinate')\n \n z_type = self.data[k]['df']['observations_table']['z_coordinate_type'][:]\n logging.debug(' Loaded the z_coordinate_type')\n \n obs_variable = self.data[k]['df']['observations_table']['observed_variable'][:]\n logging.debug(' Loaded the observed_variable')\n \n obs_values = self.data[k]['df']['observations_table']['observation_value'][:]\n logging.debug(' Loaded the observation_value')\n \n observation_id = self.data[k]['df']['observations_table']['observation_id'][:]\n logging.debug(' Loaded the observation_id')\n \n units = self.data[k]['df']['observations_table']['units'][:].astype(int)\n logging.debug(' Loaded the units') \n \n report_id = self.data[k]['df']['observations_table']['report_id'][:] \n logging.debug(' Loaded the report_id')\n \n date_time = self.data[k]['df']['observations_table']['date_time'][:]\n logging.debug(' Loaded the date_time (deltas)')\n \n lat , lon = self.data[k]['df']['observations_table']['latitude'][:] , self.data[k]['df']['observations_table']['longitude'][:]\n logging.debug(' Loaded the lat,lon ')\n \n \n self.obs_table_columns = list(self.data[k]['df']['observations_table'].keys() )\n \n self.data[k]['df'].close()\n \n \"\"\" Creating a dataframe \"\"\"\n columns = ['date_time', 'z_coordinate' , 'z_coordinate_type', 'observed_variable' , 'observation_value' , 'report_id' , 'observation_id' , 'latitude' , 'longitude', 'units']\n logging.info(' Loaded the data, creating dataframe ')\n \n df = pd.DataFrame( list(zip( date_time, p_levels, z_type, obs_variable , obs_values, report_id, observation_id , lat , lon, units ) ) , columns = columns ) \n \n \n \"\"\" Storing the dataframe \"\"\" ### try using xarrays ??? \n logging.debug('Storing the DF ' ) \n self.data[k]['dataframe'] = df\n \n logging.debug(' PD dataframe created !!! ')",
"def data_fetch(self, curs, splat_table, mcl_table, crs_no=0, output_fname=None):\n\t\tgene_no2gene_id = get_gene_no2gene_id(curs)\t#08-31-05\n\t\toutf = open(output_fname, 'w')\t#08-31-05\n\t\toutf.write(\"r:=[\")\t#08-31-05\n\t\t\n\t\tmcl_id2cluster_dstructure = {}\n\t\tno_of_total_genes = get_no_of_total_genes(curs)\n\t\tsys.stderr.write(\"Getting the basic information for all clusters...\\n\")\n\t\tcurs.execute(\"DECLARE crs%s CURSOR FOR select m.mcl_id, m.vertex_set, m.connectivity, 0,\\\n\t\t\tm.recurrence_array, s.edge_set, s.connectivity, m.cooccurrent_cluster_id from %s m, %s s where \\\n\t\t\tm.splat_id=s.splat_id\"\\\n\t\t\t%(crs_no, mcl_table, splat_table))\t#06-20-05\tconnectivity_original faked to be 0\n\t\tcurs.execute(\"fetch 5000 from crs%s\"%crs_no)\n\t\trows = curs.fetchall()\n\t\twhile rows:\n\t\t\tfor row in rows:\n\t\t\t\tunit = cluster_dstructure()\n\t\t\t\tunit.cluster_id = row[0]\n\t\t\t\tvertex_set = row[1][1:-1].split(',')\n\t\t\t\tunit.vertex_set = map(int, vertex_set)\n\t\t\t\tunit.connectivity = row[2]\n\t\t\t\tunit.connectivity_original = row[3]\n\t\t\t\trecurrence_array = row[4][1:-1].split(',')\n\t\t\t\tunit.recurrence_array = map(float, recurrence_array)\n\t\t\t\tunit.edge_set = parse_splat_table_edge_set(row[5])\n\t\t\t\tunit.splat_connectivity = row[6]\n\t\t\t\tunit.cooccurrent_cluster_id = row[7]\n\t\t\t\tunit.go_no2association_genes = self.get_go_functions_of_this_gene_set(curs, unit.vertex_set)\n\t\t\t\tunit.go_no2information = self.get_information_of_go_functions(curs, \\\n\t\t\t\t\tunit.go_no2association_genes, len(unit.vertex_set), no_of_total_genes, p_value_cut_off=0.05)\t#jasmine wants to cut some go-nos.\n\t\t\t\tunit.edge_cor_2d_list, unit.edge_sig_2d_list = self.get_cor_sig_2d_list(curs, unit.edge_set)\n\t\t\t\t\n\t\t\t\tstr_tmp = self.return_string_form_of_cluster_dstructure(unit, gene_no2gene_id)\t#08-31-05\n\t\t\t\toutf.write(\"%s,\"%str_tmp)\n\t\t\t\t#mcl_id2cluster_dstructure[unit.cluster_id] = unit\n\t\t\t\t\"\"\"\n\t\t\t\torder_1st_id, order_2nd_id = map(int, unit.cooccurrent_cluster_id.split('.'))\n\t\t\t\tif order_1st_id not in self.order_1st_id2all_clusters:\n\t\t\t\t\tself.order_1st_id2all_clusters[order_1st_id] = {}\n\t\t\t\tif order_2nd_id not in self.order_1st_id2all_clusters[order_1st_id]:\n\t\t\t\t\tself.order_1st_id2all_clusters[order_1st_id][order_2nd_id] = []\n\t\t\t\tself.order_1st_id2all_clusters[order_1st_id][order_2nd_id].append(unit)\n\t\t\t\t\"\"\"\n\t\t\tcurs.execute(\"fetch 5000 from crs%s\"%crs_no)\n\t\t\trows = curs.fetchall()\n\t\toutf.write(\"[]]:\")\t#08-31-05, 09-01-05 add the last blank []\n\t\tdel outf\n\t\tsys.stderr.write(\"Done.\\n\")\n\t\treturn mcl_id2cluster_dstructure",
"def run(params, conn, outputfile):\n date_begin = parse(params['date_begin'] + ' 00:00:00 +0700')\n date_end = parse(params['date_end'] + ' 23:59:59 +0700')\n domain_id = params['domain_id']\n authority_ids = params['authority_ids']\n\n covid_report_type_id = fetch_report_type_id(conn, 'surveillance-covid-19', domain_id)\n main_data = fetch_data(conn, date_begin, date_end, authority_ids, domain_id, covid_report_type_id)\n\n covid_report_type_id = fetch_report_type_id(conn, 'surveillance-covid-19-followup', domain_id)\n follow_data = fetch_data(conn, date_begin, date_end, authority_ids, domain_id, covid_report_type_id)\n\n line_list = join(main_data, follow_data)\n tabular(line_list)\n\n if len(line_list) == 0:\n return False\n\n df = pandas.DataFrame(line_list)\n df['date'] = df['date'].dt.tz_convert(tz)\n df['date'] = df['date'].dt.strftime('%d/%m/%Y %H:%M')\n writer = pandas.ExcelWriter(outputfile)\n df.to_excel(writer, 'covid-19', columns=['report_id', 'name', 'gender', 'age',\n 'village_no', 'village', 'tumbols', 'amphurs',\n 'arrival_date_village', 'mobile_phone',\n 'risk_factor', 'symptom_check', 'symptom_covid',\n 'date', 'latitude', 'longitude',\n '01', '02', '03', '04', '05', '06',\n '07', '08', '09', '10', '11', '12', '13', '14'], index=False)\n ldf = pandas.DataFrame(flat(main_data))\n ldf['date'] = ldf['date'].dt.tz_convert(tz)\n ldf.sort_values(by=['date'], inplace=True)\n ldf['date'] = ldf['date'].dt.strftime('%d/%m/%Y %H:%M')\n\n def is_followup(row):\n return row['report_id'] != row['group_id']\n\n ldf['followup'] = ldf.apply(is_followup, axis=1)\n ldf.to_excel(writer,\n 'all',\n columns=['report_id', 'group_id', 'followup', 'name', 'gender', 'age',\n 'village_no', 'village', 'tumbols', 'amphurs',\n 'arrival_date_village', 'mobile_phone',\n 'risk_factor', 'symptom_check', 'symptom_covid',\n 'total_times', 'activity_other',\n 'date', 'latitude', 'longitude'],\n index=False)\n writer.save()\n return True",
"def import_precip_data(counties):\n for index, row in counties.iterrows():\n station = row[2]\n url = f'https://wrcc.dri.edu/WRCCWrappers.py?sodxtrmts+0{station}+por+por+pcpn+none+msum+5+01+F'\n result = requests.get(url)\n soup = BeautifulSoup(result.text, 'html.parser')\n table = soup.find('table')\n data = pd.read_html(str(table))\n df = data[0]\n df.columns = df.iloc[0]\n df = df.drop([0])\n df = df.iloc[-65:-8, :]\n df = df.rename(columns={'YEAR(S)': 'Year'})\n df['Year'] = pd.to_datetime(df['Year'], format='%Y')\n df = df.set_index('Year')\n df = df.dropna(axis=1)\n df = df.replace(to_replace='-----', value=np.nan)\n df = df.astype('float64')\n df = df.fillna(df.mean().round(2))\n df = df.add_suffix('_p')\n name = row[0]\n df['County'] = name\n df.to_csv(f'{name}_precip.csv')\n print(f'Precipitation data from {name} saved')\n time.sleep(3.14)\n print('Done')",
"def get_311_data():\n # reading in data and saving to separate DFs\n source = spark.read.csv(\"source.csv\", sep=\",\", header=True, inferSchema=True)\n case = spark.read.csv(\"case.csv\", sep=\",\", header=True, inferSchema=True)\n dept = spark.read.csv(\"dept.csv\", sep=\",\", header=True, inferSchema=True)\n\n # returning DFs\n return source, case, dept",
"def mover_get_data(lfns,\n path,\n sitename,\n queuename,\n stageinTries,\n inputpoolfcstring=\"xmlcatalog_file:PoolFileCatalog.xml\",\n ub=\"outdated\", # to be removed\n dsname=\"\",\n dsdict={},\n rucio_dataset_dictionary={},\n guids=[],\n analysisJob=False,\n usect=True,\n pinitdir=\"\",\n proxycheck=True,\n spsetup=\"\",\n tokens=[],\n userid=\"\",\n inputDir=\"\",\n jobId=None,\n jobDefId=\"\",\n access_dict=None,\n scope_dict=None,\n workDir=\"\",\n DN=None,\n dbh=None,\n jobPars=\"\",\n cmtconfig=\"\",\n filesizeIn=[],\n checksumIn=[],\n transferType=None,\n experiment=\"\",\n eventService=False,\n sourceSite=\"\"):\n\n tolog(\"Mover get data started\")\n\n statusPFCTurl = None\n pilotErrorDiag = \"\"\n\n # FAX counters (will be reported in jobMetrics; only relevant when FAX has been activated after a stage-in failure)\n N_filesWithoutFAX = 0\n N_filesWithFAX = 0\n bytesWithoutFAX = 0L\n bytesWithFAX = 0L\n\n # FAX control variable, if FAX is used as primary site mover in combination with direct I/O\n usedFAXandDirectIO = False\n\n # The FAX variables above will be stored in a dictionary, to be returned by this function\n FAX_dictionary = {}\n\n # Is the DBRelease file available locally?\n DBReleaseIsAvailable = handleDBRelease(dbh, lfns, jobPars, path)\n\n # Should stage-in be aborted? (if there are only locally available DBRelease files in the stage-in list)\n if abortStageIn(dbh, lfns, DBReleaseIsAvailable):\n return 0, pilotErrorDiag, statusPFCTurl, FAX_dictionary\n\n # Setup the dictionary necessary for all instrumentation\n report = getInitialTracingReport(userid, sitename, dsname, \"get_sm\", analysisJob, jobId, jobDefId, DN)\n\n if stageinTries != 0:\n get_RETRY = min(stageinTries, MAX_NUMBER_OF_RETRIES)\n else:\n get_RETRY = MAX_RETRY\n get_TIMEOUT = 5*3600/get_RETRY\n\n fail = 0\n guidfname = {}\n error = PilotErrors()\n\n region = readpar('region')\n\n # Space tokens currently not used for input files\n # # check if there is are any space tokens\n # _token = getProperSpaceTokenList(token, listSEs, len(lfns))\n\n # Select the correct mover\n copycmd, setup = getCopytool(mode=\"get\")\n\n # Get the sitemover object corresponding to the default copy command\n sitemover = getSiteMover(copycmd, setup)\n\n # Get the experiment object\n thisExperiment = getExperiment(experiment)\n\n # Get the name for the PFC file\n _path = path\n if eventService:\n # Update the path (create the PFC in one level above the payload workdir)\n path = os.path.abspath(os.path.join(path, '..'))\n pfc_name = getPFCName(path, inputpoolfcstring)\n # done with the event server modification (related to the PFC generation), reset the path again\n path = _path\n\n # Build the file info dictionary (use the filesize and checksum from the dispatcher if possible) and create the PFC\n # Format: fileInfoDic[file_nr] = (guid, gpfn, fsize, fchecksum, filetype, copytool)\n # replicas_dic[guid1] = [ replica1, .. 
] where replicaN is an object of class replica\n ec, pilotErrorDiag, fileInfoDic, totalFileSize, replicas_dic = \\\n getFileInfo(region, ub, queuename, guids, dsname, dsdict, lfns, pinitdir, analysisJob, tokens, DN, sitemover, error, path, dbh, DBReleaseIsAvailable,\\\n scope_dict, pfc_name=pfc_name, filesizeIn=filesizeIn, checksumIn=checksumIn, thisExperiment=thisExperiment)\n if ec != 0:\n return ec, pilotErrorDiag, statusPFCTurl, FAX_dictionary\n\n # Until the Mover PFC file is no longer needed, call the TURL based PFC \"PoolFileCatalogTURL.xml\"\n pfc_name_turl = pfc_name.replace(\".xml\", \"TURL.xml\")\n\n # Create a SURL to space token dictionary\n tokens_dictionary = getSurlTokenDictionary(lfns, tokens)\n\n # Create a TURL based PFC if necessary/requested (i.e. if copy tool should not be used [useCT=False] and\n # if oldPrefix and newPrefix are not already set in copysetup [useSetPrefixes=False])\n ec, pilotErrorDiag, createdPFCTURL, usect = PFC4TURLs(analysisJob, transferType, fileInfoDic, pfc_name_turl, sitemover, sitename, usect, dsdict, eventService, tokens_dictionary, sitename, sourceSite, lfns)\n if ec != 0:\n return ec, pilotErrorDiag, statusPFCTurl, FAX_dictionary\n\n # Correct the total file size for the DBRelease file if necessary\n totalFileSize = correctTotalFileSize(totalFileSize, fileInfoDic, lfns, dbh, DBReleaseIsAvailable)\n\n # Only bother with the size checks if the copy tool is to be used (non-direct access mode)\n if usect:\n # Get a proper maxinputsize from schedconfig/default \n _maxinputsize = getMaxInputSize()\n\n # Check the total input file size\n ec, pilotErrorDiag = verifyInputFileSize(totalFileSize, _maxinputsize, error)\n if ec != 0:\n return ec, pilotErrorDiag, statusPFCTurl, FAX_dictionary\n\n # Do we have enough local space to stage in all data and run the job?\n ec, pilotErrorDiag = verifyAvailableSpace(sitemover, totalFileSize, path, error)\n if ec != 0:\n return ec, pilotErrorDiag, statusPFCTurl, FAX_dictionary\n\n # Get the replica dictionary from file (used when the primary replica can not be staged due to some temporary error)\n replica_dictionary = getReplicaDictionaryFile(path)\n\n # file counters\n N_files_on_tape = 0\n N_root_files = 0\n N_non_root_files = 0\n\n # If FAX is used as a primary site mover then set the default FAX mode to true, otherwise to false (normal mode)\n if copycmd == \"fax\":\n usedFAXMode = True\n else:\n usedFAXMode = False\n\n # Use isOneByOneFileTransfer() to determine if files should be transferred one by one or all at once\n if not sitemover.isOneByOneFileTransfer():\n\n # Note: this mode is used by the aria2c site mover only\n # Normal stage-in is below\n\n tolog(\"All files will be transferred at once\")\n\n # Extract the file info for the first file in the dictionary\n guid, gpfn, lfn, fsize, fchecksum, filetype, copytool = extractInputFileInfo(fileInfoDic[0], lfns)\n file_access = getFileAccess(access_dict, lfn)\n dsname = getDataset(lfn, dsdict)\n\n # Perform stage-in using the sitemover wrapper method\n s, pErrorText = sitemover_get_all_data(sitemover, error, gpfn, lfn, path, fsize=fsize, spsetup=spsetup, fchecksum=fchecksum,\\\n guid=guid, analysisJob=analysisJob, usect=usect, pinitdir=pinitdir, proxycheck=proxycheck,\\\n sitename=sitename, token=None, timeout=get_TIMEOUT, dsname=dsname, userid=userid, report=report,\\\n access=file_access, inputDir=inputDir, jobId=jobId, workDir=workDir, cmtconfig=cmtconfig, lfns=lfns,\\\n experiment=experiment, replicas_dic=replicas_dic, dsdict=dsdict, 
scope_dict=scope_dict)\n if s != 0:\n tolog('!!WARNING!!2999!! Failed during stage-in of multiple files: %s' % (error.getErrorStr(s)))\n tolog(\"Exit code: %s\" % (s))\n fail = s\n\n # Normal stage-in (one by one file transfers)\n if sitemover.isOneByOneFileTransfer() or fail != 0:\n \n tolog(\"Files will be transferred one by one\")\n\n # Reset any previous failure\n fail = 0\n\n # Loop over all files in the file info dictionary\n number_of_files = len(fileInfoDic.keys())\n tolog(\"Will process %d file(s)\" % (number_of_files))\n for nr in range(number_of_files):\n # Extract the file info from the dictionary\n guid, gpfn, lfn, fsize, fchecksum, filetype, copytool = extractInputFileInfo(fileInfoDic[nr], lfns)\n\n # Has the copycmd/copytool changed? (E.g. due to FAX) If so, update the sitemover object\n if copytool != copycmd:\n copycmd = copytool\n # Get the sitemover object corresponding to the new copy command\n sitemover = getSiteMover(copycmd, setup)\n tolog(\"Site mover object updated since copytool has changed\")\n\n # Update the dataset name\n dsname = getDataset(lfn, dsdict)\n proper_dsname = getDataset(lfn, rucio_dataset_dictionary)\n scope = getFileScope(scope_dict, lfn)\n\n # Update the tracing report with the proper container/dataset name\n report = updateReport(report, gpfn, proper_dsname, fsize, sitemover)\n report['scope'] = scope\n\n # The DBRelease file might already have been handled, go to next file\n if isDBReleaseFile(dbh, lfn) and DBReleaseIsAvailable:\n updateFileState(lfn, workDir, jobId, mode=\"transfer_mode\", state=\"no_transfer\", type=\"input\")\n guidfname[guid] = lfn # needed for verification below\n continue\n else:\n tolog(\"(Not a DBRelease file)\")\n\n tolog(\"Mover is preparing to copy file %d/%d (lfn: %s guid: %s dsname: %s)\" % (nr+1, number_of_files, lfn, guid, dsname))\n tolog('Copying %s to %s (file catalog checksum: \\\"%s\\\", fsize: %s) using %s (%s)' %\\\n (gpfn, path, fchecksum, fsize, sitemover.getID(), sitemover.getSetup()))\n\n # Get the number of replica retries\n get_RETRY_replicas = getNumberOfReplicaRetries(createdPFCTURL, replica_dictionary, guid)\n\n file_access = getFileAccess(access_dict, lfn)\n\n # Loop over get function to allow for multiple get attempts for a file\n will_use_direct_io = False\n get_attempt = 0\n\n #get_RETRY = 1 #2 #PN\n while get_attempt < get_RETRY:\n if get_attempt > 0:\n _rest = 5*60\n tolog(\"(Waiting %d seconds before next stage-in attempt)\" % (_rest))\n sleep(_rest)\n tolog(\"Get attempt %d/%d\" % (get_attempt + 1, get_RETRY))\n replica_number = 0\n replica_transferred = False\n s = 1\n\n # Loop over replicas\n while s != 0 and replica_number < get_RETRY_replicas:\n # Grab the gpfn from the replicas dictionary in case alternative replica stage-in is allowed\n gpfn = getAlternativeReplica(gpfn, guid, replica_number, createdPFCTURL, replica_dictionary)\n\n # Perform stage-in using the sitemover wrapper method\n s, pErrorText, N_files_on_tape, N_root_files, N_non_root_files, replica_transferred, will_use_direct_io = sitemover_get_data(sitemover, error,\\\n get_RETRY, get_RETRY_replicas, get_attempt,\\\n replica_number, N_files_on_tape, N_root_files,\\\n N_non_root_files, gpfn, lfn, path,\\\n fsize=fsize, spsetup=spsetup, fchecksum=fchecksum,\\\n guid=guid, analysisJob=analysisJob, usect=usect,\\\n pinitdir=pinitdir, proxycheck=proxycheck,\\\n sitename=sitename, token=None, timeout=get_TIMEOUT,\\\n dsname=dsname, userid=userid, report=report,\\\n access=file_access, inputDir=inputDir, jobId=jobId,\\\n 
workDir=workDir, cmtconfig=cmtconfig,\\\n experiment=experiment, scope_dict=scope_dict,\\\n sourceSite=sourceSite)\n # Get out of the multiple replica loop\n if replica_transferred:\n break\n\n # Increase the replica attempt counter in case the previous replica could not be transferred\n replica_number += 1\n\n # Get out of the multiple get attempt loop\n if replica_transferred:\n break\n\n # Increase the get attempt counter in case of failure to transfer the file\n get_attempt += 1\n\n # Increase the successful file transfer counter (used only when reporting FAX transfers)\n if s == 0:\n # note the special case if FAX is the primary site mover (normally FAX is the fallback)\n if sitemover.copyCommand == \"fax\":\n N_filesWithFAX += 1\n bytesWithFAX += long(fsize)\n else:\n # Normal case\n N_filesWithoutFAX += 1\n bytesWithoutFAX += long(fsize)\n\n if s != 0:\n # Normal stage-in failed, now try with FAX if possible\n if error.isPilotFAXErrorCode(s):\n if isFAXAllowed(filetype, gpfn) and transferType != \"fax\" and sitemover.copyCommand != \"fax\": # no point in trying to fallback to fax if the fax transfer above failed\n tolog(\"Normal stage-in failed, will attempt to use FAX\")\n usedFAXMode = True\n\n # Get the FAX site mover\n old_sitemover = sitemover\n sitemover = getSiteMover(\"fax\", \"\")\n\n # Perform stage-in using the sitemover wrapper method\n s, pErrorText, N_files_on_tape, N_root_files, N_non_root_files, replica_transferred, will_use_direct_io = sitemover_get_data(sitemover, error,\\\n get_RETRY, get_RETRY_replicas, get_attempt, replica_number,\\\n N_files_on_tape, N_root_files, N_non_root_files,\\\n gpfn, lfn, path,\\\n fsize=fsize, spsetup=spsetup, fchecksum=fchecksum,\\\n guid=guid, analysisJob=analysisJob, usect=usect,\\\n pinitdir=pinitdir, proxycheck=proxycheck,\\\n sitename=sitename, token=None, timeout=get_TIMEOUT,\\\n dsname=dsname, userid=userid, report=report,\\\n access=file_access, inputDir=inputDir, jobId=jobId,\\\n workDir=workDir, cmtconfig=cmtconfig, experiment=experiment)\n if replica_transferred:\n tolog(\"FAX site mover managed to transfer file from remote site (resetting error code to zero)\")\n pilotErrorDiag = \"\"\n s = 0\n\n # Increase the successful FAX transfer counter\n N_filesWithFAX += 1\n bytesWithFAX += long(fsize)\n else:\n tolog(\"FAX site mover also failed to transfer file from remote site, giving up\")\n\n # restore the old sitemover\n del sitemover\n sitemover = old_sitemover\n else:\n tolog(\"(Not an error code eligible for FAX fail-over)\")\n\n if s != 0:\n tolog('!!FAILED!!2999!! Failed to transfer %s: %s (%s)' % (os.path.basename(gpfn), s, error.getErrorStr(s)))\n tolog(\"Exit code: %s\" % (s))\n\n # report corrupt file to consistency server if needed\n if s == error.ERR_GETADMISMATCH or s == error.ERR_GETMD5MISMATCH or s == error.ERR_GETWRONGSIZE or s == error.ERR_NOSUCHFILE:\n reportFileCorruption(gpfn, sitemover)\n\n # exception for object stores\n if (gpfn.startswith(\"s3:\") or 'objectstore' in gpfn) and '.log.tgz' in gpfn:\n tolog(\"!!FAILED!!2999!! Failed to transfer a log file from S3 objectstore. 
Will skip it and continue the job.\")\n else:\n fail = s\n break\n\n # Build the dictionary used to create the PFC for the TRF\n # In the case of FAX, use the global paths if direct access is to be used for the particlar file\n if usedFAXMode and will_use_direct_io:\n # The site mover needed here is the FAX site mover since the global file path methods are defined there only\n old_sitemover = sitemover\n sitemover = getSiteMover(\"fax\", \"\")\n guidfname[guid] = sitemover.findGlobalFilePath(lfn, dsname, sitename, sourceSite)\n\n # Restore the old sitemover\n del sitemover\n sitemover = old_sitemover\n\n # If FAX is used as a primary site mover, in combination with direct access, set the usedFAXandDirectIO flag\n # this will later be used to update the run command (e.g. --lfcHost is not needed etc)\n if copycmd == \"fax\":\n usedFAXandDirectIO = True\n else:\n guidfname[guid] = lfn # local_file_name\n\n if fail == 0:\n # Make sure the PFC has the correct number of files\n fail, pilotErrorDiag = verifyPFCIntegrity(guidfname, lfns, dbh, DBReleaseIsAvailable, error)\n\n # Now that the Mover PFC file is no longer needed, back it up and rename the TURL based PFC if it exists\n # (the original PFC is no longer needed. Move it away, and then create the PFC for the trf/runAthena)\n # backupPFC4Mover(pfc_name)\n\n # Create a standard PFC with SURLs if needed (basically this is default)\n # note: if FAX was used as a primary site mover in combination with direct I/O, then the SURLs will actually be TURLs\n # but there is no need to use the special TURL creation method PFC4TURL used above (FAX will have returned the TURLs instead)\n createStandardPFC4TRF(createdPFCTURL, pfc_name_turl, pfc_name, guidfname)\n\n tolog(\"Number of identified root files : %d\" % (N_root_files))\n tolog(\"Number of transferred non-root files: %d\" % (N_non_root_files))\n\n if usedFAXMode:\n tolog(\"Number of files without FAX : %d (normal transfers)\" % (N_filesWithoutFAX))\n tolog(\"Number of files with FAX : %d (successful FAX transfers)\" % (N_filesWithFAX))\n tolog(\"Bytes without FAX : %d (normal transfers)\" % (bytesWithoutFAX))\n tolog(\"Bytes with FAX : %d (successful FAX transfers)\" % (bytesWithFAX))\n\n if N_files_on_tape > 0:\n tolog(\"!!WARNING!!2999!! Number of skipped files: %d (not staged)\" % (N_files_on_tape))\n if N_root_files == 0:\n # This should only happen for user jobs\n tolog(\"Mover get_data failed since no root files could be transferred\")\n fail = error.ERR_NOSTAGEDFILES\n else:\n tolog(\"Mover get_data finished (partial)\")\n else:\n if fail == 0:\n tolog(\"Get successful\")\n tolog(\"Mover get_data finished\")\n else:\n tolog(\"Mover get_data finished (failed)\")\n tolog(\"Will return exit code = %d, pilotErrorDiag = %s\" % (fail, pilotErrorDiag)) \n\n # Now populate the FAX dictionary before finishing\n FAX_dictionary = getFAXDictionary(N_filesWithoutFAX, N_filesWithFAX, bytesWithoutFAX, bytesWithFAX, usedFAXandDirectIO)\n\n return fail, pilotErrorDiag, statusPFCTurl, FAX_dictionary"
]
| [
"0.6040113",
"0.5981123",
"0.59303576",
"0.58975035",
"0.5869871",
"0.57901585",
"0.576051",
"0.5668672",
"0.56307334",
"0.5579675",
"0.554138",
"0.55237526",
"0.55177534",
"0.5482741",
"0.544993",
"0.54196256",
"0.53621525",
"0.5353647",
"0.5353226",
"0.53431803",
"0.533402",
"0.533205",
"0.5272234",
"0.5256264",
"0.52555424",
"0.52370554",
"0.5233185",
"0.5225165",
"0.52202725",
"0.5217287"
]
| 0.8253348 | 0 |
Load States into database from a text file. | def load_states():
print "States and Territories"
State.query.delete()
for row in open("data/states_and_territories.txt"):
row = row.rstrip()
# can't seem to get rid of "\r" character other than doing a .split
piped_rows = row.split("\r")
for i in piped_rows:
state_info = i.split("|")
state_name = state_info[0]
state_code = state_info[1]
state = State(state_name=state_name, state_code=state_code)
db.session.add(state)
db.session.commit()
print "States seeded" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def Load(self, filename):\n\n self.sm['state'] = self.AddState\n self.sm['condition'] = self.AddCondition\n exec(open(filename).read(), self.sm)\n self.name = self.sm['name']\n if not self.name.isalnum():\n raise Exception(\"State machine name must consist of only alphanumeric\"\n \"characters.\")\n self.comment = self.sm['comment']",
"def read_from_file(self, flo):\n # get initial states\n match = re.match(r\"Initial State\\:\\s*\\{(.*)\\}\", flo.readline())\n self.initial_state = int(match.groups()[0])\n\n # get final states\n match = re.match(r\"Final States\\:\\s*\\{(.*)\\}\", flo.readline())\n self.final_states = [\n int(state) for state in match.groups()[0].split(',')]\n\n # get state count - we don't actually need this\n match = re.match(r\"Total States\\:\\s*(\\d*)$\", flo.readline())\n num_states = int(match.groups()[0])\n\n # get state names\n match = re.match(r\"State\\s*(.*)\\s*$\", flo.readline())\n symbol_names = [name.strip() for name in match.groups()[0].split()]\n\n # get transitions\n state_pattern = r\"(\\d*)\\s*\" + r\"\\s*\".join(\n r\"\\{(.*)\\}\" for _ in symbol_names)\n reo = re.compile(state_pattern)\n transitions = {}\n for state_string in flo.readlines():\n groups = reo.match(state_string).groups()\n from_state = int(groups[0])\n end_state_strings = groups[1:]\n transitions[from_state] = {}\n for symbol, end_states in zip(symbol_names, end_state_strings):\n if end_states:\n transitions[from_state][symbol] = [\n int(state) for state in end_states.split(\",\")]\n self.transitions = transitions\n\n symbol_names.remove(NULL) # get alphabet by removing null symbol\n self.alphabet = symbol_names",
"def __loadFromFile(self):\n fh = open(self.__fileName)\n for line in fh:\n if line.strip() == \" \":\n continue # we have an empty line, just skip\n st = self.__createStudentFromLine(line)\n # invoke the store method from the base class\n StudentsRepo.store_student(self, st)\n fh.close()",
"def load_file():\n global list_of_table, data_base, new_data\n open_name = askopenfilename()\n\n if Path(open_name).suffix == '.db':\n data_base = open_name\n data_base = str(data_base)\n new_data_base = parse(data_base)\n new_data = update_list_tables(new_data_base)\n new_data.clear()\n\n else:\n mistake_db_file()",
"def load_map(self, filename):\n with open(filename, 'rb') as file:\n self.current_obstacles = pickle.load(file)\n self.current_goal = pickle.load(file)\n try:\n setstate(pickle.load(file))\n except EOFError:\n print(\"No random state stored\")",
"def load(self, file_name_with_path: str):\n\n if self.state._models is None:\n self.register_models()\n logger.info(\"Agent State loaded successfully\")\n for k, model in self.state._models.items():\n model.load(file_name_with_path=os.path.join(f'{file_name_with_path}_{model.name}.th'))\n logger.info(f'{file_name_with_path}_{model.name}.th loaded')\n logger.info(f\"{model.name} model loaded successfully\")\n self.state = Munch(json.load(open(file_name_with_path + \".meta\")))",
"def loadState(self, file):\n if isinstance(file, str):\n with open(file, 'r') as f:\n xml = f.read()\n else:\n xml = file.read()\n self.context.setState(mm.XmlSerializer.deserialize(xml))",
"def load_input(filename: str) -> list:\n\n text_stream = io.open(filename, 'r', encoding='utf-8', errors='ignore', newline='\\n')\n \"\"\" Calls Python's io function to read the file with the specified name.\"\"\"\n\n initial_state = []\n for i in range(0, 4):\n initial_state.append(list(map(int, text_stream.readline().rstrip().split(' '))))\n \"\"\" The rstrip method removes all trailing whitespace of the string. The split \n method uses the given character as the delimiter to break down the string and \n return a list of the substrings. The map function takes that list, converts \n the substrings into integers and returns a map object, which is eventually \n converted into a list by the exterior call to the list function. \"\"\"\n\n \"\"\" A state is represented as a multi-layer list. The first layer contains \n the four rows, each of which is a second layer that consists of four tiles. \"\"\"\n\n blank_line = text_stream.readline()\n \"\"\" In the input file, there is a blank line in between the two states.\"\"\"\n\n goal_state = []\n for i in range(0, 4):\n goal_state.append(list(map(int, text_stream.readline().rstrip().split(' '))))\n \"\"\" The construct of this part is identical to the one above. \"\"\"\n\n text_stream.close()\n\n ret = [initial_state, goal_state]\n \"\"\" Returns the two lists that represent the initial and goal states, \n respectively. \"\"\"\n return ret",
"def load(self, file_name):\n with open(file_name, \"r\") as fp:\n print >> sys.stderr, \"loading businesses data from %s...\" %(file_name)\n for line in fp.readlines():\n items = line.split(\"\\t\")\n business_id, business_data = json.loads(items[0]), json.loads(items[1])\n\n self.db[business_id] = BusinessInst({\n \"business_id\" : business_id,\n \"stars\" : business_data[0], # this average score is provided by yelp\n \"ratings\" : business_data[1], # this average score is computed by ourselves\n \"review_count\" : business_data[2],\n \"categories\" : business_data[3],\n \"pos_reviews\" : business_data[4],\n \"neg_reviews\" : business_data[5]\n })\n print >> sys.stderr, \"succ, %s records loaded\" %(len(self.db))",
"def load(self):\n logger.debug('Loading state from file %s', self.file_path)\n\n with open(self.file_path, 'rb') as f:\n self.data = pickle.load(f)",
"def load_state(self, X, file):\n self._initializing_corpus(X, file)\n self.loaded = True",
"def load_checkpoint(self, file):\n \"\"\"Load \"\"\"\n chkpnt = torch.load(file)\n self.load_state_dict(chkpnt['model_state_dict'])",
"def load_state_codes(data):\n state_codes = [address['state'] for address in data]\n state_codes_objects = [State(data=state_code)\n for state_code\n in state_codes]\n State.objects.bulk_create(state_codes_objects)",
"def load_categories():\n\n Category.query.delete()\n\n with open(category_file) as f:\n for _ in range(1):\n next(f)\n \n for row in f:\n row = row.rstrip()\n categories_data = row.split(\",\")\n\n id = int(categories_data[0])\n category = categories_data[1]\n\n category_model = Category(id=id, category=category)\n db.session.add(category_model)\n db.session.commit()",
"def load(self, path):\n self.load_state_dict(torch.load(path))",
"def load(self, path):\n self.load_state_dict(torch.load(path))",
"def read_file(self, filename):\n with open(filename, 'r') as file:\n for line in file:\n l = line.strip()\n\n if l == ST_POS0:\n self._state = ST_POS0\n elif l == ST_TRNS:\n self._state = ST_TRNS\n elif l == ST_POS1:\n self._state = ST_POS1\n else:\n self._parse_line(l)\n self._state = None",
"def load_file(self):\n try:\n f = open(self._file_name, \"r\")\n line = f.readline()\n while len(line) > 0:\n super(RentalHistoryText, self).add_rental(self.string_to_obj(line))\n line = f.readline()\n f.close()\n except IOError as e:\n raise e",
"def load_stop_table(self, filename):\n self.stop_table = HashTable(191)\n with open(filename, 'r') as f:\n for word in f.readlines():\n self.stop_table.insert(word.replace('\\n',''),None)",
"def import_db(import_file):\n import_data(import_file)",
"def load_data_to_db(self, path):\n table_names = ['train_transaction', 'train_identity', 'test_transaction', 'test_identity']\n for table_name in table_names:\n pat = self.TRANSACTION_NON_NUMBER_PATTERN if 'transaction' in table_name else self.IDENTITY_NON_NUMBER_PATTERN\n print(\"Loading table: \" + table_name)\n fn = os.path.join(path, table_name + '.csv')\n self.dbinstance.build_table_from_csv(fn, pat, table_name)\n print(\"Loaded table \" + table_name)",
"def load_from_file(self, file_path):\n board_f = open(file_path, 'r')\n row = board_f.readline().strip('\\n')\n self.data = []\n while row != '':\n self.data.append(list(row.split()))\n row = board_f.readline().strip('\\n')\n board_f.close()",
"def load_from_file(self, file_path):\n board_f = open(file_path, 'r')\n row = board_f.readline().strip('\\n')\n self.data = []\n while row != '':\n self.data.append(list(row.split()))\n row = board_f.readline().strip('\\n')\n board_f.close()",
"def load_db(db_file):\n db = {}\n logging.info('loading weighted vectors from {0}'.format(db_file))\n with open(db_file, 'r') as f:\n for line in f:\n j = json.loads(line)\n db.update(j)\n return db",
"def read_states(self, filename: str, comment: str = None) -> pd.DataFrame:\n self.states = self._parse(filename, comment=comment)\n self.states['name'] = self.states['name'].astype('str')",
"def _load_fixture(filename):\n\n # Read the binary data into text\n with open(filename, 'rb') as stream:\n content = stream.read().decode('utf-8')\n\n # Decode the data as JSON\n data = json.loads(content)\n\n # Instantiate a session.\n session = Session()\n\n # Iterate through the entries to add them one by one.\n for item in data:\n # Resolve model from the table reference.\n table = Base.metadata.tables[item['model'].split('.')[-1]]\n\n # Add the primary key.\n item['fields']['id'] = item['pk']\n\n # Add a new row.\n session.connection().execute(table.insert().values(**item['fields']))\n\n # Commit the session to the database.\n session.commit()",
"def load_map(path):\n file = open(path + '.txt', 'r')\n data = file.read().split('\\n')\n game_map = []\n file.close()\n for row in data:\n game_map.append(list(row))\n return game_map",
"def load_db(file):\n if os.path.isfile(file):\n try:\n start = time.time()\n db = []\n with open(file, 'r') as f:\n for item in json_lines.reader(f):\n db.append(item)\n stop = time.time() - start\n print(\"load_db time: \", stop, 'sec')\n return db\n except Exception as e:\n print(file, \"is probably corrupted. Creating empty db now...\")\n DbManager.erase_db(file)\n raise e\n\n else:\n # corrupt...\n print(\"database not found. creating new\")\n DbManager.new_db(file)",
"def read_fa_from_file(path):\n states = set()\n transitions = {}\n with open(path, \"r\") as f:\n line = f.readline().strip()\n alphabet = line.split(' ')\n final_states = set(f.readline().strip().split(\" \"))\n while line != \"\":\n line = f.readline().strip()\n elems = line.split(\" \")\n if len(elems) == 1:\n start_state = elems[0]\n return FiniteAutomata(alphabet, transitions, states, final_states, start_state)\n else:\n states.add(elems[0])\n states.add(elems[1])\n curr_line_states = transitions.get(elems[0], [])\n if not curr_line_states:\n transitions[elems[0]] = [Transition(elems[1], elems[2])]\n else:\n transitions[elems[0]].append(Transition(elems[1], elems[2]))\n raise FileNotFoundError(path)",
"def load(self, from_path):\n with open(from_path, 'rb') as f:\n self.load_state_dict(torch.load(f))"
]
| [
"0.67029935",
"0.64187706",
"0.636451",
"0.6263663",
"0.617526",
"0.61659336",
"0.6165151",
"0.6137184",
"0.6128943",
"0.6117616",
"0.6106107",
"0.60338277",
"0.6024515",
"0.60234845",
"0.5983557",
"0.5983557",
"0.59706086",
"0.5956377",
"0.5955591",
"0.5950796",
"0.5930032",
"0.59268004",
"0.59268004",
"0.59139556",
"0.5912397",
"0.590107",
"0.58958215",
"0.58829963",
"0.58819944",
"0.5873717"
]
| 0.81981784 | 0 |
Return a tuple representing the upper and lower values of the price range at the given index. If there is only one data series, return the tuple (value, None). | def get_value_at_index(self, index, cc):
high = cc.dsget('high')
low = cc.dsget('low')
return (high[index], low[index]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_bounds(self, value = None, index = None):\n\n if self._data is None or 0 in self._data.shape:\n return (0.0, 0.0)\n\n if type(value) == types.IntType:\n if self.value_dimension == 0:\n maxi = nanmax(self._data[value, ::])\n mini = nanmin(self._data[value, ::])\n else:\n # value_dimension == 1\n maxi = nanmax(self._data[::, value])\n mini = nanmin(self._data[::, value])\n elif type(index) == types.IntType:\n if self.index_dimension == 0:\n maxi = nanmax(self._data[index, ::])\n mini = nanmin(self._data[index, ::])\n else:\n # index_dimension == 1\n maxi = nanmax(self._data[::, index])\n mini = nanmin(self._data[::, index])\n else:\n # value is None and index is None:\n maxi = nanmax(self._data)\n mini = nanmin(self._data)\n\n return (mini, maxi)",
"def range(self) -> ty.Tuple[float, float]:\r\n ...",
"def get_bounds(self, t_index):\n mean = self.get_mean(t_index)\n std = self.get_std()\n return mean - std, mean + std",
"def get_range(self) -> tuple[int, int]:\n return self.range_from, self.range_to",
"def position_tuple_for_index(self, index):\n x = self.base_values[index % self.size]\n y = self.base_values[index // self.size]\n return x, y",
"def _value_in_bounds(self, vals):\n return (self._min_in_bounds(vals[0]), self._max_in_bounds(vals[1]))",
"def _get_range(self):\n return tuple((0, m, 1) for m in self.level_shapes[0])",
"def get_xrange_indices(self, lower, upper) -> Tuple[int, int]:\n lower_index = np.argmax(self.x >= lower)\n upper_index = np.argmax(self.x >= upper)\n return int(lower_index), int(upper_index)",
"def get_value_tuple(self, index):\n return (self.color_lookup_table_points[0][index],\n self.color_lookup_table_points[1][index],\n self.color_lookup_table_points[2][index])",
"def range_(self):\n return tuple((e[0], e[-1]) for e in self.edges)",
"def _full_value_range(self):\n min_value, max_value = self._raw_data.data_range\n return max_value - min_value",
"def get_range(cls, data: tuple or list) -> float:\n cls._data_validation(data)\n max_ = cls.get_max(data)\n min_ = cls.get_min(data)\n return float(max_ - min_)",
"def _computeRangeFromData(data):\n if data is None:\n return None\n\n dataRange = min_max(data, min_positive=True, finite=True)\n if dataRange.minimum is None: # Only non-finite data\n return None\n\n if dataRange is not None:\n min_positive = dataRange.min_positive\n if min_positive is None:\n min_positive = float('nan')\n return dataRange.minimum, min_positive, dataRange.maximum",
"def get_value_at_index(self, index, cc):\n tl = cc.dsget(self.title)\n return (tl[index], None)",
"def range(self) -> Tuple[Union[int, float], Union[int, float]]:\n return self._range",
"def eta_range(self):\n\t\tticks = self.eta_details.keys()\n\t\treturn min(ticks), max(ticks)",
"def get_outlier_data(cls, data: tuple or list, remove_outliers=False) -> tuple:\n cls._data_validation(data)\n q1, q2, q3, iqr = cls.get_quartile_data(data)\n data_without_outliers = list()\n outliers_list = list()\n lower_out_bound, upper_out_bound = q1 - 1.5*iqr, q3 + 1.5*iqr\n print(lower_out_bound, upper_out_bound)\n for i in range(len(data)):\n if lower_out_bound <= data[i] <= upper_out_bound:\n data_without_outliers.append(data[i])\n else:\n outliers_list.append(data[i])\n if remove_outliers:\n return tuple(data_without_outliers)\n else:\n return tuple(outliers_list)",
"def data_range(self, n=-1):\n if self.rotate and len(self.results['velocities']) > 1:\n # Then we can use the last two velocities and time to extrapolate the line.\n prevVel = self.results['velocities'][-2:]\n prevTime = self.results['times'][-2:]\n\n coefficents = imageRot.computeFit(prevTime, prevVel)\n nextCenter = imageRot.extrapolate(coefficents, self.time[self.time_index])\n\n velocity_index = self.spectrogram._velocity_to_index(nextCenter)\n start_index = max(0, velocity_index - 2*self.span)\n end_index = min(velocity_index + 2*self.span,\n len(self.spectrogram.velocity))\n \n return start_index, end_index, coefficents\n # Compute the angle.\n if len(self.results['velocities']) > 0:\n last_v = self.results['velocities'][n]\n else:\n last_v = self.v_start\n velocity_index = self.spectrogram._velocity_to_index(last_v)\n start_index = max(0, velocity_index - self.span)\n end_index = min(velocity_index + self.span,\n len(self.spectrogram.velocity))\n return (start_index, end_index)",
"def index_getter(start_position, end_position, position_index_list):\n index_min = [tup[0] for tup in position_index_list if tup[1] >= start_position]\n index_max = [tup[0] for tup in position_index_list if tup[1] <= end_position]\n ind_min = min(index_min)\n ind_max = max(index_max)\n return ind_min, ind_max",
"def getRange(self) -> Tuple[int, int]:\n return self.validator().bottom(), self.validator().top()",
"def getDataRange(self):\n return None if self._dataRange is None else tuple(self._dataRange)",
"def _series_handler(self, values, style, caller, *args):\n\n behaviors = {\"over\": values.ge,\n \"under\": values.lt}\n\n evaluated = values[behaviors.get(caller)(self.margin)]\n\n if style == \"values\":\n return evaluated\n else:\n return list(evaluated.index)",
"def get_range(self, start=None, end=None):\n\n # handle the case of no data\n if self.data.shape[0] == 0 or self.source.data[\"index\"].shape[0] == 0:\n return None, None\n\n first_source_idx = self.source.data[\"index\"][0]\n last_source_idx = self.source.data[\"index\"][-1]\n\n # convert to timestamp if necessary\n if isinstance(self.data.index, pd.DatetimeIndex):\n start = pd.to_datetime(start, unit=\"ms\")\n end = pd.to_datetime(end, unit=\"ms\")\n first_source_idx = pd.to_datetime(first_source_idx, unit=\"ms\")\n last_source_idx = pd.to_datetime(last_source_idx, unit=\"ms\")\n\n # get new start and end\n if start is not None:\n if start < first_source_idx:\n start = max(self.data.index[0], start)\n elif start > last_source_idx:\n start = min(self.data.index[-1], start)\n elif start < self.data.index[0]:\n start = self.data.index[0]\n elif start > self.data.index[-1]:\n start = self.data.index[-1]\n elif len(self.source.data[\"index\"]) > 0:\n start = first_source_idx\n else:\n start = self.data.index[0]\n\n if end is not None:\n if end < first_source_idx:\n end = max(self.data.index[0], end)\n elif end > last_source_idx:\n end = min(self.data.index[-1], end)\n elif end < self.data.index[0]:\n end = self.data.index[0]\n elif end > self.data.index[-1]:\n end = self.data.index[-1]\n elif len(self.source.data[\"index\"]) > 0:\n end = last_source_idx\n else:\n end = self.data.index[-1]\n\n return start, end",
"def values(self):\n lower = float(self.lowerSpnbx.value())\n upper = float(self.upperSpnbx.value())\n return lower, upper",
"def getValueAt(*args):\n return _osgAnimation.OutQuartFunction_getValueAt(*args)",
"def calc_minmax(idx, *rest):\n\n if isinstance(idx, (pd.Series, pd.DataFrame)):\n xmin, xmax = idx.index.min(), idx.index.max()\n ymin, ymax = idx.min(), idx.max()\n else:\n xmin, xmax = idx.min(), idx.max()\n ymin, ymax = float('inf'), -float('inf')\n\n for r in rest:\n if r is not None:\n if isinstance(r, (pd.Series, pd.DataFrame)):\n xmin = min(xmin, r.index.min())\n xmax = max(xmax, r.index.max())\n ymin = min(ymin, r.min())\n ymax = max(ymax, r.max())\n\n return xmin, xmax, ymin, ymax",
"def data_range(x):\n return max(x)-min(x)",
"def possible_vals(pp):\n\n if pp[\"type\"] == \"w\":\n vals = [0, pp[\"pmax\"]]\n\n elif pp[\"type\"] == \"windturbine\":\n vals = [0, pp[\"pmin\"]]\n for i in range(pp[\"pmin\"], pp[\"pmax\"] - pp[\"pmin\"] + 1):\n vals.append(pp[\"pmin\"] + i)\n\n else: # Turbojet\n vals = [0]\n for i in range(pp[\"pmin\"], pp[\"pmax\"] - pp[\"pmin\"]):\n vals.append(pp[\"pmin\"] + i)\n return vals",
"def range(series):\n return min(series), max(series)",
"def price_set(self, price):\n \n if price != None:\n start_price = end_price = ''\n for k, each_price in enumerate(price):\n if k == 0 and each_price != None:\n start_price = each_price\n elif k == 1 and each_price != None:\n end_price = each_price\n if end_price == '':\n end_price = start_price\n else:\n start_price = end_price = 'NIL'\n \n return start_price, end_price"
]
| [
"0.62020886",
"0.58060557",
"0.57509655",
"0.5696991",
"0.5630712",
"0.5555234",
"0.549801",
"0.5446549",
"0.54304546",
"0.5422337",
"0.54095584",
"0.5409541",
"0.5402449",
"0.53884476",
"0.5376142",
"0.5351813",
"0.5332987",
"0.53233963",
"0.5312718",
"0.53064764",
"0.5296712",
"0.52947223",
"0.5277367",
"0.5273699",
"0.52562046",
"0.52323925",
"0.5220863",
"0.52115315",
"0.52041066",
"0.51876163"
]
| 0.6731937 | 0 |
Trains one elastic logistic classifier per review group. Saves the trained classifiers within self.models. | def train(self, x_train, y_train):
# convert input to format for classifier
list_of_embeddings = list(x_train[self.embeddings_col])
x_train = np.array([[float(i) for i in embedding.strip('[]').split()] for embedding in list_of_embeddings])
# discard fold ID column from labels
review_groups = [col for col in y_train.columns if not col=='k']
for review_group in tqdm(review_groups, desc='Train Review Groups'):
# pull label column
labels = y_train[review_group]
# logistic classifier
classifier = SGDClassifier(loss="log", alpha=self.alpha,
l1_ratio = self.l1_ratio, penalty="elasticnet").fit(x_train, labels)
# save the model in dictionary of models
self.models[review_group] = classifier | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def apply_classifier(self):\n for detected_object in self.detected_objects:\n detected_object.predict_class(self.original_image)",
"def retrain(self):\n thread = Thread(target=self.trainer.train_classifier)\n thread.start()",
"def trainModel( self, featureTrain, classTrain):",
"def train(self):\n self.ae_train(self.net0, self.ae0_optimizer, self.train0_loader, self.val_loader, name='Net0')\n self.ae_train(self.net1, self.ae1_optimizer, self.train1_loader, self.val_loader, name='Net1')\n self.ae_train(self.net2, self.ae2_optimizer, self.train2_loader, self.val_loader, name='Net2')\n\n self.classifier_train(self.net0, self.optimizer0, self.train0_loader, self.val_loader, name='Net0')\n self.classifier_train(self.net1, self.optimizer1, self.train1_loader, self.val_loader, name='Net1')\n self.classifier_train(self.net2, self.optimizer2, self.train2_loader, self.val_loader, name='Net2')",
"def train(self):\n for data_tier in self.data_tiers:\n fd = open(self.data_path + '/training_data_' + data_tier + '.json', 'r')\n self.preprocessed_data[data_tier] = json.load(fd)\n fd.close()\n tot = len(self.preprocessed_data[data_tier]['features'])\n p = int(math.ceil(tot*0.8))\n training_features = np.array(self.preprocessed_data[data_tier]['features'][:p])\n trend_training_classifications = np.array(self.preprocessed_data[data_tier]['trend_classifications'][:p])\n avg_training_classifications = np.array(self.preprocessed_data[data_tier]['avg_classifications'][:p])\n t1 = datetime.datetime.utcnow()\n self.clf_trend[data_tier].fit(training_features, trend_training_classifications)\n self.clf_avg[data_tier].fit(training_features, avg_training_classifications)\n t2 = datetime.datetime.utcnow()\n td = t2 - t1\n self.logger.info('Training %s for data tier %s took %s', self.name, data_tier, str(td))\n joblib.dump(self.clf_trend[data_tier], self.data_path + '/' + self.name + '_trend_' + data_tier + '.pkl')\n joblib.dump(self.clf_avg[data_tier], self.data_path + '/' + self.name + '_avg_' + data_tier + '.pkl')",
"def _train_model(self):\n self.experiment = EpisodicExperiment(self.task, self.agent)\n n_epochs = int(self.rl_params.n_training_episodes / self.rl_params.n_episodes_per_epoch)\n logger.debug(\"Fitting user model over {} epochs, each {} episodes, total {} episodes.\"\n .format(n_epochs, self.rl_params.n_episodes_per_epoch, n_epochs*self.rl_params.n_episodes_per_epoch))\n for i in range(n_epochs):\n logger.debug(\"RL epoch {}\".format(i))\n self.experiment.doEpisodes(self.rl_params.n_episodes_per_epoch)\n self.agent.learn()\n self.agent.reset() # reset buffers",
"def train_elastic_net_model(\n x,\n y,\n outer_cv_splits: int,\n inner_cv_splits: int,\n l1_ratio: List[float],\n model_name: str,\n max_iter: Optional[int] = None,\n export: bool = True,\n) -> Tuple[List[float], List[float]]:\n auc_scores = []\n auc_pr_scores = []\n it = _help_train_elastic_net_model(\n x=x,\n y=y,\n outer_cv_splits=outer_cv_splits,\n inner_cv_splits=inner_cv_splits,\n l1_ratio=l1_ratio,\n max_iter=max_iter,\n )\n\n # Iterator to calculate metrics for each CV step\n for i, (glm_elastic, y_test, y_pred) in enumerate(it):\n logger.info(f'Iteration {i}: {glm_elastic.get_params()}')\n auc_scores.append(roc_auc_score(y_test, y_pred))\n auc_pr_scores.append(average_precision_score(y_test, y_pred))\n\n # Export a pickle the model of the given CV\n if export:\n import joblib\n joblib.dump(glm_elastic, os.path.join(CLASSIFIER_RESULTS, f'{model_name}_{i}.joblib'))\n\n # Return a list with all AUC/AUC-PR scores for each CV step\n return auc_scores, auc_pr_scores",
"def __train_model(self):\n logger.info(\"Training the ALS model...\")\n self.model = ALS.train(self.ratings_RDD, self.rank, seed=self.seed,\n iterations=self.iterations, lambda_=self.regularization_parameter)\n logger.info(\"ALS model built!\")",
"def __train_model(self):\n logger.info(\"Training the ALS model...\")\n self.model = ALS.train(self.ratings_RDD, self.rank, seed=self.seed,\n iterations=self.iterations, lambda_=self.regularization_parameter)\n logger.info(\"ALS model built!\")",
"def train(self, train_instances, train_labels, update_cache=True,\n sample_weight=None):\n self.classifier.fit(train_instances, train_labels, sample_weight=sample_weight)\n if update_cache:\n pickle.dump(self.classifier, open(self.cache_filename, 'wb'))",
"def train_all_curated(self, bench=False):\n train_X, train_y = self.format_input(self.M.curated_genes, self.neg_train_genes)\n self.train(train_X, train_y)\n pkl.dump(self, open(self.save_path + '/nash_model_trained.pkl', 'wb'))\n if bench:\n self.benchmark(train_X, train_y)\n\n # do feature selection on dataset as a whole so it is easier to be scored\n if self.feat_sel:\n self.dataset = pd.DataFrame(self.skb.transform(self.dataset), index=self.dataset.index)",
"def do_training(self):\n json_data = request.data\n global g_list_of_classifier\n\n datas = json.loads(json_data.decode('UTF-8')) #datas = liste\n\n for ite_clf in g_list_of_classifier:\n for data in datas:\n ite_clf.add_data(data['score'], data['answer'])\n print(ite_clf.get_info())\n return ''",
"def train_model(self):\r\n alpha, accuracy_rate = self.select_model()\r\n # Initialize logistic regression with alpha(learning rate)\r\n lg = logisticregression(C=alpha)\r\n # Train the model.\r\n lg.fit(self.training_data, self.target_data)\r\n # Save the trained model as .pkl file.\r\n joblib.dump(value=lg, filename=self.intention_id+'.pkl', compress=1)\r\n print \"Estimated Parameters of Logistic Regression\"\r\n # Estimated parameters of logistic regression.\r\n print lg.get_params()",
"def train(self):\n # 1. Extracting details of attributes\n\n self.get_attribute_data()\n if self.train_data is None and self.train_data_file is None:\n raise ValueError(\"Neither training data not training file provided\")\n\n self.get_train_data()\n self.classifier = self.build_tree(rows=self.train_data, attribute_list=self.attribute_names)",
"def set_train(self):\n for m in self.models.values():\n m.train()",
"def train(self):\n sys.stderr.write(\"Training Multi-layer Perceptron classifier...\")\n self.clf.fit(self.m_features,self.target)\n sys.stderr.write(\"\\n\")\n \n # save the trained model to file\n with open(os.path.join(self.conf[\"paths\"][\"dir\"],self.conf[\"paths\"][\"mlp\"]),\"w\") as f:\n pickle.dump(self.clf,f)\n \n sys.stderr.write(\"Calculating MLP Predictions...\")\n for artist in self.artists:\n # predict the similarities\n predicted_similar = self.clf.predict(self.m_features[artist._id].reshape(1,-1))\n \n # and convert to the appropriate format\n for i,yes in enumerate(predicted_similar[0]):\n if yes == 1:\n artist.predicted_similar.append(i)\n sys.stderr.write(\"\\n\")\n\n self.calc_stats()",
"def train(self):\n if self.input_col is None:\n raise Exception(\"Preprocessing not specified\")\n self.classifier_model.train(self.input_col, self.output_col)",
"def train(self):\n feature = Feature(trained=False)\n classifier = LogisticRegression(\n penalty='l2',\n max_iter=100,\n solver='liblinear',\n random_state=self.RAND_SEED)\n\n true_labels = []\n predicted_labels = []\n\n for subj in self.subjects:\n print(subj)\n # preprocess training and testing set\n self.dataset_gen(subject=subj, valid=False)\n\n # train and predict\n pipeline_steps = [('vectorized', feature.vector)]\n if self.istfidf:\n pipeline_steps.append(('tf-idf', feature.tfidftransform))\n if self.islda == 'small':\n pipeline_steps.append(('lda', feature.ldatransform_small))\n elif self.islda == 'large':\n pipeline_steps.append(('lda', feature.ldatransform_large))\n else:\n pass\n if self.isnorm:\n pipeline_steps.append(('scalar', StandardScaler(with_mean=False)))\n pipeline_steps.append(('clf', classifier))\n model = Pipeline(steps=pipeline_steps)\n\n model.fit(self.X_train, self.y_train)\n\n predicted = model.predict(self.X_test)\n # hamming\n predicted_labels.append(predicted)\n true_labels.append(self.y_test)\n\n true_matrix, pred_matrix = np.array(true_labels, int).T, np.array(predicted_labels, int).T\n true_matrix[true_matrix == -1] = 0\n pred_matrix[pred_matrix == -1] = 0\n\n evaluation = Evaluation(self.subjects)\n evaluation.model_evaluate(true_matrix=true_matrix, pred_matrix=pred_matrix, model_name=self.modelname)",
"def _post_transform(self):\n # Reclassify strategy post __init__, if needed.\n for (reclassifier, args, kwargs) in self._reclassifiers:\n self.classifier = reclassifier(self.classifier, *args, **kwargs)",
"def __train_model(self):\n for i in range(self.file_index):\n logger.info(\"Training the ALS model dataset \" + str(i))\n self.als = ALS(maxIter=5, regParam=0.01, userCol=\"UserId\", itemCol=\"GameId\", ratingCol=\"Userscore\",\n coldStartStrategy=\"drop\")\n self.model[i] = self.als.fit(self.df[i])\n logger.info(\"ALS model built!\")",
"def _Train(self, limit):\n if len(self.Memory)>BATCH_SIZE: \n # Limit of Agents to Train\n for i in range(limit): \n # 'n' number of rounds to train \n for _ in range(50):\n # Get Batch Data\n experiances = self.Memory.sample()\n # Train Models\n self._Learn(self.Actor[i], self.ActorTarget, self.actorOpt[i], experiances)",
"def train(self, train_set) -> None:\n super().train(train_set)\n # split into data and target\n xlist, y = zip(*train_set)\n x = sparse.vstack(xlist)\n self._classifier.fit(x, y)",
"def model(self):\n filePath = self.config['data_path']['train_data']\n data = self.loadCSV(filePath)\n cleandata = self.preprocess(data)\n X, y = self.dataSplit(cleandata)\n X = self.CountVect(X, self.config['transform_path']['transform_model_path'])\n X_train, X_test, y_train, y_test = self.TrainTestSplit(X, y)\n self.MultinomialNB(X_train, X_test, y_train, y_test, self.config['nlp_path']['model_path'])",
"def train_models(self, clf, silent, feature_names=None, target_names=None, live=False):\n X_train, X_test, y_train, y_test = self.X_train, self.X_test, self.y_train, self.y_test\n t0 = time()\n clf.fit(X_train, y_train)\n train_time = time() - t0\n pred = clf.predict(X_test)\n test_time = time() - t0\n accuracy = metrics.accuracy_score(y_test, pred)\n fbeta = metrics.fbeta_score(y_test, pred,1,labels=self.dataset['label'].unique(),average='weighted')\n name = clf.name[0]\n if False:\n score_stats = f'Model : {name} | Score : {accuracy} | F-beta : {fbeta}'\n print(score_stats)\n\n if self.best_score_ledger[name][0] < accuracy:\n last = self.best_score_ledger[name][0]\n print(name)\n self.best_score_ledger[name] = [accuracy,fbeta]\n score_stats = f'Model : {name} | Score : {accuracy} | F-beta : {fbeta}'\n print(self.stemmer, ' ', self.transform)\n print(score_stats)\n\n if accuracy > self.best_models[name] and last != 0.0 and self.tuning_depth in ['normal','maximal']:\n new_model,score = self.hyperparameter_tuning(name,clf)\n if score > accuracy:\n self.best_score_ledger[name][0] = score\n clf = new_model\n dump(clf, os.path.join(os.getcwd(), self.file_term, 'models', f'{\"_\".join([self.uid_base, name])}'))\n\n\n\n if not silent:\n if hasattr(clf, 'coef_'):\n print(\"dimensionality: %d\" % clf.coef_.shape[1])\n print(\"density: %f\" % density(clf.coef_))\n\n if True and feature_names is not None:\n print(\"top 10 keywords per class:\")\n for i, label in enumerate(target_names):\n top10 = np.argsort(clf.coef_[i])[-10:]\n print(trim(\"%s: %s\" % (label, \" \".join(feature_names[top10]))))\n print()\n\n if True:\n print(\"classification report:\")\n print(metrics.classification_report(y_test, pred,\n target_names=target_names))\n\n if True:\n print(\"confusion matrix:\")\n print(metrics.confusion_matrix(y_test, pred))\n # if no model exists for the current settings, create one by default. Prevents issues if models are deleted.\n elif not os.path.exists(\n os.path.join(os.getcwd(), self.file_term, 'models', f'{\"_\".join([self.uid_base, name])}')):\n dump(clf, os.path.join(os.getcwd(), self.file_term, 'models', f'{\"_\".join([self.uid_base, name])}'))\n clf_descr = str(clf).split('(')[0]\n return clf_descr, accuracy, train_time, test_time",
"def run_classification_models(train,test,metric_file_path,classes):\n metric_names = ['accuracy','weightedRecall','weightedPrecision']\n f = open(metric_file_path,'w')\n f.write('model,'+','.join(metric_names)+'\\n')\n name = 'Logistic Regression'\n model = LogisticRegression()\n param_grid = ParamGridBuilder()\\\n .addGrid(model.regParam,[0,.25,.5]) \\\n .addGrid(model.elasticNetParam,[0,.25,.5])\\\n .build()\n model_cv = CrossValidator(\n estimator = model,\n estimatorParamMaps = param_grid,\n evaluator = MulticlassClassificationEvaluator(),\n numFolds = 3,\n seed = 7).fit(train)\n best_model = model_cv.bestModel\n print name\n print '\\t Best regParam (lambda): %.2f'%best_model._java_obj.getRegParam()\n print '\\t Best elasticNetparam (alpha): %.2f'%best_model._java_obj.getElasticNetParam()\n eval_model(f,name,model_cv,test,MulticlassClassificationEvaluator,metric_names)\n name = 'Decision Tree'\n model = DecisionTreeClassifier(seed=7)\n param_grid = ParamGridBuilder()\\\n .addGrid(model.maxDepth,[5,10,15]) \\\n .addGrid(model.maxBins,[8,16,32])\\\n .build()\n model_cv = CrossValidator(\n estimator = model,\n estimatorParamMaps = param_grid,\n evaluator = MulticlassClassificationEvaluator(),\n numFolds = 3,\n seed = 7).fit(train)\n best_model = model_cv.bestModel \n print name\n print '\\t Best maxDepth: %d'%best_model._java_obj.getMaxDepth()\n print '\\t Best maxBins: %d'%best_model._java_obj.getMaxBins()\n eval_model(f,name,model_cv,test,MulticlassClassificationEvaluator,metric_names)\n name = 'Random Forest'\n model = RandomForestClassifier(seed=7)\n param_grid = ParamGridBuilder()\\\n .addGrid(model.maxDepth,[5,10,15]) \\\n .addGrid(model.numTrees,[10,15,20])\\\n .build()\n model_cv = CrossValidator(\n estimator = model,\n estimatorParamMaps = param_grid,\n evaluator = MulticlassClassificationEvaluator(),\n numFolds = 3,\n seed = 7).fit(train)\n best_model = model_cv.bestModel \n print name\n print '\\t Best maxDepth: %d'%best_model._java_obj.getMaxDepth()\n print '\\t Best numTrees: %d'%best_model._java_obj.getNumTrees()\n eval_model(f,name,model_cv,test,MulticlassClassificationEvaluator,metric_names)\n name = 'One vs Rest'\n model = OneVsRest(classifier=LogisticRegression()).fit(train)\n print name\n eval_model(f,name,model,test,MulticlassClassificationEvaluator,metric_names)\n name = 'Naive Bayes'\n model = NaiveBayes()\n param_grid = ParamGridBuilder()\\\n .addGrid(model.smoothing,[.5,1,2])\\\n .build()\n model_cv = CrossValidator(\n estimator = model,\n estimatorParamMaps = param_grid,\n evaluator = MulticlassClassificationEvaluator(),\n numFolds = 3,\n seed = 7).fit(train)\n best_model = model_cv.bestModel \n print name\n print '\\t Best smoothing: %.1f'%best_model._java_obj.getSmoothing()\n eval_model(f,name,model_cv,test,MulticlassClassificationEvaluator,metric_names)\n if classes == 2:\n name = 'Gradient Boosted Trees'\n model = GBTClassifier(seed=7).fit(train)\n print name\n eval_model(f,name,model,test,MulticlassClassificationEvaluator,metric_names)\n name = 'Linear Support Vector Machine'\n model = LinearSVC().fit(train)\n print name\n eval_model(f,name,model,test,MulticlassClassificationEvaluator,metric_names) \n f.close()",
"def train_model_pipeline(conform_shape=True, indi_proportion=0.50, incl_group_imgs=True,\r\n feature_extractor=flatten_array, model=train_logistic_regression): \r\n # Create dataframe subject to feature extractor requirements\r\n X_train, y_train, X_test_indi, y_test_indi, X_test_group, y_test_group = \\\r\n create_train_test_sets(conform_shape=conform_shape, indi_proportion=indi_proportion, \r\n incl_group_imgs=incl_group_imgs)\r\n \r\n # Extract features\r\n if feature_extractor == extract_ORB_features:\r\n if os.path.isfile('Trained_Models/Kmeans_model.sav'):\r\n kmeans_model = load_model('Trained_Models/Kmeans_model.sav')\r\n else:\r\n kmeans_model = kmeans_cluster(X_train, 500)\r\n X_train = feature_extractor(X_train, kmeans_model, normalize = False)\r\n X_test_indi = feature_extractor(X_test_indi, kmeans_model, normalize = False)\r\n X_test_group = feature_extractor(X_test_group, kmeans_model, normalize = False)\r\n\r\n else:\r\n X_train = feature_extractor(X_train)\r\n X_test_indi = feature_extractor(X_test_indi)\r\n X_test_group = feature_extractor(X_test_group)\r\n \r\n # Train model on flattened array (no feature extraction)\r\n trained_model = model(X_train, y_train)\r\n \r\n indi_pred_class, indi_accuracy = evaluate_model(trained_model, X_test_indi, y_test_indi)\r\n group_pred_class, group_accuracy = evaluate_model(trained_model, X_test_group, y_test_group)\r\n \r\n return trained_model, indi_pred_class, indi_accuracy, group_pred_class, group_accuracy",
"def train_and_eval_all_models():\n\n clfShape, accShape = shape_symmetry_train_classifier()\n clfTexture, accTexture = texture_symmetry_train_classifier()\n clfFinal, accFinal = combined_symmetry_train_classifier()\n\n return accShape, accTexture, accFinal",
"def training(self) -> None:\n self.compile_model()\n self.train_epoch()\n self.agent.save()",
"def train(self):\n self.log(f\"{self.cur_file_path}\\t\\tInfo: train method invoked!\")\n self.log(f\"{self.cur_file_path}\\t\\tInfo: training {self.model.__class__.__name__} model!\")\n\n self.model.fit(self.trainX, self.trainY)",
"def __train__(self):\n if (self.type_camf == 'CAMF_CI'):\n #users, items, context, ratings\n ci = camf_ci.CI_class(self.__users_array__, self.__items_array__, self.__context_array__, self.__ratings__, self.fold, self.lr, self.factors)\n predictions, losses = ci.fit()\n elif (self.type_camf == 'CAMF_CU'):\n cu = camf_cu.CU_class(self.__users_array__, self.__items_array__, self.__context_array__, self.__ratings__, self.fold, self.lr, self.factors)\n predictions, losses = cu.fit()\n elif (self.type_camf == 'CAMF_C'):\n c = camf_c.C_class(self.__users_array__, self.__items_array__, self.__context_array__, self.__ratings__, self.fold, self.lr, self.factors)\n predictions, losses = c.fit()\n\n dummy_pred = np.zeros((predictions.shape))\n for r, pred_array in enumerate(predictions):\n for c, pred in enumerate(pred_array):\n dummy_pred[r][c] = self.__check_ratings__(pred)\n predictions = dummy_pred\n #save a plot with a loss function\n plots = prs.PlotRSData()\n #print(losses)\n plots.plot_loss_cars(losses, self.type_camf, self.__save_prefix__+\"_loop\"+str(self.loop))\n pd.DataFrame(losses).to_csv(\"./RecSys/out/CAMF/train/\"+self.type_camf+\"/\" + self.__save_prefix__ +\"losses_loop\"+str(self.loop)+\".csv\")\n print('Saving the feature matrix...')\n # set predictions back to the pivot table\n self.__utility_saved_training__(predictions) \n # save results\n self.utility_predictions.to_csv(\"./RecSys/out/CAMF/train/\"+self.type_camf+\"/\" + self.__save_prefix__ + \"_SGD_predictions_loop\"+str(self.loop)+\".csv\")"
]
| [
"0.6353949",
"0.6166256",
"0.60605466",
"0.6053671",
"0.59588176",
"0.5941645",
"0.5833356",
"0.57968616",
"0.57968616",
"0.5778012",
"0.5771533",
"0.57486176",
"0.5737569",
"0.57228905",
"0.57154137",
"0.5713114",
"0.56817275",
"0.5633404",
"0.5621355",
"0.5607235",
"0.5567943",
"0.5562119",
"0.5560618",
"0.55498505",
"0.55455923",
"0.55380076",
"0.55261946",
"0.55242026",
"0.5521545",
"0.55203044"
]
| 0.6931635 | 0 |
Get the admin visual data for a specific assignment. Currently the data passed back feeds into the radial and passed time scatter graphs. | def public_visuals_assignment_id(assignment_id: str):
# Get the assignment object
assignment = Assignment.query.filter(
Assignment.id == assignment_id
).first()
# If the assignment does not exist, then stop
req_assert(assignment is not None, message='assignment does not exist')
# Assert that the assignment is within the course context
assert_course_context(assignment)
# Generate and pass back the visual data
return success_response({
'assignment_data': get_admin_assignment_visual_data(
assignment_id
)
}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getAdminData(self):\n return getAdminData(self)",
"def admin_dashboard(request):\n data = request.GET.copy()\n if \"assigned_to\" not in data:\n data[\"assigned_to\"] = request.user.id\n assignment_form = AssignmentForm(data)\n assigned_to: Optional[Person] = None\n if assignment_form.is_valid():\n assigned_to = assignment_form.cleaned_data[\"assigned_to\"]\n\n current_events = Event.objects.current_events().prefetch_related(\"tags\")\n\n # This annotation may produce wrong number of instructors when\n # `unpublished_events` filters out events that contain a specific tag.\n # The bug was fixed in #1130.\n unpublished_events = (\n Event.objects.active()\n .unpublished_events()\n .select_related(\"host\")\n .annotate(\n num_instructors=Count(\n Case(\n When(task__role__name=\"instructor\", then=Value(1)),\n output_field=IntegerField(),\n )\n ),\n )\n .order_by(\"-start\")\n )\n\n # assigned events that have unaccepted changes\n updated_metadata = Event.objects.active().filter(metadata_changed=True)\n\n current_events = current_events.filter(assigned_to=assigned_to)\n unpublished_events = unpublished_events.filter(assigned_to=assigned_to)\n updated_metadata = updated_metadata.filter(assigned_to=assigned_to)\n\n context = {\n \"title\": None,\n \"assignment_form\": assignment_form,\n \"assigned_to\": assigned_to,\n \"current_events\": current_events,\n \"unpublished_events\": unpublished_events,\n \"updated_metadata\": updated_metadata.count(),\n \"main_tags\": Tag.objects.main_tags(),\n }\n return render(request, \"dashboard/admin_dashboard.html\", context)",
"def getAdmin():",
"def get_dataset_details(name, analyst):\n\n template = None\n allowed_sources = user_sources(analyst)\n dataset_object = Dataset.objects(name = name,\n source__name__in=allowed_sources).first()\n if not dataset_object:\n error = (\"Either no data exists for this dataset\"\n \" or you do not have permission to view it.\")\n template = \"error.html\"\n args = {'error': error}\n return template, args\n\n dataset_object.sanitize_sources(username=\"%s\" % analyst,\n sources=allowed_sources)\n\n # remove pending notifications for user\n remove_user_from_notification(\"%s\" % analyst, dataset_object.id, 'Dataset')\n\n # subscription\n subscription = {\n 'type': 'Dataset',\n 'id': dataset_object.id,\n 'subscribed': is_user_subscribed(\"%s\" % analyst,\n 'Dataset',\n dataset_object.id),\n }\n\n #objects\n objects = dataset_object.sort_objects()\n\n #relationships\n relationships = dataset_object.sort_relationships(\"%s\" % analyst, meta=True)\n\n # relationship\n relationship = {\n 'type': 'Datset',\n 'value': dataset_object.id\n }\n\n #comments\n comments = {'comments': dataset_object.get_comments(),\n 'url_key':dataset_object.name}\n\n # favorites\n favorite = is_user_favorite(\"%s\" % analyst, 'Dataset', dataset_object.id)\n\n # services\n service_list = get_supported_services('Dataset')\n\n # analysis results\n service_results = dataset_object.get_analysis_results()\n\n args = {'dataset': dataset_object,\n 'objects': objects,\n 'relationships': relationships,\n 'comments': comments,\n 'favorite': favorite,\n 'relationship': relationship,\n 'subscription': subscription,\n 'name': dataset_object.name,\n 'service_list': service_list,\n 'service_results': service_results}\n\n return template, args",
"def get_assignment_info(self):\n url = self.server_url + \"/api/v1/courses/\" + str(self.course_id) + '/assignments/' + str(self.assignment_id)\n r = requests.get(url, headers=self.headers, params=self.params)\n assignment = json.loads(r.text)\n return assignment",
"def get_info_admin(self):\n return self.get_info(\"HS_ADMIN\")",
"def visual_sundial_assignment(assignment_id: str):\n # Get the assignment object\n assignment = Assignment.query.filter(\n Assignment.id == assignment_id\n ).first()\n\n # If the assignment does not exist, then stop\n req_assert(assignment is not None, message='assignment does not exist')\n\n # Assert that the assignment is within the view of\n # the current admin.\n assert_course_context(assignment)\n\n # Pull the (maybe cached) sundial data\n return success_response({'sundial': get_assignment_sundial(assignment.id)})",
"def listSetInfo(self) :\n data = self.getSelectedRowData()\n\n if data : \n setName = data[self.setCols.index('Asset Name')]\n root = data[self.setCols.index('Root')]\n\n self.setAsmLocator(setName)\n self.setAsmRoot(mode='asset')\n self.setAsmRoot(mode='shot')\n self.setAsmVersion(root)\n\n self.viewData()",
"def admin(request):\n\n admin = get_admin(request)\n\n # For now, admin panels always appear in ascending order\n\n model_admin_root = admin[\"models\"]\n\n # TODO: Have renderer adapters for panels, so that they can override views\n admin_panels = sorted(model_admin_root.items(), key=lambda pair: pair[1].title)\n rendered_panels = [render_panel(ma, request, name=\"admin_panel\") for id, ma in admin_panels]\n\n return dict(panels=rendered_panels)",
"def web_data_admin(self):\n return {\n 'name': self.name,\n 'alias': self.alias,\n 'access': self.access,\n 'owner': self.owner.username,\n 'desc': self.desc,\n 'ip_network': str(self.ip_network),\n 'network': self.network,\n 'netmask': self.netmask,\n 'gateway': self.gateway,\n 'nic_tag': self.nic_tag,\n 'nic_tag_type': self.nic_tag_type,\n 'vlan_id': self.vlan_id,\n 'vxlan_id': self.vxlan_id,\n 'mtu': self.mtu,\n 'resolvers': self.get_resolvers(),\n 'dns_domain': self.dns_domain,\n 'ptr_domain': self.ptr_domain,\n 'dc_bound': self.dc_bound_bool,\n 'dhcp_passthrough': self.dhcp_passthrough,\n }",
"def get_scattering(self, param_name: list = ['S11', 'S21']):\n # TODO: move the plot in this analysis module. Renderer should recover the entire data\n return self.renderer.plot_params(param_name)",
"def get_admins_timeseries_chart_data():\n chart_data = {}\n try:\n pipe = [\n {'$sort': {VAX_DATE_KEY: 1}}\n ]\n cursor = vax_admins_summary_coll.aggregate(pipeline=pipe)\n data = list(cursor)\n df = pd.DataFrame(data)\n\n dates = df[VAX_DATE_KEY].apply(\n lambda x: format_datetime(x, SERIES_DT_FMT)).unique().tolist()\n data = [{\n 'name': OD_TO_PC_MAP[r],\n 'data': (\n df[df[VAX_AREA_KEY] == r][VAX_SECOND_DOSE_KEY].cumsum() /\n df[df[VAX_AREA_KEY] == r][POP_KEY] * 100\n ).round(2).to_list()\n } for r in sorted(df[VAX_AREA_KEY].unique())]\n chart_data = {\n \"title\": gettext('Vaccination trend'),\n \"yAxisTitle\": gettext('Pop. vaccinated (2nd dose) [%]'),\n \"dates\": dates,\n \"data\": data\n }\n app.logger.debug(f\"Time series chart data {chart_data}\")\n except Exception as e:\n app.logger.error(f\"While getting vax timeseries chart data {e}\")\n return chart_data",
"def dashboard():",
"def test_visualisations_get_visualisation_render_data(self):\n pass",
"def getAdminContent(self, **params):\n return getAdminContent(self, **params)",
"def _get_dashboard_data(self):\n if self._dashboard_data is None:\n dashboard_object = self._get_dashboard_object()\n self._dashboard_data = serialize_json_safe(dashboard_object)\n\n return self._dashboard_data",
"def show_data():",
"def show_data(self):\n\n self.area_canvas.axes.cla()\n self.draw_scatterplot(self.scatter_canvas, 'x [µm]', 'y [µm]', self.p_inputs['flip y-axis'].isChecked())\n self.draw_hist(self.area_canvas, 'area', 'cluster area [µm²]', 'number of clusters')\n self.draw_hist(self.number_canvas, 'nclusters', 'number of cluster', 'number of regions')\n self.draw_hist(self.density_canvas, 'density', 'cluster density [µm⁻²]', 'number of clusters')\n self.draw_hist(self.percentage_canvas, 'pclustered', 'percentage clustered',\n 'number of regions')\n self.draw_hist(self.ratio_canvas, 'reldensity', 'relative density clusters/background',\n 'number of regions')",
"def detail_assignmentype(request, pk):\n prof = request.user.prof\n context = {'prof': prof}\n assignmentype = Assignmentype.objects.filter(pk=pk, prof=prof).first()\n assignments = assignmentype.assignment_set.\\\n annotate(std=StdDev('evalassignment__grade_assignment'),\n mean=Avg('evalassignment__grade_assignment'))\n if assignmentype:\n context['assignmentype'] = assignmentype\n context['assignments'] = assignments\n context['range_grades'] = range(assignmentype.nb_grading)\n return render(request, 'gradapp/detail_assignmentype.html',\n context)\n else:\n return redirect('gradapp:list_assignmentypes_running')",
"def view_assignment_list():\n\n if len(Assignments.assignments_list) == 0:\n Ui.print_message(\"Assignment list is empty\")\n else:\n Ui.print_assignments_list(Assignments.assignments_list, \"Assignments List:\")",
"def displayData(cls):\n return (\n \"paramName\",\n \"autoFollow\",\n \"lowerDisplay\",\n \"upperDisplay\",\n \"binCount\",\n \"xscale\",\n \"yweight\"\n )",
"def data():\n return render_template(\n 'data.html',\n title='World Happiness Report',\n year=datetime.now().year,\n message='Main Data Model'\n )",
"def datacollection(self):\n \n self.datadict = {'name': self.name, 'supplyname': self.supplyname, 'tranname': self.tranname, 'demandname': self.demandname, \n 'nodenum': self.nodenum, 'demandnum': self.demandnum, 'trannum': self.trannum, 'supplynum': self.supplynum,\n \"demandseries\": self.demandseries, 'transeries': self.transeries, 'supplyseries': self.supplyseries,\n \"edgediameter\": self.edgediameter, 'population_assignment': self.popuassign, 'elevation': self.elevation, 'color': self.color}",
"def get_data():\n pass",
"def get(self):\n DA = DataAccessor()\n students = DA.getStudents()\n admins = DA.getAdmins()\n self.generate('manageUsers.html', {\n 'admins' : admins,\n 'students' : students\n })",
"def showResults(self, name=None, showtable=True, ax=None, stats=True):\n job, name = self.getJob(name)\n \n if job == None:\n print 'job not in DB'\n return\n if job.state() != 'Finished':\n print 'job not finished'\n return\n\n self.matrices = job.data.allMatrices()\n #print self.matrices['ModellingResults'].csvRepresentation()\n jobmeta = job.metadata()\n cols = self.DB.getSimpleFields()\n expcol = None\n expdata = None\n #print jobmeta\n if jobmeta.has_key('expcol'):\n expcol = jobmeta['expcol']\n if expcol not in cols and jobmeta.has_key('project'):\n #we may have stored the exp data in another project\n prjdata = jobmeta['project']\n print 'trying to loading exp data from external project(s)'\n from PEATDB.Base import PDatabase\n from PEATTables import PEATTableModel\n \n tmpdb = PDatabase(**prjdata)\n print tmpdb\n S = PEATTableModel(tmpdb)\n expdata = S.simpleCopy(include=['Mutations'])\n print expdata \n \n #if exp column not known then ask user \n if expcol == '' or expcol == None: \n mpDlg = MultipleValDialog(title='Select Experimental Data',\n initialvalues=[cols],\n labels=['exp data column:'],\n types=['list'],\n parent=self.mainwin)\n if mpDlg.result == True:\n expcol = mpDlg.results[0]\n else:\n return\n\n for m in self.matrices:\n matrix = self.matrices[m]\n if matrix == None or not 'Total' in matrix.columnHeaders():\n continue\n \n ax,mh,x,y = self.plotMerged(matrix, expcol, expdata, m,\n showtable, ax, name, stats)\n \n #need to add this for mousehandler to work.. hack \n '''from Correlation import MouseHandler\n mh = MouseHandler(ax, labels=expcol, key='Mutations')\n mh.connect()'''\n\n return ax,mh,x,y",
"async def plot_GM_assignments_in_3d_tuple(data: np.ndarray, assignments, save_fig_to_file: bool, fig_file_prefix='train_assignments', show_now=True, **kwargs) -> Tuple[object, object]: # TODO: medium: rename this function\n # TODO: find out why attaching the log entry/exit decorator kills the streamlit rotation app. For now, do not attach.\n if not isinstance(data, np.ndarray):\n err = f'Expected `data` to be of type numpy.ndarray but instead found: {type(data)} (value = {data}).'\n raise TypeError(err)\n # Parse kwargs\n s = kwargs.get('s', 0.5)\n marker = kwargs.get('marker', 'o')\n alpha = kwargs.get('alpha', 0.8)\n title = kwargs.get('title', 'Assignments by GMM')\n azim_elev = kwargs.get('azim_elev', (70, 135))\n # Plot graph\n unique_assignments = list(np.unique(assignments))\n R = np.linspace(0, 1, len(unique_assignments))\n colormap = plt.cm.get_cmap(\"Spectral\")(R)\n tsne_x, tsne_y, tsne_z = data[:, 0], data[:, 1], data[:, 2]\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n # Loop over assignments\n for i, assignment in enumerate(unique_assignments):\n # Select data for only assignment i\n idx = np.where(assignments == assignment)\n # Assign to colour and plot\n ax.scatter(tsne_x[idx], tsne_y[idx], tsne_z[idx], c=colormap[i], label=assignment, s=s, marker=marker, alpha=alpha)\n ax.set_xlabel('Dim. 1')\n ax.set_ylabel('Dim. 2')\n ax.set_zlabel('Dim. 3')\n ax.view_init(*azim_elev)\n plt.title(title)\n plt.legend(ncol=3)\n # Draw now?\n if show_now:\n plt.show()\n else:\n plt.draw()\n # Save to graph to file?\n if save_fig_to_file:\n file_name = \"GMM_with_3d_tuple\"\n save_graph_to_file(fig, file_name)\n\n return fig, ax",
"def showDataset(request):\n\n if request.method=='GET':\n uuid=request.GET.get('uuid', ' ')\n try:\n dataset=Dataset.nodes.get(uuid=uuid)\n return JsonResponse(dataset.serialize, safe=False)\n except :\n return JsonResponse({\"error\":\"Error occurred\"}, safe=False)",
"def get_data( obj, prm, lev, date, timelevel=0 ):\n \n parameter = obj( name = prm, level = lev, dataDate = date )[ timelevel ]\n print( parameter.dataDate )\n \n #-----Checking grit type----------------------------------------------\n if parameter.gridType == \"sh\":\n lat, lon, data = sh( parameter.values )\n elif parameter.gridType == \"reduced_gg\":\n lat, lon = parameter.latlons() #very easy implementastion with a gg\n lon = lon - 180. #else it only draws on half the map\n data = parameter.values\n elif parameter.gridType == \"regular_gg\":\n lat, lon = parameter.latlons() #very easy implementastion with a gg\n lon = lon - 180. #else it only draws on half the map\n data = parameter.values\n else: \n print ( parameter.gridType )\n \n return lat, lon, data",
"async def plot_GM_assignments_in_3d_new(data: np.ndarray, assignments, show_now=True, **kwargs) -> Tuple[object, object]:\n # TODO: find out why attaching the log entry/exit decorator kills the streamlit graph-rotation app\n if not isinstance(data, np.ndarray):\n err = f'Expected `data` to be of type numpy.ndarray but instead found: {type(data)} (value = {data}).'\n raise TypeError(err)\n # Parse kwargs\n s = kwargs.get('s', 1.5)\n marker = kwargs.get('marker', 'o')\n alpha = kwargs.get('alpha', 0.8)\n title = kwargs.get('title', 'Drug Labels Embedded in 3 Dimensions based on similarity by tSNE')\n azim_elev = kwargs.get('azim_elev', (70, 135))\n # Plot graph\n unique_assignments = list(np.unique(assignments))\n R = np.linspace(0, 1, len(unique_assignments))\n colormap = plt.cm.get_cmap(\"nipy_spectral\")(R)\n tsne_x, tsne_y, tsne_z = data[:, 0], data[:, 1], data[:, 2]\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n # Loop over assignments\n for i, g in enumerate(unique_assignments):\n # Select data for only assignment i\n idx = np.where(assignments == g)\n # Assign to colour and plot\n ax.scatter(tsne_x[idx], tsne_y[idx], tsne_z[idx], c=colormap[i],\n #label=(g if g == 'default' else None),\n s=s, marker=marker, alpha=alpha)\n ax.set_xlabel('tSNE Dim 1')\n ax.set_ylabel('tSNE Dim 2')\n ax.set_zlabel('tSNE Dim 3')\n ax.view_init(*azim_elev)\n plt.title(title)\n plt.legend(ncol=3)\n # Show now?\n if show_now:\n plt.show()\n else:\n plt.draw()\n\n return fig, ax"
]
| [
"0.611185",
"0.56681174",
"0.5536008",
"0.54843163",
"0.5468018",
"0.54487526",
"0.5419295",
"0.5360861",
"0.5259989",
"0.5253528",
"0.5136372",
"0.51328814",
"0.5104171",
"0.50537735",
"0.50513554",
"0.5044633",
"0.5022114",
"0.4999167",
"0.4984658",
"0.4978159",
"0.49689788",
"0.49565876",
"0.49272838",
"0.49170768",
"0.4895735",
"0.48913836",
"0.48885232",
"0.48725122",
"0.48686558",
"0.48664862"
]
| 0.6321538 | 0 |
Get the visual history for a specific student and assignment. Lightly cached per assignment and user. | def visual_history_assignment_netid(assignment_id: str, netid: str):
# Get the assignment object
assignment = Assignment.query.filter(
Assignment.id == assignment_id
).first()
# If the assignment does not exist, then stop
req_assert(assignment is not None, message='assignment does not exist')
# Get the student
student = User.query.filter(User.netid == netid).first()
# Make sure that the student exists
req_assert(student is not None, message='user does not exist')
# Assert that both the course and the assignment are
# within the view of the current admin.
assert_course_context(student, assignment)
    # Get the cached assignment history
return success_response(get_assignment_history(assignment.id, student.netid)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read_history(self):\r\n cursor = connection.cursor()\r\n cursor.execute(\"\"\"\r\n SELECT id, created, student_module_id FROM courseware_studentmodulehistory\r\n \"\"\")\r\n return cursor.fetchall()",
"def history():\n \n user_id = session[\"user_id\"]\n history_list = hist(user_id, db)\n return render_template('history.html', history=history_list)",
"def list_history(request):\n history = History.objects\n\n if not is_admin(request.user):\n history = history.filter(submitter=request.user)\n history = history.order_by('-submission_date')\n\n return render('editor/list_history.mako', request, {\n 'history': history,\n })",
"def history():\n query = Records.query.filter_by(user_id=session.get(\"user_id\")).all()\n return render_template(\"history.html\", rows=query)",
"def history():\n \n value_dicts = db.execute(\"SELECT * FROM history WHERE user_id = :usid\", usid=session[\"user_id\"])\n return render_template(\"history.html\", value_dicts=value_dicts)",
"def history():\n rows = db.execute(\"SELECT * FROM histories WHERE id=:id\", id=session[\"user_id\"])\n\n return render_template(\"history.html\", rows=rows)",
"def get_history_for_student_modules(self, student_module_id):\r\n cursor = connection.cursor()\r\n cursor.execute(\"\"\"\r\n SELECT id, created FROM courseware_studentmodulehistory\r\n WHERE student_module_id = %s\r\n ORDER BY created, id\r\n \"\"\",\r\n [student_module_id]\r\n )\r\n history = cursor.fetchall()\r\n return history",
"def get_history(self):\r\n\r\n return self.board_history",
"def history():\n user_history=db.execute(\"SELECT * FROM history WHERE user_id=:u_i\",u_i=session[\"user_id\"])\n return render_template(\"history.html\",s=user_history)",
"def get_exercise_history():\n user_id = session.get(\"email\")\n\n history = fm.full_attempt_history(user_id)\n\n msg = \"Attempt history found for user: {}. {} records.\"\\\n .format(user_id, len(history))\n app.logger.info(msg)\n return jsonify({\"history\": history})",
"def history():",
"def history():\n # extract history of operation for a particular user\n historical_data = db.execute(\"SELECT Symbol, Company, Shares, Price, Total, Timestamp FROM portfolio WHERE id = :id\", id=session[\"user_id\"])\n\n return render_template(\"history.html\", historical=historical_data)",
"def history():\n history = db.execute(\"SELECT * from history WHERE id=:id\", id=session[\"user_id\"])\n\n return render_template(\"history.html\", history = history)",
"def history():\n history = db.execute(\"SELECT * from history WHERE id=:id\", id=session[\"user_id\"])\n \n return render_template(\"history.html\", history = history)",
"def history():\n \n # only prints shifts from current user\n usernum = db.execute(\"SELECT * FROM users WHERE id=:id\", id = session[\"user_id\"])[0][\"id\"]\n \n # stores shift data into hours\n hours = db.execute(\"SELECT * FROM history WHERE User=:id\", id = usernum)\n \n # calculates total amount of cash ever paid to user\n cash = db.execute(\"SELECT sum(total) FROM history WHERE User=:id\", id = session[\"user_id\"])[0][\"sum(total)\"]\n \n return render_template(\"history.html\", hours = hours, Total = cash)",
"def history():\n # query database for history\n transactions = db.execute(\"SELECT symbol, volume, share_price, dtstamp FROM `transaction` WHERE id = :id\", id = session[\"user_id\"])\n\n # initialise dict\n dic = {}\n\n # interate through history array\n\n # pass data to template\n return render_template(\"history.html\", transactions = transactions)",
"def history():\n \n # selection of name, symbol, shares and cash of user stocks\n hist = db.execute(\"SELECT * FROM history WHERE id=:id\", id = session[\"user_id\"])\n return render_template(\"history.html\", hist=hist)",
"def show_history(user_id):\n return History.where('user_id', user_id).get()",
"def history():\n\n user_id = session.get('user_id')\n table_name = f'stocks_user{user_id}'\n rows = db.execute(\"SELECT * FROM ?\", table_name)\n\n return render_template('history.html', rows=rows)",
"def history():\n # name variable to show current users name in template\n name = db.execute(\"SELECT username FROM users WHERE id=:id\", id=session[\"user_id\"])\n\n # user's transaction history\n hist = db.execute(\"SELECT transactid, name, price, quantity, date FROM portfolio WHERE userid = :userid\", userid=session[\"user_id\"])\n\n # return the template with the relevant objects for jinja\n return render_template(\"history.html\", name=name, hist=hist)\n\n # if function fails\n return apology(\"Can't display history\", 400)",
"def history():\n transactions = db.execute(\"SELECT * FROM history WHERE user_id = ?\", session[\"user_id\"])\n user_name = db.execute(\"SELECT username, cash FROM users WHERE id = ?\", session[\"user_id\"])\n \n return render_template(\"history.html\", transactions=transactions, user_name=user_name[0][\"username\"])",
"def history():\n current_userid = session[\"user_id\"]\n userbalance = get_userbal(db, current_userid)\n userstocks = get_userstock(db, current_userid)\n stockhistory = get_history(db, current_userid)\n stocklist = get_stocklist(db, stocksid=True, prices=True)\n if request.method == \"GET\":\n return render_template(\"history.html\", userbalance=usd(userbalance),\n userstocks=userstocks, buystocks=stocklist,\n stockhistory=stockhistory)\n else:\n return apology(\"TODO\")",
"def fetch_history(*args, **kwargs):\n return collect_history(*args, **kwargs)",
"def history():\n rows = db.execute(\"SELECT stock_id, stocks.symbol, price, shares, date FROM history JOIN stocks ON history.stock_id=stocks.id WHERE user_id=:user_id\", user_id=session[\"user_id\"])\n return render_template(\"history.html\", rows=rows)",
"def history():\n\n # Access user's id\n user_id = session[\"user_id\"]\n\n # Obtain history information for logged in user\n TRANSACTIONS = db.execute(\"SELECT * FROM history WHERE user_id = ? ORDER BY transacted DESC\", user_id)\n\n return render_template(\"history.html\", transactions=TRANSACTIONS)",
"def get_game_history(self, request):\n return games_ctrl.get_game_history(request.urlsafe_game_key)",
"def submission_history(request, course_id, student_username, location):\r\n try:\r\n course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)\r\n except (InvalidKeyError, AssertionError):\r\n return HttpResponse(escape(_(u'Invalid course id.')))\r\n\r\n try:\r\n usage_key = course_key.make_usage_key_from_deprecated_string(location)\r\n except (InvalidKeyError, AssertionError):\r\n return HttpResponse(escape(_(u'Invalid location.')))\r\n\r\n course = get_course_with_access(request.user, 'load', course_key)\r\n staff_access = has_access(request.user, 'staff', course)\r\n\r\n # Permission Denied if they don't have staff access and are trying to see\r\n # somebody else's submission history.\r\n if (student_username != request.user.username) and (not staff_access):\r\n raise PermissionDenied\r\n\r\n try:\r\n student = User.objects.get(username=student_username)\r\n student_module = StudentModule.objects.get(\r\n course_id=course_key,\r\n module_state_key=usage_key,\r\n student_id=student.id\r\n )\r\n except User.DoesNotExist:\r\n return HttpResponse(escape(_(u'User {username} does not exist.').format(username=student_username)))\r\n except StudentModule.DoesNotExist:\r\n return HttpResponse(escape(_(u'User {username} has never accessed problem {location}').format(\r\n username=student_username,\r\n location=location\r\n )))\r\n history_entries = StudentModuleHistory.objects.filter(\r\n student_module=student_module\r\n ).order_by('-id')\r\n\r\n # If no history records exist, let's force a save to get history started.\r\n if not history_entries:\r\n student_module.save()\r\n history_entries = StudentModuleHistory.objects.filter(\r\n student_module=student_module\r\n ).order_by('-id')\r\n\r\n context = {\r\n 'history_entries': history_entries,\r\n 'username': student.username,\r\n 'location': location,\r\n 'course_id': course_key.to_deprecated_string()\r\n }\r\n\r\n return render_to_response('courseware/submission_history.html', context)",
"def History(self):\n return self.historydict.get('history', [])",
"def history():\n\n userId = session[\"user_id\"]\n\n shares = db.execute(f\"SELECT symbol, shares, price, trans_time FROM transactions WHERE user_id={userId} ORDER BY trans_id DESC\")\n\n return render_template(\"history.html\", shares=shares)",
"def history():\n transactions = db.execute(\"SELECT Symbol, Shares, Transacted FROM cash WHERE id=:id\", id=session[\"user_id\"])\n return render_template(\"history.html\", transactions=transactions)"
]
| [
"0.6364211",
"0.6215497",
"0.61883956",
"0.6163177",
"0.61592215",
"0.6120353",
"0.60890216",
"0.60595787",
"0.59833455",
"0.5978124",
"0.5969229",
"0.5968126",
"0.59636605",
"0.5947022",
"0.5911437",
"0.5892776",
"0.5835226",
"0.58197695",
"0.5789049",
"0.57594925",
"0.57415617",
"0.5728396",
"0.5712903",
"0.56970996",
"0.5682565",
"0.5661614",
"0.5658296",
"0.56515115",
"0.56496936",
"0.5641687"
]
| 0.66048294 | 0 |
Get the summary sundial data for an assignment. This endpoint is ridiculously IO intensive. Heavily cached. | def visual_sundial_assignment(assignment_id: str):
# Get the assignment object
assignment = Assignment.query.filter(
Assignment.id == assignment_id
).first()
# If the assignment does not exist, then stop
req_assert(assignment is not None, message='assignment does not exist')
# Assert that the assignment is within the view of
# the current admin.
assert_course_context(assignment)
# Pull the (maybe cached) sundial data
return success_response({'sundial': get_assignment_sundial(assignment.id)}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __get_evaluation_summary(self):\n self.logger.debug(\n f\"Getting summary for assignment {self.assignment_id}, eval_id {self.eval_id}\"\n )\n result = self.interactor.get_policy_eval_summary(self.assignment_id)\n\n if result.status_code != 200:\n self.logger.debug(\n f\"Could not get summary for assignment {self.assignment_id} for eval_id {self.eval_id} - {result.text}\"\n )\n raise Exception(\n f\"Summary could not be retrived: {result.status_code} - {result.text}\"\n )\n\n return result.json()[\"value\"][0][\"results\"]",
"def summary(self):\n response = self._get(self.uri_for(\"summary\"))\n return json_to_py(response)",
"def getSummary(self):\n return self.base.get(\"summary\", [])",
"def summary(self):\n if hasattr(self,\"_summary\"):\n return self._summary\n else:\n return {}",
"def get_summary(self, **kwargs):\n authorized_args = [\n 'begin', 'end', 'tenant_id', 'service', 'groupby', 'all_tenants']\n if kwargs.get('groupby', None):\n kwargs['groupby'] = ','.join(kwargs['groupby'])\n url = self.get_url('summary', kwargs, authorized_args)\n return self.api_client.get(url).json()",
"def get_grade_summary(self):\r\n\r\n fake_request = self.factory.get(\r\n reverse('progress', kwargs={'course_id': self.course.id.to_deprecated_string()})\r\n )\r\n\r\n return grades.grade(self.student_user, fake_request, self.course)",
"def getSummary(self):\n return self.summary",
"def _obtain_summary(self):\n if self._summary is None:\n if self._metadata:\n self._summary = ResultSummary(\n self._connection.unresolved_address, **self._metadata\n )\n elif self._connection:\n self._summary = ResultSummary(\n self._connection.unresolved_address,\n server=self._connection.server_info\n )\n\n return self._summary",
"def get_progress_summary(self):\r\n\r\n fake_request = self.factory.get(\r\n reverse('progress', kwargs={'course_id': self.course.id.to_deprecated_string()})\r\n )\r\n\r\n progress_summary = grades.progress_summary(\r\n self.student_user, fake_request, self.course\r\n )\r\n return progress_summary",
"def student_summary() -> str:\n db_path: str = \"810_startup.db\"\n\n try:\n db: sqlite3.Connection = sqlite3.connect(db_path)\n except sqlite3.OperationalError:\n return f'Error: Unable to open database at path {db_path}'\n else:\n query: str = \"select students.Name, students.CWID, grades.Course, grades.Grade, instructors.Name from students,grades,instructors where students.CWID=StudentCWID and InstructorCWID=instructors.CWID order by students.Name\"\n data: Dict[str, str] = [{'Name': name, 'CWID': cwid, 'Course': course, 'Grade': grade, 'Instructor': instructor} for name, cwid, course, grade, instructor in db.execute(query)]\n\n db.close()\n\n return render_template(\n 'students.html',\n title = 'Stevens Repository',\n table_title = 'Students Summary',\n students = data)",
"def summary(self) -> str:\n return pulumi.get(self, \"summary\")",
"def get_account_summary(self):\r\n return self.get_object('GetAccountSummary', {}, SummaryMap)",
"def summary(self):\r\n request = http.Request('GET', self.get_url())\r\n\r\n return request, parsers.parse_json",
"def summary(self):\r\n request = http.Request('GET', self.get_url())\r\n\r\n return request, parsers.parse_json",
"def summary(self):\r\n request = http.Request('GET', self.get_url())\r\n\r\n return request, parsers.parse_json",
"def student_summary(self, student_id, request, activity):\n try:\n student = User.objects.get(id=student_id)\n except User.DoesNotExist:\n return HttpResponseNotFound(\"Cet étudiant ne fait pas partie de ce cours\")\n\n if not activity.is_member(student):\n return HttpResponseNotFound(\"Cet étudiant ne fait pas partie de ce cours\")\n\n activities = [acti for acti in activity.indexed_activities() if acti.open]\n indexed_pl = {a: a.indexed_pl() for a in activities}\n all_pl = []\n for indexed in indexed_pl.values():\n all_pl += list(indexed)\n teacher_list = activity.teacher.all()\n tl_id = [t.id for t in teacher_list]\n student_list = activity.student.exclude(id__in=tl_id)\n nb_student = len(student_list) if student_list else 1\n\n grades_query = HighestGrade.objects.filter(activity__in=activities,\n pl__in=all_pl,\n user__in=student_list)\n d_grade = dict()\n for g in grades_query:\n if g.grade is not None:\n d_grade[(g.user.id, g.pl.id)] = int(g.grade)\n\n tp = list()\n for a in activities:\n question = list()\n for pl in a.indexed_pl():\n all_mark = list()\n for s in student_list:\n if (s.id, pl.id) in d_grade:\n ms = max([0, d_grade[(s.id, pl.id)]])\n else:\n ms = 0\n all_mark.append(ms)\n if (student.id, pl.id) not in d_grade:\n mark_student = 0\n else:\n mark_student = max([0, d_grade[(student.id, pl.id)]])\n state = Answer.pl_state(pl, student)\n question.append({\n 'state': state,\n 'name': pl.json['title'],\n 'all_mark': all_mark,\n 'mark': mark_student,\n 'mean': round(sum(all_mark) / (5*nb_student), 2),\n 'min': round(min(all_mark) / 5, 2),\n 'max': round(max(all_mark) / 5, 2),\n })\n len_tp = len(question) if question else 1\n all_grouped_mark = list()\n for i in range(nb_student):\n all_grouped_mark.append(sum([q['all_mark'][i] for q in question]) / len_tp)\n tp.append({\n 'name': a.activity_data['title'],\n 'activity_name': a.name,\n 'id': a.id,\n 'width': str(100 / len_tp),\n 'pl': question,\n 'all_mark': all_grouped_mark,\n 'mark': round(sum([q['mark'] for q in question]) / (5*len_tp), 2),\n 'mean': round(sum(all_grouped_mark) / (5*nb_student), 2),\n 'min': round(min(all_grouped_mark) / 5, 2),\n 'max': round(max(all_grouped_mark) / 5, 2),\n })\n\n len_act = sum([len(t['pl']) for t in tp]) if [len(t['pl']) for t in tp] else 1\n all_act_mark = list()\n for i in range(nb_student):\n sum_mark = 0\n for t in tp:\n sum_mark += sum([e['all_mark'][i] for e in t['pl']])\n all_act_mark.append(sum_mark / len_act)\n course_mark = sum([sum([e['mark'] for e in t['pl']]) for t in tp]) / len_act\n return render(request, 'activity/activity_type/course/student_summary.html', {\n 'state': [i for i in State if i != State.ERROR],\n 'course_name': activity.name,\n 'student': student,\n 'activities': tp,\n 'course_id': activity.id,\n 'mark': round(course_mark / 5, 2),\n 'mean': round(sum(all_act_mark) / (5*nb_student), 2),\n 'min': round(min(all_act_mark) / 5, 2),\n 'max': round(max(all_act_mark) / 5, 2),\n 'nb_more': sum([1 for m in all_act_mark if m > course_mark]),\n 'nb_less': sum([1 for m in all_act_mark if m < course_mark]),\n })",
"def get_summary(self, s, base=None):\n summary = summary_patt.search(s).group()\n if base is not None:\n self.params[base + \".summary\"] = summary\n return summary",
"def grade_summary(request, course_key):\r\n course = get_course_with_access(request.user, 'staff', course_key)\r\n\r\n # For now, just a page\r\n context = {'course': course,\r\n 'staff_access': True, }\r\n return render_to_response('courseware/grade_summary.html', context)",
"def get_summary(self, s, base=None):\n summary = summary_patt.search(s).group()\n if base is not None:\n self.params[base + '.summary'] = summary\n return summary",
"def get_assignment_info(self):\n url = self.server_url + \"/api/v1/courses/\" + str(self.course_id) + '/assignments/' + str(self.assignment_id)\n r = requests.get(url, headers=self.headers, params=self.params)\n assignment = json.loads(r.text)\n return assignment",
"def summary():\r\n\r\n average_age, counted = _find_average_age()\r\n male, female = _find_male_female_percentage()\r\n headings = [\"Total Number of Patients\", \"Average Age\",\r\n \"Patients Involved In Average Age\", \"Percentage of Male\",\r\n \"Percentage of Female\"]\r\n data = [len(resources), average_age, counted, male, female]\r\n return render_template(\"summary.html\", headings=headings, data=data)",
"def export_getDBSummary(self):\n gLogger.info(\"RequestManagerHandler.getDBSummary: Attempting to obtain database summary.\")\n try:\n res = requestDB.getDBSummary()\n return res\n except Exception,x:\n errStr = \"RequestManagerHandler.getDBSummary: Exception while getting database summary.\"\n gLogger.exception(errStr,lException=x)\n return S_ERROR(errStr)",
"def cf_api_space_summary(space_guid):\n return HttpClientFactory.get(CloudFoundryConfigurationProvider.get()).request(\n method=HttpMethod.GET,\n path=\"spaces/{}/summary\".format(space_guid),\n msg=\"CF: get space summary\",\n )",
"def summary(self):\n return self._summary",
"def summary(self):\n return self._summary",
"def summary(self):\n return self._summary",
"def summary(self):\n return self._summary",
"def summary(self):\n return self._summary",
"def summary(self):\n return self._summary",
"def summary(self):\n return self._summary"
]
| [
"0.65082306",
"0.61989534",
"0.60656804",
"0.59108555",
"0.58665276",
"0.58484167",
"0.5837141",
"0.57975644",
"0.57505107",
"0.5686875",
"0.5664202",
"0.5660172",
"0.5644205",
"0.5644205",
"0.5644205",
"0.5629081",
"0.5628967",
"0.56143475",
"0.56098056",
"0.55950415",
"0.55914456",
"0.55287457",
"0.55087394",
"0.5505955",
"0.5505955",
"0.5505955",
"0.5505955",
"0.5505955",
"0.5505955",
"0.5505955"
]
| 0.7021534 | 0 |
Reshapes an input variable without copy. | def reshape(x, shape):
if x.shape == shape:
return chainer.as_variable(x)
y, = Reshape(shape).apply((x,))
return y | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reshape(x, shape):\n return Reshape(shape)(x)",
"def reshape_var(var):\n dims = np.shape(var)\n nx = dims[0]\n ny = dims[1]\n nz = dims[2]\n\n var_2d = var.reshape(nx * ny, nz)\n return var_2d",
"def flatten_reshape(variable):\n dim = 1\n for d in variable.get_shape()[1:].as_list():\n dim *= d\n return tf.reshape(variable, shape=[-1, dim])",
"def resh(x):\r\n a = x.shape[0]\r\n b = x.shape[1]\r\n return x.reshape(a*b, 1), a, b",
"def reshape(input):\n\n input = input / 255\n input = trans.resize(input, (args.size, args.size))\n input = np.reshape(input, input.shape + (1,))\n input = np.reshape(input, (1,) + input.shape)\n return input",
"def reshape(self, *shape):\n return F.Reshape.apply(self, shape)",
"def reshape(x, shape):\n return float(x) if shape is None else jnp.reshape(x, shape)",
"def reshape(data):\n return K.reshape(x=data, shape=(K.shape(data)[0], 1, reshape_size))",
"def reshape_input(input_data, input_size, single=True, warning=False):\n with suppress(Exception):\n input_data = torch.from_numpy(input_data)\n\n if input_size is None:\n if warning is True:\n print(\"No size was given and no reshaping can occur\")\n return input_data\n\n # Reshape the data regardless of batch size\n start = len(input_data)\n\n alternate = list(input_size)\n alternate[0] = start\n alternate = tuple(alternate)\n\n try:\n if single:\n input_data = input_data.reshape(alternate)\n else:\n input_data = input_data.reshape(input_size)\n except Exception:\n if warning is True:\n print(\"Warning: Data loss is possible during resizing.\")\n if single:\n input_data = input_data.resize_(alternate)\n else:\n input_data = input_data.resize_(input_size)\n return input_data",
"def relay_reshape(c, v, shp):\n nv = c.ref(v)\n assert shp.is_constant(tuple)\n trim = False\n if shp.value == ():\n shp = (1,)\n trim = True\n else:\n shp = shp.value\n res = relay.op.reshape(nv, newshape=shp)\n if trim:\n res = relay.op.take(res, relay.const(0), mode='fast')\n return res",
"def output_reshape(ct):\n return np.moveaxis(ct, 1, -1)",
"def numpyReshape(array):\n return np.array(array, dtype = float).reshape(1, len(array))",
"def _reshape_feature(self, X, size):\n new_shape = (X.shape[0],) + size + (X.shape[-1],)\n return X.reshape(new_shape)",
"def _ReshapeToInput(op: ops.Operation, grad):\n return array_ops.reshape(\n _IndexedSlicesToTensorNoWarning(grad), array_ops.shape(op.inputs[0]))",
"def _reshape_like(mat: Tensor, shape: Tuple[int]) -> Tensor:\n return mat.reshape(-1, *shape)",
"def reshape(tensor, newshape):\n raise NotImplementedError",
"def flatten(x):\n return reshape(x, (x.shape[0], -1))",
"def reshape(a, shape=None, name=None):\n if K.is_sparse(a):\n reshape_op = tf.sparse.reshape\n else:\n reshape_op = tf.reshape\n\n return reshape_op(a, shape=shape, name=name)",
"def flatten(x_tensor):\n with tf.name_scope('input_reshape'):\n x = x_tensor.get_shape().as_list()[1]\n y = x_tensor.get_shape().as_list()[2]\n z = x_tensor.get_shape().as_list()[3]\n image_shaped_input = tf.reshape(x_tensor, [-1, x*y*z])\n return image_shaped_input",
"def reshape_output_shape(input_shape):\n shape_1 = input_shape[0]\n shape_2 = 384\n return(shape_1, shape_2)",
"def add_reshape(self, input_name, shape, name=None, attr={}):\n return self._build_op('Reshape', [input_name, shape], name=name)",
"def preserve_shape(func):\n @wraps(func)\n def wrapped_function(img, *args, **kwargs):\n shape = img.shape\n result = func(img, *args, **kwargs)\n result = result.reshape(shape)\n return result\n\n return wrapped_function",
"def _flatten(self, inputT, size):\n return tf.reshape(inputT, (-1, size))",
"def img_reshape(self, input_img):\n _img = np.transpose(input_img, (1, 2, 0)) \n _img = np.flipud(_img)\n _img = np.reshape(_img, (1, img_dim[0], img_dim[1], img_dim[2]))\n return _img",
"def reshape_output_shape_0(input_shape): \n shape_1 = input_shape[0]\n shape_2 = input_shape[1]\n shape_3 = input_shape[2]\n return(shape_1, shape_2, shape_3, 1)",
"def _create_reshape(cls, onnx_node, inputs, opset_version):\n shape = tensor.to_numpy(inputs.pop(1)).astype(np.int32).tolist()\n onnx_node.consumed_inputs.append(onnx_node.inputs[1])\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return _, forward(shape)",
"def reshape(self, *shape):\n return Signal(self._initial_value.reshape(*shape),\n name=\"%s.reshape(%s)\" % (self.name, shape),\n base=self.base)",
"def flatten(x, name=\"flatten\"):\n all_dims_exc_first = np.prod([v.value for v in x.get_shape()[1:]])\n o = tf.reshape(x, [-1, all_dims_exc_first], name=name)\n return o",
"def _reshape_like(F, x, y):\n return x.reshape(y.shape) if F is ndarray else F.reshape_like(x, y)",
"def _reshape_output(self, output):\n output = np.transpose(output, [0, 2, 3, 1])\n _, height, width, _ = output.shape\n dim1, dim2 = height, width\n dim3 = 3\n # There are CATEGORY_NUM=80 object categories:\n dim4 = (4 + 1 + CATEGORY_NUM)\n return np.reshape(output, (dim1, dim2, dim3, dim4))"
]
| [
"0.7709701",
"0.73209643",
"0.709344",
"0.7088738",
"0.6896953",
"0.67383665",
"0.67162436",
"0.6679906",
"0.63716567",
"0.63189733",
"0.63096625",
"0.63083035",
"0.6296938",
"0.62918407",
"0.62788635",
"0.62673515",
"0.6232674",
"0.61841697",
"0.61799854",
"0.614986",
"0.6125671",
"0.6071603",
"0.6062815",
"0.6056227",
"0.60536015",
"0.6051136",
"0.60169387",
"0.6011743",
"0.5999932",
"0.59932005"
]
| 0.78366554 | 0 |
Convolve image [img] with [kernel]. | def convolution(img, kernel, padding='fill'):
kernel = np.rot90(kernel, 2)
h,w = kernel.shape[:2]
t,b,l,r = (h-1)//2, h//2, (w-1)//2, w//2 # Use numpy padding because it works for >2d
padshape = [(t,b),(l,r)]+[(0,0)]*(len(img.shape[2:]))
padded_img = np.pad(img, padshape, mode={'fill':'constant','replicate':'edge'}[padding])
conved_img = np.zeros_like(img)
for i in 1+np.arange(-h//2,h//2):
for j in 1+np.arange(-w//2,w//2):
if kernel[t+i,l+j]==0: continue
conved_img += kernel[t+i,l+j]*padded_img[t+i:-b+i or None,l+j:-r+j or None]
return conved_img | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _convolve_2d(kernel, image):\n\n nx = image.shape[0]\n ny = image.shape[1]\n nkx = kernel.shape[0]\n nky = kernel.shape[1]\n wkx = nkx // 2\n wky = nky // 2\n\n result = np.zeros(image.shape, dtype=float32)\n\n for i in prange(0, nx, 1):\n iimin = max(i - wkx, 0)\n iimax = min(i + wkx + 1, nx)\n for j in prange(0, ny, 1):\n jjmin = max(j - wky, 0)\n jjmax = min(j + wky + 1, ny)\n num = 0.0\n for ii in range(iimin, iimax, 1):\n iii = wkx + ii - i\n for jj in range(jjmin, jjmax, 1):\n jjj = wky + jj - j\n num += kernel[iii, jjj] * image[ii, jj]\n result[i, j] = num\n\n return result",
"def image_conv(image, kernel):\n \n # Filter2D used for performance\n return cv2.filter2D(image, -1, kernel)",
"def convolve_grayscale_same(images, kernel):\n imgshape = images.shape\n h = images.shape[1]\n w = images.shape[2]\n kh = kernel.shape[0]\n kw = kernel.shape[1]\n # conved = np.zeros((imgshape[0], h - kh + 1, w - kw + 1))\n conved = np.zeros(imgshape)\n ph = int((kh) / 2)\n pw = int((kw) / 2)\n # print(conved.shape)\n # print(kernel.shape, images.shape)\n # print(kernel[None, :, :].shape)\n padimg = np.pad(images, ((0, 0), (ph, ph), (pw, pw)), 'constant',\n constant_values=0)\n for i in range(0, h):\n for j in range(0, w):\n subs = padimg[:, i:i + kh, j:j + kw]\n # ip = i + ph\n # jp = j + pw\n conved[:, i, j] = np.sum((kernel[None, :, :] * subs),\n axis=(1, 2))\n\n return conved",
"def convolution_2d(img, kernel):\n # TODO write convolution of arbritrary sized convolution here\n # Hint: you need the kernelsize\n\n offset = int(kernel.shape[0] / 2)\n irows, icols = img.shape\n newimg = np.zeros((irows - offset, icols - offset, offset * 2 + 1, offset * 2 + 1))\n nrows, ncols, _, _ = newimg.shape\n for x in range(nrows - 1):\n for y in range(ncols - 1):\n newimg[x, y, :, :] = img[x:x + offset * 2 + 1, y:y + offset * 2 + 1]\n newimg *= kernel\n\n newimg = np.sum(newimg, axis=3)\n newimg = np.sum(newimg, axis=2)\n\n return newimg",
"def convolve_grayscale_same(images, kernel):\n\n # num images\n n_images = images.shape[0]\n\n # input_width and input_height\n i_h = images.shape[1]\n i_w = images.shape[2]\n\n # kernel_width and kernel_height\n\n k_h = kernel.shape[0]\n k_w = kernel.shape[1]\n\n # pad_h ⊛ = int (k_h - 1)/2\n # pad_w ⊛ = int (k_w - 1)/2\n p_h = int((k_h - 1) / 2)\n p_w = int((k_w - 1) / 2)\n\n if k_h % 2 == 0:\n p_h = int(k_h / 2)\n\n if k_w % 2 == 0:\n p_w = int(k_w / 2)\n\n # output_height and output_width\n # H = i_h + 2pad - k_h + 1, W = i_w + 2pad - k_w + 1\n o_h = i_h + 2 * p_h - k_h + 1\n o_w = i_w + 2 * p_w - k_w + 1\n\n if k_h % 2 == 0:\n o_h = i_h + 2 * p_h - k_h\n\n if k_w % 2 == 0:\n o_w = i_w + 2 * p_w - k_w\n\n # creating outputs of size: n_images, o_h x o_w\n outputs = np.zeros((n_images, o_h, o_w))\n\n # creating pad of zeros around the output images\n padded_imgs = np.pad(images,\n pad_width=((0, 0), (p_h, p_h), (p_w, p_w)),\n mode=\"constant\",\n constant_values=0)\n\n # vectorizing the n_images into an array\n imgs_arr = np.arange(0, n_images)\n\n # iterating over the output array and generating the convolution\n for x in range(o_h):\n for y in range(o_w):\n x1 = x + k_h\n y1 = y + k_w\n outputs[imgs_arr, x, y] = np.sum(np.multiply(\n padded_imgs[imgs_arr, x: x1, y: y1], kernel), axis=(1, 2))\n\n return outputs",
"def convolve2d(img, kernel):\n #Flip the kernel\n kernel = utils.flip2d(kernel) \n #print(len(kernel))\n \n c = copy.deepcopy(img)\n \n #print(len(c))\n #Padd the image\n pad = int((len(kernel)-1)/2)\n\n\n padded_img = utils.zero_pad(img,pad,pad)\n #print(len(padded_img), len(padded_img[0]))\n #print(len(kernel))\n #print(len(img)**2)\n og_img=[]\n#c = copy.deepcopy(img)\n j=0\n offset = 0\n for m in range(len(img) * len(img[0])): # size of kernel x kernel\n x = []\n \n for i in range(len(kernel)): #3 is kernel size\n #print(i,j)\n x.append(padded_img[i+offset][j:j+len(kernel)])\n #print((x))\n sum = 0\n for k in range(len(kernel)):\n for l in range(len(kernel[0])):\n sum+= x[k][l] * kernel[k][l]\n #print(i,j)\n #print(sum)\n og_img.append(sum) \n j+=1\n if (j == len(img[0])):\n j = 0\n offset+= 1\n \n #print(len(img), len(img[0]))\n final_img = []\n for i in range(0,(len(img)*len(img[0])),len(img[0])):\n final_img.append(og_img[i:i+len(img[0])])\n #print(len(final_img)), len(final_img[0])\n return final_img\n\n # TODO: implement this function.",
"def conv(image, kernel):\n Hi, Wi = image.shape\n Hk, Wk = kernel.shape\n out = np.zeros((Hi, Wi))\n\n # For this assignment, we will use edge values to pad the images.\n # Zero padding as used in the previous assignment can make\n # derivatives at the image boundary very big.\n \n pad_width0 = Hk // 2\n pad_width1 = Wk // 2\n pad_width = ((pad_width0,pad_width0),(pad_width1,pad_width1))\n padded = np.pad(image, pad_width, mode='edge') \n\n #####################################\n # START YOUR CODE HERE #\n #####################################\n kernel = np.flipud(np.fliplr(kernel)) # flip h/v\n for h in range(Hi):\n for w in range(Wi):\n out[h, w] = np.sum(np.multiply(kernel, padded[h : h + Hk, w : w + Wk]))\n ######################################\n # END OF YOUR CODE #\n ######################################\n\n return out",
"def convolve_2d(image: xr.DataArray,\n kernel,\n pad=True,\n use_cuda=True) -> xr.DataArray:\n # Don't allow padding on (1, 1) kernel\n if (kernel.shape[0] == 1 and kernel.shape[1] == 1):\n pad = False\n\n if pad:\n pad_rows = kernel.shape[0] // 2\n pad_cols = kernel.shape[1] // 2\n pad_width = ((pad_rows, pad_rows),\n (pad_cols, pad_cols))\n else:\n # If padding is not desired, set pads to 0\n pad_rows = 0\n pad_cols = 0\n pad_width = 0\n\n padded_image = np.pad(image, pad_width=pad_width, mode=\"reflect\")\n result = np.empty_like(padded_image)\n\n if has_cuda() and use_cuda:\n griddim, blockdim = cuda_args(padded_image.shape)\n _convolve_2d_cuda[griddim, blockdim](result, kernel, padded_image)\n else:\n result = _convolve_2d(kernel, padded_image)\n\n if pad:\n result = result[pad_rows:-pad_rows, pad_cols:-pad_cols]\n\n if result.shape != image.shape:\n raise ValueError(\"Output and input rasters are not the same shape.\")\n\n return result",
"def convolve_grayscale_same(images, kernel):\n m = images.shape[0]\n h = images.shape[1]\n w = images.shape[2]\n kh = kernel.shape[0]\n kw = kernel.shape[1]\n padh = int(kh / 2)\n padw = int(kw / 2)\n pad = ((0, 0), (padh, padh), (padw, padw))\n conv = np.zeros([m, h, w])\n imagePad = np.pad(images, pad_width=pad, mode='constant')\n for i in range(h):\n for j in range(w):\n image = imagePad[:, i:i+kh, j:j+kw]\n conv[:, i, j] = np.multiply(image, kernel).sum(axis=(1, 2))\n return conv",
"def convolution(img, kernel, padding=True):\n result = np.zeros_like(img)\n p_size_i = kernel.shape[0] // 2\n p_size_j = kernel.shape[1] // 2\n\n if padding:\n padded_img = np.zeros((img.shape[0] + 2 * p_size_i, img.shape[1] + 2 * p_size_j))\n i_first = p_size_i\n i_last = padded_img.shape[0] - p_size_i - 1\n j_first = p_size_j\n j_last = padded_img.shape[1] - p_size_j - 1\n padded_img[i_first: i_last + 1, j_first: j_last + 1] = img\n else:\n padded_img = img.copy()\n i_first = p_size_i\n i_last = padded_img.shape[0] - p_size_i - 1\n j_first = p_size_j\n j_last = padded_img.shape[1] - p_size_j - 1\n \n for i in range(i_first, i_last):\n for j in range(j_first, j_last):\n window = padded_img[i - p_size_i: i + p_size_i + 1, j - p_size_j: j + p_size_j + 1]\n res_pix = np.sum(window * kernel)\n result[i - p_size_i, j - p_size_j] = res_pix\n return result",
"def conv(image, kernel):\n Hi, Wi = image.shape\n Hk, Wk = kernel.shape\n out = np.zeros((Hi, Wi))\n\n # For this assignment, we will use edge values to pad the images.\n # Zero padding will make derivatives at the image boundary very big,\n # whereas we want to ignore the edges at the boundary.\n pad_width0 = Hk // 2\n pad_width1 = Wk // 2\n pad_width = ((pad_width0,pad_width0),(pad_width1,pad_width1))\n padded = np.pad(image, pad_width, mode='edge')\n\n ### YOUR CODE HERE\n for i in range(Hi):\n for j in range(Wi):\n out[i,j] = np.sum(padded[i : i + Hk, j : j + Wk] * np.flip(kernel))\n ### END YOUR CODE\n\n return out",
"def convolve(self, kernel):\n kernel_rows, kernel_cols = kernel.shape\n img_rows, img_cols = self.img_array.shape\n\n print(\"imgae shape: \", self.img_array.shape)\n print(self.img_array[:10,:10])\n\n # flip the kernel\n flipped_kernel = np.zeros(kernel.shape)\n \n ## column flips\n for i in range(flipped_kernel.shape[1]):\n flipped_kernel[:,i] = kernel[:,kernel_cols-i-1]\n kernel = flipped_kernel.copy()\n\n ## row flips\n for i in range(flipped_kernel.shape[0]):\n flipped_kernel[i,:] = kernel[kernel_rows-i-1,:]\n kernel = flipped_kernel.copy()\n print(\"Flipped kernel:\\n\", kernel)\n\n # Handle broders by padding the image with white pixels.\n ## padwidth = kernel_rows // 2 \n padwidth = kernel_rows // 2\n self.img_array_padded = np.pad(self.img_array, padwidth, \n mode='constant', constant_values=255)\n \n # cross correlation\n self.img_array_out = np.zeros(self.img_array.shape)\n\n for y in range(img_cols):\n for x in range(img_rows):\n self.img_array_out[x, y] = \\\n (kernel * self.img_array_padded[x:x+kernel_cols, y:y+kernel_rows]).sum()\n \n # print(self.img_array_out.shape)\n # print(self.img_array_out[:10,:10])\n return self.img_array_out",
"def convolution(image: np.array, kernel: np.array) -> np.array:\n\n # default condition: apply SAME padding, and keep stride at 1\n stride_x = 1\n stride_y = 1\n padding_y = int(len(kernel - 1) / 2)\n padding_x = int(len((kernel[0]) - 1) / 2)\n # create the return array with with the same dimensions as <image>,\n # and then create a padded image\n convolved_image = np.zeros((len(image), len(image[0])))\n padded_image = np.zeros((len(image) + 2 * padding_y,\n len(image[0]) + 2 * padding_x))\n padded_image[padding_x: -padding_x, padding_y: -padding_y] = image\n\n for py in range(0, len(padded_image) - len(kernel), stride_y):\n for px in range(0, len(padded_image[0]) - len(kernel[0]), stride_x):\n # scan the matrix over columns in image array, then shift the matrix\n # down, and repeat\n padded_image_section = padded_image[py: py + len(kernel[0]),\n px: px + len(kernel)]\n # print(padded_image_section)\n convolved_image[py, px] = int(np.tensordot(padded_image_section,\n kernel))\n\n return convolved_image",
"def MyConvolve(img, ff):\n result = np.zeros(img.shape)\n x_len = img.shape[0]\n y_len = img.shape[1]\n\n ff = np.flipud(np.fliplr(ff)) # Flip filters\n\n # Apply filter to pixels\n for x in range(1, x_len - 1):\n for y in range(1, y_len - 1):\n # Left column\n top_left = img[x - 1, y - 1] * ff[0, 0]\n left = img[x, y - 1] * ff[1, 0]\n btm_left = img[x + 1, y - 1] * ff[2, 0]\n # Middle column\n top = img[x - 1, y] * ff[0, 1]\n middle = img[x, y] * ff[1, 1]\n btm = img[x + 1, y] * ff[2, 1]\n # Right column\n top_right = img[x - 1, y + 1] * ff[0, 2]\n right = img[x, y + 1] * ff[1, 2]\n btm_right = img[x + 1, y + 1] * ff[2, 2]\n\n result[x, y] = top_left + left + btm_left + top + middle + btm + top_right + right + btm_right\n\n return result",
"def convolve_image(image, kernel, mode='valid', boundary='symm'):\n chs = []\n for d in range(image.shape[2]):\n channel = sig.convolve2d(image[:, :, d], kernel, mode=mode, boundary=boundary)\n chs.append(channel)\n return np.stack(chs, axis=2)",
"def conv_2D(img,kernel,stride=1):\n\n m,n = img.shape\n r,c = kernel.shape\n\n kernel = np.flip(kernel,axis=1)\n kernel = np.flip(kernel,axis=0)\n\n c_m, c_n = int(np.ceil((m-r+1)/stride)), int(np.ceil((n-c+1)/stride))\n img_conv = np.zeros((c_m,c_n),dtype=float)\n\n for i,j in it.product(range(c_m),range(c_n)):\n img_conv[i,j] = (img[i*stride:i*stride+r,j*stride:j*stride+c] * kernel).sum()\n\n return img_conv",
"def convolution(image, kernel, scale=None, offset=0):\n kernel = np.array(kernel).flatten().tolist()\n if len(kernel)==9:\n size = (3,3)\n elif len(kernel)==25:\n size = (5,5)\n else:\n raise ValueError('Kernel size must be (3,3) or (5,5).')\n return image.filter(ImageFilter.Kernel(size, kernel, scale, offset))",
"def convolve_grayscale_valid(images, kernel):\n imgshape = images.shape\n h = images.shape[1]\n w = images.shape[2]\n kh = kernel.shape[0]\n kw = kernel.shape[1]\n conved = np.zeros((imgshape[0], h - kh + 1, w - kw + 1))\n\n for i in range(0, h - kh + 1):\n for j in range(0, w - kw + 1):\n subs = images[:, i:i + kh, j:j + kw]\n conved[:, i, j] = np.sum((kernel[None, :, :] * subs),\n axis=(1, 2))\n\n return conved",
"def convolve(img, fourier_kernel):\n return np.fft.ifftshift(np.fft.irfft2(np.fft.rfft2(img) * fourier_kernel))",
"def convolve_im(im: np.array,\n kernel: np.array,\n verbose=True):\n\t\n ### START YOUR CODE HERE ### (You can change anything inside this block) \n\t\n H,W = np.shape(im)\n h,w = np.shape(kernel)\n t_b = (H-h)//2\n l_r = (W-w)//2\n kernel_padded = np.pad(kernel, ((t_b, t_b+1),(l_r, l_r+1)), 'constant')\n kernel_padded = np.pad(kernel, ((0, 2*t_b),(0, 2*l_r)), 'constant')\n fft_kernel = np.fft.fft2(kernel_padded, s=None, axes=(-2, -1), norm=None)\n \n \n im_fft = np.fft.fft2(im, s=None, axes=(-2, -1), norm=None) \n im_filt = im_fft*fft_kernel \n conv_result = np.fft.ifft2(im_filt, s=None, axes=(-2, -1), norm=None).real \n\n if verbose:\n # Use plt.subplot to place two or more images beside eachother\n plt.figure(figsize=(12, 4))\n # plt.subplot(num_rows, num_cols, position (1-indexed))\n plt.subplot(1, 2, 1)\n plt.imshow(im, cmap=\"gray\")\n plt.subplot(1, 2, 2) \n plt.imshow(conv_result, cmap=\"gray\")\n\n ### END YOUR CODE HERE ###\n return conv_result",
"def conv_fast(image, kernel):\n Hi, Wi = image.shape\n Hk, Wk = kernel.shape\n out = np.zeros((Hi, Wi))\n\n k = np.flip(np.flip(kernel, 1), 0)\n padding_image = zero_pad(image, Hk//2, Wk//2)\n for i in range(Hi):\n for j in range(Wi):\n out[i, j] = np.sum(np.multiply(padding_image[i:i+Hk, j:j+Wk], k))\n\n return out",
"def convolve_channels(images, kernel, padding='same', stride=(1, 1)):\n m = images.shape[0]\n image_h = images.shape[1]\n image_w = images.shape[2]\n filter_h = kernel.shape[0]\n filter_w = kernel.shape[1]\n s1 = stride[0]\n s2 = stride[1]\n\n if padding == 'valid':\n pad_h = 0\n pad_w = 0\n\n if padding == 'same':\n pad_h = int(((image_h - 1) * s1 + filter_h - image_h) / 2) + 1\n pad_w = int(((image_w - 1) * s2 + filter_w - image_w) / 2) + 1\n\n if type(padding) == tuple:\n pad_h = padding[0]\n pad_w = padding[1]\n\n n_dim1 = int((image_h + 2 * pad_h - filter_h) / stride[0]) + 1\n n_dim2 = int((image_w + 2 * pad_w - filter_w) / stride[1]) + 1\n convolve = np.zeros((m, n_dim1, n_dim2))\n new_images = np.pad(images, ((0, 0), (pad_h, pad_h), (pad_w, pad_w),\n (0, 0)), mode='constant')\n for x in range(n_dim1):\n for y in range(n_dim2):\n mini_matrix = new_images[:, x * s1: x * s1 + filter_h,\n y * s2: y * s2 + filter_w, :]\n values = np.sum(mini_matrix * kernel,\n axis=1).sum(axis=1).sum(axis=1)\n convolve[:, x, y] = values\n return (convolve)",
"def discreteConvolution2D( iImage, iKernel ): \n # pretvori vhodne spremenljivke v np polje in\n # inicializiraj izhodno np polje\n iImage = np.asarray( iImage )\n iKernel = np.asarray( iKernel )\n #------------------------------- za hitrost delovanja\n oImage = ni.convolve( iImage, iKernel, mode='nearest' ) \n return oImage",
"def fftconvolve(array, kernel):\n x = numpy.fft.fftshift(numpy.fft.fftn(image))\n y = numpy.fft.fftshift(numpy.fft.fftn(kernel))\n\n return numpy.real(numpynp.fft.fftshift(\n numpy.fft.ifftn(numpy.fft.ifftshift(x * y))))",
"def convolve(self, *args, **kwargs):\n return _image.image_convolve(self, *args, **kwargs)",
"def convolve_channels(images, kernel, padding='same', stride=(1, 1)):\n m, h, w, c = images.shape\n KernelHeight, kernelWidth, c = kernel.shape\n StrideHeight, StrideWidth = stride\n\n if padding == 'valid':\n PaddingHeight = 0\n PaddingWidth = 0\n elif padding == 'same':\n PaddingHeight = int(\n (((h - 1) * StrideHeight + KernelHeight - h) / 2) + 1)\n PaddingWidth = int((((w - 1) * StrideWidth + kernelWidth - w) / 2) + 1)\n else:\n PaddingHeight, PaddingWidth = padding\n\n OutputH = int(((h + 2 * PaddingHeight - KernelHeight) / StrideHeight) + 1)\n OutputW = int(((w + 2 * PaddingWidth - kernelWidth) / StrideWidth) + 1)\n\n ImagePadded = np.pad(\n images,\n ((0, 0), (PaddingHeight, PaddingHeight),\n (PaddingWidth, PaddingWidth), (0, 0)),\n 'constant'\n )\n\n output = np.zeros((m, OutputH, OutputW))\n ImageRange = np.arange(m)\n\n for i_OutputH in range(OutputH):\n for i_OutputW in range(OutputW):\n s_i_OutputH = i_OutputH * StrideHeight\n s_i_OutputW = i_OutputW * StrideWidth\n flt = ImagePadded[ImageRange,\n s_i_OutputH:KernelHeight + s_i_OutputH,\n s_i_OutputW:kernelWidth + s_i_OutputW,\n :]\n output[ImageRange, i_OutputH, i_OutputW] = np.sum(\n flt * kernel, axis=(1, 2, 3))\n return output",
"def convolveH(image=None,kernel=None):\n res = sg.fftconvolve(image,kernel,mode='full')\n res = res/res.sum()\n rowM,colM = nd.center_of_mass(res)\n size = image.shape[0]/2\n resnew = res[int(rowM)-size:int(rowM)+size,int(colM)-size:int(colM)+size]\n return resnew",
"def conv_nested(image, kernel):\n Hi, Wi = image.shape\n Hk, Wk = kernel.shape\n out = np.zeros((Hi, Wi))\n\n H = Hk // 2\n W = Wk // 2\n for i1 in range(Hi):\n for j1 in range(Wi):\n for i2 in range(Hk):\n for j2 in range(Wk):\n i = i2 - H\n j = j2 - W\n if i1-i<0 or j1-j<0 or i1-i>=Hi or j1-j>=Wi:\n continue\n out[i1, j1] += kernel[i2, j2]*image[i1-i, j1-j]\n return out",
"def convolve(images, kernels, padding='same', stride=(1, 1)):\n m = images.shape[0]\n h = images.shape[1]\n w = images.shape[2]\n c = images.shape[3]\n kh = kernels.shape[0]\n kw = kernels.shape[1]\n nc = kernels.shape[3]\n sh = stride[0]\n sw = stride[1]\n\n if padding == 'same':\n ph = max((h - 1) * sh + kh - h, 0)\n pt = int(np.ceil(ph / 2))\n pb = pt\n pw = max((w - 1) * sw + kw - w, 0)\n pl = int(np.ceil(pw / 2))\n pr = pl\n elif padding == 'valid':\n pt, pb, pl, pr = 0, 0, 0, 0\n else:\n pt, pb = padding[0], padding[0]\n pl, pr = padding[1], padding[1]\n\n oh = ((h - kh + pt + pb) // sh) + 1\n ow = ((w - kw + pl + pr) // sw) + 1\n\n images = np.pad(images, pad_width=((0, 0), (pt, pb), (pl, pr), (0, 0)),\n mode='constant', constant_values=0)\n\n conv = np.zeros((m, oh, ow, nc))\n for k in range(nc):\n for i in range(oh):\n for j in range(ow):\n aux = images[:, i * sh:i * sh + kh, j * sw:j * sw + kw] \\\n * kernels[:, :, :, k]\n conv[:, i, j, k] = np.sum(aux, axis=(1, 2, 3))\n return conv",
"def apply_kernel(x, h, out=None):\n if out is None:\n out = np.zeros(x.shape[1])\n else:\n out.fill(0)\n\n for ind in range(len(h)):\n out += np.convolve(h[ind], x[ind])[:len(out)]\n\n return out"
]
| [
"0.79295087",
"0.7629679",
"0.75895846",
"0.753147",
"0.7514754",
"0.7478905",
"0.7469306",
"0.74531704",
"0.7446393",
"0.7420992",
"0.74199206",
"0.7359379",
"0.73560745",
"0.733671",
"0.7332733",
"0.73180735",
"0.72574836",
"0.72479475",
"0.71656466",
"0.7125642",
"0.7124279",
"0.70667785",
"0.7061848",
"0.7012672",
"0.7002983",
"0.6942834",
"0.69250244",
"0.69162935",
"0.68071306",
"0.6792913"
]
| 0.77688134 | 1 |
Given two lists of keypoint locations and descriptions, compute the correspondences. | def find_correspondences(pts1, pts2, desc1, desc2, match_score_type='ratio'):
N = pts1.shape[0]
X = np.sum(desc1**2, axis=1, keepdims=True)
Y = np.sum(desc2**2, axis=1, keepdims=True).T
XY = np.dot(desc1,desc2.T)
L = X + Y - 2*XY
D = (np.maximum(L, 0))
scores = np.min(D, axis = 1)
indices = np.argmin(D,axis = 1)
corr = []
for j,index in enumerate(indices):
corr.append(np.hstack([pts1[j],pts2[index]]))
if match_score_type=='ratio':
p = np.sort(D, axis = 1)
scores = p[:,0]/p[:,1]
return np.array(corr), indices, scores | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def match(desc1,desc2):\n desc1 = array([d/linalg.norm(d) for d in desc1])\n desc2 = array([d/linalg.norm(d) for d in desc2])\n dist_ratio = 0.6\n desc1_size = desc1.shape\n matchscores = zeros((desc1_size[0],1),'int')\n desc2t = desc2.T # precompute matrix transpose\n for i in range(desc1_size[0]):\n dotprods = dot(desc1[i, :], desc2t) # vector of dot products\n dotprods *= 0.9999\n # inverse cosine and sort, return index for features in second image\n indx = argsort(arccos(dotprods))\n # check if nearest neighbor has angle less than dist_ratio times 2nd\n if arccos(dotprods)[indx[0]] < dist_ratio * arccos(dotprods)[indx[1]]:\n matchscores[i] = int(indx[0])\n return matchscores",
"def apply_feature_matching(desc1: np.ndarray, desc2: np.ndarray,\n match_calculator: Callable[[list, list], float]) -> list:\n\n # Check descriptors dimensions are 2\n assert desc1.ndim == 2, \"Descriptor 1 shape is not 2\"\n assert desc2.ndim == 2, \"Descriptor 2 shape is not 2\"\n\n # Check that the two features have the same descriptor type\n assert desc1.shape[1] == desc2.shape[1], \"Descriptors shapes are not equal\"\n\n # If there is not key points in any of the images\n if desc1.shape[0] == 0 or desc2.shape[0] == 0:\n return []\n\n # number of key points in each image\n num_key_points1 = desc1.shape[0]\n num_key_points2 = desc2.shape[0]\n\n # List to store the matches scores\n matches = []\n\n # Loop over each key point in image1\n # We need to calculate similarity with each key point in image2\n for kp1 in range(num_key_points1):\n # Initial variables which will be updated in the loop\n distance = -np.inf\n y_index = -1\n\n # Loop over each key point in image2\n for kp2 in range(num_key_points2):\n\n # Match features between the 2 vectors\n value = match_calculator(desc1[kp1], desc2[kp2])\n\n # SSD values examples: (50, 200, 70), we need to minimize SSD (Choose 50)\n # In case of SSD matching: (value is returned as a \"negative\" number) (-50, -200, -70)\n # So we compare it with -np.inf. (The sorting will be reversed later)\n\n # NCC values examples: (0.58, 0.87, 0.42), we need to maximize NCC (Choose 0.87)\n # In case of NCC matching: (value is returned as a \"positive\" number) (-58, -0.87, -0.42)\n # So we compare it with -np.inf. (The sorting will be reversed later)\n\n if value > distance:\n distance = value\n y_index = kp2\n\n # Create a cv2.DMatch object for each match and set attributes:\n # queryIdx: The index of the feature in the first image\n # trainIdx: The index of the feature in the second image\n # distance: The distance between the two features\n cur = cv2.DMatch()\n cur.queryIdx = kp1\n cur.trainIdx = y_index\n cur.distance = distance\n matches.append(cur)\n\n return matches",
"def associate(first_list, second_list, offset, max_difference):\n ## obatin all keys\n first_keys = list(first_list)\n second_keys = list(second_list)\n potential_matches = [(abs(a - (b + offset)), a, b)\n for a in first_keys\n for b in second_keys\n if abs(a - (b + offset)) < max_difference]\n potential_matches.sort()\n matches = []\n for diff, a, b in potential_matches:\n if a in first_keys and b in second_keys:\n first_keys.remove(a)\n second_keys.remove(b)\n matches.append((a, b))\n \n matches.sort()\n return matches",
"def match(desc1,desc2):\n\t\n\tdesc1 = array([d/linalg.norm(d) for d in desc1])\n\tdesc2 = array([d/linalg.norm(d) for d in desc2])\n\t\n\tdist_ratio = 0.6\n\tdesc1_size = desc1.shape\n\t\n\tmatchscores = zeros((desc1_size[0],1))\n\tdesc2t = desc2.T #precompute matrix transpose\n\tfor i in range(desc1_size[0]):\n\t\tdotprods = dot(desc1[i,:],desc2t) #vector of dot products\n\t\tdotprods = 0.9999*dotprods\n\t\t#inverse cosine and sort, return index for features in second image\n\t\tindx = argsort(arccos(dotprods))\n\t\t\n\t\t#check if nearest neighbor has angle less than dist_ratio times 2nd\n#\t\tif arccos(dotprods)[indx[0]] < dist_ratio * arccos(dotprods)[indx[1]]:\n\t\tmatchscores[i] = int(indx[0])\n\t\n\treturn matchscores",
"def match(desc1, desc2):\n desc1 = array([d/linalg.norm(d) for d in desc1])\n desc2 = array([d/linalg.norm(d) for d in desc2])\n\n dist_ratio = 0.6\n disc1_size = desc1.shape\n\n matchscores = zeros((desc1_size[0]), \"int\")\n desc2t = desc2.T\n for i in range(desc1_size[0]):\n dotprods = dot(desc1[i, :], desc2t)\n dotprods = 0.9999 * dotprods\n\n indx = argsort(arccos(dotprods))\n\n if arccos(dotprods)[indx[0]] < dist_ratio * arccos(dotprods)[indx[1]]:\n matchscores[i] = int(indx[0])\n\n return matchscores",
"def get_pairwise_matches(pos1, descs1, pos2, descs2, up_to=30):\n assert pos1.shape[0] * pos2.shape[0] < 1e8, \\\n \"Too many points: increase cornerness threshold\"\n assert pos1.shape[0] > 10 and pos1.shape[0] > 10, \\\n \"Not enough points: lower cornerness threshold\"\n # get the similarities between all descriptors\n sims = np.dot(descs1, descs2.T)\n # get the best matches\n mi2 = sims.argmax(axis=1).squeeze()\n ms = sims.max(axis=1).squeeze()\n bmi1 = ms.argsort()[::-1][:up_to]\n bmi2 = mi2[bmi1]\n # return their positions\n bp1 = pos1[bmi1]\n bp2 = pos2[bmi2]\n return bp1, bp2",
"def sources_at_point_pair(self, chrom1, pos1, chrom2, pos2, strain_names):\n coords = [self.genome_index(chrom1, pos1), self.genome_index(chrom2, pos2)]\n mins = [0] * 2\n maxes = [np.sum(self.sizes)] * 2\n coords.sort()\n output = {}\n samples = [[[] for _ in subspecies.iter_subspecies(True)] for _ in subspecies.iter_subspecies(True)]\n key = [subspecies.to_string(s) for s in subspecies.iter_subspecies(True)]\n for strain_name in strain_names:\n intervals = self.sample_dict[strain_name][0]\n sources = self.sample_dict[strain_name][1]\n # find interval containing each location\n i = 0\n interval_indices = [None, None]\n for loc_num in xrange(2):\n while intervals[i] < coords[loc_num]:\n i += 1\n if i > 0:\n mins[loc_num] = max(mins[loc_num], intervals[i - 1])\n maxes[loc_num] = min(maxes[loc_num], intervals[i])\n interval_indices[loc_num] = i\n samples[subspecies.to_ordinal(sources[interval_indices[0]])][\n subspecies.to_ordinal(sources[interval_indices[1]])].append(strain_name)\n output['Key'] = key\n output['Samples'] = samples\n output['Intervals'] = [\n self.chrom_and_pos(mins[0], maxes[0]),\n self.chrom_and_pos(mins[1], maxes[1])\n ]\n return output",
"def _get_points_from_matches(self, keypoints_1, keypoints_2, matches):\n points_1, points_2 = [], []\n \n for match in matches:\n points_1.append(keypoints_1[match.queryIdx].pt)\n points_2.append(keypoints_2[match.trainIdx].pt)\n\n return np.array(points_1), np.array(points_2)",
"def match_keypoints(desc1, desc2, k=2, thresh=.9, matchertype=None):\n if not matchertype:\n # default is brute forcce\n matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)\n matches = matcher.knnMatch(desc1, desc2, k=3)\n print(matches)\n\n elif matchertype == \"FlannORB\":\n #\n FLANN_INDEX_LSH = 6\n search_params = dict(checks=50)\n index_params = dict(algorithm=FLANN_INDEX_LSH,\n table_number=6, # 12\n key_size=12, # 20\n multi_probe_level=1) # 2\n matcher = cv2.FlannBasedMatcher(index_params, search_params)\n matches = matcher.knnMatch(desc1, desc2, k=2)\n\n elif matchertype == \"FlannSURF\":\n FLANN_INDEX_KDTREE = 2\n index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=500)\n search_params = dict(checks=50)\n matcher = cv2.FlannBasedMatcher(index_params, search_params)\n matches = matcher.knnMatch(desc1, desc2, k=2)\n\n goodmatches = []\n for m, n in matches:\n if m.distance < thresh * n.distance:\n goodmatches.append(m)\n return goodmatches",
"def match_objects(coords1,coords2,tail1=(),tail2=(),accuracy=1.):\n acc2=accuracy**2\n nc=len(coords1)\n np1=len(coords1[0])\n np2=len(coords2[0])\n a1=array(coords1)\n a2=array(coords2)\n nt1=len(tail1)\n for i in range(nt1): \n if len(tail1[i])!= np1: raise 'Not the same lenght as coordinates 1'\n nt2=len(tail2)\n for i in range(nt2): \n if len(tail2[i])!= np2: raise 'Not the same lenght as coordinates 2'\n match=zeros(np1, int)-1\n for j in range(np1):\n #dist=add.reduce((a1[:,j,NewAxis]-a2[:,:])**2)\n a1j = a1[:,j]\n dist=add.reduce((reshape(a1j, (len(a1j), 1)) - a2)**2)\n i_min=argmin(dist)\n if dist[i_min]<acc2:match[j]=i_min\n good=greater_equal(match,0)\n n1=compress(good,list(range(np1))) \n match=compress(good,match)\n a1=compress(good,a1)\n salida=list(a1)\n for i in range(nt1):\n if type(tail1[i][0])==type('si'):\n t=[]\n for j in n1: t.append(tail1[i][j])\n else:\n t=take(tail1[i],n1)\n salida.append(t)\n for i in range(nt2):\n if type(tail2[i][0])==type('si'):\n t=[]\n for j in match: t.append(tail2[i][j])\n else:\n t=take(tail2[i],match)\n salida.append(t)\n return salida",
"def match(desc1,desc2,threshold=0.5):\n n = len(desc1[0])\n # pair-wise distances\n d = -np.ones((len(desc1),len(desc2)))\n for i in range(len(desc1)):\n for j in range(len(desc2)):\n d1 = (desc1[i] - np.mean(desc1[i])) / np.std(desc1[i])\n d2 = (desc2[j] - np.mean(desc2[j])) / np.std(desc2[j])\n ncc_value = sum(d1 * d2) / (n-1)\n if ncc_value > threshold:\n d[i,j] = ncc_value\n ndx = np.argsort(-d)\n matchscores = ndx[:,0]\n return matchscores",
"def correspondence_points(img1, img2, tag='c'):\n if len(img1.shape) == 3:\n img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)\n if len(img2.shape) == 3:\n img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)\n\n detector = cv2.SURF(800)\n norm = cv2.NORM_L2\n flann_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)\n matcher = cv2.FlannBasedMatcher(flann_params, {}) # bug : need to pass empty dict (#1329)\n kp1, desc1 = detector.detectAndCompute(img1, None)\n kp2, desc2 = detector.detectAndCompute(img2, None)\n raw_matches = matcher.knnMatch(desc1, trainDescriptors = desc2, k = 2) #2\n p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)\n\n if len(p1) >= 4:\n H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)\n print '%d / %d inliers/matched' % (np.sum(status), len(status))\n status = status.reshape(-1) # flatten\n p1 = p1[status == 1]\n p2 = p2[status == 1]\n kp_pairs = [kp_pairs[i] for i in range(len(kp_pairs)) if status[i] == 1]\n else:\n # Just depend on the thresholding for filtering matches\n p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches, ratio=0.3)\n\n draw_correspondence_points(img1, img2, kp_pairs, tag=tag)\n return p1, p2, kp_pairs",
"def correspondences(labels1,labels2,return_counts=True):\n q = 100000\n assert amin(labels1)>=0 and amin(labels2)>=0\n assert amax(labels2)<q\n combo = labels1*q+labels2\n result = unique(combo, return_counts=return_counts)\n if return_counts:\n result, counts = result\n result = array([result//q,result%q,counts])\n else:\n result = array([result//q,result%q])\n return result",
"def get_correspondences(P1, P2):\r\n # Find the closest points to triangle 1 by exhaustive search using the\r\n # squared Euclidean distance\r\n P2matches, A_2 = get_closest_points_2d(P1, P2)\r\n\r\n # The matching pairs may contain irrelevant data. Keep only the matching\r\n # points that are close enough within a threshold parameter\r\n\r\n threshold = numpy.std(A_2)*2\r\n # TODO: determine a good threshold. Set the 'threshold' variable\r\n # to be your best choice, and write your justification to this answer\r\n # in the below:\r\n #\r\n # [Task 4 answer goes here]\r\n\r\n # Question 4: In selecting to use the standard deviation as how to evaluate the threshold, my code ensures that\r\n # the threshold is strictly dependent on any array of points (i.e. will work regardless of images geometry). Also,\r\n # by taking two standard deviations as the threshold, some uncertainty in the calculations is accounted for.\r\n # Finally, this threshold was found to minimize the mean squared error over 20 iterations for this specific\r\n # problem. The final MSE should be 5.35.\r\n\r\n S1, S2 = threshold_closest_points(P1,P2matches,threshold)\r\n\r\n return S1,S2",
"def _common_keypoints(self, *others):\n matches = self._match_table.dropna(0)\n keypoints = []\n for other in others:\n indices = matches[other.position.id].astype(int).values\n # the coordinates have to be flipped for later processing, hence the ::-1\n keypoints.append(other.keypoints[indices, ::-1])\n return np.stack(keypoints, axis=1)",
"def match_features(desc1, desc2, min_score):\n\n ## Reshape D1,2 to (N1, k^2), (k^2, N2)\n\n D1 = desc1.reshape((desc1.shape[0], -1))\n D2 = desc2.reshape((desc2.shape[0], -1)).T\n\n ## Calc M = D1 * D2, shape (N1, N2)\n ## Mi,j = match score of pt i from I1 and pt 2 from I2.\n\n M = D1.dot(D2)\n\n ## Get candidates list I1 and I2, shape (2, N2), (\n ## total_cand = 4-j cands of the 2-i cands in index i\n\n cols_cand = np.argpartition(M, -2, axis=0)[-2:]\n rows_cand = np.argpartition(M.T, -2, axis=0)[-2:]\n total_cand = rows_cand[:, cols_cand]\n\n ## Mark matches where i appear in the ith col\n ## concat matches.\n\n index_map = np.ones(cols_cand.shape, dtype=np.int) * np.arange(cols_cand.shape[-1])\n match = (total_cand == index_map)\n\n desc1_match = np.concatenate((cols_cand[match[0]],\n cols_cand[match[1]]))\n desc2_match = np.concatenate((index_map[match[0]],\n index_map[match[1]]))\n\n ## Discard matches below min_score\n\n satisfty_min = np.where(M[desc1_match, desc2_match] >= min_score)\n desc1_match = desc1_match[satisfty_min]\n desc2_match = desc2_match[satisfty_min]\n\n ## Remove duplicate matches, keep max score pair.\n\n order = np.argsort(M[desc1_match, desc2_match])[::-1]\n desc1_match = desc1_match[order]\n desc2_match = desc2_match[order]\n\n unqe = np.unique(desc1_match, return_index=True)[1]\n desc1_match = desc1_match[unqe]\n desc2_match = desc2_match[unqe]\n\n return [desc1_match, desc2_match]",
"def match_features(desc1, desc2, min_score):\n\n match = []\n flat_des1 = desc1.reshape(desc1.shape[0],desc1.shape[1] * desc1.shape[2])\n flat_des2 = desc2.reshape(desc2.shape[0], desc2.shape[1] * desc2.shape[2])\n combine = np.dot(flat_des1,flat_des2.transpose())\n\n for i in range (combine.shape[0]):\n max_index_row = combine[i].argsort()[-2:]\n for j in max_index_row:\n max_index_col = combine[:,j].argsort()[-2:]\n if i in max_index_col and combine[i][j] >= min_score:\n match.append([i,j])\n\n match = np.array(match)\n return [match.transpose()[0],match.transpose()[1]] #todo should be list?",
"def vsone_feature_matching(kpts1, vecs1, kpts2, vecs2, dlen_sqrd2, cfgdict={},\n flann1=None, flann2=None, verbose=None):\n import vtool as vt\n import pyflann\n from vtool import spatial_verification as sver\n #import vtool as vt\n sver_xy_thresh = cfgdict.get('sver_xy_thresh', .01)\n ratio_thresh = cfgdict.get('ratio_thresh', .625)\n refine_method = cfgdict.get('refine_method', 'homog')\n symmetric = cfgdict.get('symmetric', False)\n K = cfgdict.get('K', 1)\n Knorm = cfgdict.get('Knorm', 1)\n #ratio_thresh = .99\n # GET NEAREST NEIGHBORS\n checks = 800\n #pseudo_max_dist_sqrd = (np.sqrt(2) * 512) ** 2\n #pseudo_max_dist_sqrd = 2 * (512 ** 2)\n if verbose is None:\n verbose = True\n\n flann_params = {'algorithm': 'kdtree', 'trees': 8}\n if flann1 is None:\n flann1 = vt.flann_cache(vecs1, flann_params=flann_params, verbose=verbose)\n\n #print('symmetric = %r' % (symmetric,))\n if symmetric:\n if flann2 is None:\n flann2 = vt.flann_cache(vecs2, flann_params=flann_params, verbose=verbose)\n\n try:\n try:\n num_neighbors = K + Knorm\n fx2_to_fx1, fx2_to_dist = normalized_nearest_neighbors(flann1, vecs2, num_neighbors, checks)\n #fx2_to_fx1, _fx2_to_dist = flann1.nn_index(vecs2, num_neighbors=K, checks=checks)\n if symmetric:\n fx1_to_fx2, fx1_to_dist = normalized_nearest_neighbors(flann2, vecs1, K, checks)\n\n except pyflann.FLANNException:\n print('vecs1.shape = %r' % (vecs1.shape,))\n print('vecs2.shape = %r' % (vecs2.shape,))\n print('vecs1.dtype = %r' % (vecs1.dtype,))\n print('vecs2.dtype = %r' % (vecs2.dtype,))\n raise\n if symmetric:\n is_symmetric = flag_symmetric_matches(fx2_to_fx1, fx1_to_fx2)\n fx2_to_fx1 = fx2_to_fx1.compress(is_symmetric, axis=0)\n fx2_to_dist = fx2_to_dist.compress(is_symmetric, axis=0)\n\n assigntup = assign_unconstrained_matches(fx2_to_fx1, fx2_to_dist)\n\n fx2_match, fx1_match, fx1_norm, match_dist, norm_dist = assigntup\n fm_ORIG = np.vstack((fx1_match, fx2_match)).T\n fs_ORIG = 1 - np.divide(match_dist, norm_dist)\n # APPLY RATIO TEST\n fm_RAT, fs_RAT, fm_norm_RAT = ratio_test(fx2_match, fx1_match, fx1_norm,\n match_dist, norm_dist,\n ratio_thresh)\n\n # SPATIAL VERIFICATION FILTER\n #with ut.EmbedOnException():\n match_weights = np.ones(len(fm_RAT))\n svtup = sver.spatially_verify_kpts(kpts1, kpts2, fm_RAT, sver_xy_thresh,\n dlen_sqrd2, match_weights=match_weights,\n refine_method=refine_method)\n if svtup is not None:\n (homog_inliers, homog_errors, H_RAT) = svtup[0:3]\n else:\n H_RAT = np.eye(3)\n homog_inliers = []\n fm_RAT_SV = fm_RAT.take(homog_inliers, axis=0)\n fs_RAT_SV = fs_RAT.take(homog_inliers, axis=0)\n fm_norm_RAT_SV = fm_norm_RAT[homog_inliers]\n\n top_percent = .5\n top_idx = ut.take_percentile(fx2_to_dist.T[0].argsort(), top_percent)\n fm_TOP = fm_ORIG.take(top_idx, axis=0)\n fs_TOP = fx2_to_dist.T[0].take(top_idx)\n #match_weights = np.ones(len(fm_TOP))\n #match_weights = (np.exp(fs_TOP) / np.sqrt(np.pi * 2))\n match_weights = 1 - fs_TOP\n #match_weights = np.ones(len(fm_TOP))\n svtup = sver.spatially_verify_kpts(kpts1, kpts2, fm_TOP, sver_xy_thresh,\n dlen_sqrd2, match_weights=match_weights,\n refine_method=refine_method)\n if svtup is not None:\n (homog_inliers, homog_errors, H_TOP) = svtup[0:3]\n np.sqrt(homog_errors[0] / dlen_sqrd2)\n else:\n H_TOP = np.eye(3)\n homog_inliers = []\n fm_TOP_SV = fm_TOP.take(homog_inliers, axis=0)\n fs_TOP_SV = fs_TOP.take(homog_inliers, axis=0)\n\n matches = {\n 'ORIG' : MatchTup2(fm_ORIG, fs_ORIG),\n 'RAT' : MatchTup3(fm_RAT, fs_RAT, fm_norm_RAT),\n 'RAT+SV' : MatchTup3(fm_RAT_SV, fs_RAT_SV, 
fm_norm_RAT_SV),\n 'TOP' : MatchTup2(fm_TOP, fs_TOP),\n 'TOP+SV' : MatchTup2(fm_TOP_SV, fs_TOP_SV),\n }\n output_metdata = {\n 'H_RAT': H_RAT,\n 'H_TOP': H_TOP,\n }\n\n except MatchingError:\n fm_ERR = np.empty((0, 2), dtype=np.int32)\n fs_ERR = np.empty((0, 1), dtype=np.float32)\n H_ERR = np.eye(3)\n matches = {\n 'ORIG' : MatchTup2(fm_ERR, fs_ERR),\n 'RAT' : MatchTup3(fm_ERR, fs_ERR, fm_ERR),\n 'RAT+SV' : MatchTup3(fm_ERR, fs_ERR, fm_ERR),\n 'TOP' : MatchTup2(fm_ERR, fs_ERR),\n 'TOP+SV' : MatchTup2(fm_ERR, fs_ERR),\n }\n output_metdata = {\n 'H_RAT': H_ERR,\n 'H_TOP': H_ERR,\n }\n\n return matches, output_metdata",
"def prepare_lists(dist1, dist2):\n\tx_dist = []\n\ty_dist = []\n\tfor i, freq in dist1.items():\n\t\tif i not in dist2:\n\t\t\tcontinue\n\t\tx_dist.append(freq)\n\t\ty_dist.append(dist2[i])\n\tx_dist=np.array(x_dist)\n\ty_dist=np.array(y_dist)\n\treturn x_dist, y_dist",
"def compute_pck(descs0, kps0, descs1, kps1, thresh=3):\n # matches points from image 0 to image 1, using NN\n if len(descs0.shape) == 3:\n idxs = nn_set2set_match(descs0, descs1)\n else:\n idxs = nn_match(descs0, descs1) # matched image 1 keypoint indices\n predicted = kps1[idxs] # matched image 1 keypoints\n\n correct = np.linalg.norm(predicted - kps1, 2, axis=1) <= thresh\n\n return np.sum(correct) / len(correct)",
"def main(a, b):\n edit_distance = make_edit(set(a+b))\n dist, align = edit_distance(a, b)\n print('Distance: {0}'.format(dist))\n x, y = zip(*align)\n print(''.join(x))\n print(''.join(y))",
"def match_min2(coords1,coords2,tail1=(),tail2=()):\n nc=len(coords1)\n np1=len(coords1[0])\n np2=len(coords2[0])\n a1=array(coords1)\n a2=array(coords2)\n nt1=len(tail1)\n for i in range(nt1): \n if len(tail1[i])!= np1: raise 'Not the same lenght as coordinates 1'\n nt2=len(tail2)\n for i in range(nt2): \n if len(tail2[i])!= np2: raise 'Not the same lenght as coordinates 2'\n match=zeros(np1, int)-1\n dist_min=zeros(np1)*1.\n x2=zeros(np1)*1.\n y2=zeros(np1)*1.\n for j in range(np1):\n #dist=add.reduce((a1[:,j,NewAxis]-a2[:,:])**2)\n a1j = a1[:,j]\n dist=add.reduce((reshape(a1j, (len(a1j), 1)) - a2)**2)\n i_min=argmin(dist)\n dist_min[j]=dist[i_min]\n x2[j],y2[j]=a2[0,i_min],a2[1,i_min]\n match[j]=i_min\n \n salida=list(a1)\n salida.append(x2)\n salida.append(y2)\n\n for i in range(nt1):salida.append(tail1[i])\n \n for i in range(nt2):\n if type(tail2[i][0])==type('si'):\n t=[]\n for j in match: t.append(tail2[i][j])\n else:\n t=take(tail2[i],match)\n salida.append(t)\n\n salida.append(dist_min)\n return tuple(salida)",
"def painting_matchings_local_desc(query_imgs, db_imgs, text_masks, options):\n tmp_img_format = []\n tmp_mask_format = []\n for i in range(len(query_imgs)):\n for j in range(len(query_imgs[i])):\n tmp_img_format.append(query_imgs[i][j])\n tmp_mask_format.append(text_masks[i][j])\n\n ## Keypoint + local descriptor extraction\n print(\"Obtaining query set descriptors\")\n kp_qs = keypoint_extraction(tmp_img_format, tmp_mask_format, options)\n desc_qs = descriptor_extraction(tmp_img_format, tmp_mask_format, kp_qs, options)\n\n print(\"Obtaining database descriptors\")\n kp_db = keypoint_extraction(db_imgs, None, options)\n desc_db = descriptor_extraction(db_imgs, None, kp_db, options)\n\n ## Match local descriptors \n print(\"Comparing...\") \n num_matches = match_descriptors_qs_db(tmp_img_format, db_imgs, desc_qs, desc_db, kp_qs, kp_db, options.km, options)\n return num_matches",
"def match_coords(list1, list2, tol = 1.5):\n tol1, tol2 = tol/3600.0, (tol/3600.0)**2\n jmin, match_idx, best_dr2 = 0, [None]*len(list1), [tol2]*len(list1)\n # This next bit is rather tricky because we have to sort the lists,\n # but also remember where we put each item. Here i, j = sorted indices,\n # while ci[i], ci[j] = corresponding actual indices in original lists.\n ci = np.array([c.ra for c in list1]).argsort()\n cj = np.array([c.ra for c in list2]).argsort()\n for i in range(len(list1)):\n CD = np.cos(list1[ci[i]].dec*np.pi/180.0)\n ramin, ramax = list1[ci[i]].ra - tol1/CD, list1[ci[i]].ra + tol1/CD\n decmin, decmax = list1[ci[i]].dec - tol1, list1[ci[i]].dec + tol1\n # Inch along in list2 until we find the part that matches list1 in RA\n while jmin < len(list2) and list2[cj[jmin]].ra < ramin: jmin += 1\n if jmin == len(list2): break\n # Now go through all the RA matches and check the RA+DEC distance.\n j = jmin\n while j < len(list2) and list2[cj[j]].ra < ramax:\n dr2 = ((CD*(list1[ci[i]].ra-list2[cj[j]].ra))**2 +\n (list1[ci[i]].dec-list2[cj[j]].dec)**2)\n if dr2 < best_dr2[ci[i]]:\n match_idx[ci[i]], best_dr2[ci[i]] = cj[j], dr2\n j += 1\n # Save and return the index in list2 of the best-matched object to each\n # item in list1, and the distances between corresponding best matches.\n best_dr = [np.sqrt(dr2)*3600 for dr2 in best_dr2]\n return match_idx, best_dr",
"def testFindCorrespondence(self):\n # Create some dummy keypoints and descriptors to match. Make the descriptors really far apart to be sure.\n keypoints1 = []\n descriptors1 = numpy.zeros(shape=(3, 1))\n keypoint = cv2.KeyPoint()\n for i in range(3):\n keypoint.pt = (float(i), 0.0)\n keypoints1.append(keypoint)\n descriptors1[i] = i*100.0\n keypoints2 = []\n descriptors2 = numpy.zeros(shape=(5, 1))\n for i in range(5):\n keypoint.pt = (0.0, float(i))\n keypoints2.append(keypoint)\n descriptors2[i] = i*105.0\n (first_points, second_points) = self.evaluator._findCorrespondence(\n keypoints1, descriptors1, keypoints2, descriptors2)\n expected_first = numpy.array([[0.0, 0.0], [1.0, 0.0], [2.0, 0.0]])\n expected_second = numpy.array([[0.0, 0.0], [0.0, 1.0], [0.0, 2.0]])\n self.assertTrue(numpy.array_equal(first_points, expected_first))\n self.assertTrue(numpy.array_equal(second_points, expected_second))",
"def distance_map(xs1, xs2):\n return jax.vmap(lambda x1: jax.vmap(lambda x2: euclidean_distance(x1, x2))(xs2))(xs1)",
"def alignPairShapes(s1,s2,weights):\n\n\n s1=np.asarray(s1)\n s2=np.asarray(s2)\n \n x1k=s1[:,0]\n y1k=s1[:,1]\n x2k=s2[:,0]\n y2k=s2[:,1]\n\n X1=sum(x1k*weights) \n X2=sum(x2k*weights)\n\n Y1=sum(y1k*weights)\n Y2=sum(y2k*weights)\n\n Z=sum(weights*(pow(x2k,2)+pow(y2k,2)))\n\n W=sum(weights)\n\n C1=sum(weights*(x1k*x2k+y1k*y2k))\n\n C2=sum(weights*(y1k*x2k-x1k*y2k))\n \n a=np.asarray([[X2,-Y2,W,0],[Y2,X2,0,W],[Z,0,X2,Y2],[0,Z,-Y2,X2]])\n b=np.asarray([X1,Y1,C1,C2])\n\n x=np.linalg.solve(a,b)\n\n ax=x[0]\n ay=x[1]\n tx=x[2]\n ty=x[3]\n return ax,ay,tx,ty",
"def match_coords_v2(list1, list2, tol = 1.5):\n tol1, tol2 = tol/3600.0, (tol/3600.0)**2\n jmin, match_idx, best_dr2 = 0, [None]*len(list1), [tol2]*len(list1)\n # Unpack the RAs and DECs of everything into numpy arrays.\n list1ra = np.array([c.ra for c in list1])\n list2ra = np.array([c.ra for c in list2])\n list1dec = np.array([c.dec for c in list1])\n list2dec = np.array([c.dec for c in list2])\n # This next bit is rather tricky because we have to sort the lists,\n # but also remember where we put each item. Here i, j = sorted indices,\n # while ci[i], ci[j] = corresponding actual indices in original lists.\n # Introduce transformed lists to economize on indexing.\n ci, cj = list1ra.argsort(), list2ra.argsort()\n slist1ra, slist1dec = list1ra[ci], list1dec[ci]\n slist2ra, slist2dec = list2ra[cj], list2dec[cj]\n # Since there's no sense in searching outside the bounds of the arrays,\n # extract the indices of only those elements within the rectangular\n # overlap area (we'll mostly be dealing with equatorial rectangles).\n # Reindex the lists to the originals.\n R0, R1 = max(min(list1ra), min(list2ra)), min(max(list1ra), max(list2ra))\n D0, D1 = max(min(list1dec), min(list2dec)), min(max(list1dec), max(list2dec))\n ci = ci[np.all([slist1ra >= R0, slist1dec >= D0,\n slist1ra <= R1, slist1dec <= D1], axis=0)]\n cj = cj[np.all([slist2ra >= R0, slist2dec >= D0,\n slist2ra <= R1, slist2dec <= D1], axis=0)]\n slist1ra, slist1dec = list1ra[ci], list1dec[ci]\n slist2ra, slist2dec = list2ra[cj], list2dec[cj]\n # Finally, start going through the lists again.\n for i in range(len(slist1ra)):\n decmin, decmax = slist1dec[i] - tol1, slist1dec[i] + tol1\n CD = np.cos(0.5*(decmin+decmax)*np.pi/180.0)\n ramin, ramax = slist1ra[i] - tol1/CD, slist1ra[i] + tol1/CD\n # Inch along in list2 until we find the part that matches list1 in RA\n while jmin < len(slist2ra) and slist2ra[jmin] < ramin: jmin += 1\n # No point going past the end of the list\n if jmin == len(slist2ra): break\n # Now go through all the RA matches and check the RA+DEC distance.\n j = jmin\n while j < len(slist2ra) and slist2ra[j] < ramax:\n # Check in the box before finding the angular distance\n if slist2dec[j] > decmin and slist2dec[j] < decmax:\n dr2 = ((CD*(slist1ra[i]-slist2ra[j]))**2 +\n (slist1dec[i]-slist2dec[j])**2)\n if dr2 < best_dr2[ci[i]]:\n match_idx[ci[i]], best_dr2[ci[i]] = cj[j], dr2\n j += 1\n # Save and return the index in list2 of the best-matched object to each\n # item in list1, and the distances between corresponding best matches.\n best_dr = [np.sqrt(dr2)*3600 for dr2 in best_dr2]\n return match_idx, best_dr",
"def sift_keypt_extractor(img1, img2, ratio=0.7, max_matches=-1, visualize=False, max_features=-1):\n sift = cv2.xfeatures2d.SIFT_create(max_features) if max_features > 0 else cv2.xfeatures2d.SIFT_create()\n\n img1_g = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)\n img2_g = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)\n\n try:\n kp1, des1 = sift.detectAndCompute(img1_g, None)\n kp2, des2 = sift.detectAndCompute(img2_g, None)\n\n # FLANN parameters\n FLANN_INDEX_KDTREE = 0\n index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)\n search_params = dict(checks=50)\n\n flann = cv2.FlannBasedMatcher(index_params, search_params)\n matches = flann.knnMatch(des1, des2, k=2)\n\n pts1 = []\n pts2 = []\n filtered_kp1 = []\n filtered_kp2 = []\n\n # ratio test as per Lowe's paper\n for i, (m, n) in enumerate(matches):\n if m.distance < ratio * n.distance:\n pts1.append(kp1[m.queryIdx].pt)\n pts2.append(kp2[m.trainIdx].pt)\n filtered_kp1.append(kp1[m.queryIdx])\n filtered_kp2.append(kp2[m.trainIdx])\n\n if max_matches > 0 and len(pts1) > max_matches - 1:\n break\n\n if visualize:\n draw_matches(img1, filtered_kp1, img2, filtered_kp2, plot_title=\"\")\n\n return kp1, kp2, pts1, pts2\n except:\n return None, None, None, None",
"def find_matching_points(img1, img2, max_pix_movement=50, normalize=True, show=False):\n\n # Initiate ORB detector\n orb = cv2.ORB_create()\n\n # find the keypoints and descriptors with ORB\n kp1, des1 = orb.detectAndCompute(img1, None)\n kp2, des2 = orb.detectAndCompute(img2, None)\n\n # create BFMatcher object\n bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)\n # Match descriptors.\n matches = bf.match(des1,des2)\n # Sort them in the order of their distance.\n matches = sorted(matches, key = lambda x:x.distance)\n # Draw first 10 matches.\n if show:\n img3 = cv2.drawMatches(img1,kp1,img2,kp2,matches[:500], None,flags=2)\n plt.imshow(img3),plt.show()\n # Get the matching keypoints for each of the images\n\n list_kp1 = []\n list_kp2 = []\n for mat in matches:\n img1_idx = mat.queryIdx\n img2_idx = mat.trainIdx\n\n # x - columns\n # y - rows\n list_kp1.append(kp1[img1_idx].pt)\n list_kp2.append(kp2[img2_idx].pt)\n\n n_kp1, n_kp2 = np.float32(list_kp1), np.float32(list_kp2)\n n_kp1 /= np.asarray([img1.shape[1], img1.shape[0]], np.float32)\n n_kp2 /= np.asarray([img2.shape[1], img2.shape[0]], np.float32)\n n_kp1 = n_kp1 * 2. - 1.\n n_kp2 = n_kp2 * 2. - 1.\n\n return np.int32(list_kp1), np.int32(list_kp2), n_kp1, n_kp2"
]
| [
"0.63015306",
"0.62716055",
"0.626144",
"0.62531865",
"0.6191865",
"0.6147474",
"0.6070767",
"0.5903749",
"0.5840502",
"0.5816759",
"0.5778241",
"0.5762855",
"0.57483983",
"0.570337",
"0.5675253",
"0.56423503",
"0.563314",
"0.5577304",
"0.55716807",
"0.5569695",
"0.5564171",
"0.555735",
"0.5557182",
"0.55485654",
"0.55471146",
"0.55199486",
"0.55077153",
"0.54884744",
"0.5483749",
"0.5478279"
]
| 0.65722686 | 0 |
Compute the Symmetrical Epipolar Distance. | def sym_epipolar_dist(corr, F):
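# Swap the two coordinates of each correspondence point (presumably converting (row, col) pairs into (x, y) pairs).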
corrs_temp = np.zeros(4)
corrs_temp[1] = corr[0]
corrs_temp[0] = corr[1]
corrs_temp[2] = corr[3]
corrs_temp[3] = corr[2]
corr = corrs_temp
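# Build homogeneous points and evaluate the symmetric epipolar distance:
# d = (p2^T F p1)^2 * (1 / ||(F p1)_xy||^2 + 1 / ||(F^T p2)_xy||^2)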
p1 = np.hstack([corr[:2],1])
p2 = np.hstack([corr[2:],1])
first_term = (F @ p1)[:-1]
second_term = (F.T @ p2)[:-1]
coeff = (p2.T @ F @ p1)**2
return coeff * (1/(np.linalg.norm(first_term)**2) + 1/(np.linalg.norm(second_term)**2)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sym_distance(p, q):\n p = np.asarray(p)\n q = np.asarray(q)\n return np.minimum(norm(p - q), norm(p + q))",
"def sym_adj(adj):\n adj = sp.coo_matrix(adj)\n rowsum = np.array(adj.sum(1))\n d_inv_sqrt = np.power(rowsum, -0.5).flatten()\n d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.\n d_mat_inv_sqrt = sp.diags(d_inv_sqrt)\n return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).astype(np.float32).todense()",
"def sym_adj(adj):\n adj = ss.coo_matrix(adj)\n rowsum = np.array(adj.sum(1))\n d_inv_sqrt = np.power(rowsum, -0.5).flatten()\n d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.\n d_mat_inv_sqrt = ss.diags(d_inv_sqrt)\n return np.array(adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).astype(np.float32).todense())",
"def euc_dist(self, squared=True):",
"def distancePairs(self):\n return spsd.squareform(spsd.pdist(self.coordinates()))",
"def anti_symmeterize(self):\n A = self.to_coo_matrix()\n symg = wgraph_from_adjacency((A - A.T) / 2)\n self.E = symg.E\n self.edges = symg.edges\n self.weights = symg.weights\n return self.E",
"def sym_distance(cls, q0, q1):\n q = Quaternion.sym_log_map(q0, q1)\n return q.norm",
"def _euclidian_distance(self, x1, x2):\n a= x1-x2\n a2 = a**2\n b = np.sum(a2, axis=1)\n c = np.sqrt(b)\n return c",
"def _pairwise_distance(x):\n x_inner = -2*torch.matmul(x, x.transpose(2, 1))\n x_square = torch.sum(torch.mul(x, x), dim=-1, keepdim=True)\n return x_square + x_inner + x_square.transpose(2, 1)",
"def get_euclid_distance_to(self, atom):\n return linalg.norm(self.get_coords() - atom.get_coords())",
"def pairwise_euclidean_distance(M, axis=0):\n\n return pairwise_distance(M, 'euclidean', axis=axis)",
"def calculate_euclidean_dist(self):\n x_dist = self._current_loc.get_column() - self._goal_loc.get_column()\n y_dist = self._current_loc.get_row() - self._goal_loc.get_row()\n # Note ** is power operator in Python\n return self._current_cost + sqrt(x_dist**2 + y_dist**2)",
"def euclidean_dist(self):\r\n\r\n real_cat, synth_cat = self.to_cat(self.origdst, self.synthdst)\r\n\r\n real_cat_dem = self.get_demographics(real_cat)\r\n synth_cat_dem = self.get_demographics(synth_cat)\r\n\r\n corr_real_obj = associations(real_cat_dem, theil_u=True, bias_correction=False, plot=False)\r\n corr_synth_obj = associations(synth_cat_dem, theil_u=True, bias_correction=False, plot=False)\r\n\r\n corr_real = corr_real_obj['corr']\r\n corr_rand = corr_synth_obj['corr']\r\n\r\n eucl_matr = distance.cdist(corr_real, corr_rand, 'euclidean')\r\n\r\n eucl = LA.norm(eucl_matr)\r\n\r\n return eucl, eucl_matr",
"def symmeterize(self):\n A = self.to_coo_matrix()\n symg = wgraph_from_adjacency((A + A.T) / 2)\n self.E = symg.E\n self.edges = symg.edges\n self.weights = symg.weights\n return self",
"def euclidian_distance(p):\n return(np.sqrt(sum([(p[0][i]-p[1][i])**2 for i, _ in enumerate(p)])))",
"def _pairwise_dist(self,s1,s2):\n\n return 0.0",
"def calculate_euclidean_distance(self, matrix, input, output_neuron):\n result = 0\n\n # Loop over all input data.\n diff = input - matrix[output_neuron]\n return np.sqrt(sum(diff*diff))",
"def is_symmetric(self):\n _is_sym = self._is_sym\n if _is_sym is not None:\n return _is_sym\n\n n = self.degree\n if n >= 8:\n if self.is_transitive():\n _is_alt_sym = self._eval_is_alt_sym_monte_carlo()\n if _is_alt_sym:\n if any(g.is_odd for g in self.generators):\n self._is_sym, self._is_alt = True, False\n return True\n\n self._is_sym, self._is_alt = False, True\n return False\n\n return self._eval_is_alt_sym_naive(only_sym=True)\n\n self._is_sym, self._is_alt = False, False\n return False\n\n return self._eval_is_alt_sym_naive(only_sym=True)",
"def getEuclideanDistance():\r\n global euclideanDistance\r\n return euclideanDistance",
"def euclidean_distance(self, point: List[int]) -> float:\n return sqrt(point[0] ** 2 + point[1] ** 2)",
"def euclidean_distance(self,):\n return sqrt(pow((self.pose1.x - self.pose2.x), 2) +\n pow((self.pose1.y - self.pose2.y), 2))",
"def pairwise_euclidean_similarity(x, y):\n s = 2 * torch.mm(x, torch.transpose(y, 1, 0))\n diag_x = torch.sum(x * x, dim=-1)\n diag_x = torch.unsqueeze(diag_x, 0)\n diag_y = torch.reshape(torch.sum(y * y, dim=-1), (1, -1))\n\n return s - diag_x - diag_y",
"def test_silly_symmetrical_case(self):\n complex_id_list = range(1,4) #1~3\n dataloader = lambda d: d\n calc_func = lambda d1,d2: d1 + d2\n \n actual,_ = complex_pairwise_calc(complex_id_list, dataloader, calc_func, callback = None, symmetry=True)\n \n expected = [(1, 2, 3), (1, 3, 4), (2, 3, 5),\n (1, 1, 2), (2, 2, 4), (3, 3, 6)]\n \n self.assertEqual(actual, expected)",
"def cosine_distances_pure(X, Y=None):\n S = cosine_similarity_pure(X, Y)\n func = lambda x: _clip(-x + 1, 0, 2)\n S = apply_2d(S, func)\n if X is Y or Y is None:\n S = _set_diag(S)\n return S",
"def pairwise_euclidean_distance(x, y):\n m, n = x.size(0), y.size(0)\n dist_mat = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n) + \\\n torch.pow(y, 2).sum(1, keepdim=True).expand(n, m).t() \\\n - 2 * torch.matmul(x, y.t())\n # for numerical stability\n dist_mat = dist_mat.clamp(min=1e-12).sqrt()\n return dist_mat",
"def compute_dist(self, s1, s2):\n return sp_linalg.norm(self.wrap(s1, s2))",
"def compute_dist(self, s1, s2):\n return sp_linalg.norm(self.wrap(s1, s2))",
"def sym_intrinsic_distance(p, q):\n p = np.asarray(p)\n q = np.asarray(q)\n return np.where(norm(p - q) < norm(p + q),\n norm(riemann_log_map(p, q)),\n norm(riemann_log_map(p, -q))\n )",
"def _nn_euclidean_distance(x, y):\n distances = _pdist(x, y)\n return np.maximum(0.0, distances.min(axis=0))",
"def _nn_euclidean_distance(x, y):\n distances = _pdist(x, y)\n return np.maximum(0.0, distances.min(axis=0))"
]
| [
"0.58554745",
"0.58154327",
"0.5790009",
"0.578392",
"0.5774197",
"0.5734582",
"0.5672689",
"0.564891",
"0.5642469",
"0.56281424",
"0.562024",
"0.5614998",
"0.56049925",
"0.55514705",
"0.55148494",
"0.550842",
"0.5503222",
"0.5497139",
"0.5496303",
"0.545838",
"0.5450096",
"0.5447234",
"0.5427067",
"0.53881174",
"0.5388063",
"0.5377396",
"0.5377396",
"0.5363521",
"0.53510195",
"0.53510195"
]
| 0.5896209 | 0 |
Main entry point: with the assistance of helper functions, finds all variable declarations matching regex_string inside the file and returns them in a list. | def parse_file(self, file_name):
with open(file_name, "r") as input_file:
file_contents = input_file.read()
"""
Regex matching operates line by line, so the file is collapsed onto a
single line below - this ensures that, irrespective of formatting, all
docstrings are identified and all variable specifications are found.
"""
file_contents = file_contents.replace("\n", "NEWLINE")
docstrings = self.get_docstrings(file_contents)
variable_declarations = self.select_variable_declarations(docstrings)
variable_declarations = [x.replace("NEWLINE", "\n") for x in variable_declarations]
return variable_declarations | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def GetDefinitions(filename,obj):\n file=open(filename)\n content=file.read().replace(\"\\t\",\" \")\n file.close\n pat=re.compile(obj +' \\{([\\S\\s]*?)\\}',re.DOTALL)\n finds=pat.findall(content)\n return finds",
"def regex_findall_variables(raw_string: Text) -> List[Text]:\n try:\n match_start_position = raw_string.index(\"$\", 0)\n except ValueError:\n return []\n\n vars_list = []\n while match_start_position < len(raw_string):\n\n # Notice: notation priority\n # $$ > $var\n\n # search $$\n dollar_match = dolloar_regex_compile.match(raw_string, match_start_position)\n if dollar_match:\n match_start_position = dollar_match.end()\n continue\n\n # search variable like ${var} or $var\n var_match = variable_regex_compile.match(raw_string, match_start_position)\n if var_match:\n var_name = var_match.group(1) or var_match.group(2)\n vars_list.append(var_name)\n match_start_position = var_match.end()\n continue\n\n curr_position = match_start_position\n try:\n # find next $ location\n match_start_position = raw_string.index(\"$\", curr_position + 1)\n except ValueError:\n # break while loop\n break\n\n return vars_list",
"def extract_strings(f):\n strings = re.findall(strregex,f)\n return strings",
"def retrieve_variables(content):\n variables = []\n in_var_section = False\n for line in content.splitlines():\n #print line\n if in_var_section:\n var_def = re.split(' +', line)\n if len(var_def) > 1:\n #print var_def[0], ':', var_def[1]\n var_name = var_def[0]\n def_value = var_def[1]\n if not def_value.startswith('%'): #not environment variable which would be directly passed to robot\n variables.append([var_name.strip('${').strip('}'), def_value])\n if '*** Variables ***' in line:\n in_var_section = True\n elif in_var_section and '*** ' in line:\n #end of Variables section\n break\n return variables",
"def scan_source(filename, def_re = None, cb = (lambda l,m : None) ) :\n import re\n\n if not def_re :\n storage_class = br\"(static\\s+)?\"\n type_and_name = br\"int\\s+(?P<n>test_[_a-zA-Z0-9]*)\";\n args=br\"\\(.*\\)\";\n def_re = re.compile(b\"\\s*\" + storage_class +\n type_and_name + b\"\\s*\" +\n args );\n\n tests = set()\n with open(filename, \"rb\") as f:\n for line in f:\n m = def_re.match(line)\n if not m : continue\n cb(line, m)\n tests.add( m.group('n').strip().decode('utf-8') )\n return tests",
"def parseVars(fp):\n\n try:\n ln = fp.readline()\n p = re.compile(r'^Variaveis\\s*(?:#.*)?$')\n m = p.match(ln)\n if m == None:\n raise ParseError(ParseError.rnamesMsg)\n\n ln = fp.readline()\n p = re.compile(r'^\\{\\s*(.*)\\s*\\}\\s*(?:#.*)?$')\n m = p.match(ln)\n if m == None:\n raise ParseError(ParseError.rnamesMsg)\n\n a = m.group(1).split(',')\n a[:] = map(str.strip, a)\n return set(a)\n\n except:\n raise",
"def get_variable_names(filepath):\n variables = set()\n with open(filepath, \"r\") as f:\n previous = \"\"\n for line in f.readlines():\n if line[0] == \"#\":\n previous = line\n var_names = None\n continue\n if var_names is not None:\n continue\n var_names = previous.split()[1:]\n while \"vs\" in var_names:\n var_names.remove(\"vs\")\n for name in var_names:\n variables.add(name)\n return list(variables)",
"def find_template_variables(code):\n return re.findall(re_template_var, code)",
"def fileReSeekList(fh, regexList):\n\n compiled = [re.compile(r) for r in regexList]\n\n while True:\n line = fh.readline()\n if line == '':\n return None\n for i, p in enumerate(compiled):\n match = p.match(line)\n if match:\n return (match, i)",
"def buildMatchingWordList(searchWord, fileList, addCurrentBuffer=0):\n\t# build list of words that match from all imports that have .py source\n\t# speed this up by using a map?\n\twordList = []\n\tregex = re.compile(\"(\\W|\\A)\"+searchWord)\n\tregexWord = re.compile(\"\\A\"+searchWord+\"[a-zA-Z0-9_]+\")\n\t_addMatchingWords( vim.current.buffer, regex, regexWord, wordList)\n\tfor f in fileList:\n\t\tlines = open(f).readlines()\n\t\t_addMatchingWords( lines, regex, regexWord, wordList)\n\treturn wordList",
"def extractDef(c: Cmdr, s: str) -> str:\n for pat in c.config.getData('extract-patterns') or []:\n try:\n pat = re.compile(pat)\n m = pat.search(s)\n if m:\n return m.group(1)\n except Exception:\n g.es_print('bad regex in @data extract-patterns', color='blue')\n g.es_print(pat)\n for pat in extractDef_patterns:\n m = pat.search(s)\n if m:\n return m.group(1)\n return ''",
"def scanvars(reader, frame, locals):\n import tokenize\n import keyword\n vars, lasttoken, parent, prefix, value = [], None, None, '', __UNDEF__\n for ttype, token, start, end, line in tokenize.generate_tokens(reader):\n if ttype == tokenize.NEWLINE:\n break\n if ttype == tokenize.NAME and token not in keyword.kwlist:\n if lasttoken == '.':\n if parent is not __UNDEF__:\n value = getattr(parent, token, __UNDEF__)\n vars.append((prefix + token, prefix, value))\n else:\n where, value = lookup(token, frame, locals)\n vars.append((token, where, value))\n elif token == '.':\n prefix += lasttoken + '.'\n parent = value\n else:\n parent, prefix = None, ''\n lasttoken = token\n return vars",
"def search_file(path, regex, ignore_case, include_undefined, printer):\n re_flags = re.IGNORECASE if ignore_case else 0\n object_files = parse_file(path)\n results = []\n for object_file in object_files:\n for symbol in object_file.symbols:\n if not include_undefined and not symbol.is_defined:\n continue\n if re.search(regex, symbol.name, flags=re_flags):\n results.append(SearchResult(SymbolMatch(symbol, regex, ignore_case),\n ObjectFileLocation(object_file)))\n printer.print_results(results)",
"def get_source_file_string_placeholders(file):\n placeholders = {}\n root = ET.parse(file).getroot()\n for element in root.findall('string'):\n name = element.get('name')\n value = ''.join(element.itertext())\n placeholder = get_placeholders(value)\n if placeholder:\n placeholders[name] = placeholder\n return placeholders",
"def read_atts(self, file_name):\n\n match = re.match(self.regex_pattern, file_name)\n\n return match.groupdict()",
"def extract_expressions(s: str) -> List:\n s = regex.sub(r';;.*', \"\", s.strip()) # rimuove i commenti\n\n exprs = list()\n for match in regex.finditer(rex, s):\n start, end = match.span()\n exprs.append(s[start:end].strip())\n\n return exprs",
"def variables_referenced(text):\n return set(substitution_pattern.findall(text))",
"def regex_findall_functions(content: Text) -> List[Text]:\n try:\n return function_regex_compile.findall(content)\n except TypeError as ex:\n capture_exception(ex)\n return []",
"def used_mods(ffile):\n import re\n import codecs\n\n # Go through line by line,\n # remove comments and strings because the latter can include ';'.\n # Then split at at ';', if given.\n # The stripped line should start with 'use '.\n # After use should be the \"module_name\", ', intrinsic :: module_name', or\n # ', non_intrinsic :: module_name'. We allow also to use \":: module_name\"\n # After module name should only be ', only: ...' or ', a ==> b'\n olist = list()\n of = codecs.open(ffile, 'r', encoding='ascii', errors='ignore')\n for line in of:\n ll = line.rstrip().lower() # everything lower case\n ll = re.sub('!.*$', '', ll) # remove F90 comment\n ll = re.sub('^c.*$', '', ll) # remove F77 comments\n ll = re.sub('\".*?\"', '', ll) # remove \"string\"\n ll = re.sub(\"'.*?'\", '', ll) # remove 'string'\n # check if several commands are on one line\n if ';' in ll:\n lll = ll.split(';')\n else:\n lll = [ll]\n for il in lll:\n iil = il.strip()\n # line should start with 'use '\n if iil.startswith('use '):\n iil = iil[4:].strip() # remove 'use '\n # skip intrinsic modules\n if 'intrinsic' in iil:\n if 'non_intrinsic' in iil:\n iil = re.sub(', *non_intrinsic', '', iil)\n iil = iil.strip()\n else:\n continue # skip to next in lll\n if iil.startswith('::'):\n iil = iil[2:].strip() # remove ':: '\n # remove after ',' if rename-list or only-list\n iil = re.sub(',.*$', '', iil)\n olist.append(iil.strip())\n of.close()\n\n return olist",
"def file_entry(file_path, regex, default=()):\n with open(file_path, 'r') as f:\n for line in f:\n search = re.search(regex, line, re.IGNORECASE)\n if search:\n return search.groups()\n\n return default",
"def parse_requirements(fn):\n with open(fn) as f:\n rv = []\n for line in f:\n line = line.strip()\n if not line or line.startswith('#'):\n continue\n rv.append(line)\n return rv",
"def get_variable_matches(text):\n return _property_pattern.findall(text)",
"def regexSearch(regexStr, folderPath):\n if not os.path.isdir(folderPath):\n return 'Input a directory path'\n\n userRegex = re.compile(regex)\n\n for filename in os.listdir(folderPath):\n\n if filename.endswith('.txt'):\n\n with open(filename) as file:\n\n for line in file:\n mo = userRegex.search(line)\n \n if mo:\n print(line, end='')",
"def list_of_vars_in_user_file():\n # parser = argparse.ArgumentParser()\n # parser.add_argument(\"path\")\n # path = parser.parse_args().path\n # path = DUMMY_FILE_PATH\n path = parser.parse_args().path\n logger.info(\"Using the file: {}\".format(path))\n\n if not os.path.exists(path):\n msg = \"The file ({}) does not exist.\".format(path)\n raise RuntimeError(msg)\n with cdms2.open(path) as f:\n return f.variables.keys()",
"def select_variable_declarations(self, docstrings):\n return [x for x in docstrings if self.docstring_contains_variable_declaration(x)]",
"def variable_dicts(self):\n \n def get_variable_text(rtf_file):\n \"Returns a list of variable_texts for each variable\"\n st='Pos. = '\n return rtf_file.split(st)[1:]\n \n def get_variable_name(variable_text):\n st='Variable = '\n b=variable_text.split(st)[1]\n return b[b.find(' ')+1:b.find('\\t')]\n \n def find_pos(rtf):\n a=rtf\n b=a\n return b[b.find(' ')+1:b.find('\\t')]\n \n def find_variable_label(rtf):\n try:\n a=rtf\n b=a.split('Variable label = ')[1]\n return b[b.find(' ')+1:b.find('\\\\par')]\n except IndexError:\n return None\n \n def find_variable_type(rtf):\n if not 'This variable is ' in rtf: return ''\n a=rtf\n b=a.split('This variable is ')[1]\n i1=b.find(' ')+1\n i2=i1+b[i1:].find('}')\n return b[i1:i2]\n \n def find_SPSS_measurement_level(rtf):\n if not 'the SPSS measurement level is ' in rtf: return ''\n a=rtf\n b=a.split('the SPSS measurement level is ')[1]\n i1=b.find(' ')+1\n i2=i1+b[i1:].find('\\\\par')\n return b[i1:i2]\n \n def find_SPSS_user_missing_values(rtf):\n if not 'SPSS user missing values = ' in rtf: return dict()\n a=rtf\n d=a.split('SPSS user missing values = ')\n if len(d)<2: return None\n e=d[1]\n i1=e.find(' ')+1\n i2=i1+e[i1:].find('\\\\par')\n f=e[i1:i2]\n g=f.split(' ')\n i=' '.join([g[0],g[2],g[4]])\n return i\n \n def find_value_labels(rtf):\n if not 'Value = ' in rtf: return dict()\n a=rtf\n d=a.split('Value = ')[1:]\n z={}\n for e in d:\n value=e[e.find(' ')+1:e.find('\\t')]\n value=float(value)\n f=e.split('Label = ')[1]\n label=f[f.find(' ')+1:f.find('\\\\par')]\n z[value]=label\n #print(z)\n return z\n \n variable_texts=get_variable_text(self.rtf)\n #pprint(variable_texts[0:2])\n \n result=[]\n for variable_text in variable_texts:\n d={'pos':find_pos(variable_text),\n 'variable':get_variable_name(variable_text),\n 'variable_label':find_variable_label(variable_text),\n 'variable_type':find_variable_type(variable_text),\n 'SPSS_measurement_level':find_SPSS_measurement_level(variable_text),\n 'SPSS_user_missing_values':find_SPSS_user_missing_values(variable_text),\n 'value_labels':find_value_labels(variable_text) \n }\n result.append(d)\n \n return result",
"def file_to_list_of_parsed(nameoffile):\n a = Grammar()\n b = a.syntax()\n file1 = open(nameoffile,'r')\n parsed = []\n for line in file1:\n parsed.append(b.parseString(line))\n return parsed",
"def __grep(findwhat, filename, ignorecase, regexp):\n\t\tresult = []\n\t\ttry:\n\t\t\tencoding = \"utf8\"\n\t\t\tcontent = open(filename,\"r\", encoding=encoding).read()\n\t\texcept FileNotFoundError:\n\t\t\treturn result\n\t\texcept UnicodeDecodeError:\n\t\t\tencoding = \"latin-1\"\n\t\t\tcontent = open(filename,\"r\", encoding=encoding).read()\n\t\t\t\n\t\tif __search(findwhat, content, ignorecase, regexp):\n\t\t\tlines = open(filename,\"r\", encoding=encoding).readlines()\n\t\t\tlineNumber = 1\n\t\t\tfor line in lines:\n\t\t\t\tif __search(findwhat, line, ignorecase, regexp):\n\t\t\t\t\tresult.append((filename, lineNumber, line.strip()))\n\t\t\t\tlineNumber += 1\n\t\treturn result",
"def find_vars_in_str(self, line):\n self.E_str = \"find_vars_in_str\"\n any_vars = [i[1:] for i in re.findall(IN_STR_VAR_REGEX, line)]\n for check_var in any_vars:\n # Check the variable exists\n if check_var not in self.variables:\n self.print_error(f\"Can't find variable '{check_var}'\")\n\n Var = getattr(self, check_var)\n if hasattr(Var, \"data\") and type(Var.data) == str:\n str_var = str(Var)\n str_var = re.sub(\"Variable .*:[\\n]+\", \"\", str_var)\n line = line.replace(f\"${check_var}\", str_var)\n\n elif hasattr(Var, \"data\"):\n line = line.replace(f\"${check_var}\", str(Var.data))\n\n else:\n line = line.replace(f\"${check_var}\", str(Var))\n\n return line, any_vars",
"def parse(file):\r\n # Check cache before parsing file\r\n global _parsed_file_cache\r\n if file in _parsed_file_cache:\r\n return _parsed_file_cache[file]\r\n \r\n FuncDefnRegexp = r'^def.*\\{'\r\n FuncEndRegexp = r'^\\}.*$'\r\n with open(file, 'r') as f:\r\n data = f.read()\r\n file_lines = data.split(\"\\n\")\r\n all_fns = []\r\n fn_lines = ''\r\n for line in file_lines:\r\n if len(fn_lines) > 0:\r\n if re.match(FuncEndRegexp, line):\r\n fn_lines += line + \"\\n\"\r\n all_fns.append(fn_lines)\r\n fn_lines = ''\r\n else:\r\n fn_lines += line + \"\\n\"\r\n elif re.match(FuncDefnRegexp, line):\r\n fn_lines += line + \"\\n\"\r\n \r\n func_results = []\r\n for fn in all_fns:\r\n func_results += [GroovyFunctionParser.parse(fn)]\r\n \r\n _parsed_file_cache[file] = func_results\r\n return func_results"
]
| [
"0.646614",
"0.6357556",
"0.617826",
"0.60977983",
"0.60213876",
"0.5975594",
"0.59012467",
"0.58486295",
"0.5812768",
"0.58079183",
"0.5763601",
"0.5752095",
"0.57495373",
"0.574335",
"0.57063615",
"0.565007",
"0.5638748",
"0.56160724",
"0.55971515",
"0.5569792",
"0.5567144",
"0.5564763",
"0.5556011",
"0.55465686",
"0.55447316",
"0.5541371",
"0.54915076",
"0.5490068",
"0.54816157",
"0.5432537"
]
| 0.6841503 | 0 |
Attempts to find the specified regex_string in a docstring. Returns True if matched, False otherwise. | def docstring_contains_variable_declaration(self, docstring):
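# re.search returns a match object when regex_string occurs anywhere in the docstring, or None otherwise.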
found = re.search(self.regex_string, docstring)
if found is None:
return False
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def search(self, string):\n fid = open(os.path.join(self.output_path, \"%s.html\" % TEST_FILE_STEM), \"r\")\n found = False\n for line in fid.readlines():\n if re.search(string, line):\n found = True\n break\n fid.close()\n return found",
"def search(self, regexp):\n try:\n self.rematch = regexp.search(self.matchstring)\n except AttributeError:\n self.rematch = re.search(regexp, self.matchstring)\n return bool(self.rematch)",
"def found(self, command, regex):\n result = self.sys(command)\n for line in result:\n found = re.search(regex,line)\n if found:\n return True\n return False",
"def found(self, command, regex):\n result = self.sys(command)\n for line in result:\n found = re.search(regex,line)\n if found:\n return True\n return False",
"def is_regex_in_string(regex, regex_string):\n try:\n match = re.search(regex, regex_string)\n does_nothing(match.group())\n return True;\n except Exception, e:\n return False;",
"def REGEXMATCH(text, regular_expression):\n return bool(re.search(regular_expression, text))",
"def match(self, string):\n matched = False\n cmd = None\n\n if string in self.commands.keys():\n matched = True\n cmd = string\n\n else:\n for command in self.commands.keys():\n if \"regex\" in self.commands[command].keys() \\\n and re.match(self.commands[command][\"regex\"], string):\n matched = True\n cmd = command\n break\n \n if cmd and len(cmd) > 0:\n self._last_matched_command = cmd\n else:\n self._last_matched_command = None\n\n return matched",
"def exists(self, regex: str) -> bool:\n for _ in self.find(regex):\n return True\n return False",
"def search(self, regex):\n if isinstance(regex, str):\n regex = re.compile(regex, re.IGNORECASE)\n return regex.search(self.sequence)",
"def _find_reg(self, reg_str, content):\n reg_find = re.findall(reg_str, content)\n assert reg_find is not None, \"ERROR: Could not extract any content, check regex string\"\n return reg_find",
"def __search(findwhat, content, ignorecase, regexp):\n\t\tfrom re import search, IGNORECASE\n\t\tif regexp:\n\t\t\tif ignorecase:\n\t\t\t\tflag = IGNORECASE\n\t\t\telse:\n\t\t\t\tflag = 0\n\t\t\tif search(findwhat, content, flag):\n\t\t\t\treturn True\n\t\telse:\n\t\t\tif ignorecase:\n\t\t\t\tcontent = content.lower()\n\t\t\t\tfindwhat = findwhat.lower()\n\t\t\t\t\n\t\t\tif content.find(findwhat) != -1:\n\t\t\t\treturn True\n\t\treturn False",
"def is_sphinx_markup(docstring):\n # this could be made much more clever\n return (\"`\" in docstring or \"::\" in docstring)",
"def match_string(self, string_to_match, regexp):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\tif not isinstance(string_to_match, str):\n\t\t\treturn None\n\t\tlines = string_to_match.split('\\r\\n')\n\t\t# sometimes they're separated by just a carriage return...\n\t\tnew_lines = []\n\t\tfor line in lines:\n\t\t\tnew_lines = new_lines + line.split('\\r')\n\t\t# and sometimes they're separated by just a newline...\n\t\tfor line in lines:\n\t\t\tnew_lines = new_lines + line.split('\\n')\n\t\tlines = new_lines\n\t\tif not shutit_util.check_regexp(regexp):\n\t\t\tself.fail('Illegal regexp found in match_string call: ' + regexp) # pragma: no cover\n\t\tfor line in lines:\n\t\t\tmatch = re.match(regexp, line)\n\t\t\tif match is not None:\n\t\t\t\tif match.groups():\n\t\t\t\t\treturn match.group(1)\n\t\t\t\treturn True\n\t\treturn None",
"def regex_match(text, pattern):\n try:\n pattern = re.compile(\n pattern,\n flags=re.IGNORECASE + re.UNICODE + re.MULTILINE,\n )\n except BaseException:\n return False\n return pattern.search(text) is not None",
"def main(self, regex_string):\n sql_sen = regex_string[0][0]\n reg = \"\\$\\w+\"\n if re.search(reg, sql_sen, re.I):\n\n p = re.compile(reg)\n match = p.findall(sql_sen)\n return match\n return None",
"def _is_regex_match(s, pat):\n\n pat = pat.rstrip()\n m = re.search(Settings._REPAT, pat)\n if m:\n flags_combined = 0\n if m.group('flag'):\n char_to_flag = {\n 'A':re.A, 'I':re.I, 'L':re.L, 'M':re.M, 'S':re.S, 'X':re.X}\n for flag in list(m.group('flag')):\n flags_combined |= char_to_flag[flag]\n return bool(re.search(m.group('pat'), s, flags_combined))\n raise InvalidRegexError(pat)",
"def match(self, regexp):\n try:\n self.rematch = regexp.match(self.matchstring)\n except AttributeError:\n self.rematch = re.match(regexp, self.matchstring)\n return bool(self.rematch)",
"def match(pattern, s):\n # The regexp compilation caching is inlined in both Match and Search for\n # performance reasons; factoring it out into a separate function turns out\n # to be noticeably expensive.\n if pattern not in _regexp_compile_cache:\n _regexp_compile_cache[pattern] = sre_compile.compile(pattern)\n return _regexp_compile_cache[pattern].match(s)",
"def matches_regex(self, regex):\n match = re.match(regex, self.text)\n if not match:\n return False\n\n self.regex_groups = match.groups()\n return True",
"def contains_code(notebook, regex_list):\n source = code_cells(notebook)\n for cell_source in source:\n for line in cell_source:\n # Ignore comments\n if line.startswith('#'):\n continue\n # if the line contains any of the regexes, return True\n for regex in regex_list:\n if re.search(regex, line, re.IGNORECASE):\n return True\n return False",
"def search(self, text):\n if self.sense:\n return (self.regex.search(text) != None)\n else:\n return (self.regex.search(text) == None)",
"def validate_string_search(self, pattern, file):\r\n try:\r\n file_open = open(file, 'r')\r\n except:\r\n logging.info(\"file not found\")\r\n return -1\r\n file_data = file_open.read()\r\n ret_out = re.search(pattern, file_data)\r\n if ret_out:\r\n return True, ret_out\r\n else:\r\n return False, ret_out",
"def has_string(filepath, string):\n with open(filepath) as yaml_file:\n for line in yaml_file:\n if string.search(line):\n return True\n return False",
"def regex_search(regex, regex_string):\n match = re.search(regex, regex_string)\n if match is not None:\n return match.group()",
"def regMatch(value, regex):\n if regex == \"*\": # Accounts for python wildcard bug\n regex = \"(.*)\"\n pattern = re.compile(regex)\n match_obj = pattern.search(value)\n return bool(match_obj)",
"def match(cls, text):\r\n return cls.main.pattern.match(text)",
"def regex_search(regex, *fields):\n for match_field in fields:\n if re.search(regex, match_field):\n return True\n return False",
"def __reWildcard(self, regexp, string):\n regexp = re.sub(\"\\*+\", \"*\", regexp)\n match = True\n if regexp.count(\"*\") == 0:\n if regexp == string:\n return True\n else:\n return False\n blocks = regexp.split(\"*\")\n start = \"\"\n end = \"\"\n if not regexp.startswith(\"*\"):\n start = blocks[0]\n if not regexp.endswith(\"*\"):\n end = blocks[-1]\n if start != \"\":\n if string.startswith(start):\n blocks = blocks[1:]\n else:\n return False\n if end != \"\":\n if string.endswith(end):\n blocks = blocks[:-1]\n else:\n return False\n blocks = [block for block in blocks if block != \"\"]\n if blocks == []:\n return match\n for block in blocks:\n i = string.find(block)\n if i == -1:\n return False\n string = string[i + len(block):]\n return match",
"def match(self, s):\n self.matches = self.re.search(s)\n return self.matches",
"def contains_match(self, regexp):\n # If the regexp is not found, find will return a tuple (-1, -1) in Sublime 3 or None in Sublime 2 \n # https://github.com/SublimeTextIssues/Core/issues/534\n contains_import = self.view.find(regexp, 0)\n return contains_import.size() > 0 if float(sublime.version()) >= 3000 else contains_import is not None"
]
| [
"0.6317987",
"0.61371905",
"0.6057289",
"0.6057289",
"0.60314524",
"0.60288835",
"0.59919053",
"0.59886557",
"0.5945532",
"0.5905841",
"0.58453524",
"0.5827701",
"0.5811089",
"0.5714999",
"0.5651282",
"0.56070274",
"0.56042236",
"0.5587269",
"0.55047214",
"0.5490997",
"0.5484419",
"0.5463313",
"0.545058",
"0.5432568",
"0.53887665",
"0.537917",
"0.53787047",
"0.5372848",
"0.53690434",
"0.53495264"
]
| 0.64932317 | 0 |
ultratio = chrlength * uniqueratio / (chrtotalreads - frcount) | def ultratio(chrlength, uniqueratio, chrtotalreads, frcount):
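# Scale the chromosome length by its unique-read ratio and normalise by the chromosome's read count minus frcount (assumed here to be reads excluded from the total).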
ultratio = chrlength * uniqueratio / (chrtotalreads - frcount)
return ultratio | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def overhead(readings):\n return 100.0 * (int(readings[0]) + int(readings[1])) / (int(readings[2]) + int(readings[3]))",
"def IOU(s1, e1, s2, e2):\r\n if (s2 > e1) or (s1 > e2):\r\n return 0\r\n Aor = max(e1, e2) - min(s1, s2)\r\n Aand = min(e1, e2) - max(s1, s2)\r\n return float(Aand) / Aor",
"def efficiency(self):\n if self.byte_total == 0:\n return 1\n return self.entropy() / 8",
"def _U_table(ci, hi, co, ho):\n # TODO: Base U on Table 18.5, Warren D. Seider et. al. Product and Process Design Principles. (2016)\n cip, hip, cop, hop = ci.phase, hi.phase, co.phase, ho.phase\n phases = cip + hip + cop + hop\n if 'g' in phases:\n if ('g' in hip and 'l' in hop) and ('l' in cip and 'g' in cop):\n return 1.0\n else:\n return 0.5\n else:\n return 0.5",
"def ohms(self):\n # Rwb = Rwiper + Rtotal * (counts / 256)\n # Rwa = Rwiper + Rtotal * ((256 - counts) / 256)\n g = 0\n rtotal=0.0\n reach=[]\n for chan in self.get_channel_list(self.nchans):\n self.rwa[chan] = float( 256 - self.vals[chan] ) / 256.0\n self.rwb[chan] = float( self.vals[chan] ) / 256.0\n self.rwa[chan] *= self.Rtotal\n self.rwb[chan] *= self.Rtotal \n self.rwa[chan] += self.Rwiper\n self.rwb[chan] += self.Rwiper",
"def get_load_factor(self):\n # Your code here\n return self.count/len(self.data)",
"def test_nids_super_res_width():\n f = Level3File(get_test_data('nids/KLZK_H0W_20200812_1305'))\n width = f.map_data(f.sym_block[0][0]['data'])\n assert np.nanmax(width) == 15",
"def DSS28_beamwidth(freq):\n return 0.54/freq",
"def compute_duty_factor():\n [time,\n ankle_l_trajectory,\n ankle_r_trajectory,\n foot_l_contact,\n foot_r_contact,\n muscle_lh_activations,\n muscle_rh_activations,\n muscle_lh_forces,\n muscle_rh_forces,\n joint_lh_positions,\n joint_rh_positions] = load_data()\n \n print(np.sum(foot_l_contact)/len(foot_l_contact))\n print(np.sum(foot_r_contact)/len(foot_r_contact))\n\n return np.sum(foot_l_contact)/len(foot_l_contact)*0.5 + np.sum(foot_r_contact)/len(foot_r_contact)*0.5",
"def Mo96(self,dc,nu):\n return 1. + (nu**2.-1.)/dc",
"def perfectrefl(wavelength):\n return 1.0",
"def ramfinc(self):\n return 0",
"def len23(self) -> float:\n ...",
"def micros() -> int:",
"def pulse_width_percent(self) -> float:",
"def convert_length_2_current_u(self,val):\n return val/conversion_facs_length[self.current_units[\"length\"]]",
"def get_bleu(hypo, ref, seq_len=10, bleu_len=5):\n stats = np.array([0.]*seq_len)\n for h, r in zip(hypo, ref):\n h = np.trim_zeros(h)\n r = np.trim_zeros(r)\n stats += np.array(bleu_stats(h, r, bleu_len))\n return 100 * bleu(stats)",
"def amaoccupancyscore(pwm_dictionary, seq):\n if \"N\" in seq:\n return 0\n else:\n # pwm_length = len(pwm_dictionary)\n pwm_length = len(pwm_dictionary[\"A\"])\n occupancy_list = []\n pwm_dictionary_rc = rc_pwm(pwm_dictionary, pwm_length)\n for i in range(len(seq) - 1):\n occupancy = 1\n occupancy_rc = 1\n for j in range(pwm_length - 1):\n if (j + i) >= len(seq):\n occupancy *= 0.25\n occupancy_rc *= 0.25\n else:\n occupancy *= pwm_dictionary[seq[j + i]][j]\n occupancy_rc *= pwm_dictionary_rc[seq[j + i]][j]\n occupancy_list.append(occupancy + occupancy_rc)\n ama_occupancy = sum(occupancy_list) / len(occupancy_list)\n return ama_occupancy",
"def gomeroccupancyscore(pwm_dictionary, seq):\n if \"N\" in seq:\n return 0\n else:\n # pwm_length = len(pwm_dictionary)\n pwm_length = len(pwm_dictionary[\"A\"])\n gomer_occupancy = 1\n area_pwm_rc = rc_pwm(pwm_dictionary, pwm_length)\n for i in range(pwm_length - 1, 1, -1):\n prod_gomer = 1\n prod_gomer_rc = 1\n for j in range(pwm_length):\n if j <= i:\n prod_gomer *= 0.25\n prod_gomer_rc *= 0.25\n elif (j + i) > len(seq) - 1:\n prod_gomer *= 0.25\n prod_gomer_rc *= 0.25\n else:\n # print \"got to else\"\n s = seq[j + i]\n prod_gomer *= pwm_dictionary[s][j]\n prod_gomer_rc *= area_pwm_rc[s][j]\n gomer_occupancy *= (1 - prod_gomer) * (1 - prod_gomer_rc)\n for i in range(len(seq) - 1):\n prod_gomer = 1\n prod_gomer_rc = 1\n for j in range(pwm_length - 1):\n if (j + i) >= len(seq) - 1:\n prod_gomer *= 0.25\n prod_gomer_rc *= 0.25\n else:\n prod_gomer *= pwm_dictionary[seq[j + i]][j]\n prod_gomer_rc *= area_pwm_rc[seq[j + i]][j]\n gomer_occupancy *= (1 - prod_gomer) * (1 - prod_gomer_rc)\n gomer_occupancy = 1 - gomer_occupancy\n\n return gomer_occupancy",
"def get_resistance(self):\n\t\tdata = bus.read_byte_data(AD5259_DEFAULT_ADDRESS, AD5259_WORD_ADDR_RDAC)\n\t\t\n\t\t# Convert the data\n\t\tresistance_wb = (data / 256.0) * 5.0\n\t\tresistance_wa = 5 - resistance_wb\n\t\t\n\t\treturn {'a' : resistance_wa, 'b' : resistance_wb}",
"def sumoccupancyscore(pwm_dictionary, seq):\n if \"N\" in seq:\n return 0\n else:\n # pwm_length = len(pwm_dictionary)\n pwm_length = len(pwm_dictionary[\"A\"])\n sum_occupancy = 0\n pwm_dictionary_rc = rc_pwm(pwm_dictionary, pwm_length)\n for i in range(len(seq) - 1):\n occupancy = 1\n occupancy_rc = 1\n for j in range(pwm_length - 1):\n if (j + i) >= len(seq):\n occupancy *= 0.25\n occupancy_rc *= 0.25\n elif seq[j + i] not in [\"A\", \"C\", \"G\", \"T\"]:\n occupancy *= 0.25\n occupancy_rc *= 0.25\n else:\n occupancy *= pwm_dictionary[seq[j + i]][j]\n occupancy_rc *= pwm_dictionary_rc[seq[j + i]][j]\n sum_occupancy += occupancy + occupancy_rc\n return sum_occupancy / 2",
"def TCMB(rs):\n\n return 0.235e-3 * rs",
"def _osLen(self):\n return int(np.ceil(self.minOverscan * self.sampleRate / self.downsample) * self.downsample)\n\n #osv = self.osVector\n #return np.ceil(np.linalg.norm(osv) / self.pixelWidth)",
"def critical_copy_number(rlen, clen):\n \n if rlen < clen: \n raise Exception('clen is larger than rlen.')\n elif rlen % clen > 0:\n return int(math.ceil(float(rlen) / clen))\n else:\n return 1 + (rlen/clen)",
"def get_load_factor(self):\n # Your code here\n return self.total_items / self.capacity",
"def airydisk(unit_r, fno, wavelength):\n u_eff = unit_r * np.pi / wavelength / fno\n return abs(2 * jinc(u_eff)) ** 2",
"def _get_cu(self):\n c_undrained=0\n #group_index = self._data['GI']\n if self.is_clayey():\n c_undrained = self.qu(self._data[SoilProperty.N60])/2\n #c_undrained=_clamp(c_undrained, 10, 103)\n # Plasix calculation needs very small c_undrained\n #if c_undrained<0.21:\n # c_undrained = 0.21\n #use 0.2 as per plasix recommendation\n return c_undrained#the cu is always 103 check with small value of n_60, some mistake maybe",
"def single_reads(reads_pos):\n #TODO: use more complicated estimator?\n return (len(reads_pos) + 1) / (len(reads_pos) - 1) * (reads_pos[-1] - reads_pos[0])",
"def genus(self):\n return 1 - self.euler_characteristic() // 2",
"def us(self):\n return 1000 * 1000 * self.read()"
]
| [
"0.62412286",
"0.57697046",
"0.57169855",
"0.5621136",
"0.5599058",
"0.5457794",
"0.54397404",
"0.54225004",
"0.54191625",
"0.53480023",
"0.53407586",
"0.53328043",
"0.5329747",
"0.53244",
"0.5299296",
"0.5296787",
"0.52902675",
"0.5287149",
"0.52871114",
"0.52828074",
"0.52759475",
"0.5272418",
"0.52452594",
"0.52396494",
"0.52349555",
"0.52319485",
"0.5229351",
"0.52264476",
"0.52029264",
"0.5192423"
]
| 0.80247974 | 0 |
For a single string, checks if it is one of the FlowFrame's channel names. For a collection, checks if all of its items are channel names. | def __contains__(self, item):
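# Python 2 idiom: basestring covers both str and unicode channel names.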
if isinstance(item, basestring):
return item in self._channels
elif hasattr(item, '__iter__'):
return all(ch in self._channels for ch in item)
else:
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_channel_name(name):\n return name.startswith('#') or name.startswith('&')",
"def is_valid_channel_name(channel):\n if not is_channel_name(channel):\n return False\n\n test_section = channel[1:]\n\n if not MIN_CHANNEL_NAME_LEN < len(channel) < MAX_CHANNEL_NAME_LEN:\n return False\n\n valid_symbols = '#\\\\|^`[]{}_'\n valid_chars = string.ascii_letters + string.digits + valid_symbols\n\n for char in channel:\n if char not in valid_chars:\n return False",
"def _check_has_channel(data):\r\n return re.findall(\r\n r'^:[a-zA-Z0-9_]+\\![a-zA-Z0-9_]+@[a-zA-Z0-9_]+'\r\n r'\\.tmi\\.twitch\\.tv '\r\n r'JOIN #([a-zA-Z0-9_]+)$', data)",
"def is_channel(target, channel_prefixes='!&#+'):\n return len(target) > 1 and target[0] in channel_prefixes",
"def test_unicode_channel_name(self):\n channel_layer.send(\"\\u00a3_test\", {\"value\": \"blue\"})\n # Get just one first\n channel, message = channel_layer.receive_many([\"\\u00a3_test\"])\n self.assertEqual(channel, \"\\u00a3_test\")\n self.assertEqual(message, {\"value\": \"blue\"})",
"async def is_channel_shortcut_name(argument, context, verbose=True):\n return await is_shortcut_name(argument, context, \"channel\", verbose)",
"def _parse_channels(self, dataset, channels):\n if type(channels) == str:\n data = dataset.get(channels)\n if data is None:\n raise ValueError\n else:\n try:\n data = [dataset.get(channel) for channel in channels]\n except Exception:\n raise ValueError\n for channel_data in data:\n if channel_data is None:\n raise ValueError\n return data",
"def string_chan_detected(self, topicq_cdict, node_name):\n test = '/test/string' in topicq_cdict.keys() and (\n topicq_cdict['/test/string'].name == '/test/string'\n and topicq_cdict['/test/string'].type == 'std_msgs/String'\n and len([n for n in topicq_cdict['/test/string'].nodes if n[0].startswith(node_name)]) > 0 # sometime the node gets suffixes with uuid\n )\n if not test:\n print \"Expected : name:{name} type:{type} node:{node}\".format(name='/test/string', node=node_name, type='std_msgs/String')\n print \"NOT FOUND IN DICT : {0}\".format(topicq_cdict)\n return test",
"def isChannelCapability(capability):\n if ',' in capability:\n (channel, capability) = capability.split(',', 1)\n return ircutils.isChannel(channel) and isCapability(capability)\n else:\n return False",
"def _identify_channels(self, name):\n\n channel_list = []\n if self.nuke_node.Class() == \"Cryptomatte\":\n # nuke_node is a keyer gizmo\n channel_list = self.nuke_node.node('Input1').channels()\n else:\n # nuke_node might a read node\n channel_list = self.nuke_node.channels()\n\n relevant_channels = [x for x in channel_list if x.startswith(name)]\n pure_channels = []\n for channel in relevant_channels:\n suffix = \".red\"\n if not channel.endswith(suffix):\n continue\n # to do: validate this somewhere else\n pure_channel = channel[:-len(suffix)]\n pure_channels.append(pure_channel)\n\n return sorted(pure_channels)",
"def validateChannel( self, name ):\n if name not in self.d.keys(): raise Exception('Invalid device channel {}'.format(name))",
"async def checktype(self, ctx:commands.Context):\r\n\r\n t = await self.GetChannelType(ctx.guild, ctx.channel.id)\r\n if t == 'none':\r\n await ctx.send(\r\n f'<#{ctx.channel.id}> is a normal channel (use `register <channel type>` to make this a specialized channel)')\r\n else:\r\n await ctx.send(f'<#{ctx.channel.id}> is a {t}')",
"def channels(self, val):\n if type(val) in [types.TupleType, types.ListType]:\n \n # If we have a real list\n if len([x for x in val if x in SENSORS] = len(val):\n \n #if all the values correspond to possible channels\n self._channels = tuple(val) # Make sure it's a tuple\n \n else:\n \n # Should raise a \"Unknown Channel\" error or sorts\n pass\n else:\n # Should raise an exception",
"def string_detected(self, topicq_clist, conn_type, node_name):\n test = False\n for i, conn in enumerate(topicq_clist): # loop through all connections in the list\n test = (conn.name == '/test/string'\n and conn.type == conn_type\n and conn.node.startswith(node_name) # sometime the node gets suffixes with uuid ??\n and conn.type_info == 'std_msgs/String'\n and len(conn.xmlrpc_uri) > 0)\n if test: # break right away if found\n break\n if not test:\n print \"Expected : name:{name} type:{type} node:{node} topic_type:{type_info}\".format(name='/chatter', type=conn_type, node=node_name, type_info='std_msgs/String')\n print \"NOT FOUND IN LIST : {0}\".format(topicq_clist)\n return test",
"def chatter_chan_detected(self, topicq_cdict, node_name):\n test = '/chatter' in topicq_cdict.keys() and (\n topicq_cdict['/chatter'].name == '/chatter'\n and topicq_cdict['/chatter'].type == 'std_msgs/String'\n and len([n for n in topicq_cdict['/chatter'].nodes if n[0].startswith(node_name)]) > 0 # sometime the node gets suffixes with uuid\n )\n if not test:\n print \"Expected : name:{name} type:{type} node:{node}\".format(name='/chatter', node=node_name, type='std_msgs/String')\n print \"NOT FOUND IN DICT : {0}\".format(topicq_cdict)\n return test",
"def check_series(s: pd.Series) -> bool:\n\n error_string = (\n \"The input Series should consist only of strings in every cell.\"\n \" See help(hero.HeroSeries) for more information.\"\n )\n\n if not isinstance(s.iloc[0], str) or s.index.nlevels != 1:\n raise TypeError(error_string)",
"def find_where_ref_channel(ome_meta: str, ref_channel: str):\n\n channels, channel_names, channel_ids, channel_fluors = extract_channel_info(str_to_xml(ome_meta))\n\n # strip cycle id from channel name and fluor name\n if channel_fluors != []:\n fluors = [re.sub(r'^(c|cyc|cycle)\\d+(\\s+|_)', '', fluor) for fluor in channel_fluors] # remove cycle name\n else:\n fluors = None\n names = [re.sub(r'^(c|cyc|cycle)\\d+(\\s+|_)', '', name) for name in channel_names]\n\n # check if reference channel is present somewhere\n if ref_channel in names:\n matches = names\n elif fluors is not None and ref_channel in fluors:\n matches = fluors\n else:\n if fluors is not None:\n message = 'Incorrect reference channel. Available channel names: {names}, fluors: {fluors}'\n raise ValueError(message.format(names=', '.join(set(names)), fluors=', '.join(set(fluors))))\n else:\n message = 'Incorrect reference channel. Available channel names: {names}'\n raise ValueError(message.format(names=', '.join(set(names))))\n\n return matches",
"def is_string(item: Any) -> bool:\n if isinstance(item, (bytes, bytearray, str)):\n return True\n elif (isinstance(item, (tuple, list)) and all(is_string(x) for x in item)):\n return True\n elif (isinstance(item, np.ndarray) and # binary or unicode\n (item.dtype.kind in (\"U\", \"S\") or item.dtype == object)):\n return True\n return False",
"def chatter_detected(self, topicq_clist, conn_type, node_name):\n test = False\n for i, conn in enumerate(topicq_clist): # lop through all connections in the list\n test = (conn.name == '/chatter'\n and conn.type == conn_type\n and conn.node.startswith(node_name) # sometime the node gets suffixes with uuid ??\n and conn.type_info == 'std_msgs/String'\n and len(conn.xmlrpc_uri) > 0)\n if test: # break right away if found\n break\n if not test:\n print \"Expected : name:{name} type:{type} node:{node} topic_type:{type_info}\".format(name='/chatter', type=conn_type, node=node_name, type_info='std_msgs/String')\n print \"NOT FOUND IN LIST : {0}\".format(topicq_clist)\n return test",
"def is_channel(self):\n return True",
"def have_channel_open(channels, user):\n for x in channels:\n chan = channels[x]\n if 'is_member' in chan:\n continue\n if \"user\" in chan and chan['user'] == user:\n return True\n return False",
"async def test_name_replacement_multiple_channels(self):\n message = \"Current. The following should be replaced: {channel}.\"\n await self.cog.send_message(message, *self.text_channels, alert_target=True)\n\n self.text_channels[0].send.assert_awaited_once_with(message.format(channel=self.text_channels[0].mention))\n self.text_channels[1].send.assert_awaited_once_with(message.format(channel=\"current channel\"))",
"def is_selector(cls, s):\n\n if isinstance(s, Selector):\n return True\n # elif type(s) is str:\n elif isinstance(s, basestring):\n return cls.is_selector_str(s)\n elif np.iterable(s):\n return cls.is_selector_seq(s)\n else:\n return False",
"def have_channel_open(channels, user):\n for x in channels:\n chan = channels[x]\n if 'is_member' in chan:\n continue\n if chan['user'] == user:\n return True\n return False",
"def is_sequence_of_str(items):\n return all(isinstance(item, basestring) for item in items)",
"def names(self, channel, *args, **kwargs):\n pass",
"def is_identifier(cls, s):\n\n if isinstance(s, Selector):\n return len(s) == 1\n\n if np.iterable(s):\n\n # Try to expand string:\n if isinstance(s, basestring):\n try:\n s_exp = cls.expand(s)\n except:\n return False\n else:\n if len(s_exp) == 1:\n return True\n else:\n return False\n\n # If all entries are lists or tuples, try to expand:\n elif all([(isinstance(x, (list, slice))) for x in s]):\n if len(cls.expand(s)) == 1:\n return True\n else:\n return False\n\n # A sequence of integers and/or strings is a valid port identifier:\n elif all(map(lambda x: isinstance(x, (int, long, basestring)), s)):\n #elif set(map(type, s)).issubset([int, basestring]):\n return True\n else:\n return False\n\n # A non-iterable cannot be a valid identifier:\n else:\n return False",
"def test_new_channel(self):\n pattern = \"test.?.foo.?\"\n name1 = channel_layer.new_channel(pattern)\n self.assertIsInstance(name1, six.text_type)\n # Send a message and make sure new_channel on second pass changes\n channel_layer.send(name1, {\"value\": \"blue\"})\n name2 = channel_layer.new_channel(pattern)\n # Make sure the two ?s are replaced by the same string\n bits = name2.split(\".\")\n self.assertEqual(bits[1], bits[3], \"New channel random strings don't match\")\n # Make sure we can consume off of that new channel\n channel, message = channel_layer.receive_many([name1, name2])\n self.assertEqual(channel, name1)\n self.assertEqual(message, {\"value\": \"blue\"})",
"async def _check_channel(\n self, starboard: StarboardEntry, channel: discord.TextChannel\n ) -> bool:\n if starboard.whitelist_channel:\n return channel.id in starboard.whitelist_channel\n else:\n return channel.id not in starboard.blacklist_channel",
"def is_valid_collection_name(collection_name):\n\n collection_name = to_text(collection_name)\n\n return bool(re.match(AnsibleCollectionRef.VALID_COLLECTION_NAME_RE, collection_name))"
]
| [
"0.62983626",
"0.59590423",
"0.58351296",
"0.55657226",
"0.5565677",
"0.54451466",
"0.54303205",
"0.5344488",
"0.5261591",
"0.5208733",
"0.51922476",
"0.5166167",
"0.5158738",
"0.5141583",
"0.5093375",
"0.5037257",
"0.50335044",
"0.50283056",
"0.49992242",
"0.49898973",
"0.49775583",
"0.4948108",
"0.49293616",
"0.49165004",
"0.4910071",
"0.49016765",
"0.4899317",
"0.48988023",
"0.48955625",
"0.4885235"
]
| 0.61462975 | 1 |
Creates a new FlowFrame with a copy of this one's data. The copy will not be linked to the same FCS file. | def copy(self, ID=None):
if ID is None:
match = re.match(r'^(.*-copy)(\d*)$', self._ID)
if match is not None:
ID = match.group(1) + str(int(match.group(2) or 1) + 1)
else:
ID = self._ID + '-copy'
return FlowFrame(self.data.copy(), ID=ID) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __copy__(self):\n logger.debug(\"Copying Flow() object.\")\n c = Flow()\n c.workingDir = self.workingDir \n c.cleanupTemp = self.cleanupTemp\n c.default_inputpaths = self.default_inputpaths\n c.default_outputpath = self.default_outputpath\n c.startNode = self.startNode\n c.lastNode = self.lastNode\n return c",
"def _construct(self, dataflow):\n dataflow = copy_dataflow(dataflow, self.container)\n return dataflow",
"def copy (self):\n copy = NFFG(id=self.id, name=self.name, version=self.version,\n mode=self.mode, metadata=self.metadata.copy(),\n status=self.status)\n copy.network = self.network.copy()\n return copy",
"def clone(self):\n\n copy = self.__class__(self.name, self.data)\n\n copy.set_fixed_variables_from_pdf(self)\n \n return copy",
"def _copy_stream(self):\n return DataStream(\n data_type=self.data_type,\n name=self.name,\n labels=self.labels.copy(),\n callbacks=self.callbacks.copy(),\n uid=self.uid)",
"def copy(self):\n return Struct(self)",
"def copy(self):\n return self.__class__(\n self.kind, self.link_ids.copy(), self.included_nodes.copy(), self.mass,\n self.name, self.crossring_cleavages.copy(), self.composition.copy())",
"def copy(self):\n new = self\n return new",
"def copy(self) -> 'DataFrame':\n new_data: Dict[str, ndarray] = {dt: arr.copy() for dt, arr in self._data.items()}\n new_columns: ColumnT = self._columns.copy()\n new_column_info: ColInfoT = self._copy_column_info()\n new_str_reverse_map = deepcopy(self._str_reverse_map)\n return self._construct_from_new(new_data, new_column_info, new_columns, new_str_reverse_map)",
"def clone(self):\n return _libsbml.FbcAnd_clone(self)",
"def copy(self, new_name: Optional[str] = None):\n name = new_name or self.name\n return FigureData(figure=self.figure, name=name, metadata=copy.deepcopy(self.metadata))",
"def clone(self):\n return _libsbml.FbcOr_clone(self)",
"def copy(self):\n new = object.__new__(type(self))\n new.bot = self.bot\n new.description = self.description\n new.icon_hash = self.icon_hash\n new.icon_type = self.icon_type\n new.id = 0\n new.name = self.name\n return new",
"def copy(self):\n return self.as_dataframe(self.data.copy())",
"def clone(self):\r\n cp = self.__class__(self.type, self.data, self.name)\r\n cp.tag = copy(self.tag)\r\n return cp",
"def copy(self):\n pass",
"def copy(self):\n pass",
"def copy(self):\n pass",
"def copy(self):\n return cfft(self.nx, self.dx, self.fft.copy(), ny=self.ny, dy=self.dy)",
"def __copy__(self):\n return self.__constructor__(\n self._data,\n length=self._length_cache,\n width=self._width_cache,\n ip=self._ip_cache,\n call_queue=self.call_queue,\n )",
"def copy(self):",
"def copy(self, deep=True):\r\n data = self._data\r\n if deep:\r\n data = data.copy()\r\n return SpatialDataFrame(data, sr=self.sr).__finalize__(self)",
"def copy(self):\n aliases = _copy.deepcopy(self.aliases) if (self.aliases is not None) \\\n else None\n return CircuitPlaquette(self.base, self.rows, self.cols,\n self.elements[:], aliases, self.fidpairs)",
"def _transfer(self, dfnew):\n newobj = copy.deepcopy(self) #This looks like None, but is it type (MetaPandasObject, just __union__ prints None\n newobj._frame = dfnew\n \n # THESE ARE NEVER TRANSFERED AT DF LEVEL, JUST CREATED NEW. TRY\n # df.loc\n # a = df*50\n # a._loc ---> Will be None\n #newobj._loc = self._loc\n #newobj._iloc = self._iloc\n #newobj._ix = self._ix \n return newobj",
"def copy (self):\n return self.__class__(self.name, self[:])",
"def copy(self):\n new = self.__class__(integration=None, data=None)\n for attribute, value in self.__dict__.items():\n if attribute in self.referenced_attributes:\n setattr(new, attribute, value)\n elif hasattr(value, 'copy'):\n setattr(new, attribute, value.copy())\n else:\n setattr(new, attribute, deepcopy(value))\n return new",
"def clone(self):",
"def copy(self):\n newVertices = [v.copy() for v in self.vertices]\n return face(newVertices)",
"def copy(self):\n node_new = Node(self.state.copy(), self.parent, self.children.copy(), self.RRT, self.path_length)\n node_new.vs = self.vs.copy()\n node_new.RRT = self.RRT\n node_new.observed = self.observed\n node_new.observation_node = self.observation_node\n node_new.observation_area = self.observation_area\n\n return node_new",
"def copy(self):\n return KFData(self.vec,self.cov,self.zrun,self.pars)"
]
| [
"0.6915684",
"0.6491118",
"0.6262288",
"0.6196588",
"0.6177096",
"0.60805684",
"0.6039008",
"0.6006622",
"0.59907234",
"0.5963644",
"0.59613174",
"0.5920683",
"0.5904271",
"0.5897508",
"0.5892225",
"0.588936",
"0.588936",
"0.588936",
"0.58768296",
"0.5876595",
"0.58375454",
"0.5823636",
"0.58192116",
"0.5781307",
"0.57730097",
"0.5760815",
"0.57594055",
"0.57556665",
"0.57496035",
"0.5740416"
]
| 0.6979232 | 0 |
Creates a new FlowFrame from a subset of this one's events. The data is copied (changing the new data won't affect the old) and the new FlowFrame won't be linked to the same FCS file. | def filter(self, which, **kwargs):
# Get data (if lazy loading, want to avoid accessing this attribute
# twice)
data = self.data
# Get filtered data frame
df = data.iloc[which]
# Some indexing methods return a *view* on the original data, meaning
# changes to one will affect the other. We don't want this.
if df.values.base is data.values.base:
df = df.copy()
# Reset indices to sequential integers
df.index = xrange(df.shape[0])
# Create from DataFrame and any additional arguments
return FlowFrame(df, **kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _gather_events(self, newframe_event):\n if not self.closed:\n for pg_event in pg.event.get():\n event = self._pygame_event_to_event(pg_event)\n if event is not None:\n self.event_hub.raise_event(event)\n self._add_animation_events()",
"def clone(self):\n return _libsbml.ListOfEvents_clone(self)",
"def copy_content_from_previous_event(previous_event, new_event):\n previous_event.refresh_from_db()\n for obj in previous_event.content.all():\n new_content = obj\n new_content.id = None\n new_content.event = new_event\n new_content.save()",
"def copy(self, event):\n return",
"def handle_new_events(self, events):\n for event in events:\n self.events.append(\n self.create_event_object(\n event[0],\n event[1],\n int(event[2])))",
"def clone(self):\n return _libsbml.Event_clone(self)",
"def dframe_from_update(self, new_data, labels_to_slugs):\n self._ensure_dframe()\n\n if not isinstance(new_data, list):\n new_data = [new_data]\n\n filtered_data = []\n columns = self.dframe.columns\n dframe_empty = not len(columns)\n\n if dframe_empty:\n columns = self.dataset.schema.keys()\n\n for row in new_data:\n filtered_row = dict()\n for col, val in row.iteritems():\n # special case for reserved keys (e.g. _id)\n if col in MONGO_RESERVED_KEYS:\n if (not len(columns) or col in columns) and\\\n col not in filtered_row.keys():\n filtered_row[col] = val\n else:\n # if col is a label take slug, if it's a slug take col\n slug = labels_to_slugs.get(\n col, col if col in labels_to_slugs.values() else None)\n\n # if slug is valid of there is an empty dframe\n if (slug or col in labels_to_slugs.keys()) and (\n dframe_empty or slug in columns):\n filtered_row[slug] = self.dataset.schema.convert_type(\n slug, val)\n\n filtered_data.append(filtered_row)\n\n return BambooFrame(filtered_data)",
"def new_frame(self, frame_name, function):\n frame = Frame(frame_name, fp_location=self.fp_location)\n return frame",
"def _transfer(self, dfnew):\n newobj = copy.deepcopy(self) #This looks like None, but is it type (MetaPandasObject, just __union__ prints None\n newobj._frame = dfnew\n \n # THESE ARE NEVER TRANSFERED AT DF LEVEL, JUST CREATED NEW. TRY\n # df.loc\n # a = df*50\n # a._loc ---> Will be None\n #newobj._loc = self._loc\n #newobj._iloc = self._iloc\n #newobj._ix = self._ix \n return newobj",
"def clone(self):\n return _libsbml.EventAssignment_clone(self)",
"def load_new_events_list(self):\n self._event_index_list = self.gdc.new_events_indices\n self.populate_event_list_from_index_list()",
"def copied_blocks(self,courseevent):\n return self.filter(courseevent=courseevent,\n original_lesson__isnull=False, level=1)",
"def make_movie_crossflow(field='uu1', datadir='data/', proc=-1, extension='yz',\n format='native', tmin=0., tmax=1.e38, amin=0., amax=1.,\n transform='', oldfile=False):\n\n import pylab as plt\n import matplotlib.patches as patches\n\n datadir = os.path.expanduser(datadir)\n if proc < 0:\n filename = datadir + '/slice_' + field + '.' + extension\n else:\n filename = datadir + '/proc' + \\\n str(proc) + '/slice_' + field + '.' + extension\n\n # Read the global dimensions.\n dim = read_dim(datadir, proc)\n if dim.precision == 'D':\n precision = 'd'\n else:\n precision = 'f'\n\n # Set up slice plane.\n if extension == 'xy' or extension == 'Xy':\n hsize = dim.nx\n vsize = dim.ny\n if extension == 'xz':\n hsize = dim.nx\n vsize = dim.nz\n if extension == 'yz':\n hsize = dim.ny\n vsize = dim.nz\n plane = np.zeros((vsize, hsize), dtype=precision)\n\n infile = npfile(filename, endian=format)\n\n files = []\n fig = plt.figure(figsize=(5, 10))\n ax = fig.add_subplot(111)\n ax.add_patch(patches.Rectangle(\n (220,0),\n 40,\n 320,\n color='gray'\n )\n )\n#\n# ax.add_patch(patches.Rectangle(\n# (220,0),\n# 80,\n# 240,\n# hatch='/'\n# )\n# )\n\n ifirst = True\n islice = 0\n while True:\n try:\n raw_data = infile.fort_read(precision)\n except ValueError:\n break\n except TypeError:\n break\n\n if oldfile:\n t = raw_data[-1]\n plane = raw_data[:-1].reshape(vsize, hsize)\n else:\n t = raw_data[-2]\n plane = raw_data[:-2].reshape(vsize, hsize)\n\n if transform:\n exec('plane = plane' + transform)\n\n if t > tmin and t < tmax:\n ax.cla()\n ax.imshow(plane, vmin=amin, vmax=amax)\n ax.add_patch(patches.Rectangle(\n (220,0),\n 40,\n 320,\n color='gray'\n )\n )\n fname = '_tmp%03d.png' % islice\n print('Saving frame' + fname)\n fig.savefig(fname)\n files.append(fname)",
"def _copy(source, track, filter_f=lambda x: True, coef=1000):\n for msg in source:\n if filter_f(msg):\n track.append(msg.copy(time=int(msg.time*coef)))",
"def _construct(self, dataflow):\n dataflow = copy_dataflow(dataflow, self.container)\n return dataflow",
"def apply(self, df_events):\n pass",
"def bindEvents(fusionEvents,divisionEvents, buff):\n #1/Finding correspondances\n fusion_indices = []\n fusion_labels = []\n fusion_labels_2 = [] # In label 2 says with which cell the disappearded one has\n for events,label in fusionEvents:\n index,osef,labels = events\n fusion_indices.append(index)\n fusion_labels.append(labels[0])\n fusion_labels_2.append(label)\n \n division_indices = []\n division_labels = []\n division_labels_2 = [] # Tells in which cell it is created\n for events,label in divisionEvents:\n index,osef,labels = events\n division_indices.append(index)\n division_labels.append(labels[0])\n division_labels_2.append(label)\n \n associated_division_list = []\n associated_indexes = []\n for i in fusion_indices:\n ind = next((x for x in division_indices if x>i),-1)\n if ind>0:\n associated_division_list.append((i,ind))\n corr_ind_fusion = fusion_indices.index(i)\n corr_ind_division = division_indices.index(ind)\n associated_indexes.append((corr_ind_fusion,corr_ind_division))\n\n \n #2/removing corresponding elements\n for j in range(len(associated_division_list)):\n index_fus, index_div = associated_indexes[j]\n if division_labels_2[index_div]==fusion_labels_2[index_fus]:\n #If they are not equal, means that the process of division/fusion \n #has not happened on the same blob and hence is not relevant\n big_label = division_labels_2[index_div]\n small_label = fusion_labels[index_fus]\n new_label = division_labels[index_div] #Replace after division this label by small label\n first_index = fusion_indices[index_fus]\n second_index = division_indices[index_div]\n \n for k in range(second_index-first_index):\n splitCell(buff,first_index+k,big_label,small_label)\n \n #Attribution of the new created cells to each one of the previous cells:\n #For this, we take the closest centroid\n #centroid of the big label\n last_image = buff[:,:,second_index]\n xs,ys = centroids2(last_image,[big_label,new_label])\n xs0,ys0 = centroids2(buff[:,:,second_index-1],[big_label,small_label])\n dist_regular = (xs0[0]-xs[0])**2 + (ys0[0]-ys[0])**2 + (xs0[1]-xs[1])**2 + (ys0[1]-ys[1])**2\n dist_inverted = (xs0[0]-xs[1])**2 + (ys0[0]-ys[1])**2 + (xs0[1]-xs[0])**2 + (ys0[1]-ys[0])**2\n \n if dist_regular>dist_inverted:\n print \"ca marche pas gael euh quoi?\"\n tmp_stack = buff[:,:,second_index:]\n tmp_stack[buff[:,:,second_index:]==big_label]=small_label\n tmp_stack[buff[:,:,second_index:]==new_label]=big_label\n buff[:,:,second_index:] = tmp_stack\n division_labels = [x if (x!=new_label and x!=big_label) else big_label if x==new_label else small_label for x in division_labels]\n fusion_labels = [x if x!=new_label and x!=big_label else big_label if x==new_label else small_label for x in fusion_labels]\n division_labels_2= [x if x!=new_label and x!=big_label else big_label if x==new_label else small_label for x in division_labels_2]\n fusion_labels_2= [x if x!=new_label and x!=big_label else big_label if x==new_label else small_label for x in fusion_labels_2]\n else:\n print \"ca marche bien gael\"\n \"\"\"Reassigning new labels\"\"\"\n tmp_stack = buff[:,:,second_index:]\n tmp_stack[tmp_stack==new_label] = small_label\n buff[:,:,second_index:] = tmp_stack\n division_labels = [x if x!=new_label else small_label for x in division_labels]\n fusion_labels = [x if x!=new_label else small_label for x in fusion_labels]\n division_labels_2 = [x if x!=new_label else small_label for x in division_labels_2]\n fusion_labels_2 = [x if x!=new_label else small_label for x in fusion_labels_2]",
"def from_flow_spec(cls, flow_spec=None, deep_copy_tasks=True):\n flow = Flow(**Flow.sanitize_flow_kwargs(flow_spec))\n for i, task in enumerate(flow_spec.get('tasks', [])):\n if deep_copy_tasks:\n task = copy.deepcopy(task)\n flow.add_task(task=task)\n return flow",
"def add_frame_specific_cbf_tables(cbf, wavelength, timestamp, trusted_ranges, diffrn_id = \"DS1\", is_xfel = True, gain = 1.0, flux = None):\n\n \"\"\"Data items in the DIFFRN_RADIATION category describe\n the radiation used for measuring diffraction intensities,\n its collimation and monochromatization before the sample.\n\n Post-sample treatment of the beam is described by data\n items in the DIFFRN_DETECTOR category.\"\"\"\n if flux:\n cbf.add_category(\"diffrn_radiation\", [\"diffrn_id\",\"wavelength_id\",\"probe\",\"beam_flux\"])\n cbf.add_row([diffrn_id,\"WAVELENGTH1\",\"x-ray\",\"%f\"%flux])\n else:\n cbf.add_category(\"diffrn_radiation\", [\"diffrn_id\",\"wavelength_id\",\"probe\"])\n cbf.add_row([diffrn_id,\"WAVELENGTH1\",\"x-ray\"])\n\n \"\"\" Data items in the DIFFRN_RADIATION_WAVELENGTH category describe\n the wavelength of the radiation used in measuring the diffraction\n intensities. Items may be looped to identify and assign weights\n to distinct wavelength components from a polychromatic beam.\"\"\"\n cbf.add_category(\"diffrn_radiation_wavelength\", [\"id\",\"wavelength\",\"wt\"])\n cbf.add_row([\"WAVELENGTH1\",str(wavelength),\"1.0\"])\n\n \"\"\"Data items in the DIFFRN_MEASUREMENT category record details\n about the device used to orient and/or position the crystal\n during data measurement and the manner in which the\n diffraction data were measured.\"\"\"\n cbf.add_category(\"diffrn_measurement\",[\"diffrn_id\",\"id\",\"number_of_axes\",\"method\",\"details\"])\n cbf.add_row([diffrn_id,\n \"INJECTION\" if is_xfel else \"unknown\",\"0\",\n \"electrospray\" if is_xfel else \"unknown\"\n \"crystals injected by electrospray\" if is_xfel else \"unknown\"])\n\n \"\"\" Data items in the DIFFRN_SCAN category describe the parameters of one\n or more scans, relating axis positions to frames.\"\"\"\n cbf.add_category(\"diffrn_scan\",[\"id\",\"frame_id_start\",\"frame_id_end\",\"frames\"])\n cbf.add_row([\"SCAN1\",\"FRAME1\",\"FRAME1\",\"1\"])\n\n \"\"\"Data items in the DIFFRN_SCAN_FRAME category describe\n the relationships of particular frames to scans.\"\"\"\n cbf.add_category(\"diffrn_scan_frame\",[\"frame_id\",\"frame_number\",\"integration_time\",\"scan_id\",\"date\"])\n cbf.add_row([\"FRAME1\",\"1\",\"0.0\",\"SCAN1\",timestamp])\n\n \"\"\" Data items in the ARRAY_INTENSITIES category record the\n information required to recover the intensity data from\n the set of data values stored in the ARRAY_DATA category.\"\"\"\n # More detail here: http://www.iucr.org/__data/iucr/cifdic_html/2/cif_img.dic/Carray_intensities.html\n array_names = []\n cbf.find_category(b\"diffrn_data_frame\")\n while True:\n try:\n cbf.find_column(b\"array_id\")\n array_names.append(cbf.get_value().decode())\n cbf.next_row()\n except Exception as e:\n assert \"CBF_NOTFOUND\" in str(e)\n break\n\n if not isinstance(gain, list):\n gain = [gain] * len(array_names)\n\n\n cbf.add_category(\"array_intensities\",[\"array_id\",\"binary_id\",\"linearity\",\"gain\",\"gain_esd\",\"overload\",\"underload\",\"undefined_value\"])\n for i, array_name in enumerate(array_names):\n overload = trusted_ranges[i][1] + 1\n underload = trusted_ranges[i][0]\n undefined = underload - 1\n cbf.add_row([array_name,str(i+1),\"linear\",\"%f\"%gain[i],\"0.0\",str(overload),str(underload),str(undefined)])",
"def create_new_event(self):\n pass",
"def pulsEphem(self):\n\n hduMain = fits.open(self.ft1)\n\n # --------------------------------------------------------------------------------------------- #\n # Split the FT1 file every 4000 events\n noEv = 0\n deltEv = 5000\n count = 0\n wfil = open(os.path.dirname(self.ft1) + os.path.basename('tmpFT1.lis'), 'w')\n while noEv <= self.nevents:\n hduCols = []\n for colname, form, uni in zip(hduMain['EVENTS'].columns.names, hduMain['EVENTS'].columns.formats, hduMain['EVENTS'].columns.units):\n hduCols.append( fits.Column(name=colname, array=hduMain['EVENTS'].data[colname][noEv:noEv+deltEv], format=form, unit=uni) )\n # Updte the tstart and tstop in the header in order for tempo2 to work...\n hduMain['EVENTS'].header.set('TSTART', hduMain['EVENTS'].data['TIME'][noEv:noEv+deltEv][0])\n hduMain['EVENTS'].header.set('TSTOP', hduMain['EVENTS'].data['TIME'][noEv:noEv+deltEv][-1])\n newHDU = fits.BinTableHDU.from_columns(hduCols, name='EVENTS', header=hduMain['EVENTS'].header) \n hdulist = fits.HDUList([hduMain['PRIMARY'], newHDU, hduMain['GTI']])\n tmpName = os.path.dirname(self.ft1)+os.path.basename('tempFT1_'+str(count)+'.fits')\n hdulist.writeto(tmpName, clobber=True)\n wfil.write(tmpName + '\\n')\n noEv += deltEv\n count += 1\n if noEv != self.nevents:\n hduCols = []\n noEv -= deltEv\n for colname, form, uni in zip(hduMain['EVENTS'].columns.names, hduMain['EVENTS'].columns.formats, hduMain['EVENTS'].columns.units):\n hduCols.append( fits.Column(name=colname, array=hduMain['EVENTS'].data[colname][noEv:self.nevents], format=form, unit=uni) )\n hduMain['EVENTS'].header.set('TSTART', hduMain['EVENTS'].data['TIME'][noEv:self.nevents][0])\n hduMain['EVENTS'].header.set('TSTOP', hduMain['EVENTS'].data['TIME'][noEv:self.nevents][-1])\n newHDU = fits.BinTableHDU.from_columns(hduCols, name='EVENTS', header=hduMain['EVENTS'].header)\n hdulist = fits.HDUList([hduMain['PRIMARY'], newHDU, hduMain['GTI']])\n tmpName = os.path.dirname(self.ft1)+os.path.basename('tempFT1_'+str(count)+'.fits')\n hdulist.writeto(tmpName, clobber=True)\n wfil.write(tmpName + '\\n')\n wfil.close()\n\n hduMain.close()\n\n # --------------------------------------------------------------------------------------------- #\n # Run tempo2 for each piece of the FT1\n rfil = open(os.path.dirname(self.ft1) + 'tmpFT1.lis', 'r')\n percent = 0\n nbFiles = sum(1 for line in open(os.path.dirname(self.ft1) + 'tmpFT1.lis', 'r'))\n count = 0\n for tmpFil in rfil:\n # Print a progression bar every 5%\n if ( count / np.floor(nbFiles) * 100 ) >= percent:\n self._progressBar(percent, printEvery=5)\n percent += 5\n with open(os.devnull, 'wb') as devnull:\n subprocess.check_call(['/dsm/fermi/fermifast/glast/tempo2-2013.9.1/tempo2',\n '-gr', 'fermi', '-ft1', tmpFil[:-1], '-ft2', self.ft2, '-f', self.ephem,\n '-phase'], stdout=devnull, stderr=subprocess.STDOUT)\n count += 1\n # Replace the old ft1 by the new one with the PULSE_PHASE column\n #os.remove()\n self._gtSelect(data = os.path.dirname(self.ft1) + os.path.basename('tmpFT1.lis'))\n\n\n\n\n #self.nevents\n #J2032+4127_54683_57791_chol_pos.par\n #os.popen(\"tempo2 -gr fermi -ft1 {} -ft2 {} -f {} -phase\".format(self.ft1, self.ft2, self.ephem))",
"def __init__(self):\n super(FseventsdEventData, self).__init__(data_type=self.DATA_TYPE)\n self.event_identifier = None\n self.file_entry_modification_time = None\n self.flags = None\n self.node_identifier = None\n self.path = None",
"def copy(self, ID=None):\n\t\tif ID is None:\n\t\t\tmatch = re.match(r'^(.*-copy)(\\d*)$', self._ID)\n\t\t\tif match is not None:\n\t\t\t\tID = match.group(1) + str(int(match.group(2) or 1) + 1)\n\t\t\telse:\n\t\t\t\tID = self._ID + '-copy'\n\t\treturn FlowFrame(self.data.copy(), ID=ID)",
"def __copy__(self):\n logger.debug(\"Copying Flow() object.\")\n c = Flow()\n c.workingDir = self.workingDir \n c.cleanupTemp = self.cleanupTemp\n c.default_inputpaths = self.default_inputpaths\n c.default_outputpath = self.default_outputpath\n c.startNode = self.startNode\n c.lastNode = self.lastNode\n return c",
"def merge(self, new_video, fields):\n merged_video = Video(self.event)\n merged_video.filename = self.filename\n for field in self.metadata:\n if field in set(fields):\n merged_video.metadata[field] = new_video.metadata.get(field)\n else:\n merged_video.metadata[field] = self.metadata.get(field)\n return merged_video",
"def EventFrame (self):\n pass",
"def create_new_frame(image_file, green_file, process_file):\n\n # this print() statement is there to help see which frame is being processed\n print(f'{process_file[-7:-4]}', end=',', flush=True)\n\n image_img = Image.open(image_file)\n green_img = Image.open(green_file)\n\n # Make Numpy array\n np_img = np.array(green_img)\n\n # Mask pixels \n mask = (np_img[:, :, BLUE] < 120) & (np_img[:, :, GREEN] > 120) & (np_img[:, :, RED] < 120)\n\n # Create mask image\n mask_img = Image.fromarray((mask*255).astype(np.uint8))\n\n image_new = Image.composite(image_img, green_img, mask_img)\n image_new.save(process_file)",
"def update_events(self, new_events):\n\n for new_event in new_events:\n self.__events.setdefault(new_event.type, []).append(new_event)",
"def __init__(self, events):\n self.events = events",
"def event(self,evt,evn):\n #import pdb; pdb.set_trace()\n if (evt.get(\"skip_event\")):\n return\n # check if FEE data is one or two dimensional\n data = evt.get(Camera.FrameV1, self.src)\n if data is None:\n one_D = True\n data = evt.get(Bld.BldDataSpectrometerV1, self.src)\n else:\n one_D = False\n # get event timestamp\n timestamp = cspad_tbx.evt_timestamp(cspad_tbx.evt_time(evt)) # human readable format\n\n if data is None:\n self.nnodata +=1\n #self.logger.warning(\"event(): No spectrum data\")\n evt.put(skip_event_flag(),\"skip_event\")\n\n if timestamp is None:\n evt.put(skip_event_flag(),\"skip_event\")\n #self.logger.warning(\"event(): No TIMESTAMP, skipping shot\")\n\n elif data is not None:\n self.nshots +=1\n # get data as array and split into two half to find each peak\n if one_D:\n # filtering out outlier spikes in FEE data\n data = np.array(data.hproj().astype(np.float64))\n for i in range(len(data)):\n if data[i]>1000000000:\n data[i]=data[i]-(2**32)\n if self.dark is not None:\n data = data - self.dark\n spectrum = data\n spectrum1 = data[:data.shape[0]//2]\n spectrum2 = data[data.shape[0]//2:]\n else:\n data = np.array(data.data16().astype(np.int32))\n if self.dark is not None:\n data = data - self.dark\n data = np.double(data)\n data_split1 = data[:,:data.shape[1]//2]\n data_split2 = data[:,data.shape[1]//2:]\n # make a 1D trace of entire spectrum and each half to find peaks\n spectrum = np.sum(data,0)/data.shape[0]\n spectrum1 = np.sum(data_split1,0)/data_split1.shape[0]\n spectrum2 = np.sum(data_split2,0)/data_split2.shape[0]\n if not one_D:\n # the x-coordinate of the weighted center of peak region\n weighted_peak_one_positions = []\n for i in range(self.peak_one_range_min,self.peak_one_range_max):\n weighted_peak_one_positions.append(spectrum[i]*i)\n weighted_sum_peak_one = np.sum(weighted_peak_one_positions)\n weighted_peak_one_center_position = weighted_sum_peak_one/np.sum(spectrum[self.peak_one_range_min:self.peak_one_range_max])\n\n weighted_peak_two_positions = []\n for i in range(self.peak_two_range_min,self.peak_two_range_max):\n weighted_peak_two_positions.append(spectrum[i]*i)\n weighted_sum_peak_two = np.sum(weighted_peak_two_positions)\n weighted_peak_two_center_position = weighted_sum_peak_two/np.sum(spectrum[self.peak_two_range_min:self.peak_two_range_max])\n\n # normalized integrated regions between the peaks\n #int_left_region = np.sum(spectrum[weighted_peak_one_center_position+len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2:(weighted_peak_two_center_position-len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2)])\n int_left_region = np.sum(spectrum[:weighted_peak_two_center_position/2])\n\n #int_left_region_norm = np.sum(spectrum[weighted_peak_one_center_position+len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2:(weighted_peak_two_center_position-len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2)])/len(spectrum[weighted_peak_one_center_position+len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2:(weighted_peak_two_center_position-len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2)])\n int_left_region_norm = np.sum(spectrum[:weighted_peak_two_center_position/2])/len(spectrum[:weighted_peak_two_center_position/2])\n\n int_right_region = np.sum(spectrum[self.peak_two_range_max:])\n\n int_right_region_norm = np.sum(spectrum[self.peak_two_range_max:])/len(spectrum[self.peak_two_range_max:])\n\n # normalized integrated peaks\n int_peak_one = 
np.sum(spectrum[(weighted_peak_one_center_position-len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2):(weighted_peak_one_center_position+len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2)])\n\n int_peak_one_norm = np.sum(spectrum[(weighted_peak_one_center_position-len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2):(weighted_peak_one_center_position+len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2)])/len(spectrum[(weighted_peak_one_center_position-len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2):(weighted_peak_one_center_position+len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2)])\n\n int_peak_two = np.sum(spectrum[(weighted_peak_two_center_position-len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2):(weighted_peak_two_center_position+len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2)])\n\n int_peak_two_norm = np.sum(spectrum[(weighted_peak_two_center_position-len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2):(weighted_peak_two_center_position+len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2)])/len(spectrum[(weighted_peak_two_center_position-len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2):(weighted_peak_two_center_position+len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2)])\n\n if not one_D:\n if int_peak_one_norm/int_peak_two_norm > self.peak_ratio:\n print(\"event(): inflection peak too high\")\n evt.put(skip_event_flag(), \"skip_event\")\n return\n if int_left_region_norm > self.normalized_peak_to_noise_ratio*int_peak_two_norm:\n print(\"event(): noisy left of low energy peak\")\n evt.put(skip_event_flag(), \"skip_event\")\n return\n if int_right_region_norm > self.normalized_peak_to_noise_ratio*int_peak_two_norm:\n print(\"event(): noisy right of high energy peak\")\n evt.put(skip_event_flag(), \"skip_event\")\n return\n #self.logger.info(\"TIMESTAMP %s accepted\" %timestamp)\n self.naccepted += 1\n self.ntwo_color += 1\n print(\"%d Remote shot\" %self.ntwo_color)\n print(\"%s Remote timestamp\" %timestamp)"
]
| [
"0.52557224",
"0.5220436",
"0.5044389",
"0.49591452",
"0.49120632",
"0.48713523",
"0.4860083",
"0.48562306",
"0.48033205",
"0.47910637",
"0.47875854",
"0.47843662",
"0.4754704",
"0.47503555",
"0.47374755",
"0.47197166",
"0.47195277",
"0.47106525",
"0.4694523",
"0.4671501",
"0.46702412",
"0.46679685",
"0.46592322",
"0.4647873",
"0.4631188",
"0.4626793",
"0.46258345",
"0.46256945",
"0.45949098",
"0.45936307"
]
| 0.5344701 | 0 |
cast to str instead of HttpUrl model instance | def url_to_string(cls, v, values, **kwargs):
return str(v) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __repr__(self):\n return '<Url %r' % self.url",
"def put(self, obj):\n\n if obj is None:\n return\n\n assert isinstance(obj, str), (\n f\"object is not of type string, \"\n f\"but {type(obj)} for link attribute\")\n\n obj = obj.strip()\n\n if not self.is_url(obj):\n raise dj.DatajointError(\n f\"string {obj} is not a url for attribute {self}\"\n )\n\n return obj",
"def test_model(self):\n url = Urls('https://blog.gds-gov.tech/terragrunt-in-retro-i-would-have-done-these-few-things-e5aaac451942', 'http://172.104.63.163/n4lm9')\n self.assertEqual(url.long,'https://blog.gds-gov.tech/terragrunt-in-retro-i-would-have-done-these-few-things-e5aaac451942')\n self.assertEqual(url.short,'http://172.104.63.163/n4lm9')",
"def Url(self) -> str:",
"def __str__(self):\n return repr(self.api_url)",
"def _make_url(self):\n ...",
"def url(value: Any) -> str:\n url_in = str(value)\n\n if urlparse(url_in).scheme in [\"http\", \"https\"]:\n return cast(str, vol.Schema(vol.Url())(url_in))\n\n raise vol.Invalid(\"Invalid URL\")",
"def url(self) -> str:\n return self.url_as()",
"def __str__(self):\n if self._str is None:\n # special cases\n if self == URI.INVALID():\n self._str = \"[invalid]\"\n elif self == URI.EMPTY():\n self._str = \"\"\n elif self == URI.INLINE():\n self._str = \"[inline]\"\n elif self == URI.EVAL():\n self._str = \"[eval]\"\n elif not self._isEmpty(self._scheme) and self._isEmpty(self._host) and self._isEmpty(self._port) and self._isEmpty(self._path) and self._isEmpty(self._query):\n self._str = self._scheme + \":\"\n else:\n self._str = \"\"\n if self._scheme in defaults.schemesWithNoDoubleSlash:\n self._str += self._scheme + \":\"\n elif self._scheme is not None:\n self._str += self._scheme + \"://\"\n \n self._str += self._host\n \n if self._port is not None:\n self._str += \":\" + str(self._port)\n \n if self._path is not None:\n self._str += urllib.quote(self._path.encode('utf8')).decode('ascii')\n \n if self._query is not None:\n self._str += \"?\" + self._query\n return self._str",
"def __str__(self):\r\n self.query = urllib.urlencode(self.args)\r\n self.query = urllib.unquote(self.query)\r\n return urlparse.urlunparse((self.scheme, self.netloc, self.path, self.params, self.query, self.fragment))",
"def u(obj):\n return obj if isinstance(obj, str) else str(obj)",
"def url_to_text(self, url):\r\n return url.toString()",
"def test_observable_url_normalize(self):\n result = self.api.observable_add('http://test.com')\n self.assertEqual(result['value'], 'http://test.com/')\n result = self.api.observable_add('https://test.com/something/../asd')\n self.assertEqual(result['value'], 'https://test.com/asd')",
"def normalize_url(self, url):\n pass",
"def enforce_url(value: Union[\"URL\", bytes, str], *, name: str) -> \"URL\":\n if isinstance(value, (bytes, str)):\n return URL(value)\n elif isinstance(value, URL):\n return value\n\n seen_type = type(value).__name__\n raise TypeError(f\"{name} must be a URL, bytes, or str, but got {seen_type}.\")",
"def raw_url(self) -> str:\n return self.url_as(raw=True)",
"def test_strmethod(self):\n b1 = BaseModel()\n self.assertEqual(type(str(b1)), str)",
"def __str__(self):\n if self.external_form:\n return self.external_form\n if self.url:\n return self.format('url')\n if self.uuid:\n return self.format('uuid')\n return self.format('path')",
"def test_urlattr(self):\n\n e = create_engine(\n \"mysql+mysqldb://scott:tiger@localhost/test\",\n module=mock_dbapi,\n _initialize=False,\n )\n u = url.make_url(\"mysql+mysqldb://scott:tiger@localhost/test\")\n e2 = create_engine(u, module=mock_dbapi, _initialize=False)\n assert e.url.drivername == e2.url.drivername == \"mysql+mysqldb\"\n assert e.url.username == e2.url.username == \"scott\"\n assert e2.url is u\n eq_(\n u.render_as_string(hide_password=False),\n \"mysql+mysqldb://scott:tiger@localhost/test\",\n )\n assert repr(u) == \"mysql+mysqldb://scott:***@localhost/test\"\n assert repr(e) == \"Engine(mysql+mysqldb://scott:***@localhost/test)\"\n assert repr(e2) == \"Engine(mysql+mysqldb://scott:***@localhost/test)\"",
"def url(self):\n # type: () -> string_types\n return self._url",
"def __str__(self):\n\t\treturn '{0} ({1})'.format (self.name, self.link)",
"def _tostr(obj): # pragma: no cover\n return obj if isinstance(obj, str) else obj.decode()",
"def object_url(self, object_t, object_id=None, relation=None):\n if object_t not in self.objects_types:\n raise TypeError(f\"{object_t} is not a valid type\")\n request_items = (\n str(item) for item in [object_t, object_id, relation] if item is not None\n )\n request_path = \"/\".join(request_items)\n return self.url(request_path)",
"def form_url_str(self, type = 'cur_quotes'):\n if type == 'cur_quotes':\n self.form_cur_quotes_stock_url_str()\n \n # form the property. 2 methods enabled.\n if self.enable_form_properties_fr_exceltable:\n self.form_cur_quotes_property_url_str_fr_excel()\n else:\n self.form_cur_quotes_property_url_str()\n \n self.cur_quotes_full_url = self.cur_quotes_start_url + self.cur_quotes_stock_portion_url +\\\n self.cur_quotes_property_portion_url + self.cur_quotes_end_url",
"def url_for_object(self, key: typing.Optional[str]=None) -> str:\n ...",
"def getURLForThing(thing):",
"def format_output_url(cls, url, **kw):\r\n u = UrlParser(url)\r\n\r\n if u.is_reddit_url():\r\n # make sure to pass the port along if not 80\r\n if not kw.has_key('port'):\r\n kw['port'] = request.port\r\n \r\n # disentagle the cname (for urls that would have cnameframe=1 in them)\r\n u.mk_cname(**kw)\r\n \r\n # make sure the extensions agree with the current page\r\n if c.extension:\r\n u.set_extension(c.extension)\r\n\r\n # unparse and encode it un utf8\r\n return _force_unicode(u.unparse()).encode('utf8')",
"def url( self ):\n return str(self._urlEdit.text())",
"def test_websites_str(self):\n expected = self.website.name\n self.assertEqual(expected, str(self.website))",
"def from_url(cls, url, **kwargs):\n kwargs = dict(kwargs)\n kwargs['format'] = 'url'\n # for now we only support ascii\n return cls(value=url.encode('ascii'), **kwargs)"
]
| [
"0.6546976",
"0.6429486",
"0.6398717",
"0.6329465",
"0.6277657",
"0.6267619",
"0.61520636",
"0.61243427",
"0.60166615",
"0.60107136",
"0.59760755",
"0.5960031",
"0.5904033",
"0.58582616",
"0.5780172",
"0.5760385",
"0.57133275",
"0.56834507",
"0.5584159",
"0.55777216",
"0.55696064",
"0.5564608",
"0.55414474",
"0.5526757",
"0.5504108",
"0.5493198",
"0.549279",
"0.5489505",
"0.5475474",
"0.5468763"
]
| 0.6620633 | 0 |
Get the feature matrix of the docs, with each row of length max_length | def get_features(docs, max_length):
docs = list(docs)
Xs = numpy.zeros((len(docs), max_length), dtype='int32')
for i, doc in enumerate(docs):
j = 0
for token in doc:
vector_id = token.vocab.vectors.find(key=token.orth)
if vector_id >= 0:
Xs[i, j] = vector_id
else:
Xs[i, j] = 0
j += 1
if j >= max_length:
break
return Xs | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def docExtract(self):\n\n self.fv = []\n for doc in self.documents:\n self.fv.append(self.featureSet.extract(doc))\n\n # Convert to a numpy matrix.\n return np.array(np.asmatrix(self.fv))\n # return self.fv",
"def generateMatrix(self):\n if self.tokenWeights and self.extraFeatures:\n nFeatures = self.wordId + self.wordId2 + len(self.EXTRA_WEIGHTS_LABELS)\n logging.info('Exporting TOKEN WEIGHTS AND EXTRA FEATURES %dx%d'%(self.docId, nFeatures))\n mtrx = np.zeros((self.docId, nFeatures))\n \n for docId, doc in self.documents.iteritems():\n # iterate through 1st sentence\n for wId, val in doc['s1'].iteritems():\n mtrx[docId, wId] = val\n # then iterate thru 2nd sentence, store on 2ND PARTITION\n for wId, val in doc['s2'].iteritems():\n mtrx[docId, self.wordId + wId] = val\n # finally extra features values stored at the end of the vector\n for label, val in doc['extraFeatures'].iteritems():\n mtrx[docId, self.wordId + self.wordId2 + self.EXTRA_WEIGHTS_LABELS.index(label)] = val\n\n elif self.tokenWeights and not self.extraFeatures:\n nFeatures = self.wordId + self.wordId2\n logging.info('Exporting TOKEN WEIGHTS %dx%d'%(self.docId, nFeatures))\n mtrx = np.zeros((self.docId, nFeatures))\n \n for docId, doc in self.documents.iteritems():\n # iterate through 1st sentence\n for wId, val in doc['s1'].iteritems():\n mtrx[docId, wId] = val\n # then iterate thru 2nd sentence, store on 2ND PARTITION\n for wId, val in doc['s2'].iteritems():\n mtrx[docId, self.wordId + wId] = val\n else:\n nFeatures = len(self.EXTRA_WEIGHTS_LABELS)\n logging.info('Exporting EXTRA FEATURES %dx%d'%(self.docId, nFeatures))\n mtrx = np.zeros((self.docId, nFeatures))\n \n for docId, doc in self.documents.iteritems():\n for label, val in doc['extraFeatures'].iteritems():\n mtrx[docId, self.EXTRA_WEIGHTS_LABELS.index(label)] = val\n logging.info('Matrix generated')\n logging.info(mtrx.shape)\n return mtrx",
"def build_matrix(file, feature_mode):\n\n nlp = spacy.load('de_core_news_sm')\n\n conn = sql.connect(file)\n\n sql_select = \"\"\"SELECT COMP, ISCOMP, SENTENCE FROM sentences WHERE ISCOMP!=-1\"\"\"\n\n c = conn.cursor()\n c.execute(sql_select)\n\n rows = c.fetchall()\n\n nltk_data = list()\n\n for r in rows:\n comp = r[0]\n label = r[1]\n sentence = r[2]\n\n sentence = sentence.replace('<comp>', '')\n sentence = sentence.replace('</comp>', '')\n doc = nlp(sentence)\n\n features = process_row(doc, comp, feature_mode)\n\n nltk_tuple = (features, label, sentence)\n nltk_data.append(nltk_tuple)\n\n return nltk_data",
"def build_term_doc_matrix(self):\n\n print(\"Inside build_term_doc_matrix >>> \")\n self.term_doc_matrix = np.zeros([self.number_of_documents,self.vocabulary_size])\n for kVal in range(0, self.number_of_documents):\n for lVal,wordVocab in enumerate(self.vocabulary):\n wrd_doc = 0\n for oVal in range(0, len(self.documents[kVal])):\n if (wordVocab == self.documents[kVal][oVal]):\n wrd_doc = wrd_doc +1\n self.term_doc_matrix[kVal][lVal] = wrd_doc\n #print(\"term_doc_matrix >>> \" + self.term_doc_matrix)",
"def feature_matrix(self):\n return self._feat_matrix",
"def docs2matrix(docs):\n # [token for doc in docs for token in doc]\n term_dictionary = corpora.Dictionary(docs)\n doc_matrix = [term_dictionary.doc2bow(doc) for doc in docs]\n logging.info(\"Len of raw corpus: %d | Len of matrix: %d\" % (len(docs), len(doc_matrix)))\n return doc_matrix, term_dictionary",
"def doc2features(self,sent):\n return [self.word2features(sent['tokens'], i) for i in range(len(sent['tokens']))]",
"def createFeatureMatrix(self,batch):\n \n feature_dim = self.__flags.no_inner_unit * self.__flags.no_outer_unit\n data = np.zeros((len(batch), self.__flags.embedding_dim, 2 * feature_dim), dtype=np.float32)\n\n count = 0\n for obj in batch:\n m1 = self.__object2Matrix(obj)\n m2 = self.__object2Matrix(obj)\n data[count, :self.__flags.embedding_dim, :feature_dim] = m1\n data[count, :self.__flags.embedding_dim, feature_dim:2 * feature_dim] = m2\n count += 1\n scores = np.zeros(len(batch), dtype=np.float32)\n\n return (data,scores)",
"def document_to_lda_features(lda_model, document):\n topic_importances = lda_model.get_document_topics(document, minimum_probability=0)\n topic_importances = numpy.array(topic_importances)\n return topic_importances[:,1]",
"def get_embed_from_matrix(features, fc):\n return []",
"def doc_term_matrix(text, vectorizer = 'CV', stop_words = 'english'):\n\n\tfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\n\timport pandas as pd\n\n\tif vectorizer == 'CV':\n\t vec = CountVectorizer(stop_words = stop_words)\n\telif vectorizer == 'TFIDF':\n\t vec = TfidfVectorizer(stop_words = stop_words)\n\n\tfit = vec.fit_transform(text)\n\tdf = pd.DataFrame(fit.toarray(), columns = vec.get_feature_names())\n\treturn df",
"def build_feature_matrix(node2feature, length, features):\n num_nodes = len(node2feature)\n num_features = len(features)\n X = np.zeros((length, num_nodes, num_features))\n \n for key, val in node2feature.items():\n for i, f in enumerate(features):\n X[:,key,i] = val[f]\n \n return X",
"def get_topic_matrix(self):\n print('get topic matrix')\n\n topic_words_dict = self.config['topic_words']\n\n topic_matrix = np.empty((0, self.wordvec.embedding_dim))\n\n topic_id = 0\n for topic in topic_words_dict.keys():\n topic_words = topic_words_dict[topic]\n topic_vector = self.wordvec.avg_words_vector(topic_words)\n\n topic_matrix = np.append(topic_matrix, topic_vector, axis=0)\n\n self.id2topic[str(topic_id)] = topic\n topic_id += 1\n\n return topic_matrix",
"def generate_matrix(doc):\n bv_stru=doc['bv_structure']\n lattice=bv_stru['lattice']\n l_matrix=lattice['matrix']\n #manipulate over lattice matrix\n mat_list=[]\n for list_m in l_matrix:\n str_null=''\n for items in list_m:\n items=Decimal(items).quantize(Decimal('0.000000'))\n str_null=str_null+str(items)+' '\n str_null=str_null[:len(str_null)-1]\n mat_list.append(str_null)\n return mat_list",
"def computeWordMatrix( Docs, Keywords ) :\n\n w2vec_count = CountVectorizer( ngram_range=(1, 4), vocabulary=Keywords )\n X_Count = w2vec_count.fit_transform( Docs )\n\n return X_Count",
"def build_matrix(docs):\n nrows = len(docs)\n idx = {}\n tid = 0\n nnz = 0\n # Remove all ratings\n for d in docs:\n #d = d[1:]\n nnz += len(set(d))\n for w in d:\n if w not in idx:\n idx[w] = tid\n tid += 1\n ncols = len(idx)\n print nrows\n print ncols\n # set up memory\n ind = np.zeros(nnz, dtype=np.int)\n val = np.zeros(nnz, dtype=np.double)\n ptr = np.zeros(nrows+1, dtype=np.int)\n i = 0 # document ID / row counter\n n = 0 # non-zero counter\n # transfer values\n for d in docs:\n #d = d[1:]\n cnt = Counter(d)\n keys = list(k for k,_ in cnt.most_common())\n l = len(keys)\n for j,k in enumerate(keys):\n ind[j+n] = idx[k]\n val[j+n] = cnt[k]\n ptr[i+1] = ptr[i] + l\n n += l\n i += 1\n\n mat = csr_matrix((val, ind, ptr), shape=(nrows, ncols), dtype=np.double)\n mat.sort_indices()\n\n return mat",
"def matrix_to_lists(doc_word):\n if np.count_nonzero(doc_word.sum(axis=1)) != doc_word.shape[0]:\n logger.warning(\"all zero row in document-term matrix found\")\n if np.count_nonzero(doc_word.sum(axis=0)) != doc_word.shape[1]:\n logger.warning(\"all zero column in document-term matrix found\")\n sparse = True\n try:\n # if doc_word is a scipy sparse matrix\n doc_word = doc_word.copy().tolil()\n except AttributeError:\n sparse = False\n\n if sparse and not np.issubdtype(doc_word.dtype, int):\n raise ValueError(\"expected sparse matrix with integer values, found float values\")\n\n ii, jj = np.nonzero(doc_word)\n if sparse:\n ss = tuple(doc_word[i, j] for i, j in zip(ii, jj))\n else:\n ss = doc_word[ii, jj]\n\n n_tokens = int(doc_word.sum())\n DS = np.repeat(ii, ss).astype(np.intc)\n WS = np.empty(n_tokens, dtype=np.intc)\n startidx = 0\n for i, cnt in enumerate(ss):\n cnt = int(cnt)\n WS[startidx:startidx + cnt] = jj[i]\n startidx += cnt\n return WS, DS",
"def make_document_term_matrix(token_list):\n vocabulary = defaultdict()\n vocabulary.default_factory = vocabulary.__len__\n j_indices = []\n \"\"\"Construct an array.array of a type suitable for scipy.sparse indices.\"\"\"\n indptr = array.array(str(\"i\"))\n values = array.array(str(\"i\"))\n indptr.append(0)\n\n for tokens in token_list:\n feature_counter = {}\n for token in tokens:\n feature_idx = vocabulary[token]\n if feature_idx not in feature_counter:\n feature_counter[feature_idx] = 1\n else:\n feature_counter[feature_idx] += 1\n j_indices.extend(feature_counter.keys())\n values.extend(feature_counter.values())\n indptr.append(len(j_indices))\n\n vocabulary = dict(vocabulary)\n j_indices = np.asarray(j_indices, dtype=np.intc)\n indptr = np.frombuffer(indptr, dtype=np.intc)\n values = np.frombuffer(values, dtype=np.intc)\n\n X = scipy.sparse.csr_matrix((values, j_indices, indptr),\n shape=(len(indptr) - 1, len(vocabulary)),\n dtype=np.int64)\n X.sort_indices()\n return X, vocabulary",
"def create_Tf_matrix(\n corpus,\n filename_npz=\"../data/tfidf/data_tf.npz\",\n filename_features=\"../data/tfidf/data_feature_names.pkl\",\n):\n\n vectorizer = CountVectorizer(max_features=len(corpus))\n X = vectorizer.fit_transform(corpus)\n print(\"-Vectorized matrix, \", X.toarray().shape)\n print(\" first line:\")\n print(X.toarray()[0])\n print(\"- Nombre de features :\" + str(len(vectorizer.get_feature_names())))\n print(vectorizer.get_feature_names()[0:10], \" ...\")\n\n data = pd.DataFrame(vectorizer.get_feature_names())\n data.to_pickle(filename_features)\n print(\"tf feature names - saved\")\n sparse.save_npz(filename_npz, X)\n print(\"tf matrix:\", filename_npz, \" - saved\")",
"def _create_feature_vec():\n\tnum_tags = NGRAM_TUPLE[0]\n\tfvec = []\n\tfor _, size in FEATURE_TUPLE:\n\t\tfvec.append(np.zeros((num_tags, size)))\n\n\t# Append tag ngram weights to end\n\tfvec.append(np.zeros((num_tags, num_tags)))\n\treturn fvec",
"def load_feature_matrix(src):\n feat_mat = smat_util.load_matrix(src)\n if isinstance(feat_mat, np.ndarray):\n feat_mat = np.ascontiguousarray(feat_mat)\n elif isinstance(feat_mat, smat.spmatrix):\n feat_mat = feat_mat.tocsr()\n feat_mat.sort_indices()\n return feat_mat",
"def review_to_vec(words, model, num_features , index2word_set):\n \n feature_vec = np.zeros((num_features), dtype=\"float32\")\n word_count = 0\n \n \n \n for word in words:\n if word in index2word_set: \n word_count += 1\n feature_vec += model[word]\n\n if word_count == 0:\n word_count = 1\n\n feature_vec /= word_count\n\n return feature_vec",
"def gram_matrix(features, normalize=True):\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****",
"def idf_object_features_set(set_id):\n # idf for calc features of new docs\n # object-features for learning model\n # doc_index links doc_id and row index in object-features\n # lemma_index links lemmas and column index in object-features\n\n # get lemmas of all docs in set\n docs = db.get_lemmas_freq(set_id)\n\n # document frequency - number of documents with lemma\n doc_freq = {}\n # number (sum of weights) of lemmas in document\n doc_size = {}\n # index of lemma in overall list\n lemma_index = {}\n # lemma counter in overall list\n lemma_counter = 0\n # document index\n doc_index = {}\n # document counter in overall list\n doc_counter = 0\n\n for doc_id in docs:\n # initialize doc_size\n doc_size[doc_id] = 0\n # add document in overall list by giving index\n doc_index[doc_id] = doc_counter\n doc_counter += 1\n # count lemmas of doc\n for lemma in docs[doc_id]:\n # increase number of docs with lemma\n doc_freq[lemma] = doc_freq.get(lemma, 0) + 1\n # increase number of lemmas in document\n doc_size[doc_id] += docs[doc_id][lemma]\n\n # compute idf\n idf = {}\n for lemma in doc_freq:\n idf[lemma] = - math.log(doc_freq[lemma]/doc_counter)\n\n # and lemmas add in overall list by giving index\n for lemma in idf:\n if idf[lemma] != 0:\n lemma_index[lemma] = lemma_counter\n lemma_counter += 1\n\n # initialization objects-features matrix\n object_features = np.zeros((doc_counter, lemma_counter))\n\n # fill objects-features matrix\n for doc_id in docs:\n doc_lemmas = docs[doc_id]\n for lemma in doc_lemmas:\n if lemma_index.get(lemma, -1) != -1:\n object_features[doc_index[doc_id], lemma_index[lemma]] = \\\n doc_lemmas[lemma] / doc_size[doc_id] * idf[lemma]\n\n # check features with 0 for all documents\n feat_max = np.sum(object_features, axis=0)\n # print_lemmas(set_id, [k for k, v in enumerate(feat_max) if v == 0], lemma_index, idf)\n # check documents with 0 for all lemmas\n # print(np.min(np.sum(object_features, axis=1)))\n\n # save to db: idf, indexes and object_features\n db.put_training_set_params(set_id, idf, doc_index, lemma_index, object_features)\n\n # print(idf)\n # print(doc_index)\n # print(lemma_index)\n # print(object_features)",
"def build_matrix(docs):\n nrows = len(docs)\n idx = {}\n tid = 0\n nnz = 0\n for d in docs:\n nnz += len(set(d))\n for w in d:\n if w not in idx:\n idx[w] = tid\n tid += 1\n ncols = len(idx)\n \n # set up memory\n ind = np.zeros(nnz, dtype=np.int)\n val = np.zeros(nnz, dtype=np.double)\n ptr = np.zeros(nrows+1, dtype=np.int)\n i = 0 # document ID / row counter\n n = 0 # non-zero counter\n # transfer values\n for d in docs:\n cnt = Counter(d)\n keys = list(k for k,_ in cnt.most_common())\n l = len(keys)\n for j,k in enumerate(keys):\n ind[j+n] = idx[k]\n val[j+n] = cnt[k]\n ptr[i+1] = ptr[i] + l\n n += l\n i += 1\n \n mat = csr_matrix((val, ind, ptr), shape=(nrows, ncols), dtype=np.double)\n mat.sort_indices()\n \n return mat",
"def build_matrix(docs):\n nrows = len(docs)\n idx = {}\n tid = 0\n nnz = 0\n for d in docs:\n nnz += len(set(d))\n for w in d:\n if w not in idx:\n idx[w] = tid\n tid += 1\n ncols = len(idx)\n \n # set up memory\n ind = np.zeros(nnz, dtype=np.int)\n val = np.zeros(nnz, dtype=np.double)\n ptr = np.zeros(nrows+1, dtype=np.int)\n i = 0 # document ID / row counter\n n = 0 # non-zero counter\n # transfer values\n for d in docs:\n cnt = Counter(d)\n keys = list(k for k,_ in cnt.most_common())\n l = len(keys)\n for j,k in enumerate(keys):\n ind[j+n] = idx[k]\n val[j+n] = cnt[k]\n ptr[i+1] = ptr[i] + l\n n += l\n i += 1\n \n mat = csr_matrix((val, ind, ptr), shape=(nrows, ncols), dtype=np.double)\n mat.sort_indices()\n \n return mat",
"def build_matrix(docs):\n nrows = len(docs)\n idx = {}\n tid = 0\n nnz = 0\n for d in docs:\n nnz += len(set(d))\n for w in d:\n if w not in idx:\n idx[w] = tid\n tid += 1\n ncols = len(idx)\n \n # set up memory\n ind = np.zeros(nnz, dtype=np.int)\n val = np.zeros(nnz, dtype=np.double)\n ptr = np.zeros(nrows+1, dtype=np.int)\n i = 0 # document ID / row counter\n n = 0 # non-zero counter\n # transfer values\n for d in docs:\n cnt = Counter(d)\n keys = list(k for k,_ in cnt.most_common())\n l = len(keys)\n for j,k in enumerate(keys):\n ind[j+n] = idx[k]\n val[j+n] = cnt[k]\n ptr[i+1] = ptr[i] + l\n n += l\n i += 1\n \n mat = csr_matrix((val, ind, ptr), shape=(nrows, ncols), dtype=np.double)\n mat.sort_indices()\n \n return mat",
"def matrix_features(self):\n return self._matrix_features",
"def extract(self, document):\n f_num = len(self.feature_list)\n feature_vector = np.zeros((f_num,))\n words = document.split()\n for i in xrange(len(words)):\n for n in self.ns:\n ngram = self.try_get_ngram(words, n, i)\n if ngram and ngram in self.ngrams:\n self.add_ngram(feature_vector, ngram)\n return feature_vector",
"def makeFeatureVec(words, model, num_features):\n featureVec = np.zeros((num_features,),dtype=\"float32\")\n num_words = 0.\n index2word_set = set(model.wv.index2word)\n for word in words:\n if word in index2word_set:\n num_words += 1\n featureVec = np.add(featureVec,model[word]) \n featureVec = np.divide(featureVec,num_words)\n return featureVec"
]
| [
"0.73008066",
"0.6616102",
"0.65988714",
"0.65695107",
"0.6559335",
"0.6553229",
"0.63959837",
"0.63307846",
"0.6287775",
"0.62767667",
"0.6253159",
"0.62464654",
"0.6183584",
"0.6170977",
"0.6160313",
"0.61482793",
"0.61415446",
"0.61357987",
"0.61317766",
"0.60834575",
"0.6077498",
"0.6064735",
"0.6061318",
"0.6044977",
"0.6039411",
"0.6039411",
"0.6039411",
"0.6028348",
"0.60220677",
"0.60181755"
]
| 0.673142 | 1 |
add two numbers of different bases and return the sum | def flexibase_add(str1, str2, base1, base2):
n1 = base_to_int(str1, base1)
n2 = base_to_int(str2, base2)
#result = int_to_base(tmp, base1)
return n1+n2 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def flexibase_add(str1, str2, base1, base2):\n result = int_to_base(tmp, base1)\n return result",
"def sum(self, a, b):\n return int(a) + int(b)",
"def sum_num(a, b):\n return a + b",
"def add(num1,num2):\n if(num2==0):\n return num1\n return add((num1^num2),(num1&num2)<<1)",
"def add_numbers(a,b):\r\n return a+ b",
"def add_wo_carry(n1, n2):\n l1 = [int(x) for x in str(n1)]\n l2 = [int(x) for x in str(n2)] \n res1 = map(operator.add, l1, l2)\n res2 = [str(x)[-1] for x in res1]\n return \"\".join(res2)",
"def add(left: int, right: int) -> int:\n\n return left + right",
"def _add(a, b):\n\n # Todo: What if numbers have bigger length than 8\n a = _I2B(a, fixed_length=8)\n b = _I2B(b, fixed_length=8)\n return _B2I([i ^ j for i, j in zip(a, b)])",
"def sum(num1, num2):\n return num1 + num2",
"def add_binary(a, b):\n return bin(a + b)[2:]",
"def sum(num_1, num_2):\n return num_1 + num_2",
"def add_numbers(first_number, second_number):",
"def add(num1, num2):\n\n sums = num1 + num2\n return sums",
"def add(n1, n2):\n return n1 + n2",
"def sum(a, b):\n return a + b",
"def sum(a, b):\n return a + b",
"def add_numbers(a: int, b: int) -> int:\n return a + b",
"def add(a: int, b: int) -> int:\n if a == 0:\n return b\n elif b == 0:\n return a\n else: return add(a-1, b+1)",
"def add(n1, n2):\n return n1 + n2",
"def add_bitwise(b1,b2):\n \n \n \n \n \n if b1 == \"\":\n \n return b2\n \n elif b2 == \"\":\n \n return b1\n \n elif b1 == \"\" and b2 == \"\":\n \n return \"\"\n \n elif b1 == \"1\" and b2 == \"1\":\n \n return \"10\"\n \n else: \n \n rest = add_bitwise(b1[:-1],b2[:-1])\n \n if len(b1) == len(b2): \n \n if b1[-1] == \"0\" and b2[-1] == \"0\":\n \n return rest + \"0\"\n \n elif b1[-1] == \"1\" and b2[-1] == \"0\":\n \n return rest + \"1\"\n \n elif b1[-1] == \"0\" and b2[-1] == \"1\":\n \n return rest + \"1\"\n \n \n elif b1[-1] == \"1\" and b2[-1] == \"1\" and len(b1) != 1 and len(b2) != 1:\n \n rest = add_bitwise(b1[:-1],b2[:-1])\n \n if rest == \"10\":\n \n rest = \"11\" \n \n elif rest == \"\":\n \n rest = \"10\"\n \n elif rest == \"1\":\n \n rest = \"10\"\n \n else: \n \n return \"1\" + rest \n \n return rest + \"0\"\n \n \n elif len(b1) > len(b2):\n \n b2_with_zeroes = \"0\"*(len(b1) - len(b2)) + b2\n \n return add_bitwise(b1,b2_with_zeroes) \n \n \n elif len(b2) > len(b1):\n \n b1_with_zeroes = \"0\"*(len(b2) - len(b1)) + b1\n \n return add_bitwise(b1_with_zeroes,b2)",
"def sum_num(n1=2, n2=4):\n return n1 + n2",
"def get_sum(a,b):\n return",
"def add(num1, num2):\n sum = num1 + num2\n return sum",
"def suma(a, b):\n\n\ttotal = a + b\n\treturn total",
"def getSum(self, a: int, b: int) -> int:\n i = 0\n carry = 0\n res = 0\n while i < 12:\n curr_a_bit = (a >> i) & 1\n curr_b_bit = (b >> i) & 1\n # print(curr_a_bit, curr_b_bit)\n curr_bit = curr_a_bit ^ curr_b_bit ^ carry\n res |= (curr_bit << i)\n if curr_a_bit & curr_b_bit == 1 or curr_a_bit & carry == 1 or curr_b_bit & carry == 1:\n carry = 1\n else:\n carry = 0\n i += 1\n # print(res, bin(res), bin(a), bin(b))\n # 不用把 第 13 位 i = 12 时 carry 加上 result, 因为 这一位 判断 正 负 不需要\n if res >= 2 ** 11:\n # 举例:最大和2000 ,res < 2 ^ 11, 最小和 -2000, res > 2 ^ 11\n # 如果 和 是 0,比如 1 和 -1, res = 0\n # 如果和 是 -1,比如 1 和 -2, res > 2 ^ 11\n res = (~res) ^ 0b111111111111\n return res",
"def sumDigit():",
"def __add__(self, other: 'SInt') -> 'SInt':\r\n # Recoding the addition\r\n if type(other) != self.__class__ or len(self) != len(other):\r\n raise TypeError(\"Wrong type or length for other\")\r\n retenue = [0 for i in range(len(self) + 1)]\r\n new_bin = ''\r\n for i in range(len(self)):\r\n k = int(self.binaire[-(i + 1)]) + int(other.binaire[-(i + 1)]) + retenue[i]\r\n new_bin = ['0', '1', '0', '1'][k] + new_bin\r\n retenue[i + 1] = 1 if k > 1 else 0\r\n if self.signe == other.signe and retenue[-1] != retenue[-2]:\r\n raise OverflowError(\"The sum is over the bytes available\")\r\n H = self.__class__(self.nbBytes)\r\n H.binaire = new_bin\r\n return H",
"def addition(self, a, b):\n if not check_arguments(a, b): # check if arguments are numbers\n self.last_result = a + b",
"def add_binary(a,b):\n max_len = max(len(a),len(b))\n a = a + (max_len-len(a))*'0'\n b = b + (max_len-len(b))*'0'\n result = \"\" \n elde = 0 \n for i in range(max_len):\n a_i,b_i = int(a[i]),int(b[i])\n if (a_i + b_i+elde) == 2:\n elde = 1\n t = 0\n else:\n t = (a_i + b_i+elde)%2\n result += str(t)\n if (i == max_len-1) and elde:\n result+=\"1\"\n return result[::-1]",
"def add(num1, num2):\n return num1 + num2"
]
| [
"0.7697689",
"0.72538656",
"0.7124233",
"0.70175815",
"0.7010839",
"0.6899279",
"0.68684566",
"0.6858524",
"0.6757252",
"0.6746462",
"0.6708959",
"0.6673802",
"0.66633177",
"0.66163915",
"0.66131914",
"0.66131914",
"0.66076356",
"0.66049117",
"0.66027284",
"0.6597941",
"0.65744776",
"0.65696716",
"0.6549776",
"0.65403765",
"0.6535205",
"0.6521719",
"0.65196306",
"0.649946",
"0.64865154",
"0.64738023"
]
| 0.7828891 | 0 |
Create a new drink. | def drinks_new():
return render_template('drinks_new.html', drink={}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_new_drink(user_data):\n drink_res = requests.post(url = \"http://127.0.0.1:5000/add_drink\", json=user_data)\n return drink_res.text",
"def onDrinkCreated(self, event):",
"def add_drink(self, _drink):\n self.drinks.append(_drink)",
"def make_drink(beverage_type, water, milk, sugar):\n\n drink = Drink(beverage_type)\n if water.is_hot():\n drink.with_water(water)\n if not milk.is_off():\n drink.with_milk(milk)\n try:\n drink.with_sugar(sugar)\n except ValueError as e:\n return e.message\n\n drink.stir()\n\n return drink\n else:\n return 'ERROR: Bad Milk'\n else:\n return 'ERROR: Cold Water'",
"def create(request: BedRequestCreate) -> Bed:\n if request.garden_id:\n bed = Bed(request.name, request.yard_id, request.garden_id)\n else:\n bed = Bed(request.name, request.yard_id)\n return bed",
"def drink(self):\n print(self.name + ' is drinking.')",
"def send_make_drink(self, typ, drink):\n order_msg = activateOrder()\n order_msg.order_type = typ # assumes ring numbering is same as numbering in activateOrder message!\n order_msg.selection = drink # Undefined!\n self.order_pub.publish(order_msg)",
"def create():",
"def create():",
"def main():\n\n tea_bag = Flavour('Tea')\n hot_water = Water('Hot Water')\n semi_skimmed = Milk('Semi-Skimmed Milk')\n no_sugar = Sugar('No Sugar')\n\n print make_drink(tea_bag, hot_water, semi_skimmed, no_sugar)\n\n sour_milk = Milk.BAD_MILK\n print make_drink(tea_bag, hot_water, sour_milk, no_sugar)\n\n salt = Sugar.INVALID_SUGAR\n print make_drink(tea_bag, hot_water, semi_skimmed, salt)",
"def create(self, *args, **kwargs):\n pass",
"def create(self, validated_data):\n breed_data = validated_data.pop('breed').capitalize()\n breed_id, _ = Breed.objects.get_or_create(title=breed_data)\n # validated_data['breed'] = breed_id\n cat = Cat.objects.create(breed=breed_id, **validated_data)\n return cat",
"def add_drink_order(self, chair_num, _drink):\n self.customers[chair_num].add_drink(_drink)",
"def test_create_drug_successful(self):\n generic = models.Generic.objects.create(\n generic_name=\"Lisinopril\"\n )\n print(type(generic))\n drug = models.Drug.objects.create(\n product_id='12345',\n generic_name=generic,\n product_ndc=\"99999-9999\",\n brand_name=\"Zestril\"\n )\n\n self.assertEqual(str(drug), f\"{drug.product_id} {drug.product_ndc}\")",
"def create(request: PlantRequestCreate) -> Plant:\n logger.debug(f'Executing Plant create with request:{request}')\n return Plant(request.name, request.bed_id)",
"def do_drink(self, args):\n\n name = pick(\"Agent\", self.model.get_agent_names())\n if name is not None:\n fluid = int(input(\"Fluid to drink?\"))\n agent = self.model.get_agent(name)\n agent.drink(fluid)",
"def create():\n pass",
"def create(self):\n pass",
"def create(self):\n pass",
"def create(self):\n pass",
"def create_driver():\n\n request_data = request.get_json()\n if not request_data.get('name') or not request_data.get('name').strip():\n return Response.error(KEY_REQUIRED.format('name'), 400)\n\n if not request_data.get('license_number') or not request_data.get('license_number').strip():\n return Response.error(KEY_REQUIRED.format('license_number'), 400)\n\n if not request_data.get('motorcycle_id'):\n return Response.error(KEY_REQUIRED.format('motorcycle_id'), 400)\n\n motorcycle = Motorcycle.query.filter_by(\n id=request_data.get('motorcycle_id')).first()\n\n if not motorcycle:\n return Response.error(MOTORCYCLE_NOT_EXIST, 400)\n\n new_driver = Driver(**request_data_strip(request_data))\n new_driver.save()\n driver_schema = DriverSchema()\n response_data = {\n 'driver': driver_schema.dump(new_driver)\n }\n return Response.success(DRIVER_CREATED, response_data, 201)",
"def create(self):\n\n pass",
"def test_create_drink_created_by_admin(self):\n self.test_create_admin_user()\n self.test_create_seting_bar()\n user = UserBase.objects.get(username='admin')\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + user.token)\n url = reverse('drink-list')\n data = {\n 'name': 'Testing Drink',\n 'ingredients':'[{\"unit\":\"0\",\"ratio\":2,\"ingredient\":2001},{\"unit\":\"0\",\"ratio\":2,\"ingredient\":2001}]'\n }\n\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)",
"def add_fav_drink(self, user_id, drink_id):\n assert type(user_id) == str\n assert type(drink_id) == str \n\n existing_drink = False if self.drinks.get_drink_by_id(drink_id) is None else True\n existing_user = False if self.users.get_user_name(user_id) is None else True\n if not existing_drink:\n print(\"Drink does not exist.\")\n elif not existing_user:\n print(\"User does not exist.\")\n else :\n fav_drinks = self.get_fav_drinks(user_id)\n if fav_drinks is not None:\n if drink_id not in fav_drinks:\n fav_drinks.append(drink_id)\n else : # user exists but has no fav drinks\n fd_id = self.__generate_id()\n self.favorite_drinks.append({\"id\": fd_id, \"user_id\": user_id, \"drink_id\": [drink_id]})",
"def create(self):\n ...",
"def drinks_submit():\n drink = {\n 'name': request.form.get('name'),\n 'price': request.form.get('price'),\n 'description': request.form.get('description'),\n 'images': request.form.get('images').split()\n }\n drink_id = drinks_collection.insert_one(drink).inserted_id\n return redirect(url_for('drinks_show', drink_id=drink_id))",
"def create(self):\n\n raise NotImplementedError",
"def create_dive(dive_name, dive_creator):\n\n # first check if there is an active dive\n active_dive = get_active_dive()\n if active_dive:\n stop_dive(active_dive.name)\n print \"Creating dive %s\" % dive_name\n dive_creator.start_dive(dive_name, timezone.now())",
"def create_pet(pet_name, pet_type, pet_breed, pet_gender, \n pet_color, pet_status, pet_image, last_address):\n\n pet = Pet(pet_name=pet_name,\n pet_type=pet_type, \n pet_breed=pet_breed, \n pet_gender=pet_gender,\n pet_color=pet_color,\n pet_status=pet_status,\n pet_image=pet_image,\n last_address=last_address)\n\n db.session.add(pet)\n db.session.commit()\n\n return pet",
"def create(self, name=None, dynurl_config_data=None):\n data = DataObject()\n data.add_value_string(\"name\", name)\n data.add_value_string(\"dynurl_config_data\", dynurl_config_data)\n\n response = self.client.post_json(URL_MAPPING, data.data)\n response.success = response.status_code == 200\n return response"
]
| [
"0.6792728",
"0.63321716",
"0.62927145",
"0.61510134",
"0.6055335",
"0.5757426",
"0.57419044",
"0.57061833",
"0.57061833",
"0.56481385",
"0.5645407",
"0.5643587",
"0.56387925",
"0.55879545",
"0.5575625",
"0.5549776",
"0.55411875",
"0.55409163",
"0.55409163",
"0.55409163",
"0.55302036",
"0.5528716",
"0.54925144",
"0.5487898",
"0.54328156",
"0.54091597",
"0.5395323",
"0.5391734",
"0.5365476",
"0.53414565"
]
| 0.64342654 | 1 |
Submit a new drink. | def drinks_submit():
drink = {
'name': request.form.get('name'),
'price': request.form.get('price'),
'description': request.form.get('description'),
'images': request.form.get('images').split()
}
drink_id = drinks_collection.insert_one(drink).inserted_id
return redirect(url_for('drinks_show', drink_id=drink_id)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_new_drink(user_data):\n drink_res = requests.post(url = \"http://127.0.0.1:5000/add_drink\", json=user_data)\n return drink_res.text",
"def post(self):\n\n model = gbmodel.get_model()\n\n #-------- sentiment-text analysis on review----------------\n review = request.form['review']\n\n rating = self.sentiment_analysis(review)\n #--------finish sentiment-text analysis--------------------\n\n result = model.insert(request.form['name'], rating, review, request.form['drink_to_order'])\n if result == False:\n flash(\"specified store:\" + str(request.form['name']) + \" could not be added to our database!\")\n else:\n flash(\"Store \" + str(request.form['name']) + \" added, thank you!\")\n return render_template('index.html')",
"def submit(self):\n raise NotImplementedError()",
"def do_drink(self, args):\n\n name = pick(\"Agent\", self.model.get_agent_names())\n if name is not None:\n fluid = int(input(\"Fluid to drink?\"))\n agent = self.model.get_agent(name)\n agent.drink(fluid)",
"def drinks_new():\n return render_template('drinks_new.html', drink={})",
"def send_make_drink(self, typ, drink):\n order_msg = activateOrder()\n order_msg.order_type = typ # assumes ring numbering is same as numbering in activateOrder message!\n order_msg.selection = drink # Undefined!\n self.order_pub.publish(order_msg)",
"def submit(self):\n self.driver.find_element(*BaseLocators.SUBMIT_BUTTON).click()",
"def submit(self):\n data = self.getFSNDataDict()\n if data != []:\n MOSES.addToPiggyBank(data, self.user_id, self.password)",
"def submit(id, host):",
"def do_submit(self, price_float, volume_float):\r\n raise NotImplementedError()",
"def post(self, *args, **kwargs):\n self.request(\"post\", *args, **kwargs)",
"def post(self, request): # FIRST EXAMPLE\n model = self._create_booking(\n request=request) # when _create_booking is invoked, historio Client will log model\n print('save me')",
"def onDrinkCreated(self, event):",
"def add_new_flower() -> Union[str, Response]:\n if request.method == \"POST\":\n flower_name = request.form[\"flower_name\"]\n quantity = request.form[\"quantity\"]\n price = request.form[\"price\"]\n valid_quantity = validate_int(quantity)\n valid_price = validate_float(price)\n if not valid_quantity or not valid_price:\n flash(\"Invalid entry\", \"danger\")\n return render_template(\"add_new_flower.html\")\n add = AddFlower(flower_name)\n add.add_new_in_stock(valid_quantity, valid_price)\n return redirect(url_for(\"add_flower\", items=STOCK))\n return render_template(\"add_new_flower.html\")",
"def new_stock(request):\n if request.method != 'POST':\n # No data submitted; create a blank form.\n form= StockForm()\n else:\n # POST data submitted; process data.\n form= StockForm(data=request.POST)\n if form.is_valid():\n new_stock= form.save(commit=False)\n new_stock.save()\n return redirect('stock_trackers:stocks')\n #Display a blank or invalid form.\n context= {'form': form}\n return render(request, 'stock_trackers/new_stock.html', context)",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n args = self.reqparse.parse_args()\n check_for_empty_fields(args)\n car = Car(args['registration'],\n args['model'], args['capacity'])\n return car.add()"
]
| [
"0.62971145",
"0.59484977",
"0.5801306",
"0.57446504",
"0.57028437",
"0.5566209",
"0.5552328",
"0.5551744",
"0.5535918",
"0.553399",
"0.540908",
"0.5380634",
"0.5339647",
"0.5332633",
"0.53210694",
"0.53188866",
"0.53188866",
"0.53188866",
"0.53188866",
"0.53188866",
"0.53188866",
"0.53188866",
"0.53188866",
"0.53188866",
"0.53188866",
"0.53188866",
"0.53188866",
"0.53188866",
"0.53188866",
"0.5317971"
]
| 0.74074155 | 0 |
Show a single drink. | def drinks_show(drink_id):
drink = drinks_collection.find_one({'_id': ObjectId(drink_id)})
return render_template('drinks_show.html', drink=drink) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def drink(self):\n print(self.name + ' is drinking.')",
"def drinks_edit(drink_id):\n drink = drinks_collection.find_one({'_id': ObjectId(drink_id)})\n return render_template('drinks_edit.html', drink=drink)",
"def print_drinks(self):\n for beverage in self.drinks:\n print(beverage.get_name())",
"def drinks_new():\n return render_template('drinks_new.html', drink={})",
"def show(self, *args, **kwargs) -> None:\n pass",
"def show(self, *args, **kwargs) -> None:\n pass",
"def show(self, *args, **kwargs) -> None:\n pass",
"def show(self):\n\n pass",
"def onDrinkCreated(self, event):",
"def show(self):\n pass",
"def show(self) -> None:",
"def display(self, *args, **kwargs):\n return self.show(*args, **kwargs)",
"def show(self, item_id):\n pass",
"def show(*args, **kwargs):\n from . import core\n\n return core.show(*args, **kwargs)",
"def show_pet(self):\n pet = self.pet_factory.get_pet()\n\n print(\"this is a lovely \", pet)\n print(\"It says \", pet.speak())\n print(\"It eats \", self.pet_factory.get_food())",
"def do_drink(self, args):\n\n name = pick(\"Agent\", self.model.get_agent_names())\n if name is not None:\n fluid = int(input(\"Fluid to drink?\"))\n agent = self.model.get_agent(name)\n agent.drink(fluid)",
"def view_animal(self):\n self._view_animal()",
"def show_pet(self):\n pet = self.pet_factory.get_pet()\n print \"We have a lovely {}\".format(pet)\n print \"It says {}\".format(pet.speak())\n print \"We also have {}\".format(self.pet_factory.get_food())",
"def show(self):",
"def show(args, syn):\n \n ent = syn.get(args.id, downloadFile=False)\n syn.printEntity(ent)",
"def view_details_wishlist():\n try:\n curItem = wishlist_treeview.focus().strip('#')\n\n with open(\"images_url_dict.json\", \"r\") as images_dict_fo_complete:\n imgs_dict = json.load(images_dict_fo_complete)\n name = \"-\".join(curItem.lower().split())\n\n _, title, ID = imgs_dict[name]\n\n webbrowser.open_new_tab(\"https://eztv.io/shows/{}/{}/\".format(ID, title))\n except KeyError:\n print(\"Failed to use series list\")\n\n webbrowser.open_new_tab(\"https://www.imdb.com/find?ref_=nv_sr_fn&q={}&s=tt\".format(curItem))",
"def test_show(self):\n\n # Create a dog object\n Dog.create(id=5, name='Johnny', owner='John')\n\n # Fetch this dog by ID\n rv = self.client.get('/blueprint/dogs/5')\n assert rv.status_code == 200\n\n expected_resp = {\n 'dog': {'age': 5, 'id': 5, 'name': 'Johnny', 'owner': 'John'}\n }\n assert rv.json == expected_resp\n\n # Test search by invalid id\n rv = self.client.get('/blueprint/dogs/6')\n assert rv.status_code == 404\n\n # Delete the dog now\n dog = Dog.get(5)\n dog.delete()",
"async def drinkify(self,ctx):\n artist = ctx.message.content[9:]\n drink = \"\"\n response = requests.get('http://drinkify.org/{}'.format(artist))\n if response.status_code == 200:\n root = html.fromstring(response.content)\n recipe = root.xpath('//ul[@class=\"recipe\"]/li/text()')\n for alc in recipe:\n drink +=\"{}\\n\".format(alc)\n instructions = root.xpath('normalize-space(//p[@class=\"instructions\"]/text())')\n drink += \"\\nInstructions:\\n{}\".format(instructions)\n else:\n return\n await ctx.bot.send_message(ctx.message.channel, drink)",
"def stonith_show(stonith_id, extra_args=None, cibfile=None):\n return item_show(\n item=\"stonith\", item_id=stonith_id, extra_args=extra_args, cibfile=cibfile\n )",
"def show(self):\n raise NotImplementedError",
"def show(self):\n raise NotImplementedError",
"def show(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"show\"), kwargs)",
"def show(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"show\"), kwargs)",
"def show(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"show\"), kwargs)",
"def show(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"show\"), kwargs)"
]
| [
"0.70895994",
"0.6359162",
"0.62630725",
"0.61819303",
"0.5997045",
"0.5997045",
"0.5997045",
"0.5957326",
"0.59298",
"0.5922641",
"0.5921987",
"0.5875874",
"0.5837164",
"0.58103615",
"0.5791792",
"0.5771903",
"0.5733305",
"0.56826496",
"0.5669358",
"0.56403744",
"0.5607102",
"0.5597074",
"0.5574928",
"0.55729073",
"0.5565909",
"0.5565909",
"0.5514742",
"0.5514742",
"0.5514742",
"0.5514742"
]
| 0.80978394 | 0 |
Show the edit form for a drink. | def drinks_edit(drink_id):
drink = drinks_collection.find_one({'_id': ObjectId(drink_id)})
return render_template('drinks_edit.html', drink=drink) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def show_edit_form(self, obj_pk=None):\n obj = self.model.objects.get(pk=obj_pk)\n # if there is no edit permission then does not show the form\n if not self.has_view_permissions(obj): return\n\n\n # create the edit form a add it to the empty widget details\n # override the function hide_form to make sure the list is shown after the user close the edition form\n params = {\n 'title':'Edit',\n 'model':self.model,\n 'pk':obj.pk,\n 'parent_model':self.parent_model,\n 'parent_pk':self.parent_pk,\n 'parent_win': self\n }\n\n if self.INLINES: params.update({'inlines': self.INLINES} )\n if self.FIELDSETS: params.update({'fieldsets':self.FIELDSETS})\n if self.READ_ONLY: params.update({'readonly': self.READ_ONLY})\n\n editmodel_class = self.get_editmodel_class(obj)\n editform = editmodel_class(**params)\n\n if hasattr(self, '_details') and self.USE_DETAILS_TO_EDIT:\n self._details.value = editform\n self._list.hide()\n self._details.show()\n\n # only if the button exists:\n toolbar = [self.toolbar] if isinstance(self.toolbar, str) else self.toolbar\n if toolbar:\n for o in toolbar:\n if o and hasattr(self, o): getattr(self, o).hide()\n\n else:\n self._list.show()\n if hasattr(self, '_details'):\n self._details.hide()",
"def show_pet_with_edit_form(pet_id):\n pet = Pet.query.get_or_404(pet_id)\n form = PetFormEdit(obj=pet)\n if form.validate_on_submit():\n pet.photo_url = form.photo_url.data\n pet.notes = form.notes.data\n pet.available = form.available.data\n \n db.session.commit()\n return redirect('/')\n else:\n return render_template('pet.html', pet=pet, form=form)",
"def show(self, screen, beverage):\n screen.clear()\n self._draw_header(screen, 'Edit - {}'.format(beverage['name']))\n self._draw_labels(screen)\n\n beverage['name'] = self._read_string(screen, Point(BeverageEditForm.INPUT_POSITION.y, BeverageEditForm.INPUT_POSITION.x + 8))\n beverage['price'] = self._read_float(screen, Point(BeverageEditForm.INPUT_POSITION.y + 2, BeverageEditForm.INPUT_POSITION.x + 8))\n\n if '_id' in beverage:\n self.db.beverages.update({'_id': beverage['_id']}, beverage)\n else:\n self.db.beverages.insert(beverage)",
"def edit_form():\n return template (\"edit\")",
"def show_edit_post_form(post_id):\n post = Post.query.get_or_404(post_id)\n\n return render_template('posts/edit.html', post=post)",
"def show_and_edit_pet_page(pet_id):\n \n pet = Pet.query.get(pet_id)\n\n form = EditPetPage(obj=pet)\n\n if form.validate_on_submit():\n pet.photo_url = form.photo_url.data\n pet.notes = form.notes.data\n pet.available = form.available.data\n\n db.session.commit()\n\n return redirect('/')\n\n else:\n return render_template('display_pet.html', pet=pet, form=form)",
"def show_edit_pet(pet_id):\n pet = Pet.query.get(pet_id)\n form = EditPet(obj=pet)\n\n if form.validate_on_submit():\n pet.photo_url = form.photo_url.data\n pet.notes = form.notes.data\n pet.available = form.available.data\n\n db.session.add(pet)\n db.session.commit()\n\n return redirect(\"/\")\n\n else:\n return render_template('edit_pet.html', form=form, pet=pet)",
"def show_edit_pet(id):\r\n pet = Pet.query.get_or_404(id)\r\n form = EditPetForm(obj=pet)\r\n\r\n if form.validate_on_submit():\r\n pet.photo_url = form.photo_url.data\r\n pet.notes = form.notes.data\r\n pet.available = form.available.data\r\n db.session.commit()\r\n\r\n return redirect('/')\r\n\r\n else:\r\n return render_template(\"pet_profile.html\", form=form, pet=pet)",
"def show_edit_form(user_id):\n user = User.query.get_or_404(user_id)\n return render_template('edit.html', user=user)",
"def edit_plante(id):\n plante = get_plante(id)\n form = PlanteForm(plante)\n return render_template(\n \"create-plante.html\",\n title = plante.get_name()+\" - edit\",\n form = form,\n plante = plante,\n param = \"modif\")",
"def edit(self):\n\n pass",
"def show_edit_tag_form(tag_id):\n tag = Tag.query.get_or_404(tag_id)\n \n return render_template('edit-tag.html', tag=tag)",
"def edit_entry(request, entry_id):\n entry= Entry.objects.get(id= entry_id)\n stock= entry.stock\n\n if request.method != 'POST':\n #initial request; pre-fill form with the current note entry.\n form= EntryForm(instance=entry)\n else:\n # POST data submitted; process data.\n form= EntryForm(instance=entry, data=request.POST)\n if form.is_valid():\n form.save()\n return redirect('stock_trackers:stock', stock_id=stock.id)\n\n context= {'entry': entry, 'stock': stock, 'form': form}\n return render(request, 'stock_trackers/edit_entry.html', context)",
"def pet_detail_edit(pet_id):\n\n pet = Pet.query.get_or_404(pet_id)\n form = PetEditForm(obj=pet)\n\n if form.validate_on_submit():\n pet.photo_url = form.photo_url.data\n pet.notes = form.notes.data\n pet.available = form.available.data\n\n db.session.commit()\n flash(f\"Pet{pet_id} updated!\")\n return redirect(f\"/{pet_id}\")\n\n else:\n return render_template(\"pet_detail.html\", form=form, pet=pet)",
"def edit(self, **kwargs):\n ...",
"def view_edit_pet(id):\n pet = Pet.query.get_or_404(id)\n form = PetEditForm(obj=pet)\n if form.validate_on_submit():\n form.populate_obj(pet)\n db.session.commit()\n\n flash(f\"Updated {pet.species} named {pet.name}\")\n return redirect(f'/{id}')\n else:\n return render_template(\"pet_edit_form.html\", form=form, pet=pet)",
"def edit_artist(artist_id):\n\n result = db.session.query(Artist).filter(Artist.id == artist_id)\n result = result[0]\n artist = result\n form = ArtistForm(obj=artist)\n\n # TODO: populate form with fields from artist with ID <artist_id>\n return render_template(\"forms/edit_artist.html\", form=form, artist=artist)",
"def getEditForm( self ):\n return \"listc_edit\"",
"def show_edit_tag_form(tag_id):\n tag = Tag.query.get_or_404(tag_id)\n posts = Post.query.all()\n return render_template('tags/edit_tag.html', tag=tag, posts=posts)",
"def show_edit_form(user_id):\n\n user = User.query.get_or_404(user_id)\n\n return render_template(\"users/edit_user.html\", user=user)",
"def pet_display(pet_id):\n pet = Pets.query.get_or_404(pet_id)\n form = EditPetForm(obj=pet)\n if form.validate_on_submit():\n pet.photo_url = form.photo_url.data\n pet.notes = form.notes.data\n pet.available = form.available.data\n db.session.commit()\n flash(\"Succesfully updated\")\n return redirect(f'/{pet_id}')\n else:\n return render_template('display.html', pet=pet, form=form)",
"def show_edit_post_form(user_id, post_id):\n\n post = Post.query.get_or_404(post_id)\n user = post.user\n\n return render_template('edit_post.html', post=post, user=user)",
"def edit_view(request, title, modelform, instance=None, **kwargs):\n instance_form = modelform(request.POST or None, instance=instance)\n if instance_form.is_valid():\n instance = instance_form.save()\n messages.success(request, _(\"%s was edited.\") % instance)\n return redirect(instance.get_absolute_url())\n return form(\n {**kwargs, \"form\": instance_form, \"action_name\": _(\"Edit\"), \"title\": title},\n \"deployments/form.html\",\n request,\n )",
"def show_pet_details(id):\n \n pet = Pet.query.get_or_404(id)\n form = EditPetForm(obj=pet)\n\n if form.validate_on_submit():\n pet.notes = form.notes.data\n pet.available = form.available.data\n pet.photo_url = form.photo_url.data\n db.session.commit()\n return redirect('/')\n\n else:\n return render_template('/pet_edit.html', form = form, pet = pet)",
"def drinks_show(drink_id):\n drink = drinks_collection.find_one({'_id': ObjectId(drink_id)})\n return render_template('drinks_show.html', drink=drink)",
"def drinks_new():\n return render_template('drinks_new.html', drink={})",
"def show_edit_post(post_id):\n post = Post.query.get_or_404(post_id)\n\n return render_template('edit-post.html', post=post)",
"def show_pet(pet_id):\n \n pet = Pet.query.get_or_404(pet_id)\n form = EditPetForm(obj=pet)\n\n if form.validate_on_submit():\n notes = form.notes.data\n photo_url = form.photo_url.data\n available = form.available.data\n\n pet.notes = notes\n pet.photo_url = photo_url\n pet.available = available\n\n db.session.commit()\n flash(f\"notes: {notes}, photo_url={photo_url}, available={pet.available}\")\n \n return redirect(f'/{pet_id}')\n\n p = dict(name=pet.name, species=pet.species, photo_url=pet.photo_url, age=pet.age, notes=pet.notes, available=pet.available)\n\n\n return render_template('pet_edit_form.html', pet=p, form=form)",
"def funding_view(request, slug, id):\n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n funding_reference = get_object_or_404(Funding, id=id,company=company)\n\n return render_to_response('funding_form.html', \n {'details': funding_reference,'info':funding_reference},\n context_instance=RequestContext(request))",
"def view_and_edit_listing(request, listing_id):\n categories = Category.objects.all()\n listing = get_object_or_404(Listing, pk=listing_id)\n\n if request.method == 'POST':\n editform = AddListingForm(\n request.POST,\n request.FILES,\n instance=listing)\n if editform.is_valid():\n listing.save()\n messages.success(\n request,\n 'Thank you. Your listing has been updated')\n return redirect(reverse('addlisting'))\n else:\n editform = AddListingForm(instance=listing)\n\n context = {\n 'editform': editform,\n 'listing': listing,\n 'categories': categories\n }\n return render(request, 'editlisting.html', context)"
]
| [
"0.64750195",
"0.6467963",
"0.63552886",
"0.63058394",
"0.6244319",
"0.6187853",
"0.6180922",
"0.6136211",
"0.6059131",
"0.60525787",
"0.60396665",
"0.59724575",
"0.596908",
"0.5960207",
"0.59366137",
"0.59290475",
"0.5920844",
"0.58841246",
"0.5826922",
"0.5805979",
"0.58049995",
"0.5790488",
"0.57674485",
"0.5766235",
"0.57281715",
"0.56989914",
"0.56886953",
"0.56564486",
"0.5631908",
"0.562856"
]
| 0.7579091 | 0 |
Submit an edited drink. | def drinks_update(drink_id):
updated_drink = {
'name': request.form.get('name'),
'price': request.form.get('price'),
'description': request.form.get('description'),
'images': request.form.get('images').split()
}
drinks_collection.update_one(
{'_id': ObjectId(drink_id)},
{'$set': updated_drink}
)
return redirect(url_for('drinks_show', drink_id=drink_id)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def drinks_submit():\n drink = {\n 'name': request.form.get('name'),\n 'price': request.form.get('price'),\n 'description': request.form.get('description'),\n 'images': request.form.get('images').split()\n }\n drink_id = drinks_collection.insert_one(drink).inserted_id\n return redirect(url_for('drinks_show', drink_id=drink_id))",
"def update_drink(jwt, drink_id):\n try:\n drink = Drink.query.filter(Drink.id == drink_id).one_or_none()\n\n if drink is None:\n abort(404)\n\n body = request.get_json()\n req_title = body.get('title', drink.title)\n req_recipe = json.dumps(body.get('recipe', drink.recipe))\n\n drink.title = req_title\n drink.recipe = req_recipe\n drink.update()\n\n return jsonify({\n 'success': True,\n 'drinks': [drink.long()]\n }), 200\n\n except Exception as e:\n abort(422)",
"def submitToReview(self, obj):\n self.wftool.doActionFor(obj, \"submit\")",
"def submitupdate():\n form = AuthorUpdateForm(formdata=request.form, is_update=True)\n visitor = DataExporter()\n visitor.visit(form)\n\n workflow_object = workflow_object_class.create(\n data={},\n id_user=current_user.get_id(),\n data_type=\"authors\"\n )\n workflow_object.extra_data['formdata'] = copy.deepcopy(visitor.data)\n workflow_object.extra_data['is-update'] = True\n workflow_object.data = formdata_to_model(workflow_object, visitor.data)\n workflow_object.save()\n db.session.commit()\n\n # Start workflow. delay will execute the workflow in the background\n start.delay(\"author\", object_id=workflow_object.id)\n\n ctx = {\n \"inspire_url\": get_inspire_url(visitor.data)\n }\n\n return render_template('authors/forms/update_success.html', **ctx)",
"def drinks_edit(drink_id):\n drink = drinks_collection.find_one({'_id': ObjectId(drink_id)})\n return render_template('drinks_edit.html', drink=drink)",
"def submit(self):\n raise NotImplementedError()",
"def submit(self):\n self.parent().app.setOverrideCursor(gui.QCursor(core.Qt.WaitCursor))\n changes = []\n new = changed = False\n for ix, wins in enumerate(self.fields):\n if ix < len(self.parent().artists):\n fname, lname = wins[0].text(), wins[1].text()\n artist = self.parent().artists[ix]\n if fname != artist.first_name or lname != artist.last_name:\n changed = True\n changes.append((artist.id, fname, lname))\n else:\n new = True\n changes.append((0, wins[0].text(), wins[1].text()))\n if changed or new:\n dmla.update_artists(changes)\n else:\n qtw.QMessageBox.information(self, 'Albums', 'Nothing changed')\n self.parent().app.restoreOverrideCursor()\n self.parent().get_all_artists()\n self.parent().do_select()",
"def presssubmitdesign(self):\n self.mode.submitDesign(self.myDesign)",
"def submit(self, content):\n pass",
"def edit_artist_submission(artist_id):\n # TODO: take values from the form submitted, and update existing\n \n result = db.session.query(Artist).filter(Artist.id == artist_id)\n result = result[0]\n artist = result\n\n name = request.form.get('name')\n city = request.form.get('city')\n state = request.form.get('state')\n phone = request.form.get('phone')\n genres = request.form.getlist('genres')\n imageLink = request.form.get('image link')\n facebookLink = request.form.get('facebook_link')\n website = request.form.get('website')\n seeking_venue = request.form.get('seeking_venue')\n seeking_description = request.form.get('seeking_description')\n\n artist.name = name\n artist.city = city\n artist.state = state\n artist.phone = phone\n artist.genres = genres\n artist.image_link = imageLink\n artist.facebook_link = facebookLink\n artist.website = website\n artist.seeking_venue = seeking_venue\n artist.seeking_description = seeking_description\n\n db.session.commit()\n db.session.close()\n\n # artist record with ID <artist_id> using the new attributes\n\n return redirect(url_for(\"show_artist\", artist_id=artist_id))",
"def post(self):\n\n model = gbmodel.get_model()\n\n #-------- sentiment-text analysis on review----------------\n review = request.form['review']\n\n rating = self.sentiment_analysis(review)\n #--------finish sentiment-text analysis--------------------\n\n result = model.insert(request.form['name'], rating, review, request.form['drink_to_order'])\n if result == False:\n flash(\"specified store:\" + str(request.form['name']) + \" could not be added to our database!\")\n else:\n flash(\"Store \" + str(request.form['name']) + \" added, thank you!\")\n return render_template('index.html')",
"def submit(self, script, **kwargs):\n raise NotImplementedError()",
"def do_drink(self, args):\n\n name = pick(\"Agent\", self.model.get_agent_names())\n if name is not None:\n fluid = int(input(\"Fluid to drink?\"))\n agent = self.model.get_agent(name)\n agent.drink(fluid)",
"def submit(self):\n self.driver.find_element(*BaseLocators.SUBMIT_BUTTON).click()",
"def submit(self, selector):\n el = self.locate_element(selector)\n el.submit()",
"def submit_comment_edit(self, comment_id, new_comment_body):\r\n self._find_within(\"#comment_{} .post-update\".format(comment_id)).first.click()\r\n EmptyPromise(\r\n lambda: (\r\n not self.is_comment_editor_visible(comment_id) and\r\n self.is_comment_visible(comment_id) and\r\n self.get_comment_body(comment_id) == new_comment_body\r\n ),\r\n \"Comment edit succeeded\"\r\n ).fulfill()",
"def update_draw(request, draw_id):\n prev_bom_draw = MONGO.retrieve_draw(draw_id)\n draw_type = draw_factory.get_draw_name(prev_bom_draw.draw_type)\n user_can_write_draw(request.user, prev_bom_draw)\n\n LOG.debug(\"Received post data: {0}\".format(request.POST))\n draw_form = draw_factory.create_form(draw_type, request.POST)\n if not draw_form.is_valid():\n LOG.info(\"Form not valid: {0}\".format(draw_form.errors))\n return render(request, \"draws/display_draw.html\", {\"draw\": draw_form, \"bom\": prev_bom_draw})\n else:\n bom_draw = prev_bom_draw\n raw_draw = draw_form.cleaned_data\n LOG.debug(\"Form cleaned data: {0}\".format(raw_draw))\n # update the draw with the data coming from the POST\n for key, value in raw_draw.items():\n if key not in (\"_id\", \"pk\") and value != \"\":\n setattr(bom_draw, key, value)\n if not bom_draw.is_feasible():\n LOG.info(\"Draw {0} is not feasible\".format(bom_draw))\n draw_form.add_error(None, _(\"Draw not feasible\"))\n draw_form = draw_factory.create_form(draw_type, bom_draw.__dict__.copy())\n return render(request, \"draws/display_draw.html\", {\"draw\": draw_form, \"bom\": bom_draw})\n else:\n bom_draw.add_audit(\"DRAW_PARAMETERS\")\n # generate a result if a private draw\n if not bom_draw.is_shared:\n bom_draw.toss()\n\n MONGO.save_draw(bom_draw)\n LOG.info(\"Updated draw: {0}\".format(bom_draw))\n return redirect('retrieve_draw', draw_id=bom_draw.pk)",
"def post(self):\n modified_content = self.request.get('comment_edit')\n comment_id = self.request.get('comment_id')\n comment = Comments.get_by_id(int(comment_id))\n user = self.get_active_user()\n\n if user.key().id() == comment.submitter_id:\n comment.content = modified_content\n comment.put()\n self.redirect('/%s' % str(comment.post_id))\n else:\n self.error(403)",
"def on_clicked_update(self):\n process = crawler.CrawlerProcess(\n {\n \"USER_AGENT\": \"currency scraper\",\n \"SCRAPY_SETTINGS_MODULE\": \"currency_scraper.currency_scraper.settings\",\n \"ITEM_PIPELINES\": {\n \"currency_scraper.currency_scraper.pipelines.Sqlite3Pipeline\": 300,\n }\n }\n )\n process.crawl(InvestorSpider)\n try:\n process.start()\n gui_warnings.update_notification()\n except error.ReactorNotRestartable:\n gui_warnings.warning_already_updated()",
"def edit_entry(request, entry_id):\n entry= Entry.objects.get(id= entry_id)\n stock= entry.stock\n\n if request.method != 'POST':\n #initial request; pre-fill form with the current note entry.\n form= EntryForm(instance=entry)\n else:\n # POST data submitted; process data.\n form= EntryForm(instance=entry, data=request.POST)\n if form.is_valid():\n form.save()\n return redirect('stock_trackers:stock', stock_id=stock.id)\n\n context= {'entry': entry, 'stock': stock, 'form': form}\n return render(request, 'stock_trackers/edit_entry.html', context)",
"def submit(id, host):",
"def do_submit(self, price_float, volume_float):\r\n raise NotImplementedError()",
"def item_update(request):\n if request.method == 'POST':\n item_to_update = get_object_or_404(StockItem, pk=request.POST['id'])\n item_to_update.name = request.POST['name']\n item_to_update.count = int(request.POST['count'])\n item_to_update.date_of_expiration = request.POST['exp']\n item_to_update.fk_category = Category.objects.get(name=request.POST['cat'])\n item_to_update.fk_subcategory = SubCategory.objects.get(name=request.POST['subcat'])\n item_to_update.notes = request.POST['notes']\n item_to_update.save()\n return HttpResponse(status=200)",
"def set_submitted(self, review_request_id):\r\n self.api_call('api/review-requests/%s/' % review_request_id, {\r\n 'status': 'submitted',\r\n }, method='PUT')",
"def submit(self, data):\n self.update_current_data(data)\n self.job_status = \"COMPLETED\"\n return None",
"def ticket_submit_callback(self, data):\n self.output('staged order ticket submitted: %s' % repr(data))",
"def submit_and_back(self):\n self.submit(skip_confirm=True)\n self.parent().do_detail()",
"def submit_and_back(self):\n self.submit(skip_confirm=True)\n self.parent().do_detail()",
"def submit(self):\n data = self.getFSNDataDict()\n if data != []:\n MOSES.addToPiggyBank(data, self.user_id, self.password)",
"def post_review(self, form):\n comments_file = form.cleaned_data.get('comments', None)\n return_code = form.cleaned_data.get('return_code', None)\n\n # Update the review\n self.object.post_review(comments_file, return_code=return_code)\n if return_code:\n self.revision.return_code = return_code\n\n verb = None\n # If every reviewer has posted comments, close the reviewers step\n if self.object.role == 'reviewer':\n qs = Review.objects \\\n .filter(document=self.document) \\\n .filter(revision=self.revision.revision) \\\n .filter(role='reviewer') \\\n .exclude(closed_on=None)\n if qs.count() == self.revision.reviewers.count():\n self.revision.end_reviewers_step(save=False)\n verb = Activity.VERB_CLOSED_REVIEWER_STEP\n\n # If leader, end leader step\n elif self.object.role == 'leader':\n self.revision.end_leader_step(save=False)\n verb = Activity.VERB_CLOSED_LEADER_STEP\n\n # If approver, end approver step\n elif self.object.role == 'approver':\n self.revision.end_review(save=False)\n verb = Activity.VERB_CLOSED_APPROVER_STEP\n\n self.revision.save(update_document=True)\n\n if verb:\n activity_log.send(verb=verb,\n target=self.revision,\n sender=do_batch_import,\n actor=self.request.user)"
]
| [
"0.64912164",
"0.60468745",
"0.58157015",
"0.5634383",
"0.5631785",
"0.55172324",
"0.5454788",
"0.53785783",
"0.53721833",
"0.5315731",
"0.5288665",
"0.5285213",
"0.5263699",
"0.52583075",
"0.5245523",
"0.52326274",
"0.5188646",
"0.5173956",
"0.51724726",
"0.5169337",
"0.5122155",
"0.5119826",
"0.51176625",
"0.50885475",
"0.5065475",
"0.5063422",
"0.5054141",
"0.5054141",
"0.50212413",
"0.5004058"
]
| 0.6137021 | 1 |
Convert pydantic object to pandas dataframe with 1 row. | def to_df(self):
return pd.DataFrame([dict(self)]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def to_df(self):\n return pd.DataFrame([dict(self)])",
"def to_df(self):\n return pd.DataFrame([dict(self)])",
"def to_df(self):\n return pd.DataFrame([dict(self)])",
"def to_df(self):\n return pd.DataFrame([dict(self)])",
"def to_df(self):\n return pd.DataFrame([dict(self)])",
"def to_df(self):\n from ..df import DataFrame\n\n return DataFrame(self)",
"def pandas_convert(self):\n data = {}\n\n for names in self.data[0]:\n col_values = []\n\n if names in objects:\n for items in self.data[0][names]:\n col_values = []\n\n col_name = names + \"_\" + items\n\n for i in range(len(self.data)):\n col_values.append(self.data[i][names][items])\n\n data[col_name] = col_values\n else:\n for i in range(len(self.data)):\n col_values.append(self.data[i][names])\n \n data[names] = col_values\n\n self.pandas_df = pd.DataFrame(data=data)\n self.__clean_df()\n\n return self.pandas_df",
"def to_pandas(self):\n # TODO Add type translation.\n # Skipping analyzing 'pandas': found module but no type hints or library stubs\n import pandas as pd # type: ignore\n\n map = {}\n for n, c in self._field_data.items():\n map[n] = c.to_pandas()\n return pd.DataFrame(map)",
"def to_df(self) -> pd.DataFrame:\n\n return pd.DataFrame(self.to_dict()).drop(\n DUMMY_ENTITY_ID, axis=1, errors=\"ignore\"\n )",
"def as_dataframe(self) -> \"pd.DataFrame\":\n import pandas as pd\n\n df = pd.DataFrame([row.as_series() for row in self.rows])\n return df",
"def to_pandas_dataframe(self):\n pd_index = self.index().to_pandas_index()\n return pd.DataFrame.from_items(self.collect()).set_index(pd_index)",
"def get_obj_df(self) -> pd.DataFrame:\n df = pd.DataFrame(self.obj, columns=[\"x\", \"y\", \"m\", \"dx\", \"dy\"])\n df['iter'] = self.current_iteration\n return df",
"def return_data_as_pandas_df(self):\n if not self.response:\n return None\n\n data = self.response['data'][self.data_type.value]\n\n # flatten data dictionary by joining property and subproperty names\n data_flat = {}\n for i, entry in enumerate(data):\n id = self.id[i]\n curr_dict = {}\n for key, values in entry.items():\n if isinstance(values, list):\n v = values[0]\n else:\n v = values\n if isinstance(v, str):\n new_key = f\"{key}\"\n curr_dict[new_key] = v\n else:\n for subprop, val in v.items():\n new_key = f\"{key}.{subprop}\"\n curr_dict[new_key] = val\n data_flat[id] = curr_dict\n\n return pd.DataFrame.from_dict(data_flat, orient='index')",
"def to_pandas(self):\n pass",
"def to_pandas(self):\n pass",
"def to_pandas_df(self):\n data = self._get_data(pd=True)\n return data",
"def to_dataframe(self, data_dict):\n return pd.DataFrame.from_dict(data_dict, orient='index')",
"def to_dataframe(self):\n return df_util.to_dataframe(requests.get(self.__url).json())",
"def to_pandas(self):\n dataframe = self.get().to_pandas()\n assert type(dataframe) is pandas.DataFrame or type(dataframe) is pandas.Series\n\n return dataframe",
"def as_DataFrame (self):\n return DataFrame(self.table)",
"def get_as_pandas_dataframe(self):\n pd_df = pd.DataFrame()\n for name in self.dict_colname_to_index:\n pd_df[name] = np.copy(self[name])\n return pd_df",
"def dataframe(self):\n\n if self._dataframe is None:\n try:\n import pandas as pd\n except ImportError:\n raise RuntimeError('To enable dataframe support, '\n 'run \\'pip install datadotworld[pandas]\\'')\n\n self._dataframe = pd.DataFrame.from_records(self._iter_rows(),\n coerce_float=True)\n\n return self._dataframe",
"def to_dataframe(self, attrs_as_columns=False):\n\n # Set up empty dict for dataframe\n ds = {}\n\n # Add every key containing a list into the dict\n keys = [k for k in self.dict.keys()]\n for key in keys:\n if isinstance(self.dict[key], list):\n ds[key] = self.dict[key]\n else:\n if attrs_as_columns:\n ds[key] = self.dict[key]\n\n # Convert entire dict to a DataFrame\n ds = pd.DataFrame(ds)\n\n # Return dataset\n return ds",
"def show(obj):\n if isinstance(obj, pd.Series):\n df = pd.DataFrame(obj)\n return df\n elif hasattr(obj, '__dict__'):\n return pd.DataFrame(pd.Series(obj.__dict__),\n columns=['value'])\n else:\n return obj",
"def to_pandas(self):\n self.meta = pd.DataFrame(self.meta)\n return",
"def to_df(thisdict, name=None, index=None) -> pd.DataFrame:\n df = pd.DataFrame.from_dict(thisdict, orient='index')\n if index:\n df = df.set_index(index)\n if name:\n df.index.name=name\n\n if df.size>0:\n df.sort(inplace=True, ascending=False)\n return df",
"def to_dataframe(self, include_metadata: bool = True) -> pd.DataFrame:\n # Get all our data first with async\n # Note that all our pandas work will tax CPU so we wouldn't expect any\n # performance gains from doing the data parsing as a callback\n records = self.to_dict()\n data = []\n for series in records:\n df = pd.DataFrame(series.pop(\"data\"), columns=[\"period\", \"value\"])\n if include_metadata:\n df = df.assign(**series)\n data.append(df)\n return pd.concat(data, ignore_index=True)",
"def pd(self, *args, **kwargs):\n return pd.DataFrame.from_records(self.aslist(), *args, **kwargs)",
"def to_pandas(self) -> pd.DataFrame:\n\n data = {column.name: column.to_pandas()\n for column in self.plaincolumns}\n\n return pd.DataFrame(data, columns=self.columns)",
"def to_df(self):\n # check read only\n if self.__read_only:\n raise IOError(\"Table is for read only.\")\n\n # convert data to dicts\n data = dict(record.to_id_dict()\n for record in self.__data.values())\n\n # make data frame\n df = pd.DataFrame(data).T\n df.index.name = \"_id\"\n return df"
]
| [
"0.75369906",
"0.75369906",
"0.75369906",
"0.75369906",
"0.75369906",
"0.7209753",
"0.71949995",
"0.71874905",
"0.71487474",
"0.71033204",
"0.7100494",
"0.7046343",
"0.6982141",
"0.69812274",
"0.69812274",
"0.6949213",
"0.6906141",
"0.68650144",
"0.6826173",
"0.681689",
"0.6779675",
"0.6772389",
"0.67545307",
"0.6738791",
"0.6726808",
"0.67251456",
"0.6719451",
"0.6717257",
"0.67062944",
"0.66851926"
]
| 0.7580992 | 0 |
Test the internal _parse_image_meta method. Feed it an 'orphan' image as we get it from imgadm list j | def test_parse_image_meta_orphan(image_orphan):
ret = {"Error": "This looks like an orphaned image, image payload was invalid."}
assert _parse_image_meta(image_orphan, True) == ret | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_list_image_metadata(self):\n pass",
"def test_parse_image_meta_native(image_native):\n ret = {\n \"description\": (\"A SmartOS image pre-configured for building pkgsrc packages.\"),\n \"name\": \"pkgbuild\",\n \"os\": \"smartos\",\n \"published\": \"2018-04-09T08:25:52Z\",\n \"source\": \"https://images.joyent.com\",\n \"version\": \"18.1.0\",\n }\n assert _parse_image_meta(image_native, True) == ret",
"def test_parse_image_meta_lx(image_lx):\n ret = {\n \"description\": (\n \"Container-native Ubuntu 16.04 64-bit image. Built to run on \"\n \"containers with bare metal speed, while offering all the \"\n \"services of a typical unix host.\"\n ),\n \"name\": \"ubuntu-16.04\",\n \"os\": \"linux\",\n \"published\": \"2016-06-01T02:17:41Z\",\n \"source\": \"https://images.joyent.com\",\n \"version\": \"20160601\",\n }\n assert _parse_image_meta(image_lx, True) == ret",
"def test_answer_meta_image_undefined(self):\n answer = Answer()\n answer.save()\n page = self.create_answer_page(answer_base=answer)\n self.assertIsNone(page.meta_image)",
"def parse_image_meta_graph(self, meta):\n\n image_id = meta[:, 0]\n original_image_shape = meta[:, 1:4]\n image_shape = meta[:, 4:7]\n window = meta[:, 7:11] # (y1, x1, y2, x2) window of image in in pixels\n scale = meta[:, 11]\n active_class_ids = meta[:, 12:]\n return {\n \"image_id\": image_id,\n \"original_image_shape\": original_image_shape,\n \"image_shape\": image_shape,\n \"window\": window,\n \"scale\": scale,\n \"active_class_ids\": active_class_ids,\n }\n pass",
"def parse_image_meta_graph(meta):\n image_id = meta[:, 0]\n original_image_shape = meta[:, 1:4]\n image_shape = meta[:, 4:7]\n window = meta[:, 7:11] # (y1, x1, y2, x2) window of image in in pixels\n scale = meta[:, 11]\n active_class_ids = meta[:, 12:]\n return {\n \"image_id\": image_id,\n \"original_image_shape\": original_image_shape,\n \"image_shape\": image_shape,\n \"window\": window,\n \"scale\": scale,\n \"active_class_ids\": active_class_ids,\n }",
"def checksImages(self):\n metadata=[]\n for image in self.meta['sources']:\n with rasterio.open(image) as src:\n metaData=src.meta\n \n assert metaData['driver'] == 'GTiff', \"Driver is not supported: {0}\".format(metaData['driver'])\n assert metaData['count'] == len(self.meta['bandNames']), \"Nbands incorrect, expected: {0}, {1} provided\".format(metaData['count'],len(self.meta['bandNames']))\n \n metadata.append({'dtype': metaData['dtype'], 'driver': metaData['driver'], 'nodata': metaData['nodata'], 'nBands': metaData['count'],'crs': src.crs.to_string()})\n \n assert len(set([item['dtype'] for item in metadata])) == 1, \"Images list dtypes aren't compatibles. Expected: 1, {1} provided\".format(metaData['count'],len(set([item['dtype'] for item in metadata])))\n assert len(set([item['driver'] for item in metadata])) == 1, \"Images list drivers aren't compatibles. Expected: 1, 1 provided\".format(metaData['count'],len(set([item['driver'] for item in metadata])))\n assert len(set([item['nodata'] for item in metadata])) == 1, \"Images list nodata values aren't compatibles. Expected: 1, {1} provided\".format(metaData['count'],len(set([item['nodata'] for item in metadata])))\n assert len(set([item['nBands'] for item in metadata])) == 1, \"Images list nBands number aren't compatibles. Expected: 1, {1} provided\".format(metaData['count'],len(set([item['nBands'] for item in metadata])))\n assert len(set([item['crs'] for item in metadata])) == 1, \"Images list crs aren't compatibles. Expected: 1, {1} provided\".format(metaData['count'],len(set([item['crs'] for item in metadata]))) \n return metadata[0]",
"def test_images(self):\n\n message = {\"method\": \"images\", \"params\": {\"elem\": None}}\n response = yield self._get_response(message)\n\n self.assertIsInstance(response, dict)\n self.assertEqual(response[\"method\"], \"images\")\n self.assertIsInstance(response[\"result\"], list)\n\n images = [i[\"tag\"] for i in response[\"result\"]]\n\n self.assertIn(self.tag_image, images)",
"def parse_image_meta_graph(meta):\n image_id = meta[:, 0]\n image_shape = meta[:, 1:4]\n window = meta[:, 4:8]\n active_class_ids = meta[:, 8:]\n return [image_id, image_shape, window, active_class_ids]",
"def olive_image_parser(text: bytes) -> Optional[dict]:\n soup = BeautifulSoup(text, \"lxml\")\n root = soup.find(\"xmd-entity\")\n\n try:\n assert root is not None\n img = {\n 'id': root.get('id'),\n 'coords': root.img.get('box').split(),\n 'name': root.meta.get('name'),\n 'resolution': root.meta.get('images_resolution'),\n 'filepath': root.img.get('href')\n }\n return img\n except AssertionError:\n return None",
"def test_parse_image_meta_docker(image_docker):\n ret = {\n \"description\": (\n \"Docker image imported from \"\n \"busybox42/zimbra-docker-centos:latest on \"\n \"2019-03-23T01:32:25.320Z.\"\n ),\n \"name\": \"busybox42/zimbra-docker-centos:latest\",\n \"os\": \"linux\",\n \"published\": \"2019-03-23T01:32:25.320Z\",\n \"source\": \"https://docker.io\",\n \"version\": \"62487cf6a7f6\",\n }\n assert _parse_image_meta(image_docker, True) == ret",
"def parse_image_meta(meta):\n image_id = meta[:, 0]\n image_shape = meta[:, 1:4]\n window = meta[:, 4:8] # (x1, y1, x2, y2) window of image in in pixels\n active_class_ids = meta[:, 8:]\n return image_id, image_shape, window, active_class_ids",
"def _getAllMeta(self):\n try:\n metadata = pyexiv2.ImageMetadata(self.imagePath)\n metadata.read()\n return metadata\n except:\n print 'error reading meta data'\n return None",
"def test_answer_meta_image_uses_category_image_if_no_social_image(self):\n category = baker.make(Category, category_image=self.test_image)\n page = self.page1\n page.category.add(category)\n page.save_revision()\n self.assertEqual(page.meta_image, self.test_image)",
"def testMissingImage(self):\n self.assertNotIn('no_image', self.data)",
"def process_image(self):\n pass",
"def strip_exif(self,img):\n data = list(img.getdata())\n image_without_exif = PIL.Image.new(img.mode, img.size)\n image_without_exif.putdata(data)\n return image_without_exif",
"def test_read_image(self):\n pass",
"def test_read_namespaced_image_stream_image(self):\n pass",
"def images_exist(self):\n pass",
"def test_rebuilt_server_image_field(self):\n actual_image_id = self.server.image.id if self.server.image is not None else None\n self.assertEqual(self.expected_image_id, actual_image_id)",
"def image_process(image_info):\n path = os.path.join(cfg.IMAGESET, image_info.get(\"index\") + \".jpg\")\n if not os.path.exists(path):\n raise IOError(\"please check your file is not exists: \" + path)\n def load_image(path):\n image = Image.open(path)\n return image\n return load_image(path)",
"def _parseImageXml(self, xml, topImage):\n if not topImage or topImage.pixelInfo.get('magnificaiton'):\n return\n topImage.parse_image_description(xml)\n if not topImage._description_record:\n return\n try:\n xml = topImage._description_record\n # Optrascan metadata\n scanDetails = xml.get('ScanInfo', xml.get('EncodeInfo'))['ScanDetails']\n mag = float(scanDetails['Magnification'])\n # In microns; convert to mm\n scale = float(scanDetails['PixelResolution']) * 1e-3\n topImage._pixelInfo = {\n 'magnification': mag,\n 'mm_x': scale,\n 'mm_y': scale,\n }\n except Exception:\n pass",
"def test_list_image(self):\n pass",
"def add_image(self, in_image):\n image = in_image\n if not isinstance(image, Image):\n image = Image()\n image.parse_record(in_image)\n self.image = image\n \n fields = self.eod.eodms_rapi.get_collections()[self.image.get_collId()]\\\n ['fields']\n \n self.metadata['imageUrl'] = self.image.get_metadata('thisRecordUrl')\n self.metadata['imageMetadata'] = self.image.get_metadata(\\\n 'metadataUrl')\n self.metadata['imageStartDate'] = self.image.get_date()",
"def test_class_image(self):\n fwa = FakeWikiArchivo(\n 'abcd <a href=\"/wiki/foobar\" class=\"image\">FooBar</a> dcba'\n )\n _, r = self.peishranc(fwa)\n self.assertEqual(r, [])",
"def test_rt_metadata(self):\n\n img = hopper()\n\n # Behaviour change: re #1416\n # Pre ifd rewrite, ImageJMetaData was being written as a string(2),\n # Post ifd rewrite, it's defined as arbitrary bytes(7). It should\n # roundtrip with the actual bytes, rather than stripped text\n # of the premerge tests.\n #\n # For text items, we still have to decode('ascii','replace') because\n # the tiff file format can't take 8 bit bytes in that field.\n\n basetextdata = \"This is some arbitrary metadata for a text field\"\n bindata = basetextdata.encode('ascii') + b\" \\xff\"\n textdata = basetextdata + \" \" + chr(255)\n reloaded_textdata = basetextdata + \" ?\"\n floatdata = 12.345\n doubledata = 67.89\n info = TiffImagePlugin.ImageFileDirectory()\n\n ImageJMetaData = tag_ids['ImageJMetaData']\n ImageJMetaDataByteCounts = tag_ids['ImageJMetaDataByteCounts']\n ImageDescription = tag_ids['ImageDescription']\n\n info[ImageJMetaDataByteCounts] = len(bindata)\n info[ImageJMetaData] = bindata\n info[tag_ids['RollAngle']] = floatdata\n info.tagtype[tag_ids['RollAngle']] = 11\n info[tag_ids['YawAngle']] = doubledata\n info.tagtype[tag_ids['YawAngle']] = 12\n\n info[ImageDescription] = textdata\n\n f = self.tempfile(\"temp.tif\")\n\n img.save(f, tiffinfo=info)\n\n loaded = Image.open(f)\n\n self.assertEqual(loaded.tag[ImageJMetaDataByteCounts], (len(bindata),))\n self.assertEqual(loaded.tag_v2[ImageJMetaDataByteCounts],\n (len(bindata),))\n\n self.assertEqual(loaded.tag[ImageJMetaData], bindata)\n self.assertEqual(loaded.tag_v2[ImageJMetaData], bindata)\n\n self.assertEqual(loaded.tag[ImageDescription], (reloaded_textdata,))\n self.assertEqual(loaded.tag_v2[ImageDescription], reloaded_textdata)\n\n loaded_float = loaded.tag[tag_ids['RollAngle']][0]\n self.assertAlmostEqual(loaded_float, floatdata, places=5)\n loaded_double = loaded.tag[tag_ids['YawAngle']][0]\n self.assertAlmostEqual(loaded_double, doubledata)\n\n # check with 2 element ImageJMetaDataByteCounts, issue #2006\n\n info[ImageJMetaDataByteCounts] = (8, len(bindata) - 8)\n img.save(f, tiffinfo=info)\n loaded = Image.open(f)\n\n self.assertEqual(loaded.tag[ImageJMetaDataByteCounts],\n (8, len(bindata) - 8))\n self.assertEqual(loaded.tag_v2[ImageJMetaDataByteCounts],\n (8, len(bindata) - 8))",
"def forward_test(self, img, img_metas, **kwargs):",
"def test_tag_image_duplicate(self):\n\n message = {\n \"method\": \"build_image\",\n \"params\": {\"url\": self.url,\n \"tag_image\": self.tag_image}\n }\n response = yield self._get_response(message)\n\n self.assertIsInstance(response, dict)\n self.assertEqual(response[\"method\"], \"error\")",
"def process_images(text):\n # if text != None:\n if text is not None:\n soup = BeautifulSoup(str(text), 'html.parser')\n img = soup.img\n try:\n image = img['title']\n return image\n except (TypeError, KeyError):\n # print(img)\n pass"
]
| [
"0.7048962",
"0.6765278",
"0.66359967",
"0.6442332",
"0.6345524",
"0.62997514",
"0.6292283",
"0.6252548",
"0.62425846",
"0.6214532",
"0.62140304",
"0.61478466",
"0.6136727",
"0.6091698",
"0.60507995",
"0.6047498",
"0.6019373",
"0.59844077",
"0.5983927",
"0.5947135",
"0.5930554",
"0.5899293",
"0.5888757",
"0.5858772",
"0.58400005",
"0.58355665",
"0.57995695",
"0.57888293",
"0.5786886",
"0.5786308"
]
| 0.795044 | 0 |
Test the internal _parse_image_meta methode Feed it an 'native' image as we get it from from imgadm list j | def test_parse_image_meta_native(image_native):
ret = {
"description": ("A SmartOS image pre-configured for building pkgsrc packages."),
"name": "pkgbuild",
"os": "smartos",
"published": "2018-04-09T08:25:52Z",
"source": "https://images.joyent.com",
"version": "18.1.0",
}
assert _parse_image_meta(image_native, True) == ret | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_parse_image_meta_lx(image_lx):\n ret = {\n \"description\": (\n \"Container-native Ubuntu 16.04 64-bit image. Built to run on \"\n \"containers with bare metal speed, while offering all the \"\n \"services of a typical unix host.\"\n ),\n \"name\": \"ubuntu-16.04\",\n \"os\": \"linux\",\n \"published\": \"2016-06-01T02:17:41Z\",\n \"source\": \"https://images.joyent.com\",\n \"version\": \"20160601\",\n }\n assert _parse_image_meta(image_lx, True) == ret",
"def test_list_image_metadata(self):\n pass",
"def test_parse_image_meta_orphan(image_orphan):\n ret = {\"Error\": \"This looks like an orphaned image, image payload was invalid.\"}\n assert _parse_image_meta(image_orphan, True) == ret",
"def test_parse_image_meta_docker(image_docker):\n ret = {\n \"description\": (\n \"Docker image imported from \"\n \"busybox42/zimbra-docker-centos:latest on \"\n \"2019-03-23T01:32:25.320Z.\"\n ),\n \"name\": \"busybox42/zimbra-docker-centos:latest\",\n \"os\": \"linux\",\n \"published\": \"2019-03-23T01:32:25.320Z\",\n \"source\": \"https://docker.io\",\n \"version\": \"62487cf6a7f6\",\n }\n assert _parse_image_meta(image_docker, True) == ret",
"def parse_image_meta_graph(self, meta):\n\n image_id = meta[:, 0]\n original_image_shape = meta[:, 1:4]\n image_shape = meta[:, 4:7]\n window = meta[:, 7:11] # (y1, x1, y2, x2) window of image in in pixels\n scale = meta[:, 11]\n active_class_ids = meta[:, 12:]\n return {\n \"image_id\": image_id,\n \"original_image_shape\": original_image_shape,\n \"image_shape\": image_shape,\n \"window\": window,\n \"scale\": scale,\n \"active_class_ids\": active_class_ids,\n }\n pass",
"def parse_image_meta(meta):\n image_id = meta[:, 0]\n image_shape = meta[:, 1:4]\n window = meta[:, 4:8] # (x1, y1, x2, y2) window of image in in pixels\n active_class_ids = meta[:, 8:]\n return image_id, image_shape, window, active_class_ids",
"def parse_image_meta_graph(meta):\n image_id = meta[:, 0]\n original_image_shape = meta[:, 1:4]\n image_shape = meta[:, 4:7]\n window = meta[:, 7:11] # (y1, x1, y2, x2) window of image in in pixels\n scale = meta[:, 11]\n active_class_ids = meta[:, 12:]\n return {\n \"image_id\": image_id,\n \"original_image_shape\": original_image_shape,\n \"image_shape\": image_shape,\n \"window\": window,\n \"scale\": scale,\n \"active_class_ids\": active_class_ids,\n }",
"def test_read_image(self):\n pass",
"def _getAllMeta(self):\n try:\n metadata = pyexiv2.ImageMetadata(self.imagePath)\n metadata.read()\n return metadata\n except:\n print 'error reading meta data'\n return None",
"def parse_image_meta_graph(meta):\n image_id = meta[:, 0]\n image_shape = meta[:, 1:4]\n window = meta[:, 4:8]\n active_class_ids = meta[:, 8:]\n return [image_id, image_shape, window, active_class_ids]",
"def checksImages(self):\n metadata=[]\n for image in self.meta['sources']:\n with rasterio.open(image) as src:\n metaData=src.meta\n \n assert metaData['driver'] == 'GTiff', \"Driver is not supported: {0}\".format(metaData['driver'])\n assert metaData['count'] == len(self.meta['bandNames']), \"Nbands incorrect, expected: {0}, {1} provided\".format(metaData['count'],len(self.meta['bandNames']))\n \n metadata.append({'dtype': metaData['dtype'], 'driver': metaData['driver'], 'nodata': metaData['nodata'], 'nBands': metaData['count'],'crs': src.crs.to_string()})\n \n assert len(set([item['dtype'] for item in metadata])) == 1, \"Images list dtypes aren't compatibles. Expected: 1, {1} provided\".format(metaData['count'],len(set([item['dtype'] for item in metadata])))\n assert len(set([item['driver'] for item in metadata])) == 1, \"Images list drivers aren't compatibles. Expected: 1, 1 provided\".format(metaData['count'],len(set([item['driver'] for item in metadata])))\n assert len(set([item['nodata'] for item in metadata])) == 1, \"Images list nodata values aren't compatibles. Expected: 1, {1} provided\".format(metaData['count'],len(set([item['nodata'] for item in metadata])))\n assert len(set([item['nBands'] for item in metadata])) == 1, \"Images list nBands number aren't compatibles. Expected: 1, {1} provided\".format(metaData['count'],len(set([item['nBands'] for item in metadata])))\n assert len(set([item['crs'] for item in metadata])) == 1, \"Images list crs aren't compatibles. Expected: 1, {1} provided\".format(metaData['count'],len(set([item['crs'] for item in metadata]))) \n return metadata[0]",
"def test_read_namespaced_image_stream_image(self):\n pass",
"def test_rt_metadata(self):\n\n img = hopper()\n\n # Behaviour change: re #1416\n # Pre ifd rewrite, ImageJMetaData was being written as a string(2),\n # Post ifd rewrite, it's defined as arbitrary bytes(7). It should\n # roundtrip with the actual bytes, rather than stripped text\n # of the premerge tests.\n #\n # For text items, we still have to decode('ascii','replace') because\n # the tiff file format can't take 8 bit bytes in that field.\n\n basetextdata = \"This is some arbitrary metadata for a text field\"\n bindata = basetextdata.encode('ascii') + b\" \\xff\"\n textdata = basetextdata + \" \" + chr(255)\n reloaded_textdata = basetextdata + \" ?\"\n floatdata = 12.345\n doubledata = 67.89\n info = TiffImagePlugin.ImageFileDirectory()\n\n ImageJMetaData = tag_ids['ImageJMetaData']\n ImageJMetaDataByteCounts = tag_ids['ImageJMetaDataByteCounts']\n ImageDescription = tag_ids['ImageDescription']\n\n info[ImageJMetaDataByteCounts] = len(bindata)\n info[ImageJMetaData] = bindata\n info[tag_ids['RollAngle']] = floatdata\n info.tagtype[tag_ids['RollAngle']] = 11\n info[tag_ids['YawAngle']] = doubledata\n info.tagtype[tag_ids['YawAngle']] = 12\n\n info[ImageDescription] = textdata\n\n f = self.tempfile(\"temp.tif\")\n\n img.save(f, tiffinfo=info)\n\n loaded = Image.open(f)\n\n self.assertEqual(loaded.tag[ImageJMetaDataByteCounts], (len(bindata),))\n self.assertEqual(loaded.tag_v2[ImageJMetaDataByteCounts],\n (len(bindata),))\n\n self.assertEqual(loaded.tag[ImageJMetaData], bindata)\n self.assertEqual(loaded.tag_v2[ImageJMetaData], bindata)\n\n self.assertEqual(loaded.tag[ImageDescription], (reloaded_textdata,))\n self.assertEqual(loaded.tag_v2[ImageDescription], reloaded_textdata)\n\n loaded_float = loaded.tag[tag_ids['RollAngle']][0]\n self.assertAlmostEqual(loaded_float, floatdata, places=5)\n loaded_double = loaded.tag[tag_ids['YawAngle']][0]\n self.assertAlmostEqual(loaded_double, doubledata)\n\n # check with 2 element ImageJMetaDataByteCounts, issue #2006\n\n info[ImageJMetaDataByteCounts] = (8, len(bindata) - 8)\n img.save(f, tiffinfo=info)\n loaded = Image.open(f)\n\n self.assertEqual(loaded.tag[ImageJMetaDataByteCounts],\n (8, len(bindata) - 8))\n self.assertEqual(loaded.tag_v2[ImageJMetaDataByteCounts],\n (8, len(bindata) - 8))",
"def process_image(self):\n pass",
"def olive_image_parser(text: bytes) -> Optional[dict]:\n soup = BeautifulSoup(text, \"lxml\")\n root = soup.find(\"xmd-entity\")\n\n try:\n assert root is not None\n img = {\n 'id': root.get('id'),\n 'coords': root.img.get('box').split(),\n 'name': root.meta.get('name'),\n 'resolution': root.meta.get('images_resolution'),\n 'filepath': root.img.get('href')\n }\n return img\n except AssertionError:\n return None",
"def open_image_and_meta(image_bytes):\n with MemoryFile(image_bytes) as memfile:\n with memfile.open() as src:\n meta = src.meta\n arr = reshape_as_image(src.read())\n return arr, meta",
"def encode_decode(self, img, img_metas):\n pass",
"def _parseImageXml(self, xml, topImage):\n if not topImage or topImage.pixelInfo.get('magnificaiton'):\n return\n topImage.parse_image_description(xml)\n if not topImage._description_record:\n return\n try:\n xml = topImage._description_record\n # Optrascan metadata\n scanDetails = xml.get('ScanInfo', xml.get('EncodeInfo'))['ScanDetails']\n mag = float(scanDetails['Magnification'])\n # In microns; convert to mm\n scale = float(scanDetails['PixelResolution']) * 1e-3\n topImage._pixelInfo = {\n 'magnification': mag,\n 'mm_x': scale,\n 'mm_y': scale,\n }\n except Exception:\n pass",
"def process(self, image):",
"def test_read_namespaced_image_stream(self):\n pass",
"def image_process(image_info):\n path = os.path.join(cfg.IMAGESET, image_info.get(\"index\") + \".jpg\")\n if not os.path.exists(path):\n raise IOError(\"please check your file is not exists: \" + path)\n def load_image(path):\n image = Image.open(path)\n return image\n return load_image(path)",
"def test_read_namespaced_image_stream_tag(self):\n pass",
"def process_image((uri, label)):\n image_bytes = read_image(uri)\n\n if image_bytes is not None:\n yield uri, label, image_bytes",
"def image_info(img):\n\tprint(img.format)\n\tprint(img.size)\n\tprint(img.mode)",
"def forward_test(self, img, img_metas, **kwargs):",
"def test_class_image(self):\n fwa = FakeWikiArchivo(\n 'abcd <a href=\"/wiki/foobar\" class=\"image\">FooBar</a> dcba'\n )\n _, r = self.peishranc(fwa)\n self.assertEqual(r, [])",
"def test_rt_metadata():\n \n img = lena()\n\n textdata = \"This is some arbitrary metadata for a text field\"\n info = TiffImagePlugin.ImageFileDirectory()\n\n info[tag_ids['ImageJMetaDataByteCounts']] = len(textdata)\n info[tag_ids['ImageJMetaData']] = textdata\n\n f = tempfile(\"temp.tif\")\n\n img.save(f, tiffinfo=info)\n \n loaded = Image.open(f)\n\n assert_equal(loaded.tag[50838], (len(textdata),))\n assert_equal(loaded.tag[50839], textdata)",
"def read_image(self, item):\n assert item['image_dtype'] == 'uint16'\n\n filename = os.path.join(self.home(item['basename']))\n s = open(filename, 'rb').read()\n assert hashlib.md5(s).hexdigest() == item['md5']\n img = np.fromstring(s, dtype=item['image_dtype']).byteswap()\n img = img.reshape(item['image_shape'])\n return img",
"def met(r):\n image_url = r.get(\"image\")\n if image_url is None:\n if r.get(\"source\") is not None:\n image_url = r.get(\"source\").get(\"href\")\n image_name = r.get(\"name\")\n image_artist = r.get(\"Who\")\n return image_url, image_name, image_artist",
"def test_meta(self):\n expected = {\n 'data_path': [str(self.img_path)],\n 'fast_disk': '',\n 'num_workers': -1,\n 'save_path0': str(self.session_path.joinpath('alf')),\n 'move_bin': True,\n 'keep_movie_raw': False,\n 'delete_bin': False,\n 'batch_size': 500,\n 'combined': False,\n 'look_one_level_down': False,\n 'num_workers_roi': -1,\n 'nimg_init': 400,\n 'nonrigid': True,\n 'maxregshift': 0.05,\n 'denoise': 1,\n 'block_size': [128, 128],\n 'save_mat': True,\n 'scalefactor': 1,\n 'mesoscan': True,\n 'nplanes': 1,\n 'tau': 1.5,\n 'functional_chan': 1,\n 'align_by_chan': 1,\n 'nrois': 1,\n 'nchannels': 1,\n 'fs': 6.8,\n 'lines': [[3, 4, 5]],\n 'dx': np.array([0], dtype=int),\n 'dy': np.array([0], dtype=int),\n }\n\n meta = {\n 'scanImageParams': {'hStackManager': {'zs': 320},\n 'hRoiManager': {'scanVolumeRate': 6.8}},\n 'FOV': [{'topLeftDeg': [-1, 1.3], 'topRightDeg': [3, 1.3], 'bottomLeftDeg': [-1, 5.2],\n 'nXnYnZ': [512, 512, 1], 'channelIdx': 2, 'lineIdx': [4, 5, 6]}]\n }\n with open(self.img_path.joinpath('_ibl_rawImagingData.meta.json'), 'w') as f:\n json.dump(meta, f)\n self.img_path.joinpath('test.tif').touch()\n with mock.patch.object(self.task, 'get_default_tau', return_value=1.5):\n _ = self.task.run(run_suite2p=False, rename_files=False)\n self.assertEqual(self.task.status, 0)\n self.assertDictEqual(self.task.kwargs, expected)\n # {k: v for k, v in self.task.kwargs.items() if expected[k] != v}\n # Now overwrite a specific option with task.run kwarg\n with mock.patch.object(self.task, 'get_default_tau', return_value=1.5):\n _ = self.task.run(run_suite2p=False, rename_files=False, nchannels=2, delete_bin=True)\n self.assertEqual(self.task.status, 0)\n self.assertEqual(self.task.kwargs['nchannels'], 2)\n self.assertEqual(self.task.kwargs['delete_bin'], True)\n with open(self.img_path.joinpath('_ibl_rawImagingData.meta.json'), 'w') as f:\n json.dump({}, f)"
]
| [
"0.72541213",
"0.71688217",
"0.6869826",
"0.68278027",
"0.6766955",
"0.6693063",
"0.6681533",
"0.66430104",
"0.6527833",
"0.65183055",
"0.6516116",
"0.6472335",
"0.63868135",
"0.63460547",
"0.6322751",
"0.6241381",
"0.6176103",
"0.6162279",
"0.6160161",
"0.6152018",
"0.6060926",
"0.60401446",
"0.6025206",
"0.60014236",
"0.5991696",
"0.59573144",
"0.59506714",
"0.59468114",
"0.594506",
"0.59424025"
]
| 0.8063198 | 0 |
Test the internal _parse_image_meta methode Feed it an 'lx' image as we get it from from imgadm list j | def test_parse_image_meta_lx(image_lx):
ret = {
"description": (
"Container-native Ubuntu 16.04 64-bit image. Built to run on "
"containers with bare metal speed, while offering all the "
"services of a typical unix host."
),
"name": "ubuntu-16.04",
"os": "linux",
"published": "2016-06-01T02:17:41Z",
"source": "https://images.joyent.com",
"version": "20160601",
}
assert _parse_image_meta(image_lx, True) == ret | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_list_image_metadata(self):\n pass",
"def test_parse_image_meta_native(image_native):\n ret = {\n \"description\": (\"A SmartOS image pre-configured for building pkgsrc packages.\"),\n \"name\": \"pkgbuild\",\n \"os\": \"smartos\",\n \"published\": \"2018-04-09T08:25:52Z\",\n \"source\": \"https://images.joyent.com\",\n \"version\": \"18.1.0\",\n }\n assert _parse_image_meta(image_native, True) == ret",
"def test_parse_image_meta_orphan(image_orphan):\n ret = {\"Error\": \"This looks like an orphaned image, image payload was invalid.\"}\n assert _parse_image_meta(image_orphan, True) == ret",
"def checksImages(self):\n metadata=[]\n for image in self.meta['sources']:\n with rasterio.open(image) as src:\n metaData=src.meta\n \n assert metaData['driver'] == 'GTiff', \"Driver is not supported: {0}\".format(metaData['driver'])\n assert metaData['count'] == len(self.meta['bandNames']), \"Nbands incorrect, expected: {0}, {1} provided\".format(metaData['count'],len(self.meta['bandNames']))\n \n metadata.append({'dtype': metaData['dtype'], 'driver': metaData['driver'], 'nodata': metaData['nodata'], 'nBands': metaData['count'],'crs': src.crs.to_string()})\n \n assert len(set([item['dtype'] for item in metadata])) == 1, \"Images list dtypes aren't compatibles. Expected: 1, {1} provided\".format(metaData['count'],len(set([item['dtype'] for item in metadata])))\n assert len(set([item['driver'] for item in metadata])) == 1, \"Images list drivers aren't compatibles. Expected: 1, 1 provided\".format(metaData['count'],len(set([item['driver'] for item in metadata])))\n assert len(set([item['nodata'] for item in metadata])) == 1, \"Images list nodata values aren't compatibles. Expected: 1, {1} provided\".format(metaData['count'],len(set([item['nodata'] for item in metadata])))\n assert len(set([item['nBands'] for item in metadata])) == 1, \"Images list nBands number aren't compatibles. Expected: 1, {1} provided\".format(metaData['count'],len(set([item['nBands'] for item in metadata])))\n assert len(set([item['crs'] for item in metadata])) == 1, \"Images list crs aren't compatibles. Expected: 1, {1} provided\".format(metaData['count'],len(set([item['crs'] for item in metadata]))) \n return metadata[0]",
"def parse_image_meta(meta):\n image_id = meta[:, 0]\n image_shape = meta[:, 1:4]\n window = meta[:, 4:8] # (x1, y1, x2, y2) window of image in in pixels\n active_class_ids = meta[:, 8:]\n return image_id, image_shape, window, active_class_ids",
"def _getAllMeta(self):\n try:\n metadata = pyexiv2.ImageMetadata(self.imagePath)\n metadata.read()\n return metadata\n except:\n print 'error reading meta data'\n return None",
"def _parseImageXml(self, xml, topImage):\n if not topImage or topImage.pixelInfo.get('magnificaiton'):\n return\n topImage.parse_image_description(xml)\n if not topImage._description_record:\n return\n try:\n xml = topImage._description_record\n # Optrascan metadata\n scanDetails = xml.get('ScanInfo', xml.get('EncodeInfo'))['ScanDetails']\n mag = float(scanDetails['Magnification'])\n # In microns; convert to mm\n scale = float(scanDetails['PixelResolution']) * 1e-3\n topImage._pixelInfo = {\n 'magnification': mag,\n 'mm_x': scale,\n 'mm_y': scale,\n }\n except Exception:\n pass",
"def test_rt_metadata(self):\n\n img = hopper()\n\n # Behaviour change: re #1416\n # Pre ifd rewrite, ImageJMetaData was being written as a string(2),\n # Post ifd rewrite, it's defined as arbitrary bytes(7). It should\n # roundtrip with the actual bytes, rather than stripped text\n # of the premerge tests.\n #\n # For text items, we still have to decode('ascii','replace') because\n # the tiff file format can't take 8 bit bytes in that field.\n\n basetextdata = \"This is some arbitrary metadata for a text field\"\n bindata = basetextdata.encode('ascii') + b\" \\xff\"\n textdata = basetextdata + \" \" + chr(255)\n reloaded_textdata = basetextdata + \" ?\"\n floatdata = 12.345\n doubledata = 67.89\n info = TiffImagePlugin.ImageFileDirectory()\n\n ImageJMetaData = tag_ids['ImageJMetaData']\n ImageJMetaDataByteCounts = tag_ids['ImageJMetaDataByteCounts']\n ImageDescription = tag_ids['ImageDescription']\n\n info[ImageJMetaDataByteCounts] = len(bindata)\n info[ImageJMetaData] = bindata\n info[tag_ids['RollAngle']] = floatdata\n info.tagtype[tag_ids['RollAngle']] = 11\n info[tag_ids['YawAngle']] = doubledata\n info.tagtype[tag_ids['YawAngle']] = 12\n\n info[ImageDescription] = textdata\n\n f = self.tempfile(\"temp.tif\")\n\n img.save(f, tiffinfo=info)\n\n loaded = Image.open(f)\n\n self.assertEqual(loaded.tag[ImageJMetaDataByteCounts], (len(bindata),))\n self.assertEqual(loaded.tag_v2[ImageJMetaDataByteCounts],\n (len(bindata),))\n\n self.assertEqual(loaded.tag[ImageJMetaData], bindata)\n self.assertEqual(loaded.tag_v2[ImageJMetaData], bindata)\n\n self.assertEqual(loaded.tag[ImageDescription], (reloaded_textdata,))\n self.assertEqual(loaded.tag_v2[ImageDescription], reloaded_textdata)\n\n loaded_float = loaded.tag[tag_ids['RollAngle']][0]\n self.assertAlmostEqual(loaded_float, floatdata, places=5)\n loaded_double = loaded.tag[tag_ids['YawAngle']][0]\n self.assertAlmostEqual(loaded_double, doubledata)\n\n # check with 2 element ImageJMetaDataByteCounts, issue #2006\n\n info[ImageJMetaDataByteCounts] = (8, len(bindata) - 8)\n img.save(f, tiffinfo=info)\n loaded = Image.open(f)\n\n self.assertEqual(loaded.tag[ImageJMetaDataByteCounts],\n (8, len(bindata) - 8))\n self.assertEqual(loaded.tag_v2[ImageJMetaDataByteCounts],\n (8, len(bindata) - 8))",
"def parse_image_meta_graph(meta):\n image_id = meta[:, 0]\n image_shape = meta[:, 1:4]\n window = meta[:, 4:8]\n active_class_ids = meta[:, 8:]\n return [image_id, image_shape, window, active_class_ids]",
"def parse_image_meta_graph(self, meta):\n\n image_id = meta[:, 0]\n original_image_shape = meta[:, 1:4]\n image_shape = meta[:, 4:7]\n window = meta[:, 7:11] # (y1, x1, y2, x2) window of image in in pixels\n scale = meta[:, 11]\n active_class_ids = meta[:, 12:]\n return {\n \"image_id\": image_id,\n \"original_image_shape\": original_image_shape,\n \"image_shape\": image_shape,\n \"window\": window,\n \"scale\": scale,\n \"active_class_ids\": active_class_ids,\n }\n pass",
"def parse_image_meta_graph(meta):\n image_id = meta[:, 0]\n original_image_shape = meta[:, 1:4]\n image_shape = meta[:, 4:7]\n window = meta[:, 7:11] # (y1, x1, y2, x2) window of image in in pixels\n scale = meta[:, 11]\n active_class_ids = meta[:, 12:]\n return {\n \"image_id\": image_id,\n \"original_image_shape\": original_image_shape,\n \"image_shape\": image_shape,\n \"window\": window,\n \"scale\": scale,\n \"active_class_ids\": active_class_ids,\n }",
"def metadata2eic(url):\n logging.info('fetching image metadata from %s' % url)\n ds = json.loads(urllib.urlopen(url).read())\n fields = ['imagename','alt','pitch','roll']\n for d in ds:\n yield map(str,[d[k] for k in fields])",
"def test_read_image(self):\n pass",
"def test_parse_image_meta_docker(image_docker):\n ret = {\n \"description\": (\n \"Docker image imported from \"\n \"busybox42/zimbra-docker-centos:latest on \"\n \"2019-03-23T01:32:25.320Z.\"\n ),\n \"name\": \"busybox42/zimbra-docker-centos:latest\",\n \"os\": \"linux\",\n \"published\": \"2019-03-23T01:32:25.320Z\",\n \"source\": \"https://docker.io\",\n \"version\": \"62487cf6a7f6\",\n }\n assert _parse_image_meta(image_docker, True) == ret",
"def olive_image_parser(text: bytes) -> Optional[dict]:\n soup = BeautifulSoup(text, \"lxml\")\n root = soup.find(\"xmd-entity\")\n\n try:\n assert root is not None\n img = {\n 'id': root.get('id'),\n 'coords': root.img.get('box').split(),\n 'name': root.meta.get('name'),\n 'resolution': root.meta.get('images_resolution'),\n 'filepath': root.img.get('href')\n }\n return img\n except AssertionError:\n return None",
"def met(r):\n image_url = r.get(\"image\")\n if image_url is None:\n if r.get(\"source\") is not None:\n image_url = r.get(\"source\").get(\"href\")\n image_name = r.get(\"name\")\n image_artist = r.get(\"Who\")\n return image_url, image_name, image_artist",
"def open_image_and_meta(image_bytes):\n with MemoryFile(image_bytes) as memfile:\n with memfile.open() as src:\n meta = src.meta\n arr = reshape_as_image(src.read())\n return arr, meta",
"def handle_images(lyx_path, blog_dir, assets_rel_dir, front_matters,\n update=True):\n\n our_fm = front_matters.our_fm\n\n assets_rel_dir = os.path.normpath(assets_rel_dir)\n\n # The images are created in a directory of the form\n # blog_path/assets_rel_dir/date-html_fname\n # so that images of different articles are in separate directories.\n date_html_fname = front_matters.get_date_html_fname()\n rel_dest_dir = os.path.join(assets_rel_dir, date_html_fname)\n dest_dir = os.path.join(blog_dir, rel_dest_dir)\n\n image_info = []\n name_to_num = {}\n\n image_num = 1\n image_http_path = None\n image_label = None\n\n # NOTE:\n # - In LyX files, '\\' can only appear in commands, so searching for, say,\n # '\\begin_inset' is safe.\n with open(lyx_path, encoding='utf-8') as f:\n # in_* remember the nesting level; -1 = not inside\n in_graphics = -1\n in_float_figure = -1\n in_label = -1\n\n nesting = 0\n\n for line in f:\n if line.startswith(r'\\begin_inset Float figure'):\n in_float_figure = nesting # we're in\n if line.startswith(r'\\begin_inset Graphics'):\n in_graphics = nesting # we're in\n if (line.startswith(r'\\begin_inset CommandInset label') and\n in_float_figure != -1): # only if in float figure\n in_label = nesting # we're in\n\n we_were_in = (in_graphics != -1 or\n in_float_figure != -1 or\n in_label != -1)\n\n # We handle the nesting of begin_ and end_inset.\n if line.startswith(r'\\begin_inset'):\n nesting += 1\n elif line.startswith(r'\\end_inset'):\n nesting -= 1\n if in_graphics == nesting:\n in_graphics = -1 # we're out\n if in_float_figure == nesting:\n in_float_figure = -1 # we're out\n if in_label == nesting:\n in_label = -1 # we're out\n\n we_are_in = (in_graphics != -1 or\n in_float_figure != -1 or\n in_label != -1)\n\n if we_were_in and not we_are_in: # we exited\n # We write the data collected so far.\n if image_http_path is None:\n raise Exception(\"LyX file: couldn't get image http path!\")\n image_info.append(image_http_path)\n if image_label:\n name_to_num[image_label] = str(image_num)\n image_num += 1\n\n # reset\n image_http_path = None\n image_label = None\n\n if in_graphics != -1:\n # format:\n # filename discrete fgfg.svg\n m = re.match(r'\\s*filename\\s+(.+)$', line)\n if m:\n src_path = m[1]\n base_name = os.path.basename(src_path)\n dest_path = os.path.join(dest_dir, base_name)\n if not update and os.path.exists(dest_path):\n raise Exception('Already exists: ' + dest_path)\n\n # Create the directory and copy the file\n os.makedirs(dest_dir, exist_ok=True)\n shutil.copy(src_path, dest_path)\n\n # Return the blog-relative path of the copied image\n image_http_path = ('/' + assets_rel_dir + '/' +\n date_html_fname + '/' + base_name)\n\n if in_float_figure != -1 and in_label != -1:\n # format:\n # name \"fig:label_per_figure\"\n m = re.match(r'\\s*name\\s+\"([^\"]+)\"$', line)\n if m:\n image_label = m[1]\n\n return image_info, name_to_num",
"def process_image((uri, label)):\n image_bytes = read_image(uri)\n\n if image_bytes is not None:\n yield uri, label, image_bytes",
"def test_get_ao_image(self):\n response = self.client.open(\n '/rui-support/ao-image',\n method='GET',\n content_type='application/ld+json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def process_image(self):\n pass",
"def test_read_namespaced_image_stream_image(self):\n pass",
"def load_image(self, **kwargs):\n ...",
"def test_rt_metadata():\n \n img = lena()\n\n textdata = \"This is some arbitrary metadata for a text field\"\n info = TiffImagePlugin.ImageFileDirectory()\n\n info[tag_ids['ImageJMetaDataByteCounts']] = len(textdata)\n info[tag_ids['ImageJMetaData']] = textdata\n\n f = tempfile(\"temp.tif\")\n\n img.save(f, tiffinfo=info)\n \n loaded = Image.open(f)\n\n assert_equal(loaded.tag[50838], (len(textdata),))\n assert_equal(loaded.tag[50839], textdata)",
"def test_list_image(self):\n pass",
"def populateMeta(self, *args):\n meta = self._getAllMeta()\n if not meta:\n raise MetaReadError(\"Error Reading Image MetaData, has image finished copying?\")\n else:\n self.exifKeys = self._getAllMetaKeys(meta)\n for key in self.exifKeys:\n if key == self._getExifKey_TimeCode():\n tag = meta[self._getExifKey_TimeCode()]\n self.startTimecode = tag.raw_value\n self._splitTimecode()\n \n if args:\n for arg in args:\n try:\n lTag = meta[arg]\n self.__dict__[arg.split('.')[1] + '_' + arg.split('.')[2]] = lTag.raw_value\n except:\n print 'could not get meta for tag ', arg",
"def process(self, image):",
"def test_Image():\n assert Image(cur, \"Simple_Linear\").detect_image() == True\n assert Image(cur, \"Logistic_Linear\").detect_image() == False\n assert Image(cur, \"Simple_Linear\").date == \"2021-04-20\"\n assert Image(cur, \"Breslow-Day_Test\").source == \"Course BIOSTAT703 slide\"",
"def image_process(image_info):\n path = os.path.join(cfg.IMAGESET, image_info.get(\"index\") + \".jpg\")\n if not os.path.exists(path):\n raise IOError(\"please check your file is not exists: \" + path)\n def load_image(path):\n image = Image.open(path)\n return image\n return load_image(path)",
"def forward_test(self, img, img_metas, **kwargs):"
]
| [
"0.693872",
"0.6648406",
"0.6415607",
"0.64070094",
"0.6382566",
"0.63602597",
"0.63075614",
"0.6132728",
"0.61233515",
"0.6116494",
"0.6083304",
"0.607617",
"0.6065453",
"0.5985813",
"0.59669995",
"0.5943033",
"0.5930517",
"0.59295875",
"0.58997864",
"0.586116",
"0.5860643",
"0.585562",
"0.58435285",
"0.5836021",
"0.5799419",
"0.57898563",
"0.5785739",
"0.57842416",
"0.5764626",
"0.57536656"
]
| 0.79599434 | 0 |