Dataset columns:
  query: string (lengths 9 to 9.05k)
  document: string (lengths 10 to 222k)
  metadata: dict
  negatives: sequence (length 30)
  negative_scores: sequence (length 30)
  document_score: string (lengths 4 to 10)
  document_rank: string (2 distinct values)
Given an offset, plaintext, and oracle, forges a block with the proper padding.
def forge_block(offset, plaintext, oracle):
    b_size, _, _ = challenge_12.determine_block_stats(oracle)
    new_padding = b"A" * (b_size - offset)
    payload = new_padding + challenge_09.pkcs7(plaintext, b_size)
    ciphertext = oracle(payload)

    return challenge_07.as_blocks(ciphertext, b_size)[1]
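The helpers challenge_12.determine_block_stats, challenge_09.pkcs7, and challenge_07.as_blocks are referenced here but not included in the row. For orientation, a minimal PKCS#7 padding helper in the spirit of challenge_09.pkcs7 (the name, signature, and default block size are assumptions, not the row's code) could look like:

def pkcs7(data: bytes, block_size: int = 16) -> bytes:
    # Append pad_len bytes, each with value pad_len, so the length becomes a multiple of block_size.
    pad_len = block_size - (len(data) % block_size)
    return data + bytes([pad_len]) * pad_len

# Example: a 16-byte input padded to a 20-byte block boundary.
assert pkcs7(b"YELLOW SUBMARINE", 20) == b"YELLOW SUBMARINE\x04\x04\x04\x04"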
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def forge_padding_block(oracle):\n b_size, pt_size, padding = challenge_12.determine_block_stats(oracle)\n new_padding = b\"A\" * padding\n\n return challenge_07.as_blocks(oracle(new_padding), b_size)[-1]", "def encrypt(text, offset):\r\n\r\n return format_text(text, offset)", "def pkcs7_pad(blocklength, text):\n padlen = blocklength - len(text) % blocklength\n return text + chr(padlen) * padlen", "def pad(plain_text):\n number_of_bytes_to_pad = block_size - len(plain_text) % block_size\n ascii_string = chr(number_of_bytes_to_pad)\n padding_str = number_of_bytes_to_pad * ascii_string\n padded_plain_text = plain_text + padding_str\n return padded_plain_text", "def find_block_length(encryption_oracle):\n my_text = ''\n ciphertext = encryption_oracle(my_text)\n initial_len = len(ciphertext)\n new_len = initial_len\n\n while new_len == initial_len:\n my_text += 'A'\n ciphertext = encryption_oracle(my_text)\n new_len = len(ciphertext)\n\n return new_len - initial_len", "def encryption_oracle(unknown_encryptor: Callable[[bytes, bytes], bytes]) -> str:\n my_bytes = b'This is 16 bytes'*3\n encrypted_bytes = unknown_encryptor(my_bytes, os.urandom(16))\n hamming_distance = average_hamming_distance_between_blocks(encrypted_bytes[16:], 16, 2)\n return 'ECB' if hamming_distance == 0 else 'Not ECB'", "def pad(msg):\n return msg + (BLOCK_SIZE - len(msg)) * PADDING", "def decrypt(text, offset):\r\n return format_text(text, -offset)", "def block(self, text, head_offset=0):\n if not self.lite:\n tre = '|'.join(self.btag)\n else:\n tre = '|'.join(self.btag_lite)\n text = text.split('\\n\\n')\n\n tag = 'p'\n atts = cite = graf = ext = ''\n c1 = ''\n\n out = []\n\n anon = False\n for line in text:\n pattern = r'^(%s)(%s%s)\\.(\\.?)(?::(\\S+))? (.*)$' % (\n tre, self.align_re, self.c\n )\n match = re.search(pattern, line, re.S)\n if match:\n if ext:\n out.append(out.pop() + c1)\n\n tag, atts, ext, cite, graf = match.groups()\n h_match = re.search(r'h([1-6])', tag)\n if h_match:\n head_level, = h_match.groups()\n tag = 'h%i' % max(1, min(int(head_level) + head_offset, 6))\n o1, o2, content, c2, c1, eat = self.fBlock(tag, atts, ext,\n cite, graf)\n # leave off c1 if this block is extended,\n # we'll close it at the start of the next block\n\n if ext:\n line = \"%s%s%s%s\" % (o1, o2, content, c2)\n else:\n line = \"%s%s%s%s%s\" % (o1, o2, content, c2, c1)\n\n else:\n anon = True\n if ext or not re.search(r'^\\s', line):\n o1, o2, content, c2, c1, eat = self.fBlock(tag, atts, ext,\n cite, line)\n # skip $o1/$c1 because this is part of a continuing\n # extended block\n if tag == 'p' and not self.hasRawText(content):\n line = content\n else:\n line = \"%s%s%s\" % (o2, content, c2)\n else:\n line = self.graf(line)\n\n line = self.doPBr(line)\n if self.html_type == 'xhtml':\n line = re.sub(r'<br>', '<br />', line)\n\n if self.html_type == 'html':\n line = re.sub(r'<br />', '<br>', line)\n\n if ext and anon:\n out.append(out.pop() + \"\\n\" + line)\n elif not eat:\n out.append(line)\n\n if not ext:\n tag = 'p'\n atts = ''\n cite = ''\n graf = ''\n\n if ext:\n out.append(out.pop() + c1)\n return '\\n\\n'.join(out)", "def pad_encoded_text(self, encoded_text):\n\n\t\textra_padding = 8 - len(encoded_text) % 8#calculmaos cuanto falta por agregar\n\t\tfor i in range(extra_padding):\n\t\t\tencoded_text += \"0\"\n\n\t\tpadded_info = \"{0:08b}\".format(extra_padding)#le agregamos una informacion adicionar la cual utilizaremos despues al comprimir para saber cuantos 0 le agregamos y despues poder eliminarlos\n\t\tencoded_text 
= padded_info + encoded_text\n\t\treturn encoded_text", "def encryption_oracle(pt):\n\n key = rand_bytes(16)\n iv = rand_bytes(16) # In case the mode is CBC. Generate this before\n # choosing the mode to protect against timing attacks.\n padded_pt = rand_bytes_range(5, 10) + pt + rand_bytes_range(5, 10)\n if random.randint(0, 1) == 0:\n # print True # Uncomment to check the oracle detector\n return aes_ecb_encrypt(key, padded_pt)\n else:\n # print False # Uncomment to check the oracle detector\n return aes_cbc_encrypt(key, padded_pt, iv)", "def __pad(self, data):\n return data + (AES.block_size - len(data) % AES.block_size) * \\\n chr(AES.block_size - len(data) % AES.block_size)", "def add_padding(self, text):\n\n for word in text.split(' '):\n # 5 character blocks added straight\n if len(word) == 5:\n self.output += word + ' '\n # calling the helper method to fill the blocks\n elif len(word) < 5:\n self._helper(word)\n # split the block up into 5 letter chunks\n elif len(word) > 5:\n block = ''\n for letter in word:\n block += letter\n if len(block) == 5:\n # append the chunk to output\n self.output += block + ' '\n block = ''\n self._helper(block)\n\n return self.output.upper()", "def __Pad(self, data):\n pad = self.block_size - len(data) % self.block_size\n return data + pad * chr(pad)", "def offset_pad(self, offset):\n return (((offset + 3) / 4) * 4)", "def pad(data):\r\n bytes_to_pad = AES.block_size - len(data) % AES.block_size\r\n return data + (bytes_to_pad * chr(bytes_to_pad))", "def get_pt_block(plain_text): # 4 Blocks 16 bit each\n pt_block = []\n temp = ' '.join(bin(ord(item))[2:] for item in plain_text)\n temp = temp.split(' ') # Split chars into list of 8 cells\n temp_list = ['0' * (8 - len(item)) + item for item in temp]\n temp = ''.join(byte for byte in temp_list)\n temp = ''.join('0' * (BLOCK_SIZE * 4 - len(temp))) + temp\n [pt_block.append(int(temp[i:i + 16], 2)) for i in range(0, len(temp), 16)]\n return pt_block", "def pad_instance(line):\n \n # split the line and extract attributes\n attributes = line.split(\",\")\n seq = attributes[0].strip()\n inc = int(attributes[1])\n out = int(attributes[2])\n lifetime = float(attributes[3])\n classify = attributes[4]\n inc_50 = int(attributes[5])\n out_50 = int(attributes[6])\n\n # how many cells were sent/received before any padding\n initial_num_cells = inc + out\n\n # the ratio of outgoing cells to incoming cells\n out_in_ratio = float(out)/float(inc)\n new_seq, orig_seq_length, inc_added, out_added = pad_sequence(seq)\n \n # account for added beginning sequence padding in overall total\n inc += inc_added\n out += out_added\n\n # account for added beginning sequence padding in first 50 or so cells\n inc_50 += inc_added\n out_50 += out_added\n\n out_padding = 0\n in_padding = 0\n \n # flip a coin\n coin = random.randint(1, 9)\n \n # if the circuit has more incoming cells than outgoing cells \n # (typical of Client-RP)\n if classify != \"noise\" and out_in_ratio < 0.98:\n \n # pad the outgoing cells to bring the ratios closer\n if coin <= 4:\n out_padding = int(out / out_in_ratio * 0.85)\n else:\n out_padding = int(out / out_in_ratio * 1.05)\n \n # if there are more outgoing than incoming cells \n # (typical of HS-RP)\n elif classify != \"noise\" and out_in_ratio > 1.02:\n \n # pad the incoming cells to bring the ratios closer\n if coin <= 4:\n in_padding = int(inc * out_in_ratio * 0.9)\n else:\n in_padding = int(inc * out_in_ratio * 1.05)\n\n # add the appropriate padding to the overall totals\n inc += in_padding\n out += 
out_padding\n\n # we have to account for how padding would affect the first 50 or so cells\n first_cells = inc_50 + out_50\n first_ratio = float(inc_50)/first_cells\n if first_cells > 50:\n first_cells = 50\n \n # the first 50 cells should have a similar ratio to the padding\n new_inc_percent = float(inc) / (inc + out)\n \n # add a bit of randomness to the first 50 if they are not noise\n first_random = random.randint(1, 201) / 1000.0\n flip = random.randint(1, 11)\n if flip % 2 == 0:\n if new_inc_percent + new_inc_percent * first_random < 1:\n new_inc_percent += new_inc_percent * first_random\n else:\n if new_inc_percent - new_inc_percent * first_random < 1:\n new_inc_percent -= new_inc_percent * first_random\n\n general = False\n # don't mess with the ratio if we didn't pad the whole thing\n if classify == \"noise\":\n general = True\n new_inc_percent = first_ratio\n\n # the first 50 cells should follow the padded ratio\n inc_50 = int(new_inc_percent * first_cells)\n out_50 = first_cells - inc_50\n\n # the padded instance for the new file\n padded_instance = new_seq + \",\" + str(inc) + \",\" + str(out) + \",\" \\\n + str(lifetime) + \",\" + classify + \",\" + str(inc_50) + \",\" + str(out_50)\n\n num_cells_with_padding = inc + out\n\n # return the padded instance, the initial number of cells for the circuit,\n # and the number of cells after padding, because we need to know\n # how much overhead the padding adds\n return padded_instance, initial_num_cells, num_cells_with_padding, general", "def find_padding(known, iter_len=1):\n pad = None\n starting_length = oracle(known)\n for i in range(32):\n test_pad = random_nonb64_string(i)\n padded_length = oracle(known + test_pad)\n if padded_length != starting_length:\n pad = test_pad[:-iter_len]\n break\n return pad", "def calculate_padding_to_align(length, align):\n return 0 if length % align == 0 else (align - (length % align))", "def add_padding(self):\n pad_len = 8 - (len(self.text) % 8)\n self.text += pad_len * chr(pad_len)", "def _derive_padding_crypto(self, seed, pad_string): # XXX consider secret_seed\n secret = self.mac(pad_string,\n seed,\n self.shared_secret)\n return aes.AES_CTR_128(secret[:KEYLEN], secret[KEYLEN:])", "def _post_decrypt_checks(self, aad, plaintext, protected_message, request_id):", "def pad_with_buffer(b: bytes, pad: bytes) -> bytes:\n assert b\n assert pad\n\n b += pad\n b = pkcs_7(b, 16)\n\n return b", "def decrypt(phrase, offset):\n return encrypt(phrase, 26 - offset) #Encrypting then decrypting by the same number will in effect encrypt by 26, looping back to the starting letters", "def encrypt(text, offset):\n encrypted_text = \"\"\n for char in text:\n if ord(char) <= 64:\n encrypted_character = chr(ord(char))\n elif ord(char) < 90:\n encrypted_character = ord(char) + offset\n if encrypted_character > 90:\n encrypted_character -= 26\n encrypted_character = chr(encrypted_character)\n else:\n encrypted_character = ord(char) + offset\n if encrypted_character > 122:\n encrypted_character -= 26\n encrypted_character = chr(encrypted_character)\n encrypted_text += encrypted_character\n\n return encrypted_text", "def pad_bases_pw_ip_cigar(read: deepconsensus_pb2.Subread,\n padded_len: int) -> None:\n pad_amt = padded_len - len(read.bases)\n if pad_amt > 0:\n str_padding = dc_constants.GAP_OR_PAD * pad_amt\n list_padding = [dc_constants.GAP_OR_PAD_INT] * pad_amt\n read.bases = read.bases + str_padding\n read.pw[:] = list(read.pw) + list_padding\n read.ip[:] = list(read.ip) + list_padding\n read.expanded_cigar = 
read.expanded_cigar + str_padding", "def add_padding(text1: str) -> str:\n\n pad_len = 8 - (len(text1) % 8)\n return text1 + (pad_len * '\\0')", "def pad(plain, size):\n offset = size - (len(plain) % size)\n return plain + chr(offset) * offset", "def parseAddressDA(address, blocks, block_size, word_size = 4):\n binary_address = bin(address)[2:].zfill(32)\n byte_offset_size = int(math.log2(word_size))\n word_offset_size = int(math.log2(block_size))\n index_size = int(math.log2(blocks))\n byte_offset = int(binary_address[-byte_offset_size:],2)\n if word_offset_size == 0:\n word_offset = 0\n elif word_offset_size == 1:\n word_offset = int(binary_address[len(binary_address)-byte_offset_size-1],2)\n else:\n word_offset = int(binary_address[-byte_offset_size-byte_offset_size:-byte_offset_size],2)\n index = int(binary_address[-byte_offset_size-word_offset_size-index_size:-byte_offset_size-word_offset_size],2)\n tag = int(binary_address[:-(byte_offset_size+byte_offset_size)-1],2)\n #address_result = int(binary_address[:-byte_offset_size],2)\n return {\"tag\" : tag, \"address_result\" : address - byte_offset , \"index\" : index , \"word_offset\" : word_offset, \"byte_offset\" : byte_offset}" ]
[ "0.7168934", "0.5863658", "0.5846171", "0.5747094", "0.574332", "0.5658577", "0.5591082", "0.5590468", "0.54089946", "0.53620017", "0.53189814", "0.5233698", "0.52017486", "0.517661", "0.51744473", "0.512005", "0.50064063", "0.49992564", "0.49891132", "0.4943387", "0.489504", "0.4890104", "0.48709166", "0.48686668", "0.4866655", "0.48475662", "0.48460665", "0.48280573", "0.4812431", "0.48115885" ]
0.7434226
0
Given an oracle, forges a block consisting entirely of PKCS#7 padding (which occurs when the plaintext length is an integer multiple of the block size).
def forge_padding_block(oracle):
    b_size, pt_size, padding = challenge_12.determine_block_stats(oracle)
    new_padding = b"A" * padding

    return challenge_07.as_blocks(oracle(new_padding), b_size)[-1]
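Both rows above call challenge_12.determine_block_stats without showing it. A plausible sketch of what it must do, inferred only from how its return values (block size, plaintext size, padding length) are used, is to grow the oracle input until the ciphertext length jumps; this is an assumption, not the dataset's implementation:

def determine_block_stats(oracle):
    # The ciphertext length first grows once the extra input bytes fill the
    # existing PKCS#7 padding, and it grows by exactly one block.
    base_len = len(oracle(b""))
    for i in range(1, 256):
        new_len = len(oracle(b"A" * i))
        if new_len != base_len:
            block_size = new_len - base_len
            padding = i
            plaintext_size = base_len - padding
            return block_size, plaintext_size, padding
    raise ValueError("oracle output length never changed")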
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def forge_block(offset, plaintext, oracle):\n b_size, _, _ = challenge_12.determine_block_stats(oracle)\n new_padding = b\"A\" * (b_size - offset)\n payload = new_padding + challenge_09.pkcs7(plaintext, b_size)\n ciphertext = oracle(payload)\n\n return challenge_07.as_blocks(ciphertext, b_size)[1]", "def pkcs7_pad(blocklength, text):\n padlen = blocklength - len(text) % blocklength\n return text + chr(padlen) * padlen", "def find_block_length(encryption_oracle):\n my_text = ''\n ciphertext = encryption_oracle(my_text)\n initial_len = len(ciphertext)\n new_len = initial_len\n\n while new_len == initial_len:\n my_text += 'A'\n ciphertext = encryption_oracle(my_text)\n new_len = len(ciphertext)\n\n return new_len - initial_len", "def pkcs7_pad_bytes(input_bytes, block_size):\r\n return pad(input_bytes, block_size)", "def encryption_oracle(unknown_encryptor: Callable[[bytes, bytes], bytes]) -> str:\n my_bytes = b'This is 16 bytes'*3\n encrypted_bytes = unknown_encryptor(my_bytes, os.urandom(16))\n hamming_distance = average_hamming_distance_between_blocks(encrypted_bytes[16:], 16, 2)\n return 'ECB' if hamming_distance == 0 else 'Not ECB'", "def pkcsPadding():\n test_data = [(20, 'This is a Saturday'),(16, 'NO PAIN NO GAIN!')]\n\n for padlength,data in test_data:\n print padlength, repr(data), repr(pkcs7_pad(padlength, data))", "def pad(plain_text):\n number_of_bytes_to_pad = block_size - len(plain_text) % block_size\n ascii_string = chr(number_of_bytes_to_pad)\n padding_str = number_of_bytes_to_pad * ascii_string\n padded_plain_text = plain_text + padding_str\n return padded_plain_text", "def pkcs_7(b: bytes, size: int) -> bytes:\n assert size <= 0xff\n\n b = bytearray(b)\n padding = size - (len(b) % size)\n for _ in range(padding):\n b.append(padding)\n\n return bytes(b)", "def pad_with_buffer(b: bytes, pad: bytes) -> bytes:\n assert b\n assert pad\n\n b += pad\n b = pkcs_7(b, 16)\n\n return b", "def get_pt_block(plain_text): # 4 Blocks 16 bit each\n pt_block = []\n temp = ' '.join(bin(ord(item))[2:] for item in plain_text)\n temp = temp.split(' ') # Split chars into list of 8 cells\n temp_list = ['0' * (8 - len(item)) + item for item in temp]\n temp = ''.join(byte for byte in temp_list)\n temp = ''.join('0' * (BLOCK_SIZE * 4 - len(temp))) + temp\n [pt_block.append(int(temp[i:i + 16], 2)) for i in range(0, len(temp), 16)]\n return pt_block", "def find_padding(known, iter_len=1):\n pad = None\n starting_length = oracle(known)\n for i in range(32):\n test_pad = random_nonb64_string(i)\n padded_length = oracle(known + test_pad)\n if padded_length != starting_length:\n pad = test_pad[:-iter_len]\n break\n return pad", "def recover(n):\n recovered = b\"\"\n b_size = get_blocksize() \n rep = (n//b_size)+1\n for j in range(n):\n app = b\"A\"*((rep*b_size -(len(recovered)+1))) + recovered\n first = encrypt_oracle(b\"A\"*(rep*b_size - (len(recovered)+1)))[(rep-1)*b_size:rep*b_size]\n i = 0\n while i< 256:\n if first == encrypt_oracle(app + bytes(chr(i),'utf-8'))[(rep-1)*b_size:rep*b_size]:\n recovered += bytes(chr(i), 'utf-8')\n i = 256\n i+=1\n return recovered", "def pkcs5_pad(self,s):\n return s + (self.BLOCK_SIZE - len(s) % self.BLOCK_SIZE) * chr(self.BLOCK_SIZE - len(s) % self.BLOCK_SIZE)", "def naive_block_padding(b: bytes, size: int) -> bytes:\n assert size <= 0xff\n\n l = len(b)\n if l > 0 and l % size == 0:\n return b\n\n return b + b'\\x00' * (size - (l % size))", "def padding_encryption():\n return padding.OAEP(\n mgf=padding.MGF1(algorithm=hashes.SHA256()),\n 
algorithm=hashes.SHA256(),\n label=None\n )", "def __pad(self, data):\n return data + (AES.block_size - len(data) % AES.block_size) * \\\n chr(AES.block_size - len(data) % AES.block_size)", "def get_cipher_block(cipher_text): # 4 Blocks 16 bit each\n cipher_block = []\n [cipher_block.append(int(cipher_text[i:i + 4], 16)) for i in range(0, len(cipher_text), 4)]\n return cipher_block", "def pad(data):\r\n bytes_to_pad = AES.block_size - len(data) % AES.block_size\r\n return data + (bytes_to_pad * chr(bytes_to_pad))", "def un_pkcs_7(b: bytes, size: int) -> bytes:\n b = bytearray(b)\n padding = b[-1]\n if padding <= 0 or padding > size:\n raise BadPaddingException\n\n for i in range(-padding, 0):\n if b[i] != padding:\n raise BadPaddingException\n\n return bytes(b[:-padding])", "def __Pad(self, data):\n pad = self.block_size - len(data) % self.block_size\n return data + pad * chr(pad)", "def encryption_oracle(pt):\n\n key = rand_bytes(16)\n iv = rand_bytes(16) # In case the mode is CBC. Generate this before\n # choosing the mode to protect against timing attacks.\n padded_pt = rand_bytes_range(5, 10) + pt + rand_bytes_range(5, 10)\n if random.randint(0, 1) == 0:\n # print True # Uncomment to check the oracle detector\n return aes_ecb_encrypt(key, padded_pt)\n else:\n # print False # Uncomment to check the oracle detector\n return aes_cbc_encrypt(key, padded_pt, iv)", "def test():\n\n block_size = 16\n\n # Test case 1: incorrect value < required:\n paddedMsg = b'ICE ICE BABY\\x03\\x03\\x03\\x03'\n remove_padding(paddedMsg, block_size)\n\n # Test caes 2: incorrect value > required:\n paddedMsg = b\"ICE ICE BABY\\x05\\x05\\x05\\x05\" \n remove_padding(paddedMsg, block_size)\n\n # Test case 3: incorrect length:\n paddedMsg = b\"ICE ICE BABY\\x04\\x04\\x04\"\n remove_padding(paddedMsg, block_size)\n\n # Test case 4: variable numbers:\n paddedMsg = b\"ICE ICE BABY\\x01\\x02\\x03\\x04\"\n remove_padding(paddedMsg, block_size)\n\n # Test case 5: correct padding \n paddedMsg = b\"ICE ICE BABY\\x04\\x04\\x04\\x04\"\n remove_padding(paddedMsg, block_size)", "def pad(msg):\n return msg + (BLOCK_SIZE - len(msg)) * PADDING", "def pad_encoded_text(self, encoded_text):\n\n\t\textra_padding = 8 - len(encoded_text) % 8#calculmaos cuanto falta por agregar\n\t\tfor i in range(extra_padding):\n\t\t\tencoded_text += \"0\"\n\n\t\tpadded_info = \"{0:08b}\".format(extra_padding)#le agregamos una informacion adicionar la cual utilizaremos despues al comprimir para saber cuantos 0 le agregamos y despues poder eliminarlos\n\t\tencoded_text = padded_info + encoded_text\n\t\treturn encoded_text", "def pad(s):\n\ttry:\n\t\tfrom Cryptodome.Cipher import AES\n\texcept ImportError:\n\t\tfrom Crypto.Cipher import AES\n\treturn s + b\"\\0\" * (AES.block_size - len(s) % AES.block_size)", "def pad_to_blocksize(string, blocksize=AES_BSZ, leftpad=False, pad_char=None):\n if len(string) % blocksize == 0:\n return string\n length = len(string) + blocksize - (len(string) % blocksize)\n return pad_to_len(string, length, leftpad=leftpad, pad_char=pad_char)", "def remove_padding(paddedMsg, block_size): \n try:\n if not valid_padding(paddedMsg, block_size):\n raise ValueError\n except ValueError:\n print(f\"{ paddedMsg } has invalid PKCS#7 padding.\")\n return\n \n last_byte = paddedMsg[-1]\n unpadded = paddedMsg[:-last_byte]\n print(f\"Padding removed successfully...\")\n print(f\"Before padding removal: { paddedMsg }\")\n print(f\"After padding removal: { unpadded }\")", "def un_pkcs_1_5(b: int, size: int) -> bytes:\n unpadded = 
b.to_bytes(size, \"big\")\n\n if not (unpadded[0] == 0x00 and unpadded[1] == 0x02):\n raise BadPaddingException\n unpadded = unpadded[2:]\n\n i = 0\n while unpadded[i] == 0xff:\n i += 1\n unpadded = unpadded[i:]\n\n if not (unpadded[0] == 0x00):\n raise BadPaddingException\n\n unpadded = unpadded[1:]\n return unpadded", "def find_prefix_length(encryption_oracle, block_length):\n\n # Encrypt two different ciphertexts\n ciphertext_a = encryption_oracle('A')\n ciphertext_b = encryption_oracle('B')\n\n # Find their common length\n common_len = 0\n while ciphertext_a[common_len] == ciphertext_b[common_len]:\n common_len += 1\n\n # Make sure that the common length is multiple of the block length\n common_len = int(common_len / block_length) * block_length\n\n # Try to add an increasing number of common bytes to the plaintext till they until\n # the two ciphertexts will have one extra identical block\n for i in range(1, block_length + 1):\n ciphertext_a = encryption_oracle('A' * i + 'X')\n ciphertext_b = encryption_oracle('A' * i + 'Y')\n\n # If there is one more identical block, it will mean that by adding i bytes\n # we made the common input (including prefix) to the same length multiple of\n # a block size. Then we can easily get the length of the prefix.\n if ciphertext_a[common_len:common_len + block_length] == ciphertext_b[common_len:common_len + block_length]:\n return common_len + (block_length - i)", "def calculate_message_authentication_code_cbc(\n key: bytes,\n additional_data: bytes,\n payload: bytes = b\"\",\n block_0: bytes = bytes(16),\n) -> bytes:\n blocks = (\n block_0 + len(additional_data).to_bytes(2, \"big\") + additional_data + payload\n )\n y_cipher = Cipher(algorithms.AES(key), modes.CBC(bytes(16)))\n y_encryptor = y_cipher.encryptor()\n y_blocks = (\n y_encryptor.update(byte_pad(blocks, block_size=16)) + y_encryptor.finalize()\n )\n # only calculate, no ctr encryption\n return y_blocks[-16:]" ]
[ "0.718337", "0.7012611", "0.6525299", "0.63634807", "0.6312443", "0.62574965", "0.6204864", "0.61596626", "0.6152799", "0.6040315", "0.5880037", "0.58678126", "0.581205", "0.5769569", "0.57621455", "0.57519305", "0.572222", "0.5710781", "0.5623784", "0.56190366", "0.5612416", "0.55954885", "0.5574591", "0.5532973", "0.5524964", "0.5474486", "0.54722595", "0.54592836", "0.54476595", "0.5352203" ]
0.7988467
0
Reads the file from fin and saves it in WAV format to fout.
def convert_to_wav(fin, fout):
    temp = subprocess.run(["ffmpeg", "-i", fin, fout],
                          stdout=subprocess.PIPE, stderr=subprocess.PIPE)
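As written, the function captures ffmpeg's output but never checks its exit status, so a failed conversion passes silently. A hedged variant (the file names below are illustrative, not taken from the dataset) that surfaces failures:

import subprocess

def convert_to_wav_checked(fin, fout):
    # check=True raises CalledProcessError when ffmpeg exits with a non-zero status.
    subprocess.run(["ffmpeg", "-i", fin, fout],
                   stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True)

convert_to_wav_checked("speech.mp3", "speech.wav")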
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _to_wav(self):\n self._status = 0\n fname = fm.file2wav(self.get_filename()) \n if fname != self.get_filename(): # can change the name\n self._set_filename(fname) # in case of wave transcoding\n self._status = 1", "def convert_wav(src_wav, dst_wav, subtype='PCM_16'):\n assert os.path.exists(src_wav), \"{} not exists!\".format(src_wav)\n data, sr = soundfile.read(src_wav)\n soundfile.write(dst_wav, data, sr, subtype=subtype)", "def write_wav(self, full_out_file = None):\n\n if full_out_file is None:\n \n (out_file, out_dir) = misc.save_file(FilterSpec='*.wav', DialogTitle='Write sound to ...', \n DefaultName='')\n full_out_file = os.path.join(out_dir, out_file)\n if full_out_file is None:\n print('Output discarded.')\n return 0\n else:\n full_out_file = os.path.abspath(full_out_file)\n (out_dir , out_file) = os.path.split(full_out_file)\n\n write(str(full_out_file), int(self.rate), self.data)\n print('Sounddata written to ' + out_file + ', with a sample rate of ' + str(self.rate))\n print('OutDir: ' + out_dir)\n \n return full_out_file", "def save_wav(file_name, signal, fs):\n wavfile.write(file_name, fs, np.int16(signal/np.max(np.abs(signal)) * (2**(16)/2-1)))", "def create_wav_file(self, ):\n\n f_out = open(self.wav_file, 'w')\n u_utt2spk = open(self.utt2spk, 'w')\n for file in glob.glob(self.wav_folder+'/*.wav'):\n base = os.path.basename(file).split('.')[0]\n # write to scp file\n f_out.write(base + '\\t' + file + '\\n')\n u_utt2spk.write(base + '\\t' + 'tts' + '\\n')", "def mono(filename,wout=True):\n n, data, data_dB,sr,ch=inputwav(filename)\n if ch==2:\n print('Converting to mono...')\n L=data[:,0]\n R=data[:,1]\n n=len(data)\n data_m=np.zeros((n,1))\n data_m=L/2.0+R/2.0\n if wout==True:\n print('Exporting...')\n sf.write(filename[0:len(filename)-4]+'_mono.wav',data_m,sr,'PCM_16')\n print('Done!')\n return data_m\n else:\n print( \"Error: input is already mono stoooooooooopid!\")", "def _create_wave_file(self):\n is_wave_open = False\n try:\n wv = wave.open(self.audio_file_name, mode='wb')\n is_wave_open = True\n wv.setparams((1, # 1 channel (mono)\n 2, # 2 bytes per sample * 1 channel\n self.sample_rate,\n 0, # Initial number of samples.\n 'NONE',\n 'not compressed'))\n wv.writeframes(self.sample_buffer)\n except:\n print('Error creating audio file')\n if is_wave_open:\n wv.close()", "def read_sound(self, inFile):\n\n # Python can natively only read \"wav\" files. To be flexible, use \"ffmpeg\" for conversion for other formats\n if not os.path.exists(inFile):\n print('{0} does not exist!'.format(inFile))\n raise FileNotFoundError\n \n (root, ext) = os.path.splitext(inFile)\n if ext[1:].lower() != 'wav':\n if self.ffmpeg_info.ffmpeg == None:\n print('Sorry, need FFMPEG for non-WAV files!')\n self.rate = None\n self.data = None\n raise NoFFMPEG_Error\n \n outFile = root + '.wav'\n cmd = [self.ffmpeg_info.ffmpeg, '-i', inFile, outFile, '-y']\n subprocess.run(cmd)\n print('Infile converted from ' + ext + ' to \".wav\"')\n \n inFile = outFile\n self.source = outFile\n\n self.rate, self.data = read(inFile)\n \n # Set the filename\n self.source = inFile\n \n # Make sure that the data are in some integer format\n # Otherwise, e.g. 
Windows has difficulty playing the sound\n # Note that \"self.source\" is set to \"None\", in order to\n # play the correct, converted file with \"play\"\n if not np.issubdtype(self.data.dtype, np.integer):\n self.generate_sound(self.data, self.rate)\n \n self._setInfo()\n print('data read in!')", "def read_wav_file(wave_file):\n return wavfile.read(wave_file)", "def wavwrite(fname, Fs, xt):\n # convert to np.int16 data type\n xt = np.array((2**15-1)*xt, np.int16)\n sio_wav.write(fname, Fs, xt)", "def load_wav(file_name):\n fs, signal = wavfile.read(file_name)\n signal = np.float32(signal) / (2**(16)/2-1)\n return fs, signal", "def _save_wav(buff, data, rate) -> None:\n # Code inspired from `IPython.display.Audio`\n data = np.array(data, dtype=float)\n\n bit_depth = 16\n max_sample_value = int(2**(bit_depth - 1)) - 1\n\n num_channels = data.shape[1] if len(data.shape) > 1 else 1\n scaled = np.int16(data / np.max(np.abs(data)) * max_sample_value)\n # The WAVE spec expects little-endian integers of \"sampwidth\" bytes each.\n # Numpy's `astype` accepts array-protocol type strings, so we specify:\n # - '<' to indicate little endian\n # - 'i' to specify signed integer\n # - the number of bytes used to represent each integer\n # See: https://numpy.org/doc/stable/reference/arrays.dtypes.html\n encoded_wav = scaled.astype(f'<i{bit_depth // 8}', copy=False).tobytes()\n\n with wave.open(buff, mode='wb') as waveobj:\n waveobj.setnchannels(num_channels)\n waveobj.setframerate(rate)\n waveobj.setsampwidth(bit_depth // 8)\n waveobj.setcomptype('NONE', 'NONE')\n waveobj.writeframes(encoded_wav)", "def write_data_to_wav(self, file_name: str, data):\r\n # apply scale and convert to int16\r\n data = np.int16(data/np.max(np.abs(data)) * self.wav_scale)\r\n # write to file\r\n write(file_name, self.audio_sample_rate, data)\r\n print('Sound ', file_name, ' has been saved')", "def __save(self,audio):\n self.__openSampleFile()\n self.__sampleFile.writeframes(audio)", "def raw_to_wav(data, path, rate=44100):\n wavfile.write(path, rate, data)", "def wav_wav(orig, dest, **_kwargs):\n\n # options = kwargs.get(\"tree\").cmd_options.get(\"options\", [])\n\n # first demux it to 16 bit 48khz\n dest_list = []\n for index, orig_elem in enumerate(tools.get_iter(orig)):\n tmp_dest = os.path.join(\n os.path.dirname(dest),\n \"{0}_{1}\".format(index, os.path.basename(dest)))\n cmd = \"ffmpeg -i {orig} -acodec pcm_s16le -ar 48000 {dest}\".format(\n dest=tmp_dest,\n orig=orig_elem)\n logger.debug(cmd)\n try:\n subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as error:\n logger.error(error)\n logger.error(tools.to_unicode(error.output))\n continue\n dest_list.append(tmp_dest)\n\n if len(dest_list) > 1:\n cmd = \"sox {orig} {dest}\".format(\n orig=\" \".join(orig),\n dest=dest)\n logger.debug(cmd)\n try:\n subprocess.check_call(cmd, shell=True)\n except subprocess.CalledProcessError as error:\n logger.error(error)\n logger.error(tools.to_unicode(error.output))\n else:\n os.rename(dest_list[0], dest)\n return dest", "def write_wav(fname, samps, sampling_rate=16000, normalize=True):\n\t# for multi-channel, accept ndarray [Nsamples, Nchannels]\n\tif samps.ndim != 1 and samps.shape[0] < samps.shape[1]:\n\t\tsamps = np.transpose(samps)\n\t\tsamps = np.squeeze(samps)\n\t# same as MATLAB and kaldi\n\tif normalize:\n\t\tsamps = samps * MAX_INT16\n\t\tsamps = samps.astype(np.int16)\n\tfdir = os.path.dirname(fname)\n\tif fdir and not os.path.exists(fdir):\n\t\tos.makedirs(fdir)\n\t# 
NOTE: librosa 0.6.0 seems could not write non-float narray\n\t# so use scipy.io.wavfile instead\n\twavfile.write(fname, sampling_rate, samps)", "def write_wav(filename, data, rate = 44100):\r\n \r\n # Compress the data (the input format is likely to be float64)\r\n # Make sure that the format is readable by Librosa\r\n maxv = np.iinfo(np.int16).max\r\n lb_write_wav(filename, (data * maxv).astype(np.int16), rate) \r\n \r\n return(None)", "def downmixWAV(self, wavf: str) -> None:\n # HACK: https://github.com/jiaaro/pydub/issues/129\n # FIXME: a reliable method to get number of wav channels\n multichannel = True\n try:\n w = wave.open(wavf, 'rb')\n if w.getnchannels() < 3:\n multichannel = False\n w.close()\n except Exception:\n pass\n if multichannel:\n newwavf = wavf[:-4] + \"-stereo.wav\"\n FNULL = open(os.devnull, 'w')\n subprocess.call(['ffmpeg', '-y', '-i', wavf, '-c:a', 'pcm_s24le', '-ac', '2', newwavf], stdout=FNULL, stderr=FNULL)\n FNULL.close()\n os.remove(wavf)\n os.rename(newwavf, wavf)", "def play_audio_file(self, fname=DETECT_DONG):\n ding_wav = wave.open(fname, 'rb')\n ding_data = ding_wav.readframes(ding_wav.getnframes())\n # with no_alsa_error():\n audio = pyaudio.PyAudio()\n stream_out = audio.open(\n format=audio.get_format_from_width(ding_wav.getsampwidth()),\n channels=ding_wav.getnchannels(),\n rate=ding_wav.getframerate(), input=False, output=True)\n stream_out.start_stream()\n stream_out.write(ding_data)\n time.sleep(0.2)\n stream_out.stop_stream()\n stream_out.close()\n audio.terminate()", "def save(self, fname, master_volume=1.):\n \n # first pass - find max amplitude value to normalise output\n vmax = 0.\n for c in range(len(self.out_channels)):\n vmax = max(\n abs(self.out_channels[str(c)].values.max()),\n abs(self.out_channels[str(c)].values.min()),\n vmax\n )\n\n # normalisation for conversion to int32 bitdepth wav\n norm = master_volume * (pow(2, 31)-1) / vmax\n\n # setup array to house wav stream data \n chans = np.zeros((self.out_channels['0'].values.size,\n len(self.out_channels)), dtype=\"int32\")\n \n # normalise and collect channels into a list\n for c in range(len(self.out_channels)):\n vals = self.out_channels[str(c)].values\n chans[:,c] = (vals*norm).astype(\"int32\")\n \n # finally combine and write out wav file\n wavfile.write(fname, self.samprate, chans)\n print(f\"Saved {fname}\")", "def save_audio(self, name=DEFAULT_OUT_NAME):\n print(\"Saving...\")\n wf = wave.open(name+'.wav', 'wb')\n wf.setnchannels(DEFAULT_CHANNELS)\n wf.setsampwidth(self.audio.get_sample_size(DEFAULT_FORMAT))\n wf.setframerate(DEFAULT_RATE)\n wf.writeframes(b''.join(self.frames))\n wf.close()\n print('Saved')", "def save_wfm(self, source, dest):\n self.bus.write('SAV:WAVE %s,%s' % (source, dest))", "def convert_to_wav (filename, name, origpath, wavpath, mono):\n print(\"Converting {0} to .wav...\".format(filename))\n if not re.match(r\".*_\\d+$\",name):\n # If filenames do include video titles\n name = name.rsplit('_',1)[0]\n\n channel, vid_num = name.rsplit('_', 1)\n channel = re.sub(r'[^A-Za-z1-9]', '', channel)\n newname = '_'.join([channel, vid_num])\n\n exportname = newname + \".wav\"\n filepath = path.join(origpath, filename)\n\n if not path.exists(wavpath):\n makedirs(wavpath)\n exportPath = path.join(wavpath, exportname)\n sound = AudioSegment.from_file(filepath,\"mp4\")\n if mono == True:\n sound = sound.set_channels(1)\n sound.export(exportPath, format=\"wav\")", "def analyzeWAV(inputFile):\n data, fs, nbits = audiolab.wavread(inputFile)\n samplingRate = fs\n return 
[data, samplingRate]", "def save_sound(filename,sound,sample_freq,num_channels):\n #open a wave file in write ('w') mode, this will create the file\n file=wave.open(filename,'w')\n #set the framerate aka sample frequency\n file.setframerate(sample_freq)\n #set the number of the channels\n file.setnchannels(num_channels)\n #the size of the one sample in bytes\n file.setsampwidth(2)\n #write the actual sound to the file, notice the call to get_raw\n file.writeframesraw(sound.get_raw())\n file.close()", "def single_analyze_wav(self, filePath):\n\n tChopped, vChopped, fVals,\\\n powerFFT, peakFreqs, peakAmps = Utils.AnalyzeFFT(filePath, tChop=self.settings['processing']['tChop'],\n detail=self.settings['processing']['detail'])\n\n self.analyzeDone.emit(tChopped, vChopped, fVals, powerFFT, peakFreqs, peakAmps, filePath)\n self.update_table(peakFreqs, peakAmps)", "def snip(filename,s,e,wout=True):\n n, data, data_dB,sr,ch=inputwav(filename)\n st=int(s*44100)\n en=int(e*44100)\n data_s=data[st:en,:]\n if wout==True:\n print('Exporting...')\n sf.write(filename[0:len(filename)-4]+'_snipped.wav',data_s,sr,'PCM_16')\n print('Done!')\n return data_s", "def wavwrite(y, fs, filename):\n\n\tx = copy.deepcopy(y) # copy array\n\tx *= INT16_FAC # scaling floating point -1 to 1 range signal to int16 range\n\tx = np.int16(x) # converting to int16 type\n\twrite(filename, fs, x)", "def save_wav(data, file_path, sample_rate):\n if np.issubdtype(data.dtype, np.floating):\n data = data.astype(np.float32)\n elif data.dtype not in [np.int32, np.int16, np.uint8]:\n raise ValueError(f'wavfile data must be np.float*, np.int32, np.int16, or np.uint8, got {data.dtype}')\n\n wavfile.write(file_path, sample_rate, data)" ]
[ "0.6910893", "0.65897757", "0.6502043", "0.64688486", "0.6411844", "0.63927984", "0.62594163", "0.6213807", "0.61818516", "0.61807996", "0.6094217", "0.6085368", "0.6054992", "0.60524696", "0.603306", "0.602905", "0.6027152", "0.6003981", "0.59826803", "0.5968395", "0.5954145", "0.59341556", "0.5928608", "0.5927191", "0.589333", "0.5848692", "0.5836656", "0.58010644", "0.57882166", "0.57799053" ]
0.73223794
0
Sets the message data business_id to a specific value.
def step_impl_the_ru_is_set_to(context, business_id):
    context.bdd_helper.message_data["business_id"] = business_id
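The context argument and the step_impl naming suggest a behave-style BDD step definition. A sketch of how such a step might be registered, assuming the behave framework and a hypothetical step phrase ("ru" is read here as "reporting unit", which is only a guess):

from behave import given

@given('the reporting unit is set to business id "{business_id}"')
def step_impl_the_ru_is_set_to(context, business_id):
    context.bdd_helper.message_data["business_id"] = business_id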
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def business_id(self, business_id):\n\n self._business_id = business_id", "def business_phone(self, business_phone):\n\n self._business_phone = business_phone", "def business_phone_number(self, business_phone_number):\n\n self._business_phone_number = business_phone_number", "def bus_ob_id(self, bus_ob_id):\n\n self._bus_ob_id = bus_ob_id", "def bus_ob_id(self, bus_ob_id):\n\n self._bus_ob_id = bus_ob_id", "def _set_id(self, value):\n pass", "def business_email(self, business_email):\n\n self._business_email = business_email", "def mailing_id(self, val: str):\n self._mailing_id = val", "def message_id(self, val: str):\n self._message_id = val", "def business_model(self, business_model):\n\n self._business_model = business_model", "def business_account(self, business_account):\n\n self._business_account = business_account", "def update_business(current_user, businessId):\n business = Business.query.get(int(businessId))\n\n if not business:\n return make_json_reply('message', 'Business id does not exist'), 404\n\n if business.user_id != current_user.id:\n return make_json_reply('message', 'Cannot update business'), 400\n\n data = request.get_json(force=True)\n name = location = category = description = None\n\n if 'name' in data.keys():\n name = data['name']\n\n if 'location' in data.keys():\n location = data['location']\n\n if 'category' in data.keys():\n category = data['category']\n\n if 'description' in data.keys():\n description = data['description']\n\n if check_validity_of_input(name=name):\n business.name = name\n\n if check_validity_of_input(location=location):\n business.location = location\n\n if check_validity_of_input(category=category):\n business.category = category\n\n if check_validity_of_input(description=description):\n business.description = description\n\n db.session.add(business)\n\n return make_json_reply(\n 'message', 'Successfully updated business ' + business.name), 200", "def set_company_id_value(self, company_id_value):\n self.company_id_value = company_id_value", "def id(self, value):\n self._id = value", "def set_bribe(self, bribe_amount):\r\n self.bribe = bribe_amount", "def business_owner(self, business_owner):\n\n self._business_owner = business_owner", "def set_id(self, id):\n self.data['id'] = id", "def bus_ob_rec_id(self, bus_ob_rec_id):\n\n self._bus_ob_rec_id = bus_ob_rec_id", "def is_business(self, is_business):\n\n self._is_business = is_business", "def setB(self, b):\n\t\tself.b = int(b)", "def content_id(self, value):\n self._content_id = value", "def id_bandeira(self, id_bandeira):\n self._id_bandeira = id_bandeira", "def id(self, value: str):\n self._id = value", "def _set_person_id(cls, data):\n document_id = data.get(\"_id\")\n if document_id:\n data[\"person_id\"] = document_id\n return data", "def set_CallbackID(self, value):\n super(GetCallbackDataInputSet, self)._set_input('CallbackID', value)", "def test_0_1_create_id(self):\n\n self.b1.id = 5\n self.assertEqual(self.b1.id, 5)", "def id(self, value): # pylint: disable=invalid-name\n self._write(MX_ID, value)", "def id(self, _id):\n self.metadata[\"id\"] = _id", "def _setValue( self, client, value ):\n\t\treturn client.setValue( self.schema, value )", "def set_bid(self, bid):\n self.__bid = bid" ]
[ "0.7509327", "0.6467309", "0.63252246", "0.6016918", "0.6016918", "0.59384924", "0.58963674", "0.5866777", "0.5703364", "0.5699451", "0.5621401", "0.56115365", "0.55243134", "0.54495156", "0.5436615", "0.5346349", "0.53436536", "0.53373826", "0.5327274", "0.5308453", "0.5226442", "0.51390165", "0.5138292", "0.5130027", "0.50929534", "0.5053407", "0.5048454", "0.50431615", "0.5033273", "0.49865723" ]
0.7794713
0
Imports a submission from deviantArt. Ignores Flash content. Uses a combination of the DA backend and HTML scraping.
def import_submission(self, submission: praw.objects.Submission) -> dict:
    try:
        if self.regex_direct.match(urlsplit(submission.url).netloc):
            r = requests.head(submission.url, headers=self.headers)
            mime_text = r.headers.get('Content-Type')
            mime = mimeparse.parse_mime_type(mime_text)
            if mime[0] == 'image':
                self.log.debug('DA link is a direct image')
                data = {'author': 'An unknown DA author',
                        'source': submission.url,
                        'import_urls': [submission.url],
                        'importer_display':
                            {'header': 'Mirrored deviantArt image '
                                       'by an unknown author:\n\n'}}
                return data

        if not self.regex.match(urlsplit(submission.url).netloc):
            return None

        query_url = 'http://backend.deviantart.com/oembed?{}'.format(
            urlencode({'format': 'json', 'url': submission.url}))
        self.log.debug('%s is valid DA url.', submission.url)
        self.log.debug('Querying DA API %s', query_url)
        response = json.loads(self.read_url(query_url))
        if response['type'] not in ('link', 'photo'):
            self.log.debug('Response is not link or photo')
            return None
        self.log.debug('Author name: %s', response['author_name'])

        # Using the official DA API
        data = {'author': response['author_name'],
                'source': submission.url,
                'importer_display':
                    {'header': 'Mirrored deviantArt image by the author "{}":\n\n'.format(
                        response['author_name'])}}
        if response['type'] == 'link':
            data['import_urls'] = [response['fullsize_url']]
            self.log.debug('Found DA API url %s', data['import_urls'])

        try:
            # Trying to scrape manually
            bs = BeautifulSoup(self.read_url(submission.url))
            # Checking for flash animation, because mirroring a preview
            # for a flash animation is stupid
            is_flash = bool(bs.select('iframe[class~=flashtime]'))
            is_madefire = bool(bs.select('iframe[class~=madefire-player]'))
            if is_flash or is_madefire:
                self.log.info('DA url is flash, no preview needed.')
                return None
            # Seems to alternate between the two
            full_view = (bs.select('img[class~=fullview]') or
                         bs.select('img[class~=dev-content-full]'))
            if full_view:
                full_url = full_view[0]['src']
                self.log.debug('Found full DA image url: %s', full_url)
                data['import_urls'] = [full_url]
        except Exception as e:
            self.log.error(traceback.format_exc())

        if 'import_urls' not in data:
            self.log.debug('No url found for DA image.')
            return None
        return data
    except Exception as e:
        self.log.error('Deviantart Error: %s', traceback.format_exc())
        return None
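The method's main external dependency is deviantArt's oEmbed backend. A minimal standalone sketch of that lookup (the endpoint URL and the 'type', 'author_name', and 'fullsize_url' response fields come from the code above; the function name and the use of urllib are illustrative):

import json
from urllib.parse import urlencode
from urllib.request import urlopen

def deviantart_oembed(page_url):
    # Ask deviantArt's oEmbed backend to describe a submission page as JSON.
    query = urlencode({'format': 'json', 'url': page_url})
    with urlopen('http://backend.deviantart.com/oembed?{}'.format(query)) as resp:
        return json.loads(resp.read().decode('utf-8'))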
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def scrape_submission(submission_url):\n\n\t'''\n\tScrape Data\n\t'''\n\n\t# Get submission dict\n\tsubmission_dict = reddit.extract_post_data(submission_url=submission_url)\n\n\t# Get list of comments_dicts\n\tsubmission_object = submission_dict.get('submission_object')\n\tcomments_dict = reddit.extract_post_comments_data(submission_object)\n\n\t'''\n\tExit if no comments were extracted from the submission\n\t'''\n\n\tif not len(comments_dict.get('data')) > 0:\n\t\tlogger.info('Data extraction yielded zero comments. Aborting sentiment analysis and database insertion.')\n\t\treturn\n\n\t'''\n\tAnalyze Sentiment\n\t'''\n\n\t# Call sentimentanalysis to analyze the comments and append the dicts\n\tSentimentAnalysis.list_parser(comments_dict)\n\n\t'''\n\tInsert to Database\n\t'''\n\n\t# Create instance of database_manager\n\tdatabase_manager = DatabaseManager()\n\n\t# Check if submission exists\n\tif database_manager.check_submission_exists(submission_dict):\n\t\t# Delete the submission and associated data if exists\n\t\tdatabase_manager.delete_submission(submission_dict)\n\n\t# Insert new submission info into database\n\tnew_submission = database_manager.insert_submission(submission_dict)\n\n\t# Insert comments if submission inserted successfully\n\tif new_submission is not None:\n\t\tdatabase_manager.insert_comments(comments_dict, new_submission)\n\t\tdatabase_manager.insert_sentiment(comments_dict)\n\n\t# Returns submission_id\n\treturn submission_dict.get('id')", "def import_submission(self, submission: praw.objects.Submission) -> dict:\n try:\n if not self.regex.match(urlsplit(submission.url).netloc):\n return None\n data = {'author': 'a gyazo.com user',\n 'source': submission.url,\n 'importer_display':\n {'header': 'Imported gyazo.com image:\\n\\n'}}\n r = requests.head(submission.url, headers=self.headers)\n if r.status_code == 301:\n return None\n\n mime_text = r.headers.get('Content-Type')\n mime = mimeparse.parse_mime_type(mime_text)\n # If we're already given an image...\n if mime[0] == 'image':\n # Use the already given URL\n image_url = submission.url\n else:\n # Otherwise, use the gyazo oEmbed API.\n response = requests.get(\n 'https://api.gyazo.com/api/oembed/',\n {'url': submission.url},\n headers=self.headers).json()\n if response.get('type') == 'photo':\n image_url = response.get('url')\n else:\n # This is something that is not a photo. 
Do not scrape.\n return None\n\n assert image_url\n data['import_urls'] = [image_url]\n return data\n except Exception:\n self.log.error('Could not import gyazo URL %s (%s)',\n submission.url, traceback.format_exc())\n return None", "def import_content(self,\n lbl_feedback=None, import_status=None, progbar=None):\n debug = False\n if lbl_feedback is None: lbl_feedback = importer.DummyLabel()\n if import_status is None:\n import_status = importer.dummy_import_status.copy()\n if progbar is None: progbar = importer.DummyProgBar()\n faulty2missing_fld_list = []\n large = True\n if not self.headless:\n wx.BeginBusyCursor()\n ## Use up 2/3rds of the progress bar in initial step (parsing html and\n ## then extracting data from it) and 1/3rd adding to the SQLite database.\n prog_steps_for_xml_steps = mg.IMPORT_GAUGE_STEPS*(2.0/3.0)\n prog_step1 = prog_steps_for_xml_steps/5.0 ## to encourage them ;-)\n prog_step2 = prog_steps_for_xml_steps/2.0\n tree = ods_reader.get_contents_xml_tree(\n self.fpath, lbl_feedback, progbar, prog_step1, prog_step2)\n tbl = ods_reader.get_tbl(tree)\n ok_fldnames = ods_reader.get_ok_fldnames(tbl, ROWS_TO_SAMPLE,\n has_header=self.has_header, headless=self.headless)\n if not ok_fldnames:\n raise Exception(_('Unable to extract or generate field names'))\n ## Will expect exactly the same number of fields as we have names for.\n ## Have to process twice as much before it will add another step on bar.\n fldtypes, rows = ods_reader.get_ods_dets(progbar, tbl,\n ok_fldnames, faulty2missing_fld_list, prog_steps_for_xml_steps,\n next_prog_val=prog_step2,\n has_header=self.has_header, headless=self.headless)\n if debug:\n if large:\n print(f'{rows[:20]}')\n else:\n print(f'{rows}')\n default_dd = getdata.get_default_db_dets()\n rows_n = len(rows)\n items_n = rows_n*3 ## pass through it all 3 times (parse, process, save)\n steps_per_item = importer.get_steps_per_item(items_n)\n gauge_start = prog_steps_for_xml_steps\n try:\n feedback = {mg.NULLED_DOTS_KEY: False}\n importer.add_to_tmp_tbl(\n feedback, import_status,\n default_dd.con, default_dd.cur,\n self.tblname, ok_fldnames, fldtypes,\n faulty2missing_fld_list, rows,\n progbar, rows_n, steps_per_item, gauge_start,\n has_header=self.has_header, headless=self.headless)\n importer.tmp_to_named_tbl(default_dd.con, default_dd.cur,\n self.tblname, progbar, feedback[mg.NULLED_DOTS_KEY],\n headless=self.headless)\n except Exception:\n importer.post_fail_tidy(progbar, default_dd.con, default_dd.cur)\n raise\n default_dd.cur.close()\n default_dd.con.commit()\n default_dd.con.close()\n progbar.SetValue(0)\n lib.GuiLib.safe_end_cursor()", "def _upload(self):\r\n loc = os.path.dirname(__file__)\r\n del_file = open(os.path.join(loc, 'delicious.html'))\r\n res = self.app.post(\r\n '/admin/import',\r\n params={'api_key': self.api_key},\r\n upload_files=[('import_file',\r\n 'delicious.html',\r\n del_file.read())],\r\n )\r\n return res", "def submission():\n\n # @ToDo: Something better than this crude check\n if not auth.s3_logged_in():\n auth.permission.fail()\n\n from io import StringIO\n import cgi\n from lxml import etree\n\n source = request.post_vars.get(\"xml_submission_file\", None)\n if isinstance(source, cgi.FieldStorage):\n if source.filename:\n xmlinput = source.file\n else:\n xmlinput = source.value\n\n if isinstance(xmlinput, str):\n xmlinput = StringIO(xmlinput)\n elif request.env.request_method == \"HEAD\":\n raise HTTP(204)\n else:\n raise HTTP(400, \"Invalid Request: Expected an XForm\")\n\n tree = etree.parse(xmlinput)\n 
tablename = tree.getroot().tag\n\n resource = s3db.resource(tablename)\n\n stylesheet = os.path.join(request.folder, \"static\", \"formats\", \"odk\",\n \"import.xsl\")\n\n try:\n result = resource.import_xml(source=tree, stylesheet=stylesheet)\n except (IOError, SyntaxError):\n raise HTTP(500, \"Internal server error\")\n\n # Parse response\n status = json.loads(result)[\"statuscode\"]\n\n if status == \"200\":\n r = HTTP(201, \"Saved\") # ODK Collect only accepts 201\n r.headers[\"Location\"] = request.env.http_host\n raise r\n else:\n raise HTTP(status, result)", "def extract_src(session, file_name, submission_num):\n # Gets the HTML page for the submission page\n response = session.get(\"https://dmoj.ca/src/\" + submission_num + \"/raw\")\n with open(file_name, \"w\") as f:\n f.write(response.text)", "def experimentImport(request):\n if request.method == 'POST':\n form = ImportForm(request.POST, request.FILES)\n if form.is_valid():\n json_data = request.FILES['import_file'].read()\n ExperimentAdmin.importFromJSON(request, json_data)\n return redirect('/admin/experiments/experiment')\n form = ImportForm()\n return render(request, 'admin/experiments/import_form.html', {'form': form})", "def import_data(self, form):\n error_msg = \"\"\n \n try:\n # Set insert order\n columns = \"organization, contact, email, phone, data_url, \\\n project_name_short, project_name, project_description, timeline_start, timeline_finish, project_funder,\\\n data_target, location_description, site_count, data_collector, data_type, data_format, data_policies, \\\n keyword, other, location, shp_file\"\n \n # Gather submitted for values\n values = []\n # Source data\n values.append( '\"%s\"' % form.getvalue('organization') )\n values.append( '\"%s\"' % form.getvalue('contact') )\n values.append( '\"%s\"' % form.getvalue('email') )\n if form.getvalue('phone'):\n values.append( form.getvalue('phone') )\n else:\n values.append('NULL')\n values.append( '\"%s\"' % form.getvalue('source') )\n # Project data\n if len(form.getvalue('labelShort')) > 0:\n values.append( '\"%s\"' % form.getvalue('labelShort') )\n else:\n values.append( '\"%s\"' % form.getvalue('label') )\n values.append( '\"%s\"' % form.getvalue('label') )\n values.append( '\"%s\"' % form.getvalue('description') ) \n values.append( \"STR_TO_DATE('\"+ form.getvalue('timelineStart') +\"', '%m/%d/%Y')\" )\n values.append( \"STR_TO_DATE('\"+ form.getvalue('timelineFinish') +\"', '%m/%d/%Y')\" )\n values.append( '\"%s\"' % form.getvalue('funder') )\n # Meta data\n values.append( '\"%s\"' % form.getvalue('target') )\n values.append( '\"%s\"' % form.getvalue('locdescription') )\n values.append( form.getvalue('numsites') )\n values.append( '\"%s\"' % form.getvalue('collector') )\n values.append( '\"%s\"' % form.getvalue('datatype') )\n values.append( '\"%s\"' % form.getvalue('dataformat') )\n values.append( '\"%s\"' % form.getvalue('policies') )\n # Other Data\n values.append( '\"%s\"' % \" \".join(pattern.sub(' ', form.getvalue('keyword')).split()) )\n values.append( '\"%s\"' % form.getvalue('other') )\n # Shape file data \n zip_shp_file = form['shp_file'].file\n zip_shp_file_name = form.getvalue('shp_file') \n # Latitude/Longitude data\n lat = form.getvalue('lat')\n lng = form.getvalue('lng')\n \n # Build MySQL Geometry syntax\n locations = []\n json_data = \"\"\n if zip_shp_file_name:\n # Extract all files from compressed shapefile\n zip_shp_file_contents = zip_shp_file.read()\n with ZipFile(StringIO(zip_shp_file_contents), 'r') as zip_sf:\n temp_dir = 
mkdtemp(dir=self.base_dir+\"/tmp/\")\n zip_sf.extractall(path=temp_dir)\n path_to_shapefile = self.find_shapefile(temp_dir)\n \n #json_data = {'message':'DEBUG::Temp Dir:'+temp_dir}\n #self.return_message = json.dumps(json_data);\n #return\n \n # Set POLYGON GEOMETRY from shp file\n polygons,errors,warnings = self.set_poly_geo(path_to_shapefile[0]) \n \n # Regardless of errors process polygons\n for polygon in polygons:\n # Re-map polygon coordinates with spaces between lat and lng\n for idx, val in enumerate(polygon):\n # Reverse values so that latitude is first, then longitude\n val.reverse()\n polygon[idx] = \" \".join( map( str, val) )\n locations.append(\"GeomFromText('POLYGON((%s))')\" % (\",\".join(polygon)))\n \n # Send errors, if any\n errors_warnings = errors + warnings\n html_errors = \"<br>\".join(errors_warnings)\n json_data = {'message':html_errors}\n self.return_message = json.dumps(json_data);\n \n # If there are errors, warnings are OK, then return without inserting\n if len(errors) > 0: \n return\n elif lat and lng:\n # Set MySQL NULL value for shp contents\n zip_shp_file_contents = \"NULL\"\n # Set POINT GEOMETRY from latitude and longitude\n locations.append(\"GeomFromText('POINT(\"+lat+\" \"+lng+\")')\") \n else:\n json_data = {'message':'ERROR:: No Shape File nor Coordinates were found.'}\n self.return_message = json.dumps(json_data);\n return\n \n # For each location insert details into DB\n count = 0\n if len(locations) < 1:\n json_data = {'message':'ERROR:: Coordinates were not found.'}\n self.return_message = json.dumps(json_data);\n return\n \n for location in locations:\n if not location:\n json_data = {'message':'ERROR:: Empty location.'} \n self.return_message = json.dumps(json_data);\n return\n \n # Init reusable list to append location and shapefile\n locs_shps = []\n count = count+1\n \n # Build MySQL insert query\n locs_shps.append(location)\n locs_shps.append( '\"%s\"' % self.db.escape_string(zip_shp_file_contents) )\n \n insert_query = \"INSERT INTO calswim.GeoData (\"+columns+\") VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);\"\n insert_values = tuple(values+locs_shps)\n insert_query_with_values = insert_query % insert_values \n self.cursor.execute(insert_query_with_values)\n if json_data == \"\":\n json_data = {'message':'Data import successful'} \n \n # Commit queries\n self.db.commit()\n \n select_query = \"SELECT LAST_INSERT_ID() as id\"\n self.cursor.execute(select_query)\n row = self.cursor.fetchone()\n \n data_file = form['data_file']\n if data_file.filename:\n data_file_name = os.path.basename(data_file.filename) \n \n download_dir = self.base_dir +\"/downloads/\"+ str(row[0]) +\"/\" \n if not os.path.exists(download_dir):\n os.makedirs(download_dir)\n \n data_save_file = open(download_dir+data_file_name, \"w\")\n data_save_file.write(data_file.file.read())\n data_save_file.close\n \n update_query = \"\"\"UPDATE calswim.GeoData SET data_url=\"%(PATH)s\" WHERE gd_id=%(ID)s\"\"\" % {'PATH':\"/downloads/\"+ str(row[0]) +\"/\"+ data_file_name, 'ID':row[0]}\n self.cursor.execute(update_query) \n \n # Return JavaScript boolean to view \n self.return_message = json.dumps(json_data)\n except:\n e = sys.exc_info()[1]\n #json_data = {'message': error_msg+\" \"+str(e)}\n json_data = {'message': \"ERROR:: Please try again.\"} \n self.return_message = json.dumps(json_data)\n print >> self.errors, \"ERROR:: \"+error_msg+\" \"+str(e)\n \n # Delete temp files\n try:\n shutil.rmtree(temp_dir) # delete directory\n except:\n e = 
sys.exc_info()[1]\n print >> self.errors,\"ERROR:: \"+error_msg+\" \"+str(e)\n # Close DB connections \n self.cursor.close()", "def submission_old():\n response.headers[\"Content-Type\"] = \"text/xml\"\n xml = str(request.post_vars.xml_submission_file.value)\n if len(xml) == 0:\n raise HTTP(400, \"Need some xml!\")\n importxml(db, xml)\n r = HTTP(201, \"Saved.\")\n r.headers[\"Location\"] = request.env.http_host\n raise r", "def post(self) :\n\n self.msg = \"\"\n error = True\n importer = Importer(DataAccessor(self.addErrorMessage))\n\n try :\n target = self.request.POST.get('newFile').file.read()\n importer.parse(StringIO(target))\n\n except IOError :\n self.msg = \"Please select a valid file to import\"\n\n except Usage, err : \n self.msg = err.msg\n\n except AttributeError:\n self.msg = \"Please select a valid file to import\"\n\n if not self.msg : \n self.msg = 'Import was successful'\n error = False\n\n if len(self.msg) > 512 : \n self.msg = self.msg[0:512] + \"...\"\n \n setSessionMessageByRequest(self, self.msg, error)\n self.redirect('/admin')", "def import_submission(conn, submission):\n with conn.begin():\n mbids = []\n if submission['mbid']:\n mbids.append(submission['mbid'])\n if submission['puid']:\n min_duration = submission['length'] - 15\n max_duration = submission['length'] + 15\n mbids.extend(find_puid_mbids(conn, submission['puid'], min_duration, max_duration))\n logger.info(\"Importing submission %d with MBIDs %s\",\n submission['id'], ', '.join(mbids))\n matches = lookup_fingerprint(conn,\n submission['fingerprint'], submission['length'],\n TRACK_MERGE_TRESHOLD, FINGERPRINT_MERGE_TRESHOLD, fast=True)\n fingerprint = {\n 'id': None,\n 'track_id': None,\n 'fingerprint': submission['fingerprint'],\n 'length': submission['length'],\n 'bitrate': submission['bitrate'],\n 'source_id': submission['source_id'],\n 'format_id': submission['format_id'],\n }\n if matches:\n match = matches[0]\n logger.debug(\"Matches %d results, the top result (%s) is %d%% similar\",\n len(matches), match['id'], match['score'] * 100)\n fingerprint['track_id'] = match['track_id']\n if match['score'] > FINGERPRINT_MERGE_TRESHOLD:\n fingerprint['id'] = match['id']\n if not fingerprint['track_id']:\n fingerprint['track_id'] = insert_track(conn)\n logger.info('Added new track %d', fingerprint['track_id'])\n if not fingerprint['id']:\n fingerprint['id'] = insert_fingerprint(conn, fingerprint)\n logger.info('Added new fingerprint %d', fingerprint['id'])\n for mbid in mbids:\n if insert_mbid(conn, fingerprint['track_id'], mbid):\n logger.info('Added MBID %s to track %d', mbid, fingerprint['track_id'])\n update_stmt = schema.submission.update().where(\n schema.submission.c.id == submission['id'])\n conn.execute(update_stmt.values(handled=True))\n return fingerprint", "def test_72_bulk_epicollect_import_non_html(self, Mock, mock):\r\n html_request = FakeRequest('Not an application/json', 200,\r\n {'content-type': 'text/html'})\r\n Mock.return_value = html_request\r\n self.register()\r\n self.new_application()\r\n app = db.session.query(App).first()\r\n url = '/app/%s/tasks/import?template=csv' % (app.short_name)\r\n res = self.app.post(url, data={'epicollect_project': 'fakeproject',\r\n 'epicollect_form': 'fakeform',\r\n 'formtype': 'json'},\r\n follow_redirects=True)\r\n msg = \"Oops! 
That project and form do not look like the right one.\"\r\n assert msg in res.data", "def import_form(input_file):\n\n headers = {\n 'content-type': 'application/json',\n }\n\n data = input_file\n\n url = 'https://reactome.org/AnalysisService/import/form'\n\n try:\n response = requests.post(url=url, headers=headers, data=data)\n except ConnectionError as e:\n print(e)\n\n if response.status_code == 200:\n return response.json()\n else:\n print(\"Status code returned a value of %s\" % response.status_code)", "def prepare_submission(self):\n ## class Submit2Page\n if (self.form.has_key(\"pdbfile\") == False or \\\n self.form[\"pdbfile\"].file is None or \\\n self.form[\"pdbfile\"].value <= ' '):\n jobid = self.prepare_pdbid_entry()\n return jobid, False\n\n ## allocate a new JobID\n job_id = mysql.job_new()\n\n ## record user's IP address\n ip_addr = os.environ.get(\"REMOTE_ADDR\", \"Unknown\")\n mysql.job_set_remote_addr(job_id, ip_addr)\n\n ## read in all of the lines in the structure file\n infil = self.form[\"pdbfile\"].file\n line_list = []\n while True:\n ln = infil.readline()\n if not ln:\n break\n line_list.append(ln)\n\n ## proceed no further if there were not sufficient lines in uploaded\n ## structure file\n if len(line_list) < 10:\n webtlsmdd.remove_job(job_id)\n raise SubmissionException('Only Recieved %d lines of upload' % (\n len(line_list)))\n\n ## basic sanity checks (for non-via-pdb.org structures)\n run_mainchain_only = False\n r, tmpfile = check_upload(job_id, line_list, mainchain = False)\n if r != '':\n ## \"All atoms\" failed the sanity check. Let's try just the\n ## mainchain atoms.\n r, garbage = check_upload(job_id, line_list, mainchain = True)\n if r != '':\n ## No good. The structure failed both sanity checks.\n ## Can not proceed with this structure.\n raise SubmissionException(str(r))\n else:\n run_mainchain_only = True\n\n ## TODO: Figure out how to do this without webtlsmdd, 2009-05-29\n ## pass the PDB file to the application server\n result = webtlsmdd.set_structure_file(job_id, xmlrpclib.Binary(\"\".join(line_list)))\n if result != \"\":\n raise SubmissionException(result)\n\n return job_id, run_mainchain_only", "def import_bmarks(self):\r\n username = self.matchdict.get('username')\r\n\r\n # if auth fails, it'll raise an HTTPForbidden exception\r\n with ReqAuthorize(self.request):\r\n data = {}\r\n post = self.POST\r\n\r\n # We can't let them submit multiple times, check if this user has\r\n # an import in process.\r\n if ImportQueueMgr.get(username=username, status=NEW):\r\n # They have an import, get the information about it and shoot\r\n # to the template.\r\n return {\r\n 'existing': True,\r\n 'import_stats': ImportQueueMgr.get_details(\r\n username=username)\r\n }\r\n\r\n if post:\r\n # we have some posted values\r\n files = post.get('import_file', None)\r\n\r\n if hasattr(files, 'filename'):\r\n storage_dir_tpl = self.settings.get('import_files',\r\n '/tmp/bookie')\r\n storage_dir = storage_dir_tpl.format(\r\n here=self.settings.get('app_root'))\r\n\r\n out_fname = store_import_file(storage_dir, username, files)\r\n\r\n # Mark the system that there's a pending import that needs\r\n # to be completed\r\n q = ImportQueue(username, unicode(out_fname))\r\n DBSession.add(q)\r\n DBSession.flush()\r\n # Schedule a task to start this import job.\r\n tasks.importer_process.delay(q.id)\r\n\r\n return HTTPFound(\r\n location=self.request.route_url('user_import',\r\n username=username))\r\n else:\r\n data['error'] = [\"Please provide a file to import\"]\r\n\r\n 
return data\r\n else:\r\n # we need to see if they've got\r\n # just display the form\r\n return {\r\n 'existing': False\r\n }", "def step_3a(browser):\n browser.find_button_by_label('Importer').click()\n # write browser contents\n # with open('browser_contents', 'w') as f:\n # f.write(browser.contents)", "def step_1(browser):\n browser.click_on(\"Import depuis eComptes\".decode('utf8'))", "def test_one_import(self):\r\n self._login_admin()\r\n\r\n # Prep the db with 2 other imports ahead of this user's.\r\n # We have to commit these since the request takes place in a new\r\n # session/transaction.\r\n DBSession.add(ImportQueue(username=u'testing',\r\n file_path=u'testing.txt'))\r\n DBSession.add(ImportQueue(username=u'testing2',\r\n file_path=u'testing2.txt'))\r\n DBSession.flush()\r\n transaction.commit()\r\n\r\n res = self._upload()\r\n res.follow()\r\n\r\n # now let's hit the import page, we shouldn't get a form, but instead a\r\n # message about our import\r\n res = self.app.get('/admin/import')\r\n\r\n self.assertTrue('<form' not in res.body, \"We shouldn't have a form\")\r\n self.assertTrue(\r\n 'waiting in the queue' in res.body,\r\n \"We want to display a waiting message.\")\r\n self.assertTrue(\r\n '2 other imports' in res.body,\r\n \"We want to display a count message.\" + res.body)", "def test_import_upload(self):\r\n self._login_admin()\r\n\r\n # verify we get the form\r\n res = self.app.get('/admin/import')\r\n self.assertTrue(\r\n '<form' in res.body,\r\n 'Should have a form in the body for submitting the upload')\r\n\r\n res = self._upload()\r\n\r\n self.assertEqual(\r\n res.status,\r\n \"302 Found\",\r\n msg='Import status is 302 redirect by home, ' + res.status)\r\n\r\n # now verify that we've got our record\r\n imp = ImportQueueMgr.get_ready()\r\n imp = imp[0]\r\n self.assertTrue(imp, 'We should have a record')\r\n self.assertTrue(imp.file_path.endswith('admin.delicious.html'))\r\n self.assertEqual(imp.status, 0, 'start out as default status of 0')", "def download_submission_attachment(self, url):\n\n r = requests.get(url)\n return r.content", "def submitForm():\n try:\n rec = db.session.query(Rec)\n StockCode = request.form.get('prod')\n this_rec = Rec.query.filter_by(StockCode=StockCode).first()\n logger.debug(\"Successfully loaded data.\")\n return render_template('index.html', recs=rec, this_rec=this_rec)\n except:\n traceback.print_exc()\n logger.warning(\"Not able to get recommendations, error page returned\")\n return render_template('error.html')", "def build_submission(config):\n\n soup = bs(HTML_TEMPLATE, 'html5lib')\n\n # Set title\n title = soup.find('title')\n title.clear()\n my_title = 'A{} | {}'.format(config['assignment'], config['firstlast'])\n title.append(my_title)\n\n # Set course title\n course = soup.find(id='course')\n course.clear()\n if config['course'] == 446:\n course_title = 'ECSE 446: Realistic Image Synthesis'\n elif config['course'] == 546:\n course_title = 'ECSE 546: Advanced Image Synthesis'\n elif config['course'] == 598:\n course_title = 'COMP 598: Realistic Image Synthesis'\n else:\n raise ValueError('Error: invalid course number')\n course.append(course_title)\n\n # Set assignment number\n assignment = soup.find(id='assignment')\n assignment.clear()\n assignment.append('Assignment {}'.format(config['assignment']))\n\n # Set student's name\n name = soup.find(id='firstlast')\n name.clear()\n name.append(config['firstlast'])\n\n # Set student's ID\n id = soup.find(id='id')\n id.clear()\n id.append(bs('ID 
<code>{}</code>'.format(config['id']), 'html5lib'))\n\n # Create directory for convert images\n if not os.path.exists('pngs'):\n os.makedirs('pngs')\n\n # Insert all rendered scenes\n renders = soup.find(id='renders')\n for i, task in enumerate(config['renders']):\n slider = '<h1>{}</h1>\\n'.format(task['scene'])\n slider_title = task['scene'].replace(' ', '_').lower()\n convert_exr(task['render'], 'pngs/{}.png'.format(slider_title))\n slider += build_slider(slider_title)\n renders.append(bs(slider, 'html.parser'))\n\n # Save\n output = 'a{}_{}.html'.format(config['assignment'], config['id'])\n with open(output, 'w') as out_f:\n out_f.write(str(soup))", "def my_form_post():\r\n \r\n #get connector to mongodb database\r\n db = get_db()\r\n #fetch all articles from \r\n articles = get_articles(db)\r\n \r\n\r\n #extract the input from the form input field\r\n q = request.form['question']\r\n q = q.strip(\"?\").lower()\r\n \r\n\r\n \r\n #obtain metadata from a list of diabetes questions\r\n common_diabetes_questions_as_a_string, N, questions = collect_diabetes_questions(\"diabetes_questions.csv\")\r\n \r\n #obtain the query for the given question\r\n list_types, focus, re_focus,target, questionWords_and_tags = get_query(q,common_diabetes_questions_as_a_string,N )\r\n \r\n re_ans_type = \"\"\r\n for at in list_types:\r\n first_letter = at[0]\r\n re_ans_type = re_ans_type + \"[\"+first_letter.upper()+first_letter.lower()+\"]\" + at[1:] + \"|\"\r\n re_ans_type = re_ans_type.strip(\"|\")\r\n\r\n \r\n articles = db.articles.find({ \"article_title\": { \"$regex\": re_focus}, \"section_title\":{\"$regex\":re_ans_type} })\r\n \r\n\r\n #from multiprocessing import Process\r\n #p = Process(target=extract_information, args=(q,list_types,focus,target, common_diabetes_questions_as_a_string, N,articles))\r\n #obtain the best answer after ranking several passages using the query returned above and other features\r\n ans = extract_information(q,list_types,focus,target, common_diabetes_questions_as_a_string, N,articles)\r\n \r\n \r\n \r\n\r\n return \"\"\"<!DOCTYPE html>\r\n<html>\r\n\t<head>\r\n\t\t\r\n\t\t\r\n\t</head>\r\n\t\r\n\t<body>\r\n\t\t<header>\r\n <STYLE type=\"text/css\">\r\n \r\n header {font-family: Georgia}\r\n \r\n .heading{\r\n text-align: center;\r\n font-family: Georgia;\r\n color: white ;\r\n text-align:center;\r\n font-size: large ;\r\n }\r\n\r\n .second{\r\n font-size: large;\r\n }\r\n \r\n body{\r\n background-color: #1E90FF;\r\n color: white ;\r\n font-size: large ;\r\n text-align:center;\r\n }\r\n\r\n .buttons{\r\n padding: 12px 12px;\r\n cursor: pointer;\r\n text-align: center;\r\n font-size: 16px ;\r\n \r\n \r\n background-color: white;\r\n \r\n border-radius: 12px;\r\n\r\n }\r\n \r\n </STYLE>\r\n\t\t\t \r\n\t </header>\r\n\t\t\r\n\t\t<div id = \"content\"><img src=\"/static/medwhat4.bmp\" align = \"left\"/><br/><br/><br/><br/>\r\n <div class = \"heading\">MEDICAL ARTIFICIAL INTELLIGENCE </div>\r\n <div class = \"heading\"> Your virtual diabetes assistant</div>\r\n\r\n\t\t\t\r\n\t\t\t\r\n <br/>\"\"\" + ans + \"\"\"</div>\r\n\r\n\t\t\t\r\n\r\n\t</body>\r\n</html>\"\"\"", "def importSurvey(self, ImportFormat, Name, Activate=None, URL=None, FileContents=None, OwnerID=None, **kwargs):\n result = self.request(\n \"importSurvey\",\n ImportFormat=ImportFormat,\n Name=Name,\n Activate=Activate,\n URL=URL,\n OwnerID=OwnerID,\n post_files={\"FileContents\": FileContents} if FileContents else None,\n **kwargs\n )\n if result is not None:\n return result[\"Result\"][\"SurveyID\"]", "def 
import_from_url(jamsite, url, fieldnames=None):\n\t# import csv, from the webz.\n\tcsvfile = fetch_csv_from_url(url)\n\tjamsite.mergeinsert( import_jammers(csvfile, fieldnames=fieldnames) )", "def prepare_submission(self, pdbfile):\n ## class SubmitPDBPage\n job_id = mysql.job_new()\n\n ## basic sanity checks\n ## If check_upload returns anything but a empty string, the server will\n ## inform the user of the problem and not proceed any further.\n ln = pdbfile.split(\"\\n\")\n r, garbage = check_upload(job_id, ln, mainchain = False)\n if r != '':\n raise SubmissionException(str(r))\n\n result = webtlsmdd.set_structure_file(job_id, xmlrpclib.Binary(pdbfile))\n if result != \"\":\n raise SubmissionException(\"Failed to submit structure. Please try again.\")\n\n return job_id", "def submit(self, content):\n pass", "def jobs_import_view():\n import_job()\n return response.redirect(request.app.url_for('jobs'))", "def do_import(export_filename, token):\r\n\r\n print 'Importing %s' % export_filename\r\n url = 'http://shank.trikeapps.com/mediawiki/index.php?title=Special:Import&action=submit'\r\n export_file = open(export_filename, 'rb')\r\n data = {'source': 'upload', 'log-comment': 'migrate_wiki.py script', 'xmlimport': export_file, 'editToken': token }\r\n feed = urllib2.urlopen(url, data)\r\n buf = feed.read()\r\n tree = etree.fromstring(buf, parser)\r\n nodes = tree.xpath('//div[@id=\"bodyContent\"]/p[2]')\r\n if not nodes or not nodes[0].text.startswith('Import finished!'):\r\n raise Exception('Failed to upload file, perhaps export file exceeds max size, try without the --at-once option')", "def _do_import_from_content(content, opr, to_save, blacklisted_domains=BLACKLISTED_DOMAINS):\n if not content:\n log.warn('No content, doing nothing')\n return\n urls = contentfiltering.find_all_urls(content)\n log.info('Found %d urls: %r', len(urls), urls)\n platforms = []\n for url in urls:\n log.info('Oring url: %r', url)\n try:\n url = utils.resolve_http_redirect(url)\n except:\n log.exception('While resolve_http_redirect, skipping')\n continue\n log.info('Redirected url: %r', url)\n vurl = platformutils.url_to_handle(url)\n if not vurl:\n log.info('No handle computed from url %r, skipping', url)\n continue\n domain = utils.domain_from_url(vurl)\n if domain in blacklisted_domains:\n log.info('Domain %r is blacklisted', domain)\n continue\n blog_url = utils.url_without_path(url)\n if domain.endswith('.wordpress.com'):\n platforms.append(models.Platform(platform_name='Wordpress', url=blog_url))\n elif domain.endswith('.blogspot.com'):\n platforms.append(models.Platform(platform_name='Blogspot', url=blog_url))\n else:\n content = xutils.fetch_url(blog_url)\n if content:\n discovered_pname = xutils.contains_blog_metatags(content)\n if discovered_pname:\n platforms.append(models.Platform(platform_name=discovered_pname, url=blog_url))\n continue\n platforms.append(models.Platform(platform_name='Custom', url=blog_url))\n\n influencers = []\n influencers_created = []\n for plat in platforms:\n inf, inf_created = helpers.get_or_create_influencer(plat.url, 'comments_content_import',\n to_save)\n if not inf:\n log.warn('Skipping url %r because influencer with this url is blacklisted', plat.url)\n continue\n plat.influencer = inf\n influencers.append(inf)\n if inf_created:\n influencers_created.append(inf)\n\n if opr:\n opr.data = {\n 'influencer_ids': [influencer.id for influencer in influencers],\n 'influencer_created_ids': [influencer.id for influencer in influencers_created],\n 'influencer_blog_urls': 
[influencer.blog_url for influencer in influencers],\n }\n\n log.info('Platforms from content: %r', platforms)\n if to_save:\n for plat in platforms:\n # influencer of None means we got a blacklisted influencer\n # when we searched by URL.\n if plat.influencer is not None:\n plat.save()\n\n return platforms" ]
[ "0.616191", "0.610111", "0.58288443", "0.5678453", "0.5619826", "0.5566439", "0.53977937", "0.5396064", "0.5394963", "0.53024304", "0.52641046", "0.5256734", "0.52327317", "0.5211399", "0.52003515", "0.5163786", "0.5095641", "0.50951064", "0.5082744", "0.50585926", "0.5019451", "0.4993752", "0.49879012", "0.49652535", "0.49421084", "0.49403274", "0.4917529", "0.4914674", "0.49015406", "0.4900753" ]
0.6888236
0
Parse the URL of each book from the book list page. The book's title and URL will be stored in the database.
def parse_books_from_html(html):
    root = lxml.html.fromstring(html)
    for a in root.cssselect("a"):
        if not 'href' in a.attrib:
            continue
        href = a.attrib['href']
        if href.startswith("javascript"):
            continue
        if not href.startswith("http"):
            href = urljoin(base_url, href)
        book_title = a.text_content()
        d = parse_qs(urlparse(href).query)
        if 'M' in d and d['M'][0] in ('book', 'Share'):
            if 'P' in d:
                book_id = d['P'][0]
                book = {'id': book_id, 'url': href, 'title': book_title}
                save_bookpages(book)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_books_url(url):\n url_array = []\n nbr_pages = get_nbr_of_pages(url) \n if(nbr_pages == None):\n nbr_pages = 1\n formatted_url = split_url(url)\n formatted_url = formatted_url.split('page')\n for i in range(1, int(nbr_pages) + 1):\n if nbr_pages != 1:\n join_url = formatted_url[0] + 'page-' + str(i) + '.html'\n else: \n join_url = url\n response = requests.get(join_url)\n if(response.ok):\n soup = BeautifulSoup(response.text, 'lxml')\n table = soup.find('ol', {'class': 'row'})\n rows = table.find_all('a', href=True)\n for row in rows:\n if row.text:\n url_array.append(\n \"http://books.toscrape.com/catalogue/\" \n + row['href'].strip('../'))\n return url_array", "def scrap_book_links(category_link):\n # list where the links of the books will be stored\n book_links = []\n\n while True:\n # check to see if url was successfully gotten (if ok response=200,otherwise 404)\n response = requests.get(category_link)\n\n # get the content of the page as html and saves it in an object called page\n page = response.content\n\n # we use BeautifulSoup to parse(converting information into a format that's easier to work with) the html\n soup = BeautifulSoup(page, \"html.parser\")\n\n # in the parsed html all children of the parent article,because this is where all the information we need is\n urls_of_books = soup.find_all('article')\n\n # links are found in the a href\n book_links += [book_rel_url_to_book_abs_url(the_stuff.find('a')['href']) for the_stuff in urls_of_books]\n\n # check whether a next button exists\n if a := soup.select_one(\".next > a\"):\n category_link = remove_last_part_of_url(category_link) + \"/\" + a[\"href\"]\n else:\n break\n return book_links", "def scrape_one_book(self, url):\n\n if url in self.url_to_explore:\n self.url_to_explore.remove(url)\n req = requests.get(url, headers = self.headers).content\n soup = BeautifulSoup(req, 'html5lib')\n soupbody = soup.body\n\n book_data = {}\n # get book url\n book_url = url\n book_data[\"url\"] = book_url\n\n # get book title\n book_title = soupbody.find('h1', attrs={'id':'bookTitle'}).text.strip()\n if book_title:\n book_data[\"title\"] = book_title\n\n # # get book id\n reg = 'https://www.goodreads.com/book/show/([0-9]+)'\n book_id = re.search(reg, url).group(1)\n book_data[\"id\"] = book_id\n\n # get book ISBN\n book_databox = soupbody.find('div', attrs={'id':'bookDataBox'})\n if book_databox:\n all_float_divs = book_databox.find_all('div',\n attrs = {'class' : 'clearFloats'})\n book_isbn = ''\n for div in all_float_divs:\n title = div.find('div',\n attrs = {'class':'infoBoxRowTitle'}).text.strip()\n if title == 'ISBN':\n book_isbn = div.find('div',\n attrs = {'class':'infoBoxRowItem'}).contents[0].strip()\n book_data[\"ISBN\"] = book_isbn\n\n # get book author url and author name\n author_name_container = soupbody.find('div',\n attrs = {'class':\"authorName__container\"})\n if author_name_container:\n all_authors = author_name_container.find_all('a',\n href = True, attrs = {'class':\"authorName\"})\n cur_author_url = []\n cur_author_name = []\n for author in all_authors:\n cur_author_url.append(author['href'])\n name = author.find('span', attrs = {'itemprop':'name'}).text.strip()\n cur_author_name.append(name)\n book_data[\"authorURLs\"] = cur_author_url\n book_data[\"author_names\"] = cur_author_name\n\n # get book rating and review\n book_meta = soupbody.find('div', attrs = {'id':'bookMeta'})\n if book_meta:\n rating = book_meta.find('span',\n attrs = {'itemprop':'ratingValue'}).text.strip()\n book_data[\"rating\"] = 
rating\n\n book_rating_count_container = book_meta.find('meta',\n attrs = {'itemprop':'ratingCount'})\n if book_rating_count_container:\n book_rating_count = book_rating_count_container['content']\n book_data[\"rating_count\"] = book_rating_count\n\n book_review_count_container = book_meta.find('meta',\n attrs = {'itemprop':'reviewCount'})\n if book_review_count_container:\n book_review_count = book_review_count_container['content']\n book_data[\"review_count\"] = book_review_count\n\n # get book image\n image_tag = soupbody.find('img', attrs = {'id':'coverImage'})\n if image_tag:\n image_src = image_tag['src']\n book_data[\"bookImage\"] = image_src\n # print(authorLink.span.text)\n\n # get related_books\n related_works_container = soupbody.find('div', id=re.compile('relatedWorks-'))\n if related_works_container:\n related_books_div = related_works_container.find('div', class_='bigBoxBody')\n if related_books_div:\n related_books_carousel = related_books_div.find('div', class_='bookCarousel')\n if related_books_carousel:\n carousel_row = related_books_carousel.find('div', class_='carouselRow')\n if carousel_row:\n related_books_list_li = carousel_row.find('ul').find_all('li')\n related_books = []\n for item in related_books_list_li:\n link = item.find('a', href = True)['href']\n self.url_to_explore.add(link)\n related_books.append(link)\n book_data[\"similar_books\"] = related_books\n\n self.data_collection.push_to_collection(book_data)\n print(\"Book successfully scraped: \" + book_title)", "def read_book(url,book_num):\n\t#calls open_url function to open the url\n\tbook_contents = open_url(url)\n\tif book_contents != None:\n\t\t#calls filter data function to clean the data\n\t\tclean_data = filter_data(book_contents)\n\t\t#create dictionary for all the words in this book with 0's filling for count in all the books\n\t\tcreate_dict(clean_data)\n\t\treturn clean_data\n\telse:\n\t\treturn []", "def get_books(name, site):\n print name\n url = urlparse.urljoin(\"http://\"+site, name)\n if 'manga24' in site:\n try:\n print 'loading url: '+url\n page = urllib2.urlopen(url)\n html = page.read()\n try:\n parsedhtml = BeautifulSoup(html)\n ul = parsedhtml.findAll('ul', attrs={'class': 'all-chapters'})[0]\n atags = ul.findAll('a')\n links = [\"http://manga24.ru/\"+x['href'] for x in atags]\n return links\n except:\n try:\n parsedhtml = BeautifulSoup(html)\n a = parsedhtml.findAll('a', attrs={'class': 'button'})[0]\n atags = (a,)\n links = [\"http://manga24.ru/\"+x['href'] for x in atags]\n return links\n except:\n print \"can't parse this shit\"\n sys.exit()\n except Exception:\n print \"Wrong name... Or problem with internet connection.. or something\"\n sys.exit()\n elif 'adultmanga.ru' in site:\n try:\n page = urllib2.urlopen(url)\n print \"opening url: \" + url\n html = page.read()\n try:\n parsedhtml = BeautifulSoup(html)\n div = parsedhtml.findAll('div', attrs={'class': 'subject-actions'})[0]\n atag = div.findAll('a')[-1]\n initial_page = atag['href']\n\n url = urlparse.urljoin(\"http://\"+site, initial_page)\n print 'loading url: '+url\n page = urllib2.urlopen(url)\n html = page.read()\n try:\n parsedhtml = BeautifulSoup(html)\n sel = parsedhtml.findAll('select', attrs={'id': 'chapterSelectorSelect'})[0]\n ops = sel.findAll('option')\n links = [urlparse.urljoin(\"http://\"+site, x['value']) for x in ops]\n return links\n except:\n print \"can't find bookslist\"\n sys.exit()\n except:\n print \"Can't find link\"\n sys.exit()\n except Exception:\n print \"Wrong name... 
Or problem with internet connection.. or something\"\n sys.exit()\n else:\n print \"Can't work with this site\"\n sys.exit()", "def srap_books(url: str, rows: list):\n url_book = get_all_page(url)\n get_all_book(url_book, rows)\n # loop to get the next pages\n while True:\n soup = get_data(url)\n url = get_next_page(soup, url)\n if not url:\n break\n else:\n url_book = get_all_page(url)\n get_all_book(url_book, rows)\n del url_book[:]", "def get_urls(num):\n url = \"https://books.google.at/books?id=77cdBQAAQBAJ&lpg=PP1&dq=%E5%82%85%E4%BD%A9%E6%A6%AE&pg=PA{}&jscmd=click3&vq=%E5%82%85%E4%BD%A9%E6%A6%AE\".format(num)\n res = requests.get(url)\n res_text = json.loads(res.text)\n pages = res_text[\"page\"]\n\n result = {}\n for p in pages:\n if 'src' in p:\n page_num = p['pid']\n page_src = p['src'] \n result[page_num] = page_src\n return result", "def getURLs(modelURL):\n\n #Get model page as soup\n soup, _ = getPage(modelURL)\n\n #Check if page available\n if soup is None:\n #Not available - Break\n print(\"Can't find Model URL\")\n quit()\n \n #Get URLs on first page\n urlList = listingURLs(soup)\n\n #Find last page number if available\n try:\n lastPageURL = soup.find(class_=\"page-number-navigation__link page-number-navigation__link-last link link--base-color-primary link--hover-color-none link--no-underline\")['href']\n lastPage = int(re.search('page-(\\d+)', lastPageURL).group(1))\n except:\n #No Last page button - Only one page of results\n lastPage = None\n\n #Loop for all pages if available\n if lastPage is not None:\n for i in range(2, lastPage + 1):\n #Create Page URL\n urlParts = modelURL.split(\"/\")\n urlParts = urlParts[:-1] + [f\"page-{i}\"] + urlParts[-1:]\n pageURL = \"/\".join(urlParts)\n #Get Page\n soup, _ = getPage(pageURL)\n #Check if page available\n if soup is None:\n #Not available, skip iteration\n continue\n #Get Pages URLs\n urlList += listingURLs(soup)\n\n return urlList", "def get_all_page(url: str) -> list:\n url_book = get_url_book(url)\n return url_book", "def callback_from_url(self, url):\n if re.search(\"https?://mebook.cc/page/.*\", url):\n return self.parse_list_page\n\n if re.search(\"https?://mebook.cc/date/.*\", url):\n return self.parse_archive_page\n\n if re.search(\"https?://mebook.cc/category/.*$\", url):\n return self.parse_category_page\n\n if re.search(\"https?://mebook.cc/[^/]+.html$\", url):\n return self.parse_book_page\n\n if re.search(\"https?://mebook.cc/download.php?id=.*$\", url):\n return self.parse_download_page", "def put_books_info_in_csv(url):\n books_urls = get_books_url(url)\n for url in books_urls:\n put_book_info_in_csv(url)\n download_book_image(url)", "def parse_list(el):\n el = pq(el)\n name = strip_tags(el.children(\".title a\").html())\n phone = strip_tags(el.children(\".phone\").html())\n email = strip_tags(el.children(\".email a\").attr(\"href\"))\n if email:\n email = email.replace(\"mailto:\", \"\")\n source_url = el.children(\".title a\").attr(\"href\")\n\n data = {\n 'name': name,\n 'source_url': 'http://www.guidestockholm.com%s' % source_url,\n 'email': email,\n 'phone': phone,\n }\n scraperwiki.sqlite.save(unique_keys=['source_url'], data=data, table_name=\"guidestockholm\")", "def parse_list(el):\n el = pq(el)\n name = strip_tags(el.children(\".title a\").html())\n phone = strip_tags(el.children(\".phone\").html())\n email = strip_tags(el.children(\".email a\").attr(\"href\"))\n if email:\n email = email.replace(\"mailto:\", \"\")\n source_url = el.children(\".title a\").attr(\"href\")\n\n data = {\n 'name': name,\n 
'source_url': 'http://www.guidestockholm.com%s' % source_url,\n 'email': email,\n 'phone': phone,\n }\n scraperwiki.sqlite.save(unique_keys=['source_url'], data=data, table_name=\"guidestockholm\")", "def get_book_id_from_input_url(self):\n\t #(https:\\/\\/www.goodreads.com\\/book\\/show\\/|http:\\/\\/www.goodreads.com\\/book\\/show\\/)\n # http://www.goodreads.com/book/show/12177850-a-song-of-ice-and-fire\n\n n = 0\n if self.input_url[0:36] == \"https://www.goodreads.com/book/show/\":\n n = 36\n\n remaining_url = self.input_url[n:]\n word_list = re.split('[.-]' ,remaining_url)\n\n if word_list[0].isnumeric():\n self.book_id = word_list[0]\n else:\n # raise the exception when book id is not found in the URL\n raise myException(\"InvalidGoodreadsURL\")\n\n elif self.input_url[0:35] == \"http://www.goodreads.com/book/show/\":\n n=35\n\n remaining_url = self.input_url[n:]\n word_list = re.split('[.-]' ,remaining_url)\n\n if word_list[0].isnumeric():\n self.book_id = word_list[0]\n else:\n # raise the exception when book id is not found in the URL\n raise myException(\"InvalidGoodreadsURL\")\n else:\n # raise exception when the first half of the URL does not match the req\n raise myException(\"InvalidGoodreadsURL\")", "def update_links(self):\n for a in self.book.xpath(\"//a[@href]\"):\n href = a.xpath(\"@href\")[0]\n index_list = a.xpath(\"@data-index\")\n \n ### If there is no data-index it is assumed link comes from initial book landing page (the index page)\n if index_list == []:\n index = self.manager.get_page_index(\"index.html\")\n else:\n index = index_list[0]\n \n ### Fix people who are bad at links\n if href.startswith(\"www.\"):\n href = \"https://\" + href\n a.set(\"href\", href)\n \n ## Correct for ambiguity (Naive assumption that this error only occours on index page)\n if href == \"./\":\n href = \"index.html\"\n \n if not href:\n return None\n \n href = self.manager.convert_link(href, index)\n a.set(\"href\", href)", "def get_all_book(url_book: list, rows: list):\n # loop from book url\n for i in range(len(url_book)):\n book = get_book(url_book[i])\n # write each result to rows\n rows.append(book)", "def get_book_infos(url):\n response = requests.get(url)\n if response.status_code == 200:\n # We get the link without the \\..\n link = response.url\n soup = BeautifulSoup(response.content, 'html.parser')\n search_img = soup.find('div', {\"class\": \"item active\"}).find('img')[\"src\"]\n image_link = requests.get(f\"http://books.toscrape.com/{search_img}\").url\n # Product info are in balise tr\n trs = soup.findAll('tr')\n # Stocking the info in a dictionnary\n dict_tr = {}\n for tr in trs:\n th = tr.find('th').text\n td = tr.find('td').text\n dict_tr[th] = td\n # All the informations of the book that we need\n return {'product_page_url': link,\n 'universal_ product_code (upc)': dict_tr['UPC'],\n 'title': soup.find('h1').text,\n 'price_including_tax': dict_tr['Price (incl. tax)'],\n 'price_excluding_tax': dict_tr['Price (excl. 
tax)'],\n 'number_available': dict_tr['Availability'],\n 'product_description': soup.findAll('meta')[2][\"content\"],\n 'category': soup.findAll('li')[2].find('a').text,\n 'review_rating': soup.findAll('p')[2][\"class\"][1],\n 'image_url': image_link}", "def parse_apartment_urls(self):\n\n # Generate soup for starting page\n soup = generate_soup(self.start_url)\n\n # Empties the urls list, in case it wasn't before\n self.apartment_urls = []\n\n # Get apartments in current page and store\n current_page_apartment_urls = self.list_get_apartment_urls(soup)\n self.apartment_urls = self.apartment_urls + current_page_apartment_urls\n\n # Check if there are more page to pull from\n while self.list_has_next_page(soup):\n soup = self.list_get_next_page(soup)\n\n # Get apartments in current page\n current_page_apartment_urls = self.list_get_apartment_urls(soup)\n self.apartment_urls = self.apartment_urls + current_page_apartment_urls", "def add_books_details(self, books_dict_with_href):\n for i, book_dict in enumerate(books_dict_with_href):\n print(f'Getting more details for book {i + 1}')\n book_content = self._get_book_content(book_dict['book_page_href'])\n self._add_book_details(book_dict, book_content)", "def scrap_book_info(book_url):\n response = requests.get(book_url)\n page = response.content\n soup = BeautifulSoup(page, \"html.parser\")\n\n return {\n \"product_page_url\": book_url,\n \"upc\": soup.select_one(\"table tr:nth-child(1) > td\").text,\n \"title\": soup.select_one(\"article div.col-sm-6.product_main > h1\").text,\n \"price_including_tax\": soup.select_one(\"table tr:nth-child(4) > td\").text,\n \"price_excluding_tax\": soup.select_one(\"table tr:nth-child(3) > td\").text,\n \"number_available\": number_only(soup.select_one(\"#content_inner > article > table tr:nth-child(6) > td\").text),\n \"product_description\": soup.select_one(\"article > p\").text,\n \"category\": soup.select_one(\"#default > div > div > ul > li:nth-child(3) > a\").text,\n \"review_rating\": word_to_number(soup.select_one(\".star-rating\")[\"class\"][1]),\n \"image_url\": remove_suffix(soup.select_one(\"#product_gallery img\")[\"src\"]),\n }", "def __url_list(self, page):\n url_list = []\n for tag_a in page.find_all('a'):\n href = str(tag_a.get('href'))\n if self.__verify(href):\n url = parse.quote(self.__add_main_site(href), '/:#')\n url_list.append(url)\n return url_list", "def get_course_page_urls(self,soup):\n\t\tcourse_links =[]\n\t\troot_url = 'http://onlinelearning.cornell.edu'\n\t\tfor link in soup.select('span.field-content a[href]'):\n\t\t\tnew_url = root_url + link['href']\n\t\t\tcourse_links.append(new_url)\n\t\t\tcourse_links.append(' \\n')\n\t\t\n\t\tself.new_list.append(course_links)\n\t\treturn course_links", "def getUrlsList(self):\n\t\ttry:\n\t\t\tf = ur.urlopen(self.sitemap_url)\n\t\t\tres = f.readlines()\n\t\t\tfor d in res:\n\t\t\t data = re.findall('<loc>(https?:\\/\\/.+?)<\\/loc>',d)\n\t\t\t for i in data:\n\t\t\t\tself.urls.append(i)\n\t\texcept Exception as e:\n\t\t\tself.app.printflush(str(e))\n\t\t\tself.app.printflush(traceback.format_exc())\n\t\tself.fetched_count = len(self.urls)", "def get_book_details(self):\n\n try:\n # gives response for the request from the API url\n response = requests.get(self.book_url)\n\n \n # using ElementTree to store the response content in a tree\n root = ET.fromstring(response.content)\n book = root.find('book')\n\n # getting the required details\n self.book_details[\"title\"] = book.find('title').text\n self.book_details[\"average_rating\"] = 
book.find('average_rating').text\n self.book_details[\"ratings_count\"] = book.find('ratings_count').text\n self.book_details[\"num_pages\"] = book.find('num_pages').text\n self.book_details[\"image_url\"] = book.find('image_url').text\n self.book_details[\"publication_year\"] = book.find('publication_year').text\n\n # getting list of all the authors\n authors = book.find('authors')\n if authors:\n author_names_list = []\n for author in authors.iter('author'):\n author_names_list.append(author.find('name').text)\n author_names_sentence = \", \".join(author_names_list)\n self.book_details[\"authors\"] = author_names_sentence\n except:\n raise Exception(\"invalid XML response\")", "def _parse_links(self, item) -> list:\n # TODO This would be a \"nice to have\" but is not necessary right now.\n return [{\"href\": \"\", \"title\": \"\"}]", "def get_beers_list(self, location_url: str):\n\n data = helpers.beautiful_url(url=location_url, \n cookies=self.cookies, \n javascript=self.javascript) \n\n if self.single_page: \n if self.beer_parent_tags:\n tag, attribute = self.beer_parent_tags\n data = data.find(tag, attribute)\n\n try:\n tag, attribute = self.beers_html_tags\n self.beers = data.find_all(tag, attribute)\n except:\n self.beers = data.find_all\n else: # get a list of all the beer urls\n print(\"multiPage\")\n tag, attribute = self.beer_multi_page_tags\n self.beers = [url['href'] for url in data.find_all(tag, attribute, href=True)]", "def init_urls(self):\n url = 'http://www.lagou.com/'\n for ip_info in self.col.find(no_cursor_timeout=True):\n ip, port = ip_info['ip'], ip_info['port']\n if ip and port:\n self.urls.append((url, ip, port)) # tuple", "def _parse_past_documents(self, item):\n doc_list = []\n for doc in item.css('a'):\n doc_list.append({\n 'url': 'http://{}{}'.format(self.allowed_domains[0], doc.attrib['href']),\n 'note': doc.css('*::text').extract_first(),\n })\n return doc_list", "def open_url(url):\n\tglobal books\n\tglobal count_books\n\tglobal titles\n\t#global word_count\n\ttry:\n\t\t#open url\n\t\tresponse = re.urlopen(url)\n\t\t#get data\n\t\tcontent = response.read().decode('utf8')\n\t\t#close connection\n\t\tresponse.close()\n\t\t\n\texcept(er.URLError):\n\t\t#if url is not functional\n\t\tcontent = \"\"\n\t\tprint(\"The URL is not functional : \",url)\n\t\treturn None\n\t\t# #remove the url from the books dictionary\n\t\t# for key,val in books.items():\n\t\t# \tif val == url:\n\t\t# \t\tdel books[key]\n\t\t# \t\t#pop the last\n\t\t# \t\ttitles.pop()\n\t\t# \t\tbreak\n\t\t# #update count for number of books\n\t\t# count_books = len(books)\n\t\t# return\n\treturn content", "def get_book_info(self, book):\n request_url = \"%s?q=%s\" % (self.API_URL, book)\n json_data = self.make_request(request_url)\n if not json_data:\n return []\n books_info = []\n for book in json_data['docs']:\n info = {'title': book['title']}\n if 'publisher' in book:\n info.update({'publisher': book['publisher']})\n if 'publish_year' in book:\n info.update({'publish_year': book['publish_year']})\n if 'language' in book:\n info.update({'language': book['language']})\n books_info.append(info)\n return books_info" ]
[ "0.6903227", "0.64504856", "0.63376325", "0.6297125", "0.6132663", "0.6110711", "0.6098223", "0.607932", "0.605915", "0.6037943", "0.60282815", "0.5987423", "0.5987423", "0.59660596", "0.5940519", "0.59073544", "0.5829715", "0.58157396", "0.57694906", "0.57678217", "0.571373", "0.570089", "0.5596046", "0.5584765", "0.5575388", "0.5570403", "0.5568707", "0.5566422", "0.555927", "0.55182743" ]
0.6719539
1
Post event to all rulesets
def all_events_request():
    result = []
    message = json.loads(request.stream.read().decode('utf-8'))
    for ruleset_name in host.list_rulesets():
        result.append(host.post(ruleset_name, message))
    return jsonify(result)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def post_events(ruleset_name):\n message = json.loads(request.stream.read().decode('utf-8'))\n result = host.post(ruleset_name, message)\n return jsonify(result)", "def __call__(self, event):\n post_event(event, self.baseUrl, self.filterName)", "def _do_rule_processing(self, line, events):\n\n for rule in self.rules:\n match = rule.regexp.search(line)\n if match:\n events.append(Event(self, rule.handler, LogMatch(line, match)))\n if rule.quick:\n break", "def rule(self, rules):\n\n if not isinstance(rules, list):\n rules = [rules]\n\n for rule in rules:\n self.__addRule(rule)", "def apply_ruleset(self, ruleset):\n updates = [self._get_lexicon_update(ruleset['lexicon'])]\n updates += ruleset['rules']\n self.apply_updates(updates)", "def apply_rules(self):\n if len(self.rules) == 0:\n return\n for gene in self.population:\n for rule in self.rules:\n if rule.type == \"gene\":\n rule.check_and_apply(gene)", "def post_process(self, relevant_targets):\r\n pass", "def rulesetsRefreshed(self):\n self.remoteBots.allowSync = True\n self.remoteBots.syncRequests()", "def _addrule(self, nonterm, program, params, info):\n rule = Rule(nonterm, program, params, info)\n\n if not nonterm in self.rules:\n self.rules[nonterm] = []\n \n self.rules[nonterm].append(rule)", "def add_rule(self, rule) -> None:\n self.add_rules([rule])", "def check_rules(self, event):\n logger.debug(\"Checking rules\")\n # Iterate through rules and try to apply them\n for rule in RippleConfig().rules[:]:\n event_type = event['type']\n if self.match_condition(event_type, rule):\n # Currently putting in pathname as key, need to\n # think of a better way to handle \"other\" information\n send_event = {'event': {\n 'type': event_type,\n 'size': event['bytes'],\n 'key': event['key'],\n 'pathname': event['key'],\n 'path': event['key'],\n 'name': event['key'],\n 'shmid': event['shmid'],\n 'perms': event['perms'],\n 'owner': event['owner'],\n 'status': event['status'],\n 'uuid': str(uuid.uuid4()),\n 'hash': 'hashvalue'\n }\n }\n print (\"Sending event: %s\" % send_event)\n send_event.update(rule)\n\n # Now push it down the queue\n message = json.dumps(send_event)\n RippleConfig().queue.put(message)\n logger.debug(\"Sent data to queue\")\n\n return None", "def add_rule(self, rule):\n \n self.rules.append(rule)", "def add_rule(self, rule):\n self.rule.append(rule)", "def test_post_entry_groups(self):\r\n # This method utilises the POST request method and will make changes to the Canvas instance. 
This needs consideration.\r\n pass", "def add_rule(self, rule: Rule):\n self.rules.append(rule)", "def add_rule_objects ( self, rules ):\n pool = self.get_pool()\n # pool validates the rules' type\n for deprule in rules:\n pool.add ( deprule )\n return True", "def on_set_rule(self) -> None:\r\n\r\n self.stop_animation()\r\n self.master.focus() # Move the cursor away from the rule entry\r\n rule_text = str(self.rule_entry.get())\r\n\r\n if not self.rule.try_set_rule(rule_text):\r\n messagebox.showinfo(message = self.INVALID_RULE_MESSAGE)\r\n return\r\n\r\n self.rule_name.configure(text = rule_text)\r\n\r\n self.board.birth_rule = self.rule.birth_rule\r\n self.board.remain_rule = self.rule.remain_rule\r\n self.anim_board.birth_rule = self.rule.birth_rule\r\n self.anim_board.remain_rule = self.rule.remain_rule", "def postProcess(self):\n flag = False;tt = [];\n # step 1: check that event is referred by Task \n for task in GetOsekObjects('TASK'):\n for attr in task.getAttribute('EVENT'):\n if(self.name == attr.value):\n flag = True\n tt.append(task) # Task has this EVENT\n if(flag == False):\n print 'WARNING: %s has not been referred by any task.'%(self.name)\n elif(len(tt) > 1):\n print 'ERROR: EVENT <%s> is not allowed to be defined for %s, %s ...'%(self.name, tt[0].name, tt[1].name)\n sys.exit(-1)\n else:\n self.addAttribute('TASK', tt[0].name)", "def post(self, event, *args, **kwargs):\n self.inq.Signal((event, args, kwargs))", "def _post_hooks(self):", "def on_matching_rules(self, matching_rules):\n pass", "def add_rules(self, rules):\n self.name.append(rules)", "def setup_rule(self, client, *args, **keyword_args):\n pass", "def setup_rule(self, client, *args, **keyword_args):\n pass", "def events(self):", "def process_post_events(self):\n self.portfolio_handler.to_database_portfolio()", "def __call__(self, config):\n # loop over the rules sorted according to their dependencies and\n # apply them\n for rule in networkx.topological_sort(self.graph):\n value = rule.apply(config)\n if value is not None:\n set_from_path(config, rule.name, value)", "def setup_rule(self, client):\n pass", "def setup_rule(self, client):\n pass", "def register_rule(cls, rule_func):\n cls._rules_factories.append(rule_func)" ]
[ "0.5963852", "0.58948195", "0.5618622", "0.55938214", "0.5427861", "0.5391614", "0.53427666", "0.5329339", "0.53197384", "0.531575", "0.53121924", "0.530597", "0.5285532", "0.52750355", "0.52552474", "0.5246069", "0.5223859", "0.52142763", "0.51979613", "0.51774013", "0.51750314", "0.51551324", "0.51272774", "0.51272774", "0.509937", "0.5087345", "0.508426", "0.50796455", "0.50796455", "0.50528675" ]
0.59976584
0
Set ruleset state sid
def set_state_sid_request(ruleset_name, sid):
    message = json.loads(request.stream.read().decode('utf-8'))
    message['sid'] = sid
    result = host.patch_state(ruleset_name, message)
    return jsonify(result)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sid(self, sid):\n self._sid = sid", "def set_state(self,s):\n self.state = s", "def set_state(self, state: int):", "def __setstate__(self, state):\n\n self.set(DER = state)", "def set_rule(self, rule):\n self.rule.load_state_dict(rule, strict=True)", "def _set_state(self, state):\n #print(\"** set state from %d to %d\" % (self.state, state))\n self.state = state", "def set_state( self ):", "def set_ident(self, new_ident: int):\n if not isinstance(new_ident, int):\n raise TypeError(\"Spectrum set identifiers may ONLY be positive integers\")\n self._set_ident = new_ident", "def set_state(self, state):\n #print(\"ComponentBase.set_state\")\n for k,v in state.items():\n #print(\" Set {:14s} to {:s}\".format(k,str(v)))\n if k == \"connectors\":\n for con_state in v:\n self.add_connector() \n self.connectors[-1].set_state(con_state)\n else:\n setattr(self, k, v)", "def __setstate__(self, state):\n\n self.list = state", "def __setstate__(self, state):\n\n self.list = state", "def state_id(self, state_id):\n\n self._state_id = state_id", "def set_state(self, state: int):\n self.state = state", "def set_domain_sid(self, sid):\n dsdb._samdb_set_domain_sid(self, sid)", "def set_state(self,state):\n self.__state = state", "def set_srid(self, srid: ir.IntegerValue) -> GeoSpatialValue:\n return ops.GeoSetSRID(self, srid=srid).to_expr()", "def set_state(self, state):\n self.state = state", "def entry_mode_set(self, id=True, s=False):\n self.instruction([False, False, False, False, False, True, bool(id), bool(s)])", "def setIdentity(self) -> None:\n ...", "def __setstate__(self, state):\n return None", "def set_state(self, state):\n self.history = state", "def set_state(self, state):\n for v in self._variables:\n self._state[self._mapping[v]] = state[v]\n if any(v not in {1, -1} for v in self._state):\n raise ValueError(\"State must contain only 1's and -1's\")", "def set_id(self, ssc_id):\r\n self.ssc_id = ssc_id", "def setState(self, uistate):\n if isinstance(uistate, list):\n state = set(uistate)\n elif isinstance(uistate, int):\n state = set([uistate])\n else:\n raise TypeError, \"Argument must be int or list\"\n\n if len(state & self._constrains) > 0:\n self._state = state\n else:\n raise KeyError, \"Attemptinng to set an unknown state\"\n\n self.stateChanged.emit(state)", "def set_state(self, state):\n _modeller.mod_state_optimizer_state_set(self._modpt, self.__edat.modpt,\n state)", "def __setstate__(self, state):\n self.__dict__.update(state)\n for y in ['strains', 'alleles', 'base_cobra_model']:\n for x in getattr(self, y):\n x._model = self\n if not hasattr(self, \"name\"):\n self.name = None", "def set_state(self, state=0):\r\n return self._arm.set_state(state=state)", "def psid(self, psid):\n\n self._psid = psid", "def set_state(self, i, state):\n self.states[i] = state\n self.sanity_check()\n print self", "def rule_id(self, rule_id):\n\n self._rule_id = rule_id" ]
[ "0.6317392", "0.6268615", "0.62445796", "0.60649145", "0.58590347", "0.5837428", "0.580806", "0.58021194", "0.57980675", "0.5752198", "0.5752198", "0.5744414", "0.57234263", "0.5718662", "0.5679742", "0.5645187", "0.5636659", "0.5628161", "0.5618529", "0.5560293", "0.5513871", "0.55079603", "0.5506261", "0.5502357", "0.54984635", "0.5496551", "0.5493484", "0.5492342", "0.5487816", "0.5481329" ]
0.74748975
0
Get ruleset state sid
def get_state_sid_request(ruleset_name, sid):
    result = host.get_state(ruleset_name, sid)
    return jsonify(result)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def state_id(self):\n return self._state_id", "def get_rule_id(self):\n from .osid_errors import IllegalState\n # Someday I'll have a real implementation, but for now I just:\n raise IllegalState()", "def sid(self):\n return self._sid", "def sid(self):\n return self.data[''].sid", "def get_sid(self):\n resdat = self.req().read() #phew, that was easy :)\n print resdat\n resdat = self.parse_response(resdat)\n if (resdat[0][1][0] != \"c\"):\n return None\n sid = resdat[0][1][1]\n return sid", "def get_state(self):\n return STAR_STATES[self.state][0]", "def get_state(self):\n return STAR_STATES[self.state][0]", "def get_rule(self):\n return self.rule.state_dict()", "def SRID():\r\n return SurveyPointMixin._SRID", "def set_state_sid_request(ruleset_name, sid):\n message = json.loads(request.stream.read().decode('utf-8'))\n message['sid'] = sid\n result = host.patch_state(ruleset_name, message)\n return jsonify(result)", "def get(self, sid: typing.Union[uuid.UUID, int]) -> bytes:\n if not self.tag.training:\n return bytes()\n if isinstance(sid, int):\n sid = self.tag.states[sid]\n if sid not in self.tag.states:\n raise Level.Invalid(f'Unknown state reference for {self}: {sid}')\n LOGGER.debug('%s: Getting state %s', self, sid)\n return STATES(self.registry, self.project.key, self.lineage.key, self.key, sid)", "def get_rule(self):\n from .osid_errors import IllegalState\n # Someday I'll have a real implementation, but for now I just:\n raise IllegalState()", "def getId(self):\n return _libsbml.Rule_getId(self)", "def config_rule_state(self) -> str:\n return pulumi.get(self, \"config_rule_state\")", "def state(self):\n\n return self.solenoid.get()", "def get_sid(self, seq_name):\n\t\tsid = self.setdefault(seq_name, len(self) + 1)\n\t\t# debug\n\t\t# assert isinstance(sid, int), sid\n\t\treturn sid", "def rule_id(self) -> str:\n return pulumi.get(self, \"rule_id\")", "def sid(self):\n return gdef.PSID(ctypes.addressof(self) + self._sid_offset())", "def get_state(self):\n return PLANET_STATES[self.state][0]", "def get_state(self):\n return PLANET_STATES[self.state][0]", "def state_pk(self):", "def sesid(self, ld8):\n return self.ses_lookup.get(ld8, '')", "def slb_id(self) -> str:\n return pulumi.get(self, \"slb_id\")", "def get_state(self, state):\n return state", "def _get_state(self):\n start = self.design.first_unassigned_site\n return self.target.padded_encoding[\n start : start + 2 * self._env_config.state_radius + 1\n ]", "def srid(self) -> ir.IntegerValue:\n return ops.GeoSRID(self).to_expr()", "def get_engine_sid(self):\n return Sid(path=self.engine.get_file_path())", "def sosid(self):\r\n return self.word2idx.get(SOS, 0)", "def sid(self):\n return self.tags.get('AccessionNumber')", "def get_snpid(self):\n return self._snpid" ]
[ "0.6522904", "0.6476181", "0.6398606", "0.6354551", "0.60375524", "0.60295653", "0.60295653", "0.59568083", "0.5888084", "0.58808523", "0.58517295", "0.58414584", "0.58183634", "0.5815065", "0.57778585", "0.5670262", "0.5668822", "0.56557137", "0.56524223", "0.56524223", "0.56215817", "0.56045324", "0.5599762", "0.5588527", "0.55228615", "0.54955745", "0.5477104", "0.5462872", "0.54527783", "0.54512733" ]
0.6936951
0
Post events to the ruleset
def post_events(ruleset_name):
    message = json.loads(request.stream.read().decode('utf-8'))
    result = host.post(ruleset_name, message)
    return jsonify(result)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __call__(self, event):\n post_event(event, self.baseUrl, self.filterName)", "def _do_rule_processing(self, line, events):\n\n for rule in self.rules:\n match = rule.regexp.search(line)\n if match:\n events.append(Event(self, rule.handler, LogMatch(line, match)))\n if rule.quick:\n break", "def events(self):", "def post(self, event, *args, **kwargs):\n self.inq.Signal((event, args, kwargs))", "def post(self, request):\n return self.serviceHandler.addEvent(request.data)", "def all_events_request():\n result = []\n message = json.loads(request.stream.read().decode('utf-8'))\n for ruleset_name in host.list_rulesets():\n result.append(host.post(ruleset_name, message))\n return jsonify(result)", "def postProcess(self):\n flag = False;tt = [];\n # step 1: check that event is referred by Task \n for task in GetOsekObjects('TASK'):\n for attr in task.getAttribute('EVENT'):\n if(self.name == attr.value):\n flag = True\n tt.append(task) # Task has this EVENT\n if(flag == False):\n print 'WARNING: %s has not been referred by any task.'%(self.name)\n elif(len(tt) > 1):\n print 'ERROR: EVENT <%s> is not allowed to be defined for %s, %s ...'%(self.name, tt[0].name, tt[1].name)\n sys.exit(-1)\n else:\n self.addAttribute('TASK', tt[0].name)", "def post_event(self, event):\r\n from evennia.scripts.models import ScriptDB\r\n\r\n if event.public_event:\r\n event_manager = ScriptDB.objects.get(db_key=\"Event Manager\")\r\n event_manager.post_event(event, self.owner.player, event.display())", "def test_post_add_log_event(self):\n pass", "def check_rules(self, event):\n logger.debug(\"Checking rules\")\n # Iterate through rules and try to apply them\n for rule in RippleConfig().rules[:]:\n event_type = event['type']\n if self.match_condition(event_type, rule):\n # Currently putting in pathname as key, need to\n # think of a better way to handle \"other\" information\n send_event = {'event': {\n 'type': event_type,\n 'size': event['bytes'],\n 'key': event['key'],\n 'pathname': event['key'],\n 'path': event['key'],\n 'name': event['key'],\n 'shmid': event['shmid'],\n 'perms': event['perms'],\n 'owner': event['owner'],\n 'status': event['status'],\n 'uuid': str(uuid.uuid4()),\n 'hash': 'hashvalue'\n }\n }\n print (\"Sending event: %s\" % send_event)\n send_event.update(rule)\n\n # Now push it down the queue\n message = json.dumps(send_event)\n RippleConfig().queue.put(message)\n logger.debug(\"Sent data to queue\")\n\n return None", "def visit_event(self, event):", "def handleEvents(self, events):\n pass", "def process_event(self, event):\r\n pass", "def post(self):\n\n # we need a unique tx number so we can look these back up again\n # as well as for logging\n # FIXME: how can we guarantee uniqueness here?\n tx = int(time.time() * 100000) + random.randrange(10000, 99999)\n\n log.info(\"EVENTS [{}]: Creating events\".format(tx))\n\n try:\n user = self.jbody[\"user\"]\n if not EMAIL_REGEX.match(user):\n user += \"@\" + self.domain\n event_type_id = self.jbody.get(\"eventTypeId\", None)\n category = self.jbody.get(\"category\", None)\n state = self.jbody.get(\"state\", None)\n note = self.jbody.get(\"note\", None)\n except KeyError as err:\n raise exc.BadRequest(\n \"Missing Required Argument: {}\".format(err.message)\n )\n except ValueError as err:\n raise exc.BadRequest(err.message)\n\n if not event_type_id and (not category and not state):\n raise exc.BadRequest(\n \"Must specify an event type id or both category and state\"\n )\n\n if event_type_id:\n event_type = 
self.session.query(EventType).get(event_type_id)\n else:\n event_type = self.session.query(EventType).filter(\n and_(\n EventType.category == category,\n EventType.state == state\n )\n ).one()\n\n if event_type is None:\n self.write_error(400, message=\"Bad event type\")\n return\n\n category = event_type.category\n state = event_type.state\n\n hostnames = (\n [self.jbody.get(\"hostname\", None)]\n if self.jbody.get(\"hostname\", None) else []\n )\n\n if \"hostnames\" in self.jbody:\n hostnames.extend(self.jbody.get(\"hostnames\"))\n\n log.info(\n \"EVENTS [{}]: Will create event {} {}\".format(\n tx, category, state\n )\n )\n\n log.info(\n \"EVENTS [{}]: Hostnames specified: {}\".format(\n tx, \", \".join(hostnames)\n )\n )\n\n # If a host query was specified, we need to talk to the external\n # query server to resolve this into a list of hostnames\n if \"hostQuery\" in self.jbody:\n query = self.jbody[\"hostQuery\"]\n log.info(\"EVENTS [{}]: Running query {}\".format(tx, query))\n response = PluginHelper.request_get(params={\"query\": query})\n if response.json()[\"status\"] == \"ok\":\n hostnames.extend(response.json()[\"results\"])\n log.info(\n \"EVENTS [{}]: Hostnames after query: {}\".format(\n tx, \", \".join(hostnames)\n )\n )\n\n # If a quest Id was given, look up the labors in that quest and\n # get all the hostnames for those labors.\n if \"questId\" in self.jbody:\n log.info(\"EVENTS [{}]: Looking up quest {}\".format(\n tx, self.jbody[\"questId\"])\n )\n quest = self.session.query(Quest).filter_by(\n id=self.jbody[\"questId\"]\n ).scalar()\n if not quest:\n raise exc.NotFound(\"No such Quest {} found\".format(id))\n for labor in quest.labors:\n hostnames.append(labor.host.hostname)\n\n log.info(\n \"EVENTS [{}]: Hostnames after quest expansion: {}\".format(\n tx, \", \".join(hostnames)\n )\n )\n\n # We need to create a list of hostnames that don't have a Host record\n new_hosts_needed = set(hostnames)\n hosts = (\n self.session.query(Host).filter(Host.hostname.in_(hostnames)).all()\n )\n\n for host in hosts:\n new_hosts_needed.remove(str(host.hostname))\n\n # if we need to create hosts, do them all at once\n if new_hosts_needed:\n log.info(\"EVENTS [{}]: Creating hosts {}\".format(\n tx, \", \".join(new_hosts_needed)\n ))\n Host.create_many(self.session, new_hosts_needed)\n hosts = (\n self.session.query(Host).filter(\n Host.hostname.in_(hostnames)\n ).all()\n )\n\n if not hosts:\n raise exc.BadRequest(\"No hosts found with given list\")\n\n try:\n if len(hosts) > 1:\n # if we are supposed to create many events,\n # we want to do them as a giant batch\n log.info(\"EVENTS [{}]: Creating multiple events\".format(tx))\n events_to_create = []\n for host in hosts:\n events_to_create.append({\n \"host_id\": host.id,\n \"user\": user,\n \"event_type_id\": event_type.id,\n \"note\": note,\n \"tx\": tx\n })\n Event.create_many(self.session, events_to_create, tx)\n else:\n # if we are just creating one event, do it the simple way\n log.info(\"EVENTS [{}]: Creating 1 event\".format(tx))\n event = Event.create(\n self.session, hosts[0], user, event_type, note=note\n )\n\n except IntegrityError as err:\n raise exc.Conflict(err.orig.message)\n except exc.ValidationError as err:\n raise exc.BadRequest(err.message)\n\n log.info(\"EVENTS [{}]: Flushing and committing\".format(tx))\n self.session.flush()\n log.info(\"EVENTS [{}]: Flushed\".format(tx))\n self.session.commit()\n log.info(\"EVENTS [{}]: Committed\".format(tx))\n\n if len(hosts) == 1:\n json = event.to_dict(self.href_prefix)\n 
json[\"href\"] = \"/api/v1/events/{}\".format(event.id)\n self.created(\n \"/api/v1/events/{}\".format(event.id), json\n )\n else:\n # if we created many events, we need to look them up by the TX\n # number to figure out what they were since the were created in bulk\n created_events = self.session.query(Event).filter(Event.tx == tx).all()\n self.created(\n data={\n \"events\": (\n [event.to_dict(self.href_prefix) for event in created_events]\n ),\n \"totalEvents\": len(created_events)\n }\n )\n\n log.info(\"EVENTS [{}]: Created event {} {} for {}\".format(\n tx, category, state,\n \", \".join(hostnames)\n ))", "def process(self, event):\n pass", "def post( self, event ):\n event.timeFired = self.eventTimer.getTime()\n \n if self.debugger.SYMBOLS_ENABLED:\n self.debugger.logMsg(event)\n ##SOME LISTENERS SHOULD START THEIR OWN THREADS (eventually)\n \n for listener in self.eventTypesToListeners.get(type(event),[]):\n\t\t\tlistener.notify(event)\n #NOTE: If the weakref has died, it will be \n #automatically removed, so we don't have \n #to worry about it.", "def test_post_event_on_schedule_page(self):\n json_data = {\n 'title': 'Test Event',\n 'start': '2017-8-8T12:00:00',\n 'end': '2017-8-8T12:00:00',\n 'group': '3'\n }\n\n response = self.app.post(\"/saveEvent\", data=json.dumps(json_data),\n content_type='application/json')\n self.assertTrue(response.status_code, 200)", "def post_event(self, func, *args, **kwargs):\n if not callable(func):\n assert(len(func) == 5)\n self._events.append(func + (log.get_tb(1), time.time()))\n else:\n self._events.append((func, args, kwargs, None, 0, log.get_tb(), time.time()))", "def send(self, events, validation_hit=False, postpone=False, date=None):\n\n # check for any missing or invalid parameters among automatically collected and recommended event types\n self._check_params(events)\n self._check_date_not_in_future(date)\n\n if postpone is True:\n # build event list to send later\n for event in events:\n event[\"_timestamp_micros\"] = self._get_timestamp(time.time())\n self._event_list.append(event)\n else:\n # batch events into sets of 25 events, the maximum allowed.\n batched_event_list = [\n events[event : event + 25] for event in range(0, len(events), 25)\n ]\n # send http post request\n self._http_post(\n batched_event_list, validation_hit=validation_hit, date=date\n )", "def post_activities():\n pass", "def handle_new_events(self, events):\n for event in events:\n self.events.append(\n self.create_event_object(\n event[0],\n event[1],\n int(event[2])))", "def process_post_events(self):\n self.portfolio_handler.to_database_portfolio()", "def register_to_event(request):\n pass", "def post(self):\n # se captura y se parsea a json el body del request recibido por el\n # webhook\n request_body = json.loads(self.request.body)\n\n for body in request_body:\n \"\"\" Evaluar el tipo de evento ya que trae campos diferentes \"\"\"\n logging.info(request_body)\n\n event = str(body['event'])\n correo = str(body['email'])\n numero_folio = str(body['numero_folio'])\n tipo_dte = str(body['tipo_dte'])\n\n logging.info(event)\n\n if event and correo and numero_folio and tipo_dte:\n\n if event == 'processed':\n email_model = EmailModel.search_email(correo, numero_folio, tipo_dte)\n if not email_model == None:\n email_model.smtp_id = body['smtp-id']\n email_model.processed_date = datetime.datetime.fromtimestamp(body['timestamp'])\n email_model.processed_event = event\n email_model.processed_sg_event_id = body['sg_event_id']\n email_model.processed_sg_message_id = 
body['sg_message_id']\n email_model.correo = str(body['email'])\n email_model.numero_folio = str(body['numero_folio'])\n email_model.tipo_dte = str(body['tipo_dte'])\n email_model.put()\n else:\n e = EmailModel()\n e.smtp_id = body['smtp-id']\n e.processed_date = datetime.datetime.fromtimestamp(body['timestamp'])\n e.processed_event = event\n e.processed_sg_event_id = body['sg_event_id']\n e.processed_sg_message_id = body['sg_message_id']\n e.correo = str(body['email'])\n e.numero_folio = str(body['numero_folio'])\n e.tipo_dte = str(body['tipo_dte'])\n e.put()\n\n elif event == 'delivered':\n email_model = EmailModel.search_email(correo, numero_folio, tipo_dte)\n if not email_model == None:\n email_model.smtp_id = body['smtp-id']\n email_model.delivered_date = datetime.datetime.fromtimestamp(body['timestamp'])\n email_model.delivered_event = event\n email_model.delivered_sg_event_id = body['sg_event_id']\n email_model.delivered_sg_message_id = body['sg_message_id']\n email_model.delivered_response = body['response']\n email_model.correo = str(body['email'])\n email_model.numero_folio = str(body['numero_folio'])\n email_model.tipo_dte = str(body['tipo_dte'])\n email_model.put()\n else:\n e = EmailModel()\n e.smtp_id = body['smtp-id']\n e.delivered_date = datetime.datetime.fromtimestamp(body['timestamp'])\n e.delivered_event = event\n e.delivered_sg_event_id = body['sg_event_id']\n e.delivered_sg_message_id = body['sg_message_id']\n e.delivered_response = body['response']\n e.correo = str(body['email'])\n e.numero_folio = str(body['numero_folio'])\n e.tipo_dte = str(body['tipo_dte'])\n e.put()\n\n elif event == 'open':\n model = EmailModel()\n email_model = EmailModel.search_email(correo, numero_folio, tipo_dte)\n if not email_model == None:\n if email_model.opened_first_date == None:\n email_model.opened_first_date = datetime.datetime.fromtimestamp(body['timestamp'])\n email_model.opened_last_date = datetime.datetime.fromtimestamp(body['timestamp'])\n email_model.opened_event = event\n email_model.opened_ip = body['ip']\n email_model.opened_user_agent = body['useragent']\n email_model.opened_sg_event_id = body['sg_event_id']\n email_model.opened_sg_message_id = body['sg_message_id']\n model.email_add_count(email_model)\n email_model.correo = str(body['email'])\n email_model.numero_folio = str(body['numero_folio'])\n email_model.tipo_dte = str(body['tipo_dte'])\n email_model.put()\n else:\n e = EmailModel()\n if e.opened_first_date == None:\n e.opened_first_date = datetime.datetime.fromtimestamp(body['timestamp'])\n e.opened_last_date = datetime.datetime.fromtimestamp(body['timestamp'])\n e.opened_event = event\n e.opened_ip = body['ip']\n e.opened_user_agent = body['useragent']\n e.opened_sg_event_id = body['sg_event_id']\n e.opened_sg_message_id = body['sg_message_id']\n e.email_add_count(e)\n e.correo = str(body['email'])\n e.numero_folio = str(body['numero_folio'])\n e.tipo_dte = str(body['tipo_dte'])\n e.put()\n\n elif event == 'dropped':\n email_model = EmailModel.search_email(correo, numero_folio, tipo_dte)\n if not email_model == None:\n email_model.smtp_id = body['smtp-id']\n email_model.dropped_date = datetime.datetime.fromtimestamp(body['timestamp'])\n email_model.dropped_sg_event_id = body['sg_event_id']\n email_model.dropped_sg_message_id = body['sg_message_id']\n email_model.dropped_reason = body['reason']\n email_model.dropped_event = event\n email_model.correo = str(body['email'])\n email_model.numero_folio = str(body['numero_folio'])\n email_model.tipo_dte = 
str(body['tipo_dte'])\n email_model.put()\n else:\n e = EmailModel()\n e.smtp_id = body['smtp-id']\n e.dropped_date = datetime.datetime.fromtimestamp(body['timestamp'])\n e.dropped_sg_event_id = body['sg_event_id']\n e.dropped_sg_message_id = body['sg_message_id']\n e.dropped_reason = body['reason']\n e.dropped_event = event\n e.correo = str(body['email'])\n e.numero_folio = str(body['numero_folio'])\n e.tipo_dte = str(body['tipo_dte'])\n e.put()\n\n elif event == 'bounce':\n email_model = EmailModel.search_email(correo, numero_folio, tipo_dte)\n if not email_model == None:\n email_model.bounce_date = datetime.datetime.fromtimestamp(body['timestamp'])\n email_model.bounce_event = event\n email_model.bounce_sg_event_id = body['sg_event_id']\n email_model.bounce_sg_message_id = body['sg_message_id']\n email_model.bounce_reason = body['reason']\n email_model.bounce_status = body['status']\n email_model.bounce_type = body['type']\n email_model.correo = str(body['email'])\n email_model.numero_folio = str(body['numero_folio'])\n email_model.tipo_dte = str(body['tipo_dte'])\n email_model.put()\n else:\n e = EmailModel()\n e.bounce_date = datetime.datetime.fromtimestamp(body['timestamp'])\n e.bounce_event = event\n e.bounce_sg_event_id = body['sg_event_id']\n e.bounce_sg_message_id = body['sg_message_id']\n e.bounce_reason = str(body['reason']).decode(\"utf-8\")\n e.bounce_status = body['status']\n e.bounce_type = body['type']\n e.correo = str(body['email'])\n e.numero_folio = str(body['numero_folio'])\n e.tipo_dte = str(body['tipo_dte'])\n e.put()\n\n elif event == 'unsubscribe':\n email_model = EmailModel.search_email(correo, numero_folio, tipo_dte)\n if not email_model == None:\n email_model.unsubscribe_date = datetime.datetime.fromtimestamp(body['timestamp'])\n email_model.unsubscribe_uid = body['uid']\n email_model.unsubscribe_purchase = body['purchase']\n email_model.unsubscribe_id = body['id']\n email_model.unsubscribe_event = body['event']\n email_model.correo = str(body['email'])\n email_model.numero_folio = str(body['numero_folio'])\n email_model.tipo_dte = str(body['tipo_dte'])\n email_model.put()\n else:\n e = EmailModel()\n e.unsubscribe_date = datetime.datetime.fromtimestamp(body['timestamp'])\n e.unsubscribe_uid = body['uid']\n e.unsubscribe_purchase = body['purchase']\n e.unsubscribe_id = body['id']\n e.unsubscribe_event = body['event']\n e.correo = str(body['email'])\n e.numero_folio = str(body['numero_folio'])\n e.tipo_dte = str(body['tipo_dte'])\n e.put()\n else:\n logging.info('body con campos vacios')", "def write_event(self, event):\n self.events_written.append(event)", "async def setup_event(self, context, rule):\n cmd = self.resolve(context)\n\n async def event_wrapper(event):\n if isinstance(cmd, Path):\n callback = self.execute_process\n else:\n callback = self.execute_plugin\n return await callback(context, cmd, rule, event)\n\n # XXX: handle in loop?\n on_cond = rule.select_one(\"on\")\n\n def is_cond(e):\n return (fnmatch.fnmatch(e.kind, on_cond.name) and\n rule.lifecycle(context) == RUNNING)\n return context.bus.subscribe(event_wrapper, is_cond)", "def apply_event(self, event, force_wall_update=False):\n if event is None:\n # This was the first event, ignore\n return\n\n self.events.append(event)\n self.time += event.time_delta\n\n if self.track_src_id is not None:\n if event.src_id == self.track_src_id:\n self._tracked_ranks = dict((sink_id, 0) for sink_id in self._tracked_sink_ids)\n else:\n for sink_id in event.sink_ids:\n if sink_id in self._tracked_sink_ids:\n 
self._tracked_ranks[sink_id] += 1\n\n if force_wall_update:\n self.walls_updated = True\n # Add the event (tweet) to the corresponding lists\n # But do this only when requested.\n for sink_id in event.sink_ids:\n self.sinks[sink_id].append(event)", "def _event(self, level=None, message=None):\n for i in eventhandlers:\n if level == 'write':\n i.write( object_definition=self, message=message )\n else:\n i.debug( object_definition=self, message=message )", "def post_sid_events(ruleset_name, sid):\n message = json.loads(request.stream.read().decode('utf-8'))\n message['sid'] = sid\n result = host.post(ruleset_name, message)\n return jsonify(result)", "def post(self):\n json_body = self.request.body\n if not json_body:\n # TODO(davidbyttow): Log error?\n return\n\n json_body = unicode(json_body, 'utf8')\n logging.info('Incoming: ' + json_body)\n\n context, events = robot_abstract.ParseJSONBody(json_body)\n for event in events:\n try:\n self._robot.HandleEvent(event, context)\n except:\n logging.error(traceback.format_exc())\n\n json_response = robot_abstract.SerializeContext(context,\n self._robot.version)\n logging.info('Outgoing: ' + json_response)\n\n # Build the response.\n self.response.headers['Content-Type'] = 'application/json; charset=utf-8'\n self.response.out.write(json_response.encode('utf-8'))" ]
[ "0.63994485", "0.6042524", "0.6003626", "0.5981115", "0.5941807", "0.5918527", "0.5845204", "0.5819378", "0.58176184", "0.58072335", "0.57101154", "0.5693851", "0.5638689", "0.56246656", "0.55693597", "0.5526446", "0.55139947", "0.54291743", "0.54178923", "0.5412167", "0.54114", "0.541027", "0.53846085", "0.53118026", "0.53051054", "0.5288992", "0.52795607", "0.5270202", "0.52664346", "0.52548724" ]
0.6678471
0
Post sid events to the ruleset
def post_sid_events(ruleset_name, sid): message = json.loads(request.stream.read().decode('utf-8')) message['sid'] = sid result = host.post(ruleset_name, message) return jsonify(result)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def post_events(ruleset_name):\n message = json.loads(request.stream.read().decode('utf-8'))\n result = host.post(ruleset_name, message)\n return jsonify(result)", "def set_state_sid_request(ruleset_name, sid):\n message = json.loads(request.stream.read().decode('utf-8'))\n message['sid'] = sid\n result = host.patch_state(ruleset_name, message)\n return jsonify(result)", "def sid(self, sid):\n self._sid = sid", "def post(self, request, *args, **kwargs):\n \n id = args[0] if args else list(kwargs.values())[0]\n try:\n ssn = Subscription.objects.get(id=id)\n except Subscription.DoesNotExist:\n logger.error(\n f'Received unwanted subscription {id} POST request! Sending status '\n '410 back to hub.'\n )\n return Response('Unwanted subscription', status=410)\n \n ssn.update(time_last_event_received=now())\n self.handler_task.delay(request.data)\n return Response('') # TODO", "def informed_consent_on_post_save(sender, instance, raw, created, **kwargs):\n if not raw:\n if created:\n pass\n # instance.registration_update_or_create()\n # update_model_fields(instance=instance,\n # model_cls=['subject_identifier', instance.subject_identifier])\n try:\n OnSchedule.objects.get(\n subject_identifier=instance.subject_identifier, )\n except OnSchedule.DoesNotExist:\n onschedule_model = 'training_subject.onschedule'\n put_on_schedule(schedule_name='training_subject_visit_schedule', instance=instance, onschedule_model=onschedule_model)", "def post_event(self, event):\r\n from evennia.scripts.models import ScriptDB\r\n\r\n if event.public_event:\r\n event_manager = ScriptDB.objects.get(db_key=\"Event Manager\")\r\n event_manager.post_event(event, self.owner.player, event.display())", "def post(self, event, *args, **kwargs):\n self.inq.Signal((event, args, kwargs))", "def post(self, request):\n return self.serviceHandler.addEvent(request.data)", "def register_to_event(request):\n pass", "def setFilterOnRule(request):\n\t\n\tlogger = logging.getLogger(__name__)\n\t\n\t# Get some initial post values for processing.\n\truleIds = request.POST.getlist('id')\n\tsensors = request.POST.getlist('sensors')\n\tcommentString = request.POST['comment']\n\tforce = request.POST['force']\n\tresponse = []\n\t\n\t# If the ruleIds list is empty, it means a SID has been entered manually.\n\tif len(ruleIds) == 0:\n\t\t# Grab the value from the POST.\n\t\truleSID = request.POST['sid']\n\t\t\n\t\t# Match the GID:SID pattern, if its not there, throw exception.\n\t\ttry:\n\t\t\tmatchPattern = r\"(\\d+):(\\d+)\"\n\t\t\tpattern = re.compile(matchPattern)\n\t\t\tresult = pattern.match(ruleSID)\n\t\t\t\n\t\t\truleGID = result.group(1)\n\t\t\truleSID = result.group(2)\n\t\texcept:\n\t\t\tresponse.append({'response': 'invalidGIDSIDFormat', 'text': 'Please format in the GID:SID syntax.'})\n\t\t\tlogger.warning(\"Invalid GID:SID syntax provided: \"+str(ruleSID)+\".\")\n\t\t\treturn HttpResponse(json.dumps(response))\n\t\t\n\t\t# Try to find a generator object with the GID supplied, if it doesnt exist, throw exception.\n\t\ttry:\n\t\t\tg = Generator.objects.filter(GID=ruleGID).count() # There might be more than one.\n\t\t\tif g == 0:\n\t\t\t\tresponse.append({'response': 'gidDoesNotExist', 'text': 'GID '+ruleGID+' does not exist.'})\n\t\t\t\tlogger.warning(\"'GID \"+str(ruleGID)+\" could not be found.\")\n\t\t\t\treturn HttpResponse(json.dumps(response))\n\t\texcept Generator.DoesNotExist:\n\t\t\tresponse.append({'response': 'gidDoesNotExist', 'text': 'GID '+ruleGID+' does not exist.'})\n\t\t\tlogger.warning(\"'GID \"+str(ruleGID)+\" could 
not be found.\")\n\t\t\treturn HttpResponse(json.dumps(response))\n\t\t\n\t\t# Try to find a rule object with the SID supplied, if it doesnt exist, throw exception.\n\t\ttry:\n\t\t\truleIds.append(Rule.objects.get(SID=ruleSID).id)\n\t\texcept Rule.DoesNotExist:\n\t\t\tresponse.append({'response': 'sidDoesNotExist', 'text': 'SID '+ruleSID+' does not exist.'})\n\t\t\tlogger.warning(\"'SID \"+str(ruleSID)+\" could not be found.\")\n\t\t\treturn HttpResponse(json.dumps(response))\n\t\t\n\t# If force is false, it means we have to check everything.\t\t\t\t\n\tif force == \"False\":\n\t\t\n\t\tfor sensor in sensors:\n\t\t\ttry:\n\t\t\t\tSensor.objects.get(id=int(sensor))\n\t\t\texcept Sensor.DoesNotExist:\n\t\t\t\tresponse.append({'response': 'sensorDoesNotExist', 'text': 'Sensor with DB ID '+sensor+' does not exist.'})\n\t\t\t\tlogger.warning(\"Sensor with DB ID \"+str(sensor)+\" could not be found.\")\n\t\t\t\treturn HttpResponse(json.dumps(response))\t\t\t\n\t\t\n\t\t# We iterate through all selected sensors and rules to see if a threshold already exists.\n\t\t# We warn the user if there are thresholds. We also check to see if the rule objects selected exist. \t\n\t\tfor sensor in sensors:\n\t\t\ts = Sensor.objects.get(id=sensor)\n\n\t\t\tfor ruleId in ruleIds:\n\t\t\t\ttry:\n\t\t\t\t\tr = Rule.objects.get(id=ruleId)\n\t\t\t\t\tif r.eventFilters.filter(sensor=s).count() > 0:\n\t\t\t\t\t\tif len(response) == 0:\n\t\t\t\t\t\t\tresponse.append({'response': 'thresholdExists', 'text': 'Thresholds already exists, do you want to overwrite?.', 'sids': []})\n\t\t\t\t\t\tresponse[0]['sids'].append(r.SID)\n\t\t\t\t\t\tresponse[0]['sids']=list(set(response[0]['sids']))\n\t\t\t\texcept Rule.DoesNotExist:\n\t\t\t\t\tresponse.append({'response': 'ruleDoesNotExist', 'text': 'Rule with DB ID '+ruleId+' does not exist.'})\n\t\t\t\t\tlogger.warning(\"Rule with DB ID \"+str(ruleId)+\" could not be found.\")\n\t\t\t\t\treturn HttpResponse(json.dumps(response))\n\t\t\t\n\t\t# Warn the user if the comment string is empty.\n\t\tif commentString == \"\":\n\t\t\tresponse.append({'response': 'noComment', 'text': 'You have not set any comments on this action, are you sure you want to proceed?.'})\n\t\t\n\t\t# Warn the user since all sensors is default.\n\t\tif \"1\" in sensors:\n\t\t\tresponse.append({'response': 'allSensors', 'text': 'You are setting this threshold on all sensors, are you sure you want to do that?.'})\n\t\t\n\t\t# If any responses were triggered, return them. 
Else, we set force to true and implement the threshold.\n\t\tif len(response) > 0:\n\t\t\treturn HttpResponse(json.dumps(response))\n\t\telse:\n\t\t\tforce=\"True\"\n\t\n\t# The user either wants us to continue or there were no warnings.\n\tif force == \"True\":\n\t\tfilterType = request.POST['filterType']\n\t\ttcount = int(request.POST['count'])\n\t\ttseconds = int(request.POST['seconds'])\n\t\t\n\t\tif filterType == 'eventFilter':\n\t\t\tttype = int(request.POST['type'])\n\t\t\n\t\t\t# We make sure type is in the correct range.\n\t\t\tif ttype not in range(1,4):\n\t\t\t\tresponse.append({'response': 'typeOutOfRange', 'text': 'Type value out of range.'})\n\t\t\t\tlogger.warning(\"Type value out of range: \"+str(ttype)+\".\")\n\t\t\t\treturn HttpResponse(json.dumps(response))\n\t\n\t\tttrack = int(request.POST['track'])\n\t\t\n\t\t# We make sure track is in the correct range.\n\t\tif ttrack not in range(1,3):\n\t\t\tresponse.append({'response': 'trackOutOfRange', 'text': 'Track value out of range.'})\n\t\t\tlogger.warning(\"Track value out of range: \"+str(ttrack)+\".\")\n\t\t\treturn HttpResponse(json.dumps(response))\n\t\t\n\t\t# If this is an edit, we have to do some things with the comment object.\n\t\tif request.POST.get('edit'):\n\t\t\teditid = int(request.POST['edit'])\n\t\t\tif filterType == 'eventFilter':\n\t\t\t\ttry:\n\t\t\t\t\t# Grab the object and delete its comment object.\n\t\t\t\t\teFilter = EventFilter.objects.get(id=editid)\n\t\t\t\t\tif eFilter.comment is not None:\n\t\t\t\t\t\tcomment = Comment.objects.get(id=eFilter.comment.id)\n\t\t\t\t\t\tcomment.delete()\n\t\t\t\t\t\n\t\t\t\texcept Comment.DoesNotExist:\n\t\t\t\t\tlogger.warning(\"Could not find Comment with id \"+str(eFilter.comment.id)+\".\")\n\t\t\t\texcept EventFilter.DoesNotExist:\n\t\t\t\t\tlogger.warning(\"Could not find EventFilter with id \"+str(editid)+\".\")\n\t\t\t\t\t\n\t\t\telif filterType == 'detectionFilter':\n\t\t\t\ttry:\n\t\t\t\t\t# Grab the object and delete its comment object.\n\t\t\t\t\tdFilter = DetectionFilter.objects.get(id=editid)\n\t\t\t\t\tif dFilter.comment is not None:\n\t\t\t\t\t\tcomment = Comment.objects.get(id=dFilter.comment.id)\n\t\t\t\t\t\tcomment.delete()\n\t\t\t\t\t\t\n\t\t\t\texcept Comment.DoesNotExist:\n\t\t\t\t\tlogger.warning(\"Could not find Comment with id \"+str(dFilter.comment.id)+\".\")\n\t\t\t\texcept DetectionFilter.DoesNotExist:\n\t\t\t\t\tlogger.warning(\"Could not find DetecionFilter with id \"+str(editid)+\".\")\n\t\t\n\t\t# We iterate over all the rules and sensors to implement the threshold.\n\t\ttry:\n\t\t\tfor ruleId in ruleIds:\n\t\t\t\tfor sensorId in sensors:\n\t\t\t\t\ttrule = Rule.objects.get(id=ruleId)\n\t\t\t\t\ttsensor = Sensor.objects.get(id=int(sensorId))\n\t\t\t\t\t\n\t\t\t\t\ttry:\n\t\t\t\t\t\tif filterType == 'eventFilter':\n\t\t\t\t\t\t\tcomment = Comment.objects.create(user=request.user,comment=commentString, type=\"newEventFilter\")\n\t\t\t\t\t\t\targuments = {'rule':trule, 'sensor':tsensor, 'comment':comment, 'eventFilterType':ttype, 'track':ttrack, 'count':tcount, 'seconds':tseconds}\n\t\t\t\t\t\t\tfilterObject = EventFilter.objects.get(rule=trule, sensor=tsensor)\n\t\t\t\t\t\t\tfilterObject.eventFilterType = ttype\n\t\t\t\t\t\telif filterType == 'detectionFilter':\n\t\t\t\t\t\t\tcomment = Comment.objects.create(user=request.user,comment=commentString, type=\"newDetectionFilter\")\n\t\t\t\t\t\t\targuments = {'rule':trule, 'sensor':tsensor, 'comment':comment, 'track':ttrack, 'count':tcount, 'seconds':tseconds}\n\t\t\t\t\t\t\tfilterObject = 
DetectionFilter.objects.get(rule=trule, sensor=tsensor)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\traise InvalidValueError(filterType+\" is not a valid filter type!\")\n\t\t\t\t\t\t\n\t\t\t\t\t\tfilterObject.track = ttrack\n\t\t\t\t\t\tfilterObject.count = tcount\n\t\t\t\t\t\tfilterObject.seconds = tseconds\n\t\t\t\t\t\tfilterObject.comment = comment\n\t\t\t\t\t\tfilterObject.save()\n\t\t\t\t\t\tlogger.info(\"EventFilter successfully updated on rule: \"+str(trule)+\".\")\n\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\texcept EventFilter.DoesNotExist:\n\t\t\t\t\t\tfilterObject = EventFilter.objects.create(**arguments)\n\t\t\t\t\t\tfilterObject.save()\n\t\t\t\t\t\tlogger.info(\"event_filter successfully added to rule: \"+str(trule)+\".\")\n\t\t\t\t\texcept DetectionFilter.DoesNotExist:\n\t\t\t\t\t\tfilterObject = DetectionFilter.objects.create(**arguments)\n\t\t\t\t\t\tfilterObject.save()\n\t\t\t\t\t\tlogger.info(\"detection_filter successfully added to rule: \"+str(trule)+\".\")\n\t\t\t\n\t\t\tresponse.append({'response': 'filterAdded', 'text': filterType+' successfully added.'})\n\t\t\n\t\t\treturn HttpResponse(json.dumps(response))\n\t\texcept Exception as e: # Something went wrong.\n\t\t\tresponse.append({'response': 'addFilterFailure', 'text': 'Failed when trying to add filter.'})\n\t\t\tlogger.error(\"Failed when trying to add filter: \"+e.message)\n\t\t\treturn HttpResponse(json.dumps(response))", "def store_event(self, violations):\n current_time = datetime.now().strftime(\"%Y/%m/%d %H:%M:%S\")\n insert_query = \"\"\"INSERT INTO social_distancing (Location, Local_Time, Violations) VALUES ('{}', '{}', {})\"\"\".format(self.location, current_time, violations)\n self.off_chain.insert(insert_query)\n\n event_id = self.off_chain.select(\"\"\"SELECT LAST_INSERT_ID() FROM social_distancing\"\"\")[0][0]\n self.on_chain.store_hash(event_id, self.location, current_time, violations)", "def test_post_event_on_schedule_page(self):\n json_data = {\n 'title': 'Test Event',\n 'start': '2017-8-8T12:00:00',\n 'end': '2017-8-8T12:00:00',\n 'group': '3'\n }\n\n response = self.app.post(\"/saveEvent\", data=json.dumps(json_data),\n content_type='application/json')\n self.assertTrue(response.status_code, 200)", "def _push(self, server):\n defns = [self.get_id(ident) for ident in list(self.ids)]\n #for ident in list(self.ids):\n # defn = self.get_id(ident)\n if len(defns) == 0:\n return\n self.app.logger.info(f\"Updating {server} with {len(defns)} records\")\n url = f\"{server}/add_record\"\n try:\n resp = requests.post(url, json=defns)\n except Exception as e:\n self.app.logger.error(str(e))\n return\n if not resp.ok:\n self.app.logger.error(f\"{resp.reason} {resp.content}\")\n return\n self._server_updated[server] = True", "def post(self, slug = None, eid = None):\n uid = self.request.form.get(\"uid\")\n status = self.request.form.get(\"status\") # can be join, maybe, notgoubg\n event = self.barcamp.get_event(eid)\n \n user = self.app.module_map.userbase.get_user_by_id(uid)\n\n reg = RegistrationService(self, user)\n try:\n status = reg.set_status(eid, status, force=True)\n except RegistrationError, e:\n print \"a registration error occurred\", e\n raise ProcessingError(str(e))\n return \n\n return {'status' : 'success', 'reload' : True}", "def events(self):", "def post(self):\n\n # we need a unique tx number so we can look these back up again\n # as well as for logging\n # FIXME: how can we guarantee uniqueness here?\n tx = int(time.time() * 100000) + random.randrange(10000, 99999)\n\n log.info(\"EVENTS [{}]: Creating 
events\".format(tx))\n\n try:\n user = self.jbody[\"user\"]\n if not EMAIL_REGEX.match(user):\n user += \"@\" + self.domain\n event_type_id = self.jbody.get(\"eventTypeId\", None)\n category = self.jbody.get(\"category\", None)\n state = self.jbody.get(\"state\", None)\n note = self.jbody.get(\"note\", None)\n except KeyError as err:\n raise exc.BadRequest(\n \"Missing Required Argument: {}\".format(err.message)\n )\n except ValueError as err:\n raise exc.BadRequest(err.message)\n\n if not event_type_id and (not category and not state):\n raise exc.BadRequest(\n \"Must specify an event type id or both category and state\"\n )\n\n if event_type_id:\n event_type = self.session.query(EventType).get(event_type_id)\n else:\n event_type = self.session.query(EventType).filter(\n and_(\n EventType.category == category,\n EventType.state == state\n )\n ).one()\n\n if event_type is None:\n self.write_error(400, message=\"Bad event type\")\n return\n\n category = event_type.category\n state = event_type.state\n\n hostnames = (\n [self.jbody.get(\"hostname\", None)]\n if self.jbody.get(\"hostname\", None) else []\n )\n\n if \"hostnames\" in self.jbody:\n hostnames.extend(self.jbody.get(\"hostnames\"))\n\n log.info(\n \"EVENTS [{}]: Will create event {} {}\".format(\n tx, category, state\n )\n )\n\n log.info(\n \"EVENTS [{}]: Hostnames specified: {}\".format(\n tx, \", \".join(hostnames)\n )\n )\n\n # If a host query was specified, we need to talk to the external\n # query server to resolve this into a list of hostnames\n if \"hostQuery\" in self.jbody:\n query = self.jbody[\"hostQuery\"]\n log.info(\"EVENTS [{}]: Running query {}\".format(tx, query))\n response = PluginHelper.request_get(params={\"query\": query})\n if response.json()[\"status\"] == \"ok\":\n hostnames.extend(response.json()[\"results\"])\n log.info(\n \"EVENTS [{}]: Hostnames after query: {}\".format(\n tx, \", \".join(hostnames)\n )\n )\n\n # If a quest Id was given, look up the labors in that quest and\n # get all the hostnames for those labors.\n if \"questId\" in self.jbody:\n log.info(\"EVENTS [{}]: Looking up quest {}\".format(\n tx, self.jbody[\"questId\"])\n )\n quest = self.session.query(Quest).filter_by(\n id=self.jbody[\"questId\"]\n ).scalar()\n if not quest:\n raise exc.NotFound(\"No such Quest {} found\".format(id))\n for labor in quest.labors:\n hostnames.append(labor.host.hostname)\n\n log.info(\n \"EVENTS [{}]: Hostnames after quest expansion: {}\".format(\n tx, \", \".join(hostnames)\n )\n )\n\n # We need to create a list of hostnames that don't have a Host record\n new_hosts_needed = set(hostnames)\n hosts = (\n self.session.query(Host).filter(Host.hostname.in_(hostnames)).all()\n )\n\n for host in hosts:\n new_hosts_needed.remove(str(host.hostname))\n\n # if we need to create hosts, do them all at once\n if new_hosts_needed:\n log.info(\"EVENTS [{}]: Creating hosts {}\".format(\n tx, \", \".join(new_hosts_needed)\n ))\n Host.create_many(self.session, new_hosts_needed)\n hosts = (\n self.session.query(Host).filter(\n Host.hostname.in_(hostnames)\n ).all()\n )\n\n if not hosts:\n raise exc.BadRequest(\"No hosts found with given list\")\n\n try:\n if len(hosts) > 1:\n # if we are supposed to create many events,\n # we want to do them as a giant batch\n log.info(\"EVENTS [{}]: Creating multiple events\".format(tx))\n events_to_create = []\n for host in hosts:\n events_to_create.append({\n \"host_id\": host.id,\n \"user\": user,\n \"event_type_id\": event_type.id,\n \"note\": note,\n \"tx\": tx\n })\n 
Event.create_many(self.session, events_to_create, tx)\n else:\n # if we are just creating one event, do it the simple way\n log.info(\"EVENTS [{}]: Creating 1 event\".format(tx))\n event = Event.create(\n self.session, hosts[0], user, event_type, note=note\n )\n\n except IntegrityError as err:\n raise exc.Conflict(err.orig.message)\n except exc.ValidationError as err:\n raise exc.BadRequest(err.message)\n\n log.info(\"EVENTS [{}]: Flushing and committing\".format(tx))\n self.session.flush()\n log.info(\"EVENTS [{}]: Flushed\".format(tx))\n self.session.commit()\n log.info(\"EVENTS [{}]: Committed\".format(tx))\n\n if len(hosts) == 1:\n json = event.to_dict(self.href_prefix)\n json[\"href\"] = \"/api/v1/events/{}\".format(event.id)\n self.created(\n \"/api/v1/events/{}\".format(event.id), json\n )\n else:\n # if we created many events, we need to look them up by the TX\n # number to figure out what they were since the were created in bulk\n created_events = self.session.query(Event).filter(Event.tx == tx).all()\n self.created(\n data={\n \"events\": (\n [event.to_dict(self.href_prefix) for event in created_events]\n ),\n \"totalEvents\": len(created_events)\n }\n )\n\n log.info(\"EVENTS [{}]: Created event {} {} for {}\".format(\n tx, category, state,\n \", \".join(hostnames)\n ))", "def write_event(self, event):\n self.events_written.append(event)", "def put(self, uid):\n json_data = request.get_json()\n event_dicts = json_data[\"data\"]\n ids = list()\n\n for event_dict in event_dicts:\n event_id = create_fb_event(event_dict, uid)\n ids.append(event_id)\n\n return {\n \"ids\": ids\n }", "def save_event(self, data):\n rdb.table(self.rdb_table).insert(data)", "def __call__(self, event):\n post_event(event, self.baseUrl, self.filterName)", "def publishEvent(eventName,publisher, msg):", "def all_events_request():\n result = []\n message = json.loads(request.stream.read().decode('utf-8'))\n for ruleset_name in host.list_rulesets():\n result.append(host.post(ruleset_name, message))\n return jsonify(result)", "def somaSendTimeEvent(ts, seqid):\n\n eventset = []\n seqstr = struct.pack(\">I\", seqid)\n\n esstr = struct.pack(\">HBBQxx\",\n 1, 0x10, 0x0, ts)\n return seqstr + esstr", "def write_to_splunk(**kwargs):\n event = helper.new_event(**kwargs)\n ew.write_event(event)", "def save(self, event_stream: List[DomainEvent], aggregate_root_id: str):\n\n \"\"\" overwriting the event stream is not ok \"\"\"\n if aggregate_root_id not in self.__store:\n self.__store[aggregate_root_id] = event_stream\n else:\n self.__store[aggregate_root_id] += event_stream", "def handle_outgoing_sub_events(sub_events: List[any]):\n events = [core.create_event(sub_event) for sub_event in sub_events]\n core.push_events(events)", "def send_event(self, dsts):\n\n # get a list of the port numbers to send a message to\n if dsts: \n dsts_copy = dsts\n dsts = [socket_connections[clock_id] for clock_id in dsts] \n\n self.send_event_helper(dsts)\n\n # keep track of the logical clock time when the message was sent\n # so that it can be put in the log\n cur_time = self.clock_time\n\n # update the logical clock time\n self.clock_time += 1\n\n # log sending the message\n self.log(\" Sending to \" + str(dsts_copy) + \" at LC time: \" + str(cur_time))", "def register(self):\n self._log.debug(\"Registering Nsr op data path %s as publisher\",\n NsrOpDataDtsHandler.XPATH)\n\n hdl = rift.tasklets.DTS.RegistrationHandler()\n with self._dts.group_create() as group:\n self._regh = group.register(xpath=NsrOpDataDtsHandler.XPATH,\n 
handler=hdl,\n flags=rwdts.Flag.PUBLISHER | rwdts.Flag.NO_PREP_READ)", "def setSuppressOnRule(request):\n\t\n\tlogger = logging.getLogger(__name__)\n\t\n\t# Get some initial post values for processing.\n\truleIds = request.POST.getlist('id')\n\tsensors = request.POST.getlist('sensors')\n\tcommentString = request.POST['comment']\n\tforce = request.POST['force']\n\tresponse = []\n\t\n\t# If the ruleIds list is empty, it means a SID has been entered manually.\n\tif len(ruleIds) == 0:\n\t\t# Grab the value from the POST.\n\t\truleSID = request.POST['sid']\n\t\t\n\t\t# Match the GID:SID pattern, if its not there, throw exception.\n\t\ttry:\n\t\t\tmatchPattern = r\"(\\d+):(\\d+)\"\n\t\t\tpattern = re.compile(matchPattern)\n\t\t\tresult = pattern.match(ruleSID)\n\t\t\t\n\t\t\truleGID = result.group(1)\n\t\t\truleSID = result.group(2)\n\t\texcept:\n\t\t\tresponse.append({'response': 'invalidGIDSIDFormat', 'text': 'Please format in the GID:SID syntax.'})\n\t\t\tlogger.warning(\"Invalid GID:SID syntax provided: \"+str(ruleSID)+\".\")\n\t\t\treturn HttpResponse(json.dumps(response))\n\t\t\n\t\t# Try to find a generator object with the GID supplied, if it doesnt exist, throw exception.\n\t\ttry:\n\t\t\tg = Generator.objects.filter(GID=ruleGID).count() # There might be more than one.\n\t\t\tif g == 0:\n\t\t\t\tresponse.append({'response': 'gidDoesNotExist', 'text': 'GID '+ruleGID+' does not exist.'})\n\t\t\t\tlogger.warning(\"'GID \"+str(ruleGID)+\" could not be found.\")\n\t\t\t\treturn HttpResponse(json.dumps(response))\n\t\texcept Generator.DoesNotExist:\n\t\t\tresponse.append({'response': 'gidDoesNotExist', 'text': 'GID '+ruleGID+' does not exist.'})\n\t\t\tlogger.warning(\"'GID \"+str(ruleGID)+\" could not be found.\")\n\t\t\treturn HttpResponse(json.dumps(response))\n\t\t\n\t\t# Try to find a rule object with the SID supplied, if it doesnt exist, throw exception.\n\t\ttry:\n\t\t\truleIds.append(Rule.objects.get(SID=ruleSID).id)\n\t\texcept Rule.DoesNotExist:\n\t\t\tresponse.append({'response': 'sidDoesNotExist', 'text': 'SID '+ruleSID+' does not exist.'})\n\t\t\tlogger.warning(\"'SID \"+str(ruleSID)+\" could not be found.\")\n\t\t\treturn HttpResponse(json.dumps(response))\n\t\n\t# If force is false, it means we have to check everything.\t\n\tif force == \"False\":\n\t\t\n\t\tsensorList = []\n\t\t\n\t\t# If we didnt pick all sensors, we gotta check to see if the selected ones exist. \n\t\t# We also populate a list for later use.\n\n\t\t\t\n\t\t# If we selected all sensors, generate a list of all of their ids.\n\t\t\n\t\tfor sensor in sensors:\n\t\t\ttry:\n\t\t\t\tSensor.objects.get(id=int(sensor))\n\t\t\t\tsensorList.append(sensor)\n\t\t\texcept Sensor.DoesNotExist:\n\t\t\t\tresponse.append({'response': 'sensorDoesNotExist', 'text': 'Sensor with DB ID '+str(sensor)+' does not exist.'})\n\t\t\t\tlogger.warning(\"Sensor with DB ID \"+str(sensor)+\" could not be found.\")\n\t\t\t\treturn HttpResponse(json.dumps(response))\t\n\t\t\n\t\t# We iterate through all selected sensors and rules to see if a threshold already exists.\n\t\t# We warn the user if there are thresholds. We also check to see if the rule objects selected exist. 
\t\n\t\tfor sensor in sensorList:\n\t\t\ts = Sensor.objects.get(id=sensor)\n\t\t\tfor ruleId in ruleIds:\n\t\t\t\ttry:\n\t\t\t\t\tr = Rule.objects.get(id=ruleId)\n\t\t\t\t\tif r.suppress.filter(sensor=s).count() > 0:\n\t\t\t\t\t\tif len(response) == 0:\n\t\t\t\t\t\t\tresponse.append({'response': 'suppressExists', 'text': 'Suppressions already exists, do you want to overwrite?.', 'sids': []})\n\t\t\t\t\t\tresponse[0]['sids'].append(r.SID)\n\t\t\t\t\t\tresponse[0]['sids']=list(set(response[0]['sids']))\n\t\t\t\texcept Rule.DoesNotExist:\n\t\t\t\t\tresponse.append({'response': 'ruleDoesNotExist', 'text': 'Rule with DB ID '+ruleId+' does not exist.'})\n\t\t\t\t\tlogger.warning(\"Rule with DB ID \"+str(ruleId)+\" could not be found.\")\n\t\t\t\t\treturn HttpResponse(json.dumps(response))\n\t\t\n\t\t# Since this form lets the user input one or more IPv4 addresses, we have to check them.\n\t\tipString = request.POST['ip']\n\t\t\n\t\t# The string cant be empty.\n\t\tif ipString == \"\":\n\t\t\tresponse.append({'response': 'noIPGiven', 'text': 'You need to supply one or more IP addresses.'})\n\t\t\treturn HttpResponse(json.dumps(response))\n\t\t\n\t\tbadIps = []\n\t\tbadIpTest = False\n\t\t\n\t\t# This pattern matches for valid IPv4 with subnet notation (0.0.0.0/0 - 255.255.255.255/32).\n\t\tipPattern = re.compile(patterns.ConfigPatterns.VALIDIPMASK)\n\t\t\n\t\t# Iterate over each IP given and check it for validity.\n\t\tfor ip in re.finditer(\"[^,;\\s]+\", ipString):\n\t\t\ttest = ipPattern.match(ip.group(0))\n\t\t\tif not test:\n\t\t\t\tbadIps.append(ip.group(0))\n\t\t\t\tbadIpTest = True\n\t\t\n\t\t# Express error if one of the IPs is invalid as IPv4.\n\t\tif badIpTest:\n\t\t\tresponse.append({'response': 'badIP', 'text': 'is not valid IPv4.', 'ips': badIps})\n\t\t\tlogger.warning(\"User provided bad IP format.\")\n\t\t\t\n\t\t# Warn the user if the comment string is empty.\n\t\tif commentString == \"\":\n\t\t\tresponse.append({'response': 'noComment', 'text': 'You have not set any comments on this action, are you sure you want to proceed?.'})\n\t\t\n\t\t# Warn the user since all sensors is default.\n\t\tif \"1\" in sensors:\n\t\t\tresponse.append({'response': 'allSensors', 'text': 'You are setting this suppression on all sensors, are you sure you want to do that?.'})\n\t\t\n\t\t# If any responses were triggered, return them. 
Else, we set force to true and implement the threshold.\n\t\tif len(response) > 0:\n\t\t\treturn HttpResponse(json.dumps(response))\n\t\telse:\n\t\t\tforce=\"True\"\n\t\n\t# The user either wants us to continue or there were no warnings.\n\tif force == \"True\":\n\t\tstrack = int(request.POST['track'])\n\t\t\n\t\t# We make sure track is in the correct range.\n\t\tif strack not in range(1,3):\n\t\t\tresponse.append({'response': 'trackOutOfRange', 'text': 'Track value out of range.'})\n\t\t\tlogger.warning(\"Track value out of range: \"+str(strack)+\".\")\n\t\t\treturn HttpResponse(json.dumps(response))\n\t\t\n\t\t# We do the IP matching again since we could have submitted them again since last check.\n\t\t# Since this form lets the user input one or more IPv4 addresses, we have to check them.\n\t\tipString = request.POST['ip']\n\t\t\n\t\t# The string cant be empty.\n\t\tif ipString == \"\":\n\t\t\tresponse.append({'response': 'noIPGiven', 'text': 'You need to supply one or more IP addresses.'})\n\t\t\tlogger.warning(\"User provided bad IP format.\")\n\t\t\treturn HttpResponse(json.dumps(response))\n\t\t\n\t\tgoodIps = []\n\t\t\n\t\t# This pattern matches for valid IPv4 with subnet notation (0.0.0.0/0 - 255.255.255.255/32).\n\t\tipPattern = re.compile(patterns.ConfigPatterns.VALIDIPMASK)\n\t\t\n\t\t# Iterate over each IP given and check it for validity.\n\t\t# We put it in the list we use for making SuppressAddresses later.\n\t\tfor ip in re.finditer(\"[^,;\\s]+\", ipString):\n\t\t\ttest = ipPattern.match(ip.group(0))\n\t\t\tif test:\n\t\t\t\tgoodIps.append(ip.group(0))\n\t\t\t\t\n\t\tsuppressAddressList = []\n\t\t\n\t\t# We iterate over all IPs that were good and create SuppressAddress objects and put them in the \n\t\t# suppressAddressList we use for creating Suppress objects later.\n\t\t# We also check if the IP already has a SuppressAddress object and just put that in the suppressAddressList.\n\t\ttry:\n\t\t\tfor ip in goodIps:\n\t\t\t\tsa = SuppressAddress.objects.filter(ipAddress=ip).count()\n\t\t\t\tif sa > 0:\n\t\t\t\t\tsuppressAddressList.append(SuppressAddress.objects.get(ipAddress=ip))\n\t\t\t\telse:\n\t\t\t\t\tsa = SuppressAddress.objects.create(ipAddress=ip)\n\t\t\t\t\tlogger.info(\"SuppressAddress successfully created for IP: \"+str(ip)+\".\")\n\t\t\t\t\tsuppressAddressList.append(sa)\n\t\t\t\t\n\t\texcept:\n\t\t\tresponse.append({'response': 'addSuppressAddressFailure', 'text': 'Failed when trying to add suppression addresses.'})\n\t\t\tlogger.error(\"Failed when trying to add suppression addresses.\")\n\t\t\treturn HttpResponse(json.dumps(response))\n\t\t\n\t\t# If this is an edit, we have to do some things with the comment object.\n\t\tif request.POST.get('edit'):\n\t\t\teditid = int(request.POST['edit'])\n\t\t\ttry:\n\t\t\t\t# Grab the object and delete its comment object.\n\t\t\t\tsuppress = Suppress.objects.get(id=editid)\n\t\t\t\tif suppress.comment is not None:\n\t\t\t\t\tcomment = Comment.objects.get(id=suppress.comment.id)\n\t\t\t\t\tcomment.delete()\n\t\t\t\t\t\n\t\t\texcept Comment.DoesNotExist:\n\t\t\t\tlogger.warning(\"Could not find Comment with id \"+str(suppress.comment.id)+\".\")\n\t\t\texcept Suppress.DoesNotExist:\n\t\t\t\tlogger.warning(\"Could not find Suppress with id \"+str(editid)+\".\")\n\t\t\n\t\t\n\t\t\n\n\t\t# We iterate over all the rules and sensors to implement the suppression.\n\t\ttry:\n\t\t\tfor ruleId in ruleIds:\n\t\t\t\tfor sensorId in sensors:\n\t\t\t\t\tsrule = Rule.objects.get(id=ruleId)\n\t\t\t\t\tssensor = 
Sensor.objects.get(id=int(sensorId))\n\t\t\t\t\ttry:\n\t\t\t\t\t\tcomment = Comment.objects.create(user=request.user,comment=commentString, type=\"newSuppression\")\n\t\t\t\t\texcept:\n\t\t\t\t\t\tlogger.warning(\"Could not create Comment.\")\n\t\t\t\t\t# We check to see if a suppression already exists, in that case we just update it. If not, we create one.\n\t\t\t\t\ttry:\n\t\t\t\t\t\ts = Suppress.objects.get(rule=srule, sensor=ssensor)\n\n\t\t\t\t\t\ts.comment = comment\n\t\t\t\t\t\ts.track = strack\n\t\t\t\t\t\tfor address in suppressAddressList:\n\t\t\t\t\t\t\ts.addresses.add(address)\n\t\t\t\t\t\ts.save()\n\t\t\t\t\t\tlogger.info(\"Suppression successfully updated on rule: \"+str(srule)+\".\")\n\t\t\t\t\texcept Suppress.DoesNotExist:\n\t\t\t\t\t\ts = Suppress.objects.create(rule=srule, sensor=ssensor, comment=comment, track=strack)\n\n\t\t\t\t\t\tfor address in suppressAddressList:\n\t\t\t\t\t\t\ts.addresses.add(address)\n\t\t\t\t\t\ts.save()\n\t\t\t\t\t\tlogger.info(\"Suppression successfully created on rule: \"+str(srule)+\".\")\n\t\t\t\n\t\t\tresponse.append({'response': 'suppressAdded', 'text': 'Suppression successfully added.'})\n\t\t\treturn HttpResponse(json.dumps(response))\n\t\texcept: # Something went wrong.\n\t\t\tresponse.append({'response': 'addSuppressFailure', 'text': 'Failed when trying to add suppressions.'})\n\t\t\tlogger.error(\"Failed when trying to add suppressions.\")\n\t\t\treturn HttpResponse(json.dumps(response))", "def shiftr_event_listener(event):\n state = event.data.get(\"new_state\")\n topic = state.entity_id.replace(\".\", \"/\")\n\n try:\n _state = state_helper.state_as_number(state)\n except ValueError:\n _state = state.state\n\n try:\n mqttc.publish(topic, _state, qos=0, retain=False)\n\n if state.attributes:\n for attribute, data in state.attributes.items():\n mqttc.publish(\n f\"/{topic}/{attribute}\", str(data), qos=0, retain=False\n )\n except RuntimeError:\n pass" ]
[ "0.560759", "0.5351545", "0.5286287", "0.5215918", "0.50854534", "0.50759035", "0.5052492", "0.5019985", "0.49917015", "0.4915208", "0.4852344", "0.48465505", "0.48308286", "0.47611645", "0.47459525", "0.47393727", "0.47084105", "0.46966222", "0.46946904", "0.46800652", "0.46637163", "0.46621075", "0.4655034", "0.46411532", "0.45935494", "0.4584962", "0.4568778", "0.45470998", "0.45365942", "0.4518088" ]
0.7941506
0
This function populates an instance of DeadlineTab with the UI controls that make up the submission dialog. This tab is instantiated by Katana every time the user selects "Tabs > Thinkbox > Submit to Deadline" from the menu bar in Katana. Essentially, this function serves as a deferred __init__ implementation for the tab class that can be easily updated via the Deadline repository.
def PopulateSubmitter( gui ): global submissionInfo print( "Grabbing submitter info..." ) try: stringSubInfo = CallDeadlineCommand( [ "-prettyJSON", "-GetSubmissionInfo", "Pools", "Groups", "MaxPriority", "UserHomeDir", "RepoDir:submission/Katana/Main", "RepoDir:submission/Integration/Main", ], useDeadlineBg=True ) output = json.loads( stringSubInfo, encoding="utf-8" ) except: print( "Unable to get submitter info from Deadline:\n\n" + traceback.format_exc() ) raise if output[ "ok" ]: submissionInfo = output[ "result" ] else: print( "DeadlineCommand returned a bad result and was unable to grab the submitter info.\n\n" + output[ "result" ] ) raise ValueError( output[ "result" ] ) # Create a widget with a vertical box layout as a container for widgets to include in the tab scrollWidget = QWidget() scrollLayout = QGridLayout(scrollWidget) scrollLayout.setSpacing(4) scrollLayout.setContentsMargins(4, 4, 4, 4) buttonLayout = QHBoxLayout() # First layout: General options scrollLayout.addWidget(CreateSeparator( "Job Description" ),0,0,1,3) jobNameLabel = QLabel( "Job Name" ) jobNameLabel.setToolTip("The name of your job. This is optional, and if left blank, it will default to 'Untitled'.") scrollLayout.addWidget(jobNameLabel,1,0) gui.jobNameWidget = QLineEdit( os.path.basename(FarmAPI.GetKatanaFileName()).split('.')[0] ) scrollLayout.addWidget(gui.jobNameWidget, 1, 1, 1, 1 ) commentLabel = QLabel( "Comment" ) commentLabel.setToolTip("A simple description of your job. This is optional and can be left blank.") scrollLayout.addWidget(commentLabel,2,0) gui.commentWidget = QLineEdit( "" ) scrollLayout.addWidget(gui.commentWidget, 2, 1, 1, 1 ) departmentLabel = QLabel( "Department" ) departmentLabel.setToolTip( "The department you belong to. This is optional and can be left blank." ) scrollLayout.addWidget(departmentLabel, 3, 0) gui.departmentWidget = QLineEdit( "" ) scrollLayout.addWidget(gui.departmentWidget, 3, 1, 1, 1 ) # Second layout: Job options scrollLayout.addWidget(CreateSeparator( "Job Options" ),4,0,1,3) pools = submissionInfo["Pools"] poolLabel = QLabel( "Pool" ) poolLabel.setToolTip( "The pool that your job will be submitted to." ) scrollLayout.addWidget(poolLabel, 5, 0) gui.poolsWidget = QComboBox() gui.poolsWidget.addItems(pools) scrollLayout.addWidget(gui.poolsWidget, 5, 1 ) secondPoolLabel = QLabel( "Secondary Pool" ) secondPoolLabel.setToolTip( "The secondary pool lets you specify a pool to use if the primary pool does not have any available Slaves." ) scrollLayout.addWidget(secondPoolLabel, 6, 0 ) gui.secondPoolsWidget = QComboBox() gui.secondPoolsWidget.addItems(pools) scrollLayout.addWidget(gui.secondPoolsWidget, 6, 1 ) groups = submissionInfo[ "Groups" ] groupLabel = QLabel( "Group" ) groupLabel.setToolTip( "The group that your job will be submitted to." ) scrollLayout.addWidget(groupLabel, 7, 0) gui.groupWidget = QComboBox() gui.groupWidget.addItems(groups) scrollLayout.addWidget(gui.groupWidget, 7, 1) priorityLabel = QLabel( "Priority" ) priorityLabel.setToolTip( "A job can have a numeric priority from 0 to 100, where 0 is the lowest priority and 100 is the highest." ) scrollLayout.addWidget(priorityLabel, 8, 0) maxPriority = submissionInfo["MaxPriority"] gui.priorityBox = QSpinBox() gui.priorityBox.setMinimum(0) gui.priorityBox.setMaximum( maxPriority ) scrollLayout.addWidget(gui.priorityBox, 8, 1) taskTimeoutLabel = QLabel( "Task Timeout" ) taskTimeoutLabel.setToolTip( "The number of minutes a Slave has to render a task for this job before it requeues it. 
Specify 0 for no limit." ) scrollLayout.addWidget(taskTimeoutLabel, 9, 0) gui.taskTimeoutBox = QSpinBox() gui.taskTimeoutBox.setMinimum(0) gui.taskTimeoutBox.setMaximum(10000) scrollLayout.addWidget(gui.taskTimeoutBox, 9, 1) concurrentTasksLabel = QLabel( "Concurrent Tasks" ) concurrentTasksLabel.setToolTip("The number of tasks that can render concurrently on a single Slave. This is useful if the rendering application only uses one thread to render and your Slaves have multiple CPUs.") scrollLayout.addWidget(concurrentTasksLabel, 10, 0 ) gui.concurrentTasksWidget = QSpinBox( ) scrollLayout.addWidget(gui.concurrentTasksWidget, 10, 1) gui.concurrentTasksWidget.setMinimum(1) gui.concurrentTasksWidget.setMaximum(16) gui.limitTasksSlaveLimit = QCheckBox( "Limit Tasks To Slave's Task Limit" ) gui.limitTasksSlaveLimit.setToolTip( "If you limit the tasks to a Slave's task limit, then by default, the Slave won't dequeue more tasks then it has CPUs. This task limit can be overridden for individual Slaves by an administrator." ) scrollLayout.addWidget(gui.limitTasksSlaveLimit, 10, 2) machineLimitLabel = QLabel( "Machine Limit" ) machineLimitLabel.setToolTip("Use the Machine Limit to specify the maximum number of machines that can render your job at one time. Specify 0 for no limit.") scrollLayout.addWidget( machineLimitLabel, 11, 0 ) gui.machineLimitWidget = QSpinBox() scrollLayout.addWidget(gui.machineLimitWidget, 11, 1) gui.isBlackListWidget = QCheckBox( "Machine List Is Blacklist" ) gui.isBlackListWidget.setToolTip("You can force the job to render on specific machines by using a whitelist, or you can avoid specific machines by using a blacklist.") scrollLayout.addWidget(gui.isBlackListWidget, 11, 2) machineListLabel = QLabel( "Machine List" ) machineListLabel.setToolTip("The whitelisted or blacklisted list of machines.") scrollLayout.addWidget( machineListLabel, 12, 0 ) machineListLayout = QHBoxLayout() gui.machineListWidget = QLineEdit( "" ) machineListLayout.addWidget(gui.machineListWidget) getMachineListWidget = QPushButton( "..." ) getMachineListWidget.pressed.connect( lambda: BrowseMachineList(gui.machineListWidget) ) machineListLayout.addWidget(getMachineListWidget) scrollLayout.addLayout( machineListLayout, 12, 1, 1, 2 ) limitsLabel = QLabel( "Limits" ) limitsLabel.setToolTip("The Limits that your job requires.") scrollLayout.addWidget( limitsLabel, 13, 0 ) limitsLayout = QHBoxLayout() gui.limitsWidget = QLineEdit( "" ) limitsLayout.addWidget(gui.limitsWidget) getLimitsWidget = QPushButton( "..." ) getLimitsWidget.pressed.connect( lambda: BrowseLimitList(gui.limitsWidget) ) limitsLayout.addWidget(getLimitsWidget) scrollLayout.addLayout( limitsLayout, 13, 1, 1, 2 ) dependenciesLabel = QLabel( "Dependencies" ) dependenciesLabel.setToolTip("Specify existing jobs that this job will be dependent on. This job will not start until the specified dependencies finish rendering.") scrollLayout.addWidget( dependenciesLabel, 14, 0 ) dependenciesLayout = QHBoxLayout() gui.dependenciesWidget = QLineEdit( "" ) dependenciesLayout.addWidget(gui.dependenciesWidget) getDependenciesWidget = QPushButton( "..." 
) getDependenciesWidget.pressed.connect( lambda: BrowseDependencyList(gui.dependenciesWidget) ) dependenciesLayout.addWidget(getDependenciesWidget) scrollLayout.addLayout( dependenciesLayout, 14, 1, 1, 2 ) onJobCompleteLabel = QLabel( "On Job Complete" ) onJobCompleteLabel.setToolTip("If desired, you can automatically archive or delete the job when it completes.") scrollLayout.addWidget( onJobCompleteLabel, 15, 0 ) gui.onJobCompleteWidget = QComboBox( ) gui.onJobCompleteWidget.addItems(["Nothing", "Archive", "Delete"]) scrollLayout.addWidget(gui.onJobCompleteWidget, 15, 1) gui.submitSuspendedWidget = QCheckBox( "Submit Job as Suspended" ) gui.submitSuspendedWidget.setToolTip( "If enabled, the job will submit in the suspended state. This is useful if you don't want the job to start rendering right away. Just resume it from the Monitor when you want it to render.") scrollLayout.addWidget(gui.submitSuspendedWidget, 15, 2) # Third layout: Katana options scrollLayout.addWidget(CreateSeparator( "Katana Options" ),16,0,1,3) frameRangeLabel = QLabel( "Frame Range" ) frameRangeLabel.setToolTip("The list of frames to render.") scrollLayout.addWidget( frameRangeLabel, 17, 0 ) gui.frameRangeWidget = QLineEdit( "" ) # Populate based on frame range scrollLayout.addWidget( gui.frameRangeWidget, 17, 1, 1, 1 ) frameRange = FarmAPI.GetSceneFrameRange() gui.frameRangeWidget.setText( str(frameRange['start']) + "-" + str(frameRange['end']) ) gui.submitSceneBox = QCheckBox( "Submit Katana Scene File" ) gui.submitSceneBox.setToolTip( "If this option is enabled, the scene file will be submitted with the job, and then copied locally to the Slave machine during rendering." ) scrollLayout.addWidget(gui.submitSceneBox, 17, 2 ) framesPerTaskLabel = QLabel( "Frames Per Task" ) framesPerTaskLabel.setToolTip( "This is the number of frames that will be rendered at a time for each job task." ) scrollLayout.addWidget( framesPerTaskLabel, 18, 0 ) gui.framesPerTaskWidget = QSpinBox( ) gui.framesPerTaskWidget.setMinimum(1) scrollLayout.addWidget( gui.framesPerTaskWidget, 18, 1, 1, 1 ) gui.useWorkingDirectory = QCheckBox( "Use Working Directory" ) gui.useWorkingDirectory.setToolTip( "If enabled, the current working directory will be used during rendering. This is required if your Katana project file contains relative paths." ) gui.useWorkingDirectory.setChecked(True) scrollLayout.addWidget( gui.useWorkingDirectory, 18, 2 ) renderNodeSelectLabel = QLabel( "Render Node Submission" ) renderNodeSelectLabel.setToolTip( "Choose to render the whole scene, render all nodes as separate jobs, or render separate nodes" ) scrollLayout.addWidget( renderNodeSelectLabel, 19, 0 ) gui.renderSelectBox = QComboBox() gui.renderSelectBox.addItems( ["Submit All Render Nodes As Separate Jobs", "Select Render Node"] ) scrollLayout.addWidget( gui.renderSelectBox, 19, 1 ) gui.includeImageWrite = QCheckBox( "Include ImageWrite Nodes" ) gui.includeImageWrite.setToolTip( "If enabled, ImageWrite nodes will be included for submission." ) scrollLayout.addWidget( gui.includeImageWrite, 19, 2 ) renderNodeLabel = QLabel( "Render Node" ) renderNodeLabel.setToolTip( "Set the render node to render with, or leave blank to use the node already set." ) scrollLayout.addWidget( renderNodeLabel, 20, 0 ) gui.frameDependent = QCheckBox( "Submit Jobs As Frame Dependent" ) gui.frameDependent.setToolTip( "If enabled, the Katana Job(s) will have Frame Dependencies. If your scene contains static content, do not use!" 
) scrollLayout.addWidget( gui.frameDependent, 20, 2 ) gui.renderNodeBox = QComboBox() gui.renderSelectBox.currentIndexChanged.connect( lambda: RenderSelectionChanged( gui.renderSelectBox, gui.renderNodeBox ) ) scrollLayout.addWidget( gui.renderNodeBox, 20, 1) gui.renderNodeBox.setDisabled(True) # Submit button buttonLayoutSpacer = QSpacerItem( 0, 0, QSizePolicy.MinimumExpanding, QSizePolicy.Minimum ) buttonLayout.addItem( buttonLayoutSpacer ) gui.pipelineToolStatusLabel = QLabel( "No Pipeline Tools Set" ) gui.pipelineToolStatusLabel.setAlignment( QtCore.Qt.AlignCenter ) buttonLayout.addWidget( gui.pipelineToolStatusLabel ) pipelineToolsButton = QPushButton( "Pipeline Tools" ) pipelineToolsButton.pressed.connect( lambda: PipelineToolsClicked( gui ) ) buttonLayout.addWidget( pipelineToolsButton ) submitButton = QPushButton( "Submit" ) submitButton.pressed.connect( lambda: SubmitPressed(gui) ) buttonLayout.addWidget( submitButton ) scrollLayout.addLayout( buttonLayout,21,0,1,3 ) verticalStretchLayout = QVBoxLayout() verticalStretchLayout.addStretch() scrollLayout.addLayout( verticalStretchLayout, 22, 0 ) scrollArea = QScrollArea() scrollArea.setWidget(scrollWidget) scrollArea.setWidgetResizable(True) scrollArea.setFrameStyle(QFrame.NoFrame + QFrame.Plain) vLayout = QVBoxLayout() vLayout.setObjectName('vLayout') vLayout.addWidget(scrollArea) gui.setLayout(vLayout) LoadStickySettings( gui ) try: pipelineToolStatusMessage = RetrievePipelineToolStatus( raiseOnExitCode=True ) except subprocess.CalledProcessError as e: pipelineToolStatusMessage = HandlePipelineToolsCalledProcessError( e ) UpdatePipelineToolStatusLabel( gui, pipelineToolStatusMessage ) # Populate the render node drop down based on the effective check state # of the "Include ImageWrite Nodes" checkbox after sticky settings are applied PopulateRenderNodeDropDown(gui.includeImageWrite.isChecked(), gui.renderNodeBox) # We delay wiring up this signal handler until after the sticky settings are applied to avoid # rebuilding the drop-down list multiple times unnecessarily gui.includeImageWrite.stateChanged.connect(lambda checked: PopulateRenderNodeDropDown(checked, gui.renderNodeBox)) # Check if this tab is part of a pane in the main window, or if it is contained in a floating pane if gui.window() != UI4.App.MainWindow.CurrentMainWindow(): # Resize the floating pane's window to accommodate the tab's widgets requiredSize = scrollWidget.sizeHint() gui.window().resize(max(requiredSize.width() + 20, 200), min(requiredSize.height() + 40, 1000))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def populateUI():\n \n # Main form layout\n form = cmds.formLayout()\n\n # Tab Layout\n tabs = cmds.tabLayout(innerMarginWidth=5, innerMarginHeight=5)\n # Form attachment config\n cmds.formLayout( form, edit=True, attachForm=((tabs, 'top', 0), (tabs, 'left', 0), (tabs, 'bottom', 0), (tabs, 'right', 0)) )\n\n # The different Tabs on the window\n spawnTab = SpawnObjectsTab()\n roadTab = RoadRiverTab()\n environmentTab = EnvironmentTab()\n\n # Tab creation\n cmds.tabLayout( tabs, edit=True, tabLabel=((spawnTab, 'Spawn Buildings'), (roadTab, 'Create Roads / Rivers'), (environmentTab, \"Create Environment\") ))", "def __init__(self):\r\n super().__init__()\r\n self._setupTab1()", "def __init__(self, main_win, parent=None):\n super(Tabs, self).__init__(parent)\n self.main_win = main_win\n\n if self.main_win.beamline is not None:\n try:\n beam = importlib.import_module('beamlines.' + self.main_win.beamline + '.beam_tabs')\n except Exception as e:\n print(e)\n msg_window('cannot import beamlines.' + self.main_win.beamline + ' module')\n raise\n self.prep_tab = beam.PrepTab()\n self.format_tab = DataTab()\n self.rec_tab = RecTab()\n self.display_tab = beam.DispTab()\n self.tabs = [self.prep_tab, self.format_tab, self.rec_tab, self.display_tab]\n else:\n self.format_tab = DataTab()\n self.rec_tab = RecTab()\n self.tabs = [self.format_tab, self.rec_tab]\n\n for tab in self.tabs:\n self.addTab(tab, tab.name)\n tab.init(self, main_win)", "def __init__(self):\r\n\r\n self.window = None # page's associated window\r\n self.caption = \"\" # caption displayed on the tab\r\n self.bitmap = wx.NullBitmap # tab's bitmap\r\n self.dis_bitmap = wx.NullBitmap # tab's disabled bitmap\r\n self.rect = wx.Rect() # tab's hit rectangle\r\n self.active = False # True if the page is currently active\r\n self.enabled = True # True if the page is currently enabled\r\n self.hasCloseButton = True # True if the page has a close button using the style\r\n # AUI_NB_CLOSE_ON_ALL_TABS\r\n self.control = None # A control can now be inside a tab\r\n self.renamable = False # If True, a tab can be renamed by a left double-click\r\n \r\n self.text_colour = wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNTEXT)\r\n \r\n self.access_time = datetime.datetime.now() # Last time this page was selected\r", "def __init__(self, parent):\r\n\r\n pre = wx.PrePyWindow()\r\n \r\n self._tabs = None\r\n self._rect = wx.Rect(0, 0, 200, 200)\r\n self._tab_ctrl_height = 20\r\n self._tab_rect = wx.Rect() \r\n self._parent = parent\r\n \r\n self.PostCreate(pre)", "def build_ui(self):\n self.ui = UI_procstep.Ui_Form()#.Ui_USGSContactInfoWidgetMain()\n self.ui.setupUi(self)\n self.setup_dragdrop(self)\n\n self.proc_step = RepeatingElement(which='tab',\n tab_label='Step', add_text='Additional Step',\n widget=ProcessStep, remove_text='Remove Step', italic_text='Processing Steps Taken')\n\n #self.proc_step = RepeatingElement(params=params, which='tab', tab_label='Source',)\n self.proc_step.add_another()\n self.ui.widget_procstep.layout().addWidget(self.proc_step)", "def controls_setup(self):\n\n self.inbox = element.NavigationTab(self, css_selector='.messaging a.nav-inbox', alias='Inbox Tab')\n self.sent = element.NavigationTab(self, css_selector='.messaging a.nav-sent', alias='Sent Tab')\n self.write = element.NavigationTab(self, css_selector='.messaging a.nav-write', alias='Write Tab')\n self.archives = element.NavigationTab(self, css_selector='.messaging a.nav-archive', alias='Archives Tab')\n self.trash = element.NavigationTab(self, css_selector='.messaging 
a.nav-trash', alias='Trash Tab')", "def init_tab(self):", "def extend_ui(self):\n for name, tab in self.build_general_tabs().items():\n scroll = self.build_tab()\n self.add_tab(\"General\", name, scroll)\n self.fill_tab(\"General\", name, tab)\n for name, tab in self.build_display_tabs().items():\n scroll = self.build_tab()\n self.add_tab(\"Display\", name, scroll)\n self.fill_tab(\"Display\", name, tab)\n for name, tab in self.build_data_tabs().items():\n scroll = self.build_tab()\n self.add_tab(\"Data\", name, scroll)\n self.fill_tab(\"Data\", name, tab)", "def createTabs(self):\r\n self.tab1 = QWidget()\r\n self.tab2 = QWidget()\r\n self.tab3 = QWidget()\r\n self.tab4 = QWidget()\r\n self.tab5 = QWidget()\r\n self.tab6 = QWidget()\r\n self.tab7 = QWidget()\r\n self.tab8 = QWidget()\r\n self.addTab(self.tab1, \"Registro\")\r\n self.addTab(self.tab2, \"Base de Datos\")\r\n self.addTab(self.tab3, \"Ingresos\")\r\n self.addTab(self.tab4, \"Compras\")\r\n self.addTab(self.tab5, \"Gastos\")\r\n self.addTab(self.tab6, \"Res. Diarios\")\r\n self.addTab(self.tab7, \"Res. Mensuales\")\r\n self.addTab(self.tab8, \"Res. Anuales\")", "def _create_data_tabs(self):\n self.tab_ctrl.tab.children = []\n for name, (ctrl_cls, args) in self._get_tab_definitions().items():\n ctrl = ctrl_cls(*args)\n # add to tabs\n self.tab_ctrl.add_tab(name, control=ctrl)\n # Set these controls as named attributes on the object\n setattr(self, name.replace(\" \", \"_\"), ctrl)", "def __init__(self, execution):\n self.execution = execution\n\n self.form = {}\n if len(PARAMS) == 0:\n input_boxes = [] #[urwid.Text('Changing the default parameters not allowed')]\n else:\n input_boxes = [urwid.Text('Change the default parameters for the jobs:')]\n for k, v in PARAMS.items():\n edit_box = urwid.Edit(('edittxt', v + ': '), str(self.execution.job_params.get(k, PARAM_DEFAULT_VALUE)))\n input_boxes.append(urwid.AttrMap(edit_box, 'editbx', 'editfc'))\n self.form[k] = edit_box\n\n input_boxes.append(create_button('Change', self.resubmit))\n\n self.widget = urwid.Padding(urwid.Pile(input_boxes), align='center')\n\n BaseTimedWidgetWrap.__init__(self, self.widget)", "def _tabs(self):\n selenium_utils.scroll_into_view(self._driver, self.container_element)\n return {\n self._elements.RELATED_ASMTS_TAB: AssessmentRelatedAsmtsTable,\n self._elements.RELATED_ISSUES_TAB: AssessmentRelatedIssuesTable,\n self._elements.CHANGE_LOG_TAB: self._log_tab_validate}", "def init(self, tabs, main_window):\n self.tabs = tabs\n self.main_win = main_window\n self.old_conf_id = ''\n\n layout = QVBoxLayout()\n ulayout = QFormLayout()\n mlayout = QHBoxLayout()\n\n hbox = QHBoxLayout()\n self.cont = QCheckBox('continue')\n self.cont.setChecked(False)\n hbox.addWidget(self.cont)\n self.cont_dir_label = QLabel(' cont dir')\n hbox.addWidget(self.cont_dir_label)\n self.cont_dir_label.hide()\n self.cont_dir_button = QPushButton()\n hbox.addWidget(self.cont_dir_button)\n self.cont_dir_button.hide()\n ulayout.addRow(hbox)\n\n self.add_conf_button = QPushButton('add configuration', self)\n ulayout.addWidget(self.add_conf_button)\n self.rec_id = QComboBox()\n self.rec_id.InsertAtBottom\n self.rec_id.addItem(\"main\")\n ulayout.addWidget(self.rec_id)\n self.rec_id.hide()\n self.proc = QComboBox()\n self.proc.addItem(\"auto\")\n if sys.platform != 'darwin':\n self.proc.addItem(\"cp\")\n self.proc.addItem(\"np\")\n self.proc.addItem(\"af\")\n if sys.platform != 'darwin':\n self.proc.addItem(\"cuda\")\n self.proc.addItem(\"opencl\")\n self.proc.addItem(\"cpu\")\n 
ulayout.addRow(\"processor type\", self.proc)\n self.device = QLineEdit()\n ulayout.addRow(\"device(s)\", self.device)\n self.reconstructions = QLineEdit()\n ulayout.addRow(\"number of reconstructions\", self.reconstructions)\n self.alg_seq = QLineEdit()\n ulayout.addRow(\"algorithm sequence\", self.alg_seq)\n # TODO add logic to show this only if HIO is in sequence\n self.beta = QLineEdit()\n ulayout.addRow(\"beta\", self.beta)\n self.support_area = QLineEdit()\n ulayout.addRow(\"support_area\", self.support_area)\n self.rec_default_button = QPushButton('set to defaults', self)\n ulayout.addWidget(self.rec_default_button)\n\n self.features = Features(self, mlayout)\n\n llayout = QHBoxLayout()\n self.set_rec_conf_from_button = QPushButton(\"Load rec conf from\")\n self.set_rec_conf_from_button.setStyleSheet(\"background-color:rgb(205,178,102)\")\n self.config_rec_button = QPushButton('run reconstruction', self)\n self.config_rec_button.setStyleSheet(\"background-color:rgb(175,208,156)\")\n llayout.addWidget(self.set_rec_conf_from_button)\n llayout.addWidget(self.config_rec_button)\n\n spacer = QSpacerItem(0, 3)\n llayout.addItem(spacer)\n\n layout.addLayout(ulayout)\n layout.addLayout(mlayout)\n layout.addLayout(llayout)\n\n self.setAutoFillBackground(True)\n self.setLayout(layout)\n\n self.cont_dir_button.clicked.connect(self.set_cont_dir)\n self.config_rec_button.clicked.connect(self.run_tab)\n self.cont.stateChanged.connect(self.toggle_cont)\n self.rec_default_button.clicked.connect(self.set_defaults)\n self.add_conf_button.clicked.connect(self.add_rec_conf)\n self.rec_id.currentIndexChanged.connect(self.toggle_conf)\n self.set_rec_conf_from_button.clicked.connect(self.load_rec_conf_dir)", "def __init__(self):\n\n\t\tself.tasklist = TaskList()\n\t\tself.legend = '\\nLegend: Not Due ' + Fore.CYAN + Style.BRIGHT + 'Upcoming ' + Fore.BLUE + \\\n\t\t Style.BRIGHT + 'Due ' + Fore.RED + Style.BRIGHT + 'Overdue ' + Fore.WHITE + Style.BRIGHT + \\\n\t\t Back.WHITE + 'Completed' + Fore.RESET + Style.NORMAL + Back.RESET", "def __init__(self, parent=None):\n super().__init__(parent);\n tabBar=EditableTabBar(parent);\n self.setTabBar(tabBar);", "def create_tab(self, type):\n tab = Tabs(self.builder.get_object('window1'), type)\n label_widget = tab.get_label_widget()\n\n # connect label_widget's close button to close_tab()\n label_widget.get_children()[-1].connect('clicked', self.close_tab)\n label_widget.show_all()\n\n # set save, run, terminal button active if not\n save_button = self.builder.get_object('save')\n run_button = self.builder.get_object('run')\n terminal_button = self.builder.get_object('terminal')\n\n for button in [save_button, run_button, terminal_button]:\n button.set_sensitive(True)\n\n return tab, label_widget", "def create_live_tab(self):\n\n self.liveLayout = QGridLayout()\n self.textButton = QPushButton(\"Start live observation\")\n self.textButton.setMinimumHeight(60)\n self.textButton.clicked.connect(self.start_live_observation)\n self.liveLayout.addWidget(self.textButton)\n self.lbTimeLive = QLabel()\n self.lbTimeLive.setAlignment(Qt.AlignCenter)\n\n font = QFont(\"Monospace\")\n font.setPointSize(48)\n self.lbTimeLive.setFont(font)\n if self.timeFormat == HHMMSS:\n self.lbTimeLive.setText(\"00:00:00.000\")\n if self.timeFormat == S:\n self.lbTimeLive.setText(\"0.000\")\n\n self.liveLayout.addWidget(self.lbTimeLive)\n self.liveTab = QWidget()\n self.liveTab.setLayout(self.liveLayout)\n self.toolBox.insertItem(2, self.liveTab, \"Live\")", "def setupTabs(self, elmerDefs, 
Section, ID):\n self.ID = ID\n self.qhash.clear()\n\n layout = self.layout()\n if(layout is not None):\n item = layout.takeAt(0)\n while(item != 0):\n item = None\n if(self.tabWidget is not None):\n self.tabWidget.clear()\n self.tabWidget = None\n item = layout.takeAt(0)\n self.layout = None\n\n # get root element\n self._root = elmerDefs.documentElement()\n\n self.tabWidget = QtGui.QTabWidget()\n self.tabWidget.setUsesScrollButtons(True)\n self.tabWidget.setElideMode(QtCore.Qt.ElideNone)\n\n self._all_stuff = self._root.firstChildElement(\"ALL\")\n self._element = self._root.firstChildElement(\"PDE\")\n\n self.tabs = 0\n\n while(self._element.isNull() is False):\n self._name = self._element.firstChildElement(\"Name\")\n grid = QtGui.QGridLayout()\n params = 0\n for x in range(0, 2):\n if(x == 0):\n if(str(self._name.text()).strip() == \"General\"):\n continue\n self._section = self._all_stuff.firstChildElement(Section)\n else:\n self._section = self._element.firstChildElement(Section)\n\n self._param = self._section.firstChildElement(\"Parameter\")\n\n while(self._param.isNull() is False):\n h = hash_entry_t()\n # label\n widget_type = self._param.attribute(\"Widget\", \"Edit\")\n widget_enabled = self._param.attribute(\"Enabled\", \"True\")\n widget_visible = self._param.attribute(\"Visible\", \"True\")\n paramType = str(self._param.firstChildElement(\"Type\").text()).strip()\n labelName = str(self._param.firstChildElement(\"Name\").text()).strip()\n sifName = str(self._param.firstChildElement(\"SifName\").text()).strip()\n if(sifName == \"\"):\n sifName = labelName\n paramDefault = str(self._param.firstChildElement(\"DefaultValue\").text()).strip()\n whatis = str(self._param.firstChildElement(\"Whatis\").text()).strip()\n statusTip = str(self._param.firstChildElement(\"StatusTip\").text()).strip()\n fullName = \"/\" + str(self._name.text()).strip() + \"/\"\n fullName = fullName + Section + \"/\" + labelName + \"/\" + str(ID)\n h.widget = None\n if(widget_type == \"Edit\"):\n edit = DynLineEdit()\n h.widget = edit.lineEdit\n edit.lineEdit.setText(paramDefault)\n edit.name = fullName\n edit.lineEdit.returnPressed.connect(edit.editSlot)\n edit.lineEdit.textChanged.connect(self._textChangedSlot)\n\n elif(widget_type == \"TextEdit\"):\n textEdit = QtGui.QTextEdit()\n currentFont = textEdit.currentFont()\n fontMetrics = QFontMetrics(currentFont)\n fontHeight = fontMetrics.height()\n textEdit.setMinimumHeight(5*fontHeight)\n textEdit.setMaximumHeight(8*fontHeight)\n h.widget = textEdit\n\n elif(widget_type == \"Combo\"):\n combo = QtGui.QComboBox()\n h.widget = combo\n count = 0\n active = 0\n item = self._param.firstChildElement(\"Item\")\n while (item.isNull() is False):\n itemType = item.attribute(\"Type\", \"\")\n if(itemType == \"Active\"):\n active = count\n itemName = item.firstChildElement(\"Name\")\n count += 1\n combo.insertItem(count,str(itemName.text()).strip())\n item = item.nextSiblingElement(\"Item\")\n combo.setCurrentIndex(active)\n combo.currentIndexChanged.connect(self._comboSlot)\n\n elif(widget_type == \"CheckBox\"):\n l = QtGui.QCheckBox()\n h.widget = l\n l.setText(\"\")\n l.setChecked(False)\n if(paramDefault == \"True\"):\n l.setChecked(True)\n l.stateChanged.connect(self._lSlot)\n\n elif(widget_type == \"Label\"):\n label = QtGui.QLabel()\n font = QFont()\n font.setBold(True)\n font.setUnderline(True)\n label.setFont(font)\n label.setText(labelName)\n h.widget = label\n\n if(h.widget):\n h.widget.setWhatsThis(whatis)\n h.widget.setStatusTip(statusTip)\n 
h.widget.setProperty(\"dom address\", fullName)\n h.elem = self._param\n if(widget_enabled == \"False\"):\n h.widget.setEnabled(False)\n if(widget_type != \"TextEdit\"):\n h.widget.setFixedHeight(18)\n if(widget_type == \"TextEdit\"):\n textEditLabel = QtGui.QLabel()\n textEditLabel.setText(labelName)\n h.label = textEditLabel\n grid.addWidget(h.widget, params, 0, 1, 2)\n\n if(widget_visible == \"False\"):\n h.label.hide()\n h.widget.hide()\n\n elif(widget_type != \"Label\"):\n label = QtGui.QLabel()\n label.setText(labelName)\n h.label = label\n grid.addWidget(h.label, params, 0)\n grid.addWidget(h.widget, params, 1)\n\n if(widget_visible == \"False\"):\n h.label.hide()\n h.widget.hide()\n else:\n h.label = None\n grid.addWidget(h.widget, params, 0)\n self.qhash.update({fullName: h})\n\n self._param = self._param.nextSiblingElement(\"Parameter\")\n params += 1\n\n dummyWidget = QtGui.QWidget()\n grid.addWidget(dummyWidget, params, 0)\n grid.setRowStretch(params, 1)\n\n frmWidget = QtGui.QWidget()\n frmWidget.setLayout(grid)\n\n src = QtGui.QScrollArea()\n src.setWidget(frmWidget)\n src.setMinimumHeight(300)\n src.setWidgetResizable(True)\n\n if(params > 0):\n self.tabWidget.addTab(src, str(self._name.text()).strip())\n\n self.tabs += 1\n self._element = self._element.nextSiblingElement(\"PDE\")\n\n # Buttons:\n lbl = QtGui.QLabel()\n lbl.setText(\"Name:\")\n self.nameEdit = QtGui.QLineEdit()\n self.nameEdit.setText(Section + \" \" + str(ID+1))\n\n self.applyButton = QtGui.QPushButton(\"&Apply\")\n # applyButton.setIcon(addIcon)\n self.applyButton.clicked.connect(self._applyButtonClicked)\n\n self.discardButton = QtGui.QPushButton(\"&Remove\")\n # discardButton.setIcon(removeIcon)\n self.discardButton.clicked.connect(self._discardButtonClicked)\n\n self.okButton = QtGui.QPushButton(\"&OK\")\n # okButton.setIcon(okIcon)\n self.okButton.clicked.connect(self._okButtonClicked)\n\n self.newButton = QtGui.QPushButton(\"&New\")\n # self.newButton.setIcon(newIcon)\n self.newButton.clicked.connect(self._newButtonClicked)\n\n nameLayout = QtGui.QHBoxLayout()\n nameLayout.addWidget(lbl)\n nameLayout.addWidget(self.nameEdit)\n\n buttonLayout = QtGui.QHBoxLayout()\n buttonLayout.addWidget(self.newButton)\n buttonLayout.addWidget(self.applyButton)\n buttonLayout.addWidget(self.okButton)\n buttonLayout.addWidget(self.discardButton)\n\n spareButtonLayout = QtGui.QHBoxLayout()\n self.spareButton = QtGui.QPushButton(\"SpareButton\")\n self.spareButton.setVisible(False)\n spareButtonLayout.addWidget(self.spareButton)\n self.spareButton.clicked.connect(self._spareButtonClicked)\n\n self.spareScroll = QtGui.QScrollArea()\n self.spareScroll.hide()\n\n mainLayout = QtGui.QVBoxLayout()\n mainLayout.addWidget(self.tabWidget)\n mainLayout.addWidget(self.spareScroll)\n mainLayout.addLayout(spareButtonLayout)\n mainLayout.addLayout(nameLayout)\n mainLayout.addLayout(buttonLayout)\n self.setLayout(mainLayout)\n\n self.setWindowTitle(Section)", "def create_tab(application, tab_name, model):\n tab = application.app_tab_dict[tab_name]\n add_heading(tab, 'Case Number')\n case = Label(tab, text=tab.case_number.get(), fg=\"black\")\n case.grid(row=tab.row_cursor, column=tab.col_cursor, pady=5)\n link = Label(tab, text=\"CASE DOCKET\", fg=\"blue\", cursor=\"hand2\")\n link.grid(row=tab.row_cursor, column=tab.col_cursor+1, pady=5)\n link.bind(\"<Button-1>\", callback)\n tab.row_cursor += 1\n decline_button = Radiobutton(tab, text=\"Decline\", variable=model.vote, value=1)\n decline_button.grid(row=tab.row_cursor, 
column=tab.col_cursor, sticky=W)\n accept_button = Radiobutton(tab, text=\"Accept\", variable=model.vote, value=2)\n accept_button.grid(row=tab.row_cursor+1, column=tab.col_cursor, sticky=W)\n hold_button = Radiobutton(tab, text=\"Hold\", variable=model.vote, value=3)\n hold_button.grid(row=tab.row_cursor+2, column=tab.col_cursor, sticky=W)\n notpart_button = Radiobutton(tab, text=\"Not Participating\", variable=model.vote, value=4)\n notpart_button.grid(row=tab.row_cursor+3, column=tab.col_cursor, sticky=W)\n tab.row_cursor += 4\n vote_button = add_button_left(tab, \"Submit Vote\", partial(print_vote, model))\n tab.row_cursor += 1\n #next_button = add_button_left(tab, \"Next Case\", tab.next_case())", "def initialize(self):\n # Notebook holds all the tabs\n n = ttk.Notebook(self)\n f1 = RectTab(self)\n f2 = LTab(self)\n f3 = CircTab(self)\n f4 = BuminTab(self)\n f5 = LorentzTab(self)\n n.add(f1, text='Rectangle')\n n.add(f2, text='L')\n n.add(f3, text='Circle ')\n n.add(f4, text='Buminovich')\n n.add(f5, text='Lorentz')\n # need to pack for the Notebook to display\n n.pack()", "def show_create(self):\n\t\t# Get a rectangle with amargin.\n\t\trect = self.renderer._get_rect()\n\t\trect = (rect[0] + 16, rect[1] + 16, rect[2] - 16, rect[3] - 16)\n\n\t\tself.f_tab = ow.Table(4, 2)\n\t\tself.f_tab.topleft = (rect[0], rect[1])\n\n\t\t# Name of the game textbox.\n\t\tself.e_gamename = ow.Entry(\"Ship Wreckyard\")\n\t\tself.l_gamename = ow.Label(\"Name of the game: \")\n\t\tself.f_tab.add_child(0, 0, self.l_gamename)\n\t\tself.f_tab.add_child(0, 1, self.e_gamename)\n\n\t\t# Number of players.\n\t\tself.e_players = ow.Entry(\"2\")\n\t\tself.l_players = ow.Label(\"Number of players: \")\n\t\tself.f_tab.add_child(1, 0, self.l_players)\n\t\tself.f_tab.add_child(1, 1, self.e_players)\n\n\t\t# Board size.\n\t\tself.l_boardw = ow.Label(\"Board width: \")\n\t\tself.e_boardw = ow.Entry(\"10\")\n\t\tself.l_boardh = ow.Label(\"Board height: \")\n\t\tself.e_boardh = ow.Entry(\"10\")\n\t\tself.f_tab.add_child(2, 0, self.l_boardw)\n\t\tself.f_tab.add_child(2, 1, self.e_boardw)\n\t\tself.f_tab.add_child(3, 0, self.l_boardh)\n\t\tself.f_tab.add_child(3, 1, self.e_boardh)\n\n\t\t# Create Game button.\n\t\tself.b_cancel = ow.Button(\"Cancel\")\n\t\tself.b_cancel.topleft = (rect[2] - self.b_cancel.width - 100, rect[3] - self.b_cancel.height)\n\t\tself.b_cancel.connect_signal(oc.SIG_CLICKED, self.do_lobby)\n\n\t\t# Cancel button.\n\t\tself.b_create = ow.Button(\"Start Game\")\n\t\tself.b_create.topleft = (rect[2] - self.b_create.width, rect[3] - self.b_create.height)\n\t\tself.b_create.connect_signal(oc.SIG_CLICKED, self.do_start_hosted)\n\n\t\t# Add all the widgets.\n\t\tself.renderer.add_widget(self.f_tab)\n\t\tself.renderer.add_widget(self.b_create)\n\t\tself.renderer.add_widget(self.b_cancel)", "def controls_setup(self):\n\n self.date_received = element.Link(self, alias=\"Date Received\",\n css_selector='td:nth-child(1) > a', angular=True)\n self.job_type = element.Caption(self, alias=\"Job Type\", css_selector='td:nth-child(2)', angular=True)\n self.description = element.Caption(self, alias=\"Client Name\", css_selector='td:nth-child(3)', angular=True)\n self.address = element.Caption(self, alias=\"Address\", css_selector='td:nth-child(4)', angular=True)\n self.suburb = element.Caption(self, alias=\"Suburb\", css_selector='td:nth-child(5)', angular=True)\n self.client = element.Caption(self, alias=\"Client\", css_selector='td:nth-child(6)', angular=True)", "def make_rule_display(self):\n \n #Make and place 
Frame\n info_frame = tk.Frame(self,\n bg=self.default_background)\n info_frame.grid(column=1, row=4)\n \n #Define Colors and Style for Rule Info Box\n style = tk.ttk.Style()\n style.theme_create( \"yummy\", parent=\"alt\", settings={\n \"TNotebook\": {\"configure\": {\"background\": self.default_background } },\n \"TNotebook.Tab\": {\n \"configure\": {\"background\": self.button_color ,\n \"bordercolor\": self.highlight_color},\n \"map\": {\"background\": [(\"selected\",'#F0F0F0')]} } } )\n \n style.theme_use(\"yummy\")\n \n #Make Label and Tabs for the Rule Info Box\n list_tabparent = tk.ttk.Notebook(info_frame)\n list_tabparent.config(height=int(round(0.23*self.screenheight)))\n list_tabparent.grid(column=0, row=0)\n self.list_info_boxes=dict()\n self.list_info_boxes['Rules'] = tk.Listbox(list_tabparent, width=50)\n self.list_info_boxes['Log'] = tk.Listbox(list_tabparent, width=50)\n list_tabparent.add(self.list_info_boxes['Rules'], text = self.translate('Rules'))\n list_tabparent.add( self.list_info_boxes['Log'], text = self.translate('Log'))\n \n #Populate list of rules\n for rule in self.Rules:\n self.list_info_boxes['Rules'].insert(tk.END, self.translate('Rules') + ' ' + str(rule.number) + ': ' + self.translate(str(rule.name)))\n \n #Add scrollbar to the rule log display\n self.scrollbar = tk.Scrollbar(info_frame, orient='vertical',\n command=self.list_info_boxes['Log'].yview)\n self.scrollbar.grid(column=1, row=0, sticky='ns')\n self.list_info_boxes['Log'].config(yscrollcommand=self.scrollbar.set)\n \n return info_frame", "def create(cls, *args: Any, **kwargs: Any) -> \"Tab\":", "def get_form(self):\n # setup request layer\n self.request = TestRequest()\n # get add view\n form = getMultiAdapter((self.experiments, self.request),\n name=\"newBiodiverse\")\n # update the form once to initialise all widgets\n form.update()\n # go through all widgets on the form and update the request with default values\n data = {}\n for widget in form.widgets.values():\n data[widget.name] = widget.value\n data.update({\n 'form.widgets.IDublinCore.title': u\"My BD Experiment\",\n 'form.widgets.IDublinCore.description': u'This is my experiment description',\n 'form.widgets.projection.count': '1',\n 'form.widgets.projection.experiment.0': unicode(self.sdmexp.UID()),\n 'form.widgets.projection.dataset.0.count': 1,\n 'form.widgets.projection.dataset.0.0.uuid': unicode(self.sdmproj.UID()),\n 'form.widgets.projection.dataset.0.0.threshold': '0.0',\n 'form.widgets.cluster_size': '5000',\n })\n self.request.form.update(data)\n form = getMultiAdapter((self.experiments, self.request),\n name=\"newBiodiverse\")\n return form", "def create_panel(self):\n # Main Frame creation\n frame1 = Frame(self.window)\n frame1.pack(fill=\"both\")\n tablayout = Notebook(frame1)\n \n ##### TRACKER #####\n tab = Frame(tablayout) # creating 1st nested frame\n tab.pack(fill=\"both\")\n table = Frame(tab)\n table.pack(fill=\"both\")\n self.show_table(self.t.timeline[\"week\" + str(self.week)], table) # Grids the week with data\n self.add_buttons(tab, table)\n tablayout.add(tab, text=\"Current Week\") \n \n \n ##### STATS #####\n tab = Frame(tablayout) # creating 2nd nested frame\n tab.pack(fill=\"both\")\n self.stats.create_canvas(tab)\n\n\n # once its packed you can add it to the window object under a title\n tablayout.add(tab, text=\"Statistics\") \n tablayout.pack(fill=\"both\") # once everything is done now you pack the tablayout", "def controls_setup(self):\n\n self.subbie_name = element.Link(self, css_selector='th:nth-child(1) 
> a', alias=\"Admin Subbie Name Link\")\n self.type = element.Caption(self, css_selector='td:nth-child(2)', alias=\"Subbie Type\")\n self.username = element.Caption(self, css_selector='td:nth-child(3)', alias=\"Username\")\n self.email = element.Caption(self, css_selector='td:nth-child(4)', alias=\"Email\")\n self.active_start_date = element.Link(self, css_selector='td:nth-child(5)', alias=\"Active Start Date Text\")\n self.active_end_date = element.Link(self, css_selector='td:nth-child(6)', alias=\"Active End Date Text\")", "def __init__(self, scenario_controller):\n super(Ui, self).__init__()\n\n self.scenarioController = scenario_controller\n\n\n # determine if application is a script file or frozen exe\n\n if getattr(sys, 'frozen', False):\n relative_ui_path = 'AssetsV1/MainUI.ui'\n main_ui_path = os.path.dirname(sys.executable)\n\n elif __file__:\n relative_ui_path = 'MainUI.ui'\n main_ui_path = os.path.dirname(__file__)\n\n # needs to point to MainUi.ui file\n complete_ui_path = os.path.join(main_ui_path, relative_ui_path)\n\n uic.loadUi(complete_ui_path, self)\n\n self.timeline = MyTimelineWidget(self, self.scenarioController)\n self.tabWidget.setTabsClosable(True)\n self.tabs = self.tabWidget\n self.tabs.currentChanged.connect(self.current_tab_changed) \n self.tabs.tabCloseRequested.connect(self.close_current_tab)\n\n self.showMaximized()\n\n all_subsystem_names = self.scenarioController.getAvailableSubsystemNames()\n\n self.clearMenuOptions(self.menuOpen)\n self.clearMenuOptions(self.menuFile)\n self.setMenuOptions(self.menuFile, ['Save', 'Save As', 'Save As Scenario'], self.saveMenuHandler)\n self.setMenuOptionsWithParams(self.menuNew, all_subsystem_names, self.newSubsystemHandler)\n self.setMenuOptions(self.menuOpen, ['Open Command File', 'Open Scenario'], self.openMenuHandler)\n\n self.show()\n self.timeline.show()", "def buildPage(self):\n Users = [(u['name']) for u in driver.nodes.match(\"User\")]\n Tissues = [(t['name']) for t in driver.nodes.match(\"Tissue\")]\n Diseases = [(d['name']) for d in driver.nodes.match(\"Disease\")]\n self.add_basic_layout()\n layout = [html.Div([\n html.Div([html.H4('Project information', style={'width': '15.5%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.H4('', id='update_project_id', style={'width': '15%', 'verticalAlign': 'top', 'display': 'none'}),\n html.Br(),\n html.Div(children=[html.Label('Project name:*', style={'marginTop': 15}),\n dcc.Input(id='project name', placeholder='Insert name...', type='text', style={'width': '100%', 'height': '35px'})],\n style={'width': '100%'}),\n html.Br(),\n html.Div(children=[html.Label('Project Acronym:', style={'marginTop': 15}),\n dcc.Input(id='project acronym', placeholder='Insert name...', type='text', style={'width': '100%', 'height': '35px'})],\n style={'width': '100%'}),\n html.Br(),\n html.Div(children=[html.Label('Project Responsible:*', style={'marginTop': 15})],\n style={'width': '49%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Div(children=[html.Label('Project Participants:*', style={'marginTop': 15})],\n style={'width': '49%', 'marginLeft': '2%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Div(children=[dcc.Dropdown(id='responsible-picker', options=[{'label': i, 'value': i} for i in Users], value=[], multi=True, searchable=True, style={'width': '100%'})],\n style={'width': '49%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Div(children=[dcc.Dropdown(id='participant-picker', options=[{'label': i, 'value': i} for i in Users], 
value=[], multi=True, searchable=True, style={'width': '100%'})],\n style={'width': '49%', 'marginLeft': '2%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Br(),\n html.Br(),\n html.Div(children=[html.Label('Project Data Types:*', style={'marginTop': 10})],\n style={'width': '49%', 'marginLeft': '0%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Div(children=[html.Label('Project Disease:*', style={'marginTop': 10})],\n style={'width': '49%', 'marginLeft': '2%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Div(children=[dcc.Dropdown(id='data-types-picker', options=[{'label': i, 'value': i} for i in DataTypes], value=[], multi=True, searchable=True, style={'width': '100%'})],\n style={'width': '49%', 'marginLeft': '0%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Div(children=[dcc.Dropdown(id='disease-picker', options=[{'label': i, 'value': i} for i in Diseases], value=[], multi=True, searchable=True, style={'width': '100%'})],\n style={'width': '49%', 'marginLeft': '2%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Br(),\n html.Br(),\n html.Div(children=[html.Label('Project Tissue:*', style={'marginTop': 10})],\n style={'width': '49%', 'marginLeft': '0%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Div(children=[html.Label('Project Intervention:', style={'marginTop': 10})],\n style={'width': '49%', 'marginLeft': '2%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Div(children=[dcc.Dropdown(id='tissue-picker', options=[{'label': i, 'value': i} for i in Tissues], value=[], multi=True, searchable=True, style={'width': '100%'})],\n style={'width': '49%', 'marginLeft': '0%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Div(children=[dcc.Input(id='intervention-picker', placeholder='E.g. SNOMED identifier|SNOMED identifier|...', type='text', style={'width': '100%', 'height': '54px'})],\n style={'width': '49%', 'marginLeft': '2%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Br(),\n html.Br(),\n html.Div(children=[html.Label('Number of subjects:*', style={'marginTop': 15})],\n style={'width': '49%', 'marginLeft': '0%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Div(children=[html.Label('Timepoints:', style={'marginTop': 15})],\n style={'width': '49%', 'marginLeft': '2%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Div(children=[dcc.Input(id='number_subjects', placeholder='E.g. 77 (each unique patient counts as 1 subject)', type='text', style={'width': '100%', 'height': '35px'})],\n style={'width': '49%', 'marginLeft': '0%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Div(children=[dcc.Input(id='number_timepoints', placeholder='E.g. 
2 months|15 days|24 hours...', type='text', style={'width': '100%', 'height': '35px'})],\n style={'width': '49%', 'marginLeft': '2%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Br(),\n html.Br(),\n html.Div(children=[html.Label('Follows up project:', style={'marginTop': 15}),\n dcc.Input(id='related_to', placeholder='Use the Project Identifier (P000000X)', type='text', style={'width': '100%', 'height': '35px'})],\n style={'width': '49%', 'marginLeft': '0%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Br(),\n html.Br(),\n html.Div(children=[html.Label('Project Description:', style={'marginTop': 15}),\n dcc.Textarea(id='project description', placeholder='Enter description...', style={'width': '100%', 'height': '100px'})]),\n html.Br(),\n html.Div(children=[html.Label('Starting Date:', style={'marginTop': 10}),\n dcc.DatePickerSingle(id='date-picker-start', placeholder='Select date...', clearable=True)],\n style={'width': '30%', 'verticalAlign': 'top', 'marginTop': 10, 'display': 'inline-block'}),\n html.Div(children=[html.Label('Ending Date:', style={'marginTop': 10}),\n dcc.DatePickerSingle(id='date-picker-end', placeholder='Select date...', clearable=True)],\n style={'width': '30%', 'verticalAlign': 'top', 'marginTop': 10, 'display': 'inline-block'}),\n html.Div(children=html.Button('Create Project', id='project_button', n_clicks=0, className=\"button_link\",\n style={'fontSize': '25px'}), style={'width': '100%', 'padding-left': '87%', 'padding-right': '0%'}),\n html.Br(),\n html.Div(children=[html.A(children=html.Button('Download Clinical Data template', id='download_button', n_clicks=0,\n style={'fontSize': '16px', 'display': 'block'}),\n id='download_link', href='', n_clicks=0)], style={'width': '100%', 'padding-left': '87%', 'padding-right': '0%'}),\n html.Br(),\n html.Div(id='project-creation', style={'fontSize': '20px', 'marginLeft': '70%'}),\n html.Br()]),\n html.Hr()])]\n\n self.extend_layout(layout)" ]
[ "0.6382394", "0.5868262", "0.5825627", "0.5785208", "0.5766149", "0.57174546", "0.568647", "0.56794524", "0.5625228", "0.5619884", "0.55831283", "0.5565874", "0.5505238", "0.5494764", "0.54784214", "0.5467494", "0.54137725", "0.5401807", "0.5315046", "0.5295689", "0.52889675", "0.52875775", "0.5221104", "0.5219992", "0.51959145", "0.51825994", "0.5144458", "0.5135385", "0.5081125", "0.5064597" ]
0.65244144
0
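The sequence-valued fields above appear to line up index-wise (negatives[i] scored by negative_scores[i]). The minimal Python sketch below pairs and ranks them under that assumption; treating a higher score as a harder (more query-similar) negative is likewise an assumption about this dump's conventions, not something the record states.

def rank_negatives(negatives, negative_scores):
    # Assumes negatives[i] corresponds to negative_scores[i] and that a
    # higher score means the passage is more similar to the query.
    scored = list(zip(negatives, map(float, negative_scores)))
    return sorted(scored, key=lambda pair: pair[1], reverse=True)

# Illustrative call with made-up passages:
# rank_negatives(["passage a", "passage b"], ["0.12", "0.98"])
# -> [("passage b", 0.98), ("passage a", 0.12)]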
Augments a staged job info submission file with the appropriate properties for the Pipeline Tool settings.
def ConcatenatePipelineSettingsToJob( jobInfoPath, batchName ): global submissionInfo jobWriterPath = os.path.join( submissionInfo["RepoDirs"]["submission/Integration/Main"], "JobWriter.py" ) scenePath = NodegraphAPI.GetSourceFile() argArray = ["-ExecuteScript", jobWriterPath, "Katana", "--write", "--scene-path", scenePath, "--job-path", jobInfoPath, "--batch-name", batchName] CallDeadlineCommand( argArray, False )
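The positive document above defers the actual augmentation to the repository's JobWriter.py script through CallDeadlineCommand. As a rough illustration of what "augmenting a staged job info submission file" amounts to, the sketch below appends extra key=value properties to a plain-text job info file; the helper name append_pipeline_settings, the KEY=VALUE line format, and the sample property names are illustrative assumptions, not part of the submitter code.

import os

def append_pipeline_settings(job_info_path, extra_props):
    # Illustrative stand-in only: append KEY=VALUE properties to a staged
    # job info file. The real submitter delegates this work to JobWriter.py.
    if not os.path.isfile(job_info_path):
        raise FileNotFoundError(job_info_path)
    with open(job_info_path, "a") as job_file:
        for key, value in extra_props.items():
            job_file.write("{0}={1}\n".format(key, value))

# Hypothetical usage mirroring the submitter's arguments:
# append_pipeline_settings("/tmp/katana_job_info.job",
#                          {"BatchName": "shot010_comp"})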
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def merge_job_info(run, seqno, slices):\n inset = {\"job_info\": [\"workscript.stdout\", \"workscript.stderr\"],\n }\n outset = {\"job_info\": [\"std_{0:06d}_{1:03d}.out\", \"std_{0:06d}_{1:03d}.err\"],\n }\n tarset = {\"job_info\": \"job_info_{0:06d}_{1:03d}.tgz\",\n }\n badslices = []\n slicepatt = re.compile(r\"([1-9][0-9]*),([1-9][0-9]*)/\")\n for iset in inset:\n outlist = []\n for i in range(0, len(inset[iset])):\n ofile = outset[iset][i].format(run, seqno)\n with open(ofile, \"w\") as ostr:\n for sl in slices:\n ifile = \"{0},{1}/\".format(sl[0], sl[1]) + inset[iset][i]\n for lines in open(ifile):\n ostr.write(lines)\n outlist.append(ofile)\n tarfile = tarset[iset].format(run, seqno)\n cmd = subprocess.Popen([\"tar\", \"zcf\", tarfile] + outlist,\n stderr=subprocess.PIPE)\n elog = cmd.communicate()\n if cmd.returncode != 0:\n for eline in elog[1].decode(\"ascii\").split('\\n'):\n badslice = slicepatt.search(eline)\n if badslice:\n badslices.append(\"{0},{1}\".format(badslice.group(1),\n badslice.group(2)))\n sys.stderr.write(eline + '\\n')\n sys.stderr.write(\"Error on output file {0}\".format(tarfile) +\n \" - job logs tarballing failed!\\n\")\n sys.stderr.flush()\n continue\n odir = output_area + \"/\" + iset + \"/{0:06d}\".format(run)\n upload(tarfile, odir)\n return badslices", "def _edit_job_file(self, edits):\n for key in edits:\n self.json_dict[key] = edits[key]", "def _add_filename_metadata(self, extra_metadata): \n \n # Make sure product_info section exists\n extra_metadata.setdefault('product_info', {})\n \n file_name = os.path.basename(self.fname)\n fn_comps = file_name.split(\"_\")\n \n if self.__class__ == SAFESentinel1:\n component = fn_comps[2]\n if len(component) < 4: \n resolution = 'N/A'\n else:\n resolution = component[-1]\n \n extra_metadata['product_info']['Resolution'] = resolution\n \n # Add file/scan name \n extra_metadata['product_info']['Name'] = os.path.splitext(file_name)[0]\n \n # Add Satellite and Mission from the file path\n comp_1 = fn_comps[0].upper()\n extra_metadata['platform']['Mission'] = \"Sentinel-%s\" % comp_1[1]\n extra_metadata['platform']['Satellite'] = \"Sentinel-%s\" % comp_1[1:]", "def _augment_pipeline_cfg(self):", "def extra_aligner(fq1_files, smp_name, args, fq2_files=None):\n project_path = init_rnaseq_project(args['path_out'], analysis_type=1)\n extra_align_path = project_path['extra']\n\n ## qc-report\n qc_path = os.path.join(extra_align_path['report'], 'qc')\n # QC_reporter(fq1_files, qc_path).run()\n\n ## update args\n args['fq1'] = fq1_files\n args['fq2'] = fq2_files\n args['path_out'] = extra_align_path['mapping']\n args['smp_name'] = smp_name\n args['align_to_te'] = False\n\n # extra small genome, for STAR\n small_genome = args['small_genome']\n args['small_genome'] = True\n\n ## run alignment\n map_bam = Alignment(**args).run()\n\n ## return\n args['small_genome'] = small_genome\n\n ## return\n return map_bam", "def generate_job(self, additional_settings_dict):\n\n self.final_job = additional_settings_dict\n\n header = (\n \"# Measurement file: \\n \"\n \"# Project: \" + self.variables[\"Current_project\"] + \"\\n \"\n \"# Sensor Type: \" + self.variables[\"Current_sensor\"] + \"\\n \"\n \"# ID: \" + self.variables[\"Current_filename\"] + \"\\n \"\n \"# Operator: \" + self.variables[\"Current_operator\"] + \"\\n \"\n \"# Date: \" + str(time.asctime()) + \"\\n\\n\"\n )\n\n IVCV_dict = self.generate_IVCV(\"\") # here additional header can be added\n strip_dict = self.generate_strip(\"\")\n\n if IVCV_dict:\n 
self.final_job.update({\"IVCV\": IVCV_dict})\n if strip_dict:\n self.final_job.update({\"stripscan\": strip_dict})\n\n # Check if filepath is a valid path\n if self.variables[\"Current_filename\"] and os.path.isdir(\n self.variables[\"Current_directory\"]\n ):\n self.final_job.update({\"Header\": header})\n self.queue_to_measure.put({\"Measurement\": self.final_job})\n self.log.info(\"Sendet job: \" + str({\"Measurement\": self.final_job}))\n else:\n self.log.error(\n \"Please enter a valid path and name for the measurement file.\"\n )", "def fixit(file):\n with open(file, \"r\") as f:\n config = json.load(f)\n if \"job_status\" in config[\"supply-curve-aggregation\"]:\n del config[\"supply-curve-aggregation\"][\"job_status\"]\n if \"rep-profiles\" in config:\n del config[\"rep-profiles\"]\n fname = os.path.basename(file)\n cname = \"_\".join(fname.split(\"_\")[:-1]) + \"_agg\"\n config[\"supply-curve-aggregation\"][cname][\"job_status\"] = \"successful\"\n\n with open(file, \"w\") as f:\n f.write(json.dumps(config, indent=4))", "def add_merge_job(dax, final_name, chunk, level, job_number, final):\n j = Job(name=\"merge.sh\")\n out_file_name = final_name + \"-%d-%d.tar.gz\" %(level, job_number)\n out_file = File(out_file_name)\n if final:\n out_file_name = final_name\n out_file = File(final_name)\n j.uses(out_file, link=Link.OUTPUT, transfer=final)\n j.addArguments(out_file)\n for f in chunk:\n flfn = File(f)\n j.uses(flfn, link=Link.INPUT)\n j.addArguments(flfn)\n j.addProfile(Profile(Namespace.CONDOR, 'request_disk', '100 GB'))\n dax.addJob(j)\n return out_file_name", "def format_preparation_files(run_dir, sample_sheet, output_dir, pipeline,\n verbose):\n sample_sheet = KLSampleSheet(sample_sheet)\n df_sheet = sample_sheet_to_dataframe(sample_sheet)\n\n if pipeline == 'atropos-and-bowtie2':\n click.echo('Stats collection is not supported for pipeline '\n 'atropos-and-bowtie2')\n else:\n stats = run_counts(run_dir, sample_sheet)\n\n stats['sample_name'] = \\\n df_sheet.set_index('lane', append=True)['sample_name']\n\n # returns a map of (run, project_name, lane) -> preparation frame\n preps = preparations_for_run(run_dir, df_sheet, pipeline=pipeline)\n\n os.makedirs(output_dir, exist_ok=True)\n\n for (run, project, lane), df in preps.items():\n fp = os.path.join(output_dir, f'{run}.{project}.{lane}.tsv')\n\n if pipeline == 'fastp-and-minimap2':\n # stats are indexed by sample name and lane, lane is the first\n # level index. 
When merging, make sure to select the lane subset\n # that we care about, otherwise we'll end up with repeated rows\n df = df.merge(stats.xs(lane, level=1), how='left',\n on='sample_name')\n\n # strip qiita_id from project names in sample_project column\n df['sample_project'] = df['sample_project'].map(\n lambda x: re.sub(r'_\\d+$', r'', x))\n\n # center_project_name is a legacy column that should mirror\n # the values for sample_project.\n df['center_project_name'] = df['sample_project']\n\n df.to_csv(fp, sep='\\t', index=False)\n\n if verbose:\n project_name = remove_qiita_id(project)\n # assume qiita_id is extractable and is an integer, given that\n # we have already passed error-checking.\n qiita_id = project.replace(project_name + '_', '')\n print(\"%s\\t%s\" % (qiita_id, abspath(fp)))", "def _jobfile(self):\n job = self.job.format(fnum=self.fnum)\n with open(job, 'w') as f:\n f.write('#!/bin/sh\\n' + self.phast_cmmd + self.cleanup_cmmd)", "def modify_job(filename, batch_name, node_choice, ppn_list, qe_switch, atoms):\n\tfin = open(filename, \"r\"); file = fin.read(); fin.close()\n\t# fin_head = open(\"/home/twchang/bin/others/job.sh-mod\", 'r').read()\n\tfin_head = Job.job_head\n\tnode_str = \"+\".join([\"node{:02d}:ppn={}\".format(node_choice[i], ppn_list[i]) for i in range(len(ppn_list))])\n\ttail = re.search(r\".*(\\s*-np.*)\", file, re.S).group(1).strip()\n\ttail = re.sub(r\"\\s*>>\\s*out\", r\"\", tail)\n\tif qe_switch:\n\t\ttail = re.sub(r\"(NPROCS\\s*).*(/bin/)\", r\"\\g<1>/data2/twchang/opt/q-e-qe-6.4.1\\g<2>\", tail, re.S)\n\telse: # vasp mode\n\t\ttail = re.sub(r\"(NPROCS\\s*).*(/bin/).*(\\n)\", r\"\\g<1>/home/twchang\\g<2>vasp_noncol \\g<3>\", tail, re.S)\n\tfile = fin_head + \" \" + tail\n\tfile = re.sub(r\"(#PBS\\s+-N\\s+).*\", r\"\\g<1>{}\".format(batch_name), file) # if batch_name != \"\" else file # modify batch_name\n\tfile = re.sub(r\"(#PBS\\s+-l\\s+nodes=).*\", r\"\\g<1>{}\".format(node_str), file) # modify ppn\n\tfile = re.sub(r\"(-np\\s+\\$NPROCS\\s+.*\\s+<).*?(\\..*>\\s+).*?(\\..*)\", r\"\\g<1>{}\\g<2>{}\\g<3>\".format(atoms, atoms), file) if atoms else file\n\tfile = re.sub(r\"/data2/twchang/q-e-qe-6\\.1\\.0/bin\", r\"/data2/twchang/opt/q-e-qe-6.4.1/bin\", file)# if qe_switch else file\n\tfout = open(filename, \"w\"); fout.write(file)", "def edit_job_file(job_file_name, out_file_name, edits):\n o_job = JsonJobsFile()\n o_job._raw_read(job_file_name)\n o_job._edit_job_file(edits)\n o_job.write_as(out_file_name)", "def modelarts_pre_process():\n config.file_name = os.path.join(config.output_path, config.file_name)", "def _format_submission(self, sub):\n #TODO: Format time strings to datetime objects\n formatted = {}\n formatted['percentage'] = sub.get('percentComplete', None)\n formatted['xml_settings'] = sub.get('settings', None)\n formatted['status'] = sub.get('status', 'NotStarted')\n formatted['time_submitted'] = sub.get('submissionTime', None)\n formatted['time_started'] = sub.get('startTime', None)\n formatted['time_completed'] = sub.get('completionTime', None)\n formatted['requested_instances'] = int(sub.get('instanceCount', 0)) #DEP\n formatted['number_tasks'] = int(sub.get('taskCount', 0))\n formatted['output_filename'] = sub.get('outputFileName', None)\n formatted['output_url'] = sub.get('outputLink', {'href':None})['href']\n formatted['thumb_url'] = sub.get('previewLink', {'href':None})['href']\n formatted['tasks_url'] = sub.get('taskListLink', {'href':None})['href']\n formatted['pool_id'] = sub.get('poolId', None)\n\n self._log.debug(\"Extracted job 
submission data: {0}\".format(formatted))\n return formatted", "def _push_one(self, f, **kwargs):\n\n # Copy the metadata for modifying and open the ann file\n meta = kwargs.copy()\n desc = read_InSar_annotation(f)\n\n # Expand the path for the geotiffs\n tiff_dir = abspath(expanduser(self.geotiff_dir))\n\n # form the pattern to look for and grab the tifs\n pattern = '.'.join(basename(f).split('.')[0:-1]) + '*.tif'\n rasters = glob.glob(join(tiff_dir, pattern))\n\n # Submit each geotif, modifying meta on the fly\n for r in rasters:\n # Grab information from the filename\n f_pieces = r.split('.')\n component = f_pieces[-2] # Real or imaginary component\n data_abbr = f_pieces[-3] # Key to the data name\n dname = self.dname_map[data_abbr] # Data type in db\n\n # For the data type\n meta['type'] = 'insar ' + dname.split(' ')[0]\n\n if dname == 'interferogram':\n meta['type'] += (' ' + component)\n\n # Assign the date for the respective flights\n if 'amplitude' in dname:\n meta['date'] = desc['start time of acquisition for pass {}'.format(\n dname.split(' ')[-1])]['value']\n\n # Derived products always receive the date of the last overpass\n else:\n meta['date'] = desc['start time of acquisition for pass 2']['value']\n\n # Assign only the date not the date and time\n meta['date'] = meta['date'].date()\n\n # Assign units\n meta['units'] = desc['{} units'.format(\n dname.split(' ')[0])]['value']\n\n # Flexibly form a comment for each of the products for dates\n comment = get_InSar_flight_comment(dname, desc)\n # add which dem was used which dictates the file name convert e.g.\n # ...VV_01.int.grd\n comment += ', DEM used = {}'.format(\n desc['dem used in processing']['value'])\n # Add the polarization to the the comments\n comment += ', Polarization = {}'.format(\n desc['polarization']['value'])\n meta['description'] = comment\n\n self.log.info('Uploading {} as {}...'.format(r, meta['type']))\n\n d = self.UploaderClass(r, **meta)\n\n # Submit the data to the database\n d.submit(self.session)\n\n # Uploaded set\n self.uploaded += 1", "def editReport(self, finalReport):\n report = self.mergeReport()\n for f in report.getAllFiles():\n f['outputModule'] = self.moduleName\n f['module_label'] = self.moduleName\n f['inputpfns'] = []\n f['inputs'] = self.inputFiles()\n finalReport.addOutputFile(self.moduleName, f)", "def copy_output_to_archive(wcl, jobfiles, fileinfo, level, task_label, exitcode):\n # fileinfo[filename] = {filename, fullname, sectname}\n\n if miscutils.fwdebug_check(3, \"PFWRUNJOB_DEBUG\"):\n miscutils.fwdebug_print(\"BEG\")\n putinfo = {}\n\n\n # check each output file definition to see if should save file\n if miscutils.fwdebug_check(3, \"PFWRUNJOB_DEBUG\"):\n miscutils.fwdebug_print(\"Checking for save_file_archive\")\n\n for (filename, fdict) in fileinfo.items():\n if miscutils.fwdebug_check(3, \"PFWRUNJOB_DEBUG\"):\n miscutils.fwdebug_print(\"filename %s, fdict=%s\" % (filename, fdict))\n (filename, compression) = miscutils.parse_fullname(fdict['fullname'],\n miscutils.CU_PARSE_FILENAME|miscutils.CU_PARSE_COMPRESSION)\n\n putinfo[filename] = {'src': fdict['fullname'],\n 'compression': compression,\n 'filename': filename,\n 'filetype': fdict['filetype'],\n 'filesave': fdict['filesave'],\n 'filecompress': fdict['filecompress'],\n 'path': fdict['path']}\n\n transfer_job_to_archives(wcl, jobfiles, putinfo, level, task_label, exitcode)\n\n if miscutils.fwdebug_check(3, \"PFWRUNJOB_DEBUG\"):\n miscutils.fwdebug_print(\"END\\n\\n\")", "def write_merge_script(s,inputs=[]):\n assert 
len(inputs)>0\n # hadd determines if we are merging main histograms file, or unfolding files\n hadd = True if s.jobtype == \"MRG\" else False\n s.jobfile = os.path.join(s.submitdir, 'merge_wasym.sh' if hadd else 'munfold_wasym.sh')\n s.outROOT = ('root_' if hadd else 'unfold_')+s.tag+\".root\"\n s.outROOTpath = os.path.join('results','ana_wasym',s.outROOT)\n pre = 'merge' if hadd else 'munfold'\n s.outOU = os.path.join(s.submitdir, pre+'_wasym.out.log')\n s.outER = os.path.join(s.submitdir, pre+'_wasym.err.log')\n s.outLOG = os.path.join(s.submitdir, pre+'_wasym.log.log')\n flist = 'wasym.root.list' if hadd else 'wasym.unfold.list'\n s.outputs += [flist]\n f = open(s.jobfile, \"w\")\n print >>f, SH_PRE%(s.fdic[0],s.fdic[1])\n print >>f,'RMODE=merge'\n print >>f,'nexpected=%d'%len(inputs)\n print >>f,'ntot=0'\n print >>f,'rm -f ${ROOTDIR}/%s ; touch ${ROOTDIR}/%s;'%(flist,flist)\n for fin in inputs:\n fname = fin if hadd else '%s.unfold'%fin\n print >>f,'f=\"${RESDIR}/%s.root\"'%fname\n print >>f,'st=`xrd uct3-xrd.mwt2.org existfile $f`'\n print >>f,'if [ \"$st\" == \"The file exists.\" ]; then'\n # xrootd files: reduce cache size, since hadd is stupid and will eat 100% of RAM\n print >>f,'echo ${RESHOST}/$f?cachesz=1000000 >> ${ROOTDIR}/%s'%flist\n print >>f,'((ntot++))'\n print >>f,'else'\n print >>f,'echo ERROR: failed to locate file $f'\n print >>f,'fi'\n print >>f,'if [ \"$ntot\" -eq \"$nexpected\" ]; then echo \"ALL DONE\"; else echo \"ERROR: missing `expr $nexpected - $ntot` files\"; echo exit 202; exit 202; fi'\n print >>f,'if [ \"$ntot\" -eq \"0\" ]; then echo \"ERROR: no files to merge\"; echo exit 203; exit 203; fi'\n print >>f,\"\"\"\n# a special version of hadd that adds files in chunks of 20\nfunction hadd2() {\n local per\n per=30 #20\n fin=$1\n opts=$2\n fout=$3\n shift\n n=`cat $fin | wc -l`\n ngrp=`expr $n / $per`\n nrem=`expr $n % $per`\n if [ \\\"$nrem\\\" == \\\"0\\\" ]; then ngrp=`expr $ngrp - 1`; fi\n for igrp in `seq 0 $ngrp`; do\n\timin=`expr $per \\* $igrp`\n\timax=`expr $per \\* $igrp + $per`\n\tif [ \\\"$imax\\\" -gt \\\"$n\\\" ]; then imax=`expr $per \\* $igrp + $nrem`; fi\n\t# offset by 1\n\timin=`expr $imin + 1`\n\timax=`expr $imax`\n\tidel=`expr $imax - $imin + 1`\n\techo \\\"===== Part $igrp / $ngrp : $imin to $imax\\\"\n\techo hadd ${opts} \\\"${fout}.TMPHADD_${igrp}.root\\\" `cat $fin | head -n $imax | tail -n $idel`\n\thadd ${opts} \\\"${fout}.TMPHADD_${igrp}.root\\\" `cat $fin | head -n $imax | tail -n $idel`\n\tst=$?\n\tif [ \\\"$st\\\" != \\\"0\\\" ]; then\n\t echo \\\"ERROR: merge step $igrp failed. 
Bailing out...\\\"\n\t return $st\n\tfi\n done\n # remove opts to speed up the last step and prevent creation of additional ntuple cycles;2\n echo hadd ${fout} ${fout}.TMPHADD_*root*\n hadd ${fout} ${fout}.TMPHADD_*root*\n st=$?\n rm -f ${fout}.TMPHADD_*root*\n return $st\n}\n \"\"\"\n if False:\n if hadd:\n print >>f, 'echo hadd -O %s `cat ${ROOTDIR}/%s`'%(s.outROOTpath,flist)\n print >>f, 'hadd -O %s `cat ${ROOTDIR}/%s`'%(s.outROOTpath,flist)\n else:\n print >>f, 'echo hadd -T %s `cat ${ROOTDIR}/%s`'%(s.outROOTpath,flist)\n print >>f, 'hadd -T %s `cat ${ROOTDIR}/%s`'%(s.outROOTpath,flist)\n else:\n print >>f, 'hadd2 ${ROOTDIR}/%s \"%s\" %s'%(flist,\"-O\" if hadd else \"-T\",s.outROOTpath)\n print >>f, \"status=$?\"\n print >>f, SH_POST\n f.close()\n os.system('chmod +x %s'%s.jobfile)\n s.write_submit_script()\n return True", "def update_info(job_path,\n vals,\n param_names,\n prop_name):\n\n with open(job_path, \"r\") as f:\n info = json.load(f)\n\n real_names = []\n real_vals = []\n\n for param_name, val in zip(param_names, vals):\n if param_name.startswith(\"log_\"):\n # if anything starts with \"log_\" (e.g. \"log_schnet_dropout\"),\n # exponentiate its value to get the actual number\n real_names.append(param_name.replace(\"log_\", \"\"))\n real_vals.append(np.exp(val))\n else:\n real_names.append(param_name)\n real_vals.append(val)\n\n # update values\n for param_type, val in zip(real_names, real_vals):\n if 'dropout' in param_type:\n update_dropout(info=info,\n dropout=val,\n dropout_type=param_type,\n prop_name=prop_name)\n\n elif param_type == \"num_heads\":\n update_heads(info=info,\n heads=val)\n\n elif param_type == \"attention_type\":\n info[\"model_params\"][\"boltzmann_dict\"][\"type\"] = val\n\n else:\n if param_type not in info[\"model_params\"]:\n msg = (f\"Warning: assuming that {param_type} \"\n \"is just a key in `model_params`, but \"\n \"it is not currently in `model_params` in \"\n \"the config file. 
If it should be in a \"\n \"different location then you will need \"\n \"to write a custom function for updating \"\n \"it.\")\n\n fprint(msg)\n\n update_general(info, key=param_type, val=val)\n\n # save\n with open(job_path, \"w\") as f:\n json.dump(info, f, indent=4, sort_keys=True)", "def __setattr__(self, name, value):\n\n if name in self.submission:\n raise ValueError(\"Can't override job submission data: \"\n \"{data}\".format(data=self.submission[name]))\n\n else:\n super(SubmittedJob, self).__setattr__(name, value)", "def process_datasets(self):\n\n with open(self.mappings, \"r+\") as json_file:\n emsl_to_jgi = json.load(json_file)\n emsl_to_jgi_copy = copy.deepcopy(emsl_to_jgi)\n\n contaminant_file_loc = emsl_to_jgi[\"contaminant_file_loc\"]\n # run for each dataset\n for dataset_id, values in emsl_to_jgi.items():\n if dataset_id not in [\n \"contaminant_file_loc\",\n \"analysis_activity_file_loc\",\n \"data_objects_file_loc\",\n \"STUDY\",\n \"tools_used\",\n ]:\n raw_file_loc = values[\"raw_file_loc\"]\n self.dataset_name = values[\"dataset_name\"]\n # dataset search against a fasta file\n for genome_directory, locations in values[\n \"genome_directory\"\n ].items():\n # clear object to prepare next job\n ANALYSIS_JOBS_OBJECT.clear()\n\n # create log_dir\n self.save_job_results = os.path.join(\n self.result_loc, dataset_id, genome_directory\n )\n self.log_collected_at = os.path.join(\n os.path.abspath(self.save_job_results), \"analysis_jobs_logs\"\n )\n if not os.path.exists(self.log_collected_at):\n os.makedirs(self.log_collected_at)\n\n files = [locations[\"faa_file_loc\"], contaminant_file_loc]\n contaminated_faa_file_loc = self.contaminate_fasta(files)\n\n self.register_job_in_emsl_to_jgi(\n dataset_id,\n genome_directory,\n \"contaminated_faa_file_loc\",\n contaminated_faa_file_loc,\n emsl_to_jgi_copy,\n )\n # convert .faa to .txt\n faa_txt_file = self.convert_faa2txt(\n dataset_id, contaminated_faa_file_loc\n )\n self.register_job_in_emsl_to_jgi(\n dataset_id,\n genome_directory,\n \"txt_faa_file_loc\",\n faa_txt_file,\n emsl_to_jgi_copy,\n )\n\n # log & run job\n self.run_n_log_job(\n dataset_id,\n genome_directory,\n contaminated_faa_file_loc,\n raw_file_loc,\n emsl_to_jgi_copy,\n )\n\n # merge analysis\n resultant_file = self.merge_analysis_jobs(\n dataset_id, genome_directory\n )\n self.register_job_in_emsl_to_jgi(\n dataset_id,\n genome_directory,\n \"resultant_file_loc\",\n resultant_file,\n emsl_to_jgi_copy,\n )\n\n # capture the job metadata object\n logger.info(\"Jobrun\", extra=LOGGED_ANALYSIS_JOB)\n\n # update emsl_to_jgi.json\n json_file.seek(0) # move back to BOF.\n json_file.truncate()\n json_file.write(json.dumps(emsl_to_jgi_copy, default=str, indent=4))\n pass", "def _update_extra_metadata(self, extra_metadata):\n self._add_filename_metadata(extra_metadata)\n self._derive_extra_metadata(extra_metadata)\n \n if type(self) == SAFESentinel3:\n self._extract_metadata_from_zipfile(extra_metadata)", "def trigger_batch_job(parent_batch_id, job_input, job_params):\n job_name = job_params[\"jobName\"]\n job_modality = job_params[\"jobModality\"]\n\n batch_id = f\"{parent_batch_id}-{job_name}\"\n\n output_path = (\n f\"s3://{batch_processing_bucket_name}/batch_manifests/{job_modality}/{batch_id}/output\"\n )\n\n # If a label category file wasn't provided as API input, use the previous\n # job's label category file.\n label_category_config_uri = job_input.label_category_s3_uri\n if \"labelCategoryConfigS3Uri\" in job_params:\n label_category_config_uri = 
job_params[\"labelCategoryConfigS3Uri\"]\n\n # batch_job_input_data = event[\"batch_job_input\"]\n labeling_job_request = construct_labeling_job_input(\n parent_batch_id=parent_batch_id,\n input_manifest_url=job_input.input_manifest_s3_uri,\n audit_label_attribute_name=job_input.label_attribute_name,\n label_category_config_uri=label_category_config_uri,\n job_params=job_params,\n output_path=output_path,\n )\n\n sagemaker.create_labeling_job(**labeling_job_request)\n s3_output_path = f\"{output_path}/{job_name}/manifests/output/output.manifest\"\n\n db.insert_job_level_metadata(\n parent_batch_id=parent_batch_id,\n batch_id=batch_id,\n batch_status=BatchStatus.WAIT_FOR_SMGT_RESPONSE,\n labeling_job_name=job_name,\n label_attribute_name=labeling_job_request[\"LabelAttributeName\"],\n label_category_s3_uri=labeling_job_request[\"LabelCategoryConfigS3Uri\"],\n job_input_s3_uri=labeling_job_request[\"InputConfig\"][\"DataSource\"][\"S3DataSource\"][\n \"ManifestS3Uri\"\n ],\n job_output_s3_uri=s3_output_path,\n )", "def main(self):\n self.jamf_url = self.env.get(\"JSS_URL\")\n self.jamf_user = self.env.get(\"API_USERNAME\")\n self.jamf_password = self.env.get(\"API_PASSWORD\")\n self.ea_script_path = self.env.get(\"ea_script_path\")\n self.ea_name = self.env.get(\"ea_name\")\n self.replace = self.env.get(\"replace_ea\")\n self.ea_data_type = self.env.get(\"ea_data_type\")\n self.ea_inventory_display = self.env.get(\"ea_inventory_display\")\n self.sleep = self.env.get(\"sleep\")\n # handle setting replace in overrides\n if not self.replace or self.replace == \"False\":\n self.replace = False\n\n # clear any pre-existing summary result\n if \"jamfextensionattributeuploader_summary_result\" in self.env:\n del self.env[\"jamfextensionattributeuploader_summary_result\"]\n ea_uploaded = False\n\n # handle files with a relative path\n if not self.ea_script_path.startswith(\"/\"):\n found_template = self.get_path_to_file(self.ea_script_path)\n if found_template:\n self.ea_script_path = found_template\n else:\n raise ProcessorError(f\"ERROR: EA file {self.ea_script_path} not found\")\n\n # now start the process of uploading the object\n self.output(f\"Checking for existing '{self.ea_name}' on {self.jamf_url}\")\n\n # obtain the relevant credentials\n token, send_creds, _ = self.handle_classic_auth(\n self.jamf_url, self.jamf_user, self.jamf_password\n )\n\n # check for existing - requires obj_name\n obj_type = \"extension_attribute\"\n obj_name = self.ea_name\n obj_id = self.get_api_obj_id_from_name(\n self.jamf_url,\n obj_name,\n obj_type,\n enc_creds=send_creds,\n token=token,\n )\n\n if obj_id:\n self.output(\n \"Extension Attribute '{}' already exists: ID {}\".format(\n self.ea_name, obj_id\n )\n )\n if self.replace:\n self.output(\n \"Replacing existing Extension Attribute as 'replace_ea' is set to {}\".format(\n self.replace\n ),\n verbose_level=1,\n )\n else:\n self.output(\n \"Not replacing existing Extension Attribute. 
Use replace_ea='True' to enforce.\",\n verbose_level=1,\n )\n return\n\n # upload the EA\n self.upload_ea(\n self.jamf_url,\n self.ea_name,\n self.ea_data_type,\n self.ea_inventory_display,\n self.ea_script_path,\n obj_id=obj_id,\n enc_creds=send_creds,\n token=token,\n )\n ea_uploaded = True\n\n # output the summary\n self.env[\"extension_attribute\"] = self.ea_name\n self.env[\"ea_uploaded\"] = ea_uploaded\n if ea_uploaded:\n self.env[\"jamfextensionattributeuploader_summary_result\"] = {\n \"summary_text\": (\n \"The following extension attributes were created or \"\n \"updated in Jamf Pro:\"\n ),\n \"report_fields\": [\"name\", \"path\"],\n \"data\": {\"name\": self.ea_name, \"path\": self.ea_script_path},\n }", "def _populate_pipeline_info(case_obj, case_data):\n if case_data.get(\"exe_ver\"):\n case_obj[\"pipeline_version\"] = case_data[\"exe_ver\"]", "def annotate(args):\n\n set_quiet(args.quiet)\n\n try:\n # first, load taxonomic_assignments\n tax_assign = MultiLineageDB.load(args.taxonomy_csv,\n keep_full_identifiers=args.keep_full_identifiers,\n keep_identifier_versions=args.keep_identifier_versions,\n force=args.force, lins=args.lins)\n\n except ValueError as exc:\n error(f\"ERROR: {str(exc)}\")\n sys.exit(-1)\n\n if not tax_assign:\n error(f'ERROR: No taxonomic assignments loaded from {\",\".join(args.taxonomy_csv)}. Exiting.')\n sys.exit(-1)\n\n # get csv from args\n input_csvs = tax_utils.collect_gather_csvs(args.gather_csv, from_file=args.from_file)\n\n # handle each gather csv separately\n for n, in_csv in enumerate(input_csvs):\n try:\n # Check for a column we can use to find lineage information:\n with FileInputCSV(in_csv) as r:\n header = r.fieldnames\n # check for empty file\n if not header:\n raise ValueError(f\"Cannot read from '{in_csv}'. Is file empty?\")\n\n # look for the column to match with taxonomic identifier\n id_col = None\n col_options = ['name', 'match_name', 'ident', 'accession']\n for colname in col_options:\n if colname in header:\n id_col = colname\n break\n\n if not id_col:\n raise ValueError(f\"Cannot find taxonomic identifier column in '{in_csv}'. Tried: {', '.join(col_options)}\")\n\n notify(f\"Starting annotation on '{in_csv}'. Using ID column: '{id_col}'\")\n\n # make output file for this input\n out_base = os.path.basename(in_csv.rsplit('.csv')[0])\n this_outfile, _ = make_outfile(out_base, \"annotate\", output_dir=args.output_dir)\n\n out_header = header + ['lineage']\n\n with FileOutputCSV(this_outfile) as out_fp:\n w = csv.DictWriter(out_fp, out_header)\n w.writeheader()\n\n n = 0\n n_missed = 0\n for n, row in enumerate(r):\n # find lineage and write annotated row\n taxres = AnnotateTaxResult(raw=row, id_col=id_col, lins=args.lins,\n keep_full_identifiers=args.keep_full_identifiers,\n keep_identifier_versions=args.keep_identifier_versions)\n taxres.get_match_lineage(tax_assignments=tax_assign, fail_on_missing_taxonomy=args.fail_on_missing_taxonomy)\n\n if taxres.missed_ident: # could not assign taxonomy\n n_missed+=1\n w.writerow(taxres.row_with_lineages())\n\n rows_annotated = (n+1) - n_missed\n if not rows_annotated:\n raise ValueError(f\"Could not annotate any rows from '{in_csv}'.\")\n else:\n notify(f\"Annotated {rows_annotated} of {n+1} total rows from '{in_csv}'.\")\n\n except ValueError as exc:\n if args.force:\n notify(str(exc))\n notify('--force is set. 
Attempting to continue to next file.')\n else:\n error(f\"ERROR: {str(exc)}\")\n sys.exit(-1)", "def setup_file_data(cls, sess, submission_id):\n ready = JOB_STATUS_DICT['ready']\n csv_validation = JOB_TYPE_DICT['csv_record_validation']\n\n job = FileTests.insert_job(\n sess,\n filetype=FILE_TYPE_DICT['award'],\n status=ready,\n type_id=csv_validation,\n submission=submission_id\n )\n # everything is fine\n FileTests.insert_file(sess, job.job_id, FILE_STATUS_DICT['complete'])\n\n job = FileTests.insert_job(\n sess,\n filetype=FILE_TYPE_DICT['award_financial'],\n status=ready,\n type_id=csv_validation,\n submission=submission_id\n )\n # bad header\n FileTests.insert_file(sess, job.job_id, FILE_STATUS_DICT['unknown_error'])\n\n job = FileTests.insert_job(\n sess,\n filetype=FILE_TYPE_DICT['appropriations'],\n status=ready,\n type_id=csv_validation,\n submission=submission_id\n )\n # validation level errors\n FileTests.insert_file(sess, job.job_id, FILE_STATUS_DICT['complete'])\n cls.insert_row_level_error(sess, job.job_id)", "def make_svm_input_file(input_filename, output_custom_pars_file='custom_svm_params.json', clobber=False,\n log_level=logutil.logging.INFO):\n log.setLevel(log_level)\n if not clobber:\n if os.path.exists(output_custom_pars_file):\n msg = \"A file named '{}' already exists. Please choose a unique name for the custom SVM parameter file.\".format(output_custom_pars_file)\n log.critical(msg)\n sys.exit()\n # Define trailer file (log file) that will contain the log entries for all processing\n if isinstance(input_filename, str): # input file is a poller file -- easy case\n logname = input_filename.replace('.out', '_svm_partam_gen.log')\n\n else:\n logname = 'svm_param_gen.log'\n\n # Initialize total trailer filename as temp logname\n logging.basicConfig(filename=logname, format=SPLUNK_MSG_FORMAT, datefmt=MSG_DATEFMT)\n # start processing\n starting_dt = datetime.datetime.now()\n log.info(\"Run start time: {}\".format(str(starting_dt)))\n\n try:\n # Parse the poller file and generate the the obs_info_dict, as well as the total detection\n # product lists which contain the ExposureProduct, FilterProduct, and TotalProduct objects\n # A poller file contains visit data for a single instrument. The TotalProduct discriminant\n # is the detector. 
A TotalProduct object is comprised of FilterProducts and ExposureProducts\n # where its FilterProduct is distinguished by the filter in use, and the ExposureProduct\n # is the atomic exposure data.\n log.info(\"Parse the poller and determine what exposures need to be combined into separate products.\\n\")\n obs_info_dict, total_obj_list = poller_utils.interpret_obset_input(input_filename, log_level)\n\n # Update all of the product objects with their associated configuration information.\n for total_item in total_obj_list:\n log.info(\"Preparing configuration parameter values for total product {}\".format(total_item.drizzle_filename))\n total_item.configobj_pars = config_utils.HapConfig(total_item,\n log_level=log_level,\n output_custom_pars_file=output_custom_pars_file)\n for filter_item in total_item.fdp_list:\n log.info(\"Preparing configuration parameter values for filter product {}\".format(filter_item.drizzle_filename))\n filter_item.configobj_pars = config_utils.HapConfig(filter_item,\n log_level=log_level,\n output_custom_pars_file=output_custom_pars_file)\n update_ci_values(filter_item, output_custom_pars_file, log_level)\n\n for expo_item in total_item.edp_list:\n log.info(\"Preparing configuration parameter values for exposure product {}\".format(expo_item.drizzle_filename))\n expo_item.configobj_pars = config_utils.HapConfig(expo_item,\n log_level=log_level,\n output_custom_pars_file=output_custom_pars_file)\n # Housekeeping: remove those pesky renamed copies of the input flc.fits/flt.fits files\n # generated by drizzlepac.haputils.product()\n if expo_item.drizzle_filename.endswith(\"_drc.fits\"):\n file_to_remove = expo_item.drizzle_filename.replace(\"_drc.fits\", \"_flc.fits\")\n if expo_item.drizzle_filename.endswith(\"_drz.fits\"):\n file_to_remove = expo_item.drizzle_filename.replace(\"_drz.fits\", \"_flt.fits\")\n if os.path.exists(file_to_remove):\n os.remove(file_to_remove)\n except Exception:\n exc_type, exc_value, exc_tb = sys.exc_info()\n traceback.print_exception(exc_type, exc_value, exc_tb, file=sys.stdout)\n err_msg = \"Something went wrong!\"\n log.error(err_msg)\n raise Exception(err_msg)", "def _StageMetadata(json_metadata, storage_service, staged_file: str):\n # Write computed metadata to object storage.\n temp_run_dir = temp_dir.GetRunDirPath()\n local_file = os.path.join(temp_run_dir, os.path.basename(staged_file))\n with open(local_file, 'w') as f:\n json.dump(json_metadata, f)\n storage_service.Copy(local_file, staged_file)", "def _file_update(self, filename):\n values = TaskInfo._parse_file(filename)\n self._load_dict(values)" ]
[ "0.54600435", "0.5365229", "0.49939448", "0.4906341", "0.48872775", "0.48601264", "0.48544395", "0.48313162", "0.48072532", "0.47882256", "0.4777496", "0.47465393", "0.47256604", "0.46529025", "0.4646404", "0.46430737", "0.46245492", "0.4616829", "0.45886663", "0.4572362", "0.45551187", "0.45464298", "0.45442006", "0.45406884", "0.4523129", "0.4521584", "0.451238", "0.45089817", "0.45071065", "0.44974235" ]
0.61063117
0
Grabs a status message from the JobWriter that indicates which pipeline tools have settings enabled for the current scene.
def RetrievePipelineToolStatus( raiseOnExitCode=False ):
    global submissionInfo

    scenePath = NodegraphAPI.GetSourceFile()

    jobWriterPath = os.path.join(submissionInfo["RepoDirs"]["submission/Integration/Main"], "JobWriter.py")
    argArray = ["-ExecuteScript", jobWriterPath, "Katana", "--status", "--scene-path", scenePath]
    statusMessage = CallDeadlineCommand(argArray, hideWindow=False, raiseOnExitCode=raiseOnExitCode)

    return statusMessage
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_tools_state(self):\n\t\treturn Job(SDK.PrlVm_GetToolsState(self.handle)[0])", "def status(self):\n return STATUSES.get(self._mower_status, {}).get('message', self._mower_status)", "def get_status(self):\n url = \"data_request?id=jobstatus&job=%d&plugin=zwave\" % self.id\n return self.vera.get(url)", "def status():\n with spinner():\n is_enabled = is_witness_enabled()\n signing_key = current_signing_key()\n misses = total_missed()\n\n t = PrettyTable([\"Enabled\", \"Misses\", \"Key\"])\n t.align = \"l\"\n t.add_row([is_enabled, misses, signing_key])\n\n output(t, 'Status')\n output(get_config(), 'Configuration')", "def get_status(self):\n return self.msg", "def status(self) -> str:\n return self._check_job_status()", "def stage_status(self) -> str:\n return pulumi.get(self, \"stage_status\")", "def pipeline_status_path(self):\n return '/_ah/pipeline/status?root=%s&auto=false' % self.root_pipeline_id", "def status(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")" ]
[ "0.5826462", "0.5679577", "0.56522906", "0.5546779", "0.55216604", "0.5513174", "0.54827136", "0.5467698", "0.5463971", "0.5463971", "0.5463971", "0.5425974", "0.5425974", "0.5425974", "0.5425974", "0.5425974", "0.5425974", "0.5425974", "0.5425974", "0.5425974", "0.5425974", "0.5425974", "0.5425974", "0.5425974", "0.5425974", "0.5425974", "0.5425974", "0.5425974", "0.5425974", "0.5425974" ]
0.7234952
0
Modifies the Pipeline Tool status label UI element with the supplied message
def UpdatePipelineToolStatusLabel( gui, statusMessage ):
    gui.pipelineToolStatusLabel.setText( statusMessage )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_status(self, msg):\n self.status_lbl.config(text=msg)", "def status_display(self, message, level=0, field=0):\n #print(message)\n self.statusbar_txt.set(message)", "def updateStatus(self, message):\r\n self.statusBar().showMessage(message, 5000)\r\n if self.kinfilename is not None:\r\n self.setWindowTitle(\"Visualization Tool - %s\" % \\\r\n os.path.basename(unicode(self.kinfilename)))", "def setStatus(self, message):\n self.statusBar().showMessage(self.tr(message))", "def SetStatusMessage(self, msg):\n if self._status_msg_fn:\n self._status_msg_fn(msg)\n else:\n tf.logging.info('Status: %s', msg)", "def update_status(self, s):\n self.statusbar.showMessage(s)", "def set_status_text(self, value):\n self.status_bar.SetStatusText(value)", "def set_status(self, string):\n self.statusbar.showMessage(string)\n return", "def showStatus(self, message):\n self.status_bar.showMessage(message)", "def setStatus(self,text:str) -> None:\n self.logger.info(text)\n self.appendReport(text)\n self.status.config(text=text)\n self.root.update_idletasks()", "async def update_status_message(self):\n embed, components = self.get_status_embed_and_components()\n await self.client.message_edit(self.status_message, embed = embed, components = components)", "def set_status_message(self, message):\n\n # Nagios considers a pipe (|) a split from STATUS MESSAGE and perf\n # data. If we replace it with a space, that should safely render the\n # message safe without risking making it unreadable.\n\n try:\n assert message is not None\n self.__exit_message = message.replace('|', ' ')\n except (AttributeError, AssertionError):\n self.unknown_error(\"Status message must be a standard string!\")", "def _update_status(self, message):\n message = \"[{}] {}\".format(strftime(\"%H:%M:%S\", localtime()), message)\n self.tracker.write_log(message)\n self.ui.update_status(message)", "def StatusUpdate(msg):\r\n if verbosity > 0:\r\n print msg", "def display_message(self, message):\n context_id = self.status_bar.get_context_id(\"\")\n self.status_bar.show()\n self.status_bar.push(context_id, message)", "def set_statbar_text(self, msg):\n self.oPB_statBar.showMessage(msg.replace(\"<br>\", \" \").strip(), 0)", "def updateStatusBar(self, message):\n #check current status. Precedence is reset > error < warning\n if \"reset\" in message.topic:\n self.sb.SetBackgroundColour((255, 255, 255, 255))\n self.SetStatusText(\"\")\n elif \"warning\" in message.topic:\n self.sb.SetBackgroundColour('yellow')\n self.SetStatusText(\"Warnings generated. Check Log\")\n elif \"error\" in message.topic:\n self.sb.SetBackgroundColour('Red')\n self.SetStatusText(\"Error encountered. 
Check Log\")\n print message.data\n else:\n self.sb.SetBackgroundColour((255, 255, 255, 255))\n self.SetStatusText(message.data)", "def set_status(self, msg):\n if self.msg[:5] != \"ERROR\":\n self.msg = msg\n else:\n if msg[:5] == \"ERROR\":\n self.msg = \"\\n\" + msg", "def StatusUpdate(msg):\n if verbosity > 0:\n print msg", "def SetStatusMessageFn(self, fn):\n self._status_msg_fn = fn", "def status_msg(self, msg):\n\n\t\tprint(\"function not supported yet\")", "def set_label(self, message, color):\n self.label.prev_str = self.label_var.get()\n self.label.prev_color = self.label.configure()[\"background\"][4]\n self.label_var.set(message)\n self.label.configure(bg=color)\n self.update()", "def set_status(self):\r\n string = \"%9.3f%s/%9.3f%s\"\r\n unit1 = unit2 = \"b\"\r\n used = self.usedBytes.get()\r\n total = self.totalBytes.get()\r\n if used > total:\r\n self.label.config(fg=\"red\")\r\n else:\r\n self.label.config(fg=\"black\")\r\n if used > 999999:\r\n unit1 = \"Mb\"\r\n used /= 1000000.0\r\n elif used > 999:\r\n unit1 = \"Kb\"\r\n used /= 1000.0\r\n if total > 999999:\r\n unit2 = \"Mb\"\r\n total /= 1000000.0\r\n elif total > 999:\r\n unit2 = \"Kb\"\r\n total /= 1000.0\r\n self.textStatus.set(string % (used, unit1, total, unit2))", "def actualizeHardwarelabel (self, data):\n if data.has_key(StatusMsg.label_state):\n self.hardwarelabel.setText(self.stateDecoder.get(data.get(StatusMsg.label_state)))", "def acutalizeActionlabel (self, data):\n if data.has_key(StatusMsg.label_action):\n self.actionlabel.setText(self.actionDecoder.get(data.get(StatusMsg.label_action)))", "def updateStatusBar(self, statusString):\n self.window().statusBar().showMessage(statusString)", "def show_status(self, status):\n self.statusBar().showMessage(status, 2000)", "def statusbar_msg(self, msg):\n self.statusbar.clearMessage()\n self.statusbar.showMessage(msg)", "def status_msg(string, status):\r\n if status:\r\n print string.ljust(74) + '[OK]'\r\n else:\r\n print string.ljust(70) + '[FAILED]'", "def err_message(self, message):\n self.errors.append(1)\n message = \"<b>\" + message + \"</b>\"\n self.timer_id = GLib.timeout_add_seconds(5, self.error_false)\n # Show if is was hidden\n if self.hidden:\n self.toggle()\n self.was_hidden = True\n self.left_label.set_markup(message)" ]
[ "0.79712987", "0.7420736", "0.7228758", "0.7161831", "0.70710754", "0.69951653", "0.6985524", "0.6982464", "0.6789776", "0.6676902", "0.6615886", "0.6576708", "0.6549624", "0.653623", "0.6525615", "0.648638", "0.6450446", "0.64447117", "0.6439653", "0.6434152", "0.6397675", "0.6365663", "0.628844", "0.62730116", "0.62332284", "0.6228081", "0.61916214", "0.6157518", "0.61442596", "0.61303246" ]
0.8840854
0
Generic error handling when a pipeline tools script run via deadline command returns a non-zero exit code. Generates a technical error message for a given subprocess.CalledProcessError instance and displays it in the Katana console. Similarly, a human-readable error message is presented to the user in a modal dialog. The technical error message contains the full command-line arguments, exit code, and standard output from the called process. Returns a user-friendly error message that can be presented to the user in the pipeline tools status label.
def HandlePipelineToolsCalledProcessError( exc ):
    errorMsg = StringIO()
    errorMsg.write( "Pipeline Tools encountered an error - the command:" )
    errorMsg.write( os.linesep * 2 )
    errorMsg.write( exc.cmd )
    errorMsg.write( os.linesep * 2 )
    errorMsg.write( "return a non-zero (%d) exit code" % exc.returncode )
    if exc.output:
        errorMsg.write( " and the following output:" )
        errorMsg.write( os.linesep * 2 )
        errorMsg.write( exc.output )
    errorMsg = errorMsg.getvalue()

    # On Windows, print statements output to the console window that is created minimized when Katana launches
    print( errorMsg )

    # Display a human-readable generic error message
    ShowModalDialog( "Pipeline Tools Error", "Pipeline Tools encountered an error. Check the Katana console for more detailed information." )

    return "Pipeline Tools Error"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handle_build_error(error):\n sys.stderr.write('Error running command `%s`. Returned %s.\\n' % (\n ' '.join(error.argv), str(error.error_code)))", "def print_unable_to_run(exc: \"CalledProcessError\"):\n _print(str(exc), level=MessageLevel.QUIET)", "def error(text, exitcode=1):\n\n # If we get passed something like an Exception, we can convert\n # it down to a string.\n text = str(text)\n\n # If the message starts with whitespace, assume that it should come\n # *before* the command-name prefix.\n text_nows = text.lstrip()\n ws = text[:len(text) - len(text_nows)]\n\n # This has to be a constant value as we can't reliably get our actual\n # program name on all platforms.\n emsg(ws + \"pkgfmt: \" + text_nows)\n\n if exitcode != None:\n sys.exit(exitcode)", "def call_error():\r\n print(\"Error in input format.\")\r\n sys.exit()", "def display_error(message, raise_exception = True):\r\n print \"Error:\", message\r\n print\r\n if raise_exception:\r\n raise ExternalCommandFailed\r\n else:\r\n sys.exit(1)", "def error_exit(self, msg):\n wrappedmsg = textwrap.fill(msg, 78)\n fullmsg = \"%s\\n%s\" % (wrappedmsg, self.get_usage_command())\n raise SBToolError(fullmsg, True)", "def error_exit():\n print(\"Invalid arguments!\")\n print(\"Type -h to get help.\")\n exit(0)", "def StandViz_ReportError( errorobj, args, Header = None ): # error reporting and traceback function\n (MyPath, MyFile) = os.path.split( args[0] ) # retrieve filename and path of running python script\n (MyBaseName, MyExt) = os.path.splitext( MyFile ) # separate basefilename from extension\n errorfilename = \"{}.txt\".format(MyBaseName) # create new error filename based on base of script filename\n ERRFILE = open( errorfilename, 'w' ) # open text file for writting\n if( Header != None ): ERRFILE.write( '%s\\n' % Header ) # if Header defined, write Header to file\n ERRFILE.write( \"Error running '{}'\\n\".format(MyFile) ) # write error message with filename\n MyTrace = errorobj[2] # retrieve error object\n while( MyTrace != None ): # loop through stack trace\n (line, file, name) = ( MyTrace.tb_lineno, MyTrace.tb_frame.f_code.co_filename, MyTrace.tb_frame.f_code.co_name ) # extract line, file, and error name\n F = open( file, 'r' ) # open source file of Python script\n L = F.readlines() # read scripot source into memory\n F.close() # close script file\n code = L[line-1].strip() # extract line of source code that caused error\n ERRFILE.write( \" File '{}', line {}, in {}\\n {}\\n\".format(file, line, name, code) ) # write filename, source code line, error name, and error code\n MyTrace = MyTrace.tb_next # step to next level of call stack trace\n ERRFILE.write( \"errorobj: {}\\n\".format(errorobj) ) # write error object and arguments for call\n ERRFILE.write( \"Calling Argument Vector: {}\\n\".format(args) ) # write calling arguments\n ERRFILE.close() # close text file with error stack trace\n os.system( \"notepad.exe {}\".format(errorfilename) ) # display error log file with notepad.exe", "def _message_failed_job(self):\n self.ensure_one()\n return _(\"Something bad happened during the execution of the job. 
\"\n \"More details in the 'Exception Information' section.\")", "def error_to_text(ex):\n\tif isinstance(ex, FailedProcessError) and ex.args[0] == 'youtube-dl' and ex.exitcode == 1:\n\t\treturn 'Download error: {}'.format(ex.stderr)\n\treturn \"Internal error {}: {}\".format(type(ex).__name__, ex)", "def error(self, msg, details = \"\" ):\n\n if details is not None:\n msg += \"\\n\\n\" + details\n\n if not self.is_subprocess:\n self.parser.error(msg)\n else:\n raise Exception(msg)", "def vpython_error_message():\n error_message = (\n \"<p>&#9888; Sorry, spacesimmer! OrbitX has crashed for \"\n \"some reason.</p>\"\n\n \"<p>Any information that OrbitX has on the crash has \"\n \"been saved to a logfile. If you want to get this problem fixed,\"\n \" send the contents of the log file \"\n \"<blockquote>\" +\n logs.logfile_name.replace('\\\\', '\\\\\\\\') +\n \"</blockquote> \"\n \"to Patrick Melanson along with a description of what was \"\n \"happening in the program when it crashed.</p>\"\n\n \"<p>Again, thank you for using OrbitX!</p>\"\n )\n vpython.canvas.get_selected().append_to_caption(f\"\"\"<script>\n if (document.querySelector('div.error') == null) {{\n error_div = document.createElement('div');\n error_div.className = 'error';\n error_div.innerHTML = \"{error_message}\";\n document.querySelector('body').prepend(error_div);\n }}\n </script>\"\"\")\n vpython.canvas.get_selected().append_to_caption(\"\"\"<style>\n .error {\n color: #D8000C !important;\n background-color: #FFBABA;\n margin: 10px 0;\n padding: 10px;\n border-radius: 5px 5px 5px 5px;\n width: 700px;\n }\n span.code {\n color: #D8000C !important;\n font-family: monospace;\n }\n blockquote {\n font-family: monospace;\n }\n </style>\"\"\")\n\n time.sleep(0.1) # Let vpython send out this update", "def error(self, message: str) -> None:\n lines = message.split('\\n')\n linum = 0\n formatted_message = ''\n for line in lines:\n if linum == 0:\n formatted_message = 'Error: ' + line\n else:\n formatted_message += '\\n ' + line\n linum += 1\n\n self.print_usage(sys.stderr)\n\n # Format errors with style_warning()\n formatted_message = ansi.style_warning(formatted_message)\n self.exit(2, '{}\\n\\n'.format(formatted_message))", "def command_failed_error(cmd):\n\n output_1 = colored(' - Error: Failed to run command ', 'red')\n output_2 = command(cmd)\n return output_1 + output_2 + '\\n'", "def CallDeadlineCommand(arguments, hideWindow=True, useArgFile=False, useDeadlineBg=False, raiseOnExitCode=False):\n deadlineCommand = GetDeadlineCommand( useDeadlineBg )\n tmpdir = None\n\n if useArgFile or useDeadlineBg:\n tmpdir = tempfile.mkdtemp()\n\n if useDeadlineBg:\n arguments = [ \"-outputfiles\", os.path.join( tmpdir, \"dlout.txt\" ), os.path.join( tmpdir, \"dlexit.txt\" ) ] + arguments\n\n startupinfo = None\n creationflags = 0\n\n if os.name == 'nt':\n if hideWindow:\n # Python 2.6 has subprocess.STARTF_USESHOWWINDOW, and Python 2.7 has subprocess._subprocess.STARTF_USESHOWWINDOW, so check for both.\n if hasattr( subprocess, '_subprocess' ) and hasattr( subprocess._subprocess, 'STARTF_USESHOWWINDOW' ):\n startupinfo = subprocess.STARTUPINFO()\n startupinfo.dwFlags |= subprocess._subprocess.STARTF_USESHOWWINDOW\n elif hasattr( subprocess, 'STARTF_USESHOWWINDOW' ):\n startupinfo = subprocess.STARTUPINFO()\n startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW\n else:\n # still show top-level windows, but don't show a console window\n CREATE_NO_WINDOW = 0x08000000 # MSDN process creation flag\n creationflags = CREATE_NO_WINDOW\n\n if 
useArgFile:\n arguments = [ CreateArgFile( arguments, tmpdir ) ]\n\n arguments.insert( 0, deadlineCommand )\n\n # Specifying PIPE for all handles to workaround a Python bug on Windows. The unused handles are then closed immediatley afterwards.\n proc = subprocess.Popen( arguments, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, startupinfo=startupinfo, creationflags=creationflags )\n output, errors = proc.communicate()\n\n if raiseOnExitCode and proc.returncode != 0:\n try:\n # The quote function was moved to shutil in python 3\n from shutil import quote as shell_quote\n except ImportError:\n # In python 2, quote lived in the pipes module\n from pipes import quote as shell_quote\n cmd = ' '.join([shell_quote(arg) for arg in arguments])\n raise subprocess.CalledProcessError(proc.returncode, cmd, output)\n\n if useDeadlineBg:\n with io.open( os.path.join( tmpdir, \"dlout.txt\" ), 'r', encoding='utf-8' ) as fileHandle:\n output = fileHandle.read()\n\n if tmpdir:\n try:\n shutil.rmtree( tmpdir )\n except:\n print( 'Failed to remove temp directory: \"%s\"' % tmpdir )\n\n return output.strip()", "def called_process_error2exit_decorator(func):\n @functools.wraps(func)\n def func_wrapper(*args, **kwargs):\n try:\n func(*args, **kwargs)\n except subprocess.CalledProcessError as e:\n print(\"{err}:\\n{msg}\".format(err=str(e), msg=e.output))\n sys.exit(1)\n return func_wrapper", "def error(msg):\n click.secho(f'[ERROR] {msg}', fg='red')", "def shell_error(msg, exitcode=1):\n print(msg, file=sys.stderr)\n exit(exitcode)", "def error(code, message):\n sys.stderr.write(message)\n sys.exit(code)", "def error_msg(msg: str) -> None:\n print(\"ERROR: \", msg)\n exit(2)", "def error(text, exitcode=1):\n\n print(\"pkgmogrify: {0}\".format(text), file=sys.stderr)\n if exitcode != None:\n sys.exit(exitcode)", "def ConsoleExit(self, errorcode=200):\n pass", "def error(message, code=None):\n print_error(message)\n sys.exit(code or 1)", "def error(message):\n print message\n sys.exit(2)", "def repr_failure(self, excinfo):\n if isinstance(excinfo.value, NbCellError):\n msg_items = [bcolors.FAIL + \"Notebook cell execution failed\" + bcolors.ENDC]\n formatstring = bcolors.OKBLUE + \"Cell %d: %s\\n\\n\" + \\\n \"Input:\\n\" + bcolors.ENDC + \"%s\\n\\n\" + \\\n bcolors.OKBLUE + \"Traceback:%s\" + bcolors.ENDC\n msg_items.append(formatstring % excinfo.value.args)\n return \"\\n\".join(msg_items)\n else:\n return \"pytest plugin exception: %s\" % str(excinfo.value)", "def fail(msg, exit_code=1):\n sys.stderr.write(\"{}\\n\".format(msg))\n sys.exit(exit_code)", "def ErrorExit(text, exit_code=1):\n\n if exit_code is 0:\n raise InvalidValueError(\"exit code\", exit_code, \"Exit code of 0 means program ran successfully. Always pass ErrorExit a non-zero exit code.\")\n\n print('\\nERROR: {}'.format(text))\n print(' The execution of mbmlpg.py stopped')\n sys.exit(exit_code)", "def print_fail(msg, exc=None, retcode=1):\n\n print('\\n{}'.format(msg))\n if exc:\n print('\\n{}'.format(str(exc)))\n sys.exit(retcode)", "def finalize_error():\n print('')\n exit(-1)", "def fatal_error(parent, my_message):\n\n # Remove /tmp/.setup-running\n path = \"/tmp/.setup-running\"\n if os.path.exists(path):\n os.remove(path)\n\n # multiprocessing.active_children()\n\n error(parent, my_message)\n sys.exit(1)" ]
[ "0.5881938", "0.5847716", "0.5779613", "0.5747992", "0.5723428", "0.5697356", "0.56824833", "0.5620372", "0.55943125", "0.5581835", "0.5550394", "0.55393744", "0.5523604", "0.55169374", "0.551156", "0.54878414", "0.54639775", "0.54484504", "0.54443103", "0.5437845", "0.5429484", "0.54240525", "0.5367347", "0.5350659", "0.5344054", "0.53423834", "0.5341556", "0.5329471", "0.53187287", "0.53099465" ]
0.75752896
0
Opens a dialog for viewing and modifying the job's pipeline tool settings. The dialog is launched in a deadline command subprocess. All settings are maintained by the JobWriter using a combination of the application name and the scene path.
def OpenIntegrationWindow( raiseOnExitCode=False ):
    global submissionInfo

    integrationPath = os.path.join( submissionInfo["RepoDirs"]["submission/Integration/Main"], "IntegrationUIStandAlone.py" )
    scenePath = NodegraphAPI.GetSourceFile()
    if not scenePath:
        raise SceneNotSavedError()

    argArray = ["-ExecuteScript", integrationPath, "-v", "2", "-d", "Katana", "Draft", "Shotgun", "FTrack", "--path", scenePath]
    try:
        pipelineToolStatus = CallDeadlineCommand(argArray, hideWindow=False, raiseOnExitCode=True)
    except subprocess.CalledProcessError as e:
        pipelineToolStatus = HandlePipelineToolsCalledProcessError( e )

    return pipelineToolStatus
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shotWinUI(*args):\n### ---------- should check for current project\n if cmds.window(\"shotWin\", exists = True):\n cmds.deleteUI(\"shotWin\")\n\n widgets[\"win\"] = cmds.window(\"shotWin\", t= \"Charlex Shot Manager\", w=1000, h=560, s=False)\n widgets[\"mainCLO\"] = cmds.columnLayout(w=1000, h=560)\n\n #######################\n #top bar layout\n #######################\n\n #rowlayout\n widgets[\"bannerFLO\"] = cmds.formLayout(w=1000, h=50, bgc=(.300,.3,.300))\n widgets[\"bannerImage\"] = cmds.image(image=\"{0}/banner_shotWin.png\".format(pi.images))\n widgets[\"spotImage\"] = cmds.iconTextButton(style=\"iconOnly\", image = \"{0}/defaultSpotImage.jpg\".format(pi.images), w=50, h=50, ann=ann[\"spotIcon\"], c=changeSpotIcon)\n widgets[\"projectText\"] = cmds.text(l=\"Project Name: Spot Name\", font = \"boldLabelFont\")\n widgets[\"sceneText\"] = cmds.text(l=\"Current Scene\") \n widgets[\"projectButton\"] = cmds.button(l=\"Change Job\", w = 100, h= 40, bgc= (.5,.5,.5), ann = ann[\"proj\"], c=setProject)\n widgets[\"refreshButton\"] = cmds.button(l=\"Refresh\", w = 60, h= 40, bgc= (.2,.2,.2), c = populateWindow)\n widgets[\"exploreButton\"] = cmds.button(l=\"Explore\\nReference\", w = 60, h= 40, bgc= (.7,.5,.3), c=exploreReference)\n\n cmds.formLayout(widgets[\"bannerFLO\"], e=True, af = [(widgets[\"bannerImage\"], \"top\", 0),\n (widgets[\"bannerImage\"], \"left\", 0),\n (widgets[\"projectText\"], \"left\", 400),\n (widgets[\"projectText\"], \"top\", 5),\n (widgets[\"sceneText\"], \"top\", 25),\n (widgets[\"spotImage\"], \"left\", 335), \n (widgets[\"sceneText\"], \"left\", 400),\n (widgets[\"projectButton\"], \"left\", 740),\n (widgets[\"projectButton\"], \"top\", 5),\n (widgets[\"refreshButton\"], \"left\", 850),\n (widgets[\"refreshButton\"], \"top\", 5),\n (widgets[\"exploreButton\"], \"left\", 920),\n (widgets[\"exploreButton\"], \"top\", 5), \n ])\n\n ######################\n #bottom layout\n ########################\n cmds.setParent(widgets[\"mainCLO\"])\n widgets[\"lowFLO\"] = cmds.formLayout()\n widgets[\"lowTLO\"] = cmds.tabLayout(bgc = (.2, .2, .2 ))\n\n ################\n #shots tab\n ################\n cmds.setParent(widgets[\"lowTLO\"])\n widgets[\"shotsFLO\"] = cmds.formLayout(\"Shots - Anim, Light and FX\",w=1000, h=500, bgc = (.4,.4,.4))\n \n ##############\n #shot asset List layout\n ###############\n widgets[\"shotAssListCLO\"] = cmds.columnLayout(w=240, bgc = (.5, .5,.5))\n widgets[\"shotAssListFLO\"] = cmds.formLayout(w=240, h= 500)\n widgets[\"shotAssListTSL\"] = cmds.textScrollList(w=240, h=465, ams=True) \n\n widgets[\"shotAssListTitleText\"] = cmds.text(l=\"Referenced Assets In Current Scene\", font = \"boldLabelFont\", al=\"center\", ann=ann[\"reffedAssets\"])\n\n cmds.formLayout(widgets[\"shotAssListFLO\"], e=True, af = [\n (widgets[\"shotAssListTSL\"], \"top\", 35),\n (widgets[\"shotAssListTSL\"], \"left\", 0),\n \n (widgets[\"shotAssListTitleText\"], \"top\", 5),\n (widgets[\"shotAssListTitleText\"], \"left\", 20),\n ])\n\n ##############\n #shot List layout\n ###############\n cmds.setParent(widgets[\"shotsFLO\"])\n widgets[\"shotListCLO\"] = cmds.columnLayout(w=130, bgc = (.5, .5, .5))\n widgets[\"shotListFLO\"] = cmds.formLayout(w=130, h= 500)\n widgets[\"shotListTSL\"] = cmds.textScrollList(w=130, h=460)\n widgets[\"shotListTitleText\"] = cmds.text(l=\"Shot List\", font = \"boldLabelFont\", ann=ann[\"shotList\"])\n widgets[\"shotListCharText\"] = cmds.text(l=\"Shots\")\n\n cmds.formLayout(widgets[\"shotListFLO\"], e=True, af = [\n 
(widgets[\"shotListTSL\"], \"top\", 40), \n (widgets[\"shotListTSL\"], \"left\", 0),\n (widgets[\"shotListTitleText\"], \"top\", 5),\n (widgets[\"shotListTitleText\"], \"left\", 30),\n (widgets[\"shotListCharText\"], \"top\", 25),\n (widgets[\"shotListCharText\"], \"left\", 5),\n ])\n\n ##############\n #shot Status layout\n ############### \n cmds.setParent(widgets[\"shotsFLO\"])\n widgets[\"shotInfoAssListTLO\"] = cmds.tabLayout(w=200, h=500)\n widgets[\"shotInfoFLO\"] = cmds.formLayout(\"ShotInfo\", w=200, h=500, bgc= (.5, .5, .5))\n widgets[\"shotInfoTitleText\"] = cmds.text(l=\"Shot Information\", font = \"boldLabelFont\")\n widgets[\"shotInfoNameText\"] = cmds.text(l=\"<Shot Name>\", font = \"boldLabelFont\", al=\"center\", w=200)\n widgets[\"shotInfoVariantText\"] = cmds.text(l=\"<Var Name>\", font = \"boldLabelFont\", al=\"center\", w=200) \n widgets[\"shotInfoPic\"] = cmds.image(image = \"{0}/kitten-photo-632-3.jpg\".format(pi.images), w= 154, h=154)\n widgets[\"shotAnnCB\"] = cmds.checkBox(l=\"Tooltips popups?\", value=tooltips, changeCommand=tooltipSet)\n\n cmds.formLayout(widgets[\"shotInfoFLO\"], e=True, af =[\n (widgets[\"shotInfoNameText\"], \"top\", 60),\n (widgets[\"shotInfoNameText\"], \"left\", 0),\n (widgets[\"shotInfoVariantText\"], \"top\", 80),\n (widgets[\"shotInfoVariantText\"], \"left\", 0), \n (widgets[\"shotInfoPic\"], \"top\", 110),\n (widgets[\"shotInfoPic\"], \"left\", 23),\n (widgets[\"shotInfoTitleText\"], \"top\", 5),\n (widgets[\"shotInfoTitleText\"], \"left\", 35),\n (widgets[\"shotAnnCB\"], \"top\", 420),\n (widgets[\"shotAnnCB\"], \"left\", 50), \n ])\n\n cmds.setParent(widgets[\"shotInfoAssListTLO\"])\n widgets[\"shotAssRigListTLO\"] = cmds.tabLayout(\"ProjAssets\", w=200, h=500) \n widgets[\"shotAssRigCharListCLO\"] = cmds.columnLayout(\"Chars\", w=200, h=500)\n widgets[\"shotAssRigCharListTSL\"] = cmds.textScrollList(w=200, h=450) \n cmds.setParent(widgets[\"shotAssRigListTLO\"])\n widgets[\"shotAssRigPropListCLO\"] = cmds.columnLayout(\"Props\", w=200, h=500)\n widgets[\"shotAssRigPropListTSL\"] = cmds.textScrollList(w=200, h=450) \n cmds.setParent(widgets[\"shotAssRigListTLO\"])\n widgets[\"shotAssRigSetListCLO\"] = cmds.columnLayout(\"Sets\", w=200, h=500)\n widgets[\"shotAssRigSetListTSL\"] = cmds.textScrollList(w=200, h=450) \n cmds.setParent(widgets[\"shotAssRigListTLO\"])\n widgets[\"shotAnmMstListCLO\"] = cmds.columnLayout(\"Anm\", w=200, h=500)\n widgets[\"shotAnmMstListTSL\"] = cmds.textScrollList(w=200, h=450) \n ###############\n #Shot Action layout\n ################\n cmds.setParent(widgets[\"shotsFLO\"])\n widgets[\"shotActionFLO\"] = cmds.formLayout(w=150, h=500, bgc =(.5, .5, .5))\n widgets[\"shotActionRefAssBut\"] = cmds.button(l=\"-> Ref Asset In ->\", w=130, h=20, bgc = (.7,.7,.7), c=referenceAsset, ann=ann[\"refAsset\"]) \n widgets[\"shotActionReplaceBut\"] = cmds.button(l=\"-> Replace Reference ->\", w=130, h=20, en=True, bgc = (.7,.7,.7), ann=ann[\"replace\"], c=replaceReference)\n widgets[\"shotActionRefMultBut\"] = cmds.button(l=\"-> Ref Multiple ->\", w=100, h=20, en=True, bgc = (.7,.7,.7), ann=ann[\"refMult\"], c=referenceMultiple)\n widgets[\"shotActionRefMultIFG\"] = cmds.intFieldGrp(w=20, v1=1)\n widgets[\"shotActionReloadBut\"] = cmds.button(l=\"Reload Reference ->\", w=130, h=20, bgc = (.7,.7,.7), c=reloadReference, ann=ann[\"reload\"])\n widgets[\"shotActionUnloadBut\"] = cmds.button(l=\"Unload Reference ->\", w=130, h=20, bgc = (.7,.7,.7), c=unloadReference, ann=ann[\"unload\"])\n widgets[\"shotActionRemoveBut\"] = 
cmds.button(l=\"Remove Reference ->\", w=130, h=20, bgc = (.7,.7,.7), c=removeReference, ann=ann[\"remove\"])\n widgets[\"shotActionQIncrBut\"] = cmds.button(l=\"Quick Increment\", w=130, h=20, en=True, bgc = (.7,.7,.7), c=quickIncrement, ann=ann[\"qkIncr\"])\n widgets[\"shotActionNewShotBut\"] = cmds.button(l=\"Create new shot\", en=True, w=130, h=20, bgc = (.7,.7,.7), c=createNewShot, ann=ann[\"crtShot\"]) \n widgets[\"shotActionTitle\"] = cmds.text(l=\"Shot Actions\", font = \"boldLabelFont\")\n\n # create an embedded tab layout for each type of button!\n widgets[\"shotActionTypeTLO\"] = cmds.tabLayout(\"Specific Actions\", w=150, h=180, bgc=(.2,.2,.2))\n\n widgets[\"shotActionTypeAnmSLO\"] = cmds.scrollLayout(\"Anm\", w=150, h=180, verticalScrollBarThickness=5) \n widgets[\"shotActionTypeAnmFLO\"] = cmds.formLayout(w=150,h=240, bgc=(.4, .45, .4))\n widgets[\"shotActionExpAnimBut\"] = cmds.button(l=\"Export Anim ->\", w=130, h=20, en=True, bgc=(.7,.7,.7), c=exportAnimation, ann=ann[\"expAnim\"])\n widgets[\"shotActionImpAnimBut\"] = cmds.button(l=\"Import Anim ->\", w=130, h=20, en=True, bgc=(.7,.7,.7), c=importAnimation, ann=ann[\"impAnim\"])\n widgets[\"shotActionRefToBut\"] = cmds.button(l=\"-> Reference To\", w=130, h=20, en=True, bgc=(.7,.7,.7), c=referenceTo, ann=ann[\"refTo\"])\n widgets[\"shotActionCtrlMkBut\"] = cmds.button(l=\"Ctrl On Selection\", w=130, h=20, en=True, bgc=(.7,.7,.7), c=controlMaker, ann=ann[\"ctrlMk\"])\n\n cmds.setParent(widgets[\"shotActionTypeTLO\"])\n widgets[\"shotActionTypeLgtSLO\"] = cmds.scrollLayout(\"Lgt\", w=150, h=180, verticalScrollBarThickness=5) \n widgets[\"shotActionTypeLgtFLO\"] = cmds.formLayout(w=150,h=240, bgc=(.4, .4, .45))\n widgets[\"shotActionGenericBut\"] = cmds.button(l=\"Render Setup\", w=130, h=20, en=True, bgc = (.7,.7,.7), c=renderSetup, ann=ann[\"rendGen\"])\n\n widgets[\"shotActionMtlBut\"] = cmds.button(l=\"-> Apply Mtl To Sel ->\", w=130, h=20, en=False, bgc = (.7,.7,.7), ann=ann[\"mtlApply\"])\n\n cmds.setParent(widgets[\"shotActionTypeTLO\"])\n widgets[\"shotActionTypeFxSLO\"] = cmds.scrollLayout(\"Fx\", w=150, h=240, verticalScrollBarThickness=5) \n widgets[\"shotActionTypeFxFLO\"] = cmds.formLayout(w=150,h=180, bgc=(.45, .4, .4))\n \n\n#---------------- add any fx buttons here and then postion them below \n\n cmds.formLayout(widgets[\"shotActionTypeLgtFLO\"], e=True, af = [\n (widgets[\"shotActionGenericBut\"], \"top\", 10),\n (widgets[\"shotActionGenericBut\"], \"left\", 2),\n (widgets[\"shotActionMtlBut\"], \"top\", 40),\n (widgets[\"shotActionMtlBut\"], \"left\", 2) \n ])\n\n cmds.formLayout(widgets[\"shotActionTypeAnmFLO\"], e=True, af = [\n (widgets[\"shotActionExpAnimBut\"], \"top\", 10),\n (widgets[\"shotActionExpAnimBut\"], \"left\", 2),\n (widgets[\"shotActionImpAnimBut\"], \"top\", 40),\n (widgets[\"shotActionImpAnimBut\"], \"left\", 2),\n (widgets[\"shotActionRefToBut\"], \"top\", 70),\n (widgets[\"shotActionRefToBut\"], \"left\", 2),\n (widgets[\"shotActionCtrlMkBut\"], \"top\", 100),\n (widgets[\"shotActionCtrlMkBut\"], \"left\", 2) \n ])\n\n cmds.formLayout(widgets[\"shotActionFLO\"], e=True, af = [\n (widgets[\"shotActionTitle\"], \"top\", 5),\n (widgets[\"shotActionTitle\"], \"left\", 35),\n (widgets[\"shotActionRefAssBut\"], \"top\", 30),\n (widgets[\"shotActionRefAssBut\"], \"left\", 10),\n (widgets[\"shotActionRefMultBut\"], \"top\", 60),\n (widgets[\"shotActionRefMultBut\"], \"left\", 10),\n (widgets[\"shotActionRefMultIFG\"], \"top\", 60),\n (widgets[\"shotActionRefMultIFG\"], \"left\", 110),\n 
(widgets[\"shotActionReloadBut\"], \"top\", 90),\n (widgets[\"shotActionReloadBut\"], \"left\", 10),\n (widgets[\"shotActionUnloadBut\"], \"top\", 120),\n (widgets[\"shotActionUnloadBut\"], \"left\", 10),\n (widgets[\"shotActionRemoveBut\"], \"top\", 150),\n (widgets[\"shotActionRemoveBut\"], \"left\", 10),\n (widgets[\"shotActionReplaceBut\"], \"top\", 180),\n (widgets[\"shotActionReplaceBut\"], \"left\", 10),\n (widgets[\"shotActionQIncrBut\"], \"top\", 210),\n (widgets[\"shotActionQIncrBut\"], \"left\", 10),\n (widgets[\"shotActionTypeTLO\"], \"top\", 270),\n (widgets[\"shotActionTypeTLO\"], \"left\", 0), \n (widgets[\"shotActionNewShotBut\"], \"top\", 470),\n (widgets[\"shotActionNewShotBut\"], \"left\", 10), \n ])\n\n ###############\n #Shot anmLgt tab layout\n ################\n cmds.setParent(widgets[\"shotsFLO\"])\n widgets[\"anmLgtFLO\"] = cmds.formLayout(w=250, h=500, bgc = (.4, .4, .4))\n widgets[\"anmLgtTLO\"] = cmds.tabLayout(w=250, h=500, bgc = (.4,.4,.4), changeCommand = varTabChange)\n ###############\n #shot anm tab layout\n ###############\n widgets[\"anmTabCLO\"] = cmds.columnLayout(\"ANM\", w=250, bgc = (.4, .45, .4))\n #################\n #anm info frame and column layouts\n ################# \n cmds.separator(h=5)\n widgets[\"anmVariationsTSL\"] = cmds.textScrollList(w=250, h=90)\n widgets[\"anmLastWSTFG\"] = cmds.textFieldGrp(l=\"Latest WS: \", w=250, cw = [(1, 70), (2,170)], cal = [(1,\"left\"), (2, \"left\")],ed=False)\n widgets[\"anmLastMasterTFG\"] = cmds.textFieldGrp(l=\"Master: \", w=250, cw = [(1, 70), (2,170)], cal = [(1,\"left\"), (2, \"left\")],ed=False)\n cmds.separator(h=5)\n\n #################\n #anm 'workshop' frame and column layouts\n #################\n cmds.setParent(widgets[\"anmTabCLO\"])\n widgets[\"anmWSFLO\"] = cmds.frameLayout(\"Animation Workshop\", w=250, h=165, bgc= (.3, .3, .3))\n widgets[\"anmWSFoLO\"] = cmds.formLayout(w=250, h=165, bgc = (.4,.45,.4))\n\n widgets[\"anmWSOpenBut\"] = cmds.button(l=\"Open Latest\\nAnim\\nWorkshop\", w=70, h=50, en=False, bgc = (.4,.5,.8), ann=ann[\"openWS\"])\n widgets[\"anmWSIncrBut\"] = cmds.button(l=\"Increment Anim Workshop\", w=160, h=50, en=True, bgc = (.7,.6,.4), ann=ann[\"incrWS\"], c = partial(incrementWorkshop, \"anm\"))\n widgets[\"anmWSPrevBut\"] = cmds.button(l=\"Previous Anim Workshops\", w=160, bgc = (.7,.7,.7), en=False, ann=ann[\"prevWS\"])\n widgets[\"anmWSInfoBut\"] = cmds.button(l=\"WS Info\", w=70, bgc = (.7, .7, .7), en=False, ann=ann[\"WSInfo\"]) \n widgets[\"anmWSNewVarBut\"] = cmds.button(l=\"Create New Variant\", w=160, h=30, bgc = (.2,.2,.2), c=partial(createVariant, \"anm\"), ann=ann[\"crtVariant\"])\n widgets[\"anmVarIconBut\"] = cmds.button(l=\"Create Var\\nIcon\", w=70, h=30, bgc = (.7,.7,.7), en=False, c=createShotIcon, ann=ann[\"crtIcon\"]) \n\n cmds.formLayout(widgets[\"anmWSFoLO\"], e=True, af = [\n (widgets[\"anmWSOpenBut\"], \"left\", 5),\n (widgets[\"anmWSOpenBut\"], \"top\", 10),\n (widgets[\"anmWSIncrBut\"], \"left\", 80),\n (widgets[\"anmWSIncrBut\"], \"top\", 10),\n (widgets[\"anmWSInfoBut\"], \"left\", 5),\n (widgets[\"anmWSInfoBut\"], \"top\", 65),\n (widgets[\"anmWSPrevBut\"], \"left\", 80),\n (widgets[\"anmWSPrevBut\"], \"top\", 65),\n (widgets[\"anmWSNewVarBut\"], \"left\", 5),\n (widgets[\"anmWSNewVarBut\"], \"top\", 105),\n (widgets[\"anmVarIconBut\"], \"left\", 170),\n (widgets[\"anmVarIconBut\"], \"top\", 105), \n ])\n #################\n #anm 'master' frame and column layouts\n #################\n cmds.setParent(widgets[\"anmTabCLO\"])\n 
widgets[\"anmMstFLO\"] = cmds.frameLayout(\"Animation Master\", w=250, h=200, bgc= (.3, .3, .3))\n widgets[\"anmMstFoLO\"] = cmds.formLayout(w=250, h=200, bgc = (.4,.45,.4))\n widgets[\"anmMstOpenBut\"] = cmds.button(l=\"Open Anim\\nMaster\", w=70, h=50, en=False, bgc = (.5,.7,.5), ann=ann[\"openMst\"])\n widgets[\"anmMstIncrBut\"] = cmds.button(l=\"Publish Anim Master\\n(Import Refs)\", w=160, h=50, en=False, bgc = (.7,.5,.5), ann=ann[\"pubRefMst\"])\n widgets[\"anmMstBgIncrBut\"] = cmds.button(l=\"BG Publish Anim Master (Import Refs)\", w=235, en=False, bgc = (.3,.3,.3), ann=ann[\"pubBGMst\"])\n widgets[\"anmMstPrevBut\"] = cmds.button(l=\"Previous Anim Masters\", w=160, en=False, bgc = (.7,.7,.7), ann=ann[\"prevMst\"])\n widgets[\"anmMstInfoBut\"] = cmds.button(l=\"Mst Info\", w=70, bgc = (.7, .7, .7), en=False, ann=ann[\"MstInfo\"])\n\n\n \n cmds.formLayout(widgets[\"anmMstFoLO\"], e=True, af = [\n (widgets[\"anmMstOpenBut\"], \"left\", 5),\n (widgets[\"anmMstOpenBut\"], \"top\", 10),\n (widgets[\"anmMstIncrBut\"], \"left\", 80),\n (widgets[\"anmMstIncrBut\"], \"top\", 10),\n (widgets[\"anmMstBgIncrBut\"], \"left\", 5),\n (widgets[\"anmMstBgIncrBut\"], \"top\", 65), \n (widgets[\"anmMstInfoBut\"], \"left\", 5),\n (widgets[\"anmMstInfoBut\"], \"top\", 95), \n (widgets[\"anmMstPrevBut\"], \"left\", 80),\n (widgets[\"anmMstPrevBut\"], \"top\", 95), \n \n ])\n ###############\n #shot Lgt tab layout\n ################ \n cmds.setParent(widgets[\"anmLgtTLO\"]) \n widgets[\"lgtTabCLO\"] = cmds.columnLayout(\"LGT\", w=250, bgc = (.4,.4,.45))\n #################\n #lgt info frame and column layouts\n ################# \n cmds.separator(h=5)\n widgets[\"lgtVariationsTSL\"] = cmds.textScrollList(w=250, h=90)\n widgets[\"lgtLastWSTFG\"] = cmds.textFieldGrp(l=\"Latest WS: \", w=250, cw = [(1, 70), (2,170)], cal = [(1,\"left\"), (2, \"left\")],ed=False)\n widgets[\"lgtLastMasterTFG\"] = cmds.textFieldGrp(l=\"Master: \", w=250, cw = [(1, 70), (2,170)], cal = [(1,\"left\"), (2, \"left\")],ed=False) \n cmds.separator(h=5)\n #################\n #lgt 'workshop' frame and column layouts\n #################\n cmds.setParent(widgets[\"lgtTabCLO\"])\n widgets[\"lgtWSFLO\"] = cmds.frameLayout(\"Lighting Workshop\", w=250, h=165, bgc= (.3, .3, .3))\n widgets[\"lgtWSFoLO\"] = cmds.formLayout(w=250, h=165, bgc = (.4,.4,.45))\n\n widgets[\"lgtWSOpenBut\"] = cmds.button(l=\"Open Latest\\nLight\\nWorkshop\", w=70, h=50, en=False, bgc = (.4,.5,.8), ann=ann[\"openWS\"])\n widgets[\"lgtWSIncrBut\"] = cmds.button(l=\"Increment Light Workshop\", w=160, h=50, en=True, bgc = (.7,.6,.4), c = partial(incrementWorkshop, \"lgt\"), ann=ann[\"incrWS\"])\n widgets[\"lgtWSInfoBut\"] = cmds.button(l=\"WS Info\", w=70, bgc = (.7, .7, .7), en=False, ann=ann[\"WSInfo\"])\n widgets[\"lgtWSPrevBut\"] = cmds.button(l=\"Previous Light Workshops\", w=160, en=False, bgc = (.7,.7,.7), ann=ann[\"prevWS\"])\n widgets[\"lgtWSNewVarBut\"] = cmds.button(l=\"Create New Variant\", w=160, h=30, bgc = (.2,.2,.2), c=partial(createVariant, \"lgt\"), ann=ann[\"crtVariant\"]) \n widgets[\"lgtVarIconBut\"] = cmds.button(l=\"Create Var\\nIcon\", w=70, h=30, en=False, bgc = (.7,.7,.7), c=createShotIcon, ann=ann[\"crtIcon\"])\n\n cmds.formLayout(widgets[\"lgtWSFoLO\"], e=True, af = [\n (widgets[\"lgtWSOpenBut\"], \"left\", 5),\n (widgets[\"lgtWSOpenBut\"], \"top\", 10),\n (widgets[\"lgtWSIncrBut\"], \"left\", 80),\n (widgets[\"lgtWSIncrBut\"], \"top\", 10),\n (widgets[\"lgtWSInfoBut\"], \"left\", 5),\n (widgets[\"lgtWSInfoBut\"], \"top\", 65),\n 
(widgets[\"lgtWSPrevBut\"], \"left\", 80),\n (widgets[\"lgtWSPrevBut\"], \"top\", 65),\n (widgets[\"lgtWSNewVarBut\"], \"left\", 5),\n (widgets[\"lgtWSNewVarBut\"], \"top\", 105),\n (widgets[\"lgtVarIconBut\"], \"left\", 170),\n (widgets[\"lgtVarIconBut\"], \"top\", 105), \n ]) \n #################\n #lgt 'master' frame and column layouts\n #################\n cmds.setParent(widgets[\"lgtTabCLO\"])\n widgets[\"lgtMstFLO\"] = cmds.frameLayout(\"Lighting Master\", w=250, h=200, bgc= (.3, .3, .3))\n widgets[\"lgtMstFoLO\"] = cmds.formLayout(w=250, h=200, bgc = (.4,.4,.45))\n widgets[\"lgtMstOpenBut\"] = cmds.button(l=\"Open\\nLight Master\", w=70, h=50, en=True, bgc = (.5,.7,.5), c=partial(openShotMaster, \"lgt\"), ann=ann[\"openMst\"])\n widgets[\"lgtMstIncrBut\"] = cmds.button(l=\"Publish Light Master\\n(Keep Refs)\", w=160, h=50, en=False, bgc = (.7,.5,.5), ann=ann[\"pubRefMst\"])\n widgets[\"lgtMstInfoBut\"] = cmds.button(l=\"Mst Info\", w=70, bgc = (.7, .7, .7), en=False, ann=ann[\"MstInfo\"]) \n widgets[\"lgtMstPrevBut\"] = cmds.button(l=\"Previous Light Masters\", w=160, en=False, bgc = (.7,.7,.7), ann=ann[\"prevMst\"])\n widgets[\"lgtMstBgIncrBut\"] = cmds.button(l=\" BG Publish Light Master (Import Refs)\", w=235, en=False, bgc = (.3,.3,.3), ann=ann[\"pubBGMst\"]) \n\n cmds.formLayout(widgets[\"lgtMstFoLO\"], e=True, af = [\n (widgets[\"lgtMstOpenBut\"], \"left\", 5),\n (widgets[\"lgtMstOpenBut\"], \"top\", 10),\n (widgets[\"lgtMstIncrBut\"], \"left\", 80),\n (widgets[\"lgtMstIncrBut\"], \"top\", 10),\n (widgets[\"lgtMstBgIncrBut\"], \"left\", 5),\n (widgets[\"lgtMstBgIncrBut\"], \"top\", 65), \n (widgets[\"lgtMstInfoBut\"], \"left\", 5),\n (widgets[\"lgtMstInfoBut\"], \"top\", 95),\n (widgets[\"lgtMstPrevBut\"], \"left\", 80),\n (widgets[\"lgtMstPrevBut\"], \"top\", 95),\n \n ]) \n\n ###############\n #shot anm tab layout\n ###############\n cmds.setParent(widgets[\"anmLgtTLO\"])\n widgets[\"fxTabCLO\"] = cmds.columnLayout(\"FX\", w=250, bgc = (.45, .4, .4))\n #################\n #fx info frame and column layouts\n ################# \n cmds.separator(h=5)\n widgets[\"fxVariationsTSL\"] = cmds.textScrollList(w=250, h=90)\n widgets[\"fxLastWSTFG\"] = cmds.textFieldGrp(l=\"Latest WS: \", w=250, cw = [(1, 70), (2,170)], cal = [(1,\"left\"), (2, \"left\")],ed=False)\n widgets[\"fxLastMasterTFG\"] = cmds.textFieldGrp(l=\"Master: \", w=250, cw = [(1, 70), (2,170)], cal = [(1,\"left\"), (2, \"left\")],ed=False) \n cmds.separator(h=5)\n #################\n #lgt 'workshop' frame and column layouts\n #################\n cmds.setParent(widgets[\"fxTabCLO\"])\n widgets[\"fxWSFLO\"] = cmds.frameLayout(\"FX Workshop\", w=250, h=165, bgc= (.3, .3, .3))\n widgets[\"fxWSFoLO\"] = cmds.formLayout(w=250, h=165, bgc = (.45,.4,.4))\n\n widgets[\"fxWSOpenBut\"] = cmds.button(l=\"Open Latest\\nFX\\nWorkshop\", w=70, h=50, en=False, bgc = (.4,.5,.8), ann=ann[\"openWS\"])\n widgets[\"fxWSIncrBut\"] = cmds.button(l=\"Increment FX Workshop\", w=160, h=50, en=True, bgc = (.7,.6,.4), c = partial(incrementWorkshop, \"fx\"), ann=ann[\"incrWS\"])\n widgets[\"fxWSInfoBut\"] = cmds.button(l=\"WS Info\", w=70, bgc = (.7, .7, .7), en=False, ann=ann[\"WSInfo\"]) \n widgets[\"fxWSPrevBut\"] = cmds.button(l=\"Previous FX Workshops\", w=160, en=False, bgc = (.7,.7,.7), ann=ann[\"prevWS\"])\n widgets[\"fxWSNewVarBut\"] = cmds.button(l=\"Create New Variant\", w=160, h=30, bgc = (.2,.2,.2), c=partial(createVariant, \"fx\"), ann=ann[\"crtVariant\"])\n widgets[\"fxVarIconBut\"] = cmds.button(l=\"Create Var\\nIcon\", w=70, h=30, 
en=False, bgc = (.7,.7,.7), c=createShotIcon, ann=ann[\"crtIcon\"]) \n \n cmds.formLayout(widgets[\"fxWSFoLO\"], e=True, af = [\n (widgets[\"fxWSOpenBut\"], \"left\", 5),\n (widgets[\"fxWSOpenBut\"], \"top\", 10),\n (widgets[\"fxWSIncrBut\"], \"left\", 80),\n (widgets[\"fxWSIncrBut\"], \"top\", 10),\n (widgets[\"fxWSInfoBut\"], \"left\", 5),\n (widgets[\"fxWSInfoBut\"], \"top\", 65),\n (widgets[\"fxWSPrevBut\"], \"left\", 80),\n (widgets[\"fxWSPrevBut\"], \"top\", 65),\n (widgets[\"fxWSNewVarBut\"], \"left\", 5),\n (widgets[\"fxWSNewVarBut\"], \"top\", 105),\n (widgets[\"fxVarIconBut\"], \"left\", 170),\n (widgets[\"fxVarIconBut\"], \"top\", 105), \n ]) \n #################\n #lgt 'master' frame and column layouts\n #################\n cmds.setParent(widgets[\"fxTabCLO\"])\n widgets[\"fxMstFLO\"] = cmds.frameLayout(\"FX Master\", w=250, h=200, bgc= (.3, .3, .3))\n widgets[\"fxMstFoLO\"] = cmds.formLayout(w=250, h=200, bgc = (.45,.4,.4))\n widgets[\"fxMstOpenBut\"] = cmds.button(l=\"Open\\nFX Master\", w=70, h=50, en=False, bgc = (.5,.7,.5), ann=ann[\"openMst\"])\n widgets[\"fxMstIncrBut\"] = cmds.button(l=\"Publish FX Master\\n(Import Refs)\", w=160, h=50, en=False, bgc = (.7,.5,.5), ann=ann[\"pubRefMst\"])\n widgets[\"fxMstInfoBut\"] = cmds.button(l=\"Mst Info\", w=70, bgc = (.7, .7, .7), en=False, ann=ann[\"MstInfo\"]) \n widgets[\"fxMstPrevBut\"] = cmds.button(l=\"Previous FX Masters\", w=160, en=False, bgc = (.7,.7,.7), ann=ann[\"prevMst\"])\n widgets[\"fxMstBgIncrBut\"] = cmds.button(l=\" BG Publish FX Master (Import Refs)\", w=235, en=False, bgc = (.3,.3,.3), ann=ann[\"pubBGMst\"]) \n\n cmds.formLayout(widgets[\"fxMstFoLO\"], e=True, af = [\n (widgets[\"fxMstOpenBut\"], \"left\", 5),\n (widgets[\"fxMstOpenBut\"], \"top\", 10),\n (widgets[\"fxMstIncrBut\"], \"left\", 80),\n (widgets[\"fxMstIncrBut\"], \"top\", 10),\n (widgets[\"fxMstBgIncrBut\"], \"left\", 5),\n (widgets[\"fxMstBgIncrBut\"], \"top\", 65), \n (widgets[\"fxMstInfoBut\"], \"left\", 5),\n (widgets[\"fxMstInfoBut\"], \"top\", 95),\n (widgets[\"fxMstPrevBut\"], \"left\", 80),\n (widgets[\"fxMstPrevBut\"], \"top\", 95),\n \n ]) \n\n\n cmds.setParent(widgets[\"anmLgtFLO\"])\n widgets[\"anmLgtTitleText\"] = cmds.text(l=\"Variant Files\", font = \"boldLabelFont\", ann=ann[\"varFile\"]) \n\n cmds.formLayout(widgets[\"anmLgtFLO\"], e=True, af = [(widgets[\"anmLgtTitleText\"], \"top\", 5), (widgets[\"anmLgtTitleText\"], \"left\", 135)])\n\n ###################\n # - -- Shot Tab form setup\n ##################\n cmds.formLayout(widgets[\"shotsFLO\"], e=True, af = [\n (widgets[\"shotListCLO\"], \"left\", 0),\n (widgets[\"shotListCLO\"], \"top\", 0),\n (widgets[\"anmLgtFLO\"], \"left\", 134),\n (widgets[\"anmLgtFLO\"], \"top\", 0), \n (widgets[\"shotInfoAssListTLO\"], \"left\", 387),\n (widgets[\"shotInfoAssListTLO\"], \"top\", 0),\n (widgets[\"shotActionFLO\"], \"top\", 0),\n (widgets[\"shotActionFLO\"], \"left\", 594),\n (widgets[\"shotAssListCLO\"], \"top\", 0),\n (widgets[\"shotAssListCLO\"], \"left\", 752)\n ])\n\n ################\n #Misc tab\n ################\n cmds.setParent(widgets[\"lowTLO\"])\n widgets[\"miscFLO\"] = cmds.formLayout(\"Other Shot Tools\",width=1000, height=500, backgroundColor = (.4,.4,.4))\n\n widgets[\"animationTLO\"] = cmds.tabLayout(width=500, height=250, backgroundColor = (.3, .35, .3))\n widgets[\"animationRCLO\"] = cmds.rowColumnLayout(\"animation\", numberOfColumns = 4, columnSpacing=[(1, 0), (2,5), (3,5), (4,5)], rowSpacing=[1,5])\n\n cmds.setParent(widgets[\"miscFLO\"])\n widgets[\"lightingTLO\"] 
= cmds.tabLayout(width=500, height=250, backgroundColor = (.3, .32, .35))\n widgets[\"lightingRCLO\"] = cmds.rowColumnLayout(\"lighting\", numberOfColumns = 4, columnSpacing=[(1, 0), (2,5), (3,5), (4,5)], rowSpacing=[1,5]) \n\n cmds.setParent(widgets[\"miscFLO\"])\n widgets[\"fxTLO\"] = cmds.tabLayout(width=500, height=250, backgroundColor = (.35, .3, .3))\n widgets[\"fxRCLO\"] = cmds.rowColumnLayout(\"fx\", numberOfColumns = 4, columnSpacing=[(1, 0), (2,5), (3,5), (4,5)], rowSpacing=[1,5])\n\n cmds.setParent(widgets[\"miscFLO\"])\n widgets[\"charlexTLO\"] = cmds.tabLayout(width=500, height=250, backgroundColor = (.55, .55, .55))\n widgets[\"charlexRCLO\"] = cmds.rowColumnLayout(\"charlex_general\", numberOfColumns = 4, columnSpacing=[(1, 0), (2,5), (3,5), (4,5)], rowSpacing=[1,5])\n\n cmds.formLayout(widgets[\"miscFLO\"], e=True, af=[\n (widgets[\"charlexTLO\"], \"top\", 0),\n (widgets[\"charlexTLO\"], \"left\", 0),\n (widgets[\"animationTLO\"], \"top\", 0),\n (widgets[\"animationTLO\"], \"left\", 500),\n (widgets[\"lightingTLO\"], \"top\", 250),\n (widgets[\"lightingTLO\"], \"left\", 0),\n (widgets[\"fxTLO\"], \"top\", 250),\n (widgets[\"fxTLO\"], \"left\", 500) \n ])\n\n # get the dictionary of scripts, calls and annotations from the database\n dbPath =os.path.join(os.getenv(\"MAYA_ROOT\"), \"scripts\", \"chrlx_pipe\", \"chrlxScriptList.json\")\n with open(dbPath, \"r\") as f:\n scriptList = json.load(f)\n\n # populate the row column layouts with buttons and funcs from the database\n btl.buttonsToLayout(widgets[\"animationRCLO\"], scriptList[\"shot\"][\"animation\"], width=117, height=40, color=(.38, .3, .38))\n btl.buttonsToLayout(widgets[\"lightingRCLO\"], scriptList[\"shot\"][\"lighting\"], width=117, height=40, color=(.37,.34, .3))\n btl.buttonsToLayout(widgets[\"fxRCLO\"], scriptList[\"shot\"][\"fx\"], width=117, height=40, color=(.35, .3, .3))\n btl.buttonsToLayout(widgets[\"charlexRCLO\"], scriptList[\"shot\"][\"charlex\"], width=117, height=40, color=(.3, .3, .3))\n\n # widgets[\"miscCLO\"] = cmds.columnLayout(\"Other Pipeline Tools\",w=1000, h=500, bgc = (.4,.4,.4))\n # cmds.text(l=\"------ANIM STUFF-------\")\n # cmds.text(l=\"TODO - export cam(s) for nuke, etc\")\n # cmds.text(l=\"TODO - create a new prop from selected geo (propify)\") \n # cmds.text(l=\"TODO - blasting, rendering stuff?\")\n # cmds.text(l=\"TODO - export data (text file of scene locations?)\")\n # cmds.text(l=\"TODO - create render cam? Should this be in the main anim increment? (probably both)\")\n\n # cmds.text(l=\"------LGT STUFF--------\")\n # cmds.text(l=\"TODO - set up current scene for maxwell, arnold\")\n # cmds.text(l=\"TODO - convert an external image to icon (char or project)\")\n # cmds.text(l=\"TODO - revert ['ROLL BACK'] to master version? (replaces master and grabs that workshop\")\n # cmds.text(l=\"TODO - function to add your folder to the WIP folder in this project - save current to WIP folder\")\n # cmds.text(l=\"TODO - explore various frame (render) folders in explorer\")\n # cmds.text(l=\"TODO - various preset light setups/rigs? 
\")\n\n\n ######################\n #show window\n ######################\n cmds.window(widgets[\"win\"], e=True, w=1000, h=580)\n cmds.showWindow(widgets[\"win\"])\n\n #start us off\n populateWindow()", "def create_job(self):\n job = Job()\n process = Process()\n process.process_graph = {\"load_collection1\": {\"process_id\": \"load_collection\", \"arguments\": {}}}\n\n job.process = process\n\n self.dlg = JobAdaptDialog(iface=self.iface, job=job, backend=self.backend, main_dia=self)\n self.dlg.manualButton.setIcon(QIcon(os.path.join(os.path.dirname(__file__),\n 'images/info_icon.png')))\n self.dlg.setWindowFlags(Qt.WindowStaysOnTopHint)\n self.dlg.show()", "def optionsWindow():\n\t# create the main interface\n\tif cmds.window(kSetupOptionsWindow, q=True, ex=True):\n\t\tcmds.deleteUI(kSetupOptionsWindow)\n\tmainWindow = cmds.window(kSetupOptionsWindow, title='%s Options'%kToolName, menuBar=True, wh=(545,350))\n\t\n\t# build the menu bar\n\tcmds.menu(label='Help')\n\tamui.helpMenuItem(kToolName, __file__)\n\tamui.aboutMenuItem(kToolName, kVersionNumber, kVersionDate)\n\t\n\tmainForm = cmds.formLayout(nd=100)\n\t\n\t# build the section to get information about the new twist joints\n\tif_suffixName = cmds.textFieldGrp(text='_Twist', label='Suffix of New Twist Joints:')\n\tif_numberTwistJoints = cmds.intSliderGrp(v=3, min=1, max=10, fmn=1, fmx=100, label='Number of Twist Joints:', field=True)\n\t\n\t# position the input fields for the twist joints\n\tcmds.formLayout(mainForm, edit=True, attachForm=[(if_suffixName, 'left', 30), (if_suffixName, 'top', 5)], attachNone=[(if_suffixName, 'right'), (if_suffixName, 'bottom')])\n\tcmds.formLayout(mainForm, edit=True, attachForm=[(if_numberTwistJoints, 'left', 30)], attachNone=[(if_numberTwistJoints, 'right'), (if_numberTwistJoints, 'bottom')], attachControl=[(if_numberTwistJoints, 'top', 5, if_suffixName)])\n\t\n\t# build the section to get information for the hip constraint\n\tconstraintFrame = eval('cmds.frameLayout(collapsable=True, label=\"Hip Constraint Options:\" %s)'%amui.__frameAlignCenter__)\n\tconstraintForm = cmds.formLayout(nd=100)\n\t\n\t# attempt to guess what the pelvis is if there is a selection when the GUI is created\n\tpelvisText = 'CenterRoot'\n\tsel = cmds.ls(sl=True, l=True, type='transform')\n\tif sel and len(sel) > 0: # BUG: in Maya 8.5, a selection of length 0 returns None rather than an empty list\n\t\ttry:\n\t\t\thip = cmds.listRelatives(sel[0], p=True, f=True) # just use the first knee in the selection\n\t\t\tpelvis = cmds.listRelatives(hip[0], p=True, f=True)\n\t\t\tpelvisText = pelvis[0]\n\t\texcept: pass\n\t\t\n\tif_pelvis = cmds.textFieldGrp(label='Pelvis Object:', tx=pelvisText)\n\tif_hipAimAxis = cmds.floatFieldGrp(v1=1, v2=0, v3=0, nf=3, pre=4, label='Hip Aim Axis:')\n\tif_hipFrontAxis = cmds.floatFieldGrp(v1=0, v2=0, v3=1, nf=3, pre=4, label='Hip Front Axis:')\n\tif_pelvisAimAxis = cmds.floatFieldGrp(v1=0, v2=1, v3=0, nf=3, pre=4, label='Pelvis Aim Axis:')\n\tif_pelvisFrontAxis = cmds.floatFieldGrp(v1=0, v2=0, v3=1, nf=3, pre=4, label='Pelvis Front Axis:')\n\t\n\t# position the input fields for the hip constraint\n\tcmds.formLayout(constraintForm, edit=True, attachForm=[(if_pelvis, 'left', 30), (if_pelvis, 'top', 5)], attachNone=[(if_pelvis, 'right'), (if_pelvis, 'bottom')])\n\tcmds.formLayout(constraintForm, edit=True, attachForm=[(if_hipAimAxis, 'left', 30)], attachNone=[(if_hipAimAxis, 'right'), (if_hipAimAxis, 'bottom')], attachControl=[(if_hipAimAxis, 'top', 5, if_pelvis)])\n\tcmds.formLayout(constraintForm, 
edit=True, attachForm=[(if_hipFrontAxis, 'left', 30)], attachNone=[(if_hipFrontAxis, 'right'), (if_hipFrontAxis, 'bottom')], attachControl=[(if_hipFrontAxis, 'top', 5, if_hipAimAxis)])\n\tcmds.formLayout(constraintForm, edit=True, attachForm=[(if_pelvisAimAxis, 'left', 30)], attachNone=[(if_pelvisAimAxis, 'right'), (if_pelvisAimAxis, 'bottom')], attachControl=[(if_pelvisAimAxis, 'top', 5, if_hipFrontAxis)])\n\tcmds.formLayout(constraintForm, edit=True, attachForm=[(if_pelvisFrontAxis, 'left', 30)], attachNone=[(if_pelvisFrontAxis, 'right'), (if_pelvisFrontAxis, 'bottom')], attachControl=[(if_pelvisFrontAxis, 'top', 5, if_pelvisAimAxis)])\n\t\n\tcmds.setParent('..') # go up to constraintForm\n\tcmds.setParent('..') # go up to mainForm\n\t\n\t# position the frame for the hip constraint\n\tcmds.formLayout(mainForm, edit=True, attachPosition=[(constraintFrame, 'left', -1, 0), (constraintFrame, 'right', -1, 100)], attachControl=[(constraintFrame, 'top', 5, if_numberTwistJoints)], attachNone=[(constraintFrame, 'bottom')])\n\t\n\t# create the buttons to execute the script\n\tcmd_create='amTools.rigging.hipSetup.doOptions (\"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\")'%(\n\t\tif_suffixName, \n\t\tif_numberTwistJoints, \n\t\tif_pelvis, \n\t\tif_hipAimAxis, \n\t\tif_hipFrontAxis, \n\t\tif_pelvisAimAxis, \n\t\tif_pelvisFrontAxis)\n\tutils.ui.threeButtonLayout(mainForm, mainWindow, cmd_create)\n\t\n\tcmds.showWindow(mainWindow)", "def openTB4Settings(self):\n self.TB4_Window = QtWidgets.QDialog()\n self.TB4_ui = Ui_robotFourConfig()\n self.TB4_ui.setupUi(self.TB4_Window)\n self.TB4_Window.show()", "def openTB1Settings(self):\n self.TB1_Window = QtWidgets.QDialog()\n self.TB1_ui = Ui_robotOneConfig()\n self.TB1_ui.setupUi(self.TB1_Window)\n self.TB1_Window.show()", "def openTB3Settings(self):\n self.TB3_Window = QtWidgets.QDialog()\n self.TB3_ui = Ui_robotThreeConfig()\n self.TB3_ui.setupUi(self.TB3_Window)\n self.TB3_Window.show()", "def job_manual(self):\n try:\n webbrowser.open(\"https://openeo.org/documentation/1.0/qgis/#job-management\")\n except:\n pass", "def openRocConfig(self):\n self.rocConfig_Window = QtWidgets.QDialog()\n self.rocConfig_ui = Ui_rocConfigure()\n self.rocConfig_ui.setupUi(self.rocConfig_Window)\n self.rocConfig_Window.show()", "def open_settings(self, event):\n settings_dialog = cfg.SettingsDialog(parent=self, exclude=['window'])\n res = settings_dialog.ShowModal()\n if res == wx.ID_OK:\n # Reload relevant parts of app\n restart_monitor_timer = False\n restart_gui_timer = False\n reload_correlations = False\n reload_logger = False\n reload_graph = False\n\n for setting in settings_dialog.changed_settings:\n # If any 'monitor.' settings except 'monitor.divergence_threshold have changed then restart\n # monitoring timer with new settings.\n # If 'monitor.interval has changed then restart gui timer.\n # If 'monitor.monitoring_threshold' has changed, then refresh correlation data.\n # If any 'logging.' settings have changed, then reload logger config.\n if setting.startswith('monitor.') and setting != 'monitor.divergence_threshold':\n restart_monitor_timer = True\n if setting == 'monitor.interval':\n restart_gui_timer = True\n if setting == 'monitor.monitoring_threshold':\n reload_correlations = True\n if setting.startswith('logging.'):\n reload_logger = True\n if setting.startswith('monitor.calculations'):\n reload_graph = True\n\n # Now perform the actions\n if restart_monitor_timer:\n self.__log.info(\"Settings updated. 
Reloading monitoring timer.\")\n self.__cor.stop_monitor()\n\n # Build calculation params and start monitor\n calculation_params = [self.__config.get('monitor.calculations.long'),\n self.__config.get('monitor.calculations.medium'),\n self.__config.get('monitor.calculations.short')]\n\n self.__cor.start_monitor(interval=self.__config.get('monitor.interval'),\n calculation_params=calculation_params,\n cache_time=self.__config.get('monitor.tick_cache_time'),\n autosave=self.__config.get('monitor.autosave'),\n filename=self.__opened_filename)\n\n if restart_gui_timer:\n self.__log.info(\"Settings updated. Restarting gui timer.\")\n self.timer.Stop()\n self.timer.Start(self.__config.get('monitor.interval') * 1000)\n\n if reload_correlations:\n self.__log.info(\"Settings updated. Updating monitoring threshold and reloading grid.\")\n self.__cor.monitoring_threshold = self.__config.get(\"monitor.monitoring_threshold\")\n self.__refresh_grid()\n\n if reload_logger:\n self.__log.info(\"Settings updated. Reloading logger.\")\n log_config = cfg.Config().get('logging')\n logging.config.dictConfig(log_config)\n\n if reload_graph:\n self.__log.info(\"Settings updated. Reloading graph.\")\n if len(self.__selected_correlation) == 2:\n self.show_graph(symbol1=self.__selected_correlation[0], symbol2=self.__selected_correlation[1])", "def openTB2Settings(self):\n self.TB2_Window = QtWidgets.QDialog()\n self.TB2_ui = Ui_robotTwoConfig()\n self.TB2_ui.setupUi(self.TB2_Window)\n self.TB2_Window.show()", "def __init__(self, parent, state, position = wx.DefaultPosition):\n ##Set up data.\n self.state = state\n modeName = MODE_LIST[self.state.GetSurface(\"Mode\")]\n wx.Dialog.__init__(self, parent, -1, \"%s Mode Settings\" %(modeName),\n pos = position,\n style = wx.DEFAULT_FRAME_STYLE ^ (wx.RESIZE_BORDER | \n wx.MINIMIZE_BOX |\n wx.MAXIMIZE_BOX)\n | wx.TAB_TRAVERSAL)\n ##Jconf pull-down menu.\n \n self.lblStBox1 = wx.StaticBox(self, -1, \"Programs to launch\" )\n ##Name Server checkbox.\n self.cbNameServer = wx.CheckBox(self, -1, \"Name Server\", wx.DefaultPosition, wx.DefaultSize, 0 )\n self.cbNameServer.SetToolTip(wx.ToolTip(\"Run Name Server at Launch\"))\n ##Conductor checkbox.\n self.cbConductor = wx.CheckBox(self, -1, \"Conductor\", wx.DefaultPosition, wx.DefaultSize, 0 )\n self.cbConductor.SetToolTip(wx.ToolTip(\"Run Conductor at Launch\"))\n ##Xplorer checkbox.\n self.cbXplorer = wx.CheckBox(self, -1, \"Xplorer\", wx.DefaultPosition, wx.DefaultSize, 0 )\n self.cbXplorer.SetToolTip(wx.ToolTip(\"Run Xplorer at Launch\"))\n ##Desktop checkbox.\n self.cbDesktop = wx.CheckBox(self, -1, \"Desktop Mode\", wx.DefaultPosition, wx.DefaultSize, 0 )\n self.cbDesktop.SetToolTip(wx.ToolTip(\"Set Desktop Mode for\" +\n \" Conductor and Xplorer\"))\n \n self.lblStBox2 = wx.StaticBox(self, -1, \"Xplorer Configuration\" )\n ##Xplorer Type radio box.\n self.rbXplorer = wx.RadioBox(self, -1, \"Mode\",\n wx.DefaultPosition, wx.DefaultSize,\n RADIO_XPLORER_LIST, 1, wx.RA_SPECIFY_ROWS)\n self.rbXplorer.SetToolTip(wx.ToolTip(\"Which Xplorer format do you\" +\n \" want to launch?\"))\n ##Cluster button.\n self.bCluster = wx.Button(self, -1, \"Cluster Settings\", wx.DefaultPosition, wx.DefaultSize, 0 )\n self.bCluster.SetToolTip(wx.ToolTip(\"Set the computers and extra\" +\n \" variables in the cluster.\"))\n ##Configuration Choice\n self.chJconf = wx.Choice(self, -1, wx.DefaultPosition, [150,-1])\n self.chJconf.SetToolTip(wx.ToolTip(\"Choose Xplorer's configuration.\"))\n ##Edit Jconf button.\n self.bEditJconf = wx.Button(self, 
-1, \"Edit Configuration List\", wx.DefaultPosition, wx.DefaultSize, 0 )\n self.bEditJconf.SetToolTip(wx.ToolTip(\"Edit the list of Xplorer\" +\n \" configurations.\")) \n #OK and Cancel button\n if windows:\n self.bOk = wx.Button( self, wx.ID_OK, \"OK\", wx.DefaultPosition, wx.DefaultSize, 0 )\n else:\n self.bOk = wx.Button( self, wx.ID_SAVE, \"Save\", wx.DefaultPosition, wx.DefaultSize, 0 )\n self.bCancel = wx.Button( self, wx.ID_CANCEL, \"Cancel\", wx.DefaultPosition, wx.DefaultSize, 0 )\n \n ##Bind events.\n self.Bind(wx.EVT_LISTBOX, self.Refresh, self.chJconf)\n self.Bind(wx.EVT_CHECKBOX, self.Refresh, self.cbXplorer)\n self.Bind(wx.EVT_RADIOBOX, self.Refresh, self.rbXplorer)\n self.Bind(wx.EVT_CHECKBOX, self.Refresh, self.cbConductor)\n self.Bind(wx.EVT_CHECKBOX, self.Refresh, self.cbDesktop)\n \"\"\"\n self.Bind(wx.EVT_LISTBOX, self.UpdateData, self.chJconf)\n self.Bind(wx.EVT_CHECKBOX, self.UpdateData, self.cbXplorer)\n self.Bind(wx.EVT_RADIOBOX, self.UpdateData, self.rbXplorer)\n self.Bind(wx.EVT_CHECKBOX, self.UpdateData, self.cbConductor)\n self.Bind(wx.EVT_CHECKBOX, self.UpdateData, self.cbDesktop)\n \"\"\"\n self.Bind(wx.EVT_CLOSE, self.OnClose)\n if windows:\n self.Bind(wx.EVT_BUTTON, self.OnOk, id = wx.ID_OK)\n else:\n self.Bind(wx.EVT_BUTTON, self.OnOk, id = wx.ID_SAVE)\n self.Bind(wx.EVT_BUTTON, self.EditJconf, self.bEditJconf)\n self.Bind(wx.EVT_BUTTON, self.EditCluster, self.bCluster)\n \n ##Set sizers.\n vSizerMain = wx.BoxSizer( wx.VERTICAL )\n vSizer1 = wx.BoxSizer( wx.VERTICAL )\n svSizer1 = wx.StaticBoxSizer( self.lblStBox1, wx.VERTICAL )\n svSizer1.Add( self.cbNameServer, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5 )\n hSizer1 = wx.BoxSizer( wx.HORIZONTAL )\n hSizer1.Add( self.cbConductor, 0, wx.ALIGN_CENTER|wx.ALL, 5 )\n spacer1 = wx.StaticText(self, -1, \" \", wx.DefaultPosition, wx.DefaultSize, 0 )\n hSizer1.Add( spacer1, 0, wx.ALIGN_CENTER, 5 )\n hSizer1.Add( self.cbDesktop, 0, wx.ALIGN_CENTER|wx.ALL, 5 )\n svSizer1.Add( hSizer1, 0, wx.ALIGN_CENTER_VERTICAL, 5 )\n svSizer1.Add( self.cbXplorer, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5 )\n vSizer1.Add( svSizer1, 0, wx.GROW|wx.ALIGN_CENTER_VERTICAL|wx.TOP, 5 )\n spacer2 = wx.StaticText(self, -1, \"\", wx.DefaultPosition, [10,10], 0 )\n vSizer1.Add( spacer2, 0, wx.ALIGN_CENTER, 5 )\n svSizer2 = wx.StaticBoxSizer( self.lblStBox2, wx.VERTICAL )\n hSizer2 = wx.BoxSizer( wx.HORIZONTAL )\n hSizer2.Add( self.rbXplorer, 0, wx.ALIGN_CENTER|wx.ALL, 5 )\n hSizer2.Add( self.bCluster, 0, wx.ALIGN_CENTER|wx.LEFT|wx.RIGHT|wx.TOP, 5 )\n svSizer2.Add( hSizer2, 0, wx.ALIGN_CENTER_VERTICAL, 5 )\n hSizer3 = wx.BoxSizer( wx.HORIZONTAL )\n hSizer3.Add( self.chJconf, 0, wx.ALIGN_CENTER|wx.ALL, 5 )\n hSizer3.Add( self.bEditJconf, 0, wx.ALIGN_CENTER|wx.ALL, 5 )\n svSizer2.Add( hSizer3, 0, wx.ALIGN_CENTER, 5 )\n vSizer1.Add( svSizer2, 0, wx.GROW|wx.ALIGN_CENTER_VERTICAL, 5 )\n hSizer4 = wx.BoxSizer( wx.HORIZONTAL )\n if windows:\n hSizer4.Add( self.bOk, 0, wx.ALIGN_CENTER|wx.ALL, 5 ) \n hSizer4.Add( self.bCancel, 0, wx.ALIGN_CENTER|wx.LEFT|wx.TOP|wx.BOTTOM, 5 )\n else: \n hSizer4.Add( self.bCancel, 0, wx.ALIGN_CENTER|wx.ALL, 5 ) \n hSizer4.Add( self.bOk, 0, wx.ALIGN_CENTER|wx.LEFT|wx.TOP|wx.BOTTOM, 5 )\n vSizer1.Add( hSizer4, 0, wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.TOP, 5 )\n vSizerMain.Add( vSizer1, 0, wx.ALIGN_CENTER|wx.ALL, 5 ) \n \n vSizerMain.SetSizeHints(self)\n self.SetSizer(vSizerMain)\n #self.CenterOnParent(wx.BOTH)\n ##Set the background color.\n #Style(self)\n if not CLUSTER_ENABLED:\n self.bCluster.Hide()\n ##Set up OK 
button.\n ##Update Display\n self.React()", "def openGameTools(*args):\n pyqt.showDialog(gameTools)", "def createJobDialog(self):\n\n def validatename(text):\n if not hasattr(self.DB.meta,'peatsa_jobs'):\n return 1\n if text in self.DB.meta.peatsa_jobs:\n return -1\n else:\n return 1\n def close():\n jobdlg.destroy()\n def loadmuts(): \n filename=tkFileDialog.askopenfilename(initialdir=os.getcwd(),\n filetypes=[(\"All files\",\"*\")])\n if filename:\n mutlist.importfile(filename)\n return\n def loadmutsfromDB():\n for p in self.DB.getRecs(): \n mut = self.DB.get(p).Mutations\n if mut == None or mut=='':\n continue\n if type(mut) is types.StringType:\n mutlist.appendtext(mut+'\\n')\n else:\n mutstring = mut.getMutationString()\n if mutstring != None:\n mutlist.appendtext(mutstring+'\\n')\n return\n def getstruct():\n filename=tkFileDialog.askopenfilename(defaultextension='.pdb',\n initialdir=os.getcwd(),\n filetypes=[(\"pdb\",\"*.pdb\"),(\"All files\",\"*.*\")])\n pdbentry.setvalue(filename)\n return\n def getligand():\n self.ligandfile = tkFileDialog.askopenfilename(defaultextension='.pdb',\n initialdir=os.getcwd(),\n filetypes=[(\"mol2\",\"*.mol2\"),(\"All files\",\"*.*\")])\n \n def submit():\n \n #if calcmenu.getcurselection() == 'both':\n # calcs = ['stability','binding']\n if calcmenu.getcurselection() == 'pka':\n calcs = ['scan']\n else:\n calcs = [calcmenu.getcurselection()]\n mutationlist = mutlist.getvalue().split('\\n')\n mutationlist.remove('')\n pdbfile=None; pdb = None\n quality = mutqualentry.getvalue()\n \n if not hasattr(self.DB.meta, 'refprotein') or self.DB.meta.refprotein == None:\n tkMessageBox.showinfo('No ref protein',\n 'Set a reference (wt) protein first')\n return\n #if self.useref.get() == 1:\n #we use ref pdb by default now\n pdbfile = self.writetempPDB()\n pdbname = self.getrefPDBName()\n \n \n if len(mutationlist) == 0 or mutationlist==[u'']:\n print 'mutation list is empty'\n return\n if hasattr(self.DB.meta,'peatsa_jobs') and nameentry.getvalue() in self.DB.meta.peatsa_jobs:\n print 'job name already used'\n return\n name=nameentry.getvalue()\n expcol = expcolmenu.getcurselection()\n self.submitJob(name=name, pdbname=pdbname,\n pdb=pdb, pdbfile=pdbfile,\n ligandfile=self.ligandfile,\n mutations=mutationlist,\n calcs=calcs, mutationquality=quality,\n meta={'expcol':expcol,'pdbname':pdbname}) \n close()\n \n jobdlg = Toplevel()\n jobdlg.geometry('+220+220')\n jobdlg.title('Create Calculation')\n balloon = Pmw.Balloon(jobdlg)\n nameentry = Pmw.EntryField(jobdlg,\n labelpos = 'w',\n label_text = 'Name:',\n validate = validatename,\n value = 'mycalc')\n nameentry.pack(fill=BOTH,expand=1)\n balloon.bind(nameentry, 'Calculation name can be anything, but should be unique') \n expcols = ['']+self.DB.getSimpleFields()\n expcolmenu = Pmw.OptionMenu(jobdlg,\n labelpos = 'w',\n label_text = 'Exp. 
col:',\n items = expcols,\n initialitem = '',\n menubutton_width = 8) \n expcolmenu.pack(fill=BOTH,expand=1)\n balloon.bind(expcolmenu, 'Field with experimental data to compare, optional') \n calcmenu = Pmw.OptionMenu(jobdlg,\n labelpos = 'w',\n label_text = 'Calculation Type:',\n items = self.calctypes,\n initialitem = 'stability',\n menubutton_width = 8)\n calcmenu.pack(fill=X,expand=1)\n fr=Frame(jobdlg)\n fr.pack(fill=X,expand=1)\n mutqualentry = Pmw.EntryField(jobdlg,\n labelpos = 'w',\n label_text = 'Quality:',\n validate = validatename,\n value = '2.0')\n mutqualentry.pack(fill=BOTH,expand=1) \n Label(jobdlg,text='Using PDB: '+self.getrefPDBName()).pack(fill=BOTH,expand=1)\n self.ligandfile=None\n mutlist = Pmw.ScrolledText(jobdlg,\n labelpos = 'n',\n label_text='Mutations:',\n usehullsize = 1,\n hull_width = 200,\n hull_height = 250,\n text_wrap='word') \n mutlist.pack(fill=BOTH,expand=1)\n Button(jobdlg,text='Load Mutations from Project',command=loadmutsfromDB).pack(fill=X,expand=1) \n Button(jobdlg,text='Load Mutations from File',command=loadmuts).pack(fill=X,expand=1)\n balloon.bind(mutlist, 'Enter one mutation per line in the form\\n A:0003:ALA or A3A')\n f=Frame(jobdlg); f.pack(fill=X,expand=1)\n Button(f,text='Submit',command=submit).pack(side=LEFT,fill=X,expand=1,pady=2)\n Button(f,text='Cancel',command=close).pack(fill=X,expand=1,pady=2) \n jobdlg.grab_set()\n jobdlg.transient(self.parent)\n self.parent.wait_window(jobdlg)\n return", "def ui(self):\r\n \r\n # delete the window if its handle exists\r\n if cmds.window(self.window, exists=True):\r\n cmds.deleteUI(self.window, window=True)\r\n \r\n #reads settings\r\n self.read()\r\n \r\n # initialize the window\r\n self.window = cmds.window(\r\n self.window,\r\n title=self.title,\r\n width=200,\r\n sizeable=False,\r\n mnb=False,\r\n mxb=False\r\n )\r\n self.mainCol = cmds.columnLayout( adjustableColumn=True )\r\n cmds.text( label='', align='center',height=5)\r\n cmds.text( label='email (From)', align='left' , width=150)\r\n self.userFld = cmds.textField(text=self.login)\r\n \r\n \r\n cmds.text( label='', align='center',height=10)\r\n cmds.text( label='Password', align='left' )\r\n self.pswdFld = cmds.textField(text=self.password)\r\n \r\n \r\n cmds.text( label='', align='center',height=10)\r\n cmds.text( label='email (To)', align='left' )\r\n self.toFld = cmds.textField(text=self.to)\r\n \r\n cmds.text( label='', align='center',height=10)\r\n cmds.text( label='RenderTime in Minutes', align='left' )\r\n self.timeFld = cmds.textField(text=self.time, )\r\n\r\n cmds.text( label='', align='center',height=10)\r\n cmds.text( label='Smtp Server:port', align='left' )\r\n self.smtpFld = cmds.textField(text=self.smtp, )\r\n\r\n\r\n cmds.text( label='', align='center',height=10)\r\n\r\n self.saveBtn = cmds.button( label='Save Settings',command=self.save)\r\n self.installBtn = cmds.button( label='Install Scripts',command=self.install)\r\n self.installBtn = cmds.button( label='Send Test Email',command=self.test)\r\n \r\n cmds.separator( height=40, style='doubleDash' )\r\n cmds.text( label='Script by Dhruv Govil', align='center' )\r\n cmds.text( label='www.dgovil.com', align='center',hyperlink=True )\r\n cmds.text( label='', align='center',height=10)\r\n cmds.showWindow( self.window )", "def PopulateSubmitter( gui ):\n global submissionInfo\n print( \"Grabbing submitter info...\" )\n try:\n stringSubInfo = CallDeadlineCommand( [ \"-prettyJSON\", \"-GetSubmissionInfo\", \"Pools\", \"Groups\", \"MaxPriority\", \"UserHomeDir\", 
\"RepoDir:submission/Katana/Main\", \"RepoDir:submission/Integration/Main\", ], useDeadlineBg=True )\n output = json.loads( stringSubInfo, encoding=\"utf-8\" )\n except:\n print( \"Unable to get submitter info from Deadline:\\n\\n\" + traceback.format_exc() )\n raise\n if output[ \"ok\" ]:\n submissionInfo = output[ \"result\" ]\n else:\n print( \"DeadlineCommand returned a bad result and was unable to grab the submitter info.\\n\\n\" + output[ \"result\" ] )\n raise ValueError( output[ \"result\" ] )\n # Create a widget with a vertical box layout as a container for widgets to include in the tab\n scrollWidget = QWidget()\n scrollLayout = QGridLayout(scrollWidget)\n scrollLayout.setSpacing(4)\n scrollLayout.setContentsMargins(4, 4, 4, 4)\n\n buttonLayout = QHBoxLayout()\n\n # First layout: General options\n scrollLayout.addWidget(CreateSeparator( \"Job Description\" ),0,0,1,3)\n\n jobNameLabel = QLabel( \"Job Name\" )\n jobNameLabel.setToolTip(\"The name of your job. This is optional, and if left blank, it will default to 'Untitled'.\")\n scrollLayout.addWidget(jobNameLabel,1,0)\n gui.jobNameWidget = QLineEdit( os.path.basename(FarmAPI.GetKatanaFileName()).split('.')[0] )\n scrollLayout.addWidget(gui.jobNameWidget, 1, 1, 1, 1 )\n\n commentLabel = QLabel( \"Comment\" )\n commentLabel.setToolTip(\"A simple description of your job. This is optional and can be left blank.\")\n scrollLayout.addWidget(commentLabel,2,0)\n gui.commentWidget = QLineEdit( \"\" )\n scrollLayout.addWidget(gui.commentWidget, 2, 1, 1, 1 )\n\n departmentLabel = QLabel( \"Department\" )\n departmentLabel.setToolTip( \"The department you belong to. This is optional and can be left blank.\" )\n scrollLayout.addWidget(departmentLabel, 3, 0)\n gui.departmentWidget = QLineEdit( \"\" )\n scrollLayout.addWidget(gui.departmentWidget, 3, 1, 1, 1 )\n\n # Second layout: Job options\n scrollLayout.addWidget(CreateSeparator( \"Job Options\" ),4,0,1,3)\n\n pools = submissionInfo[\"Pools\"]\n poolLabel = QLabel( \"Pool\" )\n poolLabel.setToolTip( \"The pool that your job will be submitted to.\" )\n scrollLayout.addWidget(poolLabel, 5, 0)\n\n gui.poolsWidget = QComboBox()\n gui.poolsWidget.addItems(pools)\n scrollLayout.addWidget(gui.poolsWidget, 5, 1 )\n\n secondPoolLabel = QLabel( \"Secondary Pool\" )\n secondPoolLabel.setToolTip( \"The secondary pool lets you specify a pool to use if the primary pool does not have any available Slaves.\" )\n scrollLayout.addWidget(secondPoolLabel, 6, 0 )\n\n gui.secondPoolsWidget = QComboBox()\n gui.secondPoolsWidget.addItems(pools)\n scrollLayout.addWidget(gui.secondPoolsWidget, 6, 1 )\n\n groups = submissionInfo[ \"Groups\" ]\n groupLabel = QLabel( \"Group\" )\n groupLabel.setToolTip( \"The group that your job will be submitted to.\" )\n scrollLayout.addWidget(groupLabel, 7, 0)\n\n gui.groupWidget = QComboBox()\n gui.groupWidget.addItems(groups)\n scrollLayout.addWidget(gui.groupWidget, 7, 1)\n\n priorityLabel = QLabel( \"Priority\" )\n priorityLabel.setToolTip( \"A job can have a numeric priority from 0 to 100, where 0 is the lowest priority and 100 is the highest.\" )\n scrollLayout.addWidget(priorityLabel, 8, 0)\n\n maxPriority = submissionInfo[\"MaxPriority\"]\n\n gui.priorityBox = QSpinBox()\n gui.priorityBox.setMinimum(0)\n gui.priorityBox.setMaximum( maxPriority )\n scrollLayout.addWidget(gui.priorityBox, 8, 1)\n\n taskTimeoutLabel = QLabel( \"Task Timeout\" )\n taskTimeoutLabel.setToolTip( \"The number of minutes a Slave has to render a task for this job before it requeues it. 
Specify 0 for no limit.\" )\n scrollLayout.addWidget(taskTimeoutLabel, 9, 0)\n\n gui.taskTimeoutBox = QSpinBox()\n gui.taskTimeoutBox.setMinimum(0)\n gui.taskTimeoutBox.setMaximum(10000)\n scrollLayout.addWidget(gui.taskTimeoutBox, 9, 1)\n\n concurrentTasksLabel = QLabel( \"Concurrent Tasks\" )\n concurrentTasksLabel.setToolTip(\"The number of tasks that can render concurrently on a single Slave. This is useful if the rendering application only uses one thread to render and your Slaves have multiple CPUs.\")\n scrollLayout.addWidget(concurrentTasksLabel, 10, 0 )\n gui.concurrentTasksWidget = QSpinBox( )\n scrollLayout.addWidget(gui.concurrentTasksWidget, 10, 1)\n gui.concurrentTasksWidget.setMinimum(1)\n gui.concurrentTasksWidget.setMaximum(16)\n gui.limitTasksSlaveLimit = QCheckBox( \"Limit Tasks To Slave's Task Limit\" )\n gui.limitTasksSlaveLimit.setToolTip( \"If you limit the tasks to a Slave's task limit, then by default, the Slave won't dequeue more tasks then it has CPUs. This task limit can be overridden for individual Slaves by an administrator.\" )\n scrollLayout.addWidget(gui.limitTasksSlaveLimit, 10, 2)\n\n machineLimitLabel = QLabel( \"Machine Limit\" )\n machineLimitLabel.setToolTip(\"Use the Machine Limit to specify the maximum number of machines that can render your job at one time. Specify 0 for no limit.\")\n scrollLayout.addWidget( machineLimitLabel, 11, 0 )\n\n gui.machineLimitWidget = QSpinBox()\n scrollLayout.addWidget(gui.machineLimitWidget, 11, 1)\n gui.isBlackListWidget = QCheckBox( \"Machine List Is Blacklist\" )\n gui.isBlackListWidget.setToolTip(\"You can force the job to render on specific machines by using a whitelist, or you can avoid specific machines by using a blacklist.\")\n scrollLayout.addWidget(gui.isBlackListWidget, 11, 2)\n\n machineListLabel = QLabel( \"Machine List\" )\n machineListLabel.setToolTip(\"The whitelisted or blacklisted list of machines.\")\n scrollLayout.addWidget( machineListLabel, 12, 0 )\n\n machineListLayout = QHBoxLayout()\n gui.machineListWidget = QLineEdit( \"\" )\n machineListLayout.addWidget(gui.machineListWidget)\n getMachineListWidget = QPushButton( \"...\" )\n getMachineListWidget.pressed.connect( lambda: BrowseMachineList(gui.machineListWidget) )\n machineListLayout.addWidget(getMachineListWidget)\n scrollLayout.addLayout( machineListLayout, 12, 1, 1, 2 )\n\n limitsLabel = QLabel( \"Limits\" )\n limitsLabel.setToolTip(\"The Limits that your job requires.\")\n scrollLayout.addWidget( limitsLabel, 13, 0 )\n limitsLayout = QHBoxLayout()\n gui.limitsWidget = QLineEdit( \"\" )\n limitsLayout.addWidget(gui.limitsWidget)\n getLimitsWidget = QPushButton( \"...\" )\n getLimitsWidget.pressed.connect( lambda: BrowseLimitList(gui.limitsWidget) )\n limitsLayout.addWidget(getLimitsWidget)\n scrollLayout.addLayout( limitsLayout, 13, 1, 1, 2 )\n\n dependenciesLabel = QLabel( \"Dependencies\" )\n dependenciesLabel.setToolTip(\"Specify existing jobs that this job will be dependent on. 
This job will not start until the specified dependencies finish rendering.\")\n scrollLayout.addWidget( dependenciesLabel, 14, 0 )\n dependenciesLayout = QHBoxLayout()\n gui.dependenciesWidget = QLineEdit( \"\" )\n dependenciesLayout.addWidget(gui.dependenciesWidget)\n getDependenciesWidget = QPushButton( \"...\" )\n getDependenciesWidget.pressed.connect( lambda: BrowseDependencyList(gui.dependenciesWidget) )\n dependenciesLayout.addWidget(getDependenciesWidget)\n scrollLayout.addLayout( dependenciesLayout, 14, 1, 1, 2 )\n\n onJobCompleteLabel = QLabel( \"On Job Complete\" )\n onJobCompleteLabel.setToolTip(\"If desired, you can automatically archive or delete the job when it completes.\")\n scrollLayout.addWidget( onJobCompleteLabel, 15, 0 )\n gui.onJobCompleteWidget = QComboBox( )\n gui.onJobCompleteWidget.addItems([\"Nothing\", \"Archive\", \"Delete\"])\n scrollLayout.addWidget(gui.onJobCompleteWidget, 15, 1)\n gui.submitSuspendedWidget = QCheckBox( \"Submit Job as Suspended\" )\n gui.submitSuspendedWidget.setToolTip( \"If enabled, the job will submit in the suspended state. This is useful if you don't want the job to start rendering right away. Just resume it from the Monitor when you want it to render.\")\n scrollLayout.addWidget(gui.submitSuspendedWidget, 15, 2)\n\n # Third layout: Katana options\n scrollLayout.addWidget(CreateSeparator( \"Katana Options\" ),16,0,1,3)\n\n frameRangeLabel = QLabel( \"Frame Range\" )\n frameRangeLabel.setToolTip(\"The list of frames to render.\")\n scrollLayout.addWidget( frameRangeLabel, 17, 0 )\n gui.frameRangeWidget = QLineEdit( \"\" ) # Populate based on frame range\n scrollLayout.addWidget( gui.frameRangeWidget, 17, 1, 1, 1 )\n\n frameRange = FarmAPI.GetSceneFrameRange()\n gui.frameRangeWidget.setText( str(frameRange['start']) + \"-\" + str(frameRange['end']) )\n\n gui.submitSceneBox = QCheckBox( \"Submit Katana Scene File\" )\n gui.submitSceneBox.setToolTip( \"If this option is enabled, the scene file will be submitted with the job, and then copied locally to the Slave machine during rendering.\" )\n scrollLayout.addWidget(gui.submitSceneBox, 17, 2 )\n\n framesPerTaskLabel = QLabel( \"Frames Per Task\" )\n framesPerTaskLabel.setToolTip( \"This is the number of frames that will be rendered at a time for each job task.\" )\n scrollLayout.addWidget( framesPerTaskLabel, 18, 0 )\n gui.framesPerTaskWidget = QSpinBox( )\n gui.framesPerTaskWidget.setMinimum(1)\n scrollLayout.addWidget( gui.framesPerTaskWidget, 18, 1, 1, 1 )\n\n gui.useWorkingDirectory = QCheckBox( \"Use Working Directory\" )\n gui.useWorkingDirectory.setToolTip( \"If enabled, the current working directory will be used during rendering. 
This is required if your Katana project file contains relative paths.\" )\n gui.useWorkingDirectory.setChecked(True)\n scrollLayout.addWidget( gui.useWorkingDirectory, 18, 2 )\n\n renderNodeSelectLabel = QLabel( \"Render Node Submission\" )\n renderNodeSelectLabel.setToolTip( \"Choose to render the whole scene, render all nodes as separate jobs, or render separate nodes\" )\n scrollLayout.addWidget( renderNodeSelectLabel, 19, 0 )\n\n gui.renderSelectBox = QComboBox()\n gui.renderSelectBox.addItems( [\"Submit All Render Nodes As Separate Jobs\", \"Select Render Node\"] )\n scrollLayout.addWidget( gui.renderSelectBox, 19, 1 )\n\n gui.includeImageWrite = QCheckBox( \"Include ImageWrite Nodes\" )\n gui.includeImageWrite.setToolTip( \"If enabled, ImageWrite nodes will be included for submission.\" )\n scrollLayout.addWidget( gui.includeImageWrite, 19, 2 )\n\n renderNodeLabel = QLabel( \"Render Node\" )\n renderNodeLabel.setToolTip( \"Set the render node to render with, or leave blank to use the node already set.\" )\n scrollLayout.addWidget( renderNodeLabel, 20, 0 )\n\n gui.frameDependent = QCheckBox( \"Submit Jobs As Frame Dependent\" )\n gui.frameDependent.setToolTip( \"If enabled, the Katana Job(s) will have Frame Dependencies. If your scene contains static content, do not use!\" )\n scrollLayout.addWidget( gui.frameDependent, 20, 2 )\n\n gui.renderNodeBox = QComboBox()\n gui.renderSelectBox.currentIndexChanged.connect( lambda: RenderSelectionChanged( gui.renderSelectBox, gui.renderNodeBox ) )\n scrollLayout.addWidget( gui.renderNodeBox, 20, 1)\n gui.renderNodeBox.setDisabled(True)\n # Submit button\n buttonLayoutSpacer = QSpacerItem( 0, 0, QSizePolicy.MinimumExpanding, QSizePolicy.Minimum )\n buttonLayout.addItem( buttonLayoutSpacer )\n\n gui.pipelineToolStatusLabel = QLabel( \"No Pipeline Tools Set\" )\n gui.pipelineToolStatusLabel.setAlignment( QtCore.Qt.AlignCenter )\n buttonLayout.addWidget( gui.pipelineToolStatusLabel )\n pipelineToolsButton = QPushButton( \"Pipeline Tools\" )\n pipelineToolsButton.pressed.connect( lambda: PipelineToolsClicked( gui ) )\n buttonLayout.addWidget( pipelineToolsButton )\n\n submitButton = QPushButton( \"Submit\" )\n submitButton.pressed.connect( lambda: SubmitPressed(gui) )\n buttonLayout.addWidget( submitButton )\n\n scrollLayout.addLayout( buttonLayout,21,0,1,3 )\n\n verticalStretchLayout = QVBoxLayout()\n verticalStretchLayout.addStretch()\n scrollLayout.addLayout( verticalStretchLayout, 22, 0 )\n\n scrollArea = QScrollArea()\n scrollArea.setWidget(scrollWidget)\n scrollArea.setWidgetResizable(True)\n scrollArea.setFrameStyle(QFrame.NoFrame + QFrame.Plain)\n\n vLayout = QVBoxLayout()\n vLayout.setObjectName('vLayout')\n vLayout.addWidget(scrollArea)\n\n gui.setLayout(vLayout)\n\n LoadStickySettings( gui )\n try:\n pipelineToolStatusMessage = RetrievePipelineToolStatus( raiseOnExitCode=True )\n except subprocess.CalledProcessError as e:\n pipelineToolStatusMessage = HandlePipelineToolsCalledProcessError( e )\n UpdatePipelineToolStatusLabel( gui, pipelineToolStatusMessage )\n\n # Populate the render node drop down based on the effective check state\n # of the \"Include ImageWrite Nodes\" checkbox after sticky settings are applied\n PopulateRenderNodeDropDown(gui.includeImageWrite.isChecked(), gui.renderNodeBox)\n # We delay wiring up this signal handler until after the sticky settings are applied to avoid\n # rebuilding the drop-down list multiple times unnecessarily\n gui.includeImageWrite.stateChanged.connect(lambda checked: 
PopulateRenderNodeDropDown(checked, gui.renderNodeBox))\n\n # Check if this tab is part of a pane in the main window, or if it is contained in a floating pane\n if gui.window() != UI4.App.MainWindow.CurrentMainWindow():\n # Resize the floating pane's window to accommodate the tab's widgets\n requiredSize = scrollWidget.sizeHint()\n gui.window().resize(max(requiredSize.width() + 20, 200), min(requiredSize.height() + 40, 1000))", "def start(self):\n window_layout = self.build_gui()\n window = sg.Window(self.app_name, window_layout)\n fields = self.config.get(\"fields\")\n while True:\n event, values = window.read()\n if event == sg.WIN_CLOSED or event == 'Cancel': # if user closes window or clicks cancel\n break\n\n if event == 'Build':\n # Validate fields\n errors = False\n for (key, value) in values.items():\n if key in fields:\n errmsg = \"\"\n if fields.get(key).get(\"type\") == \"str\":\n errmsg = self.validate_text_field(\n fields.get(key), value)\n if fields.get(key).get(\"type\") == \"int\":\n errmsg = self.validate_int_field(\n fields.get(key), value)\n if fields.get(key).get(\"type\") == \"list\":\n errmsg = self.validate_list_field(\n fields.get(key), value)\n if fields.get(key).get(\"type\") == \"date\":\n errmsg = self.validate_date_field(\n fields.get(key), value)\n if fields.get(key).get(\"type\") == \"textarea\":\n errmsg = self.validate_textarea_field(\n fields.get(key), value)\n\n if errmsg != \"\":\n sg.Popup(\"Opps!\", f\"{errmsg}\")\n errors = True\n break\n\n # Build document\n if not errors:\n self.sanitize_values(values)\n try:\n filename = self.build_document(values)\n sg.Popup(\n \"Congrats!\", f\"Your file ({filename}) was generated!\")\n break\n except Exception:\n e = sys.exc_info()[0]\n sg.Popup(f\"Problem generating your file. 
(Error: {e})\")", "def action_settings(self):\n\n cur_datadir = self.config.starbound_data_dir\n settings = SettingsDialog(self)\n settings.exec()\n new_datadir = self.config.starbound_data_dir\n if new_datadir:\n if cur_datadir != new_datadir:\n self.load_data()\n self.scene.refresh(self.data)\n else:\n self.close_world()\n\n # Make sure our menus are enabled/disabled as appropriate\n self.enforce_menu_state()\n\n # Re-focus the main window\n self.activateWindow()", "def launch_reporteditor():\r\n import sys\r\n from PyQt4 import QtGui\r\n from freeseer.frontend.reporteditor.reporteditor import ReportEditorApp\r\n\r\n profile = settings.profile_manager.get()\r\n config = profile.get_config('freeseer.conf', settings.FreeseerConfig,\r\n storage_args=['Global'], read_only=True)\r\n db = profile.get_database()\r\n\r\n app = QtGui.QApplication(sys.argv)\r\n main = ReportEditorApp(config, db)\r\n main.show()\r\n sys.exit(app.exec_())", "def guiMode(options):\n configuration = {'config_project_name': 'untitled', 'config_address': '0.0.0.0',\n 'config_port': 8081, 'config_multiple_instance': True, 'config_enable_file_cache': True,\n 'config_start_browser': True, 'config_resourcepath': './res/'}\n start(MainWindow, address=configuration['config_address'], port=configuration['config_port'],\n multiple_instance=configuration['config_multiple_instance'],\n enable_file_cache=configuration['config_enable_file_cache'],\n start_browser=configuration['config_start_browser'])", "def launchUI():\n app = QtWidgets.QApplication(sys.argv)\n ui = ClientFileManager()\n ui.resize(1200, 650)\n ui.show()\n sys.exit(app.exec_())", "def on_logger_plot_settings_triggered(self):\n\n # Set current parameters from time series plot widget class\n self.rawDataModule.plotSettings.set_dialog_data()\n self.rawDataModule.plotSettings.show()", "def __init__(self):\n\t\tself.walltime_edit = urwid.Edit( ('editcp',\"walltime=\"), \"200:00:00\" )\n\t\tself.nodes_edit = urwid.IntEdit( ('editcp', \"nodes=\"), 0 )\n\t\tself.myri_ppn_edit = urwid.IntEdit( ('editcp', \"myri:ppn=\"), 4)\n\t\tself.workdir_edit = urwid.Edit( (\"editcp\", \"WORKDIR(-d) \"), '~/qjob_output')\n\t\tself.runtime_output_checkbox = urwid.CheckBox(\"See output while running\")\n\t\tself.other_options_edit = urwid.Edit( (\"editcp\", \"others:\"), '-q cmb -j oe -S /bin/bash')\n\t\tself.source_bash_profile_checkbox = urwid.CheckBox(\"source ~/.bash_profile\")\n\t\tself.source_bash_profile_checkbox.set_state(True)\n\t\tself.just_write_down_checkbox = urwid.CheckBox(\"Write jobfile. 
No submission.\")\n\t\tself.jobname_prefix_edit = urwid.Edit( (\"editcp\", \"jobname_prefix:\"), '~/qjob/job')\n\t\tself.jobnumber_edit = urwid.IntEdit( (\"editcp\", \"job number:\"), 0)\n\t\tself.job_content_reset_button = urwid.Button(\"Job Content Reset\", self.job_content_reset)\n\t\tself.exit_button = urwid.Button(\"Exit\", self.program_exit)\n\t\tself.job_edit = urwid.Edit( ('editcp',\"\"), multiline=True )\n\t\t\n\t\tself.items = [\n\t\turwid.Padding(\n\t\t\turwid.Columns(\n\t\t\t\t[\n\t\t\t\turwid.AttrWrap( self.walltime_edit, 'editbx', 'editfc' ),\n\t\t\t\turwid.AttrWrap( self.nodes_edit, 'editbx', 'editfc'),\n\t\t\t\turwid.AttrWrap( self.myri_ppn_edit, 'editbx', 'editfc'),\n\t\t\t\t],\n\t\t\t\t2 ), \n\t\t\t('fixed left',2), ('fixed right',2)),\n\t\tblank,\n\t\turwid.Padding(\n\t\t\turwid.Columns(\n\t\t\t\t[\n\t\t\t\turwid.AttrWrap( self.workdir_edit, 'editbx', 'editfc' ), \n\t\t\t\turwid.AttrWrap( self.runtime_output_checkbox, 'buttn', 'buttnf'),\n\t\t\t\t],\n\t\t\t\t2),\n\t\t\t('fixed left',2), ('fixed right',2)),\n\t\tblank,\n\t\turwid.Padding(\n\t\t\turwid.AttrWrap( self.other_options_edit, 'editbx', 'editfc' ), ('fixed left',2), ('fixed right',2)),\n\t\tblank,\n\t\turwid.Padding(\n\t\t\turwid.GridFlow(\n\t\t\t\t[\n\t\t\t\turwid.AttrWrap( self.source_bash_profile_checkbox, 'buttn','buttnf'),\n\t\t\t\turwid.AttrWrap( self.just_write_down_checkbox, 'buttn', 'buttnf'),\n\t\t\t\turwid.AttrWrap( self.jobname_prefix_edit, 'editbx', 'editfc' ),\n\t\t\t\turwid.AttrWrap( self.jobnumber_edit, 'editbx', 'editfc' ),\n\t\t\t\turwid.AttrWrap(self.job_content_reset_button, 'buttn', 'buttnf'),\n\t\t\t\turwid.AttrWrap(self.exit_button, 'buttn', 'buttnf'),\n\t\t\t\t],\n\t\t\t\t34, 2, 1, 'left'),\n\t\t\t('fixed left',2), ('fixed right',2)),\n\t\tblank,\n\t\turwid.Padding(\n\t\t\turwid.Pile(\n\t\t\t[\n\t\t\turwid.Text('One line one job. One job with >1 commands put on one line, separated by ;'),\n\t\t\turwid.AttrWrap(self.job_edit, 'editbx', 'editfc'),\n\t\t\t], 1),\n\t\t\t('fixed left',2), ('fixed right',2) )\n\t\t\t\n\t\t]\n\t\t\n\t\tself.listbox = urwid.ListBox( self.items )\n\t\t\n\t\tinstruct = urwid.Text(\"Job submission program based on Urwid. 
F8 to submit, F12 to quit.\")\n\t\theader = urwid.AttrWrap( instruct, 'header' )\n\t\t\n\t\tself.footer_text = urwid.Text(\"Mar 15th, 2008 by Yu Huang\")\n\t\tfooter = urwid.AttrWrap(self.footer_text, 'footer')\n\t\t\n\t\tself.top_frame = urwid.Frame(urwid.AttrWrap(self.listbox, 'body'), header, footer)", "def settings(self, kwargs=None):\n # Create sublayout\n setting_layout = QGridLayout()\n\n # Frame over the objects\n frame = QLabel()\n frame.setFrameStyle(QFrame.Box | QFrame.Raised)\n frame.setLineWidth(0)\n frame.setMidLineWidth(2)\n\n self.layout.addWidget(\n frame, self.Start_posy, self.Start_posx, self.Start_ysize, self.Start_xsize\n )\n\n self.layout.addWidget(\n frame, self.proj_posy, self.proj_posx, self.proj_ysize, self.proj_xsize\n )\n\n # Order functions\n def change_name(filename):\n self.variables.default_values_dict[\"settings\"][\"Current_filename\"] = str(\n filename\n )\n\n def project_selector_action(project):\n load_valid_sensors_for_project(str(project))\n self.variables.default_values_dict[\"settings\"][\"Current_project\"] = str(\n project\n )\n\n def sensor_selector_action(sensor):\n self.variables.default_values_dict[\"settings\"][\"Current_sensor\"] = str(\n sensor\n )\n\n def operator_selector_action(operator):\n self.variables.default_values_dict[\"settings\"][\"Current_operator\"] = str(\n operator\n )\n\n def dir_selector_action():\n fileDialog = QFileDialog()\n directory = fileDialog.getExistingDirectory()\n dir_textbox.setText(directory)\n self.variables.default_values_dict[\"settings\"][\"Current_directory\"] = str(\n directory\n )\n\n def load_measurement_settings_file():\n \"\"\" This function loads a mesuerment settings file\"\"\"\n\n # First update the settings that the state machine is up to date\n self.variables.ui_plugins[\"Settings_window\"].load_new_settings()\n\n fileDialog = QFileDialog()\n file = fileDialog.getOpenFileName()\n\n if file[0]:\n file = open(str(file[0]), \"r\")\n dict = yaml.load(file)\n file.close()\n\n # l.info(\"Loaded new measurement settings file: \" + str(file[0]))\n self.variables.default_values_dict[\"settings\"].update(\n dict\n ) # Updates the values of the dict, it either updates the values or adds them if not incluced\n self.variables.ui_plugins[\"Settings_window\"].configure_settings()\n\n @raise_exception\n def save_measurement_settings_file(kwargs=None):\n \"\"\" This function saves a mesuerment settings file\"\"\"\n\n # First update the settings that the state machine is up to date\n self.variables.ui_plugins[\"Settings_window\"].load_new_settings()\n\n fileDialog = QFileDialog()\n file = fileDialog.getSaveFileName()\n\n if file[0]:\n # gets me all settings which are to be saved\n write_init_file(\n file[0],\n self.variables.ui_plugins[\"Settings_window\"].get_all_settings(),\n )\n self.log.info(\"Settings file successfully written to: \" + str(file))\n\n def load_valid_sensors_for_project(project_name):\n \"\"\"This function loads the valid sensors for each project\"\"\"\n # Warning sensor_comboBox must be accessable for this function to work\n sensor_comboBox.clear()\n try:\n # self.variables.default_values_dict[\"settings\"][\"Sensor_types\"][project_name]\n sensor_comboBox.addItems(\n list(self.variables.pad_files_dict[project_name].keys())\n ) # Adds all items to the combo box\n # Select the first element to be right, if possible\n self.variables.default_values_dict[\"settings\"][\n \"Current_sensor\"\n ] = sensor_comboBox.currentText()\n\n except:\n self.log.error(\"No sensors defined for project: \" + 
str(project_name))\n self.variables.default_values_dict[\"settings\"][\n \"Current_sensor\"\n ] = \"None\"\n\n # Project selector\n # Label of the Error Log\n proj_label = QLabel()\n proj_label.setText(\"Select project\")\n proj_label.setFont(self.font)\n\n proj_comboBox = QComboBox() # Creates a combo box\n\n for projects in self.variables.pad_files_dict:\n proj_comboBox.addItem(str(projects)) # Adds all projects to the combo box\n proj_comboBox.activated[str].connect(project_selector_action)\n\n if \"Current_project\" in self.variables.default_values_dict[\"settings\"]:\n self.variables.default_values_dict[\"settings\"][\"Current_project\"] = list(\n self.variables.pad_files_dict.keys()\n )[\n 0\n ] # That one project is definetly choosen\n else:\n self.variables.default_values_dict[\"settings\"].update(\n {\n \"Current_project\": self.variables.default_values_dict[\n \"settings\"\n ].get(\"Projects\", [\"No Projects\"])[0]\n }\n )\n\n # Sensore selection\n\n # Label of the sensor selector\n sensor_label = QLabel()\n sensor_label.setText(\"Select sensor\")\n sensor_label.setFont(self.font)\n\n sensor_comboBox = QComboBox() # Creates a combo box\n\n current_project = self.variables.default_values_dict[\"settings\"].get(\n \"Current_project\", None\n )\n sensor_comboBox.addItems(\n self.variables.pad_files_dict[current_project]\n ) # Adds all items to the combo box\n sensor_comboBox.activated[str].connect(sensor_selector_action)\n\n if \"Current_sensor\" in self.variables.default_values_dict[\"settings\"]:\n try:\n self.variables.default_values_dict[\"settings\"][\"Current_sensor\"] = list(\n self.variables.pad_files_dict[current_project]\n )[\n 0\n ] # That one project is definetly choosen\n except:\n self.variables.default_values_dict[\"settings\"][\n \"Current_sensor\"\n ] = \"None\"\n else:\n if current_project and self.variables.pad_files_dict:\n self.variables.default_values_dict[\"settings\"].update(\n {\n \"Current_sensor\": list(\n self.variables.pad_files_dict[current_project]\n )[0]\n }\n )\n else:\n self.variables.default_values_dict[\"settings\"].update(\n {\"Current_sensor\": \"None\"}\n )\n # Measurement name selection\n\n # Label of the input file\n\n inp_label = QLabel()\n inp_label.setText(\"Output filename\")\n inp_label.setFont(self.font)\n\n inp_input_name = QLineEdit()\n inp_input_name.textChanged.connect(change_name)\n # inp_input_name.setMaximumWidth(300)\n\n if \"Current_filename\" in self.variables.default_values_dict[\"settings\"]:\n inp_input_name.setText(\n str(self.variables.default_values_dict[\"settings\"][\"Current_filename\"])\n )\n else:\n self.variables.default_values_dict[\"settings\"].update(\n {\"Current_filename\": \"enter_filename_here\"}\n )\n inp_input_name.setText(\n str(self.variables.default_values_dict[\"settings\"][\"Current_filename\"])\n )\n\n # Operator selector\n\n # Label of the Operator\n op_label = QLabel()\n op_label.setText(\"Select Operator\")\n op_label.setFont(self.font)\n\n op_comboBox = QComboBox() # Creates a combo box\n\n for projects in self.variables.default_values_dict[\"settings\"].get(\n \"Operator\", \"None\"\n ):\n op_comboBox.addItem(str(projects)) # Adds all items to the combo box\n\n op_comboBox.activated[str].connect(operator_selector_action)\n\n if \"Current_operator\" in self.variables.default_values_dict[\"settings\"]:\n self.variables.default_values_dict[\"settings\"][\n \"Current_operator\"\n ] = self.variables.default_values_dict[\"settings\"][\"Operator\"][\n 0\n ] # That one project is definetly choosen\n 
else:\n self.variables.default_values_dict[\"settings\"].update(\n {\n \"Current_operator\": self.variables.default_values_dict[\n \"settings\"\n ].get(\"Operator\", [\"None\",])[0]\n }\n )\n\n # Save path selector\n\n # Save button\n save_to_btn = QPushButton(\"Save to\")\n save_to_btn.clicked.connect(dir_selector_action)\n save_to_btn.resize(save_to_btn.sizeHint())\n\n # Appearance of the Error Log\n dir_textbox = QLabel()\n dir_textbox.setStyleSheet(\"background : rgb(245,245,245)\")\n dir_textbox.setFrameStyle(QFrame.Panel | QFrame.Sunken)\n dir_textbox.setMinimumHeight(25)\n dir_textbox.setMinimumWidth(700)\n # dir_textbox.setMaximumHeight(25)\n # dir_textbox.setMaximumWidth(700)\n\n if (\n \"Current_directory\" in self.variables.default_values_dict[\"settings\"]\n ): # TODO check if directory exists\n dir_textbox.setText(\n str(self.variables.default_values_dict[\"settings\"][\"Current_directory\"])\n )\n else:\n self.variables.default_values_dict[\"settings\"].update(\n {\n \"Current_directory\": str(\n osp.join(osp.dirname(sys.modules[__name__].__file__))\n )\n }\n )\n dir_textbox.setText(\n str(osp.join(osp.dirname(sys.modules[__name__].__file__)))\n )\n\n # Default file loader\n file_load_btn = QPushButton(\"Load settings file\")\n file_load_btn.clicked.connect(load_measurement_settings_file)\n file_load_btn.resize(file_load_btn.sizeHint())\n\n # Save default file\n save_file_btn = QPushButton(\"Save settings file\")\n save_file_btn.clicked.connect(save_measurement_settings_file)\n save_file_btn.resize(save_file_btn.sizeHint())\n\n # Adding all widgets to the sublayout\n # setting_layout.addWidget(frame,0,0,4,4)\n setting_layout.addWidget(proj_label, 0, 0)\n setting_layout.addWidget(proj_comboBox, 1, 0)\n setting_layout.addWidget(sensor_label, 0, 1)\n setting_layout.addWidget(sensor_comboBox, 1, 1)\n setting_layout.addWidget(inp_label, 0, 2)\n setting_layout.addWidget(inp_input_name, 1, 2)\n setting_layout.addWidget(op_label, 0, 3)\n setting_layout.addWidget(op_comboBox, 1, 3)\n setting_layout.addWidget(save_to_btn, 2, 0)\n setting_layout.addWidget(dir_textbox, 2, 1, 0, 3)\n setting_layout.addWidget(file_load_btn, 3, 0)\n setting_layout.addWidget(save_file_btn, 3, 1)\n\n setting_layout.setContentsMargins(8, 8, 8, 8) # Makes a margin to the layout\n\n # Add the layout to the main layout\n self.layout.addLayout(\n setting_layout,\n self.proj_posy,\n self.proj_posx,\n self.proj_ysize,\n self.proj_xsize,\n )", "def setupOptionsFrame(self):\n\n # CPU / CUDA options\n self.device.addItems([\"cuda\", \"cpu\"])\n self.scriptedEffect.addLabeledOptionsWidget(\"Device:\", self.device)\n\n self.modality.addItems([\"CT\", \"MRI\"])\n self.scriptedEffect.addLabeledOptionsWidget(\"Modality:\", self.modality)\n\n # Add ROI options\n self.roiSelector.nodeTypes = ['vtkMRMLMarkupsROINode']\n self.roiSelector.noneEnabled = True\n self.roiSelector.setMRMLScene(slicer.mrmlScene)\n self.scriptedEffect.addLabeledOptionsWidget(\"ROI: \", self.roiSelector)\n\n # Toggle ROI visibility button\n toggleROIVisibilityButton = qt.QPushButton(\"Toggle ROI Visibility\")\n toggleROIVisibilityButton.objectName = self.__class__.__name__ + 'ToggleROIVisibility'\n toggleROIVisibilityButton.setToolTip(\"Toggle selected ROI visibility\")\n toggleROIVisibilityButton.connect('clicked()', self.toggleROIVisibility)\n self.scriptedEffect.addOptionsWidget(toggleROIVisibilityButton)\n\n # Apply button\n applyButton = qt.QPushButton(\"Apply\")\n applyButton.objectName = self.__class__.__name__ + 'Apply'\n 
applyButton.setToolTip(\"Extract liver from input volume\")\n applyButton.connect('clicked()', self.onApply)\n self.scriptedEffect.addOptionsWidget(applyButton)", "def advanced_gui(self, master):\r\n\r\n # Turn off polling function\r\n self.newProj.isQuickGenerate = False\r\n self._retLoop = None\r\n\r\n #Remove active widgets from the screen and then clear widget list out\r\n if self.widgetList:\r\n for w in self.widgetList:\r\n w.grid_remove()\r\n del self.widgetList[:]\r\n\r\n osName = platform.system()\r\n\r\n if osName != 'Darwin':\r\n labelFont = 'Arial 9 bold'\r\n else:\r\n labelFont = 'bold'\r\n\r\n ### Widget 0 is a label for padding column 0\r\n self.widgetList.append(Label(self, text=''))\r\n self.widgetList[0].grid(row=0, column=0, sticky=E+W, padx=5)\r\n\r\n ### Widget 1 is a button to return to simple menu\r\n self.widgetList.append(Button(self, text='Return', \\\r\n command=lambda: self.launch_basic(master)))\r\n self.widgetList[1].grid(row=16, column=1, sticky=W)\r\n\r\n ### Widget 2 is a label for the project name text field\r\n self.widgetList.append(Label(self, text='Project Name: ', font=labelFont))\r\n self.widgetList[2].grid(row=0, column=1, sticky=W, pady=(5, 0))\r\n\r\n ### Widget 3 is the text field for project name entry\r\n self.widgetList.append(Entry(self, width=25))\r\n self.widgetList[3].insert(0, self.newProj.name)\r\n self.widgetList[3].grid(row=1, column=1, sticky=W, pady=(0, 0))\r\n\r\n ### Widget 4 is the label for project type\r\n self.widgetList.append(Label(self, text='Project Type:', font=labelFont))\r\n self.widgetList[4].grid(row=2, column=1, sticky=W, pady=(5, 0))\r\n\r\n ### Widget 5 is a radio button for configuring a new project\r\n self.widgetList.append(Radiobutton(self, text='New', variable=self.advancedProjType, \\\r\n value=0))\r\n self.widgetList[5].grid(row=3, column=1, sticky=W)\r\n\r\n ### Widget 6 is a radio button for configuring a cloned project\r\n self.widgetList.append(Radiobutton(self, text='Clone', variable=self.advancedProjType, \\\r\n value=1))\r\n self.widgetList[6].grid(row=3, column=1, sticky=E)\r\n\r\n ### Widget 7 is the label for the device drop down menu\r\n self.widgetList.append(Label(self, text='Device:', font=labelFont))\r\n self.widgetList[7].grid(row=0, column=3, sticky=W, pady=(5, 0))\r\n\r\n ### Widget 8 is te drop down menu for the devices\r\n self.pop_adv_devices()\r\n #self.widgetList.append(OptionMenu(self, userDev, *self.localSDK.devList))\r\n self.widgetList.append(Combobox(self, state='readonly'))\r\n self.widgetList[8].config(textvariable=self.advDevSelect)\r\n self.widgetList[8]['values'] = self.localSDK.devList\r\n self.widgetList[8].grid(row=1, column=3, sticky=W, pady=(0, 0))\r\n try:\r\n self.newProj.add_board(self.currBoard, self.localSDK.brdList)\r\n self.widgetList[8].current(self.localSDK.devList.index(self.newProj.device[0]))\r\n except IOError: ## Catch the case where the user hasn't selected anything\r\n self.widgetList[8].current(0)\r\n except ValueError: ## Catch the case where there is no device given in manifest\r\n self.widgetList[8].current(0)\r\n\r\n ### Widget 9 is a label for the library configuration radio buttons\r\n libraryConfigurationWidget = Label(self, text='Library Configuration:', font=labelFont)\r\n self.widgetList.append(libraryConfigurationWidget)\r\n self.widgetList[9].grid(row=4, column=1, sticky=W, columnspan=2)\r\n\r\n ### Widget 10 is a radio button for the library configuration\r\n halOnlyWidget = Radiobutton(self, text='HAL only', 
variable=self.advancedLibType,value=0)\r\n self.widgetList.append(halOnlyWidget)\r\n self.widgetList[10].grid(row=6, column=1, sticky=W)\r\n\r\n ### Widget 11 is a radio button for the library configuration\r\n platformWidget = Radiobutton(self, text='Platform', variable=self.advancedLibType, value=1)\r\n self.widgetList.append(platformWidget)\r\n self.widgetList[11].grid(row=5, column=1, sticky=W)\r\n\r\n # Set default to select platform library\r\n self.advancedLibType.set(1)\r\n \r\n # in new version there is not hal vs. platform\r\n if self.localSDK.isNewVersion():\r\n libraryConfigurationWidget.grid_remove()\r\n halOnlyWidget.grid_remove()\r\n platformWidget.grid_remove()\r\n\r\n ### Widget 12 is a label for the library configuration radio buttons\r\n self.widgetList.append(Label(self, text='RTOS Configuration:', font=labelFont))\r\n self.widgetList[12].grid(row=7, column=1, sticky=W, columnspan=2)\r\n\r\n ### Widget 13 is a radio button for the library configuration\r\n self.widgetList.append(Radiobutton(self, text='None', variable=self.advancedRtosType, \\\r\n value=0))\r\n self.widgetList[13].grid(row=8, column=1, sticky=W)\r\n\r\n ### Widget 14 is a radio button for the library configuration\r\n mqxWidget = Radiobutton(self, text='MQX', variable=self.advancedRtosType, value=1)\r\n self.widgetList.append(mqxWidget)\r\n mqxWidget.grid(row=9, column=1, sticky=W)\r\n\r\n # in KSDK 2.0 and newer version there is no MQX support so the MQX option has to be removed\r\n # in some older version of KSDK (1.2, 1.3) MQX support is missing so this option has to be removed\r\n if not self.localSDK.isMQXSupported():\r\n mqxWidget.grid_remove()\r\n\r\n\r\n ### Widget 15 is a radio button for the library configuration\r\n freeRTOSWidget = Radiobutton(self, text='FreeRTOS', variable=self.advancedRtosType, value=2)\r\n self.widgetList.append(freeRTOSWidget)\r\n freeRTOSWidget.grid(row=10, column=1, sticky=W)\r\n # if FreeRTOS is not supported in KSDK option should be removed\r\n if not self.localSDK.isFreeRTOSSupported():\r\n freeRTOSWidget.grid_remove()\r\n\r\n ### Widget 16 is a radio button for the library configuration\r\n uCOSIIWidget = Radiobutton(self, text='uC/OS-II', variable=self.advancedRtosType, value=3)\r\n self.widgetList.append(uCOSIIWidget)\r\n uCOSIIWidget.grid(row=11, column=1, sticky=W)\r\n if not self.localSDK.isuCOSIISupported():\r\n uCOSIIWidget.grid_remove()\r\n\r\n ### Widget 17 is a radio button for the library configuration\r\n uCOSIIIWidget = Radiobutton(self, text='uC/OS-III', variable=self.advancedRtosType, value=4)\r\n self.widgetList.append(uCOSIIIWidget)\r\n uCOSIIIWidget.grid(row=12, column=1, sticky=W)\r\n if not self.localSDK.isuCOSIIISupported():\r\n uCOSIIIWidget.grid_remove()\r\n\r\n ### Widget 18 is a label for the toolchain check boxes\r\n self.widgetList.append(Label(self, text='Choose Supported Toolchain(s):', font=labelFont))\r\n self.widgetList[18].grid(row=4, column=3, sticky=W, columnspan=2)\r\n\r\n ### Widget 19 is a check box for KDS\r\n kdsOptionWidget = Checkbutton(self, text=kSdk.KDSname, variable=self.advIsKds)\r\n self.widgetList.append(kdsOptionWidget)\r\n self.widgetList[19].grid(row=5, column=3, sticky=W, columnspan=2)\r\n\r\n ### Widget 20 is a check box for IAR\r\n iarOptionWidget = Checkbutton(self, text=kSdk.IARname, variable=self.advIsIar)\r\n self.widgetList.append(iarOptionWidget)\r\n self.widgetList[20].grid(row=6, column=3, sticky=W, columnspan=2)\r\n\r\n ### Widget 21 is a check box for MDK\r\n keilMdkOptionWidget = Checkbutton(self, 
text=kSdk.keilMDKARMname, variable=self.advIsMdk)\r\n self.widgetList.append(keilMdkOptionWidget)\r\n self.widgetList[21].grid(row=7, column=3, sticky=W, columnspan=2)\r\n\r\n ### Widget 22 is a check box for ATS\r\n atollicOptionWidget = Checkbutton(self, text=kSdk.AtollicStudio, variable=self.advIsAts)\r\n self.widgetList.append(atollicOptionWidget)\r\n self.widgetList[22].grid(row=8, column=3, sticky=W, columnspan=2)\r\n\r\n if not self.localSDK.isToolchainTypeSupported(ToolchainType.KinetisDesignStudio):\r\n kdsOptionWidget.grid_remove()\r\n if not self.localSDK.isToolchainTypeSupported(ToolchainType.IARname):\r\n iarOptionWidget.grid_remove()\r\n if not self.localSDK.isToolchainTypeSupported(ToolchainType.KeilMDK):\r\n keilMdkOptionWidget.grid_remove()\r\n if not self.localSDK.isToolchainTypeSupported(ToolchainType.AtollicStudio):\r\n atollicOptionWidget.grid_remove()\r\n\r\n ### Widget 23 is a check box for GCC\r\n self.widgetList.append(Checkbutton(self, text='GCC Command Line', variable=self.advIsGcc))\r\n self.widgetList[23].grid(row=9, column=3, sticky=W, columnspan=2)\r\n self.widgetList[23].state([\"disabled\"])\r\n self.widgetList[23].grid_remove()\r\n\r\n ### Widget 24 is a label for adding BSP\r\n #self.widgetList.append(Label(self, text='USB and Board Support:', font=labelFont))\r\n boardSupportLabel = Label(self, text='Board Support:', font=labelFont)\r\n self.widgetList.append(boardSupportLabel)\r\n self.widgetList[24].grid(row=10, column=3, sticky=W, columnspan=2, pady=(5, 0))\r\n\r\n ### Widget 25 is a checkbox for adding BSP\r\n includeBSPFilesOption = Checkbutton(self, text='Include BSP files', variable=self.advIsBsp)\r\n self.widgetList.append(includeBSPFilesOption)\r\n self.widgetList[25].grid(row=11, column=3, sticky=W, columnspan=2)\r\n self.widgetList[25].state([\"!disabled\"])\r\n \r\n if self.localSDK.isNewVersion():\r\n boardSupportLabel.grid_remove()\r\n includeBSPFilesOption.grid_remove()\r\n\r\n ### Widget 26 is a label for the output path entry\r\n self.widgetList.append(Label(self, text='Project Parent Directory:', \\\r\n font=labelFont))\r\n self.widgetList[26].grid(row=13, column=1, sticky=W, columnspan=4, pady=(5, 0))\r\n\r\n ### Widget 27 is a text entry for the output path\r\n if self.newProj.osType == 'Windows':\r\n entryWidth = int(77.0 / WIN_SCALE)\r\n self.widgetList.append(Entry(self, width=entryWidth))\r\n else:\r\n self.widgetList.append(Entry(self, width=71))\r\n self.newProj.workSpace = self.newProj.sdkPath \r\n if self.newProj.osType == 'Windows':\r\n self.newProj.workSpace = kT.string_replace(self.newProj.workSpace, '/', '\\\\')\r\n self.widgetList[27].insert(0, self.newProj.workSpace)\r\n self.widgetList[27].grid(row=14, column=1, sticky=W, columnspan=4)\r\n\r\n ### Widget 28 is a button for browsing to a directory\r\n self.dir_opt['title'] = 'Select the directory you want the project to be generated into. 
'\r\n self.widgetList.append(Button(self, text='Browse', \\\r\n command=lambda: self.proj_set_directory(False, 27)))\r\n if self.newProj.osType == 'Windows':\r\n self.widgetList[28].grid(row=14, column=5, sticky=E)\r\n else:\r\n self.widgetList[28].grid(row=14, column=4, sticky=E)\r\n\r\n self.widgetList[28].state([\"disabled\"])\r\n\r\n ### Widget 29 is a checkbox for making a standalone project\r\n self.widgetList.append(Checkbutton(self, text='Generate standalone project', \\\r\n variable=self.advIsStandalone))\r\n self.widgetList[29].grid(row=15, column=1, sticky=W, columnspan=2, pady=5)\r\n\r\n ### Widget 30 is a help button\r\n self.widgetList.append(Button(self, text='Help', \\\r\n command=lambda: self.advanced_help(master, (Constants.ADV_HELP if self.localSDK.isNewVersion() else ADV_HELP))))\r\n if self.newProj.osType == 'Windows':\r\n self.widgetList[30].grid(row=1, column=5, sticky=E, pady=(0, 0))\r\n else:\r\n self.widgetList[30].grid(row=1, column=4, sticky=E, pady=(0, 0))\r\n #self.widgetList[30].state([\"disabled\"])\r\n\r\n ### Widget 31 is a button to generate the project\r\n if self.newProj.osType == 'Windows':\r\n style = Style()\r\n style.configure(\"Bold.TButton\", font='system 8 bold')\r\n self.widgetList.append(Button(self, text='Advanced Generate!', style=\"Bold.TButton\", \\\r\n command=lambda: self.package_select(master)))\r\n self.widgetList[31].grid(row=16, column=4, sticky=E+W+N+S, rowspan=2, columnspan=2)\r\n else:\r\n self.widgetList.append(Button(self, text='Advanced Generate!',\\\r\n command=lambda: self.package_select(master)))\r\n self.widgetList[31].grid(row=16, column=3, sticky=E+N+S, rowspan=2, columnspan=2)\r\n self.widgetList[31].state([\"!disabled\"])\r\n\r\n ### Widget 32 is a label for padding row 13\r\n self.widgetList.append(Label(self, text='', font=labelFont))\r\n self.widgetList[32].grid(row=0, column=6, sticky=E+W, padx=5)\r\n\r\n ### Widget 33 is a label for explaining the return button\r\n self.widgetList.append(Label(self, text='Click here to return to previous menu.'))\r\n self.widgetList[33].grid(row=17, column=1, columnspan=3, sticky=W)\r\n\r\n ### Widget 34 is a checkbox for adding USB\r\n self.widgetList.append(Checkbutton(self, text='Include USB', variable=self.advIsUsb))\r\n self.widgetList[34].grid(row=12, column=3, sticky=W, columnspan=2)\r\n self.widgetList[34].state([\"disabled\"])\r\n self.widgetList[34].grid_remove()\r\n\r\n ### Widget 35 is a radio button for configuring a new project\r\n self.widgetList.append(Radiobutton(self, text='Device', variable=self.advancedDevType, \\\r\n value=0))\r\n self.widgetList[35].grid(row=3, column=3, sticky=W)\r\n\r\n ### Widget 36 is a radio button for configuring a cloned project\r\n self.widgetList.append(Radiobutton(self, text='Board', variable=self.advancedDevType, \\\r\n value=1))\r\n self.widgetList[36].grid(row=3, column=3, sticky=E)\r\n\r\n ### Widget 37 is the label for project type\r\n self.widgetList.append(Label(self, text='Device or Board:', font=labelFont))\r\n self.widgetList[37].grid(row=2, column=3, sticky=W, pady=(5, 0))\r\n\r\n self.poll_advanced()\r\n \r\n #update project to set correct supported tools, path etc.\r\n self.update_proj()\r\n return", "def set_up_menu(self):\n self.app.title = \"work\"\n self.timer.start()", "def open_preferences(self, event):\n editPrefFrame = Single_pref(parent=self, ID=997)\n editPrefFrame.Centre()\n editPrefFrame.Show()\n editPrefFrame.ShowModal()\n editPrefFrame.Destroy()", "def common_prefs_begin_edit(self):\n\t\treturn 
Job(SDK.PrlSrv_CommonPrefsBeginEdit(self.handle)[0])", "def onLoad (self):\n #productive #onButton\n profprint()\n self.fileDialog = qt.QFileDialog(self.parent)\n self.fileDialog.setDirectory(slicer.modules.needlefinder.path.replace(\"NeedleFinder.py\",\"Config\"))\n self.fileDialog.options = self.fileDialog.DontUseNativeDialog\n self.fileDialog.acceptMode = self.fileDialog.AcceptOpen\n self.fileDialog.defaultSuffix = \"cfg\"\n self.fileDialog.setNameFilter(\"Configuration File (*.cfg)\")\n self.fileDialog.connect(\"fileSelected(QString)\", self.onLoadFileSelected)\n self.fileDialog.show()", "def open_preferences(self):\n result = OptionsDialog.get_options(self.settings,\n TcamView.has_dutils())\n\n if result:\n log.info(\"Saving settings\")\n else:\n log.info(\"Settings not saved\")\n\n if self.view:\n self.view.set_settings(self.settings)\n\n self.get_focus()" ]
[ "0.5803222", "0.5764439", "0.5632621", "0.5579987", "0.55609196", "0.5531322", "0.5498198", "0.5456681", "0.5440068", "0.54032004", "0.54000485", "0.5395688", "0.5395272", "0.53947246", "0.5367619", "0.5364008", "0.53474754", "0.5346228", "0.53405243", "0.5282741", "0.5273741", "0.525581", "0.5250372", "0.5211561", "0.5211416", "0.51916945", "0.5135263", "0.5097685", "0.5094813", "0.5083552" ]
0.6004767
0
Returns the path to DeadlineCommand.
def GetDeadlineCommand( useDeadlineBg=False ): deadlineBin = "" try: deadlineBin = os.environ[ 'DEADLINE_PATH' ] except KeyError: # if the error is a key error it means that DEADLINE_PATH is not set. however Deadline command may be in the PATH or on OSX it could be in the file /Users/Shared/Thinkbox/DEADLINE_PATH pass # On OSX, we look for the DEADLINE_PATH file if the environment variable does not exist. if deadlineBin == "" and os.path.exists( "/Users/Shared/Thinkbox/DEADLINE_PATH" ): with io.open( "/Users/Shared/Thinkbox/DEADLINE_PATH", encoding="utf-8" ) as f: deadlineBin = f.read().strip() exeName = "deadlinecommand" if useDeadlineBg: exeName += "bg" deadlineCommand = os.path.join( deadlineBin, exeName ) return deadlineCommand
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_deadline_command_path():\n\n deadline_bin = os.environ.get('DEADLINE_PATH', '')\n\n # On Linux, the Deadline Client installer creates a system-wide script to set the DEADLINE_PATH environment\n # variable. Cloud-init does not load system environment variables. Cherry-pick the\n # environment variable installed by the Deadline Client installer.\n if not deadline_bin and os.path.exists(DL_ENV_SCRIPT_PATH_LINUX):\n print(f'using environement script at \"{DL_ENV_SCRIPT_PATH_LINUX}\"...')\n with io.open(DL_ENV_SCRIPT_PATH_LINUX, 'r', encoding='utf8') as env_script:\n env_script_contents = env_script.read()\n dl_path_match = DL_ENV_SCRIPT_PATH_RE.search(env_script_contents)\n if dl_path_match:\n deadline_bin = dl_path_match.group('DeadlineDir')\n\n # On OSX, we look for the DEADLINE_PATH file if the environment variable does not exist.\n if deadline_bin == \"\" and os.path.exists(DL_PATH_FILE_MACOS):\n print(f'using MacOS Deadline path file at \"{DL_PATH_FILE_MACOS}\"...')\n with io.open(DL_PATH_FILE_MACOS, 'r', encoding='utf8') as f:\n deadline_bin = f.read().strip()\n\n if not deadline_bin:\n raise ValueError('Could not determine deadline path')\n\n deadline_command = os.path.join(deadline_bin, \"deadlinecommand\")\n\n return deadline_command", "def command(self) -> str:\n cmd = ''\n if self.argv:\n cmd = self.argv[0]\n return cmd", "def rel_command(self):\n return self.command.lstrip('/')", "def getPath(self):\r\n\t\treturn self.pathToGoal", "def get_command(self):\n return 'date && cd ' + \\\n os.path.join(ChronosJob.cloud_path_dict[self.cloud], \\\n 'userfiles', self.job_dir_relative_path) + \\\n ' && python3 /home/src/gene_prioritization.py ' + \\\n ' -run_directory ./' + \\\n ' -run_file run.yml' + \\\n ' && date;'", "def _get_base_command(self):\n import inspect\n import os\n # get current script directory path. We are in /an/unknown/path/kalliope/core\n cur_script_directory = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\n # get parent dir. 
Now we are in /an/unknown/path/kalliope\n parent_dir = os.path.normpath(cur_script_directory + os.sep + os.pardir)\n # we add the kalliope.py file name\n real_entry_point_path = parent_dir + os.sep + KALLIOPE_ENTRY_POINT_SCRIPT\n # We test that the file exist before return it\n logger.debug(\"Real Kalliope.py path: %s\" % real_entry_point_path)\n if os.path.isfile(real_entry_point_path):\n crontab_cmd = \"python %s start --brain-file %s --run-synapse \" % (real_entry_point_path,\n self.brain.brain_file)\n return crontab_cmd\n raise IOError(\"kalliope.py file not found\")", "def cmdpath(self):\n return os.system('pwd')", "def get_command(pid):", "def get_command_path(command):\n def excutable(command_path):\n return os.path.isfile(command_path) and os.access(command_path, os.X_OK)\n\n for path in os.environ[\"PATH\"].split(os.pathsep):\n command_path = os.path.join(path, command)\n if excutable(command_path):\n return command_path\n\n return None", "def pdb_path(scope=\"session\"):\n return join(dirname(__file__), pardir, \"new_data\", \"pdb\")", "def scriptpath(self, code) -> str:\n return ''", "def PATH(self) -> str:\n return \".\".join(self.SEQ)", "def get_command(self):\n return self.command", "def real_path(self):\n\t\treturn self.args[0]", "def getCommand(self):\n return self.__cmd", "def getCommandLine():\n import sys, os\n cmdline = os.path.abspath(sys.argv[0])\n for elem in sys.argv[1:]:\n cmdline += ' ' + ecohydrolib.util.getAbsolutePathOfItem(elem)\n return cmdline", "def command_name(self):\n return None", "def _get_base_command(self):\r\n cd_command = ''.join(['cd ', str(self.WorkingDir), ';'])\r\n r_command = self._commandline_join(\r\n ['R', '--slave', '--no-restore', '--args'])\r\n source_dir_arg = self._commandline_join(['--source_dir',\r\n self._get_R_script_dir()])\r\n script_arguments = self._commandline_join(\r\n [self.Parameters[k] for k in self._parameters])\r\n\r\n command_parts = [\r\n cd_command, r_command, source_dir_arg,\r\n script_arguments, '<', self._get_R_script_path()]\r\n return self._commandline_join(command_parts).strip()", "def get_cmd(self):\n return self.cmd", "def command():\n return _config.command", "def _get_base_command(self):\r\n cd_command = ''.join(['cd ', str(self.WorkingDir), ';'])\r\n r_command = self._commandline_join(\r\n ['R', '--slave', '--no-restore', '--args'])\r\n source_dir_arg = self._commandline_join(['--source_dir',\r\n self._get_R_script_dir()])\r\n\r\n script_arguments = self._commandline_join(\r\n [self.Parameters[k] for k in self._parameters])\r\n\r\n command_parts = [\r\n cd_command, r_command, source_dir_arg,\r\n script_arguments, '<', self._get_R_script_path()]\r\n return self._commandline_join(command_parts).strip()", "def execution_path(self, filename):\n return os.path.join(os.path.dirname(inspect.getfile(sys._getframe(0))), filename)", "def get_path() -> str:\n return os.path.dirname(os.path.realpath(__file__))", "def runner_path():\n git_base = os.popen('git rev-parse --show-toplevel').read().strip()\n return os.path.join(git_base, RUNNER_SCRIPT_BASENAME)", "def path(self):\n return pjoin(self._dj._jobsdir, self._status, self.full_name())", "def executable_path(self):\n prepend = self._active_environment(ActiveEnvironment).prepend\n return prepend.get(\"PATH\", \"\")", "def get_vernissagecmd_path():\n return vernissagecmd_path", "def get_path_arg(self):\n # The bluespec compiler automatically adds build_dir to the front of the path, but bluetcl does not,\n # so we add it manually and get a warning from the bluespec 
compiler about redundant folders in the path\n return ['-p', ':'.join([self.build_dir] + self.bsv_path + BSVProject.default_paths)]", "def gui_path(self) -> str:\n return self.join_path(self.gui_concept.arg)", "def get_executable(self) -> str:\n ..." ]
[ "0.753901", "0.6118858", "0.6027574", "0.58908194", "0.5830067", "0.5762068", "0.570046", "0.5663638", "0.56532186", "0.56523234", "0.5645256", "0.5634848", "0.56305516", "0.5628725", "0.5616503", "0.5605415", "0.5578746", "0.55568534", "0.55538136", "0.5526211", "0.5518902", "0.55159444", "0.5501617", "0.5495646", "0.54944587", "0.5480896", "0.5456078", "0.5413415", "0.53995174", "0.53837854" ]
0.73081684
1
Creates a utf8 encoded file with each argument in arguments on a separate line.
def CreateArgFile( arguments, tmpDir ): tmpFile = os.path.join( tmpDir, "args.txt" ) with io.open( tmpFile, 'w', encoding="utf-8-sig" ) as fileHandle: fileHandle.write( "\n".join( arguments ) ) return tmpFile
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _make_i18n_data_file(cls, filename, encoding):\n cls.cluster.fs.setuser(cls.cluster.superuser)\n f = cls.cluster.fs.open(filename, \"w\")\n for x in range(256):\n f.write(\"%d\\t%s\\n\" % (x, chr(x).encode(encoding)))\n f.close()", "def output_file(data, filename):\n with open(filename + '.txt', 'w+') as f_out:\n for char in data:\n f_out.write('U+' + str(hex(ord(char)))[2:] + '\\n')", "def dump_args(args, outdir='.'):\n with open( Path(outdir)/'args.txt', 'w' ) as file:\n for k, v in args.items():\n file.write('{}: {}\\n'.format(k, v))", "def create_usdzconvert_arguments(args: list) -> list:\n usdz_converter_path = current_app.config.get('USDZ_CONVERTER_PATH') / \\\n current_app.config.get('USDZ_CONVERTER_SCRIPT_PATH')\n\n arguments = [_get_converter_interpreter_arg(),\n usdz_converter_path.resolve().as_posix()]\n\n for arg in args:\n arguments.append(arg)\n\n return arguments", "def writeFile( str_, *args ):\n filePath = path.join( *args )\n with open( filePath, 'w' ) as fd:\n fd.write(str_)", "def _printstr(self, args):\n s = \"\\n\"\n\n for arg in args:\n #s += arg.encode('utf-8', 'pout.replace')\n s += arg\n\n return s", "def encoding():\n\n return render_template(\"UTF-8-demo.txt\")", "def main():\n p = argparse.ArgumentParser(description='Convert images into unicode')\n p.add_argument('image', metavar='<path>', type=str,\n help='path to the file, use - for stdin')\n p.add_argument('--no-x256', action='store_false', dest='x256', default=True,\n help='prints with x256 unicode coloring')\n p.add_argument('--char-set', metavar='<name>', default='default',\n help='prints with character set (e.g. windows)')\n args = p.parse_args()\n print_image_as_unicode(args.image, char_set=CHAR_SETS[args.char_set],\n x256=args.x256)", "def create_files(filename_list, encoding):\n for filename in filename_list:\n codecs.open(filename, 'w', encoding).close()", "def add_args(parser):\n add_encoder_args(parser)\n add_decoder_args(parser)", "def hexdump(args=None):\n args = parser.parse_args(args)\n with LogSetup(args):\n contents = args.file.read()\n args.file.close()\n dump(contents, width=args.width)", "def encode_arg(arg):\n arg_utf8 = utf8(arg)\n\n return ELEM_SEP.join([str(len(str(arg_utf8))), str(arg_utf8)])", "def transform_file_to_utf_8_from(file_path, in_encoding=\"latin1\", out_file_name=\"\"):\n in_file = codecs.open(file_path, encoding=in_encoding)\n in_lines = in_file.readlines()\n if not out_file_name:\n out_file_name = file_path.replace(\".txt\", \".utf8.txt\")\n out_file = codecs.open(out_file_name, \"w+\")\n for line in in_lines:\n out_file.write(line)\n out_file.close()", "def rc_file_text(rc_file_args):\n return yaml.dump(rc_file_args, Dumper=Dumper)", "def exportBulletFile(*argv):", "def encoding(options):\n pass", "def main(args):\n # Results: print to console and also write to output file\n pass", "def write_file(*args, **kwargs): # real signature unknown\n pass", "def unicode_open(filename, *args, **kwargs):\n kwargs['encoding'] = \"utf-8\"\n if PY3:\n return open(filename, *args, **kwargs)\n return codecs.open(filename, *args, **kwargs)", "def write_file(filename=\"\", text=\"\"):\n with open(filename, mode='w', encoding='utf-8') as a_file:\n i = 0\n for char in text:\n a_file.write(char)\n i += 1\n return i", "def writeToFile(outputFile, unicode_text):\n fp = outputFile\n # workaround problem if caller gives byte string instead\n unicode_text = safe_unicode(unicode_text)\n utf8_text = unicode_text.encode('utf-8')\n fp.write(utf8_text)\n #fp.close()", "def 
test_utf8_cp1252_char_file(self):\n\t\tmain.Main(['input/utf8.txt']).run()\n\t\tself.assertTrue(filecmp.cmp('output/output.csv', 'output/utf8.csv'))", "def convert(args):\n if args.unique:\n output_filepath = '{}.as.unique.wordpairs.txt'.format(\n args.data.split('.txt')[0])\n else:\n output_filepath = '{}.as.wordpairs.txt'.format(\n args.data.split('.txt')[0])\n pairs = []\n logger.info('Saving output to {}'.format(output_filepath))\n with open(args.data, 'r', encoding='utf-8') as input_stream:\n for line in input_stream:\n line = line.strip()\n seq = line.split('\\t')\n if len(seq[0].split()) == 1:\n pairs.append((seq[0], seq[1]))\n else:\n xtokens = seq[0].split()\n ytokens = seq[1].split()\n if len(xtokens) != len(ytokens):\n raise Exception(\n 'Invalid input sequences: should contain the same '\n 'number of tokens: \\n {} \\n {}'.format(seq[0], seq[1]))\n for xtoken, ytoken in zip(xtokens, ytokens):\n pairs.append((xtoken, ytoken))\n if args.unique:\n pairs = set(pairs)\n with open(output_filepath, 'w', encoding='utf-8') as output_str:\n for pair in sorted(pairs):\n print('{}\\t{}'.format(pair[0], pair[1]), file=output_str)\n # with open(args.data, 'r', encoding='utf-8') as input_stream:\n # with open(output_filepath, 'w', encoding='utf-8') as output_str:\n # for line in input_stream:\n # line = line.strip()\n # if line:\n # seq = line.split('\\t')\n # xtokens = seq[0].split()\n # ytokens = seq[1].split()\n # if len(xtokens) != len(ytokens):\n # raise Exception(\n # 'Invalid input sequences: should contain the same '\n # 'number of tokens: \\n {} \\n {}'.format(seq[0], seq[1]))\n # print('{}\\t{}'.format(' '.join(xtokens), ' '.join(ytokens)),\n # file=output_str)", "def write_file(filename=\"\", text=\"\"):\n with open(filename, mode='w', encoding=\"utf-8\") as myFile:\n chars_written = myFile.write(text)\n return chars_written", "def lines_to_file(file_name: str, write_dir: str, lines: Sequence[str]):\n with open(os.path.join(write_dir, file_name), \"w\", encoding=\"utf-8\") as f:\n for l in lines:\n f.write(f\"{l}\\n\")", "def _glyphs2ufo_entry_point():\n args = sys.argv[1:]\n args.insert(0, \"glyphs2ufo\")\n return main(args)", "def main():\n # We could test ?><:*|\"' and chr(1 to 32) on linux.\n # We could test ?<>*|\"' on OSX.\n # On Windows, skip the Chinese characters for now as the log parsing code is\n # using the current code page to generate the log.\n if sys.platform == 'win32':\n filename = u'foo, bar, ~p#o,,ué^t%t .txt'\n else:\n filename = u'foo, bar, ~p#o,,ué^t%t 和平.txt'\n with open(filename, 'w') as f:\n f.write('Bingo!')\n return 0", "def _ufo2glyphs_entry_point():\n args = sys.argv[1:]\n args.insert(0, \"ufo2glyphs\")\n return main(args)", "def report_args(args):\n\n print (\"SETTINGS:\\n\")\n print (\"-f : Output data file >> {:s}\".format(args.file))\n print (\"-l : Length of data series >> {:d}\".format(args.length))\n print (\"-p : Process >> {:s}\".format(args.process))\n print (\"-d : Ouput diretory >> {:s}\".format(args.directory))\n print (\"\\n\")", "def arg_maker(self, args):\n added_lines = ''\n arg_line = ''\n for arg in args:\n if arg == 'user':\n added_lines += '\\n\\tuser = self.username'\n arg_line += ' user,'\n if arg == 'cwd':\n added_lines += '\\n\\tcwd = self.fs.cwd'\n arg_line += ' cwd,'\n if arg == 'table':\n added_lines += '\\n\\ttable = self.users_database[1]'\n arg_line += ' table,'\n if arg == 'database':\n added_lines += '\\n\\tdatabase = self.users_database[0]'\n arg_line += ' database,'\n if arg == 'args':\n arg_line += ' line,'\n 
arg_line = arg_line[1:-1]\n return added_lines, arg_line" ]
[ "0.6052145", "0.57538974", "0.567268", "0.55836433", "0.55042565", "0.5475151", "0.54015994", "0.5380762", "0.5356525", "0.5350646", "0.5287505", "0.5250849", "0.52459705", "0.5193831", "0.51840913", "0.51814663", "0.5180244", "0.5170334", "0.51647687", "0.5129749", "0.50674057", "0.50642794", "0.5041777", "0.5040224", "0.50371057", "0.5030606", "0.5024481", "0.5014219", "0.5012535", "0.49990484" ]
0.7158849
0
Run DeadlineCommand with the specified arguments returning the standard out
def CallDeadlineCommand(arguments, hideWindow=True, useArgFile=False, useDeadlineBg=False, raiseOnExitCode=False): deadlineCommand = GetDeadlineCommand( useDeadlineBg ) tmpdir = None if useArgFile or useDeadlineBg: tmpdir = tempfile.mkdtemp() if useDeadlineBg: arguments = [ "-outputfiles", os.path.join( tmpdir, "dlout.txt" ), os.path.join( tmpdir, "dlexit.txt" ) ] + arguments startupinfo = None creationflags = 0 if os.name == 'nt': if hideWindow: # Python 2.6 has subprocess.STARTF_USESHOWWINDOW, and Python 2.7 has subprocess._subprocess.STARTF_USESHOWWINDOW, so check for both. if hasattr( subprocess, '_subprocess' ) and hasattr( subprocess._subprocess, 'STARTF_USESHOWWINDOW' ): startupinfo = subprocess.STARTUPINFO() startupinfo.dwFlags |= subprocess._subprocess.STARTF_USESHOWWINDOW elif hasattr( subprocess, 'STARTF_USESHOWWINDOW' ): startupinfo = subprocess.STARTUPINFO() startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW else: # still show top-level windows, but don't show a console window CREATE_NO_WINDOW = 0x08000000 # MSDN process creation flag creationflags = CREATE_NO_WINDOW if useArgFile: arguments = [ CreateArgFile( arguments, tmpdir ) ] arguments.insert( 0, deadlineCommand ) # Specifying PIPE for all handles to workaround a Python bug on Windows. The unused handles are then closed immediatley afterwards. proc = subprocess.Popen( arguments, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, startupinfo=startupinfo, creationflags=creationflags ) output, errors = proc.communicate() if raiseOnExitCode and proc.returncode != 0: try: # The quote function was moved to shutil in python 3 from shutil import quote as shell_quote except ImportError: # In python 2, quote lived in the pipes module from pipes import quote as shell_quote cmd = ' '.join([shell_quote(arg) for arg in arguments]) raise subprocess.CalledProcessError(proc.returncode, cmd, output) if useDeadlineBg: with io.open( os.path.join( tmpdir, "dlout.txt" ), 'r', encoding='utf-8' ) as fileHandle: output = fileHandle.read() if tmpdir: try: shutil.rmtree( tmpdir ) except: print( 'Failed to remove temp directory: "%s"' % tmpdir ) return output.strip()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _call_deadline_command_raw(self, arguments):\n # make a copy so we don't mutate the caller's reference\n arguments = list(arguments)\n arguments.insert(0, self._deadline_command_path)\n try:\n proc = subprocess.Popen(\n arguments,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE\n )\n except:\n raise Exception('Failed to call Deadline.')\n\n output, errors = proc.communicate()\n if proc.returncode != 0:\n raise ValueError('DeadlineCommandError: \\n%s\\n%s' % (output, errors))\n return output.decode('utf8')", "def run(*argv):\n print(*argv, file=sys.stderr)\n subprocess.check_call(argv, stdout=sys.stderr)", "def run(self, stdout=None, stderr=None):", "def do_command(): # pragma: no cover\n args = parse_args(sys.argv[1:])\n status = run(args)\n sys.exit(status)", "def main(args=sys.argv[1:], env=Environment()):\r\n # Parse Top-Level Arguments\r\n exit_status = 0\r\n resolver = CommandParser(env)\r\n try:\r\n command, command_args = resolver.parse(args)\r\n\r\n # Set logging level\r\n debug_level = command_args.get('--debug')\r\n if debug_level:\r\n logger = logging.getLogger()\r\n handler = logging.StreamHandler()\r\n logger.addHandler(handler)\r\n logger.setLevel(DEBUG_LOGGING_MAP.get(debug_level, logging.DEBUG))\r\n\r\n kwargs = {\r\n 'proxy': command_args.get('--proxy'),\r\n 'config_file': command_args.get('--config')\r\n }\r\n if command_args.get('--timings'):\r\n client = TimedClient(**kwargs)\r\n else:\r\n client = Client(**kwargs)\r\n\r\n # Do the thing\r\n runnable = command(client=client, env=env)\r\n data = runnable.execute(command_args)\r\n if data:\r\n out_format = command_args.get('--format', 'table')\r\n if out_format not in VALID_FORMATS:\r\n raise ArgumentError('Invalid format \"%s\"' % out_format)\r\n output = format_output(data, fmt=out_format)\r\n if output:\r\n env.out(output)\r\n\r\n if command_args.get('--timings'):\r\n out_format = command_args.get('--format', 'table')\r\n api_calls = client.get_last_calls()\r\n timing_table = KeyValueTable(['call', 'time'])\r\n\r\n for call, _, duration in api_calls:\r\n timing_table.add_row([call, duration])\r\n\r\n env.err(format_output(timing_table, fmt=out_format))\r\n\r\n except InvalidCommand as ex:\r\n env.err(resolver.get_module_help(ex.module_name))\r\n if ex.command_name:\r\n env.err('')\r\n env.err(str(ex))\r\n exit_status = 1\r\n except InvalidModule as ex:\r\n env.err(resolver.get_main_help())\r\n if ex.module_name:\r\n env.err('')\r\n env.err(str(ex))\r\n exit_status = 1\r\n except DocoptExit as ex:\r\n env.err(ex.usage)\r\n env.err(\r\n '\\nUnknown argument(s), use -h or --help for available options')\r\n exit_status = 127\r\n except KeyboardInterrupt:\r\n env.out('')\r\n exit_status = 1\r\n except CLIAbort as ex:\r\n env.err(str(ex.message))\r\n exit_status = ex.code\r\n except SystemExit as ex:\r\n exit_status = ex.code\r\n except SoftLayerAPIError as ex:\r\n if 'invalid api token' in ex.faultString.lower():\r\n env.out(\"Authentication Failed: To update your credentials, use \"\r\n \"'sl config setup'\")\r\n else:\r\n env.err(str(ex))\r\n exit_status = 1\r\n except SoftLayerError as ex:\r\n env.err(str(ex))\r\n exit_status = 1\r\n except Exception:\r\n import traceback\r\n env.err(\"An unexpected error has occured:\")\r\n env.err(traceback.format_exc())\r\n env.err(\"Feel free to report this error as it is likely a bug:\")\r\n env.err(\" https://github.com/softlayer/softlayer-python/issues\")\r\n exit_status = 1\r\n\r\n sys.exit(exit_status)", "def _call(self, argstr, need_stderr, 
dry_run, **kwargs):", "def run_and_capture(*argv):\n print(*argv, file=sys.stderr)\n return subprocess.check_output(argv)", "def cli(args): # noqa; pylint: disable=unused-argument", "def cmd_dele(args):", "def runprocess(self, argv, check_stdout=None, check_stderr=None,\n check_returncode=0, stdin_string='', fail_message=None,\n timeout=5, verbosity=None, env=None):\n if env is None:\n env = os.environ\n env.setdefault('GIT_COMMITTER_DATE', self.isodate_now)\n argv_repr = ' '.join(shellquote(a) for a in argv)\n if verbosity is None:\n verbosity = self.verbosity\n if verbosity:\n print(self.term.blue(argv_repr))\n if verbosity > 2:\n print(self.term.yellow(stdin_string.rstrip()))\n PIPE = subprocess.PIPE\n proc = subprocess.Popen(argv, stdout=PIPE, stderr=PIPE, stdin=PIPE,\n env=env)\n try:\n stdout, stderr = proc.communicate(stdin_string.encode('utf-8'),\n timeout=timeout)\n timeout_expired = False\n except subprocess.TimeoutExpired:\n proc.kill()\n stdout = stderr = b''\n timeout_expired = True\n stdout = stdout.decode('utf-8')\n stderr = stderr.decode('utf-8')\n returncode = proc.returncode\n failed = any([\n timeout_expired,\n (check_stdout is not None and check_stdout != stdout),\n (check_stderr is not None and check_stderr != stderr),\n (check_returncode is not None and check_returncode != returncode),\n ])\n if failed and not verbosity:\n print(self.term.blue(argv_repr))\n if failed or verbosity >= 2:\n if stdout:\n print(stdout.rstrip())\n if stderr:\n print(self.term.yellow(stderr.rstrip()))\n print('→ %s' % self.term.blue(str(proc.returncode)))\n if failed:\n if timeout_expired:\n self.die('Command timeout expired')\n elif fail_message:\n self.die(fail_message)\n else:\n self.die('Command failed')\n return SubprocessResult(stdout, stderr, returncode)", "def run(*args, **kwargs):\n kwargs[\"check\"] = True\n print(\"+\", \" \".join(args[0]))\n return subprocess.run(*args, **kwargs)", "def test_with_command_line_arguments(self, arguments):\n fixed_arguments = self.get_argument_string(arguments)\n result = self.run(\n arguments=fixed_arguments,\n timeout=self.full_timeout,\n use_fresh_profile=True)\n return self._handle_test_result(result)", "def deadlines_command(update: Update, _: CallbackContext) -> None:\n update.message.reply_text(\"Polymall Module 1.10 - 4 Jun 2359 \\n\"\n \"Lesson Plan submission - end of Sem 1\\n\")", "def cli() -> None:", "def cli() -> None:", "def execute(self, *args, **options):\n show_traceback = options.get('traceback', False)\n\n try:\n self.stdout = options.get('stdout', sys.stdout)\n self.stderr = options.get('stderr', sys.stderr)\n\n output = self.handle(*args, **options)\n if output:\n self.stdout.write(output)\n\n except CommandError as exception:\n if show_traceback:\n traceback.print_exc()\n else:\n self.stderr.write(\n smart_str(self.style.ERROR('Error: %s\\n' % exception)))\n sys.exit(1)", "def run(args):\n parser = make_parser()\n opts = parser.parse_args(args)\n\n try:\n return opts.__command__.run(opts)\n\n except NextstrainCliError as error:\n exit_status = 1\n\n if DEBUGGING:\n traceback.print_exc()\n else:\n if isinstance(error, UsageError):\n warn(opts.__parser__.format_usage())\n exit_status = 2\n\n warn(error)\n\n return exit_status\n\n except AssertionError:\n traceback.print_exc()\n warn(\"\\n\")\n warn(dedent(\"\"\"\\\n An error occurred (see above) that likely indicates a bug in the\n Nextstrain CLI.\n\n To report this, please open a new issue and include the error above:\n <https://github.com/nextstrain/cli/issues/new/choose>\n 
\"\"\"))\n return 1", "def run(argv: List[str], stdout: TextIO, stderr: TextIO) -> int:\n parser = _make_argument_parser()\n args, out, err = _parse_args(parser=parser, argv=argv)\n if len(out) > 0:\n stdout.write(out)\n\n if len(err) > 0:\n stderr.write(err)\n\n if args is None:\n return 1\n\n command, errors = _parse_args_to_params(args=args)\n if errors:\n for error in errors:\n print(error, file=stderr)\n return 1\n\n assert command is not None\n\n if not command.scenarios_dir.exists():\n print(\n f\"The directory you specified in --scenarios_dir does not exist \"\n f\"on your system: {command.scenarios_dir}\",\n file=stderr,\n )\n return 1\n\n if not command.scenarios_dir.is_dir():\n print(\n f\"The path you specified in --scenarios_dir is expected to be a directory, \"\n f\"but it is not: {command.scenarios_dir}\",\n file=stderr,\n )\n return 1\n\n if isinstance(command, Once):\n errors = rasaeco.render.once(scenarios_dir=command.scenarios_dir)\n elif isinstance(command, Continuously):\n server = None # type: Optional[ThreadedServer]\n\n with contextlib.ExitStack() as exit_stack:\n if command.port is not None:\n server = ThreadedServer(\n port=command.port,\n scenarios_dir=command.scenarios_dir,\n stdout=stdout,\n stderr=stderr,\n )\n server.start()\n exit_stack.push(server)\n\n stop = queue.Queue() # type: queue.Queue[bool]\n\n work_thread = threading.Thread(\n target=_render_continuously,\n args=(stdout, stderr, command.scenarios_dir, stop),\n )\n\n prefix = \"In the main\"\n work_thread.start()\n try:\n while True:\n time.sleep(0.5)\n except KeyboardInterrupt:\n print(f\"{prefix}: Got a keyboard interrupt.\", file=stdout)\n finally:\n print(f\"{prefix}: Sending a stop from the main thread...\", file=stdout)\n stop.put(True)\n print(\n f\"{prefix}: Waiting for the work thread in main to join...\",\n file=stdout,\n )\n work_thread.join()\n\n else:\n raise AssertionError(\"Unhandled command: {}\".format(command))\n\n if errors:\n for error in errors:\n print(error, file=stderr)\n return 1\n\n return 0", "def run(argv: Optional[list[str]] = None) -> tuple[str, str]:\n argv = argv if argv is not None else []\n\n with PipeStream() as stdin:\n stdin.writer.close()\n\n with PipeStream() as stdout:\n with PipeStream() as stderr:\n gada.main(\n [\"gada\"] + argv,\n stdin=stdin.reader,\n stdout=stdout.writer,\n stderr=stderr.writer,\n )\n stdout.writer.close()\n stderr.writer.close()\n return (\n stdout.reader.read().decode(errors=\"ignore\"),\n stderr.reader.read().decode(errors=\"ignore\"),\n )", "def test_debug_wrapper_run_command(mock_compile, mock_open, mock_argv):\n mock_argv.return_value = [\"kubernetes_debug_wrapper.py\", \"main.py\",\n \"--epochs\", \"10\"]\n expected_output = \"foo\"\n mock_compile.return_value = \"print('{}')\".format(expected_output)\n\n with catch_stdout() as caught_output:\n kubernetes_debug_wrapper._run_command()\n output = caught_output.getvalue()\n\n assert expected_output in output", "def run(command):\n if arguments['--dry-run']:\n print command\n else:\n subprocess.call(command, shell=True)", "def do_command(self, args):\n pass", "def main():\n if len(sys.argv) == 2:\n leak_file = sys.argv[1]\n\n r = remote('chal.cybersecurityrumble.de', 2946)\n r.recvuntil('code.\\n')\n r.sendline(get_payload(f'cat {leak_file}'))\n result = r.recvall()[:-1]\n with open(leak_file, 'wb') as fptr:\n fptr.write(result)\n\n else:\n while True:\n cmd = raw_input('$ ').strip().decode()\n\n context.log_level = 'warn'\n r = remote('chal.cybersecurityrumble.de', 2946)\n 
r.recvuntil('code.\\n')\n r.sendline(get_payload(cmd))\n result = r.recvall()[:-1]\n context.log_level = 'info'\n print(result.decode())", "def runner() -> CliRunner:\n return CliRunner(mix_stderr=False)", "def execute(arg):\n print('Invalid command!!!')\n return", "def _run(*args):\n return subprocess.run(\n args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, check=True,\n universal_newlines=True)", "def main(args):\n options = parse_cmd_parameters_(args)\n execute_(options)", "def run_from_args(command):\n return Effect(Run.from_args(command))", "def test_print_graveyard_removal(capfd):\n os.system(\"python svalinn_tools/graveyard_removal.py \" + test_file_path + test_file + \" -p\")\n out, err = capfd.readouterr()\n assert (\"12682136550675318127\" in out) == True", "def main(*arguments, **options):\n capture = options.get('capture', False)\n saved_argv = sys.argv\n saved_stdout = sys.stdout\n try:\n sys.argv = arguments\n if capture:\n sys.stdout = StringIO()\n coloredlogs.cli.main()\n if capture:\n return sys.stdout.getvalue()\n finally:\n sys.argv = saved_argv\n sys.stdout = saved_stdout" ]
[ "0.6623097", "0.6296079", "0.6084702", "0.5997185", "0.59120613", "0.5874816", "0.57683825", "0.5742775", "0.57391727", "0.5712437", "0.56482756", "0.5596094", "0.558642", "0.553605", "0.553605", "0.5521694", "0.55093294", "0.54556483", "0.5436485", "0.54264593", "0.54078025", "0.53955185", "0.5392372", "0.5391141", "0.5385261", "0.53747237", "0.5359656", "0.5356255", "0.5352478", "0.5345353" ]
0.65875596
1
Get the path to the file where we will store sticky settings
def GetStickySettingsFilePath(): global submissionInfo deadlineHome = submissionInfo[ "UserHomeDir" ].strip() return os.path.join( deadlineHome, "settings", "katana_sticky.json" )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def settingsFilePath(self):\n return self._settingsFilePath", "def get_preference_file():\n\n return \"{}/{}\".format(_MANAGER_PREFERENCE_PATH, _MANAGER_PREFERENCE_FILE)", "def get_preference_file_cache_destination_path():\n\n return read_preference_key(search_key=\"cache_manager_cache_path\")", "def path(self):\n return os.path.join(self.config.get('path', os.getcwd()))", "def get_cached_addon_path():\n settingspath = get_cached_setting_path()\n if not settingspath:\n logger.error(\"#SETTINGSPATH# resolution required but was not found\")\n return\n\n return os.path.join(settingspath, \"Addons\") + \"\\\\\"", "def config_file(self):\n return self[CONFIG_FILE_KEY]", "def get_tool_settings_file_path(self, tool_id):\n\n settings_path = path_utils.get_user_data_dir(appname=tool_id)\n settings_file = path_utils.clean_path(os.path.expandvars(os.path.join(settings_path, 'settings.cfg')))\n\n return settings_file", "def findSettingsFile():\n settingsName = 'oct-fire-settings.json'\n userPath = os.path.expanduser('~')\n if os.path.exists(settingsName):\n return settingsName\n elif os.path.exists(os.path.join(userPath, settingsName)):\n return os.path.join(userPath, settingsName)\n elif os.path.exists(os.path.join(userPath, 'Desktop', settingsName)):\n return os.path.join(userPath, 'Desktop', settingsName)\n elif os.path.exists(os.path.join(userPath, 'Documents', settingsName)):\n return os.path.join(userPath, 'Documents', settingsName)\n elif os.path.exists(os.path.join(userPath, 'Downloads', settingsName)):\n return os.path.join(userPath, 'Downloads', settingsName)\n raise Exception('Could not locate settings file')", "def _get_config_path():\n return os.path.join(os.path.expanduser('~'))", "def get_config_file(self):\r\n return os.path.join(self.cloudletdir, \"applied_config\")", "def getPath(self):\n path = os.path.dirname(os.path.realpath(__file__)) #Finds the path of the application\n path =(os.path.dirname(os.path.realpath(__file__))+ '\\\\Enigma Settings') #Adds to the directory to create a folder\n \n return path #Returns the folders directory", "def settings_save_path(ctx):\n click.echo(ctx.obj['save_path'])", "def get_config_file_location():\n\n return './' + CONFIG_FILE_NAME", "def config_file(self):\n return join_path(self.prefix.etc.bohrium, \"config.ini\")", "def app_settings():\n return {\n 'app_wksp_path': os.path.join(App.get_app_workspace().path, ''),\n 'threddsdatadir': App.get_custom_setting(\"thredds_path\"),\n 'threddsurl': App.get_custom_setting(\"thredds_url\"),\n 'logfile': os.path.join(App.get_app_workspace().path, 'workflow.log')\n }", "def _get_config_filepath(self):\n\t\tif self.configfilepath is None:\n\t\t\treturn os.path.join(self.workdir, \"config.txt\")\n\t\telse:\n\t\t\treturn self.configfilepath", "def get_temp_dir():\n return settings.FILE_STORE_TEMP_DIR", "def get_user_config_dir(options):\n return '/root/.spinnaker'", "def get_cached_setting_path():\n rainmeterpath = get_cached_program_path()\n\n if not rainmeterpath:\n return\n\n # Check if Rainmeter.ini is in Rainmeter program directory\n if os.path.exists(rainmeterpath + \"Rainmeter.ini\"):\n logger.info(\"Rainmeter.ini found in \" + rainmeterpath)\n return rainmeterpath\n\n else: # If not, look in %APPDATA%\\Rainmeter\\\n appdata = os.getenv(\"APPDATA\")\n if os.path.exists(os.path.join(appdata, \"Rainmeter\\\\Rainmeter.ini\")):\n logger.info(\"Rainmeter.ini found in \" +\n os.path.join(appdata, \"Rainmeter\") + \"\\\\\")\n return os.path.join(appdata, \"Rainmeter\") + \"\\\\\"\n\n else:\n 
logger.info(\"Rainmeter.ini could not be located.\")\n return None", "def get_config_path() -> Path:\n config = os.getenv('TOM_CONFIG', '')\n return Path(config)", "def settingsFilePath(self, value):\n self._settingsFilePath = value\n self.readSettingsFile()", "def path_config(self):\n return HOMEASSISTANT_CONFIG.format(HASSIO_SHARE_INT)", "def default_config_file(self):\n return DEFAULT_CONFIG_FILEPATH", "def get_variables_filepath(cls):\n return fileops.join_path(PathMapper.get_config_path(), \n Variables.VARIABLES_FILE)", "def configPath(self):\n return os.path.dirname(__file__)", "def config_file_and_path():\n return str(rmfriend_dir() / 'config.cfg')", "def get_cfg_path(filename):\n return os.path.join(get_cfg_dir(), filename)", "def store_path(self):\n return path.join(env.store_home, self._store_path)", "def get_file_save_path(self):\n return self.out", "def config_path():\n dir_ = os.path.dirname(__file__)\n demo_dir = os.path.join(dir_, '../..')\n return os.path.join(demo_dir, 'mike_dev.ini')" ]
[ "0.72601885", "0.7198174", "0.69512", "0.6910759", "0.69085604", "0.68241256", "0.67362624", "0.6648517", "0.66195136", "0.6618425", "0.6611885", "0.65249866", "0.6479099", "0.64735585", "0.64711976", "0.6452971", "0.638629", "0.6381435", "0.63718975", "0.63349026", "0.63190585", "0.63122004", "0.6304727", "0.62829256", "0.6274689", "0.62680995", "0.62539154", "0.62349385", "0.6198735", "0.618951" ]
0.8301903
0
Writes the current settings from Submitter UI to the sticky settings file.
def WriteStickySettings( gui ): global stickySettingWidgets, stickyWidgetSaveFunctions print( "Writing sticky settings..." ) configFile = GetStickySettingsFilePath() stickySettings = {} for setting, widgetName in stickySettingWidgets.iteritems(): try: widget = getattr( gui, widgetName ) stickySettings[setting] = stickyWidgetSaveFunctions[ type( widget ) ]( widget ) except AttributeError: print( traceback.format_exc() ) try: fileContents = json.dumps( stickySettings, encoding="utf-8" ) with io.open( configFile, "w", encoding="utf-8" ) as fileHandle: fileHandle.write( fileContents.decode("utf-8") ) except IOError: print( "Could not write sticky settings" ) print( traceback.format_exc() )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_settings(self):\n logger.info(f'Saving settings: {self.settings_dict}')\n for k, section in self.settings_dict.items():\n for setting_name in section.keys():\n value = self.get_control_value(setting_name)\n if value is not None:\n section[setting_name] = value\n\n write_settings(self.settings_dict)", "def save_settings(self):\n with open(self.settings_path, \"w\") as f:\n json.dump(self.settings, f, indent=4)", "def saveSettings(self):\n helpers.saveFile(self.dataDir, self.settingsFilename, json.dumps(self.settings))", "def saveSettings():\t\n\tglobal settings\n\tfout = open(config_file,'w')\n\tfout.write(json.dumps(settings, sort_keys=True, indent=4))\n\tfout.close()", "def save_to_file(self):\n check_path(self.config_path)\n\n with open(self.settings_file, 'w') as settings_file:\n options = self._get_options()\n json.dump(options,\n \t settings_file,\n \t indent=4,\n \t separators=(',', ': '))", "def save_setting(self):\n if self.is_checked.get():\n if \"Email\" not in s.alert:\n s.updateAlert(\"Email\")\n s.updateEmail(self.email_addr_entry.get())\n if not self.is_checked.get():\n if \"Email\" in s.alert:\n s.deleteAlert(\"Email\")\n s.deleteEmail()\n # Check the refresh interval\n if self.is_minimize_to_system_tray.get():\n s.updateMinimize(\"True\")\n else:\n s.updateMinimize(\"False\")\n\n if self.is_launch_at_start_up.get():\n s.updateLaunchAtStartup(\"True\")\n become_persistent(__file__)\n else:\n s.updateLaunchAtStartup(\"False\")\n remove_startup()\n\n s.updateSetting(self.interval_entry.get())\n Tracker.save_state(Tracker.FILENAME, s)", "def save(self):\n sublime.save_settings(self.file_name)", "def save(self):\n with open(self.SETTINGS_FILE, 'w') as handle:\n data = dict()\n for (key, value) in self.__dict__.items():\n if not key.startswith('__'):\n data[key] = value\n json.dump(data, handle)", "def save():\n log.info(\"Saving settings file\")\n with open(SETTINGS_FILE, \"w\") as file:\n json.dump(_names, file)", "def save_settings(self, plugin_settings, instance_settings):\n instance_settings.set_value(\"output_directory\", self.output_directory)\n instance_settings.set_value(\"labels\", self.labels)\n if self._sub:\n instance_settings.set_value(\"topic_name\", self._sub.name)", "def saveSettings(self):\n self.userFiles.applyData()\n self.userPersonal.applyData()", "def save_settings(self):\n settings = {'camera': self.comboCamera.currentIndex(),\n 'rotation': self.comboRotation.currentIndex(),\n 'colors': {\n 'min_hue': self.spinMinHue.value(),\n 'max_hue': self.spinMaxHue.value(),\n 'min_saturation': self.spinMinSaturation.value(),\n 'max_saturation': self.spinMaxSaturation.value(),\n 'min_value': self.spinMinValue.value(),\n 'max_value': self.spinMaxValue.value(),\n }, 'diameter': self.spinDiameter.value(),\n 'lifter': self.lineEditLifter.text(),\n 'save_video': self.checkSaveVideo.isChecked()\n }\n settings_file = open('./resources/settings.json', 'w')\n json.dump(settings, settings_file, indent=4)\n settings_file.close()\n self.statusbar.clearMessage()\n self.statusbar.showMessage('Settings saved.', 5000)", "def save(self):\n return self.client._perform_empty(\"PUT\", \"/admin/general-settings\", body = self.settings)", "def save_settings(self, outfile='settings.p'):\n #NOTE: drawback, must edited w/n ipython, best to save settings in plain ascii text format\n settings = {'DataDir':self.DataDir,\n 'ProcDir':self.ProcDir,\n 'OutDir':self.OutDir,\n 'AuxDir':self.AuxDir,\n 'Igthresh':self.Igthresh,\n 'Width':self.Set.Width,\n 'Length':self.Set.Length,\n 
'Dates':self.Set.Dates,\n 'DatesSerial':self.Set.DatesSerial,\n 'TimeIntervals':self.Set.TimeIntervals,\n 'TimeIndex':self.Set.TimeIndex,\n 'Igrams':self.Set.Igrams,\n 'IgramsSerial':self.Set.IgramsSerial,\n 'Paths':self.Set.Paths,\n 'Omissions':self.Set.Omissions,\n 'Tandems':self.Set.Tandems}\n pickle.dump(settings,open(name,'wb'))", "def saveSettings(self):\n # settings object\n settings = QtCore.QSettings()\n\n # store current working directory\n settings.setValue(\"mainWindow/currentDirectory\", os.getcwd())\n\n # window size\n settings.setValue(\"mainWindow/size\", self.size())", "def write_preferences_file(self):\n user_data_dir = find_pmag_dir.find_user_data_dir(\"thellier_gui\")\n if not os.path.exists(user_data_dir):\n find_pmag_dir.make_user_data_dir(user_data_dir)\n pref_file = os.path.join(user_data_dir, \"thellier_gui_preferences.json\")\n with open(pref_file, \"w+\") as pfile:\n print('-I- writing preferences to {}'.format(pref_file))\n json.dump(self.preferences, pfile)", "def save_settings(dic):\n json.dump(dic, open(\"resources/files/settings.txt\", \"w\"))\n\n # LEGACY\n # with open(\"resources/files/settings.txt\", \"w\", newline=\"\\n\") as w:\n # for sett, val in dic.items():\n # w.write(sett + '\\\\' + val + '\\n')", "def save_prefs(self):\n prefs_file = open(expanduser(self.prefs_path), 'w')\n pickle.dump(self.prefs, prefs_file)", "def writeSettings(self):\n settings = QtCore.QSettings()\n output_directory = self.ui.outputDirLineEdit.text()\n settings.setValue(\"output_directory\", output_directory)", "def writeSettings(self):\n for i in range(1,N_STATION+1):\n vol = f\"vol{i}\"\n self.settings.setValue(vol,self.param.vol[i-1])\n info = f\"info{i}\"\n self.settings.setValue(info,self.param.info[i-1])\n ip = f\"ip{i}\"\n self.settings.setValue(ip,self.param.ip[i-1])\n muted = f\"muted{i}\"\n self.settings.setValue(muted,self.param.muted[i-1])", "def writeSettingsFile(self, JSON, path=None):\n if path is not None:\n settingsFilePath = path\n else:\n settingsFilePath = self.settingsFilePath\n with open(settingsFilePath, 'w') as settingsFile:\n settingsFile.write(JSON)", "def writeShREEKConfig(self, filename):\n self._ShREEKConfig.save(filename)\n return", "def save(self):\n self.client._perform_empty(\"PUT\", \"/project-folders/%s/settings\" % (self.project_folder_id), body = self.settings)", "def save( self ):\n ini = codecs.open(self.filename,\"w\",\"utf-8\",errors=\"replace\",buffering=0)\n for (name,value) in self.conf.items():\n print >>ini, name, \"=\", value\n ini.close()", "def write_view_settings(self, key, settings=None):\n logger.debug(\"Writing view settings for: {}\".format(key))", "def _save_settings(self):\n # data to be save :\n # -----------------\n # futurePivot node\n\n # create attributes\n self._create_data_attribute()\n\n # connect futurePivot node\n pm.connectAttr(\n \"%s%s\" % (self._futurePivot.name(), \".message\"),\n self._object.attr(\"pivotData.futurePivot\"),\n f=True,\n )", "def _save(self):\n file = open(\"settings.ini\", \"w\")\n self._parser.write(file)\n file.close()", "def write_xbee_settings():\n device.apply_changes()\n device.write_changes()", "def save(self):\n if self.location is None:\n logger.debug(\"Save requested but not saving settings, \"\n \"location is None\")\n return\n\n if self._saving or not self._dirty:\n return\n\n self._saving = True\n\n logger.debug(\"Saving settings...\")\n\n with open(self.location + \".new\", 'w') as f:\n self.write(f)\n\n try:\n # make it readable by current user only, to protect private 
data\n os.fchmod(f.fileno(), 384)\n except:\n pass # fail gracefully, eg if on windows\n\n f.flush()\n\n try:\n os.rename(self.location, self.location + \".old\")\n except:\n pass # if it doesn'texist we don't care\n\n os.rename(self.location + \".new\", self.location)\n\n try:\n os.remove(self.location + \".old\")\n except:\n pass\n\n self._saving = False\n self._dirty = False", "def Save(self, settingsfile):\n try:\n with codecs.open(settingsfile, encoding=\"utf-8-sig\", mode=\"w+\") as f:\n json.dump(self.__dict__, f, encoding=\"utf-8\", ensure_ascii=False)\n with codecs.open(settingsfile.replace(\"json\", \"js\"), encoding=\"utf-8-sig\", mode=\"w+\") as f:\n f.write(\"var settings = {0};\".format(json.dumps(self.__dict__, encoding='utf-8', ensure_ascii=False)))\n except ValueError:\n Parent.Log(ScriptName, \"Failed to save settings to file.\")" ]
[ "0.71699524", "0.7144108", "0.6855974", "0.68193734", "0.66913515", "0.66821957", "0.64933175", "0.64606106", "0.6453299", "0.63580054", "0.63520503", "0.63510686", "0.6333627", "0.6321657", "0.6306876", "0.62875223", "0.6263997", "0.62562144", "0.62060374", "0.61840034", "0.6182092", "0.61726373", "0.6155534", "0.61297417", "0.6108536", "0.60341483", "0.602756", "0.60166943", "0.5989774", "0.5980866" ]
0.7189759
0
Reads in settings from the sticky settings file, then update the UI with the new settings
def LoadStickySettings( gui ): global stickySettingWidgets, stickyWidgetLoadFunctions configFile = GetStickySettingsFilePath() print( "Reading sticky settings from: %s" % configFile ) stickySettings = None try: with io.open( configFile, "r", encoding="utf-8" ) as fileHandle: stickySettings = json.load( fileHandle, encoding="utf-8" ) except IOError: print( "No sticky settings found. Using default settings." ) except ValueError: print( "Invalid sticky settings. Using default settings." ) print( traceback.format_exc() ) except Exception: print( "Could not read sticky settings. Using default settings." ) print( traceback.format_exc() ) if stickySettings: for setting, value in stickySettings.iteritems(): widgetName = stickySettingWidgets.get(setting) if widgetName: try: widget = getattr(gui, widgetName) stickyWidgetLoadFunctions[ type( widget ) ]( widget, value ) except AttributeError: print( traceback.format_exc() )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def updateSettings(self):\n self.parser.read(self.file)\n self.showTicker = self.parser.getboolean('Settings', 'showTicker')\n self.verbose = self.parser.getboolean('Settings', 'verbose')\n self.sleepTime = self.parser.getint('Settings', 'sleeptime')\n self.saveGraph = self.parser.getboolean('Settings', 'saveGraph')\n self.graphDPI = self.parser.getint('Settings', 'graphDPI')", "def read_settings(self):\n self.settings = read_settings(self.settings_path)", "def update_control_widgets(self):\n logger.info(f'Loading settings: {self.settings_dict}')\n for k, section in self.settings_dict.items():\n for setting_name, value in section.items():\n self.set_control_value(setting_name, value)", "def updateSettingsUI(self):\n\n pass", "def __load_settings(self):\n\n self.app_settings = sublime.load_settings(self.SETTINGS_FILE)\n self.__refresh_settings(True)\n\n # The settings may change during execution so we need to listen for changes\n self.app_settings.add_on_change(self.SETTINGS_CALLBACK_KEY, self.__refresh_settings)", "def set_config(self): # called from button_set object \n self.settings['lights_on'] = self.lights_on.get()\n self.settings['lights_off'] = self.lights_off.get()\n self.settings['ambient_min'] = self.ambient_min.get()\n self.settings['soil_1'] = self.smc1.get()\n self.settings['soil_2'] = self.smc2.get()\n self.settings['soil_3'] = self.smc3.get()\n self.settings['soil_4'] = self.smc4.get()\n self.settings['overhead_level'] = self.overhead_level.get()\n\n # Save settings to config file in case of reboot / power-loss\n print \"UPDATING SETTINGS FILE\"\n with open(self.settings_path, 'w') as jsonfile:\n jsonfile.write(json.dumps(self.settings, indent=4))\n self.active_changes = True # (flag) changes are active!", "def load_settings(self):\n # Set the default settings. 
In case in a later version of this script the settings change, new default variables will be added automatically\n self.settings = {\n # Connection settings to OBS Studio websockets plugin\n \"host\": \"localhost\",\n \"port\": 4444,\n \"password\": \"\",\n \"update_frequency\": 1, # seconds, how often the script loads the SC2 UI location\n }\n if os.path.isfile(self.settings_path):\n with open(self.settings_path) as f:\n self.settings.update(json.load(f))", "def on_settings(self):\n\n # Pull the current app state from the relay Observer object\n status, interval, ntfc_status, ntfc_state = settings_state.get_state()\n\n # Pass it to the Observable object in order to render the Settings window\n settings_changed, update_interval, ntfc_changed, ntfc_selected = render_settings_window(\n status, interval, ntfc_status, ntfc_state, settings_state)\n\n # Register any state changes\n settings_state.update_state(settings_changed, update_interval, ntfc_changed, ntfc_selected)\n\n # If the interval has changed, reprogram scheduler to run at the new interval\n if settings_state.intrvl_change_trig:\n modify_scheduler(JOB_ID, settings_state.settings_interval)\n\n if settings_state.notification_change_trig:\n NewsIndicator.notifications = False if not settings_state.notification_state else True", "def set_config(self, settings='settings.json'): # called from button_set object \n self.settings['lights_on'] = self.lights_on.get()\n self.settings['lights_off'] = self.lights_off.get()\n self.settings['photo1'] = self.photo1.get()\n self.settings['photo2'] = self.photo2.get()\n self.settings['smc1'] = self.smc1.get()\n self.settings['smc2'] = self.smc2.get()\n self.settings['smc3'] = self.smc3.get()\n self.settings['smc4'] = self.smc4.get()\n self.settings['watering'] = self.watering.get()\n self.settings['cycle'] = self.cycle.get()\n settings_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), settings)\n if os.path.exists(settings_path):\n with open(settings_path, 'w') as jsonfile:\n jsonfile.write(json.dumps(self.settings, indent=4))\n self.active_changes = True # (flag) changes are active!", "def readSettingsFile(self):\n with open(self.settingsFilePath, 'r') as settingsFile:\n self.settings = json.loads(settingsFile.read())", "def update(self, settings):\n self.settings.cache_clear()\n self._settings = settings\n log.info(\"Updated settings to %s\", self._settings)", "def update_settings(self):\n settings = {\n \"reference\": self,\n \"draw_tangents\": self.cbDrawTangents.isChecked(),\n }\n if self.cbShowSolarAngle.isChecked():\n settings[\"show_solar_angle\"] = self.cbSolarAngleType.currentText(), self.cbSolarBody.currentText()\n else:\n settings[\"show_solar_angle\"] = None\n\n self.view.set_remote_sensing_appearance(settings)", "def action_settings(self):\n\n cur_datadir = self.config.starbound_data_dir\n settings = SettingsDialog(self)\n settings.exec()\n new_datadir = self.config.starbound_data_dir\n if new_datadir:\n if cur_datadir != new_datadir:\n self.load_data()\n self.scene.refresh(self.data)\n else:\n self.close_world()\n\n # Make sure our menus are enabled/disabled as appropriate\n self.enforce_menu_state()\n\n # Re-focus the main window\n self.activateWindow()", "def load_settings(self):\n\n self.std = settings.settings", "def load_settings(self):\n settings_file = open('./resources/settings.json')\n settings = json.load(settings_file)\n settings_file.close()\n try:\n if settings['camera'] in self.camera_list:\n self.comboCamera.setCurrentIndex(settings['camera'])\n 
self.comboRotation.setCurrentIndex(settings['rotation'])\n self.spinMinHue.setValue(settings['colors']['min_hue'])\n self.spinMaxHue.setValue(settings['colors']['max_hue'])\n self.spinMinSaturation.setValue(settings['colors']['min_saturation'])\n self.spinMaxSaturation.setValue(settings['colors']['max_saturation'])\n self.spinMinValue.setValue(settings['colors']['min_value'])\n self.spinMaxValue.setValue(settings['colors']['max_value'])\n self.spinDiameter.setValue(settings['diameter'])\n self.lineEditLifter.setText(settings['lifter'])\n self.checkSaveVideo.setChecked(settings['save_video'])\n except KeyError:\n self.statusbar.clearMessage()\n self.statusbar.showMessage('Error in settings.json. Loading defaults instead.')", "def WriteStickySettings( gui ):\n global stickySettingWidgets, stickyWidgetSaveFunctions\n print( \"Writing sticky settings...\" )\n\n configFile = GetStickySettingsFilePath()\n\n stickySettings = {}\n\n for setting, widgetName in stickySettingWidgets.iteritems():\n try:\n widget = getattr( gui, widgetName )\n stickySettings[setting] = stickyWidgetSaveFunctions[ type( widget ) ]( widget )\n except AttributeError:\n print( traceback.format_exc() )\n\n try:\n fileContents = json.dumps( stickySettings, encoding=\"utf-8\" )\n with io.open( configFile, \"w\", encoding=\"utf-8\" ) as fileHandle:\n fileHandle.write( fileContents.decode(\"utf-8\") )\n except IOError:\n print( \"Could not write sticky settings\" )\n print( traceback.format_exc() )", "def open_settings(self, event):\n settings_dialog = cfg.SettingsDialog(parent=self, exclude=['window'])\n res = settings_dialog.ShowModal()\n if res == wx.ID_OK:\n # Reload relevant parts of app\n restart_monitor_timer = False\n restart_gui_timer = False\n reload_correlations = False\n reload_logger = False\n reload_graph = False\n\n for setting in settings_dialog.changed_settings:\n # If any 'monitor.' settings except 'monitor.divergence_threshold have changed then restart\n # monitoring timer with new settings.\n # If 'monitor.interval has changed then restart gui timer.\n # If 'monitor.monitoring_threshold' has changed, then refresh correlation data.\n # If any 'logging.' settings have changed, then reload logger config.\n if setting.startswith('monitor.') and setting != 'monitor.divergence_threshold':\n restart_monitor_timer = True\n if setting == 'monitor.interval':\n restart_gui_timer = True\n if setting == 'monitor.monitoring_threshold':\n reload_correlations = True\n if setting.startswith('logging.'):\n reload_logger = True\n if setting.startswith('monitor.calculations'):\n reload_graph = True\n\n # Now perform the actions\n if restart_monitor_timer:\n self.__log.info(\"Settings updated. Reloading monitoring timer.\")\n self.__cor.stop_monitor()\n\n # Build calculation params and start monitor\n calculation_params = [self.__config.get('monitor.calculations.long'),\n self.__config.get('monitor.calculations.medium'),\n self.__config.get('monitor.calculations.short')]\n\n self.__cor.start_monitor(interval=self.__config.get('monitor.interval'),\n calculation_params=calculation_params,\n cache_time=self.__config.get('monitor.tick_cache_time'),\n autosave=self.__config.get('monitor.autosave'),\n filename=self.__opened_filename)\n\n if restart_gui_timer:\n self.__log.info(\"Settings updated. Restarting gui timer.\")\n self.timer.Stop()\n self.timer.Start(self.__config.get('monitor.interval') * 1000)\n\n if reload_correlations:\n self.__log.info(\"Settings updated. 
Updating monitoring threshold and reloading grid.\")\n self.__cor.monitoring_threshold = self.__config.get(\"monitor.monitoring_threshold\")\n self.__refresh_grid()\n\n if reload_logger:\n self.__log.info(\"Settings updated. Reloading logger.\")\n log_config = cfg.Config().get('logging')\n logging.config.dictConfig(log_config)\n\n if reload_graph:\n self.__log.info(\"Settings updated. Reloading graph.\")\n if len(self.__selected_correlation) == 2:\n self.show_graph(symbol1=self.__selected_correlation[0], symbol2=self.__selected_correlation[1])", "def settings_load(self):\n self.ui.spinBox_ATSP.setValue(self.default['ATSP'])\n\n if self.default['serialLabel'] == 'bt':\n self.ui.btRadio.setChecked(True)\n try:\n os.system(\"blueman-manager\")\n except:\n print \"Please install 'blueman' package\"\n elif self.default['serialLabel'] == 'usb':\n self.ui.usbRadio.setChecked(True)\n else:\n self.ui.devRadio.setChecked(True)\n\n if self.default['units'] == 'metric':\n self.ui.units_metric_radio.setChecked(True)\n else:\n self.ui.units_US_radio.setChecked(True)\n\n return", "def refresh(self) -> None:\n self.data = {}\n self.load_settings_file(self.default_settings_path / \"settings.yaml\", file_key=\"internal\")\n self.load_systems(self.default_settings_path / \"systems\")\n self.load_settings_file(self.personal_dir / \"settings.yaml\", file_key=\"user\")\n self.load_systems(self.personal_dir / \"systems\")", "def load_measurement_settings_file():\n\n # First update the settings that the state machine is up to date\n self.variables.ui_plugins[\"Settings_window\"].load_new_settings()\n\n fileDialog = QFileDialog()\n file = fileDialog.getOpenFileName()\n\n if file[0]:\n file = open(str(file[0]), \"r\")\n dict = yaml.load(file)\n file.close()\n\n # l.info(\"Loaded new measurement settings file: \" + str(file[0]))\n self.variables.default_values_dict[\"settings\"].update(\n dict\n ) # Updates the values of the dict, it either updates the values or adds them if not incluced\n self.variables.ui_plugins[\"Settings_window\"].configure_settings()", "def cli(ctx, root):\n try:\n ctx.obj = create_initial_context(root)\n except SettingsBroken as e:\n click.echo(\n 'Failed to read the settings file: %s' % str(e),\n err=True\n )\n exit(1)", "def read_settings():\n \n settings = OrdDic()\n settings.update(json.load(open(\"resources/files/settings.txt\", \"r\")))\n\n ## OLD WAY BELOW\n\n #r = open(\"resources/files/settings.txt\", \"r\", newline=\"\\n\")\n # for option in r.read().split('\\n'):\n # try:\n # #option = option.split('\\\\')\n # #settings.update({option[0]: option[1]})\n # # settings.update(json.loads(option))\n # except IndexError:\n # pass\n return settings", "async def settings(self, ctx: BBContext):\n pass", "def reload_settings():\n global settings, cancel_thread\n\n # cancel the thread if the settings say so\n if cancel_thread is None:\n if settings.get('disabled') is False:\n cancel_thread = start_thread()\n else:\n if settings.get('disabled') is True:\n light_scheme_set = None\n current_timeout = 0\n cancel_thread()\n cancel_thread = None", "def settings():\n\n mysettings = MySettings(app)\n form = SettingsForm(request.form, obj=mysettings)\n\n if request.method == 'POST' and form.validate():\n mysettings.update(\n app,\n form.influxdb_host.data,\n form.influxdb_port.data,\n form.influxdb_db.data,\n form.influxdb_table.data,\n form.influxdb_user.data,\n form.influxdb_pw.data,\n form.diematicd_host.data,\n form.diematicd_port.data\n )\n return redirect(url_for('controller'))\n\n return 
render_template('settings.html', form=form, user=current_user)", "def update_settings(self):\n\n self.sim.account.set_balance(int(self.balance_str.get()))\n\n self.sim.config.set_base_bet(int(self.base_bet_str.get()))\n self.sim.config.set_payout(float(self.payout_str.get()))\n self.sim.config.set_iterations(int(self.iterations_str.get()))\n self.sim.config.set_loss_adder(int(self.loss_adder_str.get()))", "def update_settings(self):\n\n param = \"settings.py\"\n self._check_path_availability([\"get_settings_dir\", \"get_settings_dir_to\"])\n self.updater.update_files(\n self.analizer.get_settings_dir(),\n self.analizer.get_settings_dir_to(),\n param,\n )\n return self.write_debug_message(\"Settings upgrade is done!\\n\")", "def load_settings(self):\n self.settings = db.get_settings()\n if len(self.settings) < 2:\n while(True):\n consumer_key = raw_input(\"Enter your consumer key\")\n consumer_secret = raw_input(\"Enter your consumer_secret\")\n if len(consumer_key) > 5 and len(consumer_secret) > 5:\n db.add_settings(consumer_key, consumer_secret)\n break", "def load_settings(self, outfile='settings.p'):\n settings = pickle.load(open(path,'rb'))\n self.__dict__.update(settings)", "def load(self):\n if not path.isfile(self.SETTINGS_FILE):\n return\n data = load_json_from_disk(self.SETTINGS_FILE)\n for (key, value) in data.items():\n self.__dict__[key] = value" ]
[ "0.686287", "0.67737114", "0.6748555", "0.6717438", "0.6571519", "0.6435366", "0.63965124", "0.6376148", "0.6361373", "0.62330866", "0.621005", "0.62071073", "0.61983466", "0.61828625", "0.61292857", "0.6116447", "0.59903854", "0.5935539", "0.5932085", "0.58979905", "0.58402485", "0.58400583", "0.5837058", "0.582838", "0.58244294", "0.5815016", "0.5812625", "0.5772277", "0.5769879", "0.5749196" ]
0.69833964
0
Converts a url patternesque string into a path, given a context dict, and splits the result.
def pathify(urlpattern, **context): repl = lambda match: context[match.group(1)] path = re.sub(r':([a-z]+)', repl, urlpattern) return tuple(path[1:].split('/'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split_string_path(base, path):\n for i in range(len(path)):\n if isinstance(base, string_types):\n return path[:i], path[i:]\n base = base[path[i]]\n return path, ()", "def resolveContext(self, context):\n if context is None:\n return context\n elif isinstance(context, tuple):\n return context\n elif isinstance(context, tuple):\n return tuple(context.split('/'))\n else:\n return context.getPhysicalPath()", "def _construct_url(parts):\n results = []\n last_idx = len(parts) - 1\n for n, part in enumerate(parts):\n if n > 0:\n part = part.lstrip('/')\n if n < last_idx:\n part = part.rstrip('/')\n if part:\n results.append(part)\n return '/'.join(results)", "def _split_url(url):\n return url[1:].split('/')", "def composeURL(self,splitedURL):\n # 027 With use of SmartURL won't be necessary anymore.\n # 027 was used only in LinklistAdaptor.parse2Queue().parseLine() -> removed (Which actually might jeopardize cll).\n # 027 So actually is NOT used anywhere.\n \n #Could be replaced by string.join() method.\n #Also could be merged with method composePath().\n #Create child of list class with this method. \n \n self.debug.printHeader() \n url=''\n if len(splitedURL)>0:\n for piece in splitedURL:\n if not(piece==splitedURL[0]): url+='/'\n url+=piece\n self.logger.debug(\"Composed url is: %s\" %(url))\n return url\n #return \"/\".join(splitedURL) #026 This will do the same job. But needs to be tested.", "def transform_url(result):\n import re\n result = re.sub('//', '/', result)\n result = re.sub('/', '//', result, count=1)\n return encode_url_path(result)", "def split_path(path):\n\n if type(path) != str:\n return []\n\n # replace multiple occurrences of \"/\" with just one,\n # i.e. \"page1//page2///page3\" -> \"page1/page2/page3\"\n path = re.sub('/+', '/', path)\n path = path.split(\"/\") # form a list of path steps\n path = [x.lower() for x in path if x != \"\"] # filter out empty strings, convert to lowercase\n\n return path", "def extract_path(path: str) -> str:\n return _RE_URL.sub(r'{\\1}', path)", "def context_to_path_string(context: List[str]) -> str:\n if len(context) == 0:\n return \"\"\n elif len(context) == 1:\n return context[0]\n else:\n return f'{context[0]}.{SpreadsheetGenerator.context_to_path_string(context[1:])}'", "def pathSplit(path):\n path = re.split('/|\\\\\\\\', path)\n return path", "def explode(part):\n if isinstance(part, str):\n ans = []\n while len(part) > 0:\n parts = part.partition(\"/\")\n ans.append(parts[0])\n if parts[1] != \"\":\n ans.append(SLASH)\n part = parts[2]\n return ans\n\n return [part]", "def path_to_url(path):\r\n if os.sep == '/':\r\n return path\r\n else:\r\n return '/'.join(split_all(path))", "def split_url(url): # Change the url so it can be iterated\n url = url.split('index') \n url = url[0] + 'page-1.html'\n url = url.split('page-')\n url = f\"{url[0]}page-1.html\"\n return url", "def get_url_path(url):\n return filter(lambda x: x!='', url.split('/'))", "def test_split_fullpath_with_route_domain():\n\n # Expected input must have route specified, otherwise reject\n tests = [\n [\"/Partition/1.2.3.4%0:80\", \"/Partition\", \"1.2.3.4\", 0, 80],\n [\"/Part/Folder/1.2.3.4%1:443\", \"/Part/Folder\", \"1.2.3.4\", 1, 443],\n [\"/Part/::ffff:0:0%2.8080\", \"/Part\", \"::ffff:0:0\", 2, 8080],\n [\"/Part/1.2.3.4:8080\", None, None, None, None],\n [\"/Part/::ffff:0:0.8080\", None, None, None, None]\n ]\n\n for test in tests:\n results = split_fullpath_with_route_domain(test[0])\n assert results[0] == test[1]\n assert results[1] == test[2]\n assert 
results[2] == test[3]\n assert results[3] == test[4]", "def _split(self, uri):\n if '/' in uri:\n return uri.split('/', 1)\n return [uri, None]", "def parse_path_pattern_from_entities(sourcepath,bids_entities):\n path = deepcopy(sourcepath)\n values = [val for key,val in bids_entities.items()]\n key_map={\n 'sub':'%subject%',\n 'ses':'%session%',\n 'task':'%task%',\n 'acq':'%acquisition%',\n 'run':'%run%'\n }\n assert '%' not in path # otherwise it will mess up the logic\n for key,val in bids_entities.items():\n pathcopy = deepcopy(path)\n # Replace all other values which are superstrings of the current one\n superstrings = [x for x in values if val in x and val!=x]\n for string in superstrings:\n pathcopy = pathcopy.replace(string,'*'*len(string))\n # handle ambiguity\n if pathcopy.count(val) > 1:\n raise ValueError('Ambiguity: The path has multiple instances of {}'.format(val))\n if pathcopy.count(val) < 1:\n raise ValueError('There is no {} in path'.format(val))\n path = path.replace(val,key_map[key])\n values[values.index(val)] = key_map[key]\n path = _modify_entities_of_placeholder_pattern(path)\n path = path.replace('\\\\','/')\n # Find first changing value and put the pattern from there\n first_placeholder = path.find('%')\n # Identify where should the pattern start\n start = path[:first_placeholder].rfind('/') + 1 if '/' in path[:first_placeholder] else 0\n path = path[start:]\n return path", "def _split_key(cls, logical_key):\n if isinstance(logical_key, str):\n path = logical_key.split('/')\n elif isinstance(logical_key, (tuple, list)):\n path = logical_key\n else:\n raise TypeError('Invalid logical_key: %r' % logical_key)\n return path", "def _parse_path(\n value_expr: str, target_expr: str, ref_parts: List[str],\n a_type: mapry.Path, auto_id: mapry.py.generate.AutoID,\n py: mapry.Py) -> str:\n uid = auto_id.next_identifier()\n\n return _PARSE_PATH_TPL.render(\n uid=uid,\n value_expr=value_expr,\n ref_parts=ref_parts,\n target_expr=target_expr,\n a_type=a_type,\n py=py).rstrip(\"\\n\")", "def _make_url(self, url_part, blueprint_prefix):\n parts = (blueprint_prefix, self.prefix, url_part)\n return ''.join(_ for _ in parts if _)", "def build_path(self, context):\n if not self._uuid:\n raise ValueError(\"Descriptor UUID not initialized\")\n\n parts = self.build_container_path_parts(context) \n parts.append(self._uuid)\n self._path = '/'.join(map(str, parts))\n return self._path", "def splitpath(path):\n\n # FIXME perhaps call op.split repetitively would be better.\n #s = string.split( path, '/' ) # we work with fwd slash only inside.\n\n#We have decided to use all kind of separator\n s = []\n while True:\n first, second = op.split(path)\n s.append(second)\n if first == \"\":\n break\n else:\n path = first\n s.reverse()\n if len(s) == 1 and s[0] == \"\":\n s = []\n return s", "def parse_url_path(url_path):\r\n\r\n m = re.match('^/([^/]+)/?$',url_path)\r\n if m:\r\n return (m.group(1),None)\r\n \r\n m = re.match('^/([^/]+)/(.+)$',url_path)\r\n if m:\r\n return (m.group(1),m.group(2).replace('%25','%'))\r\n \r\n return (None,None)", "def url_subpath(s):\n forbidden = forbidden_chars.intersection(s)\n if forbidden:\n forbidden = ''.join(sorted(forbidden))\n raise ValueError('%(s)r contains forbidden characters'\n ' (%(forbidden)r)'\n % locals())\n stripped = normpath(s).lstrip(sep)\n if stripped == curdir:\n return ''\n if sep != '/':\n return stripped.replace(sep, '/')\n return stripped", "def GetPathFromUrl(url):\n return __ParseUrl(url)[2]", "def compile_route_to_url(self):\n\n if 
'http' in self.redirect_url:\n return self.redirect_url\n\n # Split the url into a list\n split_url = self.redirect_url.split('/')\n\n # Start beginning of the new compiled url\n compiled_url = '/'\n\n # Iterate over the list\n for url in split_url:\n\n # if the url contains a parameter variable like @id:int\n if '@' in url:\n url = url.replace('@', '').replace(\n ':int', '').replace(':string', '')\n compiled_url += str(self.param(url)) + '/'\n else:\n compiled_url += url + '/'\n\n # The loop isn't perfect and may have an unwanted trailing slash\n if compiled_url.endswith('/') and not self.redirect_url.endswith('/'):\n compiled_url = compiled_url[:-1]\n\n # The loop isn't perfect and may have 2 slashes next to eachother\n if '//' in compiled_url:\n compiled_url = compiled_url.replace('//', '/')\n\n return compiled_url", "def SplitTestPath(test_result, test_path_format):\n if test_path_format == TELEMETRY_TEST_PATH_FORMAT:\n separator = '/'\n elif test_path_format == GTEST_TEST_PATH_FORMAT:\n separator = '.'\n else:\n raise ValueError('Unknown test path format: %s' % test_path_format)\n\n test_path = test_result['testPath']\n if separator not in test_path:\n raise ValueError('Invalid test path: %s' % test_path)\n\n return test_path.split(separator, 1)", "def process_url(url):\n parsed = urlparse(url)\n if parsed.scheme:\n return parsed.netloc, parsed.path\n else:\n host_part = parsed.path\n hostname = host_part.partition(\"/\")[0]\n path = \"/\" + host_part.partition(\"/\")[2]\n return hostname, path", "def construct_path(id_val):\n id_val = str(id_val)\n path = id_val[:3] + \"/\" + id_val[3:6] + \"/\" + id_val[6:9] + \"/\"\n path += id_val\n return path", "def split_path(full_path, root_path):\n root_len = len(root_path)\n parsed_list = full_path[root_len+1:].split('/') \n \n return parsed_list" ]
[ "0.59003174", "0.5704174", "0.5683664", "0.5584328", "0.55201805", "0.546162", "0.5402494", "0.535743", "0.53368884", "0.5284471", "0.5279856", "0.52473545", "0.5235247", "0.52138245", "0.51656365", "0.5129309", "0.5124352", "0.5093053", "0.5055723", "0.5051632", "0.5041933", "0.5019335", "0.50099516", "0.5006845", "0.49897105", "0.49890924", "0.49680176", "0.49618635", "0.49493295", "0.49473396" ]
0.7681451
0
init cluster_temp for all the center point
def __initCluster(self): data_size, cluster_center = self.data_size, self.cluster_center self.cluster_temp = np.zeros(data_size, dtype=int) self.cluster_upper_bound = np.full(len(cluster_center), float('inf'), dtype=float) for center in cluster_center: self.cluster_temp[center] = center
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initClusters(self):\n if len(self.labelList) != len(self.pointList):\n \traise ValueError(\"Label List and Point List not the same length!\")\n for i in range(len(self.labelList)):\n self.centroids[self.labelList[i]] = self.pointList[i]\n self.pointcounts[self.labelList[i]] = 1", "def _init_centroid(self, seed: int):\n random.seed(seed)\n self.centroid_info = dict()\n self.cluster_result = dict()\n self.centroid_stable_flag = dict()\n for key_index, chosen_value in enumerate(\n random.sample(self.list_data, self.n_cluster)):\n self.centroid_info.setdefault(\"c\" + str(key_index), float(chosen_value))\n self.cluster_result.setdefault(\"c\" + str(key_index), list())\n self.centroid_stable_flag.setdefault(\"c\" + str(key_index), False)", "def _init_cluster(self):\n self._Init_Cluster()", "def __init__(self):\n ## self.clusters[cluster] = list of coordinates\n self.clusters = {}\n ## self.centroids[cluster] = centroid\n self.centroids = {}", "def _init_centroid(self, data):\n\n\t\tcentroids = data[:self._k]\n\t\tx_assignee = data\n\n\t\treturn centroids, x_assignee", "def updateClusterInfo(self):\n self.nPoints = len(self.labels)\n self.n = len(np.unique(self.labels))\n self.centers = [ [0.0 for j in range(3)] for i in range(self.n)]", "def _initial_clusters(self):\n clusters = []\n for i in range(self.point_count):\n clusters.append(self._create_cluster_from_index(i))\n return clusters", "def _compute_centroids(self):\n\n for i in range(0, self.k):\n cluster = np.argwhere(self.assigned_clusters == i)\n cluster_points = self.data[cluster].squeeze()\n self.centroids[i] = np.mean(cluster_points, axis=0)", "def random_init(self, train_data):\n\n centroids=np.zeros((self.n_clusters_, train_data.shape[1]))\n for c in range(self.n_clusters_):\n for f in range(train_data.shape[1]):\n centroids[c,f]=random.uniform(min(train_data[:,f]), max(train_data[:,f]))\n\n return centroids", "def init_centroids(self, data_points):\n # print(\"Init centroid\")\n # return list(map(lambda x: x[1], random.sample(labelled_data, self.k)))\n\n # Project the data: this step will take several seconds\n\n centroids_scaled = self.naive_sharding(data_points, self.k)\n return list(centroids_scaled)\n\n #sample = np.random.permutation(len(labelled_data))[:self.k]\n\n #return list(map(lambda x: labelled_data[x][1], sample))", "def kmeans_intialize_centroids(k, n, data, T):\r\n # cast to a list to be fed to a c extension\r\n sp_initial = kpp.kmeans_pp(k, n, T).astype(int).tolist()\r\n\r\n # cast to a list to be fed to a c extension\r\n km_initial = kpp.kmeans_pp(k, n, data).astype(int).tolist()\r\n\r\n return sp_initial, km_initial", "def initialize(img):\n w, h, _ = img.shape\n for c in current_cluster_centers:\n x = np.random.randint(w)\n y = np.random.randint(h)\n c[:] = img[x, y]", "def initialize_centers(data, k):\n x_data_min = min(p[0] for p in data)\n x_data_max = max(p[0] for p in data)\n y_data_min = min(p[1] for p in data)\n y_data_max = max(p[1] for p in data)\n\n return generate_random_data(\n k,\n x_data_min,\n x_data_max,\n y_data_min,\n y_data_max\n )", "def _setup_cluster(self):\n raise NotImplementedError('Must be implemented in subclasses.')", "def gen_centers(self):\n\n \"\"\"x_track = self.cs.discrete_rollout()\n t = np.arange(len(x_track))*self.dt\n # choose the points in time we'd like centers to be at\n c_des = np.linspace(0, self.cs.run_time, self.n_bfs)\n self.c = np.zeros(len(c_des))\n for ii, point in enumerate(c_des):\n diff = abs(t - point)\n self.c[ii] = x_track[np.where(diff == 
min(diff))[0][0]]\"\"\"\n\n # desired activations throughout time\n des_c = jnp.linspace(0, self.cs.run_time, self.n_bfs)\n\n self.c = np.ones(len(des_c))\n for n in range(len(des_c)):\n # finding x for desired times t\n self.c[n] = jnp.exp(-self.cs.ax * des_c[n])\n self.c = jnp.array(self.c)", "def init_centroids(self, points , k):\n centroids = points.copy()\n numpy.random.shuffle(centroids)\n return centroids[0:k,:]", "def __initialise_smart(self, X, args):\n\t\tcentroids = np.zeros((self.K,self.D))\n\t\tif X.shape[0] > 10*self.K:\n\t\t\tdata = X[:10*self.K,:]\n\t\telse:\n\t\t\tdata = X\n\t\tN = data.shape[0]\n\n\t\t\t#choosing centroids\n\t\t\t#points are chosen from dataset with farhtest point clustering\n\t\tran_index = np.random.choice(N)\n\t\tcentroids[0,:] = data[ran_index]\n\n\t\tfor k in range(1,self.K):\n\t\t\tdistances = np.zeros((N,k)) #(N,K)\n\t\t\tfor k_prime in range(k):\n\t\t\t\tdistances[:,k_prime] = np.sum(np.square(data - centroids[k_prime,:]), axis =1) #(N,K')\n\t\t\tdistances = np.min(distances, axis = 1) #(N,)\n\t\t\tdistances /= np.sum(distances) #normalizing distances to make it a prob vector\n\t\t\tnext_cl_arg = np.random.choice(range(data.shape[0]), p = distances) #chosen argument for the next cluster center\n\t\t\tcentroids[k,:] = data[next_cl_arg,:]\n\n\t\tvar = np.var(X, axis = 0) #(D,)\n\n\t\t\t#computing initial responsibilities\n\t\tr_0 = np.zeros((X.shape[0],self.K))\n\t\tfor k in range(self.K):\n\t\t\tr_0[:,k] = np.sum(np.divide(np.square(X - centroids[k,:]), var), axis = 1) + 1e-5\n\t\tr_0 = np.divide(r_0.T, np.sum(r_0,axis=1)).T\n\n\t\tself.gating.fit(X,r_0, *args)\n\n\t\treturn r_0", "def cluster(self):\n print(\"Calculating distances\")\n self.all_distances()\n\n print(\"Start making sets\")\n clusters = self.clusters\n\n # Generates a set with neighbours for each point\n for row in self.distances:\n clusters.append(set(np.where(row < self.distance_threshold)[0].tolist()))\n\n print(\"Merging sets\")\n for cluster1 in range(self.point_count):\n for cluster2 in range(self.point_count):\n if clusters[cluster2] is not None and clusters[cluster1] is not None:\n if not clusters[cluster1].isdisjoint(clusters[cluster2]) and cluster1 != cluster2:\n clusters[cluster1].update(clusters[cluster2])\n clusters[cluster2] = None\n # Deletes empty clusters\n clusters = [points for points in clusters if points is not None]\n # Sorts clusters by their size\n clusters.sort(key=len, reverse=True)\n # Builds main set\n for point_set in clusters[0:self.cluster_count_threshold]:\n self.main_cluster.update(point_set)\n\n self.main_cluster = list(self.main_cluster)\n self.clusters = clusters", "def _assign_clusters(self):\n\n dist = np.zeros((self.k, ))\n distortion = 0\n\n for index in range(0, self.data.shape[0]):\n for i in range(0, self.k):\n dist[i] = np.linalg.norm(self.data[index] - self.centroids[i])\n\n self.assigned_clusters[index] = np.argmin(dist)\n distortion += np.min(dist)\n\n return distortion", "def _init_homolog_centers(self, method=\"kmeans\", min_spot_num=2, axis_infos=Axis3D_infos):\n if hasattr(self, 'chr_2_homolog_centers') and not self.overwrite:\n if self.verbose:\n print(f\"- directly return chr_2_homolog_centers\")\n return\n if method == 'kmeans':\n from sklearn.cluster import KMeans\n # chr_2_init_centers\n self.chr_2_homolog_centers = {}\n self.chr_2_cand_hzxys = {}\n self.chr_2_cand_ids = {}\n # loop through chrs\n for _chr_name, _exp_num in self.chr_2_copyNum.items():\n _chr_coords_df = 
self.merged_coords.loc[self.merged_coords['chr']==str(_chr_name)]\n # if not spots exists, skip\n if len(_chr_coords_df) < min_spot_num:\n continue\n # get coordinates\n _chr_hzxys = _chr_coords_df[['center_intensity']+[f\"center_{_x}\" for _x in axis_infos]].values\n _chr_ids = _chr_coords_df['chr_order'].values\n # append\n self.chr_2_cand_hzxys[_chr_name] = _chr_hzxys\n self.chr_2_cand_ids[_chr_name] = _chr_ids\n # calculate weights\n _uinds, _uind_counts = np.unique(_chr_ids, return_counts=True)\n _ind_2_weight = {_i:1/_c for _i,_c in zip(_uinds, _uind_counts)}\n _chr_weights = np.array([_ind_2_weight[_i] for _i in _chr_ids])\n # K-means\n if method =='kmeans':\n _model = KMeans(n_clusters=_exp_num, random_state=0)\n _model.fit(_chr_hzxys[:,1:], sample_weight=_chr_weights)\n #_init_labels = _model.labels_\n _init_centers = _model.cluster_centers_\n # save for now\n self.chr_2_homolog_centers[_chr_name] = _init_centers", "def initialize(self):\n self.SIZE = self.vectors.shape[0]\n # todo can use max distance to allocation farthest apart points\n self.centroids = self.vectors[[random.randint(1, self.SIZE) for x in range(self.K)], :]", "def initialize_pos(img: np.ndarray):\n\n h, w = img.shape[0:2]\n\n for cluster in range(numclusters):\n i = np.random.randint(h) # row index\n j = np.random.randint(w) # col index\n current_cluster_centers[cluster, 0, :] = img[i, j, :]\n\n print(\"Current clusters:\\n\", current_cluster_centers)", "def __get_initial_centroid(self, x_train, seed=None):\n if self.init == \"random\":\n # randomly select n_cluster point from the input dataset\n if seed:\n random.seed(seed)\n return np.asarray(random.choices(x_train, k=self.n_cluster))", "def clusters(self,rng):\n #clusts = subclust(normalize(self.training_data),0.4,0.5)\n if self.extended:\n dat = self.training_data / rng\n else:\n dat = self.training_data[:,0:-1] / rng[0:-1]\n\n clusts = subclust(normalize(dat))\n\n print len(clusts),\"initial clusters for class\",self.name\n if self.extended:\n return np.array([self.training_data[i] for i in clusts])\n else:\n return np.array([self.training_data[i,0:-1] for i in clusts])", "def initialize_dom(img: np.ndarray):\n\n channels = img.shape[2]\n\n for cluster in range(numclusters):\n for channel in range(channels):\n cmin = np.amin(img[:,:,channel]) # channel's min\n cmax = np.amax(img[:,:,channel]) # channel's max\n current_cluster_centers[cluster, 0, channel] = np.random.uniform(cmin, cmax)\n\n print(\"Current clusters:\\n\", current_cluster_centers)", "def cluster(self):\n\n result_nominatim = self.nominatim()\n try:\n coord = [(float( i['lat'] ), float( i['lon'] )) for i in result_nominatim]\n except:\n return None\n #print( \"coord\", coord )\n kms_per_radian = 6371.0088\n # Augmenter cette valeur augmente le nombre d'éléments dans un cluster et change les résultats\n epsilon = 2 / kms_per_radian\n # Adapter le nombre de clusters (min_sample) au nombre d'entités dans array ?\n db = DBSCAN( eps=epsilon, min_samples=1, algorithm='ball_tree',\n metric='haversine' ).fit( np.radians( coord ) )\n cluster_labels = db.labels_\n #print( \"cluster\", cluster_labels )\n num_clusters = len( set( cluster_labels ) )\n #print( \"num clusters\", num_clusters )\n counts = np.bincount( cluster_labels )\n #print( \"count\", counts )\n maxi = np.argmax( counts )\n #print( \"maxi\", maxi )\n itemindex = np.where( cluster_labels == maxi )[0]\n #print( \"itemindex\", itemindex )\n\n lat: List[float] = [float( result_nominatim[index]['lat'] ) for index in itemindex]\n lon: 
List[float] = [float( result_nominatim[index]['lon'] ) for index in itemindex]\n\n # on récupère la moyenne des coordonnées du plus gros cluster. Cette moyenne équivaut au centroide :\n # https://gis.stackexchange.com/questions/12120/calculate-midpoint-from-a-series-of-latitude-and-longitude-coordinates\n\n average = {\"lat\": sum( lat ) / len( lat ), \"lon\": sum( lon ) / len( lon )}\n\n #print( list( zip( cluster_labels, [x['display_name'] for x in results] ) ) )\n #print( \"plus proche de moyenne\", closest( results, average ) )\n return closest( result_nominatim, average )", "def cluster_spatial_positioning(data):\n \n n_clusters = len(set(data['clusters'])-{-1}) # since -1 element denotes noice\n if n_clusters <2:\n #Setting cluster angluar features to default\n cdist=[Cluster_Relative_Distances()]\n cdist = pd.DataFrame([o.__dict__ for o in cdist])\n\n elif n_clusters >=2:\n # Here we implement two approaches for measuring distances between clustes:\n # (1) border-boder distances and (2) centroid-centroid distances. \n # We compute dispersion measures for the distances obtained. \n \n d = dict(tuple(data.groupby('clusters')))\n d.pop(-1, None)\n\n min_dist_between_clusters=np.row_stack([[np.amin(ss.distance_matrix(np.column_stack([d[i]['X'].array,d[i]['Y'].array]), \n np.column_stack([d[j]['X'].array,d[j]['Y'].array]))) for j in d.keys()] for i in d.keys()])\n min_dist_between_clusters=np.delete(list(set(np.frombuffer(min_dist_between_clusters))) ,0)\n\n cen_dist_between_clusters=ss.distance_matrix(np.row_stack([(np.mean(d[i]['X'].array),np.mean(d[i]['Y'].array)) for i in d.keys()]),\n np.row_stack([(np.mean(d[i]['X'].array),np.mean(d[i]['Y'].array)) for i in d.keys()]))\n cen_dist_between_clusters=np.delete(list(set(np.frombuffer(cen_dist_between_clusters))) ,0)\n\n (avg_bor_bor_dist_cluster,min_bor_bor_dist_cluster,max_bor_bor_dist_cluster,\n std_bor_bor_dist_cluster,CV_bor_bor_dist_cluster,CD_bor_bor_dist_cluster,\n IQR_bor_bor_dist_cluster,Quartile_CD_bor_bor_dist_cluster)= distribution_statistics(min_dist_between_clusters)\n\n (avg_cen_cen_dist_cluster,min_cen_cen_dist_cluster,max_cen_cen_dist_cluster,\n std_cen_cen_dist_cluster,CV_cen_cen_dist_cluster,CD_cen_cen_dist_cluster,\n IQR_cen_cen_dist_cluster,Quartile_CD_cen_cen_dist_cluster)= distribution_statistics(cen_dist_between_clusters)\n\n cdist = [Cluster_Relative_Distances([avg_bor_bor_dist_cluster,min_bor_bor_dist_cluster,max_bor_bor_dist_cluster,\n std_bor_bor_dist_cluster,CV_bor_bor_dist_cluster,CD_bor_bor_dist_cluster,\n IQR_bor_bor_dist_cluster,Quartile_CD_bor_bor_dist_cluster,\n avg_cen_cen_dist_cluster,min_cen_cen_dist_cluster,max_cen_cen_dist_cluster,\n std_cen_cen_dist_cluster,CV_cen_cen_dist_cluster,CD_cen_cen_dist_cluster,\n IQR_cen_cen_dist_cluster,Quartile_CD_cen_cen_dist_cluster])]\n \n cdist = pd.DataFrame([o.__dict__ for o in cdist])\n\n \n return cdist", "def create_clusters(self):\n ex = 0\n print 'Iter - Purity Gini Index'\n while ex < self.MAX_ITERATION:\n new_clusters = np.zeros(self.centroids.shape)\n distances = euclidean_distances(self.vectors, self.centroids).argmin(axis=1)\n for i in range(self.K):\n indexes = np.argwhere(distances == i)\n data = self.vectors[indexes.transpose()[0]]\n if data.shape[0] > 1:\n new_clusters[i] = (np.sum(data, axis=0) / data.shape[0])\n else:\n new_clusters[i] = np.sum(data, axis=0)\n print ex, '----', self.cal_purity()\n ex += 1\n if np.allclose(self.centroids, new_clusters, atol=self.TOLERANCE):\n break\n self.centroids = new_clusters", "def 
_empty_clusters(clusters):\n for clst in clusters:\n clst.points = []", "def __init_cluster(self, cluster):\n self.___init_nodes(cluster)\n self.__clusterop.async_rebalance(\n cluster.get_nodes(),\n cluster.get_nodes()[1:],\n []).result()" ]
[ "0.69504863", "0.6859036", "0.67012495", "0.6668851", "0.6667392", "0.6468853", "0.6415132", "0.64095896", "0.63832414", "0.6361127", "0.63474107", "0.6336359", "0.62062657", "0.62016225", "0.61754805", "0.61420494", "0.6140045", "0.6138546", "0.6138051", "0.6124449", "0.6099616", "0.6078627", "0.6015025", "0.6010184", "0.59976006", "0.5989706", "0.5969851", "0.5948663", "0.594797", "0.59202534" ]
0.825215
0
calculate the delta of each vector save the delta point as master
def calculate_delta(self): rho_des_index, distance, data_size = self.rho_des_index, self.distance, self.data_size self.result[rho_des_index[0]][1] = -1 for i in range(1, data_size): for j in range(0, i): old_i, old_j = rho_des_index[i], rho_des_index[j] min_pos, max_pos = min(old_j, old_i), max(old_j, old_i) if distance[(min_pos, max_pos)] < self.result[old_i][1]: self.result[old_i][1] = distance[(min_pos, max_pos)] self.master[old_i] = old_j self.result[rho_des_index[0]][1] = max(self.result[:, 1])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_velocities(self):\n Ddemo_trajs = []\n\n for demo_traj in self._demo_trajs:\n d_traj = np.diff(demo_traj, axis=0)/self._dt\n #append last element to adjust the length\n d_traj = np.hstack([d_traj, d_traj[-1]])\n #add it to the list\n Ddemo_trajs.append(d_traj)", "def delta(self):\r\n return self.nd1()", "def compose_after_from_vector_inplace(self, delta):\n model_jacobian = self.pdm.model.jacobian\n points = self.pdm.model.mean.points\n n_points = self.pdm.model.mean.n_points\n\n # compute:\n # -> dW/dp when p=0\n # -> dW/dp when p!=0\n # -> dW/dx when p!=0 evaluated at the source landmarks\n\n # dW/dp when p=0 and when p!=0 are the same and simply given by\n # the Jacobian of the model\n dW_dp_0 = model_jacobian\n dW_dp = dW_dp_0\n # dW_dp_0: n_points x n_params x n_dims\n # dW_dp: n_points x n_params x n_dims\n\n dW_dx = self.transform.jacobian_points(points)\n # dW_dx: n_points x n_dims x n_dims\n\n #TODO: Can we do this without splitting across the two dimensions?\n dW_dx_x = dW_dx[:, 0, :].flatten()[..., None]\n dW_dx_y = dW_dx[:, 1, :].flatten()[..., None]\n dW_dp_0_mat = np.reshape(dW_dp_0, (n_points * self.n_dims,\n self.n_parameters))\n dW_dx_dW_dp_0 = dW_dp_0_mat * dW_dx_x + dW_dp_0_mat * dW_dx_y\n dW_dx_dW_dp_0 = np.reshape(dW_dx_dW_dp_0,\n (n_points, self.n_parameters, self.n_dims))\n # dW_dx: n_points x n_dims x n_dims\n # dW_dp_0: n_points x n_params x n_dims\n # dW_dx_dW_dp_0: n_points x n_params x n_dims\n\n J = np.einsum('ijk, ilk -> jl', dW_dp, dW_dx_dW_dp_0)\n H = np.einsum('ijk, ilk -> jl', dW_dp, dW_dp)\n\n Jp = np.linalg.solve(H, J)\n # Jp: n_params x n_params\n\n self.from_vector_inplace(self.as_vector() + np.dot(Jp, delta))\n return self", "def delta(self) -> None:", "def getdelta(self):\n\t\tmyhmag.initializehelmholtz()\n\t\tabar = 13.714285714285715\n\t\tzbar = abar/2.0\n\t\tself.data[\"delta\"] = np.zeros(len(self.data[\"rho\"]))\n\t\tfor i in range(len(self.data[\"rho\"])):\n\t\t\tadgradred,hydrograd,my_nu,my_alpha,self.data[\"delta\"][i],my_gamma1,my_cp,my_cph,my_c_s,failtrig = myhmag.gethelmgrads(self.data[\"T\"][i], self.data[\"rho\"][i], 0.,abar,zbar,True)", "def deltaCalc(self, expected):\n \n n = len(self.structure)\n self.delta = [None] * n\n self.delta[n - 1] = []\n \n for i in xrange(len(expected)):\n curr = self.a[n - 1][i]\n self.delta[n - 1].append(self.derivativeFunc(curr) * (expected[i] - curr))\n self.delta[n - 1] = np.array(self.delta[n - 1])\n \n # From n - 1 to 1 layer \n for i in xrange(n - 1, 0, -1):\n currDelta = self.delta[i]\n if i != (n - 1):\n currDelta = currDelta[0][:-1]\n \n self.delta[i - 1] = np.array(np.dot(currDelta, self.theta[i]))\n self.delta[i - 1][0] *= self.a[i - 1]\n \n return", "def compose_after_from_vector_inplace(self, delta):\n model_jacobian = self.pdm.model.jacobian\n points = self.pdm.model.mean.points\n n_points = self.pdm.model.mean.n_points\n\n # compute:\n # -> dW/dp when p=0\n # -> dW/dp when p!=0\n # -> dW/dx when p!=0 evaluated at the source landmarks\n\n # dW/dq when p=0 and when p!=0 are the same and given by the\n # Jacobian of the global transform evaluated at the mean of the\n # model\n dW_dq = self._global_transform_jacobian(points)\n # dW_dq: n_points x n_global_params x n_dims\n\n # dW/db when p=0, is the Jacobian of the model\n dW_db_0 = model_jacobian\n # dW_db_0: n_points x n_weights x n_dims\n\n # dW/dp when p=0, is simply the concatenation of the previous\n # two terms\n dW_dp_0 = np.hstack((dW_dq, dW_db_0))\n # dW_dp_0: n_points x n_params x n_dims\n\n # by application of the 
chain rule dW_db when p!=0,\n # is the Jacobian of the global transform wrt the points times\n # the Jacobian of the model: dX(S)/db = dX/dS * dS/db\n dW_dS = self.pdm.global_transform.jacobian_points(points)\n dW_db = np.einsum('ilj, idj -> idj', dW_dS, dW_db_0)\n # dW_dS: n_points x n_dims x n_dims\n # dW_db: n_points x n_weights x n_dims\n\n # dW/dp is simply the concatenation of dX_dq with dX_db\n dW_dp = np.hstack((dW_dq, dW_db))\n # dW_dp: n_points x n_params x n_dims\n\n dW_dx = self.transform.jacobian_points(points)\n #dW_dx = np.dot(dW_dx, self.global_transform.linear_component.T)\n # dW_dx: n_points x n_dims x n_dims\n\n #TODO: Can we do this without splitting across the two dimensions?\n dW_dx_x = dW_dx[:, 0, :].flatten()[..., None]\n dW_dx_y = dW_dx[:, 1, :].flatten()[..., None]\n dW_dp_0_mat = np.reshape(dW_dp_0, (n_points * self.n_dims,\n self.n_parameters))\n dW_dx_dW_dp_0 = dW_dp_0_mat * dW_dx_x + dW_dp_0_mat * dW_dx_y\n dW_dx_dW_dp_0 = np.reshape(dW_dx_dW_dp_0,\n (n_points, self.n_parameters, self.n_dims))\n # dW_dx: n_points x n_dims x n_dims\n # dW_dp_0: n_points x n_params x n_dims\n # dW_dx_dW_dp_0: n_points x n_params x n_dims\n\n J = np.einsum('ijk, ilk -> jl', dW_dp, dW_dx_dW_dp_0)\n H = np.einsum('ijk, ilk -> jl', dW_dp, dW_dp)\n\n Jp = np.linalg.solve(H, J)\n # Jp: n_params x n_params\n\n self.from_vector_inplace(self.as_vector() + np.dot(Jp, delta))", "def getDelta(A):\n A_t = np.append([A[-1,:]],A,axis=0)\n A_d = np.append(A,[A[0,:]],axis=0)\n delta = deltaClass(A - A_t[0:-1,:],A - A_d[1:,:]) \n return delta", "def compute_deltas(self, target):\n self.deltas[-1] = self.zetas[-1] - target\n for i in reversed(range(self.n_layers-1)):\n self.deltas[i] = np.multiply(np.dot(self.deltas[i+1], self.weights[i+1].T),\n self.activation_fdx(self.activations[i]))\n return self.deltas", "def update(self, final_delta = None):\n l = len(self.derivatives)\n\n if final_delta:\n #self.derivatives[ l - 1 ] += final_delta NOTE: not supported in CodeSkulptor\n self.derivatives[ l - 1 ] = self.derivatives[ l - 1 ] + final_delta\n\n for i in range(l - 2, -1, -1):\n #self.derivatives[ i ] += self.derivatives[ i + 1 ] NOTE: not supported in CodeSkulptor\n self.derivatives[ i ] = self.derivatives[ i + 1 ] + self.derivatives[ i ]", "def mk_diff_y():\n D = mk_single_diffy()\n MDY = zeros( (vecLen, vecLen) )\n \n for cheb in range(0,vecLen,M):\n MDY[cheb:cheb+M, cheb:cheb+M] = D\n del cheb\n return MDY", "def _update_Deltas(self, a, deltas, Deltas):\n \n updated_Deltas = []\n a = a[-2::-1] \n for Delta, delta, ai in zip(reversed(Deltas), deltas, a):\n updated_Deltas.insert(0, Delta + np.outer(delta, ai))\n \n return updated_Deltas", "def _get_current_delta(model):\n delta = []\n _delta = []\n for i in MILPSolver.prob.nn.layers:\n (s, e) = vmodel.get_var_indices(i.depth, 'delta')\n d = model._vars[s:e]\n _d = np.asarray(model.cbGetNodeRel(d))\n delta.append(d)\n _delta.append(_d)\n\n return delta, _delta", "def doDeltas(self, index, delta):\n raise NotImplementedError", "def _compute_diff(self, begin, end):\n d = self.diff\n x = self.x\n for i in range(begin, end):\n for j in range(i):\n d[i].append((d[i][j] - d[i-1][j]) / (x[i] - x[i-j-1]))", "def calculate_d_vals(self) -> None:\n # Skip last point if path is non-cyclic\n point_inds = range(self.num_points) if self.is_cyclic else range(self.num_points - 1)\n for i in point_inds:\n z_i = self.points[i % self.num_points]\n z_j = self.points[(i + 1) % self.num_points]\n z_i.d_val = abs(z_i - z_j)", "def diferencia_vectores():\n vector1 = 
carga_vector()\n vector2 = carga_vector()\n diferencia = []\n for i in range(0, 3):\n diferencia.append(float(vector1[i] - vector2[i]))\n print('\\nLa diferencia de los vectores {} y {} es {}.'.format(vector1, vector2, diferencia))", "def getDelta(self,u,w,v=None):\r\n if v==None :\r\n return self._deltaDot[u,w]\r\n elif self._sigma[u,v]==0 or self._sigma[u,w]==0 or self._sigma[w,v]==0:\r\n return 0.0\r\n elif (self._d[u,v]==self._d[u,w]+self._d[w,v]):\r\n return 1.0 * self._sigma[u,w]*self._sigma[w,v]/self._sigma[u,v]\r\n else:\r\n return 0.0", "def delta_vel(p1=database['K+'], p2=database['pi+'], p3=database['p+'], pmin=0, pmax=80):\r\n p_range = np.linspace(pmin, pmax, 1000)\r\n m1 = p1.mass\r\n m2 = p2.mass\r\n m3 = p3.mass\r\n dv2, dv3 = [], []\r\n for p in p_range:\r\n v1 = c*beta(p, m1)\r\n v2 = c*beta(p, m2)\r\n v3 = c*beta(p, m3)\r\n dv2.append(abs(v1-v2))\r\n dv3.append(abs(v1-v3))\r\n fig = plt.figure(figsize=[10, 5])\r\n ax = fig.add_subplot(1, 1, 1)\r\n# p1_name = r'K$^+$'\r\n# p2_name = r'$\\pi^+$'\r\n# p3_name = r'p$^+$'\r\n ax.plot(p_range, dv2, 'r', label=r'$\\left|v_{K^+}-v_{\\pi^+}\\right|$')\r\n ax.plot(p_range, dv3, 'b', label=r'$\\left|v_{K^+}-v_{p^+}\\right|$')\r\n ax.set_xlabel('p / GeV', fontsize=20)\r\n ax.set_ylabel(r'$\\left|\\Delta v\\right|$ / $ms^{-1}$', fontsize=20)\r\n ax.axvline(75, color='k', label='p = 75 GeV')\r\n ax.set_xticks(np.arange(pmin, pmax+1, 1))\r\n ax.set_xticklabels(np.arange(pmin, pmax+1, 1))\r\n ax.grid()\r\n ax.minorticks_on()\r\n ax.set_xlim(pmin, pmax)\r\n# ax.set_ylim(np.min(v1+v2))\r\n ax.legend(fontsize=20, loc=[0.65, 0.2])\r\n plt.show\r\n return", "def backward_pass(self, delta):\r\n self.d_x = np.dot(delta, self.w.T)\r\n self.d_b = np.matmul(np.ones((1, delta.shape[0])), delta)\r\n self.d_w = np.dot(self.x.T, delta)\r\n return self.d_x", "def _computeDerivative(self,angles, distances):\n slope=[]\n slope.append(0)\n for i in xrange(1,len(angles)):\n der = (distances[i]-distances[i-1])/(angles[i]-angles[i-1])\n slope.append(der)\n #slope.append(0)\n return slope", "def fourPtFiniteDiff(x,y):\n dydx = np.zeros(y.shape,float)\n for i in range(2,len(y)-2):\n a = y[i-2]\n b = y[i-1]\n c = y[i+1]\n d = y[i+2]\n dydx[i] = (a-8*b+8*c-d)/(12*(x[1]-x[0]))\n dydx[-1] = (y[-1]-y[-2])/(x[-1]-x[-2])\n dydx[-2] = (y[-2]-y[-3])/(x[-2]-x[-3])\n dydx[0] = (y[1]-y[0])/(x[1]-x[0])\n dydx[1] = (y[2]-y[1])/(x[2]-x[1])\n return dydx", "def current_update():\n # Compute the multiplier coefficient:\n ci = dt / (L * dx)\n for k in range(0, nx-1):\n I[k] = I[k] - (ci * (V[k + 1] - V[k]))", "def delta(self):\n return (self._stages[EStage.CURRENT] - self._stages[EStage.START]) \\\n / (self._stages[EStage.END] - self._stages[EStage.START])", "def update(self, delta):\n # Computes new positions\n for part in self.particles:\n part.set_xyvxvy(self.runge_kutta(part.to_y(), 0, delta))", "def gen_delta(self):\n delta = self.delta.gen_delta(self.mask.good_pix, self.mask.bad_pix,\n self.params.nside, self.params.npix)\n return delta", "def _forces_moments(self, delta):\n # assert delta.shape == (4,1)\n da = delta[0]\n de = delta[1]\n dt = delta[2]\n dr = delta[3]\n\n e0 = self._state[3]\n e1 = self._state[4]\n e2 = self._state[5]\n e3 = self._state[6]\n u = self._state[7]\n v = self._state[8]\n w = self._state[9]\n p = self._state[10]\n q = self._state[11]\n r = self._state[12]\n\n self._Va = np.sqrt(u**2 + v**2 + w**2)\n self._alpha = np.arctan(1.0*w/u)\n self._beta = np.arcsin(1.0*v/self._Va)\n\n\n\n Fg = self.mass*self.gravity*np.array([2*(e1*e3-e2*e0),\n 2*(e2*e3 
+ e1*e0),\n e3**2 + e0**2 - e1**2 - e2**2,\n ])\n\n # Fg = self.mass*self.gravity*np.array([2*(e1*e3 - e2*e0),\n # 2*(e2*e3 + e1*e0),\n # e3**2 + e0**2 - e1**2 - e2**2,\n # ])\n\n M_e = 25\n sig = lambda a: (1+np.exp(-M_e*(a-self.alpha0))+np.exp(M_e*(a+self.alpha0)))/((1+np.exp(-M_e*(a-self.alpha0)))*(1+np.exp(M_e*(a+self.alpha0))))\n cla = lambda a: (1-sig(a))*(self.C_L_0+self.C_L_alpha*a)+sig(a)*(2*np.sign(a)*np.sin(a)**2*np.cos(a))\n cda = lambda a: self.C_D_p + (self.C_L_0+self.C_L_alpha*a)**2/(np.pi*self.e*self.AR)\n\n cxa = lambda a: -(cda(a)) * np.cos(a) + (cla(a)) * np.sin(a)\n\n cxq = lambda a: -self.C_D_q * np.cos(a) +self.C_L_q * np.sin(a)\n\n cxde = lambda a: -self.C_D_delta_e * np.cos(a) + self.C_L_delta_e * np.sin(a)\n\n cza = lambda a: -(cda(a)) * np.sin(a) - (cla(a)) * np.cos(a)\n\n czq = lambda a: -self.C_D_q * np.sin(a) - self.C_L_q * np.cos(a)\n\n czde = lambda a: -self.C_D_delta_e * np.sin(a) - self.C_L_delta_e * np.cos(a)\n\n c = self.c/(2.0*self._Va)\n b = self.b/(2.0*self._Va)\n\n\n\n one = 0.5*self.rho*self._Va**2*self.S_wing\n # two = np.array([[1,0,0],[0,1,0],[0,0,1]])\n three = np.array([[cxa(self._alpha)+cxq(self._alpha)*c*q+cxde(self._alpha)*de],\n [self.C_Y_0+self.C_Y_beta*self._beta+self.C_Y_p*b*p+self.C_Y_r*b*r+self.C_Y_delta_a*da+self.C_Y_delta_r*dr],\n [cza(self._alpha)+czq(self._alpha)*c*q+czde(self._alpha)*de]])\n\n Fa = np.squeeze(three) * one\n # pdb.set_trace()\n Fa = Fa.reshape((3,-1))\n\n F = Fg + Fa\n #\n # print(\"Fa:\",Fa)\n\n Fp = 0.5*self.rho*self.S_prop*self.C_prop*((self.k_motor*dt)**2-self._Va**2)\n\n # print(\"FP:\", Fp)\n\n fx = F[0] + Fp\n # + 0.5*MAV.rho*self._Va**2*MAV.S_wing*(\\\n # +cxa(self._alpha)\\\n # + cxq(self._alpha)*c*q\\\n # + cxde(self._alpha)*de\n # )\n\n fy = F[1]\n fz = F[2]\n\n # Moment time!!!\n one = 0.5*self.rho*self._Va**2*self.S_wing\n two = np.array([\\\n [self.b*(self.C_ell_0+self.C_ell_beta*self._beta+self.C_ell_p*b*p+self.C_ell_r*b*r+self.C_ell_delta_a*da+self.C_ell_delta_r*dr)],\n [self.c*(self.C_m_0+(self.C_m_alpha*self._alpha)+(self.C_m_q*c*q)+(self.C_m_delta_e*de))],\n [self.b*(self.C_n_0+(self.C_n_beta*self._beta)+(self.C_n_p*b*p)+(self.C_n_r*b*r)+(self.C_n_delta_a*da)+(self.C_n_delta_r*dr))]\n ])\n Ma = one * np.squeeze(two)\n # print(\"\\nMa:\", Ma)\n # pdb.set_trace()\n Ma = Ma.reshape((3,-1))\n\n size = Ma.shape[1]\n\n Mp = np.block([[np.ones(size)*-self.kTp*(self.kOmega*dt)**2],\n [np.zeros(size)],\n [np.zeros(size)]\n ])\n\n M = Mp + Ma\n\n Mx = M[0]\n My = M[1]\n Mz = M[2]\n\n # self._forces[0] = fx\n # self._forces[1] = fy\n # self._forces[2] = fz\n # pdb.set_trace()\n # print(fx, fy, fz, Mx, My, Mz)\n\n return np.array([fx, fy, fz, Mx, My, Mz])", "def derivatives(self):\n self.rdot = self.v\n self.vdot[:,:] = 0.0\n self.udot[:] = 0.0\n\n t = time()\n for nl in self.nlists: \n nl.separations()\n #nl.apply_minimum_image()\n self.timing['pairsep time'] = (time() - t)\n\n t = time()\n if SPROPS:\n properties.spam_properties(self,self.nl_default \\\n ,self.h[0:self.n],self.hlr[0:self.n])\n self.timing['SPAM time'] = time() - t\n \n t = time()\n for force in self.forces:\n force.apply()\n self.timing['force time'] = time() - t\n \n if ADVECTIVE:\n self.rdot[:,:] = 0.0", "def delta(self):\r\n return 1 - xl.Refractive_Index_Re(self.compound, self.energy, self.density)", "def forward(self) -> Vec:\n return (self.emitters[0][1] - self.emitters[0][0]).norm()" ]
[ "0.65990263", "0.6551683", "0.6391416", "0.6347046", "0.6311589", "0.6305406", "0.6271971", "0.62243825", "0.61688155", "0.6113892", "0.6110934", "0.6104737", "0.6018288", "0.5975151", "0.5968072", "0.592978", "0.59040904", "0.584247", "0.57922715", "0.57737917", "0.5765913", "0.5749168", "0.5742818", "0.57265496", "0.5718568", "0.57132626", "0.5698917", "0.56929386", "0.56886715", "0.5677029" ]
0.673058
0
use the multiplication of normalized rho and delta as gamma to determine cluster center
def calculate_gamma(self): result = self.result # scaler = preprocessing.StandardScaler() # train_minmax = scaler.fit_transform(result) # st_rho, st_delta = train_minmax[:, 0], train_minmax[:, 1] # self.gamma = (st_delta + st_rho) / 2 self.gamma = result[:, 0] * result[:, 1] self.gamma_des_index = np.argsort(-self.gamma)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_cluster_center(self, threshold):\n gamma = self.gamma\n self.cluster_center = np.where(gamma >= threshold)[0]", "def M_step(X, gamma):\n N = X.shape[0] # number of objects\n C = gamma.shape[1] # number of clusters\n d = X.shape[1] # dimension of each object\n\n ### YOUR CODE HERE\n qsum = np.sum(gamma, axis=0)\n pi = qsum/N\n \n # Update mu\n mu = np.zeros((C,d))\n for c in range(C):\n mu_sum = np.zeros((d,))\n for i in range(N):\n x_i = X[i]\n mu_sum += gamma[i,c] * x_i\n mu[c] = mu_sum / qsum[c]\n \n \n # Update sigma\n sigma = np.zeros((C, d, d))\n for c in range(C):\n sigma_sum = np.zeros((d,d))\n for i in range(N):\n x_i = X[i]\n td = (x_i - mu[c]).reshape((d,1))\n sigma_sum += gamma[i,c] * td.dot(td.T)\n sigma[c] = sigma_sum / qsum[c]\n\n return pi, mu, sigma", "def E_step(X, pi, mu, sigma):\r\n N = X.shape[0] # number of objects\r\n C = pi.shape[0] # number of clusters\r\n d = X.shape[1] # dimension of each object\r\n gamma = np.zeros((N, C)) # distribution q(T)\r\n print(\"Hei\")\r\n ### YOUR CODE HERE\r\n for c in np.arange(0,C):\r\n for ix in np.arange(0,N):\r\n x = X[ix,:]\r\n xc = x - mu[c,:]\r\n sigmac = sigma[c,:,:]\r\n sigmacInv_xc = solve(a=sigmac, b= xc)\r\n exp_arg_c = -0.5*np.dot(xc , sigmacInv_xc)\r\n acc = 0.0\r\n for d in np.arange(0,C):\r\n xd = x - mu[d,:]\r\n sigmad = sigma[d,:,:]\r\n sigmadInv_xd = solve(a=sigmad, b= xd)\r\n exp_arg_d = -0.5*np.dot(xd, sigmadInv_xd)\r\n exp_diff = exp_arg_d - exp_arg_c\r\n acc = acc + (pi[d]/pi[c]) * np.sqrt(det(sigmad)/det(sigmac))*np.exp(exp_diff) \r\n gamma[ix,c] = 1/acc \r\n \r\n \r\n return gamma", "def get_center_of_mass_allies(self,obs):", "def M_step(X, gamma):\r\n N = X.shape[0] # number of objects\r\n C = gamma.shape[1] # number of clusters\r\n d = X.shape[1] # dimension of each object\r\n\r\n pi = np.zeros([C])\r\n mu = np.zeros([C, d])\r\n sigma = np.zeros([C, d, d])\r\n \r\n for c in np.arange(0,C):\r\n mu_nominator = np.zeros([1,d])\r\n sigma_nominator = np.zeros([1,d])\r\n qc_sum = 0.0\r\n for i in np.arange(0,N):\r\n x_vec = X[i,].reshape([1,d])\r\n mu_nominator = mu_nominator + gamma[i,c]*x_vec\r\n sigma_nominator = sigma_nominator + gamma[i,c]*np.transpose(x_vec)*x_vec\r\n qc_sum = qc_sum + gamma[i,c]\r\n pi[c] = qc_sum/N\r\n mu[c,] = mu_nominator/qc_sum\r\n sigma[c,:,:] = sigma_nominator/qc_sum\r\n\r\n return pi, mu, sigma", "def gamma_star(self):\n return self.reciprocal_lattice_parameters[5]", "def test_one_center(self):\n sv=system_vars_c().init_xyzlike([ [8, [0.0, 0.0, 0.0]]])\n atom2rcut=np.array([5.0])\n g = dft.gen_grid.Grids(sv)\n g.level = 1 # precision as implemented in pyscf\n g.radi_method=leggauss_ab\n g.build(atom2rcut=atom2rcut)\n\n #print( max( np.linalg.norm(g.coords, axis=1) ) )\n #print( g.weights.sum(), 4.0 *np.pi*5.0**3 / 3.0 )\n self.assertAlmostEqual(max( np.linalg.norm(g.coords, axis=1) ), 4.9955942742763986)\n self.assertAlmostEqual(g.weights.sum(), 4.0 *np.pi*5.0**3 / 3.0)\n self.assertEqual(len(g.weights), 6248)", "def gamma(self):\r\n raise NotImplementedError('not implemented yet, will use spouge approximation')", "def E_step(X, pi, mu, sigma):\n N = X.shape[0] # number of objects\n C = pi.shape[0] # number of clusters\n d = mu.shape[1] # dimension of each object\n gamma = np.zeros((N, C)) # distribution q(T)\n\n ### YOUR CODE HERE\n # For all objects in dataset X\n for i in range(N):\n z = 0\n # Likelihood: P(x_i|t_i=c,theta) = N(x_i|mu_c, sigma_c²)\n # N(x_i|mu_c, sigma_c²) = (1/sqrt((2pi)^n*sigma_c_det)) * exp(-0.5*(x_i-mu_c).T*sigma_c⁻1*(x_i-mu_c))\n x_i = X[i]\n 
# For all clusters in mixture distribution\n for c in range(C):\n # parameters for cluster c\n pi_c = pi[c] # Prior prob. p(ti=c)\n mu_c = mu[c, :] # vector of means\n sigma_c = sigma[c, :] # covariance matrix\n # Covariance matrix determinant\n sigma_c_det = np.linalg.det(sigma_c)\n # Compute inverse as y = A⁻1*x (trick2)\n x = x_i - mu_c\n y = np.linalg.solve(sigma_c, x)\n exp = np.exp(-0.5*np.matmul(x, y))\n # Constant term\n norm_ct_c = pi_c / np.sqrt(sigma_c_det)\n # c component of q distribution for x_i\n gamma[i, c] = norm_ct_c * exp\n z += gamma[i, c]\n for c in range(C):\n gamma[i, c] /= z\n # # Normalize cluster distribution q(t_i=c): Softmax (trick1)\n # numerator = np.exp(gamma[i, :] - np.max(gamma[i, :]))\n # denominator = numerator.sum()\n # gamma[i, :] = numerator / denominator\n \n return gamma", "def _init_homolog_centers(self, method=\"kmeans\", min_spot_num=2, axis_infos=Axis3D_infos):\n if hasattr(self, 'chr_2_homolog_centers') and not self.overwrite:\n if self.verbose:\n print(f\"- directly return chr_2_homolog_centers\")\n return\n if method == 'kmeans':\n from sklearn.cluster import KMeans\n # chr_2_init_centers\n self.chr_2_homolog_centers = {}\n self.chr_2_cand_hzxys = {}\n self.chr_2_cand_ids = {}\n # loop through chrs\n for _chr_name, _exp_num in self.chr_2_copyNum.items():\n _chr_coords_df = self.merged_coords.loc[self.merged_coords['chr']==str(_chr_name)]\n # if not spots exists, skip\n if len(_chr_coords_df) < min_spot_num:\n continue\n # get coordinates\n _chr_hzxys = _chr_coords_df[['center_intensity']+[f\"center_{_x}\" for _x in axis_infos]].values\n _chr_ids = _chr_coords_df['chr_order'].values\n # append\n self.chr_2_cand_hzxys[_chr_name] = _chr_hzxys\n self.chr_2_cand_ids[_chr_name] = _chr_ids\n # calculate weights\n _uinds, _uind_counts = np.unique(_chr_ids, return_counts=True)\n _ind_2_weight = {_i:1/_c for _i,_c in zip(_uinds, _uind_counts)}\n _chr_weights = np.array([_ind_2_weight[_i] for _i in _chr_ids])\n # K-means\n if method =='kmeans':\n _model = KMeans(n_clusters=_exp_num, random_state=0)\n _model.fit(_chr_hzxys[:,1:], sample_weight=_chr_weights)\n #_init_labels = _model.labels_\n _init_centers = _model.cluster_centers_\n # save for now\n self.chr_2_homolog_centers[_chr_name] = _init_centers", "def gamma(k, z):\n return 1", "def test_get_distribution_centers(self):\n pass", "def private_centers(\n data, norm, epsilon, delta\n):\n sigma = np.sqrt(2 * np.log(1.25 / delta)) / epsilon\n n, d = data.shape\n return np.mean(data, 0) + norm / n * np.random.normal(0, sigma, d)", "def centers(pos):\n number_of_nodes = len(pos)\n ε, δ = 0.03, 0.1\n r = 1 / (ε**2) * (int(np.log2(number_of_nodes - 1)) + 1 + np.log(1/δ))\n k = int(np.sqrt(r))\n k = np.min((k, number_of_nodes))\n \n return k_means_pp(k, pos)", "def c_centers_with_ghost(self,num_ghost):\n self.compute_c_centers_with_ghost(num_ghost)\n return self._c_centers_with_ghost", "def glaucophane():\n\n rho = 3070.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 122.3; C[0,1] = 45.7; C[0,2] = 37.2; C[0,3] = 0.; C[0,4] = 2.3; C[0,5] = 0.\n C[1,0] = C[0,1]; C[1,1] = 231.5; C[1,2] = 74.9; C[1,3] = 0.; C[1,4] = -4.8; C[1,5] = 0.\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 254.6; C[2,3] = 0.; C[2,4] = -2.37; C[2,5] = 0.\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 79.6; C[3,4] = 0.; C[3,5] = 8.9\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 52.8; C[4,5] = 0.\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 
51.2\n\n return C, rho", "def random_centers(k,):\n #centr = np.random.random((k, pos.shape[1]))\n return", "def computeCenters3d(self, data):\n\n\n for i in range(self.nPoints):\n print(\"Label of point \", i, \" is \", self.labels[i])\n for j in range(3):\n self.centers[self.labels[i]][j] += data[i][j]\n\n for c in range(self.n):\n for j in range(3):\n self.centers[c][j] /= self.tots[c]", "def __init__(self, \n param_epsilon, \n param_tau,\n param_u, \n param_gamma_left,\n param_gamma_right,\n param_beta):\n self.epsilon = param_epsilon\n self.tau = param_tau\n self.u = param_u\n self.gamma_left = param_gamma_left\n self.gamma_right = param_gamma_right\n \n self.sigma_retarded = 1j * (self.gamma_left + self.gamma_right) / 2.0\n self.sigma_advanced = - self.sigma_retarded;\n \n self.dim = len(self.u)\n self.rho = np.zeros((2**self.dim))\n \n self.beta = param_beta\n \n self.cutoff_chance = 0.0001\n self.external_distribution = False\n self.external_distribution_array = self.distribution()\n self.external_distribution = True", "def gamma(x):\n return 0.0", "def _get_gamma(self):\n gamma = None\n if self.is_clayey():\n gamma = 16.8 + 0.15*self._data[SoilProperty.N60]\n else:\n gamma = 16 + 0.1 * self._data[SoilProperty.N60]\n gamma=_clamp(gamma,10,2.8*9.81)#do we need this\n return gamma", "def gen_k_centers(k, dim):\n delta = abs(np.random.normal(0.0, 5.0))\n eps = 0.001\n centers = []\n for i in range(k):\n c = np.random.multivariate_normal(np.zeros(dim), np.identity(dim))\n if len(centers):\n c1 = centers[0]\n x = np.random.multivariate_normal(c1, np.identity(c1.size)) - c1\n direction = x / np.linalg.norm(x)\n centers.append(c1 + 2.0 * i * delta * direction + eps)\n else:\n centers.append(c)\n return centers, delta", "def compute_center(self, mole_object):\r\n if mole_object.plugin_type == \"PyMOL\":\r\n sel = PymolPlugin.PymolPlugin().get_model('all')\r\n cnt = len(sel.atom)\r\n\r\n else:\r\n sel = ChimeraPlugin.ChimeraPlugin().select()\r\n cnt = len(ChimeraPlugin.ChimeraPlugin().current_atoms())\r\n\r\n cent_x = 0\r\n cent_y = 0\r\n cent_z = 0\r\n\r\n if cnt == 0:\r\n return 0, 0, 0\r\n\r\n if mole_object.plugin_type == \"PyMOL\":\r\n\r\n for a in sel.atom:\r\n cent_x += a.coord[0]\r\n cent_y += a.coord[1]\r\n cent_z += a.coord[2]\r\n\r\n else:\r\n\r\n for a in ChimeraPlugin.ChimeraPlugin().current_atoms():\r\n cent_x += a.coord()[0]\r\n cent_y += a.coord()[1]\r\n cent_z += a.coord()[2]\r\n\r\n cent_x /= cnt\r\n cent_y /= cnt\r\n cent_z /= cnt\r\n\r\n self.point_x.component('entryfield').setentry(cent_x)\r\n self.point_y.component('entryfield').setentry(cent_y)\r\n self.point_z.component('entryfield').setentry(cent_z)\r\n\r\n self.show_crisscross(mole_object)", "def gamma(a,b,c,d):\n g1 = max((c + d) * (1 - b) * b / (c*d * np.math.log(2)), 0.0)\n g2 = max((c + d) * 21**2 / (c*d * (1 - b) * b*a**2), 1.0)\n g = np.math.sqrt(g1 * np.math.log(g2, 2))\n return g", "def make_gamma(dc, C):\n Gamma = sp.zeros((3, 3))\n Gamma[0, 0] = dc[0] ** 2 * C[0, 0] + dc[1] ** 2 * C[5, 5] + dc[2] ** 2 * C[4, 4]\n Gamma[0, 0] += 2 * dc[1] * dc[2] * C[4, 5] + 2 * dc[2] * dc[0] * C[0, 4]\n Gamma[0, 0] += 2 * dc[0] * dc[1] * C[0, 5]\n\n Gamma[1, 1] = dc[0] ** 2 * C[5, 5] + dc[1] ** 2 * C[1, 1] + dc[2] ** 2 * C[3, 3]\n Gamma[1, 1] += 2 * dc[1] * dc[2] * C[1, 3] + 2 * dc[2] * dc[0] * C[3, 5]\n Gamma[1, 1] += 2 * dc[0] * dc[1] * C[1, 5]\n\n Gamma[2, 2] = dc[0] ** 2 * C[4, 4] + dc[1] ** 2 * C[3, 3] + dc[2] ** 2 * C[2, 2]\n Gamma[2, 2] += 2 * dc[1] * dc[2] * C[2, 3] + 2 * dc[2] * dc[0] * C[2, 4]\n Gamma[2, 2] += 2 
* dc[0] * dc[1] * C[3, 4]\n\n Gamma[0, 1] = dc[0] ** 2 * C[0, 5] + dc[1] ** 2 * C[1, 5] + dc[2] ** 2 * C[3, 4]\n Gamma[0, 1] += dc[1] * dc[2] * (C[3, 5] + C[1, 4]) + dc[2] * dc[0] * (\n C[0, 3] + C[4, 5]\n )\n Gamma[0, 1] += dc[0] * dc[1] * (C[0, 1] + C[5, 5])\n\n Gamma[0, 2] = dc[0] ** 2 * C[0, 4] + dc[1] ** 2 * C[3, 5] + dc[2] ** 2 * C[2, 4]\n Gamma[0, 2] += dc[1] * dc[2] * (C[3, 4] + C[2, 5]) + dc[2] * dc[0] * (\n C[0, 2] + C[4, 4]\n )\n Gamma[0, 2] += dc[0] * dc[1] * (C[0, 3] + C[4, 5])\n\n Gamma[1, 2] = dc[0] ** 2 * C[4, 5] + dc[1] ** 2 * C[1, 3] + dc[2] ** 2 * C[2, 3]\n Gamma[1, 2] += dc[1] * dc[2] * (C[3, 3] + C[1, 2]) + dc[2] * dc[0] * (\n C[2, 5] + C[3, 4]\n )\n Gamma[1, 2] += dc[0] * dc[1] * (C[1, 4] + C[3, 5])\n\n Gamma[1, 0] = Gamma[0, 1]\n Gamma[2, 0] = Gamma[0, 2]\n Gamma[2, 1] = Gamma[1, 2]\n return Gamma", "def gamma_0(d_matrix, ne, ng, omega, MCut):\r\n prefactor = mu_0**2/(12*np.pi**3*hbar**2*c**2)\r\n\r\n return prefactor*omega**3*(domega(ne,ng) - omega)**3*np.abs(T(d_matrix, ne,ng,omega,MCut))**2", "def gamma_centered(cls, kpts=(1, 1, 1), use_symmetries=True, use_time_reversal=True):\n return cls(kpts=[kpts], kpt_shifts=(0.0, 0.0, 0.0),\n use_symmetries=use_symmetries, use_time_reversal=use_time_reversal,\n comment=\"gamma-centered mode\")", "def aGMKernel(Ni,Nj,alpha,gamma):\n \n #Dimension of data\n d = Ni.mu.size\n I = sp.eye(d)\n\n ##Normalisation\n deltaMean = (Ni.mu-Nj.mu).reshape(d,)\n SigmaSum = alpha * (Ni.Sigma+Nj.Sigma) + I/gamma\n Kij = (linalg.det(2*gamma*alpha * Ni.Sigma + I) * linalg.det(2*gamma*alpha * Nj.Sigma + I))**0.25\n Kij *= sp.exp(-0.5*sp.dot(deltaMean.T,linalg.solve(SigmaSum,deltaMean)))\n Kij /= sp.sqrt(linalg.det(SigmaSum*gamma)) \n \n return Kij", "def compute_centers_of_hypercubes(self):\n for hc in self.hypercubes.flatten():\n for i in range(self.dims - 1, -1, -1):\n index = self.dims - (i + 1)\n hc.center[i] = (hc.coords[index] + 0.5) * self.hypercube_measurements[index]", "def test_approximate_gamma(self, k):\n mean_column = prior.PriorParams.field_index(\"mean\")\n var_column = prior.PriorParams.field_index(\"var\")\n x = self.priors[self.n][k][mean_column]\n xvar = self.priors[self.n][k][var_column]\n # match mean/variance\n alpha_0, beta_0 = approximate_gamma_mom(x, xvar)\n ck_x = alpha_0 / beta_0\n ck_xvar = alpha_0 / beta_0**2\n assert np.isclose(x, ck_x)\n assert np.isclose(xvar, ck_xvar)\n # match approximate sufficient statistics\n logx, _, _ = approx.approximate_log_moments(x, xvar)\n alpha_1, beta_1 = approx.approximate_gamma_kl(x, logx)\n ck_x = alpha_1 / beta_1\n ck_logx = hypergeo._digamma(alpha_1) - np.log(beta_1)\n assert np.isclose(x, ck_x)\n assert np.isclose(logx, ck_logx)\n # compare KL divergence between strategies\n kl_0 = kl_divergence(\n lambda x: conditional_coalescent_pdf(x, self.n, k),\n lambda x: scipy.stats.gamma.logpdf(x, alpha_0, scale=1 / beta_0),\n )\n kl_1 = kl_divergence(\n lambda x: conditional_coalescent_pdf(x, self.n, k),\n lambda x: scipy.stats.gamma.logpdf(x, alpha_1, scale=1 / beta_1),\n )\n assert kl_1 < kl_0" ]
[ "0.6678306", "0.6379675", "0.6162399", "0.59755903", "0.5975015", "0.59482974", "0.59137064", "0.58591443", "0.5844926", "0.5769318", "0.5760845", "0.5734815", "0.5687573", "0.56765157", "0.56329596", "0.56303257", "0.5629205", "0.558961", "0.55850154", "0.5579336", "0.55714023", "0.55590713", "0.55448", "0.5543241", "0.552207", "0.55218226", "0.5519667", "0.5517149", "0.5510134", "0.5469557" ]
0.64044535
1
Intercept a point with gamma greater than 0.2 as the cluster center
def calculate_cluster_center(self, threshold):
    gamma = self.gamma
    self.cluster_center = np.where(gamma >= threshold)[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gaussian(centre, k, intensity, xpos):\r\n\treturn intensity * np.exp(- np.power(k * (xpos - centre), 2))", "def predict_center(point):\n point_cluster_num = predict_cluster(point)\n center = centers[point_cluster_num]\n return center", "def center(x):\n return x - x.mean()", "def gauss_spot(self, xy, sigma, center=None):\r\n\r\n x = np.arange(0, xy, 1.)\r\n\r\n y = x[:,np.newaxis]\r\n\r\n \r\n\r\n if center is None:\r\n\r\n x0 = y0 = xy // 2\r\n\r\n else:\r\n\r\n x0 = center[0]\r\n\r\n y0 = center[1]\r\n\r\n \r\n\r\n return np.exp(-4*np.log(2) * ((x-x0)**2 + (y-y0)**2) / sigma**2)", "def gaussian_kernel(training_ex, landmark, sigma=0.1):\n return np.exp(-(np.linalg.norm(training_ex - landmark) ** 2 / (2 * (sigma ** 2))))", "def computeIntercepts():\n pass", "def gamma(self):\r\n raise NotImplementedError('not implemented yet, will use spouge approximation')", "def proj_hyperparam(self, X, y, log_alpha):\n return np.clip(log_alpha, -16, [5, 2])", "def M_step(X, gamma):\n N = X.shape[0] # number of objects\n C = gamma.shape[1] # number of clusters\n d = X.shape[1] # dimension of each object\n\n ### YOUR CODE HERE\n qsum = np.sum(gamma, axis=0)\n pi = qsum/N\n \n # Update mu\n mu = np.zeros((C,d))\n for c in range(C):\n mu_sum = np.zeros((d,))\n for i in range(N):\n x_i = X[i]\n mu_sum += gamma[i,c] * x_i\n mu[c] = mu_sum / qsum[c]\n \n \n # Update sigma\n sigma = np.zeros((C, d, d))\n for c in range(C):\n sigma_sum = np.zeros((d,d))\n for i in range(N):\n x_i = X[i]\n td = (x_i - mu[c]).reshape((d,1))\n sigma_sum += gamma[i,c] * td.dot(td.T)\n sigma[c] = sigma_sum / qsum[c]\n\n return pi, mu, sigma", "def reparameterize(self, mu, logvar):\n\t\tlogvar = torch.exp(logvar/2)\n\t\tif self.cuda_flag:\n\t\t\tepsilon = torch.randn((mu.size())).float().cuda()\n\t\telse:\n\t\t\tepsilon = torch.randn((mu.size())).float()\n\t\tlatent_vector = torch.mul(epsilon, logvar) + mu \n\t\treturn latent_vector", "def E_step(X, pi, mu, sigma):\n N = X.shape[0] # number of objects\n C = pi.shape[0] # number of clusters\n d = mu.shape[1] # dimension of each object\n gamma = np.zeros((N, C)) # distribution q(T)\n\n ### YOUR CODE HERE\n # For all objects in dataset X\n for i in range(N):\n z = 0\n # Likelihood: P(x_i|t_i=c,theta) = N(x_i|mu_c, sigma_c²)\n # N(x_i|mu_c, sigma_c²) = (1/sqrt((2pi)^n*sigma_c_det)) * exp(-0.5*(x_i-mu_c).T*sigma_c⁻1*(x_i-mu_c))\n x_i = X[i]\n # For all clusters in mixture distribution\n for c in range(C):\n # parameters for cluster c\n pi_c = pi[c] # Prior prob. 
p(ti=c)\n mu_c = mu[c, :] # vector of means\n sigma_c = sigma[c, :] # covariance matrix\n # Covariance matrix determinant\n sigma_c_det = np.linalg.det(sigma_c)\n # Compute inverse as y = A⁻1*x (trick2)\n x = x_i - mu_c\n y = np.linalg.solve(sigma_c, x)\n exp = np.exp(-0.5*np.matmul(x, y))\n # Constant term\n norm_ct_c = pi_c / np.sqrt(sigma_c_det)\n # c component of q distribution for x_i\n gamma[i, c] = norm_ct_c * exp\n z += gamma[i, c]\n for c in range(C):\n gamma[i, c] /= z\n # # Normalize cluster distribution q(t_i=c): Softmax (trick1)\n # numerator = np.exp(gamma[i, :] - np.max(gamma[i, :]))\n # denominator = numerator.sum()\n # gamma[i, :] = numerator / denominator\n \n return gamma", "def xintercept(self):\n if self.slope() == 0:\n return None\n else:\n return self.c/self.a", "def cauchy(self, loc, gamma):\n c = loc + gamma * np.tan(np.pi * (self.random() - 0.5))\n return c if c > 0 else self.cauchy(loc, gamma)", "def random_centers(k,):\n #centr = np.random.random((k, pos.shape[1]))\n return", "def lorentz(x, x0, gamma): \n return (0.5/pi) * gamma / ((x-x0)**2 + 0.25 * gamma**2)", "def center(self, X):\n X = X - self.mu\n X = X / numpy.where(self.sigma == 0, 1e-30, self.sigma)\n return X", "def fit(self, gamma, K):\n self.K = K\n self.gamma = gamma\n self.cluster()\n self.generate_phi()\n self.LRC()", "def aGMKernel(Ni,Nj,alpha,gamma):\n \n #Dimension of data\n d = Ni.mu.size\n I = sp.eye(d)\n\n ##Normalisation\n deltaMean = (Ni.mu-Nj.mu).reshape(d,)\n SigmaSum = alpha * (Ni.Sigma+Nj.Sigma) + I/gamma\n Kij = (linalg.det(2*gamma*alpha * Ni.Sigma + I) * linalg.det(2*gamma*alpha * Nj.Sigma + I))**0.25\n Kij *= sp.exp(-0.5*sp.dot(deltaMean.T,linalg.solve(SigmaSum,deltaMean)))\n Kij /= sp.sqrt(linalg.det(SigmaSum*gamma)) \n \n return Kij", "def E_step(X, pi, mu, sigma):\r\n N = X.shape[0] # number of objects\r\n C = pi.shape[0] # number of clusters\r\n d = X.shape[1] # dimension of each object\r\n gamma = np.zeros((N, C)) # distribution q(T)\r\n print(\"Hei\")\r\n ### YOUR CODE HERE\r\n for c in np.arange(0,C):\r\n for ix in np.arange(0,N):\r\n x = X[ix,:]\r\n xc = x - mu[c,:]\r\n sigmac = sigma[c,:,:]\r\n sigmacInv_xc = solve(a=sigmac, b= xc)\r\n exp_arg_c = -0.5*np.dot(xc , sigmacInv_xc)\r\n acc = 0.0\r\n for d in np.arange(0,C):\r\n xd = x - mu[d,:]\r\n sigmad = sigma[d,:,:]\r\n sigmadInv_xd = solve(a=sigmad, b= xd)\r\n exp_arg_d = -0.5*np.dot(xd, sigmadInv_xd)\r\n exp_diff = exp_arg_d - exp_arg_c\r\n acc = acc + (pi[d]/pi[c]) * np.sqrt(det(sigmad)/det(sigmac))*np.exp(exp_diff) \r\n gamma[ix,c] = 1/acc \r\n \r\n \r\n return gamma", "def expmap0(self, v, c):\n c = self.truncate_c(c)\n v_norm = self.clip(tf.norm(v, ord=2, axis=-1, keepdims=True))\n gamma = TanC(v_norm, c) * v / v_norm\n return gamma", "def proximal(self):\n return proximal_cconj_kl_cross_entropy(space=self.domain, g=self.prior)", "def rbf_classify(self, point):\n sum = self.b\n for i, center in enumerate(self.centers):\n sum += self.g[i] * np.exp(-self.gamma * distance.euclidean(center, point) ** 2)\n if sum > 0:\n return 1.0\n else:\n return -1.0", "def lgamma(x):\n return 0.0", "def gauss_kernel(X, test_locs, X_org, test_locs_org, sigma, sigma0, epsilon):\r\n DXT = Pdist2(X, test_locs)\r\n DXT_org = Pdist2(X_org, test_locs_org)\r\n # Kx = torch.exp(-(DXT / sigma0))\r\n Kx = (1 - epsilon) * torch.exp(-(DXT / sigma0) - DXT_org / sigma) + epsilon * torch.exp(-DXT_org / sigma)\r\n return Kx", "def lorentz(x, gamma):\n return 1 / cs.pi * 0.5 * gamma / ((0.5 * gamma**2) + x**2)", "def get_evaporation_latent_heat() -> 
float:\n theta = 28.0\n return 2500.8 - 2.3668 * theta", "def claret_linear(mu, coeff):\n return 1.0 - coeff * (1.0 - mu)", "def kl_gamma(x, y, a=1):\n x = max(x, eps)\n y = max(y, eps)\n return a*(x/y - 1 - log(x/y))", "def calc_xi(self):\n\t\n\tk_dot_x = self.k[0]*self.x[0,:,:] + self.k[1]*self.x[1,:,:] + self.k[2]*self.x[2,:,:]\n\n\tself.xi = self.t.reshape((1,self.N)) - k_dot_x/l.Clight\n\n\treturn", "def predict_center(self, point):\n point_cluster_num = self.predict_cluster(point)\n center = self.centers[point_cluster_num]\n return center" ]
[ "0.6103443", "0.5330922", "0.529988", "0.52296835", "0.52157253", "0.5153788", "0.51438296", "0.51432735", "0.5122229", "0.5116896", "0.51106155", "0.509807", "0.50852835", "0.50819665", "0.5080408", "0.50785637", "0.50589377", "0.50488997", "0.504817", "0.50294673", "0.501356", "0.5002992", "0.5001694", "0.496865", "0.4951418", "0.49506226", "0.49498817", "0.4949802", "0.494544", "0.49393016" ]
0.57451344
1
Initial configuration. Used to specify your username, password and domain. Configuration is stored in ~/.accountable/config.yaml.
def configure(username, password, domain):
    art = r''' Welcome! __ ___. .__ _____ ____ ____ ____ __ __ _____/ |______ \_ |__ | | ____ \__ \ _/ ___\/ ___\/ _ \| | \/ \ __\__ \ | __ \| | _/ __ \ / __ \\ \__\ \__( <_> ) | / | \ | / __ \| \_\ \ |_\ ___/ (____ /\___ >___ >____/|____/|___| /__| (____ /___ /____/\___ > \/ \/ \/ \/ \/ \/ \/ '''
    click.secho(art, fg='blue')
    Config(username=username, password=password, domain=domain)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def configure(self):\n configurations = config.Configurations()\n self.credentials = configurations.credentials\n self.config = configurations.config", "def configure(self, conf):\n self.openam_base_url = conf.get('url')\n self.username = conf.get('user')\n self.__password = conf.get('password')\n return", "def configure(self):\n\n log.info(\"Loading configuration from the database...\")\n settings = dict(db.query(\"\"\"SELECT `key`, `value` FROM settings\"\"\"))\n \n log.info(\"Config loaded\")\n log.info(\"HoN Version: %s Chat Port: %s Protocol: %s\" % (settings['honver'], settings['chatport'], settings['chatver']))\n if 'username' in settings:\n acc_config['username'] = settings['username']\n \n if 'password' in settings:\n acc_config['password'] = settings['password']\n \n if 'invis' in settings:\n settings['invis'] = True if settings['invis'] == \"True\" else False\n \n if 'chatport' in settings:\n settings['chatport'] = int(settings['chatport'])\n \n if 'chatver' in settings:\n settings['chatver'] = int(settings['chatver'])\n \n for key in settings:\n if key in basic_config:\n basic_config[key] = settings[key]\n \n self._configure(chatport=settings['chatport'], protocol=settings['chatver'], invis=settings['invis'],\n masterserver=settings['masterserver'], basicserver=settings['basicserver'], honver=settings['honver'])", "def init():\n try:\n config = configparser.ConfigParser()\n # look for username.config on both Windows (USERNAME) and Linux (USER)\n if os.name == \"nt\":\n username = os.environ['USERNAME']\n else:\n username = os.environ['USER']\n config_file = username + \".config\"\n if not os.path.isfile(config_file):\n logging.error(\"Configuration file \" + config_file + \" not found.\")\n sys.exit()\n config.read(config_file)\n # database\n global DB_HOST, DB_PORT, DB_NAME, DB_USER, DB_PASSWORD\n DB_HOST = config[\"DATABASE\"][\"db_host\"] if (\"db_host\" in config[\"DATABASE\"]) else None\n DB_PORT = config[\"DATABASE\"][\"db_port\"]\n DB_NAME = config[\"DATABASE\"][\"db_name\"]\n DB_USER = config[\"DATABASE\"][\"db_user\"]\n DB_PASSWORD = config[\"DATABASE\"][\"db_password\"]\n except Exception:\n logger.exception(\"Failed to read config file properly\")\n raise", "def initialize():\n # Ensure user config exists\n install(CONFIG_PATH)\n\n # Load preferences into memory\n get_config()", "def login_with_config(self):\n username = self.cfg.get('user', 'username')\n password = token = None\n\n try:\n password = self.cfg.get('user', 'password')\n except configparser.NoOptionError:\n pass\n try:\n token = self.cfg.get('user', 'token')\n except configparser.NoOptionError:\n pass\n\n if password is None and token is None:\n raise KattisConfigError(\n \"Your .kattisrc seems to be corrupted. 
Please download a new one.\")\n\n loginurl = self.get_url(self.cfg, 'loginurl', 'login')\n return self.login(loginurl, username, password, token)", "def __init__(self):\r\n self.load_config()\r\n self.login()", "def defaultConf():\n from config import lwbdUrl, userAndPass\n baseUrl = lwbdUrl\n lucidAuth = userAndPass\n return LucidSdaConfiguration(baseUrl,\n lucidAuth)", "def do_config(self, args):\n if args.set == \"store_password\":\n put_config_value(\"store_password\", True if args.value.lower() == \"yes\" else False)\n elif args.set == \"password\":\n put_config_value(\"password\", args.value)\n elif args.set == \"username\":\n put_config_value(\"username\", args.value)\n else:\n print(\"Invalid option\")", "def init(args):\n # reading existing config file, convert to configparser object\n config = config_from_file()\n config_ = configparser.ConfigParser()\n config_.add_section('osf')\n if 'username' not in config.keys():\n config_.set('osf', 'username', '')\n else:\n config_.set('osf', 'username', config['username'])\n if 'project' not in config.keys():\n config_.set('osf', 'project', '')\n else:\n config_.set('osf', 'project', config['project'])\n\n # now we can start asking for new values\n print('Provide a username for the config file [current username: {}]:'.format(\n config_.get('osf', 'username')))\n username = input()\n if username:\n config_.set('osf', 'username', username)\n\n print('Provide a project for the config file [current project: {}]:'.format(\n config_.get('osf', 'project')))\n project = input()\n if project:\n config_.set('osf', 'project', project)\n\n cfgfile = open(\".osfcli.config\", \"w\")\n config_.write(cfgfile)\n cfgfile.close()", "def setdefaults(self):\n self.config = {\n 'dbuser': Infopage.DEFAULT_DBUSER,\n 'dbname': Infopage.DEFAULT_DBNAME,\n 'dbpassword': Infopage.DEFAULT_DBPASSWORD,\n 'dbhost': Infopage.DEFAULT_DBHOST\n }", "def __init__(self, passwd=None, yamlname=None): # Passwd is in the clear so be careful\n if yamlname is None:\n utilities.log.error('No json configuration was provided for iRODS: Abort')\n sys.exit(1)\n if passwd is None:\n utilities.log.error('No passwd was provided for iRODS: Abort')\n sys.exit(1)\n self.config=utilities.load_config(yamlname)\n self.passwd = passwd", "def initConfiguration():\n UTIL.SYS.s_configuration.setDefaults([\n [\"SYS_COLOR_LOG\", \"1\"],\n [\"HOST\", \"127.0.0.1\"],\n [\"NCTRS_TM_SERVER_PORT\", \"2502\"],\n [\"NCTRS_TM_DU_VERSION\", \"V0\"],\n [\"SPACECRAFT_ID\", \"758\"]])", "def initEmailCreds(sender):\n print (\"::initializing email creds...\")\n pw = getpass.getpass()\n entry = {'tardigrade': { 'username':sender, 'password':pw } }\n filepath = confighome+\"config\"\n appendJson(filepath,entry)", "def config_skeleton():\n config = Config()\n config.set_to_default()\n config.save()", "def setup_user(self):\r\n self.email = '[email protected]'\r\n self.password = 'bar'\r\n self.username = 'test'\r\n self.create_account(self.username,\r\n self.email, self.password)\r\n self.activate_user(self.email)\r\n self.login(self.email, self.password)", "def on_start(self):\n admin_user = os.environ['ADMIN_USER']\n admin_password = os.environ['ADMIN_PASSWORD']\n admin_domain_name = os.environ['ADMIN_DOMAIN_NAME']\n admin_project_id = os.environ['ADMIN_PROJECT_ID']\n HEADERS['X-Auth-Token'] = self._get_token(admin_user,\n admin_password,\n admin_domain_name,\n project_id=admin_project_id)\n # Create test user\n self.username = 'test_user'\n self.password = 'Password1'\n self.user_domain_id = 'default'\n 
self.user_domain_name = 'Default'\n self.project_id = self._create_project()['project']['id']\n self._create_user(self.username, self.password, self.user_domain_id,\n self.project_id)", "def _set_credentials(args):\n if hasattr(args, 'username') and hasattr(args, 'apikey') \\\n and args.username and args.apikey:\n config.update({'username': args.username})\n config.update({'apikey': args.apikey})\n elif os.path.exists(os.path.expanduser('~/.jarvice.cfg')):\n CParser = configparser.ConfigParser()\n CParser.read([os.path.expanduser('~/.jarvice.cfg'), ])\n config.update({'username': CParser.get('auth', 'username')})\n config.update({'apikey': CParser.get('auth', 'apikey')})\n else:\n sys.stderr.write(\"username and apikey must be passed as arguments \" \n \"or set in ~/.jarvice.cfg\")\n sys.exit(1)", "def initialConfig(self):\r\r\n\r\r\n loggerCmw = logging.getLogger('initialConfig')\r\r\n\r\r\n self.set_scenario()\r\r\n\r\r\n self.set_default_rf_settings()\r\r\n\r\r\n self.physical_downlink_settings()\r\r\n\r\r\n self.physical_uplink_settings()\r\r\n\r\r\n self.connection_config()\r\r\n\r\r\n self.network_settings()\r\r\n\r\r\n self.set_conn_type(conn= self.connTypeEnum.CS)\r\r\n\r\r\n self.waitForCompletion()", "def setup(self):\n messages = [\n \"Please enter you Holberton email: \",\n \"Please enter your Holberton password (don't worry passwd will be encrypted): \",\n \"Please enter full path where you want to save future projects: \"\n ]\n settings_ini_variables = [\"username\", 'password', 'location']\n\n settings_ini = {}\n for msg, var in zip(messages, settings_ini_variables):\n user_input = str(input(msg))\n\n if var == \"location\":\n while not os.path.exists(user_input):\n print(\"[!]: SUPPLIED PATH DOES NOT EXIST.\")\n user_input = str(input(msg))\n settings_ini[var] = encrypted(user_input) if var == \"password\" else user_input\n\n self.write_to_file(**settings_ini)", "def initialize(self):\n self.login()", "def init():\n file_name = 'config.json'\n home_directory_path = str(Path.home())\n config_file_directory = home_directory_path+\"/.config/files/\"\n full_path = config_file_directory + file_name\n\n if os.path.isfile(full_path) and os.access(full_path, os.R_OK): # Readable Config file exists and is valid\n try:\n with open(full_path) as file:\n json_file = json.load(file)\n load_json_and_arguments(json_file)\n\n except ValueError as exception:\n raise ValueError(\"Invalid JSON configuration file\")\n\n elif not os.path.isfile(full_path): # Config file doesn't exist yet, create it\n\n if not os.path.exists(config_file_directory): # Make the directory if that doesn't exist as well\n os.makedirs(config_file_directory)\n\n get_account_info(full_path)\n\n else:\n raise IOError(\"Config file: \" + full_path + \" not accessible\")", "def __init__(self, domain, email, password, app):\n self.client = EmailSettingsClient(domain=domain)\n self.client.ClientLogin(email=email, password=password,\n source=app)", "def __init__(self):\n\n self.config = load_config()\n self.set_env_var()", "def authenticate(self):\n self.login(closet.app.config['USERNAME'],\n closet.app.config['PASSWORD'])", "def __init__(self):\n super().__init__()\n\n etc_conf_names = ('app.conf', 'app.local.conf')\n conf_paths = [os.path.join(APP_DIR, 'etc', c) for c in etc_conf_names]\n\n user_config_path = os.path.join(\n os.path.expanduser('~'),\n '.config',\n 'url_manager.conf'\n )\n conf_paths.append(user_config_path)\n\n self.read(conf_paths)\n self.set('DEFAULT', 'app_dir', APP_DIR)", "def init_config(self):\n 
pass", "def __init__(self,\n account,\n config_file='',\n auth_type='basic',\n connector_type='snowflake_connector'\n ):\n self.account = account\n self.config_file = config_file\n self.auth_type = auth_type\n self.connector_type = connector_type\n self._get_credentials()\n self._set_connection()", "def __initConfiguration(self):\n conf = configparser.ConfigParser()\n with open(self.configFile, \"r\") as f:\n conf.readfp(f)\n self.orgConf = conf\n # check additionalSection\n adSection = self.additionalSection\n if adSection in conf:\n adSection = conf[adSection]\n self.conf = {}\n for i in [self.CLIENT_ID, self.CLIENT_SECRET, self.AUTHZ_ENDPOINT,\n self.TOKEN_ENDPOINT, self.REDIRECT_URI, self.SCOPE]:\n if adSection != None and i in adSection:\n self.conf[i] = adSection[i]\n else:\n self.conf[i] = conf[\"DEFAULT\"][i]", "def __init__(self,account_name, username, password):\n self.account_name = account_name\n self.username = username\n self.password = password" ]
[ "0.6725636", "0.65860176", "0.6572466", "0.6524407", "0.6381888", "0.625799", "0.62230504", "0.6196215", "0.6191912", "0.61845404", "0.6112712", "0.60647243", "0.6038883", "0.6033728", "0.60200953", "0.60005546", "0.5989566", "0.598916", "0.59883714", "0.5980305", "0.5948604", "0.5940678", "0.59264064", "0.5915695", "0.58873785", "0.5872236", "0.58400977", "0.5838691", "0.583591", "0.583413" ]
0.6898006
0
List all issue types. Optional parameter to list issue types by a given project.
def issuetypes(accountable, project_key):
    projects = accountable.issue_types(project_key)
    headers = sorted(['id', 'name', 'description'])
    rows = []
    for key, issue_types in sorted(projects.items()):
        for issue_type in issue_types:
            rows.append(
                [key] + [v for k, v in sorted(issue_type.items())
                         if k in headers]
            )
    rows.insert(0, ['project_key'] + headers)
    print_table(SingleTable(rows))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list(self, request):\n bug_types = BugType.objects.all()\n\n # Note the additional `many=True` argument to the\n # serializer. It's needed when you are serializing\n # a list of objects instead of a single object.\n serializer = BugTypeSerializer(\n bug_types, many=True, context={'request': request})\n return Response(serializer.data)", "def list_file_types(project_id, host, email, password, api_key):\n ListFileTypes(\n project_id,\n Credentials(email=email, password=password, api_key=api_key),\n Optionals(host=host),\n ).run()", "def issues_list(self, mar, request):\n if request.additionalProject:\n for project_name in request.additionalProject:\n project = self._services.project.GetProjectByName(\n mar.cnxn, project_name)\n if project and not permissions.UserCanViewProject(\n mar.auth.user_pb, mar.auth.effective_ids, project):\n raise permissions.PermissionException(\n 'The user %s has no permission for project %s' %\n (mar.auth.email, project_name))\n url_params = [(name, mar.GetParam(name)) for name in\n framework_helpers.RECOGNIZED_PARAMS]\n # TODO(jrobbins): This should go through work_env.\n pipeline = frontendsearchpipeline.FrontendSearchPipeline(\n mar.cnxn, self._services, mar.auth, [mar.me_user_id], mar.query,\n mar.query_project_names, mar.num, mar.start, url_params, mar.can,\n mar.group_by_spec, mar.sort_spec, mar.warnings, mar.errors,\n mar.use_cached_searches, mar.profiler, display_mode=mar.mode,\n project=mar.project)\n if not mar.errors.AnyErrors():\n pipeline.SearchForIIDs()\n pipeline.MergeAndSortIssues()\n pipeline.Paginate()\n else:\n raise endpoints.BadRequestException(mar.errors.query)\n\n issue_list = [\n api_pb2_v1_helpers.convert_issue(\n api_pb2_v1.IssueWrapper, r, mar, self._services)\n for r in pipeline.visible_results]\n return api_pb2_v1.IssuesListResponse(\n kind='monorail#issueList',\n totalResults=pipeline.total_count,\n items=issue_list)", "def get_jira_defects(project):\n return get_jira_issues('project = \"{}\" AND filter = 19589'.format(project))", "def list_issues(self, chat):\n issues = self.url_handler.get_json_from_url(constants.URL_GITHUB)\n msg = ''\n msg += '\\U0001F4CB Issues List\\n\\n'\n for aux in issues:\n msg += \"[[{}]] - {}\\n\\n\".format(str(aux['number']), aux['title'])\n\n self.url_handler.send_message(msg, chat)", "def get_queryset(self):\n queryset = Issue.objects.filter(project_id=self.project.pk)\n return queryset", "def report_types():\n return [ReportClass for name, ReportClass in REPORT_REGISTRY.items() if name != \"BaseReport\"]", "def view_all(request, index_call=False):\n closed = request.GET.get('closed', '')\n if closed in ('0', 'false'):\n closed = False\n elif closed in ('1', 'true'):\n closed = True\n elif index_call:\n # for index we display only open issues by default\n closed = False\n else:\n closed = None\n\n nav_parameters = {}\n if closed is not None:\n nav_parameters['closed'] = int(closed)\n\n # This uses eventual consistency and cannot be made strongly consistent.\n query = models.Issue.query(\n models.Issue.private == False).order(-models.Issue.modified)\n if closed is not None:\n # return only opened or closed issues\n query = query.filter(models.Issue.closed == closed)\n\n return _paginate_issues(reverse(view_all),\n request,\n query,\n 'all.html',\n extra_nav_parameters=nav_parameters,\n extra_template_params=dict(closed=closed))", "def view_all(request, index_call=False):\n closed = request.GET.get('closed', '')\n if closed in ('0', 'false'):\n closed = False\n elif closed in ('1', 'true'):\n 
closed = True\n elif index_call:\n # for index we display only open issues by default\n closed = False\n else:\n closed = None\n\n nav_parameters = {}\n if closed is not None:\n nav_parameters['closed'] = int(closed)\n\n query = models.Issue.query(\n models.Issue.private == False).order(-models.Issue.modified)\n if closed is not None:\n # return only opened or closed issues\n query = query.filter(models.Issue.closed == closed)\n\n return _paginate_issues(reverse(view_all),\n request,\n query,\n 'all.html',\n extra_nav_parameters=nav_parameters,\n extra_template_params=dict(closed=closed))", "def allUnresolved(request, page=1):\n objects = im.Issue.objects.filter(resolved_state__isnull=True).reverse()\n \n \n args = utils.generatePageList(request, objects, page)\n args['issues'] = args['objects']\n \n args['no_results'] = args['page'].object_list.count() < 1\n\n return render_to_response(\"issue_list.html\", args,\n context_instance=RequestContext(request))", "def list(self, *args, **kwargs):\n projects = Project.objects.all()\n return self.list_by(projects, self.serializer_class)", "def list_issues(self, interval: str, threat_status: str = None, threat_type: str = None) -> dict:\n params = remove_empty_elements({\"interval\": interval,\n \"threatStatus\": threat_status,\n \"threatType\": threat_type,\n \"format\": \"json\"})\n return self.http_request(\"GET\", '/siem/issues', params=params)", "async def incidentTypes(self, includeHidden: bool = False) -> Iterable[str]:", "def get_jira_issues(query):\n jira_issues = []\n defects = []\n count, maxlen = 0, 1\n while count < maxlen:\n issues = jira_client.search_issues(query, startAt=count, maxResults=50, expand='changelog')\n jira_issues.extend(issues)\n count = len(jira_issues)\n maxlen = issues.total\n\n return jira_issues", "def getTypesList():\n return Gw2Spidy._request('types')['results']", "def by_type(self, types=None):\n return self.describe(only_type=types)", "def get_issues(request, project):\n\n try:\n api_response = requests.get(constants.GITHUB_API_GET_ISSUES_URL.format(project_name=project))\n api_response_json = api_response.json()\n if api_response.status_code == 404:\n error_message = \"Repository does not exist\"\n return Response(error_message, status=status.HTTP_404_NOT_FOUND)\n if api_response.status_code == 401:\n raise Exception(\"Authentication fails. 
Invalid github access token.\")\n response = []\n for issue in api_response_json:\n labels_length = len(issue['labels'])\n tags = []\n # Making custom dictionary for tags\n for i in range(0, labels_length):\n # Searching inside \"labels\" key for tag_name\n for tag, tag_name in issue[\"labels\"][i].items():\n if tag in [\"name\"]:\n label = tag_name\n tags.append(label)\n result = IssueResponse(\n title=issue['title'],\n created_at=issue['created_at'],\n comments=issue['comments'],\n issue_number=issue['number'],\n repository_url=issue['repository_url'],\n labels=tags\n )\n result_as_json = result.to_json()\n response.append(result_as_json)\n\n except Exception:\n return DOWNSTREAM_ERROR_RESPONSE\n\n return Response(response)", "async def showIncidentTypes(self, incidentTypes: Iterable[str]) -> None:", "def listFeaturableContentTypes():", "def report(issues, show_urls=False):\r\n # titles may have unicode in them, so we must encode everything below\r\n if show_urls:\r\n for i in issues:\r\n role = 'ghpull' if 'merged' in i else 'ghissue'\r\n print('* :%s:`%d`: %s' % (role, i['number'],\r\n i['title'].encode('utf-8')))\r\n else:\r\n for i in issues:\r\n print('* %d: %s' % (i['number'], i['title'].encode('utf-8')))", "def test_issue_list_issues(self):\n pass", "def all_issues_page(self):\n # categories\n categories = model.Session.query(\n func.count(issuemodel.Issue.id).label('issue_count'),\n issuemodel.Issue.issue_category_id)\\\n .filter(issuemodel.Issue.resolved == None)\\\n .group_by(issuemodel.Issue.issue_category_id)\n\n c.categories = []\n c.pkg_names = {}\n for t in categories:\n tc = issuemodel.IssueCategory.get(t.issue_category_id)\n tc.issue_count = t.issue_count\n\n # get issues items for each category\n tc.issues = model.Session.query(issuemodel.Issue).filter(issuemodel.Issue.resolved == None)\\\n .filter(issuemodel.Issue.issue_category_id == t.issue_category_id) \\\n .order_by(issuemodel.Issue.created.desc())\n\n for issues in tc.issues:\n if issues.package_id:\n c.pkg_names[issues.package_id] = model.Package.get(issues.package_id).name\n c.categories.append(tc)\n # sort into alphabetical order\n c.categories.sort(key = lambda x: x.name)\n return render(\"issues/all_issues.html\")", "def standard_type_list(request):\n from .settings import STD_TYPE_SLUG_MAP\n context = {'standard_types': STD_TYPE_SLUG_MAP}\n return TemplateResponse(request, 'curricula/standard_type_list.html', context)", "def type_list():\n for type_ in orm.DataFlagType.select():\n click.echo(type_.name)", "def search_pulls(self, query=None, sort=None, order=None):\n base_q = \"type:pr\"\n\n if query is None:\n query = base_q\n else:\n query = \"{} {}\".format(query, base_q)\n\n return self.search_issues(query, sort, order)", "def list_issues(self, jira_con, search_jql):\n assert jira_con, \"Requires jira_con\"\n assert search_jql, \"Requires search_jql\"\n assert self.log\n dict_keys = ('name', 'percent', 'watchCount', 'votes', 'progress', 'value')\n\n issues = jira_con.search_issues(search_jql + ' order by issue')\n self.log.info('\\nResults for {}:'.format(search_jql))\n for issue in issues:\n s = str(issue)\n for key, value in issue.raw['fields'].iteritems():\n if value:\n found = False\n if type(value) is not dict:\n found = True\n s += ', ' + key + '=' + unicode(value)\n else:\n for k in dict_keys:\n if k in value:\n found = True\n s += ', ' + key + '=' + str(value[k])\n if not found:\n s += ', ' + key + '=(' + unicode(type(value)) + ') ' + str(value)\n self.log.info(s)\n return issues", "def 
ntypes(self): # -> list[str]:\n ...", "def get_issues(project=\"nipy/nitime\", state=\"closed\", pulls=False):\r\n which = 'pulls' if pulls else 'issues'\r\n url = \"https://api.github.com/repos/%s/%s?state=%s&per_page=%i\" % (project, which, state, PER_PAGE)\r\n return get_paged_request(url)", "def visit_all_issues_in_list(self, issues):\n for issue in issues:\n self.driver.implicitly_wait(3)\n self.driver.get(issue)\n config_type_text = self.driver.find_element_by_xpath(\"/html/body/b-service-bootstrap/\"\\\n \"app-root/div[7]/div/div/edit-issue-page/b-resolving-issue-references/div[2]/div[1]/\"\\\n \"div[3]/div/div/div[2]/div[2]/div[3]/div/div[1]/div/span/span[6]/span/span/a\").text\n\n source_html = self.driver.page_source\n soup = BeautifulSoup(source_html, \"html.parser\")\n\n advanced_fields = {}\n advanced_fields[\"Issue Id\"] = issue.replace(\"https://b.corp.google.com/issues/\", \"\")\n reporter_tag = soup.find(\"div\", \"bv2-issue-metadata-field-inner \"\\\n \"bv2-issue-metadata-field-reporter\")\n reporter = reporter_tag[\"aria-label\"].replace(\n \" value is \", \"\\n\").split(\"\\n\")\n advanced_fields[reporter[0]] = reporter[1]\n assignee_tag = soup.find(\"div\", \"bv2-issue-metadata-field-inner bv2-issue-metadata-\"\\\n \"field-assignee\")\n assignee = assignee_tag[\"aria-label\"].replace(\n \" value is \", \"\\n\").split(\"\\n\")\n if assignee[1] != \"empty\":\n advanced_fields[assignee[0]] = assignee[1]\n\n if \"EnqueueRule\" in config_type_text:\n config_type = \"EnqueueRules\"\n elif \"RoutingTargets\" in config_type_text:\n config_type = \"RoutingTargets\"\n elif \"QueueInfo\" in config_type_text:\n config_type = \"QueueInfo\"\n\n advanced_fields[\"Config Type\"] = config_type\n\n if config_type == \"QueueInfo\":\n if assignee[1] != constants.AUTOMATION_USER:\n continue\n\n self.scrape_queue_info(advanced_fields)\n elif config_type == \"RoutingTargets\":\n if assignee[1] != constants.AUTOMATION_USER:\n continue\n self.scrape_routing_targets(advanced_fields)\n elif config_type == \"EnqueueRules\":\n self._message_parsing_util.parse_page(soup, reporter[1], issue)", "def test_ticket_type_list_ok(self):\n test_name = sys._getframe().f_code.co_name\n rv, output = self._execute('ticket_type list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)" ]
[ "0.580682", "0.57997316", "0.55276394", "0.53734636", "0.53584605", "0.53383344", "0.5332019", "0.5323111", "0.53199", "0.52730525", "0.5229358", "0.5195646", "0.51418656", "0.51354766", "0.50994647", "0.5088411", "0.50732434", "0.5071402", "0.50672746", "0.5037107", "0.5009264", "0.49896944", "0.49826658", "0.49297446", "0.49249965", "0.4911765", "0.49006778", "0.48918867", "0.48849106", "0.4878309" ]
0.715429
0
Lists all comments for a given issue key.
def comments(accountable):
    comments = accountable.issue_comments()
    headers = sorted(['author_name', 'body', 'updated'])
    if comments:
        rows = [[v for k, v in sorted(c.items()) if k in headers]
                for c in comments]
        rows.insert(0, headers)
        print_table(SingleTable(rows))
    else:
        click.secho('No comments found for {}'.format(
            accountable.issue_key
        ), fg='red')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_comments(self, issue_id):\n data = self._get(\"/issues/{}/comments\".format(issue_id))\n comments = []\n for item in data:\n comments.append(\n Comment(item['user']['login'], item['body'])\n )\n return comments", "def problem_comments(self, identifier):\n return self._get(\"problems/%d/comments\" % identifier).json()", "def get_comments(self, key):\n if key not in self._config:\n raise ValueError(\"%s not in self.config\"%key)\n return self._config[key][\"comments\"]", "def list(self, number, user=None, repo=None):\n request = self.make_request('issues.comments.list', user=user,\n repo=repo, number=number)\n return self._get_result(request)", "def get_comments_for_issue(owner, repo, issue_number, session=None):\n url = (\n f'{GITHUB_API_URL}/repos/{owner}/{repo}/issues/{issue_number}/comments'\n )\n return get_one_item_at_a_time(url, session=session)", "def test_issue_get_comments(self):\n pass", "def get_cohorted_commentables(course_key):\r\n\r\n course = courses.get_course_by_id(course_key)\r\n\r\n if not course.is_cohorted:\r\n # this is the easy case :)\r\n ans = []\r\n else:\r\n ans = course.cohorted_discussions\r\n\r\n return ans", "def _get_comments(self):\n if not hasattr(self, 'id'):\n raise BadReference('No matching issue on disk')\n return filter(lambda x: len(x) == 40, os.listdir(self.paths['comments']))", "def all_user_comments(username):\n return commentslist", "def issues_comments_list(self, mar, request):\n issue = self._services.issue.GetIssueByLocalID(\n mar.cnxn, mar.project_id, request.issueId)\n comments = self._services.issue.GetCommentsForIssue(\n mar.cnxn, issue.issue_id)\n comments = [comment for comment in comments if not comment.approval_id]\n visible_comments = []\n for comment in comments[\n request.startIndex:(request.startIndex + request.maxResults)]:\n visible_comments.append(\n api_pb2_v1_helpers.convert_comment(\n issue, comment, mar, self._services, mar.granted_perms))\n\n return api_pb2_v1.IssuesCommentsListResponse(\n kind='monorail#issueCommentList',\n totalResults=len(comments),\n items=visible_comments)", "def comments(self):\r\n return IssueComments(self)", "def comments(self):\r\n return IssueComments(self)", "def list_issues(self, chat):\n issues = self.url_handler.get_json_from_url(constants.URL_GITHUB)\n msg = ''\n msg += '\\U0001F4CB Issues List\\n\\n'\n for aux in issues:\n msg += \"[[{}]] - {}\\n\\n\".format(str(aux['number']), aux['title'])\n\n self.url_handler.send_message(msg, chat)", "def comments(self, request, pk=None):\n post = self.get_object()\n comments = Comment.objects.filter(post=post).order_by('created_at')\n serializer = PostCommentsSerializer(comments, many=True)\n return Response(serializer.data, status.HTTP_200_OK)", "def get_comment(self, index):\r\n\r\n # Get request to get all the comments for all exercises\r\n comments = requests.get(API.url_comment, headers = self.headers).json()\r\n # Parse the response\r\n for my_comment in comments:\r\n if my_comment['id'] == index:\r\n print(my_comment['comment'])", "def get_comments(self):\n\t\treturn self._client.get_comments(self)", "def all_user_comments(username):\n # comment = [\n # comment for comment in commentslist if comment[\"username\"] == username\n # ]\n return commentslist", "def get_queryset(self):\n queryset = Comment.objects.filter(issue_id=self.issue.pk)\n return queryset", "def test_issue_get_comment(self):\n pass", "def test_issue_get_repo_comments(self):\n pass", "def comment_issue(self, msg, issue_id, comment):\n self._asset_bind(msg)\n client = 
self._github_operator(msg)\n comment_obj = client.issue_comment(task_repository_name(), issue_id, comment)\n yield comment_obj.html_url", "def get_repo_issue_comments(owner, repo, session=None):\n url = f'{GITHUB_API_URL}/repos/{owner}/{repo}/issues/comments'\n return get_one_item_at_a_time(url, session=session)", "def get_comments(self):\n raise NotImplementedError", "def get_comments(self, asset_id):\n endpoint = '/assets/{}/comments'.format(asset_id)\n return self._api_call('get', endpoint)", "def test_get_comments():\n comments = list(get_comments(TEST_SUBREDDIT, TEST_START_DATE, TEST_END_DATE, TEST_MAX))\n\n # prints the dictionary of variables for each comment\n for x in comments:\n print(x.d_)", "def get_comments(self, project, story):\n ret_val = []\n resource = \"projects/{0:d}/stories/{1:d}/comments\".format(project.id,\n story.id)\n params = {\"fields\": Comment.FIELDS}\n comments = self._request(\"get\", resource, params=params)\n\n for comment in comments:\n ret_val.append(Comment(comment))\n\n return ret_val", "def get_discussion_comments(self, post_id):\n query = \"SELECT users.Username , discussionreplies.* FROM discussionreplies INNER JOIN users ON (discussionreplies.Users_idUsers=users.idUsers) WHERE Discussions_idDiscussions={}\".format(post_id)\n cursor = DB.instance.connection.cursor()\n cursor.execute(query)\n return cursor.fetchall()", "def comments(self):\n return self.get_queryset().filter(content_type__model='comment').order_by('-comments__createdAt')", "def comments(self) -> list:\n return self._node[\"app_data\"][\"ui_data\"].get(\"comments\", [])", "def _commentsInThisFunction(self):\n show_unique_c = self.config.display_unique_comments\n\n msg = \"Searching comments within function '\" + misc.get_function_name() + \"'\"\n self._console_output(msg)\n\n comment_list = self.ba.comments_in_function()\n\n # Found any comment at all?\n nrows = len(comment_list)\n if not nrows:\n self._console_output(\"[!] No comments found\", err = True)\n return\n\n self.table.setColumnCount(2)\n self.table_label.setText(\"Comments within current function\")\n self.table.setHorizontalHeaderLabels((\"Address\", \"Comments\"))\n self.table.clearContents()\n self.table.setRowCount(0)\n\n # Fill with contents\n displayed_comments = []\n\n idx = 0\n for (addr, comment) in comment_list:\n if show_unique_c and comment in displayed_comments:\n continue\n\n displayed_comments.append(comment)\n\n self.table.insertRow(idx)\n addr_item = QTableWidgetItem(\"%08x\" % addr)\n addr_item.setFlags(addr_item.flags() ^ QtCore.Qt.ItemIsEditable)\n comment_item = QTableWidgetItem(comment)\n\n self.table.setItem(idx, 0, addr_item)\n self.table.setItem(idx, 1, comment_item)\n\n idx += 0" ]
[ "0.6735813", "0.63723326", "0.6294947", "0.62622994", "0.61518073", "0.6067908", "0.5866216", "0.5846625", "0.5813753", "0.58126175", "0.5763556", "0.5763556", "0.56536496", "0.5609131", "0.55674785", "0.55547565", "0.55169636", "0.54941475", "0.547762", "0.5461454", "0.5455961", "0.54529893", "0.5437487", "0.5428668", "0.541035", "0.54099125", "0.5406168", "0.54006106", "0.5382159", "0.53813607" ]
0.68489426
0
Add a comment to the given issue key. Accepts a body argument to be used as the comment's body.
def addcomment(accountable, body):
    r = accountable.issue_add_comment(body)
    headers = sorted(['author_name', 'body', 'updated'])
    rows = [[v for k, v in sorted(r.items()) if k in headers]]
    rows.insert(0, headers)
    print_table(SingleTable(rows))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_comment_to_issue(repo, issue_number, body, allow_duplicates):\n found = False\n issue = repo.issue(issue_number)\n\n if not allow_duplicates:\n for comment in issue.iter_comments():\n if comment.body == body:\n found = True\n break\n\n if allow_duplicates or not found:\n success = issue.create_comment(body)\n if success:\n click.echo(\"The comment was successfully posted to the issue.\")\n else:\n click.echo(\"There was a failure commenting on the issue.\")\n raise SystemExit(1)\n else:\n click.echo(\"An identical comment was found, skipping posting comment.\")", "def create_comment(self, body):\n return self.client.request(\n \"{}/issues/{}/comments\".format(self.repo.base_path, self.num),\n params={\"body\": body},\n method=\"POST\"\n )", "def __add_comment(self, issue_id, comment):\n import httplib2\n http = httplib2.Http() \n response, content = http.request(\n uri=self.__issue_url % int(issue_id),\n method='PUT',\n body=comment,\n headers={\n 'X-Redmine-API-Key': self.__api_key,\n 'Content-type': 'application/json'\n }\n )\n print(response)\n print(content)", "def problem_comments_append(self, identifier, comment, html=None):\n params = {\"text\": comment}\n if html is not None:\n params[\"html\"] = html\n \n self._post(\"problems/%d/comments\" % identifier, json=params)", "def add_comment_to_issue(self, issue, comment, visibility=None):\r\n self.jira.add_comment(issue=issue, body=comment)", "def add_comment(self, issue, comment):\n return self.get_jira().add_comment(issue, comment)", "def add_comment(cls, post_id, user_id, content):\n c = cls(parent=comment_key(),\n post_id=post_id,\n user_id=user_id,\n content=content)\n c.put()", "def post_comment(self, entry, body, **args):\n args.update(entry=entry, body=body)\n return self.fetch(\"/comment\", post_args=args)", "def comment_issue(self, msg, issue_id, comment):\n self._asset_bind(msg)\n client = self._github_operator(msg)\n comment_obj = client.issue_comment(task_repository_name(), issue_id, comment)\n yield comment_obj.html_url", "def on_issue_comment(self, payload):\n pass", "def comment(self, body, incident_id):\n payload = {\"comment\":{\"body\":body, \"is_private\":\"false\"}}\n response = self.session.post(\n \"{0}/incidents/{1}/comments.json\".format(self.uri, incident_id),\n json=payload\n )\n return response.status_code", "def _apply_comment(self, iid, comment):\n data = {\"body\" : comment._body}\n resp = self._post(\n self._base + \"/issues/{}/comments\".format(iid),\n data=self._format_data(data))", "def add_comment(cls, uid, person, email, body):\n\n try:\n qs = cls.query.add_comment(uid=uid, person=person, email=email, body=body)\n record = cls.engine.query(qs, fetch_opts='single')\n\n except (DBAPIError, SnaqlException) as error:\n raise Exception(error.args[0])\n\n return record if record else None", "def cli(ctx, comment, metadata=\"\"):\n return ctx.gi.cannedcomments.add_comment(comment, metadata=metadata)", "def test_issue_create_comment(self):\n pass", "def edit_comment(self, id, body, **args):\n args.update(id=id, body=body)\n return self.fetch(\"/comment\", post_args=args)", "def comment(self, msg):\n\t\tself._client.add_comment(self, msg)", "def _add_duplicate_comment(\n issue: Issue, *, image: str, repo: str, run: str, stacktrace: str\n) -> IssueComment:\n body = (\n f\"Probably duplicate error:\\n\"\n f\"{_report_body(image=image, repo=repo, run=run, stacktrace=stacktrace)}\"\n )\n return issue.create_comment(body)", "def comment(args):\n message = args.message\n if message is None:\n title = 
gh.get_issue(owner, repo, args.issue)['title']\n last_comment = '<No previous comments>'\n try:\n last_comment = gh.get_comments(owner, repo, args.issue)[0]['body']\n except IndexError:\n pass\n\n message = get_text_editor_input('Please enter the message for your '\n 'comment. Remember that comments '\n 'support GitHub Flavored Markdown '\n '(GFM). An empty message aborts the '\n 'operation.\\n\\n'\n '#%s %s\\n' % (args.issue, title) +\n 'Last comment:\\n' + last_comment)\n if message == '':\n print('Aborting comment due to empty message.')\n sys.exit(1)\n\n gh.post_comment(owner, repo, args.issue, message)\n sys.exit(0)", "def add_comment(self, text, displayed, username, time,\n proposal, node_id, parent_id, moderator):\n raise NotImplementedError()", "def add_comment_to_announcement():\n vars = request.vars\n logger.info(\"vars.comment_text: %r\" % (vars.comment_text))\n comment_id = db.Comments.insert(\n comment_text = vars.comment_text,\n score = 1,\n ann_id= vars.ann_id,\n )\n comment = db.Announcements(comment_id)\n\n logger.info(\"api:add_comment_to_announcement ==> comment= %r\" % (comment))\n\n return response.json(comment)", "def __post_ticket_comment(ticket_id, user_id, comments, token):\n\n user = user_profile(user_id)\n display_name = user['user']['profile']['real_name']\n rt_api.ticket_comment(ticket_id, comments + \"\\n\\n- \" + display_name, True, token=token)", "def setCommentField(self, key, value):\n if not key:\n raise KeyError()\n \n comment= \"\"\n if value:\n comment= \"{0}='{1}'\".format(key, value) \n\n if not self.comments:\n self.comments= comment\n return\n \n pattern= re.compile(key + r\"s*=\\s*'.+'\")\n \n match= pattern.search(self.comments)\n \n if match:\n #key exists -> replace\n self.comments= ( self.comments[0:match.start(0)].strip()\n + comment\n + self.comments[match.end(0):] ).strip()\n else:\n self.comments+= \"; \" + comment", "def add_comment(self, checkin_id: str, comment: str) -> Dict:\n method = \"checkin/addcomment/\" + checkin_id\n auth = self._get_access_token()\n if len(comment) > 140:\n raise ValueError(\n f\"Check-in comment is {len(comment)} characters whereas Untappd only supports comments up to 140 characters\"\n )\n params = {\"comment\": comment}\n return self._do_post(method, auth, params)", "def test_issue_edit_comment(self):\n pass", "def add_comment() -> str:\n if \"markdown\" in request.form:\n if \"file\" in request.form:\n comment = Comment(\n markdown=request.form[\"markdown\"],\n submission_id=Submission.query.filter(\n Submission.filepath.contains(request.form[\"file\"])\n )\n .first()\n .id,\n cell_id=request.form[\"cell_id\"] if \"cell_id\" in request.form else None,\n user=UserModel.get_by_token(session[\"token\"]),\n )\n # If not cell_id this is a general comment\n comment.save()\n else:\n return \"Missing file or cell_id\", 400\n else:\n return \"Missing markdown\", 400\n\n comment_maker = get_template_attribute(\"_macros.html\", \"comment_block\")\n return comment_maker(comment)", "def createcomment(request, pk):\n issue = get_object_or_404(Issue, pk=pk)\n if request.method == \"POST\":\n form = CommentCreationForm(request.POST)\n if form.is_valid():\n comment = form.save(commit=False)\n comment.issue = issue\n comment.author = request.user\n comment.created_at = timezone.now()\n comment.save()\n return redirect('office:issue', pk=pk)\n else:\n form = CommentForm()\n return render(request, 'blog/add_comment_to_post.html', {'form': form})", "def comment(self, comment_id):\r\n return IssueComment(self, comment_id)", "def 
comment(self, comment_id):\r\n return IssueComment(self, comment_id)", "def create_or_update_comment(comment, message, repo, pr_number, token):\n # repo is in the form of \"org/repo\"\n if comment is not None:\n print(\"updating existing comment\")\n # API doc: https://docs.github.com/en/rest/issues/comments?apiVersion=2022-11-28#update-an-issue-comment # noqa\n response = requests.patch(\n f\"https://api.github.com/repos/{repo}/issues/comments/{comment['id']}\",\n headers=get_headers(token),\n json={\"body\": message},\n )\n else:\n print(\"creating new comment\")\n # API doc: https://docs.github.com/en/rest/issues/comments?apiVersion=2022-11-28#create-an-issue-comment # noqa\n response = requests.post(\n f\"https://api.github.com/repos/{repo}/issues/{pr_number}/comments\",\n headers=get_headers(token),\n json={\"body\": message},\n )\n\n response.raise_for_status()" ]
[ "0.682299", "0.6740081", "0.6561953", "0.6297364", "0.6274821", "0.6229835", "0.61394274", "0.5977267", "0.5953699", "0.5946078", "0.58701116", "0.5741862", "0.57191175", "0.56251615", "0.56233865", "0.5619574", "0.5502269", "0.5478731", "0.54059154", "0.5405601", "0.5395265", "0.5368913", "0.53544855", "0.53231955", "0.53193116", "0.5270235", "0.5265588", "0.52651983", "0.52651983", "0.52627563" ]
0.7143064
0
List all worklogs for a given issue key.
def worklog(accountable):
    worklog = accountable.issue_worklog()
    headers = ['author_name', 'comment', 'time_spent']
    if worklog:
        rows = [[v for k, v in sorted(w.items()) if k in headers]
                for w in worklog]
        rows.insert(0, headers)
        print_table(SingleTable(rows))
    else:
        click.secho(
            'No worklogs found for {}'.format(accountable.issue_key),
            fg='red'
        )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def query_worklog(self, emp_id=None):\n\n query = \"select * from worklog\"\n\n try:\n self.dbCursor.execute(query)\n return self.dbCursor.fetchall()\n except mysql.connector.Error as err:\n ErrorMessageWindow(err)", "def get_logs(job_key):\n job = Job.fetch(job_key, connection=conn)\n if job.is_finished:\n logs = job.result\n elif job.is_failed:\n logs = job.exc_info\n else:\n logs = \"Task is still running\"\n return str(logs), 200", "def get_job_logs(self, params, ujs_proxy=None):\n if ujs_proxy is None:\n ujs_proxy = self.__proxy_client()\n return ujs_proxy.get_job_logs(params)", "def query_mw(key: str) -> List[MWEntry]:\n\n engine = create_engine(db.DATABASE_URI)\n with engine.connect() as conn:\n table = db.dict_entries\n q = select([table]).where(table.c.key == key)\n rows = conn.execute(q).fetchall()\n\n # Manual group-by. TODO: can this be done in sqla?\n res = []\n for row in rows:\n payload = transform_mw(row.value.decode(\"utf-8\"))\n res.append(MWEntry(content=payload))\n return res", "def list_issues(self, chat):\n issues = self.url_handler.get_json_from_url(constants.URL_GITHUB)\n msg = ''\n msg += '\\U0001F4CB Issues List\\n\\n'\n for aux in issues:\n msg += \"[[{}]] - {}\\n\\n\".format(str(aux['number']), aux['title'])\n\n self.url_handler.send_message(msg, chat)", "def push_worklogs(\n entries: Sequence[TogglEntry], toggl_token: str\n) -> Optional[str]:\n for index, worklog in enumerate(entries):\n logger.info('pushing worklog {}/{}'.format(index + 1, len(entries)))\n\n payload = json.dumps(asdict(worklog), cls=DateTimeEncoder)\n\n response = requests.post(\n 'https://www.toggl.com/api/v8/time_entries',\n data=payload,\n headers={'Content-Type': 'application/json'},\n auth=(toggl_token, 'api_token'),\n )\n\n try:\n response.raise_for_status()\n except HTTPError as err:\n assert isinstance(err.response.text, str)\n return err.response.text\n except RequestException:\n return traceback.format_exc()\n\n return None", "def get_logs(self, job_id):\n\n # Get the logstream name\n response = self.batch_client.describe_jobs(jobs=[job_id])\n logstream = response[\"jobs\"][0][\"container\"][\"logStreamName\"]\n\n # Keep a list with the log messages\n logs = []\n\n # Get the logs\n response = self.logs_client.get_log_events(\n logGroupName=\"/aws/batch/job\", logStreamName=logstream\n )\n\n # Add to the list\n logs.extend([l[\"message\"] for l in response[\"events\"]])\n\n # Keep getting more pages\n while response[\"nextForwardToken\"] is not None:\n\n # Keep track of the last token used\n last_token = response[\"nextForwardToken\"]\n\n # Get the next page\n response = self.logs_client.get_log_events(\n logGroupName=\"/aws/batch/job\",\n logStreamName=logstream,\n nextToken=last_token,\n )\n\n # If the token is the same, we're done\n if response[\"nextForwardToken\"] == last_token:\n response[\"nextForwardToken\"] = None\n else:\n # Otherwise keep adding to the logs\n logs.extend([l[\"message\"] for l in response[\"events\"]])\n\n return logs", "def get_logs_list():\n # reads the session\n session = request.args.get('session', type=str)\n\n available_keys = []\n\n if check_session_validity(session):\n user = get_user_from_session(session)\n\n all_keys = lh.get_handlers().keys()\n\n for key in all_keys:\n if lh.check_user_log_visibility(user, key):\n available_keys.append(key)\n\n return jsonify({\"logs\": available_keys})", "def _get_logs(self):\n contents = dict()\n contents[\"Scheduler\"] = self._parse_log_content(\n self.scheduler.client.get_scheduler_logs()\n )\n log_workers 
= self.scheduler.client.get_worker_logs()\n for i, (_, worker_content) in enumerate(log_workers.items()):\n contents[f\"Worker-{i}\"] = self._parse_log_content(worker_content)\n return contents", "def issue(ctx, accountable, issue_key):\n accountable.issue_key = issue_key\n if not ctx.invoked_subcommand:\n issue = accountable.issue_meta()\n headers = issue.keys()\n rows = [headers, [v for k, v in issue.items()]]\n print_table(SingleTable(rows))", "def list_issues(self, jira_con, search_jql):\n assert jira_con, \"Requires jira_con\"\n assert search_jql, \"Requires search_jql\"\n assert self.log\n dict_keys = ('name', 'percent', 'watchCount', 'votes', 'progress', 'value')\n\n issues = jira_con.search_issues(search_jql + ' order by issue')\n self.log.info('\\nResults for {}:'.format(search_jql))\n for issue in issues:\n s = str(issue)\n for key, value in issue.raw['fields'].iteritems():\n if value:\n found = False\n if type(value) is not dict:\n found = True\n s += ', ' + key + '=' + unicode(value)\n else:\n for k in dict_keys:\n if k in value:\n found = True\n s += ', ' + key + '=' + str(value[k])\n if not found:\n s += ', ' + key + '=(' + unicode(type(value)) + ') ' + str(value)\n self.log.info(s)\n return issues", "def test_get_work_logs_one_page(self):\n with open(\"work_logs_first_issue_one_page.json\", \"r\") as first_issue_file:\n mock_response_first_issue = first_issue_file.read()\n\n with open(\"work_logs_second_issue_one_page.json\", \"r\") as second_issue_file:\n mock_response_second_issue = second_issue_file.read()\n\n issues = [Issue(10005, \"MYB-5\", \"Summary of issue MYB-5\", \"MYB-3\", \"Summary of the parent issue of MYB-5\", 3600, 900, datetime(2020, 1, 20)),\n Issue(10004, \"MYB-4\", \"Summary of issue MYB-4\", \"MYB-3\", \"Summary of the parent issue of MYB-4\", 7200, 600, None)]\n\n with requests_mock.Mocker() as m:\n m.register_uri('GET', '/rest/api/2/issue/MYB-5/worklog/', text=mock_response_first_issue)\n m.register_uri('GET', '/rest/api/2/issue/MYB-4/worklog/', text=mock_response_second_issue)\n work_logs, issues = jiratimereport.get_work_logs(\"https://jira_url\", \"user_name\", \"api_token\",\n \"2020-01-10\", \"2020-01-20\", \"\", issues)\n\n work_logs_expected_result = [WorkLog(\"MYB-5\", datetime(2020, 1, 18), 3600, \"John Doe\"),\n WorkLog(\"MYB-5\", datetime(2020, 1, 18), 5400, \"John Doe\"),\n WorkLog(\"MYB-4\", datetime(2020, 1, 12), 3600, \"John Doe\")]\n\n self.assertListEqual(work_logs_expected_result, work_logs, \"Work Log lists are unequal\")\n\n issue_myb_5 = Issue(10005, \"MYB-5\", \"Summary of issue MYB-5\", \"MYB-3\", \"Summary of the parent issue of MYB-5\",\n 3600, 900, datetime(2020, 1, 20))\n issue_myb_5.issue_start_date = datetime(2020, 1, 18)\n issue_myb_4 = Issue(10004, \"MYB-4\", \"Summary of issue MYB-4\", \"MYB-3\", \"Summary of the parent issue of MYB-4\",\n 7200, 600, None)\n issue_myb_4.issue_start_date = datetime(2020, 1, 12)\n\n issues_expected_result = [issue_myb_5,\n issue_myb_4]\n\n self.assertListEqual(issues_expected_result, issues, \"Issue lists are unequal\")", "def fetch_log_entries(owner_account_id):\n batch_size = 500\n log_entries = []\n\n i = 0\n while True:\n i += 1\n skip = batch_size * (i - 1)\n top = batch_size\n\n resp = oauth.tapkey.get(f\"Owners/{owner_account_id}/LogEntries?\"\n f\"$skip={skip}&$top={top}&\"\n f\"$filter=logType eq 'Command' and command eq 'TriggerLock'&\"\n f\"$select=id,entryNo,lockTimestamp,receivedAt,boundLockId,boundCardId,contactId&\")\n\n if resp.status_code != 200:\n abort(500)\n\n 
next_log_entries = resp.json()\n\n if not isinstance(next_log_entries, list):\n abort(500)\n\n log_entries.extend(next_log_entries)\n\n if len(next_log_entries) < batch_size:\n break\n\n return log_entries", "def logs(self, **kwargs):\n return self.client.api.logs(self.id, **kwargs)", "def list_documents(self, report_type: Type, key: str = None) -> List[str]:\n documents = []\n collection = self.client.collection(f'{report_type}').list_documents()\n for document in collection:\n if key:\n if document.id == key:\n for _document in document.get().to_dict():\n documents.append(_document)\n else:\n documents.append(document.id)\n\n return documents", "def get_issue_url(self, key):\n raise NotImplementedError", "def getLogs():", "def getLogs():", "def get_jira_issues(query):\n jira_issues = []\n defects = []\n count, maxlen = 0, 1\n while count < maxlen:\n issues = jira_client.search_issues(query, startAt=count, maxResults=50, expand='changelog')\n jira_issues.extend(issues)\n count = len(jira_issues)\n maxlen = issues.total\n\n return jira_issues", "def list_logs(db: Session) -> List[Log]:\n return db.query(Log).all()", "def list_tagging_work_request_logs(self, work_request_id, **kwargs):\n resource_path = \"/taggingWorkRequests/{workRequestId}/logs\"\n method = \"GET\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"page\",\n \"limit\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"list_tagging_work_request_logs got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"workRequestId\": work_request_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n query_params = {\n \"page\": kwargs.get(\"page\", missing),\n \"limit\": kwargs.get(\"limit\", missing)\n }\n query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\"\n }\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n query_params=query_params,\n header_params=header_params,\n response_type=\"list[TaggingWorkRequestLogSummary]\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n query_params=query_params,\n header_params=header_params,\n response_type=\"list[TaggingWorkRequestLogSummary]\")", "def all_logs(self):\n return os.listdir(LOGS_BASE_PATH)", "def get_logs(self):\n if self.retrieved:\n raise errors.IllegalState('List has already been retrieved.')\n self.retrieved = True\n return objects.LogList(self._results, runtime=self._runtime)", "def GetLogs(self):\n stdout, _, _ = RunKubectlCommand(['logs', self.name])\n return stdout", "def view_logs(request):\n keyword = None\n num_logs = 0\n logs = LogEntry.objects.all()\n if request.POST:\n keyword = request.POST.get(\"search\")\n if keyword:\n logs = []\n for log in LogEntry.objects.all():\n if keyword in log.__str__() or keyword in log.user.__str__():\n 
logs.append(log)\n num_logs += 1\n\n return render(request, 'view_logs.html', {'logs': logs, \"keyword\": keyword, \"num_logs\": num_logs})", "def get_log_entries(self):\n if self.retrieved:\n raise errors.IllegalState('List has already been retrieved.')\n self.retrieved = True\n return objects.LogEntryList(self._results, runtime=self._runtime)", "def get_epic_children(self) -> list:\n\n children = [i['key'] for i in self.repo.api_call(requests.get, f\"search?jql=cf[10008]='{self.jira_key}'\")['issues']]\n return children", "def get_all_logs(directory, artifacts):\n log_files = {}\n if artifacts:\n dirs = [f.filename for f in view_base.gcs_ls('%s/artifacts' % directory)\n if f.is_dir]\n else:\n dirs = [directory]\n for d in dirs:\n log_files[d] = []\n for f in view_base.gcs_ls(d):\n log_name = regex.log_re.search(f.filename)\n if log_name:\n log_files[d].append(f.filename)\n return log_files", "def get_jira_issues(jira, username):\n exclude_stories = cfg.args.x\n epics_only = cfg.args.e\n all_status = cfg.args.all\n filename = cfg.args.file\n user = cfg.args.user\n last_comment = cfg.args.l\n\n issue_types = [\"Sub-task\", \"Epic\"]\n if not epics_only:\n issue_types.append(\"Initiative\")\n if not exclude_stories:\n issue_types.extend([\"Story\", \"Task\", \"Sub-task\", \"Bug\"])\n issue_type = \"issuetype in (%s)\" % \", \".join(issue_types)\n\n status = 'status in (\"In Progress\")'\n if all_status:\n status = \"status not in (Resolved, Closed)\"\n\n if user is None:\n user = \"currentUser()\"\n else:\n user = '\"%s\"' % add_domain(user)\n\n jql = \"%s AND assignee = %s AND %s\" % (issue_type, user, status)\n log.debug(jql)\n\n my_issues = jira.search_issues(jql)\n if my_issues.total > my_issues.maxResults:\n my_issues = jira.search_issues(jql, maxResults=my_issues.total)\n\n showdate = strftime(\"%Y-%m-%d\", gmtime())\n subject = \"Subject: [Weekly] Week ending \" + showdate + \"\\n\\n\"\n\n msg = get_header()\n if msg != \"\":\n msg += email_to_name(username) + \"\\n\\n\"\n\n f = open_file(filename)\n filename = f.name\n\n f.write(subject)\n\n f.write(msg)\n log.debug(\"Found issue:\")\n for issue in my_issues:\n log.debug(\"%s : %s\" % (issue, issue.fields.summary))\n\n if merge_issue_header():\n f.write(\n \"[%s%s%s]\\n\" % (issue, get_header_separator(), issue.fields.summary)\n )\n else:\n f.write(\"[%s]\\n\" % issue)\n f.write(\"# Header: %s\\n\" % issue.fields.summary)\n\n f.write(\"# Type: %s\\n\" % issue.fields.issuetype)\n f.write(\"# Status: %s\\n\" % issue.fields.status)\n f.write(get_extra_comments())\n if last_comment:\n write_last_jira_comment(f, jira, issue)\n f.write(\"\\n\")\n\n f.close()\n return (filename, my_issues)", "def GetLogs(self):\n raise NotImplementedError()" ]
[ "0.6104521", "0.5450961", "0.5429597", "0.54215986", "0.53740776", "0.53375506", "0.5177321", "0.51320475", "0.509252", "0.50396067", "0.50299364", "0.50039464", "0.49709633", "0.49424547", "0.49327973", "0.49153993", "0.4895989", "0.4895989", "0.48916838", "0.48571062", "0.4846445", "0.4812073", "0.4805137", "0.4799156", "0.479585", "0.47851825", "0.47837254", "0.47737795", "0.47677732", "0.47640085" ]
0.68508613
0
List all possible transitions for a given issue.
def transitions(accountable): transitions = accountable.issue_transitions().get('transitions') headers = ['id', 'name'] if transitions: rows = [[v for k, v in sorted(t.items()) if k in headers] for t in transitions] rows.insert(0, headers) print_table(SingleTable(rows)) else: click.secho( 'No transitions found for {}'.format(accountable.issue_key), fg='red' )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def transitions(self) -> List[Dict]:\n return []", "def transitions(self, from_state=None):\n return list(self.iter_transitions(from_state))", "def setup_transition_list():\n xn_list = []\n\n xn_list.append( Transition(3, 4, 2., 'left ejection') )\n xn_list.append( Transition(12, 2, 2., 'right ejection') )\n xn_list.append( Transition(19, 20, 2.e8, 'downward ejection, left') )\n xn_list.append( Transition(19, 24, 2.e8, 'downward ejection, right') )\n xn_list.append( Transition(28, 17, 1., 'upward ejection, left') )\n xn_list.append( Transition(28, 18, 1., 'upward ejection, right') )\n xn_list.append( Transition(11, 15, 3.0e7, 'demobilization (right wall)') )\n xn_list.append( Transition(13, 15, 3.0e7, 'demobilization (left wall)') )\n xn_list.append( Transition(29, 31, 2.0e6, 'demobilization (friction)') )\n xn_list.append( Transition(30, 31, 2.0e6, 'demobilization (friction)') )\n xn_list.append( Transition(1, 4, 3.0e8, 'leftward motion') )\n xn_list.append( Transition(8, 2, 3.0e8, 'rightward motion') )\n xn_list.append( Transition(20, 17, 2.0e6, 'upward motion') )\n xn_list.append( Transition(24, 18, 2.0e6, 'upward motion') )\n xn_list.append( Transition(18, 24, 2.0e8, 'downward motion') )\n xn_list.append( Transition(17, 20, 2.0e8, 'downward motion') )\n\n if _DEBUG:\n print()\n print('setup_transition_list(): list has',len(xn_list),'transitions:')\n for t in xn_list:\n print(' From state',t.from_state,'to state',t.to_state,'at rate',t.rate,'called',t.name)\n\n return xn_list", "def getListOfTransitions(self, *args):\n return _libsbml.QualModelPlugin_getListOfTransitions(self, *args)", "def get_transitions(self):\n transitions = []\n for row in self.states:\n t_row = []\n for column in self.states:\n t_row.append([row, column])\n transitions.append(t_row)\n return sorted(transitions)", "def _iter_transitions_all_(self):\n for state in self.iter_states():\n for t in state.transitions:\n yield t", "def transitions(self) -> typing.Optional[typing.List[\"Transition\"]]:\n return self._values.get('transitions')", "def get_active_transitions(self):\n return [t for st in self.get_active_states() for t in st.transitions]", "def iter_transitions(self, from_state=None):\n if from_state is None:\n return self._iter_transitions_all_()\n else:\n return iter(self.state(from_state).transitions)", "def action_sequence(node):\n actions = []\n while node.previous:\n actions.append(node.action)\n node = node.previous\n return actions[::-1]", "def _find_transitions(self, tz: Any) -> List[TransitionTimes]:\n # TODO: Do I need to start 1 day before Jan 1 UTC, in case the\n # local time is ahead of UTC?\n dt = datetime(self.start_year, 1, 1, 0, 0, 0, tzinfo=pytz.utc)\n dt_local = dt.astimezone(tz)\n\n # Check every 'sampling_interval' hours for a transition\n transitions: List[TransitionTimes] = []\n while True:\n next_dt = dt + self.sampling_interval\n next_dt_local = next_dt.astimezone(tz)\n if next_dt.year >= self.until_year:\n break\n\n # Look for a UTC or DST transition.\n if self.is_transition(dt_local, next_dt_local):\n # print(f'Transition between {dt_local} and {next_dt_local}')\n dt_left, dt_right = self.binary_search_transition(\n tz, dt, next_dt)\n dt_left_local = dt_left.astimezone(tz)\n dt_right_local = dt_right.astimezone(tz)\n only_dst = self.only_dst(dt_left_local, dt_right_local)\n transitions.append((dt_left_local, dt_right_local, only_dst))\n\n dt = next_dt\n dt_local = next_dt_local\n\n return transitions", "def parse_issue(issue: Issue) -> List[Step]:\n\n result = []\n tx_sequence = 
issue.transaction_sequence\n steps = tx_sequence[\"steps\"]\n\n for step in steps:\n result.append(Step(call_data=step[\"input\"], call_value=step[\"value\"]))\n\n return result", "def expand(self, problem):\n return [self.child(problem, action) for action in problem.actions(self.state)]", "def expand(self, problem):\n # List comprehension that generates a child node for every possible\n # action in the current state\n return [self.child_node(problem, move)\n for move in problem.moves(self.board_state)]", "def expand(self, problem):\n return [self.child_node(problem, action)\n for action in problem.actions(self.state)]", "def expand(self, problem):\n return [self.child_node(problem, action)\n for action in problem.actions(self.state)]", "def state_change_times(self) -> typing.List[float]:\n state_change_times = {0.}\n state_change_times.update(self.population.presence_interval().transition_times())\n state_change_times.update(self.ventilation.transition_times(self.room))\n \n return sorted(state_change_times)", "def _generate_children(self) -> list:\n if self.debug: print(f\"StateNode._generate_children()\")\n return [self.transition(x) for x in self.actions()]", "def get(self, *args):\n return _libsbml.ListOfTransitions_get(self, *args)", "def findActions(problem, state):\r\n size = len(problem) - 1\r\n legalActions = []\r\n if state[0] > 0 and problem[state[0] - 1][state[1]] != 'w':\r\n legalActions.append('N')\r\n if state[0] < size and problem[state[0] + 1][state[1]] != 'w':\r\n legalActions.append('S')\r\n if state[1] > 0 and problem[state[0]][state[1] - 1] != 'w':\r\n legalActions.append('W')\r\n if state[1] < size and problem[state[0]][state[1] + 1] != 'w':\r\n legalActions.append('E')\r\n return legalActions", "def transitions(self, state):\n if len(set(state)) < len(state):\n yield self.STOP_STATE\n return\n for hidx in xrange(self.num_players):\n for lidx in xrange(hidx):\n (lower, higher) = (state[lidx], state[hidx])\n yield self.makestate(((2*lower) if (i == lidx) else ((higher - lower) if (i == hidx) else s)) for (i, s) in enumerate(state))", "def show_transitions(ion=None, lower=0., upper=1.e4, fine_lines=False, flim=0.):\n all_lines = list()\n if ion:\n # only return given ion\n for trans in lineList:\n if trans['ion'] == ion:\n if trans['l0'] > lower and trans['l0'] < upper:\n if trans['f'] > flim:\n all_lines.append(trans)\n\n elif trans['ion'][:-1] == ion and trans['ion'][-1].islower() and fine_lines is True:\n if trans['l0'] > lower and trans['l0'] < upper:\n if trans['f'] > flim:\n all_lines.append(trans)\n\n else:\n for trans in lineList:\n if trans['l0'] > lower and trans['l0'] < upper and trans['f'] > flim:\n if trans['ion'][-1].islower():\n if fine_lines is True:\n all_lines.append(trans)\n else:\n all_lines.append(trans)\n\n return all_lines", "def state_transitions(self, state):\n return self.states(\"ANY PreviousStates.identifier = '%s'\" % _obj_id(state))", "def CHECK_transition_frames(self):\n tr_frames = []\n for i, frame in enumerate(self.y):\n if not np.all(frame == frame[0]):\n tr_frames.append(frame)\n\n print('there are ', len(tr_frames), ' frames containing a transition')\n return tr_frames", "def transition_path(self):\n node, path_back = self, []\n while node:\n path_back.append(node.action)\n node = node.parent\n return list(reversed(path_back))", "def find_transitions(self):\n def transition(index_a, index_b):\n if None in (index_a, index_b):\n return False\n return (index_b - index_a) % 22 != 1 and index_b == 1\n\n simanim = []\n seif_values = 
(None, None)\n siman_start, count = 0, 0\n new_seif_ref = re.compile(u'^@11([\\u05d0-\\u05ea])')\n\n for line_num, line in enumerate(self.source_lines):\n match = new_seif_ref.match(line)\n if match:\n seif_values = (seif_values[1], he_ord(match.group(1)))\n if transition(*seif_values):\n simanim.append({\n 'start': siman_start,\n 'end': line_num-1,\n 'total': count\n })\n siman_start = line_num\n count = 1\n else:\n count += 1\n else:\n simanim.append({\n 'start': siman_start,\n 'end': line_num,\n 'total': count\n })\n\n return simanim", "def enabled_transitions(self):\n transitions = []\n for trans in self.get_enabled_transitions_by_scope().values():\n transitions.extend(trans)\n return transitions", "def transitions(player1_row, player1_column, player2_row, player2_column, player1_possession, player1_action, player2_action):\n\n s = (player1_row, player1_column, player2_row, player2_column, player1_possession)\n transitions = []\n\n # Case: Player 1 goes first\n next_s, collision = SoccerEnv.resolve_player1_action(s, player1_action, True)\n if not collision:\n next_s, collision = SoccerEnv.resolve_player2_action(next_s, player2_action, False)\n transitions.append((SoccerEnv.encode_state(next_s[0], next_s[1], next_s[2], next_s[3], next_s[4]),\n SoccerEnv.reward(next_s), SoccerEnv.done(next_s)))\n\n # Case: Player 2 goes first:\n next_s, collision = SoccerEnv.resolve_player2_action(s, player2_action, True)\n if not collision:\n next_s, collision = SoccerEnv.resolve_player1_action(next_s, player1_action, False)\n transitions.append((SoccerEnv.encode_state(next_s[0], next_s[1], next_s[2], next_s[3], next_s[4]),\n SoccerEnv.reward(next_s), SoccerEnv.done(next_s)))\n\n return transitions", "def transition(self, to_state: _State) -> Generator[_TransitionResult, None, None]:\n for trans in self._transitions:\n if trans.comparable_to(self.state, to_state):\n\n # these states are equal even considering data, thus no-op\n if trans.from_state == to_state:\n break\n\n self.state = to_state\n yield _StateChange(\n transition=_Transition(trans.from_state, to_state), node=self\n )\n break\n else:\n yield IllegalTransition(\n f\"no transition for {self} from {self.state} -> {to_state}\",\n self,\n _Transition(self.state, to_state),\n )", "def transitions_table(transitions, states, alphabet):\n transitions = sanitize_transitions(transitions)\n\n check_transitions(transitions, states, alphabet)\n\n table = []\n for current in states:\n for read in alphabet:\n # DEBUG: print(state, read)\n next, write, move = transitions(current, read)\n table.append([current, read, next, write, move])\n\n df = pd.DataFrame(table, columns = ['current', 'read', 'next', 'write', 'move'])\n return df" ]
[ "0.6658203", "0.64597243", "0.609803", "0.5973659", "0.59435755", "0.5658975", "0.56407136", "0.5377166", "0.5376197", "0.5366969", "0.53484374", "0.5304934", "0.52682185", "0.5264709", "0.5251384", "0.5251384", "0.5246294", "0.52343994", "0.5204525", "0.5165543", "0.5150279", "0.51372504", "0.5062964", "0.5054486", "0.501989", "0.5017491", "0.5012439", "0.49762756", "0.49717832", "0.49635008" ]
0.7419396
0
Debug breakpoint while in curses mode
def _D(stdscr): curses.nocbreak() stdscr.keypad(0) curses.echo() curses.endwin() import pdb; pdb.set_trace()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __exit__(self, exc_type, exc_val, exc_tb):\n self.stdscr.keypad(False)\n self.stdscr.nodelay(False)\n curses.echo()\n curses.nocbreak()\n curses.endwin()", "def gdb_breakpoint():\n _gdb_python_call_gen('gdb_breakpoint')()", "def _debug_trace():\n from PyQt4.QtCore import pyqtRemoveInputHook\n from pdb import set_trace\n pyqtRemoveInputHook()\n set_trace()", "def main():\r\n\r\n debug_tb = []\r\n def curses_loop(stdscr):\r\n \"\"\"Only the code inside this function runs within the curses wrapper\"\"\"\r\n\r\n # this function may under no circumstancs raise an exception, so I'm\r\n # wrapping everything into try/except (should actually never happen\r\n # anyways but when it happens during coding or debugging it would\r\n # leave the terminal in an unusable state and this must be avoded).\r\n # We have a list debug_tb[] where we can append tracebacks and\r\n # after curses uninitialized properly and the terminal is restored\r\n # we can print them.\r\n try:\r\n init_colors()\r\n gox = goxapi.Gox(secret, config)\r\n\r\n logwriter = LogWriter(gox)\r\n printhook = PrintHook(gox)\r\n\r\n conwin = WinConsole(stdscr, gox)\r\n bookwin = WinOrderBook(stdscr, gox)\r\n statuswin = WinStatus(stdscr, gox)\r\n chartwin = WinChart(stdscr, gox)\r\n\r\n strategy_manager = StrategyManager(gox, strat_mod_list)\r\n\r\n gox.start()\r\n while True:\r\n key = stdscr.getch()\r\n if key == ord(\"q\"):\r\n break\r\n elif key == curses.KEY_F4:\r\n DlgNewOrderBid(stdscr, gox).modal()\r\n elif key == curses.KEY_F5:\r\n DlgNewOrderAsk(stdscr, gox).modal()\r\n elif key == curses.KEY_F6:\r\n DlgCancelOrders(stdscr, gox).modal()\r\n elif key == curses.KEY_RESIZE:\r\n # pylint: disable=W0212\r\n with goxapi.Signal._lock:\r\n stdscr.erase()\r\n stdscr.refresh()\r\n conwin.resize()\r\n bookwin.resize()\r\n chartwin.resize()\r\n statuswin.resize()\r\n elif key == ord(\"l\"):\r\n strategy_manager.reload()\r\n\r\n # which chart to show on the right side\r\n elif key == ord(\"H\"):\r\n set_ini(gox, \"display_right\", \"history_chart\",\r\n gox.history.signal_changed, gox.history, None)\r\n elif key == ord(\"D\"):\r\n set_ini(gox, \"display_right\", \"depth_chart\",\r\n gox.orderbook.signal_changed, gox.orderbook, None)\r\n\r\n # depth chart step\r\n elif key == ord(\",\"): # zoom out\r\n toggle_depth_group(gox, +1)\r\n elif key == ord(\".\"): # zoom in\r\n toggle_depth_group(gox, -1)\r\n\r\n # orderbook grouping step\r\n elif key == ord(\"-\"): # zoom out (larger step)\r\n toggle_orderbook_group(gox, +1)\r\n elif key == ord(\"+\"): # zoom in (smaller step)\r\n toggle_orderbook_group(gox, -1)\r\n\r\n elif key == ord(\"S\"):\r\n toggle_orderbook_sum(gox)\r\n\r\n elif key == ord(\"T\"):\r\n toggle_depth_sum(gox)\r\n\r\n # lowercase keys go to the strategy module\r\n elif key >= ord(\"a\") and key <= ord(\"z\"):\r\n gox.signal_keypress(gox, (key))\r\n else:\r\n gox.debug(\"key pressed: key=%i\" % key)\r\n\r\n except KeyboardInterrupt:\r\n # Ctrl+C has been pressed\r\n pass\r\n\r\n except Exception:\r\n debug_tb.append(traceback.format_exc())\r\n\r\n # we are here because shutdown was requested.\r\n #\r\n # Before we do anything we dump stacktraces of all currently running\r\n # threads to a separate logfile because this helps debugging freezes\r\n # and deadlocks that might occur if things went totally wrong.\r\n with open(\"goxtool.stacktrace.log\", \"w\") as stacklog:\r\n stacklog.write(dump_all_stacks())\r\n\r\n # we need the signal lock to be able to shut down. 
And we cannot\r\n # wait for any frozen slot to return, so try really hard to get\r\n # the lock and if that fails then unlock it forcefully.\r\n try_get_lock_or_break_open()\r\n\r\n # Now trying to shutdown everything in an orderly manner.it in the\r\n # Since we are still inside curses but we don't know whether\r\n # the printhook or the logwriter was initialized properly already\r\n # or whether it crashed earlier we cannot print here and we also\r\n # cannot log, so we put all tracebacks into the debug_tb list to\r\n # print them later once the terminal is properly restored again.\r\n try:\r\n strategy_manager.unload()\r\n except Exception:\r\n debug_tb.append(traceback.format_exc())\r\n\r\n try:\r\n gox.stop()\r\n except Exception:\r\n debug_tb.append(traceback.format_exc())\r\n\r\n try:\r\n printhook.close()\r\n except Exception:\r\n debug_tb.append(traceback.format_exc())\r\n\r\n try:\r\n logwriter.close()\r\n except Exception:\r\n debug_tb.append(traceback.format_exc())\r\n\r\n # curses_loop() ends here, we must reach this point under all circumstances.\r\n # Now curses will restore the terminal back to cooked (normal) mode.\r\n\r\n\r\n # Here it begins. The very first thing is to always set US or GB locale\r\n # to have always the same well defined behavior for number formatting.\r\n for loc in [\"en_US.UTF8\", \"en_GB.UTF8\", \"en_EN\", \"en_GB\", \"C\"]:\r\n try:\r\n locale.setlocale(locale.LC_NUMERIC, loc)\r\n break\r\n except locale.Error:\r\n continue\r\n\r\n # before we can finally start the curses UI we might need to do some user\r\n # interaction on the command line, regarding the encrypted secret\r\n argp = argparse.ArgumentParser(description='MtGox live market data monitor'\r\n + ' and trading bot experimentation framework')\r\n argp.add_argument('--add-secret', action=\"store_true\",\r\n help=\"prompt for API secret, encrypt it and then exit\")\r\n argp.add_argument('--strategy', action=\"store\", default=\"strategy.py\",\r\n help=\"name of strategy module files, comma separated list, default=strategy.py\")\r\n argp.add_argument('--protocol', action=\"store\", default=\"\",\r\n help=\"force protocol (socketio or websocket), ignore setting in .ini\")\r\n argp.add_argument('--no-fulldepth', action=\"store_true\", default=False,\r\n help=\"do not download full depth (useful for debugging)\")\r\n argp.add_argument('--no-depth', action=\"store_true\", default=False,\r\n help=\"do not request depth messages (implies no-fulldeph), useful for low traffic\")\r\n argp.add_argument('--no-lag', action=\"store_true\", default=False,\r\n help=\"do not request order-lag updates, useful for low traffic\")\r\n argp.add_argument('--no-history', action=\"store_true\", default=False,\r\n help=\"do not download full history (useful for debugging)\")\r\n argp.add_argument('--use-http', action=\"store_true\", default=False,\r\n help=\"use http api for trading (more reliable, recommended\")\r\n argp.add_argument('--no-http', action=\"store_true\", default=False,\r\n help=\"use streaming api for trading (problematic when streaming api disconnects often)\")\r\n argp.add_argument('--password', action=\"store\", default=None,\r\n help=\"password for decryption of stored key. This is a dangerous option \"\r\n +\"because the password might end up being stored in the history file \"\r\n +\"of your shell, for example in ~/.bash_history. 
Use this only when \"\r\n +\"starting it from within a script and then of course you need to \"\r\n +\"keep this start script in a secure place!\")\r\n args = argp.parse_args()\r\n\r\n config = goxapi.GoxConfig(\"goxtool.ini\")\r\n config.init_defaults(INI_DEFAULTS)\r\n secret = goxapi.Secret(config)\r\n secret.password_from_commandline_option = args.password\r\n if args.add_secret:\r\n # prompt for secret, encrypt, write to .ini and then exit the program\r\n secret.prompt_encrypt()\r\n else:\r\n strat_mod_list = args.strategy.split(\",\")\r\n goxapi.FORCE_PROTOCOL = args.protocol\r\n goxapi.FORCE_NO_FULLDEPTH = args.no_fulldepth\r\n goxapi.FORCE_NO_DEPTH = args.no_depth\r\n goxapi.FORCE_NO_LAG = args.no_lag\r\n goxapi.FORCE_NO_HISTORY = args.no_history\r\n goxapi.FORCE_HTTP_API = args.use_http\r\n goxapi.FORCE_NO_HTTP_API = args.no_http\r\n if goxapi.FORCE_NO_DEPTH:\r\n goxapi.FORCE_NO_FULLDEPTH = True\r\n\r\n # if its ok then we can finally enter the curses main loop\r\n if secret.prompt_decrypt() != secret.S_FAIL_FATAL:\r\n\r\n ###\r\n #\r\n # now going to enter cbreak mode and start the curses loop...\r\n curses.wrapper(curses_loop)\r\n # curses ended, terminal is back in normal (cooked) mode\r\n #\r\n ###\r\n\r\n if len(debug_tb):\r\n print \"\\n\\n*** error(s) in curses_loop() that caused unclean shutdown:\\n\"\r\n for trb in debug_tb:\r\n print trb\r\n else:\r\n print\r\n print \"*******************************************************\"\r\n print \"* Please donate: 1C8aDabADaYvTKvCAG1htqYcEgpAhkeYoW *\"\r\n print \"*******************************************************\"", "def start_pdb():\r\n import ctypes\r\n ctypes.windll.kernel32.AllocConsole()\r\n import sys\r\n sys.stdout = open('CONOUT$', 'wt')\r\n sys.stdin = open('CONIN$', 'rt')\r\n import pdb\r\n pdb.set_trace()", "def setup(self):\n\n try:\n with terminal.cbreak(), terminal.hidden_cursor():\n yield\n finally:\n print(terminal.clear)\n print(terminal.exit_fullscreen)", "def move_debug(self, environment):\n\n ch2 = sys.stdin.read(1)\n\n if ch2 == \"w\":\n # the up arrow key was pressed\n print(\"up key pressed\")\n\n elif ch2 == \"s\":\n # the down arrow key was pressed\n print(\"down key pressed\")\n\n elif ch2 == \"a\":\n # the left arrow key was pressed\n print(\"left key pressed\")\n\n elif ch2 == \"d\":\n # the right arrow key was pressed\n print(\"right key pressed\")", "def breakpoint(self):\n return None", "def _(event):\n event.cli.push_focus(SYSTEM_BUFFER)", "def cursor_set():\n print(\"\\033[0;0H\")", "def msg_console_switched(self, msg):\r\n #update the paused/line number markers\r\n self.frame.notebook.UpdatePauseMarkers()\r\n\r\n #update the bp markers in the editor pages\r\n pages = self.frame.notebook.GetAllPages()\r\n for page in pages:\r\n page.UpdateBreakpointSymbols()", "def debug():\n # written before I knew about the pdb module\n caller = currentframe().f_back\n method_name = caller.f_code.co_name\n line_no = getframeinfo(caller).lineno\n print(method_name + \": line \" + str(line_no))\n code.interact(local=dict(globals(), **caller.f_locals))", "def break_stimulus(win,break_stim):\n #start core clock\n clock = core.Clock()\n\n #while space bar is not pressed continue to show break stimulus\n #if 50 seconds pass, then quit experiment\n break_stim.setAutoDraw(True)\n while not event.getKeys(['space']):\n win.flip()\n if int(clock.getTime()) > 50:\n core.quit\n break_stim.setAutoDraw(False)", "def curses_loop(stdscr):\r\n\r\n # this function may under no circumstancs raise an exception, so I'm\r\n # 
wrapping everything into try/except (should actually never happen\r\n # anyways but when it happens during coding or debugging it would\r\n # leave the terminal in an unusable state and this must be avoded).\r\n # We have a list debug_tb[] where we can append tracebacks and\r\n # after curses uninitialized properly and the terminal is restored\r\n # we can print them.\r\n try:\r\n init_colors()\r\n gox = goxapi.Gox(secret, config)\r\n\r\n logwriter = LogWriter(gox)\r\n printhook = PrintHook(gox)\r\n\r\n conwin = WinConsole(stdscr, gox)\r\n bookwin = WinOrderBook(stdscr, gox)\r\n statuswin = WinStatus(stdscr, gox)\r\n chartwin = WinChart(stdscr, gox)\r\n\r\n strategy_manager = StrategyManager(gox, strat_mod_list)\r\n\r\n gox.start()\r\n while True:\r\n key = stdscr.getch()\r\n if key == ord(\"q\"):\r\n break\r\n elif key == curses.KEY_F4:\r\n DlgNewOrderBid(stdscr, gox).modal()\r\n elif key == curses.KEY_F5:\r\n DlgNewOrderAsk(stdscr, gox).modal()\r\n elif key == curses.KEY_F6:\r\n DlgCancelOrders(stdscr, gox).modal()\r\n elif key == curses.KEY_RESIZE:\r\n # pylint: disable=W0212\r\n with goxapi.Signal._lock:\r\n stdscr.erase()\r\n stdscr.refresh()\r\n conwin.resize()\r\n bookwin.resize()\r\n chartwin.resize()\r\n statuswin.resize()\r\n elif key == ord(\"l\"):\r\n strategy_manager.reload()\r\n\r\n # which chart to show on the right side\r\n elif key == ord(\"H\"):\r\n set_ini(gox, \"display_right\", \"history_chart\",\r\n gox.history.signal_changed, gox.history, None)\r\n elif key == ord(\"D\"):\r\n set_ini(gox, \"display_right\", \"depth_chart\",\r\n gox.orderbook.signal_changed, gox.orderbook, None)\r\n\r\n # depth chart step\r\n elif key == ord(\",\"): # zoom out\r\n toggle_depth_group(gox, +1)\r\n elif key == ord(\".\"): # zoom in\r\n toggle_depth_group(gox, -1)\r\n\r\n # orderbook grouping step\r\n elif key == ord(\"-\"): # zoom out (larger step)\r\n toggle_orderbook_group(gox, +1)\r\n elif key == ord(\"+\"): # zoom in (smaller step)\r\n toggle_orderbook_group(gox, -1)\r\n\r\n elif key == ord(\"S\"):\r\n toggle_orderbook_sum(gox)\r\n\r\n elif key == ord(\"T\"):\r\n toggle_depth_sum(gox)\r\n\r\n # lowercase keys go to the strategy module\r\n elif key >= ord(\"a\") and key <= ord(\"z\"):\r\n gox.signal_keypress(gox, (key))\r\n else:\r\n gox.debug(\"key pressed: key=%i\" % key)\r\n\r\n except KeyboardInterrupt:\r\n # Ctrl+C has been pressed\r\n pass\r\n\r\n except Exception:\r\n debug_tb.append(traceback.format_exc())\r\n\r\n # we are here because shutdown was requested.\r\n #\r\n # Before we do anything we dump stacktraces of all currently running\r\n # threads to a separate logfile because this helps debugging freezes\r\n # and deadlocks that might occur if things went totally wrong.\r\n with open(\"goxtool.stacktrace.log\", \"w\") as stacklog:\r\n stacklog.write(dump_all_stacks())\r\n\r\n # we need the signal lock to be able to shut down. 
And we cannot\r\n # wait for any frozen slot to return, so try really hard to get\r\n # the lock and if that fails then unlock it forcefully.\r\n try_get_lock_or_break_open()\r\n\r\n # Now trying to shutdown everything in an orderly manner.it in the\r\n # Since we are still inside curses but we don't know whether\r\n # the printhook or the logwriter was initialized properly already\r\n # or whether it crashed earlier we cannot print here and we also\r\n # cannot log, so we put all tracebacks into the debug_tb list to\r\n # print them later once the terminal is properly restored again.\r\n try:\r\n strategy_manager.unload()\r\n except Exception:\r\n debug_tb.append(traceback.format_exc())\r\n\r\n try:\r\n gox.stop()\r\n except Exception:\r\n debug_tb.append(traceback.format_exc())\r\n\r\n try:\r\n printhook.close()\r\n except Exception:\r\n debug_tb.append(traceback.format_exc())\r\n\r\n try:\r\n logwriter.close()\r\n except Exception:\r\n debug_tb.append(traceback.format_exc())\r\n\r\n # curses_loop() ends here, we must reach this point under all circumstances.\r\n # Now curses will restore the terminal back to cooked (normal) mode.\r", "def start(self):\n self.print_separator()\n self.stdscr.box()\n\n box = curses.newwin(4, self.maxx-8, self.pad, self.pad)\n box.addstr(1,1,\"hello\")\n while self.running:\n # Enter the main program loop\n key = self.stdscr.getkey()\n for fn in [self.stdscr.clear,\n lambda: self.handle_key(key),\n self.update_xy,\n self.print_pattern,\n self.print_separator,\n self.stdscr.box,\n self.generate_menu_items,\n self.print_menu_items,\n self.print_current_selection,\n self.stdscr.refresh]:\n fn()", "def execute_pause(self):\n print(self.empty_lines + self.double_tab + \"to continue press any key..\\r\")\n self.getch()", "def console():\n repl(click.get_current_context())", "def Init_curses():\n curses.noecho()\n curses.cbreak()\n curses.curs_set(False)\n stdscr.keypad(True)\n curses.init_pair(1, curses.COLOR_YELLOW, curses.COLOR_BLACK)\n curses.init_pair(2, curses.COLOR_RED, curses.COLOR_BLACK)\n curses.init_pair(3, curses.COLOR_GREEN, curses.COLOR_BLACK)\n curses.init_pair(4, curses.COLOR_BLUE, curses.COLOR_BLACK)", "def initCurse():\n #On initialise notre fenêtre\n curses.initscr()\n #Couleur active !\n curses.start_color()\n curses.use_default_colors()\n curses.init_pair(1, curses.COLOR_WHITE, -1)\n curses.init_pair(2, curses.COLOR_GREEN, -1)\n curses.init_pair(3, curses.COLOR_RED, -1)\n curses.curs_set(0)", "def debug(sig, frame):\n d={'_frame':frame} # Allow access to frame object.\n d.update(frame.f_globals) # Unless shadowed by global\n d.update(frame.f_locals)\n\n i = code.InteractiveConsole(d)\n message = \"Signal received : entering python shell.\\nTraceback:\\n\"\n message += ''.join(traceback.format_stack(frame))\n i.interact(message)", "def test(self):\n self.gdb.b(\"main:start\")\n self.gdb.c()\n self.gdb.p(\"i=123\")\n self.gdb.c(wait=False)\n time.sleep(0.1)\n output = self.gdb.interrupt()\n assert \"main\" in output\n assertGreater(self.gdb.p(\"j\"), 10)\n self.gdb.p(\"i=0\")\n self.exit()", "def term():\n curses.endwin()\n unicornhathd.off()", "def cursor_placement_thread(self):\r\n while self.editing:\r\n # pylint: disable=W0212\r\n with goxapi.Signal._lock:\r\n curses.curs_set(2)\r\n self.win.touchwin()\r\n self.win.refresh()\r\n time.sleep(0.1)\r\n curses.curs_set(0)", "def key_press(self):\n self.screen.nodelay(True)\n return self.screen.getch()", "def __liveActually(self, stdscr):\n global screenH, screenW\n self.__stdscr = stdscr\n 
(screenH, screenW) = self.__stdscr.getmaxyx()\n self.__stdscr.addstr(0, 0, \"Custom Burner \" + common.version)\n self.__stdscr.addstr(screenH - 1, 0, \"a: add ISO q: Quit\")\n self.__stdscr.noutrefresh()\n isoWindowHeight = ((screenH - 2) * 2)/ 3\n self.__isoWindow = IsoWindow(isoWindowHeight, screenW, 1, 0)\n self.__isoWindow.timeout(1000) # msec\n self.__logWindow = LogWindow(screenH - 2 - isoWindowHeight, screenW,\n isoWindowHeight + 1, 0)\n self.__focus = 0\n self.__focusedWindow = self.__isoWindow\n self.__isoWindow.focus()\n quitting = False\n while not quitting:\n self.__updateLog()\n curses.panel.update_panels()\n curses.doupdate()\n c = self.__focusedWindow.getch()\n if c == curses.ascii.TAB:\n self.__switchFocus()\n elif c == ord('a'):\n self.__askForIso()\n elif c == ord('q'):\n quitting = True", "def _debug_key_press_event(self, event):\n if event.key() == Qt.Key_F2:\n self._debug_signal_handler(None, None)", "def __init__(self, stdscr, gox):\r\n self.gox = gox\r\n gox.signal_debug.connect(self.slot_debug)\r\n Win.__init__(self, stdscr)", "def test_yankWithoutKillRing(self):\n s = 'hello world'\n n = 5\n self.widget.buffer = s\n self.widget.cursor = n\n self.widget.killRing = []\n self.widget.keystrokeReceived('\\x19', None)\n self.assertEqual(self.widget.buffer, s)\n self.assertEqual(self.widget.cursor, n)", "def debugger(self):\n\n if not self.rc.pdb:\n return\n pdb.pm()", "def run(self, window):\n self.window = window\n self.window.keypad(1)\n curses.curs_set(0)\n\n if curses.has_colors():\n colors.start_colors()\n\n while True:\n if self.update() == \"quit\":\n return" ]
[ "0.6429854", "0.64178854", "0.6243146", "0.62248564", "0.6194091", "0.610298", "0.6085449", "0.5991171", "0.5902381", "0.58755255", "0.5865251", "0.5808573", "0.5802128", "0.57814217", "0.57667226", "0.5760795", "0.5739943", "0.572808", "0.5721066", "0.5709992", "0.5702061", "0.5697934", "0.56772196", "0.56762743", "0.5663971", "0.5629237", "0.56257004", "0.5625625", "0.5610519", "0.55916077" ]
0.75144726
0
Retrieve yaml data from a given path if file not exist, return False
def get_yaml_data(path): yaml_path = "%s%s.yml" % (CONTENT_FILE_DIR, path[:-5]) if os.path.isfile(yaml_path): f = open(yaml_path, 'r') template_data = yaml.load(f) return template_data else: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_yaml(path):\n if os.path.exists(path):\n f = open(path)\n data = yaml.load(f)\n f.close()\n return data\n else:\n # This should maybe throw an exception or something\n return {}", "def load_yaml(path):\n if os.path.exists(path):\n f = open(path)\n data = yaml.load(f)\n f.close()\n return data\n else:\n return {}", "def yaml_file_must_exist(cls, v: pathlib.Path):\n if not v.exists():\n raise ValueError(f\"Path object not found in filesystem : {v}\")\n return v", "def provide_config(path):\n if os.path.exists(path):\n try:\n with open(path, \"r\") as f:\n config = yaml.load(f, Loader=yaml.UnsafeLoader)\n except yaml.YAMLError as exc:\n print(\"Error in configuration file:\", exc)\n else:\n config = {}\n print(\"The config yaml with path {}, does not exist.\".format(path))\n\n return config", "def load(path=None):\n if path is None:\n path = settings.HOST_CONFIG_PATH\n\n try:\n with open(path, 'r') as source:\n data = yaml.safe_load(source.read())\n return data\n except IOError as e:\n pass\n\n return None", "def LoadYaml(path):\n #NOTE(g): Import is done here, instead of the top of the file, to not require this module if it is not used\n import yaml\n \n fp = None\n try:\n fp = open(path)\n \n data = yaml.load(fp)\n \n finally:\n if fp:\n fp.close()\n \n return data", "def from_path(cls, path: str) -> Any:\n cls._check_yaml()\n with open(path) as f:\n return yaml.safe_load(f)", "def load(path: str='config.yaml'):\n file = Path(path).open()\n result = yaml.safe_load(file)\n\n debug(f'YAML file {path} loaded and parsed succesful')\n\n return result", "def read_exercise_yaml(path_yaml):\n exer_dict = {}\n with open(path_yaml, 'r') as stream:\n try:\n exer_dict = yaml.safe_load(stream)\n except yaml.YAMLError as exc:\n print(exc)\n return exer_dict", "def pickle_load(path):\n if os.path.isfile(path):\n file = pickle.load(open(path, \"rb\"))\n return file\n else: \n return False", "def __open_yml_file(path_to_yml_file: str):\n\n yaml_content = None\n\n with open(path_to_yml_file, 'r', encoding='utf8') as stream:\n try:\n yaml_content = yaml.safe_load(stream)\n except yaml.YAMLError as exc:\n print(\"could not read yml file '\" + str() + \"'...\\n\" + str(exc) + \"...\")\n\n return yaml_content", "def read_from_yaml(file_path, Loader=None):\n import yaml\n if Loader is None:\n Loader = yaml.FullLoader\n if os.path.isfile(file_path):\n with open(file_path, 'r') as stream:\n data = yaml.load(stream, Loader=Loader)\n return data\n else:\n raise Exception('File: {} does not exist.'.format(file_path))", "def load_local_paths(path):\n\n with open(path, 'r') as f:\n try:\n local_paths = yaml.safe_load(f)\n except yaml.YAMLError as err:\n print(err)\n return None\n\n return local_paths", "def fetch_config_from_yaml(cfg_path: Path = None):\n if not cfg_path:\n cfg_path = find_config_file()\n\n if cfg_path:\n with cfg_path.open(\"r\") as config_file:\n parsed_config = load(config_file.read())\n return parsed_config\n raise FileNotFoundError(f\"Did not found config file at path {cfg_path}\")", "def load_yaml(filepath):\n with open(filepath, 'r') as stream:\n try:\n return yaml.safe_load(stream)\n except yaml.YAMLError as exc:\n print(exc)", "def _load_file(self, f):\n if not os.path.exists(f):\n msg = '%s is a non-existant definition file' % f\n raise ValueError(msg)\n\n with open(f, 'r') as fh:\n return yaml.load(fh.read())", "def load_yaml(path: str) -> dict:\n with open(path, 'r') as f:\n yaml_file = yaml.load(f, Loader=yaml.FullLoader)\n return yaml_file", "def read_yaml(path: PathLike) -> 
Dict:\n with open(path, \"r\") as read_file:\n return yaml.load(read_file, Loader=yaml.UnsafeLoader)", "def load_yaml(path):\n fsock = open(path)\n \n try:\n yaml_string = fsock.read()\n yaml_obj = yaml.load(yaml_string)\n \n finally:\n fsock.close()\n\n return yaml_obj", "def get_data(path=None):\n\n # use default path \n if not path:\n path = os.path.relpath(\"config.json\")\n \n try:\n with open(path, mode=\"r\") as f:\n data = json.load(f)\n return data\n except Exception as e:\n print(e)", "def test_utils_get_dict_value_from_path_should_return_none_when_value_does_not_exists(\n path,\n):\n dictionary = {\"foo\": {\"bar\": \"bar_value\"}}\n assert ralph_utils.get_dict_value_from_path(dictionary, path) is None", "def load(path=\".travis.yml\"):\n if not path:\n path = \".travis.yml\"\n with open(path, 'r') as stream:\n return yaml.load(stream)", "def is_config_file(path):\n is_valid_file(path)\n\n with open(path) as f:\n return f.read()", "def load_yaml(path: str) -> Dict[str, Any]:\n with open(path, \"r\", encoding=\"utf8\") as fp:\n data = yaml.safe_load(fp)\n return data", "def _load_datas(self) -> tp.Dict[str, dict]:\n with open(self._file, \"r\") as stream:\n try:\n load: tp.Dict[str, dict] = yaml.safe_load(stream)\n logger.info(\"YAML imported\")\n return load\n except yaml.YAMLError as exc:\n logger.debug(\"YAML import error : %s\", exc)\n raise", "def read_yaml_file(path: Union[str, pathlib.Path]) -> dict:\n\n if isinstance(path, (str, pathlib.Path)):\n with open(path, 'r') as fp:\n config = yaml.safe_load(fp)\n else:\n # Assume it's an stream\n config = yaml.safe_load(path)\n\n return config", "def load_config(path=\"configs/default.yaml\") -> dict:\n with open(path, \"r\", encoding=\"utf-8\") as ymlfile:\n cfg = yaml.safe_load(ymlfile)\n return cfg", "def read_yaml(yaml_path):\n with open(yaml_path) as f:\n yaml_data = yaml.load(f, Loader=yaml.FullLoader)\n\n return yaml_data", "def read_config(path_to_file):\n path_to_file = Path(path_to_file)\n if not path_to_file.exists():\n raise ValueError(\"Config {} does not exist.\".format(path_to_file))\n with path_to_file.open('r') as stream:\n try:\n return yaml.safe_load(stream)\n except yaml.YAMLError as exc:\n raise IOError(exc)", "def check_exist(name, map):\r\n f = open(PATH,mode='r')\r\n file = yaml.load(f)\r\n f.close()\r\n if file is None:\r\n return (False, -1, -9, -9, [])\r\n elif name in file:\r\n if \"CSV\" in file[name]:\r\n return (True, file[name][\"id\"], file[name][\"hash\"], file[name][\"csv_hash\"], file[name][\"children\"])\r\n else:\r\n return (True, file[name][\"id\"], file[name][\"hash\"], -9, file[name][\"children\"])\r\n elif name+\"_\"+map in file:\r\n n = name+\"_\"+map\r\n if \"CSV\" in file[n]:\r\n return (True, file[n][\"id\"], file[n][\"hash\"], file[n][\"csv_hash\"], file[n][\"children\"])\r\n else:\r\n return (True, file[n][\"id\"], file[n][\"hash\"], -9, file[n][\"children\"])\r\n return (False, -1, -9, -9, [])" ]
[ "0.7641903", "0.7460384", "0.69069195", "0.6766211", "0.6622035", "0.6619555", "0.6472961", "0.6431809", "0.630646", "0.6232994", "0.620704", "0.620435", "0.617769", "0.6173353", "0.6155012", "0.6154364", "0.6134782", "0.6125133", "0.6101209", "0.6087636", "0.6067218", "0.6063575", "0.60627854", "0.6038078", "0.6003145", "0.6000955", "0.60000056", "0.5976795", "0.59693", "0.59634733" ]
0.80198294
0
Try and determine the correct _ (underscore) template matching the files directory structure
def determine_template_by_path(path): path = path.lstrip('/') path_chunks = re.split('\/', path) if len(path_chunks) <= 1: return path else: """ For now be ignorant and just return the first entry of the list as the possible template name, so in fact we only have a 1 level deep structure """ return '_%s.html' % path_chunks[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _FindTemplateFile(self, topdir):\n if topdir.endswith('..'):\n topdir = '/'.join(topdir.split('/')[:-2])\n fnames = os.listdir(topdir)\n for fname in fnames:\n filename = '%s/%s' % (topdir, fname)\n if filename.endswith('.yaml') and not os.path.isdir(filename) and \\\n os.path.exists(filename):\n f = open(filename, 'r')\n magic_code = f.read(22)\n f.close()\n if '#!fmri_file_template' in magic_code:\n return filename\n return None", "def find_custom_template(args):\n for arg in args:\n if os.path.isdir(arg):\n dirlist = os.listdir(arg)\n if \"custom.html\" in dirlist:\n return os.path.join(arg, \"custom.html\")\n elif \"custom.jinja\" in dirlist:\n return os.path.join(arg, \"custom.jinja\")", "def _get_template_filename(self):\n _format = self.cfg.get('mutations', 'format')\n if _format == 'pdf':\n tf = 'PDFTemplate.bt'\n elif _format == 'png':\n tf = 'PNG12Template.bt'\n\n module_dir = os.path.dirname(os.path.abspath(__file__))\n\n return os.path.join(module_dir, templates_dir, tf)", "def find_template_filename(self, template_name):\n\n def next_file():\n filename = self.path / template_name\n yield filename\n try:\n exts = self.default_file_extensions\n except AttributeError:\n return\n\n strfilename = str(filename)\n for ext in exts:\n yield Path(strfilename + ext)\n\n for filename in next_file():\n if filename.is_file():\n return filename", "def get_template_filename(template):\n config = read_config(SETTINGS_PATH)\n #String templates\n if (template in STRING_TEMPLATES):\n options = config.options(STRING_TEMPLATES_SECTION) \n for option in options:\n if (option==template):\n #Get root path for the templates\n root_path = config.get(TEMPLATES_SECTION,TEMPLATES_ROOT_PATH)\n #Get the strings path templates\n strings_path = config.get(STRING_TEMPLATES_SECTION,STRING_TEMPLATES_PATH)\n return join(root_path,strings_path),config.get(STRING_TEMPLATES_SECTION,option)", "def _find_relative(self, spec):\n if spec.template_rel_path is not None:\n return os.path.split(spec.template_rel_path)\n # Otherwise, determine the file name separately.\n\n locator = self.loader._make_locator()\n\n # We do not use the ternary operator for Python 2.4 support.\n if spec.template_name is not None:\n template_name = spec.template_name\n else:\n template_name = locator.make_template_name(spec)\n\n file_name = locator.make_file_name(template_name, spec.template_extension)\n\n return (spec.template_rel_directory, file_name)", "def get_template(name):\n found_dir = False\n pkg_dir = get_sitepackage_dirs()\n for pd in pkg_dir:\n if os.path.isdir(pd + '/lmdo'):\n found_dir = '{}/lmdo/local_template/{}'.format(pd, name)\n if os.path.isfile(found_dir):\n break\n else:\n found_dir = False\n \n if not found_dir:\n Oprint.warn('Template file {} is missing'.format(name), 'lmdo')\n\n return found_dir", "def get_template_name(request, base_template_name):\n template_base_dir = get_template_base_directory(request)\n return f\"cast/{template_base_dir}/{base_template_name}\"", "def get_templates_dirs(self): \n from pkg_resources import resource_filename\n return [ resource_filename(__name__, 'templates') ]\n # return []", "def get_template_name(self):\n if self.template_name:\n return '%s' % self.template_name\n\n if self.template_name_prefix:\n return '%s%s.html' % (self.template_name_prefix, self.mode)\n\n for piece_name in reversed(list(self.pieces.keys())):\n piece = getattr(self, piece_name)\n result = piece.get_template_name()\n if result:\n return '%s.html' % result\n\n return None", "def get_template_name(self):\n 
if self.template_name:\n return self.template_name\n\n if Path('_templates/global/WaitPage.html').exists():\n return 'global/WaitPage.html'\n return 'otree/WaitPage.html'", "def test_filesystem_loader(self):\n\n self.assertEqual(\n list(\n template_finder.templates_for_engine({\n 'BACKEND': 'django.templates.backends.django.Djangotemplate.',\n 'APP_DIRS': False,\n 'DIRS': ['/tmp/project/templates/', '/tmp/project/other_templates/']\n })\n ),\n [\n ('base.html', '/tmp/project/templates/base.html'),\n ('foo/bar.html', '/tmp/project/templates/foo/bar.html'),\n ('baz.html', '/tmp/project/other_templates/baz.html'),\n ]\n )", "def _GetTemplate(self):\n# First read default template.\n tmplt = self._LoadTemplate(c.preproc_template_default)\n tmplt['proc'] = self.topdir\n self.template_type = 'default'\n\n self.templates = []\n if self.template_file is not None:\n tmplt.update(self._LoadTemplate(self.template_file))\n self.template_type = 'command-line'\n self.templates.append(os.path.abspath(self.template_file))\n found_template = True\n else:\n# Find a study specific template file.\n study_template_file = self._FindTemplateFile('%s/..' % self.topdir)\n if study_template_file is not None:\n# Merge study template into default, study template has precedence.\n if self.verbose:\n print \"Using study template at \" + study_template_file\n tmplt.update(self._LoadTemplate(study_template_file))\n self.template_type = 'study-specific'\n self.templates.append(os.path.abspath(study_template_file))\n found_template = True\n else:\n found_template = False\n# Now look for a subject-specific template file.\n subject_template_file = self._FindTemplateFile('%s' % self.topdir)\n if subject_template_file is not None:\n# Merge subject template, subject template has precedence.\n if self.verbose:\n print \"Using subject-specific template at %s\" % \\\n subject_template_file\n tmplt.update(self._LoadTemplate(subject_template_file))\n self.template_type = 'study-specific'\n self.templates.append(os.path.abspath(subject_template_file))\n found_template = True\n\n if not found_template:\n raise RuntimeError('Could not find template file.')\n\n if tmplt.get('subject','same') == 'same':\n# Default subdirectory is same as data directory.\n tmplt['subject'] = self.topdir.split('/')[-1]\n else:\n if not isinstance(tmplt['subject'],str):\n errstr = 'preprocess: Invalid subject number. Be sure to ' + \\\n 'enclose the subject number item with double quotes.'\n raise RuntimeError(errstr)\n\n# Keys that apply to all EPIs.\n self.fsl_flip = tmplt.get('fsl_flip', False)\n if self.fsl_flip:\n self.flip_opts = '-LT'\n else:\n self.flip_opts = ''\n\n# Replace strings with python types.\n for key in tmplt.keys():\n if tmplt[key] == 'None':\n tmplt[key] = None\n elif key == 'True':\n tmplt[key] = True\n elif key == 'False':\n tmplt[key] = False\n return tmplt", "def getTemplateFile(fname):\n return os.path.join(Configurations.getTemplateDir(), fname)", "def _get_template(self):\n # Get templates and put them in the order of importance:\n # 1. template specified in \"modules.yaml\"\n # 2. template specified in a package directly\n # 3. 
default template (must be defined, check in __init__)\n module_system_name = str(self.module.__name__).split(\".\")[-1]\n package_attribute = \"{0}_template\".format(module_system_name)\n choices = [\n self.conf.template,\n getattr(self.spec.package, package_attribute, None),\n self.default_template, # This is always defined at this point\n ]\n # Filter out false-ish values\n choices = list(filter(lambda x: bool(x), choices))\n # ... and return the first match\n return choices.pop(0)", "def _template_file_default(self):\n return \"index\"", "def get_template_names(self):\n name = self.__class__.__name__.replace(\"DatatableView\", \"\")\n name = re.sub(r'([a-z]|[A-Z]+)(?=[A-Z])', r'\\1_', name)\n return [\"demos/\" + name.lower() + \".html\", \"example_base.html\"]", "def template_regex(filename):\n\n rfilename = filename[::-1]\n\n for j, cp in enumerate(compiled_patterns):\n match = cp.match(rfilename)\n if not match:\n continue\n groups = match.groups()\n\n if len(groups) == 3:\n exten = \".\" + groups[0][::-1]\n digits = groups[1][::-1]\n prefix = groups[2][::-1] + joiners[j]\n else:\n exten = \"\"\n digits = groups[0][::-1]\n prefix = groups[1][::-1] + joiners[j]\n\n template = prefix + (\"#\" * len(digits)) + exten\n return template, int(digits)\n\n # What is supposed to happen otherwise?\n raise ValueError(\"Could not determine filename template\")", "def _get_cfn_template_file_name(self, cfn_template_path: str) -> str:\n base_name = os.path.basename(cfn_template_path)\n (file_name, ext) = os.path.splitext(base_name)\n return file_name", "def _get_template_fname(self):\n template_fname = self._context.get('template_fname', False)\n return template_fname", "def test_read_namespaced_template(self):\n pass", "def get_templates_dirs(self):\n\t\tfrom pkg_resources import resource_filename\n\t\treturn [resource_filename(__name__, 'templates')]", "def test_templates(self):\n path = str(Template())\n self.assertTrue(os.path.exists(path))", "def get_template_from_path(path: str) -> str:\r\n path = path.replace(\"\\\\\", \"/\")\r\n return path", "def template_path(name):\n template_dir = os.path.join(os.path.dirname(__file__), 'templates')\n return os.path.join(template_dir, (name + \".html\"))", "def test_correct_template(self):\n self.assertCorrectTemplateUsed('common/home.html')", "def _git_templates():\n search_locations = [\n '/usr/share/git-core/templates',\n '/usr/local/share/git-core/templates',\n '/usr/local/git/share/git-core/templates'\n ]\n\n for possible_location in search_locations:\n if isdir(possible_location):\n return possible_location\n\n return None", "def test_main_custom_template_dir_old_style(capsys: CaptureFixture) -> None:\n\n input_filename = OPEN_API_DATA_PATH / 'api.yaml'\n custom_template_dir = DATA_PATH / 'templates_old_style'\n extra_template_data = OPEN_API_DATA_PATH / 'extra_data.json'\n\n with freeze_time(TIMESTAMP):\n main(\n [\n '--input',\n str(input_filename),\n '--custom-template-dir',\n str(custom_template_dir),\n '--extra-template-data',\n str(extra_template_data),\n ]\n )\n\n captured = capsys.readouterr()\n assert (\n captured.out\n == (EXPECTED_MAIN_PATH / 'main_custom_template_dir' / 'output.py').read_text()\n )\n assert captured.err == inferred_message.format('openapi') + '\\n'", "def test_non_nested_template_source_generation(self):\n sources = [source for source in self.loader.get_template_sources('component.html')]\n\n self.assertEqual(len(sources), 2)\n self.assertEqual(sources[0], 'MOCK_BASE_DIR/component/component.html')\n 
self.assertEqual(sources[1], 'MOCK_BASE_DIR_2/component/component.html')", "def get_template_name(self):\n template = None\n if self.template:\n template = self.template\n if not template:\n for p in self.get_ancestors(ascending=True):\n if p.template:\n template = p.template\n break\n if not template:\n template = settings.CMS_TEMPLATES[0][0]\n for t in settings.CMS_TEMPLATES:\n if t[0] == template:\n return t[1] \n return _(\"default\")" ]
[ "0.68880016", "0.65661573", "0.6463607", "0.6450778", "0.6291978", "0.6183937", "0.6181306", "0.6174625", "0.61639", "0.60519874", "0.6047972", "0.6023817", "0.60039794", "0.5952855", "0.5941049", "0.5935887", "0.5918789", "0.5912781", "0.5902953", "0.5882798", "0.5849363", "0.5846346", "0.58270925", "0.5826837", "0.58109087", "0.5803167", "0.5791549", "0.5790368", "0.57873607", "0.5773656" ]
0.6831351
1
constructor instantiate a Document with a term_list to be converted into dict
def __init__(self, term_list, links=[]): # do type check if not isinstance(term_list, list): raise TypeError('term_list must be of type list') if not isinstance(links, list): raise TypeError('links must be of type list') self.term_dict = {x: term_list.count(x) for x in term_list} self.links = copy.deepcopy(links)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, docs, n):\n self.n = n\n self.dict = {}\n self.vocab = set()\n self.sum_index = \"*sum*\"\n regex = re.compile(\"\\s+\")\n count = 0\n for doc in docs:\n terms = re.split(regex, doc)\n for term in terms:\n if term not in self.vocab:\n self.vocab.add(term)\n for i in range(0, len(terms) - n + 1):\n end = i+n-1\n t = tuple(terms[i:end])\n if t not in self.dict:\n self.dict[t] = {}\n self.dict[t][self.sum_index] = 0\n self.dict[t][self.sum_index] += 1\n end_term = terms[end]\n if end_term not in self.dict[t]:\n self.dict[t][end_term] = 1\n else:\n self.dict[t][end_term] += 1\n self.D = len(self.vocab)", "def parse_doc(self, doc_as_list):\n tweet_id = doc_as_list[0]\n tweet_date = doc_as_list[1]\n full_text = doc_as_list[2]\n url = doc_as_list[3]\n retweet_text = doc_as_list[4]\n retweet_url = doc_as_list[5]\n quote_text = doc_as_list[6]\n quote_url = doc_as_list[7]\n term_dict = {}\n tokenized_text = self.parse_sentence(full_text)\n doc_length = len(tokenized_text) # after text operations.\n\n for i, term in enumerate(tokenized_text):\n if term not in term_dict.keys():\n term_dict[term] = [1, [i]]\n else:\n term_dict[term][0] += 1\n term_dict[term][1].append(i)\n\n document = Document(tweet_id, tweet_date, full_text, url, retweet_text, retweet_url, quote_text,\n quote_url, term_dict, doc_length)\n return document", "def parse_doc(self, doc_as_list):\n\n tweet_id = doc_as_list[0]\n tweet_date = doc_as_list[1]\n full_text = doc_as_list[2]\n url = doc_as_list[3]\n indice = doc_as_list[4]\n retweet_text = doc_as_list[5]\n retweet_url = doc_as_list[6]\n retweet_indice = doc_as_list[7]\n quote_text = doc_as_list[8]\n quote_url = doc_as_list[9]\n quoted_indice = doc_as_list[10]\n retweet_quoted_text = doc_as_list[11]\n retweet_quoted_url = doc_as_list[12]\n retweet_quoted_indice = doc_as_list[13]\n\n term_dict = {}\n\n tokenized_text = self.parse_sentence(full_text)\n tokenized_quote = self.parse_sentence(quote_text)\n # tokenized_url = self.handle_url(url)\n\n\n doc_length = len(tokenized_text) # after text operations - length of full_text\n\n new_tokenized_text = tokenized_text + tokenized_quote\n\n # spell checker\n # new_tokenized_text = self.spell.update(new_tokenized_text)\n\n for term in new_tokenized_text:\n if term is not \"\": # or (term.isalpha() and len(term) == 1)\n if term not in term_dict:\n term_dict[term] = 1\n else:\n term_dict[term] += 1\n\n document = Document(tweet_id, tweet_date, full_text, url, retweet_text, retweet_url, quote_text,\n quote_url, term_dict, doc_length)\n return document", "def __init__(self, tokenizer, stemmer=None, stopwords=None):\n self.tokenizer = tokenizer\n self.stemmer = stemmer\n self.index = defaultdict(list)\n self.documents = {}\n self.__unique_id = 0 #can be used as docid\n if not stopwords:\n self.stopwords = list()\n else:\n self.stopwords = list(stopwords)", "def __init__(self, main_doc):\n\t\tif not isinstance(main_doc, Document):\n\t\t\traise TypeError('term must be of type Document')\n\t\tself.main_doc = main_doc\n\t\tself.env_docs = []", "def initialize_terms_and_postings():\n global dictionary, postings\n for id in document_filenames:\n document = getDocumentContent(document_filenames[id])\n if(document_filenames[id].rfind(\".pdf\") == len(document_filenames[id]) - 4):\n terms = tokenize(document.encode('utf-8'))\n if(document_filenames[id].rfind(\".txt\") == len(document_filenames[id]) - 4):\n terms = tokenize(document)\n if(document_filenames[id].rfind(\".docx\") == len(document_filenames[id]) - 5):\n terms = 
tokenize(document)\n if(document_filenames[id].rfind(\".pptx\") == len(document_filenames[id]) - 5):\n terms = tokenize(document)\n unique_terms = set(terms)\n dictionary = dictionary.union(unique_terms)\n for term in unique_terms:\n postings[term][id] = terms.count(term) # the value is the\n # frequency of the\n # term in the\n # document", "def __init__(self, tokenizer=simple_tokenize):\n # Set tokenizer to use for tokenizing new documents\n self.tokenize = tokenizer\n # The term document matrix is a sparse matrix represented as a\n # list of dictionaries. Each dictionary contains the word\n # counts for a document.\n self.sparse = []\n # Keep track of the number of documents containing the word.\n self.doc_count = {}", "def _load_biblical_terms_list(self, biblical_terms_list, _textin=''):\n if not _textin:\n fin = codecs.open(biblical_terms_list, mode='r', \\\n encoding='utf-16')\n lines = [l.strip() for l in fin.readlines()]\n else:\n lines = _textin\n line = ' '.join([aline.strip() for aline in lines])\n html = etree.HTML(line)\n #root = etree.fromstring(line)\n #body = etree.SubElement(html, \"body\")\n body = html[1]\n table = body[0]\n terms = dict()\n for tr in table[1:]:\n term = str(tr[3].text)\n rendering = str(tr[4].text)\n terms[term] = rendering\n return(terms)", "def add_new_doc(self, document, documents_list_length=10000):\n\n try:\n document_dictionary = document.term_doc_dictionary\n # self.countDoc += 1\n for term in document_dictionary.keys():\n if self.stemming == 'y':\n my_stemmer = Stemmer()\n term = my_stemmer.stem_term(term)\n # Update inverted index and posting\n if term not in self.inverted_idx.keys():\n self.inverted_idx[term] = [1, [\n (document_dictionary[term], document.tweet_id)]] # amount of doc, freq in the doc, doc id.\n\n else:\n self.inverted_idx[term][0] += 1 # amount of doc\n self.inverted_idx[term][1].append((document_dictionary[term],\n document.tweet_id)) # freq in the doc # doc id\n\n if term not in self.postingDict.keys():\n self.postingDict[term] = [(document.tweet_id, document_dictionary[term])]\n else:\n self.postingDict[term].append((document.tweet_id, document_dictionary[term]))\n # self.countTweet -= 1\n\n if document.tweet_id not in self.tweet_dict.keys():\n self.tweet_dict[document.tweet_id] = [[term, document_dictionary[term]], 1,\n 0] # [term,freq in tweet], amount of unique terms in tweet, amount of terms in tweet\n elif document_dictionary[term] > self.tweet_dict[document.tweet_id][0][\n 1]: # tweet exist, compering between freq in two terms\n if self.tweet_dict[document.tweet_id][0][\n 1] == 1: # before change term check if the last term is unique\n self.tweet_dict[document.tweet_id][\n 1] += 1 # last term is unique: add to the amount of uniqe terms in tweet\n self.tweet_dict[document.tweet_id][0] = [term,\n document_dictionary[term]] # change between the terms\n self.tweet_dict[document.tweet_id][2] += 1\n elif document_dictionary[term] == 1: # tweet exist, not most common, check if unique\n self.tweet_dict[document.tweet_id][1] += 1\n self.tweet_dict[document.tweet_id][2] += 1\n except:\n # print('problem in indexer : add_new_doc')\n # print(traceback.print_exc())\n pass", "def __init__(self):\n self.word_dict = collections.defaultdict(list)", "def __init__(self, *terms, **kwargs):\n self.missing = kwargs.pop('_key_missing_', False)\n if terms and kwargs:\n raise ValueError(\"You must specify terms or kwargs, not both\")\n self.terms = []\n for t in terms:\n self.add_term(t)\n self.add_term(kwargs)", "def __init__(self, 
documents_path):\n self.documents = []\n self.vocabulary = []\n self.likelihoods = []\n self.documents_path = documents_path\n self.term_doc_matrix = None \n self.document_topic_prob = None # P(z | d)\n self.topic_word_prob = None # P(w | z)\n self.topic_prob = None # P(z | d, w)\n\n self.number_of_documents = 0\n self.vocabulary_size = 0", "def _to_solr_document(document):\n solr_doc = collections.defaultdict(list)\n solr_doc['id'] = document.doc_id\n solr_doc['rank'] = document.rank\n solr_doc['language'] = document.language or ''\n\n for field in document.fields:\n\n lang_suffix = ''\n lang = field.language or document.language\n if lang in SUPPORTED_LANGUAGES:\n lang_suffix = '_{}'.format(lang)\n elif lang is not None:\n logger.warning('Language \"{}\" is not supported'.format(lang))\n\n if field.type == Field.Type.TEXT:\n solr_field_name = '{}_{}{}'.format(field.name, 'txt', lang_suffix)\n solr_doc[solr_field_name].append(field.value)\n elif field.type == Field.Type.HTML:\n raise InvalidRequest('Indexing HTML fields is not supported yet')\n elif field.type == Field.Type.ATOM:\n solr_field_name = '{}_{}'.format(field.name, 'atom')\n solr_doc[solr_field_name].append(field.value)\n elif field.type == Field.Type.NUMBER:\n solr_field_name = '{}_{}'.format(field.name, 'number')\n solr_doc[solr_field_name].append(field.value)\n elif field.type == Field.Type.DATE:\n # A single GAE date field goes as two Solr fields.\n # <field_name>_date is DateRange field which is used for queries\n solr_field_name = '{}_{}'.format(field.name, 'date')\n datetime_str = field.value.strftime('%Y-%m-%dT%H:%M:%SZ')\n solr_doc[solr_field_name].append(datetime_str)\n # <field_name>_date_ms is integer field which is used for sorting\n solr_field_name = '{}_{}'.format(field.name, 'date_ms')\n datetime_ms = int(time.mktime(field.value.timetuple()) * 1000)\n solr_doc[solr_field_name].append(datetime_ms)\n elif field.type == Field.Type.GEO:\n solr_field_name = '{}_{}'.format(field.name, 'geo')\n geo_str = '{},{}'.format(field.value[0], field.value[1])\n solr_doc[solr_field_name].append(geo_str)\n else:\n raise UnknownFieldTypeException(\n \"A document contains a field of unknown type: {}\".format(field.type)\n )\n\n for facet in document.facets:\n if facet.type == Facet.Type.ATOM:\n # A single GAE facet goes as two Solr fields.\n # <field_name>_atom_facet_value stores original value (not indexed).\n solr_field_name = '{}_{}'.format(facet.name, 'atom_facet_value')\n solr_doc[solr_field_name].append(facet.value)\n # <field_name>_atom_facet stores lowercased value (indexed).\n solr_field_name = '{}_{}'.format(facet.name, 'atom_facet')\n solr_doc[solr_field_name].append(facet.value.lower())\n elif facet.type == Facet.Type.NUMBER:\n solr_field_name = '{}_{}'.format(facet.name, 'number_facet')\n solr_doc[solr_field_name].append(facet.value)\n else:\n raise UnknownFacetTypeException(\n \"A document contains a facet of unknown type: {}\".format(facet.type)\n )\n\n return solr_doc", "def add_new_doc(self, document):\n self.counterOfTweets += 1\n docID = document.tweet_id\n document_dictionary = document.term_doc_dictionary # document_dictionary = {term:[[indexes],freq]}\n self.tweetTerms[docID] = list(document_dictionary.keys())\n freq_max = sorted(list(document_dictionary.values()), key=itemgetter(1), reverse=True)[0][1] # Gets the maxFreq\n postingFileName = \"\"\n\n # Go over each term in the doc\n for term in sorted(list(document_dictionary.keys())):\n\n # Deciding the type of the term\n if (str(term[0]).lower() not in 
self.letters): # others\n type = 1\n elif (len(term) > 1): # 'J'\n if str(term[1]).lower() not in self.letters and str(term[1]).lower() != '.': # 1400 -> 1.400K\n type = 1\n else: # strings\n type = 2\n else: # strings\n type = 2\n\n if (' ' in term): # alone entities\n if term not in self.alone_entities_dict: # fix it\n self.alone_entities_dict[term] = 0\n self.alone_entities_dict[term] += 1\n\n if (type == 1):\n if (postingFileName != \"postingOthers\"):\n postingFileName = \"postingOthers\"\n\n elif (len(term) == 1):\n if postingFileName != \"posting_\" + term.lower():\n postingFileName = \"posting_\" + term.lower()\n\n elif (term[1] == '.'):\n if postingFileName != \"posting_\" + term[0].lower():\n postingFileName = \"posting_\" + term[0].lower()\n else:\n if postingFileName != \"posting_\" + str(term[0]).lower() + str(term[1]).lower():\n postingFileName = \"posting_\" + term[0].lower() + term[1].lower()\n\n indexes_t = document_dictionary[term][0]\n freq_t = document_dictionary[term][1]\n tf = freq_t / freq_max\n\n if term not in self.inverted_idx.keys():\n self.postingFiles[postingFileName][term] = []\n self.postingFiles[postingFileName][term].append([freq_t, docID, indexes_t, tf])\n self.inverted_idx[term] = [1, freq_t, postingFileName]\n\n else:\n # update inv_dict:\n self.inverted_idx[term][0] += 1 # add another doc to the count in the inv_dict\n self.inverted_idx[term][1] += freq_t\n self.postingFiles[postingFileName][term].append([freq_t, docID, indexes_t, tf])", "def index_terms(self):\n [[self.set_postings(term, id) for term in NLProcessor.process(doc)] for id, doc in\n self.doc_store.dict.iteritems()]", "def _create_dictionary(self, document):\n words = self._normalize_words(document.words)\n unique_words = frozenset(words)\n return dict((word, idx) for idx, word in enumerate(unique_words))", "def __init__(self, docs, dict_path= 'wordindex.npy'):\n super(NNModel, self).__init__()\n self.stopwords += self.additional_stopwords\n self.words = set(['OOB', 'UNK']) # OOB for out of boundary, UNK for unknown words\n self.docs = []\n\n for doc in docs:\n datum = []\n for word in self.cut_words(doc):\n self.words.add(word)\n datum.append(word)\n self.docs.append(datum)\n\n self.words = list(self.words)\n self.word2idx = dict([(self.words[i], i) for i in range(len(self.words))])\n logging.info(f'{len(docs)} articles loaded, with word bag length: {len(self.words)}')\n if dict_path != '': # save dict\n np.save(DATA_DIR + dict_path, self.word2idx)", "def __init__(self, termname, keys, ordinal=False):\n \n self.keys = list(set(keys))\n self.keys.sort()\n self._name = termname\n self.termname = termname\n self.ordinal = ordinal\n\n if self.ordinal:\n name = self.name\n else:\n name = ['(%s==%s)' % (self.termname, str(key)) for key in self.keys]\n\n term.__init__(self, name, termname=self.termname, func=self.get_columns)", "def __init__(self, t_dict, links=[]):\n\n\t\t# do type check\n\t\tif not isinstance(t_dict, dict):\n\t\t\traise TypeError('t_dict must be of type dict')\n\t\tif not isinstance(links, list):\n\t\t\traise TypeError('links must be of type list')\n\t\tself.term_dict = copy.deepcopy(t_dict)\n\t\tself.links = copy.deepcopy(links)", "def create(init_document: 'Document') -> 'DocumentArray':", "def __init__(self, *args, **kwargs):\n self.document_links = []\n self.document_date_map = defaultdict(list)\n super().__init__(*args, **kwargs)", "def make_document_term_matrix(token_list):\n vocabulary = defaultdict()\n vocabulary.default_factory = vocabulary.__len__\n j_indices = []\n 
\"\"\"Construct an array.array of a type suitable for scipy.sparse indices.\"\"\"\n indptr = array.array(str(\"i\"))\n values = array.array(str(\"i\"))\n indptr.append(0)\n\n for tokens in token_list:\n feature_counter = {}\n for token in tokens:\n feature_idx = vocabulary[token]\n if feature_idx not in feature_counter:\n feature_counter[feature_idx] = 1\n else:\n feature_counter[feature_idx] += 1\n j_indices.extend(feature_counter.keys())\n values.extend(feature_counter.values())\n indptr.append(len(j_indices))\n\n vocabulary = dict(vocabulary)\n j_indices = np.asarray(j_indices, dtype=np.intc)\n indptr = np.frombuffer(indptr, dtype=np.intc)\n values = np.frombuffer(values, dtype=np.intc)\n\n X = scipy.sparse.csr_matrix((values, j_indices, indptr),\n shape=(len(indptr) - 1, len(vocabulary)),\n dtype=np.int64)\n X.sort_indices()\n return X, vocabulary", "def __init__(self, documents):\n Classifier.__init__(self, documents)\n documents = set(documents)\n term_document_matrix = TermDocumentMatrix(documents, compute_word_vectors=False, compute_document_vectors=False)\n self.vocabulary = set(term_document_matrix.vocabulary)\n self.tree = self.get_tree(documents, self.vocabulary)", "def get_document_voc_terms(data):\r\n doc_terms = set()\r\n try:\r\n if 'MeshHeadings' in data and data['MeshHeadings']:\r\n doc_terms.update({k['text'] for k in data['MeshHeadings']})\r\n if 'Chemicals' in data and data['Chemicals']:\r\n doc_terms.update({k['NameOfSubstance'] for k in data['Chemicals']})\r\n if 'Keywords' in data and data['Keywords']:\r\n doc_terms.update({k for k in data['Keywords']})\r\n except KeyError as e:\r\n # term is not present in current doc\r\n logging.info(e)\r\n\r\n # do not keep single-word terms\r\n return {' '.join(sentence_tokenizer(d)) for d in doc_terms if re.search('[^a-zA-Z0-9]', d)}", "def test_createGlossaryByList(self):\n li = []\n li.append(['term', 'tags', 'value'])\n li.append(['foo', 'a', '1'])\n li.append(['bar', 'a, b', '2'])\n li.append(['gnark', 'a, c', '3'])\n self.g = glossary.Glossary(li)", "def __init__(self, docs, freq_threshold= 2):\n BaseDoc2Vec.__init__(self) # initialize variables\n self.stopwords += self.additional_stopwords\n self.docs = []\n self.words = set()\n\n for doc in docs: # go through documents to record all words\n words = set()\n for word in self.cut_words(doc):\n self.words.add(word)\n words.add(word)\n self.docs.append(list(words))\n self.words = list(self.words)\n\n self.dfdict = dict([(wrd, 0) for wrd in self.words])\n for doc in self.docs: \n for word in doc:\n self.dfdict[word] += 1 # calculate word frequency\n \n # exclude words that appear less than threshold\n self.words = [word for word in self.words if self.dfdict[word] > freq_threshold]\n logging.info(f'{len(docs)} articles loaded, with word bag length: {len(self.words)}')", "def __init__(self, array_of_citations, dict_of_keywords):\n self.array_of_citations = array_of_citations\n self.dict_of_keywords = dict_of_keywords", "def add_new_doc(self, document, end_of_corpus):\n max_tf = 0\n unique_terms_counter = 0\n document_dictionary = document.term_doc_dictionary\n # Go over each term in the doc\n for term in document_dictionary:\n try:\n # Update inverted index and posting\n if term not in self.inverted_idx:\n self.inverted_idx[term] = 1\n unique_terms_counter += 1\n else:\n self.inverted_idx[term] += 1\n if term not in self.posting_dict:\n self.posting_dict[term] = []\n\n self.posting_dict[term].append(\n (document.tweet_id, document_dictionary[term])) # key: str , value: array of 
tuples\n\n max_tf = max(document_dictionary[term], max_tf)\n\n except:\n\n print('problem with the following key {}'.format(term[0]))\n\n document.max_tf = max_tf\n document.unique_terms = unique_terms_counter\n self.docs_count += 1\n\n modulo = int(document.tweet_id) % 10\n self.documents[modulo][document.tweet_id] = [document.term_doc_dictionary, document.max_tf]\n\n if self.docs_count == self.DOCS_SIZE or end_of_corpus: # if we reach chunk size or end of corpus\n self.add_to_file(end_of_corpus)\n self.docs_count = 0\n self.posting_dict = {}\n\n for i in self.documents: # 0 - 9\n if self.documents[i].__len__() > 15000:\n doc = utils.load_obj(self.out + \"document\" + str(i))\n doc.update(self.documents[i])\n utils.save_obj(doc, self.out + \"document\" + str(i))\n self.documents[i] = {}", "def __init__(self):\n self._pronunciations = nltk.corpus.cmudict.dict()\n \"\"\"\n API Documentation for CMU dictionary corpus\n http://www.nltk.org/api/nltk.corpus.reader.html#module-nltk.corpus.reader.cmudict\n \"\"\"", "def _docMapping(self):\n doc2quests = defaultdict(list)\n for q, d in self.quest2doc.items():\n doc2quests[d].append(q)\n return doc2quests" ]
[ "0.66287744", "0.65560615", "0.64975613", "0.63856316", "0.63014597", "0.6102487", "0.6062859", "0.60123897", "0.59738135", "0.597112", "0.5929631", "0.58660865", "0.5838759", "0.5802222", "0.5794235", "0.5776823", "0.57711035", "0.5759174", "0.57271045", "0.5723895", "0.5695617", "0.5689399", "0.5682319", "0.5651787", "0.55844325", "0.55803424", "0.55402285", "0.5534467", "0.5532819", "0.5527768" ]
0.6789457
0
init Construct a DocumentSet with main document
def __init__(self, main_doc): if not isinstance(main_doc, Document): raise TypeError('term must be of type Document') self.main_doc = main_doc self.env_docs = []
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create(init_document: 'Document') -> 'DocumentArray':", "def build_document(self):\n pass", "def new_document(self) -> nodes.document:\n document = super().new_document()\n document.__class__ = addnodes.document # replace the class with patched version\n\n # substitute transformer\n document.transformer = SphinxTransformer(document)\n document.transformer.set_environment(self.settings.env)\n\n # substitute reporter\n reporter = document.reporter\n document.reporter = LoggingReporter.from_reporter(reporter)\n\n return document", "def XCAFDoc_DocumentTool_Set(*args):\n return _XCAFDoc.XCAFDoc_DocumentTool_Set(*args)", "def build_corpus(self):\n # #############################\n\n doc = metapy.index.Document()\n tok = metapy.analyzers.ICUTokenizer(suppress_tags=True)\n tok = metapy.analyzers.LowercaseFilter(tok)\n tok = metapy.analyzers.LengthFilter(tok, min=3, max=1000)\n tok = metapy.analyzers.Porter2Filter(tok)\n tok = metapy.analyzers.ListFilter(tok, \"lemur-stopwords.txt\", metapy.analyzers.ListFilter.Type.Reject)\n collection = -1\n\n with open(self.documents_path) as file:\n for num, line in enumerate(file):\n l = line.strip()\n c = int(l[0])\n l = l[2:]\n doc.content(l)\n tok.set_content(doc.content())\n if c != collection:\n self.documents.append([])\n collection = c\n self.documents[c].append([token for token in tok])\n self.number_of_collections = len(self.documents)\n self.number_of_documents = len(self.documents[0])\n #print(self.number_of_collections)\n #print(self.number_of_documents)\n #print(self.documents[0])", "def Init(self, *args):\n return _XCAFDoc.XCAFDoc_DocumentTool_Init(self, *args)", "def __init__(self, doc_set):\n self.__doc_set = doc_set\n self.__preprocessor = InputPreprocessor(doc_set)", "def __init__(self, document):\n\n self._settemplates(self.onecol, self.twocol)\n assert document.type_key == 'cim.2.designing.Project'\n self.doc = document\n\n # We will populate the \"mip\" variable with the mip era\n self.mips = 'CMIP6'\n\n self.related = []\n for r in self.doc.required_experiments:\n self.related.append(esd.retrieve(r.id))", "def setUp(self):\n super(SimpleDocumentTestCase, self).setUp()\n self.document = SimpleDocument()\n self.document.description = u\"body text\"\n self.document.title = u\"document title\"", "def __init__(self, doc):\n\n self.doc = doc\n if self.doc.doi:\n self._populate()\n self.populated = True\n else:\n self.populated = False", "def build(self):\n\t\tself.documents = self.get_items_to_index()\n\t\tself.build_index()", "def setUp(self):\n super(SimpleDocumentAnnotationTestCase, self).setUp()\n self.document = SimpleDocument()\n self.document.description = u\"body text\"\n self.document.title = u\"document title\"", "def __init__(self, documents):\n Classifier.__init__(self, documents)\n documents = set(documents)\n term_document_matrix = TermDocumentMatrix(documents, compute_word_vectors=False, compute_document_vectors=False)\n self.vocabulary = set(term_document_matrix.vocabulary)\n self.tree = self.get_tree(documents, self.vocabulary)", "def __init__(self):\n self.inverted_index = OrderedDict({})\n self.i=0\n self.unique_doc_ids=set()", "def __init__(self, *args, **kwargs):\n self.document_links = []\n self.document_date_map = defaultdict(list)\n super().__init__(*args, **kwargs)", "def setUp(self):\n self.docCounter = 0", "def __init__(self, collection):\n self.collection = collection", "def Set(*args):\n return _XCAFDoc.XCAFDoc_DocumentTool_Set(*args)", "def __init__(self, *args):\n this = _libsbml.new_SBMLDocument(*args)\n 
try: self.this.append(this)\n except: self.this = this", "def __init__(self, docs, **kargs): # [todo]\n self.cohort = kargs.get('cohort', None)\n self.nDoc = len(docs)\n\n return", "def build_corpus(self):\n print(\"Inside the build_corpus >>>>>\")\n documentsCount = 0\n documents = self.documents\n\t\t\n with open(self.documents_path) as file:\n for documents in file.readlines():\n documents = documents.rstrip('}\\n ').strip('0\\t').strip('1\\t').split(' ')\n documentsCount = documentsCount +1\n self.documents.append(documents)\n\t\t\t\n self.number_of_documents = documentsCount", "def __init__(self, *args):\n this = _libsbml.new_MultiSBMLDocumentPlugin(*args)\n try: self.this.append(this)\n except: self.this = this", "def init( self ):\n\t\treturn self", "def documents(self):\r\n return GlobalDocuments(self)", "def __init__(self, tokenizer, stemmer=None, stopwords=None):\n self.tokenizer = tokenizer\n self.stemmer = stemmer\n self.index = defaultdict(list)\n self.documents = {}\n self.__unique_id = 0 #can be used as docid\n if not stopwords:\n self.stopwords = list()\n else:\n self.stopwords = list(stopwords)", "def __init__(self, index, document_id, client=None):\n self.index = index\n self.document_id = document_id\n self.client = client or current_search_client", "def initialise(filename):\n normaliser = Normaliser()\n\n abs_colle_ref = db.collection('abstract')\n title_colle_ref = db.collection('title')\n author_colle_ref = db.collection('author')\n\n abs_param_ref = db.collection('param').document(\"abstract\")\n title_param_ref = db.collection('param').document(\"title\")\n author_param_ref = db.collection('param').document(\"author\")\n\n doc_total = 0\n total_title_word = 0\n total_abs_word = 0\n total_author_word = 0\n\n\n text = read(filename)\n\n for key in text.keys():\n doc_total = doc_total + 1\n doc_id = key.replace(\".\", \"-\")\n abstract = normaliser.normalise_text(text[key][\"abs\"])\n title = normaliser.normalise_text(text[key][\"title\"])\n author = normaliser.normalise_authors(text[key][\"authors\"])\n\n\n\n\n \"\"\"creating index\"\"\"\n\n file_title_cnt = init_index(doc_id, title, title_colle_ref)\n file_abs_cnt = init_index(doc_id, abstract, abs_colle_ref)\n file_author_cnt = init_index(doc_id, author, author_colle_ref)\n total_abs_word = total_abs_word + file_abs_cnt\n total_title_word = total_title_word + file_title_cnt\n total_author_word = total_author_word + file_author_cnt\n\n\n abs_param_ref.set(\n {\n 'total_doc_number': doc_total,\n 'total_document_length': total_abs_word\n }\n )\n\n title_param_ref.set(\n {\n 'total_doc_number': doc_total,\n 'total_document_length': total_title_word\n }\n )\n\n author_param_ref.set(\n {\n 'total_doc_number': doc_total,\n 'total_document_length': total_author_word\n }\n )\n return", "def read(self):\n dataset = Dataset()\n with open(self.corpus_file, encoding='utf-8') as file:\n reader = csv.reader(file, delimiter='\\t')\n for row in reader:\n document = Document()\n document.parts['abstract'] = Part(row[1])\n dataset.documents[row[0]] = document\n\n return dataset", "def __init__(self):\n self.EntireSet = []", "def read(self):\n dataset = Dataset()\n with open(self.corpus_folder, encoding='utf-8') as file:\n reader = csv.reader(file, delimiter='\\t')\n for row in reader:\n docid, title, abstract = row\n title = title.strip()\n abstract = abstract.strip()\n\n document = Document()\n if title:\n document.parts['title'] = Part(title)\n if abstract and abstract != 'null':\n document.parts['abstract'] = Part(abstract)\n\n 
dataset.documents[docid] = document\n\n return dataset" ]
[ "0.6253416", "0.60488045", "0.5982539", "0.59698325", "0.5956928", "0.59276694", "0.58543664", "0.5842508", "0.5783739", "0.5778808", "0.57767564", "0.5767244", "0.57607514", "0.57084924", "0.5701809", "0.5619672", "0.56058586", "0.56026864", "0.55988246", "0.5564519", "0.55637556", "0.55492777", "0.5546486", "0.5516598", "0.551363", "0.54971385", "0.5482788", "0.54755306", "0.54401165", "0.5439752" ]
0.6229792
1
Add Env Page append a new env_page to env_docs
def add_env_page(self, env_page): if not isinstance(env_page, Document): raise TypeError('env_page must be of type Document') self.env_docs.append(env_page)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_env(self, env):\n pass", "def addPage(self, name, page, **attrs):\n page.globalConfig = self.globalConfig\n page.pageConfig['pageName'] = name\n self.globalConfig.pageList.append(name)\n self.globalConfig.pageAttributes[name] = dict(attrs)\n setattr(self,name,page) # Link page into page tree (for CherryPy)", "def add_page(self,**app_names_and_pages):\n \n for app,pages in app_names_and_pages.items():\n if os.path.exists(os.path.join(self._main,app)):\n for page in pages:\n os.makedirs(os.path.join(self._main,app,page))\n self._create_init_routes(self._main,app,page)\n else:\n print(\"that app does not exist\")\n\n self._update_add_app_or_page()", "def add_page(self, page): \n self.pages.append(Page(page))", "def handle_page(self, pagename: str, addctx: dict, templatename: str = 'page.html',\n outfilename: str | None = None, event_arg: Any = None) -> None:\n if pagename.startswith('genindex') and 'genindexentries' in addctx:\n if not self.use_index:\n return\n self.fix_genindex(addctx['genindexentries'])\n addctx['doctype'] = self.doctype\n super().handle_page(pagename, addctx, templatename, outfilename, event_arg)", "def add_env(v,s):\n if 0:\n if os.path.exists(s):\n tag = u\"GOOD\"\n else:\n tag = u\"BAD\"\n uprint(u\"{} {}\".format(tag,s))\n v.insert(0,s)", "def create_page(self):", "def addLink(self, name, alias, **attrs):\n self.globalConfig.pageList.append(name)\n self.globalConfig.pageAttributes[name] = dict(attrs)\n self.globalConfig.pageAttributes[name]['alias'] = alias", "def add_to_pr_export(self, exp_template):", "def publish_info_in_pagebrowser():\n env.run('bin/django create_pagebrowser_books')", "def AddPage(self, page, info):\r\n\r\n page_info = info\r\n page_info.window = page\r\n\r\n self._pages.append(page_info)\r\n\r\n # let the art provider know how many pages we have\r\n if self._art:\r\n minMaxTabWidth = self._auiNotebook.GetMinMaxTabWidth()\r\n self._art.SetSizingInfo(self._rect.GetSize(), len(self._pages), minMaxTabWidth)\r\n \r\n return True", "def handle_page(self, pagename, addctx, templatename='page.html',\n outfilename=None, event_arg=None):\n self.t3addctx = addctx\n StandaloneHTMLBuilder.handle_page(self, pagename, addctx,\n templatename, outfilename, event_arg)", "def setup_render(\n self, options: Dict[str, Any], env: MutableMapping[str, Any]\n ) -> None:\n self.md_env = env\n self.config: Dict[str, Any] = options\n self.document: nodes.document = self.config.get(\"document\", make_document())\n self.current_node: nodes.Element = self.config.get(\n \"current_node\", self.document\n )\n self.reporter: Reporter = self.document.reporter\n # note there are actually two possible language modules:\n # one from docutils.languages, and one from docutils.parsers.rst.languages\n self.language_module_rst: ModuleType = get_language_rst(\n self.document.settings.language_code\n )\n self._level_to_elem: Dict[int, nodes.Element] = {0: self.document}", "def add_intro(self):\n page = lambda x: pkgutil.get_data(\n 'pyscp_ebooks',\n 'resources/wanderers_library/{}.xhtml'.format(x)).decode('UTF-8')\n self.add_page('Cover Page', page('cover'))\n self.add_page('Introduction', page('intro'))\n license = parser.bs(page('license'))\n license.find(class_='footer').string = arrow.now().format('YYYY-MM-DD')\n self.add_page('License', license.div.prettify())\n self.add_page('Title Page', page('title'))", "def html_page_context(app, pagename, templatename, context, doctree):\n site = context[\"SITEMAP_BASE_URL\"]\n version = context[\"version\"]\n 
app.sitemap_links.add(f\"{site}{version}/{pagename}.html\")", "def setup_page(self):\n raise NotImplementedError", "def _build_page(page, config, site_navigation, env, dirty=False):\n\n # Run the `pre_page` plugin event\n page = config['plugins'].run_event(\n 'pre_page', page, config=config, site_navigation=site_navigation\n )\n\n page.read_source(config=config)\n\n # Run `page_markdown` plugin events.\n page.markdown = config['plugins'].run_event(\n 'page_markdown', page.markdown, page=page, config=config, site_navigation=site_navigation\n )\n\n page.render(config, site_navigation)\n\n # Run `page_content` plugin events.\n page.content = config['plugins'].run_event(\n 'page_content', page.content, page=page, config=config, site_navigation=site_navigation\n )\n\n context = get_context(site_navigation, config, page)\n\n # Allow 'template:' override in md source files.\n if 'template' in page.meta:\n template = env.get_template(page.meta['template'])\n else:\n template = env.get_template('main.html')\n\n # Run `page_context` plugin events.\n context = config['plugins'].run_event(\n 'page_context', context, page=page, config=config, site_navigation=site_navigation\n )\n\n # Render the template.\n output_content = template.render(context)\n\n # Run `post_page` plugin events.\n output_content = config['plugins'].run_event(\n 'post_page', output_content, page=page, config=config\n )\n\n # Write the output file.\n if output_content.strip():\n utils.write_file(output_content.encode('utf-8'), page.abs_output_path)\n else:\n log.info(\"Page skipped: '{}'. Generated empty output.\".format(page.title))", "def add_new_section(self, name, context=...):\n ...", "def add_page(self, title, wiz_page=None):\n page = Page(self, self._title, title, wiz_page)\n self._pages.append(page)\n return page", "def register_environment(env_info):\n prospective_env_info = VirtualEnvInfo(env_info)\n for _env in virtualenvs:\n if _env == prospective_env_info:\n _env.merge(prospective_env_info)\n else:\n virtualenvs.append(prospective_env_info)\n\n if len(virtualenvs) == 0:\n virtualenvs.append(prospective_env_info)", "def add_page(self, edition_id, page): \n journal = self.editions.get(edition_id, None)\n if journal == None: \n raise ValueError(\"This edition of the Journal Collection does not exist\")\n else: \n journal.add_page(page)", "def add_to_environment(v):\n return \"Environment='{}'\".format(\n \"\\nEnvironment=\".join(\n \"{}={}\".format(k, \"\".join(map(str, v))) for k, v in iteritems(v)\n )\n if isinstance(v, dict)\n else v\n )", "def setup_page(self):\r\n raise NotImplementedError", "def append_common(envin, content):\n # This is the original libconfig.h. However, in case somebody (like\n # pbdagcon) builds libpbdata in-place, we need to drop a copy of\n # libconfig.h wherever pbdata is actually built, which we will not\n # know until later. 
This can all be cleared up later, when we are\n # more clear about where things are built.\n libconfig_h = os.path.abspath(os.path.join(os.getcwd(), 'libconfig.h'))\n content += \"\"\"\nLIBCONFIG_H:=%s\n# Use PREFIX dir, if available.\nINCLUDES += ${PREFIX_INC}\nLIBS += ${PREFIX_LIB}\n\"\"\"%libconfig_h\n env = dict(envin)\n # Some extra defs.\n if 'PREFIX' in envin:\n PREFIX = envin['PREFIX']\n setenv(env, 'PREFIX_INC', os.path.join(PREFIX, 'include'))\n setenv(env, 'PREFIX_LIB', os.path.join(PREFIX, 'lib'))\n poss = [\n 'SH_LIB_EXT',\n 'EXTRA_LDFLAGS',\n 'PREFIX_LIB', 'PREFIX_INC',\n ]\n vals = ['%-20s := %s' %(k, v) for k,v in sorted(env.items()) if k in poss]\n return '\\n'.join([''] + vals + ['']) + content", "def __construct_mail_env(self, env):\n\n if isinstance(env, dict):\n for k, v in env.items():\n self.env_content += \"\"\"\n <tr><td>{}</td><td>{}</td></tr>\n \"\"\".format(k, v)\n return self.env_content", "def writeLocalEnv(self):\n \n # open file\n f = open(self.installPath + \"/build_env.sh\", 'w')\n \n # write to file\n f.write( 80*'#' + os.linesep + \"# Environment script generated by ilcsoft-install on \" + time.ctime() + os.linesep )\n f.write( \"# for \" + self.name + \" located at [ \" + self.installPath + \" ]\" + os.linesep + 80*'#' + os.linesep )\n\n # global environment variables\n if( len( self.parent.env ) > 0 ):\n f.write( 2*os.linesep + \"#\" + 80*'-' + os.linesep + \"#\" + 5*' ' + \"Global Environment Variables\" + os.linesep \\\n + \"#\" + 80*'-' + os.linesep )\n for k, v in self.parent.env.iteritems():\n f.write( \"export \" + str(k) + \"=\\\"\" + str(v) + \"\\\"\" + os.linesep )\n \n\n # write environment recursively to file\n self.writeEnv(f, [])\n \n\n f.write( \"# --- additional comands ------- \" + os.linesep ) \n print \"\\n ----- adding additional commands to build_env.sh : \\n \"\n for c in self.envcmds:\n f.write( c + os.linesep ) \n print \"\\n ----- adding additional command to build_env.sh \" + c + \"\\n\"\n\n # close file\n f.close()", "def page_setup(self, page_setup):\n\n self.container['page_setup'] = page_setup", "def add_footprint(self, config):\n logging.debug(\"add_footprint entered\")\n notify(\"Adding footprint...\")\n container = cf.get_container(self.container_name)\n try:\n index = self.container.get_object(\"index.json\")\n except pyrax.exceptions.NoSuchObject, e:\n print \"Creating empty index...\"\n logging.info(\"Creating empty index\")\n self.save()\n index = self.container.get_object(\"index.json\")\n \n index = index.fetch()\n logging.info(\"loaded index %s\" % index)\n logging.debug(config)\n logging.debug(self.footprints)\n self.footprints[config['footprint']] = config\n notify(\"Saving environment\")\n self.save()\n # update the containers with the footprint metadata\n container_name = \"%s-metadata\" % config['footprint']\n try:\n fpcontainer = cf.get_container(container_name)\n except pyrax.exceptions.NoSuchContainer, e:\n logging.info(\"Container '%s' doesn't exist. 
Creating.\" % container_name)\n fpcontainer = cf.create_container(container_name)\n filename = \"index.json\"\n content = json.dumps(config)\n cf.store_object(fpcontainer, filename, content)\n logging.info(\"Footprint config %s saved\" % container_name)\n notify(\"Footprint config %s saved\" % container_name)\n return True", "def add_page(self, url, title, content, site, headings='', commit=False):\n content = unicode(content)\n hash = \"%s:%s\" % (site, hashlib.sha1(content.encode('utf-8')).hexdigest())\n \n if hash in self._unique_data['hash'] or url in self._unique_data['url']:\n print \"Duplicate data in batch detected\"\n self.commit()\n \n self._unique_data['hash'].append(hash)\n self._unique_data['url'].append(url)\n \n self.writer.update_document(title=force_unicode(title), \n content=force_unicode(content), \n url=force_unicode(url), \n site=force_unicode(site), \n content_hash=force_unicode(hash),\n headings=force_unicode(headings))\n self.batch_count += 1\n self.optimize_count += 1\n if commit or self.batch_count >= self.batch_size:\n self.commit()", "def InsertPage(self, page, info, idx):\r\n \r\n page_info = info\r\n page_info.window = page\r\n\r\n if idx >= len(self._pages):\r\n self._pages.append(page_info)\r\n else:\r\n self._pages.insert(idx, page_info)\r\n\r\n # let the art provider know how many pages we have\r\n if self._art:\r\n minMaxTabWidth = self._auiNotebook.GetMinMaxTabWidth()\r\n self._art.SetSizingInfo(self._rect.GetSize(), len(self._pages), minMaxTabWidth)\r\n \r\n return True" ]
[ "0.5903957", "0.5708109", "0.5389904", "0.5385481", "0.52170116", "0.5199296", "0.51268643", "0.51034814", "0.5072406", "0.50699824", "0.49988046", "0.49757445", "0.4973589", "0.49586692", "0.49433592", "0.4912121", "0.4901298", "0.48945105", "0.4888683", "0.48753846", "0.48752305", "0.4860848", "0.48378262", "0.48341155", "0.48284134", "0.48083693", "0.47990033", "0.4772119", "0.47549638", "0.47465524" ]
0.83140147
0
Count term in environment calculate idf of a term in main doc
def __count_term_in_env(self, term): # type check if not isinstance(term, str): raise TypeError('term must be of type str') total_cnt = float(len(self.env_docs)) + 1.0 if total_cnt == 1.0: return 1.0 cnt = 1.0 for doc in self.env_docs: if term in doc.term_dict: cnt += 1.0 return math.log(total_cnt / cnt)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calc_tf(doc):\r\n tf = {}\r\n for term in doc:\r\n if term not in tf:\r\n tf[term] = doc.count(term)\r\n return tf", "def term_idf(self, term):\n idf = math.log(2 + self.count_term_distinct_documents(ANY))\\\n - math.log(1 + self.count_term_distinct_documents(term))\n return idf", "def idf(term, corpus):\n corpus_size = len(corpus)\n docs_with_term = 0\n\n for document in corpus:\n if term in document:\n docs_with_term += 1\n\n #add 1 to docs_with_term to account for terms that don't occur in the corpus\n #so that a division by zero doesn't occur\n return math.log( corpus_size / (docs_with_term+1) )", "def calc_idf(docs):\r\n terms = set()\r\n for doc in docs:\r\n for term in doc:\r\n terms.add(term)\r\n idf = {}\r\n for term in terms:\r\n term_count = 0\r\n doc_count = 0\r\n for doc in docs:\r\n doc_count += 1\r\n if term in doc:\r\n term_count += 1\r\n idf[term] = doc_count/term_count\r\n return idf", "def get_idf(term, documents):\n\n number_of_docs = len(documents)\n documents_containing_term = len([document for document in documents if term in document])\n\n idf = math.log10(number_of_docs / documents_containing_term)\n\n return round(idf, 5)", "def tfidf_term_in_document(self, term, document):\n tf = self.count_term_in_document(term, document)\\\n / self.count_term_in_document(ANY, document)\n idf = math.log(1 + self.count_term_distinct_documents(ANY))\\\n - math.log(1 + self.count_term_distinct_documents(term))\n return tf * idf", "def count_term_in_document(self, term, document):\n doc = self.get_document(document)\n for docterm, value in doc.get_terms():\n if docterm == term:\n return value\n return 0", "def idf(self, searcher, fieldnum, text):\n \n cache = self._idf_cache\n term = (fieldnum, text)\n if term in cache: return cache[term]\n \n df = searcher.doc_frequency(fieldnum, text)\n idf = log(searcher.doc_count_all() / (df + 1)) + 1.0\n cache[term] = idf\n return idf", "def get_tf(term, document):\n\n term_list = [term.lower() for term in document.split()]\n num_of_words_in_doc = len(document.split())\n term_count_in_doc = term_list.count(term)\n\n return term_count_in_doc / num_of_words_in_doc", "def calcCountDict(TFdict):\n\n countDict = {}\n\n for doc in TFdict:\n for term in doc:\n if term in countDict:\n countDict[term] +=1\n else:\n countDict[term] = 1\n\n return countDict", "def statistic_tfidf(self):\n\t\t# calculate df-idf for all words\n\t\tcount_dict = {x: self.main_doc.term_dict[x] * self.__count_term_in_env(x) for x in self.main_doc.term_dict}\n\t\t# sort them by df and idf\n\t\treturn sorted(count_dict.items(), key=operator.itemgetter(1), reverse=True)", "def idf(self, term):\n # idf values are cached for performance.\n idf = self._idf_cache.get(term)\n if idf is None:\n matches = len([True for text in self._texts if term in text])\n if len(self._texts) == 0:\n raise ValueError(\"IDF undefined for empty document collection\")\n idf = log(len(self._texts) / matches) if matches else 0.0\n self._idf_cache[term] = idf\n return idf", "def count_terms(equat_orig):\n\tterms = 0\n\tfor pow_group in equat_orig:\n\t\tif pow_group:\n\t\t\tfor _ in pow_group:\n\t\t\t\tterms += 1\n\tprint(f'\\033[1;95mTerms in the polynom: \\033[0m{terms}')", "def count_idf(self):\n idf = dict.fromkeys(range(self.instances.shape[1]), 0) # initialize for all features\n num_docs = self.instances.shape[0]\n feature_counts = self.count_document_frequency()\n for feature in feature_counts.keys():\n idf[feature] = math.log((num_docs / feature_counts[feature]), 10) if feature_counts[feature] 
> 0 else 0\n return idf", "def compute_idfs(documents):\n idf={}\n words={}\n # idf= no.of doc/no. of doc in which it lies\n for doc in documents:\n for wrd in set(documents[doc]):\n if wrd.lower() not in words:\n words[wrd.lower()]=0\n words[wrd.lower()]+=1 \n for word in words:\n idf[word]=len(documents)/words[word]\n return idf", "def calc_tdf(docs):\r\n terms = set()\r\n for doc in docs:\r\n for term in doc:\r\n terms.add(term)\r\n tdf = {}\r\n for term in terms:\r\n doc_count = 0\r\n for doc in docs:\r\n doc_count += 1 if term in doc else 0\r\n tdf[term] = doc_count\r\n return tdf", "def tf(self, term, text):\n return text.count(term) / len(text)", "def calcTFdict(doc):\n\n TFDict = {}\n\n #counts number of appearances of term in document\n for term in doc:\n if term in TFDict.keys():\n TFDict[term] +=1\n else:\n TFDict[term] = 1\n\n #Computing tf for each term\n for key in TFDict:\n TFDict[key] = TFDict[key]/len(doc)\n\n return TFDict", "def compute_IDF(doc_info):\n number_of_docs = len(doc_info)\n idf_table = {}\n\n for idx, doc in enumerate(doc_info):\n for word in doc['freq_dict']:\n if word not in idf_table:\n idf_table[word] = 1\n else:\n idf_table[word] += 1\n\n for word in idf_table.keys():\n idf_table[word] = math.log(number_of_docs/idf_table[word])\n\n return idf_table", "def entity_counts(doc):\n \n tags = []\n for token in doc.ents:\n tags.append(token.label_)\n frequency = dict(Counter(tags).most_common())\n\n return frequency", "def getNumberTerms(content): \n return Counter(getTerms(content))", "def calc_idf_two(docs):\r\n terms = set()\r\n for doc in docs:\r\n for term in doc:\r\n terms.add(term)\r\n idf = {}\r\n for term in terms:\r\n term_count = 0\r\n doc_count = 0\r\n for doc in docs:\r\n doc_count += 1\r\n if term in doc:\r\n term_count += 1\r\n idf[term] = max(0, ((doc_count-term_count)/term_count))\r\n return idf", "def augmented_term_fequency(term,tokens):\n\tterm = processes_and_tokenize(term)[0] #make sure term is in correct form\n\n\tmax_count = max([tokens.count(t) for t in tokens])\n\treturn tokens.count(term)/max_count", "def __query_tf(query, term):\n count = 0\n for q_term in query.split():\n if term == q_term:\n count += 1\n return count", "def tf(self, term: str, doc_path: str) -> int:\n return self.tfs[doc_path][term]", "def imp(term,word_dict,number_of_docs,id):\n\n with open('term_freq.txt', 'rb') as file:\n term_freq = pickle.loads(file.read())\n\n #print(term_freq)\n\n if id+1 in term_freq[term]:\n #print(term_freq[term][id])\n return term_freq[term][id+1]*word_dict[term][1]#idf(term,number_of_docs,index_list)\n else:\n return 0.0", "def freq():", "def calculate_tf_idf(self,doc_token_number,document_count):\n for term_ in self.inverted_index.keys():\n postingsList=self.inverted_index[term_]\n len_of_posting_list=postingsList.length\n idf=document_count/len_of_posting_list\n if postingsList.start_node is None:\n print(\"List has no element\")\n return\n else:\n n = postingsList.start_node\n # Start traversal from head, and go on till you reach None\n while n is not None:\n freq=n.term_frequency\n tf=freq/doc_token_number[n.value]\n tf_idf_value=tf*idf\n n.tf_idf=tf_idf_value\n n = n.next", "def computeTFIDF(self):\n for word in self.dictionary:\n numOfAppearance = self.dictionary[word].getDocumentFrequency()\n idf = math.log( (self.MAX_RATING) / (numOfAppearance), 10 )\n self.dictionary[word].setTFIDF(idf)", "def setCounts(self):\n N=len(self.y)\n self.counts=np.zeros(len(self.y))\n self.idf=np.zeros(len(self.y))\n for i in 
range(0,len(self.counts)):\n for word in self.qtext:\n wc=self.atext[i].count(word)\n self.counts[i]+=wc\n if wc>0:\n d=0\n for sentence in self.atext:\n if word in sentence:\n d+=1\n self.idf[i]+=wc*np.log(N/d)" ]
[ "0.729734", "0.71593374", "0.7090254", "0.6939883", "0.6922164", "0.66782546", "0.6643847", "0.65991753", "0.6548294", "0.6533882", "0.6521299", "0.6515126", "0.6509364", "0.65010506", "0.64998555", "0.6493106", "0.64863795", "0.6480846", "0.6379101", "0.6369765", "0.63460505", "0.6333923", "0.6263473", "0.62493217", "0.6245623", "0.621642", "0.6204669", "0.6200117", "0.6199041", "0.6197454" ]
0.7381723
0
Statistic TF calculate and sort terms in main doc by tf
def statistic_tf(self): return sorted(self.main_doc.term_dict.items(), key=operator.itemgetter(1), reverse=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calc_tf(doc):\r\n tf = {}\r\n for term in doc:\r\n if term not in tf:\r\n tf[term] = doc.count(term)\r\n return tf", "def compute_TF(doc_info):\n tf_scores = []\n\n for idx, doc in enumerate(doc_info):\n tf_score_table = {}\n for word in doc['freq_dict'].keys():\n count = doc['freq_dict'][word]\n tf_score_table[word] = count/doc_info[idx]['doc_length']\n tf_scores.append(tf_score_table)\n\n return tf_scores", "def statistic_tfidf(self):\n\t\t# calculate df-idf for all words\n\t\tcount_dict = {x: self.main_doc.term_dict[x] * self.__count_term_in_env(x) for x in self.main_doc.term_dict}\n\t\t# sort them by df and idf\n\t\treturn sorted(count_dict.items(), key=operator.itemgetter(1), reverse=True)", "def tf(word, document):\n return freq(word,document) / wordCount(document)", "def compute_tf(doc_info, freq_dict_all):\n tf_scores = []\n\n for temp_dict in freq_dict_all:\n id = temp_dict['doc_id']\n\n for k in temp_dict['freq_dict']:\n temp = {\n 'doc_id': id,\n 'TF_Score': temp_dict['freq_dict'][k] / doc_info[id - 1]['doc_length'],\n 'key': k\n }\n\n tf_scores.append(temp)\n\n return tf_scores", "def calcTFdict(doc):\n\n TFDict = {}\n\n #counts number of appearances of term in document\n for term in doc:\n if term in TFDict.keys():\n TFDict[term] +=1\n else:\n TFDict[term] = 1\n\n #Computing tf for each term\n for key in TFDict:\n TFDict[key] = TFDict[key]/len(doc)\n\n return TFDict", "def computeTF(self):\n for word in self.dictionary:\n self.dictionary[word].setTF(self.getTotalTerms())", "def get_tf(term, document):\n\n term_list = [term.lower() for term in document.split()]\n num_of_words_in_doc = len(document.split())\n term_count_in_doc = term_list.count(term)\n\n return term_count_in_doc / num_of_words_in_doc", "def rankDocuments(terms, docs, index, idf, tf, rt, likes, score):\n \n # init docvectors and queryvector to dict and array of 0, to be filled later\n docVectors=collections.defaultdict(lambda: [0]*len(terms)) \n queryVector=[0]*len(terms) \n\n if score == \"1\":\n # compute the norm for the query tf\n query_terms_count = collections.Counter(terms) # get the frequency of each term in the query. \n \n query_norm = np.linalg.norm(list(query_terms_count.values()))\n \n for termIndex, term in enumerate(terms): #termIndex is the index of the term in the query\n if term not in index:\n continue\n \n ## Compute tf*idf(normalize tf as done with documents)\n queryVector[termIndex] = query_terms_count[term] / query_norm * idf[term]\n\n # Generate docVectors for matching docs\n for docIndex, (doc, postings) in enumerate(index[term]):\n # in form of [docIndex, (doc, postings)] \n if doc in docs:\n docVectors[doc][termIndex]=tf[term][docIndex] * idf[term]\n # calculate the score of each doc\n # compute the cosine similarity between queyVector and each docVector:\n docScores=[ [np.dot(curDocVec, queryVector), doc] for doc, curDocVec in docVectors.items() ]\n else:\n # as we just want cosine similarity but not use tf-idf, we're using the term frequency as a weight\n # in our custom ranking\n # compute the norm for the query tf\n query_terms_count = collections.Counter(terms) # get the frequency of each term in the query. 
\n \n query_norm = np.linalg.norm(list(query_terms_count.values()))\n \n for termIndex, term in enumerate(terms): #termIndex is the index of the term in the query\n if term not in index:\n continue\n \n ## Compute tf (normalize tf as done with documents)\n queryVector[termIndex] = query_terms_count[term] / query_norm \n\n # Generate docVectors for matching docs\n for docIndex, (doc, postings) in enumerate(index[term]):\n # in form of [docIndex, (doc, postings)] \n if doc in docs:\n docVectors[doc][termIndex]=tf[term][docIndex]\n # calculate the score of each doc\n # compute the cosine similarity and add rt and fav score\n # rt brings to more visibility than a like, hence a higher score\n docScores=[ [np.dot(curDocVec, queryVector) + 1.5*rt[doc] + likes[doc], doc] for doc, curDocVec in docVectors.items() ]\n docScores.sort(reverse=True)\n resultDocs=[x[1] for x in docScores]\n if len(resultDocs) == 0:\n print(\"No results found, try again\")\n return None \n return resultDocs", "def compute_tfs(descriptions):\n\n # Initialize a dictionary that maps words to their IDF values.\n tf_dict = {}\n\n # Loop over game descriptions\n for game_name in descriptions:\n\n # Loop over words in each document\n for word in descriptions[game_name]:\n\n # continue if the word was already processed in\n # previous documents\n if word in tf_dict:\n continue\n\n # Count number of documents that contain the word\n word_count = 0\n for game_name in descriptions:\n if word in descriptions[game_name]:\n word_count += 1\n\n # add tf_score to tf_dict\n tf_dict[word] = word_count\n\n return tf_dict", "def compute_tf(document):\r\n _len = len(document)\r\n tf_dict = {}\r\n for token in document:\r\n tf_dict.setdefault(token, 0.0)\r\n tf_dict[token] += 1 / _len\r\n\r\n return tf_dict", "def tf_idf_score():\n\n global final_doc_set\n global final_dictionary\n final_score = []\n\n for doc_id in final_doc_set:\n score = 0\n for query_term in final_dictionary.keys():\n if final_dictionary[query_term][1].get(doc_id):\n tf = final_dictionary[query_term][1][doc_id][0]\n df = final_dictionary[query_term][0]\n\n score += ((1 + log10(tf)) * log10(TOTAL_DOCS / df))\n\n final_score.append([doc_id, score])\n\n return final_score", "def calc_tf(self, level) -> None:\n if level == 't':\n term_idxs = self._load_term_idxs('t')\n path_in = self.path_token_idx_corpus\n path_out = self.path_tf_tokens\n # contains = self.load_contains(self.path_token_contains)\n elif level == 'l':\n term_idxs = self._load_term_idxs('l')\n path_in = self.path_lemma_idx_corpus\n path_out = self.path_tf_lemmas\n # contains = self.load_contains(self.path_lemma_contains)\n else:\n raise Exception('Error! 
Level not know!')\n\n tf = {}\n for doc_idx, doc in enumerate(get_docs(path_in)):\n tf[doc_idx] = {}\n tf_doc = tf[doc_idx]\n for sent in doc:\n for term_idx in sent:\n if term_idx in term_idxs:\n if term_idx in tf_doc:\n tf_doc[term_idx] += 1\n else:\n tf_doc[term_idx] = 1\n # *** Code for compound counting ***\n # # add counts for all terms contained\n # for tc_idx in contains[term_idx]:\n # if tc_idx in tf_doc:\n # tf_doc[str(tc_idx)] += 1\n # else:\n # tf_doc[str(tc_idx)] = 1\n\n # print(doc_idx, lemma_idx)\n # print(type(doc_idx), type(lemma_idx))\n # tf[doc_id][lemma_idx] += 1\n # tf_doc = tf[doc_idx]\n # tf_doc[lemma_idx]\n # ***\n with open(path_out, 'w', encoding='utf8') as f:\n json.dump(tf, f)\n\n self._docs_processed = 0", "def rankDocuments_itp(terms, docs, index, tf, itp): \n\n\tdocVectors=defaultdict(lambda: [0]*len(terms)) \t\n\tqueryVector=[0]*len(terms) \n\n\t# compute the norm for the query tf\n\tquery_terms_count = collections.Counter(terms) \n\t\n\tquery_norm = la.norm(list(query_terms_count.values()))\n\t\n\tfor termIndex, term in enumerate(terms): #termIndex is the index of the term in the query\n\t\t\tif term not in index:\n\t\t\t\t\tcontinue \n\t\t\t## Compute tf*idf(normalize tf as done with documents)\n\t\t\tqueryVector[termIndex]=query_terms_count[term]/query_norm * itp[term] \n\t\t\t# Generate docVectors for matching docs\n\t\t\tfor docIndex, (doc, postings) in enumerate(index[term]):\n \n\t\t\t\t\tif doc in docs:\n\t\t\t\t\t\t\tdocVectors[doc][termIndex]=tf[term][docIndex] * itp[term] \n\t\n\tdocScores=[ [np.dot(curDocVec, queryVector), doc] for doc, curDocVec in docVectors.items() ]\n\tdocScores.sort(reverse=True)\n\tresultDocs=[x[1] for x in docScores]\n\n\treturn resultDocs", "def tfidf(t, h):\n h[0] = h[0].lower()\n t[0] = t[0].lower()\n score = 0\n for word in t:\n word = word.strip()\n if word in h:\n if word in config.doc_freq:\n score += (float(config.total_sentences) - config.word_freq[word]) / config.total_sentences\n else:\n score += 1\n return score", "def calc_tdf(docs):\r\n terms = set()\r\n for doc in docs:\r\n for term in doc:\r\n terms.add(term)\r\n tdf = {}\r\n for term in terms:\r\n doc_count = 0\r\n for doc in docs:\r\n doc_count += 1 if term in doc else 0\r\n tdf[term] = doc_count\r\n return tdf", "def featurize(movies):\n ###TODO \n movies['features'] = \"\" \n get_h = set() \n vocab_dict = {}\n df_dict_return = {}\n tup_list = []\n index_dict = {}\n index_dict_1 = {}\n movie_len = len(movies) \n #print(\"MovieLength::\",movie_len)\n #print(\"MOVIES:::\",movies)\n \n get_h = cal_unique_features(movies) # num_features\n\n vocab_dict = cal_unique_vocab(get_h) # vocab complete\n\n len_vocab = len(get_h)\n \n df_dict_return = cal_unique_docs(get_h,movies) # df(i)\n\n for token in get_h :\n #tup_list.clear()\n #print(\"token_GOTTTTT:::\",token)\n for index,row in movies.iterrows(): \n #print(\"row_got::\",row)\n gen_list = row['tokens']\n #print(\"gen_list::\",gen_list)\n #mov_id = row['movieId'] \n #print(\"mov_id::\",mov_id)\n token_count_1 = Counter(gen_list).most_common()[:1]\n tok = token_count_1[0]\n index_dict_1[index] = tok[1]\n token_count = gen_list.count(token)\n #print(\"token_count::\",token_count)\n tup = (index,token_count)\n #print(\"tuple::\",tup)\n tup_list.append(tup)\n #print(\"LIST_PRINT:::::::::::::\",tup_list)\n index_dict[token] = tup_list\n tup_list = []\n \n \n #print(\"INDEX_DICT:::\",index_dict) # tf(i,d)\n #print(\"INDEX_DICT_1:::\",index_dict_1) # max_k dict per docx\n \n \n for ind, row in movies.iterrows():\n 
data_list = []\n rows_list = []\n columns_list = []\n gen_list = row['tokens']\n #print(\"TOKENS GOTTT::\",gen_list) \n for gen in gen_list:\n tf = get_tf_value(index_dict,gen,ind)\n #print(\"TF GOTTT::\",tf) \n tf_weight = float( tf / index_dict_1[ind])\n #print(\"tf_weight::\",tf_weight)\n df_weight = float( math.log10( movie_len / df_dict_return[gen] ) )\n #print(\"df_weight::\",df_weight)\n final_tfidf = tf_weight * df_weight\n #print(\"final_tfidf::\",final_tfidf)\n data_list.append(final_tfidf)\n columns_list.append(vocab_dict[gen])\n rows_list.append(0) \n csr = csr_matrix((data_list, (rows_list,columns_list)), shape=(1,len_vocab))\n #print(\"TYPE of CSR GOTT::\",type(csr))\n #print(\"CSR GOTT:::\",csr) \n movies.set_value(ind, 'features', csr)\n \n #print(\"UPDATE movies::\",movies) \n\n return(movies,vocab_dict)\n \n\n pass", "def termFreq(self):\n if self._tf is None:\n nwords = len(self)\n self._tf = []\n for t in self.freqDist:\n self._tf.append( ( t[0], t[1] / nwords ) )\n return self._tf", "def calculate_tf(self, book_dict, term):\n term_frequency = 0\n try:\n term_frequency = (\n book_dict[\"SanitizedText\"][term] / book_dict[\"TotalNoOfTerms\"]\n )\n except KeyError:\n print(\"Key Error, Term doesnt exist\")\n return 0\n except ZeroDivisionError:\n print(\"tf division by zero!\")\n return 0\n return term_frequency", "def calculate_tf_idf(self,doc_token_number,document_count):\n for term_ in self.inverted_index.keys():\n postingsList=self.inverted_index[term_]\n len_of_posting_list=postingsList.length\n idf=document_count/len_of_posting_list\n if postingsList.start_node is None:\n print(\"List has no element\")\n return\n else:\n n = postingsList.start_node\n # Start traversal from head, and go on till you reach None\n while n is not None:\n freq=n.term_frequency\n tf=freq/doc_token_number[n.value]\n tf_idf_value=tf*idf\n n.tf_idf=tf_idf_value\n n = n.next", "def create_index_tfidf(lines, numDocuments):\n \n index=collections.defaultdict(list)\n tf=collections.defaultdict(list) #term frequencies of terms in documents (documents in the same order as in the main index)\n df=collections.defaultdict(int) #document frequencies of terms in the corpus\n idf=collections.defaultdict(float)\n with Bar('Creating tf-idf index', max=len(lines)) as bar:\n for key in lines:\n page_id = key \n terms = getTerms(lines[key]) \n\n ## create the index for the **current page** and store it in termdictPage\n ## termdictPage in form ==> { ‘term1’: [currentdoc, [list of positions]], ...,‘termn’: [currentdoc, [list of positions]]}\n\n termdictPage={}\n\n for position, term in enumerate(terms): \n try:\n # if the term is already in the dict append the position to the corrisponding list\n termdictPage[term][1].append(position) \n except:\n # Add the new term as dict key and initialize the array of positions and add the position\n termdictPage[term]=[page_id, array('I',[position])] \n\n #normalize term frequencies\n norm=0\n for term, posting in termdictPage.items(): \n # posting ==> [currentdoc, [list of positions]] \n norm+=len(posting[1])**2\n norm=math.sqrt(norm)\n\n\n #calculate the tf(dividing the term frequency by the above computed norm) and df weights\n for term, posting in termdictPage.items(): \n # append the tf for current term (tf = term frequency in current doc/norm)\n tf[term].append(np.round(len(posting[1])/norm,4)) ## SEE formula (1) above\n #increment the document frequency of current term (number of documents containing the current term)\n df[term] += 1 \n\n #merge the current page 
index with the main index\n for termpage, postingpage in termdictPage.items():\n index[termpage].append(postingpage)\n\n # Compute idf following the formula (3) above. HINT: use np.log\n bar.next()\n for term in df:\n idf[term] = np.round(np.log(float(numDocuments/df[term])),4)\n \n return (index, tf, df, idf)", "def main(_):\n tf.logging.set_verbosity(tf.logging.INFO)\n counts = defaultdict(lambda: 0)\n total_sentences = 0\n for filename in sys.stdin:\n filename = filename.strip()\n reader = tf.python_io.tf_record_iterator(filename)\n n_sentences = 0\n for record in reader:\n x = tf.train.Example()\n x.ParseFromString(record)\n tokens = [int(i) for i in x.features.feature[FLAGS.field].int64_list.value]\n counts[len(tokens)] += 1\n n_sentences += 1\n tf.logging.info(\"Read %d sentences from %s.\", n_sentences, filename)\n total_sentences += n_sentences\n\n tf.logging.info(\"Statistics for %s:\", FLAGS.field)\n sorted_counts = [(l, f) for l, f in counts.iteritems()]\n sorted_counts.sort()\n acc = 0\n for l, f in sorted_counts:\n acc += f\n tf.logging.info(\"<=%d: %d/%d (%.3f%%)\", l, acc, total_sentences, 100.0 * acc / total_sentences)", "def tf_idf(self):\n all_tf_idf = {}\n total_docs = len(self.lemma_tokens)\n for zettel in self.lemma_tokens:\n total_words = len(zettel)\n count_dict = self.create_count_dictionary(zettel)\n for word in zettel:\n # tf = (count of given word for a given zettel) / (total number of words for given zettel)\n tf = count_dict[word[0]] / total_words\n # idf = (total number of documents) / (number of documents containing word)\n idf = total_docs / self.doc_count_dict[word[0]]\n tf_idf_value = tf * idf\n all_tf_idf[word[0]] = tf_idf_value\n return all_tf_idf", "def __ranking_function(self, doc, query_tokens):", "def search_tf_idf(query, index, tf, idf):\n\tquery = getTerms(query)\n\tdocs = set()\n\tfor term in query:\n\t\t\ttry:\n\t\t\t\t\t# store in termDocs the ids of the docs that contain \"term\"\n\t\t\t\t\ttermDocs = [posting[0] for posting in index[term]]\n\n\t\t\t\t\t# docs = docs Union termDocs\n\t\t\t\t\tdocs |= set(termDocs)\n\t\t\texcept:\n\t\t\t\t\t# term is not in index\n\t\t\t\t\tpass\n\tdocs = list(docs)\n\tranked_docs = rankDocuments_tf_idf(query, docs, index, idf, tf)\n\treturn ranked_docs", "def computeTFIDF(self):\n for word in self.dictionary:\n numOfAppearance = self.dictionary[word].getDocumentFrequency()\n idf = math.log( (self.MAX_RATING) / (numOfAppearance), 10 )\n self.dictionary[word].setTFIDF(idf)", "def tf1(self, collection_stats, tf, df, doclen):\r\n return np.sum(tf, axis=0)", "def get_tf_idf(term, document, documents):\n\n tf_idf = get_tf(term, document) * get_idf(term, documents)\n\n return round(tf_idf, 5)", "def rank_links(tf_idf_table, query_terms, links):\n \n tf = {}\n for w in query_terms:\n f = query_terms.count(w)\n tf[w] = f\n\n q_tf_idf = {}\n for term in tf:\n # if the query term is found in files\n if tf_idf_table.has_key(term):\n q_tf_idf[term] = tf.get(term) # * log(N/1)\n else:\n # if the query term is NOT found in files, set IDF to 0\n q_tf_idf[term] = 0\n\n # score of all docs for this query \n doc_vals = {}\n\n # Wiq denominator in CosSim\n DWiq = 0\n for t in tf_idf_table: \n\n DWiq = q_tf_idf.get(t)\n # if the term is not in query, ignore\n if DWiq == None:\n continue\n\n\n #print(\"Term: %s \\t\\t Query TF-IDF: %d\" % (t, q_tf_idf.get(t)))\n\n idf_row = tf_idf_table.get(t)\n # if the query term is in our corpus\n if idf_row != None:\n #print(idf_row)\n\n # get the document frequency\n df = float(len(idf_row))\n 
#print(\"DF: %d\" % (df))\n\n # Wij denominator in CosSim\n DWij = 0\n\n # Numerator in CosSim\n Njq = 0\n\n # calculate values of each document specific\n\n for doc in idf_row:\n #print(doc)\n\n # The \"df\" should not be processed\n if doc == \"df\":\n continue\n\n # skip any link that are not relevant\n try:\n _ = links.index(doc)\n except:\n continue\n\n #print(\"Doc ID: %s \\tTF: %d\" % (doc, idf_row.get(doc)))\n\n DWij = idf_row.get(doc)\n\n #Njq = q_tf_idf.get(t) * idf_row.get(doc)\n\n if doc_vals.has_key(doc):\n vals = doc_vals.get(doc)\n vals[\"DWiq\"] += pow(DWiq, 2)\n vals[\"DWij\"] += pow(DWij, 2)\n vals[\"NWjq\"] += DWij * DWiq\n\n doc_vals[doc] = vals\n else:\n vals = {}\n vals[\"DWiq\"] = pow(DWiq, 2)\n vals[\"DWij\"] = pow(DWij, 2)\n vals[\"NWjq\"] = DWij * DWiq\n\n doc_vals[doc] = vals\n\n #print(doc_vals)\n\n # Calculate the CosSim value\n doc_score = {}\n for doc in doc_vals:\n #print(doc)\n vals = doc_vals.get(doc)\n #print(vals)\n #n = vals.get(\"NWjq\")\n #d = float(pow(vals.get(\"DWij\") * vals.get(\"DWiq\"),0.5))\n #print(n)\n #print(d) \n #print(float(n/float(d)))\n doc_score[doc] = float(vals.get(\"NWjq\"))/float(pow(vals.get(\"DWij\") * vals.get(\"DWiq\"),0.5))\n #print(doc_score[doc])\n\n\n #print(doc_score)\n\n sorted_by_score = sorted(doc_score.items(), key=operator.itemgetter(1), reverse=True)\n #print(sorted_by_score)\n\n sorted_score = collections.OrderedDict()\n for url, score in sorted_by_score:\n sorted_score[url] = score\n\n #print(sorted_score)\n return sorted_score", "def relevance_feedback_exp(vec_docs, vec_queries, sim, tfidf_model, n=10):\n\n alpha = 0.1\n beta = 0.9\n gamma = 1.4\n closest = 5\n\n vec_docs = vec_docs / np.sum(vec_docs, axis=1)\n\n thesaurus = np.dot(np.transpose(vec_docs), vec_docs)\n \n for epoch in range(2):\n vec_queries_new = np.zeros((vec_queries.shape))\n\n for q in range(vec_queries.shape[0]):\n old_query = vec_queries[q, :].reshape(1, -1)\n\n highest = np.argmax(old_query)\n highest_value = np.max(old_query)\n\n closest_words = np.argsort(thesaurus[highest, :])[:, -closest:]\n closest_words = np.array(closest_words)[0]\n \n for idx in range(closest):\n old_query[:, closest_words[idx]] = highest_value\n\n old_query = old_query.reshape(1, -1)\n \n r = sim[:, q]\n sorted_ = np.argsort(r)\n \n first_n = sorted_[:n]\n D_irrel = vec_docs[first_n, :]\n\n last_n = sorted_[-n:]\n D_rel = vec_docs[last_n, :]\n\n centroid_rel = get_centroid(D_rel)\n centroid_irrel = get_centroid(D_irrel)\n\n new_query = (alpha/n) * old_query + (beta/n) * centroid_rel - (gamma/n) * centroid_irrel\n new_query = new_query.clip(min=0)\n vec_queries_new[q, :] = new_query\n\n rf_sim = cosine_similarity(vec_docs, vec_queries_new)\n vec_queries = vec_queries_new\n sim = rf_sim\n \n return rf_sim" ]
[ "0.7448172", "0.7352905", "0.7274512", "0.719932", "0.70984966", "0.70313746", "0.69458485", "0.68545496", "0.66986537", "0.6692026", "0.6671128", "0.662987", "0.6564009", "0.65638477", "0.65546054", "0.6543071", "0.6411876", "0.6403338", "0.6386789", "0.631672", "0.62851524", "0.6216851", "0.6206709", "0.6198173", "0.6194912", "0.6194776", "0.61918473", "0.61457163", "0.61452943", "0.61362237" ]
0.805907
0
Statistic TF-IDF: calculate and sort terms in the main doc by tf-idf
def statistic_tfidf(self): # calculate df-idf for all words count_dict = {x: self.main_doc.term_dict[x] * self.__count_term_in_env(x) for x in self.main_doc.term_dict} # sort them by df and idf return sorted(count_dict.items(), key=operator.itemgetter(1), reverse=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tf_idf_score():\n\n global final_doc_set\n global final_dictionary\n final_score = []\n\n for doc_id in final_doc_set:\n score = 0\n for query_term in final_dictionary.keys():\n if final_dictionary[query_term][1].get(doc_id):\n tf = final_dictionary[query_term][1][doc_id][0]\n df = final_dictionary[query_term][0]\n\n score += ((1 + log10(tf)) * log10(TOTAL_DOCS / df))\n\n final_score.append([doc_id, score])\n\n return final_score", "def tfidf(t, h):\n h[0] = h[0].lower()\n t[0] = t[0].lower()\n score = 0\n for word in t:\n word = word.strip()\n if word in h:\n if word in config.doc_freq:\n score += (float(config.total_sentences) - config.word_freq[word]) / config.total_sentences\n else:\n score += 1\n return score", "def statistic_tf(self):\n\t\treturn sorted(self.main_doc.term_dict.items(), key=operator.itemgetter(1), reverse=True)", "def search_tf_idf(query, index, tf, idf):\n\tquery = getTerms(query)\n\tdocs = set()\n\tfor term in query:\n\t\t\ttry:\n\t\t\t\t\t# store in termDocs the ids of the docs that contain \"term\"\n\t\t\t\t\ttermDocs = [posting[0] for posting in index[term]]\n\n\t\t\t\t\t# docs = docs Union termDocs\n\t\t\t\t\tdocs |= set(termDocs)\n\t\t\texcept:\n\t\t\t\t\t# term is not in index\n\t\t\t\t\tpass\n\tdocs = list(docs)\n\tranked_docs = rankDocuments_tf_idf(query, docs, index, idf, tf)\n\treturn ranked_docs", "def compute_tfidf(self, movies):\n term2index = {} # {term: index}\n term2doc_cnt = defaultdict(int) # {term: document count}\n num_terms = 0\n for movie in movies:\n term_set = set()\n terms = movies[movie].split()\n for term in terms:\n if term not in term_set:\n term2doc_cnt[term] += 1\n term_set.add(term)\n\n if term not in term2index:\n term2index[term] = num_terms\n num_terms += 1\n\n # Compute TF (term frequency)\n self.tf = {} # {movie_id: tf}\n for movie in movies:\n self.tf[movie] = np.zeros(num_terms)\n terms = movies[movie].split()\n for term in terms:\n self.tf[movie][term2index[term]] += 1\n \n # Compute IDF (inverse document frequency)\n self.idf = np.zeros(num_terms)\n for term in term2doc_cnt:\n self.idf[term2index[term]] = log(len(movies) / term2doc_cnt[term])", "def computeTFIDF(self):\n for word in self.dictionary:\n numOfAppearance = self.dictionary[word].getDocumentFrequency()\n idf = math.log( (self.MAX_RATING) / (numOfAppearance), 10 )\n self.dictionary[word].setTFIDF(idf)", "def get_tf_idf(term, document, documents):\n\n tf_idf = get_tf(term, document) * get_idf(term, documents)\n\n return round(tf_idf, 5)", "def tf_idf(self):\n all_tf_idf = {}\n total_docs = len(self.lemma_tokens)\n for zettel in self.lemma_tokens:\n total_words = len(zettel)\n count_dict = self.create_count_dictionary(zettel)\n for word in zettel:\n # tf = (count of given word for a given zettel) / (total number of words for given zettel)\n tf = count_dict[word[0]] / total_words\n # idf = (total number of documents) / (number of documents containing word)\n idf = total_docs / self.doc_count_dict[word[0]]\n tf_idf_value = tf * idf\n all_tf_idf[word[0]] = tf_idf_value\n return all_tf_idf", "def calculate_tf_idf(self,doc_token_number,document_count):\n for term_ in self.inverted_index.keys():\n postingsList=self.inverted_index[term_]\n len_of_posting_list=postingsList.length\n idf=document_count/len_of_posting_list\n if postingsList.start_node is None:\n print(\"List has no element\")\n return\n else:\n n = postingsList.start_node\n # Start traversal from head, and go on till you reach None\n while n is not None:\n freq=n.term_frequency\n 
tf=freq/doc_token_number[n.value]\n tf_idf_value=tf*idf\n n.tf_idf=tf_idf_value\n n = n.next", "def create_index_tfidf(lines, numDocuments):\n \n index=collections.defaultdict(list)\n tf=collections.defaultdict(list) #term frequencies of terms in documents (documents in the same order as in the main index)\n df=collections.defaultdict(int) #document frequencies of terms in the corpus\n idf=collections.defaultdict(float)\n with Bar('Creating tf-idf index', max=len(lines)) as bar:\n for key in lines:\n page_id = key \n terms = getTerms(lines[key]) \n\n ## create the index for the **current page** and store it in termdictPage\n ## termdictPage in form ==> { ‘term1’: [currentdoc, [list of positions]], ...,‘termn’: [currentdoc, [list of positions]]}\n\n termdictPage={}\n\n for position, term in enumerate(terms): \n try:\n # if the term is already in the dict append the position to the corrisponding list\n termdictPage[term][1].append(position) \n except:\n # Add the new term as dict key and initialize the array of positions and add the position\n termdictPage[term]=[page_id, array('I',[position])] \n\n #normalize term frequencies\n norm=0\n for term, posting in termdictPage.items(): \n # posting ==> [currentdoc, [list of positions]] \n norm+=len(posting[1])**2\n norm=math.sqrt(norm)\n\n\n #calculate the tf(dividing the term frequency by the above computed norm) and df weights\n for term, posting in termdictPage.items(): \n # append the tf for current term (tf = term frequency in current doc/norm)\n tf[term].append(np.round(len(posting[1])/norm,4)) ## SEE formula (1) above\n #increment the document frequency of current term (number of documents containing the current term)\n df[term] += 1 \n\n #merge the current page index with the main index\n for termpage, postingpage in termdictPage.items():\n index[termpage].append(postingpage)\n\n # Compute idf following the formula (3) above. HINT: use np.log\n bar.next()\n for term in df:\n idf[term] = np.round(np.log(float(numDocuments/df[term])),4)\n \n return (index, tf, df, idf)", "def compute_TF(doc_info):\n tf_scores = []\n\n for idx, doc in enumerate(doc_info):\n tf_score_table = {}\n for word in doc['freq_dict'].keys():\n count = doc['freq_dict'][word]\n tf_score_table[word] = count/doc_info[idx]['doc_length']\n tf_scores.append(tf_score_table)\n\n return tf_scores", "def _tfidf(term_frequency: int, document_frequency: int, document_count: int) -> float:\n if term_frequency == 0:\n return 0\n else:\n tf = 1 + np.log(term_frequency)\n idf = np.log(document_count / document_frequency)\n return tf * idf", "def rankDocuments(terms, docs, index, idf, tf, rt, likes, score):\n \n # init docvectors and queryvector to dict and array of 0, to be filled later\n docVectors=collections.defaultdict(lambda: [0]*len(terms)) \n queryVector=[0]*len(terms) \n\n if score == \"1\":\n # compute the norm for the query tf\n query_terms_count = collections.Counter(terms) # get the frequency of each term in the query. 
\n \n query_norm = np.linalg.norm(list(query_terms_count.values()))\n \n for termIndex, term in enumerate(terms): #termIndex is the index of the term in the query\n if term not in index:\n continue\n \n ## Compute tf*idf(normalize tf as done with documents)\n queryVector[termIndex] = query_terms_count[term] / query_norm * idf[term]\n\n # Generate docVectors for matching docs\n for docIndex, (doc, postings) in enumerate(index[term]):\n # in form of [docIndex, (doc, postings)] \n if doc in docs:\n docVectors[doc][termIndex]=tf[term][docIndex] * idf[term]\n # calculate the score of each doc\n # compute the cosine similarity between queyVector and each docVector:\n docScores=[ [np.dot(curDocVec, queryVector), doc] for doc, curDocVec in docVectors.items() ]\n else:\n # as we just want cosine similarity but not use tf-idf, we're using the term frequency as a weight\n # in our custom ranking\n # compute the norm for the query tf\n query_terms_count = collections.Counter(terms) # get the frequency of each term in the query. \n \n query_norm = np.linalg.norm(list(query_terms_count.values()))\n \n for termIndex, term in enumerate(terms): #termIndex is the index of the term in the query\n if term not in index:\n continue\n \n ## Compute tf (normalize tf as done with documents)\n queryVector[termIndex] = query_terms_count[term] / query_norm \n\n # Generate docVectors for matching docs\n for docIndex, (doc, postings) in enumerate(index[term]):\n # in form of [docIndex, (doc, postings)] \n if doc in docs:\n docVectors[doc][termIndex]=tf[term][docIndex]\n # calculate the score of each doc\n # compute the cosine similarity and add rt and fav score\n # rt brings to more visibility than a like, hence a higher score\n docScores=[ [np.dot(curDocVec, queryVector) + 1.5*rt[doc] + likes[doc], doc] for doc, curDocVec in docVectors.items() ]\n docScores.sort(reverse=True)\n resultDocs=[x[1] for x in docScores]\n if len(resultDocs) == 0:\n print(\"No results found, try again\")\n return None \n return resultDocs", "def tfidf(self):\n\t\ttry:\n\t\t\tself.tfidf_df = tfidf(self.bagofwords)\n\t\texcept AttributeError:\n\t\t\tself.gen_bag_of_words_df()\n\t\t\tself.tfidf_df = tfidf(self.bagofwords)", "def test_tfidf_scorer(self):\n\n \"\"\"\n Create the test data.\n \"\"\"\n tokenizer = Tokenizer(stem=False)\n posts = [\n \"Erdogan with threats to attack regime forces 'everywhere' in Syria\",\n \"Damascus says Erdogan 'disconnected from reality' after threats\",\n ]\n\n corpus = [ Document(post, tokenizer.tokenize(post)) for post in posts ]\n\n extractor = TokenExtractor(tokenizer=tokenizer)\n scorer = TFIDFScorer({ 'erdogan': 1, 'threats': 2 }, 10)\n candidates = extractor.extract(corpus)\n scores = scorer.score(candidates)\n self.assertGreater(scores.get('erdogan'), scores.get('damascus'))\n self.assertEqual(scores.get('everywhere'), scores.get('disconnected')) # they appear the same number of times\n self.assertGreater(scores.get('erdogan'), scores.get('threats')) # 'threats' and 'erdogan' appear with the same frequency, but 'threats' has a higher DF", "def calcTFdict(doc):\n\n TFDict = {}\n\n #counts number of appearances of term in document\n for term in doc:\n if term in TFDict.keys():\n TFDict[term] +=1\n else:\n TFDict[term] = 1\n\n #Computing tf for each term\n for key in TFDict:\n TFDict[key] = TFDict[key]/len(doc)\n\n return TFDict", "def tfIdf(texts):\n vect = TfidfVectorizer(min_df=1)\n tfidf = vect.fit_transform([tt.lower() for tt in texts])\n aa=(tfidf * tfidf.T).A\n return aa", "def calc_tf_idf(idf, 
tf):\r\n tfidf = {}\r\n for key, val in tf.items():\r\n tfidf[key] = val * idf[key]\r\n return tfidf", "def cal_tf_idf(data: dict):\n if isinstance(data, dict) is False:\n raise ValueError('input must be an dictionary')\n\n tf_idf_dict = defaultdict(list)\n for yr, docs in data.items():\n unique_words_docs_sum = []\n for doc in docs:\n unique_words_in_one = list(set(doc))\n unique_words_docs_sum += unique_words_in_one\n\n df_dict = Counter(unique_words_docs_sum)\n\n n_doc = len(docs)\n\n for doc in docs:\n term_freq = Counter(doc)\n for term, freq in term_freq.items():\n tf = freq/sum(term_freq.values())\n df = df_dict[term]\n tf_idf = tf * np.log(n_doc/(df+1))\n tf_idf_dict[yr].append([term, tf_idf])\n\n return tf_idf_dict", "def tfidf(self):\n matrix = numpy.zeros(self.shape)\n # the number of words in a document\n words_per_doc = numpy.asarray(self.sum(axis=1), dtype=float)\n # the number of documents in which a word is attested.\n word_frequencies = numpy.asarray(numpy.sum(self > 0, axis=0), dtype=float)\n # calculate the term frequencies\n for i in range(self.shape[0]):\n tf = self[i] / words_per_doc[i] # array of tf's\n matrix[i] = tf * (numpy.log(self.shape[0] / word_frequencies))\n return matrix", "def best_tfidf(self):\r\n\r\n for (i, email) in enumerate(self.emails):\r\n print \"\\t%s\" % i\r\n email.tfidf = counter.Counter()\r\n for word in email.words_counter:\r\n tf_d = email.words_counter[word]\r\n df = len(self.inverted_index[word])\r\n idf = math.log(self.number_emails / float(df))\r\n squasher = float(2 * email.length) / self.avg_length()\r\n score = (tf_d / (tf_d + squasher)) * idf\r\n\r\n email.tfidf[word] = score\r\n\r\n overall_tfidfs = counter.Counter()\r\n for email in self.emails:\r\n overall_tfidfs += email.tfidf\r\n\r\n return overall_tfidfs.most_common(1)[0][0]", "def calc_tdf(docs):\r\n terms = set()\r\n for doc in docs:\r\n for term in doc:\r\n terms.add(term)\r\n tdf = {}\r\n for term in terms:\r\n doc_count = 0\r\n for doc in docs:\r\n doc_count += 1 if term in doc else 0\r\n tdf[term] = doc_count\r\n return tdf", "def tfidf(docs):\n vocab = {}\n df = {}\n regex = re.compile(\"\\s+\")\n count = 0\n for doc in docs:\n terms = re.split(regex, doc)\n for term in set(terms):\n if len(term) > 0:\n if term not in vocab:\n vocab[term] = count # (index, df)\n df[term] = 1\n count += 1\n else:\n df[term] += 1\n num_docs = len(docs)\n scores = []\n for i in range(0, num_docs):\n scores.append({})\n\n for index in range(0, num_docs):\n terms = re.split(regex, docs[index])\n for term, tf in collections.Counter(terms).most_common():\n if len(term) > 0:\n term_index = vocab[term]\n score = float(tf) * np.log(float(num_docs) / float(df[term]))\n if score > 0.0:\n scores[index][term_index] = score\n\n i_list = []\n j_list = []\n data = []\n\n for i in range(0, num_docs):\n for j, score in scores[i].iteritems():\n i_list.append(i)\n j_list.append(j)\n data.append(score)\n\n matrix = sp.csr_matrix((data, (i_list, j_list)), shape=(num_docs, len(vocab)))\n reverse_map = {v: k for k, v in vocab.iteritems()}\n return matrix, reverse_map.values()", "def compute_tf(doc_info, freq_dict_all):\n tf_scores = []\n\n for temp_dict in freq_dict_all:\n id = temp_dict['doc_id']\n\n for k in temp_dict['freq_dict']:\n temp = {\n 'doc_id': id,\n 'TF_Score': temp_dict['freq_dict'][k] / doc_info[id - 1]['doc_length'],\n 'key': k\n }\n\n tf_scores.append(temp)\n\n return tf_scores", "def tfidf(corpus):\n vectorizer = CountVectorizer(stop_words='english', decode_error='ignore')\n x = 
vectorizer.fit_transform(corpus)\n a = x.toarray()\n name = vectorizer.get_feature_names()\n transformer = TfidfTransformer()\n tfidf = transformer.fit_transform(a)\n return name, tfidf.toarray()", "def __tf_idf_feature_extraction(self):\n print('=' * 80)\n print(\"TF-IDF Feature Extraction\")\n t0 = time()\n vectorizer = TfidfVectorizer()\n vec_train = vectorizer.fit_transform(self.train.text)\n vec_test = vectorizer.transform(self.test.text)\n duration = time() - t0\n print(\"DONE!!!!! total time: %fs\" % duration)\n print('=' * 80)\n return vec_train, vec_test", "def rankDocuments_itp(terms, docs, index, tf, itp): \n\n\tdocVectors=defaultdict(lambda: [0]*len(terms)) \t\n\tqueryVector=[0]*len(terms) \n\n\t# compute the norm for the query tf\n\tquery_terms_count = collections.Counter(terms) \n\t\n\tquery_norm = la.norm(list(query_terms_count.values()))\n\t\n\tfor termIndex, term in enumerate(terms): #termIndex is the index of the term in the query\n\t\t\tif term not in index:\n\t\t\t\t\tcontinue \n\t\t\t## Compute tf*idf(normalize tf as done with documents)\n\t\t\tqueryVector[termIndex]=query_terms_count[term]/query_norm * itp[term] \n\t\t\t# Generate docVectors for matching docs\n\t\t\tfor docIndex, (doc, postings) in enumerate(index[term]):\n \n\t\t\t\t\tif doc in docs:\n\t\t\t\t\t\t\tdocVectors[doc][termIndex]=tf[term][docIndex] * itp[term] \n\t\n\tdocScores=[ [np.dot(curDocVec, queryVector), doc] for doc, curDocVec in docVectors.items() ]\n\tdocScores.sort(reverse=True)\n\tresultDocs=[x[1] for x in docScores]\n\n\treturn resultDocs", "def tfidf1(self, collection_stats, tf, df, doclen):\r\n idfs = np.log((collection_stats.get_doc_counts() + 1)/(df+1e-4))\r\n r = np.apply_along_axis(self.tfidf1_apply, 0, tf, idfs)\r\n return np.sum(r, axis=0)", "def calc_idf(docs):\r\n terms = set()\r\n for doc in docs:\r\n for term in doc:\r\n terms.add(term)\r\n idf = {}\r\n for term in terms:\r\n term_count = 0\r\n doc_count = 0\r\n for doc in docs:\r\n doc_count += 1\r\n if term in doc:\r\n term_count += 1\r\n idf[term] = doc_count/term_count\r\n return idf", "def tf(word, document):\n return freq(word,document) / wordCount(document)" ]
[ "0.7582223", "0.7435247", "0.73491657", "0.73197037", "0.7230406", "0.7206604", "0.71902233", "0.71717143", "0.71699125", "0.7125617", "0.702574", "0.7018102", "0.700898", "0.69296885", "0.6926581", "0.6906138", "0.68636584", "0.68492436", "0.68379414", "0.6832129", "0.6826512", "0.6809031", "0.680271", "0.6793015", "0.67303264", "0.67246395", "0.6716061", "0.670321", "0.6685574", "0.66830164" ]
0.8353365
0
Show the menu and return either None (if an exit key was pressed) or FindTweetMenu.BACK_INDEX
def showAndGet(self): keywords = TerminalInterface.getSearchKeywords() # If user did not enter any keywords, return FindUserMenu.BACK_INDEX if keywords is None: return FindTweetMenu.BACK_INDEX tweetGeneratorMethod = lambda: TweetsTableTools.findTweets( self._connection, keywords) menu = TweetsMenu(self._connection, self._userID, tweetGeneratorMethod, emptyMessage = FindTweetMenu._EMPTY_MESSAGE) choice = menu.showAndGet() if choice == TweetsMenu.BACK_INDEX: return FindTweetMenu.BACK_INDEX return choice
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def return_menu(self):\n while True:\n number = pyip.inputNum(\"0. Back to the main menu: \")\n if number == 0:\n # Clean up the console\n self.clear_console()\n # back to the main menu\n self.run()\n else:\n print('Press the number zero to go back')", "def back_to_menu_info(cls):\n print(\n \"\"\"\n ________________________________________________\n\n HABITSBOX\n ________________________________________________\n Hint: Press 0 (zero) to return to the main menu\n ------------------------------------------------\"\"\")", "def show_menu(self):\n curses.curs_set(0)\n self.main_menu.display()", "def menu_quit():\n return \"Quit\"", "def menu(self):\n ## This is a DICTIONARY, it's a list with custom index values\n # You may change the menu if you'd like to add an experimental method\n menu = {\"n\": (\"Navigate forward\", self.nav),\n \"d\": (\"Dance\", self.dance),\n \"c\": (\"Calibrate\", self.calibrate),\n \"t\": (\"test restore\", self.calibrate),\n \"s\": (\"Check status\", self.status),\n \"q\": (\"Quit\", quit_now)\n }\n # loop and print the menu...\n for key in sorted(menu.keys()):\n print(key + \":\" + menu[key][0])\n # store the user's answer\n ans = raw_input(\"Your selection: \")\n # activate the item selected\n menu.get(ans, [None, error])[1]()", "def help_menu():\n print('\\n##################################################')\n print('################ Help Menu ###############') \n print('##################################################')\n print(' Type move or examine for each turn') \n print(' If moving, type up, down, left, or right')\n print(' If examining, you may need to answer yes or no')\n print('##################################################\\n')\n title_screen_selections()", "def action(self,input,session,context):\n index = int(input) - 1\n if index < 0:\n raise IndexError('Menu option can not be less than 1')\n return self.menu_items[index].next_screen", "def menu(self):\n ## This is a DICTIONARY, it's a list with custom index values. Python is cool.\n # Please feel free to change the menu and add options.\n print(\"\\n *** MENU ***\") \n menu = {\"n\": (\"Navigate\", self.nav),\n \"d\": (\"Dance\", self.dance),\n \"o\": (\"Obstacle count\", self.obstacle_count),\n \"s\": (\"Shy\", self.shy),\n \"f\": (\"Follow\", self.follow),\n \"c\": (\"Calibrate\", self.calibrate),\n \"q\": (\"Quit\", self.quit)\n }\n # loop and print the menu...\n for key in sorted(menu.keys()):\n print(key + \":\" + menu[key][0])\n # store the user's answer\n ans = str.lower(input(\"Your selection: \"))\n # activate the item selected\n menu.get(ans, [None, self.quit])[1]()", "def menu(self):\n ## This is a DICTIONARY, it's a list with custom index values. 
Python is cool.\n # Please feel free to change the menu and add options.\n print(\"\\n *** MENU ***\") \n menu = {\"n\": (\"Navigate\", self.nav),\n \"d\": (\"Dance\", self.dance),\n \"o\": (\"Obstacle count\", self.obstacle_count),\n \"s\": (\"Shy\", self.shy),\n \"f\": (\"Follow\", self.follow),\n \"c\": (\"Calibrate\", self.calibrate),\n \"q\": (\"Quit\", self.quit)\n }\n # loop and print the menu...\n for key in sorted(menu.keys()):\n print(key + \":\" + menu[key][0])\n # store the user's answer\n ans = str.lower(input(\"Your selection: \"))\n # activate the item selected\n menu.get(ans, [None, self.quit])[1]()", "def menu_screen(win):\n\tpass", "def go_menu(self, window, keycode1, keycode2, text, modifiers):\r\n if keycode1 in [27, 1001]:\r\n self.sm.current = \"menu\"\r\n return True\r\n return False", "def show_menu():\n if not GD.gui.menu.item('Tools'):\n create_menu()", "def go_to_exit(self, _: int = 0) -> None:\n self.current_option = self.last_item_index\n self.draw()", "def unhandled(self, key):\n if key == 'f10':\n self.show_menu(None, 'exit')\n elif key == 'backspace':\n if self.previous_menu is None: # from main menu go to exit menu\n self.show_menu(None, 'exit')\n else:\n self.show_menu(None, self.previous_menu)", "def call_q(self, _):\n return MENU_GO_BACK", "def call_q(self, _):\n return MENU_GO_BACK", "def call_q(self, _):\n return MENU_GO_BACK", "def menu(self):\n ## This is a DICTIONARY, it's a list with custom index values. Python is cool.\n # Please feel free to change the menu and add options.\n print(\"\\n *** MENU ***\") \n menu = {\"c\": (\"Calibrate\", self.calibrate),\n \"d\": (\"Dance\", self.dance),\n \"h\": (\"Hold position\", self.hold_position),\n \"n\": (\"Navigate\", self.nav),\n \"o\": (\"Obstacle count\", self.obstacle_count),\n \"q\": (\"Quit\", self.quit),\n \"v\": (\"Veer\", self.slither)\n }\n # loop and print the menu...\n for key in sorted(menu.keys()):\n print(key + \":\" + menu[key][0])\n # store the user's answer\n ans = str.lower(input(\"Your selection: \"))\n # activate the item selected\n menu.get(ans, [None, self.quit])[1]()", "def return_to_main_menu() -> bool:\n choice = get_user_choice(['Return to main menu', 'Move a book to another shelf'],\n '\\nWould you like to return to the main menu or move a book?')\n return True if choice == '1' else False", "def search_method_menu(self):\n\n print()\n options = {'1': 'Employee Name', '2': 'Keyword', '3': 'Time Spent',\n '4': 'Date', '5': 'Date Range', '6': 'Exit to main menu'}\n\n while True:\n\n for k, v in options.items():\n print(k + \". \" + v)\n\n user_choice = input('\\nPlease enter the number of choice: ').lower().strip()\n\n if user_choice in options.keys():\n return options.get(user_choice)\n else:\n print('\\nInvalid choice! Please try again.\\n')", "def choice_stay_return(self, text, action):\n while True:\n print(\"\"\"\n 0. Back to the main menu\n 1. 
{}\n \"\"\".format(text))\n choice = pyip.inputNum('Enter a number: ')\n if choice == 0:\n # Clean up the console\n self.clear_console()\n # Gives the options that can be selected in the menu\n self.run()\n elif choice == 1:\n action()\n else:\n print('Please, choose number 0 or 1')", "def navigate_mainMenu():\r\n msg, flag = \"\", False\r\n try: \r\n 'Click on the main menu item in OMM home page'\r\n \r\n flag = ui_controls.button(get_obj_identifier('mnu_btn'))\r\n if flag:\r\n print \"Main menu icon in home page is clicked\"\r\n\r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n return flag, msg", "def home(self):\n self.input_key_event(InputActions.HOME)", "def action(self,input,session,context):\n #index = int(input) - 1\n #if index < 0:\n # raise IndexError('Menu option can not be less than 1')\n def make_index(elt):\n idx, item = elt\n if item.custom_index is not None: return str(item.custom_index)\n else: return str(idx)\n\n valid_inputs = map(make_index, enumerate(self.menu_items))\n index = valid_inputs.index(input)\n\n return self.menu_items[index].next_screen", "def menu():\n menu = 'main'\n while 1:\n if menu == 'main':\n click.echo('Main menu:')\n click.echo(' d: debug menu')\n click.echo(' q: quit')\n char = click.getchar()\n if char == 'd':\n menu = 'debug'\n elif char == 'q':\n menu = 'quit'\n else:\n click.echo('Invalid input')\n elif menu == 'debug':\n click.echo('Debug menu')\n click.echo(' b: back')\n char = click.getchar()\n if char == 'b':\n menu = 'main'\n else:\n click.echo('Invalid input')\n elif menu == 'quit':\n return", "def show_hr_menu():\n no_input = True\n while no_input:\n print('\\nPlease select from the following options:\\n')\n print('1. View / approve pending applications')\n print('2. View approved applications')\n print('3. View rejected applications\\n')\n choice = input('Please enter 1, 2 or 3 or Q to quit \\n')\n if choice in ('1', '2', '3'):\n no_input = False\n return choice\n elif choice.lower() == 'q':\n logout()\n is_invalid()", "def showMenu():\n print( \"1. Create New User\" )\n print( \"2. Authorize\" )\n print( \"3. Send SMS\" )\n print( \"4. Send Email\" )\n print( \"5. Get Recently Sent Message\" )\n print( \"6. Exit\" )", "def main_menu(ftp):\n print(\"What would you like to do?\")\n for key in sorted(MAIN_MENU_SELECTIONS):\n print(\"[%s] %s\" % (key, MAIN_MENU_SELECTIONS[key][0]))\n choice = raw_input(\"> \")\n while choice not in list(MAIN_MENU_SELECTIONS.keys()):\n choice = raw_input(\"> \")\n handle_main_menu_choice(choice, ftp)", "def go_back(self):\n app = App.get_running_app()\n app.sm.current = 'menu'", "def call_quit(self, _):\n return MENU_QUIT" ]
[ "0.6956989", "0.63773394", "0.6250696", "0.6249938", "0.6121402", "0.6083828", "0.6070606", "0.6057965", "0.6057965", "0.6047459", "0.60398436", "0.60209143", "0.5979282", "0.59761137", "0.59599715", "0.59599715", "0.59599715", "0.5945133", "0.59189636", "0.5891626", "0.58872306", "0.5873769", "0.5853492", "0.5847998", "0.58432484", "0.582579", "0.5805279", "0.5798352", "0.57976365", "0.5794415" ]
0.75444674
0
Uses an index array to select elements from an array along an axis.
def select_indices(arr,index_arr,axis=-1): shape_list=(lambda x,y: [ 1 if dim!=x else y for dim in range(len(arr.shape))] ) indices_list=[np.reshape(np.arange(length),shape_list(length_id,length)) for length_id,length in enumerate(arr.shape)] indices_list[axis]=index_arr return arr.ravel()[np.ravel_multi_index(indices_list,dims=arr.shape)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pndindex(*args):\r\n return np.ndindex(*args)", "def pndindex(*args):\n return np.ndindex(*args)", "def _index(tensor_3d, tensor_2d):\n x, y, z = tensor_3d.size()\n t = tensor_3d.reshape(x * y, z)\n tt = tensor_2d.reshape(x * y)\n v = t[torch.arange(x * y), tt]\n v = v.reshape(x, y)\n return v", "def take_along_axis(a, indices, axis):\n\n if indices.dtype.kind not in ('i', 'u'):\n raise IndexError('`indices` must be an integer array')\n\n if axis is None:\n a = a.ravel()\n axis = 0\n\n ndim = a.ndim\n\n axis = internal._normalize_axis_index(axis, ndim)\n\n if ndim != indices.ndim:\n raise ValueError(\n '`indices` and `a` must have the same number of dimensions')\n\n fancy_index = []\n for i, n in enumerate(a.shape):\n if i == axis:\n fancy_index.append(indices)\n else:\n ind_shape = (1,) * i + (-1,) + (1,) * (ndim - i - 1)\n fancy_index.append(cupy.arange(n).reshape(ind_shape))\n\n return a[tuple(fancy_index)]", "def index(x, axis, index_spec):\n idx = [slice(None)] * x.ndim\n idx[axis] = index_spec\n\n indexer = tuple(idx)\n return indexer", "def array_array_index(array, indices):\n if indices.shape[1] == 1:\n return array[np.arange(array.shape[0]), indices[:, 0]].reshape(indices.shape)\n\n stride = np.arange(indices.shape[0])*array.shape[1]\n indices_mod = indices + stride[:, None]\n indices_flat = indices_mod.ravel()\n return array.ravel()[indices_flat].reshape(indices.shape).copy()", "def _dask_oindex(x, indices):\n axis = 0\n for index in indices:\n x = da.take(x, index, axis=axis)\n # If axis wasn't dropped by a scalar index:\n if not isinstance(index, Integral):\n axis += 1\n return x", "def apply_index(data, idx):\n data = numpy.asanyarray(data)\n idx = numpy.asanyarray(idx)\n if len(idx.shape) != 2:\n raise ValueError(\"idx must have dimensions 2, not {0}\".format(\n len(idx.shape)))\n if len(data.shape) < 2:\n raise ValueError(\"data must have at least dimensions 2\")\n if idx.shape[0] != data.shape[0]:\n raise ValueError(\"data and idx must have same size in \"\n \"0th dimension\")\n if not idx.shape[1] in data.shape[1:]:\n raise ValueError(\"Size of idx dimension 1 must match a dimension in \"\n \"data\")\n idx_dim = data.shape[1:].index(idx.shape[1]) + 1\n return numpy.rollaxis(\n numpy.rollaxis(data, idx_dim, 1) #make time and index dim adjacent\n #get a 2d array where every element matches index of first axis\n [numpy.mgrid[0:idx.shape[0], slice(idx.shape[1])][0],\n idx, #2d array, every element is desired index of second axis\n ...] 
#and the other axes come along for the ride\n , 1, idx_dim + 1) #and put index dim back in place", "def index2d(src, idx):\n broadcast_to = P.BroadcastTo(idx.shape)\n offs = broadcast_to(P.range(Tensor(0, mindspore.int32),\n Tensor(idx.shape[0], mindspore.int32),\n Tensor(1, mindspore.int32))[:, None])\n idx = idx + (offs()) * idx.shape[1]\n\n return src.view(-1)[idx.view(-1)].view(idx.shpe)", "def index(self, arr, idx, temp = True, name = None):\n \n temp = temp or name is not None\n \n arr_t = arr.type\n\n if isinstance(arr_t, ScalarT):\n # even though it's not correct externally, it's\n # often more convenient to treat indexing\n # into scalars as the identity function.\n # Just be sure to catch this as an error in\n # the user's code earlier in the pipeline.\n return arr\n if isinstance(arr_t, TupleT):\n if isinstance(idx, Const):\n idx = idx.value\n\n assert isinstance(idx, int), \\\n \"Index into tuple must be an integer, got %s\" % idx\n if isinstance(idx, Const):\n idx = idx.value\n proj = self.tuple_proj(arr, idx)\n if temp:\n return self.assign_temp(proj, \"tuple_elt%d\" % idx if name is None else name)\n else:\n return proj\n\n if self.is_tuple(idx):\n indices = self.tuple_elts(idx)\n elif hasattr(idx, '__iter__'):\n indices = tuple(map(wrap_if_constant,idx))\n else:\n indices = (wrap_if_constant(idx),)\n\n n_required = arr_t.rank\n n_indices = len(indices)\n if n_indices < n_required:\n # all unspecified dimensions are considered fully sliced\n extra = (syntax_helpers.slice_none,) * (n_required - n_indices)\n indices = indices + extra\n\n if len(indices) > 1:\n idx = self.tuple(indices, \"index_tuple\" if name is None else name)\n else:\n idx = indices[0]\n\n t = arr_t.index_type(idx.type)\n idx_expr = Index(arr, idx, type=t)\n if temp:\n return self.assign_temp(idx_expr, \"array_elt\" if name is None else name)\n else:\n return idx_expr", "def broadcast_index(values, indices):\r\n assert_array(indices, shape=(...,) + values.shape[:-1])\r\n indexed_values = jp.take_along_axis(\r\n values.reshape((1,) + values.shape),\r\n indices.reshape((-1,) + values.shape[:-1] + (1,)),\r\n axis=-1,\r\n )\r\n flat_result = jp.squeeze(indexed_values, axis=-1)\r\n return flat_result.reshape(indices.shape)", "def advanced_indexing_op(input, index):\n batch_size = tf.shape(input)[0]\n max_length = int(input.get_shape()[1])\n dim_size = int(input.get_shape()[2])\n index = tf.range(0, batch_size) * max_length + (index - 1)\n flat = tf.reshape(input, [-1, dim_size])\n relevant = tf.gather(flat, index)\n return relevant", "def indices(dimensions, dtype=int, sparse=False):\n\n if not isinstance(dimensions, (tuple, list)):\n pass\n elif len(dimensions) > 2 or len(dimensions) == 0:\n pass\n elif dtype != int:\n pass\n elif sparse:\n pass\n else:\n return dpnp_indices(dimensions)\n\n return call_origin(numpy.indices, dimensions, dtype, sparse)", "def getindex(ndim, ind, strides):\n ret = 0\n for i in range(ndim):\n ret += strides[i] * ind[i]\n return ret", "def getindex(ndim, ind, strides):\n ret = 0\n for i in range(ndim):\n ret += strides[i] * ind[i]\n return ret", "def sub2ind(self, ix, iy):\n idx = np.ravel_multi_index((ix, iy), self.shape)\n return idx", "def sub2ind( sizes, multi_index ):\r\n num_dims = sizes.shape[0]\r\n index = 0\r\n shift = 1\r\n for i in range( num_dims ):\r\n index += shift * multi_index[i]\r\n shift *= sizes[i]\r\n return index", "def batched_index_select(input, dim, index):\n views = [input.shape[0]] + [1 if i != dim else -1 for i in range(1, len(input.shape))]\n expanse = 
list(input.shape)\n expanse[0] = -1\n expanse[dim] = -1\n index = index.view(views).expand(expanse)\n return torch.gather(input, dim, index)", "def order_indexes(dataarray: xr.DataArray, index_list: list) -> np.ndarray:\n\n dim_list = list(dataarray.dims)\n print(\"index_list\", index_list)\n print(\"list(dataaray.dims)\", dim_list)\n init_list = []\n\n for dim in dim_list:\n init_list.append(index_list.index(dim))\n\n print(\"init_list\", init_list)\n fin_list = list(range(len(dim_list)))\n dataarray_values = np.moveaxis(dataarray.values, init_list, fin_list)\n\n return dataarray_values", "def vector_to_array_index(vector_index, array):\n return numpy.asarray(numpy.unravel_index(vector_index, array.shape))", "def _indarray(np_array):\n return skil_client.INDArray(\n ordering='c',\n shape=list(np_array.shape),\n data=np_array.reshape(-1).tolist()\n )", "def axis_index(shape, axis=-1):\n return operator.getitem(numpy.mgrid, [slice(i) for i in shape])[axis]", "def multi_index(t, indices):\n if K._BACKEND == 'theano':\n return t[tuple(indices)]\n #from operator import getitem\n # Use native Theano indexing. \n #return getitem(t, tuple(indices)) # Equivalent to t[indices].\n else:\n return _tf_multi_index(t, indices)", "def indices(shape):\n iterables = [range(v) for v in shape]\n return product(*iterables)", "def indices(shape):\n iterables = [range(v) for v in shape]\n return product(*iterables)", "def _tf_multi_index(t, indices):\n # Note: this is far from a full implementation of Theano fancy\n # indexing, use with care.\n assert K._BACKEND == 'tensorflow'\n from collections import Sequence\n import tensorflow as tf\n\n if not isinstance(indices, Sequence):\n raise ValueError(indices)\n\n if len(indices) == 1:\n return tf.gather(t, indices[0]) # gather() suffices for 1d\n if K.ndim(t) == len(indices):\n # Index n-dimensional tensor with n indices: pack the indices\n # from e.g. [[i_0, i_1, ...] [j_0, j_1, ...]] to [[i_0, j_0],\n # [i_1, j_1], ...] and use gather_nd()\n # (https://www.tensorflow.org/api_docs/python/array_ops.html#gather_nd)\n # TODO: check that all i in indices have ndim n-1 \n # TODO: support broadcasting for numpy arrays with np.broadcast_to()\n #indices = tf.pack(list(indices), axis=len(indices)-1)\n indices = tf.pack(list(indices), axis=-1)\n # indices = tf.Print(indices, [indices], 'indices', summarize=100)\n return tf.gather_nd(t, indices)\n else:\n raise NotImplementedError('index {} with {}'.format(t, indices))", "def flatten_idx(idx, axis=-1):\n idx = numpy.asanyarray(idx)\n if not idx.dtype.kind in ('i', 'u'):\n idx = idx.astype(int)\n preshape = idx.shape[:axis]\n postshape = idx.shape[axis:]\n stride = int(numpy.product(postshape[1:])) #1 if applied to empty\n #The index on this axis moves stride elements in flat\n outidx = idx.flatten() * stride #makes a copy\n #First add the offsets to get us to [..., idx @ axis = 0, 0...)\n outidx += numpy.repeat(\n numpy.arange(0, len(outidx), int(numpy.product(postshape)),\n dtype=idx.dtype),\n numpy.product(postshape))\n #Now offsets for non-zero on the trailing axes [0, 0, ... 
0@axis, ...]\n outidx += numpy.tile(numpy.arange(0, stride, dtype=idx.dtype),\n int(numpy.product(preshape)) * idx.shape[axis])\n return outidx", "def gather_nd_python(a_np, indices_np):\n a_shape = a_np.shape\n indices_np = indices_np.astype(\"int32\")\n indices_shape = indices_np.shape\n assert len(indices_shape) > 1\n assert indices_shape[0] <= len(a_shape)\n b_shape = list(indices_shape[1:])\n for i in range(indices_shape[0], len(a_shape)):\n b_shape.append(a_shape[i])\n b_np = np.zeros(b_shape)\n for idx in np.ndindex(*indices_shape[1:]):\n a_idx = []\n for i in range(indices_shape[0]):\n indices_pos = tuple([i] + list(idx))\n a_idx.append(indices_np[indices_pos])\n b_np[idx] = a_np[tuple(a_idx)]\n return b_np", "def ravel_index(x, dims):\n i = 0\n for dim, j in zip(dims, x):\n i *= dim\n i += j\n return i", "def construct_indices(after_pooling):\n our_indices = np.zeros_like(after_pooling, dtype=np.int64)\n batch_num, channel_num, row_num, col_num = after_pooling.shape\n for batch_id in range(batch_num):\n for channel_id in range(channel_num):\n for row_id in range(row_num):\n for col_id in range(col_num):\n our_indices[batch_id, channel_id, row_id, col_id] = col_num * 2 * 2 * row_id + 2 * col_id\n return torch.from_numpy(our_indices)" ]
[ "0.7302368", "0.7263272", "0.69995314", "0.6984675", "0.68649966", "0.68557614", "0.6626734", "0.6612736", "0.64494765", "0.63717943", "0.6355618", "0.6344733", "0.6259788", "0.62565714", "0.62565714", "0.6241369", "0.62404037", "0.62190133", "0.62045544", "0.61014456", "0.6095909", "0.60866135", "0.608108", "0.6067619", "0.6067619", "0.60500604", "0.6039876", "0.60145503", "0.60067827", "0.5959661" ]
0.7482163
0
Continuous loop of inputs and answers
def evaluateCycle(self): print("Enter q or quit to exit") input_sentence = '' while(1): # Get input sentence input_sentence = input('> ') # Check if it is quit case if input_sentence == 'q' or input_sentence == 'quit': break ans = self.evaluateOneInput(input_sentence) print('Bot:', ans)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def eval_loop():\n while(True):\n decision = raw_input(\"enter some mathematical operations\")\n if(decision == \"done\"):\n break\n print eval(decision)", "def main():\n min_random = 10 #keeping constant for the min random number range\n max_random = 99 #keeping constant for the max random number range\n count = 0 #creating a counter variable to keep track of user's answers in a row\n\n\n while count != 3: #this loop will keep goin until user get 3 answers correct in a row\n num1 = random.randint(min_random, max_random) #generating a random number each new equations\n num2 = random.randint(min_random, max_random)\n\n print(\"What is \" + str(num1) + \"+\" + str(num2) + \"?\")\n user_input = int(input(\"Your answer is: \")) #takign the user's input and converting it into an integer\n\n total = num1 + num2 #keeping track of the actual answer to compare with the user's response", "def looping(self):\n\n pretty_print(\"To Exit enter: 101\", \":\")\n pretty_print(\"To continue press any number key:\", \":\")\n decision = get_int_input()\n\n if decision == 101:\n self.again = False", "def algorithm_loop(self):", "def waitenter(times=1):\n\n # For each time\n for _ in range(times):\n # Ask for user input\n input(\"\")", "def loop(self):\n while True:\n self._print_field()\n try:\n cmd = input(PROMPT)\n self._invoke_cmd(cmd)\n except EOFError: # Allows to exit by pressing ⌃D without error\n break", "def main_loop(self, old_answers=None, old_correct_list=None):\r\n if old_answers is None:\r\n old_answers = []\r\n if old_correct_list is None:\r\n old_correct_list = []\r\n raw_answers, is_correct_list = self.ask_question(old_answers,\r\n old_correct_list)\r\n\r\n if sum(is_correct_list) == len(self.correct_answers):\r\n user_response = self.display_correct_window()\r\n else:\r\n user_response = self.display_incorrect_window(is_correct_list)\r\n if user_response == 'Try Again':\r\n self.main_loop(raw_answers, is_correct_list)\r\n if user_response == 'Show Answers':\r\n user_response = self.show_answers(self.correct_answers,\r\n raw_answers, is_correct_list)\r\n if user_response == 'Show Solution':\r\n user_response = self.show_solution()\r\n return user_response", "def inputloop():\n while True:\n for char in raw_input().decode('utf-8'):\n print script(char)", "def multiple_eval_for_loops_v1():", "def interactive_run(self):\r\n while True:\r\n try:\r\n #self.display_mang.print_instructions()\r\n input_string = input()\r\n if input_string == \"exit\":\r\n break\r\n self.input_mang.process_input(input_string)\r\n except Exception as e:\r\n print(e)", "def main_loop(self):\r\n print('Press ctrl-c to quit')\r\n while True:\r\n url = input('\\nType Question url: ')\r\n handler = AnswerHandler(self.session)\r\n res, err = handler.answer_questions(url)\r\n if res:\r\n print('No more questions for this URL')\r\n else:\r\n print(f'Unexpected exception occurred: {err}', file=sys.stderr)\r\n traceback.print_exc()", "def multiple_eval_for_loops_v2():", "def loop(self):\n pass", "def loop(self):\n raise NotImplementedError()", "def perform_strategy(self, counter):\r\n ans = \"\"\r\n while ans.lower() not in [\"y\", \"n\"]:\r\n print(f\"Envelope number {counter} contains: \\n ... \\n ... \\n ... \\n{self.envelopes[counter].money}$!!!!\\n \"\r\n f\"Do you CHOOSE this ENVELOPE!?!? 
y/n\")\r\n ans = input()\r\n return ans.lower() == 'y'", "def main():\n while True:\n heightTruss = int(input(\"Please enter the height of the truss: \"))\n truss = int(input(\"Please enter the width of the truss: \"))\n roofLength = int(input(\"Please enter the finished length of the roof: \"))\n\n print(\"The area of the roof is\", area(roofLength, pythagorean(truss/2, heightTruss)), \"m^2\") # calls area function with pythagorean function as one of the parameters\n print(\"Would you like to repeat the process?\")\n\n response = input(\"Please respond with 'y' for yes or 'n' for no: \")\n if response.lower() == 'y':\n continue\n break", "def userinput(prompttext=\"\", times=1):\n\n # If times is 1\n if times == 1:\n # Return the result\n return input(str(prompttext))\n\n # Create new empty list\n inputlist = []\n\n # For each time in range\n for _ in range(times):\n # Append the result of another input request\n inputlist.append(input(str(prompttext)))\n\n # Return the final result\n return inputlist", "def ready():\n rdy = False\n valid_answer = False\n while not rdy:\n while not valid_answer:\n response = input(\"Are you ready to play? \")\n valid_answer = check_inputs([\"Y\", \"N\"], response[0].capitalize())\n rdy = response[0].capitalize() == \"Y\"\n valid_answer = False", "def loop(self):\n line = self.read()\n while line != \"quit\":\n value = self.eval(line)\n print(value)\n line = self.read()", "def while_repeat(sentence_string_list,input_word,answer_list):\r\n\tchance = 5\r\n\tRepeat = \"Repeat\"\r\n\t\r\n\twhile Repeat == \"Repeat\":\r\n\t\tprint \" \".join(sentence_string_list)+\"\\n\"\r\n\t\tuser_ans = raw_input(\"Your answer of \" + input_word + \" is : \")\r\n\r\n\t\tif correct_or_not(user_ans,answer_list):\r\n\r\n\t\t\tuser_ans_list.append(user_ans)\r\n\t\t\treplace_all(sentence_string_list,input_word, user_ans)\r\n\t\t\tprint \"/////Corrent!/////\\n\"\r\n\t\t\tRepeat = \"Stop\"\r\n\r\n\t\telse:\r\n\t\t\tchance -= 1\r\n\t\t\tprint \"/////Worng! You've got \" + str(chance) + \"chances left!/////\\n\"\r\n\t\t\tRepeat = \"Repeat\"\r\n\r\n\t\tif chance == 0:\r\n\t\t\treturn \"unvalid\"\r\n\t\t\r\n\treturn \"valid\"", "def multiplication(self):\r\n global answer\r\n while True:\r\n try:\r\n easy_random1 = int(random.choice(string.digits))\r\n easy_random2 = int(random.choice(string.digits))\r\n easy_random3 = int(random.choice(string.digits))\r\n easy_random4 = int(random.choice(string.digits))\r\n print(f\"{easy_random1} * {easy_random2} * {easy_random3} * {easy_random4} = ?\")\r\n real_answer = easy_random1 * easy_random2 * easy_random3 * easy_random4\r\n answer = input(\"Enter answer: \")\r\n if answer.lower() == \"stop\":\r\n print(\"okay\")\r\n if int(answer) == real_answer:\r\n print(\"CORRECT ANSWER\")\r\n else:\r\n print(\"WRONG ANSWER\")\r\n print(f\"the answer is {real_answer} sorry! 
try again\")\r\n except ValueError:\r\n return f'\"{answer}\" is not a valid number, only the string stop is allowed'", "def play():\n display_starting_message()\n print(\"\")\n print(\"*\"*10)\n for question_number, question in enumerate(list_of_questions):\n print(question)\n print(\"\")\n for responses in list_of_questions[question]:\n print(responses)\n pick_one = input(\"pick one: \")\n check_murder_sauce(question, pick_one)\n\n murder_sauce_result(murder_sauce)", "def run(self):\n \n t = 0\n while t < 10:\n self.reset()\n self.start_simulation()\n while not self.done:\n raw_input(\"Press Enter to continue...\")\n action = self.action_space.sample()\n print(action)\n state, reward, self.done, _ = self.step(action)\n print('Current state:\\n angles: {}'.format(state))\n print('Reward: {}'.format(reward))\n\n self.stop_simulation()\n t += 1", "def find():\n b = 0\n q = 0\n while b == q:\n seq = [randint(-10, 10) for _ in range(randint(15, 30))]\n b, b_at = brute_force(seq)\n q = solution(seq)\n print(seq, b, q, b_at)", "def Loop(self):\n self.coshell.SetModesCallback(self.SetModes)\n while True:\n try:\n text = self.Prompt()\n if text is None:\n break\n self.Run(text) # paradoxically ignored - coshell maintains $?\n except EOFError:\n # ctrl-d\n if not self.coshell.ignore_eof:\n break\n except KeyboardInterrupt:\n # ignore ctrl-c\n pass\n except interactive_coshell.CoshellExitError:\n break", "def run_aqi(self):\r\n while True:\r\n self.get_aqi()\r\n time.sleep(30 - time.time() % 30)", "def part2():\n\n program = IntCodeProcessor.load_program('day13input.txt')\n program[0] = 2\n cpu = IntCodeProcessor(program)\n result = None\n next_input = None\n ball_pos = None\n paddle_pos = None\n score = None\n while result is None:\n try:\n result = cpu.execute_program(next_input, reset=False)\n except ExecutionError as err:\n assert err.reason == ExecutionCode.NEED_INPUT\n\n ball_pos, paddle_pos, score = process_output(cpu.outputs, ball_pos, paddle_pos, score)\n cpu.outputs = []\n next_input = next_input_for(ball_pos, paddle_pos)\n print(f'Part 2 answer: {score}')", "def wait_for_solution(m,lit_to_clauses):\n i = 2\n for line in sys.stdin:\n if (line.startswith(\"v \")): \n handle_solution_line(line,i)\n i += 1", "def run(self):\n tick_duration = 1 / self.config.tick_rate\n last_tick_time = time.time()\n\n while True:\n input_ = self.input_source.get_input()\n self.__update(input_)\n\n if self.state.exit:\n break\n\n current_time = time.time()\n sleep_time = tick_duration - (current_time - last_tick_time)\n if sleep_time > 0:\n time.sleep(sleep_time)\n last_tick_time = current_time", "def transduce(self,inputs):\n self.start()\n return [self.step(inp) for inp in inputs]" ]
[ "0.6230928", "0.6096894", "0.6076145", "0.5999656", "0.5885386", "0.5863726", "0.5846322", "0.5829764", "0.5800593", "0.57979757", "0.5749408", "0.57334924", "0.5704362", "0.5703528", "0.56611365", "0.5657158", "0.56488985", "0.564698", "0.5644468", "0.5608744", "0.5578464", "0.5566481", "0.5520391", "0.5518303", "0.55164444", "0.5509446", "0.5482248", "0.54818004", "0.54582787", "0.54473084" ]
0.6725195
0
Execute html_reporter if the html flag exists in sys.argv.
def __execute_reporter(self): if not self.__args.report: return reporter.HTMLReporter().generate_report_from_file( self.__lst_json_files)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main(args):\n p = OptionParser()\n p.add_option('-d', '--debug',\n action='store_true', default=False, dest='debug',\n help='debug')\n p.add_option('-w', '--w3c',\n action='store_true', default=False, dest='w3c',\n help='send file to validator.w3.org')\n p.add_option('-r', '--rm',\n action='store_true', default=False, dest='passrm',\n help='rm validation output on pass')\n p.add_option('-v', '--verbose',\n action='store_true', default=False, dest='verbose',\n help='more output')\n (o, a) = p.parse_args(args)\n \n if o.debug: pdb.set_trace()\n\n verbose(o.verbose)\n \n if 1 < len(a):\n flist = a[1:]\n else:\n flist = glob.glob(\"*.html\")\n\n for filename in flist:\n if verbose(): print filename\n if o.w3c:\n w3c_validate(filename)\n else:\n check_file(filename)\n\n sys.exit(exit_value())", "def run_html():\n if __name__ != \"__main__\":\n app.run(debug=True)", "def entry_point():\n\n args = parse_arguments()\n\n try:\n main(args)\n except Exception as exc:\n if args['output'] != 'html':\n raise\n msg = 'Error: ' + str(exc)\n if args['output'] == 'html':\n print('<p>{}</p><script>document.getElementById(\\\"result\\\").style.backgroundColor ' \\\n '= \\\"Tomato\\\";</script>'.format(msg))\n else:\n print(exc)\n sys.exit()", "def main():\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument('--levels', type=int,\n help='maximum levels to show', default=2)\n parser.add_argument('--report', type=str, default=\"structure_dump\",\n help='Report to run.')\n parser.add_argument('input')\n\n args = parser.parse_args()\n\n data = open(args.input).read()\n if args.input.endswith('.html'):\n root = from_html(data)\n else:\n root = from_markdown(data)\n\n if args.report == 'structure_dump':\n report.structure_dump.report(root, args)\n elif args.report == 'display_cdf':\n report.display_cdf.report(root, args)\n elif args.report == 'enhanced_html':\n assert hasattr(root, \"ast\") # Require the CommonMark AST.\n report.enhanced_html.report(root, args)\n else:\n parser.error(\"Unrecognized report: %d\" % args.report)", "def test_html_output(self):\n pass", "def _build_htmlpage_one(args):\n return build_htmlpage_one(*args)", "def display_html_report():\n display(HTML('report_page.html'))", "def pytest_runtest_makereport(item, call): # pylint: disable=unused-argument\n pytest_html = item.config.pluginmanager.getplugin(\"html\")\n outcome = yield\n report = outcome.get_result()\n extra = getattr(report, \"extra\", [])\n driver_manager = DriverManager()\n xfail = hasattr(report, \"wasxfail\")\n\n if report.when == \"call\":\n extra.append(pytest_html.extras.url(driver_manager.driver.current_url))\n if (report.skipped and xfail) or (report.failed and not xfail):\n extra.append(pytest_html.extras.html(\"<div>Additional HTML</div>\"))\n screenshot = driver_manager.driver.get_screenshot_as_base64()\n extra.append(pytest_html.extras.image(screenshot, \"Screenshot\"))\n report.extra = extra", "def html():\n env.file_ext = \".html\"\n local(\"pandoc {input_files} -o {output_file}{file_ext} --standalone --bibliography={bib_file} --csl={csl_file} --toc --number-sections\".format(**env))", "def report():\n \n parser = argparse.ArgumentParser(\n \n description='pyrpipe diagnostic utility\\nGenerate analysis report.',\n \n usage='''pyrpipe_diagnostic report [<args>] <logfile>\n \n ''') \n parser.add_argument('-o', help='out file \\ndefault: same as input logfile',action=\"store\")\n parser.add_argument('-s','--summary', help='Print quick summary and exit',default=False,dest='summary', 
action='store_true')\n parser.add_argument('-e', help='report output type: [md,pdf,html] \\ndefault: pdf',default='pdf',action=\"store\")\n parser.add_argument('-c',help='Report options [(f)ull,fa(i)l,(p)ass]\\ndefault: f',default='f',action=\"store\")\n parser.add_argument('-v',help='verbose',action=\"store_true\")\n parser.add_argument('logfile', help='The log file generated by pyrpipe',action=\"store\")\n args = parser.parse_args(sys.argv[2:])\n \n logFile=args.logfile\n envLog=reports.checkEnvLog(logFile) \n #parse args\n if args.summary:\n #print summary\n reports.generate_summary(logFile,envLog,coverage='a')\n return\n \n vFlag=args.v\n if vFlag:\n print(\"Generating report\")\n \n \n outFile=\"\"\n if args.o is None:\n outFile=pu.get_file_basename(args.logfile)\n else:\n outFile=args.o\n outFile+='.'+args.e\n \n if args.e in ['pdf','html','md']:\n htmlReport=reports.generateHTMLReport('simpleDiv.html',logFile,envLog,coverage=args.c)\n if args.e=='pdf':\n reports.writeHtmlToPdf(htmlReport,outFile)\n elif args.e=='html':\n reports.writeHtml(htmlReport,outFile)\n elif args.e == 'md':\n reports.writeHtmlToMarkdown(htmlReport,outFile)\n else:\n pu.print_boldred(\"unknown extension:\"+args.e+\". Exiting\")", "def test_html_documentation(self):\n app = Sphinx(\n self.source_dir,\n self.config_dir,\n self.output_dir,\n self.doctree_dir,\n buildername='html',\n warningiserror=True,\n )\n app.build(force_all=self.all_files)", "def main(*arguments):\n\n args = parse_args(arguments)\n\n if args.test_suite is not None:\n test_suite = report_manager.load_test_suite_conf(args.test_suite)\n for i, test in enumerate(test_suite):\n args = parse_args(test)\n process_args_and_run(args, test_suite_iter=i)\n else:\n process_args_and_run(args)", "def run_analyze():\n\n parser = ArgumentParser()\n parser.add_argument('name',nargs='?',default=None,help=\"Results file or directory with result files\")\n parser.add_argument('-o','--output',help=\"Analysis output directory\")\n parser.add_argument('-d','--diffs-only',action='store_true',help=\"Show only diffs on detail pages\")\n parser.set_defaults(output='',diffs_only=False)\n\n script_runner.run_analyze(parser.parse_args())", "def main(*args):\n local_args = pywikibot.handle_args(args)\n\n # This factory is responsible for processing command line arguments\n # that are also used by other scripts and that determine on which pages\n # to work on.\n gen_factory = pagegenerators.GeneratorFactory()\n # The program to pipe stuff through\n filters = []\n options = {}\n\n # Parse command line arguments\n for arg in local_args:\n option, sep, value = arg.partition(':')\n if option == '-filter':\n filters.append(value)\n elif option == '-always':\n options['always'] = True\n else:\n # check if a standard argument like\n # -start:XYZ or -ref:Asdf was given.\n gen_factory.handleArg(arg)\n\n options['filters'] = filters\n\n gen = gen_factory.getCombinedGenerator(preload=True)\n if gen:\n # The preloading generator is responsible for downloading multiple\n # pages from the wiki simultaneously.\n bot = PiperBot(gen, **options)\n bot.run()\n return True\n else:\n pywikibot.bot.suggest_help(missing_generator=True)\n return False", "def call_link_reports(args) ->None:\n\n if not args['no_cmd']:\n print_link_reports(args['report-id'])\n if args['yaml']:\n yaml_file(args['report-id'])\n if args['csv']:\n csv_file(args['report-id'])\n if args['json']:\n json_file(args['report-id']) \n\n config.logger.info(\"Link Report generated according to the format chosen by user\")", "def 
main(args):\n\n if 'log' in args and args['log'] is not None:\n logging.basicConfig(level=LOGGING_LEVELS.get(args['log'].lower(), logging.NOTSET))\n\n test_structure = read_test_file(args['test'])\n tests = build_testsets(args['url'], test_structure)\n\n # Override configs from command line if config set\n for t in tests:\n if 'print_bodies' in args and args['print_bodies'] is not None:\n t.config.print_bodies = safe_to_bool(args['print_bodies'])\n\n if 'interactive' in args and args['interactive'] is not None:\n t.config.interactive = safe_to_bool(args['interactive'])\n\n # Execute all testsets\n failures = execute_testsets(tests)\n\n sys.exit(failures)", "def Main():\n statistics_types = frozenset([\n u'codereviews', u'codereviews-history', u'contributions'])\n\n argument_parser = argparse.ArgumentParser(description=(\n u'Generates an overview of project statistics of github projects.'))\n\n argument_parser.add_argument(\n u'-c', u'--config', dest=u'config_path', action=u'store',\n metavar=u'CONFIG_PATH', default=None, help=(\n u'path of the directory containing the statistics configuration '\n u'files e.g. stats.ini.'))\n\n argument_parser.add_argument(\n u'statistics_type', choices=sorted(statistics_types), action=u'store',\n metavar=u'TYPE', default=None, help=u'The statistics type.')\n\n options = argument_parser.parse_args()\n\n if not options.statistics_type:\n print(u'Statistics type missing.')\n print(u'')\n argument_parser.print_help()\n print(u'')\n return False\n\n config_path = options.config_path\n if not config_path:\n config_path = os.path.dirname(__file__)\n config_path = os.path.dirname(config_path)\n config_path = os.path.join(config_path, u'data')\n\n stats_file = os.path.join(config_path, u'stats.ini')\n if not os.path.exists(stats_file):\n print(u'No such config file: {0:s}.'.format(stats_file))\n print(u'')\n return False\n\n output_writer = StdoutWriter()\n\n if not output_writer.Open():\n print(u'Unable to open output writer.')\n print(u'')\n return False\n\n if options.statistics_type.startswith(u'codereviews'):\n usernames = {}\n with open(stats_file) as file_object:\n stats_definition_reader = StatsDefinitionReader()\n usernames = stats_definition_reader.ReadUsernames(file_object)\n\n include_closed = False\n if options.statistics_type == u'codereviews-history':\n include_closed = True\n\n codereviews_helper = CodeReviewIssuesHelper(include_closed=include_closed)\n codereviews_helper.ListIssues(usernames, output_writer)\n\n elif options.statistics_type == u'contributions':\n projects_per_organization = {}\n with open(stats_file) as file_object:\n stats_definition_reader = StatsDefinitionReader()\n projects_per_organization = (\n stats_definition_reader.ReadProjectsPerOrganization(file_object))\n\n user_mappings = {}\n with open(stats_file) as file_object:\n stats_definition_reader = StatsDefinitionReader()\n user_mappings = stats_definition_reader.ReadUserMappings(file_object)\n\n contributions_helper = GithubContributionsHelper(user_mappings)\n contributions_helper.ListContributions(\n projects_per_organization, output_writer)\n\n # TODO: add support for pull requests\n # TODO: add support for more granular CL information\n\n return True", "def collect_html(args):\n url_list = args.url_list\n output_dir = args.output_dir\n\n print(url_list)\n\n # do some checks\n try: \n assert os.path.exists(url_list), 'url_list must exist'\n assert os.path.exists(output_dir), 'output_dir must exist'\n except AssertionError as err: \n logger.error('Failed check: 
{}'.format(err)) \n return \n\n urls = common.read_file(url_list)\n \n for url in urls: \n logger.debug(url) \n\n html = spy_tools.collect_html(url)\n out = url.split('/')\n output = os.path.join(output_dir, out[-1] + '.html')\n common.write_file(html, output)", "def review(args):\n html_doc = document.Document(get_code(args.file))\n summary = html_doc.review()\n\n print(\n '{:d} blank links removed.'.format(summary['links']['removed']),\n '{:d} misdirected links set to open in new window.'.format(summary['links']['retargetted']),\n '{:d} double-tracked links decoded.'.format(summary['links']['decoded']),\n '{:d} broken links marked.'.format(summary['links']['broken']),\n '{:d} unchecked links marked.'.format(summary['links']['unchecked']),\n\n '{:d} links referencing missing anchors marked.'.format(summary['anchors']['marked']),\n\n '{:d} emails cleaned.'.format(summary['emails']['cleaned']),\n '{:d} invalid emails marked.'.format(summary['emails']['invalid']),\n '{:d} unchecked emails marked.'.format(summary['emails']['unchecked']),\n sep='\\n'\n )\n set_code(args.file, html_doc)", "def run_report_generation(**kwargs):\n out = run_python_script_helper(\n os.path.dirname(__file__), \"report_generation_example.py\", **kwargs\n )\n return out", "def test_arg_parser_run_no_req_args(self):\n with self.assertRaises(SystemExit):\n self.parser.parse_args(['view-report'])", "def main(args):\n parser = create_parser()\n\n if not args:\n parser.print_usage()\n sys.exit(1)\n\n parsed_args = parser.parse_args(args)\n\n urls = scrape_urls(parsed_args.webpage)\n emails = scrape_emails(parsed_args.webpage)\n phones = scrape_phones(parsed_args.webpage)\n\n if urls:\n print(\"\\nURLS:\\n\\n\", '\\n'.join(urls))\n else:\n print(\"\\nURLS:\\n\\nNone\")\n\n if emails:\n print(\"\\nEMAILS:\\n\\n\", '\\n'.join(emails))\n else:\n print(\"\\nEMAILS:\\n\\nNone\")\n\n if phones:\n print(\"\\nPHONE NUMBERS:\\n\\n\", '\\n'.join(phones))\n else:\n print(\"\\nPHONE NUMBERS:\\n\\nNone\")", "def auto_reporter(**opts):\r\n if hasattr(sys.stdout, 'isatty') and sys.stdout.isatty():\r\n try:\r\n return FancyReporter(**opts)\r\n except ImportError:\r\n pass\r\n return PlainReporter()", "def run():\r\n\r\n # Parse options and adjust logging level if necessary\r\n options, logging_level = parse_options()\r\n if not options: sys.exit(2)\r\n logger.setLevel(logging_level)\r\n logger.addHandler(logging.StreamHandler())\r\n\r\n # Run\r\n markdown.markdownFromFile(**options)", "def main(*args):\n options = {}\n local_args = pywikibot.handle_args(args)\n site = pywikibot.Site()\n site.login()\n gen_factory = GeneratorFactory(site)\n for arg in local_args:\n if gen_factory.handleArg(arg):\n continue\n arg, _, value = arg.partition(':')\n arg = arg[1:]\n if arg == 'config':\n if not value:\n value = pywikibot.input(\n 'Please enter a value for {}'.format(arg), default=None\n )\n options[arg] = value\n else:\n options[arg] = True\n gen = gen_factory.getCombinedGenerator(preload=True)\n if 'config' not in options:\n pywikibot.bot.suggest_help(missing_parameters=['config'])\n return False\n config = get_json_from_page(pywikibot.Page(site, options.pop('config')))\n if validate_config(config):\n options.update(config)\n else:\n pywikibot.error('Invalid config.')\n return False\n MagicLinksReplacer(gen, site=site, **options).run()\n return True", "def main(args):\n parser = create_parser()\n\n if not args:\n parser.print_usage()\n sys.exit(1)\n\n parsed_args = parser.parse_args(args)\n scrape_url(parsed_args.url)", "def main():\n 
config_file = get_conf(get_config_name())\n if not config_file:\n sys.exit(1)\n log = get_last_file(config_file[\"LOG_DIR\"])\n MAIN_LOGGER.info(\"we've got log file named %s\", log.path)\n file_name = os.path.join(os.path.dirname(__file__), config_file['REPORT_DIR'],\n \"report-{}.html\".format(log.date))\n if os.path.exists(file_name):\n MAIN_LOGGER.info(\"%s already exists\", file_name)\n sys.exit()\n res = gen_parse_log(log, config_file['PERCENT_FAILS'])\n if not res:\n sys.exit(1)\n MAIN_LOGGER.info(\"log parsed\")\n report = []\n for _ in range(int(config_file[\"REPORT_SIZE\"])):\n try:\n report.append(next(res))\n except StopIteration:\n pass\n MAIN_LOGGER.info(\"report file name %s\", file_name)\n\n if report:\n save_report(report, config_file['TEMPLATE_FILE'], file_name)", "def do_html(pidx):\n response_headers = [(\"Content-type\", \"text/html\")]\n name = get_script_name(pidx)\n if not os.path.isfile(name):\n sys.stderr.write(f\"autoplot/meta 404 {name}\\n\")\n status = \"404 Not Found\"\n output = \"\"\n return output.encode(), status, response_headers\n loader = importlib.machinery.SourceFileLoader(f\"p{pidx}\", name)\n spec = importlib.util.spec_from_loader(loader.name, loader)\n mod = importlib.util.module_from_spec(spec)\n loader.exec_module(mod)\n # see how we are called, finally\n appdata = mod.get_description()\n html = generate_html(appdata)\n return html, \"200 OK\", response_headers", "def main() -> None:\r\n # pylint: disable=W0601\r\n global ARGS, BROWSERS\r\n BROWSERS = {'chrome': Chrome, 'firefox': Firefox, 'ie': Ie,\r\n 'safari': Safari, 'opera': Opera, 'edge': Edge}\r\n # pylint: disable=C0103\r\n PARSER = argparse.ArgumentParser()\r\n PARSER.add_argument('-m', '--modules', help='Which modules to test. One or more of '\r\n '[%(choices)s]. Default is all.', nargs='+', type=str,\r\n choices=MODULES.keys(), metavar='', default=list(MODULES.keys()))\r\n PARSER.add_argument('-l', '--locales', help='Which locales to test. One or more of '\r\n '[%(choices)s]. Default is all.', nargs='+', type=str,\r\n choices=LANGS.keys(), metavar='', default=list(LANGS.keys()))\r\n PARSER.add_argument('-b', '--browsers', help='Which browser to use. One or more of '\r\n '[%(choices)s]. Default is %(default)s', nargs='+', default=['chrome'],\r\n choices=BROWSERS.keys(), metavar='')\r\n PARSER.add_argument('-w', '--wait', help='Wait this many seconds before deciding an element is '\r\n 'missing. Default is %(default)s', default=[20], type=int, nargs=1)\r\n ARGS = PARSER.parse_args()\r\n parseargs()\r\n os.makedirs(SCREENSHOT_DIR, exist_ok=True)\r\n\r\n try:\r\n full_languages_modules_run(modfilter=ARGS.modules,\r\n langfilter=ARGS.locales, brows=ARGS.browsers)\r\n except Exception: # Too general is the point, it's a Final Action. pylint: disable=W0703\r\n with open(RESULTS_FILE, mode='a', encoding='UTF-8') as log:\r\n log.write('\\n\"Well, something went wrong. 
A manual exit, hopefully:\"\\n\\n' + tidy_error())\r\n raise", "def initial_reporting(config, run_tracker):\r\n reports_dir = config.get('reporting', 'reports_dir',\r\n default=os.path.join(config.getdefault('pants_workdir'), 'reports'))\r\n link_to_latest = os.path.join(reports_dir, 'latest')\r\n if os.path.exists(link_to_latest):\r\n os.unlink(link_to_latest)\r\n\r\n run_id = run_tracker.run_info.get_info('id')\r\n if run_id is None:\r\n raise ReportingError('No run_id set')\r\n run_dir = os.path.join(reports_dir, run_id)\r\n safe_rmtree(run_dir)\r\n\r\n html_dir = os.path.join(run_dir, 'html')\r\n safe_mkdir(html_dir)\r\n os.symlink(run_dir, link_to_latest)\r\n\r\n report = Report()\r\n\r\n # Capture initial console reporting into a buffer. We'll do something with it once\r\n # we know what the cmd-line flag settings are.\r\n outfile = StringIO()\r\n capturing_reporter_settings = PlainTextReporter.Settings(outfile=outfile, log_level=Report.INFO,\r\n color=False, indent=True, timing=False,\r\n cache_stats=False)\r\n capturing_reporter = PlainTextReporter(run_tracker, capturing_reporter_settings)\r\n report.add_reporter('capturing', capturing_reporter)\r\n\r\n # Set up HTML reporting. We always want that.\r\n template_dir = config.get('reporting', 'reports_template_dir')\r\n html_reporter_settings = HtmlReporter.Settings(log_level=Report.INFO,\r\n html_dir=html_dir,\r\n template_dir=template_dir)\r\n html_reporter = HtmlReporter(run_tracker, html_reporter_settings)\r\n report.add_reporter('html', html_reporter)\r\n\r\n # Add some useful RunInfo.\r\n run_tracker.run_info.add_info('default_report', html_reporter.report_path())\r\n port = ReportingServerManager.get_current_server_port()\r\n if port:\r\n run_tracker.run_info.add_info('report_url', 'http://localhost:%d/run/%s' % (port, run_id))\r\n\r\n return report" ]
[ "0.61923707", "0.6103338", "0.57461756", "0.5729902", "0.5687042", "0.5661867", "0.5577024", "0.5549909", "0.5545881", "0.5544282", "0.5523945", "0.5516386", "0.5506726", "0.5482997", "0.545956", "0.541977", "0.5402856", "0.5398641", "0.5351573", "0.53477114", "0.5338802", "0.53273463", "0.5315181", "0.53111917", "0.52756685", "0.52723646", "0.5268862", "0.5260009", "0.52172375", "0.52087146" ]
0.65210193
0
Get all scenarios in a folder. Recurse into sub-folders if the "rd" argument appears in sys.argv.
def __get_list_scenarios_in_folder(self): # If both directory and recur_directory are exist # then show "Invalid command" and exit. if self.__args.directory is not "" \ and self.__args.recur_directory is not "": utils.print_error("\n{}\n".format(constant.ERR_COMMAND_ERROR)) exit(1) recursive = False start_directory = "" if self.__args.directory is not "": start_directory = self.__args.directory elif self.__args.recur_directory is not "": start_directory = self.__args.recur_directory recursive = True if not start_directory: start_directory = TestRunner.__test_script_dir if not os.path.exists(start_directory): utils.print_error( "\n{}\n".format(constant.ERR_PATH_DOES_NOT_EXIST. format(start_directory))) exit(1) list_files = [] if start_directory.endswith(".py"): list_files = [start_directory] else: try: if recursive: for directory, _, _ in os.walk(start_directory): list_files.extend(glob.glob(os.path.join(directory, "*.py"))) else: list_files.extend(glob.glob(os.path.join(start_directory, "*.py"))) except OSError: pass list_test_scenarios = [] for file in list_files: sys.path.append(os.path.dirname(os.path.abspath(file))) test_module = \ importlib.import_module(os.path.basename(file).replace(".py", "")) for name, cls in inspect.getmembers(test_module, inspect.isclass): if cls is not TestScenarioBase \ and issubclass(cls, TestScenarioBase): list_test_scenarios.append(cls) return list_test_scenarios
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getImmediateSubdirectories(dir):", "def open_run_list(base_path, filter=None):\n dir_list = listdir(base_path)\n if not dir_list:\n return []\n if filter is not None:\n filter_list = glob(path.join(base_path, filter))\n filter_list = [path.basename(x) for x in filter_list]\n dir_list = [x for x in dir_list if x in filter_list]\n if not dir_list:\n return []\n dir_list.sort(key=human_order_key)\n return [Run(x) for x in [path.join(base_path, y) for y in dir_list]]", "def fixture_sets(*args):\n return [os.path.join(*args, dir)\n for dir in os.listdir(os.path.join(FIXTURE_DATA, *args))\n if os.path.isdir(os.path.join(FIXTURE_DATA, *args, dir))\n ]", "def test_case_2():\n print(\"*********Test_case_2***********\")\n path = os.path.join(os.path.dirname(__file__), 'testdir')\n result = find_files(None, path)\n print(result)", "def test_case_4():\n print(\"*********Test_case_4***********\")\n path = os.path.join(os.path.dirname(__file__), 'testdir')\n result = find_files('', path)\n for file in result:\n print(file)", "def read_sate_run_folder(directory, rar_fn = \"runs_and_results-it*.csv\",inst_fn = \"instances.txt\" , feat_fn = \"instance-features.txt\" , ps_fn = \"paramstrings-it*.txt\"):\n print((\"reading {}\".format(directory)))\n configs = read_paramstrings_file(find_largest_file(os.path.join(directory,ps_fn)))\n instance_names = read_instances_file(find_largest_file(os.path.join(directory,inst_fn)))\n runs_and_results = read_runs_and_results_file(find_largest_file(os.path.join(directory, rar_fn)))\n\n full_feat_fn = glob.glob(os.path.join(directory,feat_fn))\n if len(full_feat_fn) == 1: \n instance_features = read_instance_features_file(full_feat_fn[0])\n else:\n instance_features = None\n\n return (configs, instance_names, instance_features, runs_and_results)", "def test_scan_dir_files(self):\n self.run_scan(self.subdir, self.nest_fcount + 1)", "def traverse_directory(args) :\n siteRGX = re.compile('DPH.'+args.site.upper())\n s = []\n\n # report non-unique residuals\n for root, dirs, files in os.walk(args.traverse):\n path = root.split('/')\n for gamitFile in files:\n if siteRGX.search(gamitFile):\n gamitFile = root+'/'+gamitFile\n #check for potential duplicates in the same path, only want to use one of the DOH files\n if len(path[-1]) > 4:\n regex = re.compile(root[:-2])\n else:\n regex = re.compile(root)\n\n\n # only check for duplicates when there is more than one network\n # being processed...\n if args.network == 'yyyy_dddnN':\n if len(s) == 0:\n s.append(gamitFile)\n else:\n # for each element in s, check to see if the root path does not match\n # any of the files already stored in the list\n m = 0\n for item in s:\n if regex.search(item) :\n m = 1\n if not m :\n s.append(gamitFile)\n else:\n s.append(gamitFile)\n\n s.sort()\n lines = ''\n # Now loop through each file and consolidate the residuals\n for dfile in s :\n dphs = res.parseDPH(dfile)\n\n # check if the dph files are being searched are from\n #a GAMIT network of type yyyy/dddn?/\n root, filename = os.path.split(dfile)\n if args.network == 'yyyy_dddnN':\n ddd = root[-5:-2]\n year = int(root[-10:-6])\n startDT = dt.datetime(year,01,01)\n startDT = startDT + dt.timedelta(days=(int(ddd) -1))\n elif args.network == 'ddd':\n ddd = root[-3:]\n year = root[-8:-4] \n startDT = dt.datetime(int(year),01,01)\n startDT = startDT + dt.timedelta(days=(int(ddd) -1))\n\n line = res.consolidate(dphs,startDT)\n lines = lines + line\n\n # if its larger than 1GB dump it to a file\n # this is designed to keep the load n the file 
system lighter\n if sys.getsizeof(lines) > 1073741824 :\n f = gzip.open(args.save_file,'a',9)\n f.write(lines)\n f.close()\n lines = ''\n #print(lines)\n\n # dump any remaining memory to file\n f = gzip.open(args.save_file,'a',9)\n f.write(lines)\n f.close()\n lines = ''\n\n return", "def readDirectory():\n tagdir = \"tagreplacements\"\n data = os.listdir(tagdir)\n for d in data:\n processFile(os.path.join(tagdir,d))\n \n #print(repd)", "def navigate_to_cases():\r\n current_dir = os.getcwd() # Get current directory\r\n dir_steps = \"//foamfiles//counterFlowFlame2D//\"\r\n cases_path = current_dir + dir_steps # full path with case folders\r\n case_directory_list = [directory for directory in os.listdir(cases_path) if os.path.isdir(cases_path)]\r\n length_case_directory_list = len(case_directory_list) # length of directory\r\n\r\n print(\"case directory length:\")\r\n print(\"\\n\")\r\n print(case_directory_list)\r\n\r\n return cases_path, length_case_directory_list, case_directory_list", "def Run(self):\n \n if not self.FolderPath == None:\n \n #run the folder manipulation routines...\n self.InitialList, out_0 = self.ReturnRamanFiles(self.FolderPath,'.txt')\n self.Condensensed = self.RamanDBCondenser(out_0)\n self.SetFilters()\n self.Gatherer(self.InitialList,out_0)\n self.BuildTree()", "def get_run_folders():\n return [os.path.join(f, sf) for f in get_date_folders() for sf in os.listdir(f)]", "def some_run_path(experiment_path, filters=None):\n must_be = [\"cfg.yaml\", \".__leaf\"]\n must_not_be = [\".__lock\", \".__crash\", \".__end\", \".__start\"]\n with os.scandir(experiment_path) as fit:\n for entry in fit:\n if not entry.name.startswith(\".\") and entry.is_dir():\n subexp_path = os.path.join(experiment_path, entry.name)\n with os.scandir(subexp_path) as fit2:\n for entry2 in fit2:\n if not entry2.name.startswith(\".\") and entry2.is_dir():\n run_path = os.path.join(subexp_path, entry2.name)\n done_before = False\n mandatory_files = []\n with os.scandir(run_path) as fit3:\n for entry3 in fit3:\n if entry3.name in must_not_be:\n done_before = True\n break\n if entry3.name in must_be:\n mandatory_files.append(entry3.name)\n if done_before or set(mandatory_files) != set(must_be):\n continue\n if filters and not experiment_matches(run_path, filters):\n print(f\"Skipping {run_path:s} as it was filtered out.\")\n continue\n yield run_path", "def load_slurm_folder(p):\n filter_function = lambda f: True if \".out\" in f else False\n slurm_dict = {\"runs\": []}\n for f in filter(filter_function, os.listdir(p)):\n slurm_dict[\"runs\"].append(load_slurm_data(os.path.join(p, f)))\n exit(\"Success!\")", "def walk(dirname): \n for name in os.listdir(dirname):\n path = os.path.join(dirname, name)\n if os.path.isfile(path):\n print(path)\n else:\n walk(path)", "def main():\n argument_parser = argparse.ArgumentParser(add_help=True)\n argument_parser.add_argument(\"directory\", type=str,\n help=\"Directory to detect test smells.\")\n args = argument_parser.parse_args()\n \n if len(sys.argv) < 1:\n \n argument_parser.print_help()\n \n else:\n \n if os.path.exists(args.directory) or os.path.isdir(args.directory):\n\n #Stage 1: project level rule checking\n files = python_parser.get_python_files(os.path.abspath(args.directory))\n results_list = project_rule_runner(files)\n \n #Stage 2: test case level rule checking\n #test_case_pairs_list is a list of test cases paired with their file of origin\n filtered_files = python_parser.filter_python_files(files)\n test_case_pairs_list = 
python_parser.get_test_case_asts(filtered_files)\n \n for test_case_pair in test_case_pairs_list:\n results_list = results_list + test_case_rule_runner(test_case_pair)\n \n #Stage 3: test method level rule checking\n test_method_list = list()\n \n for test_case_pair in test_case_pairs_list:\n test_method_list = test_method_list + python_parser.get_test_asts(test_case_pair)\n \n for test_method in test_method_list: \n results_list = results_list + test_method_rule_runner(test_method)\n \n #Output formatting\n format_output(results_list)\n \n else:\n print(\"Invalid path given.\")", "def gci(path):\n parents = os.listdir(path)\n for parent in parents:\n if parent == \"forgifs\" or parent == \"hilariousgifs\":\n pass\n else:\n child = os.path.join(path,parent)\n #print(child)\n if os.path.isdir(child):\n gci(child)\n else:\n filepath.append(child)\n #print(child)", "def main():\n args = parseArguments()\n setLogger()\n files = ls(args.dirs)\n matches = pad(files)\n if args.dry_run:\n dryRun(matches)\n else:\n move(matches)", "def list_dir(self, path):", "def test_input_folder_recursive(self):\n params = self.default_params.copy()\n params[\"db_prefix\"] = self.results_dir + \"test_input_folder_recursive\"\n params[\"input\"] = data_dir + \"build-custom/files/\"\n params[\"input_extension\"] = \"fna.gz\"\n params[\"input_recursive\"] = True\n \n cfg = Config(\"build-custom\", **params)\n self.assertTrue(run_ganon(cfg, params[\"db_prefix\"]), \"ganon build-custom run failed\")\n res = build_sanity_check_and_parse(vars(cfg))\n self.assertIsNotNone(res, \"ganon build-custom sanity check failed\")\n\n # list files from base folder and \"more\" (got recursively)\n files = list_files_folder(params[\"input\"], ext=params[\"input_extension\"], recursive=True)\n\n self.assertTrue(res[\"target\"][\"file\"].isin(files).all(), \"Files missing from target\")\n self.assertEqual(len(files), res[\"target\"].shape[0], \"Wrong number of files on target\")\n self.assertTrue(res[\"info\"][\"file\"].isin(files).all(), \"Files missing from info\")\n self.assertEqual(len(files), res[\"info\"].shape[0], \"Wrong number of files on info\")", "def walk(dirname):\n for name in os.listdir(dirname):\n path = os.path.join(dirname, name)\n\n if os.path.isfile(path):\n print(path)\n else:\n walk(path)", "def main_list(args):\n return list_commands(args.directory)", "def main(argv):\n\n opts = docopt.docopt(__doc__, version='ftree 0.1')\n\n dirs = opts['<dir>'] or ['.']\n for d in dirs:\n #print d\n print ListTree(d)\n\n return 0", "def examples():\n root_path = None\n find = None\n journey = travelling(root_path=root_path, find=find)\n root_path = journey.root_path\n current_working_directory = journey.cwd\n print(\"\"\"Input parameters:\n{}\"\"\".format(\"-\" * 23))\n print(\"root_path = {}\".format(root_path))\n print(\"find = {}\".format(find))\n print()\n print(\"\"\"Output:\n{}\"\"\".format(\"-\" * 23))\n print(\"Current Working Directory: {}\".format(current_working_directory))\n print()\n absolute_paths = journey.absolute_paths\n print(\"Absolute paths: {}\".format(absolute_paths))\n print()\n relative_paths = journey.relative_paths\n print(\"Relative paths: {}\".format(relative_paths))\n print()\n traveller = journey.travel\n print(\"Total files and directories available: {}\".format(len(traveller)))\n print()\n print(\"\"\"{}\"\"\".format(\"#\" * 75))\n\n print()\n root_path = None\n find = 'spec.json'\n print(\"Identifying '{}' file\".format(find))\n print(\"\"\"{}\"\"\".format(\"-\" * 28))\n journey = 
travelling(root_path=root_path, find=find)\n root_path = journey.root_path\n current_working_directory = journey.cwd\n print(\"\"\"Input parameters:\n{}\"\"\".format(\"-\" * 23))\n print(\"root_path = {}\".format(root_path))\n print(\"find = {}\".format(find))\n print()\n print(\"\"\"Output:\n{}\"\"\".format(\"-\" * 23))\n print(\"Current Working Directory: {}\".format(current_working_directory))\n print()\n absolute_paths = journey.absolute_paths\n print(\"Absolute paths: {}\".format(absolute_paths))\n print()\n relative_paths = journey.relative_paths\n print(\"Relative paths: {}\".format(relative_paths))\n print()\n traveller = journey.travel\n print(\"Total files and directories available: {}\".format(len(traveller)))\n print()\n print(\"\"\"{}\"\"\".format(\"#\" * 75))", "def open_runfolder(\n folder,\n prefixes=\"diag\",\n merge_full=True,\n partial_prefixes=None,\n exclude=None,\n read_input_data=False,\n verb=False,\n):\n\n folder = Path(folder)\n\n data = parse_namelist(folder / \"data\", flat=True, silence_cast_errors=True)\n diags = {}\n if prefixes == \"diag\":\n try:\n diags = parse_namelist(\n folder / \"data.diagnostics\", silence_cast_errors=True\n )\n except FileNotFoundError:\n warn(\"File data.diagnostics not found - switching prefixes to all\")\n prefixes == \"all\"\n\n exclude = exclude or []\n partial_prefixes = partial_prefixes or []\n if verb:\n if exclude:\n print(\"Ne lira pas {}\".format(exclude))\n print(\"Delta T found: {}\".format(data[\"deltat\"]))\n\n if prefixes == \"all\" or prefixes == \"*\":\n all_prefixes = {\n data_file.stem.split(\".\")[0] for data_file in folder.glob(\"*.*.data\")\n }.difference(\n {\n \"pickup\",\n \"pickup_ptracers\",\n *exclude,\n *[prf for prf, _ in partial_prefixes],\n }\n )\n grouped_prefixes = {}\n for prefix in all_prefixes:\n iterations = tuple(\n int(filename.stem.split(\".\")[-1])\n for filename in sorted(folder.glob(prefix + \".*.data\"))\n )\n if len(iterations) > 1 and (len(iterations) > 2 or prefixes == \"all\"):\n grouped_prefixes.setdefault(iterations, []).append(prefix)\n prefixes = grouped_prefixes.values()\n elif prefixes == \"diag\":\n grouped_prefixes = {}\n partial_prefixes = []\n for name, lvls, frq, tph in zip_longest(\n diags[\"DIAGNOSTICS_LIST\"][\"filename\"],\n diags[\"DIAGNOSTICS_LIST\"].get(\"levels\", []),\n diags[\"DIAGNOSTICS_LIST\"][\"frequency\"],\n diags[\"DIAGNOSTICS_LIST\"][\"timephase\"],\n ):\n if isinstance(name, str) and name not in exclude:\n if lvls is None or len(lvls) == 0:\n grouped_prefixes.setdefault((frq, tph), []).append(name)\n else:\n partial_prefixes.append((name, np.array(lvls, dtype=int)))\n prefixes = grouped_prefixes.values()\n\n datasets = OrderedDict()\n for prefix in prefixes:\n if verb:\n print(\"Lecture de {}\".format(prefix))\n if isinstance(prefix, list):\n prf = prefix[0]\n else:\n prf = prefix\n datasets[prf] = open_mdsdataset(\n str(folder),\n geometry=\"cartesian\",\n delta_t=data[\"deltat\"],\n prefix=prefix,\n ignore_unknown_vars=True,\n )\n datasets[prf].attrs.update(\n source=\"Created from files {}*.data/meta\".format(prefix)\n )\n\n if merge_full:\n _, full = datasets.popitem(last=False)\n for dataset in datasets.values():\n full = full.merge(dataset)\n datasets = {\"full\": full}\n\n if len(datasets) > 0:\n first_prf, full = next(iter(datasets.items()))\n else: # No non-partial datasets, so we read the grid.\n first_prf = \"grid\"\n datasets[first_prf] = open_mdsdataset(\n str(folder),\n iters=None,\n prefix=\"\",\n geometry=\"cartesian\",\n 
delta_t=data[\"deltat\"],\n ignore_unknown_vars=True,\n )\n full = datasets[first_prf]\n\n datasets.update(\n {\n prf: open_partial(\n full, folder, prf, lvls, data[\"deltat\"], geometry=\"cartesian\"\n )\n for prf, lvls in partial_prefixes\n }\n )\n\n if read_input_data:\n binfiles = folder.glob(\"*.bin\")\n for binfile in binfiles:\n if verb:\n print(\"Lecture de {}.\".format(binfile.stem))\n try:\n bindata = readIEEEbin(str(binfile), (full.YC.size, full.XC.size))\n except ValueError:\n try:\n bindata = readIEEEbin(\n str(binfile), (full.Z.size, full.YC.size, full.XC.size)\n )\n except ValueError:\n if verb:\n print(\n \"Incapable de lire {varname}, on saute.\".format(\n varname=binfile.stem\n )\n )\n continue\n else:\n full = full.assign(\n {\n binfile.stem: xr.DataArray(\n bindata,\n dims=(\"XC\", \"YC\", \"Z\"),\n coords={\"XC\": full.XC, \"YC\": full.YC, \"Z\": full.Z},\n )\n }\n )\n else:\n full = full.assign(\n {\n binfile.stem: xr.DataArray(\n bindata,\n dims=(\"XC\", \"YC\"),\n coords={\"XC\": full.XC, \"YC\": full.YC},\n )\n }\n )\n datasets[first_prf] = full\n\n for dataset in datasets.values():\n for coord in dataset.coords.values():\n if coord.name.startswith(\"Z\"):\n coord.attrs.update(long_name=\"Depth\")\n\n return datasets", "def read_path():\n global path\n if len(sys.argv) >= 2:\n path = sys.argv[1]\n else:\n path = \"train\"", "def _read_in_folder(dpath, cancer, idc):\n info('input folder is given, read in by cancer types one by one')\n\n all_exist = {x.split('.')[0]: os.path.join(dpath, x)\n for x in os.listdir(dpath)}\n need_cancer = _check_cancer(cancer, list(all_exist.keys()), dpath)\n mat = pd.DataFrame()\n for cancer in need_cancer:\n info('read in exp mat of %s' % cancer)\n # print(idc)\n mat_tmp = _read_in_file(all_exist[cancer], idc)\n mat = pd.concat([mat, mat_tmp]) # rbind\n del mat_tmp\n return(mat)", "def ReadRecipesFromDirectory(self, path: str) -> None:\n for file_path in glob.glob(os.path.join(path, '*.json')):\n self.ReadRecipeFromFile(file_path)", "def main(args, fLOG=print):\n try:\n from .parsers.folders import read_folder\n except ImportError: # pragma: no cover\n from manydataapi.parsers.folders import read_folder\n\n fcts = dict(read_folder=read_folder)\n from pyquickhelper.cli import cli_main_helper\n return cli_main_helper(fcts, args=args, fLOG=fLOG)", "def recurse(path):\n for dirpath, dirnames, filenames in os.walk(path):\n for filename in filenames:\n if filename.endswith('.robot'):\n filepath = os.path.join(dirpath, filename)\n reformat(filepath)" ]
[ "0.63146096", "0.5627409", "0.558369", "0.55248845", "0.5507381", "0.54794055", "0.54717195", "0.541972", "0.5409548", "0.5396662", "0.53879046", "0.5381494", "0.53524697", "0.534816", "0.53151697", "0.5314741", "0.5300426", "0.5292671", "0.5287332", "0.52773887", "0.5202164", "0.5191799", "0.51481193", "0.5138563", "0.5122851", "0.51132154", "0.510957", "0.50929976", "0.5022671", "0.50219953" ]
0.6236096
1
Takes a tuple representing a circle as (x,y,radius) and returns a tuple with the x,y coordinates and width,height (x,y,w,h)
def circle_2_tuple(circle): assign_coord = lambda x,y: x - y if x > y else 0 x = assign_coord(circle[0],circle[2]) y = assign_coord(circle[1],circle[2]) assign_size = lambda x,y : y*2 if x > y else y*2 - (y-x) w = assign_size(circle[0],circle[2]) h = assign_size(circle[1],circle[2]) return (x,y,w,h)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def circle_2_bbox(circle):\n x,y,w,h = circle_2_tuple(circle)\n return ((x,y),(x+w,y+h))", "def circleInfo(r):\n c = 2 * 3.14159 * r\n a = 3.14159 * r * r\n return (c, a)", "def _resolve_size(self, width, height, center_x, center_y):\n if self.size_type == 'explicit':\n size_x, size_y = self.size\n size_x = percentage(size_x, width)\n size_y = percentage(size_y, height)\n return size_x, size_y\n left = abs(center_x)\n right = abs(width - center_x)\n top = abs(center_y)\n bottom = abs(height - center_y)\n pick = min if self.size.startswith('closest') else max\n if self.size.endswith('side'):\n if self.shape == 'circle':\n size_xy = pick(left, right, top, bottom)\n return size_xy, size_xy\n # else: ellipse\n return pick(left, right), pick(top, bottom)\n # else: corner\n if self.shape == 'circle':\n size_xy = pick(math.hypot(left, top), math.hypot(left, bottom),\n math.hypot(right, top), math.hypot(right, bottom))\n return size_xy, size_xy\n # else: ellipse\n corner_x, corner_y = pick(\n (left, top), (left, bottom), (right, top), (right, bottom),\n key=lambda a: math.hypot(*a))\n return corner_x * math.sqrt(2), corner_y * math.sqrt(2)", "def get_radius(size):\n return (size * 10) - 5", "def oncircle(size=None):\n if size is None:\n size = ()\n else:\n try:\n size = tuple(size)\n except TypeError:\n size = (size,)\n # This beats normalizing incircle for all sizes, even though that\n # should be the superior algorithm for compiled code.\n theta = 2.*pi * random(size + (1,))\n return concatenate((cos(theta), sin(theta)), axis=-1)", "def yolo_coords(self, \n img_size: \"tuple[int, int]\"\n ) -> \"tuple[float, float, float, float]\":\n img_w, img_h = img_size\n return self.xmid / img_w, self.ymid / img_h, self.width / img_w, self.height / img_h", "def get_circle_coords(self, radius, divider, count,center_x, center_y):\n\n angle_deg = (360/divider)*count\n angle = radians(angle_deg-(90 + (360/divider)))\n x = radius*cos(angle) + center_x;\n y = radius*sin(angle) + center_y;\n return (int(x), int(y))", "def shape(self) -> tuple[int, int]:\n return self.height, self.width", "def circleArea(radius):\n return math.pi * radius * radius", "def size(self) -> Tuple[int, int]:\n return (self.width, self.height)", "def _generate_circle(self, center, radius):\n assert len(center) in [2, 3], 'Center of circle must have 2 or 3 elements'\n assert radius > 0, 'Radius must be greater than zero'\n return Point(*center).buffer(radius)", "def make_circle(x, y, r):\n\tnew_circle = Circle()\n\tnew_circle.x = x\n\tnew_circle.y = y\n\tnew_circle.r = r\n\treturn new_circle", "def random_shape(height, width):\n # Shape\n shape = random.choice([\"square\", \"circle\", \"triangle\"])\n # Color\n color = tuple([random.randint(0, 255) for _ in range(3)])\n # Center x, y\n buffer = 20\n y = random.randint(buffer, height - buffer - 1)\n x = random.randint(buffer, width - buffer - 1)\n # Size\n s = random.randint(buffer, height // 4)\n return shape, color, (x, y, s)", "def createCircle(self, x, y, radius):\n # TODO (#2398) fix this to be top left coordinates, width, height\n return QtCore.QRectF(\n int(x - radius), int(y - radius), int(radius * 2), int(radius * 2)\n )", "def generate_circle(R,center,N=100,t0=0.0,t1=2.0*np.pi):\r\n theta = np.linspace(t0,t0+t1,N)\r\n y = R*np.sin(theta) + center[1]\r\n x = R*np.cos(theta) + center[0]\r\n return x,y", "def find_center(r):\n cx=r.corner.x+(r.width/2)\n cy=r.corner.y+(r.height/2)\n return cx,cy", "def circle(t, r):\n circumference = math.pi * 2 * r\n n = 60\n length = 
circumference / n\n polygon(t, length, n)", "def GetCircle(circle):\r\n pass", "def circle(r=0):\n\tteta = 2*pi*random()\n\tx = (r+1)*cos(teta) + L//2\n\ty = (r+1)*sin(teta) + L//2\n\t\n\ti = int(x) + 1\n\tj = int(y) + 1\n\tprint(r)\n\treturn i,j", "def size(self):\n return (self.width, self.height)", "def get_circle(a, b, c):\n vec = [a[0]**2 + a[1]**2, b[0]**2 + b[1]**2, c[0]**2 + c[1]**2]\n x_mat = [vec, [a[1], b[1], c[1]], [1]*3]\n y_mat = [vec, [a[0], b[0], c[0]], [1]*3]\n d_mat = [[a[0], b[0], c[0]], [a[1], b[1], c[1]], [1] * 3]\n d = 2 * det(d_mat)\n x = 1 / d * det(x_mat)\n y = -1 / d * det(y_mat)\n center = [x, y]\n #r = norm(center - a)\n r = norm([center[0]-a[0], center[1]-a[1]])\n return center, r", "def pointOnCircle(cx, cy, radius, angle):\n angle = math.radians(angle) - (math.pi / 2)\n x = cx + radius * math.cos(angle)\n if x < cx:\n x = math.ceil(x)\n else:\n x = math.floor(x)\n\n y = cy + radius * math.sin(angle)\n\n if y < cy:\n y = math.ceil(y)\n else:\n y = math.floor(y)\n\n return (int(x), int(y))", "def get_circle_coords(center, r):\n circle = [[r, 180* phi/3.14159265] for phi in range(0, 180, 5)]\n circle = [pol2cart(p[0], p[1]) + (center[0], center[1]) for p in circle]\n return circle", "def calculatesize(self, size):\n wsize = self.layout.size\n x = (wsize[0] * size[0]) / 100\n y = (wsize[1] * size[1]) / 100\n return x, y", "def get_width_and_height_from_size(x):\n if isinstance(x, int):\n return x, x\n if isinstance(x, list) or isinstance(x, tuple):\n return x\n else:\n raise TypeError()", "def circle_point(radius, phi):\n if radius <= 0:\n raise AssertionError('Radius mast be grater than 0')\n x = radius * cos(radians(phi))\n y = radius * sin(radians(phi))\n z = 0\n\n return x, y, z", "def circle():\n xmin=0\n xmax=6.5\n ymin=0.\n ymax=6.5\n\n x = arange(xmin, xmax, 0.005)\n y = x*1.\n [xx, yy] = meshgrid(x, y)\n\n zz=sqrt((xx-3.2475)**2.+(yy-3.2475)**2.)\n zz2=zz*1.\n zz2[(zz <= 3.25)]=1.\n zz2[(zz <= 3.25*0.2)]=0.\n zz2[(zz > 3.25)]=0.\n zz3=zeros(numpy.array(numpy.shape(zz2))/10)\n for i in arange(len(xx)/10):\n for j in arange(len(yy)/10):\n zz3[i,j]=numpy.sum(zz2[(i*10):(i*10+10),(j*10):(j*10+10)])/100.\n\n return zz3", "def discretized_circle(radius, n_pts):\n x1 = np.zeros(n_pts)\n y1 = np.zeros(n_pts)\n for i in range(0, n_pts):\n x1[i] = np.cos(2 * np.pi / n_pts * i) * radius\n y1[i] = np.sin(2 * np.pi / n_pts * i) * radius\n\n x2 = np.roll(x1, -1)\n y2 = np.roll(y1, -1)\n return x1, y1, x2, y2", "def incircle(size=None):\n if size is None:\n size = ()\n else:\n try:\n size = tuple(size)\n except TypeError:\n size = (size,)\n n = int(prod(size))\n if n < 330:\n # For small n, interpreted overhead dominates. Using sin and cos\n # results in fewer interpreted instructions than rejection method.\n # Compiled code should never use this algorithm.\n t, z = random((2,) + size + (1,))\n t *= 2. 
* pi\n return sqrt(z) * concatenate((cos(t), sin(t)), axis=-1)\n # Beats this slightly:\n # xy = standard_normal(size + (2,))\n # return xy * expm1(-0.5 * (xy*xy).sum(axis=-1, keepdims=True))\n # For large n, higher intrinsic cost of sin and cos compared to\n # rejection method dominates, and it is worth taking a few more\n # interpreted instructions to benefit from the superior algorithm.\n nmore = n\n p = []\n fac = 4./pi # 1/prob random point in unit circle\n while nmore > 0: # Odds of needing another pass < 0.0001.\n m = int((nmore + 5.*sqrt(nmore))*fac)\n q = 2.*random((m, 2)) - 1.\n q = q[(q * q).sum(axis=-1) < 1., :]\n p.append(q)\n nmore -= len(q)\n return concatenate(p)[:n].reshape(size + (2,))", "def size(self):\n return (self.width(), self.height())" ]
[ "0.6815439", "0.67740446", "0.6597744", "0.64084023", "0.63581634", "0.6175593", "0.6125594", "0.6088099", "0.6076769", "0.60566986", "0.6024376", "0.5960171", "0.5957911", "0.5952948", "0.59458065", "0.5938926", "0.5935301", "0.59228104", "0.59222513", "0.5917145", "0.58829415", "0.5880527", "0.5867597", "0.58658355", "0.58645207", "0.5827263", "0.5797308", "0.5796451", "0.57907784", "0.57874787" ]
0.82615507
0
Takes a tuple representing a circle as (x,y,radius) and returns a tuple representing a bbox ((x,y),(x',y'))
def circle_2_bbox(circle): x,y,w,h = circle_2_tuple(circle) return ((x,y),(x+w,y+h))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def circle_2_tuple(circle):\n assign_coord = lambda x,y: x - y if x > y else 0\n x = assign_coord(circle[0],circle[2])\n y = assign_coord(circle[1],circle[2])\n\n assign_size = lambda x,y : y*2 if x > y else y*2 - (y-x) \n w = assign_size(circle[0],circle[2])\n h = assign_size(circle[1],circle[2])\n return (x,y,w,h)", "def bbox(\n bbox: Tuple[Coordinate, Coordinate] = ((-1.0, -1.0), (3.0, 4.0)),\n layer: Tuple[int, int] = (1, 0),\n top: float = 0,\n bottom: float = 0,\n left: float = 0,\n right: float = 0,\n) -> gf.Component:\n D = gf.Component()\n (xmin, ymin), (xmax, ymax) = bbox\n points = [\n [xmin - left, ymin - bottom],\n [xmax + right, ymin - bottom],\n [xmax + right, ymax + top],\n [xmin - left, ymax + top],\n ]\n D.add_polygon(points, layer=layer)\n return D", "def bbox_from_circle(img, circles):\n seg_imgs = []\n bboxes = []\n aux = img.copy()\n for i,el in enumerate(circles):\n bbox = circle_2_bbox(el['coord'])\n bbox = fix_bbox(bbox,aux.shape)\n cv.rectangle(aux,bbox[0],bbox[1],(0,255,0))\n bboxes.append(bbox)\n return bboxes", "def bbox_rel(*xyxy):\n bbox_w = abs(xyxy[0].item() - xyxy[2].item())\n bbox_h = abs(xyxy[1].item() - xyxy[3].item())\n \n x_c = (xyxy[0].item() + xyxy[2].item()) /2\n y_c = (xyxy[1].item() + xyxy[3].item()) /2\n\n w = bbox_w\n h = bbox_h\n return x_c, y_c, w, h", "def make_yolo_bbox(width, height, x1, y1, x2, y2):\n x1, y1 = x1 / width, y1 / height\n x2, y2 = x2 / width, y2 / height\n w = (x2 - x1) \n h = (y2 - y1) \n center_x = x1 + w/2\n center_y = y1 + h/2\n \n return center_x, center_y, w, h", "def get_yolo_bbox(width, height, c_x, c_y, w_r, h_r):\n x1 = int((c_x - w_r/2) * width)\n y1 = int((c_y - h_r/2) * height)\n\n x2 = int((c_x + w_r/2) * width)\n y2 = int((c_y + h_r/2) * height)\n\n\n p_leftEnd = x1, y1\n p_rightEnd = x2, y2\n\n return p_leftEnd, p_rightEnd", "def bounding_box(primitive):\n\n if primitive[\"shape\"] == \"circle\":\n bbox = [[primitive[\"center\"][0] - primitive[\"radius\"],\n primitive[\"center\"][1] - primitive[\"radius\"]],\n [primitive[\"center\"][0] + primitive[\"radius\"],\n primitive[\"center\"][1] + primitive[\"radius\"]]]\n else:\n x_coords, y_coords = zip(*primitive[\"vertices\"])\n bbox = [[min(x_coords), min(y_coords)],\n [max(x_coords), max(y_coords)]]\n\n primitive[\"bounding_box\"] = bbox\n return primitive", "def bbox(bbox = [(-1, -1), (3, 4)], layer = 0):\n D = Device(name = 'bbox')\n (a,b), (c,d) = bbox\n points = ((a,b), (c,b), (c,d), (a,d))\n D.add_polygon(points, layer = layer)\n return D", "def bbox_rel(*xyxy):\n bbox_left = min([xyxy[0].item(), xyxy[2].item()])\n bbox_top = min([xyxy[1].item(), xyxy[3].item()])\n bbox_w = abs(xyxy[0].item() - xyxy[2].item())\n bbox_h = abs(xyxy[1].item() - xyxy[3].item())\n x_c = (bbox_left + bbox_w / 2)\n y_c = (bbox_top + bbox_h / 2)\n w = bbox_w\n h = bbox_h\n return x_c, y_c, w, h", "def bbox(self):\n lower = (self.x.min(), self.y.min())\n upper = (self.x.max(), self.y.max())\n return (lower, upper)", "def bbox_center(bbox):\n y, x, h, w = bbox\n return int(y + h/2), int(x + w/2)", "def get_center_point(bbox):\n x_middle = 42\n y_middle = 42\n\n # HINT: bbox.xmin, bbox,xmax, bbox.ymin, bbox.ymax\n return (x_middle, y_middle)", "def bbox(self, node):\n node_id = node.get('id')\n #inkex.utils.debug(\"Check if \" + str(node_id) + \" is in \" + str(self.node_info))\n info = self.node_info[node_id] \n \n x = info.x\n y = info.y\n width = info.width\n height = info.height\n\n return Box(Point(x, y),\n Point(x + width, y),\n Point(x + width, y + height),\n Point(x, y + 
height))", "def get_bounding_box(self):\n lon, lat = self.coordinates\n\n ll = (np.min(lon),np.min(lat))\n ul = (np.min(lon),np.max(lat))\n ur = (np.max(lon),np.max(lat))\n lr = (np.max(lon),np.min(lat))\n\n return (ll, ul, ur, lr)", "def get_bounding_box(uv_coor, shape):\r\n\txmin = ymin = 99999\r\n\txmax = ymax = 0\r\n\tfor x, y in uv_coor:\r\n\t\txmin = min(xmin, int(x))\r\n\t\txmax = max(xmax, int(x))\r\n\t\tymin = min(ymin, int(y))\r\n\t\tymax = max(ymax, int(y))\r\n\txmin = max(0, xmin - 20)\r\n\tymin = max(0, ymin - 20)\r\n\r\n\txmax = min(shape[1], xmax + 20)\r\n\tymax = min(shape[0], ymax + 20)\r\n\r\n\treturn xmin, xmax, ymin, ymax", "def pointgraph_from_circle(fitting):\n diameter = fitting.diameter\n radius = diameter / 2.0\n y, x = fitting.center\n y -= radius\n x -= radius\n return bounding_box((y, x), (y + diameter, x + diameter))", "def coord_center2corner(bbox):\n\n x, y = bbox.new([bbox[0]]), bbox.new([bbox[1]])\n w, h = bbox.new([bbox[2]]), bbox.new([bbox[3]])\n x1 = x - torch.floor(w / 2)\n y1 = y - torch.floor(h / 2)\n x2 = x + torch.floor(w / 2)\n y2 = y + torch.floor(h / 2)\n\n return x1, y1, x2, y2", "def get_bbox(x,y, buffer=0.):\n return dict(left=np.min(x), \n right=np.max(x), \n bottom=np.min(y), \n top=np.max(y))", "def boundingBox(self):\n y_max = np.max(self.points[:,0])\n x_max = np.max(self.points[:,1])\n y_min = np.min(self.points[:,0])\n x_min = np.min(self.points[:,1])\n \n return ((x_max, y_max), (x_min, y_min))", "def bounding_box(coords):\n min_x = min(coords, key = lambda p: p[0])[0]\n min_y = min(coords, key = lambda p: p[1])[1]\n max_x = max(coords, key = lambda p: p[0])[0]\n max_y = max(coords, key = lambda p: p[1])[1]\n print(min_x)\n print(min_y)\n print(max_x)\n print(max_y)\n return (min_x, max_y), (max_x, min_y)", "def bounding_box(self):\n if self._owcs.pixel_bounds is None:\n if self._owcs.pixel_shape is not None:\n nx, ny = self._owcs.pixel_shape\n elif self._owcs.array_shape is not None:\n ny, nx = self._owcs.array_shape\n else:\n return None\n\n return ((-0.5, nx - 0.5), (-0.5, ny - 0.5))\n\n else:\n return self._owcs.pixel_bounds", "def bounding_box(self):\n if self._owcs.pixel_bounds is None:\n if self._owcs.pixel_shape is not None:\n nx, ny = self._owcs.pixel_shape\n elif self._owcs.array_shape is not None:\n ny, nx = self._owcs.array_shape\n else:\n return None\n\n return ((-0.5, nx - 0.5), (-0.5, ny - 0.5))\n\n else:\n return self._owcs.pixel_bounds", "def bbox(x):\n if ispoint(x):\n return pointbbox(x)\n elif isline(x):\n return linebbox(x)\n elif isarc(x):\n return arcbbox(x)\n elif ispoly(x):\n return polybbox(x)\n elif isgeomlist(x):\n return geomlistbbox(x)\n else:\n raise ValueError(\"inappropriate type for bbox(): \",format(x))", "def get_rand_bbox_coord(\n w: int, h: int, len_ratio: float\n) -> Tuple[Tuple[int, int], Tuple[int, int]]:\n size_hole_w = int(len_ratio * w)\n size_hole_h = int(len_ratio * h)\n x = random.randint(0, w) # [0, w]\n y = random.randint(0, h) # [0, h]\n\n x0 = max(0, x - size_hole_w // 2)\n y0 = max(0, y - size_hole_h // 2)\n x1 = min(w, x + size_hole_w // 2)\n y1 = min(h, y + size_hole_h // 2)\n return (x0, y0), (x1, y1)", "def get_bbox(self) -> Tuple[Vec, Vec]:\n if self.is_brush():\n bbox_min, bbox_max = self.solids[0].get_bbox()\n for s in self.solids[1:]:\n side_min, side_max = s.get_bbox()\n bbox_max.max(side_max)\n bbox_min.min(side_min)\n return bbox_min, bbox_max\n else:\n origin = self.get_origin()\n # the bounding box is 0x0 large for a point ent basically\n return origin, origin.copy()", 
"def boundingBox(points):\n min_x, min_y = float('inf'), float('inf')\n max_x, max_y = float('-inf'), float('-inf')\n for x, _, y in points:\n min_x = min(min_x, x)\n min_y = min(min_y, y)\n max_x = max(max_x, x)\n max_y = max(max_y, y)\n\n return (min_x, min_y), (max_x, min_y), (max_x, max_y), (min_x, max_y)", "def get_bbox(self, crs=None):\n if len(self) != 0:\n x, y = self.get_coordinate_lists(crs=crs)\n return (min(x), min(y), max(x), max(y))\n else:\n return (np.nan, np.nan, np.nan, np.nan)", "def boundingCircle(self):\n\n try:\n import cv2\n except:\n logger.warning(\"Unable to import cv2\")\n return None\n\n # contour of the blob in image\n contour = self.contour()\n\n points = []\n # list of contour points converted to suitable format to pass into cv2.minEnclosingCircle()\n for pair in contour:\n points.append([[pair[0], pair[1]]])\n\n points = np.array(points)\n\n (cen, rad) = cv2.minEnclosingCircle(points);\n\n return (cen[0], cen[1], rad)", "def bbox_rel(self, *xyxy):\r\n bbox_left = min([xyxy[0].item(), xyxy[2].item()])\r\n bbox_top = min([xyxy[1].item(), xyxy[3].item()])\r\n bbox_w = abs(xyxy[0].item() - xyxy[2].item())\r\n bbox_h = abs(xyxy[1].item() - xyxy[3].item())\r\n x_c = (bbox_left + bbox_w / 2)\r\n y_c = (bbox_top + bbox_h / 2)\r\n w = bbox_w\r\n h = bbox_h\r\n return x_c, y_c, w, h", "def fix_bbox(bbox,img_shape):\n x = min(bbox[1][0],img_shape[1])\n y = min(bbox[1][1],img_shape[0])\n return ((bbox[0]),(x,y))" ]
[ "0.7212098", "0.6748863", "0.6743238", "0.6730478", "0.67082477", "0.66678756", "0.66592455", "0.66318727", "0.6586817", "0.65842336", "0.6532223", "0.6481017", "0.6468795", "0.6422326", "0.6373362", "0.63589585", "0.635091", "0.6347281", "0.6332991", "0.63162756", "0.63071877", "0.63071877", "0.630461", "0.62914133", "0.62557244", "0.62502337", "0.624417", "0.6222251", "0.6218008", "0.61937594" ]
0.87923753
0
Takes a tuple of tuples representing a bbox ((x,y),(x',y')) and returns the bbox with its far corner clamped to the image dimensions
def fix_bbox(bbox,img_shape): x = min(bbox[1][0],img_shape[1]) y = min(bbox[1][1],img_shape[0]) return ((bbox[0]),(x,y))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def circle_2_bbox(circle):\n x,y,w,h = circle_2_tuple(circle)\n return ((x,y),(x+w,y+h))", "def bbox(self):\n lower = (self.x.min(), self.y.min())\n upper = (self.x.max(), self.y.max())\n return (lower, upper)", "def bbox2points(bbox):\r\n l, x, y, w, h = bbox\r\n xmin = int(round(x - (w / 2)))\r\n xmax = int(round(x + (w / 2)))\r\n ymin = int(round(y - (h / 2)))\r\n ymax = int(round(y + (h / 2)))\r\n return (l, xmin, ymin, xmax, ymax)", "def bbox(\n bbox: Tuple[Coordinate, Coordinate] = ((-1.0, -1.0), (3.0, 4.0)),\n layer: Tuple[int, int] = (1, 0),\n top: float = 0,\n bottom: float = 0,\n left: float = 0,\n right: float = 0,\n) -> gf.Component:\n D = gf.Component()\n (xmin, ymin), (xmax, ymax) = bbox\n points = [\n [xmin - left, ymin - bottom],\n [xmax + right, ymin - bottom],\n [xmax + right, ymax + top],\n [xmin - left, ymax + top],\n ]\n D.add_polygon(points, layer=layer)\n return D", "def bbox(bbox = [(-1, -1), (3, 4)], layer = 0):\n D = Device(name = 'bbox')\n (a,b), (c,d) = bbox\n points = ((a,b), (c,b), (c,d), (a,d))\n D.add_polygon(points, layer = layer)\n return D", "def bounding_box(coords):\n min_x = min(coords, key = lambda p: p[0])[0]\n min_y = min(coords, key = lambda p: p[1])[1]\n max_x = max(coords, key = lambda p: p[0])[0]\n max_y = max(coords, key = lambda p: p[1])[1]\n print(min_x)\n print(min_y)\n print(max_x)\n print(max_y)\n return (min_x, max_y), (max_x, min_y)", "def bbox_rel(*xyxy):\n bbox_w = abs(xyxy[0].item() - xyxy[2].item())\n bbox_h = abs(xyxy[1].item() - xyxy[3].item())\n \n x_c = (xyxy[0].item() + xyxy[2].item()) /2\n y_c = (xyxy[1].item() + xyxy[3].item()) /2\n\n w = bbox_w\n h = bbox_h\n return x_c, y_c, w, h", "def convert_bbox(bbox, width, height):\n min_x, min_y, max_x, max_y = bbox\n # scale X axis\n min_x *= width\n max_x *= width\n # invert Y axis and scale\n min_y = (1 - min_y) * height\n max_y = (1 - max_y) * height\n\n return min_x, min_y, max_x, max_y", "def coords_to_bbox(coords):\n min_y, min_x, max_y, max_x = coords[0].min(), coords[1].min(), coords[0].max(), coords[1].max()\n return min_y, min_x, max_y - min_y, max_x - min_x", "def bbox_rel(*xyxy):\n bbox_left = min([xyxy[0].item(), xyxy[2].item()])\n bbox_top = min([xyxy[1].item(), xyxy[3].item()])\n bbox_w = abs(xyxy[0].item() - xyxy[2].item())\n bbox_h = abs(xyxy[1].item() - xyxy[3].item())\n x_c = (bbox_left + bbox_w / 2)\n y_c = (bbox_top + bbox_h / 2)\n w = bbox_w\n h = bbox_h\n return x_c, y_c, w, h", "def points_to_bbox(p):\n llx = urx = p[0][0]\n lly = ury = p[0][1]\n for x in p[1:]:\n if x[0] < llx:\n llx = x[0]\n elif x[0] > urx:\n urx = x[0]\n if x[1] < lly:\n lly = x[1]\n elif x[1] > ury:\n ury = x[1]\n return llx, lly, urx, ury", "def boundingBox(self):\n y_max = np.max(self.points[:,0])\n x_max = np.max(self.points[:,1])\n y_min = np.min(self.points[:,0])\n x_min = np.min(self.points[:,1])\n \n return ((x_max, y_max), (x_min, y_min))", "def get_bbox(x,y, buffer=0.):\n return dict(left=np.min(x), \n right=np.max(x), \n bottom=np.min(y), \n top=np.max(y))", "def xywh_xyxy(boxes):\n bbox = np.zeros(boxes.shape)\n bbox[:, 0] = boxes[:, 0] \n bbox[:, 1] = boxes[:, 1] \n bbox[:, 2] = boxes[:, 0] + 1 * boxes[:, 2]\n bbox[:, 3] = boxes[:, 1] + 1 * boxes[:, 3]\n return bbox", "def get_bbox(self) -> Tuple[Vec, Vec]:\n if self.is_brush():\n bbox_min, bbox_max = self.solids[0].get_bbox()\n for s in self.solids[1:]:\n side_min, side_max = s.get_bbox()\n bbox_max.max(side_max)\n bbox_min.min(side_min)\n return bbox_min, bbox_max\n else:\n origin = self.get_origin()\n # the bounding box 
is 0x0 large for a point ent basically\n return origin, origin.copy()", "def bboxtransform(bbox):\n gta = np.zeros((len(bbox),4))\n for i in range(len(bbox)):\n cx = bbox[i,0]\n cy = bbox[i,1]\n w = bbox[i,2]\n h = bbox[i,3]\n gta[i,0] = cx - (w / 2.)\n gta[i,1] = cy - (h / 2.)\n gta[i,2] = cx + (w / 2.)\n gta[i,3] = cy + (h / 2.)\n return gta", "def _box2d_to_bbox(pg_box2d: str) -> Tuple[float, float, float, float]:\n m = _BOX2D_PATTERN.match(pg_box2d)\n if m is None:\n raise RuntimeError(f\"Unexpected postgis box syntax {pg_box2d!r}\")\n\n # We know there's exactly four groups, but type checker doesn't...\n # noinspection PyTypeChecker\n return tuple(float(m) for m in m.groups())", "def bbox_center(bbox):\n y, x, h, w = bbox\n return int(y + h/2), int(x + w/2)", "def normalize_bbox(bbox: TBox, rows: int, cols: int) -> TBox:\n\n if rows <= 0:\n raise ValueError(\"Argument rows must be positive integer\")\n if cols <= 0:\n raise ValueError(\"Argument cols must be positive integer\")\n\n tail: Tuple[Any, ...]\n (x_min, y_min, x_max, y_max), tail = bbox[:4], tuple(bbox[4:])\n\n x_min, x_max = x_min / cols, x_max / cols\n y_min, y_max = y_min / rows, y_max / rows\n\n return cast(BoxType, (x_min, y_min, x_max, y_max) + tail) # type: ignore", "def bbox(self):\n return [self._x0, self._y0, self._x1, self._y1]", "def polybbox(a):\n if len(a) == 0:\n return False\n elif len(a) == 1:\n return pointbbox(a[0])\n else:\n minx = maxx = a[0][0]\n miny = maxy = a[0][1]\n for i in range(1,len(a)):\n x=a[i][0]\n y=a[i][1]\n if x < minx:\n minx =x\n elif x > maxx:\n maxx = x\n if y < miny:\n miny = y\n elif y > maxy:\n maxy = y\n return [ point(minx,miny),point(maxx,maxy)]", "def make_yolo_bbox(width, height, x1, y1, x2, y2):\n x1, y1 = x1 / width, y1 / height\n x2, y2 = x2 / width, y2 / height\n w = (x2 - x1) \n h = (y2 - y1) \n center_x = x1 + w/2\n center_y = y1 + h/2\n \n return center_x, center_y, w, h", "def bbox_vflip(bbox: BoxInternalType, rows: int, cols: int) -> BoxInternalType: # skipcq: PYL-W0613\n x_min, y_min, x_max, y_max = bbox[:4]\n return x_min, 1 - y_max, x_max, 1 - y_min", "def get_boundingbox(self):\n tile_iterator = iter(self)\n (coordinate,tile) = next(tile_iterator)\n assert(tile is not None)\n min_x = coordinate[0]\n max_x = min_x + 1\n min_y = coordinate[1]\n max_y = min_y + 1\n\n for (coordinate,tile) in tile_iterator:\n\n if coordinate[0] < min_x:\n min_x = coordinate[0]\n if coordinate[0]+1> max_x:\n max_x = coordinate[0] +1\n if coordinate[1] < min_y:\n min_y = coordinate[1]\n if coordinate[1]+1> max_y:\n max_y = coordinate[1] +1\n\n return ((min_x, min_y), (max_x, max_y))", "def xy_to_bbox(xy, buffer=None):\n if buffer is None:\n buffer = 0\n x0 = xy[:, 0].min() - buffer\n y0 = xy[:, 1].min() - buffer\n x1 = xy[:, 0].max() + buffer\n y1 = xy[:, 1].max() + buffer\n return [x0, y0, x1, y1]", "def api_bbox(bbox, srid=None, buffer=0.0):\n srid = srid or settings.SRID\n wkt_box = 'POLYGON(({0} {1}, {2} {1}, {2} {3}, {0} {3}, {0} {1}))'\n wkt = wkt_box.format(*bbox)\n native = wkt_to_geom(wkt, srid_from=srid)\n if srid != API_SRID:\n native.transform(API_SRID)\n if buffer > 0:\n extent = native.extent\n width = extent[2] - extent[0]\n native = native.buffer(width * buffer)\n return tuple(native.extent)", "def get_bounding_box(self):\n lon, lat = self.coordinates\n\n ll = (np.min(lon),np.min(lat))\n ul = (np.min(lon),np.max(lat))\n ur = (np.max(lon),np.max(lat))\n lr = (np.max(lon),np.min(lat))\n\n return (ll, ul, ur, lr)", "def rect(coords : Tuple[int, int]) -> Tuple[int, int, int, 
int]:\n min_x = min([x for x, _ in coords])\n max_x = max([x for x, _ in coords])\n min_y = min([y for _, y in coords])\n max_y = max([y for _, y in coords])\n\n return (min_x, max_x, min_y, max_y)", "def optimize_bbox(img_shape,\n bbox,\n edge_width=8):\n (rows,columns) = img_shape\n (x1,y1,x2,y2) = bbox\n\n return max(0,x1-edge_width),max(0,y1-edge_width),min(rows-1,x2+edge_width),min(columns-1,y2+edge_width)", "def boundingBox(points):\n min_x, min_y = float('inf'), float('inf')\n max_x, max_y = float('-inf'), float('-inf')\n for x, _, y in points:\n min_x = min(min_x, x)\n min_y = min(min_y, y)\n max_x = max(max_x, x)\n max_y = max(max_y, y)\n\n return (min_x, min_y), (max_x, min_y), (max_x, max_y), (min_x, max_y)" ]
[ "0.74169517", "0.7330232", "0.73051816", "0.7260692", "0.72117823", "0.71556735", "0.711998", "0.70630515", "0.6968945", "0.6965542", "0.6959953", "0.68821084", "0.68737143", "0.68725014", "0.6858501", "0.68244123", "0.67616284", "0.67497444", "0.67070234", "0.66811466", "0.66633505", "0.6655659", "0.6646154", "0.6633142", "0.66301894", "0.6626409", "0.66137046", "0.6602287", "0.659943", "0.65884906" ]
0.75615424
0
Draws bboxes in an image given an array of circles [(x,y,radius)]
def bbox_from_circle(img, circles): seg_imgs = [] bboxes = [] aux = img.copy() for i,el in enumerate(circles): bbox = circle_2_bbox(el['coord']) bbox = fix_bbox(bbox,aux.shape) cv.rectangle(aux,bbox[0],bbox[1],(0,255,0)) bboxes.append(bbox) return bboxes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw_bboxes(img, bboxes, color=(0, 0, 255), thick=6):\n draw_img = np.copy(img)\n # Draw rectangles given bbox coordinates as opposing coordinates\n # bboxes = opposing coordinates: (x1,y1), (x2,y2)\n [cv2.rectangle(draw_img, bbox[0], bbox[1], color, thick) for bbox in bboxes]\n return draw_img", "def show_centre_of_bbox(self, image, objects):\n for obj in objects:\n image = cv2.circle(image, \n (int(obj.centre_cords[0] * self.x), int(obj.centre_cords[1] * self.y)), \n radius=5, \n color=AXE_COLOR, \n thickness=-1)\n \n return image", "def draw_boxes(image, bboxes, color=(0., 0., 1.0), thick=6):\n # make a copy of the image\n draw_img = np.copy(image)\n # draw each bounding box on your image copy using cv2.rectangle()\n for bbox in bboxes:\n # Draw a rectangle given bbox coordinates\n cv2.rectangle(draw_img, bbox[0], bbox[1], color, thick)\n # return the image copy with boxes drawn\n return draw_img", "def drawbboxes(img, bboxes, labels):\n thickness = 5\n color = (0, 255, 0)\n for bbox in bboxes:\n # top-left is x1, y1; bottom-right is x2,y2\n x1, y1, x2, y2, prob, category = (\n int(bbox[0]),\n int(bbox[1]),\n int(bbox[2]),\n int(bbox[3]),\n round(bbox[4], 2),\n labels[int(bbox[5])],\n )\n img = cv.rectangle(img, (x1, y1), (x2, y2), color, thickness)\n img = cv.putText(\n img,\n f\"Label: {category} ({prob})\",\n (x1, y1 - 10),\n 0,\n 0.5,\n color,\n thickness // 3,\n )\n return img", "def draw_boxes_on_image(img, bboxes, color=(0, 0, 1), thick=6):\n imcopy = np.copy(img)\n\n for bbox in bboxes:\n cv2.rectangle(imcopy, bbox[0], bbox[1], color, thick)\n\n return imcopy", "def draw_boxes(bboxes: [[int]], img: 'np.array', line_width: int=2) -> 'np.array':\n for x, y, w, h in bboxes:\n cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), line_width)\n return img", "def draw_boxes(img, bboxes, color=(0, 0, 255), thick=6):\n # make a copy of the image\n imcopy = np.copy(img)\n # draw each bounding box on your image copy using cv2.rectangle()\n # Iterate through the bounding boxes\n for bbox in bboxes:\n # Draw a rectangle given bbox coordinates\n cv2.rectangle(imcopy, bbox[0], bbox[1], color, thick)\n # return the image copy with boxes drawn\n return imcopy", "def draw_bboxes(img, bboxes):\n colors = tf.cast(np.array([[1, 0, 0, 1]] * 10), dtype=tf.float32)\n img_with_bounding_boxes = tf.image.draw_bounding_boxes(\n img,\n bboxes,\n colors\n )\n plt.figure()\n plt.imshow(img_with_bounding_boxes[0])\n plt.show()", "def draw_boxs(img,boxs,width=3,color=(0,0,255)):\n box_img = copy.deepcopy(img)\n for i in range(boxs.shape[0]):\n # x1,y1,x2,y2=boxs[i]\n x1 = boxs[i][0]\n y1 = boxs[i][1]\n x2 = boxs[i][2]\n y2 = boxs[i][3]\n p1 = (int(round(x1)),int(round(y1)))\n p2 = (int(round(x2)),int(round(y2)))\n cv2.rectangle(box_img, p1, p2, color, width)\n\n return box_img", "def draw_image_bboxes(pixel_candidates, gt_candidate, detection_candidate):\n fig, ax = plt.subplots()\n ax.imshow(pixel_candidates, cmap='gray')\n\n for candidate in detection_candidate:\n minc, minr, maxc, maxr = candidate\n rect = mpatches.Rectangle((minc, minr), maxc - minc + 1, maxr - minr + 1, fill=False, edgecolor='red', linewidth=2)\n ax.add_patch(rect)\n\n for candidate in gt_candidate:\n minc, minr, maxc, maxr = candidate\n rect = mpatches.Rectangle((minc, minr), maxc-minc+1, maxr-minr+1, fill=False, edgecolor='green', linewidth=2)\n ax.add_patch(rect)\n\n #plt.show()", "def show_bboxes(img, bounding_boxes=None, facial_landmarks=[]):\n\n img_copy = img.copy()\n draw = ImageDraw.Draw(img_copy)\n# for b in 
bounding_boxes:\n# draw.rectangle([\n# (b[0], b[1]), (b[2], b[3])\n# ], outline='white')\n\n for p in facial_landmarks:\n for i in range(106):\n draw.ellipse([\n (p[i*2] - 1.0, p[2*i + 1] - 1.0),\n (p[i*2] + 1.0, p[2*i+1] + 1.0)\n ], outline='blue')\n font = ImageFont.truetype(\"arial.ttf\", 10)\n draw.text([p[2*i], p[2*i+1]], str(i), font=font)\n\n return img_copy", "def draw_box(image, boxes, box_color=(255, 255, 255)):\r\n for box in boxes:\r\n cv2.rectangle(image,\r\n (box[0], box[1]),\r\n (box[2], box[3]), box_color, 3)", "def return_bbox_image(self, image, bboxes, label, color):\n if bboxes:\n for obj in bboxes:\n image = self.draw_single_bbox(image, obj.position_xywh, label=label, color=color)\n\n return image", "def draw_box(image, boxes, box_color=(255, 255, 255)):\r\n for box in boxes:\r\n cv2.rectangle(image,\r\n (box[0], box[1]),\r\n (box[2], box[3]), box_color)", "def draw_bboxes_withindex(img,boxes, uids):\n source = Image.fromarray(img)\n draw = ImageDraw.Draw(source)\n w2,h2 = (img.shape[0],img.shape[1])\n \n font = ImageFont.truetype('/usr/share/fonts/truetype/freefont/FreeSerif.ttf', 40)\n #font = ImageFont.truetype('arial.ttf', 24)\n\n\n idx = 0\n\n for b in boxes:\n xmin,ymin,xmax,ymax = b\n \n for j in range(3):\n draw.rectangle(((xmin+j, ymin+j), (xmax+j, ymax+j)), outline=\"red\")\n draw.text((xmin+20, ymin+70), str(uids[idx]), font = font)\n idx +=1\n return source", "def draw_rafts(img_bgr, rafts_loc, rafts_radii, num_of_rafts):\n\n circle_thickness = int(2)\n circle_color = (0, 0, 255) # openCV: BGR\n\n output_img = img_bgr\n for raft_id in np.arange(num_of_rafts):\n output_img = cv.circle(output_img, (rafts_loc[raft_id, 0], rafts_loc[raft_id, 1]), rafts_radii[raft_id],\n circle_color, circle_thickness)\n\n return output_img", "def __draw_boxes(self, img, bboxes, color=(128, 0, 0), thick=4):\n\n # Make a copy of the image\n imcopy = np.copy(img)\n # Iterate through the bounding boxes\n for bbox in bboxes:\n # Draw a rectangle given bbox coordinates\n cv2.rectangle(imcopy, bbox[0], bbox[1], color, thick)\n # Return the image copy with boxes drawn\n return imcopy", "def draw_boxes(img, paths, exit_masks=[]):\r\n for path in paths:\r\n contour, centroid = path[-1][:2]\r\n # DONT DRAW IF VEHICLE EXITS\r\n if vehicle_exits(centroid, exit_masks): continue\r\n x, y, w, h = contour\r\n\r\n # DRAW RECTANGLE AND CIRCLE DENOTING THE BOUNDARY AND CENTROID OF VEHICLE\r\n cv2.rectangle(img, (x, y), (x + w - 1, y + h - 1),BOUNDING_BOX_COLOUR, 1)\r\n cv2.circle(img, centroid, 2, CENTROID_COLOUR, -1)\r\n return img", "def draw_bounding_boxes_on_image_array(image, boxes, color=[], thickness=5):\n\n draw_bounding_boxes_on_image(image, boxes, color, thickness)\n\n return image", "def circle(self,image,radius,i,j,c_x,c_y):\r\n major_axis=radius\r\n minor_axis=radius\r\n self.ellipse(image,major_axis,minor_axis,i,j,c_x,c_y)", "def draw_rubiks_points(disp_image, obj, color, radius=3, thickness=1):\n\tcv2.circle(disp_image, obj, radius, color=color, thickness=thickness)", "def circle(draw, bbox, thickness=4, loops=2, fill=(255,0,0)):\n offset = 0\n x1, y1, x2, y2 = bbox\n w, h = x2 - x1, y2 - y1\n x_c, y_c = x1 + w/2, y1 + h/2\n rot = noise(0.6)\n a, b = w, h\n for loop in range(loops):\n for r in np.arange(0, 2*pi + random.random(), 1/(max(w, h))):\n offset += noise()\n for i in range(thickness):\n x, y = ellipse_pt(r, x_c, y_c, a+i+offset, b+i+offset, rot)\n draw.point((x,y), fill=fill)\n a, b = a + 1, b + 1", "def draw_labeled_bboxes(img, labels):\n # Iterate through all detected 
cars\n for car_number in range(1, labels[1] + 1):\n # Find pixels with each car_number label value\n nonzero = (labels[0] == car_number).nonzero()\n # Identify x and y values of those pixels\n nonzeroy = np.array(nonzero[0])\n nonzerox = np.array(nonzero[1])\n # Define a bounding box based on min/max x and y\n bbox = ((np.min(nonzerox), np.min(nonzeroy)), (np.max(nonzerox), np.max(nonzeroy)))\n # Draw the box on the image\n cv2.rectangle(img, bbox[0], bbox[1], (0, 0, 255), 6)\n # Return the image\n return img", "def show_2dboxes(im, bdbs, color_list=[], random_color=True, scale=1.0):\n plt.cla()\n plt.axis('off')\n plt.imshow(im)\n for i, bdb in enumerate(bdbs):\n if bdb is None:\n continue\n bbox = np.array([bdb['x1'], bdb['y1'], bdb['x2'], bdb['y2']]) * scale\n if random_color is False:\n color = color_list[i]\n else:\n color = (rand(), rand(), rand())\n rect = plt.Rectangle((bbox[0], bbox[1]), bbox[2] - bbox[0], bbox[3] - bbox[1], fill=False, edgecolor=color, linewidth=2.5)\n plt.gca().add_patch(rect)\n plt.gca().text(bbox[0], bbox[1], '{:s}'.format(bdb['classname']), bbox=dict(facecolor=color, alpha=0.5), fontsize=9, color='white')\n plt.show()\n return im", "def draw_boxes(image, bounds, color):\n draw = ImageDraw.Draw(image)\n\n for bound in bounds:\n draw.polygon([\n bound.vertices[0].x, bound.vertices[0].y,\n bound.vertices[1].x, bound.vertices[1].y,\n bound.vertices[2].x, bound.vertices[2].y,\n bound.vertices[3].x, bound.vertices[3].y], None, color)\n # font = ImageFont.truetype(\"sans-serif.ttf\", 10)\n draw.text((bound.vertices[0].x, bound.vertices[0].y,),bound,(255,255,255),font=font)\n return image", "def draw_labeled_bboxes(img, labels):\n # Iterate through all detected cars\n for car_number in range(1, labels[1] + 1):\n # Find pixels with each car_number label value\n nonzero = (labels[0] == car_number).nonzero()\n # Identify x and y values of those pixels\n nonzero_y = np.array(nonzero[0])\n nonzero_x = np.array(nonzero[1])\n # Define a bounding box based on min/max x and y\n bbox = ((np.min(nonzero_x), np.min(nonzero_y)), (np.max(nonzero_x), np.max(nonzero_y)))\n # Draw the box on the image\n cv2.rectangle(img, bbox[0], bbox[1], (0, 0, 255), 6)\n # Return the image\n return img", "def obstacles_form(self,image):\r\n major_axis=60\r\n minor_axis=30\r\n c_y=246\r\n c_x=145\r\n c_y1=90\r\n c_x1=70\r\n radius=35\r\n for i in range(len(image)):\r\n for j in range(len(image[0])):\r\n\r\n #self.ellipse(image,major_axis,minor_axis,i,j,c_x,c_y)\r\n self.circle(image,100,i,j,200,200)\r\n self.circle(image,100,i,j,800,200)\r\n #self.slanted_rect(image,i,j)\r\n self.boundary(image,i,j)\r\n self.boundary1(image,i,j)\r\n self.boundary2(image,i,j)\r\n self.c_shape(image,i,j)\r\n #exploration.c_shape(image,i,j)\r", "def draw_balls():\n for ball in balls:\n circle(screen, ball[0], (ball[1], ball[2]), ball[3]) # Unpacking the list\n for super_ball in super_balls:\n\n # Indexes here are standing for attributes of a particular ball\n circle(screen, super_ball[0][0], (super_ball[1], super_ball[2]), super_ball[3])\n circle(screen, super_ball[0][1], (super_ball[1], super_ball[2]), super_ball[3] - 10)\n circle(screen, super_ball[0][2], (super_ball[1], super_ball[2]), super_ball[3] - 20)", "def paint_circle(image,position_x,position_y,size,color = [0,255,0]):\r\n angles = 360\r\n step = math.pi/angles *2\r\n output = image.copy()\r\n for i in range(angles):\r\n angle = i*step\r\n point_x = int(position_x+size*math.cos(angle))\r\n point_y = int(position_y+size*math.sin(angle))\r\n if point_x>1 and 
point_x<len(image)-1 and point_y>1 and point_y<len(image[0])-1:\r\n output[point_x][point_y]=color\r\n output[point_x+1][point_y]=color\r\n output[point_x-1][point_y]=color\r\n output[point_x][point_y-1]=color\r\n output[point_x][point_y+1]=color\r\n return output", "def draw_labeled_bboxes(img, labels):\n # Iterate through all detected cards\n for car_number in range(1, labels[1] + 1):\n # Find pixels with each car_number label value\n nonzero = (labels[0] == car_number).nonzero()\n # Identify x and y values of those pixels\n nonzeroy = np.array(nonzero[0])\n nonzerox = np.array(nonzero[1])\n # Define a bounding box based on min/max x and y\n bbox = ((np.min(nonzerox), np.min(nonzeroy)), (np.max(nonzerox), np.max(nonzeroy)))\n # Draw the box on the image\n cv2.rectangle(img, bbox[0], bbox[1], (0, 0, 255), 6)\n # Return the image\n return img" ]
[ "0.68533266", "0.68072176", "0.6805508", "0.6788925", "0.676972", "0.6738393", "0.67133397", "0.664385", "0.66165227", "0.6587222", "0.6578446", "0.65585065", "0.6551722", "0.65482426", "0.6528621", "0.65220505", "0.64468735", "0.64413995", "0.6400262", "0.6379862", "0.63767797", "0.63521916", "0.6334738", "0.63332164", "0.6329452", "0.6326719", "0.6309995", "0.6309326", "0.62990314", "0.62686443" ]
0.73344976
0
Calculate heterozygosity samples = list of sample names vcf = VCF file
def calHet( inFile, varType ): names = [] print("Sample\tfracHet\thetCt\thomCt") # print header with open( inFile, 'r') as files: # open sample name file for i in files: i = i.rstrip() vcf = i + "." + varType + ".vcf" with open( vcf, 'r' ) as data: hom = 0.0 # count homozygous sites het = 0.0 # count heterozygous sites fractionHet = 0.0 # fraction heterozygous for var in data: if var.startswith("#"): # skip header continue else: var = var.rstrip() line = var.split("\t") stats = line[9].split(':') # alleles = list( map( int, stats[1].split(',') ) ) # create list of allele counts check = [ i for i in alleles if i > 0] # put any counts > 0 into a list if not check: # if all allele counts == 0 continue # all alleles are set to zero wtf? Result of a quality score that is low. elif len(check) > 1: # multiple allele counts , must be heterozygous het += 1 # more than one allele elif len(check) == 1: # only one allele has a count hom += 1 #print("%s\t%s\t%s\t%s\t%s\t%s" %(i, line[0], line[1], stats[0], stats[1], check ) ) if hom == 0: fractionHet = 100 else: fractionHet = het/(hom + het) # calculate fraction heterozygous print("%s\t%f\t%f\t%f" %(i, fractionHet, het,hom )) files.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def vcf_samples(vcffile):\n try:\n vcf_reader = vcf.Reader(open(vcffile, 'r'))\n return vcf_reader.samples\n except Exception as error:\n print(f\"Could not read vcffile {vcffile}: continuing without vcf data: {str(error)}\")\n\n return []", "def calculate_mixture_features(args):\n workspace = args.workspace\n speech_dir = args.speech_dir\n noise_dir = args.noise_dir\n data_type = args.data_type\n fs = cfg.sample_rate\n dir_name = args.dir_name\n\n fid_clean = open(speech_dir, 'r')\n lines_clean = fid_clean.readlines()\n fid_clean.close()\n\n fid_reverb = open(noise_dir, 'r')\n lines_reverb = fid_reverb.readlines()\n fid_reverb.close()\n\n for files_clean, files_reverb in zip(lines_clean, lines_reverb):\n\n files_clean = files_clean.strip('\\n')\n files_reverb = files_reverb.strip('\\n')\n\n fid = open(files_clean,'r')\n wavLines_clean = fid.readlines()\n fid.close()\n fid = open(files_reverb,'r')\n wavLines_reverb = fid.readlines()\n fid.close()\n\n cnt = 0 \n\n for wavs_clean, wavs_reverb in zip(wavLines_clean, wavLines_reverb):\n \n t1 = time.time()\n # cnt = 0\n\n wav_name_clean, wav_path_clean = wavs_clean.split()\n wav_name_reverb, wav_path_reverb = wavs_reverb.split()\n \n # Read clean speech audio. \n (speech_audio, _) = read_audio(wav_path_clean, target_fs=fs)\n \n # Read reverb speech audio. \n (noise_audio, _) = read_audio(wav_path_reverb, target_fs=fs)\n \n # Cut reverb speech to the same length as clean speech. \n if len(noise_audio) > len(speech_audio):\n noise_audio = noise_audio[0: len(speech_audio)]\n \n # Extract spectrogram. \n mixed_complx_x = calc_sp(noise_audio, mode='complex')\n speech_x = calc_sp(speech_audio, mode='magnitude')\n\n # Write out features. \n out_feat_path = os.path.join(workspace, \"features\", \"spectrogram\", \n data_type, dir_name, \"%s.p\" % wav_name_reverb)\n create_folder(os.path.dirname(out_feat_path))\n data = [mixed_complx_x, speech_x, wav_name_reverb]\n pickle.dump(data, open(out_feat_path, 'wb'), protocol=pickle.HIGHEST_PROTOCOL)\n \n # Print. 
\n if cnt % 100 == 0:\n print(cnt)\n # print(mixed_complx_x)\n # print(speech_x)\n \n cnt += 1\n\n print(\"Extracting feature time: %s\" % (time.time() - t1))", "def available_samples(vcf_path):\n return _header_from_vcf(vcf_path)[9:]", "def read_inversion_info(file_dic):\n #print_file_test = open('file_test.txt','w')\n\n if not ( check_inversion_files(file_dic) ):\n print 'error(read_inversion_info): problem with lenstool file names'\n return 0\n \n file_generate_arcs = file_dic['file_generate_arcs']\n info_input_lens = fc.extract_second_identifiers( file_generate_arcs, \\\n 'potential' )\n#-------------------------------------------------------------------------------\n\n file_source = file_dic['file_source']\n info_src = np.loadtxt(file_source, unpack=False)\n if len(info_src) == 8 and np.isscalar(info_src[0]):\n #FIXME - check if the second condition is all we need\n info_src = [info_src]\n#-------------------------------------------------------------------------------\n\n file_make_inversion = file_dic['file_make_inversion']\n info_fited_param = fc.extract_second_identifiers( file_make_inversion, \\\n 'limit' )\n info_forme = fc.extract_parameter(file_make_inversion, 'forme')[0][0]\n\n#-------------------------------------------------------------------------------\n\n file_best_fit = file_dic['file_best_fit']\n info_best_fit = fc.extract_second_identifiers( file_best_fit, \\\n 'potentiel' )\n\n info_xi2 = fc.extract_parameter(file_best_fit, '#Chi2pos:')\n\n#-------------------------------------------------------------------------------\n file_chires = file_dic['file_chires']\n\n info_chires = extract_parameter(file_chires, '0')\n rmss_mean = [0.0, 0.0]\n rmsi_mean = [0.0, 0.0]\n for i in info_chires:\n if i[0] != 'A':\n rmss_mean[0] = rmss_mean[0] + float(i[7])\n rmss_mean[1] = rmss_mean[1] + 1.0\n \n rmsi_mean[0] = rmsi_mean[0] + float(i[8])\n rmsi_mean[1] = rmsi_mean[1] + 1.0\n\n rmss_mean = rmss_mean[0]/rmss_mean[1]\n rmsi_mean = rmsi_mean[0]/rmsi_mean[1]\n#-------------------------------------------------------------------------------\n out_dict = { 'xi2' : float(info_xi2[0][0]), \\\n 'best_fit_lens' : info_best_fit, \\\n 'rmsi_mean' : rmsi_mean, \\\n 'rmss_mean' : rmss_mean, \\\n 'fited_parameters' : info_fited_param[0].keys(), \\\n 'input_lens' : info_input_lens[len(info_input_lens) - 1], \\\n 'forme' : info_forme \\\n }\n #for i in out_dict.keys():\n # print i, out_dict[i]\n return out_dict", "def sample_vcf():\n file_content = 
b\"\"\"##fileformat=VCFv4.2\n##hailversion=0.2.100-2ea2615a797a\n##INFO=<ID=QUALapprox,Number=1,Type=Integer,Description=\"\">\n##INFO=<ID=SB,Number=.,Type=Integer,Description=\"\">\n##INFO=<ID=MQ,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=MQRankSum,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=VarDP,Number=1,Type=Integer,Description=\"\">\n##INFO=<ID=AS_ReadPosRankSum,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_pab_max,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_QD,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_MQ,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=QD,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_MQRankSum,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=FS,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_FS,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=ReadPosRankSum,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_QUALapprox,Number=1,Type=Integer,Description=\"\">\n##INFO=<ID=AS_SB_TABLE,Number=.,Type=Integer,Description=\"\">\n##INFO=<ID=AS_VarDP,Number=1,Type=Integer,Description=\"\">\n##INFO=<ID=AS_SOR,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=SOR,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=singleton,Number=0,Type=Flag,Description=\"\">\n##INFO=<ID=transmitted_singleton,Number=0,Type=Flag,Description=\"\">\n##INFO=<ID=omni,Number=0,Type=Flag,Description=\"\">\n##INFO=<ID=mills,Number=0,Type=Flag,Description=\"\">\n##INFO=<ID=monoallelic,Number=0,Type=Flag,Description=\"\">\n##INFO=<ID=AS_VQSLOD,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=InbreedingCoeff,Number=1,Type=Float,Description=\"\">\n##FILTER=<ID=AC0,Description=\"Allele count is zero after filtering out low-confidence genotypes (GQ < 20; DP < 10; and AB < 0.2 for het calls)\">\n##FILTER=<ID=AS_VQSR,Description=\"Failed VQSR filtering thresholds of -2.7739 for SNPs and -1.0606 for indels\">\n##contig=<ID=chr1,length=248956422,assembly=GRCh38>\n#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\nchr1\t10330\t.\tCCCCTAACCCTAACCCTAACCCTACCCTAACCCTAACCCTAACCCTAACCCTAA\tC\t.\tPASS\tQUALapprox=21493;SB=325,1077,113,694;MQ=32.1327;MQRankSum=0.720000;VarDP=2236;AS_ReadPosRankSum=-0.736000;AS_pab_max=1.00000;AS_QD=5.17857;AS_MQ=29.5449;QD=9.61225;AS_MQRankSum=0.00000;FS=8.55065;AS_FS=.;ReadPosRankSum=0.727000;AS_QUALapprox=145;AS_SB_TABLE=325,1077,2,5;AS_VarDP=28;AS_SOR=0.311749;SOR=1.48100;singleton;AS_VQSLOD=13.4641;InbreedingCoeff=-0.000517845\"\"\"\n file = io.BytesIO(file_content)\n return file", "def whiskerStat_multiext(filename,sigma,noise=False,mag=None,exptime=None):\n hdu=pf.open(filename)\n data = []\n for hdui in hdu[1:]:\n Nobj = hdui.data.shape[0]\n Mcc=np.zeros(Nobj)\n Mrr = np.zeros(Nobj)\n Mrc = np.zeros(Nobj)\n r50 = np.zeros(Nobj)\n for i in range(Nobj):\n print i\n imgo = hdui.data[i][4:].reshape(160,160)\n psf = rebin(imgo,(40,40))\n if noise == True:\n gain = 0.21 # convert electrons to ADU\n zeropoint = 26.794176 # r band, from Nikolay\n objectphoton = exptime*10**(0.4*(zeropoint - mag))\n skyphoton = 8.460140*exptime\n bkg = skyphoton*gain\n img = (psf * objectphoton + skyphoton)*gain\n img = img + add_imageNoise(img) - bkg\n else:\n img = psf\n Mcc[i],Mrr[i],Mrc[i]=complex2ndMoments(img,sigma)\n r50[i] = mfwhm(img)[5]\n data.append([np.mean(Mcc),np.mean(Mrr),np.mean(Mrc),np.mean(r50)])\n data = np.array(data)\n datamean =np.array([robust_mean(data[:,0]),robust_mean(data[:,1]),robust_mean(data[:,2]),robust_mean(data[:,3])])\n #r50 = 0.5*2.35482*np.sqrt((datamean[0]+datamean[1])/2.)*0.27\n r50moffat = datamean[3]*0.27\n whk = 
((datamean[0]-datamean[1])**2 + (2.*datamean[2])**2)**(0.25)*0.27\n phi = np.rad2deg(0.5*np.arctan2(2.*datamean[2],(datamean[0]-datamean[1])))\n datasubmean = data - datamean\n whkrms = (robust_mean((datasubmean[:,0] - datasubmean[:,1])**2 + 4.*datasubmean[:,2]**2))**(0.25)*0.27\n np.savetxt(filename[0:-6]+'txt',[r50moffat,whk,phi,whkrms,datamean[0],datamean[1],datamean[2]],fmt='%10.5f')\n return '---done !-----'", "def count_variants(vcf_list, sample_list):\n\n df_lst = []\n\n sample_vcf_dct = dict(zip(sample_list,vcf_list))\n\n for s in sample_vcf_dct.keys():\n\n vcf_in = sample_vcf_dct[s]\n vcf = VariantFile(vcf_in)\n\n snv = 0\n indel = 0\n\n for rec in vcf:\n\n ref_len = len(rec.ref)\n\n for a in rec.alts:\n if len(a) > 1 or ref_len > 1:\n indel +=1\n else:\n snv +=1\n\n df_lst.append([s,snv,indel])\n\n out_df = pd.DataFrame(df_lst, columns=['sample','snvs','indels'])\n\n return out_df", "def fwhmwhisker_multiext(filename,sigma,band,zenith):\n hdu=pf.open(filename)\n e1=[]\n e2=[]\n fwhmw=[]\n whiskerw=[]\n for hdui in hdu[1:]:\n Nobj = hdui.data.shape[0]\n for i in range(Nobj):\n print i\n img = hdui.data[i][4:].reshape(160,160)\n imgrbin = rebin(img,(40,40))\n res=wfwhm(imgrbin,sigma)\n e1.append(res[0])\n e2.append(res[1])\n whiskerw.append(res[2]*0.27)\n fwhmw.append(res[3]*0.27)\n e1 = np.array(e1)\n e2 = np.array(e2)\n fwhmw = np.array(fwhmw)\n whiskerw = np.array(whiskerw)\n e1mean = e1.mean()\n e1std = e1.std()\n e2mean = e2.mean()\n e2std = e2.std()\n whiskerwmean = whiskerw.mean()\n whiskerwstd = whiskerw.std()\n fwhmwmean = fwhmw.mean()\n fwhmwstd = fwhmw.std()\n r50mean = np.mean(fwhmw/2.)\n r50std = np.std(fwhmw/2.)\n pl.figure(figsize=(15,10))\n pl.subplot(2,3,1)\n pl.hist(e1,bins=20,normed=True)\n pl.xlabel('e1')\n pl.title('mean: '+str(round(e1mean,6))+' std: '+str(round(e1std,5)))\n pl.subplot(2,3,2)\n pl.hist(e2,bins=20,normed=True)\n pl.xlabel('e2')\n pl.title('mean: '+str(round(e2mean,6))+' std: '+str(round(e2std,5)))\n pl.subplot(2,3,3)\n pl.hist(whiskerw,bins=20,normed=True)\n pl.xlabel('whisker')\n pl.title('mean: '+str(round(whiskerwmean,5))+' std: '+str(round(whiskerwstd,5)))\n pl.subplot(2,3,4)\n pl.hist(fwhmw,bins=20,normed=True)\n pl.xlabel('fwhm')\n pl.title('mean: '+str(round(fwhmwmean,5))+' std: '+str(round(fwhmwstd,5)))\n pl.subplot(2,3,5)\n pl.hist(fwhmw/2.,bins=20,normed=True)\n pl.xlabel('r50')\n pl.title('mean: '+str(round(r50mean,5))+' std: '+str(round(r50std,5)))\n pl.figtext(0.7,0.4,'band: '+band)\n pl.figtext(0.7,0.37,'zenith angle: '+zenith +' deg')\n pl.figtext(0.3,0.95,'Perfect focus/alignment, 0.7 arcsec fwhm circular seeing',fontsize=18,color='red')\n pl.savefig(filename[0:-6]+'png')\n np.savetxt(filename[0:-6]+'txt',[e1mean,e1std,e2mean,e2std,whiskerwmean,whiskerwstd,fwhmwmean,fwhmwstd,r50mean,r50std],fmt='%10.5f')\n pl.close()\n return '---done !-----'", "def write_to_vcf(self):\n\n # 1. 
Generate header info\n date_for_vcf = datetime.now().strftime('%Y%m%d')\n header_info = [\n '##fileformat=VCFv4.2',\n '##fileDate=%s' % date_for_vcf,\n '##source=%s' % self.get_analyser_name(),\n '##reference=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/bigZips/hg38.fa.gz',\n '##contig=<ID=chr1,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr1.fa.gz>',\n '##contig=<ID=chr2,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr2.fa.gz>',\n '##contig=<ID=chr3,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr3.fa.gz>',\n '##contig=<ID=chr4,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr4.fa.gz>',\n '##contig=<ID=chr5,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr5.fa.gz>',\n '##contig=<ID=chr6,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr6.fa.gz>',\n '##contig=<ID=chr7,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr7.fa.gz>',\n '##contig=<ID=chr8,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr8.fa.gz>',\n '##contig=<ID=chr9,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr9.fa.gz>',\n '##contig=<ID=chr10,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr10.fa.gz>',\n '##contig=<ID=chr11,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr11.fa.gz>',\n '##contig=<ID=chr12,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr12.fa.gz>',\n '##contig=<ID=chr13,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr13.fa.gz>',\n '##contig=<ID=chr14,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr14.fa.gz>',\n '##contig=<ID=chr15,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr15.fa.gz>',\n '##contig=<ID=chr16,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr16.fa.gz>',\n '##contig=<ID=chr17,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr17.fa.gz>',\n '##contig=<ID=chr18,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr18.fa.gz>',\n '##contig=<ID=chr19,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr19.fa.gz>',\n '##contig=<ID=chr20,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr20.fa.gz>',\n '##contig=<ID=chr21,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr21.fa.gz>',\n '##contig=<ID=chr22,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr22.fa.gz>',\n '##contig=<ID=chrM,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chrM.fa.gz>',\n '##contig=<ID=chrX,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chrX.fa.gz>',\n '##contig=<ID=chrY,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chrY.fa.gz>',\n ]\n header_parameters = [\n '##FORMAT=<ID=GT,Number=1,Type=String,Description=\"Genotype\">',\n '##FORMAT=<ID=MTQ,Number=1,Type=String,Description=\"MassArray Typer quality value for SNP call. '\n 'A=Conservative, B=Moderate, C=Aggressive, D=Low Probability, E=User Call, i=Low Intensity. 
A and B are considered high '\n 'quality scores.\">',\n '##INFO=<ID=PCR,Number=2,Type=String,Description=\"PCR sequences used in assay.\">',\n '##INFO=<ID=AF,Number=A,Type=Float,Description=\"Minor allele frequency from population data.\">',\n '##INFO=<ID=Gene,Number=A,Type=String,Description=\"HGNC Gene Name for gene containing SNP.\">',\n '##INFO=<ID=Build,Number=A,Type=String,Description=\"Genome build used to determine SNP position for assay.\">',\n '##FILTER=<ID=LowCallRate,Description=\"SNP not called in at least 30% of samples in assay.\">',\n ]\n\n # 2. Extract info from XML file\n results = self.get_results()\n snps = self.get_snps()\n pcr_sequences = self.get_pcr_sequences()\n call_rates = self.get_snp_call_rate()\n\n # 3. For each sample, create VCF, add headers, determine genotype of each SNP and write to file.\n for sample, variants in results.items():\n\n with open(os.path.join(self.output, '%s.vcf' % sample), 'w+') as outfile:\n\n header_fields = ['CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER', 'INFO', 'FORMAT', str(sample)]\n\n outfile.write('%s\\n' % '\\n'.join(header_info))\n outfile.write('%s\\n' % '\\n'.join(header_parameters))\n outfile.write('#%s\\n' % '\\t'.join(header_fields))\n\n # for each variant, make a line to add to the file which will\n # then be sorted\n lines_to_write = []\n for snp, info in variants.items():\n\n ref_allele = snps[snp]['ref']\n alt_alleles = snps[snp]['alt']\n alt_list = alt_alleles.split(',')\n\n # Genotype formatting matches VCF v4.0 spec where ./. is no call.\n gt_list = []\n called_genotype = info['genotype']\n if not called_genotype:\n gt_list = ['.', '.']\n elif len(called_genotype) == 1:\n called_genotype += called_genotype\n for allele in list(called_genotype):\n if allele == ref_allele:\n gt_list.append(0)\n else:\n if allele in alt_list:\n idx = alt_list.index(allele)\n gt_list.append(idx + 1)\n else:\n raise ValueError(\n 'Called genotype %s not provided as possible alt in bed file. Sample %s and SNP '\n '%s %s.' 
% (called_genotype, sample, snp, alt_alleles)\n )\n gt = '/'.join([str(x) for x in gt_list])\n\n # Threshold currently set to 0.3 (70% results have a call).\n snp_call_rate = call_rates[snp]\n if snp_call_rate >= 0.3:\n vcf_filter = 'LowCallRate'\n else:\n vcf_filter = 'PASS'\n\n snp_pcr_seqs = pcr_sequences[snp]\n\n lines_to_write.append(\n '{chr}\\t{pos}\\t{id}\\t{ref}\\t{alt}\\t.\\t{filter}\\tAF={af};PCR={pcr};Gene={gene};Build={build}\\t'\n 'GT:MTQ\\t{gt}:{qual}\\n'.format(\n chr=snps[snp]['chrom'],\n pos=snps[snp]['pos'],\n id=snp,\n ref=ref_allele,\n alt=alt_alleles,\n filter=vcf_filter,\n af=snps[snp]['maf'],\n pcr=','.join(snp_pcr_seqs),\n gene=snps[snp]['gene'],\n build=snps[snp]['genome_build'],\n gt=gt,\n qual=','.join(info['quality'])\n )\n )\n\n sorted_lines_to_write = sorted(\n lines_to_write,\n key=lambda x: (\n # first key for sorting is the int value of chr\n int(x.split('\\t')[0][3:]),\n # second key for sorting is the position of the variant\n int(x.split('\\t')[1])\n )\n )\n\n for line in sorted_lines_to_write:\n outfile.write(line)", "def extract_surf_samples(filename, samples, upright = False):\n\tif samples.ndim != 2 or samples.shape[1] != 4 : raise ValueError(\"Bad shape for 'samples' array\")\n\tnsamples = samples.shape[0]\n\tdescriptors = np.zeros((nsamples,64),'float64')\n\t_lib.extract_surf_samples(filename,nsamples,samples,upright,descriptors)\n\treturn descriptors", "def getQVsForComphetModel(comphetVariantsFilename, caseNames, controlNames):\r\n\r\n\t# If we have a sample file, then we have everyone's names:\r\n\tif len(caseNames) != 0 or len(controlNames) != 0:\r\n\r\n\t\tcaseCounts = {name: set() for name in caseNames}\r\n\t\tcontrolCounts = {name: set() for name in controlNames}\r\n\r\n\t\tvariantIDs = {\"case\": caseCounts, \"ctrl\": controlCounts}\r\n\r\n\t# Otherwise, we work just from the genotypes file and get names from there as we go.\r\n\telse:\r\n\t\tvariantIDs = {\"case\": defaultdict(set), \"ctrl\": defaultdict(set)}\r\n\r\n\treader = csv.reader(open(comphetVariantsFilename, \"r\"))\r\n\theader = next(reader)\r\n\r\n\tfor line in reader:\r\n\r\n\t\tline = dict(zip(header, line))\r\n\r\n\t\tcaseOrControl = line[\"Sample Phenotype (#1)\"]\r\n\t\tname = line[\"Sample Name (#1)\"]\r\n\t\tvariantID1 = line[\"Variant ID (#1)\"]\r\n\t\tvariantIDs[caseOrControl][name].add(variantID1)\r\n\t\t# The comphet file also includes homozygous mutations, in which case there is no Variant #2.\r\n\t\tif line[\"Sample Phenotype (#1)\"] == \"het\":\r\n\t\t\tvariantID2 = line[\"Variant ID (#2)\"]\r\n\t\t\tvariantIDs[caseOrControl][name].add(variantID2)\r\n\r\n\tcaseCounts = {name: len(variants) for name, variants in variantIDs[\"case\"].items()}\r\n\tcontrolCounts = {name: len(variants) for name, variants in variantIDs[\"ctrl\"].items()}\r\n\treturn caseCounts, controlCounts", "def GetHet(trrecord, samplelists=[], uselength=True):\n if len(samplelists) == 0: samplelists.append(None)\n hetvals = []\n for sl in samplelists:\n allele_freqs = trrecord.GetAlleleFreqs(samplelist=sl, uselength=uselength)\n hetvals.append(utils.GetHeterozygosity(allele_freqs))\n return hetvals", "def main():\n parser = argparse.ArgumentParser(\n description=\"making feature file argsurations.\")\n\n parser.add_argument(\n \"--waveforms\", default=None,\n help=\"directory or list of filename of input wavfile\")\n parser.add_argument(\n \"--hdf5dir\", default=None,\n help=\"directory to save hdf5\")\n parser.add_argument(\n \"--wavdir\", default=None,\n help=\"directory to save of preprocessed 
wav file\")\n parser.add_argument(\n \"--fs\", default=16000,\n type=int, help=\"Sampling frequency\")\n parser.add_argument(\n \"--shiftms\", default=5,\n type=float, help=\"Frame shift in msec\")\n parser.add_argument(\n \"--feature_type\", default=\"world\", choices=[\"world\", \"melspc\", \"mcep\"],\n type=str, help=\"feature type\")\n parser.add_argument(\n \"--mspc_dim\", default=80,\n type=int, help=\"Dimension of mel spectrogram\")\n parser.add_argument(\n \"--minf0\", default=40,\n type=int, help=\"minimum f0 for world analysis\")\n parser.add_argument(\n \"--maxf0\", default=400,\n type=int, help=\"maximum f0 for world analysis\")\n parser.add_argument(\n \"--fmin\", default=None, nargs=\"?\",\n type=int, help=\"minimum frequency for melspc\")\n parser.add_argument(\n \"--fmax\", default=None, nargs=\"?\",\n type=int, help=\"maximum frequency for melspc\")\n parser.add_argument(\n \"--mcep_dim\", default=24,\n type=int, help=\"Dimension of mel cepstrum\")\n parser.add_argument(\n \"--mcep_alpha\", default=0.41,\n type=float, help=\"Alpha of mel cepstrum\")\n parser.add_argument(\n \"--fftl\", default=1024,\n type=int, help=\"FFT length\")\n parser.add_argument(\n \"--highpass_cutoff\", default=70,\n type=int, help=\"Cut off frequency in lowpass filter\")\n parser.add_argument(\n \"--save_wav\", default=True,\n type=strtobool, help=\"Whether to save filtered wav file\")\n parser.add_argument(\n \"--n_jobs\", default=10,\n type=int, help=\"number of parallel jobs\")\n parser.add_argument(\n \"--verbose\", default=1,\n type=int, help=\"log message level\")\n\n args = parser.parse_args()\n\n # set log level\n if args.verbose == 1:\n logging.basicConfig(level=logging.INFO,\n format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S')\n elif args.verbose > 1:\n logging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S')\n else:\n logging.basicConfig(level=logging.WARNING,\n format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S')\n logging.warning(\"logging is disabled.\")\n\n # show arguments\n for key, value in vars(args).items():\n logging.info(\"%s = %s\" % (key, str(value)))\n\n # read list\n if os.path.isdir(args.waveforms):\n file_list = sorted(find_files(args.waveforms, \"*.wav\"))\n else:\n file_list = read_txt(args.waveforms)\n logging.info(\"number of utterances = %d\" % len(file_list))\n\n # check directory existence\n if not os.path.exists(args.wavdir) and args.highpass_cutoff != 0 and args.save_wav:\n os.makedirs(args.wavdir)\n if not os.path.exists(args.hdf5dir):\n os.makedirs(args.hdf5dir)\n\n # divide list\n file_lists = np.array_split(file_list, args.n_jobs)\n file_lists = [f_list.tolist() for f_list in file_lists]\n\n # multi processing\n processes = []\n if args.feature_type == \"world\":\n target_fn = world_feature_extract\n elif args.feature_type == \"melspc\":\n target_fn = melspectrogram_extract\n else:\n target_fn = melcepstrum_extract\n for f in file_lists:\n p = mp.Process(target=target_fn, args=(f, args,))\n p.start()\n processes.append(p)\n\n # wait for all process\n for p in processes:\n p.join()", "def get_hpc_data(filename='./data/hpc/50000_scanned_voxels.Bfloat', sample_size=None):\n\tarr = to_voxels(read_float(filename))\n\tif sample_size is not None:\n\t\tnp.random.shuffle(arr)\n\t\treturn arr[0:sample_size, :]\n\treturn arr", "def load_vcf_data(vcf_file):\n \n 
if(vcf_file[-3:]==\".gz\"):\n vcf_data=gzip.open(vcf_file, \"r\")\n else:\n vcf_data=open(vcf_file, \"r\")\n \n snp_names=[]\n snp_pos=[]\n genotype_data=[]\n\n missing=0\n \n for line in vcf_data:\n\n if line[0:2] == '##':\n continue\n elif line[0:1] == '#':\n data=line[1:-1]\n data=data.split(\"\\t\")\n if data[0:9]==[\"CHROM\", \"POS\", \"ID\", \"REF\", \"ALT\", \"QUAL\", \"FILTER\", \"INFO\", \"FORMAT\"]:\n sample_names=data[9:]\n else:\n print data[0:9]\n raise Exception(\"Bad vcf header line\")\n else:\n data=line[:-1]\n data=data.split(\"\\t\")\n\n if len(data[4].split(\",\"))>1: \n print \"Warning: ignoring multi alleleic site at \" + data[0]+\":\"+data[1] \n continue # multi-allelic sites. \n\n if data[2] != \".\":\n snp_names.append(data[2])\n else:\n snp_names.append(data[0]+\":\"+data[1])\n\n snp_pos.append(int(data[1]))\n\n if not all([(x[0]==\".\" and x[2]==\".\") or (x[0] in [\"0\", \"1\"] and x[2] in [\"0\", \"1\"]) for x in data[9:]]):\n raise Exception(\"Could not read line: \" + line) \n \n genotype_data.append([ 3 if x[0]==\".\" and x[2]==\".\" else int(x[0])+int(x[2]) for x in data[9:] ])\n\n return {\"sample_names\":sample_names, \"snp_names\":snp_names, \"snp_pos\":snp_pos, \"genotype_data\":genotype_data}", "def all_frequency_spectrum_folded(vcf_file,chrom,start,end,mincov=0,maxcov=10000,inds=\"all\",bgzip=True,called=True,output=\"sum\",nb_ind_with_min_cov=\"all\"):\n\t###CHOOSE THE RIGHT VCF\n\tprint vcf_file\n\tinput_vcf=vcf.Reader(fsock=None, filename=vcf_file, compressed=bgzip, prepend_chr=\"False\", strict_whitespace=False)#open the vcf parser\n\tif inds==\"all\" or inds==[\"all\"]:inds=input_vcf.samples# transform \"all\" in a list of all individuals in the vcf\n\tsfs=[0]*(len(inds))\n\t#Function\n\tnsites_ok=0\n\t###identify individual to remove when calculating stats\n\tinds_to_delete=[]\n\tfor i,ind in enumerate(input_vcf.samples):#check which ind is ion sample and compare it to our list of inds\n\t\t if ind not in inds:#delete this ind\n\t\t \tinds_to_delete.append(i)\n\t#go along the region\n\tif chrom!=\"all\":\n\t\tcheck=len(sh.tabix(vcf_file,str(chrom)+\":\"+str(start)+\"-\"+str(end)))\n\t\t#print \"check;' \",check,\"'\"\n\t\tif check==0: \n\t\t\treturn [sfs,0]\n\t\tfor record in input_vcf.fetch(chrom,start,end):# for every site\n\t\t\t#print \"HERE\"\n\t\t\t#print input_vcf,record,mincov,maxcov, inds, nb_ind_with_min_cov\n\t\t\t#raise Exception\n\t\t\tcond=checkSnp_Cov(input_vcf,record,mincov,maxcov,inds=inds,nalleles=[1,2],nb_ind_with_min_cov=nb_ind_with_min_cov)# check if the site respect our condition\n\t\t\t#print inds\n\t\t\t#print \"cond\",cond\n\t\t\t#print \"HERE2\"\n\t\t\tif cond:# if it does\n\t\t\t \tfor index in sorted(inds_to_delete)[::-1]:#remove the individuals we do not want\n\t\t\t \t\tdel record.samples[index]\n\t\t\t \t\t#print record.nucl_diversity\n\t\t\t \t#print record.samples\n\t\t\t\tnsites_ok+=1\n\t\t\t\t#print record.nucl_diversity\n\t\t\t\t#print \"samples pairwise\", len(record.samples),record.nucl_diversity \n\t\t\t\t#print str([ind[\"GT\"]for ind in record.samples ]), str([ind[\"GT\"]for ind in record.samples ]).count(\"1\")\n\t\t\t\tnalt = str([ind[\"GT\"]for ind in record.samples ]).count(\"1\")\n\t\t\t\tif nalt==0 and record.nucl_diversity>0:raise Exception\n\t\t\t\tif nalt<len(inds)*2:\n\t\t\t\t\tif nalt>len(inds): nalt = len(inds)*2-nalt\n\t\t\t\t\t#print sfs\n\t\t\t\t\t#print nalt \n\t\t\t\t\tif (nalt-1)>=0: #remove non variants (0-1) that would add to the position 
SFS[-1]\n\t\t\t\t\t\tsfs[nalt-1]+=1\n\t\t\t\t\t#print sfs\n\t\t\t\t\t#if b!=record.nucl_diversity : print \" old and new diversity\", b,record.nucl_diversity\n\t\t\t\t#compute total information for the window\n\telif chrom==\"all\":\n\t\traise Exception(\"not implement chrom=='all' yet\")\n\treturn [numpy.array(sfs),nsites_ok]", "def process_VCF(input_vcf, targets_file, out_vcf = None) :\n\n\tfVCF_OUT = None\n\tif out_vcf is not None :\n\t\tfVCF_OUT = open(out_vcf, 'w')\n\tfDUP_OUT = open(targets_file, 'w')\n\n\tvariants_dict = {}\n\tvariants_list = []\n\tnum_redundant, num_kept = 0, 0\n\tfINVCF = open(input_vcf, 'r')\n\tfor line in fINVCF :\n\t\tif line.startswith('#') :\n\t\t\tif line.startswith(\"#CHROM\") :\n\t\t\t\tindividuals = re.split('\\t', line.strip())[9:]\n\t\t\t\tstdout.write(\"%d individuals included in the VCF file: %s\\n\" %(len(individuals), input_vcf))\n\t\t\tif fVCF_OUT :\n\t\t\t\tfVCF_OUT.write(line)\n\t\telse :\n\t\t\ttmp_line = re.split('\\t', line.strip())\n\t\t\tref_base = tmp_line[3]\n\t\t\talt_base = tmp_line[4]\n\t\t\tchrom_id = tmp_line[0]\n\t\t\tchrom_pos = tmp_line[1]\n\t\t\tqual = tmp_line[5]\n\t\t\tfilter = tmp_line[6]\t\t\t\t\t# PASS or FILTERED by VQSR #\n\t\t\t# fix sites having different types of calls: redundant calls #\n\t\t\tif not variants_dict.has_key(chrom_id+':'+chrom_pos) :\n\t\t\t\tvariants_dict[chrom_id+':'+chrom_pos] = line.strip()\n\t\t\t\tvariants_list.append(chrom_id+':'+chrom_pos)\n\t\t\telse :\n\t\t\t\tnum_redundant += 1\n\t\t\t\tsame_site_diff_call = re.split('\\t', variants_dict[chrom_id+':'+chrom_pos])\n\t\t\t\ttmp_qual = same_site_diff_call[5]\n\t\t\t\ttmp_filter = same_site_diff_call[6]\n\t\t\t\ttmp_alt_base = same_site_diff_call[4]\n\t\t\t\tfDUP_OUT.write(\"%s\\n%s\\n\" %(variants_dict[chrom_id+':'+chrom_pos], line.strip()))\n\t\t\t\tif (tmp_filter != \"PASS\" and filter != \"PASS\") or (filter == \"PASS\" and tmp_filter == \"PASS\") :\t\t# if two different call both passed the VQSR or both not, we remove it from the final call set #\t\n\t\t\t\t\tvariants_dict.pop(chrom_id+':'+chrom_pos)\n\t\t\t\t\tvariants_list.remove(chrom_id+':'+chrom_pos)\n\t\t\t\t\tif filter == \"PASS\" :\n\t\t\t\t\t\tstdout.write(chrom_id+\" \"+chrom_pos+\" both pass\\n\")\n\t\t\t\t\telse :\n\t\t\t\t\t\tstdout.write(chrom_id+\" \"+chrom_pos+\" both filtered\\n\")\n\t\t\t\telif filter == \"PASS\" and tmp_filter != filter :\n\t\t\t\t\tstdout.write(chrom_id+\" \"+chrom_pos + \" second kept\\n\")\n\t\t\t\t\tvariants_dict[chrom_id+':'+chrom_pos] = line.strip()\n\t\t\t\t\tnum_kept += 1\n\t\t\t\telif tmp_filter == \"PASS\" and tmp_filter != filter :\n\t\t\t\t\tstdout.write(chrom_id+\" \"+chrom_pos + \" first kept\\n\")\n\t\t\t\t\tnum_kept += 1\n\tstdout.write(\"%d\\t%d\\n\" %(num_redundant, num_kept))\n\n\tif fVCF_OUT :\n\t\tfor i in range(len(variants_list)) :\n\t\t\tfVCF_OUT.write(\"%s\\n\" %(variants_dict[variants_list[i]]))\n\t\tfVCF_OUT.close()\n\tfINVCF.close()", "def testGetVegaMag(self):\n std = MKIDStd.MKIDStd()\n vegaFlux = std.load(\"vega\")\n bd17Flux = std.load(\"bd17\")\n for filter in ['U','B','V','R','I']:\n aFilter = std.filters[filter] \n mag = std.getVegaMag(vegaFlux, aFilter)\n self.assertAlmostEqual(0.03, mag, msg=\"filter=%s mag=%f\"%(filter,mag))", "def readHeader(self, filename):\n f = Data.Usrxxx.readHeader(self, filename)\n# self.sayHeader()\n \n while True:\n data = fortran.read(f)\n if data is None: break\n size = len(data)\n# print(\"size: \", size)\n\n if size == 14 and data[:10] == \"STATISTICS\":\n self.statpos = f.tell()\n for det in 
self.detector:\n data = Data.unpackArray(fortran.read(f))\n det.total = data[0]\n det.totalerror = data[1]\n# for j in range(6):\n# fortran.skip(f)\n break\n\n if size != 50: raise IOError(\"Invalid USRTRACK/USRCOLL file\")\n\n header = struct.unpack(\"=i10siiififfif\", data)\n\n det = Data.Detector()\n det.nb = header[0]\n det.name = header[1].strip() # titutc - track/coll name\n det.type = header[2] # itustc - type of binning: 1 - linear energy etc\n det.dist = header[3] # idustc = distribution to be scored\n det.reg = header[4] # nrustc = region\n det.volume = header[5] # vusrtc = volume (cm**3) of the detector\n det.lowneu = header[6] # llnutc = low energy neutron flag\n det.elow = header[7] # etclow = minimum energy [GeV]\n det.ehigh = header[8] # etchgh = maximum energy [GeV]\n det.ne = header[9] # netcbn = number of energy intervals\n det.de = header[10] # detcbn = energy bin width\n\n self.detector.append(det)\n\n if det.lowneu:\n data = fortran.read(f)\n det.ngroup = struct.unpack(\"=i\",data[:4])[0]\n det.egroup = struct.unpack(\"=%df\"%(det.ngroup+1), data[4:])\n print(\"Low energy neutrons scored with %d groups\" % det.ngroup)\n else:\n\t\tdet.ngroup = 0\n\t\tdet.egroup = []\n\n\t size = (det.ngroup+det.ne) * 4\n\t if size != fortran.skip(f):\n\t\traise IOError(\"Invalid USRTRACK file\")\n f.close()", "def world_feature_extract(wav_list, args):\n # define feature extractor\n feature_extractor = FeatureExtractor(\n analyzer=\"world\",\n fs=args.fs,\n shiftms=args.shiftms,\n minf0=args.minf0,\n maxf0=args.maxf0,\n fftl=args.fftl)\n\n for i, wav_name in enumerate(wav_list):\n logging.info(\"now processing %s (%d/%d)\" % (wav_name, i + 1, len(wav_list)))\n\n # load wavfile and apply low cut filter\n fs, x = wavfile.read(wav_name)\n if x.dtype != np.int16:\n logging.warning(\"wav file format is not 16 bit PCM.\")\n x = np.array(x, dtype=np.float64)\n if args.highpass_cutoff != 0:\n x = low_cut_filter(x, fs, cutoff=args.highpass_cutoff)\n\n # check sampling frequency\n if not fs == args.fs:\n logging.error(\"sampling frequency is not matched.\")\n sys.exit(1)\n\n # extract features\n f0, _, _ = feature_extractor.analyze(x)\n uv, cont_f0 = convert_to_continuos_f0(f0)\n cont_f0_lpf = low_pass_filter(cont_f0, int(1.0 / (args.shiftms * 0.001)), cutoff=20)\n codeap = feature_extractor.codeap()\n mcep = feature_extractor.mcep(dim=args.mcep_dim, alpha=args.mcep_alpha)\n\n # concatenate\n cont_f0_lpf = np.expand_dims(cont_f0_lpf, axis=-1)\n uv = np.expand_dims(uv, axis=-1)\n feats = np.concatenate([uv, cont_f0_lpf, mcep, codeap], axis=1)\n\n # save to hdf5\n hdf5name = args.hdf5dir + \"/\" + os.path.basename(wav_name).replace(\".wav\", \".h5\")\n write_hdf5(hdf5name, \"/world\", feats)\n\n # overwrite wav file\n if args.highpass_cutoff != 0 and args.save_wav:\n wavfile.write(args.wavdir + \"/\" + os.path.basename(wav_name), fs, np.int16(x))", "def prepare_lv1_data(feature_choice, file_name):\n\n\tif feature_choice == \"xgb_bin\":\n\n\t\t# Get data\n\t\tdf = pd.read_csv(\"./Data/Raw/%s.csv\" % file_name)\n\t\tif file_name == \"test\":\n\t\t\tdf[\"Response\"]=-1\n\t\t# Get Id and response\n\t\tId = df[\"Id\"].values\n\t\ty = df[\"Response\"].values\n\t\t# Drop Id and Response\n\t\tdf = df.drop([\"Id\", \"Response\"], 1)\n\t\t# Deal with missing values\n\t\tprint \"Dealing with NaN\"\n\t\tdf[\"NULL\"] = df.isnull().sum(axis=1)\n\t\tdf = df.fillna(-1)\n\t\t#Get tsne data\n\t\tprint \"Getting tsne data\"\n\t\tdf_tsne_full = pd.read_csv(\"./Data/Raw/tsne_full_%s.csv\" % file_name, usecols = 
[\"V1\", \"V2\"])\n\t\tdf[[\"V1_full\", \"V2_full\"]] = df_tsne_full[[\"V1\", \"V2\"]]\n\t\tdf_tsne_binary = pd.read_csv(\"./Data/Raw/tsne_binary_%s.csv\" % file_name, usecols = [\"V1\", \"V2\"])\n\t\tdf[[\"V1_binary\", \"V2_binary\"]] = df_tsne_binary[[\"V1\", \"V2\"]]\n\t\tdf_tsne_distance = pd.read_csv(\"./Data/Raw/tsne_distance_%s.csv\" % file_name, usecols = [\"V1\", \"V2\"])\n\t\tdf[[\"V1_distance\", \"V2_distance\"]] = df_tsne_distance[[\"V1\", \"V2\"]]\n\n\t\tprint \"Comparison features\"\n\t\tdf[\"COMP_IH4_IH7\"] = df[\"Insurance_History_4\"].values == df[\"Insurance_History_7\"].values\n\t\tdf[\"COMP_IH4_IH3\"] = np.abs(df[\"Insurance_History_4\"].values - df[\"Insurance_History_3\"].values)\n\t\tdf[\"COMP_IH9_IH7\"] = np.abs(df[\"Insurance_History_9\"].values - df[\"Insurance_History_7\"].values)\n\t\tdf[\"COMP_MH6_MK48\"] = np.abs(df[\"Medical_History_6\"].values - df[\"Medical_Keyword_48\"].values)\n\t\tdf[\"COMP_MH33_MK23\"] = np.abs(df[\"Medical_History_33\"].values - df[\"Medical_Keyword_23\"].values)\n\t\tdf[\"COMP_MH37_MK11\"] = np.abs(df[\"Medical_History_37\"].values - df[\"Medical_Keyword_11\"].values)\n\t\tdf[\"COMP_MH25_MH26\"] = np.abs(df[\"Medical_History_25\"].values - df[\"Medical_History_26\"].values)\n\t\t\n\t\t# factorize categorical variables\n\t\tdf['Product_Info_2_char'] = df.Product_Info_2.str[0]\n\t\tdf['Product_Info_2_num'] = df.Product_Info_2.str[1]\n\t\tdf['Product_Info_2'] = pd.factorize(df['Product_Info_2'])[0]\n\t\tdf['Product_Info_2_char'] = pd.factorize(df['Product_Info_2_char'])[0]\n\t\tdf['Product_Info_2_num'] = pd.factorize(df['Product_Info_2_num'])[0]\n\n\t\t# Shuffle data\n\t\tpermut = np.random.choice(len(df), len(df), replace = False)\n\t\tdf = df.iloc[permut,:]\n\t\tX = df.values\n\t\ty = y[permut]\n\t\tId = Id[permut]\n\n\t\treturn X,y,Id\n\n\telif feature_choice == \"knn\" or feature_choice == \"cosine\":\n\n\t\t# Get data\n\t\tdf = pd.read_csv(\"./Data/Raw/%s.csv\" % file_name)\n\t\tif file_name == \"test\":\n\t\t\tdf[\"Response\"]=-1\n\t\t# Save then drop Id and y\n\t\tId = df[\"Id\"].values\n\t\ty = df[\"Response\"].values\n\t\tdf = df.drop([\"Id\", \"Response\"], 1)\n\t\t# Deal with columns with missing values\n\t\tdf = df.fillna(-1)\n\t\t# Encode categorical\t\t\n\t\tdf['Product_Info_2_char'] = df.Product_Info_2.str[0]\n\t\tdf['Product_Info_2_num'] = df.Product_Info_2.str[1]\n\t\tdf['Product_Info_2'] = pd.factorize(df['Product_Info_2'])[0]\n\t\tdf['Product_Info_2_char'] = pd.factorize(df['Product_Info_2_char'])[0]\n\t\tdf['Product_Info_2_num'] = pd.factorize(df['Product_Info_2_num'])[0]\n\t\t\n\t\tdf['BMI_Age'] = df['BMI'] * df['Ins_Age']\n\t\tmed_keyword_columns = df.columns[df.columns.str.startswith('Medical_Keyword_')]\n\t\tdf['Med_Keywords_Count'] = df[med_keyword_columns].sum(axis=1)\n\t\t# Shuffle data\n\t\tpermut = np.random.choice(len(df), len(df), replace = False)\n\t\tdf = df.iloc[permut,:]\n\t\tX = df.values\n\t\ty = y[permut]\n\t\tId = Id[permut]\n\t\t# # Standardize\n\t\tX = StandardScaler().fit_transform(X)\n\n\t\treturn X,y,Id\n\n\telif feature_choice in [\"linreg\", \"logistic\", \"keras_reg1\"]:\n\n\t\tprint \"Preprocessing\"\n\t\t# Get data\n\t\tdf = pd.read_csv(\"./Data/Raw/%s.csv\" % file_name)\n\t\tif file_name == \"test\":\n\t\t\tdf[\"Response\"]=-1\n\t\t# Get Id and response\n\t\tId = df[\"Id\"].values\n\t\ty = df[\"Response\"].values\n\t\t# Drop Id and Response\n\t\tdf = df.drop([\"Id\", \"Response\"], 1)\n\t\t# Deal with missing values\n\t\tprint \"Dealing with NaN\"\n\t\tdf[\"NULLCOUNT\"] = 
df.isnull().sum(axis=1)\n\t\tdf = df.fillna(df.median())\n\t\t#Get tsne data\n\t\tprint \"Getting tsne data\"\n\t\tdf_tsne_full = pd.read_csv(\"./Data/Raw/tsne_full_%s.csv\" % file_name, usecols = [\"V1\", \"V2\"])\n\t\tdf[[\"V1_full\", \"V2_full\"]] = df_tsne_full[[\"V1\", \"V2\"]]\n\t\tdf_tsne_binary = pd.read_csv(\"./Data/Raw/tsne_binary_%s.csv\" % file_name, usecols = [\"V1\", \"V2\"])\n\t\tdf[[\"V1_binary\", \"V2_binary\"]] = df_tsne_binary[[\"V1\", \"V2\"]]\n\t\tdf_tsne_ternary = pd.read_csv(\"./Data/Raw/tsne_ternary_%s.csv\" % file_name, usecols = [\"V1\", \"V2\"])\n\t\tdf[[\"V1_ternary\", \"V2_ternary\"]] = df_tsne_ternary[[\"V1\", \"V2\"]]\n\t\tdf_tsne_distance = pd.read_csv(\"./Data/Raw/tsne_distance_%s.csv\" % file_name, usecols = [\"V1\", \"V2\"])\n\t\tdf[[\"V1_distance\", \"V2_distance\"]] = df_tsne_distance[[\"V1\", \"V2\"]]\n\t\tdf_tsne_cosine = pd.read_csv(\"./Data/Raw/tsne_cosine_%s.csv\" % file_name, usecols = [\"V1\", \"V2\"])\n\t\tdf[[\"V1_cosine\", \"V2_cosine\"]] = df_tsne_cosine[[\"V1\", \"V2\"]]\n\n\t\t# Get correlation distance data\n\t\tprint \"Getting correlation data\"\n\t\tdf_distance = pd.read_csv(\"./Data/Raw/%s_distance_correlation.csv\" % file_name)\n\t\tlist_col_corr = [col for col in df_distance.columns.values if col != \"Id\" and col !=\"Response\"]\n\t\tdf[list_col_corr] = df_distance[list_col_corr]\n\n\t\t# Add custom features\n\t\tprint \"Feature engineering\"\n\t\tdf[\"SUMKEYWORD\"] = np.zeros(len(df))\n\t\tdf[\"SUMINSURED\"] = np.zeros(len(df))\n\t\tfor col in df.columns.values :\n\t\t\tif \"Key\" in col :\n\t\t\t\tdf[\"SUMKEYWORD\"]+=df[col]\n\t\t\tif \"Insured\" in col :\n\t\t\t\tdf[\"SUMINSURED\"]+=df[col]\n\n\t\tdf[\"CINSINF\"] = np.zeros(len(df))\n\t\tdf[\"CINSINFMAX\"] = np.zeros(len(df))\n\t\tfor i in range(1,8):\n\t\t\tcol = \"InsuredInfo_\" + str(i)\n\t\t\tmin_val = df[col].value_counts().idxmin()\n\t\t\tmax_val = df[col].value_counts().idxmax()\n\t\t\tdf[\"CINSINF\"] += (df[col]==min_val).apply(lambda x : 1 if x else 0)\n\t\t\tdf[\"CINSINFMAX\"] += (df[col]==max_val).apply(lambda x : 1 if x else 0)\n\n\t\tdf[\"CINSHIST\"] = np.zeros(len(df))\n\t\tdf[\"CINSHISTMAX\"] = np.zeros(len(df))\n\t\tfor i in range(1,10):\n\t\t\tif i !=6:\n\t\t\t\tcol = \"Insurance_History_\" + str(i)\n\t\t\t\tmin_val = df[col].value_counts().idxmin()\n\t\t\t\tmax_val = df[col].value_counts().idxmax()\n\t\t\t\tdf[\"CINSHIST\"] += (df[col]==min_val).apply(lambda x : 1 if x else 0)\n\t\t\t\tdf[\"CINSHISTMAX\"] += (df[col]==max_val).apply(lambda x : 1 if x else 0)\n\n\t\tdf[\"CMEDKEY\"] = np.zeros(len(df))\n\t\tdf[\"CMEDKEYMAX\"] = np.zeros(len(df))\n\t\tfor i in range(1,49):\n\t\t\tcol = \"Medical_Keyword_\" + str(i)\n\t\t\tmin_val = df[col].value_counts().idxmin()\n\t\t\tmax_val = df[col].value_counts().idxmax()\n\t\t\tdf[\"CMEDKEY\"] += (df[col]==min_val).apply(lambda x : 1 if x else 0)\n\t\t\tdf[\"CMEDKEYMAX\"] += (df[col]==max_val).apply(lambda x : 1 if x else 0)\n\n\t\tdf[\"CMEDHIST\"] = np.zeros(len(df))\n\t\tdf[\"CMEDHISTMAX\"] = np.zeros(len(df))\n\t\tfor i in range(1,42):\n\t\t\tif i not in [1,2,10,15,24]:\n\t\t\t\tcol = \"Medical_History_\" + str(i)\n\t\t\t\tmin_val = df[col].value_counts().idxmin()\n\t\t\t\tmax_val = df[col].value_counts().idxmax()\n\t\t\t\tdf[\"CMEDHIST\"] += (df[col]==min_val).apply(lambda x : 1 if x else 0)\n\t\t\t\tdf[\"CMEDHISTMAX\"] += (df[col]==max_val).apply(lambda x : 1 if x else 0)\n\n\t\tdf[\"CPRODINFO\"] = np.zeros(len(df))\n\t\tdf[\"CPRODINFOMAX\"] = np.zeros(len(df))\n\t\tfor i in range(1,8):\n\t\t\tif i not in 
[2,4]:\n\t\t\t\tcol = \"Product_Info_\" + str(i)\n\t\t\t\tmin_val = df[col].value_counts().idxmin()\n\t\t\t\tmax_val = df[col].value_counts().idxmax()\n\t\t\t\tdf[\"CPRODINFO\"] += (df[col]==min_val).apply(lambda x : 1 if x else 0)\n\t\t\t\tdf[\"CPRODINFOMAX\"] += (df[col]==max_val).apply(lambda x : 1 if x else 0)\n\n\t\tdf[\"CEMPINFO\"] = np.zeros(len(df))\n\t\tdf[\"CEMPINFOMAX\"] = np.zeros(len(df))\n\t\tfor i in range(2,6):\n\t\t\tcol = \"Employment_Info_\" + str(i)\n\t\t\tmin_val = df[col].value_counts().idxmin()\n\t\t\tmax_val = df[col].value_counts().idxmax()\n\t\t\tdf[\"CEMPINFO\"] += (df[col]==min_val).apply(lambda x : 1 if x else 0)\n\t\t\tdf[\"CEMPINFOMAX\"] += (df[col]==max_val).apply(lambda x : 1 if x else 0)\n\n\t\tprint \"Comparison features\"\n\t\tdf[\"COMP_IH4_IH7\"] = df[\"Insurance_History_4\"].values == df[\"Insurance_History_7\"].values\n\t\tdf[\"COMP_IH4_IH3\"] = np.abs(df[\"Insurance_History_4\"].values - df[\"Insurance_History_3\"].values)\n\t\tdf[\"COMP_IH9_IH7\"] = np.abs(df[\"Insurance_History_9\"].values - df[\"Insurance_History_7\"].values)\n\t\tdf[\"COMP_MH6_MK48\"] = np.abs(df[\"Medical_History_6\"].values - df[\"Medical_Keyword_48\"].values)\n\t\tdf[\"COMP_MH33_MK23\"] = np.abs(df[\"Medical_History_33\"].values - df[\"Medical_Keyword_23\"].values)\n\t\tdf[\"COMP_MH37_MK11\"] = np.abs(df[\"Medical_History_37\"].values - df[\"Medical_Keyword_11\"].values)\n\t\tdf[\"COMP_MH25_MH26\"] = np.abs(df[\"Medical_History_25\"].values - df[\"Medical_History_26\"].values)\n\t\t\n\t\t# factorize categorical variables\n\t\tdf['Product_Info_2_char'] = df.Product_Info_2.str[0]\n\t\tdf['Product_Info_2_num'] = df.Product_Info_2.str[1]\n\t\tdf['Product_Info_2'] = pd.factorize(df['Product_Info_2'])[0]\n\t\tdf['Product_Info_2_char'] = pd.factorize(df['Product_Info_2_char'])[0]\n\t\tdf['Product_Info_2_num'] = pd.factorize(df['Product_Info_2_num'])[0]\n\n\t\t# Custom variables\n\t\tprint \"Kaggle features\"\n\t\tdf['custom_var_1'] = df['Medical_History_15'] < 10\n\t\tdf['custom_var_3'] = df['Product_Info_4'] < 0.075\n\t\tdf['custom_var_4'] = df['Product_Info_4'] == 1\n\t\tdf['custom_var_6'] = (df['BMI'] + 1)**2\n\t\tdf['custom_var_7'] = df['BMI']**0.8\n\t\tdf['custom_var_8'] = df['Ins_Age']**8.5\n\t\tdf['BMI_Age'] = (df['BMI'] * df['Ins_Age'])**2.5\n\t\tdf['custom_var_10'] = df['BMI'] > np.percentile(df['BMI'], 0.8)\n\t\tdf['custom_var_11'] = (df['BMI'] * df['Product_Info_4'])**0.9\n\t\tage_BMI_cutoff = np.percentile(df['BMI'] * df['Ins_Age'], 0.9)\n\t\tdf['custom_var_12'] = (df['BMI'] * df['Ins_Age']) > age_BMI_cutoff\n\t\tdf['custom_var_13'] = (df['BMI'] * df['Medical_Keyword_3'] + 0.5)**3\n\n\t\t# Shuffle data\n\t\tpermut = np.random.choice(len(df), len(df), replace = False)\n\t\tdf = df.iloc[permut,:]\n\t\tX = df.values\n\t\ty = y[permut]\n\t\tId = Id[permut]\n\n\t\tprint \"Standardizing\"\n\t\tX = StandardScaler().fit_transform(X)\n\n\t\treturn X,y,Id\n\n\telif feature_choice == \"xgb_reg\":\n\n\t\tprint \"Preprocessing\"\n\t\t# Get data\n\t\tdf = pd.read_csv(\"./Data/Raw/%s.csv\" % file_name)\n\t\tif file_name == \"test\":\n\t\t\tdf[\"Response\"]=-1\n\t\t# Get Id and response\n\t\tId = df[\"Id\"].values\n\t\ty = df[\"Response\"].values\n\t\t# Drop Id and Response\n\t\tdf = df.drop([\"Id\", \"Response\"], 1)\n\t\t# Deal with missing values\n\t\tprint \"Dealing with NaN\"\n\t\tdf[\"NULLCOUNT\"] = df.isnull().sum(axis=1)\n\t\t#Get tsne data\n\t\t\n\t\t# factorize categorical variables\n\t\tdf['Product_Info_2_char'] = df.Product_Info_2.str[0]\n\t\tdf['Product_Info_2_num'] = 
df.Product_Info_2.str[1]\n\t\tdf['Product_Info_2'] = pd.factorize(df['Product_Info_2'])[0]\n\t\tdf['Product_Info_2_char'] = pd.factorize(df['Product_Info_2_char'])[0]\n\t\tdf['Product_Info_2_num'] = pd.factorize(df['Product_Info_2_num'])[0]\n\n\t\t# Custom variables\n\t\tprint \"Kaggle features\"\n\t\tdf['custom_var_1'] = df['Medical_History_15'] < 10\n\t\tdf['custom_var_3'] = df['Product_Info_4'] < 0.075\n\t\tdf['custom_var_4'] = df['Product_Info_4'] == 1\n\t\tdf['custom_var_6'] = (df['BMI'] + 1)**2\n\t\tdf['custom_var_7'] = df['BMI']**0.8\n\t\tdf['custom_var_8'] = df['Ins_Age']**8.5\n\t\tdf['BMI_Age'] = (df['BMI'] * df['Ins_Age'])**2.5\n\t\tdf['custom_var_10'] = df['BMI'] > np.percentile(df['BMI'], 0.8)\n\t\tdf['custom_var_11'] = (df['BMI'] * df['Product_Info_4'])**0.9\n\t\tage_BMI_cutoff = np.percentile(df['BMI'] * df['Ins_Age'], 0.9)\n\t\tdf['custom_var_12'] = (df['BMI'] * df['Ins_Age']) > age_BMI_cutoff\n\t\tdf['custom_var_13'] = (df['BMI'] * df['Medical_Keyword_3'] + 0.5)**3\n\n\t\t# Shuffle data\n\t\tpermut = np.random.choice(len(df), len(df), replace = False)\n\t\tdf = df.iloc[permut,:]\n\t\tX = df.values\n\t\ty = y[permut]\n\t\tId = Id[permut]\n\n\t\treturn X,y,Id", "def read_annovar_vcf(input_vcf):\n hash_table = {}\n vcf_reader = vcf.Reader(filename=input_vcf)\n\n for i, r in enumerate(vcf_reader):\n hash_variant = {}\n\n hash_fields = dict(r.INFO)\n hash_fields.update(dict(zip(r.samples[0].data._fields, r.samples[0].data)))\n\n chrom = r.CHROM\n pos = str(r.POS)\n ref = str(r.REF)\n alt = str(r.ALT[0])\n l_samples = len(r.samples)\n\n if r.FILTER == []:\n hash_variant['FILTER'] = \"PASS\"\n else:\n hash_variant['FILTER'] = str(r.FILTER)\n\n hash_variant['QUAL'] = str(r.QUAL)\n\n hash_variant['chr'] = chrom.strip()\n hash_variant['pos'] = pos.strip()\n hash_variant['ref'] = ref.strip()\n hash_variant['alt'] = alt.strip()\n hash_variant['Func.refGene'] = str(hash_fields.get('Func.refGene', '.')[0])\n hash_variant['Gene.refGene'] = str(hash_fields.get('Gene.refGene', '.')[0])\n hash_variant['GeneDetail.refGene'] = str(hash_fields.get('GeneDetail.refGene', '.')[0])\n hash_variant['ExonicFunc.refGene'] = str(hash_fields.get('ExonicFunc.refGene', '.')[0])\n hash_variant['AAChange.refGene'] = str(hash_fields.get('AAChange.refGene', '.')[0])\n hash_variant['cytoBand'] = str(hash_fields.get('cytoBand', '.')[0])\n hash_variant['ExAC_ALL'] = str(hash_fields.get('ExAC_ALL', '.'))\n hash_variant['ExAC_AFR'] = str(hash_fields.get('ExAC_AFR', '.'))\n hash_variant['ExAC_AMR'] = str(hash_fields.get('ExAC_AMR', '.'))\n hash_variant['ExAC_EAS'] = str(hash_fields.get('ExAC_EAS', '.'))\n hash_variant['ExAC_FIN'] = str(hash_fields.get('ExAC_FIN', '.'))\n hash_variant['ExAC_NFE'] = str(hash_fields.get('ExAC_NFE', '.'))\n hash_variant['ExAC_OTH'] = str(hash_fields.get('ExAC_OTH', '.'))\n hash_variant['ExAC_SAS'] = str(hash_fields.get('ExAC_SAS', '.'))\n hash_variant['avsnp147'] = str(hash_fields.get('avsnp147', '.')[0])\n hash_variant['SIFT_score'] = str(hash_fields.get('SIFT_score', '.')[0])\n hash_variant['SIFT_pred'] = str(hash_fields.get('SIFT_pred', '.')[0])\n hash_variant['Polyphen2_HDIV_score'] = str(hash_fields.get('Polyphen2_HDIV_score', '.')[0])\n hash_variant['Polyphen2_HDIV_pred'] = str(hash_fields.get('Polyphen2_HDIV_pred', '.')[0])\n hash_variant['Polyphen2_HVAR_score'] = str(hash_fields.get('Polyphen2_HVAR_score', '.')[0])\n hash_variant['Polyphen2_HVAR_pred'] = str(hash_fields.get('Polyphen2_HVAR_pred', '.')[0])\n hash_variant['LRT_score'] = str(hash_fields.get('LRT_score', '.')[0])\n 
hash_variant['LRT_pred'] = str(hash_fields.get('LRT_pred', '.')[0])\n hash_variant['MutationTaster_score'] = str(hash_fields.get('MutationTaster_score', '.')[0])\n hash_variant['MutationTaster_pred'] = str(hash_fields.get('MutationTaster_pred', '.')[0])\n hash_variant['MutationAssessor_score'] = str(hash_fields.get('MutationAssessor_score', '.')[0])\n hash_variant['MutationAssessor_pred'] = str(hash_fields.get('MutationAssessor_pred', '.')[0])\n hash_variant['FATHMM_score'] = str(hash_fields.get('FATHMM_score', '.')[0])\n hash_variant['FATHMM_pred'] = str(hash_fields.get('FATHMM_pred', '.')[0])\n hash_variant['PROVEAN_score'] = str(hash_fields.get('PROVEAN_score', '.')[0])\n hash_variant['PROVEAN_pred'] = str(hash_fields.get('PROVEAN_pred', '.')[0])\n hash_variant['VEST3_score'] = str(hash_fields.get('VEST3_score', '.')[0])\n hash_variant['CADD_raw'] = str(hash_fields.get('CADD_raw', '.')[0])\n hash_variant['CADD_phred'] = str(hash_fields.get('CADD_phred', '.')[0])\n hash_variant['DANN_score'] = str(hash_fields.get('DANN_score', '.')[0])\n hash_variant['fathmm-MKL_coding_score'] = str(hash_fields.get('fathmm-MKL_coding_score', '.')[0])\n hash_variant['fathmm-MKL_coding_pred'] = str(hash_fields.get('fathmm-MKL_coding_pred', '.')[0])\n hash_variant['MetaSVM_score'] = str(hash_fields.get('MetaSVM_score', '.')[0])\n hash_variant['MetaSVM_pred'] = str(hash_fields.get('MetaSVM_pred', '.')[0])\n hash_variant['MetaLR_score'] = str(hash_fields.get('MetaLR_score', '.')[0])\n hash_variant['MetaLR_pred'] = str(hash_fields.get('MetaLR_pred', '.')[0])\n hash_variant['integrated_fitCons_score'] = str(hash_fields.get('integrated_fitCons_score', '.')[0])\n hash_variant['integrated_confidence_value'] = str(hash_fields.get('integrated_confidence_value', '.')[0])\n hash_variant['GERP++_RS'] = str(hash_fields.get('GERP++_RS', '.')[0])\n hash_variant['phyloP7way_vertebrate'] = str(hash_fields.get('phyloP7way_vertebrate', '.')[0])\n hash_variant['phyloP20way_mammalian'] = str(hash_fields.get('phyloP20way_mammalian', '.')[0])\n hash_variant['phastCons7way_vertebrate'] = str(hash_fields.get('phastCons7way_vertebrate', '.')[0])\n hash_variant['phastCons20way_mammalian'] = str(hash_fields.get('phastCons20way_mammalian', '.')[0])\n hash_variant['SiPhy_29way_logOdds'] = str(hash_fields.get('SiPhy_29way_logOdds', '.')[0])\n\n l_samples = r.samples[::]\n l_sample_ids = []\n for sample in l_samples:\n sample_id = sample.sample\n sample_gt = sample.data.GT\n hash_variant[sample_id] = sample_gt\n l_sample_ids.append(sample_id)\n\n hash_table[(chrom, pos, alt)] = hash_variant\n\n return hash_table, l_sample_ids", "def main(args):\n samples = TQSampleFolder.loadLazySampleFolder(args.input_file + \":\" + args.sample_folder)\n reader = TQSampleDataReader(samples)\n\n # this list contains 2-tuples with (\"CutName\", \"HistogramName\")\n hist_info = list()\n hist_info.append((\"Cut2TagMllSR1VBFVeto\", \"NN_SR1_Signal_Rebin\", \"[ee+mm+em+me]\"))\n hist_info.append((\"Cut2TagMllSR1VBFVeto\", \"NN_SR1_Top\", \"[ee+mm+em+me]\"))\n hist_info.append((\"Cut2TagMllSR1VBFVeto\", \"NN_SR1_Other\", \"[ee+mm+em+me]\"))\n\n processes = list()\n processes.append(Process(\"sig\", r\"Signal\", \"/sig/{channel}/{campaign}/nonres\"))\n processes.append(Process(\"bkg\", r\"Background\", \"/bkg/{channel}/{campaign}/[prompt+nonprompt]\"))\n\n output_directory = \"results/mva_yields_soverb/\"\n ensure_directory(output_directory)\n output_file_name = os.path.splitext(os.path.basename(args.input_file))[0] + \".tex\"\n\n with 
LaTeXFile.from_rel_path(os.path.join(output_directory, output_file_name)) as tex:\n tex.document_settings.append(\"landscape\")\n tex.write_header()\n tex.begin_document()\n\n logging.info(\"Getting per-bin significances\")\n for cut_name, histogram_name, channel in hist_info:\n logging.info(\"Processing %s/%s\", cut_name, histogram_name)\n hists = dict()\n for process in processes:\n campaign = \"[c16a+c16d+c16e]\"\n hists[process.name] = reader.getHistogram(\n process.path.format(channel=channel, campaign=campaign), \"{}/{}\".format(cut_name, histogram_name)\n )\n\n table_data = list()\n sigs = list()\n hist_sig = hists[\"sig\"]\n hist_bkg = hists[\"bkg\"]\n for i in range(1, hist_sig.GetNbinsX() + 1):\n s = hist_sig.GetBinContent(i)\n b = hist_bkg.GetBinContent(i)\n\n if b != 0:\n # z = math.sqrt(2 * ((s + b) * math.log(1 + s / b) - s))\n z = s / math.sqrt(b)\n sigs.append(z)\n else:\n z = \"--\"\n table_data.append((i, z))\n logging.debug(\"Bin % 2d: %g\", i, z)\n table_data.append((\"Total\", math.sqrt(sum([z ** 2 for z in sigs]))))\n\n tex.write_table(\n table_data,\n [\"{}\", \"{:.4f}\"],\n [\"Bin\", \"Significance\"],\n \"{}/{}\".format(cut_name, histogram_name),\n format_rows=\"cc\",\n )\n\n tex.end_document()\n tex.write_make_file()", "def filter_variants_rna(file, tumor_coverage, tumor_var_depth,\n tumor_var_freq, num_callers, ensembl_version):\n ens_data = EnsemblRelease(int(ensembl_version))\n variants = list()\n reader = vcfpy.Reader.from_path(file)\n for record in reader:\n for info in record.INFO['CSQ']:\n record_INFO = Record_INFO(*info.split('|'))\n funcensGene = record_INFO.Consequence\n has_func_ens = 'missense' in funcensGene or 'frame' in funcensGene\n avsnp150 = record_INFO.Existing_variation.split('&')[0] if 'rs' in record_INFO.Existing_variation else 'NA'\n gnomad_AF = record_INFO.gnomAD_AF if record_INFO.gnomAD_AF != '' else 'NA'\n cosm_count = record_INFO.Existing_variation.count('COSV')\n cosmic70 = ';'.join(record_INFO.Existing_variation.split('&')[-cosm_count::]) if cosm_count > 0 else 'NA'\n gene = record_INFO.SYMBOL\n\n if has_func_ens:\n called = {x.sample: x.data for x in record.calls if x.called}\n filtered = dict()\n pass_variants = 0\n try:\n if 'HaplotypeCaller' in called and 'PASS' in record.FILTER:\n tumor_DP = int(called['HaplotypeCaller']['DP'])\n token = called['HaplotypeCaller']['AD']\n tumor_AD = int(token[1]) if type(token) is list else int(token)\n tumor_VAF = np.around(tumor_AD / float(tumor_DP) * 100, 3) if tumor_DP > 0.0 else 0.0\n if tumor_DP >= tumor_coverage and tumor_VAF >= tumor_var_freq and tumor_AD >= tumor_var_depth:\n pass_variants += 1\n filtered['HaplotypeCaller'] = '{};{};{}'.format(tumor_DP, tumor_AD, tumor_VAF)\n if 'varscan' in called and 'PASS' in record.FILTER:\n tumor_DP = int(called['varscan']['DP'])\n token = called['varscan']['AD']\n tumor_AD = int(token[0]) if type(token) is list else int(token)\n token = called['varscan']['FREQ']\n value = token[0] if type(token) is list else token\n tumor_VAF = float(value.replace('%', '')) if tumor_DP > 0.0 else 0.0\n if tumor_DP >= tumor_coverage and tumor_VAF >= tumor_var_freq and tumor_AD >= tumor_var_depth:\n pass_variants += 1\n filtered['varscan'] = '{};{};{}'.format(tumor_DP, tumor_AD, tumor_VAF)\n except KeyError:\n continue\n\n variant_epitopes = epitopes(record, record_INFO, ens_data)\n variant = Variant()\n variant.chrom = record.CHROM\n variant.start = record.POS\n variant.ref = record.REF\n variant.alt = record_INFO.Allele\n variant.callers = 
'|'.join(['{}:{}'.format(key, value) for key, value in filtered.items()])\n variant.num_callers = len(filtered)\n variant.status = pass_variants >= num_callers\n variant.epitopes = variant_epitopes\n variant.dbsnp = avsnp150\n variant.gnomad = gnomad_AF\n variant.cosmic = cosmic70\n variant.type = 'rna'\n variant.gene = gene\n variants.append(variant)\n\n return variants", "def mce_filter(freq, f_raw, params):\n\tz = np.exp(-2j*np.pi*freq/f_raw)\n\tb11, b12, b21, b22 = np.array(params[:4])*0.5**14\n\tH = (1+z)**4 / (1-b11*z+b12*z**2) / (1-b21*z+b22*z**2)\n\tH /= 2**4 / (1-b11+b12) / (1-b21+b22)\n\treturn H", "def highpass_filter(display):\r\n for trainOrTest in trainTest:\r\n resultPath = os.path.join('hpf_data', trainOrTest)\r\n originalPath = 'original_data'\r\n for pokemon in pokemons:\r\n pokeData = os.path.join(originalPath, trainOrTest, pokemon)\r\n files = os.listdir(pokeData)\r\n for picture in files:\r\n # Setting path\r\n path = os.path.join(pokeData, picture)\r\n\r\n # Reading image\r\n Img = dip.im_to_float(cv2.imread(path, 1))\r\n\r\n # Splitting the image into blue, green, red portions\r\n b, g, r = cv2.split(Img)\r\n\r\n # Splitting image, taking mean\r\n avg = np.mean([np.mean(b.flatten()), np.mean(g.flatten()), np.mean(r.flatten())])\r\n\r\n # Finding acceptable frequency\r\n precision = 0.002\r\n target = avg / 12\r\n _, j = hpf(b, target, precision)\r\n\r\n # Running hpf\r\n b_out, _ = hpf(b, target, precision, j)\r\n g_out, _ = hpf(g, target, precision, j)\r\n r_out, _ = hpf(r, target, precision, j)\r\n\r\n # Normalizing mean to 1\r\n b_out = b_out * (1 / np.max(b_out))\r\n g_out = g_out * (1 / np.max(g_out))\r\n r_out = r_out * (1 / np.max(r_out))\r\n\r\n # Combiner (Logic)\r\n std = 100 # how many standard deviations above mean for rgb parts\r\n sigmas = [np.var(b_out) ** 0.5, np.var(g_out) ** 0.5, np.var(r_out) ** 0.5]\r\n means = [np.mean(b_out), np.mean(g_out), np.mean(r_out)]\r\n output = combiner(b_out, g_out, r_out, means + sigmas * std)\r\n\r\n output = dip.float_to_im(output)\r\n\r\n if display:\r\n plt.subplot(1, 2, 1)\r\n plt.title('Original Image')\r\n plt.imshow(Img)\r\n plt.subplot(1, 2, 2)\r\n plt.title(\"High pass filter result\")\r\n plt.imshow(output)\r\n\r\n resultPic = os.path.join(resultPath, pokemon, picture)\r\n # Saving resultant image\r\n dip.im_write(output, resultPic)", "def test_fc(self):\n self.assertEqual(self.nhf.metadata[\"ndim\"], 3)\n self.assertEqual(self.nhf.metadata[\"ngroup\"], 4)\n self.assertEqual(self.nhf.metadata[\"ninti\"], 5)\n self.assertEqual(self.nhf.metadata[\"nintj\"], 5)\n self.assertEqual(self.nhf.metadata[\"nintk\"], 6)\n self.assertEqual(self.nhf.metadata[\"nSurf\"], 6)\n self.assertEqual(self.nhf.metadata[\"nMom\"], 5)\n self.assertEqual(self.nhf.metadata[\"nintxy\"], 19)\n self.assertEqual(self.nhf.metadata[\"npcxy\"], 144)\n self.assertEqual(self.nhf.metadata[\"iaprx\"], 4)\n self.assertEqual(self.nhf.metadata[\"iaprxz\"], 3)\n\n variantControlInfo = nhflux.FILE_SPEC_1D_KEYS_VARIANT11\n for info in variantControlInfo:\n self.assertTrue(info not in self.nhf.metadata)", "def processData(args):\n inputfile=args.inputfile\n threshold=args.threshold\n mask_file=args.maskfile\n crop=args.crop\n voxelsize=args.voxelsize\n binaryClosing=args.binaryclosing\n binaryOpening=args.binaryopening\n vtfile=args.vtfile\n vt2esofspy=args.vt2esofspy\n output_report_csv_file=args.output_report_csv_file\n \n # Reading/Generating data\n if inputfile is None: # # Using generated sample data\n logger.info('Generating sample data...')\n 
metadata = {'voxelsize_mm': [1, 1, 1]}\n data3d = generate_sample_data(1, 0, 0)\n else: # Normal runtime\n dr = datareader.DataReader()\n data3d, metadata = dr.Get3DData(inputfile)\n\n # Custom voxel size\n if voxelsize is not None:\n metadata['voxelsize_mm'] = voxelsize\n\n # Crop data\n if crop is not None:\n logger.debug('Croping data: %s', str(crop))\n data3d = data3d[crop[0]:crop[1], crop[2]:crop[3], crop[4]:crop[5]].copy()\n\n # Init HistologyAnalyser object\n logger.debug('Init HistologyAnalyser object')\n ha = HistologyAnalyser(data3d, metadata, threshold, \n binaryClosing=binaryClosing, binaryOpening=binaryOpening, \n nogui=True, aggregate_near_nodes_distance=args.aggregatenearnodes,\n hist_length_range=args.hist_length_range,\n hist_radius_range=args.hist_radius_range\n )\n\n # Remove Area = Load mask from file\n if mask_file is not None:\n logger.debug('Loading mask from file...')\n mask = misc.obj_from_file(filename=mask_file, filetype='pickle')\n if ha.data3d.shape == mask.shape:\n ha.data3d_masked = mask\n ha.data3d[mask == 0] = np.min(ha.data3d)\n else:\n raise ValueError('Mask file has wrong dimensions '+str(mask.shape))\n \n # Segmentation\n logger.debug('Segmentation')\n ha.data_to_skeleton()\n\n # Computing statistics\n logger.info(\"# ## ## ## ## statistics\")\n ha.data_to_statistics()\n\n # Saving files\n logger.info(\"# ## ## write stats to file\")\n ha.writeStatsToCSV()\n if vtfile is not None:\n ha.writeStatsToYAML(vtfile)\n if vt2esofspy is not None:\n ha.exportVT2esofspy(vt2esofspy)\n\n # ## Histology report\n logger.info(\"# ## ## Histology report\")\n hr = HistologyReport(ha.hr_hist_length_range, ha.hr_hist_radius_range)\n hr.data = ha.stats\n\n # Add results Record\n if crop is not None:\n label = str(crop[0])+\"-\"+str(crop[1])\n else:\n label = \"0-end\"\n # pass label into addResultRecord with stats\n hr.data['general']['label'] = label\n\n hr.generateStats()\n # TODO Rename functions\n hr.writeReportToCSV()\n hr.writeReportToYAML()\n\n \n if inputfile is None:\n hr.addResultsRecord(label=label, recordfilename=output_report_csv_file)\n else:\n hr.addResultsRecord(label=label, datapath=inputfile, recordfilename=output_report_csv_file)\n\n # ## End\n logger.info('Finished')", "def filter_variants_dna(file, normal_coverage, tumor_coverage, tumor_var_depth,\n tumor_var_freq, normal_var_freq, t2n_ratio, num_callers,\n num_callers_indel, ensembl_version):\n\n ens_data = EnsemblRelease(int(ensembl_version))\n variants = list()\n reader = vcfpy.Reader.from_path(file)\n for record in reader:\n for info in record.INFO['CSQ']:\n record_INFO = Record_INFO(*info.split('|'))\n funcensGene = record_INFO.Consequence\n has_func_ens = 'missense' in funcensGene or 'frame' in funcensGene\n avsnp150 = record_INFO.Existing_variation.split('&')[0] if 'rs' in record_INFO.Existing_variation else 'NA'\n gnomad_AF = record_INFO.gnomAD_AF if record_INFO.gnomAD_AF != '' else 'NA'\n cosm_count = record_INFO.Existing_variation.count('COSV')\n cosmic70 = ';'.join(record_INFO.Existing_variation.split('&')[-cosm_count::]) if cosm_count > 0 else 'NA'\n gene = record_INFO.SYMBOL\n\n if has_func_ens:\n called = {x.sample: x.data for x in record.calls if x.called}\n filtered = dict()\n pass_snp = 0\n pass_indel = 0\n try:\n if 'NORMAL.mutect' in called and 'TUMOR.mutect' in called and 'PASS' in record.FILTER:\n normal_DP = int(called['NORMAL.mutect']['DP'])\n token = called['NORMAL.mutect']['AD']\n normal_AD = int(token[1]) if type(token) is list else int(token)\n token = 
called['NORMAL.mutect']['AF']\n value = token[0] if type(token) is list else token\n normal_VAF = np.around(float(value) * 100,\n 3) if normal_DP > 0.0 else 0.0\n tumor_DP = int(called['TUMOR.mutect']['DP'])\n token = called['TUMOR.mutect']['AD']\n tumor_AD = int(token[1]) if type(token) is list else int(token)\n token = called['TUMOR.mutect']['AF']\n value = token[0] if type(token) is list else token\n tumor_VAF = np.around(float(value) * 100, 3)\n tumor_normal_ratio = tumor_VAF / normal_VAF if normal_VAF != 0 else t2n_ratio\n if normal_DP >= normal_coverage and tumor_DP >= tumor_coverage \\\n and tumor_VAF >= tumor_var_freq and tumor_AD >= tumor_var_depth \\\n and normal_VAF <= normal_var_freq and tumor_normal_ratio >= t2n_ratio:\n pass_snp += 1\n filtered['mutect'] = '{};{};{};{};{};{}'.format(normal_DP,\n normal_AD,\n normal_VAF,\n tumor_DP,\n tumor_AD,\n tumor_VAF)\n if 'NORMAL.somaticsniper' in called and 'TUMOR.somaticsniper' in called:\n normal_DP = int(called['NORMAL.somaticsniper']['DP'])\n normal_AD = sum(called['NORMAL.somaticsniper']['DP4'][2:])\n normal_VAF = np.around((normal_AD / float(normal_DP)) * 100, 3) if normal_DP > 0.0 else 0.0\n tumor_DP = int(called['TUMOR.somaticsniper']['DP'])\n tumor_AD = sum(called['TUMOR.somaticsniper']['DP4'][2:])\n tumor_VAF = np.around((tumor_AD / float(tumor_DP)) * 100, 3)\n tumor_normal_ratio = tumor_VAF / normal_VAF if normal_VAF != 0 else t2n_ratio\n is_somatic = int(called['TUMOR.somaticsniper']['SS']) == 2\n if normal_DP >= normal_coverage and tumor_DP >= tumor_coverage \\\n and tumor_VAF >= tumor_var_freq and tumor_AD >= tumor_var_depth \\\n and normal_VAF <= normal_var_freq and tumor_normal_ratio >= t2n_ratio and is_somatic:\n pass_snp += 1\n if is_somatic:\n filtered['somaticsniper'] = '{};{};{};{};{};{}'.format(normal_DP,\n normal_AD,\n normal_VAF,\n tumor_DP,\n tumor_AD,\n tumor_VAF)\n\n if ('NORMAL.varscan' in called and 'TUMOR.varscan' in called) \\\n or ('NORMAL.varscan_indel' in called and 'TUMOR.varscan_indel' in called) \\\n and 'PASS' in record.FILTER and 'SOMATIC' in record.INFO:\n label_index = 'varscan' if 'NORMAL.varscan' in called else 'varscan_indel'\n normal_DP = int(called['NORMAL.{}'.format(label_index)]['DP'])\n normal_AD = sum(called['NORMAL.{}'.format(label_index)]['DP4'][2:])\n token = called['NORMAL.{}'.format(label_index)]['FREQ']\n value = token[0] if type(token) is list else token\n normal_VAF = float(value.replace('%', ''))\n tumor_DP = int(called['TUMOR.{}'.format(label_index)]['DP'])\n tumor_AD = sum(called['TUMOR.{}'.format(label_index)]['DP4'][2:])\n token = called['TUMOR.{}'.format(label_index)]['FREQ']\n value = token[0] if type(token) is list else token\n tumor_VAF = float(value.replace('%', ''))\n tumor_normal_ratio = tumor_VAF / normal_VAF if normal_VAF != 0 else t2n_ratio\n if normal_DP >= normal_coverage and tumor_DP >= tumor_coverage \\\n and tumor_VAF >= tumor_var_freq and tumor_AD >= tumor_var_depth \\\n and normal_VAF <= normal_var_freq and tumor_normal_ratio >= t2n_ratio:\n if 'indel' in label_index:\n pass_indel += 1\n else:\n pass_snp += 1\n filtered[label_index] = '{};{};{};{};{};{}'.format(normal_DP,\n normal_AD,\n normal_VAF,\n tumor_DP,\n tumor_AD,\n tumor_VAF)\n if 'NORMAL.strelka' in called and 'TUMOR.strelka' in called and 'PASS' in record.FILTER:\n ref_index = record.REF + 'U'\n alt_index = str(record.ALT[0].serialize()) + 'U'\n # normal_DP = int(called['NORMAL.strelka']['DP'])\n token = called['NORMAL.strelka'][ref_index]\n normal_AD1 = int(token[0]) if type(token) is list else 
int(token)\n token = called['NORMAL.strelka'][alt_index]\n normal_AD2 = int(token[0]) if type(token) is list else int(token)\n normal_DP = normal_AD1 + normal_AD2\n normal_VAF = np.around((normal_AD2 / float(normal_DP)) * 100, 3) if normal_DP > 0.0 else 0.0\n # tumor_DP = int(called['TUMOR.strelka']['DP'])\n token = called['TUMOR.strelka'][ref_index]\n tumor_AD1 = int(token[0]) if type(token) is list else int(token)\n token = called['TUMOR.strelka'][alt_index]\n tumor_AD2 = int(token[0]) if type(token) is list else int(token)\n tumor_DP = tumor_AD1 + tumor_AD2\n tumor_VAF = np.around((tumor_AD2 / float(tumor_DP)) * 100, 3)\n tumor_normal_ratio = tumor_VAF / normal_VAF if normal_VAF != 0 else t2n_ratio\n if normal_DP >= normal_coverage and tumor_DP >= tumor_coverage \\\n and tumor_VAF >= tumor_var_freq and tumor_AD2 >= tumor_var_depth \\\n and normal_VAF <= normal_var_freq and tumor_normal_ratio >= t2n_ratio:\n pass_snp += 1\n filtered['strelka'] = '{};{};{};{};{};{}'.format(normal_DP,\n normal_AD2,\n normal_VAF,\n tumor_DP,\n tumor_AD2,\n tumor_VAF)\n if 'NORMAL.strelka_indel' in called and 'TUMOR.strelka_indel' in called and 'PASS' in record.FILTER:\n # normal_DP = int(called['NORMAL.strelka_indel']['DP'])\n token = called['NORMAL.strelka_indel']['TAR']\n normal_AD1 = int(token[0]) if type(token) is list else int(token)\n token = called['NORMAL.strelka_indel']['TIR']\n normal_AD2 = int(token[0]) if type(token) is list else int(token)\n normal_DP = normal_AD1 + normal_AD2\n normal_VAF = np.around((normal_AD2 / float(normal_DP)) * 100, 3) if normal_DP > 0.0 else 0.0\n # tumor_DP = int(called['TUMOR.strelka_indel']['DP'])\n token = called['TUMOR.strelka_indel']['TAR']\n tumor_AD1 = int(token[0]) if type(token) is list else int(token)\n token = called['TUMOR.strelka_indel']['TIR']\n tumor_AD2 = int(token[0]) if type(token) is list else int(token)\n tumor_DP = tumor_AD1 + tumor_AD2\n tumor_VAF = np.around((tumor_AD2 / float(tumor_DP)) * 100, 3)\n tumor_normal_ratio = tumor_VAF / normal_VAF if normal_VAF != 0 else t2n_ratio\n if normal_DP >= normal_coverage and tumor_DP >= tumor_coverage \\\n and tumor_VAF >= tumor_var_freq and tumor_AD2 >= tumor_var_depth \\\n and normal_VAF <= normal_var_freq and tumor_normal_ratio >= t2n_ratio:\n pass_indel += 1\n filtered['strelka_indel'] = '{};{};{};{};{};{}'.format(normal_DP,\n normal_AD2,\n normal_VAF,\n tumor_DP,\n tumor_AD2,\n tumor_VAF)\n except KeyError:\n continue\n\n variant_epitopes = epitopes(record, record_INFO, ens_data)\n variant = Variant()\n variant.chrom = record.CHROM\n variant.start = record.POS\n variant.ref = record.REF\n variant.alt = record.ALT[0].serialize()\n variant.callers = '|'.join(['{}:{}'.format(key, value) for key, value in filtered.items()])\n variant.num_callers = len(filtered)\n variant.status = pass_snp >= num_callers or pass_indel >= num_callers_indel\n variant.epitopes = variant_epitopes\n variant.dbsnp = avsnp150\n variant.gnomad = gnomad_AF\n variant.cosmic = cosmic70\n variant.type = 'dna'\n variant.gene = gene\n variants.append(variant)\n\n return variants", "def NbSamplesV(self, *args):\n return _Adaptor3d.Adaptor3d_TopolTool_NbSamplesV(self, *args)" ]
[ "0.59488827", "0.5802758", "0.58007336", "0.57328737", "0.56093895", "0.55808663", "0.5559472", "0.5527993", "0.55187845", "0.5462843", "0.54014647", "0.5394218", "0.53905374", "0.53511345", "0.53466797", "0.5334894", "0.5314936", "0.5283237", "0.5243797", "0.5231647", "0.52301526", "0.5215122", "0.52148986", "0.5213243", "0.5209299", "0.51476276", "0.5142385", "0.513135", "0.5128017", "0.5116325" ]
0.6672723
0
A convenience function for getting a single suggestion.
def get_suggestion():
    global _suggestions_iterator
    while True:
        try:
            return next(_suggestions_iterator)
        except StopIteration:
            _suggestions_iterator = iter(suggestions)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def suggestion(self, suggestion_id):\r\n return suggestions.Suggestion(self, suggestion_id)", "def pull_suggestion(self, callback, who, arg):\n\t\t\n random_sug = self.dong.db.get_random_row('suggest')\n res = self.google_suggest(callback, who, random_sug[2], False)\n\t\t\n w = res.split()\n if w[0].lower() in ('what', 'why', 'was', 'where', 'who', 'which', 'whom', 'when', 'how', 'is', 'are', 'did'):\n if w[-1:] != '?':\n res = res + '?'\n return res.capitalize()", "def get_suggestion(artist_name):\n return 'do some magic!'", "def get(self, id):\n adm = Administration()\n s = adm.get_suggestion_by_id(id)\n return s", "def get(self, id):\n adm = Administration()\n s = adm.get_suggestion_by_id(id)\n return s", "def suggestion(self, suggestion_id):\n return suggestions.Suggestion(self, suggestion_id)", "def suggestion(self):\n raise NotImplementedError()", "def get_room(self):\n\n return self.suggestion_set[0]", "def fetchSuggestion(self, keyword, seed_keyword, meta_keyword):\n # user agent is an HTTP browser request header that gives servers information regarding the client device and/or operating system on which the browser is running\n user_agent_list = [\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.1.1 Safari/605.1.15',\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:77.0) Gecko/20100101 Firefox/77.0',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:77.0) Gecko/20100101 Firefox/77.0',\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36',\n ]\n url = \"http://suggestqueries.google.com/complete/search?client=chrome&hl={}&gl={}&callback=?&q={}\".format(\n self.language, self.country, keyword)\n user_agent = random.choice(user_agent_list)\n headers = {\"user-agent\": user_agent, \"dataType\": \"jsonp\"}\n response = requests.get(url, headers=headers, verify=True)\n if response.status_code == 200:\n suggestions = json.loads(response.text)\n sugg = []\n index = 0\n relevancies = []\n suggesttypes = []\n suggestsubtypes = []\n verbatimrelevance = \"\"\n if \"google:suggestrelevance\" in suggestions[4].keys():\n relevancies = suggestions[4]['google:suggestrelevance']\n if \"google:suggesttype\" in suggestions[4].keys():\n suggesttypes = suggestions[4]['google:suggesttype']\n if \"google:verbatimrelevance\" in suggestions[4].keys():\n verbatimrelevance = suggestions[4]['google:verbatimrelevance']\n if \"google:suggestsubtypes\" in suggestions[4].keys():\n suggestsubtypes = suggestions[4]['google:suggestsubtypes']\n for word in suggestions[1]:\n if self.checkSeedKeywordExists(word, meta_keyword):\n sugg.append({\n 'keyword': word,\n 'relevancy_score': relevancies[index] if len(relevancies) > 0 else None,\n 'suggesttype':suggesttypes[index] if len(suggesttypes) > 0 else None,\n 'verbatimrelevance' : verbatimrelevance,\n 'seed_keyword': seed_keyword,\n 'meta_keyword': meta_keyword,\n 'suggestsubtype' : suggestsubtypes[index] if len(suggestsubtypes) > 0 else None,\n })\n else:\n continue\n index += 1\n return sugg\n # returning false when google blocks an ip for some time \n return False", "def _suggest(self, trial_id: int) -> Optional[TrialSuggestion]:\n raise NotImplementedError", "def suggestion(self, suggestion_id):\r\n return suggestions.ForumSuggestion(self, suggestion_id)", "def get_character(self):\n\n return self.suggestion_set[2]", "def 
get_search_suggestions(Resource=None, SuggestionQuery=None):\n pass", "def _load_suggestion(self):\n curItem = self.tree.focus()\n parent = self.tree.parent(curItem)\n\n categories = ['approved', 'conflicts', 'suggestions', 'unknown', \\\n 'cldr',]\n if parent is '':\n #skip it\n pass\n else:\n if parent not in categories:\n curTerm = parent\n category = self.tree.parent(parent)\n else:\n curTerm = curItem\n category = parent\n if CurItem != CurTerm:\n self.preferred.set(self.tree.item(curItem)['values'][1])", "def suggest(word, cutoff=0.77):\n if word in LOOKUP_TABLE:\n return LOOKUP_TABLE[word]\n\n guess = difflib.get_close_matches(word, MOST_COMMON_DOMAINS, n=1, cutoff=cutoff)\n if guess and len(guess) > 0:\n return guess[0]\n return word", "def get_suggestion_set(self):\n\n return self.suggestion_set", "def google_suggest(self, callback, who, arg, store=True):\n\t\t\n sugs = self.get_xml('http://google.com/complete/search', {'output':'toolbar', 'q': arg})\n\n if sugs is not None:\n try:\n sugs = [x[0].get('data') for x in sugs]\n except Exception, e:\n print \"XML error with Google Suggest: %s\" % e\n\t\t\t\n suggestions = self.remove_lyrics(sugs)\n random_sug = choice(suggestions)\n\t\t\t\n # Same string as we started with - roll again\n if random_sug == arg:\n try:\n suggestions.pop(suggestions.index(random_sug))\n except:\n pass\n random_sug = choice(suggestions)\n\t\t\t\t\n if random_sug is not None:\n if store:\n self.store_suggestion(who, arg)\n random_sug.strip('')\n random_sug.strip('\\r')\n w = random_sug.split()\n if w[0].lower() in ('what', 'why', 'was', 'where', 'who', 'which', 'whom', 'when', 'how', 'is', 'are', 'did'):\n if '?' not in w[-1:]:\n random_sug = random_sug + '?'\n return random_sug", "def askOne(self, query, context):\n results = self.ask(query, context, True)\n return results.iterator().next() if (len(results) > 0) else None", "def fake_get_hint(_):\r\n return {'best_hint': 'This is the best hint.',\r\n 'rand_hint_1': 'A random hint',\r\n 'rand_hint_2': 'Another random hint',\r\n 'answer': '42.5'}", "async def Suggestion(self, ctx, *, sug:str=None):\r\n\t\tif not sug:\t\r\n\t\t\treturn await ctx.send('No Suggestions given')\r\n\r\n\t\tif \tself.settings.BotConfig('SuggestionChannel') != 0:\r\n\t\t\tch = self.bot.get_channel(self.settings.BotConfig('SuggestionChannel'))\r\n\t\t\tif ctx.author.top_role.colour:\r\n\t\t\t\tcol = ctx.author.top_role.colour\r\n\t\t\telse:\r\n\t\t\t\tcol =self.settings.randomColor()\r\n\r\n\t\t\tembed=discord.Embed(title=\"Suggestion\", description=f\"{sug}\", color=col)\r\n\t\t\tembed.set_footer(text=f\"Server: {ctx.guild} || User: {ctx.author}\")\r\n\t\t\tawait ctx.send('I have sent Suggestion')\r\n\t\t\tawait ch.send(embed=embed)\r\n\t\telse:\r\n\t\t\tawait ctx.send('No Suggestion channel found')", "def Suggest(self, request, global_params=None):\n config = self.GetMethodConfig('Suggest')\n return self._RunMethod(\n config, request, global_params=global_params)", "def handle_suggest():\n return 0", "def suggest(self, **kwargs):\n return suggest.suggest(self._host, self._session, **kwargs)", "def get_option(self, sorting_option_string=None, max_number=20):\n if sorting_option_string is None:\n print(\"sorting option string is not given. 
It will be a default option, score\")\n sorting_option_string = 'score'\n\n sorting_option = SortingOption.get_type_of(sorting_option_string)\n option = SuggestionOption(sorting_option=sorting_option, max_number=max_number)\n return option", "def suggest(self, trial_id: int) -> Optional[TrialSuggestion]:\n ret_val = self._suggest(trial_id)\n if ret_val is not None:\n assert isinstance(ret_val, TrialSuggestion)\n if ret_val.config is not None:\n ret_val = TrialSuggestion(\n spawn_new_trial_id=ret_val.spawn_new_trial_id,\n checkpoint_trial_id=ret_val.checkpoint_trial_id,\n config=self._postprocess_config(ret_val.config),\n )\n return ret_val", "def suggestions(prefix : str = typer.Argument(...), n : int = typer.Option(5, help=\"Number of suggestions to display\")): \n response_url = url + \"/suggestions/\" + prefix + \"?suggestion_nums=\" + str(n) \n response = requests.get(response_url) \n for i in range(len(response.json())):\n typer.echo(response.json()[i])", "def spelling_suggestions(drug_name):\n if not isinstance(drug_name, str):\n raise TypeError(\"drug_name must be a string.\")\n r = requests.get(f\"https://rxnav.nlm.nih.gov/REST/spellingsuggestions.json?name={drug_name}\")\n response = r.json()\n suggestions = response['suggestionGroup']['suggestionList']['suggestion']\n return suggestions", "def autosuggest(self, suggest, focus=None, clip=None, display='full',\n format='json', lang=None):\n\n params = {\n 'addr': suggest,\n 'display': display,\n 'format': format,\n 'lang': lang or self.lang,\n }\n if focus:\n params.update({\n 'focus': focus\n })\n if clip:\n params.update({\n 'clip': clip\n })\n\n return self._request('/autosuggest', params)", "def get_interactive_match(self, choices, query):\n if query in self.SKIP_KEYWORDS:\n return None\n results = process.extract(query, choices, limit=10) # fuzzy string matching\n best_match = results[0]\n second_best_match = results[1]\n if best_match[1] == second_best_match[1] or best_match[1] < 50: # if inconclusive or low score\n self.print(\"Couldn't find a conclusive match for '%s'. Best matches:\" % (query))\n i = 0\n for result in results:\n i += 1\n print(\" [%i] %s\" % (i, result[0]))\n answer = input(\"Choose one or specify a less ambiguous query: \")\n self.clear_lines(2 + len(results))\n if answer.isdigit() and int(answer) <= len(results):\n return results[int(answer) - 1][0]\n else:\n return self.get_interactive_match(choices, answer)\n else:\n return best_match[0]", "def name(self):\n # type: () -> Text\n\n return \"form_suggestion\"" ]
[ "0.70957506", "0.7064316", "0.6983561", "0.6963836", "0.6963836", "0.6800833", "0.6749406", "0.6550867", "0.6436159", "0.6428319", "0.6357224", "0.62608695", "0.62456524", "0.6239825", "0.6186077", "0.60764414", "0.6011701", "0.5944827", "0.5927803", "0.582557", "0.5824507", "0.58002", "0.5763963", "0.5749", "0.5747528", "0.5699837", "0.56868166", "0.5669288", "0.5618347", "0.5617589" ]
0.7540617
0
Builds game board by retrieving a sudoku puzzle preset from a sudoku dataset and then sets up the game board. Also calls a backtracking algorithm to derive a solution for the sudoku puzzle.
def build_game_board(self):
    # retrieves new sudoku puzzle from dataset
    sudoku_set = self.data.get_sudoku_set()
    sudoku_problem, sudoku_solution = sudoku_set[0], sudoku_set[1]
    # removes old game boards
    self.board = []
    self.puzzle = []
    self.alg_solution = []
    self.data_solution = []
    # sets up sudoku puzzle to array format
    segment = []
    for num in sudoku_problem:
        segment.append(int(num))
        if len(segment) == 9:
            self.board.append(segment)
            self.puzzle.append(segment[:])
            segment = []
    self.alg_solution = alg.solve_sudoku(self.puzzle)  # uses sudoku backtracking algorithm to solve puzzle
    # sets up the provided sudoku puzzle solution from dataset to array format
    for num in sudoku_solution:
        segment.append(int(num))
        if len(segment) == 9:
            self.data_solution.append(segment)
            segment = []
    self.game_state = "Not Solved, Keep Trying!"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def solveSudoku(board):\n # represents all numbers in a specific row, col, box\n # format: if (5,9) is in rows, that means row 5 contains digit 9\n\t\t# format: if (3, 2) is in cols, that means col 3 contains digit 2\n\t\t# format: if (0,2,8) is in boxes, that means box (0,2) contains 8\n\t\t# cellsToFill is a stack that holds all the (i,j) cells we need to fill\n rows, cols, boxes = set(), set(), set()\n cellsToFill = []\n m, n = len(board), len(board[0])\n \n def initDataSets():\n for i in range(m):\n for j in range(n):\n char = board[i][j]\n if char == '.':\n cellsToFill.append((i,j))\n else:\n addToDataSets((i, char), (j, char), (i//3, j//3, char))\n\n def addToDataSets(curRow, curCol, curBox):\n rows.add(curRow)\n cols.add(curCol)\n boxes.add(curBox)\n \n def removeFromDataSets(curRow, curCol, curBox):\n rows.remove(curRow)\n cols.remove(curCol)\n boxes.remove(curBox)\n \n def backtrack():\n if not cellsToFill:\n return True\n \n i, j = cellsToFill.pop()\n for char in '123456789':\n # check if the number is already in a row/col/box, if it is then skip to the next number\n curRow, curCol, curBox = (i, char), (j, char), (i//3, j//3, char)\n if curRow in rows or curCol in cols or curBox in boxes: continue\n \n # if not, add the number to the row/col/box\n addToDataSets(curRow, curCol, curBox)\n board[i][j] = char\n \n # start the recursive call for inserting the next number\n if (backtrack()):\n return True\n \n # backtrack wasn't successful, remove the number from the row/col/box\n removeFromDataSets(curRow, curCol, curBox)\n board[i][j] = '.'\n \n cellsToFill.append((i,j))\n return False\n \n initDataSets()\n print(board)\n backtrack()", "def solve_soduku(sudoku, screen):\n\n myfont = pygame.font.SysFont('Times New Roman', 30)\n\n # Creates a copy of the sudoku board so that we don't mess up the original board\n solved_board = sudoku.board\n\n # Stores the index of the next number that should be tried (the index will be used with the possible_nums list)\n try_new_nums = [[0] * 9 for y in range(9)]\n\n # Creates a list that will act like a stack for the depth first search (stores tuples (row, col) for each unsolved square)\n nodes = [sudoku.find_next_empty_node((0, -1))]\n\n done = False\n\n # Keeps running until the puzzle is either solved or runs out of possible combinations\n while len(nodes) != 0:\n\n time.sleep(.001)\n\n if not done:\n update_grid(screen, (nodes[len(nodes) - 1][0], nodes[len(nodes) - 1][1]), solved_board, myfont)\n draw_lines(screen, [0, 0, 0])\n\n pygame.display.update()\n\n # finds all possible numbers that can go into the current unsolved square\n one = set(sudoku.check_vertically(nodes[len(nodes) - 1], solved_board))\n two = set(sudoku.check_horizontally(nodes[len(nodes) - 1], solved_board))\n three = set(sudoku.check_box(nodes[len(nodes) - 1], solved_board))\n possible_nums = list(one.intersection(two).intersection(three))\n\n # Determines if there is a number that can be put into the current unsolved square\n if len(possible_nums) > 0:\n\n # Stores the current number in the current unsolved square\n curr_num = solved_board[nodes[len(nodes) - 1][0]][nodes[len(nodes) - 1][1]]\n\n # Stores the next number that will be tried in the current unsolved square\n possible_next_num = possible_nums[\n try_new_nums[nodes[len(nodes) - 1][0]][nodes[len(nodes) - 1][1]] % len(possible_nums)]\n\n # Makes sure that the code doesn't get stuck trying the same combos\n if try_new_nums[nodes[len(nodes) - 1][0]][nodes[len(nodes) - 1][1]] == len(possible_nums):\n 
solved_board[nodes[len(nodes) - 1][0]][nodes[len(nodes) - 1][1]] = 0\n try_new_nums[nodes[len(nodes) - 1][0]][nodes[len(nodes) - 1][1]] = 0\n nodes.pop()\n continue\n\n # Makes sure that the code doesn't get stuck on trying the same number\n if possible_next_num == curr_num:\n solved_board[nodes[len(nodes) - 1][0]][nodes[len(nodes) - 1][1]] = 0\n nodes.pop()\n continue\n\n # Sets the unsolved square to the next number that is to be tried\n solved_board[nodes[len(nodes) - 1][0]][nodes[len(nodes) - 1][1]] = possible_next_num\n\n # Changes which index will be used to find a different number if the new number does not work\n try_new_nums[nodes[len(nodes) - 1][0]][nodes[len(nodes) - 1][1]] += 1\n\n # if there are no possible numbers for the current square, it backtracks to the last number that can change\n else:\n solved_board[nodes[len(nodes) - 1][0]][nodes[len(nodes) - 1][1]] = 0\n nodes.pop()\n continue\n\n # Determines if there is still an empty unsolved square left\n if sudoku.has_next_emtpy_node(nodes[len(nodes) - 1]):\n nodes.append(sudoku.find_next_empty_node(nodes[len(nodes) - 1]))\n else:\n update_grid(screen, (nodes[len(nodes) - 1][0], nodes[len(nodes) - 1][1]), solved_board, myfont)\n draw_lines(screen, [0, 0, 0])\n done = True", "def solveSudoku(self, board: List[List[str]]) -> None:\n # initialize the hashmaps\n for row in range(self.size):\n for col in range(self.size):\n value = board[row][col]\n if value != '.':\n self.rows[row].add(value)\n self.cols[col].add(value)\n self.cells[self.cell_idx(row, col)].add(value)\n \n # start backtracking at the first field\n self.backtrack(board, 0)\n return board", "def solveSudoku(self, board: List[List[str]]) -> None:\n def getLocs(board):#初始化,获取需要填充的位置,记录为一个栈\n locs = []\n for row in range(9):\n for col in range(9):\n if board[row][col] == '.':\n locs.append((row, col))\n return locs\n\n def getMaps(board):#定义三个字典,跟踪9行、9列和9块的已填充数字,采用数据结构为defaultdict\n from collections import defaultdict as dd\n rowMap = [dd(int) for _ in range(9)]\n colMap = [dd(int) for _ in range(9)]\n blockMap = [dd(int) for _ in range(9)]\n for row in range(9):\n for col in range(9):\n if board[row][col] != '.':\n num = int(board[row][col])\n rowMap[row][num] += 1\n colMap[col][num] += 1\n bolckIndex = int(row/3)*3+int(col/3)\n blockMap[bolckIndex][num] += 1\n return rowMap, colMap, blockMap\n\n def fillBoard(board, locs):#递归填充剩余的数独空位置\n if not locs:\n return True\n row, col = locs.pop()#弹出一个待填充位置\n bolckIndex = int(row/3)*3+int(col/3)\n found = False\n for num in range(1, 10):\n if found:\n break\n if not rowMap[row][num] and not colMap[col][num] and not blockMap[bolckIndex][num]:\n ##如果当前行、当前列和当前块均不存在该数字,则将数字更新到相应行、列、块,并尝试填充\n rowMap[row][num] = 1\n colMap[col][num] = 1\n blockMap[bolckIndex][num] = 1\n board[row][col] = str(num)\n found = fillBoard(board, locs)#递归到下一层填充\n rowMap[row][num] = 0##状态回溯,将填充的位置清空\n colMap[col][num] = 0\n blockMap[bolckIndex][num] = 0\n if not found:##如果本轮都无法求解,则回溯到初始状态,继续从前面再填充\n locs.append((row, col))\n board[row][col] = '.'\n return found\n\n rowMap, colMap, blockMap = getMaps(board)\n locs = getLocs(board)\n fillBoard(board, locs)", "def solveSudoku(self, board) -> None:\n # Get size of board\n n = len(board)\n \n # Initialise Hashmaps\n rowMap, colMap, boxMap = {}, {}, {}\n \n # Create set for each index in row, col and box hashmaps\n for i in range(n):\n \n rowMap[i] = set()\n colMap[i] = set()\n boxMap[i] = set()\n\n # Add values to board\n for i in range(n):\n for j in range(n):\n \n # Get value on board\n val = board[i][j]\n 
valBoxId = self.getBoxId(i,j)\n \n # Insert to respective hashmaps\n if val != \".\":\n rowMap[i].add(val)\n colMap[j].add(val)\n boxMap[valBoxId].add(val)\n \n # Perform backtracking\n self.solveBacktrack(board, rowMap, colMap, boxMap, 0, 0)\n\n return board", "def buildpuzzle(self):\r\n self.puzzle = copy.deepcopy(self.rows)\r\n if self.difficulty == 1:\r\n self.removedigits(1)\r\n if self.difficulty == 2:\r\n self.removedigits(2)\r\n if self.difficulty == 3:\r\n self.removedigits(3)", "def solveSudoku(self, board: List[List[str]]) -> None:\n def dfs(idx):\n if idx == len(blankIdx):\n return True\n else:\n i, j = blankIdx[idx]\n for num in rg:\n num += 1\n if (num not in rows[i] and\n num not in cols[j] and\n num not in boxs[i//3][j//3]):\n board[i][j]=str(num)\n rows[i].add(num)\n cols[j].add(num)\n boxs[i//3][j//3].add(num)\n if dfs(idx+1):\n return True\n board[i][j] = blank\n rows[i].remove(num)\n cols[j].remove(num)\n boxs[i//3][j//3].remove(num)\n \n rg,blank = range(9), \".\"\n rows = [set() for _ in rg]\n cols = [set() for _ in rg]\n boxs = [[set() for _ in range(3)] for j in range(3)]\n blankIdx = list()\n for i in rg:\n for j in rg:\n if board[i][j]!=blank:\n ele = int(board[i][j])\n rows[i].add(ele)\n cols[j].add(ele)\n boxs[i//3][j//3].add(ele)\n else:\n blankIdx.append((i,j))\n dfs(0)", "def sudoku_solver(board):\n row, col= find_empty(board)\n if row == -1 and col == -1:\n return True\n for i in range(1, 10):\n if valid(board, row, col, i):\n board[row][col] = i\n if sudoku_solver(board):\n return True\n board[row][col] = 0\n return False", "def solveSudoku(self, board: List[List[str]]) -> None:\n self.backtrack(board, 0, 0)", "def solveSudoku(self, board):\n\n digits = { str(i) for i in range(1, 10) }\n rows = [ digits.copy() for _ in range(9) ]\n cols = [ digits.copy() for _ in range(9) ]\n boxs = [ [ digits.copy() for _ in range(3) ] for _ in range(3) ]\n unoccupied = set()\n\n def __recursiveSolver():\n if not unoccupied:\n return\n\n choices = digits.copy()\n for row, col in unoccupied:\n possible_moves = rows[row] & cols[col] & boxs[row // 3][col // 3]\n if len(possible_moves) < len(choices):\n action_pos = (row, col)\n choices = possible_moves\n if len(choices) == 1:\n break\n\n for choice in choices:\n (row, col) = action_pos\n\n unoccupied.remove(action_pos)\n board[row][col] = choice\n rows[row].remove(choice)\n cols[col].remove(choice)\n boxs[row // 3][col // 3].remove(choice)\n\n __recursiveSolver()\n if not unoccupied: return\n\n unoccupied.add(action_pos)\n board[row][col] = '.'\n rows[row].add(choice)\n cols[col].add(choice)\n boxs[row // 3][col // 3].add(choice)\n\n for row in range(9):\n for col in range(9):\n ch = board[row][col]\n if ch == '.':\n unoccupied.add((row, col))\n else:\n rows[row].remove(ch)\n cols[col].remove(ch)\n boxs[row // 3][col // 3].remove(ch)\n\n __recursiveSolver()", "def generate_sudoku(self):\n\n # randomly generate the first row \n random_order_number = [x for x in range(1, 10)]\n random.shuffle(random_order_number)\n for x in range(9):\n value = random_order_number[x]\n this_cell = self.grid[0][x]\n this_cell.value = value\n self.remove_value(this_cell, 0, x, value)\n\n row = 1\n column = 0\n while row <9 and column < 9:\n time.sleep(0.05)\n # search for options\n # should only be done once for each cell\n this_cell = self.grid[row][column]\n if this_cell.options == None:\n this_cell.options = self.find_options(row, column, this_cell.grid)\n\n if not this_cell.options:\n # backtrace should only happen when there is no options for 
this cell\n row, column = self.backtrace(this_cell, row, column)\n\n else:\n # case 3: the number has options and the number returned from the cell is valid\n if this_cell.value != None:\n self.add_value(this_cell, row, column)\n this_cell.get_value_from_options()\n # when you switch the value for a value from the option, put the current value back into the row\n self.remove_value(this_cell, row, column, this_cell.value)\n if column == 8:\n row += 1\n column = 0\n else:\n column += 1\n try:\n self.print_detail(this_cell, row, column)\n except IndexError:\n pass", "def solveSudoku(self, board):\n self.back_track(board)\n print(board)", "def solve_puzzle(self):\r\n \r\n counter = 0\r\n rows = self._height-1\r\n cols = self._width-1\r\n # print rows, cols\r\n # print 'The greed has %s rows and %s coloumn indexes' %(rows, cols) \r\n solution_move = ''\r\n if self.get_number(0,0) == 0 and \\\r\n self.get_number(0,1) == 1:\r\n # print 'Congrads Puxxle is Aolved at start!!!!!'\r\n return ''\r\n #appropriate_number = (self._height * self._width) - 1\r\n appropriate_number = (rows+1) * (cols+1) -1\r\n # print 'First appropriate_number=',appropriate_number\r\n # print \"Grid first tile that we will solwing has value =\", self._grid[rows][cols]\r\n \r\n while counter < 300:\r\n counter +=1\r\n # print self\r\n #appropriate_number = (rows+1) * (cols+1) -1\r\n # print 'Appropriate number in loop=',appropriate_number\r\n # print 'We are solving %s index_row and %s index_col' %(rows, cols) \r\n ####Case when we use solve_interior_tile\r\n if rows > 1 and cols > 0:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n cols -= 1\r\n appropriate_number -=1\r\n else:\r\n # print 'We are solving interior tile', (rows, cols)\r\n solution_move += self.solve_interior_tile(rows, cols)\r\n # print 'Solution move=', solution_move\r\n cols -= 1\r\n #### Case when we use solve_col0_tile\r\n elif rows > 1 and cols == 0:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n rows -= 1\r\n cols = self._width-1\r\n appropriate_number -=1\r\n else:\r\n # print 'We are solwing tile 0 in row', rows\r\n # print 'Appropriate number here ='\r\n solution_move += self.solve_col0_tile(rows)\r\n # print 'Solution move=', solution_move\r\n rows -=1\r\n cols = self._width-1\r\n\r\n\r\n #### Cases when we use solve_row0_tile\r\n elif rows == 1 and cols > 1:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n rows -= 1\r\n #cols = self._width-1\r\n appropriate_number -= self._width\r\n\r\n else:\r\n # print 'Solving upper 2 rows right side'\r\n solution_move += self.solve_row1_tile(cols)\r\n rows -=1\r\n appropriate_number -= self._width\r\n #### Cases when we use solve_row1_tile \r\n if rows < 1 and cols > 1:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n rows += 1\r\n cols -= 1\r\n appropriate_number +=self._width-1\r\n else:\r\n # print '(1,J) tile solved, lets solwe tile (0,j) in tile',(rows,cols)\r\n # print 'Greed after move solve_row1_tile'\r\n # print self\r\n solution_move += self.solve_row0_tile(cols)\r\n rows +=1\r\n cols -=1\r\n appropriate_number +=self._width-1\r\n\r\n\r\n #### Case when we use solve_2x2\r\n elif rows <= 1 and cols <= 1:\r\n # print 'We are solving 2x2 puzzle'\r\n solution_move += self.solve_2x2()\r\n if self._grid[0][0] == 0 and \\\r\n self._grid[0][1] == 1:\r\n # print 'Congrads Puxxle is SOLVED!!!!!'\r\n 
break\r\n\r\n\r\n\r\n\r\n if counter > 100:\r\n # print 'COUNTER BREAK'\r\n break\r\n # print solution_move, len(solution_move)\r\n return solution_move\r\n\r\n\r\n\r\n\r\n\r\n\r\n # for row in solution_greed._grid[::-1]:\r\n # print solution_greed._grid\r\n # print 'Row =',row\r\n \r\n # if solution_greed._grid.index(row) > 1:\r\n # print \"Case when we solwing Interior and Tile0 part\"\r\n \r\n\r\n # for col in solution_greed._grid[solution_greed._grid.index(row)][::-1]:\r\n # print 'Coloumn value=', col\r\n #print row[0]\r\n # if col !=row[0]:\r\n # print 'Case when we use just Interior tile solution'\r\n # print solution_greed._grid.index(row)\r\n # print row.index(col)\r\n \r\n # solution += solution_greed.solve_interior_tile(solution_greed._grid.index(row) , row.index(col))\r\n # print 'Solution =', solution\r\n # print self \r\n # print solution_greed._grid\r\n # elif col ==row[0]:\r\n # print 'Case when we use just Col0 solution'\r\n\r\n # else:\r\n # print 'Case when we solwing first two rows'\r\n\r\n #return \"\"\r", "def solve_board(self):\n\n self.fill_board()\n\n if self.bts_solver():\n for i in self.sudoku_board.keys():\n self.file.write(str(self.sudoku_board[i]))\n self.file.write(\" BTS\")\n print(\"Solution Found!\")", "def solveSudoku(self, board: List[List[str]]) -> None:\n row, col, part = [set() for _ in range(9)], [set() for _ in range(9)], [set() for _ in range(9)]\n blank = []\n for i in range(9):\n for j in range(9):\n if board[i][j] != \".\":\n row[i].add(board[i][j])\n col[j].add(board[i][j])\n part[i//3 * 3 + j//3].add(board[i][j])\n else:\n blank.append([i, j])\n def recursion(row, col, part, blank, board, count, n):\n if count == n:\n return True\n else:\n x, y = blank.pop()\n for c in range(1, 10):\n c = str(c)\n if c not in row[x] and c not in col[y] and c not in part[x//3 * 3 + y//3]:\n row[x].add(c)\n col[y].add(c)\n part[x//3 * 3 + y//3].add(c)\n board[x][y] = c\n count += 1\n check = recursion(row, col, part, blank, board, count, n)\n if check:\n return check\n row[x].remove(c)\n col[y].remove(c)\n part[x//3 * 3 + y//3].remove(c)\n board[x][y] = \".\"\n count -= 1\n blank.append([x,y])\n return False\n count, n = 0, len(blank)\n recursion(row, col, part, blank, board, count, n)", "def solve(self):\n dim = self.puzzle.dimension\n\n # initial loop\n for value, (row, col) in self.puzzle:\n if value:\n self.clear_row(row, value)\n self.clear_col(col, value)\n self.clear_subgrid(row, col, value)\n self.updates.add((value, (row, col)))\n for ps in self.possibilities:\n ps.discard((row, col))\n\n while self.updates:\n while self.updates:\n # while self.updates:\n value, (row, col) = self.updates.pop()\n for i in range(1, dim + 1):\n self.check_row(i, value)\n self.check_col(i, value)\n for i in range(2, 8, 3):\n self.check_subgrid(row, i, value)\n self.check_subgrid(i, col, value)\n\n for value, (row, col) in self.puzzle:\n if not value:\n self.check_cell(row, col)\n\n # for value in range(1, dim + 1):\n # for row in [2, 5, 8]:\n # for col in [2, 5, 8]:\n # self.check_subgrid(row, col, value)", "def solveSudoku(self, board: List[List[str]]) -> None:\n\n def deepCopy(src, tar):\n n = len(src)\n for i in range(n):\n for j in range(n):\n tar[i][j] = src[i][j]\n\n def getNums(board, x, y):\n used_nums_x = []\n used_nums_y = []\n used_nums_square = []\n for i in range(n):\n if board[i][y] != '.':\n used_nums_y.append(board[i][y])\n for j in range(n):\n if board[x][j] != '.':\n used_nums_x.append(board[x][j])\n\n x1 = (x // 3) * 3\n x2 = ((x // 3) + 1) * 3 - 1\n y1 = 
(y // 3) * 3\n y2 = ((y // 3) + 1) * 3 - 1\n\n for i in range(x1, x2 + 1):\n for j in range(y1, y2 + 1):\n if board[i][j] != '.':\n used_nums_square.append(board[i][j])\n\n used_nums = set(used_nums_x + used_nums_y + used_nums_square)\n nums = set([str(i) for i in range(1, 10)]) - used_nums\n return nums\n\n def helper(board, points, result):\n n = len(board)\n if len(points) == 0:\n deepCopy(board, result)\n return\n\n x, y = points[-1]\n nums = getNums(board, x, y)\n for num in nums:\n board[x][y] = num\n points.pop()\n helper(board, points, result)\n points.append((x, y))\n board[x][y] = '.'\n\n n = len(board)\n points = [(i, j) for i in range(n) for j in range(n) if board[i][j] == '.']\n result = [['0'] * n for _ in range(n)]\n helper(board, points, result)\n deepCopy(result, board)", "def solveSudoku(grid):\n\n #if the board is not empty, then check to see if its solved\n #return True if it is\n if not findEmpty(grid):\n if grid.checkBoard():\n return True\n else:\n return False\n #finds the first empty position\n p = findEmpty(grid)\n #considers 1-9 and then places it into the empty spot\n for i in range(1, 10):\n grid.board[p[0]][p[1]] = i\n #if the input is viable, then it goes solves the new given board until its solved\n if grid.checkInput(p[0], p[1]):\n if solveSudoku(grid):\n return True\n #if there are no viable options for that spot, then it backtracks \n grid.board[p[0]][p[1]] = 0\n return False", "def solveSudoku(self, board: List[List[str]]) -> None:\n row = collections.defaultdict(set)\n col = collections.defaultdict(set)\n block = collections.defaultdict(set)\n pos = []\n\n for i in range(9):\n for j in range(9):\n if board[i][j] != '.':\n row[i].add(board[i][j])\n col[j].add(board[i][j])\n block[i // 3 * 3 + j // 3].add(board[i][j])\n else:\n pos.append((i, j))\n\n self.dfs(board, pos, row, col, block)\n return", "def solveSudoku(self, board: List[List[str]]) -> None:\n self.board = board\n self.boardx = self.init_board(board)\n self.num_dict_origin = {'1':1, '2':1, '3':1, '4':1, '5':1, '6':1, '7':1, '8':1, '9':1}\n \n row, col = -1, -1\n while row < 9-1:\n row += 1\n col = -1\n while col < 9-1:\n col += 1\n if self.boardx[row][col][-1] == False: # 跳过给定值的位置\n continue\n if self.add_tree(row, col): # 如果可以继续分支,则继续\n continue\n else: # 否则,返回分叉点,剪枝\n result = self.back_fork(row, col)\n if result is None: # 无解情况\n print('无解')\n return\n else: # 返回分叉点\n row, col = result\n self.boardx[row][col].pop(0)\n \n self.fill_board() # 填充棋盘", "def sudoku_solver(m):\n square_sides = int(sqrt(len(m)))\n dicts = initialize_dicts(m, square_sides)\n dicts, square_coords = populate_dicts(m, square_sides, dicts)\n dicts = get_missing(dicts)\n candidates = get_candidates(m, dicts, square_coords)\n m, candidates = scan_sudoku(m, dicts, square_coords, candidates)\n single_candidates = single_candidate(candidates, square_coords, dicts)\n m, candidates = fill_fit(m, dicts, square_coords, single_candidates=single_candidates)\n candidates = get_candidates(m, dicts, square_coords)\n naked_sets_fields_row, naked_sets_fields_cols = find_naked_sets(candidates, dicts, setlength=2)\n candidates, naked_sets = remove_naked_sets_from_candidates(candidates, naked_sets_fields_row, naked_sets_fields_cols)\n candidates = get_candidates(m, dicts, square_coords, naked_sets)\n naked_sets_fields_row, naked_sets_fields_cols = find_naked_sets(candidates, dicts, setlength=3)\n return m", "def solveSudoku(self, board: List[List[str]]) -> None:\n \n # Returns the coordinates of the next available cell in the board\n def 
nextPositionToFill(board):\n row = len(board)\n column = len(board[0])\n \n for r in range(row):\n for c in range(column):\n if board[r][c] == \".\":\n return r,c\n return -1, -1\n \n # Checks if the value placed is valid\n def isValid(board, row, column, value):\n \n rowOK = all([value != board[row][index] for index in range(len(board)) ])\n \n if rowOK:\n columnOK = all([value != board[index][column] for index in range(len(board[0]))])\n if columnOK:\n box_x = 3 * (row//3)\n box_y = 3 * (column//3)\n \n for r in range(box_x, box_x+3):\n for c in range(box_y, box_y+3):\n if board[r][c] == value:\n return False \n return True\n return False\n \n # The main recursive method that solves the sudoku board\n def sudokuSolver(board, row, column):\n row, column = nextPositionToFill(board)\n \n if (row, column) == (-1, -1):\n return True\n \n for value in range(1, len(board)+1):\n if isValid(board, row, column, str(value)):\n board[row][column] = str(value)\n if sudokuSolver(board, row, column):\n return True\n board[row][column] = \".\" # Backtracking step\n \n return False\n \n return sudokuSolver(board, 0, 0)", "def solveSudoku(self, board: List[List[str]]) -> None:\n n19 = set(list('123456789'))\n conn = defaultdict(set)\n center = [(i,j) for i in {1,4,7} for j in {1,4,7}]\n def get_conn(i,j):\n for x in range(0, 9):\n conn[(i,j)].add((x,j))\n conn[(i,j)].add((i,x))\n for ci, cj in center:\n if abs(i-ci)<=1 and abs(j-cj)<=1:\n for ii in range(-1,2):\n for jj in range(-1,2):\n ni, nj = ci + ii, cj + jj\n conn[(i,j)].add((ni, nj))\n break\n conn[(i,j)].discard((i,j))\n\n\n for i in range(9):\n for j in range(9):\n get_conn(i,j)\n\n def get_avail(i, j):\n choices = set(n19)\n for ni, nj in conn[(i,j)]:\n choices.discard(board[ni][nj])\n return choices\n\n to_fill = set()\n for i, row in enumerate(board):\n for j, v in enumerate(row):\n if v == '.':\n to_fill.add((i,j))\n\n def solve():\n if not to_fill:\n return True\n min_avail = n19\n ci, cj = None, None\n for i, j in to_fill:\n val = get_avail(i,j)\n if not val:\n return False\n if len(val) < len(min_avail):\n min_avail = val\n ci, cj = i, j\n to_fill.discard((ci, cj))\n for x in min_avail:\n board[ci][cj] = x\n if solve():\n return True\n board[ci][cj] = '.'\n to_fill.add((ci, cj))\n return False\n print(solve())", "def solveSudoku(self, board: List[List[str]]) -> None:\n size=len(board)\n sqr_size=int(math.sqrt(size))\n \n def insert_into_board(i,j,element):\n board[i][j]=element\n update_row_and_column(i,j)\n update_sqr(i,j)\n \n #updating columns and rows after interting an element into a cell so the columns can't use it anymore\n #return the list of updated cell by this change so we can update them back inside the recursive back track function\n def update_row_and_column(i,j):\n list_updated=[]\n for k in range(size):\n if type(board[i][k])!=str and board[i][j] in board[i][k] :\n list_updated.append((i,k))\n board[i][k].discard(board[i][j])\n if type(board[k][j])!=str and board[i][j] in board[k][j] :\n list_updated.append((k,j))\n board[k][j].discard(board[i][j])\n return list_updated\n \n #updating columns and rows after interting an element into a cell so the columns can't use it anymore\n #return the list of updated cell by this change so we can update them back inside the recursive back track function\n def update_sqr(i,j):\n list_updated=[]\n sqr_i=sqr_size*int(i/sqr_size)\n sqr_j=sqr_size*int(j/sqr_size)\n for k in range(sqr_size):\n for l in range(sqr_size):\n if type(board[sqr_i+k][sqr_j+l])!=str and board[i][j] in 
board[sqr_i+k][sqr_j+l]:\n list_updated.append((sqr_i+k,sqr_j+l))\n board[sqr_i+k][sqr_j+l].discard(board[i][j])\n return list_updated\n \n def scan():\n for i in range(size):\n for j in range(size):\n if type(board[i][j])!=str and len(board[i][j])==1:\n insert_into_board(i,j,list(board[i][j])[0])\n \n def check_to_continue():\n for i in range(size):\n for j in range(size):\n if len(board[i][j])==0:\n return False\n return True\n \n def check_is_finished():\n for i in range(size):\n for j in range(size):\n if type(board[i][j])!=str:\n return False\n return True\n \n list_not_filled=[]\n \n def solve_backtrack():\n if check_is_finished():\n return True\n if not check_to_continue():\n return False\n (i,j)=list_not_filled.pop()\n if type(board[i][j])!=str:\n temp=board[i][j]\n for el in temp:\n board[i][j]=el\n index_row_column=update_row_and_column(i,j)\n index_sqr=update_sqr(i,j)\n check=solve_backtrack()\n if check:\n return True\n board[i][j]=temp\n for (o,p) in index_row_column:\n board[o][p].add(el)\n for (o,p) in index_sqr:\n board[o][p].add(el)\n list_not_filled.append((i,j))\n else:\n return solve_backtrack()\n return False\n \n \n #initializing the board ans updating none cells to a list of potential elements\n for i in range(size):\n for j in range(size):\n if board[i][j]=='.':\n board[i][j]=set([str(d) for d in range(1,size+1)])\n \n #updating the rows and columns and smal sqrs for inital elements\n for i in range(size):\n for j in range(size):\n if type(board[i][j])==str:\n update_row_and_column(i,j)\n update_sqr(i,j)\n \n #scaning to solve for simple cases in the start\n #We solve this to reduce the number of iteration in the back track function \n for i in range(size*size):\n scan()\n \n #updating list_not_filled for backtrack\n for i in range(size):\n for j in range(size):\n if type(board[i][j])!=str:\n list_not_filled.append((i,j))\n \n # starting backtrack after initial process\n solve_backtrack()", "def solve_puzzle(grid):\n solutions = []\n if not grid.valid():\n return solutions\n # Backtracking, iterating over (first) smallest list of candidates for empty vertices\n candidates = grid.candidate_map()\n min_number_of_candidates = min([9] + [len(candidates[ln][rw]) for ln in range(9) for rw in range(9) if grid.grid[ln][rw] is None])\n for (line, row) in [(ln, rw) for ln in range(9) for rw in range(9)]:\n if grid.grid[line][row] is None and len(candidates[line][row]) == min_number_of_candidates:\n for guess in candidates[line][row]:\n grid.grid[line][row] = guess\n for solution in solve_puzzle(grid):\n solutions.append(solution)\n grid.grid[line][row] = None\n break\n else:\n solutions.append(Sudoku(grid.__str__()))\n return solutions", "def __init__(self, size, given_cells):\n self.ROWS = string.ascii_uppercase[:size ** 2]\n self.COLS = [str(i) for i in range(1, size ** 2)]\n self.size = size\n self.given_cells = given_cells\n self.board = self.create_board()\n self.squares = [utility.cross(i, j) for i in [self.ROWS[i:i + size] for i in range(0, len(self.ROWS), size)]\n for j in [self.COLS[i:i + size] for i in range(0, len(self.COLS), size)]]\n self.attach_neighbors()\n self.update_neighbor_values_by_given()\n print(\"Initial board:\")\n GUI.print_sudoku(self.board, self.size)", "def demo():\n\n # Initialize board with all cells having possible values 1..9\n board = board_init()\n\n # Unsolved demo puzzle\n # Hard puzzle by Arto Inkala:\n # http://abcnews.go.com/blogs/headlines/2012/06/can-you-solve-the-hardest-ever-sudoku/\n read_puzzle(board, 
\"8..........36......7..9.2...5...7.......457.....1...3...1....68..85...1..9....4..\")\n\n # Print unsolved puzzle\n print(\"Initial Sudoku board:\")\n print_board(board)\n\n # Solve the puzzle\n board = solve_puzzle(board)\n\n # Print the solution\n print(\"Solution:\")\n print_board(board)\n\n\n # Write output to file\n write_to_file(board)\n \n return 0", "def solve_puzzle(board):\n # Propagate value effects\n board = simplify_puzzle(board, [])\n\n # Brute force remaining cells\n board = brute(board)\n\n # Verify that the puzzle was successfully solved\n assert get_length(board)==81\n assert valid_attempt(board)\n\n return board", "def solveSudoku(self, board: List[List[str]]) -> None:\n row_set = defaultdict(set)\n col_set = defaultdict(set)\n bl_set = defaultdict(set)\n nums = set([str(i) for i in range(1, 10)])\n dot_list = []\n for i in range(9):\n for j in range(9):\n val = board[i][j]\n if val == '.':\n dot_list.append([i, j])\n continue\n bl = (i // 3)*3 + j // 3\n row_set[i].add(val)\n col_set[j].add(val)\n bl_set[bl].add(val)\n\n n = len(dot_list)\n stack = []\n count = 0\n try_dict = defaultdict(set)\n def next_num(next_count):\n if next_count == n:\n return 0\n i, j = dot_list[next_count]\n b = (i // 3) * 3 + j // 3\n return len(nums - row_set[i] - col_set[j] - bl_set[b])\n while count < n:\n i, j = dot_list[count]\n b = (i // 3) * 3 + j // 3\n left_set = nums - row_set[i] - col_set[j] - bl_set[b] - try_dict[count]\n if left_set:\n insert_num = 0\n space = 0\n for left_num in left_set:\n for _set in (row_set[i], col_set[j], bl_set[b]):\n _set.add(left_num)\n next_space = next_num(count+1)\n for _set in (row_set[i], col_set[j], bl_set[b]):\n _set.remove(left_num)\n if next_space >= space:\n insert_num = left_num\n space = next_space\n for _set in (row_set[i], col_set[j], bl_set[b]):\n _set.add(insert_num)\n stack.append(insert_num)\n try_dict[count].add(insert_num)\n count += 1\n else:\n try_dict[count].clear()\n count -= 1\n rm_num = stack.pop()\n i, j = dot_list[count]\n b = (i // 3) * 3 + j // 3\n for _set in (row_set[i], col_set[j], bl_set[b]):\n _set.remove(rm_num)\n\n # print(stack)\n\n for p, v in zip(dot_list, stack):\n board[p[0]][p[1]] = v", "def solveSudoku(self, board: 'List[List[str]]') -> 'None':\n\n select = '.'\n row_set = []\n col_set = []\n arr_set = []\n\n for row in range(9):\n for col in range(9):\n if col == 0:\n row_set.append(set('123456789'))\n if row == 0:\n col_set.append(set('123456789'))\n if row % 3 == 0 and col % 3 == 0:\n arr_set.append(set('123456789'))\n\n if board[row][col].isdigit():\n row_set[row].remove(board[row][col])\n col_set[col].remove(board[row][col])\n arr_index = (row - row % 3) + col // 3\n arr_set[arr_index].remove(board[row][col])" ]
[ "0.70219713", "0.6696299", "0.6669564", "0.665291", "0.6652331", "0.6648256", "0.6491506", "0.6417593", "0.64122254", "0.6406946", "0.64067495", "0.6398763", "0.6398049", "0.6371711", "0.63527167", "0.6332174", "0.63301975", "0.6281087", "0.6276849", "0.62565374", "0.62547344", "0.6252944", "0.6233744", "0.61962247", "0.61779505", "0.6161378", "0.6155183", "0.6154236", "0.61540943", "0.61345345" ]
0.8129647
0
Requests user input for the row, column, and number they would like to enter as the next entry to the Sudoku puzzle. Has some lightweight data validation through a try / except format and asks for another input attempt if invalid inputs were provided.
def request_number_input(self): try: self.print_board(self.board) row = int(input("Please enter row to add number to (0-8): ")) col = int(input("Please enter column to add number to (0-8): ")) num = int(input("Please enter number you wish to add (1-9): ")) response = self.set_number(col, row, num) print(response) # verifies if move was valid or if invalid inputs were provided. except: print("Invalid input, try again!") self.request_number_input()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_input(self):\n while True:\n try:\n self.rows = int(input(\"Number of rows: \"))\n while self.rows < 2 or self.rows > 30:\n self.rows = int(input(\"Please enter a number between 2 and 30: \"))\n break\n except ValueError:\n print(\"Please enter a number!\")\n\n while True:\n try:\n self.cols = int(input(\"Number of columns: \"))\n while self.cols < 2 or self.cols > 30:\n self.cols = int(input(\"Please enter a number between 2 and 30: \"))\n break\n except ValueError:\n print(\"Please enter a number!\")\n\n while True:\n try:\n self.mines = int(input(\"Number of mines: \"))\n while self.mines < 1 or (self.mines >= self.rows * self.cols):\n tile_count = self.rows * self.cols\n self.mines = int(input(\"Please enter a number between 1 and \" + str(tile_count - 1) + \": \"))\n break\n except ValueError:\n print(\"Please enter a number!\")", "def ask_input(player, row_or_column):\n\n row_or_column_number = ask_input_helper(player, row_or_column)\n while row_or_column_number not in range(board_size):\n print \"Please choose a number within the range.\" \n row_or_column_number = ask_input_helper(player, row_or_column)\n return row_or_column_number", "def ask_input_helper(player, row_or_column):\n\n try:\n return int(raw_input(\"Player {}, please choose a {}.\".format(player, row_or_column)))\n except ValueError:\n return ask_input_helper(player, row_or_column)", "def attack_input(self):\n while True:\n if self.user == 'player':\n print(\"ITS YOUR TURN TO ATTACK!\\n\")\n try:\n column = input('ENTER DESIRED COLUMN (A-J): \\n').upper()\n if not re.match('^[A-J]*$', column):\n print('PLEASE ENTER A VALID LETTER BETWEEN A-J')\n else:\n column = self.letters_to_numbers[column]\n break\n except KeyError:\n print('PLEASE ENTER A LETTER')\n elif self.user == 'computer guess':\n column = self.comp_attack_column()\n if column == range(0, 10):\n break\n else:\n column = random.randint(0, 9)\n break\n while True:\n if self.user == 'player':\n try:\n row = input('ENTER DESIRED ROW (0-9): \\n')\n if row in self.row_input:\n row = int(row)\n break\n else:\n raise ValueError\n except ValueError:\n print('PLEASE ENTER A VALID NUMBER BETWEEN 0-9')\n elif self.user == 'computer guess':\n row = self.comp_attack_row()\n if row == range(0, 10):\n break\n else:\n row = random.randint(0, 9)\n break\n return column, row", "def get_user_move(self):\n while True:\n user_input = input(\"Enter the coordinates: > \")\n try:\n col, row = map(int, user_input.split())\n if col not in [1, 2, 3] or row not in [1, 2, 3]:\n raise CoordinateError\n idx = self.board_coords[(col, row)]\n if self.game_board[idx] != ' ':\n raise CellOccupyError\n return idx\n except ValueError:\n print(\"You should enter numbers!\")\n except CoordinateError:\n print(\"Coordinates should be from 1 to 3!\")\n except CellOccupyError:\n print('This cell is occupied! 
Choose another one!')", "def input_validation(self, prompt):\r\n\r\n while True:\r\n try:\r\n x, y = map(int, input(prompt).split())\r\n except ValueError: # when there is less than or more than 2 input values\r\n print('Invalid input try again.')\r\n continue\r\n if (x != self.selected[0]) or (y != self.selected[1]): # different from first choice\r\n if (0 <= x <= 3) and (0 <= y <= 12): # Valid input\r\n if not ([x, y] in self.bin): # Check if this card is still there or not\r\n break\r\n else:\r\n print('This card has already been taken.')\r\n continue\r\n else: # invalid input\r\n print('Row and column should be from 0 to 3 and 1 to 12 respectively.')\r\n continue\r\n else:\r\n print('Choose a card different from your first choice')\r\n continue\r\n return x, y", "def input_coordinates(playing_field, playground_mines):\n while True:\n try:\n x = input('Write number of line from 0 to %s:' % (FIELD_SIZE - 1))\n if x == 'test':\n test_game(playing_field, playground_mines)\n y = input('Write number of line from 0 to %s:' % (FIELD_SIZE - 1))\n if y == 'test':\n test_game(playing_field, playground_mines)\n elif not is_coords_in_range(int(x), int(y)):\n raise TypeError\n return int(x), int(y)\n except ValueError:\n print('Wrong input, try again')\n except TypeError:\n print('Your number of coordinate is out of field')", "def get_table(self):\n \n # During testing, this'll speed the process update\n \n row = 0\n while row < 9:\n sudoku_row = input(\"Please enter the contents of row {}, using 0 to represent blanks:\".format(row+1))\n if len(sudoku_row) == 9:\n column = 0\n while column < 9:\n number_in_box = int(sudoku_row[column])\n self.table[row][column] = number_in_box\n column += 1\n row += 1\n else:\n print(\"You can only enter 9 numbers. Not letters. Not more. Not fewer. 9 numbers.\")", "def _input_coords(game,player):\r\n print(Player.get_name(player)+'('+Player.get_spec(player)+')'+\" it's your turn\")\r\n coords = input(\"coords of cell? \")\r\n coords = coords.split(',')\r\n try :\r\n x = int(coords[0])\r\n y = int(coords[1])\r\n if game[x][y] == '' : \r\n return (x,y)\r\n except :\r\n return _input_coords(game,player)\r\n print('illegal play, choose an empty cell')\r\n return _input_coords(game,player)", "def get_row():\n\n while True:\n try:\n guess = int(input(\"Guess a row: \\n\"))\n if guess in range(1, grid_size + 1):\n return guess\n else:\n print(\"Bruh! 
That's not even in the ocean o_O\")\n except ValueError:\n print(f\"\\nPlease enter number between 1 and {grid_size}\")", "def _getUserInputs(self):\n try:\n userCoor = self.view.textCoordinates.get(\"1.0\", \"end-1c\")\n if needless(userCoor):\n \"\"\"If the user's input is not given,\n it will generate coordinates randomly\n and print out coordinates in the textfield.\"\"\"\n self.model.validateEntryCities(self.view.entryCities.get())\n self.model.generateCoordinates()\n self.view.printCoordinates(self.model.coordinates)\n else: \n self.model.parseCoordinates(userCoor)\n\n self.iterations = self.model.validateEntryIterations(\n self.view.entryIterations.get())\n self.refresh_time = self.model.validateEntryTimer(\n self.view.entryREFRESH_TIME_MS.get())\n except InvalidIterationInput as E:\n self.view.invalidInput(E)\n except InvalidCityInput as E: \n self.view.invalidInput(E)\n except InvalidREFRESH_TIME_MSInput as E:\n self.view.invalidInput(E)\n except InvalidCoordinatesIndexInput as E:\n self.view.invalidInput(E)\n except InvalidCoordinatesInput as E:\n self.view.invalidInput(E)\n except InvalidCoordinatesRangeIndexInput as E:\n self.view.invalidInput(E)\n else:\n self.validInput = True", "def human_go(self, board):\r\n coord_pattern = re.compile(\"[0-{}]$\".format(board.shape[1]))\r\n print(\"Enter Column and press enter.\")\r\n input_str = input(\"(from 0-6)\\n\")\r\n if not coord_pattern.match(input_str):\r\n print(\"That is not in the right format, please try again...\")\r\n return self.human_go(board)\r\n else:\r\n col = int(input_str)\r\n if board[0][col] != 0:\r\n print(\"That column is already full, please try again\")\r\n self.human_go()\r\n else:\r\n for row in board[::-1]:\r\n if row[col] == 0:\r\n row[col] = -1\r\n return board", "def get_input(msg):#function which catches all user input which is invalid (not numbers) for all the shapes\n value = None\n while not value:\n value = input(msg)\n if not value.isnumeric():#if not a valid number print the following message \n print(\"Please enter a valid number\")\n value = None\n else:\n return int(value)#once a correct number is entered the number is returned and program contiues ", "def get_col():\n\n while True:\n try:\n guess_letter = str(input(\"Guess a column: \\n\")).upper()\n guess = letter_and_index_conversion(guess_letter, grid_size)\n if guess in range(1, grid_size + 1):\n return guess\n else:\n print(\"Bruh! 
That's not even in the ocean o_O\")\n except ValueError:\n print(\n f\"\\nPlease enter a letter for the column between {alphabet_list[0]} and {alphabet_list[grid_size - 1]}\"\n )", "def input_stone_position():\n p_1 = input(\"input first co-ordinate, range 0 to 7:\")\n p_2 = input(\"input second co-ordinate, range 0 to 7:\")\n # if input is anything else but 1 2 3 4 5 6 7 8 9 0 ipython shell returns a ValueError\n\n try:\n return (int(p_1), int(p_2))\n except ValueError as val_err:\n print(\"A ValueError occured with message {}\".format(val_err))\n print(\"You should input something like 1 (then press ENTER) 5 (then press ENTER).\")\n repeat = input(\"Do you want to try again [type t] or end the game [type e] or continue [type what you want]?\")\n if repeat == 't':\n return input_stone_position()\n elif repeat == 'e':\n print(\"Press ctrl + c to end the game.\")", "def askMove(self,posibleMoves):\n print(\"Where will you move?\")\n while True:\n pos = raw_input(\"Type Colum and Row 'CR' Ex:a1 for first column/row: \")\n if len(pos) == 2:\n c = ord(pos[0])-97\n r = int(pos[1])-1\n move = c+r*8\n if move in posibleMoves:\n return move\n print(\"Invalid move, try again\")\n return", "def prompt_user_check_input(self):\r\n user_input = 0\r\n # grabs user input and changes it to an int\r\n while True:\r\n try:\r\n user_input = int(\r\n input(\"\\033[1;33mMake your move by entering the number of an open space on the board: \\033[0m\"))\r\n except ValueError:\r\n print(\"Why do you refuse to enter a number, Dave?\")\r\n continue\r\n else:\r\n break\r\n\r\n # makes sure the user enters a number 0-8 and verifies that the space the user selected is open\r\n if self.verify_valid_num(user_input) and self.write_user_choice(user_input):\r\n return True\r\n else:\r\n self.prompt_user_check_input()", "def inputProcess(self, line):\n fields = line.split()\n # check if input argument size is not 1 or 2\n if len(fields) < 1 or len(fields) > 2:\n print 'Invalid input size!'\n return True\n # call corresponding functions based on input argument(s)\n if fields[0] == 'GET':\n res = self.get()\n if res != '':\n print res,\n elif fields[0] == 'EXIT':\n return False\n elif fields[0] == 'BOARD':\n print self.displayBoard()\n elif fields[0] == 'PUT':\n if len(fields) != 2:\n print 'PUT command needs one argument!'\n return True\n try:\n column = int(fields[1])\n if column < 1 or column > 4:\n print 'Column number for PUT command needs to be from 1 to 4'\n else:\n print self.put(column)\n except ValueError:\n print 'Invalid input, for column number please enter an integer from 1 to 4'\n else:\n print 'Invalid input, valid commands consists of GET BOARD EXIT PUT <column> only'\n return True", "def request_input(self, possibles=[]):\n answer = self.console.input('Type your request here:')\n if len(possibles) > 0 and self.numeric:\n invalid = True\n while invalid:\n try:\n answer = int(answer)\n invalid = False\n break\n except:\n answer = self.console.input('Type your request here (numbers only):')\n\n answer = possibles[answer - 1]\n else:\n if answer.find('quit') != -1:\n self.running = False\n else:\n if answer.find('quit') != -1:\n self.running = False\n return answer", "def obtain_user_input():\n\n\n league_types = ['PPR', 'STD'] # possible acceptable league types\n\n while True: # continue till valid entry given\n try:\n league_type = input(\"Enter a League Type (PPR or STD): \").upper() # obtain value from user\n if league_type in league_types: # check if it's valid\n break # entry is valid therefore break\n 
else: # invalid entry\n raise ValueError\n except:\n # presesnt error message and redo loop\n print(\"Invalid Entry: please enter either PPR or STD\")\n\n\n positions = ['WR', 'RB', 'QB', 'TE'] # possible acceptable positions\n while True: # continue till valid entry given\n try:\n pos = input(\"Please enter a position (WR, RB, QB, or TE): \").upper() # obtain value from user\n if pos in positions: # make sure position is valid\n break # entry is valid so break.\n else: # invalid entry\n raise ValueError\n except:\n # presesnt error message and redo loop\n print(\"Invalid Entry: please enter either WR, RB, QB, or TE\")\n\n\n\n idx = pd.IndexSlice # index slice object used to slice df\n num_pos = final_df.loc[idx[league_type, pos], :].shape[0] # total count of the position.\n while True: # continue till valid entry given\n try:\n n_rows = input(f\"Enter a count of players to study as an integer (max: {num_pos} for {pos}): \")\n n_rows = int(n_rows) # will raise ValueError if not an integer.\n if (n_rows <= num_pos and n_rows >0): # ensure < than count of position\n break # brak since valid entry\n else: # invalid entry\n raise ValueError\n except ValueError:\n # presesnt error message and redo loop\n print(f\"Invalid entry: please enter an integer less than {num_pos} and > 0.\")\n\n\n # possible user entry values. \n rank_dict = {\n \"1\": \"ADP\",\n \"2\": \"TTL PTS\"\n }\n\n while True: # continue till valid entry given\n # obtain value from user\n rank_sys = input(\"Enter how you would like to rank players (1 for ADP, 2 for 2020 Total Points): \")\n try:\n if rank_sys in rank_dict: # valid entry\n rank_sys = rank_dict[rank_sys]\n break\n else: # invalid entry\n raise ValueError\n except ValueError:\n # presesnt error message and redo loop\n print(\"Invalid Entry: please enter either 1 for ADP, or 2 for 2020 Total Points\")\n\n return league_type, pos, rank_sys, n_rows", "def StateSpace():\n # collecting input for inicial space's position\n # collecting row1's inputs\n def row1_fun():\n while True:\n try:\n row1 = list(map(int, input(\"row #1: \").split()))\n break\n except:\n print(\"\")\n print(\"Invalid option. Please try again.\")\n print(\"exemple: 1 1 1\")\n print(\"exemple: 0 0 0\")\n print(\"exemple: 1 0 1\")\n print(\"\") \n\n \n while len(row1) != 3:\n print(\"\")\n print(\"Invalid option. Please try again.\")\n print(\"exemple: 1 1 1\")\n print(\"exemple: 0 0 0\")\n print(\"exemple: 1 0 1\")\n print(\"\") \n row1.clear()\n row1 = list(map(int, input(\"row #1: \").split()))\n if len(row1) == 3:\n for x in row1:\n if x != 1 and x != 0:\n print(\"\")\n print(\"Invalid option. Please try again.\")\n print(\"exemple: 1 1 1\")\n print(\"exemple: 0 0 0\")\n print(\"exemple: 1 0 1\")\n print(\"\") \n row1.clear()\n row1 = list(map(int, input(\"row #1: \").split()))\n return row1\n \n def row2_fun():\n while True:\n try:\n row2 = list(map(int, input(\"row #2: \").split()))\n break\n except:\n print(\"\")\n print(\"Invalid option. Please try again.\")\n print(\"exemple: 1 1 1\")\n print(\"exemple: 0 0 0\")\n print(\"exemple: 1 0 1\")\n print(\"\") \n\n \n while len(row2) != 3:\n print(\"\")\n print(\"Invalid option. Please try again.\")\n print(\"exemple: 1 1 1\")\n print(\"exemple: 0 0 0\")\n print(\"exemple: 1 0 1\")\n print(\"\") \n row2.clear()\n row2 = list(map(int, input(\"row #2: \").split()))\n if len(row2) == 3:\n for x in row2:\n if x != 1 and x != 0:\n print(\"\")\n print(\"Invalid option. 
Please try again.\")\n print(\"exemple: 1 1 1\")\n print(\"exemple: 0 0 0\")\n print(\"exemple: 1 0 1\")\n print(\"\") \n row2.clear()\n row2 = list(map(int, input(\"row #2: \").split()))\n return row2\n \n def row3_fun():\n while True:\n try:\n row3 = list(map(int, input(\"row #3: \").split()))\n break\n except:\n print(\"\")\n print(\"Invalid option. Please try again.\")\n print(\"exemple: 1 1 1\")\n print(\"exemple: 0 0 0\")\n print(\"exemple: 1 0 1\")\n print(\"\") \n\n \n while len(row3) != 3:\n print(\"\")\n print(\"Invalid option. Please try again.\")\n print(\"exemple: 1 1 1\")\n print(\"exemple: 0 0 0\")\n print(\"exemple: 1 0 1\")\n print(\"\") \n row3.clear()\n row3 = list(map(int, input(\"row #3: \").split()))\n if len(row3) == 3:\n for x in row3:\n if x != 1 and x != 0:\n print(\"\")\n print(\"Invalid option. Please try again.\")\n print(\"exemple: 1 1 1\")\n print(\"exemple: 0 0 0\")\n print(\"exemple: 1 0 1\")\n print(\"\") \n row3.clear()\n row3 = list(map(int, input(\"row #3: \").split()))\n return row3\n\n\n # all the data was collected for the inicial spacial state\n rows = [row1_fun(),row2_fun(),row3_fun()]\n # turning the rows into a 5x5 numpy array\n game = np.array(rows)\n sets = []\n\n # overkill of lists\n lists = [[] for i in range(10000)]\n lists1 = [[] for i in range(10000)]\n # --- copies and appends np array ---\n c = np.copy(game)\n # turns numpy array into string so I can compare them \n c1 = np.array2string(c)\n lists[0].append(c1)\n lists1[0].append(c)\n sets_values = []\n sets.append(c1)\n sets_values.append(c)\n control = 0\n \n # --- state space core logic ---\n picks = []\n while np.count_nonzero(game):\n game = lists1[0][control]\n pick = 1\n picks.append(pick)\n for pick in range(9):\n pick += 1\n picks.append(pick)\n play(game,pick)\n d = np.copy(game)\n print(\"\")\n print(d)\n d1 = np.array2string(d)\n \n if d1 not in sets:\n sets.append(d1)\n sets_values.append(d)\n lists[0].append(d1)\n lists1[0].append(d)\n else:\n pass\n del d, d1\n control += 1\n print(\"Congratulations, the game is resolved!\")\n print(\"Amount of plays made until the game was beaten: \", len(picks))\n print(\"\")\n print(\"1.) Final Solution (in steps taken)\")\n print(\"2.) Final Solution (in coordinates)\")\n print(\"3.) Take me back to the Main Menu\")\n print(\"\")\n \n # translate picks to coordinates\n final_coordinates = []\n for x in picks:\n pick = x\n coordinates = translate(pick)\n final_coordinates.append(coordinates)\n\n # print final answer\n def final_answer():\n while True:\n try:\n question = int(input('Your choice: '))\n break\n except:\n print(\"\")\n print(\"Invalid option. Please try again.\")\n print(\"\")\n print(\"1.) Final Solution (in steps taken)\")\n print(\"2.) Final Solution (in coordinates)\")\n print(\"3.) Take me back to the Main Menu\")\n print(\"\")\n \n if question == 1:\n os.system('cls||clear')\n print(picks)\n if question == 2:\n os.system('cls||clear')\n print(final_coordinates)\n if question == 3:\n os.system('cls||clear')\n from main import main\n main() \n else:\n print(\"\")\n print(\"Invalid option. Please try again.\")\n print(\"\")\n print(\"1.) Final Solution (in steps taken)\")\n print(\"2.) Final Solution (in coordinates)\")\n print(\"3.) 
Take me back to the Main Menu\")\n print(\"\")\n final_answer()\n final_answer()", "def human_go(self, board):\r\n coord_pattern = re.compile(\r\n \"[0-{}],[0-{}]\".format(board.shape[0], board.shape[1])\r\n )\r\n print(\"Enter Coordinates of your go then press enter.\")\r\n input_str = input(\"(space seperated, 0-2 with origin in top left)\\n\")\r\n\r\n if not coord_pattern.match(input_str):\r\n print(\"That is not in the right format, please try again...\")\r\n return self.human_go(board)\r\n else:\r\n y, x = [int(coord) for coord in input_str.split(\",\")]\r\n if board[x][y] != 0:\r\n print(\"That square is already taken, please try again\")\r\n self.human_go()\r\n else:\r\n board[x][y] = -1\r\n return board", "def get_coordinates():\n\tallowed_range = [0,1,2]\n\trow = int(input(\"Enter row: \")) - 1\n\tcol = int(input(\"Enter column: \")) - 1", "def process(self):\n # Process value 1, 2, ..., 9 in order\n for val in range(1, 10):\n # For each row\n for x in range(0, 9):\n exist = False\n can_enter = []\n for y in range(0, 9):\n if self.field[x][y] == val:\n exist = True\n if self.field[x][y] == -1 and val in self.choices[x][y]:\n can_enter.append(y)\n\n # Put val if only one cell can do\n if not exist and len(can_enter) == 1:\n y = can_enter[0]\n self.put(x, y, val)\n \n # For each column\n for y in range(0, 9):\n exist = False\n can_enter = []\n for x in range(0, 9):\n if self.field[x][y] == val:\n exist = True\n if self.field[x][y] == -1 and val in self.choices[x][y]:\n can_enter.append(x)\n\n # Put val in only one cell can do\n if not exist and len(can_enter) == 1:\n x = can_enter[0]\n self.put(x, y, val)\n \n # For each block\n for bx in range(0, 3):\n for by in range(0, 3):\n exist = False\n can_enter = []\n for x in range(bx * 3, (bx + 1) * 3):\n for y in range(by * 3, (by + 1) * 3):\n if self.field[x][y] == val:\n exist = True\n if self.field[x][y] == -1 and val in self.choices[x][y]:\n can_enter.append((x, y))\n \n # Put val if only one cell can do\n if not exist and len(can_enter) == 1:\n x = can_enter[0][0]\n y = can_enter[0][1]\n self.put(x, y, val)", "def move_col(board: Connect4Board) -> int:\r\n\r\n while True:\r\n\r\n try:\r\n\r\n user_input = (int(input('Please specify the COLUMN number.\\nPlease enter an integer between 1 to {} for number of the column: '.format(board.get_num_columns())))) - 1\r\n\r\n #if game_state.valid_col(user_input):\r\n if (Connect4GameUI._get_valid_row(board, int(user_input)) != None and 0 <= user_input < board.get_num_columns()):\r\n\r\n return user_input\r\n\r\n else:\r\n\r\n raise InvalidInputException()\r\n\r\n except:\r\n\r\n print('\\nInvalid Input!!!')\r\n print('Please try it again.\\n')", "def getCoordinates(self, cardNumber):\n number = 'first' if cardNumber == 1 else 'second'\n while True:\n s = input(\"Enter coordinates for \" + number + \" card \")\n s = s.strip()\n x = s[0]\n y = s[-1]\n if x.isdigit() and y.isdigit():\n x = int(x)\n y = int(y)\n if 1 <= x <= self.rows and 1 <= y <= self.columns:\n return x, y\n else:\n print(\" ***Invalid coordinates! Try again.***\")\n else:\n print(\" ***Invalid coordinates! 
Try again.***\")", "def input_error(self, errCode):\n errMsg = ''\n if 'A' in errCode: errMsg = errMsg + 'X column is not specified.\\n'\n if 'B' in errCode: errMsg = errMsg + 'X Column is not numeric.\\n'\n if 'C' in errCode: errMsg = errMsg + 'Y column is not specified.\\n'\n if 'D' in errCode: errMsg = errMsg + 'Y Column is not numeric.\\n'\n if 'E' in errCode: errMsg = errMsg + 'Z Column is not numeric.\\n'\n if 'F' in errCode: errMsg = errMsg + 'Calibration point 1 row is out of range.\\n'\n if 'G' in errCode: errMsg = errMsg + 'Calibration point 2 row is out of range.\\n'\n if 'H' in errCode: errMsg = errMsg + 'First row is not specified.\\n'\n if 'I' in errCode: errMsg = errMsg + 'Last row is not specified.\\n'\n if 'J' in errCode: errMsg = errMsg + 'First row is out of range.\\n'\n if 'K' in errCode: errMsg = errMsg + 'Last row is out of range.\\n'\n if 'L' in errCode: errMsg = errMsg + 'First and last rows are not compatible.\\n'\n self.wait_window(InputError(self, errMsg.rstrip('\\n')))", "def read_and_print_from_hackerrank(self):\n a = list(map(int, input().split()))\n b = list(map(int, input().split()))\n for el in self.show_missing_numbers(a, b):\n print(el,)", "def play(self):\r\n user = []\r\n while 0 not in self.puzzle:\r\n print()\r\n print(\"Your score is \", self.score)\r\n print(\"1.Get Cell Value\")\r\n print(\"2.Set Cell Value\")\r\n print(\"3.Show solution\")\r\n s = int(input(\"Enter\"))\r\n if s == 1:\r\n row = int(input(\"Enter Row Number(0-8)\"))\r\n col = int(input(\"Enter Columm Number(0-8)\"))\r\n if row in [0,1,2,3,4,5,6,7,8] and col in [0,1,2,3,4,5,6,7,8]:\r\n x = self.get(row,col)\r\n print(\"The value is \",x)\r\n else:\r\n print(\"Invalid number. Try again\")\r\n\r\n if s == 2:\r\n row = int(input(\"Enter Row Number(0-8)\"))\r\n col = int(input(\"Enter Columm Number(0-8)\"))\r\n if row in [0,1,2,3,4,5,6,7,8] and col in [0,1,2,3,4,5,6,7,8]:\r\n if self.puzzle[row][col] == 0 or [row][col] in user:\r\n user.append([row,col])\r\n value = int(input(\"Enter digit\"))\r\n if value in [1,2,3,4,5,6,7,8,9]:\r\n self.set(row,col,value)\r\n self.print(self.puzzle)\r\n else:\r\n print(\"Enter valid number\")\r\n else:\r\n print(\"Invalid Number. Try Again\")\r\n if s == 3:\r\n print(\"Solution is \")\r\n self.print(self.rows)", "def input_getter(self, dcd_ply):\r\n \r\n player_no = dcd_ply.return_player()\r\n if player_no == 1:\r\n print(\"Turn: Player 1 \\n Enter position:\", end = \"\")\r\n self.inp_row, self.inp_col = list(map(int, input().split()))\r\n output = []\r\n output.append(self.inp_row)\r\n output.append(self.inp_col)\r\n # return output\r\n self.taken_or_not(output, player_no)\r\n \r\n else:\r\n print(\"Turn: Player 2 \\n Enter position:\", end = \"\")\r\n self.inp_row, self.inp_col = list(map(int, input().split()))\r\n output = []\r\n output.append(self.inp_row)\r\n output.append(self.inp_col)\r\n self.taken_or_not(output, player_no)" ]
[ "0.7249653", "0.7011725", "0.68896455", "0.65655696", "0.64116174", "0.63925433", "0.62278056", "0.6168719", "0.6091157", "0.6074832", "0.604573", "0.6043378", "0.5990928", "0.5926154", "0.5902846", "0.58928376", "0.5879233", "0.5877413", "0.5809618", "0.57965463", "0.57551295", "0.57445896", "0.5738835", "0.57173485", "0.56726015", "0.56365496", "0.55007917", "0.5494659", "0.54857236", "0.5471809" ]
0.71602094
1
Checks if the requested square to change is an original input for the puzzle, which cannot be changed.
def new_input_does_not_overlap_original_board(self, col, row): return self.puzzle[row][col] == 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_solved(self):\n # Iterate through each square of the puzzle\n for row in range(self.sl):\n for col in range(self.sl):\n val = self.puzzle[row][col]\n\n # If any square value is blank (0), not solved, return False\n if val == 0:\n return False\n\n # Trick to keep DRY code: replace each value temporarily with a\n # 0, and use valid_square method with original value to determine\n # if every square is valid\n self.puzzle[row][col] = 0\n valid = self.valid_square(row, col, val)\n self.puzzle[row][col] = val\n \n # If not a valid value for square, return False\n if not valid:\n return False\n return True", "def valid_square(self, row, col, value):\n # Check that the row and col are valid puzzle indices\n if not ((0 <= row < self.sl) and (0 <= col < self.sl)):\n return False\n\n # Check that the square input is empty\n if self.puzzle[row][col] != 0:\n return False\n \n # Check that the value input is a valid puzzle value\n if not (1 <= value <= self.sl):\n if self.puzzle[row][col] == 0 and value == 0:\n return True\n return False\n \n # Check each row, column and block for same number\n for i in range(self.sl): \n if self.puzzle[row][i] == value: # Check each square in row for same value\n return False\n if self.puzzle[i][col] == value: # Check each square in col for same value\n return False\n \n # Check each square in box for same value, a little more complex index-wise\n r = self.bs*(row//self.bs) + (i//self.bs) \n c = self.bs*(col//self.bs) + (i%self.bs) \n if self.puzzle[r][c] == value:\n return False\n \n return True", "def checkValidMove(self, move):\n boardCopy = copy.deepcopy(self)\n tilesChange = False\n if move == Move.UP:\n boardCopy.moveUp()\n elif move == Move.DOWN:\n boardCopy.moveDown()\n elif move == Move.LEFT:\n boardCopy.moveLeft()\n elif move == Move.RIGHT:\n boardCopy.moveRight()\n else:\n raise ValueError('Invalid Move was input')\n \n for i in range(4):\n for j in range(4):\n if boardCopy.getTile(i,j) != self.getTile(i,j):\n tilesChange = True\n del(boardCopy)\n return tilesChange", "def check_legal(self, cur_pos, new_pos, board, state):\n new_row = self.ind(new_pos)[0]\n new_col = self.ind(new_pos)[1]\n cur_row = self.ind(cur_pos)[0]\n cur_col = self.ind(cur_pos)[1]\n \n if state == \"UNFINISHED\":\n # Make sure the position you're going into isn't your own piece\n if board[new_row][new_col] is not None:\n if self.piece_type(new_pos, board).get_color() == self._color:\n return False\n # Check if you're in the palace\n if new_pos in self._special: # Make sure that the piece it's trying to take isn't it's own\n if board[new_row][new_col] is not None:\n if self.piece_type(new_pos, board).get_color() == self._color:\n return False \n # Checking if the movement is left or right (one column apart) from the cur_pos\n if (new_col == cur_col + 1 or new_col == cur_col - 1) and new_row == cur_row:\n return True\n\n # Checking if forward movement is legal\n elif self._color == 'BLUE':\n print(\"this soldier is blue\")\n if new_row == cur_row - 1 and new_col == cur_col:\n print(\"The blue soldier is trying to move forward\")\n # cant take your own piece\n if self.piece_type(new_pos, board) is not None:\n print(\"There's a piece here\")\n if self.piece_type(new_pos, board).get_color == self._color:\n print(\"Trying to take it's own color piece\")\n return False\n return True\n elif self._color == 'RED':\n print(\"this soldier is red\")\n if new_row == cur_row + 1 and new_col == cur_col:\n print(\"The red soldier is trying to move forward\")\n if self.piece_type(new_pos, 
board) is not None:\n print(\"There's a piece here\")\n if self.piece_type(new_pos, board).get_color == self._color:\n print(\"Trying to take it's own color piece\")\n return False\n return True\n else:\n return False\n else:\n return False", "def check_legal(self, cur_pos, new_pos, board, state):\n new_row = self.ind(new_pos)[0]\n new_col = self.ind(new_pos)[1]\n cur_row = self.ind(cur_pos)[0]\n cur_col = self.ind(cur_pos)[1]\n piece = self.piece_type(cur_pos, board)\n\n if state == \"UNFINISHED\":\n if (new_row == cur_row + 3) and (new_col == cur_col + 2): #F5\n if board[cur_row + 1][cur_col] and board[cur_row + 2][cur_col + 1] is not None:\n print(\"hello 1 elephant\")\n return\n elif self.piece_type(new_pos, board) is not None and self.piece_type(new_pos, board).get_color() == self._color:\n print(\"1for some reason it thinks the new pos has a color of the same piece\")\n return\n print(\"elephant moved down and right\")\n return True\n\n elif (new_row == cur_row - 3) and (new_col == cur_col - 2): #B1\n print(\"Hello im here\")\n # checking left and right are valid\n if board[cur_row - 1][cur_col] and board[cur_row - 2][cur_col - 1] is not None:\n print(\"horse attempted to move left and up the board\")\n return\n elif self.piece_type(new_pos, board) is not None and self.piece_type(new_pos, board).get_color() == self._color:\n return\n print(\"e moved up and left\")\n return True\n\n elif (new_row == cur_row + 3) and (new_col == cur_col - 2): #\n # checking left and right are valid\n if board[cur_row + 1][cur_col] and board[cur_row + 2][cur_col - 1] is not None:\n print(\"hello e3\")\n return False\n elif self.piece_type(new_pos, board) is not None and self.piece_type(new_pos, board).get_color() == self._color:\n return False\n print(\"e moved down and right\")\n return True\n\n elif (new_row == cur_row - 3) and (new_col == cur_col + 2): #F1\n # checking left and right are valid\n if board[cur_row - 1][cur_col] and board[cur_row - 2][cur_col + 1] is not None:\n print(\"hello e4\")\n return False\n elif self.piece_type(new_pos, board) is not None and self.piece_type(new_pos, board).get_color() == self._color:\n return False\n print(\"Horse moved down and left 2\")\n return True\n #---------------------------------------------------------------------------------------------------------------\n # Check if the forwards and backwards is legal\n elif (new_row == cur_row - 2) and (new_col == cur_col + 3): #G2\n # checking left and right are valid\n if board[cur_row][cur_col + 1] and board[cur_row - 1][cur_col + 2] is not None:\n print(\"hello e5\")\n return\n elif self.piece_type(new_pos, board) is not None and self.piece_type(new_pos, board).get_color() == self._color:\n print(\"bye 5e\")\n return\n print(\"it worked e5\")\n return True\n\n elif (new_row == cur_row - 2) and (new_col == cur_col - 3): #A2\n # checking left and right are valid\n if board[cur_row][cur_col - 1] and board[cur_row - 1][cur_col - 2] is not None:\n print(\"hello e6\")\n return\n elif self.piece_type(new_pos, board) is not None and self.piece_type(new_pos, board).get_color() == self._color:\n print(\"bye 6e\")\n return\n print(\"it worked e6\")\n return True\n\n elif (new_row == cur_row + 2) and (new_col == cur_col + 3): #G6\n # checking left and right are valid\n if board[cur_row][cur_col + 1] and board[cur_row - 1][cur_col - 2] is not None:\n print(\"hello 7e\")\n return\n elif self.piece_type(new_pos, board) is not None and self.piece_type(new_pos, board).get_color() == self._color:\n print(\"ebye 7\")\n return\n 
print(\"it worked e7\")\n return True\n\n elif (new_row == cur_row + 2) and (new_col == cur_col - 3): #A6\n # checking left and right are valid\n if board[cur_row][cur_col - 1] and board[cur_row + 1][cur_col - 2] is not None:\n print(\"hello 8\")\n return\n elif self.piece_type(new_pos, board) is not None and self.piece_type(new_pos, board).get_color() == self._color:\n print(\"bye 8\")\n return\n print(\"it worked 8\")\n return True\n# else:\n # print(\"it actually never entered the if statement?\"\n #return False\n else:\n print(\"False\")\n return False", "def can_add_to_square(self, tile, value):\n start_row = tile.row // self.board_squared * self.board_squared\n start_col = tile.column // self.board_squared * self.board_squared\n\n for row in range(start_row, start_row + self.board_squared):\n for col in range(start_col, start_col + self.board_squared):\n if self.puzzle[row][col].value == value:\n return False\n\n return True", "def square_check(self):\n return len(self.matrix) == len(self.matrix[0])", "def check_legal(self, cur_pos, new_pos, board, state):\n new_row = self.ind(new_pos)[0]\n new_col = self.ind(new_pos)[1]\n cur_row = self.ind(cur_pos)[0]\n cur_col = self.ind(cur_pos)[1]\n piece = self.piece_type(cur_pos, board)\n\n if state == \"UNFINISHED\": \n # Check if the movement left or right is legal\n if (new_row == cur_row - 1) and (new_col == cur_col + 2):\n # checking left and right are valid\n if board[cur_row][cur_col + 1] is not None:\n print(\"hello 1\")\n return\n elif self.piece_type(new_pos, board) is not None and self.piece_type(new_pos, board).get_color() == self._color:\n print(\"1for some reason it thinks the new pos has a color of the same piece\")\n return\n print(\"Horse moved up and right 2\")\n return True\n\n elif (new_row == cur_row - 1) and (new_col == cur_col - 2):\n print(\"Hello im here\")\n # checking left and right are valid\n if board[cur_row][cur_col + 1] is not None:\n print(\"horse attempted to move left and up the board\")\n return\n elif self.piece_type(new_pos, board) is not None and self.piece_type(new_pos, board).get_color() == self._color:\n return\n print(\"Horse moved up and left 2\")\n return True\n\n elif (new_row == cur_row + 1) and (new_col == cur_col + 2):\n # checking left and right are valid\n if board[cur_row][cur_col - 1] is not None:\n print(\"hello 3\")\n return False\n elif self.piece_type(new_pos, board) is not None and self.piece_type(new_pos, board).get_color() == self._color:\n return False\n print(\"Horse moved down and right 2\")\n return True\n\n elif (new_row == cur_row + 1) and (new_col == cur_col - 2):\n # checking left and right are valid\n if board[cur_row][cur_col - 1] is not None:\n print(\"hello 4\")\n return False\n elif self.piece_type(new_pos, board) is not None and self.piece_type(new_pos, board).get_color() == self._color:\n return False\n print(\"Horse moved down and left 2\")\n return True\n #---------------------------------------------------------------------------------------------------------------\n # Check if the forwards and backwards is legal\n elif (new_row == cur_row - 2) and (new_col == cur_col + 1):\n # checking left and right are valid\n if board[cur_row - 1][cur_col] is not None:\n print(\"hello 5\")\n return\n elif self.piece_type(new_pos, board) is not None and self.piece_type(new_pos, board).get_color() == self._color:\n print(\"bye 5\")\n return\n print(\"it worked 5\")\n return True\n\n elif (new_row == cur_row - 2) and (new_col == cur_col - 1):\n # checking left and right are valid\n 
if board[cur_row - 1][cur_col] is not None:\n print(\"hello 6\")\n return\n elif self.piece_type(new_pos, board) is not None and self.piece_type(new_pos, board).get_color() == self._color:\n print(\"bye 6\")\n return\n print(\"it worked 6\")\n return True\n\n elif (new_row == cur_row + 2) and (new_col == cur_col + 1):\n # checking left and right are valid\n if board[cur_row + 1][cur_col] is not None:\n print(\"hello 7\")\n return\n elif self.piece_type(new_pos, board) is not None and self.piece_type(new_pos, board).get_color() == self._color:\n print(\"bye 7\")\n return\n print(\"it worked 7\")\n return True\n\n elif (new_row == cur_row + 2) and (new_col == cur_col - 1):\n # checking left and right are valid\n if board[cur_row + 1][cur_col] is not None:\n print(\"hello 8\")\n return\n elif self.piece_type(new_pos, board) is not None and self.piece_type(new_pos, board).get_color() == self._color:\n print(\"bye 8\")\n return\n print(\"it worked 8\")\n return True\n# else:\n # print(\"it actually never entered the if statement?\"\n #return False\n else:\n print(\"False\")\n return False", "def check_square(self):\n if self.rows != self.cols:\n raise IndexError(\"Matrix is not square\")", "def valid(game_board, value, row, col):\n if len(value) > 1:\n value = \"X\"\n # Check row of new position\n for i in range(len(game_board[row])):\n if game_board[row][i] == value and i != col:\n return False\n\n # Check column of new position\n for i in range(len(game_board)):\n if game_board[i][col] == value and i != row:\n return False\n\n # Check the 3x3 square area\n start_row = 3 * (row // 3)\n start_col = 3 * (col // 3)\n for i in range(start_row, start_row+3):\n for j in range(start_col, start_col+3):\n if game_board[i][j] == value and i != row and j != col:\n return False\n\n return True", "def check_legal(self, cur_pos, new_pos, board, state): \n if cur_pos and new_pos in self._special:\n new_row = self.ind(new_pos)[0]\n new_col = self.ind(new_pos)[1]\n cur_row = self.ind(cur_pos)[0]\n cur_col = self.ind(cur_pos)[1]\n\n if state == \"UNFINISHED\":\n if new_pos in self._special: # Make sure that the piece it's trying to take isn't it's own\n if board[new_row][new_col] is not None:\n if self.piece_type(new_pos, board).get_color() == self._color:\n return False\n \n if new_pos in self._special: # if its in the palace\n # Checking if the movement is left or right (one column apart) from the cur_pos\n if (new_col == cur_col + 1 or new_col == cur_col - 1) and new_row == cur_row:\n return True\n # Checking if forward or backward movement is legal\n elif (new_row == cur_row - 1 or new_row == cur_row + 1) and (new_col == cur_col):\n return True\n # Checking if diagonal lines are possible\n elif cur_pos in self._corners:\n if (new_row == cur_row + 1 or new_row == cur_row - 1) and (new_col == cur_col - 1 or new_col == cur_col + 1):\n return True\n else:\n return False\n else:\n return False", "def check_pointing_pair(self):\n\n for index in range(self.board_size):\n squ = self.squares[index]\n nos = self.get_numbers([self.possibles[cell[0]][cell[1]] for cell in squ])\n\n for num in nos:\n s_row, s_col, found = self.same_row_col(num, squ)\n if s_row:\n row = found[0][0]\n for c in range(self.board_size):\n if (row, c) not in squ:\n if num in self.possibles[row][c]:\n self.possibles[row][c].remove(num)\n if s_col:\n col = found[0][1]\n for r in range(self.board_size):\n if (r, col) not in squ:\n if num in self.possibles[r][col]:\n self.possibles[r][col].remove(num)", "def is_valid(move):\n return 
isinstance(move, int) and move in Othello.squares()", "def isLegal(self):\n # checks for same values in rows\n for n in range(9):\n rows = set()\n for m in range(9):\n if self.puzzle[n][m] != 0:\n size = len(rows)\n rows.add(self.puzzle[n][m])\n if size == len(rows):\n return False\n\n #checks for same values in columns\n for m in range(9):\n cols = set()\n for n in range(9):\n if self.puzzle[n][m] != 0:\n size = len(cols)\n cols.add(self.puzzle[n][m])\n if size == len(cols):\n return False\n\n #checks for same values in sections\n sections = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]\n for r in sections:\n for c in sections:\n sects = set()\n for n in r:\n for m in c:\n if self.puzzle[n][m] != 0:\n size = len(sects)\n sects.add(self.puzzle[n][m])\n if size == len(sects):\n return False\n return True", "def check_if_solved(self):\n for cell in self.board.values():\n if not cell.value:\n return False\n return True", "def check_legal(self, cur_pos, new_pos, board, state):\n if cur_pos and new_pos in self._special:\n new_row = self.ind(new_pos)[0]\n new_col = self.ind(new_pos)[1]\n cur_row = self.ind(cur_pos)[0]\n cur_col = self.ind(cur_pos)[1]\n\n if state == \"UNFINISHED\":\n if new_pos in self._special: # Make sure that the piece it's trying to take isn't it's own\n if board[new_row][new_col] is not None:\n if self.piece_type(new_pos, board).get_color() == self._color:\n return False\n \n if new_pos in self._special: # if its in the palace\n # Checking if the movement is left or right (one column apart) from the cur_pos\n if (new_col == cur_col + 1 or new_col == cur_col - 1) and new_row == cur_row:\n return True\n # Checking if forward or backward movement is legal\n elif (new_row == cur_row - 1 or new_row == cur_row + 1) and (new_col == cur_col):\n return True\n # Checking if diagonal lines are possible\n elif cur_pos in self._corners:\n if (new_row == cur_row + 1 or new_row == cur_row - 1) and (new_col == cur_col - 1 or new_col == cur_col + 1):\n return True\n else:\n return False\n else:\n return False", "def is_solvable(self, row=0, col=0):\n if row == self.sl-1 and col == self.sl: \n return True\n\n # If column is the side length, mvoe indices to next row\n if col == self.sl:\n return self.is_solvable(row+1, 0)\n\n # If square has a value already, move to next column\n if self.puzzle[row][col] != 0: \n return self.is_solvable(row, col + 1)\n\n # If empty square, try each value in that square\n for value in range(1, self.sl+1): \n # If a valid value, recurse with that value and attempt to solve \n if self.valid_square(row, col, value): \n self.puzzle[row][col] = value\n solved = self.is_solvable(row, col + 1) \n self.puzzle[row][col] = 0\n\n # If value solves puzzle, return solved\n if solved:\n return solved\n\n return False", "def test_is_solved(self):\n p = hw.TilePuzzle([[1, 2], [3, 0]])\n self.assertTrue(p.is_solved())\n p = hw.TilePuzzle([[0, 1], [3, 2]])\n self.assertFalse(p.is_solved())", "def square_valid(board: Board, n: int, pawn_value: int, x: int, y: int) -> bool:\n\n return (coordinates_within_board(n, x, y) and\n square_playable(board, pawn_value, x, y))", "def has_valid_move(self, cur_square, board):\n coords = cur_square.coords\n neighbor_list = [tuple(map(sum, zip(coords, offset))) for offset in self._offsets]\n return self.has_valid_move_in_list(coords, neighbor_list, board)", "def valid_move(self, row, col):\n if not self._game_over:\n i_row, i_col = row-1, col-1\n #i_row and i_col wil be used to index the board (hence the i)\n (valid, flip_lst) = 
self._valid_placement(i_row, i_col)\n #print(\"FOR TESTING. Tiles Flipped: \", flip_lst)\n \n if valid:\n #Big Change: You decided to make determining validity\n # and flipping separate operations\n self._flip(i_row, i_col, flip_lst)\n else:\n print(\"\\nPlease enter a valid move!\")\n return False\n\n if self._board_is_full():\n self._game_over = True\n self._set_winner() \n \n self._switch_turn(self._turn)\n if not self._valid_move_exists(): #Check if the other player has any valid moves\n print(\"\\nNo valid moves exist for {0}. {0}'s turn has been skipped\".format(self._turn))\n self._switch_turn(self._turn) #Switch turn back to player before skip was determined\n if not self._valid_move_exists(): #Check if the other player has any valid moves\n print(\"No valid moves exist for {0}. {0}'s turn has been skipped\".format(self._turn))\n print(\"No moves exist for either player. GAME OVER\")\n self._game_over = True\n self._set_winner()\n return False\n\n return True\n elif self._game_over:\n print(\"The game is over. No more moves can be made!\")\n #TODO: Replace this^ with an exception later?\n return False", "def is_changed(self, new_grid):\n for row in range(self._grid_height):\n for col in range(self._grid_width):\n if self.get_tile(row,col) != new_grid[row][col]:\n return True\n return False", "def is_valid(puzzle, xy_coord, input):\r\n # validate row consistency\r\n for x in range(len(puzzle.squares[0])):\r\n if puzzle.squares[xy_coord[1]][x].value == input and xy_coord[0] != x:\r\n return False\r\n\r\n # validate column consistency\r\n for y in range(len(puzzle.squares)):\r\n if puzzle.squares[y][xy_coord[0]].value == input and xy_coord[1] != y:\r\n return False\r\n\r\n # validate 3x3 subgrid consistency\r\n x_grid = xy_coord[0] // 3\r\n y_grid = xy_coord[1] // 3\r\n for y in range(y_grid * 3, y_grid * 3 + 3):\r\n for x in range(x_grid * 3, x_grid * 3 + 3):\r\n if puzzle.squares[y][x].value == input and (x, y) != xy_coord:\r\n return False\r\n\r\n return True", "def check_legal(self, cur_pos, new_pos, board, state):\n new_row = self.ind(new_pos)[0]\n new_col = self.ind(new_pos)[1]\n cur_row = self.ind(cur_pos)[0]\n cur_col = self.ind(cur_pos)[1]\n\n if state == \"UNFINISHED\":\n # Make sure the position you're going into isn't your own piece\n if board[new_row][new_col] is not None:\n if self.piece_type(new_pos, board).get_color() == self._color:\n return False\n \n # Checking diagonals in the palace\n if cur_pos and new_pos in self._special:\n # Checking if the movement is in the same column\n if new_col == cur_col and self.check_path(cur_pos, new_pos, board, state) is True:\n return True\n # Checking if the movement is in the same row\n elif new_row == cur_row and self.check_path(cur_pos, new_pos, board, state) is True:\n return True\n # Checking all possible diagonals\n elif new_row == cur_row + 1 and new_col == cur_col + 1 and self.check_path(cur_pos, new_pos, board, state) is True:\n return True\n elif new_row == cur_row - 1 and new_col == cur_col - 1 and self.check_path(cur_pos, new_pos, board, state) is True:\n return True\n elif new_row == cur_row + 2 and new_col == cur_col + 2 and self.check_path(cur_pos, new_pos, board, state) is True:\n return True\n elif new_row == cur_col - 2 and new_row == cur_col - 2 and self.check_path(cur_pos, new_pos, board, state) is True:\n return True \n # Checking if the movement is in the same column\n if new_col == cur_col and self.check_path(cur_pos, new_pos, board, state) is True:\n return True\n # Checking if the movement is in the same 
row\n elif new_row == cur_row and self.check_path(cur_pos, new_pos, board, state) is True:\n return True\n else:\n return False\n else:\n return False", "def test_square(self, board, row, col, test):\n if row < 0 or row > 7:\n return False\n if col < 0 or col > 7:\n return False\n \n return test(board[row][col])", "def square_playable(board: Board, pawn_value: int, x: int, y: int) -> bool:\n\n square = board[y][x]\n opponent_value = 3 if pawn_value is 1 else 1\n\n if argv[2] is '0':\n return square is 0\n else:\n return (square is not pawn_value and\n square is not pawn_value+1 and # unplayable square for player\n square is not opponent_value and\n square is not 5) # 5 is unplayable for the two players", "def is_valid_move(state, move):\n row, col = move\n if row not in [1, 2, 3] or col not in [1, 2, 3]:\n print(\"Invalid move! Specify correct game square!\")\n return False\n if state[row-1][col-1] != '_':\n print('Invalid move! Place your marker on a free square!')\n return False\n return True", "async def check(self):\n\n while not self.solved:\n # Get list of possible numbers this square can have\n possibles = self.get_possible_numbers()\n # If there's only once possibility, then use this number...this square is now solved\n if len(possibles) == 1:\n self.num = possibles.pop()\n # If there are no possible squares well...something's wrong, that shouldn't be possible\n # This check is done because we want to be able to guess and check, and figure out if a guess is invalid\n elif len(possibles) == 0:\n raise ValueError(\"Impossible square; no possible numbers based on restrictions\")\n # Otherwise wait a small amount and continue\n else:\n await asyncio.sleep(0.05)", "def check_position_is_legal(grid, num, i, j):\n args = (grid, num, i, j)\n return (not check_row(*args)) and (not check_col(*args)) and (not check_local_square(*args))", "def is_valid_move(self, position, dest_square):\n if self.symbol.isupper() and position.turn != 'w':\n return False\n elif self.symbol.islower() and position.turn != 'b':\n return False\n elif dest_square not in self.calculate_scope(position):\n return False\n else:\n return True" ]
[ "0.68145555", "0.66621006", "0.65014184", "0.6457396", "0.64046955", "0.6342213", "0.6310124", "0.630704", "0.6286575", "0.62758124", "0.62362766", "0.6218367", "0.62178296", "0.61827266", "0.61717474", "0.61584324", "0.61545163", "0.61536086", "0.6134026", "0.6131081", "0.6129493", "0.61212134", "0.6102435", "0.6101317", "0.60984176", "0.60844016", "0.6080843", "0.608079", "0.6066803", "0.6034756" ]
0.710153
0
Method for retrieving game state.
def get_game_state(self): return self.game_state
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_game_state(self):\r\n return self._game_state", "def get_game_state(self):\n return self._game_state", "def get_game_state(self):\n return self._game_state", "def get_game_state(self):\n return self._game_state", "def get_game_state(self):\n return self._current_state", "def get_game_state(self):\n\n return self._game_state", "def get_game_state(self):\n\n return self._game_state", "def get_game_state(self):\n return self._game_status", "def game_state(self):\n return self._game_state", "def get_new_gamestate(self):", "def get_current_state(self):\n return self.game.get_current_state()", "def getGameState(self):\n return None", "def get_state(self):\n return self.state_map", "def GetState(self):\r\n \r\n return self.state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state" ]
[ "0.8740767", "0.86155", "0.86155", "0.86155", "0.8482095", "0.84146124", "0.84146124", "0.8371279", "0.8281865", "0.82611275", "0.7860468", "0.7752739", "0.7565724", "0.75503594", "0.75414294", "0.75414294", "0.75414294", "0.75414294", "0.75414294", "0.75414294", "0.75414294", "0.75414294", "0.75414294", "0.75414294", "0.75414294", "0.75414294", "0.75414294", "0.75414294", "0.75414294", "0.75414294" ]
0.87546504
0
Method for playing a game of Sudoku. Prints out rules and instructions and asks for user inputs. If the current puzzle is solved, asks the player if they would like to play again and provides a new puzzle.
def play_sudoku(puzzle): print_instructions() print("For review and grading purposes purposes, here is a sample solution:") puzzle.print_board(puzzle.alg_solution) # while puzzle is not solved, continues to ask user for their next input while puzzle.get_game_state() != "Solved!": puzzle.request_number_input() puzzle.print_board(puzzle.get_game_board()) # if puzzle is solved, asks user if they would like to play again play_again = input("Would you like to play again? Y/N: ") play_again = play_again.lower() if play_again == 'y': puzzle.build_game_board() play_sudoku(puzzle) else: print("Thanks for playing!")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\r\n print(WELCOME_MESSAGE)\r\n\r\n playing = True\r\n while playing:\r\n\r\n # Valid inputs that the user can use\r\n move_actions = (UP, DOWN, LEFT, RIGHT)\r\n other_actions = (GIVE_UP, HELP)\r\n\r\n grid_size = int(input(BOARD_SIZE_PROMPT))\r\n\r\n # Get the puzzle and its solution\r\n solution = get_game_solution(WORDS_FILE, grid_size)\r\n puzzle = shuffle_puzzle(solution)\r\n\r\n solved = check_win(puzzle, solution)\r\n print_solution_position(solution, puzzle)\r\n\r\n # Continue to loop until the puzzle is solved or the user gives up\r\n while not solved:\r\n player_action = input(DIRECTION_PROMPT)\r\n\r\n # Player move input handler\r\n # Updates the puzzle with the new board layout, if fail alert user\r\n if player_action in move_actions:\r\n move_attempt = move(puzzle, player_action)\r\n if move_attempt:\r\n puzzle = move_attempt\r\n else:\r\n print(INVALID_MOVE_FORMAT.format(player_action))\r\n\r\n # Other inputs handler\r\n elif player_action in other_actions:\r\n if player_action == GIVE_UP:\r\n break\r\n elif player_action == HELP:\r\n print(HELP_MESSAGE)\r\n\r\n # If there is no match for input, alert the user\r\n else:\r\n print(INVALID_MESSAGE)\r\n\r\n print_solution_position(solution, puzzle)\r\n solved = check_win(puzzle, solution)\r\n\r\n # Show message depending if user won or not\r\n if solved:\r\n print(WIN_MESSAGE)\r\n else:\r\n print(GIVE_UP_MESSAGE)\r\n\r\n # Check if the user wishes to play again\r\n play_again = input(PLAY_AGAIN_PROMPT)\r\n if not (play_again.lower() == \"y\" or play_again == \"\"):\r\n playing = False\r\n print(BYE)", "def play(self):\r\n user = []\r\n while 0 not in self.puzzle:\r\n print()\r\n print(\"Your score is \", self.score)\r\n print(\"1.Get Cell Value\")\r\n print(\"2.Set Cell Value\")\r\n print(\"3.Show solution\")\r\n s = int(input(\"Enter\"))\r\n if s == 1:\r\n row = int(input(\"Enter Row Number(0-8)\"))\r\n col = int(input(\"Enter Columm Number(0-8)\"))\r\n if row in [0,1,2,3,4,5,6,7,8] and col in [0,1,2,3,4,5,6,7,8]:\r\n x = self.get(row,col)\r\n print(\"The value is \",x)\r\n else:\r\n print(\"Invalid number. Try again\")\r\n\r\n if s == 2:\r\n row = int(input(\"Enter Row Number(0-8)\"))\r\n col = int(input(\"Enter Columm Number(0-8)\"))\r\n if row in [0,1,2,3,4,5,6,7,8] and col in [0,1,2,3,4,5,6,7,8]:\r\n if self.puzzle[row][col] == 0 or [row][col] in user:\r\n user.append([row,col])\r\n value = int(input(\"Enter digit\"))\r\n if value in [1,2,3,4,5,6,7,8,9]:\r\n self.set(row,col,value)\r\n self.print(self.puzzle)\r\n else:\r\n print(\"Enter valid number\")\r\n else:\r\n print(\"Invalid Number. Try Again\")\r\n if s == 3:\r\n print(\"Solution is \")\r\n self.print(self.rows)", "def play_game():\n clear()\n print(\" 1 | 2 | 3 \\n --- --- --- \\n\"\n \" 4 | 5 | 6 \\n --- --- --- \\n\"\n \" 7 | 8 | 9 \")\n player = 'Player_one'\n continue_game = True\n while continue_game:\n position = game.ask(player=player)\n if position is False:\n print(\"Please enter a number from 1-9.\")\n position = game.ask(player=player)\n clear()\n update_and_switch = game.update_and_switch(position, player=player)\n if update_and_switch is False:\n position = game.ask(player=player)\n game.update_and_switch(position, player=player)\n else:\n player = game.switch_player(player)\n continue_game = game.evaluate_winner()\n\n restart = input(\"Do you want to play again? 
(yes or no)\\n\").lower()\n if restart == 'yes':\n game.list = [\" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \"]\n play_game()\n\n else:\n clear()\n print(\"Bye 👋 Hope you had fun!\")", "def demo():\n\n # Initialize board with all cells having possible values 1..9\n board = board_init()\n\n # Unsolved demo puzzle\n # Hard puzzle by Arto Inkala:\n # http://abcnews.go.com/blogs/headlines/2012/06/can-you-solve-the-hardest-ever-sudoku/\n read_puzzle(board, \"8..........36......7..9.2...5...7.......457.....1...3...1....68..85...1..9....4..\")\n\n # Print unsolved puzzle\n print(\"Initial Sudoku board:\")\n print_board(board)\n\n # Solve the puzzle\n board = solve_puzzle(board)\n\n # Print the solution\n print(\"Solution:\")\n print_board(board)\n\n\n # Write output to file\n write_to_file(board)\n \n return 0", "def solve_soduku(sudoku, screen):\n\n myfont = pygame.font.SysFont('Times New Roman', 30)\n\n # Creates a copy of the sudoku board so that we don't mess up the original board\n solved_board = sudoku.board\n\n # Stores the index of the next number that should be tried (the index will be used with the possible_nums list)\n try_new_nums = [[0] * 9 for y in range(9)]\n\n # Creates a list that will act like a stack for the depth first search (stores tuples (row, col) for each unsolved square)\n nodes = [sudoku.find_next_empty_node((0, -1))]\n\n done = False\n\n # Keeps running until the puzzle is either solved or runs out of possible combinations\n while len(nodes) != 0:\n\n time.sleep(.001)\n\n if not done:\n update_grid(screen, (nodes[len(nodes) - 1][0], nodes[len(nodes) - 1][1]), solved_board, myfont)\n draw_lines(screen, [0, 0, 0])\n\n pygame.display.update()\n\n # finds all possible numbers that can go into the current unsolved square\n one = set(sudoku.check_vertically(nodes[len(nodes) - 1], solved_board))\n two = set(sudoku.check_horizontally(nodes[len(nodes) - 1], solved_board))\n three = set(sudoku.check_box(nodes[len(nodes) - 1], solved_board))\n possible_nums = list(one.intersection(two).intersection(three))\n\n # Determines if there is a number that can be put into the current unsolved square\n if len(possible_nums) > 0:\n\n # Stores the current number in the current unsolved square\n curr_num = solved_board[nodes[len(nodes) - 1][0]][nodes[len(nodes) - 1][1]]\n\n # Stores the next number that will be tried in the current unsolved square\n possible_next_num = possible_nums[\n try_new_nums[nodes[len(nodes) - 1][0]][nodes[len(nodes) - 1][1]] % len(possible_nums)]\n\n # Makes sure that the code doesn't get stuck trying the same combos\n if try_new_nums[nodes[len(nodes) - 1][0]][nodes[len(nodes) - 1][1]] == len(possible_nums):\n solved_board[nodes[len(nodes) - 1][0]][nodes[len(nodes) - 1][1]] = 0\n try_new_nums[nodes[len(nodes) - 1][0]][nodes[len(nodes) - 1][1]] = 0\n nodes.pop()\n continue\n\n # Makes sure that the code doesn't get stuck on trying the same number\n if possible_next_num == curr_num:\n solved_board[nodes[len(nodes) - 1][0]][nodes[len(nodes) - 1][1]] = 0\n nodes.pop()\n continue\n\n # Sets the unsolved square to the next number that is to be tried\n solved_board[nodes[len(nodes) - 1][0]][nodes[len(nodes) - 1][1]] = possible_next_num\n\n # Changes which index will be used to find a different number if the new number does not work\n try_new_nums[nodes[len(nodes) - 1][0]][nodes[len(nodes) - 1][1]] += 1\n\n # if there are no possible numbers for the current square, it backtracks to the last number that can change\n else:\n solved_board[nodes[len(nodes) - 
1][0]][nodes[len(nodes) - 1][1]] = 0\n nodes.pop()\n continue\n\n # Determines if there is still an empty unsolved square left\n if sudoku.has_next_emtpy_node(nodes[len(nodes) - 1]):\n nodes.append(sudoku.find_next_empty_node(nodes[len(nodes) - 1]))\n else:\n update_grid(screen, (nodes[len(nodes) - 1][0], nodes[len(nodes) - 1][1]), solved_board, myfont)\n draw_lines(screen, [0, 0, 0])\n done = True", "def solveSudoku(self, board):\n self.back_track(board)\n print(board)", "def solve_puzzle(self):\r\n \r\n counter = 0\r\n rows = self._height-1\r\n cols = self._width-1\r\n # print rows, cols\r\n # print 'The greed has %s rows and %s coloumn indexes' %(rows, cols) \r\n solution_move = ''\r\n if self.get_number(0,0) == 0 and \\\r\n self.get_number(0,1) == 1:\r\n # print 'Congrads Puxxle is Aolved at start!!!!!'\r\n return ''\r\n #appropriate_number = (self._height * self._width) - 1\r\n appropriate_number = (rows+1) * (cols+1) -1\r\n # print 'First appropriate_number=',appropriate_number\r\n # print \"Grid first tile that we will solwing has value =\", self._grid[rows][cols]\r\n \r\n while counter < 300:\r\n counter +=1\r\n # print self\r\n #appropriate_number = (rows+1) * (cols+1) -1\r\n # print 'Appropriate number in loop=',appropriate_number\r\n # print 'We are solving %s index_row and %s index_col' %(rows, cols) \r\n ####Case when we use solve_interior_tile\r\n if rows > 1 and cols > 0:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n cols -= 1\r\n appropriate_number -=1\r\n else:\r\n # print 'We are solving interior tile', (rows, cols)\r\n solution_move += self.solve_interior_tile(rows, cols)\r\n # print 'Solution move=', solution_move\r\n cols -= 1\r\n #### Case when we use solve_col0_tile\r\n elif rows > 1 and cols == 0:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n rows -= 1\r\n cols = self._width-1\r\n appropriate_number -=1\r\n else:\r\n # print 'We are solwing tile 0 in row', rows\r\n # print 'Appropriate number here ='\r\n solution_move += self.solve_col0_tile(rows)\r\n # print 'Solution move=', solution_move\r\n rows -=1\r\n cols = self._width-1\r\n\r\n\r\n #### Cases when we use solve_row0_tile\r\n elif rows == 1 and cols > 1:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n rows -= 1\r\n #cols = self._width-1\r\n appropriate_number -= self._width\r\n\r\n else:\r\n # print 'Solving upper 2 rows right side'\r\n solution_move += self.solve_row1_tile(cols)\r\n rows -=1\r\n appropriate_number -= self._width\r\n #### Cases when we use solve_row1_tile \r\n if rows < 1 and cols > 1:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n rows += 1\r\n cols -= 1\r\n appropriate_number +=self._width-1\r\n else:\r\n # print '(1,J) tile solved, lets solwe tile (0,j) in tile',(rows,cols)\r\n # print 'Greed after move solve_row1_tile'\r\n # print self\r\n solution_move += self.solve_row0_tile(cols)\r\n rows +=1\r\n cols -=1\r\n appropriate_number +=self._width-1\r\n\r\n\r\n #### Case when we use solve_2x2\r\n elif rows <= 1 and cols <= 1:\r\n # print 'We are solving 2x2 puzzle'\r\n solution_move += self.solve_2x2()\r\n if self._grid[0][0] == 0 and \\\r\n self._grid[0][1] == 1:\r\n # print 'Congrads Puxxle is SOLVED!!!!!'\r\n break\r\n\r\n\r\n\r\n\r\n if counter > 100:\r\n # print 'COUNTER BREAK'\r\n break\r\n # print solution_move, len(solution_move)\r\n return 
solution_move\r\n\r\n\r\n\r\n\r\n\r\n\r\n # for row in solution_greed._grid[::-1]:\r\n # print solution_greed._grid\r\n # print 'Row =',row\r\n \r\n # if solution_greed._grid.index(row) > 1:\r\n # print \"Case when we solwing Interior and Tile0 part\"\r\n \r\n\r\n # for col in solution_greed._grid[solution_greed._grid.index(row)][::-1]:\r\n # print 'Coloumn value=', col\r\n #print row[0]\r\n # if col !=row[0]:\r\n # print 'Case when we use just Interior tile solution'\r\n # print solution_greed._grid.index(row)\r\n # print row.index(col)\r\n \r\n # solution += solution_greed.solve_interior_tile(solution_greed._grid.index(row) , row.index(col))\r\n # print 'Solution =', solution\r\n # print self \r\n # print solution_greed._grid\r\n # elif col ==row[0]:\r\n # print 'Case when we use just Col0 solution'\r\n\r\n # else:\r\n # print 'Case when we solwing first two rows'\r\n\r\n #return \"\"\r", "def solveSudoku(grid):\n\n #if the board is not empty, then check to see if its solved\n #return True if it is\n if not findEmpty(grid):\n if grid.checkBoard():\n return True\n else:\n return False\n #finds the first empty position\n p = findEmpty(grid)\n #considers 1-9 and then places it into the empty spot\n for i in range(1, 10):\n grid.board[p[0]][p[1]] = i\n #if the input is viable, then it goes solves the new given board until its solved\n if grid.checkInput(p[0], p[1]):\n if solveSudoku(grid):\n return True\n #if there are no viable options for that spot, then it backtracks \n grid.board[p[0]][p[1]] = 0\n return False", "def main():\n\tprint(\"Welcome to TicTacToe\")\n\tboard = Board()\n\twhile (not board.isOver()):\n\t\tprint(\"It is {0}'s turn\".format(board.current) + board.__str__())\n\t\tmove = input('Where would you like to go? : ').strip()\n\t\tif (move == 'q'):\n\t\t\tbreak\n\t\telif (board.makeMove(move) == 1):\n\t\t\tboard.switchPlayer()\n\t\telse:\n\t\t\tprint(\"I didn't understand your input, these are the valid inputs:\\nentering 'q' will quit out of the game.\\n\")\n\t\t\tprint(\"entering a number will place the peice in that box, the numbers are as follows:\\n \\n1|2|3\\n-----\\n4|5|6\\n-----\\n7|8|9\\n\")\n\tprint(board.__str__() + \"\\nGame Over\")\n\tif (board.isOver() is Piece.EX or board.isOver() is Piece.OH):\n\t\tprint(\"Player {0} wins!\".format(board.isOver())) \n\telse:\n\t\tprint(\"It was a draw\")", "def print_instructions():\n print(\"Welcome to the game of Sudoku!\")\n print(\"--------------------------------\")\n print(\"The goal of the game is to fill every 'square' here with a number.\")\n print(\"The rules of the game are simple:\")\n print(\" Rule No 1: You can only enter numbers 1-9 in each square.\")\n print(\" Rule No 2: You cannot repeat the use of a number within a row, column or 3x3 segment.\")\n print(\"--------------------------------\")\n print(\"Instructions:\")\n print(\" - You will be prompted to enter a row, a column, and then a number input.\")\n print(\" - The rows and column inputs are 0-indexed, meaning it goes from 0-8.\")\n print(\" - The number input is expected to be 1-9. 
Any other inputs will not be accepted.\")\n print(\" - Once you've filled out every square, the game will automatically check to see if your solution is valid!\")\n print(\" - If not, it will prompt you to try again, and you can continue to change your inputs or even write\")\n print(\" over your original entries.\")\n print(\"Good luck, have fun!\")", "def main():\n # clear the console screen\n os.system('clear')\n\n # get the names of the players\n player_1 = raw_input('What is the name of player 1? ')\n player_2 = raw_input('What is the name of player 2? ')\n\n # ask for the board size\n try:\n board_size = raw_input('How many rows and columns would you like to play with (3)? ')\n if board_size.strip() == '':\n board_size = 3\n else:\n board_size = int(board_size)\n except Exception as e:\n print \"I don't recognize your board size. Try again.\"\n sys.exit()\n\n # create the board (initialize with '-' instead of X and 0)\n board = create_board(board_size)\n\n # do tic-tac-toe until a winner is found\n outcome = tic_tac_toe(board, player_1, player_2)\n\n # print the outcome\n os.system('clear')\n print_board(board)\n print \"\\n%s wins!\" % (player_1 if outcome == 1 else player_2)\n\n\n # The code below writes the outcome to a file and then determines each \n # player's record. All you need to do is ensure that outcome is a boolean \n # value with True representing a win for player 1 and ensure that player_1 \n # and player_2 are both set.\n\n\n # the name of our game results file\n results_file = 'game_results.txt'\n\n write_result(results_file, outcome, player_1, player_2)\n\n print_records(results_file, player_1, player_2)\n\n\n # wait for the user to press enter to quit\n raw_input('\\nPress enter to quit...')\n\n # clear the console screen\n os.system('clear')", "def run_game():\n mainBoard = get_new_board()\n resetBoard(mainBoard)\n showHints = False\n\n turn = random.choice(['computer', 'player'])\n\n # Draw the starting board and ask the player what color they want.\n draw_board(mainBoard)\n\n playerTile, computer_tile = enter_player_tile()\n # Make the Surface and Rect objects for the \"New Game\" and \"Hints\" buttons\n\n newGameSurf = FONT.render('New Game', True, TEXTCOLOR, TEXTBGCOLOR2)\n newGameRect = newGameSurf.get_rect()\n newGameRect.topright = (WINDOWWIDTH - 8, 10)\n\n hintsSurf = FONT.render('Hints', True, TEXTCOLOR, TEXTBGCOLOR2)\n hintsRect = hintsSurf.get_rect()\n hintsRect.topright = (WINDOWWIDTH - 8, 40)\n\n while True: # main game loop\n # Keep looping for player and computer's turns.\n if turn == 'player':\n # Player's turn:\n if get_valid_moves(mainBoard, playerTile) == []:\n # If it's the player's turn but they\n # can't move, then end the game.\n break\n\n movexy = None\n\n while movexy == None:\n # Keep looping until the player clicks on a valid space.\n # Determine which board data structure to use for display.\n if showHints:\n boardToDraw = get_board_with_valid_moves(mainBoard, playerTile)\n else:\n boardToDraw = mainBoard\n\n check_for_quit()\n for event in pygame.event.get(): # event handling loop\n if event.type == MOUSEBUTTONUP:\n # Handle mouse click events\n mousex, mousey = event.pos\n if newGameRect.collide_point((mousex, mousey)):\n # Start a new game\n return True\n elif hintsRect.collide_point((mousex, mousey)):\n # Toggle hints mode\n showHints = not showHints\n # movexy is set to a two-item tuple XY coordinate, or None value\n movexy = get_space_clicked(mousex, mousey)\n\n if movexy != None and not isValidMove(mainBoard, playerTile, 
movexy[0], movexy[1]):\n movexy = None\n\n # Draw the game board.\n draw_board(boardToDraw)\n draw_info(boardToDraw, playerTile, computer_tile, turn)\n\n # Draw the \"New Game\" and \"Hints\" buttons.\n DISPLAYSURF.blit(newGameSurf, newGameRect)\n DISPLAYSURF.blit(hintsSurf, hintsRect)\n\n MAINCLOCK.tick(FPS)\n pygame.display.update()\n\n # Make the move and end the turn.\n make_move(mainBoard, playerTile, movexy[0], movexy[1], True)\n if get_valid_moves(mainBoard, computer_tile) != []:\n # Only set for the computer's turn if it can make a move.\n turn = 'computer'\n else:\n # Computer's turn:\n if get_valid_moves(mainBoard, computer_tile) == []:\n # If it was set to be the computer's turn but\n # they can't move, then end the game.\n break\n\n # Draw the board.\n draw_board(mainBoard)\n draw_info(mainBoard, playerTile, computer_tile, turn)\n\n # Draw the \"New Game\" and \"Hints\" buttons.\n DISPLAYSURF.blit(newGameSurf, newGameRect)\n DISPLAYSURF.blit(hintsSurf, hintsRect)\n\n # Make it look like the computer is thinking by pausing a bit.\n pauseUntil = time.time() + random.randint(5, 15) * 0.1\n\n while time.time() < pauseUntil:\n pygame.display.update()\n\n # Make the move and end the turn.\n x, y = get_computer_move(mainBoard, computer_tile)\n make_move(mainBoard, computer_tile, x, y, True)\n\n if get_valid_moves(mainBoard, playerTile) != []:\n # Only set for the player's turn if they can make a move.\n turn = 'player'\n\n # Display the final score.\n draw_board(mainBoard)\n scores = get_score_of_board(mainBoard)\n # Determine the text of the message to display.\n\n if scores[playerTile] > scores[computer_tile]:\n text = 'You beat the computer by %s points! Congratulations!' % \\\n (scores[playerTile] - scores[computer_tile])\n elif scores[playerTile] < scores[computer_tile]:\n text = 'You lost. The computer beat you by %s points.' 
% \\\n (scores[computer_tile] - scores[playerTile])\n else:\n text = 'The game was a tie!'\n\n textSurf = FONT.render(text, True, TEXTCOLOR, TEXTBGCOLOR1)\n textRect = textSurf.get_rect()\n textRect.center = (int(WINDOWWIDTH / 2), int(WINDOWHEIGHT / 2))\n DISPLAYSURF.blit(textSurf, textRect)\n\n # Display the \"Play again?\" text with Yes and No buttons.\n text2Surf = BIGFONT.render('Play again?', True, TEXTCOLOR, TEXTBGCOLOR1)\n text2Rect = text2Surf.get_rect()\n text2Rect.center = (int(WINDOWWIDTH / 2), int(WINDOWHEIGHT / 2) + 50)\n\n # Make \"Yes\" button.\n yesSurf = BIGFONT.render('Yes', True, TEXTCOLOR, TEXTBGCOLOR1)\n yesRect = yesSurf.get_rect()\n yesRect.center = (int(WINDOWWIDTH / 2) - 60, int(WINDOWHEIGHT / 2) + 90)\n\n # Make \"No\" button.\n noSurf = BIGFONT.render('No', True, TEXTCOLOR, TEXTBGCOLOR1)\n noRect = noSurf.get_rect()\n noRect.center = (int(WINDOWWIDTH / 2) + 60, int(WINDOWHEIGHT / 2) + 90)\n\n while True:\n # Process events until the user clicks on Yes or No.\n check_for_quit()\n\n for event in pygame.event.get(): # event handling loop\n if event.type == MOUSEBUTTONUP:\n mousex, mousey = event.pos\n\n if yesRect.collide_point((mousex, mousey)):\n return True\n\n elif noRect.collide_point((mousex, mousey)):\n return False\n\n DISPLAYSURF.blit(textSurf, textRect)\n DISPLAYSURF.blit(text2Surf, text2Rect)\n DISPLAYSURF.blit(yesSurf, yesRect)\n DISPLAYSURF.blit(noSurf, noRect)\n\n pygame.display.update()\n MAINCLOCK.tick(FPS)", "def solve(self):\n dim = self.puzzle.dimension\n\n # initial loop\n for value, (row, col) in self.puzzle:\n if value:\n self.clear_row(row, value)\n self.clear_col(col, value)\n self.clear_subgrid(row, col, value)\n self.updates.add((value, (row, col)))\n for ps in self.possibilities:\n ps.discard((row, col))\n\n while self.updates:\n while self.updates:\n # while self.updates:\n value, (row, col) = self.updates.pop()\n for i in range(1, dim + 1):\n self.check_row(i, value)\n self.check_col(i, value)\n for i in range(2, 8, 3):\n self.check_subgrid(row, i, value)\n self.check_subgrid(i, col, value)\n\n for value, (row, col) in self.puzzle:\n if not value:\n self.check_cell(row, col)\n\n # for value in range(1, dim + 1):\n # for row in [2, 5, 8]:\n # for col in [2, 5, 8]:\n # self.check_subgrid(row, col, value)", "def play_game():\n # let the user select her levle\n level = raw_input(\"\"\"\n Please select a game difficulty by typing it in!\n Possible choices include easy, medium, and hard.\n \"\"\")\n print \"You've chosen %s!\\n\" %(level)\n print \"You will get %s guesses per problem\\n\" %(number_of_guess)\n\n quiz_and_answer = quiz_and_answer_list[level]\n quiz, answer = quiz_and_answer[0], quiz_and_answer[1]\n\n # iterate through the blanks.\n for index, value in enumerate(answer):\n if index != len(answer) - 1:\n print \"The current paragraph reads as such:\\n\"\n print quiz\n guess = raw_input(\"What should be substituted in for __%s__?\" %(index + 1))\n quiz = guess_until_right(index, value, guess, quiz)\n if index == len(answer) - 1:\n print quiz\n print \"You won!\"\n else:\n print \"Correct!\\n\"", "def main():\n grid_size = ''\n pokemons_num = ''\n\n #input grid_size\n while True:\n grid_size = input('Please input the size of the grid: ')\n if grid_size.isdigit() == True and 1 <= int(grid_size) <= 26:\n break\n #input pokemons_num\n while pokemons_num.isdigit() == False:\n pokemons_num = input('Please input the number of pokemons: ')\n grid_size = int(grid_size)\n pokemons_num = int(pokemons_num)\n\n #initalize game\n 
pokemon_locations = generate_pokemons(grid_size, pokemons_num)\n #print(pokemon_locations)\n game = UNEXPOSED*(grid_size**2)\n \n display_game(game,grid_size)\n\n #loop until win or lose\n while True:\n print('')\n user_input = input('Please input action: ')\n #no input\n if len(user_input) == 0:\n print(\"That ain't a valid action buddy.\")\n display_game(game,grid_size)\n continue\n #help\n if user_input == 'h':\n print(HELP_TEXT)\n display_game(game,grid_size)\n continue\n #quit\n if user_input == 'q':\n input_tmp = input('You sure about that buddy? (y/n): ')\n if input_tmp == 'y':\n print('Catch you on the flip side.')\n break\n elif input_tmp == 'n':\n print(\"Let's keep going.\")\n display_game(game,grid_size)\n continue\n else:\n print(\"That ain't a valid action buddy.\")\n display_game(game,grid_size)\n continue\n #restart\n if user_input == ':)':\n game = UNEXPOSED*(grid_size**2)\n pokemon_locations = generate_pokemons(grid_size, pokemons_num)\n print(\"It's rewind time.\")\n display_game(game,grid_size)\n continue\n #flag\n if user_input[0] == 'f':\n user_input = user_input[2:]\n position = parse_position(user_input,grid_size)\n if position != None:\n index_tmp = position_to_index(position,grid_size)\n game = flag_cell(game, index_tmp)\n else:\n print(\"That ain't a valid action buddy.\")\n display_game(game,grid_size)\n else:\n position = parse_position(user_input,grid_size)\n if position != None:\n #valid action\n index_tmp = position_to_index(position,grid_size)\n #if position flagged\n if game[index_tmp] == FLAG:\n display_game(game,grid_size)\n continue\n #lose\n if position_to_index(position,grid_size) in pokemon_locations:\n for loc in pokemon_locations:\n game = replace_character_at_index(game,loc,POKEMON)\n display_game(game,grid_size)\n print('You have scared away all the pokemons.')\n break\n #next step\n positions_to_show = big_fun_search(game, grid_size, pokemon_locations, position_to_index(position,grid_size))\n game = replace_character_at_index(game, index_tmp, str(number_at_cell(game, pokemon_locations, grid_size, index_tmp)))\n for posi in positions_to_show:\n #if flagged\n if game[posi] == FLAG:\n continue\n game = replace_character_at_index(game, posi, str(number_at_cell(game, pokemon_locations, grid_size, posi)))\n else:#not valid action\n print(\"That ain't a valid action buddy.\")\n display_game(game,grid_size)\n #check win\n if check_win(game, pokemon_locations) == True:\n print('You win.')\n break", "def solve(self) -> None:\n sudoku = Sudoku(self.get_data())\n solver = SudokuSolver(sudoku)\n validation = solver.validate_sudoku()\n if validation == 1:\n solver.main_sequence()\n self.get_result(solver)\n elif validation == -1:\n self.status_bar.config(text='This sudoku array contains invalid digits.', fg='red')\n return None", "def main():\n\n board = [[\".\"] * grid_size for i in range(grid_size)]\n ship_row = random_row(board)\n ship_col = random_col(board) - 1\n ships = 0\n turn = 0\n\n print_board(board)\n while turn < total_turns:\n\n guess_col = get_col()\n guess_row = get_row()\n\n print(\"-\" * 35)\n print(\n f\"You entered: {letter_and_index_conversion(guess_col, grid_size)}{guess_row} \\n\"\n )\n\n if guess_row == ship_row and guess_col == ship_col:\n board[guess_row - 1][guess_col - 1] = \"X\"\n print(\"Congratulations Captain! You got a hit!\")\n print_board(board)\n print(\"-\" * 35)\n turn += 1\n ships += 1\n ship_row = random_row(board)\n ship_col = random_col(board)\n if ships == 10:\n print(\"Congratulations Captain! 
You won!\")\n game_prompt = input(\"Restart? y/n: \\n\")\n game_restart(game_prompt)\n else:\n if (\n board[guess_row - 1][guess_col - 1] == \"X\" or\n board[guess_row - 1][guess_col - 1] == \"*\"\n ):\n print(\"You already guessed this one -_-\")\n print(\"-\" * 35)\n else:\n print(\"Your aim is WAY off! \\n\")\n board[guess_row - 1][guess_col - 1] = \"*\"\n print_board(board)\n print(\"-\" * 35)\n turn += 1\n if turn == total_turns:\n print(\"Game Over! You ran out of turns\")\n print(\"-\" * 35)\n game_prompt = input(\"Restart? y/n: \\n\")\n game_restart(game_prompt)\n\n print(f\"Turn {turn + 1} of {total_turns}\")\n print(f\"You have {10 - ships} ships left\")", "def sudoku(puzzle):\n positions = all_pos(puzzle)\n if solve(puzzle, positions, 0):\n return puzzle\n return None", "def main():\n\tcolorama.init()\n\n\n\n\tgrid = get_start_grid(*map(int,sys.argv[1:]))\n\tprint_grid(grid)\n\n\twhile True:\n\t\tgrid_copy = copy.deepcopy(grid)\n\t\tget_input = getch(\"Enter direction (w/a/s/d/n/r/q): \")\n\t\tif get_input in functions:\t\n\t\t\tfunctions[get_input](grid)\n\t\telif get_input == \"n\":\n\t\t\tif get_next_action(grid) == '':\n\t\t\t\tprint(\"Checkmate!\")\n\t\t\t\tbreak\n\t\t\tfunctions[get_next_action(grid)](grid)\n\t\telif get_input == \"r\":\n\t\t\tbreak\n\t\telif get_input == \"q\":\n\t\t\tbreak\n\t\telse:\n\t\t\tprint(\"\\nInvalid choice.\")\n\t\t\tcontinue\n\t\tif grid != grid_copy:\n\t\t\tif not prepare_next_turn(grid):\n\t\t\t\tprint_grid(grid)\n\t\t\t\tprint(\"Well played!\")\n\t\t\t\tbreak\n\t\tprint_grid(grid)\n\t\n\tif get_input == \"r\":\n\t\twhile True:\n\t\t\tgrid_copy = copy.deepcopy(grid)\n\n\t\t\tnext_action = get_next_action(grid)\n\t\t\tif next_action == '':\n\t\t\t\tprint(\"Checkmate!\")\n\t\t\t\tbreak\n\t\t\t\n\t\t\tfunctions[next_action](grid)\n\t\t\tif grid != grid_copy:\n\t\t\t\tif not prepare_next_turn(grid):\n\t\t\t\t\tprint_grid(grid)\n\t\t\t\t\tprint(\"Well played!\")\n\t\t\t\t\tbreak\n\t\t\tprint_grid(grid)\n\n\tprint(\"Thanks for playing.\")", "def checkPuzzle(self):\n print('Got to checkPuzzle')", "def phase_8(self):\n\n def problem_1():\n test_board_1 = board(5, 5, snake_init_coordinates = [4, 2], fruit_init_coordinates = [0, 2])\n render = Render_engine('terminal', test_board_1)\n\n print(\"Before move\")\n print(\"*******************************\")\n render.render_terminal(test_board_1)\n\n print(\"\\n\\nafter move right\")\n print(\"*******************************\")\n test_board_1.Snake_move(\"right\")\n test_board_1.Update_board()\n render.render_terminal(test_board_1)\n\n print(\"\\n\\nafter move up\")\n print(\"*******************************\")\n test_board_1.Snake_move(\"up\")\n test_board_1.Update_board()\n render.render_terminal(test_board_1)\n\n print(\"\\n\\nafter move right\")\n print(\"*******************************\")\n test_board_1.Snake_move(\"right\")\n test_board_1.Update_board()\n render.render_terminal(test_board_1)\n print(\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\\n\\n\")\n \n def problem_2():\n test_board_1 = board(5, 5, snake_init_coordinates = [3, 1], fruit_init_coordinates = [3, 2])\n test_board_1.Snake_init_from_lst([[3, 1], [4, 1], [4, 2], [4, 3], [4, 4], [3, 4], [2, 4], [1, 4], [0, 4], [0, 3]])\n test_board_1.Update_board()\n render = Render_engine('terminal', test_board_1)\n print(\"Before move\")\n print(\"*******************************\")\n render.render_terminal(test_board_1)\n\n print(\"\\n\\nAfter move right\")\n print(\"*******************************\")\n test_board_1.Snake_move(\"right\")\n 
test_board_1.Update_board()\n render.render_terminal(test_board_1)\n print(\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\\n\\n\")\n\n def problem_3():\n try:\n test_board_1 = board(5, 5, snake_init_coordinates = [3, 1], fruit_init_coordinates = [1, 2])\n test_board_1.Snake_init_from_lst([[3,4], [3, 3]])\n test_board_1.Update_board()\n render = Render_engine('terminal', test_board_1)\n print(\"Before move\")\n print(\"*******************************\")\n render.render_terminal(test_board_1)\n\n print(\"\\n\\nAfter move right\")\n print(\"*******************************\")\n test_board_1.Snake_move(\"right\")\n test_board_1.Update_board()\n render.render_terminal(test_board_1)\n except GameBoardIndexError as error:\n print(\"Snake crash because\", str(error))\n\n print(\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\\n\\n\")\n \n def problem_4():\n try:\n test_board_1 = board(5, 5, snake_init_coordinates = [3, 1], fruit_init_coordinates = [1, 2])\n test_board_1.Snake_init_from_lst([[3, 3], [3, 2], [3, 1], [4, 1], [4, 2], [4, 3], [4, 4], [3, 4], [2, 4], [1, 4], [0, 4], [0, 3]])\n test_board_1.Update_board()\n render = Render_engine('terminal', test_board_1)\n print(\"Before move\")\n print(\"*******************************\")\n render.render_terminal(test_board_1)\n\n print(\"\\n\\nAfter move right\")\n print(\"*******************************\")\n test_board_1.Snake_move(\"right\")\n test_board_1.Update_board()\n render.render_terminal(test_board_1)\n except GameBoardIndexError as error:\n print(\"Snake crash because\", str(error))\n\n print(\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\\n\\n\")\n\n problem_1()\n problem_2()\n problem_3()\n problem_4()", "def main():\n name = 'sudoku'\n input_puzzle_file = name + '.txt'\n if len(sys.argv) == 2:\n input_puzzle_file = sys.argv[1]\n name = Path(input_puzzle_file).stem\n assert len(name) > 0\n output_domains_file = name + \"_dom.txt\"\n output_constraints_file = name + \"_cst.txt\"\n\n print('Processing puzzles from file', input_puzzle_file)\n puzzles = read_puzzles(input_puzzle_file)\n print('Read in', len(puzzles), 'Sudoku puzzle instances.')\n\n print('Generating and writing domains to file', output_domains_file)\n domains = generate_domains(puzzles)\n write_puzzles_domains(name + \"_dom.txt\", domains)\n\n print('Generating and writing constraints to file', output_constraints_file)\n constraints = generate_constraints()\n write_puzzle_constraints(output_constraints_file, constraints)", "def solveSudoku(self, board):\n\n digits = { str(i) for i in range(1, 10) }\n rows = [ digits.copy() for _ in range(9) ]\n cols = [ digits.copy() for _ in range(9) ]\n boxs = [ [ digits.copy() for _ in range(3) ] for _ in range(3) ]\n unoccupied = set()\n\n def __recursiveSolver():\n if not unoccupied:\n return\n\n choices = digits.copy()\n for row, col in unoccupied:\n possible_moves = rows[row] & cols[col] & boxs[row // 3][col // 3]\n if len(possible_moves) < len(choices):\n action_pos = (row, col)\n choices = possible_moves\n if len(choices) == 1:\n break\n\n for choice in choices:\n (row, col) = action_pos\n\n unoccupied.remove(action_pos)\n board[row][col] = choice\n rows[row].remove(choice)\n cols[col].remove(choice)\n boxs[row // 3][col // 3].remove(choice)\n\n __recursiveSolver()\n if not unoccupied: return\n\n unoccupied.add(action_pos)\n board[row][col] = '.'\n rows[row].add(choice)\n cols[col].add(choice)\n boxs[row // 3][col // 3].add(choice)\n\n for row in range(9):\n for col in range(9):\n ch = board[row][col]\n if ch == '.':\n unoccupied.add((row, col))\n else:\n 
rows[row].remove(ch)\n cols[col].remove(ch)\n boxs[row // 3][col // 3].remove(ch)\n\n __recursiveSolver()", "def main():\n\tGame = TicTacToe()\n\tprint(\"Welcome to Tic-Tac-Toe\")\n\twhile True:\n\t\tprint(\"Player%d, take your move.\" % Game.turn)\n\t\trow = int(input(\"Enter row of move... \"))\n\t\tcol = int(input(\"Enter col of move... \"))\n\t\tGame.move(Game.turn, row, col)\n\t\tGame.printBoard()\n\t\tif Game.win:\n\t\t\trestart = int(input(\"Enter 1 to restart the game, 0 to end game... \"))\n\t\t\tif restart == 1:\n\t\t\t\tGame.restartGame()\n\t\t\telse:\n\t\t\t\tprint(\"Closing Tic-Tac-Toe Game...\")\n\t\t\t\treturn", "def run(self):\n self.initialise()\n self.setup_disks()\n self.solve_puzzle()\n input('Finished. Press ENTER to exit.')", "def main() -> None:\n # the current game is initialized with 1, 3, 5, 7 matches on the 4 rows.\n game: List[int] = [1, 3, 5, 7]\n\n print(\"\\nGame of Nim\")\n print( \"===========\")\n display_game(game)\n start = input(\"Do you want to start? (y/n) \")\n print()\n if start==\"y\" or start==\"Y\":\n print(\"Your turn\")\n user_turn(game)\n display_game(game)\n while True:\n print(\"My turn\")\n computer_turn(game)\n display_game(game)\n if is_finished(game):\n print(\"I WON\\n\")\n break\n print(\"Your turn\")\n user_turn(game)\n display_game(game)\n if is_finished(game):\n print(\"YOU WON\\n\")\n break", "def solve(puzzle):\n print(\"Solving...\")\n array_puzzle = np.asarray(puzzle)\n array_puzzle.flags.writeable = False # Turn off writable flags to prevent data being ovewritten accidentally.\n goal_state = __generate_goal(len(array_puzzle[0]), len(array_puzzle))\n\n flat_puzzle = list(chain.from_iterable(puzzle)) # Flatten the list\n\n # If the puzzle doesn't contain 0, exit.\n try:\n flat_puzzle.remove(0) # Remove 0 from the list\n except:\n print(\"All puzzles must include an open tile (0).\")\n return None\n\n inversions = __count_inversions(flat_puzzle) # Count the inversions\n\n # width = len(array_puzzle[0]) # Get the width of the puzzle (columns)\n # length = len(array_puzzle) # Get the length of the puzzle (rows)\n\n oddEven = __odd_or_even(len(array_puzzle[0])) # Determine if the width is odd or even.\n start_position = __find_start(array_puzzle) # Find the start position's row\n solvable = __is_solvable(oddEven, inversions, len(array_puzzle), start_position) # Cleck if the puzzle is solvable.\n\n # If the puzzle is not solvable, return None.\n if(solvable == \"None\"):\n return None\n\n # If we cannot calculate a* (for example the given values are not all in sequential order (1-5) 4 is replaced by 6 (1,2,3,5,6))\n try:\n return __a_star(array_puzzle, goal_state)\n except:\n print(\"Please make sure there are no duplicate or skipped inputs.\")\n return None\n\n # This code was used in testing to print out the string.\n # solved = __a_star(array_puzzle, goal_state)\n # Return the moves needed to complete the puzzle.\n # return print(str(__build_string(solved)) + \" (\" + str(len(solved)) + \")\")", "def start_game() -> None:\n rows = get_int()\n cols = get_int()\n state = game.GameState(rows, cols)\n\n line = next_line()\n if line == 'CONTENTS':\n rowList = []\n for i in range(rows):\n row = []\n line = raw_next_line()\n for index in range(cols):\n row.append(line[index])\n rowList.append(row)\n state.set_board_contents(rowList)\n\n while True:\n _display_board(state)\n line = next_line()\n if line == 'Q':\n return\n if line == '':\n if state.tick():\n _display_board(state)\n break\n else:\n _process_command(line, state)\n print('GAME 
OVER')", "def play_game(self):\r\n try: # Asks user how many rounds they want to play:\r\n game_rounds = int(input(\r\n \"Please enter the desired number of rounds to play: \"\r\n ))\r\n except ValueError: # Ensures input value is correct\r\n print(\"Sorry, I didn't quite catch that.\\nPlease try again,\"\r\n \" and make sure you enter a valid number.\\n\")\r\n return self.play_game()\r\n # Game Starts:\r\n print(\"\\nGame start!\\n\")\r\n for round in range(game_rounds):\r\n print(f\"ROUND {round}:\")\r\n self.play_round()\r\n self.game_over() # Game concludes naturally.\r", "def solveSudoku(self, board: List[List[str]]) -> None:\n self.backtrack(board, 0, 0)" ]
[ "0.7092953", "0.689259", "0.68871856", "0.6726647", "0.6684823", "0.6676533", "0.6589806", "0.6555856", "0.6482301", "0.63967913", "0.6358735", "0.6353301", "0.6346815", "0.63003695", "0.6269608", "0.6250014", "0.6243339", "0.62017316", "0.61864555", "0.6185452", "0.61633503", "0.61602", "0.61572176", "0.6155443", "0.6152154", "0.61468226", "0.6145169", "0.61349106", "0.6130442", "0.612408" ]
0.82174706
0
Prints to console a set of instructions for how to play a game of Sudoku.
def print_instructions(): print("Welcome to the game of Sudoku!") print("--------------------------------") print("The goal of the game is to fill every 'square' here with a number.") print("The rules of the game are simple:") print(" Rule No 1: You can only enter numbers 1-9 in each square.") print(" Rule No 2: You cannot repeat the use of a number within a row, column or 3x3 segment.") print("--------------------------------") print("Instructions:") print(" - You will be prompted to enter a row, a column, and then a number input.") print(" - The rows and column inputs are 0-indexed, meaning it goes from 0-8.") print(" - The number input is expected to be 1-9. Any other inputs will not be accepted.") print(" - Once you've filled out every square, the game will automatically check to see if your solution is valid!") print(" - If not, it will prompt you to try again, and you can continue to change your inputs or even write") print(" over your original entries.") print("Good luck, have fun!")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_instructions(self):\n\t\tprint('\\n\\n==========================================================================')\n\t\tprint('==========================================================================\\n')\n\t\tprint('Welcome to Tic Tac Toe, the came you know and love. \\nThe rules are the same ones you know and love. \\nTo make a move just type the coordinates of the spot like so - row,column. \\nNo spaces please! Lets go ahead and start! Here is a picuter of the board with some coordinates just in case!\\n')\n\t\tprint('=====================')\n\t\tprint('|| 0,0 | 0,1 | 0,2 ||')\n\t\tprint(' -----------------')\n\t\tprint('|| 1,0 | 1,1 | 1,2 ||')\n\t\tprint(' -----------------')\n\t\tprint('|| 2,0 | 2,1 | 2,2 ||')\n\t\tprint('=====================')\n\t\tprint('\\n==========================================================================')\n\t\tprint('==========================================================================\\n\\n')", "def showInstructions():\n print(\"\"\"\n RPG Game\n ========\n Commands:\n go [direction]\n get [item]\n\n\t\"\"\")", "def instructions():\n\t\n\tprint \\\n\t\"\"\"\n\tToday we will play the perennial favorite game of...\n\tRock! Paper!! Scissors!!!.\n\tThe objective of the game is to outthink your opponent (in this case me) and defeat.\n\tThe rules are very simple\n\t1. Paper covers the Rock\n\t2. Rock breaks the Scissors\n\t3. Scissors cut the Paper\n\t\n\tChoose your move from the following:\n\t1. Paper (p)\n\t2. Rock (r)\n\t3. Scissors (s)\n\t\n\tAre you ready? Alright then, let's play...\n\t\"\"\"", "def printInstructions(self):\n print(\"\"\"•\tAim of the Game is to be the first to lose all of your chips\n•\tPlayers are put in order of the lowest to \nhighest based on their first roll\n(This is done automatically when you enter your name)\n• You start out with 5 chips.\n• When it is your turn you roll the die.\n\\t•\tIf the space with the same number as the die is empty (value of 0),\n\\t\\tput a chip there.\n\\t•\tbut if there already is a chip there (value of 1), you must take it.\n\\t•\tIf you roll a 6, you always put one of your chips on the space number 6 – \n\\t\\tregardless of how many chips are there already. 
\n\\t\\tChips on space number 6 are out of the game,\n\\t\\tand you never pick these up again.\n\"\"\")", "def intro_instructions():\n print(\"The board will be updated after each move.\")\n print(\"Watch both the board and the python prompt after each move.\")\n print(\"Player 1 is white and player 2 is orange\")\n print(\"Green boxes are snakes and yellow boxes are ladders.\")\n print(\"If you hit any part of the snake(not just the head), you will slide down to the snakes tail\")\n print(\"If you hit any part of the ladder(not just the bottom), you will climb to the ladder's top\")\n print(\"May the luckiest player win\")", "def instruction():\n print('- - - - - - - - - - - - - - - - - - - - -')\n print(\"this is instruction for tic tac toe game\".upper())\n print('- - - - - - - - - - - - - - - - - - - - -')\n print('This is game for two players')\n print('Each player can choose a number between 1 and 9')\n print('Numbers represent the fields on the board')\n print('You can choose only numbers that are not taken by any player')\n list_of_symbols = ['1', '2', '3', '4', '5', '6', '7', '8', '9']\n print_board(list_of_symbols)\n print('You win the game if you have 3 symbols in column, row or diagonally')\n print('- - - - - - - - - - - - - - - - - - - - -')\n\n begin_game()", "def print_grid(puzzle: str) -> None:\r\n grid = generate_grid(puzzle)\r\n print(grid)", "def print_sudoku(sudoku, name='SUDOKU'):\n\n print \"### {} ###\".format(name)\n for row in sudoku:\n print row", "def show_instructions():\n\n print('4-digit Code has been set. Digits in range 1 to 8. You have 12 turns to break it.')", "def start():\n display_board()\n print(\"\\n\")\n y_n_prompt()", "def print_sudoku_solution(solution):\n for row in range(9):\n for col in range(9):\n print solution['%d-%d' % (row, col)][0],\n if col == 2 or col == 5:\n print '|',\n print\n if row == 2 or row == 5:\n print '------+-------+------'", "def show_possible_moves():\n print(\"Possible moves:\")\n print(\"\\t\\\\sw - Moves a card from Stock to Waste.\")\n print(\"\\t\\\\wf <suit> - Moves a card from Waste to the <suit> Foundation. Suit must be one of: \"\n \"clubs/diamonds/hearts/spades.\")\n print(\"\\t\\\\wt <tableau_num> - Moves a card from Waste to the <tableau_num> Tableau. <tableau_num> must be \"\n \"between 1 and 7, inclusive. \")\n print(\"\\t\\\\tf <tableau_num> <suit> - Moves a card from the <tableau_num> Tableau to the <suit> foundation. \"\n \"Same input rules as above. \")\n print(\"\\t\\\\tt <num_1> <num_2> - Moves all face-up cards from <num_1> Tableau to <num_2> Tableau. Same input \"\n \"rules as above. \")\n print(\"\\t\\\\help - Displays all possible moves. \")\n print(\"\\t\\\\quit - Quit the game.\\n\")", "def print_board(self):\n\n print\n\n for row in xrange(8):\n for column in xrange(8):\n if self.squares[row][column]:\n print self.squares[row][column],; sys.stdout.write(u'')\n else:\n if self.dark_square((row, column)):\n print u' __ ',; sys.stdout.write(u'')\n else:\n print u' . 
',; sys.stdout.write(u'')\n print\n print", "def display_board():\n print(\"\\n\")\n print(\"-------------------------------------\")\n print(\"| \" + board[0] + \" | \" + board[1] +\n \" | \" + board[2] + \" 1 | 2 | 3 |\")\n print(\"| \" + board[3] + \" | \" + board[4] +\n \" | \" + board[5] + \" TicTacToe 4 | 5 | 6 |\")\n print(\"| \" + board[6] + \" | \" + board[7] +\n \" | \" + board[8] + \" 7 | 8 | 9 |\")\n print(\"-------------------------------------\")\n print(\"\\n\")", "def main():\n\tprint(\"Welcome to TicTacToe\")\n\tboard = Board()\n\twhile (not board.isOver()):\n\t\tprint(\"It is {0}'s turn\".format(board.current) + board.__str__())\n\t\tmove = input('Where would you like to go? : ').strip()\n\t\tif (move == 'q'):\n\t\t\tbreak\n\t\telif (board.makeMove(move) == 1):\n\t\t\tboard.switchPlayer()\n\t\telse:\n\t\t\tprint(\"I didn't understand your input, these are the valid inputs:\\nentering 'q' will quit out of the game.\\n\")\n\t\t\tprint(\"entering a number will place the peice in that box, the numbers are as follows:\\n \\n1|2|3\\n-----\\n4|5|6\\n-----\\n7|8|9\\n\")\n\tprint(board.__str__() + \"\\nGame Over\")\n\tif (board.isOver() is Piece.EX or board.isOver() is Piece.OH):\n\t\tprint(\"Player {0} wins!\".format(board.isOver())) \n\telse:\n\t\tprint(\"It was a draw\")", "def print_board(board):\n\n colors = {\n '*': None,\n '2': 'red',\n '4': 'green',\n '8': 'yellow',\n '16': 'blue',\n '32': 'magenta',\n '64': 'cyan',\n '128': 'grey',\n '256': 'white',\n '512': 'green',\n '1024': 'red',\n '2048': 'blue',\n '4096': 'magenta'\n };\n header = \"Use the arrows keys to play 2048! Press q to quit\";\n print(header);\n N = len(board);\n vertical_edge = \"\";\n for i in range(N + 2):\n vertical_edge += \"-\\t\";\n print(vertical_edge);\n for y in range(N):\n row = \"\";\n for x in board[y]:\n\n # Handling installation fail (no colors printed)\n if termcolor is not None:\n row += termcolor.colored(x, colors[x]);\n else:\n row += x\n\n row += \"\\t\";\n print(\"|\\t\" + row + \"|\");\n if y is not N - 1: print(\"\")\n print(vertical_edge);\n\n if GUI_runnable:\n gui.update_grid(board)\n gui.update()", "def display_board():\n print(board[0], '|', board[1], '|', board[2])\n print(board[3], '|', board[4], '|', board[5])\n print(board[6], '|', board[7], '|', board[8])", "def show_board(player_name='player',win=False):\r\n print('\\n'*10)\r\n triple_hash();\r\n print(f' {loc[0]} # {loc[1]} # {loc[2]}')\r\n triple_hash()\r\n print(' #################################################################')\r\n triple_hash()\r\n print(f' {loc[3]} # {loc[4]} # {loc[5]}')\r\n triple_hash()\r\n print(' #################################################################')\r\n triple_hash()\r\n print(f' {loc[6]} # {loc[7]} # {loc[8]}')\r\n triple_hash()\r\n\r\n if win:\r\n print(f'\\n\\ncongratulations, {player_name}, you have won!')", "def display(sudoku_map):\n width = 1+max(len(sudoku_map[s]) for s in squares)\n line = '+'.join(['-'*width*3]*3)\n for r in rows:\n print(''.join(sudoku_map[r+c].center(width) + ('|' if c in '36' else '') for c in cols))\n \n if r in 'CF':\n print(line)\n print()", "def print_board(self):\n print(\" 1 2 3 4 5 6 7\")\n for row in range(self.playable_row_range[0], self.playable_row_range[1]):\n for col in range(self.playable_column_range[0], self.playable_column_range[1]):\n print(\"[{piece}]\".format(piece=self.board[row][col]), end=\" \")\n print('\\n', end=\"\")\n print(\"\\n\")", "def _do_outputs(self):\n self._puzzle.display_revealed_puzzle()\n hint = 
self._puzzle.get_hint()\n self._console.write(hint)\n print(\"\")\n self._jumper.draw_jumper()\n print(\"\")\n\n # These ifs end the game\n if self._puzzle.is_solved():\n self._keep_playing = False\n self._puzzle.display_win_screen()\n \n if self._puzzle.incorrect_guesses >= 4:\n self._keep_playing = False\n self._puzzle.display_loss_screen()", "def show_board(self):\n for i in range(self.num_rows):\n print(' ----'*8)\n s = \"\"\n for j in range(self.num_cols):\n s += '| {} '.format(self._show_piece(i, j))\n print(\"{}|\".format(s))\n print(' ----'*8)", "def start_with_console():\n print_welcome()\n option = input(\"Choose a number [1/2/3]: \")\n cexc.check_start_exceptions(option)\n if option == \"1\":\n picture = create_white_picture_with_inputs()\n elif option == \"2\":\n picture = load_picture_with_inputs()\n elif option == \"3\":\n picture = create_probability_picture_with_inputs()\n steps = get_steps(input(\"Give a number of steps to do (max=30000): \"))\n print_big_number_announcement(steps)\n Simulator(steps, picture).simulate()", "def printPuzzle(self):\n for i in range(9):\n print(self.puzzle[0][i], end=\" \")\n for n in range(1, 9):\n print()\n for m in range(9):\n print(self.puzzle[n][m], end=\" \")\n print(\"\\n\")", "def show(self):\n for y in range(3):\n if y > 0:\n print(\"--+---+--\")\n for x in range(3):\n if x > 0:\n print('|',)\n\n # Print a space for empty (0), an O for player 1, or an X for player 2\n print(\" OX\"[self.get_square(x, y)],)\n print", "def textuel_auto():\r\n print()\r\n grids = FileManager.read_sudoku(args.file)\r\n for grid in grids:\r\n print(\"Calcul...\")\r\n print(solver.solve(grid))\r\n print(\"Terminé !\")", "def display(self):\n\n #player UI\n s = \" \"\n for p in range(WIDTH):\n s += str(p)\n s += \" \"\n\n print(s)\n\n for row in range(HEIGHT):\n\n # player UI\n print(row, end=' ')\n\n for col in range(WIDTH):\n\n if self.board[row][col] == 1:\n print(\"X\", end=' ')\n elif self.board[row][col] == 2:\n print(\"O\", end=' ')\n else:\n print(\"-\", end=' ')\n print()", "def print_board(self):\n \n # How to show empty/p1/p2\n VALS = \".XO\"\n\n print(\"\\n a b c d e f g\")\n print(\" /--+-+-+-+-+-+--\\\\\")\n for r in range(_HEIGHT - 1, -1, -1):\n s = \"%s |\" % r\n for c in range(_WIDTH):\n # Print mark next to most recent move\n mark = \">\" if self.last_play_rc == (r, c) else \" \"\n s += mark + VALS[self.board[r * 7 + c]]\n print(s + \" |\")\n print(\" \\\\--+-+-+-+-+-+--/\")\n print(\" a b c d e f g\\n\")", "def demo():\n\n # Initialize board with all cells having possible values 1..9\n board = board_init()\n\n # Unsolved demo puzzle\n # Hard puzzle by Arto Inkala:\n # http://abcnews.go.com/blogs/headlines/2012/06/can-you-solve-the-hardest-ever-sudoku/\n read_puzzle(board, \"8..........36......7..9.2...5...7.......457.....1...3...1....68..85...1..9....4..\")\n\n # Print unsolved puzzle\n print(\"Initial Sudoku board:\")\n print_board(board)\n\n # Solve the puzzle\n board = solve_puzzle(board)\n\n # Print the solution\n print(\"Solution:\")\n print_board(board)\n\n\n # Write output to file\n write_to_file(board)\n \n return 0", "def text_output(self):\n print(self.board)\n print()" ]
[ "0.75848573", "0.71088547", "0.703432", "0.6988165", "0.69866717", "0.6634064", "0.6410708", "0.6397539", "0.6355258", "0.63541543", "0.6338774", "0.63357323", "0.6296808", "0.6284995", "0.6267385", "0.62419635", "0.6191577", "0.6185726", "0.6172115", "0.61716187", "0.6133821", "0.61204976", "0.61146796", "0.6113212", "0.60889184", "0.60711753", "0.606412", "0.6052195", "0.6051476", "0.6046981" ]
0.74090517
1
Creates four plotly visualizations using the New York Times Archive API
def return_figures(): # Add New York Times API Key nyt = NYTAPI("AsjeHhqDYrePA2GMPpYoY1KAKAdG7P99") # Select Year and Month of articles data = nyt.archive_metadata( date = datetime.datetime(2020, 7, 1) ) def data_to_df(data): # Initiate list for restructured information data_list = [] # Collect Data from API dictionary for article in data: new_data = [article.get("section_name"), article.get("news_desk"), article.get("pub_date"), article.get("headline").get("main"), article.get("abstract"), article.get("lead_paragraph"), article.get("type_of_material"), article.get("word_count")] # Append list of information from article to data list data_list.append(new_data) # Convert data list to DataFrame df = pd.DataFrame(data_list, columns=["section_name","news_desk", "pub_date", "headline", "abstract", "lead_paragraph", "type_of_material", "word_count"]) return df df = data_to_df(data) # first chart plots section distribution # as a pie chart graph_one = [] df_one = df.copy() # filter and sort values for the visualization # filtering plots the articles in decreasing order by their values labels = df_one.section_name.value_counts().index values = df_one.section_name.value_counts().values graph_one.append( go.Pie( labels=labels, values=values, hole=.6, textposition="inside" ) ) layout_one = dict(title = 'Distribution of sections of this months New York Times articles') # second chart plots section distribution # as a pie chart graph_two = [] df_two = df.copy() # filter and sort values for the visualization # filtering plots the articles in decreasing order by their values labels = df_two.news_desk.value_counts().index values = df_two.news_desk.value_counts().values graph_two.append( go.Pie( labels=labels, values=values, hole=.6, textposition="inside" ) ) layout_two = dict(title = 'Distribution of news desk of this months articles') # third chart plots section distribution # as a pie chart graph_three = [] df_three = df.copy() # filter and sort values for the visualization # filtering plots the articles in decreasing order by their values labels = df_three.type_of_material.value_counts().index values = df_three.type_of_material.value_counts().values graph_three.append( go.Pie( labels=labels, values=values, hole=.6, textposition="inside" ) ) layout_three = dict(title = 'Distribution for type of material of this months articles') # fourth chart plots section distribution # as a pie chart graph_four = [] # Convert publishing date columns to datetime format df["pub_date"] = pd.to_datetime(df["pub_date"]).dt.date df_four = df.copy() df_four = df_four.pub_date.value_counts().to_frame().sort_index() # filter and sort values for the visualization # filtering plots the articles in decreasing order by their values x_val = df_four.index y_val = df_four.values graph_four.append( go.Scatter( x=df_four.index, y=df_four["pub_date"], mode="lines", name="Articles" ) ) layout_four = dict(title = 'Number of articles published by days') # fourth chart plots section distribution # as a pie chart graph_five = [] # Calculate average number of words for this months articles avg_word_count = round(df.word_count.mean(),0) graph_five.append( go.Table( header=dict(values=['Average Word Count']), cells=dict(values=[avg_word_count]) ) ) layout_five = dict(title = '') # append all charts figures = [] figures.append(dict(data=graph_one, layout=layout_one)) figures.append(dict(data=graph_two, layout=layout_two)) figures.append(dict(data=graph_three, layout=layout_three)) figures.append(dict(data=graph_four, layout=layout_four)) 
figures.append(dict(data=graph_five, layout=layout_five)) return figures
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def return_figures():\n\n graph_one = []\n df = cleanparrisdf('data/Salem-Village-Data-Set.csv')\n sources = [0,0,0,1,1,1]\n targets = [2,3,4,2,3,4]\n values = df[\"petition_count\"].tolist()\n\n data_one = dict(\n type = 'sankey',\n node = dict(\n pad = 10,\n thickness = 30,\n line = dict(\n color = \"black\",\n width = 0.5\n ),\n label = [\"Church Member\", \"Non-Church Member\", \"Anti-Parris Signatory\", \"Non-Signatory\", \"Pro-Parris Signatory\"],\n color = [\"red\", \"blue\", \"black\", \"grey\", \"white\"]\n ),\n link = dict(\n source = sources,\n target = targets,\n value = values\n ))\n\n layout_one = dict(\n title = 'Salem Residents\\' Stance on Minister Samuel Parris in 1695'\n )\n\n# second chart plots ararble land for 2015 as a bar chart\n graph_two = []\n df = cleantimelinedf('data/Accused-Witches-Data-Set.csv')\n x_val = df[\"month\"].tolist()\n y_val1 = df[\"accusation_count\"].tolist()\n y_val2 = df[\"execution_count\"].tolist()\n\n graph_two.append(\n go.Scatter(\n x = x_val,\n y = y_val1,\n mode = 'lines+markers',\n name = \"People Accused of Witchcraft\"\n )\n )\n graph_two.append(\n go.Scatter(\n x = x_val,\n y = y_val2,\n mode = 'lines+markers',\n name = \"People Executed for Witchcraft\"\n )\n )\n\n labels = [\"February\", \"March\", \"April\", \"May\", \"June\", \"July\", \"August\", \"September\", \"October\", \"November\"]\n\n layout_two = dict(title = 'Salem Witch Trial Victim Count Over Time',\n xaxis = dict(title = 'Month (1692)', tickvals=[k+2 for k in range(len(labels))], ticktext=labels, tickangle=315),\n yaxis = dict(title = 'Number of People'),\n )\n\n\n# third chart plots percent of population that is rural from 1990 to 2015\n graph_three = []\n df = cleanplacesdf('data/Accused-Witches-Data-Set.csv')\n graph_three.append(\n go.Scattergeo(\n lon = df['long'],\n lat = df['lat'],\n text = df['text'],\n marker = dict(\n size = df['places_count'],\n sizeref = 2. 
* max(df['places_count'])/100,\n color = 'red',\n line = dict(width = 0 )\n )\n )\n )\n\n layout_three = dict(\n title = 'Towns Affected (Bubbles Proportional to Number Accused)',\n geo = dict(\n showframe = False,\n projection=dict( type='orthographic' ),\n showland = True,\n oceancolor = 'rgb(204, 255, 255)',\n showocean= True,\n landcolor = 'rgb(229, 255, 204)',\n lonaxis = dict( range= [-71.7 , -70.3] ),\n lataxis = dict( range= [42.3, 43.5] )\n )\n )\n\n figures = []\n figures.append(dict(data=[data_one], layout=layout_one))\n figures.append(dict(data=graph_two, layout=layout_two))\n figures.append(dict(data=graph_three, layout=layout_three))\n\n return figures", "def _dump_plotly(objs, images, func):\n l = len(objs)\n #print(l)\n titles = []\n for i,x in enumerate(objs):\n if 'id' in x:\n titles.append('shape id %d' % x.id)\n else:\n titles.append('item %d' % i)\n fig = tools.make_subplots(rows=l, cols=1, subplot_titles = titles,print_grid=False )\n #print('figure attmpt: ')\n #fig['layout']['xaxis1'].update(title='monkeybar')\n #for x in fig['layout']['xaxis1']:\n #print(x)\n fig.layout.showlegend = False\n for i,x in enumerate(objs):\n traces,annotations,title = func(x,images[i])\n im = {\n \"source\": 'data:image/png;base64, ' + getbase64(images[i]),\n \"x\": 1,\n \"y\": 1 - i/(l-.5),\n \"sizex\": .5,\n \"sizey\": .5,\n }\n fig.layout.images.append(im)\n for t in traces:\n fig.append_trace(t,i+1,1)\n if title is not None:\n fig.layout['xaxis%d' % (i+1)].update(title=title)\n if annotations is not None:\n for a in annotations:\n a['xref'] = 'x%d' % (i+1)\n a['yref'] = 'y%d' % (i+1)\n fig.layout.annotations += annotations\n\n fig['layout'].update(height=400*l, width=1100, margin={\n 'l':80,\n 'r':330,\n 't':100,\n 'b':80,\n 'pad':0,\n 'autoexpand':True,\n },title='plots')\n\n return fig", "def make_timeplot(df_measure, df_prediction):\n # mode = 'confirmed'\n mode = 'active'\n df_measure_confirmed = df_measure[mode]\n colors = px.colors.qualitative.Dark24\n n_colors = len(colors)\n fig = go.Figure()\n for i, country in enumerate(df_measure_confirmed.columns):\n fig.add_trace(go.Scatter(x=df_measure_confirmed.index, \n y=df_measure_confirmed[country],\n name=country[1], mode='markers+lines',\n marker_color=colors[i%n_colors],\n line_color=colors[i%n_colors],\n visible=False))\n for i, country in enumerate(df_prediction.columns):\n fig.add_trace(go.Scatter(x=df_prediction.index, \n y=df_prediction[country],\n name='+' + country[1], mode='lines',\n line_dash='dash',\n line_color=colors[i%n_colors],\n showlegend=False,\n visible=False))\n\n last_day = df_measure_confirmed.index.max()\n day = pd.DateOffset(days=1)\n fig.update_layout(title='',\n xaxis=dict(rangeslider_visible=True,\n range=(last_day - 10 * day,\n last_day + 4 * day)))\n fig.update_layout(\n updatemenus=[\n dict(\n type = \"buttons\",\n direction = \"left\",\n buttons=list([\n dict(\n args=[{\"visible\": [False,]*len(df_measure_confirmed.columns)}],\n label=\"Reset\",\n method=\"update\",\n ),\n dict(\n args=[\"yaxis\", {'type':'log'}],\n label=\"log\",\n method=\"relayout\",\n ),\n dict(\n args=[\"yaxis\", {'type':'linear'}],\n label=\"lin\",\n method=\"relayout\",\n ),\n\n ]),\n pad={\"r\": 10, \"t\": 10, \"b\":5},\n showactive=True,\n x=0.05,\n xanchor=\"left\",\n y=1.35,\n yanchor=\"top\",\n font_color='black',\n ),\n ],\n height=.9*FIRST_LINE_HEIGHT,\n)\n\n return fig", "def return_figures():\n\n # first chart plots arable land from 1990 to 2015 in top 10 economies \n # as a line chart\n graph_one = [] \n df_melt = 
clean_data('data/b055f1ad-17cc-43fd-bc5e-8a9572a0e573_Data.csv')\n df_melt.columns = ['country', 'year', 'population']\n df_melt.sort_values('population', ascending=False, inplace=True)\n top10 = df_melt.country.unique().tolist()\n \n for country in top10:\n x_val = df_melt[df_melt['country']==country].year.tolist()\n y_val = df_melt[df_melt['country']==country].population.tolist() \n \n \n graph_one.append(\n go.Scatter(\n x = x_val,\n y = y_val,\n mode = 'lines',\n name = country\n )\n )\n\n layout_one = dict(title = 'Most Populous countries growth(2000-2015)',\n xaxis = dict(title = 'Year'),\n yaxis = dict(title = 'Population'),\n )\n \n# second chart plots ararble land for 2015 as a bar chart \n \n graph_two = []\n \n df_2 = clean_data(\"data/co2emissions.csv\")\n df_2.columns = ['country', 'years','CO2']\n df_2.sort_values('CO2', ascending=False, inplace=True)\n for country in top10:\n x_val = df_2[df_2['country']==country].years.tolist()\n y_val = df_2[df_2['country']==country].CO2.tolist() \n graph_two.append(\n go.Scatter(\n x = x_val,\n y = y_val,\n mode = 'lines+markers',\n name = country\n )\n )\n\n layout_two = dict(title = 'CO2 emissions in most populous countries',\n xaxis = dict(title = 'Year'),\n yaxis = dict(title = 'CO2 emissions'),\n )\n\n\n# third chart plots percent of population that is rural from 1990 to 2015\n graph_three = []\n df_3 = clean_data('data/GDP.csv')\n df_3.columns = ['country','year','GDP']\n df_3.sort_values('GDP', ascending=False, inplace=True)\n df_3=df_3[df_3['year'] ==2014]\n graph_three.append(\n go.Bar(\n x = df_3.country.tolist(),\n y = df_3.GDP.tolist(),\n )\n )\n\n layout_three = dict(title = 'GDP in USD',\n xaxis = dict(title = 'Country'),\n yaxis = dict(title = 'GDP(USD)')\n )\n \n# fourth chart shows rural population vs arable land\n graph_four = []\n df_4 = clean_data('data/TotalArea.csv')\n df_4.columns = ['country','year', 'area']\n df_4.sort_values('area', ascending=False, inplace=True)\n df_4=df_4[df_4['year']==2014]\n graph_four.append(\n go.Bar(\n x = df_4.country.tolist(),\n y = df_4.area.tolist(),\n )\n )\n\n layout_four = dict(title = 'Total Area (Sq. 
Km)',\n xaxis = dict(title = 'Country'),\n yaxis = dict(title = 'Total Area'),\n )\n \n # append all charts to the figures list\n figures = []\n figures.append(dict(data=graph_one, layout=layout_one))\n figures.append(dict(data=graph_two, layout=layout_two))\n figures.append(dict(data=graph_three, layout=layout_three))\n figures.append(dict(data=graph_four, layout=layout_four))\n\n return figures", "def return_figures():\n\n # first chart plots arable land from 1990 to 2015 in top 10 economies \n # as a line chart\n \n graph_one = [] \n for country in countries_considered:\n graph_one.append(\n go.Scatter(\n x = [2015,2016,2017,2018,2019],\n y = dict_of_df['Happiness Score'].loc[country, ['2015', '2016','2017','2018','2019']].values,\n mode = 'lines',\n name = country\n )\n )\n\n layout_one = dict(title = 'Happiness Score For The Top 9 Countries From 2015 to 2019',\n xaxis = dict(title = 'Years'),\n yaxis = dict(title = 'Countries'),\n )\n\n# second chart plots ararble land for 2015 as a bar chart \n graph_two = []\n \n # Figure 1 - horizontal bars displaying stacked scores from all criteria per top countries - 2019\n countries_sortedby_stacked_score = dict_of_df['stacked_score']['2019'].sort_values().index[125:]\n \n colors_bars = ['cornflowerblue', 'brown', 'gold', 'mediumseagreen', 'darkorange', 'turquoise',\n 'ivory']\n \n for index, crit in enumerate(criteria):\n graph_two.append(\n go.Bar(\n y = dict_of_df[crit]['2019'].loc[countries_sortedby_stacked_score].index,\n x = dict_of_df[crit]['2019'].loc[countries_sortedby_stacked_score].values, \n orientation = 'h',\n name = crit,\n text = [\"RANK : \" + str(dict_rank_countries[country][index]) + \" / \" + str(len(dict_of_df['stacked_score']['2019'])) for country in countries_sortedby_stacked_score],\n marker=dict(\n color=colors_bars[index])\n )\n )\n\n layout_two = dict(title = 'Stacked Scores For Top Countries in Happiness - 2019',\n xaxis = dict(title = 'Stacked Scores'),\n yaxis = dict(tickangle=-30),\n barmode='stack',\n width=800,\n height=400\n )\n\n\n \n # append all charts to the figures list\n figures = []\n figures.append(dict(data=graph_one, layout=layout_one))\n figures.append(dict(data=graph_two, layout=layout_two))\n\n return figures", "def create_all_charts(df: pd.DataFrame, s3_resource_bucket):\n\n fig, ax = plt.subplots(4, 1, figsize=(10, 20))\n\n days_back = 30\n ax[0].plot(df.tail(days_back)['Date'], df.tail(days_back)['MA_30day'])\n ax[0].plot(df.tail(days_back)['Date'], df.tail(days_back)['MA_10day'])\n ax[0].scatter(df.tail(days_back)['Date'], df.tail(days_back)['Miles'])\n ax[0].legend(['MA_30day', 'MA_10day'])\n ax[0].set_ylabel('Miles')\n text_summary = create_metrics_text_from_dict(calc_runstats(df=df, num_days_back=days_back))\n ax[0].set_title(f'{text_summary}')\n ax[0].title.set_size(16)\n\n days_back = 90\n ax[1].plot(df.tail(days_back)['Date'], df.tail(days_back)['MA_30day'])\n ax[1].plot(df.tail(days_back)['Date'], df.tail(days_back)['MA_10day'])\n ax[1].scatter(df.tail(days_back)['Date'], df.tail(days_back)['Miles'])\n ax[1].legend(['MA_30day', 'MA_10day'])\n ax[1].set_ylabel('Miles')\n text_summary = create_metrics_text_from_dict(calc_runstats(df=df, num_days_back=days_back))\n ax[1].set_title(f'{text_summary}')\n ax[1].title.set_size(16)\n\n days_back = 365\n ax[2].plot(df.tail(days_back)['Date'], df.tail(days_back)['MA_30day'])\n ax[2].plot(df.tail(days_back)['Date'], df.tail(days_back)['MA_10day'])\n ax[2].scatter(df.tail(days_back)['Date'], df.tail(days_back)['Miles'])\n ax[2].legend(['MA_30day', 
'MA_10day'])\n ax[2].set_ylabel('Miles')\n text_summary = create_metrics_text_from_dict(calc_runstats(df=df, num_days_back=days_back))\n ax[2].set_title(f'{text_summary}')\n ax[2].title.set_size(16)\n\n days_back = 3650\n ax[3].plot(df.tail(days_back)['Date'], df.tail(days_back)['MA_30day'])\n ax[3].plot(df.tail(days_back)['Date'], df.tail(days_back)['MA_10day'])\n ax[3].scatter(df.tail(days_back)['Date'], df.tail(days_back)['Miles'])\n ax[3].legend(['MA_30day', 'MA_10day'])\n ax[3].set_ylabel('Miles')\n text_summary = create_metrics_text_from_dict(calc_runstats(df=df, num_days_back=days_back))\n ax[3].set_title(f'{text_summary}')\n ax[3].title.set_size(16)\n\n fig.tight_layout(pad=3.0)\n\n fig.savefig('all_charts.png')\n\n s3_resource_bucket.upload_file('all_charts.png', 'all_charts.png',\n ExtraArgs={'ContentType': 'image/png'})\n # remove local file\n os.remove('all_charts.png')", "def draw_observation(data, date_obj, map_region):\n\n # set mapbox token\n px.set_mapbox_access_token(CONFIG.CONFIG['MAPBOX']['token'])\n\n # create figures\n map_center = {'lat':(map_region[2] + map_region[3]) * 0.5,\n 'lon':(map_region[0] + map_region[1]) * 0.5}\n figs = collections.OrderedDict()\n\n # draw precipitation\n bins = [0.1, 10, 25, 50, 100, 250, 1200]\n keys = ['0.1~10', '10~25', '25~50', '50~100', '100~250', '>=250']\n cols = ['lightgreen', 'yellow', 'lightskyblue', 'blue', 'magenta','maroon']\n cols_map = dict(zip(keys, cols))\n data['rain'] = pd.cut(data['PRE_Time_0808'], bins=bins, labels=keys)\n data['Rainfall'] = '['+data['Lon'].round(2).astype(str) + ',' + data['Lat'].round(2).astype(str) + ']: ' + \\\n data['PRE_Time_0808'].astype(str)\n data['rain_size'] = data['PRE_Time_0808'] + data['PRE_Time_0808'].mean()\n df = data[data['rain'].notna()]\n if df.shape[0] >= 2:\n figs['Rainfall'] = px.scatter_mapbox(\n df, lat=\"Lat\", lon=\"Lon\", color=\"rain\", category_orders={'rain': keys}, color_discrete_map = cols_map,\n hover_data={'Rainfall':True, 'Lon':False, 'Lat':False, 'rain':False, 'rain_size':False},\n mapbox_style='satellite-streets', size=\"rain_size\", center=map_center, size_max=10, zoom=4,\n title = 'Accumulated precipitation ({})'.format(date_obj.strftime(\"%Y%m%d 08-08\")),\n width=900, height=700)\n\n # draw maximum temperature\n bins = [35, 37, 40, 60]\n keys = ['35~37', '37~40', '>=40']\n cols = ['rgb(255,191,187)', 'rgb(250,89,0)', 'rgb(230,0,8)']\n cols_map = dict(zip(keys, cols))\n data['max_temp_warning'] = pd.cut(data['TEM_Max'], bins=bins, labels=keys)\n data['max_temp'] = '['+data['Lon'].round(2).astype(str) + ',' + data['Lat'].round(2).astype(str) + ']: ' + \\\n data['TEM_Max'].astype(str)\n df = data[data['max_temp_warning'].notna()]\n if df.shape[0] >= 2:\n figs['Max_temperature'] = px.scatter_mapbox(\n df, lat=\"Lat\", lon=\"Lon\", color=\"max_temp_warning\", category_orders={'max_temp_warning': keys}, \n color_discrete_map = cols_map,\n hover_data={'max_temp':True, 'Lon':False, 'Lat':False, 'max_temp_warning':False, 'TEM_Max':False},\n mapbox_style='satellite-streets', size=\"TEM_Max\", center=map_center, size_max=10, zoom=4,\n title = 'Maximum temperature ({})'.format(date_obj.strftime(\"%Y%m%d 08-08\")),\n width=900, height=700)\n\n # draw minimum temperature\n bins = [-120, -40, -30, -20, -10, 0]\n keys = ['<=-40','-40~-30', '-30~-20', '-20~-10', '-10~0']\n cols = ['rgb(178,1,223)', 'rgb(8,7,249)', 'rgb(5,71,162)', 'rgb(5,109,250)', 'rgb(111,176,248)']\n cols_map = dict(zip(keys, cols))\n data['min_temp_warning'] = pd.cut(data['TEM_Min'], bins=bins, 
labels=keys)\n data['min_temp'] = '['+data['Lon'].round(2).astype(str) + ',' + data['Lat'].round(2).astype(str) + ']: ' + \\\n data['TEM_Min'].astype(str)\n df = data[data['min_temp_warning'].notna()]\n if df.shape[0] >= 2:\n figs['Min_temprature'] = px.scatter_mapbox(\n df, lat=\"Lat\", lon=\"Lon\", color=\"min_temp_warning\", category_orders={'min_temp_warning': keys}, \n color_discrete_map = cols_map,\n hover_data={'min_temp':True, 'Lon':False, 'Lat':False, 'min_temp_warning':False, 'TEM_Min':False},\n mapbox_style='satellite-streets', size=-1.0*df[\"TEM_Min\"], center=map_center, size_max=10, zoom=4,\n title = 'Minimum temperature ({})'.format(date_obj.strftime(\"%Y%m%d 08-08\")),\n width=900, height=700)\n\n # draw low visibility\n data['VIS_Min'] /= 1000.0\n bins = [0, 0.05, 0.2, 0.5, 1]\n keys = ['<=0.05','0.05~0.2', '0.2~0.5', '0.5~1']\n cols = ['rgb(0,82,77)', 'rgb(0,153,160)', 'rgb(0,210,204)', 'rgb(95,255,252)']\n cols_map = dict(zip(keys, cols))\n data['min_vis_warning'] = pd.cut(data['VIS_Min'], bins=bins, labels=keys)\n data['VIS_Min_size'] = 2.0-data[\"VIS_Min\"]\n data['min_vis'] = '['+data['Lon'].round(2).astype(str) + ',' + data['Lat'].round(2).astype(str) + ']: ' + \\\n data['VIS_Min'].astype(str)\n df = data[data['min_vis_warning'].notna()]\n if df.shape[0] >= 2:\n figs['Low_visibility'] = px.scatter_mapbox(\n df, lat=\"Lat\", lon=\"Lon\", color=\"min_vis_warning\", category_orders={'min_vis_warning': keys}, \n color_discrete_map = cols_map,\n hover_data={'min_vis':True, 'Lon':False, 'Lat':False, 'min_vis_warning':False, 'VIS_Min_size':False},\n mapbox_style='satellite-streets', size=\"VIS_Min_size\", center=map_center, size_max=10, zoom=4,\n title = 'Low visibility ({})'.format(date_obj.strftime(\"%Y%m%d 08-08\")),\n width=900, height=700)\n\n # draw high wind\n bins = [10.8, 13.9, 17.2, 20.8, 24.5, 28.5, 32.7, 37.0, 120]\n keys = ['10.8~13.8','13.9~17.1', '17.2~20.7', '20.8~24.4', '24.5~28.4', '28.5~32.6', '32.7~36.9', '>=37.0']\n cols = ['rgb(0,210,244)', 'rgb(0,125,255)', 'rgb(253,255,0)', 'rgb(247,213,0)',\n 'rgb(255,141,0)', 'rgb(251,89,91)', 'rgb(255,3,0)', 'rgb(178,1,223)']\n cols_map = dict(zip(keys, cols))\n data['max_win_warning'] = pd.cut(data['WIN_S_Max'], bins=bins, labels=keys)\n data['max_win'] = '['+data['Lon'].round(2).astype(str) + ',' + data['Lat'].round(2).astype(str) + ']: ' + \\\n data['WIN_S_Max'].astype(str)\n df = data[data['max_win_warning'].notna()]\n if df.shape[0] >= 2:\n figs['High_wind'] = px.scatter_mapbox(\n df, lat=\"Lat\", lon=\"Lon\", color=\"max_win_warning\", category_orders={'max_win_warning': keys}, \n color_discrete_map = cols_map,\n hover_data={'max_win':True, 'Lon':False, 'Lat':False, 'max_win_warning':False, 'WIN_S_Max':False},\n mapbox_style='satellite-streets', size=\"WIN_S_Max\", center=map_center, size_max=10, zoom=4,\n title = 'Maximum wind speed ({})'.format(date_obj.strftime(\"%Y%m%d 08-08\")),\n width=1000, height=800)\n\n return figs", "def create_figure():\n data = requests.get('https://msds603-swolemate-s3.s3.us-west-2.amazonaws.com/shiqi_xycoords.json').json()\n fig = Figure()\n axis = fig.add_subplot(1, 1, 1)\n lwrist = [v for record in data for k, v in record.items() if k=='left_wrist']\n x = [i[0] for i in lwrist]\n y = [i[1] for i in lwrist]\n axis.scatter(x,y)\n axis.set_xlabel('X')\n axis.set_ylabel('Y')\n axis.set_title('Left Wrist Position')\n return fig", "def diesel_2014():\n import plotly.plotly as py\n import plotly.graph_objs as go\n py.sign_in('littlejab', 'yblima8sc3')\n chart_min = go.Bar(\n x = 
['Jan 14', 'Feb 14', 'Mar 14', 'Apr 14', 'May 14', 'Jun 14', 'Jul 14', 'Aug 14', \\\n 'Sep 14', 'Oct 14', 'Nov 14', 'Dec 14'],\n y = [29.99, 29.99, 29.99, 29.99, 29.99, 29.91, 29.85, 29.86, 29.99, 29.66, 29.41, 27.6],\n name = 'Min'\n )\n chart_avg = go.Bar(\n x = ['Jan 14', 'Feb 14', 'Mar 14', 'Apr 14', 'May 14', 'Jun 14', 'Jul 14', 'Aug 14', \\\n 'Sep 14', 'Oct 14', 'Nov 14', 'Dec 14'],\n y = [29.99, 29.99, 29.99, 29.99, 29.99, 29.91, 29.85, 29.86, 29.99, 29.66, 29.42, 27.64],\n name = 'Average'\n )\n chart_max = go.Bar(\n x = ['Jan 14', 'Feb 14', 'Mar 14', 'Apr 14', 'May 14', 'Jun 14', 'Jul 14', 'Aug 14', \\\n 'Sep 14', 'Oct 14', 'Nov 14', 'Dec 14'],\n y = [29.99, 29.99, 29.99, 29.99, 30.05, 30.01, 29.85, 29.86, 29.99, 29.66, 29.42, 27.91],\n name = 'Max'\n )\n data = [chart_min, chart_avg, chart_max]\n layout = go.Layout(barmode = 'group')\n fig = go.Figure(data = data, layout = layout)\n plot_url = py.plot(fig, filename = 'Diesel 2014')", "def plot_history(data):\n fig = go.Figure()\n for col in data.columns:\n fig.add_trace(go.Scatter(x=data.index, y=data[col], mode='lines', name=col))\n fig.update_xaxes(title_text=\"Time\",\n showline=True, mirror=True, linewidth=1, linecolor='black',\n zeroline=True, zerolinewidth=1, zerolinecolor='lightgrey',\n showgrid=True, gridwidth=1, gridcolor='lightgrey')\n fig.update_yaxes(title_text=\"Share Price ($ USD)\",\n showline=True, mirror=True, linewidth=1, linecolor='black',\n zeroline=True, zerolinewidth=1, zerolinecolor='lightgrey',\n showgrid=True, gridwidth=1, gridcolor='lightgrey')\n fig.update_layout(legend=dict(orientation=\"h\", yanchor=\"bottom\", y=-0.2, xanchor=\"center\", x=0.5),\n font=dict(family='Times New Roman', size=15), plot_bgcolor='rgba(0,0,0,0)',\n margin_l=20, margin_r=20, margin_t=20, margin_b=20,)\n\n fig.write_image(join('..', 'docs', 'share_prices_all_time.png'), height=700, width=900, engine='kaleido')\n fig.write_html(join('..', 'docs', 'share_prices_all_time.html'))\n fig.show()\n\n recent = data[:data.first_valid_index() - pd.Timedelta(weeks=52)]\n fig = go.Figure()\n for col in data.columns:\n fig.add_trace(go.Scatter(x=recent.index, y=recent[col], mode='lines', name=col))\n fig.update_xaxes(title_text=\"Time\",\n showline=True, mirror=True, linewidth=1, linecolor='black',\n zeroline=True, zerolinewidth=1, zerolinecolor='lightgrey',\n showgrid=True, gridwidth=1, gridcolor='lightgrey')\n fig.update_yaxes(title_text=\"Share Price ($ USD)\",\n showline=True, mirror=True, linewidth=1, linecolor='black',\n zeroline=True, zerolinewidth=1, zerolinecolor='lightgrey',\n showgrid=True, gridwidth=1, gridcolor='lightgrey')\n fig.update_layout(legend=dict(orientation=\"h\", yanchor=\"bottom\", y=-0.2, xanchor=\"center\", x=0.5),\n font=dict(family='Times New Roman', size=15), plot_bgcolor='rgba(0,0,0,0)',\n margin_l=20, margin_r=20, margin_t=20, margin_b=20,)\n\n fig.write_image(join('..', 'docs', 'share_prices_past_year.png'), height=700, width=900, engine='kaleido')\n fig.write_html(join('..', 'docs', 'share_prices_past_year.html'))\n fig.show()", "def charts(request):\n \n def histogram():\n x0 = np.random.randn(500)\n # Add 1 to shift the mean of the Gaussian distribution\n x1 = np.random.randn(500) + 1\n\n fig = go.Figure()\n fig.add_trace(go.Histogram(x=x0))\n fig.add_trace(go.Histogram(x=x1))\n\n # Overlay both histograms\n fig.update_layout(barmode='overlay')\n fig.update_layout(title='Histogram')\n # Reduce opacity to see both histograms\n fig.update_traces(opacity=0.75)\n plot_div = plot(fig, output_type='div', 
include_plotlyjs=False)\n return plot_div\n \n def box_plot():\n np.random.seed(1)\n y0 = np.random.randn(50) - 1\n y1 = np.random.randn(50) + 1\n\n fig = go.Figure()\n fig.add_trace(go.Box(y=y0))\n fig.add_trace(go.Box(y=y1))\n fig.update_layout(title='Box Plot')\n plot_div = plot(fig, output_type='div', include_plotlyjs=False)\n return plot_div\n \n def heat_map():\n \n np.random.seed(1)\n programmers = ['Alex','Nicole','Sara','Etienne','Chelsea','Jody','Marianne']\n base = datetime.datetime.today()\n dates = base - np.arange(180) * datetime.timedelta(days=1)\n z = np.random.poisson(size=(len(programmers), len(dates)))\n\n fig = go.Figure(data=go.Heatmap(\n z=z,\n x=dates,\n y=programmers,\n colorscale='Viridis'))\n\n fig.update_layout(\n title='Heat Map',\n xaxis_nticks=36)\n\n plot_div = plot(fig, output_type='div', include_plotlyjs=False)\n return plot_div\n \n def scatter():\n x1 = [1,2,3,4]\n y1 = [30, 35, 25, 45]\n text1 = ['A', 'B', 'C', 'D']\n trace = go.Scatter(\n x=x1, y = y1, text= text1, mode='markers+text'\n )\n layout = dict(\n title='Scatter Plots',\n xaxis=dict(range=[min(x1), max(x1)]),\n yaxis=dict(range=[min(y1), max(y1)])\n )\n fig = go.Figure(data=[trace],layout=layout)\n plot_div = plot(fig, output_type='div', include_plotlyjs=False)\n return plot_div\n\n context = {\n 'plot1':heat_map(),\n 'plot2':scatter(),\n 'plot3':histogram(),\n 'plot4':box_plot()\n }\n return render(request, 'base/charts.html', context)", "def plot_figs(harbor_data):\n # Creates two subplots to show the temperature/time and altitude/time separately\n # Temperature over time data\n plt.subplot(2, 1, 1)\n plt.plot(harbor_data[\"wx_times\"], harbor_data[\"wx_temperatures\"])\n plt.xlim([0,2.35])\n plt.title(\"Harbor Flight Data\")\n plt.ylabel(\"Temperature, F\")\n # Altitude over time data\n plt.subplot(2, 1, 2)\n plt.plot(harbor_data[\"gps_times\"], harbor_data[\"gps_altitude\"])\n plt.xlabel(\"Mission Elapsed Time, Hours\")\n plt.ylabel(\"Altitude, Feet\")\n plt.show()\n\n # Creates two subplots to show the AltUp/TempUp and AltDown/TempDown separately\n # Altitude up over temperature up data\n plt.subplot(1,2,1)\n plt.plot(harbor_data[\"wx_temp_up\"], harbor_data[\"wx_alt_up\"])\n plt.title(\"Harbor Ascent Flight Data\")\n plt.xlabel(\"Temperature, F\")\n plt.ylabel(\"Altitude, Feet\")\n # Altitude down over temperature down data\n plt.subplot(1,2,2)\n plt.plot(harbor_data[\"wx_temp_down\"], harbor_data[\"wx_alt_down\"])\n plt.title(\"Habor Descent Flight Data\")\n plt.xlabel(\"Temperature, F\")\n plt.show()", "def forecast_stats(stats: pd.DataFrame, rperiods: pd.DataFrame = None, titles: dict = False,\r\n outformat: str = 'plotly', hide_maxmin: bool = False) -> go.Figure:\r\n\r\n\r\n def _plot_colors():\r\n return {\r\n '2 Year': 'rgba(254, 240, 1, .4)',\r\n '5 Year': 'rgba(253, 154, 1, .4)',\r\n '10 Year': 'rgba(255, 56, 5, .4)',\r\n '20 Year': 'rgba(128, 0, 246, .4)',\r\n '25 Year': 'rgba(255, 0, 0, .4)',\r\n '50 Year': 'rgba(128, 0, 106, .4)',\r\n '100 Year': 'rgba(128, 0, 246, .4)',\r\n }\r\n \r\n\r\n def _build_title(base, title_headers):\r\n if not title_headers:\r\n return base\r\n if 'bias_corrected' in title_headers.keys():\r\n base = 'Correccion del sesgo - ' + base\r\n for head in title_headers:\r\n if head == 'bias_corrected':\r\n continue\r\n base += f'<br>{head}: {title_headers[head]}'\r\n return base\r\n\r\n\r\n def _rperiod_scatters(startdate: str, enddate: str, rperiods: pd.DataFrame, y_max: float, max_visible: float = 0,\r\n visible: bool = None):\r\n colors = 
_plot_colors()\r\n x_vals = (startdate, enddate, enddate, startdate)\r\n r2 = rperiods['return_period_2'].values[0]\r\n if visible is None:\r\n if max_visible > r2:\r\n visible = True\r\n else:\r\n visible = 'legendonly'\r\n\r\n def template(name, y, color, fill='toself'):\r\n return go.Scatter(\r\n name=name,\r\n x=x_vals,\r\n y=y,\r\n legendgroup='returnperiods',\r\n fill=fill,\r\n visible=visible,\r\n line=dict(color=color, width=0))\r\n\r\n if list(rperiods.columns) == ['max_flow', 'return_period_20', 'return_period_10', 'return_period_2']:\r\n r10 = int(rperiods['return_period_10'].values[0])\r\n r20 = int(rperiods['return_period_20'].values[0])\r\n rmax = int(max(2 * r20 - r10, y_max))\r\n return [\r\n template(f'2 años: {r2}', (r2, r2, r10, r10), colors['2 Year']),\r\n template(f'10 años: {r10}', (r10, r10, r20, r20), colors['10 Year']),\r\n template(f'20 años: {r20}', (r20, r20, rmax, rmax), colors['20 Year']),\r\n ]\r\n\r\n else:\r\n r5 = int(rperiods['return_period_5'].values[0])\r\n r10 = int(rperiods['return_period_10'].values[0])\r\n r25 = int(rperiods['return_period_25'].values[0])\r\n r50 = int(rperiods['return_period_50'].values[0])\r\n r100 = int(rperiods['return_period_100'].values[0])\r\n rmax = int(max(2 * r100 - r25, y_max))\r\n return [\r\n template('Return Periods', (rmax, rmax, rmax, rmax), 'rgba(0,0,0,0)', fill='none'),\r\n template(f'2 años: {r2}', (r2, r2, r5, r5), colors['2 Year']),\r\n template(f'5 años: {r5}', (r5, r5, r10, r10), colors['5 Year']),\r\n template(f'10 años: {r10}', (r10, r10, r25, r25), colors['10 Year']),\r\n template(f'25 años: {r25}', (r25, r25, r50, r50), colors['25 Year']),\r\n template(f'50 años: {r50}', (r50, r50, r100, r100), colors['50 Year']),\r\n template(f'100 años: {r100}', (r100, r100, rmax, rmax), colors['100 Year']),\r\n ]\r\n\r\n #############################################################################\r\n ################################## MAIN #####################################\r\n #############################################################################\r\n\r\n # Start processing the inputs\r\n dates = stats.index.tolist()\r\n startdate = dates[0]\r\n enddate = dates[-1]\r\n\r\n plot_data = {\r\n 'x_stats': stats['flow_avg_m^3/s'].dropna(axis=0).index.tolist(),\r\n 'x_hires': stats['high_res_m^3/s'].dropna(axis=0).index.tolist(),\r\n 'y_max': max(stats['flow_max_m^3/s']),\r\n 'flow_max': list(stats['flow_max_m^3/s'].dropna(axis=0)),\r\n 'flow_75%': list(stats['flow_75%_m^3/s'].dropna(axis=0)),\r\n 'flow_avg': list(stats['flow_avg_m^3/s'].dropna(axis=0)),\r\n 'flow_25%': list(stats['flow_25%_m^3/s'].dropna(axis=0)),\r\n 'flow_min': list(stats['flow_min_m^3/s'].dropna(axis=0)),\r\n 'high_res': list(stats['high_res_m^3/s'].dropna(axis=0)),\r\n }\r\n if rperiods is not None:\r\n plot_data.update(rperiods.to_dict(orient='index').items())\r\n max_visible = max(max(plot_data['flow_75%']), max(plot_data['flow_avg']), max(plot_data['high_res']))\r\n rperiod_scatters = _rperiod_scatters(startdate, enddate, rperiods, plot_data['y_max'], max_visible)\r\n else:\r\n rperiod_scatters = []\r\n\r\n maxmin_visible = 'legendonly' if hide_maxmin else True\r\n scatter_plots = [\r\n # Plot together so you can use fill='toself' for the shaded box, also separately so the labels appear\r\n go.Scatter(name='Caudal máximo y mínimo',\r\n x=plot_data['x_stats'] + plot_data['x_stats'][::-1],\r\n y=plot_data['flow_max'] + plot_data['flow_min'][::-1],\r\n legendgroup='boundaries',\r\n fill='toself',\r\n visible=maxmin_visible,\r\n 
line=dict(color='lightblue', dash='dash')),\r\n go.Scatter(name='Máximo',\r\n x=plot_data['x_stats'],\r\n y=plot_data['flow_max'],\r\n legendgroup='boundaries',\r\n visible=maxmin_visible,\r\n showlegend=False,\r\n line=dict(color='darkblue', dash='dash'),),\r\n go.Scatter(name='Mínimo',\r\n x=plot_data['x_stats'],\r\n y=plot_data['flow_min'],\r\n legendgroup='boundaries',\r\n visible=maxmin_visible,\r\n showlegend=False,\r\n line=dict(color='darkblue', dash='dash')),\r\n\r\n go.Scatter(name='Percentil 25 - 75 de caudal',\r\n x=plot_data['x_stats'] + plot_data['x_stats'][::-1],\r\n y=plot_data['flow_75%'] + plot_data['flow_25%'][::-1],\r\n legendgroup='percentile_flow',\r\n fill='toself',\r\n line=dict(color='lightgreen'), ),\r\n go.Scatter(name='75%',\r\n x=plot_data['x_stats'],\r\n y=plot_data['flow_75%'],\r\n showlegend=False,\r\n legendgroup='percentile_flow',\r\n line=dict(color='green'), ),\r\n go.Scatter(name='25%',\r\n x=plot_data['x_stats'],\r\n y=plot_data['flow_25%'],\r\n showlegend=False,\r\n legendgroup='percentile_flow',\r\n line=dict(color='green'), ),\r\n\r\n go.Scatter(name='Pronóstico de alta resolución',\r\n x=plot_data['x_hires'],\r\n y=plot_data['high_res'],\r\n line={'color': 'black'}, ),\r\n go.Scatter(name='Caudal promedio del ensamble',\r\n x=plot_data['x_stats'],\r\n y=plot_data['flow_avg'],\r\n line=dict(color='blue'), ),\r\n ]\r\n\r\n scatter_plots += rperiod_scatters\r\n\r\n layout = go.Layout(\r\n title=_build_title('Caudal pronosticado', titles),\r\n yaxis={'title': 'Caudal (m<sup>3</sup>/s)', 'range': [0, 'auto']},\r\n xaxis={'title': 'Fecha (UTC +0:00)', 'range': [startdate, enddate], 'hoverformat': '%H:%M - %b %d %Y',\r\n 'tickformat': '%b %d %Y'},\r\n )\r\n figure = go.Figure(scatter_plots, layout=layout)\r\n\r\n return figure", "def plot(self):\n fig = go.Figure()\n for traj in self.data:\n fig.add_trace(\n go.Scatter(\n x=traj.age,\n y=traj.AF\n )\n )\n fig.update_layout(title=self.id)\n return fig", "def plot_3d (cities):\n\n # base all measures on first day present in each city\n day = 0\n # date time for the label\n dt = xpath(cities[0], ('data',day,'dt'))\n date=date_repr(dt)\n \n fig = plot.figure()\n ax = fig.gca(projection='3d')\n X = [ xpath(city, ('city','coord','lon')) for city in cities ]\n Y = [ xpath(city, ('city','coord','lat')) for city in cities ]\n P = [ xpath (city, ('data',day,'pressure'))\n for city in cities ]\n ax.plot_trisurf(X, Y, P, cmap=cm.jet, linewidth=0.2,\n label=\"Pressure on %s\"%date)\n ax.set_title (\"Pressure on %s\"%date)\n plot.show()", "def weather_plot(col, cities=cities):\n df = weather_data(cities)\n df['x'], df['y'] = lnglat_to_meters(df['lon'], df['lat'])\n table = hv.Table(df[['name', col]]).opts(width=800)\n points = df.hvplot.scatter('x','y', c=col, cmap='bkr', hover_cols=['name'])\n map_tiles = EsriImagery().opts(alpha=0.5, width=900, height=480, bgcolor='white')\n return pn.Column(points * map_tiles, table)", "def create_team_line_graph(plot_df, plot_type=\"scatter\", title_suffix=\"\"):\n fig = make_subplots(\n rows=4,\n cols=1,\n shared_xaxes=True,\n vertical_spacing=0.1,\n subplot_titles=(f\"Central {title_suffix}\", f\"South {title_suffix}\", f\"East {title_suffix}\", f\"North {title_suffix}\"),\n )\n\n team_colors = {\n \"Central\": \"#FE6B39\",\n \"East\": \"#FFD166\",\n \"North\": \"#439A86\",\n \"South\": \"#118AB2\",\n }\n\n if plot_type == \"bar\":\n plot_df[\"quarter\"] = pd.PeriodIndex(pd.to_datetime(plot_df[\"month\"]), freq=\"Q\")\n central = go.Bar(\n x=plot_df[\"quarter\"].astype(str),\n 
y=plot_df[\"Central\"],\n text=plot_df[\"Central\"],\n marker={\"color\": team_colors[\"Central\"]},\n hoverinfo=\"x+y\",\n )\n south = go.Bar(\n x=plot_df[\"quarter\"].astype(str),\n y=plot_df[\"South\"],\n text=plot_df[\"South\"],\n marker={\"color\": team_colors[\"South\"]},\n hoverinfo=\"x+y\",\n )\n east = go.Bar(\n x=plot_df[\"quarter\"].astype(str),\n y=plot_df[\"East\"],\n text=plot_df[\"East\"],\n marker={\"color\": team_colors[\"East\"]},\n hoverinfo=\"x+y\",\n )\n north = go.Bar(\n x=plot_df[\"quarter\"].astype(str),\n y=plot_df[\"North\"],\n text=plot_df[\"North\"],\n marker={\"color\": team_colors[\"North\"]},\n hoverinfo=\"x+y\",\n )\n\n if plot_type == \"scatter\":\n central = go.Scatter(\n x=plot_df[\"month\"],\n y=plot_df[\"Central\"],\n mode=\"lines\",\n line={\"width\": 7, \"color\": team_colors[\"Central\"]},\n hoverinfo=\"x+y\",\n )\n south = go.Scatter(\n x=plot_df[\"month\"],\n y=plot_df[\"South\"],\n mode=\"lines\",\n line={\"width\": 7, \"color\": team_colors[\"South\"]},\n hoverinfo=\"x+y\",\n )\n east = go.Scatter(\n x=plot_df[\"month\"],\n y=plot_df[\"East\"],\n mode=\"lines\",\n line={\"width\": 7, \"color\": team_colors[\"East\"]},\n hoverinfo=\"x+y\",\n )\n north = go.Scatter(\n x=plot_df[\"month\"],\n y=plot_df[\"North\"],\n mode=\"lines\",\n line={\"width\": 7, \"color\": team_colors[\"North\"]},\n hoverinfo=\"x+y\",\n )\n\n fig.add_trace(central, 1, 1)\n fig.add_trace(south, 2, 1)\n fig.add_trace(east, 3, 1)\n fig.add_trace(north, 4, 1)\n\n fig.update_yaxes(\n range=[plot_df[\"Central\"].min() * 0.95, plot_df[\"Central\"].max() * 1.05],\n row=1,\n col=1,\n showline=True,\n linewidth=1,\n linecolor=\"black\",\n nticks=4,\n )\n fig.update_yaxes(\n range=[plot_df[\"South\"].min() * 0.95, plot_df[\"South\"].max() * 1.05],\n row=2,\n col=1,\n showline=True,\n linewidth=1,\n linecolor=\"black\",\n nticks=4,\n )\n fig.update_yaxes(\n range=[plot_df[\"East\"].min() * 0.95, plot_df[\"East\"].max() * 1.05],\n row=3,\n col=1,\n showline=True,\n linewidth=1,\n linecolor=\"black\",\n nticks=4,\n )\n fig.update_yaxes(\n range=[plot_df[\"North\"].min() * 0.95, plot_df[\"North\"].max() * 1.05],\n row=4,\n col=1,\n showline=True,\n linewidth=1,\n linecolor=\"black\",\n nticks=4,\n )\n\n fig.update_xaxes(showline=True, linewidth=1, linecolor=\"black\", row=1, col=1)\n fig.update_xaxes(showline=True, linewidth=1, linecolor=\"black\", row=2, col=1)\n fig.update_xaxes(showline=True, linewidth=1, linecolor=\"black\", row=3, col=1)\n fig.update_xaxes(showline=True, linewidth=1, linecolor=\"black\", row=4, col=1)\n\n fig.update_layout(\n margin={\"pad\": 10, \"l\": 55, \"r\": 55, \"t\": 35, \"b\": 65},\n showlegend=False,\n plot_bgcolor=\"rgba(0,0,0,0)\",\n title=\"Participants\",\n )\n\n return fig", "def draw_weather_analysis(date_obj, data, map_region, return_dict):\n\n # image dictionary\n images = collections.OrderedDict()\n return_dict[0] = None\n\n # draw 2PVU surface pressure\n image = pv.draw_pres_pv2(\n data['pres_pv2'].values, data['pres_pv2']['lon'].values, data['pres_pv2']['lat'].values,\n map_region=map_region, title_kwargs={'name':'CFSR', 'time': date_obj})\n images['2PVU_Surface_Pressure'] = image\n\n # draw 200hPa wind field\n image = dynamics.draw_wind_upper(\n data['u200'].values, data['v200'].values, \n data['u200']['lon'].values, data['u200']['lat'].values,\n gh=data['gh200'].values, map_region=map_region, \n title_kwargs={'name':'CFSR', 'head': \"200hPa Wind | GH\", 'time': date_obj})\n images['200hPa_Wind'] = image\n\n # draw 500hPa height and temperature\n 
image = dynamics.draw_height_temp(\n data['gh500'].values, data['t500'].values, \n data['gh500']['lon'].values, data['gh500']['lat'].values, map_region=map_region, \n title_kwargs={'name':'CFSR', 'head': \"500hPa GH | T\", 'time': date_obj})\n images['500hPa_Height'] = image\n\n # draw 500hPa vorticity\n image = dynamics.draw_vort_high(\n data['u500'].values, data['v500'].values, \n data['u500']['lon'].values, data['u500']['lat'].values,\n gh=data['gh500'].values, map_region=map_region,\n title_kwargs={'name':'CFSR', 'head': \"500hPa Wind | Vorticity | GH\", 'time': date_obj})\n images['500hPa_Vorticity'] = image\n\n # draw 700hPa vertical velocity\n image = dynamics.draw_vvel_high(\n data['u700'].values, data['v700'].values, data['w700'].values, \n data['w700']['lon'].values, data['w700']['lat'].values,\n gh=data['gh700'].values, map_region=map_region,\n title_kwargs={'name':'CFSR', 'head': \"700hPa Vertical Velocity | Wind | GH\", 'time': date_obj})\n images['700hPa_Vertical_Velocity'] = image\n\n # draw 700hPa wind field\n image = dynamics.draw_wind_high(\n data['u700'].values, data['v700'].values, \n data['u700']['lon'].values, data['u700']['lat'].values,\n gh=data['gh500'].values, map_region=map_region,\n title_kwargs={'name':'CFSR', 'head': \"700hPa Wind | 500hPa GH\", 'time': date_obj})\n images['700hPa_Wind'] = image\n\n # draw 700hPa temperature field\n image = thermal.draw_temp_high(\n data['t700'].values, data['t700']['lon'].values, data['t700']['lat'].values,\n gh=data['gh500'].values, map_region=map_region,\n title_kwargs={'name':'CFSR', 'head': \"700hPa T | 500hPa GH\", 'time': date_obj})\n images['700hPa_Temperature'] = image\n\n # draw 700hPa relative humidity\n rh = calc.relative_humidity_from_specific_humidity(700 * units.hPa, data['t700'], data['q700']) * 100\n image = moisture.draw_rh_high(\n data['u700'].values, data['v700'].values, rh.values,\n data['u700']['lon'].values, data['u700']['lat'].values,\n gh=data['gh500'].values, map_region=map_region,\n title_kwargs={'name':'CFSR', 'head': \"700hPa RH | Wind | 500hPa GH\", 'time': date_obj})\n images['700hPa_Relative_Humidity'] = image\n\n # draw 850hPa wind field\n image = dynamics.draw_wind_high(\n data['u850'].values, data['v850'].values, \n data['u850']['lon'].values, data['u850']['lat'].values,\n gh=data['gh500'].values, map_region=map_region,\n title_kwargs={'name':'CFSR', 'head': \"850hPa Wind | 500hPa GH\", 'time': date_obj})\n images['850hPa_Wind'] = image\n\n # draw 850hPa temperature field\n image = thermal.draw_temp_high(\n data['t850'].values, data['t850']['lon'].values, data['t850']['lat'].values,\n gh=data['gh500'].values, map_region=map_region,\n title_kwargs={'name':'CFSR', 'head': \"850hPa T | 500hPa GH\", 'time': date_obj})\n images['850hPa_Temperature'] = image\n\n # draw 850hPa relative humidity\n rh = calc.relative_humidity_from_specific_humidity(850 * units.hPa, data['t850'], data['q850']) * 100\n image = moisture.draw_rh_high(\n data['u850'].values, data['v850'].values, rh.values,\n data['u850']['lon'].values, data['u850']['lat'].values,\n gh=data['gh500'].values, map_region=map_region,\n title_kwargs={'name':'CFSR', 'head': \"850hPa RH | Wind | 500hPa GH\", 'time': date_obj})\n images['850hPa_Relative_Humidity'] = image\n\n # draw 850hPa specific field\n image = moisture.draw_sp_high(\n data['u850'].values, data['v850'].values, data['q850'].values*1000.,\n data['q850']['lon'].values, data['q850']['lat'].values,\n gh=data['gh500'].values, map_region=map_region,\n title_kwargs={'name':'CFSR', 
'head': \"850hPa SP | Wind | 500hPa GH\", 'time': date_obj})\n images['850hPa_Specific_Humidity'] = image\n\n # draw 925hPa temperature field\n image = thermal.draw_temp_high(\n data['t925'].values, data['t925']['lon'].values, data['t925']['lat'].values,\n gh=data['gh500'].values, map_region=map_region,\n title_kwargs={'name':'CFSR', 'head': \"925hPa T | 500hPa GH\", 'time': date_obj})\n images['925hPa_Temperature'] = image\n\n # draw 925hPa wind field\n image = dynamics.draw_wind_high(\n data['u925'].values, data['v925'].values, \n data['u925']['lon'].values, data['u925']['lat'].values,\n gh=data['gh500'].values, map_region=map_region,\n title_kwargs={'name':'CFSR', 'head': \"925hPa Wind | 500hPa GH\", 'time': date_obj})\n images['925hPa_Wind'] = image\n\n # draw 925hPa relative humidity\n rh = calc.relative_humidity_from_specific_humidity(925 * units.hPa, data['t925'], data['q925']) * 100\n image = moisture.draw_rh_high(\n data['u925'].values, data['v925'].values, rh.values,\n data['u925']['lon'].values, data['u925']['lat'].values,\n gh=data['gh500'].values, map_region=map_region,\n title_kwargs={'name':'CFSR', 'head': \"925hPa RH | Wind | 500hPa GH\", 'time': date_obj})\n images['925hPa_Relative_Humdity'] = image\n\n # draw 925hPa specific field\n image = moisture.draw_sp_high(\n data['u925'].values, data['v925'].values, data['q925'].values*1000.,\n data['q925']['lon'].values, data['q925']['lat'].values,\n gh=data['gh500'].values, map_region=map_region,\n title_kwargs={'name':'CFSR', 'head': \"925hPa SP | Wind | 500hPa GH\", 'time': date_obj})\n images['925hPa_Specific_Humidity'] = image\n\n # draw precipitable water field\n image = moisture.draw_pwat(\n data['pwat'].values, data['pwat']['lon'].values, data['pwat']['lat'].values,\n gh=data['gh500'].values, map_region=map_region,\n title_kwargs={'name':'CFSR', 'head': \"Precipitable Water | 500hPa GH\", 'time': date_obj})\n images['Precipitable_Water'] = image\n\n # draw mean sea level pressure field\n image = dynamics.draw_mslp(\n data['mslp'].values, data['mslp']['lon'].values, data['mslp']['lat'].values,\n gh=data['gh500'].values, map_region=map_region,\n title_kwargs={'name':'CFSR', 'head': \"MSLP | 500hPa GH\", 'time': date_obj})\n images['Mean_Sea_Level_Pressure'] = image\n\n return_dict[0] = images", "def make_figure(self, N):\n fig = go.Figure()\n fig.add_trace(go.Scatter(y=N['odds'], x=np.linspace(1, 11, 6), name='odd year population',\n hovertemplate = 'Year: %{x}'+ '<br>Pop: %{y}'))\n fig.add_trace(go.Scatter(y=N['evens'], x=np.linspace(2, 12, 6), name='even year population',\n hovertemplate = 'Year: %{x}'+ '<br>Pop: %{y}'))\n fig.add_shape(type='line',\n xref='x', yref='paper',\n x0=2.5, y0=0, x1=2.5, y1=1,\n line=dict(color='Black', width=3))\n return fig", "def index_figures(): \n # extract data needed for visuals\n # TODO: Below is an example - modify to extract data for your own visuals\n genre_counts = df.groupby('genre').count()['message']\n genre_names = list(genre_counts.index)\n \n # create visuals\n # TODO: Below is an example - modify to create your own visuals\n graph_one = []\n graph_one.append(\n go.Bar(\n x = genre_names,\n y = genre_counts\n )\n ) \n layout_one = dict(title = 'Distribution of Message Genres',\n yaxis = dict(title = 'Count'),\n xaxis = dict(title = 'Genre')\n )\n \n category_values = df.iloc[:,4:].sum().sort_values(ascending=False).head()\n category_names = list(category_values.index)\n \n graph_two = []\n graph_two.append(\n go.Pie(\n values=category_values,\n labels=category_names\n )\n )\n 
layout_two = dict(title = 'Top Categories',\n yaxis = dict(title = 'Count'),\n xaxis = dict(title = 'Category')\n )\n \n graphs = []\n graphs.append(dict(data=graph_one, layout=layout_one))\n graphs.append(dict(data=graph_two, layout=layout_two))\n return graphs", "def map_plot(iso3_codes, countries_organisations_amount,countries_list):\n d = {'ISO-3': iso3_codes, 'spending': countries_organisations_amount, 'countries': countries_list}\n df = pd.DataFrame(data=d)\n fig = px.choropleth(df,\n locations='ISO-3',\n color=\"spending\",\n scope=\"world\",\n labels={'spending': 'Amount of organisations'},\n height=500,\n hover_name=df['countries'],\n hover_data=['spending'],\n custom_data=['spending','countries']\n )\n\n fig.update_layout(\n title_text='Number of organisations lobbying in the EU',\n geo=dict(\n showframe=False,\n showcoastlines=False,\n projection_type='equirectangular'))\n fig.update_traces(hovertemplate=\"<b> %{customdata[1]} </b> : Number of organisations: %{customdata[0]}\")\n return fig", "def offline_plotly_scatter3d(df, x=0, y=1, z=-1):\n data = []\n # clusters = []\n colors = ['rgb(228,26,28)', 'rgb(55,126,184)', 'rgb(77,175,74)']\n\n # df.columns = clean_columns(df.columns)\n\n x = get_array(df, x, default=0)\n y = get_array(df, y, default=1)\n z = get_array(df, z, default=-1)\n for i in range(len(df['name'].unique())):\n name = df['Name'].unique()[i]\n color = colors[i]\n x = x[pd.np.array(df['name'] == name)]\n y = y[pd.np.array(df['name'] == name)]\n z = z[pd.np.array(df['name'] == name)]\n\n trace = dict(\n name=name,\n x=x, y=y, z=z,\n type=\"scatter3d\",\n mode='markers',\n marker=dict(size=3, color=color, line=dict(width=0)))\n data.append(trace)\n\n layout = dict(\n width=800,\n height=550,\n autosize=False,\n title='Iris dataset',\n scene=dict(\n xaxis=dict(\n gridcolor='rgb(255, 255, 255)',\n zerolinecolor='rgb(255, 255, 255)',\n showbackground=True,\n backgroundcolor='rgb(230, 230,230)'\n ),\n yaxis=dict(\n gridcolor='rgb(255, 255, 255)',\n zerolinecolor='rgb(255, 255, 255)',\n showbackground=True,\n backgroundcolor='rgb(230, 230,230)'\n ),\n zaxis=dict(\n gridcolor='rgb(255, 255, 255)',\n zerolinecolor='rgb(255, 255, 255)',\n showbackground=True,\n backgroundcolor='rgb(230, 230,230)'\n ),\n aspectratio=dict(x=1, y=1, z=0.7),\n aspectmode='manual'\n ),\n )\n\n fig = dict(data=data, layout=layout)\n\n # IPython notebook\n # plotly.iplot(fig, filename='pandas-3d-iris', validate=False)\n\n url = plotly.offline.plot(fig, filename='pandas-3d-iris', validate=False)\n return url", "def makeOverviewPage(orbit_list, mtpConstants, paths, occultationObservationDict, nadirObservationDict):\n mtpNumber = mtpConstants[\"mtpNumber\"]\n obsTypeNames = {\"ingress\":\"irIngressLow\", \"egress\":\"irEgressLow\"}\n\n \n #loop through once to find list of all orders measured\n ordersAll = []\n for orbit in orbit_list:\n occultationObsTypes = [occultationType for occultationType in orbit[\"allowedObservationTypes\"][:] if occultationType in [\"ingress\", \"egress\"]] \n for occultationObsType in occultationObsTypes:\n if occultationObsType in orbit.keys():\n obsTypeName = obsTypeNames[occultationObsType]\n \n orders = orbit[\"finalOrbitPlan\"][obsTypeName+\"Orders\"]\n if 0 in orders: #remove darks\n orders.remove(0)\n if \"COP#\" in \"%s\" %orders[0]: #remove manual COP selection\n orders = []\n ordersAll.extend(orders)\n uniqueOccultationOrders = sorted(list(set(ordersAll)))\n \n #loop through again to plot each order on a single graph\n for chosenOrder in 
uniqueOccultationOrders:\n title = \"Solar occultations for diffraction order %s\" %(chosenOrder)\n fig = plt.figure(figsize=(FIG_X, FIG_Y))\n ax = fig.add_subplot(111, projection=\"mollweide\")\n ax.grid(True)\n plt.title(title)\n \n lonsAll = [] #pre-make list of all observing points of this order, otherwise colourbar scale will be incorrect\n latsAll = []\n altsAll = []\n for orbit in orbit_list:\n occultationObsTypes = [occultationType for occultationType in orbit[\"allowedObservationTypes\"][:] if occultationType in [\"ingress\", \"egress\"]] \n for occultationObsType in occultationObsTypes:\n if occultationObsType in orbit.keys():\n obsTypeName = obsTypeNames[occultationObsType]\n \n orders = orbit[\"finalOrbitPlan\"][obsTypeName+\"Orders\"]\n if chosenOrder in orders:\n occultation = orbit[occultationObsType]\n \n #if lats/lons/alts not yet in orbitList, find and write to list\n if \"alts\" not in occultation.keys():\n #just plot the half of the occultation closest to the surface, not the high altitude bits\n #ignore merged or grazing occs at this point\n if occultationObsType == \"ingress\":\n ets = np.arange(occultation[\"etMidpoint\"], occultation[\"etEnd\"], OCCULTATION_SEARCH_STEP_SIZE)\n elif occultationObsType == \"egress\":\n ets = np.arange(occultation[\"etStart\"], occultation[\"etMidpoint\"], OCCULTATION_SEARCH_STEP_SIZE)\n lonsLatsLsts = np.asfarray([getLonLatLst(et) for et in ets])\n occultation[\"lons\"] = lonsLatsLsts[:, 0]\n occultation[\"lats\"] = lonsLatsLsts[:, 1]\n occultation[\"alts\"] = np.asfarray([getTangentAltitude(et) for et in ets])\n \n #else take lats/lons/alts from orbitList if already exists\n lonsAll.extend(occultation[\"lons\"])\n latsAll.extend(occultation[\"lats\"])\n altsAll.extend(occultation[\"alts\"])\n \n plot1 = ax.scatter(np.asfarray(lonsAll)/sp.dpr(), np.asfarray(latsAll)/sp.dpr(), \\\n c=np.asfarray(altsAll), cmap=plt.cm.jet, marker='o', linewidth=0)\n \n cbar = fig.colorbar(plot1, fraction=0.046, pad=0.04)\n cbar.set_label(\"Tangent Point Altitude (km)\", rotation=270, labelpad=20)\n fig.tight_layout()\n plt.savefig(os.path.join(paths[\"IMG_MTP_PATH\"], \"occultations_mtp%03d_order%i_altitude.png\" %(mtpNumber, chosenOrder)))\n plt.close()\n \n \n \n \"\"\"plot nadir orders\"\"\"\n #find all orders measured\n ordersAll = []\n for orbit in orbit_list:\n if \"dayside\" in orbit[\"irMeasuredObsTypes\"]:\n orders = orbit[\"finalOrbitPlan\"][\"irDaysideOrders\"]\n if 0 in orders: #remove darks\n orders.remove(0)\n if \"COP#\" in \"%s\" %orders[0]: #remove manual COP selection\n orders = []\n ordersAll.extend(orders)\n uniqueNadirOrders = sorted(list(set(ordersAll)))\n \n #plot each order\n for chosenOrder in uniqueNadirOrders:\n title = \"Dayside nadirs for diffraction order %s\" %(chosenOrder)\n fig = plt.figure(figsize=(FIG_X, FIG_Y))\n ax = fig.add_subplot(111, projection=\"mollweide\")\n ax.grid(True)\n plt.title(title)\n \n lonsAll = [] #pre-make list of all observing points of this order, otherwise colourbar scale will be incorrect\n latsAll = []\n anglesAll = []\n for orbit in orbit_list:\n if \"dayside\" in orbit[\"irMeasuredObsTypes\"]:\n orders = orbit[\"finalOrbitPlan\"][\"irDaysideOrders\"]\n if chosenOrder in orders:\n nadir = orbit[\"dayside\"]\n \n #if lats/lons/incidence angles not yet in orbitList, find and write to list\n if \"incidences\" not in nadir.keys():\n# print(orbit[\"orbitNumber\"])\n #nadir start/end times have been modified to fit thermal room\n realStartTime = nadir[\"obsStart\"] + PRECOOLING_TIME + 
INITIALISATION_TIME\n realEndTime = nadir[\"obsEnd\"]\n ets = np.arange(realStartTime, realEndTime, NADIR_SEARCH_STEP_SIZE)\n lonsLatsIncidencesLsts = np.asfarray([getLonLatIncidenceLst(et) for et in ets])\n nadir[\"lons\"] = lonsLatsIncidencesLsts[:, 0]\n nadir[\"lats\"] = lonsLatsIncidencesLsts[:, 1]\n nadir[\"incidences\"] = lonsLatsIncidencesLsts[:, 2]\n #else take lats/lons/incidence angles from orbitList if already exists\n lonsAll.extend(nadir[\"lons\"])\n latsAll.extend(nadir[\"lats\"])\n anglesAll.extend(nadir[\"incidences\"])\n \n plot1 = ax.scatter(np.asfarray(lonsAll)/sp.dpr(), np.asfarray(latsAll)/sp.dpr(), \\\n c=np.asfarray(anglesAll), cmap=plt.cm.jet, marker='o', linewidth=0)\n \n cbar = fig.colorbar(plot1, fraction=0.046, pad=0.04)\n cbar.set_label(\"Incidence Angle (degrees)\", rotation=270, labelpad=20)\n fig.tight_layout()\n plt.savefig(os.path.join(paths[\"IMG_MTP_PATH\"], \"dayside_nadirs_mtp%03d_order%i_incidence_angle.png\" %(mtpNumber, chosenOrder)))\n plt.close()\n\n \"\"\"write mtp overview page\"\"\"\n h = r\"\"\n h += r\"<h1>MTP%03d Overview</h1>\" %(mtpNumber)\n h += r\"<h2>Geometry</h2>\"+\"\\n\"\n \n imagename = \"mtp%03d_occultation_duration.png\" %(mtpNumber)\n h += r\"<img src='%s'>\" %imagename\n imagename = \"mtp%03d_occultation_lat.png\" %(mtpNumber)\n h += r\"<img src='%s'>\" %imagename\n imagename = \"mtp%03d_nadir_minimum_incidence_angle.png\" %(mtpNumber)\n h += r\"<img src='%s'>\" %imagename\n \n h += r\"<p>UVIS typically operates on all dayside nadirs and all occultations</p>\"+\"\\n\"\n \n h += r\"<h2>Solar Occultations</h2>\"+\"\\n\"\n \n h += r\"Solar occultation diffraction orders measured this MTP: \"+\"\\n\"\n for chosenOrder in sorted(uniqueOccultationOrders):\n h += \"%i, \" %chosenOrder\n h += r\"<br>\"+\"\\n\"\n \n for chosenOrder in sorted(uniqueOccultationOrders):\n h += \"<h3>Solar occultations for diffraction order %i</h3>\" %chosenOrder\n imagename = \"img/occultations_mtp%03d_order%i_altitude.png\" %(mtpNumber, chosenOrder)\n h += r\"<img src='%s'>\" %imagename\n \n h += r\"<br>\"+\"\\n\"\n h += r\"<br>\"+\"\\n\"\n h += r\"<br>\"+\"\\n\"\n h += r\"<br>\"+\"\\n\"\n h += r\"<h2>Dayside Nadirs</h2>\"+\"\\n\"\n \n h += r\"Dayside nadir diffraction orders measured this MTP: \"+\"\\n\"\n for chosenOrder in sorted(uniqueNadirOrders):\n h += \"%i, \" %chosenOrder\n h += r\"<br>\"+\"\\n\"\n \n for chosenOrder in sorted(uniqueNadirOrders):\n h += \"<h3>Dayside nadirs for diffraction order %i</h3>\" %chosenOrder\n imagename = \"img/dayside_nadirs_mtp%03d_order%i_incidence_angle.png\" %(mtpNumber, chosenOrder)\n h += r\"<img src='%s'>\" %imagename\n \n \n \n h += r\"<br>\"+\"\\n\"\n h += r\"<br>\"+\"\\n\"\n# h += r\"<h2>SO/LNO Observation Plan</h2>\"+\"\\n\"\n \n \n h += r\"<br>\"+\"\\n\"\n h += r\"<br>\"+\"\\n\"\n h += r\"<h2>SO/LNO Observation Dictionaries</h2>\"+\"\\n\"\n h += r\"<h3>Solar Occultation</h3>\"+\"\\n\"\n headers = [\"Name\", \"Diffraction Order 1\", \"Diffraction Order 2\", \"Diffraction Order 3\", \"Diffraction Order 4\", \"Diffraction Order 5\", \"Diffraction Order 6\", \"Integration Time\", \"Rhythm\", \"Detector Height\"]\n h += r\"<table border=1>\"+\"\\n\"\n h += r\"<tr>\"+\"\\n\"\n for header in headers:\n h += r\"<th>%s</th>\" %header\n h += r\"</tr>\"+\"\\n\"\n for key in sorted(occultationObservationDict.keys()):\n orders, integrationTime, rhythm, detectorRows, channelCode = getObsParameters(key, occultationObservationDict)\n \n h += r\"<tr>\"+\"\\n\"\n h += r\"<td>%s</td>\" %(key)\n if \"COP\" in orders:\n h += 
r\"<td>%s (manual mode)</td>\" %(orders)\n for order in range(5):\n h += r\"<td>-</td>\"+\"\\n\"\n else: \n for order in orders:\n h += r\"<td>%s</td>\" %(order)\n for order in range(6-len(orders)):\n h += r\"<td>-</td>\"+\"\\n\"\n \n h += r\"<td>%i</td>\" %(integrationTime)\n h += r\"<td>%i</td>\" %(rhythm)\n h += r\"<td>%i</td>\" %(detectorRows)\n h += r\"</tr>\"+\"\\n\"\n h += r\"</table>\"+\"\\n\"\n \n \n h += r\"<h3>Nadir/Limb</h3>\"+\"\\n\"\n headers = [\"Name\", \"Diffraction Order 1\", \"Diffraction Order 2\", \"Diffraction Order 3\", \"Diffraction Order 4\", \"Diffraction Order 5\", \"Diffraction Order 6\", \"Integration Time\", \"Rhythm\", \"Detector Height\"]\n h += r\"<table border=1>\"+\"\\n\"\n h += r\"<tr>\"+\"\\n\"\n for header in headers:\n h += r\"<th>%s</th>\" %header\n h += r\"</tr>\"\n for key in sorted(nadirObservationDict.keys()):\n orders, integrationTime, rhythm, detectorRows, channelCode = getObsParameters(key, nadirObservationDict)\n \n h += r\"<tr>\"+\"\\n\"\n h += r\"<td>%s</td>\" %(key)\n if \"COP\" in orders:\n h += r\"<td>%s (manual mode)</td>\" %(orders)\n for order in range(5):\n h += r\"<td>-</td>\"+\"\\n\"\n else: \n for order in orders:\n h += r\"<td>%s</td>\" %(order)\n for order in range(6-len(orders)):\n h += r\"<td>-</td>\"+\"\\n\"\n \n h += r\"<td>%i</td>\" %(integrationTime)\n h += r\"<td>%i</td>\" %(rhythm)\n h += r\"<td>%i</td>\" %(detectorRows)\n h += r\"</tr>\"+\"\\n\"\n h += r\"</table>\"+\"\\n\"\n \n \n \n \n h += r\"<br>\"+\"\\n\"\n h += r\"<br>\"+\"\\n\"\n h += r\"<p>Page last modified: %s</p>\" %(datetime.now().strftime('%a, %d %b %Y %H:%M:%S')) +\"\\n\"\n \n with open(os.path.join(paths[\"HTML_MTP_PATH\"], \"nomad_mtp%03d_overview.html\" %(mtpNumber)), 'w') as f:\n f.write(h)", "def init_fig():\r\n # Set the axis and plot titles\r\n orbit, = ax.plot([], [], [])\r\n satellite, = ax.plot([], [], [], 'o', color='red')\r\n earth, = ax.plot([], [], [], 'o', color='green')\r\n time_text.set_text('')\r\n ax.set_title(Title_3D, fontsize=22)\r\n ax.set_xlim3d([-lim, lim])\r\n ax.set_xlabel('I\\n[km]')\r\n ax.set_ylim3d([-lim, lim])\r\n ax.set_ylabel('J\\n[km]')\r\n ax.set_zlim3d([-lim, lim])\r\n ax.set_zlabel('K\\n[km]')\r\n # plot Earth\r\n\r\n u = np.linspace(0, 2 * np.pi, 100)\r\n v = np.linspace(0, np.pi, 100)\r\n x = R_moon * np.outer(np.cos(u), np.sin(v))\r\n y = R_moon * np.outer(np.sin(u), np.sin(v))\r\n z = R_moon * np.outer(np.ones(np.size(u)), np.cos(v))\r\n ax.plot_wireframe(x, y, z, color=\"grey\", label=\"Moon\", linewidth=0.3, rstride=7, cstride=7)\r\n # Must return the list of artists, but we use a pass\r\n # through so that they aren't created multiple times\r\n return orbit, satellite, earth, time_text", "def tot_pop_tse_viz(city_id: int):\r\n df = pd.read_csv(DATA_FILEPATH2, encoding='utf-8')\r\n df2 = pd.read_csv(DATA_FILEPATH1, encoding='utf-8')\r\n df = df.loc[df['city_id'] == city_id]\r\n df2 = df2.loc[df2['city_id'] == city_id]\r\n x = df['year'].tolist()\r\n y = df2['total_pop'].tolist()\r\n y_hat = df['yhat'].tolist()\r\n y_upper = df['yhat_upper'].tolist()\r\n y_lower = df['yhat_lower'].tolist()\r\n\r\n fig = go.Figure([\r\n go.Scatter(\r\n x=x,\r\n y=y,\r\n line=dict(color='rgb(0,151,223)'),\r\n mode='lines',\r\n showlegend=False\r\n ),\r\n go.Scatter(\r\n x=x,\r\n y=y_hat,\r\n line=dict(color='rgb(0,151,223)'),\r\n mode='lines',\r\n showlegend=False\r\n ),\r\n go.Scatter(\r\n x=x+x[::-1], # x, then x reversed\r\n y=y_upper+y_lower[::-1], # upper, then lower reversed\r\n fill='toself',\r\n 
fillcolor='rgba(0,151,223,0.2)',\r\n line=dict(color='rgba(255,255,255,0)'),\r\n hoverinfo=\"skip\",\r\n showlegend=False\r\n )\r\n ])\r\n return fig.to_json()", "def plotly_map():\n df = process_life_expectancy_dataset(\"regression\")\n\n selected_df = convert_ohe_columns_into_one(df, \"x0\", \"country\")\n\n # Choosing year 1800 for map plots\n selected_df = selected_df[selected_df[\"year\"] == \"1800\"]\n\n # Plotting on Map\n fig = px.choropleth(selected_df, locations=\"country\", locationmode=\"country names\", color=\"value\",\n hover_name=\"country\", color_continuous_scale = px.colors.sequential.Plasma)\n\n return fig", "def plot_mult_locations(sf, df, data, dcounts, geoid, all_geoids, l, b, w_map = 2.5, w_time = 3, h=3, \n colors = ['orange','palevioletred','steelblue','olive'], \n markers = ['o','^','s','P']):\n #plot timeseries\n ax = None\n ax = plot_mult_timetrends(data, geoid, cols = [i for i in data.columns if (i.endswith('21day_avg') and\n i[:12] in geoid)],\n area = [l + w_map + 0.3,b + h/2, w_time, h/2], colors = colors,\n markers = markers, sharex = ax, ylim_bottom = -50, ylim_top = 50,\n xlabels=[''] * 6)\n \n # plot dcount timeseries\n ax = None\n ax = plot_mult_timetrends(dcounts, geoid, cols = [i for i in data.columns if (i.endswith('21day_avg') and\n i[:12] in geoid)],\n area = [l + w_map + 0.3,b,w_time,h/2], colors = colors, markers = markers, sharex = ax,\n ylim_bottom = 0, ylim_top = 200, ylabel = 'Device count',\n xlabels=data.index[np.arange(0,data.shape[0],28)].tolist())\n \n #plot map\n plt.axes([l,b,w_map,h])\n for i in df_edges[df_edges.ZIPR.isin(['98105','98195','98115','98102','98112'])].index:\n shape_ex = sf_edges.shape(i)\n x_lon = np.zeros((len(shape_ex.points),1))\n y_lat = np.zeros((len(shape_ex.points),1))\n for ip in range(len(shape_ex.points)):\n x_lon[ip] = shape_ex.points[ip][0]\n y_lat[ip] = shape_ex.points[ip][1]\n plt.plot(x_lon,y_lat, color = 'black')\n \n outline_geoids(sf = sf, df = df, geoids = all_geoids, include_labels=False)\n fill_blockgroups(sf = sf, df = df, geoids = geoid, colors=colors)\n \n \n plt.xlim(-122.325,-122.25)\n plt.ylim(47.645,47.68)\n plt.axis('off')", "def generateStationPlot(dir_path, traj_list, color_scheme='light'):\n\n\n # Choose the color scheme\n cs = MapColorScheme()\n \n if color_scheme == 'light':\n cs.light()\n\n else:\n cs.dark()\n\n\n plt.figure(figsize=(19.2, 10.8))\n\n # Init the map\n m = Basemap(projection='cyl', resolution='i')\n\n # Draw the coast boundary and fill the oceans with the given color\n m.drawmapboundary(fill_color=cs.map_background)\n\n # Fill continents, set lake color same as ocean color\n m.fillcontinents(color=cs.continents, lake_color=cs.lakes, zorder=1)\n\n # Draw country borders\n m.drawcountries(color=cs.countries)\n m.drawstates(color=cs.states, linestyle='--')\n\n\n\n ### PLOT WORLD MAP ###\n\n # Group stations into countries\n country_dict = {}\n for traj in traj_list:\n\n for obs in traj.observations:\n\n # Extract country code\n country_code = obs.station_id[:2]\n\n if country_code not in country_dict:\n country_dict[country_code] = {}\n \n\n if obs.station_id not in country_dict[country_code]:\n country_dict[country_code][obs.station_id] = [obs.lat, obs.lon]\n\n\n\n # Plot stations in all countries\n for country_code in country_dict:\n\n station_dict = country_dict[country_code]\n\n # Extract lat/lon\n lat = np.degrees([station_dict[station_id][0] for station_id in station_dict])\n lon = np.degrees([station_dict[station_id][1] for station_id in station_dict])\n\n # 
Convert lat/lon to x/y\n x, y = m(lon, lat)\n\n plt.scatter(x, y, s=0.75, zorder=5, label=\"{:s}: {:d}\".format(country_code, len(lat)))\n\n\n plt.legend(loc='lower left')\n\n plt.tight_layout()\n\n plt.savefig(os.path.join(dir_path, \"world_map.png\"), dpi=100)\n\n plt.close()\n\n ### ###", "def visualize_plotly(self, topics):\r\n \r\n df_palette = pd.DataFrame([\r\n [0, '#C03028'],\r\n [1, '#F08030'],\r\n [2, '#6890F0'],\r\n [3, '#78C850'],\r\n [4, '#A890F0'],\r\n [5, '#B22222'],\r\n [6, '#F8D030'],\r\n [7, '#D3D3D3'],\r\n [8, '#F85888'],\r\n [9, '#7FFFD4']])\r\n #[10, '#98D8D8']])\r\n \r\n #[11, '#A8B820'],\r\n #[12, '#7038F8'],\r\n #[13, '#705898'],\r\n #[14, '#705848'],\r\n #[15, '#B8B8D0'],\r\n #[16, '#A8A878'],\r\n #[17, '#EE99AC']])\r\n\r\n df_palette.columns = ['labels', 'typecolor']\r\n self.tweet_dataframe.merge(df_palette, on = 'labels')\r\n\r\n #Divide up the tsne data\r\n\r\n plot_list = []\r\n\r\n for idx, (label, color) in df_palette.iterrows():\r\n\r\n df_filter = self.tweet_dataframe[self.tweet_dataframe['labels'] == label]\r\n \r\n df_filter['custom_text'] = df_filter[['username', 'text']].apply('<br />'.join, axis = 1) \r\n sentiment_boxplot = go.Box(\r\n x = df_filter['vader_polarity'],\r\n name = \"{}\".format(topics[label]),\r\n #text = pd.Series(self.tweet_dataframe['text']),\r\n boxmean = True,\r\n jitter = .5,\r\n boxpoints = 'all',\r\n hoverinfo = 'x+text',\r\n text = df_filter['custom_text'],\r\n marker = dict(color = color) \r\n )\r\n plot_list.append(sentiment_boxplot) \r\n\r\n # Override plotly \r\n axis_layout = dict(zeroline=False, showaxeslabels=False, autotick = True, ticks = '', showticklabels=False, title='')\r\n\r\n layout = go.Layout(\r\n yaxis = axis_layout,\r\n hovermode = \"closest\",\r\n title = \"Sentiment distribution per topic\",\r\n showlegend = True)\r\n\r\n fig = dict(data=plot_list, layout=layout)\r\n #plot_url = py.plot(fig)\r\n offline_plot.plot(fig, filename='data/sentiment_boxplot.html', auto_open = False)\r\n\r\n return plot_list, layout", "def _make_ts_traces(ts_agent_list):\n # create traces for plots\n makespans_traces = [\n go.Scatter(x=[ts_agent.min_makespan_coordinates[0] for ts_agent in ts_agent_list],\n y=[ts_agent.min_makespan_coordinates[1] for ts_agent in ts_agent_list], mode='markers',\n name='best makespans')\n ]\n\n nh_sizes_traces = []\n tl_sizes_traces = []\n\n for i, ts_agent in enumerate(ts_agent_list):\n x_axis = list(range(ts_agent.benchmark_iterations))\n makespans_traces.append(\n go.Scatter(x=x_axis, y=ts_agent.seed_solution_makespan_v_iter, name=f'TS trace {i}'))\n nh_sizes_traces.append(\n go.Scatter(x=x_axis, y=ts_agent.neighborhood_size_v_iter, name=f'TS trace {i}'))\n tl_sizes_traces.append(go.Scatter(x=x_axis, y=ts_agent.tabu_size_v_iter, name=f'TS trace {i}'))\n\n # create layouts for plots\n makespans_layout = dict(title='Seed Solution Makespan vs Iteration',\n xaxis=dict(title='Iteration'),\n yaxis=dict(title='Makespans (minutes)'))\n nh_sizes_layout = dict(title='Neighborhood size vs Iteration',\n xaxis=dict(title='Iteration'),\n yaxis=dict(title='Size of Neighborhood'))\n tl_sizes_layout = dict(title='Tabu list size vs Iteration',\n xaxis=dict(title='Iteration'),\n yaxis=dict(title='Size of Tabu list'))\n\n return makespans_traces, makespans_layout, nh_sizes_traces, nh_sizes_layout, tl_sizes_traces, tl_sizes_layout" ]
[ "0.64221996", "0.6253128", "0.61704546", "0.61357796", "0.59865403", "0.5960612", "0.5946181", "0.59333766", "0.585216", "0.58064705", "0.5785414", "0.576179", "0.5730726", "0.57133067", "0.57080656", "0.5644872", "0.56262666", "0.5618747", "0.559435", "0.559427", "0.5580006", "0.55750346", "0.5541752", "0.5540376", "0.5534994", "0.55263966", "0.5507158", "0.5504196", "0.5503616", "0.5490828" ]
0.715962
0
Since virtual steppers are virtual, we don't need pins or step sequences. We're still using delay and n_steps to resemble physical steppers.
def __init__(self, name = None, n_steps = 256, delay = 1e-3):
    self.fig, self.ax = plt.subplots(figsize=(3, 3))
    self.n_steps = n_steps
    self.delay = delay
    self.step_size = 2 * pi / self.n_steps
    if name is None:
        self.name = 'Stepper {}'.format(VirtualStepper.count + 1)
    self.angle = 0.0
    self.check()
    self.inv = False
    VirtualStepper.count += 1
    plt.ion()
    plt.show()
    self.draw()
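A minimal usage sketch for the constructor above; it assumes matplotlib is available, uses the VirtualStepper class name from this record, and calls the rotate_by method shown in the following record.
    from math import pi

    stepper = VirtualStepper()   # auto-named 'Stepper 1' via the class counter
    stepper.rotate_by(pi / 2)    # quarter turn clockwise, drawn in the live figure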
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def simulation_step(self):\n if not self.np_trajectory.size:\n #No trajectory to go to.....\n return\n closest_ind = self.find_closest_trajectory_pose()\n ref_ind = (closest_ind + 30) # closest_ind + numpy.round(self.v / 4)\n traj_len = len(self.np_trajectory[0])\n if self.loop is True:\n ref_ind = ref_ind % traj_len\n else:\n if ref_ind > traj_len-1:\n ref_ind = traj_len-1\n if closest_ind == traj_len-1:\n self.at_dest = True\n else:\n ref_ind = closest_ind\n ref_state = self.np_trajectory[:, int(ref_ind)]\n\n # update vehicle state.\n '''if self.class_name == 'TruckVehicle':\n self.update_vehicle_state_qualisys()\n self.UDP_receive()\n if self.data == \"-1.00\":\n self.set_control_commands_pp(ref_state, ref_ind)\n else:\n steer = int(self.data[-6:-3])\n throttle = int(self.data[:-6]) + 5\n hw_port.set_command(throttle,steer,2)\n self.update_truck_hardware()\n else:\n self.set_control_commands(ref_state)\n self.update_vehicle_state()'''\n\n self.set_control_commands(ref_state, ref_ind)\n self.update_vehicle_state()\n\n # publish vehicle state.\n vehicle_state = msgs.VehicleState(self.vehicle_id, self.class_name,\n self.x, self.y, self.yaw, self.v)\n self.pub_state.publish(vehicle_state)\n self.update_current_node()\n\n #The way that the stop light waiting works, this is necessary\n if not self.waiting_at_stop:\n self.check_for_traffic_light()\n self.get_traffic()", "def SetStepDelay(self,delay=200): \n self.Bus.Transaction(chr(self.Address)+chr(0x43)+chr(delay))", "def get_next_steps(self, steps):\n for step in range(steps):\n # Actual calulation: Runge-Kutta 2\n\n # Step 1\n k1 = [\n self.vel * self.dt,\n self.get_next_acc() * self.dt\n ]\n\n # Step 2\n next_pos = self.pos + k1[0] * 0.5\n next_vel = self.vel + k1[1] * 0.5\n self.disps, self.dists = self.get_relative_distances(positions=next_pos)\n k2 = [\n next_vel * self.dt,\n self.get_next_acc(save=False) * self.dt\n ]\n\n # Step 3\n next_pos = self.pos + k2[0] * 0.5\n next_vel = self.vel + k2[1] * 0.5\n self.disps, self.dists = self.get_relative_distances(positions=next_pos)\n k3 = [\n next_vel * self.dt,\n self.get_next_acc(save=False) * self.dt\n ]\n\n # Step 4\n next_pos = self.pos + k3[0]\n next_vel = self.vel + k3[1]\n self.disps, self.dists = self.get_relative_distances(positions=next_pos)\n k4 = [\n next_vel * self.dt,\n self.get_next_acc(save=False) * self.dt\n ]\n\n # Move forward\n self.pos = self.pos + 1/6 * (k1[0] + 2*k2[0] + 2*k3[0] + k4[0])\n self.vel = self.vel + 1/6 * (k1[1] + 2*k2[1] + 2*k3[1] + k4[1])\n\n # Saving of statistics\n self.save_system_information(self.pos, self.vel)", "def gen_random_walk(self,n_step=100):\n # Warning about the small number of steps\n if n_step < 30:\n print(\"WARNING! The number of steps is small. 
It may not generate a good stochastic process sequence!\")\n \n w = np.ones(n_step)*self.x0\n \n for i in range(1,n_step):\n # Sampling from the Normal distribution with probability 1/2\n yi = np.random.choice([1,-1])\n # Weiner process\n w[i] = w[i-1]+(yi/np.sqrt(n_step))\n \n return w", "def fixed_steps_trajectories(self, noise=0, nt=1, ll=0.1, limit=None):\n\n print('Generating Trajectories...')\n for i in tqdm.tqdm(range(self.ntraj)):\n\n if self.hop_distribution == 'gaussian' or self.hop_distribution == 'Gaussian':\n z_position = np.cumsum(\n np.random.normal(loc=0, scale=self.hop_sigma, size=self.nsteps)) # accumulate gaussian steps\n else:\n sys.exit('Please enter a valid hop distance probability distribution')\n\n self.trajectories[i, :, 1] = z_position - z_position[0] # make initial z equal to 0\n\n # hop at random time intervals according to one of the following PDFs\n if self.dwell_distribution == 'exponential':\n time = sampling.random_exponential_dwell(self.lamb, size=self.nsteps)\n elif self.dwell_distribution == 'power':\n time = sampling.random_power_law_dwell(1 + self.alpha, size=self.nsteps, ll=ll, limit=limit)\n else:\n sys.exit('Please enter a valid dwell time probability distribution')\n\n time = np.cumsum(time) # accumulate dwell times\n time -= time[0]\n\n self.trajectories[i, :, 0] = time\n\n # Add to array with all corners of hop distribution for visualization purposes\n self.trajectory_hops[i, 1::2, 0] = time[1:]\n self.trajectory_hops[i, 2::2, 0] = time[1:]\n\n self.trajectory_hops[i, ::2, 1] = self.trajectories[i, :, 1]\n self.trajectory_hops[i, 1:-1:2, 1] = self.trajectories[i, :-1, 1]\n self.trajectory_hops[i, -1, 1] = self.trajectories[i, -1, 1]\n\n print('Interpolating Trajectories...')\n # make uniform time intervals with the same interval for each simulated trajectory\n max_time = np.min(self.trajectories[:, -1, 0])\n self.time_uniform = np.linspace(0, max_time, self.nsteps*10)\n\n if nt > 1:\n # self.pbar = tqdm.tqdm(total=self.ntraj)\n pool = Pool(nt)\n for i, t in enumerate(pool.map(self.interpolate_trajectories, range(self.ntraj))):\n self.z_interpolated[i, :] = t\n else:\n for t in tqdm.tqdm(range(self.ntraj)):\n self.z_interpolated[t, :] = self.trajectories[t, np.digitize(self.time_uniform,\n self.trajectories[t, :, 0], right=False) - 1, 1]\n #self.z_interpolated[t, :] = self.interpolate_trajectories(t, noise=noise)", "def simulationTwoDrugsDelayedTreatment(numTrials):\n # TODO", "def get_steps_num():\n return 0", "def create_step_samples(self):\n pass # Deferred to subclasses\n\n \"\"\" Example using pod height:\n start_value = self.sim.pod.last_height\n end_value = self.sim.pod.height\n\n # Lerp values to get samples\n samples = start_value + self.step_lerp_pcts * (end_value - start_value) # Or use self.lerp(start_value, end_value), but doing it directly is faster since no function call\n if self.noise_scale > 0:\n # Add gaussian noise if specified\n return samples + np.random.normal(0.0, noise_scale, len(samples))\n else:\n # No noise\n return samples \n \"\"\"", "def beam_step(self, paths, extra):\n h_i, dec_out, context, src_mask = extra\n last = paths[:, :, -1].view(1, -1)\n dec_out, h_i = self.decode_rnn(context, h_i, dec_out, last, src_mask)\n probs = self.output(dec_out)\n dec_out = dec_out.squeeze(0)\n return probs, (h_i, dec_out, context, src_mask)", "def simulate():\n # Simulation set-up\n end_time = 50\n ts = numpy.linspace(0, end_time, end_time*10)\n dt = ts[1]\n dt_control = 1\n assert dt <= dt_control\n\n bioreactor, lin_model, K, _ = 
sim_base.get_parts(dt_control=dt_control)\n\n # Initial values\n us = [numpy.array([0.06, 0.2])]\n xs = [bioreactor.X.copy()]\n ys = [bioreactor.outputs(us[-1])]\n\n biass = []\n\n t_next = 0\n for t in tqdm.tqdm(ts[1:]):\n if t > t_next:\n U_temp = us[-1].copy()\n if K.y_predicted is not None:\n biass.append(lin_model.yn2d(ys[-1]) - K.y_predicted)\n\n u = K.step(lin_model.xn2d(xs[-1]), lin_model.un2d(us[-1]), lin_model.yn2d(ys[-1]))\n U_temp[lin_model.inputs] = lin_model.ud2n(u)\n us.append(U_temp.copy())\n t_next += dt_control\n else:\n us.append(us[-1])\n\n bioreactor.step(dt, us[-1])\n outputs = bioreactor.outputs(us[-1])\n ys.append(outputs.copy())\n xs.append(bioreactor.X.copy())\n\n ys = numpy.array(ys)\n us = numpy.array(us)\n biass = numpy.array(biass)\n\n print('Performance: ', sim_base.performance(ys[:, lin_model.outputs], lin_model.yd2n(K.ysp), ts))\n\n return ts, ys, lin_model, K, us, dt_control, biass, end_time", "def skipp(self):\n for x in range(4):\n self.fwd(right=100, left=100)\n time.sleep(.5)\n self.servo(1000)\n time.sleep(.1)\n self.servo(2000)\n time.sleep(.1)\n self.fwd(right=-100, left=-100)\n time.sleep(.1)\n self.servo(-1000)\n self.stop()", "def train_loop_pre(self, current_step):\r\n pass", "def getSteps():", "def simulate(self, n, dt=None):\n for _ in range(n):\n self.step(dt)", "def steps(self,num_steps):\n if self.last_sensation == TERMINAL_STATE:\n self.start_episode()\n for step in range(num_steps):\n next_sensation,reward = self.env(self.next_action)\n self.collect_data(self.last_sensation, self.next_action, reward, next_sensation)\n self.next_action = self.agent(next_sensation,reward)\n self.last_sensation = next_sensation\n if self.last_sensation == TERMINAL_STATE:\n self.start_episode()", "def simulationTwoDrugsDelayedTreatment():\n\n # TODO", "def train_step(self):\n pass", "def simulate(self):\r\n\r\n for index in tqdm(range(self.steps)):\r\n\r\n S = 0.1 - 0.1 / self.steps * (index + 1)\r\n T = 0.5 / (np.log(2 + 0.2 * index))\r\n\r\n self.move(T, S)\r\n self.t_change.append(T)\r\n self.s_change.append(S)\r\n tot = calculate_total_energy(self.current_config)\r\n self.energies.append(tot)", "def _step(self) -> None:", "def step(self, rotor_speeds):\n reward = 0\n pose_all = []\n for _ in range(self.action_repeat):\n done = self.sim.next_timestep(rotor_speeds) # update the sim pose and velocities\n reward += self.get_reward(done) \n pose_all.append(self.sim.pose)\n v_length = self.vector_length(self.sim.v)\n self.max_v_length = self.max_v_length if self.max_v_length > v_length else v_length\n self.distance_to_target = self.vector_length(self.target_pos - self.sim.pose[:3])\n next_state = np.concatenate(pose_all)\n return next_state, reward, done", "def simulate(self, ntrs):\n self.trtimes = list(np.arange(ntrs)*self.expectedtr)", "def step(self, steps):\n self._simulate(endStep=self.currentStep+steps)", "def step(self, state):", "def step(self, count, direction):\n for x in range(count):\n for bit in self.mode[::direction]:\n self.pin1.value(bit[0])\n self.pin2.value(bit[1])\n self.pin3.value(bit[2])\n self.pin4.value(bit[3])\n time.sleep(DELAY)\n self.reset()", "def generate(self, num_timesteps):\n self.north_arrivals = []\n self.south_arrivals = []\n self.east_arrivals = []\n self.west_arrivals = []\n self.total_cars = 0\n\n north_south = np.random.poisson(15)/50\n east_west = .5-north_south\n\n for i in range(num_timesteps):\n if i% 10==0:\n north_south = np.random.poisson(15)/50\n east_west = .5-north_south\n\n # Used to determine if a new car is 
added\n chance_token = random.random() \n\n # North South\n if chance_token <= north_south:\n self.north_arrivals.append(1)\n self.south_arrivals.append(1)\n self.total_cars += 2\n else:\n self.north_arrivals.append(0)\n self.south_arrivals.append(0)\n\n # East West\n if chance_token <= east_west:\n self.east_arrivals.append(1)\n self.west_arrivals.append(1)\n self.total_cars += 2\n else:\n self.east_arrivals.append(0)\n self.west_arrivals.append(0)", "def simulate_system(self, state, input, time_step = 0.01):\n\n #set arm position to x\n self.arm.reset(q=state[0:3],dq=state[3:6])\n\n #apply the control signal\n self.arm.apply_torque(input,time_step)\n\n #get the next step from the arm \n xnext = np.append(np.copy(self.arm.q),np.copy(self.arm.dq))\n\n return xnext", "def simulate_system(self, state, input, time_step = 0.01):\n\n #set arm position to x\n self.arm.reset(q=state[0:3],dq=state[3:6])\n\n #apply the control signal\n self.arm.apply_torque(input,time_step)\n\n #get the next step from the arm \n xnext = np.append(np.copy(self.arm.q),np.copy(self.arm.dq))\n\n return xnext", "def step(self, steps):\n nSteps = abs(steps)\n for s in xrange(0,nSteps):\n if (self.stepperMode==FULL_STEP):\n phase = s%4\n if (steps>0):\n self._fireSignal(self.A0,self.A1, self.fullStepCoilA[phase])\n self._fireSignal(self.B0,self.B1, self.fullStepCoilB[phase])\n else :\n self._fireSignal(self.A0,self.A1, self.fullStepCoilB[phase])\n self._fireSignal(self.B0,self.B1, self.fullStepCoilA[phase])\n sleep(self.delayLength)\n\n elif (self.stepperMode==HALF_STEP):\n phase = s%8\n if (steps>0):\n self._fireSignal(self.A0,self.A1, self.halfStepCoilA[phase])\n self._fireSignal(self.B0,self.B1, self.halfStepCoilB[phase])\n else :\n self._fireSignal(self.A0,self.A1, self.halfStepCoilB[phase])\n self._fireSignal(self.B0,self.B1, self.halfStepCoilA[phase])\n sleep(self.delayLength)", "def turn_steps(self, steps, delay_ms=1):\n if steps < 0:\n direction = -1\n else:\n direction = 1\n for _ in range(abs(int(steps))):\n self.current_step += direction\n element = STEP_ELEMENTS[self.current_step % N_STEP_ELEMENTS ]\n self.set_bits(element)\n time.sleep_ms(delay_ms)", "def run(self):\n\n # initializing random network activity\n s_rand_T = np.zeros((self.T, self.N_rand))\n p_rand_T = np.zeros((self.T, self.N_rand))\n r_rand_T = np.zeros((self.T, self.N_rand))\n\n s_rand_T[0, :] = np.random.uniform(low=0, high=0.01, size=(self.N_rand))\n\n # initializing sensory networks\n s_sens_T = np.zeros((self.T, self.N_sensory_nets * self.N_sensory))\n p_sens_T = np.zeros((self.T, self.N_sensory_nets * self.N_sensory))\n r_sens_T = np.zeros((self.T, self.N_sensory_nets * self.N_sensory))\n s_sens_T[0, :] = np.random.uniform(low=0, high=0.01, size=(self.N_sensory_nets * self.N_sensory))\n\n # extend input to be T timesteps and only nonzero for 100 ts\n s_ext_T = np.broadcast_to(self.s_ext, (self.T, self.N_sensory * self.N_sensory_nets)).copy()\n # stimulus is presented for 100 ms\n stim_T = int(200/self.rand_net.dt)\n s_ext_T[:100] = 0\n s_ext_T[100+stim_T:] = 0\n # s_ext_T *= 0\n\n for t in range(1, self.T):\n if (t + 1) % 100 == 0:\n print(f'step {t} of {self.T}')\n s_sens_prev = s_sens_T[t - 1]\n s_rand_prev = s_rand_T[t - 1]\n p_rand_prev = p_rand_T[t - 1]\n s_ext = s_ext_T[t - 1]\n step = self.forward(s_ext=s_ext, s_rand_prev=s_rand_prev, s_sens_prev=s_sens_prev, p_rand_prev=p_rand_prev)\n s_sens_T[t] = step['s_sens']\n p_sens_T[t] = step['p_sens']\n r_sens_T[t] = step['r_sens']\n s_rand_T[t] = step['s_rand']\n r_rand_T[t] = 
step['r_rand']\n p_rand_T[t] = step['p_rand']\n\n p_sens_T = p_sens_T.reshape(self.T, self.N_sensory_nets, self.N_sensory)\n s_ext_T = s_ext_T.reshape(self.T, self.N_sensory_nets, self.N_sensory)\n r_sens_T = r_sens_T.reshape(self.T, self.N_sensory_nets, self.N_sensory)\n s_sens_T = s_sens_T.reshape(self.T, self.N_sensory_nets, self.N_sensory)\n\n return dict(\n n_sensory=self.N_sensory,\n n_rand=self.N_rand,\n mus=self.mus,\n sigma=self.sigma,\n s_ext=s_ext_T,\n s_sens=s_sens_T,\n r_sens=r_sens_T,\n p_sens=p_sens_T,\n s_rand=s_rand_T,\n r_rand=r_rand_T,\n p_rand=p_rand_T\n )" ]
[ "0.61042655", "0.58270293", "0.57595366", "0.5680195", "0.566837", "0.56112635", "0.5599372", "0.55904734", "0.5564814", "0.55495346", "0.55369097", "0.5532785", "0.552632", "0.55011874", "0.5483982", "0.5469438", "0.5464263", "0.54594064", "0.54537576", "0.5439023", "0.5409216", "0.5398548", "0.53962415", "0.53957087", "0.5386075", "0.5384714", "0.5384714", "0.5383385", "0.53704345", "0.53636825" ]
0.5976446
1
Rotate the stepper by this angle (radians unless specified). Positive angles rotate clockwise, negative angles rotate counterclockwise.
def rotate_by(self, angle, degrees = False):
    target = angle * pi / 180 if degrees else angle
    if self.inv:
        target = -target
    if target > 0:
        n = int(target // self.step_size) + 1
        for _ in range(n):
            self.step_c()
    else:
        n = int(-target // self.step_size) + 1
        for _ in range(n):
            self.step_cc()
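A short call sketch for rotate_by above; the stepper instance is assumed to be the VirtualStepper from the previous record.
    from math import pi

    stepper.rotate_by(pi)                  # half turn clockwise
    stepper.rotate_by(-45, degrees=True)   # 45 degrees counterclockwise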
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rotate_rad(self, angle):\n self.beam_angle += angle\n self.xy = rotate(self.xy, angle)\n self.angle += angle", "def rotate(self, direction):\n electro = pygame.mixer.Sound('resources/Electro_Motor.wav')\n electro.set_volume(0.2)\n self.rotation += min(max(direction, -1), 1)\n if self.rotation >= 4:\n self.rotation = 0\n elif self.rotation <= -1:\n self.rotation = 3\n if self.speakers:\n self.speakers.play(electro)\n new_turn = \"r={}\".format(self.rotation)\n self._call_gamelog_callbacks(new_turn)", "def rotate(self, angle):\n old_angle, tilt = self.rotation\n new_angle = old_angle + angle\n while new_angle > 90:\n new_angle = new_angle - 90\n while angle < -90:\n new_angle = new_angle + 90\n self.rotation = (new_angle, tilt)", "def rotate_turtle(angle, mv_direction):\n \n if mv_direction == 1:\n turtle.right(angle)\n else:\n turtle.left(angle)", "def rotate(self, angle):\n n, a = Vector.polar([self.x, self.y])\n a += angle\n self.x = n * cos(a)\n self.y = n * sin(a)", "def rotate(self,angle):\n origin = copy.deepcopy(self._current)\n\n q = [origin.orientation.x,\n origin.orientation.y,\n origin.orientation.z,\n origin.orientation.w] # quaternion nonsense\n\n (roll, pitch, yaw) = euler_from_quaternion(q)\n\n atTarget=False\n\n currentAngle=yaw\n angle=angle+currentAngle\n\n if(angle==currentAngle):\n w=0\n elif(angle>currentAngle):\n w=1\n elif(angle<currentAngle):\n w=-1\n\n move_msg=Twist()\n move_msg.linear.x=0\n move_msg.angular.z=w\n\n\n stop_msg =Twist()\n stop_msg.linear.x=0\n stop_msg.angular.z=0\n\n while(not atTarget and not rospy.is_shutdown()):\n if(currentAngle>=angle):\n atTarget=True\n self._vel_pub.publish(stop_msg)\n print('rotate: stoped')\n else:\n origin = copy.deepcopy(self._current)\n\n q = [origin.orientation.x,\n origin.orientation.y,\n origin.orientation.z,\n origin.orientation.w] # quaternion nonsense\n\n (roll, pitch, yaw) = euler_from_quaternion(q)\n\n currentAngle=yaw\n self._vel_pub.publish(move_msg)\n rospy.sleep(.15)\n print('rotate: moving')\n print('angle: '+str(angle)+'currentAngle: '+str(currentAngle))", "def rotate(self, angle):\n self.call('rotate', angle)", "def rotate(self, clockwise=True):\n\t\tsign = 1 if clockwise else -1\n\t\tangle = self.ROTATION_SPEED * sign\n\t\tself.direction.rotate_ip(angle)", "def srotate(self, angle):\n\n self.angle = self.angle + angle", "def _rotate(self, angle):\n if self.undobuffer:\n self.undobuffer.push((\"rot\", angle, self._degreesPerAU))\n angle *= self._degreesPerAU\n neworient = self._orient.rotate(angle)\n tracing = self.screen._tracing\n if tracing == 1 and self._speed > 0:\n anglevel = 3.0 * self._speed\n steps = 1 + int(abs(angle)/anglevel)\n delta = 1.0*angle/steps\n for _ in range(steps):\n self._orient = self._orient.rotate(delta)\n self._update()\n self._orient = neworient\n self._update()", "def rotate_clockwise(self, angle):\r\n angle = degrees_to_radians(angle)\r\n current_angle = atan(self.x / self.y)\r\n angle += current_angle\r\n\r\n length = self.length\r\n self.x = length*sin(angle)\r\n self.y = length*cos(angle)", "def steps_to_angle():\n pass", "def right(self, angle):\r\n self.rotation += angle", "def clockwise_rotate(self, speed):\n\t\tif self._last_dir != 'c': # \"c\" indicates that the last rotation of this wheel was clockwise.\n\t\t\tGPIO.output(self._dir_pin_1, GPIO.HIGH)\n\t\t\tGPIO.output(self._dir_pin_2, GPIO.LOW)\n\t\t\tself._last_dir = 'c'\n\n\t\tself._current_dc_val = speed\n\t\tif self._current_dc_val != self._last_dc_val:\n\t\t\tself._motor_pwm.ChangeDutyCycle(speed) # 0.0 
- 100.0\n\t\t\tself._last_dc_val = self._current_dc_val", "def _rotate(self, angle):\n angle *= self._degreesPerAU\n self._orient = self._orient.rotate(angle)", "def rotate(self,direction, speed=50):\n if direction == 1: \n self.leftMotor.run(Adafruit_MotorHAT.FORWARD)\n self.rightMotor.run(Adafruit_MotorHAT.BACKWARD)\n self.leftMotor.setSpeed(speed)\n self.rightMotor.setSpeed(speed)\n if direction == -1:\n self.leftMotor.run(Adafruit_MotorHAT.BACKWARD)\n self.rightMotor.run(Adafruit_MotorHAT.FORWARD)\n self.leftMotor.setSpeed(speed)\n self.rightMotor.setSpeed(speed)\n if direction == 0:\n self.leftMotor.setSpeed(0)\n self.rightMotor.setSpeed(0)\n self.leftMotor.run(Adafruit_MotorHAT.RELEASE)\n self.rightMotor.run(Adafruit_MotorHAT.RELEASE)", "def rotate(self, direction, speed):\n self.motor_A(direction, speed)\n self.motor_B(direction * (-1), speed)", "def rotate(self):\n pass", "def rotate(self,amount):\n self.angle += amount\n if self.drawn == True:\n self.draw()", "def rotate(self, radians):\n self._impl.rotate(radians)", "async def rotate(self, angle: float, duration: float) -> None:\n angle *= self._ratio\n if duration < 0:\n raise ValueError\n if angle == 0:\n if duration > 0:\n await asyncio.sleep(duration)\n return\n if duration == 0 or angle / duration > self._max_speed:\n duration = abs(angle / self._max_speed)\n start = time.perf_counter()\n sequence_count = 0\n if angle > 0:\n plus_minus = 1\n else:\n plus_minus = -1\n # Times 2 because half-step\n steps = 2 * abs(int(float(angle) / 360 * self.STEPS_PER_REV))\n for i in range(steps):\n for pin in range(4):\n current_pin = self._pins[pin]\n if self.SEQUENCE[sequence_count][pin] != 0:\n GPIO.output(current_pin, True)\n else:\n GPIO.output(current_pin, False)\n sequence_count += plus_minus\n # If we reach the end of the sequence start again\n if sequence_count == self.rotation_seq_count:\n sequence_count = 0\n if sequence_count < 0:\n sequence_count = self.rotation_seq_count - 1\n # Wait to match entered duration\n wait = (float(i) / steps * duration) - (time.perf_counter() - start)\n if wait > 0:\n await asyncio.sleep(wait)\n for pin in self._pins:\n GPIO.output(pin, False)", "def rotate90(self):", "def _rotate(self, angles, dj_matrix=None):\n if dj_matrix is None:\n dj_matrix = djpi2(self.lmax + 1)\n self.coeffs = SHRotateRealCoef(self.coeffs, angles, dj_matrix)", "def _rotate(self, angles, dj_matrix=None):\n if dj_matrix is None:\n dj_matrix = djpi2(self.lmax + 1)\n self.coeffs = SHRotateRealCoef(self.coeffs, angles, dj_matrix)", "def rotate_left_right(self):\n\t\treturn", "def rotate(self, angle):\n perp = TwoDV(-self[1], self[0])\n angle = angle * math.pi / 180.0\n c, s = math.cos(angle), math.sin(angle)\n return TwoDV(self[0]*c+perp[0]*s, self[1]*c+perp[1]*s)", "def rel_angle(self, angle):\n steps = int(angle / 360 * self.steps_per_rev)\n self.steps(steps)", "def rotate_to(self, angle, degrees = False):\n\t\ttarget = angle * pi / 180 if degrees else angle\n\n\t\tcurr = self.angle\n\t\tdiff = (target - curr) % (2*pi)\n\t\tif abs(diff - (2*pi)) < diff:\n\t\t\tdiff = diff - (2*pi)\n\t\tself.rotate_by(diff)", "def rotate(self, axis, theta):\n return NotImplemented", "def turn_by(self, dangle, dt):\n # Don't turn too fast\n self.angle += np.clip(dangle, -dt * self.turning_rate, dt * self.turning_rate)\n\n # Keep angle in range [-pi, pi)\n self.angle = normalize_angle(self.angle)" ]
[ "0.7070411", "0.7032392", "0.6987201", "0.6970376", "0.69328016", "0.6915016", "0.6913845", "0.68389475", "0.68369746", "0.682694", "0.6704316", "0.6675216", "0.6641125", "0.66407424", "0.66319656", "0.66140467", "0.65792656", "0.65759706", "0.6568908", "0.6567404", "0.6517435", "0.65064853", "0.64987177", "0.64987177", "0.649342", "0.6484487", "0.64514595", "0.64414734", "0.6430149", "0.6406284" ]
0.730979
0
convert csv into numpy
def csv_2_numpy(file, path=INPUT_PATH, sep=',', type='int8'):
    file_path = path + file
    reader = csv.reader(open(file_path, "r"), delimiter=sep)
    x = list(reader)
    dataset = numpy.array(x).astype(type)
    return dataset
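A minimal call sketch for csv_2_numpy above; the imports, the INPUT_PATH value, and the file name are illustrative assumptions, not taken from the stored row.
    import csv
    import numpy

    INPUT_PATH = './input/'   # assumed location of the CSV files
    labels = csv_2_numpy('train.csv', path=INPUT_PATH, type='int8')
    print(labels.shape, labels.dtype)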
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse(csvfilename):\r\n with open(csvfilename, 'r') as f:\r\n reader = csv.reader(f, delimiter=';')\r\n #reader = csv.reader(f, delimiter=';', quotechar=\"'\")\r\n data = list(reader)\r\n # transform data into numpy array\r\n data = np.array(data).astype(float)\r\n return data", "def csvToArray(filename):\n (num_rows, num_cols) = xFileInfo(filename)\n X = numpy.zeros((num_rows, num_cols), dtype=float) #[row_i][col_i] : float\n delim = getDelimiter(filename)\n f = open(filename, 'r')\n reader = csv.reader(f, delimiter=delim)\n for (row_i, row) in enumerate(reader):\n col_i = 0\n for val in row:\n if val: #ignore empty strings (e.g. at end of row)\n X[row_i, col_i] = float(val)\n col_i += 1\n f.close()\n return X", "def load_csv(fn):\n def iter_func():\n with open(fn, 'r') as infile:\n for line in infile:\n line = line.rstrip().split(',')\n for item in line:\n yield float(item)\n load_csv.rowlength = len(line)\n data = np.fromiter(iter_func(), dtype=float)\n data = data.reshape((-1, load_csv.rowlength))\n return data", "def load_file(file_name) -> np.ndarray:\r\n reader = csv.reader(open(file_name, \"r\"), delimiter=',')\r\n x_rdr = list(reader)\r\n return np.array(x_rdr).astype('float')", "def read_csv(path_to_file):\n position = []\n classification = []\n with open(path_to_file, 'r') as csv_file:\n reader = csv.reader(csv_file)\n next(reader, None) # skip the header\n\n for row in reader:\n position.append(np.array([float(row[0]), float(row[1])]))\n classification.append(float(row[2]))\n\n return np.array(position), np.array(classification, dtype='uint8')", "def load_data(csv_filename):\n data = np.genfromtxt(csv_filename, delimiter=\";\", skip_header=1, usecols=range(11))\n return data", "def load_metrics(fp):\r\n with open(fp) as csvfile:\r\n read = csv.reader(csvfile, delimiter=\",\", quotechar='\"')\r\n lst = []\r\n for i in read:\r\n new_row = i[0:2] + i[7:-1]\r\n lst.append(new_row)\r\n data = np.array(lst)\r\n return data", "def read_csv(path):\n rows = []\n with open(path) as csv_file:\n reader = csv.reader(csv_file)\n header = reader.next()\n if header[0].isdigit():\n print \"Warning: Discarding header that looks like numbers.\"\n for line in reader:\n rows.append(map(float, line))\n return np.array(rows)", "def load_csv(fichero):\r\n data = np.loadtxt(fichero, delimiter=',')\r\n X = data[:,:-1]\r\n y = data[:,-1]\r\n return X, y", "def load_csv(path):\n points = []\n with open(path, 'r') as infile:\n for line in infile:\n line = line.strip().split(',')\n entry = [int(line[0]), int(line[1]), int(line[2]), int(line[3])]\n points.append(entry)\n points = np.array(points)\n return points", "def load_data(self):\n\n data_pd = pd.read_csv(self.filename)\n return np.array(data_pd)", "def read(filename):\n records = Parser.__load_csv(filename)\n return np.array(records)", "def load_data_from_csv(f_name):\n data = []\n f = open(f_name, \"r\")\n reader = csv.reader(f,delimiter=\",\")\n for row in reader:\n data.append([float(i) for i in row])\n f.close()\n data = np.array(data)\n x = data[0,:]\n data = data[1:,:].swapaxes(0,1)\n return x, data", "def csvToVec(filename):\n X = csvToArray(filename)\n assert X.shape[0] == 1, 'file %s must have 1 row' % filename\n y = X[0,:]\n return y", "def read_csv():", "def import_data(fndata):\n with open(fndata, 'rb') as f:\n # split lines\n lsdata = [line.split(',') for line in f.read().splitlines()]\n # map to float\n lsdata = [map(float, row) for row in lsdata]\n\n # use numpy array\n arrdata = np.array(lsdata)\n\n return arrdata", "def 
load_CSV_data(path):\n return np.genfromtxt(os.path.join('data/traffic_data', path))", "def readData(fname):\n pd = pandas.read_csv(fname)\n return [numpy.array(pd[colname]) for colname in pd.columns[1:]]", "def read_data(path, filename, drop_col=\"index\", dt=\"float32\"):\n\tdata = pd.read_csv(path + filename, sep=\",\", dtype=dt)\n\tdata = data.drop(drop_col, axis=1)\n\treturn data.as_matrix()", "def line_to_data(line, np_array=True, dtype=int):\n if np_array:\n return np.fromstring(line, dtype=dtype, sep=\" \")\n else:\n return [dtype(x) for x in line.split(\" \")]", "def read_data(filepath, d = ','):\n return np.genfromtxt(filepath, delimiter=d, dtype=None)", "def load_csv(filename):\r\n dataset = list()\r\n with open(filename, 'r') as file:\r\n csv_reader = reader(file, delimiter='\\t')\r\n for row in csv_reader:\r\n if not row:\r\n continue\r\n dataset.append([float(i) for i in row])\r\n return dataset", "def readCSVasFloat(filename):\n returnArray = []\n lines = open(filename).readlines()\n for line in lines:\n line = line.strip().split(\",\")\n if len(line) > 0:\n returnArray.append(np.array([np.float32(x) for x in line]))\n\n returnArray = np.array(returnArray)\n return returnArray", "def get_data(filepath):\n with open(filepath, 'r') as f:\n lines = [l.strip().split(',') for l in f.readlines()]\n data_set = [np.array(l, dtype=float) for l in lines]\n return np.array(data_set)", "def csv2npy():\n train_p_reader = csv.reader(open(train_p_path))\n train_u_reader = csv.reader(open(train_u_path))\n\n p_list = []\n u_list = []\n\n for ele in tqdm.tqdm(islice(train_p_reader, 1, None)):\n p_list.append([float(i) for i in ele[1:]])\n for ele in tqdm.tqdm(islice(train_u_reader, 1, None)):\n u_list.append([float(i) for i in ele[1:]])\n\n p_npy = np.array(p_list)\n u_npy = np.array(u_list)\n np.save(\"./processed_data/train/raw/train_p.npy\", p_npy)\n np.save(\"./processed_data/train/raw/train_u.npy\", u_npy)\n print(u_npy[-50:])", "def parse_sas_data_line(line):\n cols = line.split()\n\n ncols = len(cols) \n \n if ncols < 2:\n data = np.array([],dtype=np.float)\n else:\n if ncols > 3:\n ncols = 3\n\n try:\n data = np.array(cols[0:ncols], dtype=np.float)\n except:\n data = np.array([],dtype=np.float)\n \n return data", "def import_data(address):\n try:\n inputcsv = csv.reader(open(address, \"r\"), delimiter=\";\", lineterminator=\"\\n\")\n except IOError:\n print \"File not exists or is unreadable, please check it.\"\n exit(1)\n\n data = list() # all data\n item = list() # each tabular\n count = 0\n subcount = 0\n try:\n for row in inputcsv:\n if count < 2 : # read Time period and number of product\n data.append(int(row[1]))\n else :\n item.append(row[1:])\n subcount +=1 \n if subcount == data[1]:\n data.append(np.array(item, dtype=float))\n item = list()\n subcount = 0\n count += 1\n if (data[1] > 1):\n data.append(np.array(item, dtype=float)) # manage the last tabular\n except:\n print \"File is not well formated, please correct it.\"\n exit(1)\n return data", "def read_to_np(path):\n data = [(int(user), int(item), float(rating))\n for user, item, rating in map(lambda r: r.split(','), read_lines(path, header=False))]\n shape = max(set(t[0] for t in data))+1, max(set(t[1] for t in data))+1 # get data shape (rows, columns)\n ratings = np.zeros(shape)\n for user, item, rating in data: # fill array with data\n ratings[user, item] = rating\n return ratings", "def loadCSV(input_file):", "def load_simulator_data(self, csvfname):\n data = []\n with open(csvfname, 'r') as csvfile:\n data_tmp = 
list(csv.reader(csvfile, delimiter=','))\n for row in data_tmp:\n x7 = [float(x) for x in row[7].split(':')]\n x8 = [float(x) for x in row[8].split(':')]\n\n data.append(((row[0], row[1], row[2]),\n np.array([float(row[3]), float(row[4]), float(row[5]), float(row[6])] + x7 + x8)))\n\n return data" ]
[ "0.7606892", "0.7327829", "0.727883", "0.7161398", "0.71550566", "0.69989276", "0.69635636", "0.68933666", "0.6836764", "0.6802852", "0.6801808", "0.67944103", "0.6787268", "0.67241", "0.6684425", "0.66639805", "0.6646328", "0.6636542", "0.6630825", "0.658941", "0.6581193", "0.6562403", "0.65378904", "0.653485", "0.65345865", "0.651143", "0.6469379", "0.64666927", "0.6459928", "0.6458817" ]
0.81163687
0
Builds a vocabulary mapping from word to index based on the sentences. Returns vocabulary mapping and inverse vocabulary mapping.
def build_vocab(sentences):
    # Build vocabulary
    word_counts = Counter(itertools.chain(*sentences))  # actually not used
    # Mapping from index to word
    vocabulary_inv = [x[0] for x in word_counts.most_common()]
    vocabulary_inv = list(sorted(vocabulary_inv))
    # add <UNK>
    vocabulary_inv.insert(0, '</s>')
    # Mapping from word to index
    vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}
    return [vocabulary, vocabulary_inv]
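A small usage sketch for build_vocab above; the sample sentences are made up for illustration, and Counter/itertools are the imports the function relies on.
    import itertools
    from collections import Counter

    sentences = [['the', 'cat', 'sat'], ['the', 'dog', 'ran']]
    vocabulary, vocabulary_inv = build_vocab(sentences)
    print(vocabulary['the'])   # word -> index
    print(vocabulary_inv[0])   # index -> word; index 0 is '</s>'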
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_vocab(self, sentences):\n\t\t# Build the vocab\n\t\tword_counts = collections.Counter(sentences)\n\n\t\t# Mapping from index to word (get the indices of most common words)\n\t\tvocab_inv = [x[0] for x in word_counts.most_common()] # Do we need this?\n\t\tvocab_inv = list(sorted(vocab_inv))\n\n\t\t# Mapping from word to index\n\n\t\tvocab = {x: i for i,x in enumerate(vocab_inv)}\n\n\t\treturn [vocab, vocab_inv]", "def build_vocab(sentences):\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences))\n # Mapping from index to word\n vocabulary_inv = [x[0] for x in word_counts.most_common()]\n # Mapping from word to index\n vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]", "def build_vocab(sentences):\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences))\n # Mapping from index to word\n vocabulary_inv = [x[0] for x in word_counts.most_common()]\n # Mapping from word to index\n vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]", "def build_vocab(sentences):\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences))\n # Mapping from index to word\n vocabulary_inv = [x[0] for x in word_counts.most_common()]\n # Mapping from word to index\n vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]", "def build_vocab(sentences):\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences))\n # Mapping from index to word\n vocabulary_inv = [x[0] for x in word_counts.most_common()]\n vocabulary_inv = list(sorted(vocabulary_inv))\n # Mapping from word to index\n vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]", "def build_vocab(sentences, saved_vocabulary_inv):\n if saved_vocabulary_inv:\n vocabulary_inv = saved_vocabulary_inv\n else:\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences))\n # Mapping from index to word\n vocabulary_inv = [x[0] for x in word_counts.most_common()]\n # Mapping from word to index\n vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]", "def build_vocab(sentences):\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences))\n # Mapping from index to word\n vocabulary_inv = [x[0] for x in word_counts.most_common()]\n vocabulary_inv.append('<pad>')\n vocabulary_inv = list(sorted(vocabulary_inv))\n # Mapping from word to index\n vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]", "def build_vocab(sentences):\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences))\n # Mapping from index to word\n vocabulary_inv = [x[0] for x in word_counts.most_common() if x[1] > 1]\n vocabulary_inv += ['$']\n vocabulary_inv = list(sorted(vocabulary_inv))\n # Mapping from word to index\n vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]", "def word2vec_mapping_func():\n return {\"belonging to\": \"belonging\", \"parked on\": \"parked\", \"growing on\": \"growing\", \"standing on\": \"standing\",\n \"made of\": \"made\", \"attached to\": \"attached\", \"hanging from\": \"hanging\", \"in front of\": \"front\",\n \"lying on\": \"lying\", \"flying in\": \"flying\", \"looking at\": \"looking\", \"on back of\": \"back\",\n \"laying on\": \"laying\", \"walking on\": \"walking\", \"walking in\": \"walking\", \"sitting on\": \"sitting\",\n \"covered in\": \"covered\", 
\"part of\": \"part\", \"painted on\": \"painted\", \"mounted on\": \"mounted\"}", "def _build_vocab(self, sentences, markers=[]):\n from snorkel.learning.pytorch.rnn.utils import SymbolTable\n\n vocab = Counter()\n for sent in sentences:\n for w in sent:\n vocab[w] += 1\n word_dict = SymbolTable()\n list(map(word_dict.get, vocab))\n list(map(word_dict.get, markers))\n return word_dict", "def construct_dict(self):\n i = 0\n self.word2idx = dict()\n fi = open(self.config.word_vec_fi_glove, 'r')\n\n for line in fi:\n self.word2idx[line.split(\" \")[0]] = i\n i += 1\n\n self.vocab_size = i\n self.write_dict()\n fi.close()", "def build_vocab(sentences, max_num_words):\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences)).most_common()\n if max_num_words != 0 and max_num_words < len(word_counts):\n word_counts = word_counts[:max_num_words]\n\n # Mapping from index to word\n vocabulary = dict()\n index = 0\n for x in word_counts:\n vocabulary[index] = x[0]\n index += 1\n\n return vocabulary", "def build_vocab(sentences, vocab_limit):\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences))\n print( 'Total size of vocab is {}'.format(len(word_counts.most_common())))\n # Mapping from index to word\n # vocabulary_inv = [x[0] for x in word_counts.most_common(vocab_limit)]\n vocabulary_inv = [x[0] for x in word_counts.most_common(vocab_limit)]\n \n vocabulary_inv = list(sorted(vocabulary_inv))\n # Mapping from word to index\n vocabulary = {x: i+1 for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]", "def make_idx2word():\n idx2word = {}\n d = train_data.shared['word2idx']\n for word, idx in d.items():\n print(word)\n idx2word[idx] = word\n if config.use_glove_for_unk:\n d2 = train_data.shared['new_word2idx']\n for word, idx in d2.items():\n print(word)\n idx2word[idx+len(d)] = word\n return idx2word", "def build_vocab(sentences_list, vocab_size, visual_fld):\n words = [word for sentence in sentences_list for word in sentence]\n utils.safe_mkdir(visual_fld)\n with open(os.path.join(visual_fld, 'vocab.tsv'), 'w') as fd:\n dictionary = {}\n index_dictionary = {}\n count = [('UNK', -1)]\n count.extend(Counter(words).most_common(vocab_size - 1))\n for index, (word, _) in enumerate(count):\n dictionary[word] = index\n index_dictionary[index] = word\n fd.write(word + '\\n')\n\n return dictionary, index_dictionary", "def get_sentence_to_context_map(sentences):\n # Load the vocab\n en_vocab = get_english_vocab(DATA_DIR,VOCAB_SIZE)\n\n # Allocate the sentences to buckets\n bucketed = {}\n for sentence in sentences:\n bucket_id = get_bucket(en_vocab,sentence)\n bucketed.setdefault(bucket_id,[])\n bucketed[bucket_id].append(sentence)\n\n mapped = {}\n with tf.Session() as sess:\n # Create model and load parameters.\n model = create_model(sess, True, train_dir=TRAIN_DIR)\n model.batch_size = BATCH_SIZE # We decode 64 sentence at a time.\n # Iterate over each bucket\n for bucket_id,sentences in bucketed.iteritems():\n for batch in chunker(sentences,BATCH_SIZE):\n data = []\n # Tokenize each sentence\n for sentence in batch:\n token_ids = data_utils.sentence_to_token_ids(tf.compat.as_bytes(sentence), en_vocab)\n expected_output = []\n data.append((token_ids, expected_output))\n # Use the model to obtain contexts for each sentence in the batch\n encoder_inputs, decoder_inputs, target_weights = model.get_batch({bucket_id: data}, bucket_id)\n contexts = model.step_context(sess, encoder_inputs, decoder_inputs, target_weights, bucket_id)\n features = 
np.hstack(contexts)\n print 'Encoded {0} sentences into {1} dimensional vectors'.format(*features.shape)\n # Now we align sentences with their contexts\n for i,sentence in enumerate(batch):\n mapped[sentence] = features[i,:].tolist()\n return mapped", "def create_vocabulary(sentences, path):\n print('creating vocab..')\n\n word_dict = dict(); vocabulary = dict()\n for sentence in sentences:\n for word in nltk.word_tokenize(sentence):\n if word not in word_dict:\n word_dict[word] = ''\n word_dict['<s>'] = ''\n word_dict['</s>'] = ''\n\n with open(path, encoding=\"utf8\") as f:\n for line in f:\n word, vec = line.split(' ', 1)\n if word in word_dict:\n vocabulary[word] = np.fromstring(vec, sep=' ')\n\n print('vocabulary was created successfully!')\n return vocabulary", "def word_mapping(sentences, lower):\n words = [[x[0].lower() if lower else x[0] for x in s] for s in sentences]\n dico = create_dico(words)\n dico['<UNK>'] = 10000000\n word_to_id, id_to_word = create_mapping(dico)\n print(\"Found %i unique words (%i in total)\" % (\n len(dico), sum(len(x) for x in words))\n )\n return dico, word_to_id, id_to_word", "def build_Wordv(word2vec_dict, k):\r\n vocab_size = len(word2vec_dict)\r\n word2id_dict = dict()\r\n W = np.zeros(shape=(vocab_size + 1, k))\r\n W[0] = np.zeros(k)\r\n i = 1\r\n for word in word2vec_dict:\r\n # print type(word), ' | ', word\r\n W[i] = word2vec_dict[word]\r\n # print type(W[i]), \" | \", W[i]\r\n word2id_dict[word] = i\r\n i += 1\r\n return W, word2id_dict", "def index2words(index_sentence, vcb_file):\n\n sentence = ''\n indx_dict = {}\n vcb = open(vcb_file).readlines()\n for line in vcb:\n line = line.split()\n indx_dict[int(line[0])] = line[1]\n\n for word in index_sentence:\n\n if word == -1:\n sentence += '_eps_' + ' '\n else:\n sentence += indx_dict[word] + ' '\n return sentence", "def build_idx(vocab):\n word2index = {}\n index2word = {}\n\n word2index['PAD'] = 0\n index2word[0] = 'PAD'\n\n word2index['UNK'] = 1\n index2word[1] = 'UNK'\n\n for i,word in enumerate(vocab):\n word2index[word.lower()] = i+2\n index2word[i+2] = word.lower()\n\n return word2index, index2word", "def build_vocab(words, vocab_size, visual_fld=None):\n utils.safe_mkdir(visual_fld)\n file = open(os.path.join(visual_fld, 'vocab.tsv'), 'w',encoding='utf8')\n\n dictionary = dict()\n count = [('UNK', -1)]\n index = 0\n count.extend(Counter(words).most_common(vocab_size - 1))\n\n for word, _ in count:\n dictionary[word] = index\n index += 1\n file.write(word + '\\n')\n\n index_dictionary = dict(zip(dictionary.values(), dictionary.keys()))\n file.close()\n return dictionary, index_dictionary", "def word_mapping(sentences, lower):\n words = [[x[0].lower() if lower else x[0] for x in s] for s in sentences]\n dico = create_dico(words)\n\n dico['<PAD>'] = 10000001\n dico['<UNK>'] = 10000000\n dico = {k:v for k,v in dico.items() if v>=3}\n word_to_id, id_to_word = create_mapping(dico)\n\n print(\"Found %i unique words (%i in total)\" % (\n len(dico), sum(len(x) for x in words)\n ))\n return dico, word_to_id, id_to_word", "def get_vocab(self):\n word2id = {}\n for document in self.docs:\n for word in document:\n if word not in word2id.keys():\n word2id[word] = len(word2id)\n return word2id", "def build_doc_sense_vec(self):\n\t\twith codecs.open(self.vocab_file, encoding='utf-8', mode='r') as infile:\n\t\t\tline = infile.readline()\n\t\t\ti = 0\n\t\t\twhile line:\n\t\t\t\tword = line.split()[0]\n\t\t\t\tif not self.word2IdVocabulary.has_key(word):\n\t\t\t\t\t# print i, word\n\t\t\t\t\t# 
else:\n\t\t\t\t\tself.word2IdVocabulary[word] = i\n\t\t\t\tif not self.id2WordVocabulary.has_key(i):\n\t\t\t\t\tself.id2WordVocabulary[i] = word\n\t\t\t\tline = infile.readline()\n\t\t\t\ti += 1\n\t\t\tself.vocab_num = len(self.word2IdVocabulary)\n\t\t\tprint \"vocabulary number:\" + str(self.vocab_num)\n\n\t\twith codecs.open(self.vec_file, encoding='utf-8', mode='r') as vecfile:\n\t\t\twith codecs.open(self.vec_out_file, encoding='utf-8', mode='a+') as vec_outfile:\n\n\t\t\t\tfor i, line in enumerate(vecfile):\n\t\t\t\t\tif i % 10000 == 0:\n\t\t\t\t\t\tprint i\n\t\t\t\t\t# if i > 72:\n\t\t\t\t\t# \tbreak\n\t\t\t\t\tif i == 0:\n\t\t\t\t\t\ta, b, c = map(int, line.split()[:3])\n\t\t\t\t\t\tprint('Number of sememes: {}\\n'\n\t\t\t\t\t\t\t 'Number of words: {}\\n'\n\t\t\t\t\t\t\t 'Dimension of vectors: {}'.format(a, b, c))\n\t\t\t\t\telif i > 462667:\n\t\t\t\t\t\tsline = line.strip('\\n').split()\n\t\t\t\t\t\tword = sline[0]\n\t\t\t\t\t\tvector_list = []\n\t\t\t\t\t\tvector_list.append(sline[1:])\n\t\t\t\t\t\tvector_array = np.array(vector_list)\n\t\t\t\t\t\tword_id = self.word2IdVocabulary[word]\n\t\t\t\t\t\tif not self.vectors.has_key(word_id):\n\t\t\t\t\t\t\tself.vectors[word_id] = vector_array\n\t\t\t\t\t\t# vector_mean = np.mean(vector_array, axis=0)\n\t\t\t\t\t\tif not self.vector_mean.has_key(word_id):\n\t\t\t\t\t\t\tself.vector_mean[word_id] = vector_array\n\t\t\t\t\t\t# vec_outfile.write(line)\n\t\t\t\t\telif i > 462887:\n\t\t\t\t\t\tbreak\n\t\t\t\t\telse:\n\t\t\t\t\t\tsline = line.strip('\\n').split()\n\t\t\t\t\t\tword = sline[0]\n\t\t\t\t\t\tsense_num = int(sline[1])\n\t\t\t\t\t\tvectors = sline[2:sense_num*c+2] # (sense_num*c+2)\n\t\t\t\t\t\tvector_list = []\n\t\t\t\t\t\tfor start in range(0, len(vectors), c):\n\t\t\t\t\t\t\tvector_list.append(list(map(float, vectors[start: start+c])))\n\t\t\t\t\t\tvector_array = np.array(vector_list)\n\t\t\t\t\t\tword_id = self.word2IdVocabulary[word]\n\t\t\t\t\t\tif not self.vectors.has_key(word_id):\n\t\t\t\t\t\t\tself.vectors[word_id] = vector_array\n\t\t\t\t\t\tvector_mean = np.mean(vector_array, axis=0)\n\t\t\t\t\t\tif not self.vector_mean.has_key(word_id):\n\t\t\t\t\t\t\tself.vector_mean[word_id] = vector_mean\n\t\t\t\t\t\t'''j = 0\n\t\t\t\t\t\tfor each_sense_vec in vector_array:\n\t\t\t\t\t\t\tif len(vector_array) > 1:\n\t\t\t\t\t\t\t\tnew_line = word + '_' + str(j) + ' ' + np.array2string(each_sense_vec, max_line_width=2000,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tformatter={'float_kind': lambda x: '%6f' % x})[1:-1] + '\\n'\n\t\t\t\t\t\t\t\tj += 1\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tnew_line = word + ' ' + np.array2string(each_sense_vec, max_line_width=2000,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t formatter={'float_kind': lambda\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t x: '%6f' % x})[1:-1] + '\\n'\n\n\t\t\t\t\t\t\tvec_outfile.write(new_line)'''\n\n\t\twith codecs.open(self.doc_file, encoding='utf-8', mode='r') as docfile:\n\t\t\twith codecs.open(self.doc_out_file, encoding='utf-8', mode='a+') as doc_outfile:\n\t\t\t\twith codecs.open(self.vec_out_file_bydoc, encoding='utf-8', mode='a+') as vec_outfile_bydoc:\n\t\t\t\t\tprint \"Processing document file......\"\n\t\t\t\t\tline = docfile.readline().strip('\\n')\n\t\t\t\t\twhile line:\n\t\t\t\t\t\twords = line.split()\n\t\t\t\t\t\tnew_words = [x for x in words]\n\t\t\t\t\t\tfor i in range(len(words)):\n\t\t\t\t\t\t\tword_id = self.word2IdVocabulary[words[i]]\n\t\t\t\t\t\t\tsense_vecs = self.vectors[word_id]\n\t\t\t\t\t\t\tsense_num = len(sense_vecs)\n\t\t\t\t\t\t\tif sense_num > 
1:\n\t\t\t\t\t\t\t\tcontext_words = []\n\t\t\t\t\t\t\t\tfor x in range(i-int(self.context_num), i+int(self.context_num)+1):\n\t\t\t\t\t\t\t\t\tif x != i and 0 <= x < len(words):\n\t\t\t\t\t\t\t\t\t\tcontext_words.append(words[x])\n\t\t\t\t\t\t\t\tsense_index = self.select_attention(context_words, sense_vecs)\n\t\t\t\t\t\t\t\tword_vec_i = sense_vecs[sense_index]\n\t\t\t\t\t\t\t\tnew_wordi = words[i] + '_' + str(sense_index)\n\t\t\t\t\t\t\t\tself.vector_word_doc[new_wordi.encode('utf-8')] = word_vec_i\n\t\t\t\t\t\t\t\tnew_words[i] = new_wordi\n\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tword_vec_i = sense_vecs[0]\n\t\t\t\t\t\t\t\tself.vector_word_doc[words[i].encode('utf-8')] = word_vec_i\n\t\t\t\t\t\t\tvec_outfile_bydoc.write(new_words[i] + ' ' + np.array2string(word_vec_i, max_line_width=2000,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t formatter={'float_kind': lambda x: '%6f' % x})[1:-1] + '\\n')\n\n\t\t\t\t\t\tdoc_outfile.write(' '.join(new_words) + '\\n')\n\n\t\t\t\t\t\tline = docfile.readline()\n\n\t\treturn self.vector_word_doc", "def inverted_word_index(idx):\n words, idxs = zip(*tokenizer.word_index.items())\n inverted_word_index = dict(zip(idxs, words))\n return inverted_word_index.get(idx)", "def convert_words_to_index(sentences_list, dictionary):\n return [[dictionary[word]\n if word in dictionary else 0\n for word in sentence] for sentence in sentences_list]", "def build_vocab(vocab_size, text_vector):\n vocab = Counter()\n for text in text_vector:\n for word in text.split(' '):\n vocab[word.lower()]+=1\n vocab = dict(vocab.most_common(vocab_size))\n return vocab", "def build_vocab(data):\n # data = _read_words(filename)\n counter = collections.Counter(data)\n # print('counter', counter) # dictionary for the occurrence number of each word, e.g. 'banknote': 1, 'photography': 1, 'kia': 1\n count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))\n # print('count_pairs',count_pairs) # convert dictionary to list of tuple, e.g. ('ssangyong', 1), ('swapo', 1), ('wachter', 1)\n words, _ = list(zip(*count_pairs))\n word_to_id = dict(zip(words, range(len(words))))\n # print(words) # list of words\n # print(word_to_id) # dictionary for word to id, e.g. 'campbell': 2587, 'atlantic': 2247, 'aoun': 6746\n return word_to_id", "def build_vocab(self, corpus):\n if self.vocabulary_counts != None:\n logger.debug(\"building vocabulary from provided frequency map\")\n vocab = self.vocabulary_counts\n else:\n logger.debug(\"default vocabulary building\")\n super(Skipgram, self).build_vocab(corpus)\n return\n\n # assign a unique index to each word\n self.vocab, self.index2word = {}, []\n\n for word, count in vocab.iteritems():\n v = Vocab()\n v.count = count\n if v.count >= self.min_count:\n v.index = len(self.vocab)\n self.index2word.append(word)\n self.vocab[word] = v\n\n logger.debug(\"total %i word types after removing those with count<%s\" % (len(self.vocab), self.min_count))\n\n if self.hs:\n # add info about each word's Huffman encoding\n self.create_binary_tree()\n if self.negative:\n # build the table for drawing random words (for negative sampling)\n self.make_table()\n # precalculate downsampling thresholds\n self.precalc_sampling()\n self.reset_weights()" ]
[ "0.79638803", "0.7715133", "0.7715133", "0.7715133", "0.76599747", "0.7653059", "0.7552094", "0.74614453", "0.69518715", "0.6917704", "0.68646234", "0.68494624", "0.67772037", "0.6662719", "0.6608852", "0.65526927", "0.65498227", "0.6457259", "0.6451463", "0.6442439", "0.64400077", "0.6429191", "0.6417719", "0.64065063", "0.63711977", "0.634279", "0.63223284", "0.6310788", "0.6237771", "0.6236734" ]
0.7844205
1
Estimate the true signal mean and interpolate bad channels. This function implements the functionality of the `performReference` function as part of the PREP pipeline on an MNE raw object. Notes: This function calls robust_reference first. Currently this function only implements the functionality of default settings, i.e., doRobustPost.
def perform_reference(self):
    # Phase 1: Estimate the true signal mean with robust referencing
    self.robust_reference()

    if self.noisy_channels["bad_all"]:
        self.raw.info["bads"] = self.noisy_channels["bad_all"]
        self.raw.interpolate_bads()
    self.reference_signal = (
        np.nanmean(self.raw.get_data(picks=self.reference_channels), axis=0) * 1e6
    )
    rereferenced_index = [
        self.ch_names_eeg.index(ch) for ch in self.rereferenced_channels
    ]
    self.EEG = self.remove_reference(
        self.EEG, self.reference_signal, rereferenced_index
    )

    # Phase 2: Find the bad channels and interpolate
    self.raw._data = self.EEG * 1e-6
    noisy_detector = NoisyChannels(self.raw)
    noisy_detector.find_all_bads(ransac=self.ransac)

    # Record Noisy channels and EEG before interpolation
    self.bad_before_interpolation = noisy_detector.get_bads(verbose=True)
    self.EEG_before_interpolation = self.EEG.copy()

    bad_channels = _union(self.bad_before_interpolation, self.unusable_channels)
    self.raw.info["bads"] = bad_channels
    self.raw.interpolate_bads()
    reference_correct = (
        np.nanmean(self.raw.get_data(picks=self.reference_channels), axis=0) * 1e6
    )
    self.EEG = self.raw.get_data() * 1e6
    self.EEG = self.remove_reference(
        self.EEG, reference_correct, rereferenced_index
    )
    # reference signal after interpolation
    self.reference_signal_new = self.reference_signal + reference_correct

    # MNE Raw object after interpolation
    self.raw._data = self.EEG * 1e-6

    # Still noisy channels after interpolation
    self.interpolated_channels = bad_channels
    noisy_detector = NoisyChannels(self.raw)
    noisy_detector.find_all_bads(ransac=self.ransac)
    self.still_noisy_channels = noisy_detector.get_bads()
    self.raw.info["bads"] = self.still_noisy_channels
    return self
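A hedged usage sketch for perform_reference above, assuming the method belongs to pyprep's Reference class; the import path, parameter names, and input file are assumptions for illustration, not taken from the stored row.
    import mne
    from pyprep.reference import Reference   # assumed import path

    raw = mne.io.read_raw_edf('recording.edf', preload=True)        # hypothetical recording
    params = {'ref_chs': raw.ch_names, 'reref_chs': raw.ch_names}   # assumed parameter names
    reference = Reference(raw, params, ransac=True)
    reference.perform_reference()
    print(reference.interpolated_channels, reference.still_noisy_channels)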
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def robust_reference(self):\n raw = self.raw.copy()\n raw._data = removeTrend(raw.get_data(), sample_rate=self.sfreq)\n\n # Determine unusable channels and remove them from the reference channels\n noisy_detector = NoisyChannels(raw, do_detrend=False)\n noisy_detector.find_all_bads(ransac=self.ransac)\n self.noisy_channels_original = {\n \"bad_by_nan\": noisy_detector.bad_by_nan,\n \"bad_by_flat\": noisy_detector.bad_by_flat,\n \"bad_by_deviation\": noisy_detector.bad_by_deviation,\n \"bad_by_hf_noise\": noisy_detector.bad_by_hf_noise,\n \"bad_by_correlation\": noisy_detector.bad_by_correlation,\n \"bad_by_ransac\": noisy_detector.bad_by_ransac,\n \"bad_all\": noisy_detector.get_bads(),\n }\n self.noisy_channels = self.noisy_channels_original.copy()\n logger.info(\"Bad channels: {}\".format(self.noisy_channels))\n\n self.unusable_channels = _union(\n noisy_detector.bad_by_nan, noisy_detector.bad_by_flat\n )\n # unusable_channels = _union(unusable_channels, noisy_detector.bad_by_SNR)\n self.reference_channels = _set_diff(\n self.reference_channels, self.unusable_channels\n )\n\n # Get initial estimate of the reference by the specified method\n signal = raw.get_data() * 1e6\n self.reference_signal = (\n np.nanmedian(raw.get_data(picks=self.reference_channels), axis=0) * 1e6\n )\n reference_index = [\n self.ch_names_eeg.index(ch) for ch in self.reference_channels\n ]\n signal_tmp = self.remove_reference(\n signal, self.reference_signal, reference_index\n )\n\n # Remove reference from signal, iteratively interpolating bad channels\n raw_tmp = raw.copy()\n\n iterations = 0\n noisy_channels_old = []\n max_iteration_num = 4\n\n while True:\n raw_tmp._data = signal_tmp * 1e-6\n noisy_detector = NoisyChannels(raw_tmp)\n noisy_detector.find_all_bads(ransac=self.ransac)\n self.noisy_channels[\"bad_by_nan\"] = _union(\n self.noisy_channels[\"bad_by_nan\"], noisy_detector.bad_by_nan\n )\n self.noisy_channels[\"bad_by_flat\"] = _union(\n self.noisy_channels[\"bad_by_flat\"], noisy_detector.bad_by_flat\n )\n self.noisy_channels[\"bad_by_deviation\"] = _union(\n self.noisy_channels[\"bad_by_deviation\"], noisy_detector.bad_by_deviation\n )\n self.noisy_channels[\"bad_by_hf_noise\"] = _union(\n self.noisy_channels[\"bad_by_hf_noise\"], noisy_detector.bad_by_hf_noise\n )\n self.noisy_channels[\"bad_by_correlation\"] = _union(\n self.noisy_channels[\"bad_by_correlation\"],\n noisy_detector.bad_by_correlation,\n )\n self.noisy_channels[\"bad_by_ransac\"] = _union(\n self.noisy_channels[\"bad_by_ransac\"], noisy_detector.bad_by_ransac\n )\n self.noisy_channels[\"bad_all\"] = _union(\n self.noisy_channels[\"bad_all\"], noisy_detector.get_bads()\n )\n logger.info(\"Bad channels: {}\".format(self.noisy_channels))\n\n if (\n iterations > 1\n and (\n not self.noisy_channels[\"bad_all\"]\n or set(self.noisy_channels[\"bad_all\"]) == set(noisy_channels_old)\n )\n or iterations > max_iteration_num\n ):\n break\n noisy_channels_old = self.noisy_channels[\"bad_all\"].copy()\n\n if raw_tmp.info[\"nchan\"] - len(self.noisy_channels[\"bad_all\"]) < 2:\n raise ValueError(\n \"RobustReference:TooManyBad \"\n \"Could not perform a robust reference -- not enough good channels\"\n )\n\n if self.noisy_channels[\"bad_all\"]:\n raw_tmp._data = signal * 1e-6\n raw_tmp.info[\"bads\"] = self.noisy_channels[\"bad_all\"]\n raw_tmp.interpolate_bads()\n signal_tmp = raw_tmp.get_data() * 1e6\n else:\n signal_tmp = signal\n self.reference_signal = (\n np.nanmean(raw_tmp.get_data(picks=self.reference_channels), axis=0)\n * 1e6\n )\n\n 
signal_tmp = self.remove_reference(\n signal, self.reference_signal, reference_index\n )\n iterations = iterations + 1\n logger.info(\"Iterations: {}\".format(iterations))\n\n logger.info(\"Robust reference done\")\n return self.noisy_channels, self.reference_signal", "def calibrate(raw_data, white_reference, dark_reference):\n # Auto-increment device\n params.device += 1\n\n # Collect the number of wavelengths present\n num_bands = len(white_reference.wavelength_dict)\n den = white_reference.array_data - dark_reference.array_data\n\n # Calibrate using reflectance = (raw data - dark reference) / (white reference - dark reference)\n output_num = []\n for i in range(0, raw_data.lines):\n ans = raw_data.array_data[i,].astype(np.float16) - dark_reference.array_data\n output_num.append(ans)\n num = np.stack(output_num, axis=2)\n output_calibrated = []\n for i in range(0, raw_data.lines):\n ans1 = raw_data.array_data[i,] / den\n output_calibrated.append(ans1)\n\n # Reshape into hyperspectral datacube\n scalibrated = np.stack(output_calibrated, axis=2)\n calibrated_array = np.transpose(scalibrated[0], (1, 0, 2))\n calibrated_array[np.where(calibrated_array < 0)] = 0\n\n # Find array min and max values\n max_pixel = float(np.amax(calibrated_array))\n min_pixel = float(np.amin(calibrated_array))\n\n # Make a new class instance with the calibrated hyperspectral image\n calibrated = Spectral_data(array_data=calibrated_array, max_wavelength=raw_data.max_wavelength,\n min_wavelength=raw_data.min_wavelength, max_value=max_pixel, min_value=min_pixel,\n d_type=raw_data.d_type,\n wavelength_dict=raw_data.wavelength_dict, samples=raw_data.samples,\n lines=raw_data.lines, interleave=raw_data.interleave,\n wavelength_units=raw_data.wavelength_units, array_type=raw_data.array_type,\n pseudo_rgb=None, filename=None, default_bands=raw_data.default_bands)\n\n # Make pseudo-rgb image for the calibrated image\n calibrated.pseudo_rgb = _make_pseudo_rgb(spectral_array=calibrated)\n\n if params.debug == \"plot\":\n # Gamma correct pseudo_rgb image\n plot_image(calibrated.pseudo_rgb)\n elif params.debug == \"print\":\n print_image(calibrated.pseudo_rgb, os.path.join(params.debug_outdir, str(params.device) + \"_calibrated_rgb.png\"))\n\n return calibrated", "def noise_reducer(fname_raw, raw=None, signals=[], noiseref=[], detrending=None,\n tmin=None, tmax=None, reflp=None, refhp=None, refnotch=None,\n exclude_artifacts=True, checkresults=True, return_raw=False,\n complementary_signal=False, fnout=None, verbose=False):\n\n if type(complementary_signal) != bool:\n raise ValueError(\"Argument complementary_signal must be of type bool\")\n\n # handle error if Raw object passed with file list\n if raw and isinstance(fname_raw, list):\n raise ValueError('List of file names cannot be combined with'\n 'one Raw object')\n\n # handle error if return_raw is requested with file list\n if return_raw and isinstance(fname_raw, list):\n raise ValueError('List of file names cannot be combined return_raw.'\n 'Please pass one file at a time.')\n\n # handle error if Raw object is passed with detrending option\n #TODO include perform_detrending for Raw objects\n if raw and detrending:\n raise ValueError('Please perform detrending on the raw file directly.'\n 'Cannot perform detrending on the raw object')\n\n # Handle combinations of fname_raw and raw object:\n if fname_raw is not None:\n fnraw = get_files_from_list(fname_raw)\n have_input_file = True\n elif raw is not None:\n if 'filename' in raw.info:\n fnraw = 
[os.path.basename(raw.filenames[0])]\n else:\n fnraw = raw._filenames[0]\n warnings.warn('Setting file name from Raw object')\n have_input_file = False\n if fnout is None and not return_raw:\n raise ValueError('Refusing to waste resources without result')\n else:\n raise ValueError('Refusing Creatio ex nihilo')\n\n # loop across all filenames\n for fname in fnraw:\n\n if verbose:\n print(\"########## Read raw data:\")\n\n tc0 = time.perf_counter()\n tw0 = time.time()\n\n if raw is None:\n if detrending:\n raw = perform_detrending(fname, save=False)\n else:\n raw = mne.io.Raw(fname, preload=True)\n else:\n # perform sanity check to make sure Raw object and file are same\n if 'filename' in raw.info:\n fnintern = [os.path.basename(raw.filenames[0])]\n else:\n fnintern = raw._filenames[0]\n if os.path.basename(fname) != os.path.basename(fnintern):\n warnings.warn('The file name within the Raw object and provided\\n '\n 'fname are not the same. Please check again.')\n\n tc1 = time.perf_counter()\n tw1 = time.time()\n\n if verbose:\n print(\">>> loading raw data took %.1f ms (%.2f s walltime)\" % (1000. * (tc1 - tc0), (tw1 - tw0)))\n\n # Time window selection\n # weights are calc'd based on [tmin,tmax], but applied to the entire data set.\n # tstep is used in artifact detection\n # tmin,tmax variables must not be changed here!\n if tmin is None:\n itmin = 0\n else:\n itmin = int(floor(tmin * raw.info['sfreq']))\n if tmax is None:\n itmax = raw.last_samp - raw.first_samp\n else:\n itmax = int(ceil(tmax * raw.info['sfreq']))\n\n if itmax - itmin < 2:\n raise ValueError(\"Time-window for noise compensation empty or too short\")\n\n if verbose:\n print(\">>> Set time-range to [%7.3f,%7.3f]\" % \\\n (raw.times[itmin], raw.times[itmax]))\n\n if signals is None or len(signals) == 0:\n sigpick = mne.pick_types(raw.info, meg='mag', eeg=False, stim=False,\n eog=False, exclude='bads')\n else:\n sigpick = channel_indices_from_list(raw.info['ch_names'][:], signals,\n raw.info.get('bads'))\n nsig = len(sigpick)\n if nsig == 0:\n raise ValueError(\"No channel selected for noise compensation\")\n\n if noiseref is None or len(noiseref) == 0:\n # References are not limited to 4D ref-chans, but can be anything,\n # incl. 
ECG or powerline monitor.\n if verbose:\n print(\">>> Using all refchans.\")\n refexclude = \"bads\"\n refpick = mne.pick_types(raw.info, ref_meg=True, meg=False,\n eeg=False, stim=False,\n eog=False, exclude='bads')\n else:\n refpick = channel_indices_from_list(raw.info['ch_names'][:],\n noiseref, raw.info.get('bads'))\n nref = len(refpick)\n if nref == 0:\n raise ValueError(\"No channel selected as noise reference\")\n\n if verbose:\n print(\">>> sigpick: %3d chans, refpick: %3d chans\" % (nsig, nref))\n badpick = np.intersect1d(sigpick, refpick, assume_unique=False)\n if len(badpick) > 0:\n raise Warning(\"Intersection of signal and reference channels not empty\")\n\n if reflp is None and refhp is None and refnotch is None:\n use_reffilter = False\n use_refantinotch = False\n else:\n use_reffilter = True\n if verbose:\n print(\"########## Filter reference channels:\")\n\n use_refantinotch = False\n if refnotch is not None:\n if reflp is not None or reflp is not None:\n raise ValueError(\"Cannot specify notch- and high-/low-pass\"\n \"reference filter together\")\n nyquist = (0.5 * raw.info['sfreq'])\n if isinstance(refnotch, list):\n notchfrqs = refnotch\n else:\n notchfrqs = [ refnotch ]\n notchfrqscln = []\n for nfrq in notchfrqs:\n if not isinstance(nfrq,float) and not isinstance(nfrq,int):\n raise ValueError(\"Illegal entry for notch-frequency (\",nfrq,\")\")\n if nfrq >= nyquist:\n warnings.warn('Ignoring notch frequency > 0.5*sample_rate=%.1fHz' % nyquist)\n else:\n notchfrqscln.append(nfrq)\n if len(notchfrqscln) == 0:\n raise ValueError(\"Notch frequency list is (now) empty\")\n use_refantinotch = True\n if verbose:\n print(\">>> notches at freq \", notchfrqscln)\n else:\n if verbose:\n if reflp is not None:\n print(\">>> low-pass with cutoff-freq %.1f\" % reflp)\n if refhp is not None:\n print(\">>> high-pass with cutoff-freq %.1f\" % refhp)\n\n # Adapt followg drop-chans cmd to use 'all-but-refpick'\n droplist = [raw.info['ch_names'][k] for k in range(raw.info['nchan']) if not k in refpick]\n tct = time.perf_counter()\n twt = time.time()\n fltref = raw.copy().drop_channels(droplist)\n if use_refantinotch:\n rawref = raw.copy().drop_channels(droplist)\n fltref.notch_filter(notchfrqscln, fir_design='firwin', fir_window='hann', \\\n picks=np.array(range(nref)), method='fir')\n fltref._data = (rawref._data - fltref._data)\n else:\n fltref.filter(refhp, reflp, fir_design='firwin', fir_window='hann', \\\n picks=np.array(range(nref)), method='fir')\n tc1 = time.perf_counter()\n tw1 = time.time()\n if verbose:\n print(\">>> filtering ref-chans took %.1f ms (%.2f s walltime)\" % (1000. 
* (tc1 - tct), (tw1 - twt)))\n\n if verbose:\n print(\"########## Calculating sig-ref/ref-ref-channel covariances:\")\n # Calculate sig-ref/ref-ref-channel covariance:\n # (there is no need to calc inter-signal-chan cov,\n # but there seems to be no appropriat fct available)\n # Here we copy the idea from compute_raw_data_covariance()\n # and truncate it as appropriate.\n tct = time.perf_counter()\n twt = time.time()\n # The following reject and infosig entries are only\n # used in _is_good-calls.\n # _is_good() from mne-0.9.git-py2.7.egg/mne/epochs.py seems to\n # ignore ref-channels (not covered by dict) and checks individual\n # data segments - artifacts across a buffer boundary are not found.\n reject = dict(grad=4000e-13, # T / m (gradiometers)\n mag=4e-12, # T (magnetometers)\n eeg=40e-6, # uV (EEG channels)\n eog=250e-6) # uV (EOG channels)\n\n infosig = copy.copy(raw.info)\n infosig['chs'] = [raw.info['chs'][k] for k in sigpick]\n # the below fields are updated automatically when 'chs' is updated\n # infosig['ch_names'] = [raw.info['ch_names'][k] for k in sigpick]\n # infosig['nchan'] = len(sigpick)\n idx_by_typesig = channel_indices_by_type(infosig)\n\n # Read data in chunks:\n tstep = 0.2\n itstep = int(ceil(tstep * raw.info['sfreq']))\n sigmean = 0\n refmean = 0\n sscovdata = 0\n srcovdata = 0\n rrcovdata = 0\n n_samples = 0\n\n for first in range(itmin, itmax, itstep):\n last = first + itstep\n if last >= itmax:\n last = itmax\n raw_segmentsig, times = raw[sigpick, first:last]\n if use_reffilter:\n raw_segmentref, times = fltref[:, first:last]\n else:\n raw_segmentref, times = raw[refpick, first:last]\n\n if not exclude_artifacts or \\\n _is_good(raw_segmentsig, infosig['ch_names'], idx_by_typesig, reject, flat=None,\n ignore_chs=raw.info['bads']):\n sigmean += raw_segmentsig.sum(axis=1)\n refmean += raw_segmentref.sum(axis=1)\n sscovdata += (raw_segmentsig * raw_segmentsig).sum(axis=1)\n srcovdata += np.dot(raw_segmentsig, raw_segmentref.T)\n rrcovdata += np.dot(raw_segmentref, raw_segmentref.T)\n n_samples += raw_segmentsig.shape[1]\n else:\n logger.info(\"Artefact detected in [%d, %d]\" % (first, last))\n if n_samples <= 1:\n raise ValueError('Too few samples to calculate weights')\n sigmean /= n_samples\n refmean /= n_samples\n sscovdata -= n_samples * sigmean[:] * sigmean[:]\n sscovdata /= (n_samples - 1)\n srcovdata -= n_samples * sigmean[:, None] * refmean[None, :]\n srcovdata /= (n_samples - 1)\n rrcovdata -= n_samples * refmean[:, None] * refmean[None, :]\n rrcovdata /= (n_samples - 1)\n sscovinit = np.copy(sscovdata)\n if verbose:\n print(\">>> Normalize srcov...\")\n\n rrslope = copy.copy(rrcovdata)\n for iref in range(nref):\n dtmp = rrcovdata[iref, iref]\n if dtmp > TINY:\n srcovdata[:, iref] /= dtmp\n rrslope[:, iref] /= dtmp\n else:\n srcovdata[:, iref] = 0.\n rrslope[:, iref] = 0.\n\n if verbose:\n print(\">>> Number of samples used : %d\" % n_samples)\n tc1 = time.perf_counter()\n tw1 = time.time()\n print(\">>> sigrefchn covar-calc took %.1f ms (%.2f s walltime)\" % (1000. 
* (tc1 - tct), (tw1 - twt)))\n\n if checkresults:\n if verbose:\n print(\"########## Calculated initial signal channel covariance:\")\n # Calculate initial signal channel covariance:\n # (only used as quality measure)\n print(\">>> initl rt(avg sig pwr) = %12.5e\" % np.sqrt(np.mean(sscovdata)))\n for i in range(min(5,nsig)):\n print(\">>> initl signal-rms[%3d] = %12.5e\" % (i, np.sqrt(sscovdata.flatten()[i])))\n print(\">>>\")\n\n U, s, V = np.linalg.svd(rrslope, full_matrices=True)\n if verbose:\n print(\">>> singular values:\")\n print(s)\n print(\">>> Applying cutoff for smallest SVs:\")\n\n dtmp = s.max() * SVD_RELCUTOFF\n s *= (abs(s) >= dtmp)\n sinv = [1. / s[k] if s[k] != 0. else 0. for k in range(nref)]\n if verbose:\n print(\">>> singular values (after cutoff):\")\n print(s)\n\n stat = np.allclose(rrslope, np.dot(U, np.dot(np.diag(s), V)))\n if verbose:\n print(\">>> Testing svd-result: %s\" % stat)\n if not stat:\n print(\" (Maybe due to SV-cutoff?)\")\n\n # Solve for inverse coefficients:\n # Set RRinv.tr=U diag(sinv) V\n RRinv = np.transpose(np.dot(U, np.dot(np.diag(sinv), V)))\n if checkresults:\n stat = np.allclose(np.identity(nref), np.dot(RRinv, rrslope))\n if stat:\n if verbose:\n print(\">>> Testing RRinv-result (should be unit-matrix): ok\")\n else:\n print(\">>> Testing RRinv-result (should be unit-matrix): failed\")\n print(np.transpose(np.dot(RRinv, rrslope)))\n print(\">>>\")\n\n if verbose:\n print(\"########## Calc weight matrix...\")\n\n # weights-matrix will be somewhat larger than necessary,\n # (to simplify indexing in compensation loop):\n weights = np.zeros((raw._data.shape[0], nref))\n for isig in range(nsig):\n for iref in range(nref):\n weights[sigpick[isig],iref] = np.dot(srcovdata[isig,:], RRinv[:,iref])\n\n if verbose:\n print(\"########## Compensating signal channels:\")\n if complementary_signal:\n print(\">>> Caveat: REPLACING signal by compensation signal\")\n\n tct = time.perf_counter()\n twt = time.time()\n\n # Work on entire data stream:\n for isl in range(raw._data.shape[1]):\n slice = np.take(raw._data, [isl], axis=1)\n if use_reffilter:\n refslice = np.take(fltref._data, [isl], axis=1)\n refarr = refslice[:].flatten() - refmean\n # refarr = fltres[:,isl]-refmean\n else:\n refarr = slice[refpick].flatten() - refmean\n subrefarr = np.dot(weights[:], refarr)\n\n if not complementary_signal:\n raw._data[:, isl] -= subrefarr\n else:\n raw._data[:, isl] = subrefarr\n\n if (isl % 10000 == 0) and verbose:\n print(\"\\rProcessed slice %6d\" % isl)\n\n if verbose:\n print(\"\\nDone.\")\n tc1 = time.perf_counter()\n tw1 = time.time()\n print(\">>> compensation loop took %.1f ms (%.2f s walltime)\" % (1000. 
* (tc1 - tct), (tw1 - twt)))\n\n if checkresults:\n if verbose:\n print(\"########## Calculating final signal channel covariance:\")\n # Calculate final signal channel covariance:\n # (only used as quality measure)\n tct = time.perf_counter()\n twt = time.time()\n sigmean = 0\n sscovdata = 0\n n_samples = 0\n for first in range(itmin, itmax, itstep):\n last = first + itstep\n if last >= itmax:\n last = itmax\n raw_segmentsig, times = raw[sigpick, first:last]\n # Artifacts found here will probably differ from pre-noisered artifacts!\n if not exclude_artifacts or \\\n _is_good(raw_segmentsig, infosig['ch_names'], idx_by_typesig, reject,\n flat=None, ignore_chs=raw.info['bads']):\n sigmean += raw_segmentsig.sum(axis=1)\n sscovdata += (raw_segmentsig * raw_segmentsig).sum(axis=1)\n n_samples += raw_segmentsig.shape[1]\n if n_samples <= 1:\n raise ValueError('Too few samples to calculate final signal channel covariance')\n sigmean /= n_samples\n sscovdata -= n_samples * sigmean[:] * sigmean[:]\n sscovdata /= (n_samples - 1)\n if verbose:\n print(\">>> no channel got worse: \", np.all(np.less_equal(sscovdata, sscovinit)))\n print(\">>> final rt(avg sig pwr) = %12.5e\" % np.sqrt(np.mean(sscovdata)))\n for i in range(min(5,nsig)):\n print(\">>> final signal-rms[%3d] = %12.5e\" % (i, np.sqrt(sscovdata.flatten()[i])))\n tc1 = time.perf_counter()\n tw1 = time.time()\n print(\">>> signal covar-calc took %.1f ms (%.2f s walltime)\" % (1000. * (tc1 - tct), (tw1 - twt)))\n print(\">>>\")\n\n if fnout is not None:\n fnoutloc = fnout\n elif return_raw:\n fnoutloc = None\n elif have_input_file:\n fnoutloc = fname[:fname.rfind('-raw.fif')] + ',nr-raw.fif'\n else:\n fnoutloc = None\n\n if fnoutloc is not None:\n if verbose:\n print(\">>> Saving '%s'...\" % fnoutloc)\n raw.save(fnoutloc, overwrite=True)\n\n tc1 = time.perf_counter()\n tw1 = time.time()\n if verbose:\n print(\">>> Total run took %.1f ms (%.2f s walltime)\" % (1000. * (tc1 - tc0), (tw1 - tw0)))\n\n if return_raw:\n if verbose:\n print(\">>> Returning raw object...\")\n return raw", "def _fitgeometry_refband(ellipsefit, geometry0, majoraxis, refband='r', verbose=False,\n integrmode='median', sclip=3, nclip=2):\n smamax = majoraxis # inner, outer radius\n #smamax = 1.5*majoraxis\n smamin = ellipsefit['psfsize_{}'.format(refband)] / ellipsefit['refpixscale']\n\n if smamin > majoraxis:\n print('Warning! 
this galaxy is smaller than three times the seeing FWHM!')\n \n t0 = time.time()\n print('Finding the mean geometry using the reference {}-band image...'.format(refband), end='')\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n factor = np.arange(1.0, 6, 0.5) # (1, 2, 3, 3.5, 4, 4.5, 5, 10)\n for ii, fac in enumerate(factor): # try a few different starting sma0\n sma0 = smamin*fac\n try:\n iso0 = ellipse0.fit_image(sma0, integrmode=integrmode, sclip=sclip, nclip=nclip)\n except:\n iso0 = []\n sma0 = smamin\n if len(iso0) > 0:\n break\n print('...took {:.3f} sec'.format(time.time()-t0))\n\n if len(iso0) == 0:\n print('Initial ellipse-fitting failed.')\n else:\n # Try to determine the mean fitted geometry, for diagnostic purposes,\n # masking out outliers and the inner part of the galaxy where seeing\n # dominates.\n good = (iso0.sma > smamin) * (iso0.stop_code <= 4)\n #good = ~sigma_clip(iso0.pa, sigma=3).mask\n #good = (iso0.sma > smamin) * (iso0.stop_code <= 4) * ~sigma_clip(iso0.pa, sigma=3).mask\n #good = (iso0.sma > 3 * ellipsefit['psfsigma_{}'.format(refband)]) * ~sigma_clip(iso0.pa, sigma=3).mask\n #good = (iso0.stop_code < 4) * ~sigma_clip(iso0.pa, sigma=3).mask\n\n ngood = np.sum(good)\n if ngood == 0:\n print('Too few good measurements to get ellipse geometry!')\n else:\n ellipsefit['success'] = True\n ellipsefit['init_smamin'] = iso0.sma[good].min()\n ellipsefit['init_smamax'] = iso0.sma[good].max()\n\n ellipsefit['x0_median'] = np.mean(iso0.x0[good])\n ellipsefit['y0_median'] = np.mean(iso0.y0[good])\n ellipsefit['x0_err'] = np.std(iso0.x0[good]) / np.sqrt(ngood)\n ellipsefit['y0_err'] = np.std(iso0.y0[good]) / np.sqrt(ngood)\n\n ellipsefit['pa_moment'] = (np.degrees(np.mean(iso0.pa[good]))+90) % 180\n ellipsefit['pa_moment_err'] = np.degrees(np.std(iso0.pa[good])) / np.sqrt(ngood)\n ellipsefit['eps_moment'] = np.mean(iso0.eps[good])\n ellipsefit['eps_moment_err'] = np.std(iso0.eps[good]) / np.sqrt(ngood)\n\n if verbose:\n print(' x0 = {:.3f}+/-{:.3f} (initial={:.3f})'.format(\n ellipsefit['x0_median'], ellipsefit['x0_err'], ellipsefit['x0_moment']))\n print(' y0 = {:.3f}+/-{:.3f} (initial={:.3f})'.format(\n ellipsefit['y0_median'], ellipsefit['y0_err'], ellipsefit['y0_moment']))\n print(' PA = {:.3f}+/-{:.3f} (initial={:.3f})'.format(\n ellipsefit['pa_moment'], ellipsefit['pa_moment_err'], np.degrees(geometry0.pa)+90))\n print(' eps = {:.3f}+/-{:.3f} (initial={:.3f})'.format(\n ellipsefit['eps_moment'], ellipsefit['eps_moment_err'], geometry0.eps))\n\n return ellipsefit", "def refere(eeg, channels, mode='contralateral'):\n\tbipolar_map = {'Fp1':'Fp2', 'Fp2':'Fp2', 'F3':'F4', 'F4':'F4', 'C3':'C4', 'C4':'C4', 'T3':'T4', 'T4':'T4', 'P3':'P4', 'P4':'P4', 'O1':'O2', 'O2':'O2'}\n\tif mode not in ['monopolar', 'contralateral', 'bipolar', 'linked', 'average']:\n\t\tprint 'WARNING - refere(): parameter \"mode\" can only be \"monopolar\", \"contralateral\", \"bipolar\" or \"linked\". 
Using \"contralateral\"!'\n\t\tmode = 'contralateral'\n\tif mode == 'linked':\t\t\n\t\treference = (eeg[:,channels.index('A1')] + eeg[:,channels.index('A2')])/2.\n\tif mode == 'average':\n\t\treference = np.zeros(len(eeg), dtype=np.float32)\n\t\tchcounter = 0\n\t\tfor channel in range(len(channels)):\n\t\t\tif (channels[channel] in EEG_CHANNELS):\n\t\t\t\treference += eeg[:, channel]\n\t\t\t\tchcounter += 1\n\t\treference /= chcounter\n\tfor channel in range(len(channels)):\n\t\tif (channels[channel] in EEG_CHANNELS):\n\t\t\t# mindenkit referalunk kiveve magukat a referencia csatornakat\n\t\t\tif mode == 'contralateral':\n\t\t\t\tif (channels[channel] in ['Fp2', 'F4', 'C4', 'T4', 'P4', 'O2']):\n\t\t\t\t\tref_channel = channels.index('A1')\n\t\t\t\telif (channels[channel] in ['Fp1', 'F3', 'C3', 'T3', 'P3', 'O1']):\n\t\t\t\t\tref_channel = channels.index('A2')\n\t\t\t\telse:\n\t\t\t\t\tprint \"Error: what kind of channel is this: \", channels[channel], \" cannot reference!!!!\"\n\t\t\t\treference = eeg[:, ref_channel]\n\t\t\t\tprint \"channel \", channels[channel], \" referenced to \", channels[ref_channel]\n\t\t\tif mode == 'bipolar':\n\t\t\t\tref_channel = channels.index(bipolar_map[channels[channel]])\n\t\t\t\treference = eeg[:, ref_channel]\n\t\t\t\tprint \"channel \", channels[channel], \" referenced to \", channels[ref_channel]\n\t\t\teeg[:, channel] -= reference", "def reref_data(self, data):\n if self._ref_channels is not None or self._channels_to_ref is not None:\n if self._ref_channels is None: # Re-reference to global average.\n self._ref_channels = [range(data.shape[1])]\n if self._channels_to_ref is None: # Re-reference all channels.\n self._channels_to_ref = [range(data.shape[1])]\n d = np.copy(data) # create copy to avoid using re-referenced data\n for ref, chans in zip(self._ref_channels, self._channels_to_ref):\n data[:, list(chans)] -= np.mean(d[:, list(ref)], axis=1, keepdims=True)\n return data", "def incumbent(self):\n return self.boundary_handler.repair(self.mean)", "def run(self):\n old_sampling = rospy.Time(0)\n while not rospy.is_shutdown():\n self.mutex.acquire()\n reference_received = all(self.reference_flags.values())\n if reference_received:\n if not self.ready:\n # first value of ni_ref\n self.ni_ref.last_value[0:3] = self.eta1_ref_body.dot + self.speed_ref\n self.ni_ref.last_value[3:6] = self.controller.vehicle.ned2body_angular(self.eta2_ref.dot, self.eta2)\n self.ni_ref.last_sampling = rospy.Time.now()\n\n # error\n old_sampling = rospy.Time.now()\n\n # Node is ready to call controller\n self.ready = True\n else:\n # Set ni_ref\n self.ni_ref.value[0:3] = self.eta1_ref_body.dot + self.speed_ref\n self.ni_ref.value[3:6] = self.controller.vehicle.ned2body_angular(self.eta2_ref.dot, self.eta2)\n dt = rospy.Time.now() - self.ni_ref.last_sampling\n\n # compute derivative of ni_ref\n self.ni_ref.dot = (self.ni_ref.value - self.ni_ref.last_value) / dt.to_sec()\n self.ni_ref.last_value = deepcopy(self.ni_ref.value)\n self.ni_ref.last_sampling = rospy.Time.now()\n\n # Set PI of controller with error value\n self.ni_tilde = self.ni - self.ni_ref.value\n dt = rospy.Time.now() - old_sampling\n self.controller.PI.update(self.ni_tilde, dt.to_sec())\n\n # compute tau with eta2, ni and ni_ref_dot\n tau = self.controller.control_law(self.eta2, self.ni, self.ni_ref.dot)\n\n # publish messages\n self.publish(tau)\n self.tester(tau)\n\n self.mutex.release()\n self.node_loop.sleep()", "def applyNormalization(ds, reference, target=-1):\n print 'normalization of', ds.title\n # Store 
reference name for later\n refname = str(reference)\n # Normalization\n reference = getattr(ds,reference)\n\n # check if reference/target is a number\n # TODO: gumpy doesn't allow us to handle a scalar with variance\n # for multiplying arrays, so we can't propagate variance at present\n numericReference = isinstance(reference, (int, long, float))\n \n # check arguments\n if not numericReference:\n if reference.ndim != 1:\n raise AttributeError('reference.ndim != 1')\n if reference.shape[0] != ds.shape[0]:\n raise AttributeError('reference.shape[0] != ds.shape[0] (%d != %d)' % (reference.shape[0],ds.shape[0]))\n\n def do_norm(rs, f, varf):\n # We propagate errors in the data, but not in\n # the ancillary values\n print 'In do_norm, given %f(%f)' % (f,varf)\n # Funny syntax below to make sure we write into the original area,\n # not assign a new value\n rs.var *= f * f\n rs.var += varf * rs * rs\n rs.storage *= f\n try: #These may be absent in some cases\n rs.bm1_counts *= f\n rs.bm2_counts *= f\n rs.bm3_counts *= f\n rs.detector_time *= f\n rs.total_counts *= f\n except AttributeError:\n pass\n \n # normalization\n rs = ds.__copy__()\n copy_metadata_deep(rs,ds) #NeXuS metadata\n rs.copy_cif_metadata(ds) #CIF metadata\n if numericReference and target > 0:\n # We have a single number to refer to for normalisation, so\n # we are effectively scaling everything by a single number\n scale_factor = float(target)/reference\n variance = scale_factor * target/(reference*reference)\n do_norm(rs, scale_factor, variance)\n info_string = \"Data multiplied by %f with variance %f\" % (scale_factor,variance)\n elif not numericReference:\n # Each step has a different value, and we manually perform the\n # error propagation \n reference = Data(reference)\n if target <= 0:\n target = reference.max()\n for i in xrange(rs.shape[0]):\n # handle unexpected zero values\n one_reference = reference[i]\n if one_reference == 0:\n one_reference = 0.1 #so small it is like zero\n print \"Warning: zero monitor counts found at step %d\" % i\n f = float(target)/one_reference\n v = f*target/(one_reference*one_reference)\n # Funny syntax below to make sure we write into the original area,\n # not assign a new value\n tar_shape = [1,rs.shape[1],rs.shape[2]]\n tar_origin = [i,0,0]\n rss = rs.storage.get_section(tar_origin,tar_shape).get_reduced()\n rsv = rs.var.get_section(tar_origin,tar_shape).get_reduced()\n rs.var[i] = rsv*f * f\n rs.var[i] += v * rss * rss\n rs.storage[i] = rs.storage[i]*f\n info_string = \"Data normalised to %f on %s with error propagation assuming counting statistics\" % (float(target),refname)\n else:\n # interesting note - if we get here, we are passed a single reference number\n # and a negative target, meaning that we use the reference as the target and\n # end up multiplying by 1.0, so no need to do anything at all.\n target = reference\n info_string = \"No normalisation applied to data.\"\n rs.add_metadata('_pd_proc_info_data_reduction',info_string, append=True)\n print 'normalized:', ds.title\n return rs,target", "def dRdE_magnetic(E, m_x, mu_x, target, vlag=232.0, sigmav=156.0, vesc=544.0):\n \n A = Avals[target]\n \n #See Eq. 
62 of https://arxiv.org/pdf/1307.5955.pdf, but note\n #that we're using some different normalisations for the operators\n #so there are some extra factors of m_x and m_p lurking around...\n \n amu = 931.5e3 # keV\n q1 = np.sqrt(2*A*amu*E) #Recoil momentum in keV\n \n alpha = 0.007297\n e = np.sqrt(4*np.pi*alpha)\n m_p = 0.9315\n \n #Proton and neutron g-factors\n gp = 5.59\n gn = -3.83\n \n #Bohr Magneton\n #Tesla = 194.6*eV**2 # Tesla in natural units (with e = sqrt(4 pi alpha))\n #muB = 5.7883818e-5*eV/Tesla # Bohr magneton\n mu_B = 297.45 #GeV^-1 (in natural units (with e = sqrt(4 pi alpha)))\n\n cp = [E*0.0 for i in range(11)]\n cn = [E*0.0 for i in range(11)]\n \n #Operator 1\n cp[0] = e*(mu_x*mu_B)/(2.0*m_x)\n \n #Operator 5\n cp[4] = 2*e*(mu_x*mu_B)*m_p/(q1*1e-6)**2\n \n #Operator 4\n cp[3] = gp*e*(mu_x*mu_B)/m_p\n cn[3] = gn*e*(mu_x*mu_B)/m_p\n \n #Operator 6\n cp[5] = -gp*e*(mu_x*mu_B)*m_p/(q1*1e-6)**2\n cn[5] = -gn*e*(mu_x*mu_B)*m_p/(q1*1e-6)**2\n\n return dRdE_NREFT(E, m_x, cp, cn, target, vlag, sigmav, vesc)", "def __call__(self,raw):\n\n #replace any \"out of range\" values in T_Arrays by NaN's\n self.cleanup(raw)\n\n #for those variables that are best represented as sums,\n #multiply by ntimes_ave to compensate for pre averaging\n \n #if hasattr(raw,'seeded_shots'):\n # raw.seeded_shots*=self.ntime_ave\n #if hasattr(raw,'shot_count'):\n # raw.shot_count*=self.ntime_ave\n\n if self.post_operator:\n self.post_operator(raw)\n return raw", "def bipolar_reference(raw, dist_thresh=0.01, verbose=True):\n raw.load_data()\n ch_names = [name.replace(' ', '') for name in raw.ch_names] # no spaces\n bipolar_names = list()\n locs = list()\n data = list()\n for i, ch in enumerate(ch_names):\n elec_name = ''.join([letter for letter in ch if\n not letter.isdigit()]).rstrip()\n number = ''.join([letter for letter in ch if\n letter.isdigit()]).rstrip()\n pair = f'{elec_name}{int(number) + 1}'\n if pair not in ch_names:\n continue\n j = ch_names.index(pair)\n loc = raw.info['chs'][i]['loc'][:3]\n loc2 = raw.info['chs'][j]['loc'][:3]\n if np.linalg.norm(loc - loc2) > dist_thresh:\n continue\n data.append(raw._data[i] - raw._data[j])\n locs.append((loc + loc2) / 2)\n bipolar_names.append(f'{ch}-{pair}')\n if verbose:\n print(f'Bipolar referencing {ch} and {pair}')\n bipolar_info = mne.create_info(bipolar_names, raw.info['sfreq'], 'seeg')\n for loc, ch in zip(locs, bipolar_info['chs']):\n ch['loc'][:3] = loc\n return mne.io.RawArray(np.array(data), bipolar_info, raw.first_samp)", "def control(self, state, reference):\n\n self.ref[-1] = reference[self.ref_idx] # Set the reference\n\n epsilon_d = state[self.eps_idx] * self.limit[self.eps_idx] + self.dead_time * self.tau * state[self.omega_idx] * \\\n self.limit[self.omega_idx] * self.mp['p'] # Calculate delta epsilon\n\n # Iterate through high-level controller\n if self.omega_control:\n for i in range(len(self.overlaid_controller) + 1, 1, -1):\n # Calculate reference\n self.ref[i] = self.overlaid_controller[i-2].control(state[self.ref_state_idx[i + 1]], self.ref[i + 1])\n\n # Check limits and integrate\n if (0.85 * self.state_space.low[self.ref_state_idx[i]] <= self.ref[i] <= 0.85 *\n self.state_space.high[self.ref_state_idx[i]]) and self.overlaid_type[i - 2]:\n self.overlaid_controller[i - 2].integrate(state[self.ref_state_idx[i + 1]], self.ref[i + 1])\n else:\n self.ref[i] = np.clip(self.ref[i], self.nominal_values[self.ref_state_idx[i]] / self.limit[\n self.ref_state_idx[i]] * self.state_space.low[self.ref_state_idx[i]],\n 
self.nominal_values[self.ref_state_idx[i]] / self.limit[\n self.ref_state_idx[i]] * self.state_space.high[self.ref_state_idx[i]])\n\n # Calculate reference values for i_d and i_q\n if self.torque_control:\n torque = self.ref[2] * self.limit[self.torque_idx]\n self.ref[0], self.ref[1] = self.torque_controller.control(state, torque)\n\n # Calculate action for continuous action space\n if self.has_cont_action_space:\n\n # Decouple the two current components\n if self.decoupling:\n self.u_sd_0 = -state[self.omega_idx] * self.mp['p'] * self.mp['l_q'] * state[self.i_sq_idx]\\\n * self.limit[self.i_sq_idx] / self.limit[self.u_sd_idx] * self.limit[self.omega_idx]\n self.u_sq_0 = state[self.omega_idx] * self.mp['p'] * (\n state[self.i_sd_idx] * self.mp['l_d'] * self.limit[self.u_sd_idx] + self.psi_p) / self.limit[\n self.u_sq_idx] * self.limit[self.omega_idx]\n\n # Calculate action for u_sd\n if self.torque_control:\n u_sd = self.d_controller.control(state[self.i_sd_idx], self.ref[1]) + self.u_sd_0\n else:\n u_sd = self.d_controller.control(state[self.i_sd_idx], reference[self.ref_d_idx]) + self.u_sd_0\n\n # Calculate action for u_sq\n u_sq = self.q_controller.control(state[self.i_sq_idx], self.ref[0]) + self.u_sq_0\n\n # Shifting the reference potential\n action_temp = self.backward_transformation((u_sd, u_sq), epsilon_d)\n action_temp = action_temp - 0.5 * (max(action_temp) + min(action_temp))\n\n # Check limit and integrate\n action = np.clip(action_temp, self.action_space.low[0], self.action_space.high[0])\n if (action == action_temp).all():\n if self.torque_control:\n self.d_controller.integrate(state[self.i_sd_idx], self.ref[1])\n else:\n self.d_controller.integrate(state[self.i_sd_idx], reference[self.ref_d_idx])\n self.q_controller.integrate(state[self.i_sq_idx], self.ref[0])\n\n # Calculate action for discrete action space\n else:\n ref = self.ref[1] if self.torque_control else reference[self.ref_d_idx]\n ref_abc = self.backward_transformation((ref, self.ref[0]), epsilon_d)\n action = 0\n for i in range(3):\n action += (2 ** (2 - i)) * self.abc_controller[i].control(state[self.i_abc_idx[i]], ref_abc[i])\n\n # Plot overlaid reference values\n plot(external_reference_plots=self.external_ref_plots, state_names=self.state_names, external_data=self.get_plot_data(),\n visualization=True)\n\n return action", "def _run(example_file_name, use_shortwave, num_examples,\n choose_max_heating_rate, max_noise_k_day01, pressure_cutoffs_pa,\n pressure_spacings_pa, first_interp_method_name,\n second_interp_method_name, interp_fluxes, output_dir_name):\n\n if interp_fluxes:\n max_noise_k_day01 = 0.\n\n error_checking.assert_is_greater(num_examples, 0)\n error_checking.assert_is_geq(max_noise_k_day01, 0.)\n\n error_checking.assert_is_geq_numpy_array(pressure_cutoffs_pa, 0.)\n error_checking.assert_is_greater_numpy_array(\n numpy.diff(pressure_cutoffs_pa), 0.\n )\n error_checking.assert_is_greater_numpy_array(pressure_spacings_pa, 0.)\n\n num_spacings = len(pressure_spacings_pa)\n expected_dim = numpy.array([num_spacings + 1], dtype=int)\n error_checking.assert_is_numpy_array(\n pressure_cutoffs_pa, exact_dimensions=expected_dim\n )\n\n high_res_pressures_pa = numpy.array([], dtype=float)\n\n for i in range(num_spacings):\n this_num_pressures = int(numpy.ceil(\n 1 + (pressure_cutoffs_pa[i + 1] - pressure_cutoffs_pa[i]) /\n pressure_spacings_pa[i]\n ))\n these_pressures_pa = numpy.linspace(\n pressure_cutoffs_pa[i], pressure_cutoffs_pa[i + 1],\n num=this_num_pressures, dtype=float\n )\n\n if i != num_spacings - 
1:\n these_pressures_pa = these_pressures_pa[:-1]\n\n high_res_pressures_pa = numpy.concatenate((\n high_res_pressures_pa, these_pressures_pa\n ))\n\n print('Number of levels in high-resolution grid = {0:d}'.format(\n len(high_res_pressures_pa)\n ))\n\n if high_res_pressures_pa[0] < TOLERANCE:\n high_res_pressures_pa[0] = 0.5 * high_res_pressures_pa[1]\n\n high_res_pressures_pa = high_res_pressures_pa[::-1]\n high_res_heights_m_asl = standard_atmo.pressure_to_height(\n high_res_pressures_pa\n )\n\n file_system_utils.mkdir_recursive_if_necessary(\n directory_name=output_dir_name\n )\n\n print('Reading data from: \"{0:s}\"...'.format(example_file_name))\n example_dict = example_io.read_file(example_file_name)\n\n heating_rate_matrix_k_day01 = example_utils.get_field_from_dict(\n example_dict=example_dict,\n field_name=\n example_utils.SHORTWAVE_HEATING_RATE_NAME if use_shortwave\n else example_utils.LONGWAVE_HEATING_RATE_NAME\n )\n\n if choose_max_heating_rate:\n hr_criterion_by_example = numpy.max(heating_rate_matrix_k_day01, axis=1)\n else:\n abs_diff_matrix = numpy.absolute(\n numpy.diff(heating_rate_matrix_k_day01[:, :-1], axis=1)\n )\n hr_criterion_by_example = numpy.max(abs_diff_matrix, axis=1)\n\n good_indices = numpy.argsort(-1 * hr_criterion_by_example)\n good_indices = good_indices[:num_examples]\n example_dict = example_utils.subset_by_index(\n example_dict=example_dict, desired_indices=good_indices\n )\n\n num_examples = len(good_indices)\n max_differences_k_day01 = numpy.full(num_examples, numpy.nan)\n\n for i in range(num_examples):\n max_differences_k_day01[i] = _run_experiment_one_example(\n example_dict=example_dict, example_index=i,\n max_noise_k_day01=max_noise_k_day01,\n high_res_pressures_pa=high_res_pressures_pa,\n high_res_heights_m_asl=high_res_heights_m_asl,\n first_interp_method_name=first_interp_method_name,\n second_interp_method_name=second_interp_method_name,\n interp_fluxes=interp_fluxes, output_dir_name=output_dir_name\n )\n\n print('Average max difference = {0:.4f} K day^-1'.format(\n numpy.mean(max_differences_k_day01)\n ))\n print('Median max difference = {0:.4f} K day^-1'.format(\n numpy.median(max_differences_k_day01)\n ))\n print('Max max difference = {0:.4f} K day^-1'.format(\n numpy.max(max_differences_k_day01)\n ))", "def detect_badchannels(raw, picks, ref_meg=\"auto\", significance_level=0.05):\n\n gesd_args = {'alpha': significance_level}\n\n if (picks == \"mag\") or (picks == \"grad\"):\n chinds = mne.pick_types(raw.info, meg=picks, ref_meg=ref_meg, exclude='bads')\n elif picks == \"meg\":\n chinds = mne.pick_types(raw.info, meg=True, ref_meg=ref_meg, exclude='bads')\n elif picks == \"eeg\":\n chinds = mne.pick_types(raw.info, eeg=True, ref_meg=ref_meg, exclude='bads')\n elif picks == \"eog\":\n chinds = mne.pick_types(raw.info, eog=True, ref_meg=ref_meg, exclude='bads')\n elif picks == \"ecg\":\n chinds = mne.pick_types(raw.info, ecg=True, ref_meg=ref_meg, exclude='bads')\n else:\n raise NotImplementedError(f\"picks={picks} not available.\")\n ch_names = np.array(raw.ch_names)[chinds]\n\n bdinds = sails.utils.detect_artefacts(\n raw.get_data(picks=chinds),\n axis=0,\n reject_mode=\"dim\",\n ret_mode=\"bad_inds\",\n gesd_args=gesd_args,\n )\n\n s = \"Modality {0} - {1}/{2} channels rejected ({3:02f}%)\"\n pc = (bdinds.sum() / len(bdinds)) * 100\n logger.info(s.format(picks, bdinds.sum(), len(bdinds), pc))\n\n # concatenate newly found bads to existing bads\n if np.any(bdinds):\n raw.info[\"bads\"].extend(list(ch_names[np.where(bdinds)[0]]))\n\n 
return raw", "def _calibrate_measurement(self):\n\n cold_blackbody = bb_radiance(self.cbb.header.cbb_temperature + 273.15,\n self.cbb.data.wavelength)\n warm_blackbody = bb_radiance(self.wbb.header.wbb_temperature + 273.15,\n self.wbb.data.wavelength)\n\n self.wbb.data.average_spectrum[0] = 1\n self.wbb.data.average_spectrum[2047] = 1\n\n calibration_slope = ((warm_blackbody - cold_blackbody) /\n (self.wbb.data.average_spectrum - self.cbb.data.average_spectrum))\n calibration_offset = warm_blackbody - (self.wbb.data.average_spectrum * \n calibration_slope)\n\n self.wbb.calibrate_file(calibration_slope, calibration_offset)\n self.cbb.calibrate_file(calibration_slope, calibration_offset)\n self.sam.calibrate_file(calibration_slope, calibration_offset)\n\n if not self.dwr is None:\n self.dwr.calibrate_file(calibration_slope, calibration_offset)\n\n plate_temperature = self.dwr.header.spare_f[0]\n if (self.plate == -1) :\n plate_emissivity = self.dwr.header.spare_f[1]\n\n plate_blackbody = bb_radiance(plate_temperature + 273.15,\n self.dwr.data.wavelength)\n plate_emission = plate_emissivity * plate_blackbody\n\n self.dwr.data.average_spectrum = ((self.dwr.data.average_spectrum - \n plate_emission) / (1 - plate_emissivity))", "def _get_reference_fit(self, img):\n bw_img = 255 * (img >= self.contrast)\n fit = [center_on_box(bw_img, self.radius, self.min_ref, *ref) for ref in self.refzone]\n meanfit = num.mean(num.ma.masked_array(fit, fit == -9999), axis=0).astype('i')\n if meanfit[0] is num.ma.masked:\n raise StandardError('At least one reference box match required')\n\n return meanfit, fit", "def propagatePeakAssignments(peaks, refPeak=None, cleanNonRef=False,\n tolerances=None, warnUnalias=False):\n\n if refPeak:\n peaksIn = [refPeak, ]\n else:\n peaksIn = peaks\n \n if not tolerances:\n tolerances = []\n \n dimResonances = {}\n resonanceDims = {}\n for peak in peaksIn:\n for i, peakDim in enumerate(peak.sortedPeakDims()):\n dataDim = peakDim.dataDim\n expDimRef = dataDim.expDim.findFirstExpDimRef()\n \n if not expDimRef:\n continue\n \n key = expDimRef.isotopeCodes\n if dimResonances.get(key) is None:\n dimResonances[key] = []\n \n if peakDim.peakDimContribs:\n # could be in different spectra\n \n for contrib in peakDim.peakDimContribs:\n resonance = contrib.resonance\n \n dimResonances[key].append(resonance)\n if resonanceDims.get(resonance) is None:\n resonanceDims[resonance] = []\n \n if i not in resonanceDims[resonance]:\n resonanceDims[resonance].append(i)\n\n if refPeak and cleanNonRef:\n for peak in peaks:\n if peak is refPeak:\n continue\n \n for peakDim in peak.peakDims:\n clearPeakDim(peakDim)\n\n shiftRanges = {}\n for peak in peaks:\n if peak is refPeak:\n continue\n\n for i, peakDim in enumerate(peak.sortedPeakDims()):\n dataDimRef = peakDim.dataDimRef\n \n if dataDimRef:\n dataDim = dataDimRef.dataDim\n \n if dataDim not in shiftRanges:\n shiftMin, shiftMax = getDataDimFullShiftRange(dataDim)\n shiftRanges[dataDim] = (shiftMin, shiftMax)\n else:\n shiftMin, shiftMax = shiftRanges[dataDim]\n \n if i < len(tolerances):\n tolerance = tolerances[i]\n else:\n tolerance = getAnalysisDataDim(dataDim).assignTolerance\n \n key = dataDimRef.expDimRef.isotopeCodes\n pValue = peakDim.realValue\n\n extantResonances = []\n for contrib in peakDim.peakDimContribs:\n if contrib.peakDimComponent:\n continue\n extantResonances.append(contrib.resonance)\n \n assignResonances = []\n closeResonances = []\n for resonance in dimResonances[key]:\n if resonance not in extantResonances:\n shiftList = 
peak.peakList.dataSource.experiment.shiftList\n shift = resonance.findFirstShift(parentList=shiftList)\n \n if shift:\n # Could result in unaliasing the peak\n\n sValue = shift.value\n # Only assign if within known bounds\n if not (shiftMin < sValue < shiftMax): # Inside, not on edge\n continue\n \n assignResonances.append(resonance)\n \n if abs(sValue-pValue) <= tolerance:\n closeResonances.append(resonance)\n \n elif i in resonanceDims.get(resonance, []):\n # No shift so only propagate across the same dim numbers\n assignResonances.append(resonance)\n \n # Can't have both aliased and unaliased resonances: go for the\n # unaliased/close ppm ones in preference \n \n if closeResonances:\n for resonance in closeResonances:\n assignResToDim(peakDim, resonance, tolerance=tolerance,\n doWarning=False)\n \n elif not extantResonances:\n # Don't risk aliasing changes if already assigned\n # warn for aliasing changes\n for resonance in assignResonances:\n assignResToDim(peakDim, resonance, tolerance=tolerance,\n doWarning=warnUnalias)", "def mean_bayesian_posterior_old(posterior_obj, center = \"naive\", verbose = True, tol=0.1):#1E-5):\n \n posterior = copy.copy(posterior_obj.normed_posterior)\n \n sample_p0 = posterior_obj.sample_p0\n sample_psi0 = posterior_obj.sample_psi0\n \n # put on [-pi/2, pi/2] grid\n #sample_psi0 = polarization_tools.mod_halfpolar_center_0(sample_psi0)\n \n # Sampling widths\n pdx = sample_p0[1] - sample_p0[0]\n psidx = sample_psi0[1] - sample_psi0[0]\n \n # pMB integrand is p0*B2D. This can happen once only, before centering. # note: circularize psi integral?\n pMB_integrand = posterior*sample_p0\n pMB_integrated_over_psi0 = posterior_obj.integrate_highest_dimension(pMB_integrand, dx = psidx)\n pMB = posterior_obj.integrate_highest_dimension(pMB_integrated_over_psi0, dx = pdx)\n \n if verbose is True:\n print(\"Sampling pdx is {}, psidx is {}\".format(pdx, psidx))\n \n # Test that normed posterior is normed\n if verbose is True:\n norm_posterior_test = test_normalization(posterior_obj, pdx, psidx)\n \n # pre-centering test\n nocenter_psiMB_integrand = posterior_obj.normed_posterior*sample_psi0[:, np.newaxis]\n nocenter_pdf = np.trapz(nocenter_psiMB_integrand, dx = pdx, axis=0)\n nocenter_psi0_ludo_new = 0.5*np.arctan2(np.sum(np.sin(2*sample_psi0)*nocenter_pdf), np.sum(np.cos(2*sample_psi0)*nocenter_pdf))\n \n sin_nocenter_psiMB_integrand = posterior_obj.normed_posterior*np.sin(2*sample_psi0[:, np.newaxis])\n cos_nocenter_psiMB_integrand = posterior_obj.normed_posterior*np.cos(2*sample_psi0[:, np.newaxis])\n sin_nocenter_pdf = np.trapz(sin_nocenter_psiMB_integrand, dx = pdx, axis=0)\n cos_nocenter_pdf = np.trapz(cos_nocenter_psiMB_integrand, dx = pdx, axis=0)\n my_new_psi0 = 0.5*np.arctan2(np.sum(sin_nocenter_pdf), np.sum(cos_nocenter_pdf))\n \n print(\"nocenter_psi0_ludo_new\", np.mod(nocenter_psi0_ludo_new, np.pi))\n print(\"my_new_psi0\", np.mod(my_new_psi0, np.pi))\n \n # Center on the naive psi\n if center == \"naive\":\n if verbose is True:\n print(\"Centering initial integral on naive psi\")\n #rolled_sample_psi0, rolled_posterior = center_posterior_naive_psi(posterior_obj, sample_psi0, posterior, verbose = verbose)\n #pnaive, psinaive = naive_planck_measurements(posterior_obj.hp_index)\n psinaive = posterior_obj.psimeas\n \n # testing ludo's method (maybe?)\n #psinaive = polarization_tools.mod_halfpolar_center_0(psinaive)\n \n pnaive = posterior_obj.pmeas\n psi0new, centered_posterior = center_posterior_psi_given(sample_psi0, posterior, psinaive, verbose = 
verbose)\n #print(\"max psi0new: \", np.max(psi0new))\n psidx = psi0new[1] - psi0new[0]\n \n if verbose is True:\n print(\"psinaive = {}, pnaive = {}\".format(psinaive, pnaive))\n \n elif center == \"MAP\":\n print(\"WARNING: MAP center may not be correctly implemented\")\n if verbose is True:\n print(\"Centering initial integral on psi_MAP\")\n rolled_sample_psi0, rolled_posterior = center_posterior_psi_MAP(posterior_obj, sample_psi0, posterior, verbose = verbose)\n \n \n # Ludo's method\n \"\"\"\n v0 = psiref\n dpsi = ((psi_grid)[1]-(psi_grid)[0])\n v0_new = total(pol_angle_diff(psi_grid,v0)*pdf) * dpsi\n v0 = v0_new + v0\n ok = 0\n while ok eq 0 do begin\n v0_new = total(pol_angle_diff(psi_grid,v0)*pdf) * dpsi\n if v0_new le dpsi then ok = 1\n v0 = v0_new + v0\n endwhile\n \"\"\"\n \n v0 = posterior_obj.psimeas\n v0 = polarization_tools.mod_halfpolar_center_0(v0)\n print(v0)\n psiMB_integrand = posterior_obj.normed_posterior*sample_psi0[:, np.newaxis]\n pdf = np.trapz(psiMB_integrand, dx = pdx, axis=0)\n \n # center pdf\n #psi0new = np.linspace(v0 - np.pi/2, v0 + np.pi/2, len(sample_psi0), endpoint=True)\n #pdf = np.interp(psi0new, sample_psi0, pdf, period=np.pi)\n #sample_psi0 = polarization_tools.mod_halfpolar_center_0(psi0new)\n \n # don't center pdf\n #pdf = sample_psi0\n \n # normalize pdf? doesn't matter.\n #pdf = pdf/np.sum(pdf)\n \n # psi_MB = 0.5 * atan2 ( total(sin(2*psi_grid) * pdf_psi ), total(cos(2*psi_grid) * pdf_psi ) )\n \n v0_new = np.sum(angle_residual(sample_psi0, v0, degrees=False)*pdf) * psidx\n print(v0_new)\n v0 = v0_new + v0\n print(v0)\n ok = 0\n i = 0\n while ok is 0:\n v0_new = np.sum(angle_residual(sample_psi0, v0, degrees=False)*pdf) * psidx\n if v0_new <= psidx:\n ok = 1\n v0 = v0_new + v0\n print(v0)\n i = i + 1\n print(i)\n print(\"psi0 determined ludo's way: {}\".format(v0))\n \n psi0_ludo_new = 0.5*np.arctan2(np.sum(np.sin(2*sample_psi0)*pdf), np.sum(np.cos(2*sample_psi0)*pdf))\n print(\"psi0 determined ludo's new way: {}\".format(psi0_ludo_new))\n \n #posterior = rolled_posterior\n #sample_psi0 = rolled_sample_psi0\n \n # Integrate over p\n #pMB1 = np.trapz(posterior, dx = psidx, axis = 0)\n #pMB1 = np.trapz(centered_posterior, psi0new, axis=0)\n \n # Integrate over psi\n #pMB = np.trapz(pMB1*sample_p0, dx = pdx)\n \n # Integrate over p\n #psiMB1 = np.trapz(posterior, dx = pdx, axis = 1)\n #psiMB1 = np.trapz(centered_posterior, dx = pdx, axis = 1)\n \n # Integrate over psi\n #psiMB = np.trapz(psiMB1*sample_psi0, dx = psidx)\n #psiMB = np.trapz(psiMB1*psi0new, psi0new)\n\n #test\n if psidx != psi0new[1] - psi0new[0]:\n print(\"Caution: old psidx = {}, new psidx = {}\".format(psidx, psi0new[1] - psi0new[0]))\n \n # testing ludo's method (maybe?)\n psi0new = polarization_tools.mod_halfpolar_center_0(psi0new)\n \n # psiMB integrand is psi0*B2D.\n psiMB_integrand = centered_posterior*psi0new[:, np.newaxis]\n psiMB_integrated_over_psi0 = posterior_obj.integrate_highest_dimension(psiMB_integrand, dx=psidx)\n psiMB = posterior_obj.integrate_highest_dimension(psiMB_integrated_over_psi0, dx=pdx)\n \n if verbose is True:\n print(\"initial pMB is {}\".format(pMB))\n print(\"initial psiMB is {}\".format(psiMB))\n \n # Set parameters for convergence\n psi_last = copy.copy(psinaive) #+ tol*2\n i = 0\n itertol = 3000#10#0\n if verbose is True:\n print(\"Using tolerance of {}\".format(tol))\n \n while (np.abs(angle_residual(psi_last, psiMB, degrees = False)) > tol) and (i < itertol):\n if verbose is True:\n print(\"Last: {}, now: {}, Convergence at 
{}\".format(psi_last, psiMB, np.abs(angle_residual(psi_last, psiMB, degrees = False))))\n print(\"i = {}\".format(i))\n print(\"centering on psi = {}\".format(psiMB))\n psi_last = copy.copy(psiMB) # to compare next round with\n \n psi0new, centered_posterior = center_posterior_psi_given(psi0new, centered_posterior, psiMB, verbose = verbose)\n # testing ludo's method (maybe?)\n psi0new = polarization_tools.mod_halfpolar_center_0(psi0new)\n \n #print(\"max psi0new: \", np.max(psi0new))\n\n psiMB_integrand = centered_posterior*psi0new[:, np.newaxis]\n psiMB_integrated_over_psi0 = posterior_obj.integrate_highest_dimension(psiMB_integrand, dx=psidx)\n psiMB = posterior_obj.integrate_highest_dimension(psiMB_integrated_over_psi0, dx=pdx)\n \n if verbose is True:\n print(\"Iterating. New psiMB is {}\".format(psiMB))\n i += 1\n \n #if i > itertol-1:\n # print(\"CAUTION: i is now {}. Index {} may not converge\".format(i, posterior_obj.hp_index))\n # print(\"psi initial = {}, psi last = {}, psiMB = {}\".format(psinaive, np.mod(psi_last, np.pi), np.mod(psiMB, np.pi)))\n # print(\"greater than tol: {}\".format(np.abs(angle_residual(np.mod(psi_last, np.pi), np.mod(psiMB, np.pi), degrees = False)))) \n \n #print(\"difference between original and final psi is {}\".format(angle_residual(psiMB, psinaive, degrees=False)))\n #print(\"difference between original and final p is {}\".format(pMB - pnaive))\n #if i > itertol-1:\n # pMB = copy.copy(pnaive)\n # psiMB = copy.copy(psinaive)\n # print(\"Iteration tolerance reached. setting naive values\")\n print(i) \n return pMB, psiMB, my_new_psi0#, pMB1, psiMB1, sample_psi0, sample_p0", "def updateReferenceAndNormalize(m, ref, thresh):\n ref = list(ref)\n thresh = max(thresh,0)\n totals = np.sum(m[:4,], axis=0)\n idx = 0;\n b2i = BaseToInt()\n for i in totals:\n if i < thresh:\n bases = np.array(b2i.getPositions(ref[idx].capitalize()))\n m[:4, idx] = 0\n m[bases, idx] = 1.0/len(bases)\n else:\n m[:4,idx] = m[:4,idx]/i\n\n #DEBUG CODE#\n if (m[:4,idx] > 1).any():\n print(i)\n print (m[:4,idx])\n print(totals)\n #END DEBUG CODE#\n \n idx += 1", "def noiseon(delay=2.0, reference=False, subarray=DEFAULT) :\n multiSubarray('noiseSource', subarray, True, reference)\n multiSubarray('rfPower', subarray, False)\n sleep(delay) # Temporary - to allow for delay in correlator", "def analyze(ctx, filename, trigger, threshold, eyecandy, ignore_extra=False,\n fix_missing=False, output=None, notebook=None,\n configuration=None, verbose=False, debug=False,processes=None,\n by_channel=False, integrity_filter=0.0, analog_idx=1,\n default_channel_map=False, dev=False):\n print(\"version 0.5.1\")\n init_logging(filename, processes, verbose, debug)\n #### FILEPATHS\n logger.debug(str(filename) + \" \" + str(os.path.curdir))\n if not os.path.isfile(filename):\n try:\n filename = glia.match_filename(filename,\"txt\")\n except:\n try:\n filename = glia.match_filename(filename,\"bxr\")\n except:\n filename = glia.match_filename(filename,\"csv\")\n \n data_directory, data_name = os.path.split(filename)\n name, extension = os.path.splitext(data_name)\n # ignore first of two extensions (if applicable)\n name, _ = os.path.splitext(name)\n analog_file = os.path.join(data_directory, name +'.analog')\n if not os.path.isfile(analog_file):\n # use 3brain analog file\n analog_file = os.path.join(data_directory, name +'.analog.brw')\n\n if not os.path.isfile(analog_file):\n # Tyler's format; used if files were split for example\n analog_file = os.path.join(data_directory, name +'.analog.npz')\n\n 
stimulus_file = os.path.join(data_directory, name + \".stim\")\n ctx.obj = {\"filename\": os.path.join(data_directory,name)}\n print(f\"Analyzing {name}\")\n\n if configuration!=None:\n with open(configuration, 'r') as f:\n user_config = yaml.safe_load(f)\n config.user_config = user_config\n if \"analog_calibration\" in user_config:\n config.analog_calibration = user_config[\"analog_calibration\"]\n if \"notebook\" in user_config:\n notebook = user_config[\"notebook\"]\n if \"eyecandy\" in user_config:\n eyecandy = user_config[\"eyecandy\"]\n if \"processes\" in user_config:\n processes = user_config[\"processes\"]\n if \"integrity_filter\" in user_config:\n integrity_filter = user_config[\"integrity_filter\"]\n if \"by_channel\" in user_config:\n by_channel = user_config[\"by_channel\"]\n\n if not notebook:\n notebook = glia.find_notebook(data_directory)\n\n lab_notebook = glia.open_lab_notebook(notebook)\n logger.info(f\"{name=}\")\n experiment_protocol = glia.get_experiment_protocol(lab_notebook, name)\n flicker_version = experiment_protocol[\"flickerVersion\"]\n\n\n #### LOAD STIMULUS\n try:\n metadata, stimulus_list, method = glia.read_stimulus(stimulus_file)\n ctx.obj[\"stimulus_list\"] = stimulus_list\n ctx.obj[\"metadata\"] = metadata\n # assert method=='analog-flicker'\n except:\n print(\"No .stim file found. Creating from .analog file.\".format(trigger))\n if flicker_version==0.3:\n metadata, stimulus_list = glia.create_stimuli(\n analog_file, stimulus_file, notebook, name, eyecandy, analog_idx, ignore_extra,\n config.analog_calibration, threshold)\n ctx.obj[\"stimulus_list\"] = stimulus_list\n ctx.obj[\"metadata\"] = metadata\n print('finished creating .stim file')\n elif trigger == \"ttl\":\n raise ValueError('not implemented')\n else:\n raise ValueError(\"invalid trigger: {}\".format(trigger))\n \n # look for .frames file\n try:\n lab_notebook_notype = glia.open_lab_notebook(notebook, convert_types=False)\n protocol_notype = glia.get_experiment_protocol(lab_notebook_notype,\n name)\n date_prefix = os.path.join(data_directory,\n protocol_notype['date'].replace(':','_'))\n frames_file = date_prefix + \"_eyecandy_frames.log\"\n video_file = date_prefix + \"_eyecandy.mkv\"\n frame_log = pd.read_csv(frames_file)\n frame_log = frame_log[:-1] # last frame is not encoded for some reason\n ctx.obj[\"frame_log\"] = frame_log\n ctx.obj[\"video_file\"] = video_file\n except Exception as e:\n extype, value, tb = sys.exc_info()\n traceback.print_exc()\n print(e)\n ctx.obj[\"frame_log\"] = None\n ctx.obj[\"video_file\"] = None\n print(\"Attempting to continue without frame log...\")\n \n #### LOAD SPIKES\n spyking_regex = re.compile('.*\\.result.hdf5$')\n eye = experiment_protocol['eye']\n experiment_n = experiment_protocol['experimentNumber']\n\n date = experiment_protocol['date'].date().strftime(\"%y%m%d\")\n\n retina_id = date+'_R'+eye+'_E'+experiment_n\n if extension == \".txt\":\n ctx.obj[\"units\"] = glia.read_plexon_txt_file(filename,retina_id, channel_map)\n elif extension == \".bxr\":\n if default_channel_map:\n channel_map_3brain = config.channel_map_3brain\n else:\n channel_map_3brain = None\n ctx.obj[\"units\"] = glia.read_3brain_spikes(filename, retina_id,\n channel_map_3brain, truncate=dev)\n elif extension == \".csv\":\n ctx.obj[\"units\"] = glia.read_csv_spikes(filename, retina_id) \n elif re.match(spyking_regex, filename):\n ctx.obj[\"units\"] = glia.read_spyking_results(filename)\n else:\n raise ValueError(f'could not read {extension=}. 
Is it a plexon or spyking circus file?')\n\n #### DATA MUNGING OPTIONS\n if integrity_filter>0.0:\n good_units = solid.filter_units_by_accuracy(\n ctx.obj[\"units\"], ctx.obj['stimulus_list'], integrity_filter)\n filter_good_units = glia.f_filter(lambda u,v: u in good_units)\n ctx.obj[\"units\"] = filter_good_units(ctx.obj[\"units\"])\n\n if by_channel:\n ctx.obj[\"units\"] = glia.combine_units_by_channel(ctx.obj[\"units\"])\n\n\n # prepare_output\n plot_directory = os.path.join(data_directory, name+\"-plots\")\n config.plot_directory = plot_directory\n\n os.makedirs(plot_directory, exist_ok=True)\n os.chmod(plot_directory, 0o777)\n\n if output == \"pdf\":\n logger.debug(\"Outputting pdf\")\n ctx.obj[\"retina_pdf\"] = PdfPages(glia.plot_pdf_path(plot_directory, \"retina\"))\n ctx.obj[\"unit_pdfs\"] = glia.open_pdfs(plot_directory, list(ctx.obj[\"units\"].keys()), Unit.name_lookup())\n # c connotes 'continuation' for continuation passing style\n ctx.obj[\"c_unit_fig\"] = partial(glia.add_to_unit_pdfs,\n unit_pdfs=ctx.obj[\"unit_pdfs\"])\n ctx.obj[\"c_retina_fig\"] = lambda x: ctx.obj[\"retina_pdf\"].savefig(x)\n\n elif output == \"png\":\n logger.debug(\"Outputting png\")\n ctx.obj[\"c_unit_fig\"] = glia.save_unit_fig\n ctx.obj[\"c_retina_fig\"] = glia.save_retina_fig\n os.makedirs(os.path.join(plot_directory,\"00-all\"), exist_ok=True)\n\n for unit_id in ctx.obj[\"units\"].keys():\n name = unit_id\n os.makedirs(os.path.join(plot_directory,name), exist_ok=True)", "def _preprocess_input(self, dataset):\n masker = self.masker or dataset.masker\n\n mask_img = masker.mask_img or masker.labels_img\n if isinstance(mask_img, str):\n mask_img = nib.load(mask_img)\n\n # Ensure that protected values are not included among _required_inputs\n assert \"aggressive_mask\" not in self._required_inputs.keys(), \"This is a protected name.\"\n\n if \"aggressive_mask\" in self.inputs_.keys():\n LGR.warning(\"Removing existing 'aggressive_mask' from Estimator.\")\n self.inputs_.pop(\"aggressive_mask\")\n\n # A dictionary to collect masked image data, to be further reduced by the aggressive mask.\n temp_image_inputs = {}\n\n for name, (type_, _) in self._required_inputs.items():\n if type_ == \"image\":\n # If no resampling is requested, check if resampling is required\n if not self.resample:\n check_imgs = {img: nib.load(img) for img in self.inputs_[name]}\n _check_same_fov(**check_imgs, reference_masker=mask_img, raise_error=True)\n imgs = list(check_imgs.values())\n else:\n # resampling will only occur if shape/affines are different\n # making this harmless if all img shapes/affines are the same as the reference\n imgs = [\n resample_to_img(nib.load(img), mask_img, **self._resample_kwargs)\n for img in self.inputs_[name]\n ]\n\n # input to NiFtiLabelsMasker must be 4d\n img4d = concat_imgs(imgs, ensure_ndim=4)\n\n # Mask required input images using either the dataset's mask or the estimator's.\n temp_arr = masker.transform(img4d)\n\n # An intermediate step to mask out bad voxels.\n # Can be dropped once PyMARE is able to handle masked arrays or missing data.\n nonzero_voxels_bool = np.all(temp_arr != 0, axis=0)\n nonnan_voxels_bool = np.all(~np.isnan(temp_arr), axis=0)\n good_voxels_bool = np.logical_and(nonzero_voxels_bool, nonnan_voxels_bool)\n\n data = masker.transform(img4d)\n\n temp_image_inputs[name] = data\n if \"aggressive_mask\" not in self.inputs_.keys():\n self.inputs_[\"aggressive_mask\"] = good_voxels_bool\n else:\n # Remove any voxels that are bad in any image-based inputs\n 
self.inputs_[\"aggressive_mask\"] = np.logical_or(\n self.inputs_[\"aggressive_mask\"],\n good_voxels_bool,\n )\n\n # Further reduce image-based inputs to remove \"bad\" voxels\n # (voxels with zeros or NaNs in any studies)\n if \"aggressive_mask\" in self.inputs_.keys():\n n_bad_voxels = (\n self.inputs_[\"aggressive_mask\"].size - self.inputs_[\"aggressive_mask\"].sum()\n )\n if n_bad_voxels:\n LGR.warning(\n f\"Masking out {n_bad_voxels} additional voxels. \"\n \"The updated masker is available in the Estimator.masker attribute.\"\n )\n\n for name, raw_masked_data in temp_image_inputs.items():\n self.inputs_[name] = raw_masked_data[:, self.inputs_[\"aggressive_mask\"]]", "def _recov_int_const(self, model, task): # TODO: document e_err_inconsist return\n\n gdml = GDMLPredict(\n model, max_processes=self._max_processes\n ) # , use_torch=self._use_torch\n n_train = task['E_train'].shape[0]\n\n R = task['R_train'].reshape(n_train, -1)\n\n E_pred, _ = gdml.predict(R)\n E_ref = np.squeeze(task['E_train'])\n\n e_fact = np.linalg.lstsq(\n np.column_stack((E_pred, np.ones(E_ref.shape))), E_ref, rcond=-1\n )[0][0]\n corrcoef = np.corrcoef(E_ref, E_pred)[0, 1]\n\n # import matplotlib.pyplot as plt\n # plt.plot(E_ref-np.mean(E_ref))\n # plt.plot(E_pred-np.mean(E_pred))\n # plt.show()\n\n if np.sign(e_fact) == -1:\n self.log.warning(\n 'The provided dataset contains gradients instead of force labels (flipped sign). Please correct!\\n'\n + ui.color_str('Note:', bold=True)\n + 'Note: The energy prediction accuracy of the model will thus neither be validated nor tested in the following steps!'\n )\n return None\n\n if corrcoef < 0.95:\n self.log.warning(\n 'Inconsistent energy labels detected!\\n'\n + 'The predicted energies for the training data are only weakly correlated with the reference labels (correlation coefficient {:.2f}) which indicates that the issue is most likely NOT just a unit conversion error.\\n\\n'.format(\n corrcoef\n )\n + ui.color_str('Troubleshooting tips:\\n', bold=True)\n + ui.wrap_indent_str(\n '(1) ',\n 'Verify the correct correspondence between geometries and labels in the provided dataset.',\n )\n + '\\n'\n + ui.wrap_indent_str(\n '(2) ', 'Verify the consistency between energy and force labels.'\n )\n + '\\n'\n + ui.wrap_indent_str(' - ', 'Correspondence correct?')\n + '\\n'\n + ui.wrap_indent_str(' - ', 'Same level of theory?')\n + '\\n'\n + ui.wrap_indent_str(' - ', 'Accuracy of forces (if numerical)?')\n + '\\n'\n + ui.wrap_indent_str(\n '(3) ',\n 'Is the training data spread too broadly (i.e. weakly sampled transitions between example clusters)?',\n )\n + '\\n'\n + ui.wrap_indent_str(\n '(4) ', 'Are there duplicate geometries in the training data?'\n )\n + '\\n'\n + ui.wrap_indent_str(\n '(5) ', 'Are there any corrupted data points (e.g. parsing errors)?'\n )\n + '\\n\\n'\n + ui.color_str('Note:', bold=True)\n + ' The energy prediction accuracy of the model will thus neither be validated nor tested in the following steps!'\n )\n return None\n\n if np.abs(e_fact - 1) > 1e-1:\n self.log.warning(\n 'Different scales in energy vs. 
force labels detected!\\n'\n + 'The integrated forces differ from the energy labels by factor ~{:.2f}, meaning that the trained model will likely fail to predict energies accurately.\\n\\n'.format(\n e_fact\n )\n + ui.color_str('Troubleshooting tips:\\n', bold=True)\n + ui.wrap_indent_str(\n '(1) ', 'Verify consistency of units in energy and force labels.'\n )\n + '\\n'\n + ui.wrap_indent_str(\n '(2) ',\n 'Is the training data spread too broadly (i.e. weakly sampled transitions between example clusters)?',\n )\n + '\\n\\n'\n + ui.color_str('Note:', bold=True)\n + ' The energy prediction accuracy of the model will thus neither be validated nor tested in the following steps!'\n )\n return None\n\n # Least squares estimate for integration constant.\n return np.sum(E_ref - E_pred) / E_ref.shape[0]", "def analyze(ctx, filename, trigger, threshold, eyecandy, ignore_extra=False,\n fix_missing=False, window_height=None, window_width=None, output=None, notebook=None,\n calibration=None, distance=None, verbose=False, debug=False,processes=None,\n by_channel=False, integrity_filter=0.0): \n #### FILEPATHS\n if not os.path.isfile(filename):\n filename = match_filename(filename)\n data_directory, data_name = os.path.split(filename)\n name, extension = os.path.splitext(data_name)\n analog_file = os.path.join(data_directory, name +'.analog')\n stimulus_file = os.path.join(data_directory, name + \".stimulus\")\n ctx.obj = {\"filename\": os.path.join(data_directory,name)}\n\n if not notebook:\n notebook = find_notebook(data_directory)\n\n #### LOGGING CONFIGURATION\n fh = logging.FileHandler(os.path.join(data_directory,name + '.log'))\n fh.setLevel(logging.DEBUG)\n ch = logging.StreamHandler()\n if verbose:\n ch.setLevel(logging.INFO)\n # tracemalloc.start()\n elif debug:\n ch.setLevel(logging.DEBUG)\n\n else:\n ch.setLevel(logging.WARNING)\n if processes!=None:\n config.processes = processes\n formatter = logging.Formatter('%(asctime)s %(levelname)s - %(message)s', '%H:%M:%S')\n fh.setFormatter(formatter)\n ch.setFormatter(formatter)\n logger.addHandler(fh)\n logger.addHandler(ch)\n logger.info(\"Verbose logging on\")\n\n lab_notebook = glia.open_lab_notebook(notebook)\n experiment_protocol = glia.get_experiment_protocol(lab_notebook, name)\n flicker_version = experiment_protocol[\"flickerVersion\"]\n\n\n #### LOAD STIMULUS\n try:\n ctx.obj[\"stimulus_list\"] = glia.load_stimulus(stimulus_file)\n except OSError:\n print(\"No .stimulus file found. Attempting to create from .analog file.\".format(trigger))\n if flicker_version==0.3:\n ctx.obj[\"stimulus_list\"] = glia.create_stimulus_list(\n analog_file, stimulus_file, notebook, name, eyecandy, ignore_extra,\n calibration, distance, threshold)\n print('finished creating stimulus list')\n elif trigger == \"ttl\":\n raise ValueError('not implemented')\n else:\n raise ValueError(\"invalid trigger: {}\".format(trigger))\n\n #### LOAD SPIKES\n spyking_regex = re.compile('.*\\.result.hdf5$')\n eye = experiment_protocol['eye']\n experiment_n = experiment_protocol['experimentNumber']\n\n date = experiment_protocol['date'].date().strftime(\"%y%m%d\")\n\n retina_id = date+'_R'+eye+'_E'+experiment_n\n if extension == \".txt\":\n ctx.obj[\"units\"] = glia.read_plexon_txt_file(filename,retina_id, channel_map)\n elif re.match(spyking_regex, filename):\n ctx.obj[\"units\"] = glia.read_spyking_results(filename)\n else:\n raise ValueError('could not read {}. 
Is it a plexon or spyking circus file?')\n\n #### DATA MUNGING OPTIONS\n if integrity_filter>0.0:\n good_units = solid.filter_units_by_accuracy(\n ctx.obj[\"units\"], ctx.obj['stimulus_list'], integrity_filter)\n filter_good_units = glia.f_filter(lambda u,v: u in good_units)\n ctx.obj[\"units\"] = filter_good_units(ctx.obj[\"units\"])\n\n if by_channel:\n ctx.obj[\"units\"] = glia.combine_units_by_channel(ctx.obj[\"units\"])\n\n\n # prepare_output\n plot_directory = os.path.join(data_directory, name+\"-plots\")\n config.plot_directory = plot_directory\n\n os.makedirs(plot_directory, exist_ok=True)\n os.chmod(plot_directory, 0o777)\n\n if output == \"pdf\":\n logger.debug(\"Outputting pdf\")\n ctx.obj[\"retina_pdf\"] = PdfPages(glia.plot_pdf_path(plot_directory, \"retina\"))\n ctx.obj[\"unit_pdfs\"] = glia.open_pdfs(plot_directory, list(ctx.obj[\"units\"].keys()), Unit.name_lookup())\n # c connotes 'continuation'\n ctx.obj[\"c_unit_fig\"] = partial(glia.add_to_unit_pdfs,\n unit_pdfs=ctx.obj[\"unit_pdfs\"])\n ctx.obj[\"c_retina_fig\"] = lambda x: ctx.obj[\"retina_pdf\"].savefig(x)\n \n elif output == \"png\":\n logger.debug(\"Outputting png\")\n ctx.obj[\"c_unit_fig\"] = glia.save_unit_fig\n ctx.obj[\"c_retina_fig\"] = glia.save_retina_fig\n os.makedirs(os.path.join(plot_directory,\"00-all\"), exist_ok=True)\n\n for unit_id in ctx.obj[\"units\"].keys():\n name = unit_id\n os.makedirs(os.path.join(plot_directory,name), exist_ok=True)", "def fit_reference(self, img):\n if self.refzone:\n meanfit, fit = self._get_reference_fit(img)\n img = self._overlay_ref_fit(img, meanfit, fit)\n self._set_offset(*meanfit)\n\n return img", "def test_properlyAveraged(self):\n r0 = self.singleReader\n r1 = DetectorReader(DET_FILES['bwr1'])\n r1.read()\n for detName in self.sampler.detectors:\n expectedTallies, expectedErrors = (_getExpectedAverages(\n r0.detectors[detName], r1.detectors[detName]))\n uniq = self.sampler.detectors[detName]\n assert_allclose(uniq.tallies, expectedTallies, err_msg='tallies',\n **TOLERANCES['tallies'])\n assert_allclose(uniq.errors, expectedErrors, err_msg='errrors',\n **TOLERANCES['errors'])", "def _process_references0(self, references):\n if \"zarr_consolidated_format\" in references:\n # special case for Ike prototype\n references = _unmodel_hdf5(references)\n self.references = references", "def test_allow_effect_during_refractory(self):\n np.random.seed(6564)\n f = 0.5\n self.syn_dense.W = np.random.randn(self.M, self.N)\n self.syn_dense.f_nmda = f\n self.syn_dense.change_during_ref = True\n\n self.T.active_state = False\n\n sim = simulation.Simulation(self.G, self.T, self.syn_dense, dt=self.dt)\n sim.run(self.t_max)\n\n self.assertGreater(np.linalg.norm(self.T.i_ampa), 0.1)\n self.assertGreater(np.linalg.norm(self.T.i_nmda), 0.1)", "def infer_mean(k, stimulus, ref, C):\n target = np.zeros(C.shape[0])\n for j in range(C.shape[0]):\n parameters = np.hstack([k[:4], k[4 + j]])\n p = model(parameters, stimulus,\n ref[:, j].reshape(ref.shape[0], 1),\n C[j, j])\n target[j] = np.mean(ref[:, j] * p) / np.mean(p)\n return target" ]
[ "0.72033346", "0.51534164", "0.51021534", "0.5064311", "0.50264406", "0.50084466", "0.49690974", "0.49434143", "0.4942057", "0.4890532", "0.4875339", "0.4820547", "0.4813191", "0.47748223", "0.47736692", "0.47729418", "0.47667706", "0.4733884", "0.47221825", "0.47158694", "0.4676857", "0.46721825", "0.46698228", "0.4633652", "0.4631414", "0.4626771", "0.46173108", "0.4603661", "0.46029362", "0.45857465" ]
0.80031914
0
Detect bad channels and estimate the robust reference signal. This function implements the functionality of the `robustReference` function as part of the PREP pipeline on mne raw object.
def robust_reference(self): raw = self.raw.copy() raw._data = removeTrend(raw.get_data(), sample_rate=self.sfreq) # Determine unusable channels and remove them from the reference channels noisy_detector = NoisyChannels(raw, do_detrend=False) noisy_detector.find_all_bads(ransac=self.ransac) self.noisy_channels_original = { "bad_by_nan": noisy_detector.bad_by_nan, "bad_by_flat": noisy_detector.bad_by_flat, "bad_by_deviation": noisy_detector.bad_by_deviation, "bad_by_hf_noise": noisy_detector.bad_by_hf_noise, "bad_by_correlation": noisy_detector.bad_by_correlation, "bad_by_ransac": noisy_detector.bad_by_ransac, "bad_all": noisy_detector.get_bads(), } self.noisy_channels = self.noisy_channels_original.copy() logger.info("Bad channels: {}".format(self.noisy_channels)) self.unusable_channels = _union( noisy_detector.bad_by_nan, noisy_detector.bad_by_flat ) # unusable_channels = _union(unusable_channels, noisy_detector.bad_by_SNR) self.reference_channels = _set_diff( self.reference_channels, self.unusable_channels ) # Get initial estimate of the reference by the specified method signal = raw.get_data() * 1e6 self.reference_signal = ( np.nanmedian(raw.get_data(picks=self.reference_channels), axis=0) * 1e6 ) reference_index = [ self.ch_names_eeg.index(ch) for ch in self.reference_channels ] signal_tmp = self.remove_reference( signal, self.reference_signal, reference_index ) # Remove reference from signal, iteratively interpolating bad channels raw_tmp = raw.copy() iterations = 0 noisy_channels_old = [] max_iteration_num = 4 while True: raw_tmp._data = signal_tmp * 1e-6 noisy_detector = NoisyChannels(raw_tmp) noisy_detector.find_all_bads(ransac=self.ransac) self.noisy_channels["bad_by_nan"] = _union( self.noisy_channels["bad_by_nan"], noisy_detector.bad_by_nan ) self.noisy_channels["bad_by_flat"] = _union( self.noisy_channels["bad_by_flat"], noisy_detector.bad_by_flat ) self.noisy_channels["bad_by_deviation"] = _union( self.noisy_channels["bad_by_deviation"], noisy_detector.bad_by_deviation ) self.noisy_channels["bad_by_hf_noise"] = _union( self.noisy_channels["bad_by_hf_noise"], noisy_detector.bad_by_hf_noise ) self.noisy_channels["bad_by_correlation"] = _union( self.noisy_channels["bad_by_correlation"], noisy_detector.bad_by_correlation, ) self.noisy_channels["bad_by_ransac"] = _union( self.noisy_channels["bad_by_ransac"], noisy_detector.bad_by_ransac ) self.noisy_channels["bad_all"] = _union( self.noisy_channels["bad_all"], noisy_detector.get_bads() ) logger.info("Bad channels: {}".format(self.noisy_channels)) if ( iterations > 1 and ( not self.noisy_channels["bad_all"] or set(self.noisy_channels["bad_all"]) == set(noisy_channels_old) ) or iterations > max_iteration_num ): break noisy_channels_old = self.noisy_channels["bad_all"].copy() if raw_tmp.info["nchan"] - len(self.noisy_channels["bad_all"]) < 2: raise ValueError( "RobustReference:TooManyBad " "Could not perform a robust reference -- not enough good channels" ) if self.noisy_channels["bad_all"]: raw_tmp._data = signal * 1e-6 raw_tmp.info["bads"] = self.noisy_channels["bad_all"] raw_tmp.interpolate_bads() signal_tmp = raw_tmp.get_data() * 1e6 else: signal_tmp = signal self.reference_signal = ( np.nanmean(raw_tmp.get_data(picks=self.reference_channels), axis=0) * 1e6 ) signal_tmp = self.remove_reference( signal, self.reference_signal, reference_index ) iterations = iterations + 1 logger.info("Iterations: {}".format(iterations)) logger.info("Robust reference done") return self.noisy_channels, self.reference_signal
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def perform_reference(self):\n # Phase 1: Estimate the true signal mean with robust referencing\n self.robust_reference()\n if self.noisy_channels[\"bad_all\"]:\n self.raw.info[\"bads\"] = self.noisy_channels[\"bad_all\"]\n self.raw.interpolate_bads()\n self.reference_signal = (\n np.nanmean(self.raw.get_data(picks=self.reference_channels), axis=0) * 1e6\n )\n rereferenced_index = [\n self.ch_names_eeg.index(ch) for ch in self.rereferenced_channels\n ]\n self.EEG = self.remove_reference(\n self.EEG, self.reference_signal, rereferenced_index\n )\n\n # Phase 2: Find the bad channels and interpolate\n self.raw._data = self.EEG * 1e-6\n noisy_detector = NoisyChannels(self.raw)\n noisy_detector.find_all_bads(ransac=self.ransac)\n\n # Record Noisy channels and EEG before interpolation\n self.bad_before_interpolation = noisy_detector.get_bads(verbose=True)\n self.EEG_before_interpolation = self.EEG.copy()\n\n bad_channels = _union(self.bad_before_interpolation, self.unusable_channels)\n self.raw.info[\"bads\"] = bad_channels\n self.raw.interpolate_bads()\n reference_correct = (\n np.nanmean(self.raw.get_data(picks=self.reference_channels), axis=0) * 1e6\n )\n self.EEG = self.raw.get_data() * 1e6\n self.EEG = self.remove_reference(\n self.EEG, reference_correct, rereferenced_index\n )\n # reference signal after interpolation\n self.reference_signal_new = self.reference_signal + reference_correct\n # MNE Raw object after interpolation\n self.raw._data = self.EEG * 1e-6\n\n # Still noisy channels after interpolation\n self.interpolated_channels = bad_channels\n noisy_detector = NoisyChannels(self.raw)\n noisy_detector.find_all_bads(ransac=self.ransac)\n self.still_noisy_channels = noisy_detector.get_bads()\n self.raw.info[\"bads\"] = self.still_noisy_channels\n return self", "def detect_badchannels(raw, picks, ref_meg=\"auto\", significance_level=0.05):\n\n gesd_args = {'alpha': significance_level}\n\n if (picks == \"mag\") or (picks == \"grad\"):\n chinds = mne.pick_types(raw.info, meg=picks, ref_meg=ref_meg, exclude='bads')\n elif picks == \"meg\":\n chinds = mne.pick_types(raw.info, meg=True, ref_meg=ref_meg, exclude='bads')\n elif picks == \"eeg\":\n chinds = mne.pick_types(raw.info, eeg=True, ref_meg=ref_meg, exclude='bads')\n elif picks == \"eog\":\n chinds = mne.pick_types(raw.info, eog=True, ref_meg=ref_meg, exclude='bads')\n elif picks == \"ecg\":\n chinds = mne.pick_types(raw.info, ecg=True, ref_meg=ref_meg, exclude='bads')\n else:\n raise NotImplementedError(f\"picks={picks} not available.\")\n ch_names = np.array(raw.ch_names)[chinds]\n\n bdinds = sails.utils.detect_artefacts(\n raw.get_data(picks=chinds),\n axis=0,\n reject_mode=\"dim\",\n ret_mode=\"bad_inds\",\n gesd_args=gesd_args,\n )\n\n s = \"Modality {0} - {1}/{2} channels rejected ({3:02f}%)\"\n pc = (bdinds.sum() / len(bdinds)) * 100\n logger.info(s.format(picks, bdinds.sum(), len(bdinds), pc))\n\n # concatenate newly found bads to existing bads\n if np.any(bdinds):\n raw.info[\"bads\"].extend(list(ch_names[np.where(bdinds)[0]]))\n\n return raw", "def bipolar_reference(raw, dist_thresh=0.01, verbose=True):\n raw.load_data()\n ch_names = [name.replace(' ', '') for name in raw.ch_names] # no spaces\n bipolar_names = list()\n locs = list()\n data = list()\n for i, ch in enumerate(ch_names):\n elec_name = ''.join([letter for letter in ch if\n not letter.isdigit()]).rstrip()\n number = ''.join([letter for letter in ch if\n letter.isdigit()]).rstrip()\n pair = f'{elec_name}{int(number) + 1}'\n if pair not in ch_names:\n continue\n 
j = ch_names.index(pair)\n loc = raw.info['chs'][i]['loc'][:3]\n loc2 = raw.info['chs'][j]['loc'][:3]\n if np.linalg.norm(loc - loc2) > dist_thresh:\n continue\n data.append(raw._data[i] - raw._data[j])\n locs.append((loc + loc2) / 2)\n bipolar_names.append(f'{ch}-{pair}')\n if verbose:\n print(f'Bipolar referencing {ch} and {pair}')\n bipolar_info = mne.create_info(bipolar_names, raw.info['sfreq'], 'seeg')\n for loc, ch in zip(locs, bipolar_info['chs']):\n ch['loc'][:3] = loc\n return mne.io.RawArray(np.array(data), bipolar_info, raw.first_samp)", "def refere(eeg, channels, mode='contralateral'):\n\tbipolar_map = {'Fp1':'Fp2', 'Fp2':'Fp2', 'F3':'F4', 'F4':'F4', 'C3':'C4', 'C4':'C4', 'T3':'T4', 'T4':'T4', 'P3':'P4', 'P4':'P4', 'O1':'O2', 'O2':'O2'}\n\tif mode not in ['monopolar', 'contralateral', 'bipolar', 'linked', 'average']:\n\t\tprint 'WARNING - refere(): parameter \"mode\" can only be \"monopolar\", \"contralateral\", \"bipolar\" or \"linked\". Using \"contralateral\"!'\n\t\tmode = 'contralateral'\n\tif mode == 'linked':\t\t\n\t\treference = (eeg[:,channels.index('A1')] + eeg[:,channels.index('A2')])/2.\n\tif mode == 'average':\n\t\treference = np.zeros(len(eeg), dtype=np.float32)\n\t\tchcounter = 0\n\t\tfor channel in range(len(channels)):\n\t\t\tif (channels[channel] in EEG_CHANNELS):\n\t\t\t\treference += eeg[:, channel]\n\t\t\t\tchcounter += 1\n\t\treference /= chcounter\n\tfor channel in range(len(channels)):\n\t\tif (channels[channel] in EEG_CHANNELS):\n\t\t\t# mindenkit referalunk kiveve magukat a referencia csatornakat\n\t\t\tif mode == 'contralateral':\n\t\t\t\tif (channels[channel] in ['Fp2', 'F4', 'C4', 'T4', 'P4', 'O2']):\n\t\t\t\t\tref_channel = channels.index('A1')\n\t\t\t\telif (channels[channel] in ['Fp1', 'F3', 'C3', 'T3', 'P3', 'O1']):\n\t\t\t\t\tref_channel = channels.index('A2')\n\t\t\t\telse:\n\t\t\t\t\tprint \"Error: what kind of channel is this: \", channels[channel], \" cannot reference!!!!\"\n\t\t\t\treference = eeg[:, ref_channel]\n\t\t\t\tprint \"channel \", channels[channel], \" referenced to \", channels[ref_channel]\n\t\t\tif mode == 'bipolar':\n\t\t\t\tref_channel = channels.index(bipolar_map[channels[channel]])\n\t\t\t\treference = eeg[:, ref_channel]\n\t\t\t\tprint \"channel \", channels[channel], \" referenced to \", channels[ref_channel]\n\t\t\teeg[:, channel] -= reference", "def sim12_r_reference(datafiles, tolerances):\n catalog = Table.read(datafiles / 'sim12' / 'ref' / 'sim12_r_reference.fits')\n bright_filter = catalog['FLUX_ISO'] / catalog['FLUXERR_ISO'] >= tolerances['signal_to_noise']\n return catalog[bright_filter]", "def detect_bad_channels_ibl(\n raw,\n fs,\n psd_hf_threshold,\n dead_channel_thr=-0.5,\n noisy_channel_thr=1.0,\n outside_channel_thr=-0.75,\n n_neighbors=11,\n nyquist_threshold=0.8,\n welch_window_ms=0.3,\n):\n _, nc = raw.shape\n raw = raw - np.mean(raw, axis=0)[np.newaxis, :]\n nperseg = int(welch_window_ms * fs / 1000)\n import scipy.signal\n\n fscale, psd = scipy.signal.welch(raw, fs=fs, axis=0, window=\"hann\", nperseg=nperseg)\n\n # compute similarities\n ref = np.median(raw, axis=1)\n xcorr = np.sum(raw * ref[:, np.newaxis], axis=0) / np.sum(ref**2)\n\n # compute coherence\n xcorr_neighbors = detrend(xcorr, n_neighbors)\n xcorr_distant = xcorr - detrend(xcorr, n_neighbors) - 1\n\n # make recommendation\n psd_hf = np.mean(psd[fscale > (fs / 2 * nyquist_threshold), :], axis=0)\n\n ichannels = np.zeros(nc, dtype=int)\n idead = np.where(xcorr_neighbors < dead_channel_thr)[0]\n inoisy = np.where(np.logical_or(psd_hf > 
psd_hf_threshold, xcorr_neighbors > noisy_channel_thr))[0]\n\n ichannels[idead] = 1\n ichannels[inoisy] = 2\n\n # the channels outside of the brains are the contiguous channels below the threshold on the trend coherency\n # the chanels outide need to be at either extremes of the probe\n ioutside = np.where(xcorr_distant < outside_channel_thr)[0]\n if ioutside.size > 0 and (ioutside[-1] == (nc - 1) or ioutside[0] == 0):\n a = np.cumsum(np.r_[0, np.diff(ioutside) - 1])\n ioutside = ioutside[a == np.max(a)]\n ichannels[ioutside] = 3\n\n return ichannels", "def calibrate(raw_data, white_reference, dark_reference):\n # Auto-increment device\n params.device += 1\n\n # Collect the number of wavelengths present\n num_bands = len(white_reference.wavelength_dict)\n den = white_reference.array_data - dark_reference.array_data\n\n # Calibrate using reflectance = (raw data - dark reference) / (white reference - dark reference)\n output_num = []\n for i in range(0, raw_data.lines):\n ans = raw_data.array_data[i,].astype(np.float16) - dark_reference.array_data\n output_num.append(ans)\n num = np.stack(output_num, axis=2)\n output_calibrated = []\n for i in range(0, raw_data.lines):\n ans1 = raw_data.array_data[i,] / den\n output_calibrated.append(ans1)\n\n # Reshape into hyperspectral datacube\n scalibrated = np.stack(output_calibrated, axis=2)\n calibrated_array = np.transpose(scalibrated[0], (1, 0, 2))\n calibrated_array[np.where(calibrated_array < 0)] = 0\n\n # Find array min and max values\n max_pixel = float(np.amax(calibrated_array))\n min_pixel = float(np.amin(calibrated_array))\n\n # Make a new class instance with the calibrated hyperspectral image\n calibrated = Spectral_data(array_data=calibrated_array, max_wavelength=raw_data.max_wavelength,\n min_wavelength=raw_data.min_wavelength, max_value=max_pixel, min_value=min_pixel,\n d_type=raw_data.d_type,\n wavelength_dict=raw_data.wavelength_dict, samples=raw_data.samples,\n lines=raw_data.lines, interleave=raw_data.interleave,\n wavelength_units=raw_data.wavelength_units, array_type=raw_data.array_type,\n pseudo_rgb=None, filename=None, default_bands=raw_data.default_bands)\n\n # Make pseudo-rgb image for the calibrated image\n calibrated.pseudo_rgb = _make_pseudo_rgb(spectral_array=calibrated)\n\n if params.debug == \"plot\":\n # Gamma correct pseudo_rgb image\n plot_image(calibrated.pseudo_rgb)\n elif params.debug == \"print\":\n print_image(calibrated.pseudo_rgb, os.path.join(params.debug_outdir, str(params.device) + \"_calibrated_rgb.png\"))\n\n return calibrated", "def ccm_unred(wave, flux, ebv, r_v=\"\"):\n import numpy as np\n wave = np.array(wave, float)\n flux = np.array(flux, float)\n \n if wave.size != flux.size: raise TypeError, 'ERROR - wave and flux vectors must be the same size'\n \n if not bool(r_v): r_v = 3.1\n \n x = 10000.0/wave\n npts = wave.size\n a = np.zeros(npts, float)\n b = np.zeros(npts, float)\n \n ###############################\n #Infrared\n \n good = np.where( (x > 0.3) & (x < 1.1) )\n a[good] = 0.574 * x[good]**(1.61)\n b[good] = -0.527 * x[good]**(1.61)\n \n ###############################\n # Optical & Near IR\n \n good = np.where( (x >= 1.1) & (x < 3.3) )\n y = x[good] - 1.82\n \n c1 = np.array([ 1.0 , 0.104, -0.609, 0.701, 1.137, \\\n -1.718, -0.827, 1.647, -0.505 ])\n c2 = np.array([ 0.0, 1.952, 2.908, -3.989, -7.985, \\\n 11.102, 5.491, -10.805, 3.347 ] )\n \n a[good] = np.polyval(c1[::-1], y)\n b[good] = np.polyval(c2[::-1], y)\n \n ###############################\n # Mid-UV\n \n good = np.where( (x >= 3.3) & 
(x < 8) )\n y = x[good]\n F_a = np.zeros(np.size(good),float)\n F_b = np.zeros(np.size(good),float)\n good1 = np.where( y > 5.9 )\n \n if np.size(good1) > 0:\n y1 = y[good1] - 5.9\n F_a[ good1] = -0.04473 * y1**2 - 0.009779 * y1**3\n F_b[ good1] = 0.2130 * y1**2 + 0.1207 * y1**3\n \n a[good] = 1.752 - 0.316*y - (0.104 / ( (y-4.67)**2 + 0.341 )) + F_a\n b[good] = -3.090 + 1.825*y + (1.206 / ( (y-4.62)**2 + 0.263 )) + F_b\n \n ###############################\n # Far-UV\n \n good = np.where( (x >= 8) & (x <= 11) )\n y = x[good] - 8.0\n c1 = [ -1.073, -0.628, 0.137, -0.070 ]\n c2 = [ 13.670, 4.257, -0.420, 0.374 ]\n a[good] = np.polyval(c1[::-1], y)\n b[good] = np.polyval(c2[::-1], y)\n \n # Applying Extinction Correction\n \n a_v = r_v * ebv\n a_lambda = a_v * (a + b/r_v)\n \n funred = flux * 10.0**(0.4*a_lambda) \n \n return funred", "def scoreCirc_CmosVoltageReference_2(circuit, gen, indi, MOEAMODE):\n \n if debug > 2:\n print \"\\t\\tG_\" + str(gen) + \"_I_\" + str(indi)\n #----------#\n VREF = 1.5\n #----------#\n\n #---------------------------------------------------------BigMatrix stuff, check short-circuits, matrix density, matrix identifier (obsolete) \n FullBigCircuitMatrix = copy(circuit.fullRedundancyMatrix)\n OcSc, IcNc, SelfConnElm = checkConnsConnected(FullBigCircuitMatrix) #Outer connections Short cut, Inner connections Not connected\n #--------------------------------------------------------- \n \n score = np.array([0,0,0], dtype=\"float64\") if MOEAMODE == 1 else 0\n \n score += 2e4*np.exp(OcSc)\n results = None\n if OcSc > 1:\n score += 1e4*np.exp(OcSc)\n else:\n #----------------------------------------------------------Try to make netlist and evaluate the individual\n makeNetlist(circuit, gen, indi, FullBigCircuitMatrix)\n results = runme2.evaluateCmosVoltageRef(gen, indi)\n #----------------------------------------------------------Start of results analysis and objectives creation\n disfCount = 0\n \n #Vdd sweeps on 3 temperatures - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # -20 deg\n vdd_sweep_scale = np.array(results['vout_vdd_scale']['nominal'], dtype=float)\n vdd_sweep_t1 = np.array(results['vout_vdd_temp1']['nominal'], dtype=float) #This line changes Nones to np.nans\n # if measurement is empty \n if np.any(np.isnan(vdd_sweep_t1)):\n disfCount = disfCount + 1\n vdd_s_t1 = 0\n vdd_s_t1_d = 0\n else:\n x = np.median(vdd_sweep_t1)\n vdd_s_t1 = abs(x - VREF) #if x > VREF else 0\n vdd_s_t1_d = np.max(vdd_sweep_t1) - np.min(vdd_sweep_t1)\n \n \n # 25 deg\n vdd_sweep_scale = np.array(results['vout_vdd_scale']['nominal'], dtype=float)\n vdd_sweep_t2 = np.array(results['vout_vdd_temp2']['nominal'], dtype=float) #This line changes Nones to np.nans\n # if measurement is empty \n if np.any(np.isnan(vdd_sweep_t2)):\n disfCount = disfCount + 1\n vdd_s_t2 = 0\n vdd_s_t2_d = 0\n else:\n x = np.median(vdd_sweep_t2)\n vdd_s_t2 = abs(x - VREF) #if x > VREF else 0\n vdd_s_t2_d = np.max(vdd_sweep_t2) - np.min(vdd_sweep_t2) \n \n # 120 deg\n vdd_sweep_scale = np.array(results['vout_vdd_scale']['nominal'], dtype=float)\n vdd_sweep_t3 = np.array(results['vout_vdd_temp3']['nominal'], dtype=float) #This line changes Nones to np.nans\n # if measurement is empty \n if np.any(np.isnan(vdd_sweep_t3)):\n disfCount = disfCount + 1\n vdd_s_t3 = 0\n vdd_s_t3_d = 0\n else:\n x = np.median(vdd_sweep_t3)\n vdd_s_t3 = abs(x - VREF) #if x > VREF else 0\n vdd_s_t3_d = np.max(vdd_sweep_t3) - np.min(vdd_sweep_t3) \n \n #Vdd sweeps on 3 loads - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - -\n # 10e6 Ohm\n vdd_sweep_scale = np.array(results['vout_vdd_res_scale']['nominal'], dtype=float)\n vdd_sweep_r1 = np.array(results['vout_vdd_res1']['nominal'], dtype=float) #This line changes Nones to np.nans\n # if measurement is empty \n if np.any(np.isnan(vdd_sweep_r1)):\n disfCount = disfCount + 1\n vdd_s_r1 = 0\n vdd_s_r1_d = 0\n else:\n x = np.median(vdd_sweep_r1)\n vdd_s_r1 = abs(x - VREF) #if x > VREF else 0\n vdd_s_r1_d = np.max(vdd_sweep_r1) - np.min(vdd_sweep_r1)\n \n # 10e4 Ohm\n vdd_sweep_scale = np.array(results['vout_vdd_res_scale']['nominal'], dtype=float)\n vdd_sweep_r2 = np.array(results['vout_vdd_res2']['nominal'], dtype=float) #This line changes Nones to np.nans\n # if measurement is empty \n if np.any(np.isnan(vdd_sweep_r2)):\n disfCount = disfCount + 1\n vdd_s_r2 = 0\n vdd_s_r2_d = 0\n else:\n x = np.median(vdd_sweep_r2)\n vdd_s_r2 = abs(x - VREF) #if x > VREF else 0\n vdd_s_r2_d = np.max(vdd_sweep_r2) - np.min(vdd_sweep_r2) \n \n # 10e2 Ohm\n vdd_sweep_scale = np.array(results['vout_vdd_res_scale']['nominal'], dtype=float)\n vdd_sweep_r3 = np.array(results['vout_vdd_res3']['nominal'], dtype=float) #This line changes Nones to np.nans\n # if measurement is empty \n if np.any(np.isnan(vdd_sweep_r3)):\n disfCount = disfCount + 1\n vdd_s_r3 = 0\n vdd_s_r3_d = 0\n else:\n x = np.median(vdd_sweep_r3)\n vdd_s_r3 = abs(x - VREF) #if x > VREF else 0\n vdd_s_r3_d = np.max(vdd_sweep_r3) - np.min(vdd_sweep_r3) \n \n power = results['power']['nominal']\n if np.isnan(np.array(power, dtype=float)):\n disfCount = disfCount + 1\n powe = 0\n else:\n powe = power\n \n psrr = results['psrr']['nominal']\n# if np.isnan(np.array(psrr, dtype=float)):\n# disfCount = disfCount + 1\n# psr = 0\n# else:\n# psr = 1.0/psrr #abs(90 - psrr) if psrr < 90 else 0 #tole kot objective ni ok. 
ker je opravljena meritev samo pri vdd=15 je to precej stala.\n\n\n #----------------------------------------------------------Score function SINGLE-OBJECTIVE\n if MOEAMODE == 0:\n score =(vdd_s_t1 + 5*vdd_s_t1_d +\n\t 2*vdd_s_t2 + 2*vdd_s_t2_d +\n\t vdd_s_t3 + 5*vdd_s_t3_d +\n\t #vdd_s_r1 + 2*vdd_s_r1_d +\n\t #vdd_s_r2 + 2*vdd_s_r2_d + \n\t #vdd_s_r3 + 2*vdd_s_r3_d + \n\t (100*powe)\n )\n if disfCount > 0:\n\tscore = 0 + np.exp(disfCount) * 1e3\n\t\n #----------------------------------------------------------Score function MULTI-OBJECTIVE\t\n else: #MOEAMODE == 1:\n oMediana = vdd_s_t1 + vdd_s_t2 + vdd_s_t3\n oPsrr = vdd_s_t1_d + vdd_s_t2_d + vdd_s_t3_d\t#DC rejection\n #oPsrr = psr\n oP = powe\n\t\t\t\t\t #add constraints\n score = (np.array([oMediana, oPsrr, oP]) \t+ (oMediana if oMediana > 4 else 0) + \n\t\t\t\t\t\t#+ (oPsrr*1000 if oPsrr > 1.0/40 else 0) +\n\t\t\t\t\t\t+ (oPsrr if oPsrr > 3 else 0) +\n\t\t\t\t\t\t+ (oP if oP > 1e-1 else 0)\n )\n if disfCount > 0:\n\tscore = (np.array([0,0,0])+np.exp(disfCount) * 1e3) + random.randint(0, 200)\n\n #-------------------------------------------------------------------\n if debug > 2: \n print \"\\t\\tG_\" + str(gen) + \"_I_\" + str(indi) + \" SCORE:\", score\n\n filename = \"g_\" + str(gen) + \"_i_\" + str(indi) + \"_subckt.cir\"\n os.remove(filename) #cleanup current subcircuit\n \n \n # TRIGGER STOP SIGNAL if:\n if (vdd_s_t2 <= 0.001 and \n\tpsrr >= 80 and \n\tpowe <= 1e-5):\n globalVars.DONE = 1 # End evolution, feasible solution evolved.\n \n\n return score, results", "def sim12_g_reference(datafiles, tolerances):\n catalog = Table.read(datafiles / 'sim12' / 'ref' / 'sim12_g_reference.fits')\n bright_filter = catalog['FLUX_ISO'] / catalog['FLUXERR_ISO'] >= tolerances['signal_to_noise']\n return catalog[bright_filter]", "def detectByRefAdv(this, **kargs):\n\t\t\n\t\t# Arguments\n\t\tseuil = kargs.get('seuil', 100)\n\t\tref = kargs.get('ref', this._REF)\n\t\tframe = kargs.get('frame', this._FRAME)\n\t\tcoef = kargs.get('coef', 1)\n\t\t\n\t\t# On fait la différence et on extrait les composantes RGB\n\t\tdiff = cv2.absdiff(frame, ref)\n\t\t\n\t\t# Zblah\n\t\tsat = diff.copy()\n\t\tweight = 1 + (cv2.cvtColor(ref, cv2.COLOR_BGR2GRAY) / 255.0) * coef\n\t\tsat[:,:,0] *= weight\n\t\tsat[:,:,1] *= weight\n\t\tsat[:,:,2] *= weight\n\t\t\n\t\t# Petit seuillage des familles\n\t\tthis._BINARY = delta = EmptyFrom(sat, 1)\n\t\tdelta[:,:] = ((sat[:,:,2] + sat[:,:,1] + sat[:,:,0]) > seuil) * 255\n\t\t\n\t\treturn pyon(\n\t\t\tAbsDiff = diff,\n\t\t\tWeight = weight % 1,\n\t\t\tWeighted = sat,\n\t\t\tThreshold = delta\n\t\t)", "def test_wrong_ref_power_mfcc():\n with raises(FeatureParamsError):\n MFCC(file_struct, FeatureTypes.framesync, ref_power=\"caca\")", "def _recov_int_const(self, model, task): # TODO: document e_err_inconsist return\n\n gdml = GDMLPredict(\n model, max_processes=self._max_processes\n ) # , use_torch=self._use_torch\n n_train = task['E_train'].shape[0]\n\n R = task['R_train'].reshape(n_train, -1)\n\n E_pred, _ = gdml.predict(R)\n E_ref = np.squeeze(task['E_train'])\n\n e_fact = np.linalg.lstsq(\n np.column_stack((E_pred, np.ones(E_ref.shape))), E_ref, rcond=-1\n )[0][0]\n corrcoef = np.corrcoef(E_ref, E_pred)[0, 1]\n\n # import matplotlib.pyplot as plt\n # plt.plot(E_ref-np.mean(E_ref))\n # plt.plot(E_pred-np.mean(E_pred))\n # plt.show()\n\n if np.sign(e_fact) == -1:\n self.log.warning(\n 'The provided dataset contains gradients instead of force labels (flipped sign). 
Please correct!\\n'\n + ui.color_str('Note:', bold=True)\n + 'Note: The energy prediction accuracy of the model will thus neither be validated nor tested in the following steps!'\n )\n return None\n\n if corrcoef < 0.95:\n self.log.warning(\n 'Inconsistent energy labels detected!\\n'\n + 'The predicted energies for the training data are only weakly correlated with the reference labels (correlation coefficient {:.2f}) which indicates that the issue is most likely NOT just a unit conversion error.\\n\\n'.format(\n corrcoef\n )\n + ui.color_str('Troubleshooting tips:\\n', bold=True)\n + ui.wrap_indent_str(\n '(1) ',\n 'Verify the correct correspondence between geometries and labels in the provided dataset.',\n )\n + '\\n'\n + ui.wrap_indent_str(\n '(2) ', 'Verify the consistency between energy and force labels.'\n )\n + '\\n'\n + ui.wrap_indent_str(' - ', 'Correspondence correct?')\n + '\\n'\n + ui.wrap_indent_str(' - ', 'Same level of theory?')\n + '\\n'\n + ui.wrap_indent_str(' - ', 'Accuracy of forces (if numerical)?')\n + '\\n'\n + ui.wrap_indent_str(\n '(3) ',\n 'Is the training data spread too broadly (i.e. weakly sampled transitions between example clusters)?',\n )\n + '\\n'\n + ui.wrap_indent_str(\n '(4) ', 'Are there duplicate geometries in the training data?'\n )\n + '\\n'\n + ui.wrap_indent_str(\n '(5) ', 'Are there any corrupted data points (e.g. parsing errors)?'\n )\n + '\\n\\n'\n + ui.color_str('Note:', bold=True)\n + ' The energy prediction accuracy of the model will thus neither be validated nor tested in the following steps!'\n )\n return None\n\n if np.abs(e_fact - 1) > 1e-1:\n self.log.warning(\n 'Different scales in energy vs. force labels detected!\\n'\n + 'The integrated forces differ from the energy labels by factor ~{:.2f}, meaning that the trained model will likely fail to predict energies accurately.\\n\\n'.format(\n e_fact\n )\n + ui.color_str('Troubleshooting tips:\\n', bold=True)\n + ui.wrap_indent_str(\n '(1) ', 'Verify consistency of units in energy and force labels.'\n )\n + '\\n'\n + ui.wrap_indent_str(\n '(2) ',\n 'Is the training data spread too broadly (i.e. 
weakly sampled transitions between example clusters)?',\n )\n + '\\n\\n'\n + ui.color_str('Note:', bold=True)\n + ' The energy prediction accuracy of the model will thus neither be validated nor tested in the following steps!'\n )\n return None\n\n # Least squares estimate for integration constant.\n return np.sum(E_ref - E_pred) / E_ref.shape[0]", "def scoreCirc_VoltageReference(circuit, gen, indi, makeRedundancyInMatrix):\n #----------#\n VREF = 1.5\n #----------#\n \n FullBigCircuitMatrix = deepcopy(circuit.fullRedundancyMatrix)\n rowsR,columnsR,columnsC,rowsC = sortedNonZeroIndices(FullBigCircuitMatrix)\n\n matrixDensity = float(len(rowsR))/float((BigMatrixSize*BigMatrixSize/2))\t#(ones/(all/2))\n matrixQuaziID = sum(rowsR)+sum(columnsR)-BigMatrixSize*(BigMatrixSize-1)\n OcSc, IcNc, SelfConnElm = checkConnsConnected(FullBigCircuitMatrix) #Outer connections Short cut, Inner connections Not connected\n \n results = None\n badSweep = 0\n if OcSc > 1:\n score = 1e4*np.exp(OcSc)\n else:\n makeNetlist(circuit, gen, indi, FullBigCircuitMatrix)\n results = runme2.evaluateVoltageRef(gen, indi)\n disfCount = 0\n \n vdd_sweep = np.array(results['vout_vdd']['nominal'], dtype=float) #This line changes Nones to np.nans\n vdd_sweep_scale = np.array(results['vout_vdd_scale']['nominal'], dtype=float)\n # if measurement is empty \n if np.any(np.isnan(vdd_sweep)):\n disfCount = disfCount + 1\n vdd_s = 0\n vdd_s_d = 0\n #print \"tukej!\", vdd_sweep_scale\n else:\n x = np.median(vdd_sweep)\n vdd_s = abs(x - VREF) #if x > VREF else 0\n vdd_s_d = np.max(vdd_sweep) - np.min(vdd_sweep)\n #if sweep did not finish completely - add to score\n #check last scale value in runme2!!\n #print \"tukiii\", vdd_sweep_scale\n if (vdd_sweep_scale[-1]<20): #20V\n\tbadSweep = badSweep + 1\n \n rload_sweep = np.array(results['vout_rload']['nominal'], dtype=float)\n rload_sweep_scale = np.array(results['vout_rload_scale']['nominal'], dtype=float)\n # if measurement is empty\n if np.any(np.isnan(rload_sweep)):\n disfCount = disfCount + 1\n rload_s = 0\n rload_s_d = 0\n else:\n x = np.median(rload_sweep)\n rload_s = abs(x - VREF) #if x > VREF else 0\n rload_s_d = np.max(rload_sweep) - np.min(rload_sweep)\n #if sweep did not finish completely - add to score\n #check last scale value in runme2!!\n if (rload_sweep_scale[-1]<100e3): #100kOhm\n\tbadSweep = badSweep + 1\n \n temp_sweep = np.array(results['vout_temp']['nominal'], dtype=float)\n temp_sweep_scale = np.array(results['vout_temp_scale']['nominal'], dtype=float)\n # if measurement is empty OR sweep did not finish completely - check last scale value in runme2!!\n if np.any(np.isnan(temp_sweep)):\n disfCount = disfCount + 1\n temp_s = 0\n temp_s_d = 0\n else:\n x = np.median(temp_sweep)\n temp_s = abs(x - VREF) #if x > VREF else 0\n temp_s_d = np.max(temp_sweep) - np.min(temp_sweep)\n if (temp_sweep_scale[-1]<120): #120 deg celsius\n\tbadSweep = badSweep + 1\n \n power = results['power']['nominal']\n if np.isnan(np.array(power, dtype=float)):\n disfCount = disfCount + 1\n powe = 0\n else:\n powe = power\n \n #---COST FUNCTION DEFINITION---#\n score = (vdd_s) + (vdd_s_d) + 5*(rload_s) + 5*(rload_s_d) + (100*temp_s) + (100*temp_s_d) + (100*powe) + badSweep*100\n\n #print disfCount\n if disfCount > 0:\n score = np.exp(disfCount) * 1e3\n if np.isnan(score):\n score = 2e4\n score = score + (IcNc+1) #add small punishment if not all nodes connected\n\n #print \"\\t\\t\\t\\t\\tG_\" + str(gen) + \"_I_\" + str(indi) + \" SCORE:\", score\n #print vdd_s, vdd_s_d, rload_s, rload_s_d, 
temp_s, temp_s_d, powe\n #print vdd_s, vdd_s_d, rload_s, rload_s_d, 100*temp_s, 100*temp_s_d, 100*powe\n \n filename = \"g_\" + str(gen) + \"_i_\" + str(indi) + \"_subckt.cir\"\n os.remove(filename) #cleanup current subcircuit\n\n return score, matrixDensity, matrixQuaziID, results", "def deal_with_bad_channels(self, selection_method, plot=True, threshold_sd_of_mean=40, interpolate=True,\n file_path=None):\n # TODO: (Everyone) Check how well the automatic detection works on your data\n\n if file_path is None:\n file_path = os.getcwd()\n file_name = os.path.join(file_path, 'participant_{}_bad_channels.csv'.format(self.participant_id))\n \n if selection_method == \"automatic\":\n if self.epochs is None:\n raise AttributeError('Please create epochs first, as the automatic algorithm needs them to work.')\n else:\n df = self.epochs.to_data_frame()\n\n group = df.groupby('epoch')\n mean = group.mean()\n\n a = mean.std()\n a = a[1:]\n print('standard deviation of mean across epochs:')\n print(np.mean(a), np.std(a))\n print('higher than %s:' % threshold_sd_of_mean)\n print(a[a > threshold_sd_of_mean].index)\n\n for i in a[a > threshold_sd_of_mean].index:\n self.raw.info['bads'].append(i)\n\n print(\"Marked as bad: \", self.raw.info['bads'])\n\n print(\"N marked as bad: \", len(self.raw.info['bads']))\n\n pd.DataFrame({'participant': self.participant_id,\n 'bad_channels': self.raw.info['bads']}).to_csv(path_or_buf=file_name,\n index=False)\n\n print(\"Saving bad channels as {}\".format(file_name))\n\n elif selection_method == \"file\":\n bads = pd.read_csv(file_name)\n self.raw.info['bads'] = list(bads['bad_channels'].values)\n\n print(\"Marked as bad: \", self.raw.info['bads'])\n\n print(\"N marked as bad: \", len(self.raw.info['bads']))\n\n elif selection_method != \"manual\":\n ValueError(\"selection_method can be automatic, file, or manual\")\n\n if plot or selection_method == \"manual\":\n self.raw.plot(block=True)\n\n print(\"Marked as bad: \", self.raw.info['bads'])\n\n print(\"N marked as bad: \", len(self.raw.info['bads']))\n\n if file_path is None:\n file_path = os.getcwd()\n file_name = os.path.join(file_path, 'participant_{}_bad_channels.csv'.format(self.participant_id))\n pd.DataFrame({'participant': self.participant_id,\n 'bad_channels': self.raw.info['bads']}).to_csv(path_or_buf=file_name,\n index=False)\n\n print(\"Saving bad channels as {}\".format(file_name))\n\n if interpolate:\n \"Interpolating bad channels...\"\n if len(self.raw.info['bads']) > 0:\n self.raw.interpolate_bads(reset_bads=True)", "def testIsRef(self):\n self.assertFalse(\n self.cd.is_ref\n )\n\n self.cd.cc = cdl_convert.ColorCorrectionRef('001')\n\n self.assertTrue(\n self.cd.is_ref\n )", "def safe_calibrate(self):\n\n status = -1\n while status < 3:\n ifMutexAcquire(self.use_mutex)\n try:\n new_status = self.BNO055.get_calibration_status()[3]\n except:\n new_status = -1\n finally:\n ifMutexRelease(self.use_mutex)\n if new_status != status:\n status = new_status", "def dRdE_magnetic(E, m_x, mu_x, target, vlag=232.0, sigmav=156.0, vesc=544.0):\n \n A = Avals[target]\n \n #See Eq. 
62 of https://arxiv.org/pdf/1307.5955.pdf, but note\n #that we're using some different normalisations for the operators\n #so there are some extra factors of m_x and m_p lurking around...\n \n amu = 931.5e3 # keV\n q1 = np.sqrt(2*A*amu*E) #Recoil momentum in keV\n \n alpha = 0.007297\n e = np.sqrt(4*np.pi*alpha)\n m_p = 0.9315\n \n #Proton and neutron g-factors\n gp = 5.59\n gn = -3.83\n \n #Bohr Magneton\n #Tesla = 194.6*eV**2 # Tesla in natural units (with e = sqrt(4 pi alpha))\n #muB = 5.7883818e-5*eV/Tesla # Bohr magneton\n mu_B = 297.45 #GeV^-1 (in natural units (with e = sqrt(4 pi alpha)))\n\n cp = [E*0.0 for i in range(11)]\n cn = [E*0.0 for i in range(11)]\n \n #Operator 1\n cp[0] = e*(mu_x*mu_B)/(2.0*m_x)\n \n #Operator 5\n cp[4] = 2*e*(mu_x*mu_B)*m_p/(q1*1e-6)**2\n \n #Operator 4\n cp[3] = gp*e*(mu_x*mu_B)/m_p\n cn[3] = gn*e*(mu_x*mu_B)/m_p\n \n #Operator 6\n cp[5] = -gp*e*(mu_x*mu_B)*m_p/(q1*1e-6)**2\n cn[5] = -gn*e*(mu_x*mu_B)*m_p/(q1*1e-6)**2\n\n return dRdE_NREFT(E, m_x, cp, cn, target, vlag, sigmav, vesc)", "def _robustness(\n landscape: flexs.Landscape,\n make_explorer: Callable[[flexs.Model, float], flexs.Explorer],\n):\n results = []\n\n for ss in [0.0, 1.0]:\n print(f\"Evaluating for robustness with model accuracy; signal_strength: {ss}\")\n\n model = baselines.models.NoisyAbstractModel(landscape, signal_strength=ss)\n explorer = make_explorer(model, ss, tag=f\"ss{ss}\")\n res = explorer.run(landscape, verbose=False)\n\n results.append((ss, res))\n\n cnn_ensemble = flexs.Ensemble(\n [\n baselines.models.CNN(\n len(wt),\n alphabet=s_utils.DNAA,\n num_filters=32,\n hidden_size=100,\n loss=\"MSE\",\n )\n for i in range(3)\n ]\n )\n explorer = make_explorer(cnn_ensemble, ss, tag=\"cnn\")\n res = explorer.run(landscape, verbose=False)\n\n results.append((None, res))\n\n return results", "def _correct_band(image, band_name, kvol, kvol0, f_iso, f_geo, f_vol):\n\t\t\tiso = ee.Image(f_iso)\n\t\t\tgeo = ee.Image(f_geo)\n\t\t\tvol = ee.Image(f_vol)\n\t\t\tpred = vol.multiply(kvol).add(geo.multiply(kvol)).add(iso).rename(['pred'])\n\t\t\tpred0 = vol.multiply(kvol0).add(geo.multiply(kvol0)).add(iso).rename(['pred0'])\n\t\t\tcfac = pred0.divide(pred).rename(['cfac'])\n\t\t\tcorr = image.select(band_name).multiply(cfac).rename([band_name])\n\t\t\treturn corr", "def ruze_eff(freqs,freq_ref,ref_eff,srms):\n\n R_ref = np.exp(-4.0*np.pi*(srms/(const.c/(freq_ref*1.0e9*u.s**-1))).value) #\n Gnot = ref_eff / R_ref\n \n tran = freqs*0.0 + 1.0 # Let the transmission be unity everywhere.\n Larr = const.c.value/(freqs*1.0e9) # Keep calm and carry on.\n ### Old formula:\n #Ruze = Gnot * np.exp(-4.0*np.pi*(srms.value)/Larr)\n ### Correct formula: (10 April 2018)\n Ruze = Gnot * np.exp(-(4.0*np.pi*srms.value/Larr)**2)\n NRuz = Ruze / np.max(Ruze) # Normalize it\n band = tran * Ruze # Bandpass, with (unnormalized) Ruze efficiency\n\n return band", "def detect_badsegments(\n raw,\n picks,\n segment_len=1000,\n significance_level=0.05,\n metric='std',\n ref_meg='auto',\n mode=None,\n detect_zeros=True,\n):\n\n gesd_args = {'alpha': significance_level}\n\n if (picks == \"mag\") or (picks == \"grad\"):\n chinds = mne.pick_types(raw.info, meg=picks, ref_meg=ref_meg, exclude='bads')\n elif picks == \"meg\":\n chinds = mne.pick_types(raw.info, meg=True, ref_meg=ref_meg, exclude='bads')\n elif picks == \"eeg\":\n chinds = mne.pick_types(raw.info, eeg=True, ref_meg=ref_meg, exclude='bads')\n elif picks == \"eog\":\n chinds = mne.pick_types(raw.info, eog=True, ref_meg=ref_meg, exclude='bads')\n elif picks == 
\"ecg\":\n chinds = mne.pick_types(raw.info, ecg=True, ref_meg=ref_meg, exclude='bads')\n elif picks == \"emg\":\n chinds = mne.pick_types(raw.info, emg=True, ref_meg=ref_meg, exclude='bads')\n else:\n raise NotImplementedError(f\"picks={picks} not available.\")\n\n if mode is None:\n if detect_zeros:\n bdinds_maxfilt = detect_maxfilt_zeros(raw)\n else:\n bdinds_maxfilt = None\n XX, XX_times = raw.get_data(picks=chinds, reject_by_annotation='omit', return_times=True)\n elif mode == \"diff\":\n bdinds_maxfilt = None\n XX, XX_times = raw.get_data(picks=chinds, reject_by_annotation='omit', return_times=True)\n XX = np.diff(XX, axis=1)\n XX_times = XX_times[1:] # remove the first time point\n\n allowed_metrics = [\"std\", \"var\", \"kurtosis\"]\n if metric not in allowed_metrics:\n raise ValueError(f\"metric {metric} unknown.\")\n if metric == \"std\":\n metric_func = np.std\n elif metric == \"var\":\n metric_func = np.var\n else:\n def kurtosis(inputs):\n return stats.kurtosis(inputs, axis=None)\n metric_func = kurtosis\n \n bdinds = sails.utils.detect_artefacts(\n XX,\n axis=1,\n reject_mode=\"segments\",\n metric_func=metric_func,\n segment_len=segment_len,\n ret_mode=\"bad_inds\",\n gesd_args=gesd_args,\n )\n\n for count, bdinds in enumerate([bdinds, bdinds_maxfilt]):\n if bdinds is None:\n continue\n if count==1:\n descp1 = count * 'maxfilter_' # when count==0, should be ''\n descp2 = ' (maxfilter)'\n else:\n descp1 = ''\n descp2 = ''\n onsets = np.where(np.diff(bdinds.astype(float)) == 1)[0]\n\n if bdinds[0]:\n onsets = np.r_[0, onsets]\n offsets = np.where(np.diff(bdinds.astype(float)) == -1)[0]\n\n if bdinds[-1]:\n offsets = np.r_[offsets, len(bdinds) - 1]\n assert len(onsets) == len(offsets)\n descriptions = np.repeat(\"{0}bad_segment_{1}\".format(descp1, picks), len(onsets))\n logger.info(\"Found {0} bad segments\".format(len(onsets)))\n\n onsets_secs = raw.first_samp/raw.info[\"sfreq\"] + XX_times[onsets.astype(int)]\n offsets_secs = raw.first_samp/raw.info[\"sfreq\"] + XX_times[offsets.astype(int)]\n durations_secs = offsets_secs - onsets_secs\n\n raw.annotations.append(onsets_secs, durations_secs, descriptions)\n\n mod_dur = durations_secs.sum()\n full_dur = raw.n_times / raw.info[\"sfreq\"]\n pc = (mod_dur / full_dur) * 100\n s = \"Modality {0}{1} - {2:02f}/{3} seconds rejected ({4:02f}%)\"\n logger.info(s.format(\"picks\", descp2, mod_dur, full_dur, pc))\n\n return raw", "def retrieve_REFC(\n ds,\n variable=\"zFactorFinal\",\n radar_frequency=\"Ku\",\n mask_bright_band=False,\n mask_solid_phase=False,\n mask_liquid_phase=False,\n):\n if mask_solid_phase and mask_liquid_phase:\n raise ValueError(\"Either specify 'mask_solid_phase' or 'mask_liquid_phase'.\")\n # Retrieve required DataArrays\n da = get_variable_dataarray(ds, variable=variable)\n if len(da[\"radar_frequency\"].data) != 1:\n da = da.sel({\"radar_frequency\": radar_frequency})\n # Mask bright band region\n if mask_bright_band:\n da_bright_band = get_bright_band_mask(ds)\n da = da.where(~da_bright_band)\n # Mask ice phase region\n if mask_solid_phase:\n da_mask = get_solid_phase_mask(ds)\n da = da.where(da_mask)\n # Mask liquid phase region\n if mask_liquid_phase:\n da_mask = get_liquid_phase_mask(ds)\n da = da.where(da_mask)\n # Compute maximum\n da_max = da.max(dim=\"range\")\n # Add attributes\n if mask_solid_phase:\n da_max.name = \"REFC_liquid\"\n elif mask_liquid_phase:\n da_max.name = \"REFC_solid\"\n else:\n da_max.name = \"REFC\"\n da_max.attrs[\"units\"] = \"dBZ\"\n return da_max", "def 
wabbit_error_vs_flusi(fname_wabbit, fname_flusi, norm=2, dim=2):\n import numpy as np\n import insect_tools\n import matplotlib.pyplot as plt\n\n if dim==3:\n print('I think due to fft2usapmle, this routine works only in 2D')\n raise ValueError\n\n # read in flusi's reference solution\n time_ref, box_ref, origin_ref, data_ref = insect_tools.read_flusi_HDF5( fname_flusi )\n print(data_ref.shape)\n ny = data_ref.shape[1]\n\n # wabbit field to be analyzed: note has to be full already\n time, x0, dx, box, data, treecode = read_wabbit_hdf5( fname_wabbit )\n Bs = data.shape[1]\n Jflusi = (np.log2(ny/(Bs-1)))\n print(\"Flusi resolution: %i %i %i so desired level is Jmax=%f\" % (data_ref.shape[0], data_ref.shape[2], data_ref.shape[2], Jflusi) )\n\n if dim==2:\n # squeeze 3D flusi field (where dim0 == 1) to true 2d data\n data_ref = data_ref[0,:,:].copy().transpose()\n box_ref = box_ref[1:2].copy()\n\n # convert wabbit to dense field\n data_dense, box_dense = dense_matrix( x0, dx, data, treecode, dim )\n \n if data_dense.shape[0] < data_ref.shape[0]:\n # both datasets have different size\n s = int( data_ref.shape[0] / data_dense.shape[0] )\n data_ref = data_ref[::s, ::s].copy()\n raise ValueError(\"ERROR! Both fields are not a the same resolutionn\")\n\n if data_dense.shape[0] > data_ref.shape[0]:\n warn(\"WARNING! The reference solution is not fine enough for the comparison! UPSAMPLING!\")\n import fourier_tools\n print(data_ref.shape)\n data_ref = fourier_tools.fft2_resample( data_ref, data_dense.shape[1] )\n\n err = np.ndarray.flatten(data_ref-data_dense)\n exc = np.ndarray.flatten(data_ref)\n\n err = np.linalg.norm(err, ord=norm) / np.linalg.norm(exc, ord=norm)\n print( \"error was e=%e\" % (err) )\n\n return err", "def look_for_reference_image(image):\n match_list = []\n thresh = 8\n final_value = -1\n references = import_reference_images()\n # Initialize the ORB detector algorithm\n orb = cv2.ORB_create()\n\n # Now detect the keypoints and compute\n # the descriptors for the query image\n imgKeypoints, imgDescriptors = orb.detectAndCompute(image, None)\n try:\n for ref in references:\n # Now detect the keypoints and compute\n # the descriptors for the train image\n ref.refKeypoints, ref.refDescriptors = orb.detectAndCompute(ref.img, None)\n\n # Initialize the Matcher for matching\n # the keypoints and then match the\n # keypoints\n matcher = cv2.BFMatcher()\n matches = matcher.knnMatch(imgDescriptors, ref.refDescriptors, k=2)\n\n for m, n in matches:\n if m.distance < 0.75 * n.distance:\n ref.refMatches.append([m])\n\n match_list.append(len(ref.refMatches))\n except:\n pass\n if len(match_list) != 0:\n if max(match_list) > thresh:\n final_value = match_list.index(max(match_list))\n\n return references[final_value].name", "def remove_reference(signal, reference, index=None):\n if np.ndim(signal) != 2:\n raise ValueError(\n \"RemoveReference: EEG signal must be 2D array (channels * times)\"\n )\n if np.ndim(reference) != 1:\n raise ValueError(\"RemoveReference: Reference signal must be 1D array\")\n if np.shape(signal)[1] != np.shape(reference)[0]:\n raise ValueError(\n \"RemoveReference: The second dimension of EEG signal must be \"\n \"the same with the length of reference signal\"\n )\n if index is None:\n signal_referenced = signal - reference\n else:\n if not isinstance(index, list):\n raise TypeError(\n \"RemoveReference: Expected type list, got {} instead\".format(\n type(index)\n )\n )\n signal_referenced = signal.copy()\n signal_referenced[np.asarray(index), :] = (\n 
signal[np.asarray(index), :] - reference\n )\n return signal_referenced", "def find_reference(self, f, b, x_0, y_0, res=8e-3, calibrate_nr=3, confidence=0.7, plot=False):\n found = False\n counter = 0\n self.PG1['n_pts'] = f + b + 1\n self.PG2['n_pts'] = f + b + 1\n\n self.ref_path_x.append(x_0)\n self.ref_path_y.append(y_0)\n\n while not found:\n print('\\n#####################'\n '\\n Frame nr. {}'.format(counter+1))\n # update measurement information\n self.PG1['start'] = self.ref_path_x[-1] + (b + 0.5) * res\n self.PG1['stop'] = self.ref_path_x[-1] - (f + 0.5) * res\n self.PG1['mean'] = self.PG1['start'] + (self.PG1['stop'] - self.PG1['start']) / 2.\n\n self.PG2['start'] = self.ref_path_y[-1] + (b + 0.5) * res\n self.PG2['stop'] = self.ref_path_y[-1] - (f + 0.5) * res\n self.PG2['mean'] = self.PG2['start'] + (self.PG2['stop'] - self.PG2['start']) / 2.\n\n # define measurement object\n output_path = os.path.join(self.path_out, 'reference_' + str(counter))\n measurement = ScriptTools.MeasurementObject(\n self.path_in,\n output_path\n )\n\n # recalibrate QPC every 3 frames\n if counter % calibrate_nr == 0 or self.sweet_spot is None:\n calibrate = True\n gate_config = None\n\n else:\n calibrate = False\n gate_config = {\n self.gate_names['QPC_G']: self.sweet_spot\n }\n \n # perform measurement / get data\n measurement_signal = self.get_data_(measurement, output_path,\n DQD_log_channel=self.gate_names['I_DQD'],\n calibrate=calibrate, rescale=False,\n config=gate_config)\n self.ref_frames.append(measurement_signal['I_QPC'])\n I_DQD = measurement_signal['I_DQD']\n\n # reshape data in order to make it suitable for classifier\n reshaped_signal = self.ref_frames[-1].reshape((1, f + b, f + b, 1))\n\n # predict occupation state\n occupation = self.occupation_ref_rec.predict(reshaped_signal)[0]\n self.ref_classif.append(occupation)\n print('Classification confidences:\\n{}'.format(occupation))\n print('PG1: {}V\\n'\n 'PG2: {}V'.format(self.ref_path_x[-1], self.ref_path_y[-1]))\n counter += 1\n\n # plot measurement and visualize filters\n if plot:\n grid = plt.GridSpec(20, 20)\n fig = plt.figure()\n ax = plt.subplot(grid[:20, :20])\n axins1 = inset_axes(ax,\n width=\"3%\",\n height=\"100%\",\n loc='lower left',\n bbox_to_anchor=(1.01, 0., 1, 1),\n bbox_transform=ax.transAxes,\n borderpad=0,\n )\n\n im1 = ax.pcolormesh(self.ref_frames[-1][:, :], linewidth=0, rasterized=True)\n cbar = fig.colorbar(im1, cax=axins1)\n ax.axhline(y=16, color='black', linewidth=2)\n ax.axvline(x=16, color='black', linewidth=2)\n ax.plot(self.ref_path_x[-1], self.ref_path_y[-1])\n ax.set_ylim(0, 20)\n ax.set_xlim(0, 20)\n plt.title('Reference {}'.format(counter))\n plt.show()\n\n # If confidence that DQD empty is larger than a certain threshold -> terminate\n # Classification outcome: [1, 0] -> dots occupied, [0, 1] -> dots empty\n if(occupation[1] > confidence and not self.is_current_(I_DQD, threshold=7e-12)):\n found = True\n print(self.ref_path_x[-1])\n print(self.ref_path_y[-1])\n self.occupation_1 = 0\n self.occupation_2 = 0\n self.trans_path_x.append(self.ref_path_x[-1])\n self.trans_path_y.append(self.ref_path_y[-1])\n print('Found a reference point at\\n'\n 'PG1: {}V\\n'\n 'PG2: {}V'.format(self.ref_path_x[-1], self.ref_path_y[-1]))\n\n else:\n self.ref_path_x.append(self.ref_path_x[-1] - f*res/3)\n self.ref_path_y.append(self.ref_path_y[-1] - f*res/3)\n\n return self.ref_path_x[-1], self.ref_path_y[-1]", "def test_verify_reference(perfectModelEnsemble_initialized_control, reference):\n pm = 
perfectModelEnsemble_initialized_control.generate_uninitialized()\n skill = (\n pm.verify(\n metric=\"rmse\", comparison=\"m2e\", dim=[\"init\", \"member\"], reference=reference\n )\n .expand_dims([\"lon\", \"lat\"])\n .isel(lon=[0] * 2, lat=[0] * 2)\n ) # make geospatial\n if isinstance(reference, str):\n reference = [reference]\n elif reference is None:\n reference = []\n if len(reference) == 0:\n assert \"skill\" not in skill.dims\n else:\n assert skill.skill.size == len(reference) + 1\n # test skills not none\n assert skill.notnull().all()\n assert \"dayofyear\" not in skill.coords", "def test_wrong_ref_power_cqt():\n with raises(FeatureParamsError):\n CQT(file_struct, FeatureTypes.framesync, ref_power=\"caca\")", "def test_cortical_signal_suppression():\n ave = read_evokeds(fname_evoked)[0]\n eeg_ind = pick_types(ave.info, eeg=True)\n mag_ind = pick_types(ave.info, meg=\"mag\")\n grad_ind = pick_types(ave.info, meg=\"grad\")\n ave.data[mag_ind][0, :] = np.sin(2 * np.pi * 40 * ave.times) * np.mean(\n np.abs(ave.data[mag_ind][0, :])\n )\n ave.data[mag_ind][1, :] = np.sin(2 * np.pi * 239 * ave.times) * np.mean(\n np.abs(ave.data[mag_ind][1, :])\n )\n ave.data[grad_ind][0, :] = np.sin(2 * np.pi * 40 * ave.times) * np.mean(\n np.abs(ave.data[grad_ind][0, :])\n )\n ave.data[eeg_ind][0, :] = np.sin(2 * np.pi * 40 * ave.times) * np.mean(\n np.abs(ave.data[eeg_ind][0, :])\n )\n ave.data[eeg_ind][1, :] = np.sin(2 * np.pi * 239 * ave.times) * np.mean(\n np.abs(ave.data[eeg_ind][1, :])\n )\n ave_f = cortical_signal_suppression(ave)\n cort_power = np.sum(np.abs(ave.data[eeg_ind][0, :]))\n deep_power = np.sum(np.abs(ave.data[eeg_ind][1, :]))\n cort_power_f = np.sum(np.abs(ave_f.data[eeg_ind][0, :]))\n deep_power_f = np.sum(np.abs(ave_f.data[eeg_ind][1, :]))\n rel_SNR_gain = (deep_power_f / deep_power) / (cort_power_f / cort_power)\n assert rel_SNR_gain > 0\n assert ave_f.data.shape == ave.data.shape" ]
[ "0.7477632", "0.6277125", "0.59613264", "0.58670795", "0.55491996", "0.53819615", "0.5299285", "0.5188852", "0.51496625", "0.5102384", "0.50921977", "0.5008404", "0.49466848", "0.4929399", "0.49203682", "0.49124527", "0.48961046", "0.48734668", "0.48694846", "0.48172373", "0.4796227", "0.4750502", "0.4748973", "0.47360262", "0.47298455", "0.4726316", "0.47244924", "0.47217792", "0.47193247", "0.46943516" ]
0.79005593
0
Remove the reference signal from the original EEG signal. This function implements the functionality of the `removeReference` function from the PREP pipeline on an MNE Raw object.
def remove_reference(signal, reference, index=None): if np.ndim(signal) != 2: raise ValueError( "RemoveReference: EEG signal must be 2D array (channels * times)" ) if np.ndim(reference) != 1: raise ValueError("RemoveReference: Reference signal must be 1D array") if np.shape(signal)[1] != np.shape(reference)[0]: raise ValueError( "RemoveReference: The second dimension of EEG signal must be " "the same with the length of reference signal" ) if index is None: signal_referenced = signal - reference else: if not isinstance(index, list): raise TypeError( "RemoveReference: Expected type list, got {} instead".format( type(index) ) ) signal_referenced = signal.copy() signal_referenced[np.asarray(index), :] = ( signal[np.asarray(index), :] - reference ) return signal_referenced
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def removeReference(self, reference: ghidra.program.model.symbol.Reference) -> None:\n ...", "def removeReferenceGlyph(self, *args):\n return _libsbml.GeneralGlyph_removeReferenceGlyph(self, *args)", "def _remove_reference(self, target):\n assert target in self._referenced_nodes\n assert self in target._referencing_nodes\n self._referenced_nodes.remove(target)\n target._referencing_nodes.remove(self)", "def suppression_article_ref(self, reference) :\n if self.get_Ref == reference:\n del self\n print(\" article supprimer\")", "def remove_refs(self):\n\n self.reference = None\n self.url = None", "def unsetReference(self):\n return _libsbml.Association_unsetReference(self)", "def perform_reference(self):\n # Phase 1: Estimate the true signal mean with robust referencing\n self.robust_reference()\n if self.noisy_channels[\"bad_all\"]:\n self.raw.info[\"bads\"] = self.noisy_channels[\"bad_all\"]\n self.raw.interpolate_bads()\n self.reference_signal = (\n np.nanmean(self.raw.get_data(picks=self.reference_channels), axis=0) * 1e6\n )\n rereferenced_index = [\n self.ch_names_eeg.index(ch) for ch in self.rereferenced_channels\n ]\n self.EEG = self.remove_reference(\n self.EEG, self.reference_signal, rereferenced_index\n )\n\n # Phase 2: Find the bad channels and interpolate\n self.raw._data = self.EEG * 1e-6\n noisy_detector = NoisyChannels(self.raw)\n noisy_detector.find_all_bads(ransac=self.ransac)\n\n # Record Noisy channels and EEG before interpolation\n self.bad_before_interpolation = noisy_detector.get_bads(verbose=True)\n self.EEG_before_interpolation = self.EEG.copy()\n\n bad_channels = _union(self.bad_before_interpolation, self.unusable_channels)\n self.raw.info[\"bads\"] = bad_channels\n self.raw.interpolate_bads()\n reference_correct = (\n np.nanmean(self.raw.get_data(picks=self.reference_channels), axis=0) * 1e6\n )\n self.EEG = self.raw.get_data() * 1e6\n self.EEG = self.remove_reference(\n self.EEG, reference_correct, rereferenced_index\n )\n # reference signal after interpolation\n self.reference_signal_new = self.reference_signal + reference_correct\n # MNE Raw object after interpolation\n self.raw._data = self.EEG * 1e-6\n\n # Still noisy channels after interpolation\n self.interpolated_channels = bad_channels\n noisy_detector = NoisyChannels(self.raw)\n noisy_detector.find_all_bads(ransac=self.ransac)\n self.still_noisy_channels = noisy_detector.get_bads()\n self.raw.info[\"bads\"] = self.still_noisy_channels\n return self", "def remove_contact_reference(self):\n self.reference_contact_datetime = None\n self.save()", "def drop_reference_points(self):\n self._cpp_obj.drop_reference_points()\n return self", "def robust_reference(self):\n raw = self.raw.copy()\n raw._data = removeTrend(raw.get_data(), sample_rate=self.sfreq)\n\n # Determine unusable channels and remove them from the reference channels\n noisy_detector = NoisyChannels(raw, do_detrend=False)\n noisy_detector.find_all_bads(ransac=self.ransac)\n self.noisy_channels_original = {\n \"bad_by_nan\": noisy_detector.bad_by_nan,\n \"bad_by_flat\": noisy_detector.bad_by_flat,\n \"bad_by_deviation\": noisy_detector.bad_by_deviation,\n \"bad_by_hf_noise\": noisy_detector.bad_by_hf_noise,\n \"bad_by_correlation\": noisy_detector.bad_by_correlation,\n \"bad_by_ransac\": noisy_detector.bad_by_ransac,\n \"bad_all\": noisy_detector.get_bads(),\n }\n self.noisy_channels = self.noisy_channels_original.copy()\n logger.info(\"Bad channels: {}\".format(self.noisy_channels))\n\n self.unusable_channels = _union(\n 
noisy_detector.bad_by_nan, noisy_detector.bad_by_flat\n )\n # unusable_channels = _union(unusable_channels, noisy_detector.bad_by_SNR)\n self.reference_channels = _set_diff(\n self.reference_channels, self.unusable_channels\n )\n\n # Get initial estimate of the reference by the specified method\n signal = raw.get_data() * 1e6\n self.reference_signal = (\n np.nanmedian(raw.get_data(picks=self.reference_channels), axis=0) * 1e6\n )\n reference_index = [\n self.ch_names_eeg.index(ch) for ch in self.reference_channels\n ]\n signal_tmp = self.remove_reference(\n signal, self.reference_signal, reference_index\n )\n\n # Remove reference from signal, iteratively interpolating bad channels\n raw_tmp = raw.copy()\n\n iterations = 0\n noisy_channels_old = []\n max_iteration_num = 4\n\n while True:\n raw_tmp._data = signal_tmp * 1e-6\n noisy_detector = NoisyChannels(raw_tmp)\n noisy_detector.find_all_bads(ransac=self.ransac)\n self.noisy_channels[\"bad_by_nan\"] = _union(\n self.noisy_channels[\"bad_by_nan\"], noisy_detector.bad_by_nan\n )\n self.noisy_channels[\"bad_by_flat\"] = _union(\n self.noisy_channels[\"bad_by_flat\"], noisy_detector.bad_by_flat\n )\n self.noisy_channels[\"bad_by_deviation\"] = _union(\n self.noisy_channels[\"bad_by_deviation\"], noisy_detector.bad_by_deviation\n )\n self.noisy_channels[\"bad_by_hf_noise\"] = _union(\n self.noisy_channels[\"bad_by_hf_noise\"], noisy_detector.bad_by_hf_noise\n )\n self.noisy_channels[\"bad_by_correlation\"] = _union(\n self.noisy_channels[\"bad_by_correlation\"],\n noisy_detector.bad_by_correlation,\n )\n self.noisy_channels[\"bad_by_ransac\"] = _union(\n self.noisy_channels[\"bad_by_ransac\"], noisy_detector.bad_by_ransac\n )\n self.noisy_channels[\"bad_all\"] = _union(\n self.noisy_channels[\"bad_all\"], noisy_detector.get_bads()\n )\n logger.info(\"Bad channels: {}\".format(self.noisy_channels))\n\n if (\n iterations > 1\n and (\n not self.noisy_channels[\"bad_all\"]\n or set(self.noisy_channels[\"bad_all\"]) == set(noisy_channels_old)\n )\n or iterations > max_iteration_num\n ):\n break\n noisy_channels_old = self.noisy_channels[\"bad_all\"].copy()\n\n if raw_tmp.info[\"nchan\"] - len(self.noisy_channels[\"bad_all\"]) < 2:\n raise ValueError(\n \"RobustReference:TooManyBad \"\n \"Could not perform a robust reference -- not enough good channels\"\n )\n\n if self.noisy_channels[\"bad_all\"]:\n raw_tmp._data = signal * 1e-6\n raw_tmp.info[\"bads\"] = self.noisy_channels[\"bad_all\"]\n raw_tmp.interpolate_bads()\n signal_tmp = raw_tmp.get_data() * 1e6\n else:\n signal_tmp = signal\n self.reference_signal = (\n np.nanmean(raw_tmp.get_data(picks=self.reference_channels), axis=0)\n * 1e6\n )\n\n signal_tmp = self.remove_reference(\n signal, self.reference_signal, reference_index\n )\n iterations = iterations + 1\n logger.info(\"Iterations: {}\".format(iterations))\n\n logger.info(\"Robust reference done\")\n return self.noisy_channels, self.reference_signal", "def remove_rn(reference_node_name):\n\n flg = logging.getLogger(\"lettuce.xgenSetup.remove_rn\")\n\n last_r = reference_node_name.rfind('R')\n rn_removed = reference_node_name[:last_r]\n\n flg.info(\"Converting {0} to {1}.\".format(reference_node_name, rn_removed))\n return rn_removed", "def removeSpeciesReferenceGlyph(self, *args):\n return _libsbml.ReactionGlyph_removeSpeciesReferenceGlyph(self, *args)", "def deleteReferenceImage(self, name):\n blobName = self._getReferenceImageBlobName(name)\n self.productSearch.productClient.delete_reference_image(name=name)\n 
self.productSearch.bucket.blob(blobName).delete()", "def remove_reference(self, dataset_id=None):\n if not dataset_id:\n raise aspecd.exceptions.MissingDatasetError\n for index, reference in enumerate(self.references):\n if dataset_id == reference.id:\n del self.references[index]\n break", "def prune_node(self, node, remove_backrefs=False):\n self.nodes = [x for x in self.nodes if x != node]\n if node in self.edges:\n # Remove add edges from this node if we're pruning it.\n self.edges.pop(node)\n\n for fro, connections in self.edges.items():\n # Remove any links to this node (if they exist)\n if node in self.edges[fro]:\n if remove_backrefs:\n # If we should remove backrefs:\n self.edges[fro].remove(node)\n else:\n # Let's raise an Exception\n raise ValueError(\"\"\"Attempting to remove a node with\n backrefs. You may consider setting\n `remove_backrefs` to true.\"\"\")", "def clean(self, ref):\n # NOTE: This currently only works on the top-most frame\n f1 = self.frames[0]\n f2 = ref.frames[0]\n f1.subtract(f2)", "def remove(self, cell, remove_references=True):\n if isinstance(cell, Cell):\n name = cell.name\n else:\n name = cell\n if name in self.cells:\n del self.cells[name]\n removed = 0\n if remove_references:\n for c in self.cells.values():\n removed += len(c.references)\n c.references = [\n ref\n for ref in c.references\n if name\n != (\n ref.ref_cell.name\n if isinstance(ref.ref_cell, Cell)\n else ref.ref_cell\n )\n ]\n removed -= len(c.references)\n return removed", "def unlink_obj(self, ref_frame, obj_name=None, delete=True):\n self.scene.remove_attached_object(ref_frame, obj_name)\n if delete:\n self.remove_obj(obj_name)", "def removeCompartmentReference(self, *args):\n return _libsbml.MultiCompartmentPlugin_removeCompartmentReference(self, *args)", "def delete_reference_array(self):\r\n del self.pxarray\r\n return", "def unsetModelRef(self):\n return _libsbml.Submodel_unsetModelRef(self)", "def removeSpeciesReferenceGlyph(self, *args):\n return _libsbml.Layout_removeSpeciesReferenceGlyph(self, *args)", "def unsetModelRef(self):\n return _libsbml.ExternalModelDefinition_unsetModelRef(self)", "def clearReference( r):\r\n if r.ObjType == 3:\r\n try:\r\n r.ClearRef() # from GME8 on\r\n except:\r\n cout( \"Exception while clearing reference: \" + r.Name + \"!\", 3)\r\n raise\r\n return", "def revert(self, ref=None):\n # TODO\n raise NotImplementedError", "def unsetCompartmentReference(self):\n return _libsbml.MultiSimpleSpeciesReferencePlugin_unsetCompartmentReference(self)", "def unsetSubmodelRef(self):\n return _libsbml.Replacing_unsetSubmodelRef(self)", "def remove_reference(type):\n nake_type = remove_alias(type)\n if not is_reference(nake_type):\n return type\n else:\n return nake_type.base", "def remove_edge(self, edge: Edge) -> Edge:", "def remove(self,index=0):\n if index>self.size-1: raise IndexError(\"Index out of range.\")\n elif self.size==1: self.reference=None\n else:\n pointer = self.reference\n for i in range(index): pointer = pointer.next\n pointer.previous.next, pointer.next.previous = pointer.next, pointer.previous\n if index==0: self.reference=self.reference.next\n self.size-=1" ]
[ "0.7395361", "0.618159", "0.6136284", "0.6101331", "0.59427154", "0.58940864", "0.58332074", "0.57739156", "0.5770895", "0.5762816", "0.5671317", "0.56659806", "0.5665375", "0.5660799", "0.5658297", "0.5644026", "0.5601565", "0.5555541", "0.5552319", "0.5550075", "0.5489779", "0.5486409", "0.54856163", "0.5447224", "0.54384947", "0.5435353", "0.543204", "0.5407401", "0.54014987", "0.5390891" ]
0.7985783
0
Sets the buy list for the board
def setBuyList(self, buyList): parsedBuyList = [] for bought in buyList: if hasattr(bought, "unitType"): parsedBuyList.append(bought) elif isinstance(bought, dict) and u'unitType' in bought and u'territory' in bought: parsedBuyList.append(createBoughtUnitFromDict(bought, self.board.territories)) else: raise Exception("Invalid buy list", buyList) sumCost = self.costOfUnits(parsedBuyList) if sumCost <= self.board.currentCountry.money: self.board.buyList = parsedBuyList[:] # copy in buyList return True else: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def buys(self, buys):\n\n self._buys = buys", "def set_buy_sell_deal_account(self, account_list):\n self.multiple_items_selection_from_kendo_dropdown(self.buy_sell_deal_account_dropdown_locator, account_list)\n self.wait_for_ajax_spinner_load()", "def set_target_buy_list(self, item_name, is_first_item):\n if is_first_item is True:\n self.single_selection_from_static_kendo_dropdown(self.target_buy_list_kendo_dropdown_arrow_locator, first_item=True)\n else:\n self.single_selection_from_static_kendo_dropdown(self.target_buy_list_kendo_dropdown_arrow_locator, item_name)", "def __init__(self, board):\n self.board = board\n self.tableaus = [] # any exposed card is clickable\n self.foundations = [] # only top card is clickable\n self.waste = [] # only top card is clickable\n self.stock = [] # only top card is clickable", "def set_board(board):", "def sells(self, sells):\n\n self._sells = sells", "def reset_counters_in_list(origin_champs_counters_to_buy_):\n logging.debug(\"Function reset_counters_in_list() called\")\n\n for champ_counter in origin_champs_counters_to_buy_:\n champ_counter.set(0)\n\n delete_all_buttons()\n\n logging.debug(\"Function reset_counters_in_list() end\")", "def set_blists(self, blists):\n self.blists = blists[:]", "def set_self_cross_list(self, self_cross_list):\n self.self_cross_list = self_cross_list\n self.reg_coupled_pair = False", "def set_target_stocks_list(self, list_of_stocks):\n self.target_stocks = list_of_stocks", "def add_buy(self, trade):\n trade = self._format_sql(trade, self.buy_table)\n self.buys[trade['id']] = trade", "def buyTradedVal(self, buyTradedVal):\n\n self._buyTradedVal = buyTradedVal", "def doBuyIn(self):\n self.protocol.sendPacket(networkpackets.PacketPokerBuyIn(amount=self.max_buy_in, **self._serial_and_game_id))\n self.protocol.sendPacket(networkpackets.PacketPokerAutoBlindAnte(**self._serial_and_game_id))", "def set_plugboard(self, lst_buttons):\r\n colors = ['purple', 'yellow', 'blue', 'orange', 'coral4', 'pink', 'cyan',\r\n 'SpringGreen2', 'red', 'green']\r\n used_colors = list(filter(lambda button_bg: button_bg != \"khaki\",\r\n [i['bg'] for i in lst_buttons]))\r\n for i in range(len(lst_buttons)):\r\n if chr(i + 65) not in self.simulator_enigma.plugboard.plugboard1 and \\\r\n chr(i + 65) not in self.simulator_enigma.plugboard.plugboard2:\r\n lst_buttons[i].config(bg=\"khaki\")\r\n\r\n for i in range(len(self.simulator_enigma.plugboard.plugboard1)):\r\n if lst_buttons[ord(self.simulator_enigma.plugboard.plugboard1[i]) - 65]['bg'] \\\r\n == \"khaki\" or \\\r\n lst_buttons[ord(self.simulator_enigma.plugboard.plugboard2[i]) - 65]['bg'] \\\r\n == \"khaki\":\r\n color_index = 0\r\n while used_colors.count(colors[color_index]) == 2:\r\n color_index += 1\r\n lst_buttons[ord(self.simulator_enigma.plugboard.plugboard1[i]) - 65]. \\\r\n config(bg=colors[color_index])\r\n used_colors.append(colors[color_index])\r\n if self.simulator_enigma.plugboard.plugboard2[i] is not None:\r\n lst_buttons[ord(self.simulator_enigma.plugboard.plugboard2[i]) - 65]. 
\\\r\n config(bg=colors[color_index])\r\n used_colors.append(colors[color_index])", "def __init__(self,board,mode):\n self.bluh=[]\n self.board=board\n self.mode=mode", "def init_shopping(self):\n all_items = self.db.get_items()\n scroll_parent = Window\n sw = self.ids.shopping_wrapper\n for t in all_items:\n item = ItemToBuy()\n state = self.string_to_bool(t[1])\n if t[1] == \"True\":\n item.bcg_clr = [0.5, 0.5, 0.5, 0.5]\n item.is_done.active = 1\n\n item.name = t[2]\n item.size_hint = [None, None]\n item.size = [scroll_parent.width / 1.35, dp(65)]\n\n sw.add_widget(item)", "def test_set_list(buttons):\n app = App()\n app[0, 0] = [buttons[0], buttons[1]]", "def reset(self):\n self.book = {}\n self.book[Trade.WAY_BUY] = []\n self.book[Trade.WAY_SELL] = []", "def set_checklist(twitchid, checklist):\n\twith postgres, postgres.cursor() as cur:\n\t\tcur.execute(\"update mustard.users set checklist=%s where twitchid=%s\", (checklist, twitchid,))", "def slot_choke(self):\n if self.choke:\n _choke = [1 for x in range(8)]\n else:\n _choke = [random.randint(0,4) for x in range(8)]\n \n return _choke", "def setBoard(self, board):\n\t\tself.gameBoard = board", "def brands(self, brands):\n\n self._brands = brands", "def userBuyShipIndex(self, user : bbUser.bbUser, index : int):\n self.userBuyShipObj(user, self.shipsStock[index].item)", "def set_chanlist(self,loc,newchannel):\n # TODO, add checks and illegal arguments to protect Pi\n # TODO actually add the functionality\n # self.chanlist(loc) = newchannel", "def set_coupled_pair_list(self, coupled_pair_list):\n self.coupled_pair_list = coupled_pair_list\n self.reg_coupled_pair = True", "def buy(self):\n\n from_symbol = self.symbol\n to_symbol = self.currency\n price = self.data[0].close\n amount = self.portfolio['buy_sell_amount'][self.currency]\n date = self.date\n\n if self.slippage:\n slip_factor = (self.data[-1].high - self.data[-1].close)*self.slippage\n price += np.abs(slip_factor)\n\n self.trade_manager.buy(from_symbol, to_symbol, price, amount, date)", "def checklists(self, checklists):\n\n self._checklists = checklists", "def before_trading_start(context, data):\n pipe_bbands = algo.pipeline_output('pipe_bbands') \n\n # Find list of symbols to buy/sell.\n context.buy = pipe_bbands[pipe_bbands['buy']].index.tolist()\n context.sell = pipe_bbands[pipe_bbands['sell']].index.tolist()", "def _buy_and_add_ships(player, ships_list, game_data):\n\n # initialisation of the money of the player\n wallet = 100\n\n # separate all the ship bought\n for ship in ships_list.split(' '):\n # separate the name and the type of the ship\n if ship:\n name, ship_type = ship.split(':')\n # substract the price of the ship\n wallet -= game_data['ship_characteristics'][ship_type]['cost']\n if wallet >= 0:\n _add_ship(player, name, ship_type, game_data)", "def save_board_state(self):\n self.board_states.append([copy.deepcopy(self.stock), copy.deepcopy(self.wp), \n copy.deepcopy(self.foundations), copy.deepcopy(self.tableaus)])" ]
[ "0.6422962", "0.6265", "0.5775737", "0.5630926", "0.5596823", "0.5562822", "0.55483556", "0.5545518", "0.5418781", "0.53955275", "0.535245", "0.5310174", "0.5309824", "0.53068644", "0.5252064", "0.52498454", "0.5239531", "0.5211751", "0.5184596", "0.51813936", "0.5175984", "0.5160426", "0.5159727", "0.5149944", "0.5142869", "0.5142178", "0.5141508", "0.51277274", "0.51195157", "0.5119398" ]
0.6954832
0
Converts a JSON string into the related object. object_to_serialize has to be an instance of the desired target object.
def DeserializeJson(self, json_string, object_to_serialize): object_to_serialize.__dict__ = json.loads(str(json_string)) return object_to_serialize
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_serializable(o: Any) -> Any:\n if isinstance(o, UUID):\n return str(o)\n if isinstance(o, datetime):\n return isoformat(o)\n if is_dataclass(o):\n return asdict(o)\n if hasattr(o, \"__json__\"):\n return o.__json__()\n if hasattr(o, \"to_dict\"):\n # api_client models all have a to_dict function\n return o.to_dict()\n if isinstance(o, BaseModel):\n return o.dict()\n raise TypeError(f\"Could not serialize object of type {o.__class__.__name__} to JSON\")", "def json_serial(obj):\n if isinstance(obj, LegipyModel):\n return obj.to_json()\n elif isinstance(obj, (datetime.date, datetime.datetime)):\n return obj.isoformat()\n raise TypeError(\"Type {0} not serializable\".format(repr(type(obj))))", "def serialize(obj):\n\n # if isinstance(obj, date):\n # serial = obj.isoformat()\n # return serial\n #\n # if isinstance(obj, time):\n # serial = obj.isoformat()\n # return serial\n\n return obj.to_json()", "def json_serial2(self, obj):\n if isinstance(obj, (datetime.datetime, datetime.date)):\n return obj.isoformat()\n raise TypeError(\"Type %s not serializable\" % type(obj))", "def json_serial(obj):\n\n if isinstance(obj, (datetime, date)):\n return obj.isoformat()\n if isinstance(obj, complex):\n return str(obj)\n raise TypeError (\"Type %s not serializable\" % type(obj))", "def json_serial(obj):\n\n if isinstance(obj, (datetime, date)):\n return obj.isoformat()\n\n elif isinstance(obj, decimal.Decimal):\n if obj % 1 == 0:\n return int(obj)\n else:\n return float(obj)\n\n elif isinstance(obj, bytes):\n try:\n s = obj.decode()\n return s\n except Exception:\n return str(obj)\n\n raise TypeError(\"Type %s not serializable\" % type(obj))", "def json_serial(obj):\n\n if isinstance(obj, (datetime, date)):\n return str(obj) #.isoformat()\n raise TypeError (\"Type %s not serializable\" % type(obj))", "def json_serial(obj):\r\n\r\n\t\tif isinstance(obj,(datetime, date)):\r\n\t\t\treturn obj.isoformat()\r\n\t\traise TypeError (\"Type %s not serializable\" % type(obj))", "def serialize(cls, obj):\n return json.dumps(obj, cls=CustomTypeEncoder)", "def json_deserialize(json_object):\n raise NotImplementedError('json_deserialize must be overriden')", "def serialize(self, obj):\n return obj", "def json_serializer(obj):\n if hasattr(obj, 'isoformat'):\n return obj.isoformat()\n if hasattr(obj, '_asdict'):\n return obj._asdict()", "def json_serial(obj):\n\n if isinstance(obj, (datetime, date,date)):\n return obj.isoformat()\n raise TypeError (\"Type %s not serializable\" % type(obj))", "def serialise(obj):\n if isinstance(obj, datetime.datetime):\n # maybe assume UTC (as deserialise does the reverse)\n return obj.replace(tzinfo=du_tz.tzutc()).isoformat()\n\n if isinstance(obj, datetime.date):\n return obj.isoformat()\n\n if isinstance(obj, queue.Queue):\n return {}\n\n if isinstance(obj, (pagination.PaginatedResponse, BaseObject)):\n return obj.to_dict()\n\n try:\n return obj.to_dict()\n except AttributeError:\n pass\n\n raise TypeError(\"Object of type '%s' is not JSON serializable\" % obj.__class__.__name__)", "def serialize(obj):\n return serialization_manager.serialize(obj)", "def serialize(self, obj):\n return json.dumps(obj)", "def _deserialize_object(value):\n return value", "def jsonSerial(obj):\n\n if isinstance(obj, (datetime, date)):\n return obj.isoformat()\n\n if isinstance(obj, enum.Enum):\n return obj.value\n\n raise TypeError (\"Type %s not serializable\" % type(obj))", "def json_serial(obj):\n\n if isinstance(obj, (datetime, date)):\n return obj.isoformat()\n raise TypeError (\"Type %s 
not serializable\" % type(obj))", "def json_serial(obj):\n\n if isinstance(obj, (datetime, date)):\n return obj.isoformat()\n raise TypeError (\"Type %s not serializable\" % type(obj))", "def json_serial(obj):\n\n if isinstance(obj, (datetime, date)):\n return obj.isoformat()\n raise TypeError (\"Type %s not serializable\" % type(obj))", "def json_serial(obj):\n\n if isinstance(obj, (datetime, date)):\n return obj.isoformat()\n raise TypeError (\"Type %s not serializable\" % type(obj))", "def json_serial(obj):\n\n if isinstance(obj, (datetime, date)):\n return obj.isoformat()\n raise TypeError(\"Type %s not serializable\" % type(obj))", "def _deserialize(obj):\r\n from thunderdome.models import Element\r\n\r\n if isinstance(obj, dict) and '_id' in obj and '_type' in obj:\r\n return Element.deserialize(obj)\r\n elif isinstance(obj, dict):\r\n return {k:GremlinMethod._deserialize(v) for k,v in obj.items()}\r\n elif isinstance(obj, list):\r\n return [GremlinMethod._deserialize(v) for v in obj]\r\n else:\r\n return obj", "def json_serial(obj):\r\n\r\n if isinstance(obj, (datetime, date)):\r\n return obj.isoformat()\r\n raise TypeError (\"Type %s not serializable\" % type(obj))", "def _json_to_obj(cls, serialized_str):\n json_dict = json.loads(serialized_str)\n if 'metadata' in json_dict.keys():\n metadata_dict = json_dict['metadata']\n return Metadata(metadata_dict)", "def json_serial(obj):\n if isinstance(obj, (datetime, date)):\n return obj.isoformat()\n raise TypeError(\"Type %s not serializable\" % type(obj))", "def json_serial(obj):\n if isinstance(obj, (datetime, date)):\n return obj.isoformat()\n raise TypeError(\"Type %s not serializable\" % type(obj))", "def json_serial(obj):\n\n\tif isinstance(obj, (dt.datetime, dt.date)):\n\t\treturn obj.isoformat()\n\traise TypeError (\"Type %s not serializable\" % type(obj))", "def convert_for_json(obj):\n if isinstance(obj, datetime.datetime):\n return obj.__str__()\n return obj" ]
[ "0.68756694", "0.6863126", "0.68572044", "0.68323946", "0.68267304", "0.6792545", "0.67513794", "0.6714095", "0.6697272", "0.66942084", "0.6680018", "0.6673534", "0.6665409", "0.6663698", "0.6646167", "0.6644696", "0.6639211", "0.6623753", "0.66212523", "0.66212523", "0.66212523", "0.66212523", "0.66184986", "0.66094315", "0.6593239", "0.6573212", "0.65650403", "0.65650403", "0.6515611", "0.6515553" ]
0.7826462
0
Constructs a DVR object
def DVR( domain=None, divs=None, classes=None, potential_function=None, g=None, g_deriv=None, scf=False, potential_optimize=False, **base_opts ): return DVRConstructor.construct( domain=domain, divs=divs, classes=classes, potential_function=potential_function, g=g, g_deriv=g_deriv, scf=scf, potential_optimize=potential_optimize, **base_opts )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, dzdt, v, e, D):\n self.V = dzdt\n self.v = v\n self.e = e\n self.D = D\n self.laminar_label = \"Laminar\"\n self.critical_label = \"Critical\"\n self.turbulent_label = \"Turbulent\"", "def __init__(self, dr_ds: DatasetReader) -> None:\n super().__init__()\n\n self.dr_ds = dr_ds\n try:\n self.cmap = dr_ds.colormap(1)\n except ValueError:\n pass\n\n crs = dr_ds.crs\n res = dr_ds.res[0]\n\n with WarpedVRT(dr_ds, crs=crs) as dr:\n minx, miny, maxx, maxy = dr.bounds\n\n mint: float = 0\n maxt: float = sys.maxsize\n\n coords = (minx, maxx, miny, maxy, mint, maxt)\n self.index.insert(0, coords, 'dr')\n\n self._crs = cast(CRS, crs)\n self.res = cast(float, res)", "def __init__(self,x,y,r,vx,vy):\n self.x = x\n self.y = y\n self.r = r\n self.vx = vx\n self.vy = vy", "def __init__(self, m,r,v):\n self.m = m\n self.r = r\n self.v = v\n self.rv = np.array([r,0,0,v])", "def __build(self,vs,ndarray):\n self.v = vs\n self.t = ndarray\n return self", "def __init__(self, model_type=DEFAULT_MODEL_TYPE):\n assert (model_type == 'SVR'), \"Model '{}' is not supported. \" \\\n \"We support only SVR for now.\".format(model_type)\n self._model_type = model_type\n self._model_params = BTCForecast.DEFAULT_SVR_MODEL_PARAMS\n\n # set up SVR pipeline\n self._scaler = preprocessing.StandardScaler(copy=True, with_mean=True, with_std=True)\n self._model = SVR(kernel=self._model_params['kernel'],\n epsilon=self._model_params['epsilon'],\n C=self._model_params['c'],\n gamma=self._model_params['gamma'])\n self._pipeline = make_pipeline(self._scaler, self._model)\n self.has_learned = False", "def __init__(self, name=\"uniformvelmodel\"):\n SpatialDBObj.__init__(self, name)\n return", "def __init__(self, vx, vy, vz):\n self.vx = vx\n self.vy = vy\n self.vz = vz", "def __init__(self, vrpdata):\n self.vrpdata = vrpdata\n self.objective = 0\n self.routes = []\n self.solutionValid = False", "def __init__(self, pvID, pvP, pvQ, pvDescriptor):\n\n # TODO: implement this", "def __init__(self, variables, vid, vtype, vname, init, status, timestamp, prec):\n super().__init__()\n self._id = vid\n self._init = init\n self._last_edited = timestamp\n self._last_update = now()\n self._last_changed = now()\n self._name = vname\n self._prec = prec\n self._status = status\n self._type = vtype\n self._variables = variables\n self.isy = variables.isy\n self.status_events = EventEmitter()", "def _new_instance(self):\n return self.__class__(self._vmodule)", "def __init__(self):\n\n super().__init__(\n dynamics_model=DoorDynamicsModel(),\n virtual_sensor_model=UnimodalVirtualSensorModel(\n virtual_sensor_model=[\n DoorVirtualSensorModel(modalities={\"image\"}),\n DoorVirtualSensorModel(modalities={\"pos\", \"sensors\"}),\n ],\n state_dim=3,\n ),\n )", "def __init__(self, name, type_name, diameter, radius_of_curvature, elbow_angle, orientation, surface_roughness):\n self.name = name\n self.type = type_name\n self.diameter = diameter\n self.radius_of_curvature = radius_of_curvature\n self.orientation = orientation\n self.surface_roughness = surface_roughness\n self.elbow_angle = elbow_angle\n self.RperD = radius_of_curvature / diameter\n self.surface_roughnessratio = surface_roughness / diameter", "def __init__(self, model):\n if model == \"biblis\" :\n data = [1.3200e+00, 2.7720e-01, 2.6562e-03, \\\n 7.1596e-02, 0.00000000, 0.00000000, \\\n 2.3106e-02] \n else :\n raise Exception(\"Reflector model not available\")\n self.model = model\n # Default group data.\n self.D1 = data[0]\n self.D2 = data[1]\n self.A1 = data[2]\n self.A2 = 
data[3] \n self.F1 = data[4]\n self.F2 = data[5] \n self.S12 = data[6] \n self.R1 = self.A1 + self.S12", "def __init__(self, description, vdma=None):\n super().__init__(description)\n self._vdma = vdma\n self._color = self.color_convert\n self._pixel = self.pixel_unpack\n self._hdmi = self.frontend", "def _createVetor(cls, elem):\n return cls(elem)", "def __init__(self, RV, name=None, description=''):\n self.RV = RV\n self.name = name or RV\n self.description = description\n\n # A node needs to know its parents in order to determine the shape of\n # its CPT. This should be a list of Nodes.\n self._parents: List[Node] = []\n\n # For purposes of message passing, a node also needs to know its\n # children.\n self._children: List[Node] = []", "def __init__(self, name, type_name, delta, b, D):\n self.name = name\n self.type = type_name\n self.length = 0\n self.delta = delta\n self.b = b\n self.diameter = D", "def __init__(self, name, typing, reflection, year):#Taking in parameters\n self.n = name#Assigning variables\n self.t = typing\n self.r = reflection\n self.y = year", "def __init__(self):\n super().__init__()\n self.dmdParams = {} # dmd settings container\n self.printTag = 'DMD' # print tag\n self._dynamicHandling = True # This ROM is able to manage the time-series on its own. No need for special treatment outside\n self.pivotParameterID = None # pivot parameter\n # variables filled up in the training stages\n self._amplitudes = {} # {'target1': vector of amplitudes,'target2':vector of amplitudes, etc.}\n self._eigs = {} # {'target1': vector of eigenvalues,'target2':vector of eigenvalues, etc.}\n self._modes = {} # {'target1': matrix of dynamic modes,'target2':matrix of dynamic modes, etc.}\n self.__Atilde = {} # {'target1': matrix of lowrank operator from the SVD,'target2':matrix of lowrank operator from the SVD, etc.}\n self.pivotValues = None # pivot values (e.g. time)\n self.KDTreeFinder = None # kdtree weighting model\n self.timeScales = {} # time-scales (training and dmd). 
{'training' and 'dmd':{t0:float,'dt':float,'intervals':int}}\n self.featureVals = None # feature values", "def _VRF(self) -> array:\n pass", "def __init__(self, name, volume_id, roi, electric_field, create_S2, xe_density=2.862):\n self.name = name\n self.volume_id = volume_id\n self.roi = roi\n\n self.electric_field = electric_field\n self.xe_density = xe_density\n self.create_S2 = create_S2\n self._is_valid()", "def cdd_Vrepresentation(self):\n return cdd_Vrepresentation(self._cdd_type, \n self.vertices(),\n [r for r in self.ray_generator()],\n [l for l in self.line_generator()] )", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(Drone, self).__init__(*args, **kwds)\n #message fields cannot be None, assign default values for those that are\n if self.name is None:\n self.name = ''\n if self.type_name is None:\n self.type_name = ''\n if self.home is None:\n self.home = flyaq.msg.Coordinate()\n if self.movements is None:\n self.movements = []\n if self.move_transitions is None:\n self.move_transitions = []\n if self.slot_names is None:\n self.slot_names = []\n if self.travel_mode is None:\n self.travel_mode = 0\n else:\n self.name = ''\n self.type_name = ''\n self.home = flyaq.msg.Coordinate()\n self.movements = []\n self.move_transitions = []\n self.slot_names = []\n self.travel_mode = 0", "def __init__(\n self, name=None, functions=None, domain=None, density=None,\n ):\n if functions is None:\n functions = []\n if domain is None:\n domain = Domain()\n if density is None:\n density = Density()\n if name is None:\n name = \"(no name)\"\n\n self.name = name\n self.functions = functions\n self.domain = domain\n self.density = density\n\n self.update()", "def __init__(\n self,\n lattice_resolution: int = None,\n number_of_objectives: int = None,\n creation_type: str = \"Uniform\",\n vector_type: str = \"Spherical\",\n ref_point: list = None,\n ):\n\n self.number_of_objectives = number_of_objectives\n self.lattice_resolution = lattice_resolution\n self.number_of_vectors = 0\n self.creation_type = creation_type\n self.vector_type = vector_type\n self.values = []\n self.values_planar = []\n self.ref_point = [1] * number_of_objectives if ref_point is None else ref_point\n self._create(creation_type)\n self.initial_values = np.copy(self.values)\n self.initial_values_planar = np.copy(self.values_planar)\n self.neighbouring_angles()\n # self.iteractive_adapt_1() Can use this for a priori preferences!", "def __init__(self, id, x, y, z):\n self.id = id\n self._dof_x = Dof(id=(id, 'u'), value=x)\n self._dof_y = Dof(id=(id, 'v'), value=y)\n self._dof_z = Dof(id=(id, 'w'), value=z)", "def __init__(self, D, K):\n\t\tself.D = D \n\t\tself.K = K \n\t\tself.V = np.zeros((D+1,K))\n\t\treturn", "def __init__(\n self,\n front_left_vertex,\n front_right_vertex,\n back_left_vertex,\n back_right_vertex,\n strength,\n ):\n\n self.front_left_vertex = front_left_vertex\n self.front_right_vertex = front_right_vertex\n self.back_left_vertex = back_left_vertex\n self.back_right_vertex = back_right_vertex\n self.strength = strength\n\n # Initialize the line vortices that make up the ring vortex.\n self.front_leg = LineVortex(\n origin=self.front_right_vertex,\n termination=self.front_left_vertex,\n strength=self.strength,\n )\n self.left_leg = LineVortex(\n origin=self.front_left_vertex,\n termination=self.back_left_vertex,\n strength=self.strength,\n )\n self.back_leg = LineVortex(\n origin=self.back_left_vertex,\n termination=self.back_right_vertex,\n strength=self.strength,\n )\n self.right_leg = 
LineVortex(\n origin=self.back_right_vertex,\n termination=self.front_right_vertex,\n strength=self.strength,\n )\n\n # Initialize a variable to hold the centroid of the ring vortex.\n self.center = ps.geometry.centroid_of_quadrilateral(\n self.front_left_vertex,\n self.front_right_vertex,\n self.back_left_vertex,\n self.back_right_vertex,\n )" ]
[ "0.61528945", "0.60658884", "0.599728", "0.59843045", "0.59223074", "0.5918787", "0.5913648", "0.5847089", "0.5769131", "0.5743365", "0.57272416", "0.57149744", "0.57125163", "0.5688939", "0.56877244", "0.5670665", "0.56602484", "0.56357646", "0.5623393", "0.5620744", "0.5612004", "0.5606798", "0.5602032", "0.56006205", "0.5591343", "0.5587323", "0.5578491", "0.55619115", "0.5560981", "0.5558533" ]
0.7178919
0
Convert the ``Response`` object into django's ``HttpResponse``
def _finalize_response(self, response): res = HttpResponse(content=response.content, content_type=self._get_content_type()) # status_code is set separately to allow zero res.status_code = response.code return res
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_http_response(self) -> HttpResponse:\n response = (\n JsonResponse(self.body)\n if (self.headers or {}).get(\"Content-Type\") == \"application/json\"\n else HttpResponse(self.body)\n )\n response.headers = self.headers\n return response", "def make_response(self, rv):\n status_or_headers = headers = None\n if isinstance(rv, tuple):\n rv, status_or_headers, headers = rv + (None,) * (3 - len(rv))\n\n if rv is None:\n raise ValueError('View function did not return a response')\n\n if isinstance(status_or_headers, (dict, list)):\n headers, status_or_headers = status_or_headers, None\n\n if not isinstance(rv, self.response_class):\n if isinstance(rv, six.text_type):\n rv = self.response_class(rv, status=status_or_headers)\n else:\n raise ValueError('Content must be a string')\n\n if status_or_headers is not None:\n if isinstance(status_or_headers, six.text_type):\n # FIXME: I'm pretty sure Django's reason_phrase is *just* the\n # 'OK' in '200 OK', whereas Flask allows passing '200 OK'\n rv.reason_phrase = status_or_headers\n else:\n rv.status = status_or_headers\n\n if headers:\n # HttpResponse doesn't take a headers kwarg, so we must set each\n # header manually with rv[header] = value\n if isinstance(headers, dict):\n headers_iter = six.iteritems(headers)\n elif isinstance(headers, list):\n headers_iter = headers\n else:\n raise ValueError('headers must be dict, list, or None')\n\n for header, value in headers_iter:\n rv[header] = value\n\n return rv", "def finalize_response(self, request, response, *args, **kwargs):\n\t\t# Make the error obvious if a proper response is not returned\n\t\tassert isinstance(response, BaseResponse), (\n\t\t\t'Expected a `Response` object '\n\t\t\t'to be returned from the view, but received a `%s`'\n\t\t\t% type(response)\n\t\t)\n\t\treturn response", "def process_response(self, request, response):\n return response", "def process_response(self, request, response):\n return response", "def adapt_response(self, response):\n return response", "def adapt_response(self, response):\n return response", "def get_django_response(proxy_response, strict_cookies=False):\n content = proxy_response\n response = HttpResponse(content, status=200)\n return response", "def _prepare_response(self, response):\n\n if not isinstance(response, Response):\n return Response(0, response)\n return response", "def to_response(self):\n return make_response(self.res, self.status)", "def to_response(self):\n return make_response(self.res, self.status)", "def _http_response(response, http_status_code):\n return make_response(jsonify(response), http_status_code)", "def get_final_response(self,request,response):\n return response", "def process_response(self, response):\n return response", "def to_response(self):\n raise NotImplementedError(\"Must define to_response on `%s`\" % self.__class__.__name__)", "def json_response(self, request, *args, **kwargs):\n\n return HttpResponse(self.construct_json(),\n content_type='application/json',\n mimetype='application/json', status=self.status)", "def get_django_response(proxy_response, strict_cookies=False):\n status = proxy_response.status\n headers = proxy_response.headers\n\n logger.debug('Proxy response headers: %s', headers)\n\n content_type = headers.get('Content-Type')\n\n logger.debug('Content-Type: %s', content_type)\n\n if should_stream(proxy_response):\n logger.info('Content-Length is bigger than %s', DEFAULT_AMT)\n response = StreamingHttpResponse(proxy_response.stream(DEFAULT_AMT),\n status=status,\n content_type=content_type)\n 
else:\n content = proxy_response.data or b''\n response = HttpResponse(content, status=status,\n content_type=content_type)\n\n logger.info('Normalizing response headers')\n set_response_headers(response, headers)\n\n logger.debug('Response headers: %s', getattr(response, '_headers'))\n\n cookies = proxy_response.headers.getlist('set-cookie')\n logger.info('Checking for invalid cookies')\n for cookie_string in cookies:\n cookie_dict = cookie_from_string(cookie_string,\n strict_cookies=strict_cookies)\n # if cookie is invalid cookie_dict will be None\n if cookie_dict:\n response.set_cookie(**cookie_dict)\n\n logger.debug('Response cookies: %s', response.cookies)\n\n return response", "def __call__(self, rv):\n if isinstance(rv, ResponseBase):\n return rv\n data, status, headers = unpack(rv)\n resp = flask.make_response(self._encoder(data, **self.json_settings),\n status, {'Content-Type': self.content_type})\n resp.headers.extend(headers)\n return resp", "def _process_response(self, request, response):\n if http_utils.is_ajax(request) and hasattr(request, 'horizon'):\n queued_msgs = request.horizon['async_messages']\n if type(response) == http.HttpResponseRedirect:\n # Drop our messages back into the session as per usual so they\n # don't disappear during the redirect. Not that we explicitly\n # use django's messages methods here.\n for tag, message, extra_tags in queued_msgs:\n getattr(django_messages, tag)(request, message, extra_tags)\n if response['location'].startswith(settings.LOGOUT_URL):\n redirect_response = http.HttpResponse(status=401)\n # This header is used for handling the logout in JS\n redirect_response['logout'] = True\n if self.logout_reason is not None:\n utils.add_logout_reason(\n request, redirect_response, self.logout_reason,\n 'error')\n else:\n redirect_response = http.HttpResponse()\n # Use a set while checking if we want a cookie's attributes\n # copied\n cookie_keys = {'max_age', 'expires', 'path', 'domain',\n 'secure', 'httponly', 'logout_reason'}\n # Copy cookies from HttpResponseRedirect towards HttpResponse\n for cookie_name, cookie in response.cookies.items():\n cookie_kwargs = dict((\n (key, value) for key, value in cookie.items()\n if key in cookie_keys and value\n ))\n redirect_response.set_cookie(\n cookie_name, cookie.value, **cookie_kwargs)\n redirect_response['X-Horizon-Location'] = response['location']\n upload_url_key = 'X-File-Upload-URL'\n if upload_url_key in response:\n self._copy_headers(response, redirect_response,\n (upload_url_key, 'X-Auth-Token'))\n return redirect_response\n if queued_msgs:\n # TODO(gabriel): When we have an async connection to the\n # client (e.g. websockets) this should be pushed to the\n # socket queue rather than being sent via a header.\n # The header method has notable drawbacks (length limits,\n # etc.) 
and is not meant as a long-term solution.\n response['X-Horizon-Messages'] = json.dumps(queued_msgs)\n return response", "def get_response(self):\r\n response = self.response\r\n return response", "def from_sync_httpx_response(cls, httpx_response, target, **kwargs):\n return httpcore.Response(\n status=httpx_response.status_code,\n headers=httpx_response.headers.raw,\n content=httpx_response.stream,\n extensions=httpx_response.extensions,\n )", "def create_response(self, request, data, response_class=HttpResponse, **response_kwargs):\n desired_format = self.determine_format(request)\n serialized = self.serialize(request, data, desired_format)\n return response_class(content=serialized, content_type=build_content_type(desired_format), **response_kwargs)", "def serialize_response(self, response):\n raise NotImplementedError()", "def finalize_response(self, response):\n if self.request.is_ajax() and response.status_code == 302:\n if self.ajax_catch_redirects:\n return http.HttpResponse(\n json.dumps(\n {\n 'redirect': response['location'],\n 'result': self.result_text,\n }\n ),\n content_type='application/json',\n )\n return response", "def get_json_response(obj):\n return HttpResponse(json.dumps(obj))", "def get_json_response(obj):\n return HttpResponse(json.dumps(obj))", "def json_response(obj):\n return HttpResponse(json.dumps(obj), content_type=\"application/json\")", "def dispatch(self, request, *args, **kwargs):\n # Wrap the dispatch method, so that we autoencode JSON\n response = super(JSONRestView, self).dispatch(request, *args, **kwargs)\n # If this is not an HTTPResponseBase object (Base class for responses) \n if not isinstance(response, HttpResponseBase):\n response = json_response(response)\n\n return response", "def get_json_response(self, content, **httpresponse_kwargs):\n\t\treturn HttpResponse(content,\n\t\t\t\t\t\t\t\t content_type='application/json',\n\t\t\t\t\t\t\t\t **httpresponse_kwargs)", "def get_response(self, request):\n data = self.get_data(request)\n outrepr = self.get_outrepr(request)\n return outrepr(data)" ]
[ "0.7379816", "0.7109294", "0.69839066", "0.6952941", "0.6952941", "0.6925957", "0.6925957", "0.69120204", "0.68229306", "0.6809943", "0.6809943", "0.6740994", "0.6736841", "0.6683467", "0.66039944", "0.65061057", "0.64141685", "0.640344", "0.6377596", "0.634833", "0.6296918", "0.6275522", "0.6235488", "0.619169", "0.61888534", "0.61888534", "0.6176314", "0.61546004", "0.6145264", "0.61393183" ]
0.759568
0
Return ContentType header with charset info.
def _get_content_type(self): return '%s; charset=%s' % (self.content_type, self.charset)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def charset(self) -> Optional[str]:\n raw = self._headers.get(hdrs.CONTENT_TYPE) # type: ignore[attr-defined]\n if self._stored_content_type != raw:\n self._parse_content_type(raw)\n return self._content_dict.get(\"charset\") # type: ignore[union-attr]", "def content_type_header(request: Request) -> str:\n return request.content_type", "def get_content_type_and_encoding(content_type_header):\n\tif not content_type_header:\n\t\treturn (None, None)\n\t\n\th_parts = content_type_header.split(';')\n\tcontent_type = h_parts[0]\n\tpage_encoding = None\n\tfor h_part in h_parts[1:]:\n\t\th_part = h_part.strip()\n\t\tif h_part.lower().startswith('charset='):\n\t\t\tpage_encoding = h_part[8:]\n\treturn (content_type, page_encoding,)", "def declared_encoding(self) -> Optional[str]:\n content_type = self.get(\"Content-Type\", \"\")\n return http_content_type_encoding(content_type)", "def content_type(self):\n return self._headers['CONTENT-TYPE']", "def content_type(self):\n return self._headers.get(\"content-type\")", "def build_content_type(fmt, encoding='utf-8'):\r\n if 'charset' in fmt:\r\n return fmt\r\n\r\n return \"%s; charset=%s\" % (fmt, encoding)", "def get_charset(request):\n\n content_type = request.META.get('CONTENT_TYPE', None)\n if content_type:\n return extract_charset(content_type) if content_type else None\n else:\n return None", "def content_type(self) -> str:\n raw = self._headers.get(hdrs.CONTENT_TYPE) # type: ignore[attr-defined]\n if self._stored_content_type != raw:\n self._parse_content_type(raw)\n return self._content_type # type: ignore[return-value]", "def extract_charset(content_type):\n\n match = charset_pattern.match(content_type)\n return match.group(1) if match else None", "def get_content_type(ct):\n content_type = ct\n\n if ct == \"csv\":\n content_type = \"text/csv\"\n elif ct == \"json\":\n content_type = \"application/json\"\n\n return content_type", "def get_content_type():\n return {'Content-type': 'application/json', 'Accept': 'text/plain'}", "def extractCharset(response, default='utf-8'):\n\n charset = default\n if 'content-type' in response.headers:\n for item in response.headers['content-type'].split(';'):\n if item.strip().startswith('charset'):\n charset = item.split('=')[1].strip()\n break\n return charset", "def build_content_type(format, encoding='utf-8'):\n if 'charset' in format:\n return format\n \n return \"%s; charset=%s\" % (format, encoding)", "def CONTENT_TYPE(self):\n return self.content_type", "def build_content_type(format, encoding='utf-8'):\n if 'charset' in format:\n return format\n\n return \"%s; charset=%s\" % (format, encoding)", "def CONTENT_TYPE(self):", "def encoding(response: tornado.httpclient.HTTPResponse) -> str:\n if 'Content-Encoding' in response.headers:\n return response.headers['Content-Encoding'].decode()\n elif 'Content-Type' in response.headers:\n headers = email.message_from_string('Content-Type: ' +\n response.headers['Content-Type'])\n return headers.get_param('charset', 'utf-8')\n else:\n return 'utf-8'", "def content_type(self) -> str:\n return pulumi.get(self, \"content_type\")", "def content_type(self):\r\n return self.__content_type", "def get_content_type(self, headers):\n if headers:\n for h, val in headers.items():\n if h.lower().strip() == 'content-type':\n # As it turns out, content-type often appears with some\n # additional values e.g \"text/css; charset=utf8\" so we want\n # just 'text/css' rather than the whole string\n return val[0].split(\";\")[0]\n return \"\"", "def content_type(self) -> 
pulumi.Output[str]:\n return pulumi.get(self, \"content_type\")", "def content_type(self):\n return self._content_type", "def get_content_type(self):\n if \"Content-Type\" not in self.headers:\n return None\n\n content_type = self.content_type\n\n # NOTE(markmc): text/plain is the default for eventlet and\n # other webservers which use mimetools.Message.gettype()\n # whereas twisted defaults to ''.\n if not content_type or content_type == 'text/plain':\n return None\n\n if content_type not in SUPPORTED_CONTENT_TYPES:\n raise exception.InvalidContentType(content_type=content_type)\n\n return content_type", "def content_type(self, value):\n if value:\n self._headers['CONTENT-TYPE'] = value\n else:\n self.del_header('CONTENT-TYPE')", "def content_type(self):\n return self.__content_type", "def getContentType(self):\n return self.content_type", "def __set_content_type(self):\n if self.headers is None:\n return\n\n content_type = self.headers.get(\"content-type\", None)\n\n if content_type is None:\n return\n if \";\" in content_type:\n content_type_parts = content_type.split(\";\")\n\n if len(content_type_parts) == 2:\n self.__content_type = content_type_parts[0]\n else:\n self.__content_type = content_type", "def __set_content_type(self):\r\n if self.headers is None:\r\n return\r\n\r\n content_type = self.headers.get(\"content-type\", None)\r\n\r\n if content_type is None:\r\n return\r\n if \";\" in content_type:\r\n content_type_parts = content_type.split(\";\")\r\n\r\n if len(content_type_parts) == 2:\r\n self.__content_type = content_type_parts[0]\r\n else:\r\n self.__content_type = content_type", "def content_type(self):\n return self.environ.get('CONTENT_TYPE') or 'application/octet-stream'" ]
[ "0.7331894", "0.71138537", "0.6978401", "0.6755701", "0.6664483", "0.6629774", "0.6557151", "0.65387005", "0.65190446", "0.64697444", "0.64657295", "0.6446364", "0.642422", "0.6410978", "0.63235193", "0.6279511", "0.6264188", "0.623511", "0.6197167", "0.61729234", "0.6166692", "0.6113517", "0.60943377", "0.6070859", "0.60630924", "0.60603076", "0.6055843", "0.60369766", "0.60036373", "0.5972495" ]
0.74809164
0
Initialize the manager. The ``_datamappers`` dictionary is initialized here to make testing easier.
def __init__(self): self._datamappers = { '*/*': DataMapper() }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self):\n self.data_set_loc = conf.config_section_mapper(\"filePath\").get(\"data_set_loc\")\n self.data_extractor = DataExtractor(self.data_set_loc)", "def do_init(self):\n\n pass", "def init(self, **kwargs):\n self._d = {}\n self._th = None\n self._run = True\n self.load()", "def initialize():\n manager.initialize()\n logs.exit_great_success()", "def initialize(self):\n\n db = dict()\n\n db['meta'] = Meta(None)\n db['race'] = Race(None, None, None, None, None)\n db['track'] = Track(None, None)\n db['classes'] = set([])\n db['teams'] = set([])\n db['drivers'] = set([])\n\n self.db = db", "def initialize(self):\n self.population.initialize()\n self.cache.initialize()\n if self.storage:\n self.storage.initialize()", "def _initialize(self):\n pass", "def _initialize(self):\n pass", "def _initialize(self):\n pass", "def initialize(self):\n for key in self.parameter_dict:\n self.models[key] = self._create_model(key)", "def setup(self):\n\n logger.info('Setting up SimulatedMaps module.')\n\n # Save the cls as a class attribute\n self.cls = self.read_cls()\n\n logger.info('Setup done!')", "def __init__(self, **kwargs):\n DataLoader.__init__(self, **kwargs)\n \n self._results_ = None", "def _manually_initialize(self) -> None:\n # XXX: maybe refactor, this is actually part of the public interface\n pass", "def memb_init(self):\n self.initialize()", "def __init__(self, data_manager, response_variable=None):\n\n self._is_init = False\n\n if not isinstance(data_manager, DataManager):\n raise TypeError(\"data_manager must be type data.DataManager\")\n\n self._data_manager = copy.deepcopy(data_manager)\n\n variable_names = data_manager.get_variable_names()\n\n # set the response variable\n self._response_variable = None\n if not response_variable:\n response_variable = variable_names[0]\n self.set_response_variable(response_variable)\n\n # initialize the explanatory variables attribute\n self._explanatory_variables = tuple(variable_names[1:])\n\n # noinspection PyUnresolvedReferences\n self._excluded_observations = pd.DatetimeIndex([], name='DateTime')\n self._model_dataset = pd.DataFrame()\n self._model_data_origin = pd.DataFrame(columns=['variable', 'origin'])\n\n # initialize the model attribute\n self._model = None\n\n self._is_init = True", "def __init__(self):\n super().__init__()\n self.data_set_loc = conf.config_section_mapper(\"filePath\").get(\"data_set_loc\")\n self.data_extractor = DataExtractor(self.data_set_loc)\n actor_actor_matrix_obj.fetchActorActorSimilarityMatrix()", "def initialize(self):\n pass", "def initialize(self):\n pass", "def initialize(self):\n pass", "def initialize(self):\n pass", "def initialize(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n self._service_store = ServiceStore(self.driver, self.network)\n self._emulator = NetworkEmulator(self.store, self.driver)" ]
[ "0.6461124", "0.6418458", "0.6413563", "0.6375085", "0.6290806", "0.6228817", "0.6185347", "0.6185347", "0.6185347", "0.61723155", "0.61663306", "0.6159628", "0.6139021", "0.6131483", "0.61117107", "0.6094118", "0.6062833", "0.6062833", "0.6062833", "0.6062833", "0.6062833", "0.60238105", "0.60238105", "0.60238105", "0.60238105", "0.60238105", "0.60238105", "0.60238105", "0.60238105", "0.6022567" ]
0.7486393
0
Select appropriate formatter based on the request.
def select_formatter(self, request, resource): # 1. get from resource if resource.mapper: return resource.mapper # 2. get from url mapper_name = self._get_name_from_url(request) if mapper_name: return self._get_mapper(mapper_name) # 3. get from accept header mapper_name = self._get_name_from_accept(request) if mapper_name: return self._get_mapper(mapper_name) # 4. use resource's default if resource.default_mapper: return resource.default_mapper # 5. use manager's default return self._get_default_mapper()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _determine_format(self, request):\n return determine_format(request, self._meta.serializer, default_format=self._meta.default_format)", "def determine_format(request, serializer, default_format='application/json'):\r\n # First, check if they forced the format.\r\n if request.GET.get('format'):\r\n if request.GET['format'] in serializer.formats:\r\n return serializer.get_mime_for_format(request.GET['format'])\r\n\r\n # Try to fallback on the Accepts header.\r\n if request.META.get('HTTP_ACCEPT', '*/*') != '*/*':\r\n formats = list(serializer.supported_formats) or []\r\n # Reverse the list, because mimeparse is weird like that. See also\r\n # https://github.com/toastdriven/django-tastypie/issues#issue/12 for\r\n # more information.\r\n formats.reverse()\r\n best_format = mimeparse.best_match(\r\n formats, request.META['HTTP_ACCEPT'])\r\n\r\n if best_format:\r\n return best_format\r\n\r\n # No valid 'Accept' header/formats. Sane default.\r\n return default_format", "def determine_format(request, serializer, default_format='application/json'):\n # First, check if they forced the format.\n if request.GET.get('format'):\n if request.GET['format'] in serializer.formats:\n return serializer.get_mime_for_format(request.GET['format'])\n \n # If callback parameter is present, use JSONP.\n if request.GET.has_key('callback'):\n return serializer.get_mime_for_format('jsonp')\n \n # Try to fallback on the Accepts header.\n if request.META.get('HTTP_ACCEPT', '*/*') != '*/*':\n formats = list(serializer.supported_formats) or []\n # Reverse the list, because mimeparse is weird like that. See also\n # https://github.com/toastdriven/django-tastypie/issues#issue/12 for\n # more information.\n formats.reverse()\n best_format = mimeparse.best_match(formats, request.META['HTTP_ACCEPT'])\n \n if best_format:\n return best_format\n \n # No valid 'Accept' header/formats. Sane default.\n return default_format", "def _get_format(self, request):\n\n # Derive a list of 'formats.Format' instances from the list of formats these views support.\n supported_formats = [formats.find(format) for format in self.supported_formats]\n\n # Determine format by extension...\n if '.' 
in request.path:\n extension = request.path.split('.')[-1]\n\n try:\n format = formats.find_by_extension(extension)\n except formats.UnknownFormat:\n return None\n\n if format in supported_formats:\n return format\n else:\n return None\n\n # Determine format by HTTP Accept header...\n if 'HTTP_ACCEPT' in request.META:\n content_types = parse_http_accept_header(request.META['HTTP_ACCEPT'])\n\n # Only consider 'accept' headers with a single format in an attempt to play nice\n # with browsers that ask for formats they really should not want.\n if len(content_types) == 1:\n content_type = content_types[0]\n\n # If the request has no preference as to the format of its response, prefer the\n # first of the view's supported formats.\n if content_type == '*/*':\n return supported_formats[0]\n\n try:\n format = formats.find_by_content_type(content_type)\n except formats.UnknownFormat:\n return None\n\n if format in supported_formats:\n return format\n else:\n return None\n\n # If no format is given by either extension or header, default to the format given in\n # RESPITE_DEFAULT_FORMAT (given, of course, that it's supported by the view).\n if DEFAULT_FORMAT:\n format = formats.find(DEFAULT_FORMAT)\n\n if format in supported_formats:\n return format\n else:\n return None", "def get_format(request, default='html'):\n format_ = request.GET.get('format', None)\n if not format_:\n format_ = request.GET.get('view', default)\n return format_", "def get_formatter(style):\n if style == 'authoryear':\n return AuthorYearFormatter\n return AuthorYearFormatter", "def initialize_formatter(config):\n if config.json: # pylint: disable=R1705\n return formatters.JsonFormatter()\n elif config.severity: # pylint: disable=R1705\n return formatters.SeverityFormatter(config.colored)\n return formatters.Formatter(config.colored)", "def parser_formatter(format_class, **kwargs):\n try:\n return lambda prog: format_class(prog, **kwargs)\n except TypeError:\n return format_class", "def parser_formatter(format_class, **kwargs):\n try:\n return lambda prog: format_class(prog, **kwargs)\n except TypeError:\n return format_class", "def default_formatter(self, data):\n return data", "def set_formatter(self, formatter):\n self.format = formatter", "def formatter(formatter_name):\n\n def _formatter_decorator(f):\n def _formatter_wrapper(*wrapper_args, **wrapper_kwargs):\n ctx = wrapper_args[1]\n if not ctx.json and formatter_name in _formatter_functions:\n ctx.format_function = _formatter_functions[formatter_name]\n return f(*wrapper_args, **wrapper_kwargs)\n\n return _formatter_wrapper\n\n return _formatter_decorator", "def formatter(question: dict):\n fmt = question.get('possibilities', {}).get('format')\n if fmt == 'date':\n return pd.to_datetime\n elif fmt == 'num':\n return lambda x: x\n else:\n raise ValueError(f\"Question format {fmt} unknown\")", "def get_request_format():\n # if the user specifies a `format` HTTP parameter, use that\n mimetype = request.args.get('format', '').strip() or \\\n request.accept_mimetypes.best\n if not mimetype:\n return 'html' # default\n mimetype = mimetype.lower()\n choices = {\n 'application/json': 'json',\n 'text/javascript': 'json',\n 'application/twiml': 'twiml',\n 'text/html': 'html',\n 'text/plain': 'text',\n }\n if mimetype in choices:\n return choices[mimetype]\n bits = mimetype.split(\"/\")\n if len(bits) == 2:\n return bits[-1]\n return mimetype", "def format_(self):\n return self.set_format or self.default_format or self.FALLBACK_FORMAT", "def get_formatter(self, group):\n return 
getattr(self, \"format_\" + group + \"_standings\")", "def _config_formatter(self):\n filter = int( self.markup_filter )\n if filter == self.MARKUP_MARKDOWN:\n return { 'filter_name':'markdown' }\n elif filter == self.MARKUP_MARKDOWN_CODEHILITE:\n return { 'filter_name' : 'markdown',\n 'extensions' : [ 'codehilite' ] }\n elif filter == self.MARKUP_REST:\n return { 'filter_name':'restructuredtext' }\n elif filter == self.MARKUP_TEXTILE:\n return { 'filter_name' : 'textile' }\n else:\n raise ValueError( 'Invalid option for Entry.markup_filter' )", "def determine_emitter(self, request, *args, **kwargs):\n em = kwargs.pop(\"emitter_format\", None)\n\n if not em:\n em = request.GET.get(\"format\", \"json\")\n\n return em", "def assign_format(self):\n if self.is_output or self.is_req_output:\n if self.pname in self.tool_data[self.tool_name]['output_fmt']:\n return self.tool_data[self.tool_name]['output_fmt'][self.pname]\n elif self.pname in self.gen_out_fmt:\n return self.gen_out_fmt[self.pname]\n elif self.is_input:\n if self.pname in self.tool_data[self.tool_name]['input_fmt']:\n print(self.tool_data[self.tool_name])\n return self.tool_data[self.tool_name]['input_fmt'][self.pname]\n elif self.pname in self.gen_in_fmt:\n return self.gen_in_fmt[self.pname]\n else:\n # Not sure yet what this will be used for, but I think we need it.\n return ''", "def get_format(self):\n pass", "def cli_formatter(self, data):\r\n if data:\r\n self._generic_cli_formatter(self.Response, data)", "def __format__(self, format_spec):\n if format_spec == \"polite\":\n return self.polite\n elif format_spec == \"casual\":\n return self.casual\n else:\n # Using string addition here to avoid triggering flake8-sfs\n # while still giving a meaningful self-contained example:\n raise ValueError(format_spec + \" not a format defined by Client object\")", "def opt_format(self, fmt):\n key = get_enum_key(fmt, FORMATTERS)\n if key is not None:\n self.conf[\"format\"] = key\n print(\"Set format %r\" % key)\n else:\n print(\"Unknown format %r\" % fmt)", "def _get_format(self, token):\n if token in self._formats:\n return self._formats[token]\n\n if self._style is None:\n result = self._get_format_from_document(token, self._document)\n else:\n result = self._get_format_from_style(token, self._style)\n\n self._formats[token] = result\n return result", "def get_formatter(self, **kwargs):\n config = dict([\n (attr, getattr(self, attr))\n for attr in [\n \"include_sign\",\n \"group_with_commas\",\n \"num_decimal_places\",\n ]\n ])\n config.update(kwargs)\n return \"\".join([\n \"{\",\n config.get(\"field_name\", \"\"),\n \":\",\n \"+\" if config[\"include_sign\"] else \"\",\n \",\" if config[\"group_with_commas\"] else \"\",\n \".\", str(config[\"num_decimal_places\"]), \"f\",\n \"}\",\n ])", "def determine_emitter(cls, request):\r\n default_emitter = cls._meta.emitters[0]\r\n if not request:\r\n return default_emitter\r\n\r\n if request.method == 'OPTIONS':\r\n return JSONEmitter\r\n\r\n accept = request.META.get('HTTP_ACCEPT', '*/*')\r\n if accept == '*/*':\r\n return default_emitter\r\n\r\n base_format = mimeparse.best_match(cls._meta.emitters_dict.keys(),\r\n accept)\r\n return cls._meta.emitters_dict.get(\r\n base_format,\r\n default_emitter)", "def format(self, value) -> None:\n\n if value.upper() not in ['JSON', 'XML']:\n raise ValueError(\n 'Incorrect format, please set to either `XML` or `JSON`.'\n )\n\n self._format = value.upper()", "def init_logging(log_format: str='default', level: str='INFO') -> Union[DefaultFormatter, 
DebugFormatter]:\n stream_handler = logging.StreamHandler()\n if log_format == 'default':\n formatter = DefaultFormatter\n elif log_format == 'human':\n formatter = DebugFormatter\n else:\n raise ValueError('Unrecognized Format: {}'.format(log_format))\n stream_handler.setFormatter(formatter())\n ROOT_LOGGER.addHandler(stream_handler)\n ROOT_LOGGER.setLevel(level)\n return formatter", "def set_format_by_type(self, value, format):\n self.set_render_func_by_type(value, format.format)", "def get_renderers(self, request):\n if self._format_override_parameter in request.REQUEST:\n formats = request.REQUEST[self._format_override_parameter].split(',')\n renderers, seen_formats = [], set()\n for format in formats:\n if format in self._renderers_by_format and format not in seen_formats:\n renderers.extend(self._renderers_by_format[format])\n elif request.META.get('HTTP_ACCEPT'):\n accepts = self.parse_accept_header(request.META['HTTP_ACCEPT'])\n renderers = MediaType.resolve(accepts, self._renderers)\n elif self._default_format:\n renderers = self._renderers_by_format[self._default_format]\n else:\n renderers = []\n if self._force_fallback_format:\n renderers.extend(self._renderers_by_format[self._force_fallback_format])\n return renderers" ]
[ "0.70516115", "0.68860674", "0.67620766", "0.6718547", "0.641668", "0.62356836", "0.6230665", "0.61402184", "0.61402184", "0.6086643", "0.6054365", "0.59921783", "0.59283495", "0.5847288", "0.58354276", "0.5824539", "0.57529914", "0.5744481", "0.5692539", "0.569212", "0.564786", "0.5626562", "0.5588993", "0.558028", "0.5545742", "0.55029684", "0.54785", "0.5444825", "0.543519", "0.54083747" ]
0.7539723
0
Select appropriate parser based on the request.
def select_parser(self, request, resource): # 1. get from resource if resource.mapper: return resource.mapper # 2. get from content type mapper_name = self._get_name_from_content_type(request) if mapper_name: return self._get_mapper(mapper_name) # 3. get from url mapper_name = self._get_name_from_url(request) if mapper_name: return self._get_mapper(mapper_name) # 4. use resource's default if resource.default_mapper: return resource.default_mapper # 5. use manager's default return self._get_default_mapper()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def select_parser():\n\n try:\n select_texttools_parser()\n except ImportError:\n select_python_parser()", "def get_parser(self):\n if self.vendor and self.platform and self.version:\n cls = self.profile.get_profile().get_parser(\n self.vendor.code, self.platform.name, self.version.version\n )\n if cls:\n return get_handler(cls)(self)\n return get_handler(\"noc.cm.parsers.base.BaseParser\")(self)", "def get_first_available_parser():\n if sys.platform == 'cli':\n try:\n from bridge.parser.bridge_dotnet import Parser\n return Parser\n except ImportError:\n pass\n elif sys.platform[:4] == 'java':\n try:\n from bridge.parser.bridge_java import Parser\n return Parser\n except ImportError:\n pass\n \n from bridge.parser.bridge_default import Parser\n \n return Parser", "def _parser(self, request, *args, **kwargs):\n\n self.request = request\n\n # parse header\n self.header = {k[5:]: v for k, v in request.META.items() if k.startswith('HTTP_')}\n self.header['CONTENT_TYPE'] = request.META.get('CONTENT_TYPE')\n\n # parse boby\n if request.method not in ['GET', 'HEAD']:\n\n # TODO: serve other body format\n if 'multipart/form-data' in self.header['CONTENT_TYPE']:\n self.body = request.POST.dict()\n\n else:\n # default: application/json\n if self.request.body:\n try:\n self.body = json.loads(self.request.body)\n except Exception as e:\n raise Exception('parse json body error')\n \n # parse query\n self.query = request.GET.dict()\n\n # parse cookie\n self.cookie = {k: v for k, v in request.COOKIES.items()}", "def _get_parser(self, language: str):\n parser = None\n if language:\n parser = self.parsers.get(language)\n\n if not parser:\n self.log.warning(f\"Content parser for {language} is not available.\")\n return parser", "def select_python_parser():\n\n global selected_parser\n import cvs2svn_rcsparse.default\n selected_parser = cvs2svn_rcsparse.default.Parser", "def __init__(self, parser):\n if parser == \"csv\":\n self._parser = CSVParser()\n elif parser == \"static\":\n self._parser = StaticParser()\n else:\n raise NotImplementedError", "def get_parser_for_file_type(file_type):\n parser = file_type.upper()\n if file_type not in SUPPORTED_PARSERS:\n parser = 'XML'\n return parser", "def get_parser(project, layoutdir):\n for parser_class in PARSERS:\n parser = parser_class(project, layoutdir)\n if parser.can_load():\n return parser\n raise ValueError(\"No loader available for '{0}'.\".format(project))", "def _get_parser(filepath, cfg):\n if not os.path.isfile(filepath):\n LOG.error('File not found: %s', filepath)\n return\n valid_parsers = importer.get_parsers(filepath, cfg)\n if not valid_parsers:\n LOG.error('No parsers found for file: %s', filepath)\n return\n\n if len(valid_parsers) > 1:\n while True:\n print('More than one valid parser found. '\n 'Please select which one to use:')\n for idx, vp in enumerate(valid_parsers):\n print('[{}] {}'.format(idx, vp.__name__))\n inp = input()\n try:\n parser = valid_parsers[inp]\n break\n except (IndexError, TypeError):\n print('Invalid input. 
Please select the parser number.')\n else:\n parser = valid_parsers[0]\n\n return parser", "def parser(self, q, casing=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/{1}'.format(self.get_url(), 'parser')\r\n\r\n request = http.Request('GET', url, params)\r\n\r\n return request, parsers.parse_json", "def GetParserObjectByName(cls, parser_name):\n parser_class = cls._parser_classes.get(parser_name, None)\n if not parser_class:\n return\n return parser_class()", "def get_parser_for_uri(uri):\n path = uri_to_path(uri)\n parser = get_extension(path)\n\n if parser not in SUPPORTED_PARSERS:\n parser = 'XML'\n\n return parser", "def parser_dispatch(lang=\"cpp\"):\n parser_cls = {\n \"cpp\": cpp.Parser,\n \"c\": c.Parser,\n }.get(lang)\n return parser_cls", "def parse_from_request(self, name, request):\n # type: (str, Request) -> Any\n name_bytes = name.encode()\n if name_bytes not in request.args:\n if self.default is not None:\n return self.default\n if self.required:\n raise Error(BAD_REQUEST, message=b\"%s is required\" % name_bytes)\n else:\n return None\n\n if len(request.args[name_bytes]) != 1:\n raise Error(BAD_REQUEST, message=b\"Pass exactly one argument for %s\" % name_bytes)\n\n val = request.args[name_bytes][0]\n return self.parse(val)", "def get_read_parser(format):\n format = format.lower()\n if format == 'bed':\n return BedReadParser\n elif format == 'bedpe':\n return BedPeReadParser\n elif format == 'sam':\n return SamReadParser\n elif format == 'bam':\n return BamReadParser\n else:\n raise ValueError(f\"unknown read file format: {format!r}\")", "def __parser__(self):\n return self", "def select_formatter(self, request, resource):\n\n # 1. get from resource\n if resource.mapper:\n return resource.mapper\n # 2. get from url\n mapper_name = self._get_name_from_url(request)\n if mapper_name:\n return self._get_mapper(mapper_name)\n # 3. get from accept header\n mapper_name = self._get_name_from_accept(request)\n if mapper_name:\n return self._get_mapper(mapper_name)\n # 4. use resource's default\n if resource.default_mapper:\n return resource.default_mapper\n # 5. use manager's default\n return self._get_default_mapper()", "def find_parser(optimize=1):\n for p in PARSERS:\n if not p.parsing:\n print('REUSE PARSER')\n return p\n print('NEW PARSER')\n return Parser(optimize=optimize)", "def set_parser(self, output_format):\n self.output_parser = output_parsers.get(output_format, lambda x:x)", "def handle_request(self, request: HttpParser) -> None:\n raise NotImplementedError() # pragma: no cover", "def _get_format(self, request):\n\n # Derive a list of 'formats.Format' instances from the list of formats these views support.\n supported_formats = [formats.find(format) for format in self.supported_formats]\n\n # Determine format by extension...\n if '.' 
in request.path:\n extension = request.path.split('.')[-1]\n\n try:\n format = formats.find_by_extension(extension)\n except formats.UnknownFormat:\n return None\n\n if format in supported_formats:\n return format\n else:\n return None\n\n # Determine format by HTTP Accept header...\n if 'HTTP_ACCEPT' in request.META:\n content_types = parse_http_accept_header(request.META['HTTP_ACCEPT'])\n\n # Only consider 'accept' headers with a single format in an attempt to play nice\n # with browsers that ask for formats they really should not want.\n if len(content_types) == 1:\n content_type = content_types[0]\n\n # If the request has no preference as to the format of its response, prefer the\n # first of the view's supported formats.\n if content_type == '*/*':\n return supported_formats[0]\n\n try:\n format = formats.find_by_content_type(content_type)\n except formats.UnknownFormat:\n return None\n\n if format in supported_formats:\n return format\n else:\n return None\n\n # If no format is given by either extension or header, default to the format given in\n # RESPITE_DEFAULT_FORMAT (given, of course, that it's supported by the view).\n if DEFAULT_FORMAT:\n format = formats.find(DEFAULT_FORMAT)\n\n if format in supported_formats:\n return format\n else:\n return None", "def _get_parserobj(self, option_list):\n if '--version' in self.parselines[0]:\n if 'optparse' == self.parser_type:\n parser = OptionParser(version=\"dummy\")\n else:\n parser = ArgumentParser(\n version='dummy',\n formatter_class=RawDescriptionHelpFormatter)\n else:\n if 'optparse' == self.parser_type:\n parser = OptionParser()\n else:\n parser = ArgumentParser(\n formatter_class=RawDescriptionHelpFormatter)\n for opt in option_list:\n if opt['short'] and self.parser_type is 'optparse':\n parser.add_option(opt['short'], opt['long'],\n metavar=opt['metavar'],\n help=opt['help'].strip())\n elif not opt['short'] and self.parser_type is 'optparse':\n parser.add_option(opt['long'],\n metavar=opt['metavar'],\n help=opt['help'].strip())\n elif opt['short'] and self.parser_type is 'argparse':\n parser.add_argument(opt['short'], opt['long'],\n metavar=opt['metavar'],\n help=opt['help'].strip())\n elif not opt['short'] and self.parser_type is 'argparse':\n parser.add_argument(opt['long'],\n metavar=opt['metavar'],\n help=opt['help'].strip())\n else:\n raise InvalidParserTypeError(\"Invalid paresr type.\")\n return parser", "def get_parser(self, tag_name):\n return self.mock_parsers.setdefault(tag_name, MockParser(tag_name))", "def parser(self):\n return self._parser", "def getParser(self):\n return self._parser", "def _get_parser(current_parser=None):\n if not current_parser:\n current_parser = configparser.ConfigParser()\n current_parser.read(PRAW_FILE_PATH)\n return current_parser", "def change_parser(parser):\r\n prev = base.current_executor()\r\n try:\r\n base.use_executor(lambda request, _: prev(request, parser))\r\n yield\r\n finally:\r\n base.use_executor(prev)", "def _load_parser(self, grammar: str, protocol: Protocol) -> None:\n self.parser = parsley.makeGrammar(grammar, {\n 'punctuation': string.punctuation,\n 'ascii_uppercase': string.ascii_uppercase,\n 'ascii_lowercase': string.ascii_lowercase,\n 'itertools': itertools,\n\n 'Art': Art,\n 'ArtField': ArtField,\n 'Field': Field,\n 'RelLoc': RelLoc,\n 'Names': Names,\n\n 'protocol': protocol,\n 'Boolean': Boolean,\n 'Size': Size,\n 'ArgumentExpression': ArgumentExpression,\n 'MethodInvocationExpression': MethodInvocationExpression,\n 'ConstantExpression': ConstantExpression,\n 
'FieldAccessExpression': FieldAccessExpression,\n 'ThisExpression': ThisExpression,\n 'IfElseExpression': IfElseExpression,\n })", "def select_texttools_parser():\n\n global selected_parser\n import cvs2svn_rcsparse.texttools\n selected_parser = cvs2svn_rcsparse.texttools.Parser" ]
[ "0.67126334", "0.6521535", "0.6489922", "0.640149", "0.6139885", "0.61379045", "0.613299", "0.61198676", "0.6049748", "0.602667", "0.5946609", "0.58708733", "0.58408815", "0.5837304", "0.58118755", "0.5710313", "0.5667457", "0.5613246", "0.5606975", "0.5493934", "0.5479771", "0.54696", "0.5444578", "0.54171056", "0.53872496", "0.5317217", "0.52991647", "0.5295775", "0.5290234", "0.5282592" ]
0.73894787
0
Returns the mapper based on the content type.
def get_mapper_by_content_type(self, content_type): content_type = util.strip_charset(content_type) return self._get_mapper(content_type)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mapper(self, structure):\n type_ = structure['type']\n mapper = self.mappers[type_]\n return mapper", "def mapper_for_type(self, type_):\n mapper = self.mappers[type_]\n return mapper", "def get_mapping_type(cls):\n ...", "def _get_mapper(obj):\n its_a_model = isinstance(obj, type)\n mapper = class_mapper if its_a_model else object_mapper\n return mapper(obj)", "def _do_mapping(self):\n pass", "def getMappingType(self):\n \n return self.mapping_type", "def select_parser(self, request, resource):\n\n # 1. get from resource\n if resource.mapper:\n return resource.mapper\n # 2. get from content type\n mapper_name = self._get_name_from_content_type(request)\n if mapper_name:\n return self._get_mapper(mapper_name)\n # 3. get from url\n mapper_name = self._get_name_from_url(request)\n if mapper_name:\n return self._get_mapper(mapper_name)\n # 4. use resource's default\n if resource.default_mapper:\n return resource.default_mapper\n # 5. use manager's default\n return self._get_default_mapper()", "def _get_mapper(self, mapper_name):\n\n if mapper_name in self._datamappers:\n # mapper found\n return self._datamappers[mapper_name]\n else:\n # unsupported format\n return self._unknown_format(mapper_name)", "def mapper_id(self, structure):\n type_ = structure['type']\n mapper = self.mappers[type_]\n return id(mapper)", "def typeMapping(self):\n statemachines = self.package.getStateMachines()\n classes = {}\n for sm in statemachines:\n workflowId = sm.getCleanName()\n for klass in sm.getClasses():\n # We allow to bound a workflow to a <<stub>>\n if klass.isabstract:\n continue\n elif not self.atgenerator._isContentClass(klass) and \\\n not klass.hasStereoType(self.atgenerator.stub_stereotypes):\n continue\n name = klass.getTaggedValue('portal_type') or \\\n klass.getCleanName()\n classes.setdefault(name, []).append(workflowId)\n\n classNames = classes.keys()\n classNames.sort()\n result = []\n for id_ in classNames:\n item = {}\n item['id'] = id_ # portal type\n item['workflowIds'] = classes[id_]\n result.append(item)\n\n # no need to check use_workflow, it's already done by xmiparser.XMIModel.associateClassesToStateMachines,\n # so the sm.getClasses() already returns classes which uses use_workflow tgv.\n # if you uncomment thoses lines, you will have the bound-workflow twice\n #handle the use_workflow tgvs\n #for klass in self.package.getProduct().getClasses(recursive=True):\n # if klass.hasTaggedValue('use_workflow'):\n # result.append(dict(id=klass.getCleanName(),workflowId=klass.getTaggedValue('use_workflow')))\n # remember special case\n remembertypes = []\n self.atgenerator.getRememberTypes(remembertypes, self.package)\n for remembertype in remembertypes:\n existent = False\n for type in result:\n if type['id'] == remembertype['portal_type']:\n existent = True\n if existent:\n continue\n additionaltype = dict()\n additionaltype['id'] = remembertype['portal_type']\n additionaltype['workflowIds'] = [remembertype['workflow']]\n result.append(additionaltype)\n\n # take tgv on state machine itself into account\n for sm in statemachines:\n bindings = sm.getTaggedValue('bindings', '')\n bindings = [b.strip() for b in bindings.split(', ') if b.strip()]\n for binding in bindings:\n item = {}\n item['id'] = binding\n item['workflowIds'] = [sm.getCleanName()]\n result.append(item)\n\n return result", "def route(self, media):\n for mr in media:\n for accepted in self.dumpers:\n if mr in accepted:\n self.content_type = mr # TODO handle \"*\" in media range\n return self.dumpers[accepted]\n return 
(None, None)", "def get_content_type_configs(self) -> t.Mapping[str, ContentTypeConfig]:", "def _get_default_mapper(self):\n\n return self._datamappers['*/*']", "def mapper(self):\n if not self._fitted:\n raise ValueError(\"Cannot get mapper if object has not been fitted.\")\n return self._mapper.copy()", "def get_items_by_type(self, item_type, object_type=None):\n # Check to see if the item_type is in the map.\n try:\n dict_or_list = self.model_map[item_type]\n except KeyError:\n return None\n\n # If we're working with objects,\n if item_type == 'object':\n # See if we have this object_type.\n if object_type is not None:\n try:\n object_dict = self.model_map[item_type][object_type]\n except KeyError:\n return None\n \n # Loop over the map and create the return. In each list,\n # the first element is a key into the model_dict, and \n # the user of this method doesn't care about that. Thus,\n # we grab v[1], which is the actual dictionary.\n out = {k: v[1] for k, v in object_dict.items()}\n \n else:\n # Require and object_type for item_type of 'object'\n raise ValueError(\"If item_type is 'object', then \" +\n \"object_type must not be None.\")\n elif item_type == 'clock':\n # Simply return the clock dictionary. The first item in this \n # list is the key into the model map.\n out = dict_or_list[1]\n elif item_type == 'module':\n # Return a dict of dicts keyed by module name.\n out = {k: v[1] for k, v in dict_or_list.items()}\n elif item_type == 'object_unnamed':\n # Return a list which doesn't include the keys into the \n # model_dict.\n out = [i[1] for i in dict_or_list]\n else:\n # Hopefully we never get here, as the try/except at the\n # very beginning of this method will catch most cases.\n raise ValueError(\n 'The given item_type, {}, is not supported.'.format(item_type))\n\n # We can get a 0 length if the given item type existed at one\n # point, but has then been removed. 
In this case, it exists in\n # the map, but is empty.\n if len(out) == 0:\n return None\n else:\n return out", "def get_serializer(self, content_type, default_serializers=None):\n\n default_serializers = default_serializers or {}\n\n try:\n mtype = _MEDIA_TYPE_MAP.get(content_type, content_type)\n if mtype in self.serializers:\n return mtype, self.serializers[mtype]\n else:\n return mtype, default_serializers[mtype]\n except (KeyError, TypeError):\n raise exception.InvalidContentType(content_type=content_type)", "def mappings(self) -> pulumi.Output[Optional[Sequence['outputs.TypePropertiesMappingResponse']]]:\n return pulumi.get(self, \"mappings\")", "def of(cls, type_):\n\n for subcls in Mapping.__subclasses__():\n if subcls.type() == type_:\n return subcls()\n\n return None", "def guess_mapping_format_from_content(content):\n regex = re.compile(\"MAPPING.*?FROM\", re.DOTALL | re.IGNORECASE)\n syntax = \"SMS2\" if regex.search(content) else None\n\n return syntax", "def from_content_type(self, content_type, content_encoding=None):\n if content_encoding:\n return Response\n mimetype = to_native_str(content_type).split(';')[0].strip().lower()\n return self.from_mimetype(mimetype)", "def testContentTypes_JustDefault(self):\n self.mox.ReplayAll()\n\n mapper = service_handlers.RPCMapper(['GET', 'POST'],\n 'my-content-type',\n self.protocol)\n\n self.assertEquals(frozenset(['GET', 'POST']), mapper.http_methods)\n self.assertEquals('my-content-type', mapper.default_content_type)\n self.assertEquals(frozenset(['my-content-type']),\n mapper.content_types)\n\n self.mox.VerifyAll()", "def getMapping(self):\n self._process()\n return self._mapping", "def from_mimetype(self, mimetype):\n if mimetype is None:\n return Response\n elif mimetype in self.classes:\n return self.classes[mimetype]\n else:\n basetype = \"%s/*\" % mimetype.split('/')[0]\n return self.classes.get(basetype, Response)", "def _get_matched_by_type(\n self, connectable: bool\n ) -> MutableMapping[str, IntegrationMatchHistory]:\n return self._matched_connectable if connectable else self._matched", "def _get_mapping_record(self):\n return self.__mapping_record", "def decoder(self, contentType, decoder):\n pass", "def unstructured_mappers(self):\n return filter(lambda m: path_type.UNSTRUCTURED in m.path_types, self.mappers)", "def get_items(self):\n return self._internal_type_mapping", "def yield_parts(self, mime_type):\n yield from self.parts_by_type[mime_type]", "def _get_iomaps_shared_result_file_type(self):\n raise Exception(\"Classes using the IOMapsMixin must override the _get_iomaps_shared_result_file_type method.\")" ]
[ "0.6787792", "0.6768891", "0.62163997", "0.5705611", "0.56396294", "0.55432177", "0.5536067", "0.5350777", "0.53324276", "0.53072", "0.529081", "0.5220667", "0.5189422", "0.5182988", "0.51817083", "0.50861496", "0.50557506", "0.5036861", "0.50153947", "0.49689895", "0.49663913", "0.49193448", "0.49163407", "0.4910162", "0.4903001", "0.48936886", "0.4877736", "0.4865307", "0.48641986", "0.4861451" ]
0.77844703
0
Set the default mapper to be used when no format is defined. This is the same as calling ``register_mapper`` with ``*/*``, except that ``None`` may be given as the parameter.
def set_default_mapper(self, mapper): mapper = mapper or DataMapper() self._datamappers['*/*'] = mapper
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_default_mapper(self):\n\n return self._datamappers['*/*']", "def set_mapper(obj, mapper):\n setattr(obj, MAPPER, mapper)\n return mapper", "def _get_mapper(self, mapper_name):\n\n if mapper_name in self._datamappers:\n # mapper found\n return self._datamappers[mapper_name]\n else:\n # unsupported format\n return self._unknown_format(mapper_name)", "def set_default(self, node: Node) -> None:\n if isinstance(node, str):\n self._default = TextRecord(self.node, node)\n if isinstance(node, ast.AST):\n self._default = ExpressionRecord(node)", "def mapper(self, structure):\n type_ = structure['type']\n mapper = self.mappers[type_]\n return mapper", "def select_formatter(self, request, resource):\n\n # 1. get from resource\n if resource.mapper:\n return resource.mapper\n # 2. get from url\n mapper_name = self._get_name_from_url(request)\n if mapper_name:\n return self._get_mapper(mapper_name)\n # 3. get from accept header\n mapper_name = self._get_name_from_accept(request)\n if mapper_name:\n return self._get_mapper(mapper_name)\n # 4. use resource's default\n if resource.default_mapper:\n return resource.default_mapper\n # 5. use manager's default\n return self._get_default_mapper()", "def setdefault(self, value: Any) -> None: # type: ignore\n self.default_factory = value \n return", "def _format_default_functions(self):\n self._out_formatter = null_out_formatter\n self._f_default_names = lambda x: [0]", "def mapping(self):\n try:\n mapper_file = pathlib.Path(self.mapper)\n if mapper_file.is_file():\n with open(self.mapper, 'r') as f:\n self.mapper = json.load(f)\n except (OSError, TypeError):\n pass\n if not isinstance(self.mapper, dict):\n raise TypeError(f\"mapper must be dict {self.mapper} ==> \"\n f\"{type(self.mapper)}\")\n if not self._check_for_labels():\n raise(MissingLabelsKey(f\"mapper must contain 'labels' key at \"\n f\"outer most level: {self.mapper}\"))\n return self.mapper", "def __init__(self):\n\n self._datamappers = {\n '*/*': DataMapper()\n }", "def setdefault(self, value: Any) -> None:\n self.default_factory = value \n return", "def set_default_value(self, field):\n if field._default is not None:\n if callable(field._default):\n default = field._default()\n else:\n default = field._default\n self.defaults[field.name] = default", "def set_default(self, name, default, group=None):\n opt_info = self._get_opt_info(name, group)\n opt_info['default'] = self._get_enforced_type_value(\n opt_info['opt'], default)\n opt_info['location'] = LocationInfo(\n Locations.set_default,\n _get_caller_detail(3), # this function has a decorator to skip\n )", "def redirect_defaults2mountpoint(mountpoint):\n params = get_default_params()\n mountpoint = daisy.Toolkit.redirect2mounts(params,\n mountpoint,\n substitute_only=True)\n return mountpoint", "def __init__(self, default_value, map_ptr=None):\n\n if map_ptr is None:\n self.map = ipset.ipmap_new(default_value)\n else:\n self.map = map_ptr", "def addMapping(mapping):\n defaultMapping_.addMapping(mapping)", "def set_default(self, stdout=b'', stderr=b'', returncode=0,\n pid=1234, poll_count=3, behaviour=None):\n self.default_behaviour = self._resolve_behaviour(\n stdout, stderr, returncode, pid, poll_count, behaviour\n )", "def mapper(self):\n if not self._fitted:\n raise ValueError(\"Cannot get mapper if object has not been fitted.\")\n return self._mapper.copy()", "def _map_setdefault(self, key, default=None):\n if not isinstance(key, self.keytype):\n raise KeyError('type of `key` should be ' + repr(self.keytype) + ' but got ' + 
repr(type(key)))\n if key in self:\n return self[key]\n self[key] = default\n return default", "def __init__(self, mapper=None, relative_to=None):\n\n if mapper and relative_to:\n raise ValueError(\"Must specify exactly one of 'mapper' or 'relative_to'\")\n\n if relative_to:\n base = os.path.abspath(relative_to)\n if not os.path.isdir(base):\n raise ValueError('Could not find a directory to bundle relative to at %s' % base)\n self.mapper = RelativeToMapper(base)\n else:\n self.mapper = mapper or RelativeToMapper(os.getcwd())\n\n self.filemap = {}", "def mapper_for_type(self, type_):\n mapper = self.mappers[type_]\n return mapper", "def setDefaultColorSpace(self, defaultColorSpace):\n self.PDFreactorConfiguration.in1[\"defaultColorSpace\"] = defaultColorSpace", "def default(self, default):\n\n self._default = default", "def select_parser(self, request, resource):\n\n # 1. get from resource\n if resource.mapper:\n return resource.mapper\n # 2. get from content type\n mapper_name = self._get_name_from_content_type(request)\n if mapper_name:\n return self._get_mapper(mapper_name)\n # 3. get from url\n mapper_name = self._get_name_from_url(request)\n if mapper_name:\n return self._get_mapper(mapper_name)\n # 4. use resource's default\n if resource.default_mapper:\n return resource.default_mapper\n # 5. use manager's default\n return self._get_default_mapper()", "def _set_real_format(self, fmt):\n # try to use the _nomax variant if available\n if not self._max and fmt + '_nomax' in self.formats:\n self._format = self.formats[fmt + '_nomax']\n elif fmt in self.formats:\n self._format = self.formats[fmt]\n else:\n self._format = fmt\n\n self._format_line_count = self._format.count('\\n')", "def build_default_catch_all_map(self):\n self.default_catch_all_map = self.data['catchall']", "def opt_format(self, fmt):\n key = get_enum_key(fmt, FORMATTERS)\n if key is not None:\n self.conf[\"format\"] = key\n print(\"Set format %r\" % key)\n else:\n print(\"Unknown format %r\" % fmt)", "def __init__(\r\n self,\r\n mapper: Union[MapperRectangularNoInterp, MapperVoronoiNoInterp],\r\n mat_plot_2d: MatPlot2D = MatPlot2D(),\r\n visuals_2d: Visuals2D = Visuals2D(),\r\n include_2d: Include2D = Include2D(),\r\n ):\r\n super().__init__(\r\n visuals_2d=visuals_2d, include_2d=include_2d, mat_plot_2d=mat_plot_2d\r\n )\r\n\r\n self.mapper = mapper", "def test_create_new_mapper(self):\n GraphMapper()", "def set_default_output_type(cls, outputtype):\n if outputtype in Info.ftypes:\n cls._outputtype = outputtype\n else:\n raise AttributeError(\"Invalid AFNI outputtype: %s\" % outputtype)" ]
[ "0.7185841", "0.5845222", "0.5408889", "0.5322639", "0.5239758", "0.5030067", "0.4899394", "0.48726788", "0.4867522", "0.48561874", "0.48168156", "0.4815694", "0.48097968", "0.48017895", "0.47900453", "0.4782826", "0.47522974", "0.47464633", "0.4713747", "0.46979213", "0.4691604", "0.46793267", "0.4672706", "0.46687618", "0.46683443", "0.46630833", "0.4649001", "0.4632003", "0.46313155", "0.46189228" ]
0.7901851
0
Return the default mapper.
def _get_default_mapper(self): return self._datamappers['*/*']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_default_mapper(self, mapper):\n\n mapper = mapper or DataMapper()\n self._datamappers['*/*'] = mapper", "def mapper(self):\n if not self._fitted:\n raise ValueError(\"Cannot get mapper if object has not been fitted.\")\n return self._mapper.copy()", "def _get_mapper(self, mapper_name):\n\n if mapper_name in self._datamappers:\n # mapper found\n return self._datamappers[mapper_name]\n else:\n # unsupported format\n return self._unknown_format(mapper_name)", "def get_mapper(self) -> fsspec.mapping.FSMap:\n return FSStore(self.root_path, fs=self.fs)", "def mapper_for_type(self, type_):\n mapper = self.mappers[type_]\n return mapper", "def mapper(self, structure):\n type_ = structure['type']\n mapper = self.mappers[type_]\n return mapper", "def _get_mapper(obj):\n its_a_model = isinstance(obj, type)\n mapper = class_mapper if its_a_model else object_mapper\n return mapper(obj)", "def mapping(self):\n try:\n mapper_file = pathlib.Path(self.mapper)\n if mapper_file.is_file():\n with open(self.mapper, 'r') as f:\n self.mapper = json.load(f)\n except (OSError, TypeError):\n pass\n if not isinstance(self.mapper, dict):\n raise TypeError(f\"mapper must be dict {self.mapper} ==> \"\n f\"{type(self.mapper)}\")\n if not self._check_for_labels():\n raise(MissingLabelsKey(f\"mapper must contain 'labels' key at \"\n f\"outer most level: {self.mapper}\"))\n return self.mapper", "def loc_mapper():\r\n return LocMapperSetupSansDjango.loc_store", "def get_task_mapper(parallel_procs=ALL_PROCESSORS):\n\n # Get the number of processes to use\n num_procs = get_num_processors(parallel_procs)\n\n # Set up the task mapper\n if num_procs:\n LOG.info('Attempting parallel processing with %d processes.', num_procs)\n if check_multiprocessing:\n import multiprocessing\n pool = multiprocessing.Pool(processes=num_procs)\n mapper = pool.map\n else:\n LOG.warning('Failed to initialize parallel processing.')\n LOG.warning('Falling back to serial mode.')\n mapper = map\n else:\n LOG.info('Using serial processing.')\n mapper = map\n\n return mapper", "def get_mapper(obj, *, expected=None):\n try:\n mapper = object.__getattribute__(obj, MAPPER)\n except AttributeError:\n mapper = None\n\n if mapper and expected is False:\n msg = \"{!r} is already mapped\".format(obj)\n raise TypeError(msg)\n\n if not mapper and expected is True:\n msg = \"{!r} is not mapped\".format(obj)\n raise TypeError(msg)\n\n return mapper", "def mapping(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"mapping\")", "def mapping(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"mapping\")", "def get_mapper_by_content_type(self, content_type):\n\n content_type = util.strip_charset(content_type)\n return self._get_mapper(content_type)", "def mapper_id(self, structure):\n type_ = structure['type']\n mapper = self.mappers[type_]\n return id(mapper)", "def base_mappings():\n return {\n 'from_1': {\n 'to_1': {\n 'mol_1': ({}, {}, []),\n 'mol_2': ({}, {}, []),\n },\n },\n }", "def _get_default(self):\n if callable(self.default):\n return self.default()\n else:\n return self.default", "def get_mapper(mixed):\n if isinstance(mixed, orm._MapperEntity):\n mixed = mixed.expr\n elif isinstance(mixed, orm.sa.Column):\n mixed = mixed.table\n elif isinstance(mixed, orm._ColumnEntity):\n mixed = mixed.expr\n\n if isinstance(mixed, orm.sa.orm.Mapper):\n return mixed\n if isinstance(mixed, orm.sa.orm.util.AliasedClass):\n return orm.sa.inspect(mixed).mapper\n if isinstance(mixed, orm.sa.sql.selectable.Alias):\n mixed = 
mixed.element\n if isinstance(mixed, orm.AliasedInsp):\n return mixed.mapper\n if isinstance(mixed, orm.sa.orm.attributes.InstrumentedAttribute):\n mixed = mixed.class_\n if isinstance(mixed, orm.sa.Table):\n if hasattr(orm.mapperlib, '_all_registries'):\n all_mappers = set()\n for mapper_registry in orm.mapperlib._all_registries():\n all_mappers.update(mapper_registry.mappers)\n else: # SQLAlchemy <1.4\n all_mappers = orm.mapperlib._mapper_registry\n mappers = [\n mapper for mapper in all_mappers\n if mixed in {mapper.local_table}\n ]\n if len(mappers) > 1:\n raise Exception('Still to many mappers %s' % str(mappers))\n if not mappers:\n raise ValueError(\n \"Could not get mapper for table '%s'.\" % mixed.name\n )\n else:\n return mappers[0]\n if not orm.isclass(mixed):\n mixed = type(mixed)\n return orm.sa.inspect(mixed)", "def mapping(self):\n # Allow mappings to be passed as a string if they don't exist yet\n # The setter has already done most of the work in getting the module,\n # but we need to actually resolve it here.\n if isinstance(self._mapping, str):\n self._mapping = getattr(self._mapping_module, self._mapping)\n\n if inspect.isclass(self._mapping):\n # Instantiate the class if not already\n self._mapping = self._mapping()\n\n try:\n mapping = self._mapping.__mapping__\n except AttributeError:\n mapping = self._mapping\n\n if not isinstance(mapping, BaseMapping):\n raise TypeError('Nested() must be called with a '\n 'mapping or a mapped serializer class or a mapped'\n 'serializer instance or a python path to one'\n 'of the above')\n return mapping", "def default(self, default=None):\n\n def default_value_list(sources: List[str] = None):\n \"\"\"\n Infores default method for a list of input knowledge source names.\n\n Parameters\n ----------\n sources: List[str]\n List of Knowledge source name strings being processed.\n\n Returns\n -------\n List[str]\n Infores identifiers mapped to input source strings.\n\n \"\"\"\n if not default:\n return list()\n if not sources:\n return [default]\n else:\n return sources\n\n def default_value_scalar(source=None):\n \"\"\"\n Infores default method for single input knowledge source name.\n\n Parameters\n ----------\n source: str\n Knowledge source name string being processed.\n\n Returns\n -------\n str\n Infores identifier mapped to the input source string.\n\n \"\"\"\n if not default:\n return None\n if not source:\n return default\n else:\n return source\n\n if self.ksf in column_types and column_types[self.ksf] == list:\n return default_value_list\n else:\n # not sure how safe an assumption for non-list column_types, but...\n return default_value_scalar", "def build_default_catch_all_map(self):\n self.default_catch_all_map = self.data['catchall']", "def get_mapping_type(cls):\n ...", "def get_mapping(self):\n if self.role:\n return self.role.get_mapping(self.mapping)\n\n return self.mapping", "def default(self):\n if callable(self._default):\n return self._default()\n\n return self._default", "def set_mapper(obj, mapper):\n setattr(obj, MAPPER, mapper)\n return mapper", "def get_default(self):\r\n if self.has_default:\r\n if callable(self.default):\r\n return self.default()\r\n else:\r\n return self.default", "def _do_mapping(self):\n pass", "def Default():\n return _DEFAULT", "def mapping(self):\n return self._mapping", "def get_ownership_mapper(self, org):\n if config.HAS_RBAC:\n from mist.rbac.mappings import OwnershipMapper\n else:\n from mist.api.dummy.mappings import OwnershipMapper\n return OwnershipMapper(self, org)" ]
[ "0.6871667", "0.6705098", "0.66739136", "0.65977365", "0.6307456", "0.62351674", "0.6097825", "0.5983813", "0.59388465", "0.57689005", "0.5738587", "0.57277805", "0.57277805", "0.5723523", "0.56839365", "0.562199", "0.5607039", "0.5599599", "0.5549087", "0.553436", "0.54751486", "0.5458363", "0.5448836", "0.5446745", "0.54312927", "0.54221433", "0.5415234", "0.54131263", "0.54112196", "0.5408993" ]
0.8958518
0
Return the mapper based on the given name.
def _get_mapper(self, mapper_name): if mapper_name in self._datamappers: # mapper found return self._datamappers[mapper_name] else: # unsupported format return self._unknown_format(mapper_name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def lookup(self, name):\n try:\n return self._baseLookup(name)\n except ImportError:\n raise ImportError(\"No module named %r in mapper %r\" % (name, self))", "def get(cls, name):\n cls.initialize()\n if isinstance(name, cls):\n return name\n else:\n return cls.mapping[name]", "def get(map_name):\r\n if isinstance(map_name, Map):\r\n return map_name\r\n\r\n # Get the list of maps. This isn't at module scope to avoid problems of maps\r\n # being defined after this module is imported.\r\n maps = get_maps()\r\n map_class = maps.get(map_name)\r\n if map_class:\r\n return map_class()\r\n raise NoMapError(\"Map doesn't exist: %s\" % map_name)", "def mapper_for_type(self, type_):\n mapper = self.mappers[type_]\n return mapper", "def lookup_by_name(cls, name):\n return cls.__by_name[name]", "def _get_default_mapper(self):\n\n return self._datamappers['*/*']", "def get_by_name(self, name):\n return self.by_name.get(name.upper())", "def from_name(self, name):\n return self._name_to_loadout.get(name.lower())", "def loadNamed(fqn, mapper, m=None):\n maker = getModule(fqn)\n return load(maker, mapper, m=m)", "def mapper(self, structure):\n type_ = structure['type']\n mapper = self.mappers[type_]\n return mapper", "def get_map(self, name, return_type='image'):\n m = self.maps.get(name)\n if m is None:\n raise ValueError(\"No map with name '{}' found.\".format(name))\n return self.masker.inverse_transform(m) if return_type == 'image' else m", "def name(self, name):\n return self[self.name_cache[name]]", "def get_metric(name):\n return metric_name_to_function_mapping[name.lower()]", "def __getattr__(self,name):\r\n w=self.mapping.get(name,None)\r\n if w is not None:\r\n w.set_name(name)\r\n w.bind(self)\r\n return w\r\n else:\r\n raise AttributeError('{} not found in {}'.format(name,self.name))", "def get_by_name(cls, name):\n return cls.query.filter(cls.name == name).first()", "def find_by_name(self, name):\n return self.get(name)", "def get_func_by_name(self, name):\n if(name == self.name):\n res = self\n else:\n res = None\n return res", "def get_mapper(self) -> fsspec.mapping.FSMap:\n return FSStore(self.root_path, fs=self.fs)", "def mapping_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"mapping_name\")", "def from_name(self, name):\n return self._name_to_operator.get(name.lower())", "def get_automaton_by_name(self, name: str) -> Automaton:\n for automaton in self._automata:\n if automaton.name == name:\n return automaton\n raise errors.NotFoundError(f\"there exists no automaton named {name!r}\")", "def get_mapper_by_content_type(self, content_type):\n\n content_type = util.strip_charset(content_type)\n return self._get_mapper(content_type)", "def get_manager(self, name):\n\n if name == \"control\":\n manager = self._control_manager\n elif name == \"alarm\":\n manager = self._alarm_manager\n elif name == \"state\":\n manager = self._machine_manager\n else:\n manager = self._function_manager\n\n return manager", "def transform_name_mapping(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"transform_name_mapping\")", "def select_parser(self, request, resource):\n\n # 1. get from resource\n if resource.mapper:\n return resource.mapper\n # 2. get from content type\n mapper_name = self._get_name_from_content_type(request)\n if mapper_name:\n return self._get_mapper(mapper_name)\n # 3. get from url\n mapper_name = self._get_name_from_url(request)\n if mapper_name:\n return self._get_mapper(mapper_name)\n # 4. 
use resource's default\n if resource.default_mapper:\n return resource.default_mapper\n # 5. use manager's default\n return self._get_default_mapper()", "def get_model_by_name(cls, name):\n model_name = inflection.camelize(name) # class name of the model to use\n model = cls.models[model_name]\n return model", "def by_name(cls, name):\n return dbsession.query(cls).filter_by(_name=str(name)).first()", "def by_name(cls, name):\n return dbsession.query(cls).filter_by(_name=str(name)).first()", "def by_name(cls, name):\n return dbsession.query(cls).filter_by(_name=str(name)).first()", "def lookup_pattern(name):\n\treturn _registered_patterns[name]" ]
[ "0.69885236", "0.6523901", "0.64461076", "0.63601327", "0.6340254", "0.6093008", "0.60604954", "0.60452425", "0.59643584", "0.5960165", "0.59453994", "0.589722", "0.5801919", "0.567288", "0.5612674", "0.5609351", "0.55997974", "0.559257", "0.55712503", "0.5496303", "0.54794675", "0.5440988", "0.54326206", "0.54280066", "0.54118925", "0.53821945", "0.5375975", "0.5375975", "0.5375975", "0.5373446" ]
0.7992836
0
Get name from ContentType header
def _get_name_from_content_type(self, request): content_type = request.META.get('CONTENT_TYPE', None) if content_type: # remove the possible charset-encoding info return util.strip_charset(content_type) return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def content_type_header(request: Request) -> str:\n return request.content_type", "def gettype(self, failobj=None):\n missing = []\n value = self.get('content-type', missing)\n if value is missing:\n return failobj\n return re.split(r';\\s*', value.strip())[0].lower()", "def get_content_type(self, headers):\n if headers:\n for h, val in headers.items():\n if h.lower().strip() == 'content-type':\n # As it turns out, content-type often appears with some\n # additional values e.g \"text/css; charset=utf8\" so we want\n # just 'text/css' rather than the whole string\n return val[0].split(\";\")[0]\n return \"\"", "def content_type(self):\n return self._headers.get(\"content-type\")", "def content_type(self):\n return self._headers['CONTENT-TYPE']", "def getHeader(self, name):\n return self.headers.get(name.lower(), None)", "def get_content_name(self, content_url):\n endpoint = content_url.split('/')[-1]\n return re.match(r'(.+\\.(?:jpg|mp4))', endpoint).group(0)", "def content_type(self) -> str:\n raw = self._headers.get(hdrs.CONTENT_TYPE) # type: ignore[attr-defined]\n if self._stored_content_type != raw:\n self._parse_content_type(raw)\n return self._content_type # type: ignore[return-value]", "def get_content_type_name(index: int) -> str:\n return [\"Post\", \"Story\", \"Video\"][index - 1]", "def content_type(self) -> str:\n return pulumi.get(self, \"content_type\")", "def CONTENT_TYPE(self):", "def content_type(self):\n return self.content_types[0]", "def _GetHeaderNameValue(header):\n i = header.find(':')\n if i > 0:\n return (header[:i].lower(), header[i+1:].strip())\n return None", "def getname(self):\n if 'chtt' in self.data:\n return self.data['chtt']", "def get_header(self, name):\n return self.headers.get(name)", "def get_content_type(ct):\n content_type = ct\n\n if ct == \"csv\":\n content_type = \"text/csv\"\n elif ct == \"json\":\n content_type = \"application/json\"\n\n return content_type", "def name(self):\n return self.raw.get(\"name\")", "def header(self, name):\n key = name.upper()\n if key not in _RESPONSE_HEADER_DICT:\n key = name\n return self._headers.get(key)", "def get_filename(self, stream, media_type, parser_context):\n try:\n return parser_context['kwargs']['filename']\n except KeyError:\n pass\n\n try:\n meta = parser_context['request'].META\n disposition = parse_header(meta['HTTP_CONTENT_DISPOSITION'])\n return disposition[1]['filename']\n except (AttributeError, KeyError):\n pass", "def GetHeaderName(name):\n name = os.path.splitext(name)[0] + '.h'\n name = name.replace(os.sep, '/')\n return 'ppapi/c/' + name", "def content_type(self):\r\n return self.__content_type", "def content_type(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"content_type\")", "def _get_content_type(self):\n return '%s; charset=%s' % (self.content_type, self.charset)", "def get(self, name, failobj=None):\n name = name.lower()\n for k, v in self._headers:\n if k.lower() == name:\n return v\n return failobj", "def getContentType(self):\n return self.content_type", "def content_type(self, _format=None):\r\n _format = _format or self.format\r\n return \"application/%s\" % (_format)", "def _select_header_content_type(content_types):\n if not content_types:\n return 'application/json'\n\n content_types = [x.lower() for x in content_types]\n\n if 'application/json' in content_types or '*/*' in content_types:\n return 'application/json'\n\n return content_types[0]", "def parse_content_type(value: str) -> str:\n if not value:\n return ''\n\n return value.split(';')[0].strip()", "def 
GetContentType(filename):\r\n return mimetypes.guess_type(filename)[0] or 'application/octet-stream'", "def CONTENT_TYPE(self):\n return self.content_type" ]
[ "0.6892327", "0.6575444", "0.65093666", "0.6481238", "0.6447588", "0.63857204", "0.6333128", "0.63329685", "0.6310448", "0.6281364", "0.6262302", "0.62129503", "0.6198477", "0.61856425", "0.608376", "0.60403216", "0.5957448", "0.5953572", "0.59456986", "0.5944716", "0.5935226", "0.5929152", "0.59286165", "0.5927454", "0.5893319", "0.58841765", "0.5880093", "0.58638644", "0.5860166", "0.58429456" ]
0.78704923
0
Determine short name for the mapper based on the URL. Short name can be either in query string (e.g. ?format=json) or as an extension to the URL (e.g. myresource.json).
def _get_name_from_url(self, request): format = request.GET.get('format', None) if not format: match = self._format_query_pattern.match(request.path) if match and match.group('format'): format = match.group('format') return format
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def url_name(request):\n url_name = False\n if request.resolver_match:\n url_name = request.resolver_match.url_name\n return {\"url_name\": url_name}", "def shorten_url():\n return rh.shorten_url(request)", "def _shortenUrl(self, url):\n posturi = \"https://www.googleapis.com/urlshortener/v1/url\"\n headers = {'Content-Type' : 'application/json'}\n data = {'longUrl' : url}\n data = json.dumps(data)\n request = urllib2.Request(posturi,data,headers)\n response = urllib2.urlopen(request)\n response_data = response.read()\n shorturi = json.loads(response_data)['id']\n return shorturi", "def short_name(self):\n return self.get(\"short_name\", decode=True)", "def get_short_url_base():", "def get_by_short_url(cls, short_url):\n url_mapping = Url.load_url_mapping()\n return url_mapping.get(short_url)", "def decode(self, shortUrl: str) -> str:\n return self.lookup[shortUrl]", "def shortname(self):\n return self.get(\"shortName\")", "def get_humanish_name(url):\n name = re.sub(r'/$', '', url)\n name = re.sub(r':*/*\\.git$', '', name)\n name = re.sub(r'.*[/:]', '', name)\n return name", "def gen_shorter_url(long_url):\n if long_url in URL_PAIR_STORE.long_url:\n return URL_PAIR_STORE.short_url[\n URL_PAIR_STORE.long_url == long_url]\n else:\n short_url = DOMAIN_NAME + '/' + do_hashing(long_url)\n new_entry = URLPair(\n id=gen_unique_id(),\n long_url=long_url,\n short_url=short_url,\n )\n insert_new_pairs(new_entry)\n return short_url", "def set_short_url_base(url):", "def shorten_url(url):\n short_url = None\n\n pwds = Passwords()\n token = pwds.getPassword('bitly.token')\n\n if random.random() < 0.01:\n url = random.choice(random_urls)\n\n params = {\n \"access_token\": token,\n \"longUrl\": url,\n \"domain\": \"j.mp\", # bit.ly and bitly.com are also options.\n }\n\n shortener = 'https://api-ssl.bitly.com/v3/shorten?%s' % urllib.urlencode(\n params)\n (code, content, resp) = util.get_page(shortener)\n url = None\n if code == 200:\n try:\n results = json.loads(content)\n except:\n print \"error loading json from\", shortener, content\n\n try:\n url = results[\"data\"][\"url\"]\n except:\n print \"unexpected json response from\", shortener, results\n else:\n print shortener, \"returned\", code, content\n return url", "def return_shorter_url(url):\n # found out that the entries were coming over in this format: <http://www.someurl.com>\n full_url = f\"https://www.googleapis.com/urlshortener/v1/url?key={API_KEY}\"\n fixed_url = remove_chars.clean_text(url)\n payload = {\"longUrl\": fixed_url}\n headers = {\"content-type\": \"application/json\"}\n # making a post to google API\n r = requests.post(full_url, data=json.dumps(payload), headers=headers).json()\n return f\"Short URL: {r['id']}\"", "def url_shortner(self):", "def mapping_name(self) -> Optional[str]:\n return self.get(\"/TM\")", "def short(self, url):\r\n\r\n self.clean_url(url)\r\n json = {\"originalURL\": url, \"domain\": self.domain}\r\n headers = {\"authorization\": self.api_key}\r\n response = self._post(self.api_url, json=json, headers=headers)\r\n if response.ok:\r\n data = response.json()\r\n if \"shortURL\" not in data:\r\n raise ShorteningErrorException(\r\n f\"API Returned wrong response: \" f\"{data}\"\r\n )\r\n return data[\"shortURL\"]\r\n raise ShorteningErrorException(response.content)", "def get_short_code():\n return rh.get_short_code(request)", "def edit_url(\n modulename: str, is_package: bool, mapping: Mapping[str, str]\n) -> str | None:\n for m, prefix in mapping.items():\n if m == modulename or 
modulename.startswith(f\"{m}.\"):\n filename = modulename[len(m) + 1 :].replace(\".\", \"/\")\n if is_package:\n filename = f\"{filename}/__init__.py\".lstrip(\"/\")\n else:\n filename += \".py\"\n return f\"{prefix}{filename}\"\n return None", "def url_name(cls):\n return f'{cls.app_label}_{cls.name}'", "def get_short_name(self):\n\n return self.name", "def get_short_url(url) -> dict:\n service = Shorteners.TINYURL\n service_text = service\n service_url = 'http://tinyurl.com/'\n short_url = ''\n try:\n short_url = format(Shortener(service).short(url))\n except (ReadTimeout, ConnectionError, NewConnectionError, ShorteningErrorException) as e:\n logger('getter', repr(e), error=True)\n service_text = Translator('en').get(_.serviceNotAvailable)\n\n return {\n 'url': short_url,\n 'service': service,\n 'service_url': service_url,\n 'service_text': service_text,\n }", "def generate_short_url():\n\n def generate():\n x = \"\".join(random.choices(SHORT_URL_CHARACTERS, k=SHORT_URL_LENGTH))\n return x\n\n short_url = generate()\n while URLMapping.objects.filter(short_url=short_url).exists():\n short_url = generate()\n return short_url", "def shortener(url_hash: str) -> TResponse:\n shortened_id = decode(url_hash)\n tags = db.session.query(Shortener).get(shortened_id)\n if tags is None:\n return jsonify(error='/@%s not found' % str(url_hash)), 404\n\n tags = dict(tags.__dict__)\n tags.pop('_sa_instance_state', None)\n\n if request.headers['Accept'] == 'application/json':\n return jsonify(hash=url_hash, short_url='https://fanlens.io/@%s' % url_hash, tags=tags)\n else:\n user_agent = request.headers.get('User-Agent', '').lower()\n if user_agent.startswith('twitterbot') or user_agent.startswith('facebo') or user_agent.startswith('LinkedIn'):\n return render_template('shortener.html', **tags)\n return redirect(tags['url'], code=302)", "def decode(self, shortUrl: str) -> str:\n short = shortUrl.split('/')[-1]\n if short in short2long:\n return short2long[short]\n else:\n return None", "def _UrlBaseName(url):\n return url.rstrip('/').rpartition('/')[-1]", "def get_shorten_url(url):\n try:\n shorten = pyshorteners.Shortener()\n shortenurl = shorten.tinyurl.short(url)\n return shortenurl\n except Exception as e:\n return e", "def decode(self, shortUrl: str) -> str:\n url = shortUrl.split('/')[-1]\n idx = int(url)\n \n return self.reverse_map[idx]", "def get_short_name(self):\n return self.full_name.split(' ')[0]", "def _get_object_name(self, object_url):\n infos = str(object_url).split('/')\n return infos[len(infos) - 1]", "def get_short_name(self):\r\n return self.name" ]
[ "0.6074111", "0.6005944", "0.5902819", "0.58046716", "0.5792287", "0.5767634", "0.57404774", "0.57221323", "0.57147455", "0.5575753", "0.5546919", "0.5540886", "0.553158", "0.55257696", "0.5518946", "0.5516314", "0.54784685", "0.5469388", "0.544259", "0.54098177", "0.5381609", "0.53544563", "0.53430635", "0.5339789", "0.53270274", "0.5325756", "0.5311373", "0.5309058", "0.526097", "0.52575505" ]
0.66325134
0
Deal with the situation when we don't support the requested format.
def _unknown_format(self, format):
    raise errors.NotAcceptable('unknown data format: ' + format)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_format(self):\n raise NotImplementedError()", "def _determine_format(self, request):\n return determine_format(request, self._meta.serializer, default_format=self._meta.default_format)", "def validateWorkFormat(format):\n\n if not(format):\n return \"You must select a work format.\"", "def get_format(self):\n pass", "def asformat(self, format):", "def _get_format(self, request):\n\n # Derive a list of 'formats.Format' instances from the list of formats these views support.\n supported_formats = [formats.find(format) for format in self.supported_formats]\n\n # Determine format by extension...\n if '.' in request.path:\n extension = request.path.split('.')[-1]\n\n try:\n format = formats.find_by_extension(extension)\n except formats.UnknownFormat:\n return None\n\n if format in supported_formats:\n return format\n else:\n return None\n\n # Determine format by HTTP Accept header...\n if 'HTTP_ACCEPT' in request.META:\n content_types = parse_http_accept_header(request.META['HTTP_ACCEPT'])\n\n # Only consider 'accept' headers with a single format in an attempt to play nice\n # with browsers that ask for formats they really should not want.\n if len(content_types) == 1:\n content_type = content_types[0]\n\n # If the request has no preference as to the format of its response, prefer the\n # first of the view's supported formats.\n if content_type == '*/*':\n return supported_formats[0]\n\n try:\n format = formats.find_by_content_type(content_type)\n except formats.UnknownFormat:\n return None\n\n if format in supported_formats:\n return format\n else:\n return None\n\n # If no format is given by either extension or header, default to the format given in\n # RESPITE_DEFAULT_FORMAT (given, of course, that it's supported by the view).\n if DEFAULT_FORMAT:\n format = formats.find(DEFAULT_FORMAT)\n\n if format in supported_formats:\n return format\n else:\n return None", "def get_format_type(self):\n raise Unimplemented()", "def test_validation_get_valid_formats(self):\n self.assertIsInstance(api.validation.fetch_formats(), dict)", "def _validate_format(format_type):\n if format_type not in GeopandasWriter.formats:\n raise ValueError('Unsupported file format.')\n\n return True", "def _check_tt_data_format(ttdata: dict, name: str) -> None:\n formatVersion = ttdata.get(\"formatVersion\", None)\n if not isinstance(formatVersion, str):\n raise TypeError(\n f\"Illegal type '{type(formatVersion).__name__}' instead of 'str' for \"\n f\"formatVersion for instructions in {name}.\"\n )\n if formatVersion != \"1\":\n raise NotImplementedError(\n f\"Unknown formatVersion {formatVersion} for instructions in {name}.\"\n )", "def determine_format(request, serializer, default_format='application/json'):\r\n # First, check if they forced the format.\r\n if request.GET.get('format'):\r\n if request.GET['format'] in serializer.formats:\r\n return serializer.get_mime_for_format(request.GET['format'])\r\n\r\n # Try to fallback on the Accepts header.\r\n if request.META.get('HTTP_ACCEPT', '*/*') != '*/*':\r\n formats = list(serializer.supported_formats) or []\r\n # Reverse the list, because mimeparse is weird like that. See also\r\n # https://github.com/toastdriven/django-tastypie/issues#issue/12 for\r\n # more information.\r\n formats.reverse()\r\n best_format = mimeparse.best_match(\r\n formats, request.META['HTTP_ACCEPT'])\r\n\r\n if best_format:\r\n return best_format\r\n\r\n # No valid 'Accept' header/formats. 
Sane default.\r\n return default_format", "def unsuported_format(self, msg):\n raise UnsupportedError(self.file.name+\" linker map format not supported by parser:\\n \"+ msg)", "def _file_format_adapter(self):\n raise NotImplementedError", "def __format__(self, format_spec):\n if format_spec == \"polite\":\n return self.polite\n elif format_spec == \"casual\":\n return self.casual\n else:\n # Using string addition here to avoid triggering flake8-sfs\n # while still giving a meaningful self-contained example:\n raise ValueError(format_spec + \" not a format defined by Client object\")", "def determine_format(request, serializer, default_format='application/json'):\n # First, check if they forced the format.\n if request.GET.get('format'):\n if request.GET['format'] in serializer.formats:\n return serializer.get_mime_for_format(request.GET['format'])\n \n # If callback parameter is present, use JSONP.\n if request.GET.has_key('callback'):\n return serializer.get_mime_for_format('jsonp')\n \n # Try to fallback on the Accepts header.\n if request.META.get('HTTP_ACCEPT', '*/*') != '*/*':\n formats = list(serializer.supported_formats) or []\n # Reverse the list, because mimeparse is weird like that. See also\n # https://github.com/toastdriven/django-tastypie/issues#issue/12 for\n # more information.\n formats.reverse()\n best_format = mimeparse.best_match(formats, request.META['HTTP_ACCEPT'])\n \n if best_format:\n return best_format\n \n # No valid 'Accept' header/formats. Sane default.\n return default_format", "def unrecognised_format(link):\n print('Message has been identified as a YouTube link, but the format is not recognised.')\n print('Message was {}, support for this format should be added soon.'.format(link))\n pass", "def test_invalid_format(api):\n\twith pytest.raises(top_stories.InvalidFormatType):\n\t\tapi.get_stories(\"home\", \"xml\")", "def initFormat(self):\n pass", "def _handle_string(\n *, artifacts: types.ColumnArtifacts\n) -> typing.Union[String, Binary, Date, DateTime]:\n if artifacts.open_api.format in {None, \"byte\", \"password\"}:\n if artifacts.open_api.max_length is None:\n return String\n return String(length=artifacts.open_api.max_length)\n if artifacts.open_api.format == \"binary\":\n if artifacts.open_api.max_length is None:\n return Binary\n return Binary(length=artifacts.open_api.max_length)\n if artifacts.open_api.format == \"date\":\n return Date\n if artifacts.open_api.format == \"date-time\":\n return DateTime\n raise exceptions.FeatureNotImplementedError(\n f\"{artifacts.open_api.format} format for string is not supported.\"\n )", "def format(self, data):\n datatype = accept(self.supportedTypes)\n response.headers['Content-Type'] = datatype\n \n if datatype in ('text/json', 'text/x-json', 'application/json'):\n # Serialise to json\n return self.tojson(data)\n \n # Default... 
return in json anyway\n return self.tojson(data)", "def format(self, data):", "def format_(self):\n return self.set_format or self.default_format or self.FALLBACK_FORMAT", "def format(self, *args, **kwargs):\n raise NotImplementedError()", "def format_to_extension(self, format):", "def _raise_format_error(self, name: str, format_str: str, source_format: str):\n\n raise ValueError(f\"The '{ name }' should be { format_str }, rather than { source_format }\")", "def AddFormat(self, format):\n self._legacy = False\n if format:\n self._format = format", "def _create_unsupported_media_type(self):\n body = self.server.create_error(\n 415,\n 'Not Acceptable', 'Invalid Accept header.',\n bad=True)\n self._write_response(415, body, content_type=CONTENT_TYPE_ERROR)", "def get_supported_formats(pandas = False):\n global _pma_debug\n url = \"https://host.pathomation.com/etc/supported_formats.php\"\n \n if _pma_debug == True:\n print(url)\n \n headers = {'Accept': 'application/json'}\n r = requests.get(url, headers=headers)\n json = r.json()\n \n if (pandas == True):\n import pandas as pd\n return pd.DataFrame.from_records(json, index=[\"vendor\"])\n else:\n return json", "def validate_format(self):\n return all(\n [\n self.validate_header_keyword(),\n self.validate_type_keyword(),\n self.validate_type_annotations(),\n self.validate_unique_header(),\n self.validate_against_header_count(),\n ]\n )", "def check_validity_input_formats(input_formats):\n from invenio.search_engine import get_available_output_formats\n valid_formats = get_available_output_formats()\n\n # let's to extract the values of the available formats\n format_values = []\n for aformat in valid_formats:\n format_values.append(aformat['value'])\n\n invalid_format = ''\n for aformat in input_formats:\n if aformat.lower() not in format_values:\n invalid_format = aformat.lower()\n break\n return invalid_format" ]
[ "0.7758894", "0.6573002", "0.6391546", "0.6325112", "0.62708557", "0.6243621", "0.6233522", "0.6175671", "0.6166886", "0.61477107", "0.61403495", "0.60342926", "0.60149646", "0.6010532", "0.5987411", "0.5959781", "0.5955787", "0.5895473", "0.58931667", "0.58854645", "0.58829576", "0.58827907", "0.5865844", "0.58428454", "0.5841047", "0.5837896", "0.57793903", "0.5762316", "0.57610196", "0.57380193" ]
0.76091594
1
Check that the mapper has a valid signature.
def _check_mapper(self, mapper):
    if not hasattr(mapper, 'parse') or not callable(mapper.parse):
        raise ValueError('mapper must implement parse()')
    if not hasattr(mapper, 'format') or not callable(mapper.format):
        raise ValueError('mapper must implement format()')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def verify_signature(self, inputs, signature):\n pass", "def verify_signature(self, inputs, signature):\n pass", "def signature_check(dummy, *args, **kwargs):\n try:\n dummy(*args, **kwargs)\n return True\n\n except TypeError:\n return False", "def _check_type(self):\n assert self.mapping == self.mapping_type, \\\n \"Expected header mapping='{}' but got mapping='{}' in '{}'\".format(\n self.mapping_type, self.mapping.upper(), self.filename)", "def validate_mapping(self):\n log.verbose(\"Validating\", repr(self.basename), \"with parameters\", repr(self.parkey))\n self.validate()", "def _validate(mapping):\n missing_fields = _MANDATORY_FIELDS - set(mapping)\n if missing_fields:\n raise ValueError(\n \"Missing mandatory fields: {0}\".format(\n \", \".join(repr(field) for field in sorted(missing_fields))\n )\n )", "def verify_request_signature(req_info: StatusResponse) -> None:\n if not req_info.signature_check(req_info.xmlstr):\n raise ValueError(_(\"Message signature verification failure\"))", "def check_signature(func, args_list):\n refsig = MethodSignature(func.__name__, args_list)\n actualsig = MethodSignature.from_callable(func)\n if refsig != actualsig:\n raise MethodSignatureMismatch(\n \"Expected {0}, not {1}\".format(refsig, actualsig)\n )\n return True", "def _validate_signature(self):\n signing_string = '{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n'.format(\n 'Message',\n self._message_encoded,\n 'MessageId',\n self._message_id,\n 'Timestamp',\n self._timestamp,\n 'TopicArn',\n self._topic_arn,\n 'Type',\n self._type)\n\n crt = crypto.load_certificate(crypto.FILETYPE_PEM, self._pem)\n signature = base64.b64decode(self._signature)\n\n try:\n crypto.verify(\n crt,\n signature,\n signing_string.encode('utf-8'),\n 'sha1')\n except:\n self.error = 'Invalid signature.'\n raise ValueError('Invalid signature.')\n\n return True", "def validate_signature(self, params):\n if \"signature\" not in params:\n raise SignatureValidationError(\"Parameters did not include a signature\")\n\n signature = params[\"signature\"]\n\n keys = params.keys()\n keys.sort()\n query_string = \"&\".join(quote(key, \"~\") + \"=\" + quote(params[key], \"~\") \\\n for key in keys if key != \"signature\")\n computed_hash = base64.b64encode(hmac.new(self.app_secret, query_string, hashlib.sha256)\n .digest())\n\n if computed_hash != signature:\n raise SignatureValidationError(\"Invalid signature: \" + query_string)\n\n issued_at = iso8601.parse_date(params[\"issuedAt\"])\n expires_at = issued_at + timedelta(minutes=SIGNATURE_WINDOW_SIZE)\n if datetime.utcnow() > expires_at.replace(tzinfo=None):\n raise SignatureValidationError(\"Expired signature\")", "def validate(self):\n super(ReferenceMapping, self).validate()\n self.check_observatory()\n self.check_instrument()\n self.check_filekind()\n self.check_schema_uri()\n if \"reference_to_dataset\" in self.header:\n parkeys = self.get_required_parkeys()\n for _reference, dataset in self.reference_to_dataset.items():\n assert dataset.upper() in parkeys, \\\n \"reference_to_dataset dataset keyword not in parkey keywords.\"\n with log.augment_exception(\"Invalid mapping:\", self.instrument, self.filekind):\n self.selector.validate_selector(self.tpn_valid_values)", "def warn_on_bad_signature(self) -> bool:\n return self._signature == 'warn'", "def validate_signature(self):\n return self.signature == 0xAA55", "def test_block_bad_signature(self):\n pass", "def has_valid_signature(method, headers_dict, body_dict, access_key, secret_key):\r\n _, expected_signature, _ = 
generate_signed_message(\r\n method, headers_dict, body_dict, access_key, secret_key\r\n )\r\n\r\n authorization = headers_dict[\"Authorization\"]\r\n auth_token, post_signature = authorization.split(\":\")\r\n _, post_access_key = auth_token.split()\r\n\r\n if post_access_key != access_key:\r\n log.error(\"Posted access key does not match ours\")\r\n log.debug(\"Their access: %s; Our access: %s\", post_access_key, access_key)\r\n return False\r\n\r\n if post_signature != expected_signature:\r\n log.error(\"Posted signature does not match expected\")\r\n log.debug(\"Their sig: %s; Expected: %s\", post_signature, expected_signature)\r\n return False\r\n\r\n return True", "def signature_check(self, fn):\n # fetch signature to analyze arguments\n sig = signature(fn)\n required_arg_is_present = False\n for key, param in sig.parameters.items():\n if key == self.required_function_arg:\n required_arg_is_present = True\n continue\n if param.kind == param.VAR_KEYWORD: # corresponds to \"fn(**kwargs)\"\n self.accept_kwargs = True\n elif param.kind == param.VAR_POSITIONAL: # corresponds to \"fn(*args)\"\n raise ValueError(\"Cannot connect a signal using the *%s syntax\" % key)\n elif (\n param.default == param.empty\n ): # \"fn(foo)\" : kind = POSITIONAL_ONLY or POSITIONAL_OR_KEYWORD\n self.required_arguments_names.add(key)\n if param.annotation != param.empty and callable(param.annotation):\n self.argument_types[key] = param.annotation\n self.accepted_argument_names.add(key)\n else: # \"fn(foo=bar)\" : kind = POSITIONAL_OR_KEYWORD or KEYWORD_ONLY\n self.optional_arguments_names.add(key)\n self.accepted_argument_names.add(key)\n if param.annotation != param.empty and callable(param.annotation):\n self.argument_types[key] = param.annotation\n if self.required_function_arg and not required_arg_is_present:\n msg = '%s(%s) must takes \"%s\" as first argument' % (\n self.__class__.__name__,\n self.path,\n self.required_function_arg,\n )\n raise ValueError(msg)", "def _verify_matching_signatures(implementation, dispatcher):\n implementation_spec = getargspec(implementation)\n dispatcher_spec = getargspec(dispatcher)\n\n if (implementation_spec.args != dispatcher_spec.args or\n implementation_spec.varargs != dispatcher_spec.varargs or\n implementation_spec.keywords != dispatcher_spec.keywords or\n (bool(implementation_spec.defaults) !=\n bool(dispatcher_spec.defaults)) or\n (implementation_spec.defaults is not None and\n len(implementation_spec.defaults) !=\n len(dispatcher_spec.defaults))):\n raise RuntimeError('implementation and dispatcher for %s have '\n 'different function signatures' % implementation)", "def test_signature_validation(self):\n signature = app.utils.generate_signed_data(\n self._body,\n settings.PRIVATE_KEY\n )\n\n self.assertTrue(app.utils.validate_signed_data(\n self._body,\n signature,\n settings.PUBLIC_KEY\n ))", "def _check_params(self):\n pass", "def CheckSignatures(EfiPath, MapList, SigList):\r\n\r\n for Entry in MapList:\r\n # Check for missing signatures\r\n assert Entry[0] in SigList, Entry[0] + \": missing signature\"\r\n # Make sure the signature fits in 16 bits\r\n assert SigList[Entry[0]] < 0x10000, Entry[0] + \": invalid signature\"\r\n\r\n with file(EfiPath, 'rb') as f:\r\n for Entry in MapList:\r\n f.seek(Entry[1] + 4)\r\n Data = struct.unpack('I', f.read(4))[0]\r\n # The 32 bit data should either be 0 or have the call signature marker\r\n assert Data == 0 or Data & 0xFFFF0000 == EBC_CALL_SIGNATURE, \"Unexpected data at address 0x%x\" % Entry[1]\r\n\r\n return True;", 
"def signature_check(self, fn):\n if not isinstance(fn, type) or not issubclass(fn, forms.BaseForm):\n raise ValueError(\"validate_form only apply to Django Forms\")\n self.required_arguments_names = set()\n self.optional_arguments_names = {\"data\"}\n self.accepted_argument_names = {\"data\"}", "def _check_oauth_signature(self, params, client_signature):\r\n client_secret = unicode(self.server.config.get('client_secret', self.DEFAULT_CLIENT_SECRET))\r\n\r\n port = self.server.server_address[1]\r\n lti_base = self.DEFAULT_LTI_ADDRESS.format(port=port)\r\n lti_endpoint = self.server.config.get('lti_endpoint', self.DEFAULT_LTI_ENDPOINT)\r\n url = lti_base + lti_endpoint\r\n\r\n request = mock.Mock()\r\n request.params = [(unicode(k), unicode(v)) for k, v in params.items()]\r\n request.uri = unicode(url)\r\n request.http_method = u'POST'\r\n request.signature = unicode(client_signature)\r\n return signature.verify_hmac_sha1(request, client_secret)", "def validate_output(self):\n if self.dimension == 2:\n required = SEGMENT_GEO_SIG | self.output_signature\n for rays in [\n self.active_rays,\n self.finished_rays,\n self.stopped_rays,\n self.dead_rays\n ]:\n if bool(rays):\n sig = set(rays.keys())\n if not (sig >= required):\n raise RuntimeError(\n f\"Optical engine failed output signature check. System \" \n f\"signature is {sig}, but needed {required}.\"\n )", "def _validate_key(sample, path):\n mapping_tmp = sample\n for key in path:\n try:\n mapping_tmp = mapping_tmp[key]\n except KeyError:\n return False\n except TypeError:\n return False\n return True", "def checkMap(self):\n return True", "def verify_signature(self, key, data):\n verify_signature(self, key, data)", "def _is_signature_valid(post_params):\r\n\r\n # Calculate the fields signature\r\n fields_sig = processor_hash(post_params.get('orderPage_signedFields'))\r\n\r\n # Retrieve the list of signed fields\r\n signed_fields = post_params.get('orderPage_signedFields').split(',')\r\n\r\n # Calculate the public signature\r\n hash_val = \",\".join([\r\n \"{0}={1}\".format(key, post_params[key])\r\n for key in signed_fields\r\n ]) + \",signedFieldsPublicSignature={0}\".format(fields_sig)\r\n\r\n public_sig = processor_hash(hash_val)\r\n\r\n return public_sig == post_params.get('orderPage_signaturePublic')", "def _check_transform_key(key: Hashable) -> None:\n _test_hashable = hash(key) # The only 'real' way to make sure is hashable\n # if not isinstance(key, Hashable):\n # raise TypeError((type(key), \"transformation lookup key is not hashable\"))", "def test_signature(self):\n with open(\"{}/{}\".format(self.APP_PATH, self.TARGET_PY_FILE),\n 'r', encoding=\"utf-8\", errors='ignore') as f:\n read_data = f.read()\n # Check [def predict()] section\n with self.subTest(name=\"[def handle()] in main.py\"):\n self.assertIsNotNone(\n re.search(r'def\\s+handle\\(\\w+\\)', read_data),\n msg=\"[def handle()] signature is missing or incorrect\")", "def check_params(self):\n raise NotImplementedError" ]
[ "0.6831698", "0.6831698", "0.6462788", "0.6245771", "0.62307477", "0.6047557", "0.601151", "0.5921455", "0.59146875", "0.5847448", "0.58155686", "0.57982296", "0.577483", "0.5725613", "0.57252246", "0.5719018", "0.5713518", "0.57050633", "0.56824374", "0.5622677", "0.561941", "0.55681926", "0.5557792", "0.5556245", "0.5548277", "0.55264026", "0.5509315", "0.5508706", "0.5504088", "0.54986906" ]
0.73222697
0
Return an airport code input after validating it
def airportCodeInput(self, prompt):
    while True:
        code = input(prompt).upper()
        if code not in self.travel_db.airports:
            print("Invalid airport code")
        else:
            return code
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validateAirport(self, code):\n print(code)\n if code in self.travel_db.airports:\n return True\n else:\n return False", "def iata(code):\r\n if len(code) == 3:\r\n return code.upper()\r\n else:\r\n raise argparse.ArgumentTypeError(\"%s is not valid IATA code\" % code)", "def validate_pin_input(value):\n try:\n int(value)\n return f\"D{value}\"\n except ValueError:\n return value.upper()", "def findAirport(state):\n if state == \"NSW\":\n airport = \"Sydney Airport\"\n elif state == \"VIC\":\n airport = \"Melbourne Airport\"\n elif state == \"QLD\":\n airport = \"Brisbane Airport\"\n elif state == \"TAS\":\n airport = \"Hobart Airport\"\n elif state == \"WA\":\n airport = \"Perth Airport\"\n elif state == \"SA\":\n airport = \"Adelaide Airport\"\n elif state == \"NT\":\n airport = \"Darwin Airport\"\n return airport", "def country(alpha_2_code: str) -> None:", "def request_two_factor_code(self):\n code = ''\n while not code:\n code = input('Enter 2FA code: ')\n return code", "def _two_factor_code(self):\n code = ''\n while not code:\n code = input('Enter 2FA code: ')\n return code", "def getAircraft(self, code):\n \t\n return self.aircraftDict[code.upper()]", "def validate(code):\n if not code.isdigit():\n raise IllegalCharacterError('[0-9]{%d}' % UPCA.digits)\n\n if len(code) != UPCA.digits:\n raise ValueError('Bar code %s requires %d digits' % (code, UPCA.digits))\n\n checksum = UPCA.calculate_checksum(code)\n if checksum != int(code[-1]):\n raise ValueError('Checksum character mismatch %s != %s' % (checksum, code[-1]))", "def currencyInput(self, prompt):\n while True:\n code = input(prompt).upper()\n if code not in self.travel_db.currencies:\n print(\"Invalid currency code\")\n else:\n return code", "def validate_state_code(cls, state):\n # \n if not isinstance(state, basestring):\n raise TypeError(\"State should be a string\")\n if not len(state) == 2:\n raise ValueError(\"State should be a 2-letter state code.\")\n if not state.isalpha():\n raise ValueError(\"State must be alphabetic.\")\n return state", "def postal_code(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"postal_code\")", "def postal_code(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"postal_code\")", "def postal_code(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"postal_code\")", "def inputZip() -> int:\n while True:\n try:\n return int(input(\"Enter your zipcode for concerts near you: \"))\n except ValueError:\n print(\"Input only accepts numbers.\")", "def check_born_place(input_string: str) -> tuple:\n c = input_string[0]\n if c == 'A':\n return 'Taipei City', 10\n elif c == 'B':\n return 'Taichung City', 11\n elif c == 'C':\n return 'Keelung City', 12\n elif c == 'D':\n return 'Tainan City', 13\n elif c == 'E':\n return 'Kaohsiung City', 14\n elif c == 'F':\n return 'New Taipei City', 15\n elif c == 'G':\n return 'Yilan County', 16\n elif c == 'H':\n return 'Taoyuan City', 17\n elif c == 'I':\n return 'Chiayi City', 34\n elif c == 'J':\n return 'Hsinchu County', 18\n elif c == 'K':\n return 'Miaoli County', 19\n elif c == 'L':\n return 'Taichung County', 20\n elif c == 'M':\n return 'Nantou County', 21\n elif c == 'N':\n return 'Changhua County', 22\n elif c == 'O':\n return 'Hsinchu City', 35\n elif c == 'P':\n return 'Yunlin County', 23\n elif c == 'Q':\n return 'Chiayi County', 24\n elif c == 'R':\n return 'Tainan County', 25\n elif c == 'S':\n return 'Kaohsiung County', 26\n elif c == 'T':\n return 'Pingtung County', 27\n elif c == 'U':\n return 
'Hualien County', 28\n elif c == 'V':\n return 'Taitung County', 29\n elif c == 'W':\n return 'Kinmen County', 32\n elif c == 'X':\n return 'Penghu County', 30\n elif c == 'Y':\n return 'Yangmingshan Management Bureau', 31\n elif c == 'Z':\n return 'Lienchiang County', 33\n else:\n # Should not happen\n return None, None # The return value is a tuple containing two values", "def main() -> None:\n\n airports = {}\n some_info = {'item1': 1,\n 'item2': 2,\n }\n\n # adding items\n airports['YYZ'] = \"Toronto Pearson\"\n airports['YOW'] = \"Ottawa Canada\"\n airports['DUB'] = \"Dublin Ireland\"\n airports['LHR'] = \"London Heathrow\"\n\n # input & process\n print(\"All the airports:\")\n for key, value in airports.items():\n print(f\"The airport code is {key} for {value}.\")\n print(\"\")\n\n airport_name = input(\"Type in an airport code: \")\n if airport_name in airports:\n print(f\"The name of the airport you chose is {airports[airport_name]}.\")\n else:\n print(\"That airport is not in the airport's dictionary.\")\n\n print(\"\\nDone.\")", "def validate_pnumac(pnumac):\n if not re.match(pnumac_pattern, pnumac):\n raise ValidationError(u'%s is not a valid area code'%pnumac)", "def find_airport_code_by_city(city):\n airports = get_airports()\n\n if city == 'London':\n return 'LHR'\n\n for airport_code in airports:\n if airports[airport_code].lower() == city.lower():\n return airport_code\n return None", "def grab_area_code(phone_number):\r\n #number of form +1 XXX XXX XXXX (this should be the form get_twilio_client provides)\r\n if \"+1\" == phone_number[:2]:\r\n return phone_number[2:5]\r\n # number of form 1 XXX XXX XXXX\r\n if len(phone_number) == 11 and phone_number[0] == '1':\r\n return phone_number[1:4]\r\n # number of form XXX XXX XXXX\r\n if len(phone_number) == 10:\r\n return phone_number[:3]\r\n raise BadPhoneNumberError('\"%s\" is an invalid phone number.' 
% phone_number)", "def extract_bus_route(code):\n try:\n if int(code[-4:]): #testing if pattern ends in 4 digits, error here results in \"\" being returned\n return code[:-4].lstrip('0') #eliminates leading 0s (for routes containing letters eg 046A) and the trailing 4 digit mystery code\n except:\n return \"\" #error handling picked in bus_routes() function, this will catch null values and journey ids in the incorrect format", "def prompt_number(self):\r\n self.area_code = int(input(\"Area Code: \"))\r\n self.prefix = int(input(\"Prefix: \"))\r\n self.suffix = int(input(\"Suffix: \"))", "def get_year():\n try:\n year = input(\"Enter Year: \")\n year = int(year)\n if year > 2021 or year < 2000:\n os.system('cls')\n print(\"Accepted Values: 2000-2021\")\n return get_year()\n else:\n os.system('cls')\n return year\n except ValueError:\n os.system('cls')\n print(\"Accepted Values: 2000-2021\")\n return get_year()", "def checkBarcode(barcode):\r\n barcode = barcode.strip()\r\n if validateFormat(barcode) is False:\r\n return 'barcode not valid'\r\n else:\r\n barcode = barcode.replace('-','')\r\n if len(barcode) == 12:\r\n fullbarcode = barcode + str(findlastdigit(barcode))\r\n return fullbarcode\r\n elif len(barcode) == 13:\r\n if findlastdigit(barcode) == int(barcode[-1]):\r\n return 'Valid'\r\n else:\r\n return 'Invalid'", "def airport_info(airport_code):\n\n r = requests.get(\"{}AirportBoards\".format(FA_ENDPOINT), auth=(USERNAME,FA_KEY), params={\n \"airport_code\":airport_code,\n \"type\":\"departures\",\n \"howMany\": 100\n })\n\n return r", "def _is_valid_code(self, code):\r\n return code in COUNTRY_CODES", "def compute_zip_code(zip_code_text):\n zip_code = None\n if zip_code_text and len(zip_code_text) >= 5 and zip_code_text.isdigit():\n zip_code = zip_code_text[:5]\n return zip_code", "def get_address():\r\n address = input(\"What is the customer's address?: \")\r\n\r\n return address", "def _validate_code(self, key, code):\n \n if code is None:\n code = self.name\n \n if not isinstance(code, (str, unicode)):\n raise TypeError(\"Sequence.code should be an instance of str or \"\n \"unicode, not %s\" % type(code))\n \n code = Project._condition_code(code)\n \n return code", "def validate_card():\r\n print(\"Please insert your card\")\r\n card = int(input(\"Please enter 1 if you entered your card\"))\r\n return card" ]
[ "0.7340205", "0.6232221", "0.6139465", "0.6118442", "0.6065149", "0.60150987", "0.5987312", "0.58300376", "0.57871574", "0.57339126", "0.5719007", "0.5665261", "0.5665261", "0.5665261", "0.5651849", "0.56237924", "0.5594974", "0.5574781", "0.55745834", "0.556082", "0.55538803", "0.55270445", "0.5524375", "0.5497616", "0.5487198", "0.54859924", "0.54674476", "0.54432213", "0.54234654", "0.5415347" ]
0.81940216
0
Return a country name input after validating it
def countryInput(self, prompt):
    while True:
        name = input(prompt)
        if name not in self.travel_db.countries:
            print("Invalid country name. Please make sure name is capitalized.")
        else:
            return name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def country() -> str:", "def valid_country(value: Any) -> str:\n value = cv.string(value)\n all_supported_countries = holidays.list_supported_countries()\n\n try:\n raw_value = value.encode(\"utf-8\")\n except UnicodeError as err:\n raise vol.Invalid(\n \"The country name or the abbreviation must be a valid UTF-8 string.\"\n ) from err\n if not raw_value:\n raise vol.Invalid(\"Country name or the abbreviation must not be empty.\")\n if value not in all_supported_countries:\n raise vol.Invalid(\"Country is not supported.\")\n return value", "def city_country(city_name, country_name):\n city_country_combo = city_name + ', ' + country_name\n return city_country_combo.title()", "def test_country_name_in_countries(self):\n\t\tcountry_code = get_country_code('Andorra')\n\t\tself.assertEqual(country_code, 'ad')", "def country(alpha_2_code: str) -> None:", "def test_city_country(self):\n formatted_name = make_formatted_name('santiago', 'chile')\n self.assertEqual(formatted_name, 'Santiago, Chile')", "def city_country(city_name, country_name):\n combi = f\"{city_name.title()}, {country_name.title()}\"\n return combi.title()", "def city_country(city_name, country):\n formatted_string = f\"{city_name.title()}, {country.title()}\"\n return formatted_string", "def test_city_country(self):\n formatted_name = city_country('santiago', 'chile')\n self.assertEqual(formatted_name, 'Santiago, Chile')", "def _validate_country(country):\n if country == '' or country == '--': # lint-amnesty, pylint: disable=consider-using-in\n raise errors.AccountCountryInvalid(accounts.REQUIRED_FIELD_COUNTRY_MSG)", "def country_identifier(name):\n if name.lower() in _country_dict.keys():\n return _country_dict[name.lower()]\n else:\n return name", "def get_country(self, field_name='COUNTRY'):\n default = self.get_default(field_name)\n if default != '' and default != None:\n return '%s' % str(default).split(',')[-1].strip()\n return ''", "def test_country_name_not_in_countries(self):\n\t\tcountry_code = get_country_code('Venezuela, RB')\n\t\tself.assertEqual(country_code, 've')", "def get_country_name(ip_addr):\n global geoip_db_reader\n try:\n name = geoip_db_reader.country(ip_addr).country.name\n return name\n except geoip2.errors.AddressNotFoundError:\n return None", "def country(name):\n return location_db().find(name=name)[\"country\"]", "def city_country(city, country):\n place = f\"{city}, {country}\"\n return place.title()", "def convert_country(country):\n if (country and 'China' in country) or \\\n country == 'Chin' or country == 'CHINA':\n country = 'China'\n elif country and 'Brazil' in country or \\\n country == 'Brasil' or \\\n country == 'ITA - Instituto Tecnologico de Aeronautica (':\n country = 'Brazil'\n elif country and 'Argentina' in country:\n country = 'Argentina'\n elif country == 'Czechia':\n country = 'Czech Republic'\n elif 'Norwegian' in country:\n country = 'Norway'\n elif country and 'United Kingdom' in country:\n country = 'United Kingdom'\n elif country and 'Hong Kong' in country:\n country = 'Hong Kong'\n elif country == 'Cameroun':\n country = 'Cameroon'\n elif (country and 'Chile' in country) or country == 'CHILE':\n country = 'Chile'\n elif (country and 'United States of America' in \\\n country) or country == 'United States' or country \\\n == 'USA' or 'Florida' in country or \\\n 'California' in country or\\\n country == 'National Reference Centre for' or \\\n country == 'United State of America' or \\\n country == 'U.S.A.' 
or \\\n country == 'Virginia':\n country = 'United States of America'\n elif country=='Republic of Panamá' or country=='Panamá' or 'Panama' in country:\n country = 'Panama'\n elif 'Canada' in country:\n country = 'Canada'\n elif 'Colombia' in country:\n country = 'Colombia'\n elif 'Spain' in country or country=='España':\n country = 'Spain'\n elif 'Iran' in country:\n country = 'Iran'\n elif 'Saudi Arabia' in country:\n country = 'Saudi Arabia'\n elif 'Italy' in country:\n country = 'Italy'\n elif 'Japan' in country:\n country = 'Japan'\n elif 'Germany' in country:\n country = 'Germany'\n elif 'Luxembourg' in country:\n country = 'Luxembourg'\n elif ('France' in country) or country == 'Marseille':\n country = 'France'\n elif country == 'ROC' or country == 'R. O. C':\n country = 'Taiwan'\n elif country == 'Brasil':\n country = 'Brazil'\n elif country == 'México' or 'Mexico' in country:\n country = 'Mexico'\n elif 'Slowakia' in country:\n country = 'Slowakia'\n elif country == 'Korea' or 'Republic of Korea' in country:\n country = 'South Korea'\n elif country == 'United Kindgom':\n country = 'United Kingdom'\n elif country and 'Netherlands' in country:\n country = 'Netherlands'\n elif country == 'Commonwealth of Australia' or 'Australia' in country:\n country = 'Australia'\n elif 'Singapore' in country:\n country = 'Singapore'\n elif country and (country[0].isdigit() or country[0] == '+'):\n country = 'N/A'\n return country", "def get_country_code(country_name):\n for code, name in COUNTRIES.items():\n if name == country_name:\n return code\n elif country_name == 'Yemen, Rep.':\n return 'ye'\n elif country_name == 'Vietnam':\n return 'vn'\n elif country_name == 'Tanzania':\n return 'tz'\n elif country_name == 'Moldova':\n return 'md'\n elif country_name == 'Macao SAR, China':\n return 'mo'\n elif country_name == 'Macedonia, FYR':\n return 'mk'\n elif country_name == 'Libya':\n return 'ly'\n elif country_name == 'Lao PDR':\n return 'la'\n elif country_name == 'Korea, Dem. Rep.':\n return 'kp'\n elif country_name == 'Korea, Rep.':\n return 'kr'\n elif country_name == 'Gambia':\n return 'gm'\n elif country_name == 'Iran, Islamic Rep.':\n return 'ir'\n elif country_name == 'Hong Kong SAR':\n return 'hk'\n elif country_name == 'Congo, Dem. Rep.':\n return 'cd'\n elif country_name == 'Congo, Rep.':\n return 'cf'\n elif country_name == 'Macao SAR, China':\n return 'mo'\n elif country_name == 'Macedonia, FYR':\n return 'mk'\n elif country_name == 'Libya':\n return 'ly'\n elif country_name == 'Lao PDR':\n return 'la'\n elif country_name == 'Korea, Dem. 
Rep.':\n return 'kp'\n elif country_name == 'Korea, Rep.':\n return 'kr'\n elif country_name == 'Gambia':\n return 'gm'\n # If the country wasn't found, return None.\n return None", "def validateCountry(self, country_name):\n if country_name in self.travel_db.countries:\n return True\n else:\n return False", "def get_country_validation_error(country):\n return _validate(_validate_country, errors.AccountCountryInvalid, country)", "def city_country(city, country):\n return city.title() + \", \" + country.title()", "def city_country(city, country):\r\n\treturn(city.title() + ', ' + country.title())", "def city_country(city, country):\n return f\"{city.title()}, {country.title()}\"", "def city_country(city, country):\n return f\"{city.title()}, {country.title()}\"", "def city_country(city, country):\n return(city.title() + \", \" + country.title())", "def city_country(city, country):\n full_city = city + \", \" + country\n return full_city.title()", "def city_country(city, country):\n city_and_country = city + ', ' + country\n return city_and_country.title()", "def country(self) -> Optional[str]:\n return pulumi.get(self, \"country\")", "def country(self) -> Optional[str]:\n return pulumi.get(self, \"country\")", "def get_city_country(city, country, population=''):\n if population:\n location = city + ' ' + country + ' ' + str(population)\n return location.title()\n\n else:\n location = city + ' ' + country\n return location.title()" ]
[ "0.6980752", "0.69086546", "0.6828054", "0.669586", "0.66514415", "0.66304183", "0.6440515", "0.64396936", "0.6435864", "0.6435098", "0.6365185", "0.63565147", "0.6318801", "0.6305565", "0.62800944", "0.6262626", "0.6246364", "0.6230827", "0.62267274", "0.62176657", "0.60721886", "0.6065407", "0.60645926", "0.60645926", "0.60619706", "0.60607874", "0.6057818", "0.60300016", "0.60300016", "0.60205144" ]
0.79892355
0
Return a currency code input after validating it
def currencyInput(self, prompt):
    while True:
        code = input(prompt).upper()
        if code not in self.travel_db.currencies:
            print("Invalid currency code")
        else:
            return code
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_currency(currency_code):\n try:\n rate = rates.get_rates(currency_code)\n return 0\n except:\n flash(f'Error: {currency_code} is not a valid currency')\n return 1", "def get_currency(test_loop_count=None) -> str:\n loop_count = 0\n while True:\n try:\n currency = input(\"Enter currency:\").lower()\n return Currency[currency].code\n except KeyError:\n loop_count += 1\n if test_loop_count == loop_count:\n break\n print(\"This is the incorrect currency format\")\n continue", "def currency(self, currency):\n allowed_values = [\"AED\", \"AFN\", \"ALL\", \"AMD\", \"ANG\", \"AOA\", \"ARS\", \"AUD\", \"AWG\", \"AZN\", \"BAM\", \"BBD\", \"BDT\", \"BGN\", \"BHD\", \"BIF\", \"BMD\", \"BND\", \"BOB\", \"BOV\", \"BRL\", \"BSD\", \"BTN\", \"BWP\", \"BYR\", \"BZD\", \"CAD\", \"CDF\", \"CHE\", \"CHF\", \"CHW\", \"CLF\", \"CLP\", \"CNY\", \"COP\", \"COU\", \"CRC\", \"CUC\", \"CUP\", \"CVE\", \"CZK\", \"DJF\", \"DKK\", \"DOP\", \"DZD\", \"EGP\", \"ERN\", \"ETB\", \"EUR\", \"FJD\", \"FKP\", \"GBP\", \"GEL\", \"GHS\", \"GIP\", \"GMD\", \"GNF\", \"GTQ\", \"GYD\", \"HKD\", \"HNL\", \"HRK\", \"HTG\", \"HUF\", \"IDR\", \"ILS\", \"INR\", \"IQD\", \"IRR\", \"ISK\", \"JMD\", \"JOD\", \"JPY\", \"KES\", \"KGS\", \"KHR\", \"KMF\", \"KPW\", \"KRW\", \"KWD\", \"KYD\", \"KZT\", \"LAK\", \"LBP\", \"LKR\", \"LRD\", \"LSL\", \"LTL\", \"LVL\", \"LYD\", \"MAD\", \"MDL\", \"MGA\", \"MKD\", \"MMK\", \"MNT\", \"MOP\", \"MRO\", \"MRU\", \"MUR\", \"MVR\", \"MWK\", \"MXN\", \"MXV\", \"MYR\", \"MZN\", \"NAD\", \"NGN\", \"NIO\", \"NOK\", \"NPR\", \"NZD\", \"OMR\", \"PAB\", \"PEN\", \"PGK\", \"PHP\", \"PKR\", \"PLN\", \"PYG\", \"QAR\", \"RON\", \"RSD\", \"RUB\", \"RWF\", \"SAR\", \"SBD\", \"SCR\", \"SDG\", \"SEK\", \"SGD\", \"SHP\", \"SLL\", \"SOS\", \"SRD\", \"SSP\", \"STD\", \"STN\", \"SVC\", \"SYP\", \"SZL\", \"THB\", \"TJS\", \"TMT\", \"TND\", \"TOP\", \"TRY\", \"TTD\", \"TWD\", \"TZS\", \"UAH\", \"UGX\", \"USD\", \"USN\", \"USS\", \"UYI\", \"UYU\", \"UZS\", \"VEF\", \"VES\", \"VND\", \"VUV\", \"WST\", \"XAF\", \"XCD\", \"XOF\", \"XPF\", \"YER\", \"ZAR\", \"ZMW\", \"ZWL\"] # noqa: E501\n if currency not in allowed_values:\n raise ValueError(\n \"Invalid value for `currency` ({0}), must be one of {1}\" # noqa: E501\n .format(currency, allowed_values)\n )\n\n self._currency = currency", "def getUserCurrency():", "def getCurrencySymbol():", "def currency_code(self):\n return self.__currency_code", "def validate_bet(buy_type, cash_in):\n while cash_in < 0:\n print(\"Invalid\", buy_type)\n cash_in = round(float(input(\"Enter \" + buy_type + \": $\")), 2)\n\n return cash_in", "def get_user_input():\n return float(input('Your transaction amount please: '))", "def currency_checker(start, new, ammount):\n\n newamount = str(round(c.convert(start, new, amount),2))\n return newamount", "def getActiveCurrency():", "def get_price():\n\n while (True):\n price = input(\"Enter the purchase price (xx.xx) or 'q' to quit: \")\n if(price.capitalize() == 'Q'):\n return -1\n elif price.replace('.', '').isdigit() and not is_valid(price):\n print(\"Illegal price: Must be a non-negative multiple of 5 cents.\\n\")\n elif not price.replace('.', '').isdigit():\n print(\"Illegal entry: Must be a price like (1.75) or 'q' for quit.\\n\")\n else:\n return float(price)", "def currency_code_default():\n\n from common.models import InvenTreeSetting\n\n try:\n code = InvenTreeSetting.get_setting('INVENTREE_DEFAULT_CURRENCY', create=False, cache=False)\n except Exception: # pragma: no cover\n # Database may not yet be ready, no need to throw an error 
here\n code = ''\n\n if code not in CURRENCIES:\n code = 'USD' # pragma: no cover\n\n return code", "def getCurrencyCode(self):\n return self.currency_code", "def clean_currency(x: str):\n # cprint(f\"### Function Name:-> {inspect.stack()[0][3]} ###\", 'yellow', 'on_grey', attrs=['bold'])\n try:\n # x = str(x)\n if isinstance(x, str):\n if x.startswith(\"$\"):\n return x.replace('$', '').replace(',', '')\n # return float(x)\n return x\n except Exception as ex:\n cprint(traceback.format_exc(), 'red')\n log_exception(traceback.format_exc())", "def GetDollars():\n\n while True:\n us_dollars_input = raw_input(\"Enter a dollar and cents vale to convert to euros: \")\n try:\n us_dollars = float(us_dollars_input)\n except ValueError:\n print us_dollars, \"is not a valid dollar amount. Try again.\"\n continue\n return us_dollars", "def get_year():\n try:\n year = input(\"Enter Year: \")\n year = int(year)\n if year > 2021 or year < 2000:\n os.system('cls')\n print(\"Accepted Values: 2000-2021\")\n return get_year()\n else:\n os.system('cls')\n return year\n except ValueError:\n os.system('cls')\n print(\"Accepted Values: 2000-2021\")\n return get_year()", "def test_currency_case(self):\n form = copy.deepcopy(self.base_form)\n form[\"mc_currency\"] = \"UsD\"\n Payment.process_paypal_ipn(form)\n payments = Payment.query.all()\n self.assertEqual(payments[0].currency, Currency.US_Dollar.value)", "def validateCurrency(self, currency_code):\n if currency_code in self.travel_db.currencies:\n return True\n else:\n return False", "def validate_card():\r\n print(\"Please insert your card\")\r\n card = int(input(\"Please enter 1 if you entered your card\"))\r\n return card", "def clean_currency(x):\n \n if isinstance(x, str):\n x=x.replace(\"*\",\"\")\n x=x.replace(\",\",\"\")\n if x=='':\n return(0)\n elif x[0]!='$':\n return(0)\n else:\n x=x.split(' ')[0]\n x=x.replace('$',\"\")\n return float(x)\n return(x)", "def get_credit_card():\r\n print(\"- PAYMENT INFORMATION -\")\r\n print(\"Please enter your credit card information. This information will NOT be saved.\\n\")\r\n card_number = input(\"Please type your CREDIT CARD NUMBER: \").strip()\r\n card_expiry= input(\"Please type the EXPIRY DATE (MM/YY): \").strip().replace(\"/\",\"\")\r\n cvv = input(\"Please type the 3 digit SECURITY CODE: \").strip()\r\n zip_code = input(\"Please type your ZIP/POSTAL CODE: \").strip()\r\n\r\n try:\r\n card = CreditCard(card_number, card_expiry, cvv, zip_code)\r\n except Exception as e:\r\n print(\"Card details INVALID, please try again. 
\\n\", e)\r\n return get_credit_card()\r\n\r\n return card", "def validate_pin_input(value):\n try:\n int(value)\n return f\"D{value}\"\n except ValueError:\n return value.upper()", "def getCurrencyIsoCode(id=None):", "def get_price():\n price = input('Please enter the price of the piece: ')\n while not controls_utils.price_is_right(price):\n print('Price must be a numerical value ')\n price = input('Please enter the price of the piece: ')\n return int(price)", "def getBaseCurrency():", "def getValue(currency=None):", "def add_currency(self):\n home_value_exists = False\n foreign_value_exists = False\n if self.root.ids.new_home_currency_input.text == '':\n self.root.ids.new_home_currency_input.hint_text = 'Must enter an amount before calibrating'\n else:\n home_value_exists = True\n if self.root.ids.new_foreign_currency_input.text == '':\n self.root.ids.new_foreign_currency_input.hint_text = 'Must enter an amount before converting'\n else:\n foreign_value_exists = True\n if foreign_value_exists:\n try:\n foreign_amount = float(self.root.ids.new_foreign_currency_input.text)\n self.root.ids.new_foreign_currency_input.hint_text = 'Add value comparatively to home currency'\n valid_foreign_amount = True\n except ValueError:\n self.root.ids.new_foreign_currency_input.text = ''\n self.root.ids.new_foreign_currency_input.hint_text = 'Invalid amount (not a number)'\n foreign_amount = 0\n valid_foreign_amount = False\n else:\n valid_foreign_amount = False\n foreign_amount = 0\n if home_value_exists:\n try:\n home_amount = float(self.root.ids.new_home_currency_input.text)\n self.root.ids.new_home_currency_input.hint_text = 'Add value comparatively to foreign currency'\n valid_home_amount = True\n except ValueError:\n self.root.ids.new_home_currency_input.text = ''\n self.root.ids.new_home_currency_input.hint_text = 'Invalid amount (not a number)'\n home_amount = 0\n valid_home_amount = False\n else:\n valid_home_amount = False\n home_amount = 0\n valid_input = home_value_exists is foreign_value_exists is valid_foreign_amount is valid_home_amount is True\n if self.root.ids.new_home_currency.text == '':\n valid_input = False\n self.root.ids.new_home_currency.hint_text = 'Must enter new currency name'\n elif self.root.ids.new_home_currency.text in self.currencies:\n valid_input = False\n self.root.ids.new_home_currency.text = ''\n self.root.ids.new_home_currency.hint_text = 'Currency already exists'\n else:\n self.root.ids.new_home_currency.hint_text = 'Enter currency name'\n if valid_input and home_amount > 0 and foreign_amount > 0:\n if self.new_foreign_currency != 'Select':\n self.currency_data.append([self.root.ids.new_home_currency.text, str(\n float(self.currency_data[find_nested_index(self.currency_data, 0, self.new_foreign_currency)][1]) *\n home_amount / foreign_amount)])\n self.currencies.append(self.root.ids.new_home_currency.text)\n self.root.ids.currency_output_label.text = 'Added currency: ' + self.root.ids.new_home_currency.text\n else:\n self.root.ids.currency_output_label.text = 'Must have a foreign currency'", "def getDefaultCurrency():", "def get_currency():\n return _currency", "def country(alpha_2_code: str) -> None:" ]
[ "0.6704559", "0.6646507", "0.6486832", "0.62412405", "0.6107163", "0.60303926", "0.600023", "0.59911394", "0.5982588", "0.59745014", "0.5958056", "0.5924218", "0.587548", "0.58722013", "0.5858742", "0.5832311", "0.5812267", "0.5781818", "0.5777066", "0.5729549", "0.5705858", "0.5700983", "0.5688952", "0.56808054", "0.56591165", "0.5655157", "0.56035244", "0.558855", "0.5584337", "0.55811733" ]
0.7700206
0
Return True if airport code valid, False otherwise.
def validateAirport(self, code):
    print(code)
    if code in self.travel_db.airports:
        return True
    else:
        return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _is_valid_code(self, code):\r\n return code in COUNTRY_CODES", "def is_valid(postal_code):\n return bool(re.match(UK_POST_CODE_REGEX, postal_code, re.VERBOSE)) if postal_code else False", "def check_code(item_code):\r\n # RA matches\r\n if re.match(r'^MCRNC[0-9]{4}\\.T$', item_code):\r\n return True\r\n\r\n if re.match(r'^RAN[0-9]{3,4}(\\.[0-9])?C?(\\.T)?$', item_code):\r\n return True\r\n\r\n if re.match(r'^RAS[0-9]{5}$', item_code):\r\n return True\r\n\r\n if re.match(r'^RNC[0-9]{4}\\.T$', item_code):\r\n return True\r\n\r\n if re.match(r'^RU[0-9]{5}(\\.T)?$', item_code):\r\n return True\r\n\r\n # Feature ID (RAN) matches\r\n if re.match(r'^RAN[0-9]{2,5}$', item_code):\r\n return True\r\n\r\n if re.match(r'^(?P<code>RAN[1,2](\\.[0-9]{3,4}))$', item_code):\r\n return True\r\n\r\n return False", "def airportCodeInput(self, prompt):\n while True:\n code = input(prompt).upper()\n if code not in self.travel_db.airports:\n print(\"Invalid airport code\")\n else:\n return code", "def is_valid_postal_code(postal_code):\n is_code_valid = False\n postcode_regex = re.compile(r'^\\d{2}-\\d{3}$')\n\n if postcode_regex.search(postal_code) is not None:\n is_code_valid = True\n\n return is_code_valid", "def validate_issue_year(passport: map) -> bool:\n if passport.get('iyr'):\n if int(passport['iyr']) >= 2010 and int(passport['iyr']) <= 2020:\n return True\n\n return False", "def valid_zipcode(line):\n zipcode = line.o_zip_code\n invalid_zip = len(zipcode) not in [5, 9] and zipcode.isdigit()\n if invalid_zip:\n rule = 'Zipcode length'\n new_row = Error(e_name=rule, order_key=line.primary_key)\n line.errors.append(new_row)\n return False\n return True", "def valid_passport(passport: map) -> bool:\n results = []\n results.append(validate_birth_year(passport))\n results.append(validate_issue_year(passport))\n results.append(validate_exp_year(passport))\n results.append(validate_height(passport))\n results.append(validate_hair_color(passport))\n results.append(validate_eye_color(passport))\n results.append(validate_passport_number(passport))\n\n return any(results) and all(results)", "def is_valid_language_code(code):\n try:\n iso639.languages.get(part3=code)\n return True\n except KeyError:\n return False", "def CheckZipCode(zipcode):\n # see if there are enough digits\n if (len(zipcode) >= 5):\n # check if numerical\n try:\n int(zipcode)\n return True\n except:\n return False\n else:\n return False", "def validate_outward_code(outward_code: str) -> bool:\n outward_pattern_is_correct = re.fullmatch(\n '^{}$'.format(OUTWARD_REGEX),\n outward_code\n )\n\n if outward_pattern_is_correct:\n return True\n\n raise exceptions.InvalidOutwardCodeFormatError(\n 'Outward code is not correctly formatted'\n )", "def is_id_valid(id_code: str) -> bool:\n if is_valid_gender_number(int(id_code[0:1])):\n if is_valid_year_number(int(id_code[1:3])):\n if is_valid_month_number(int(id_code[3:5])):\n if is_valid_day_number(int(id_code[0:1]), int(id_code[1:3]), int(id_code[3:5]), int(id_code[5:7])):\n if is_valid_birth_number(int(float(id_code[7:10]))):\n if is_valid_control_number(id_code):\n return True\n else:\n return False\n else:\n return False\n\n else:\n return False\n else:\n return False\n else:\n return False", "def validate_key_code(self, code):\n\n key = self.connect().query(KeyCode)\\\n .filter(KeyCode.code == code)\\\n .first()\n\n if key and (key.user and key.enabled):\n return True\n return False", "def valid(self):\n try:\n if self.getPret() > 0 and self.getAn() > 0 and 
self.validProgram(self.getProgram()):\n return True\n except:\n return False\n return False", "def valid_passport_format(passport_number):\n passport_format = re.compile('^\\w{5}-\\w{5}-\\w{5}-\\w{5}-\\w{5}$')\n\n if passport_format.match(passport_number):\n return True\n else:\n return False", "def is_AD(code):\n assert isinstance(code, str)\n code_set = ('331.0', '3310', 'G30')\n return code.startswith(code_set)", "def is_valid_two_digit_char(code: str) -> bool:\n\n return 10 <= int(code) <= 26", "def is_id_valid(id_code: str) -> bool:\n if id_code.isdigit():\n if len(str(id_code)) == 11:\n id_code = str(id_code)\n gender_number = int(id_code[0:1])\n day = int(id_code[5:7])\n month = int(id_code[3:5])\n year = id_code[1:3]\n birth_number = id_code[7:10]\n if is_valid_gender_number(gender_number) \\\n and is_valid_year_number(int(year)) \\\n and is_valid_month_number(int(month)) \\\n and is_valid_day_number(gender_number, int(year), int(month), int(day)) \\\n and is_valid_birth_number(int(birth_number)) \\\n and is_valid_control_number(str(id_code)):\n return True\n return False\n return False\n return False", "def validate_pnumac(pnumac):\n if not re.match(pnumac_pattern, pnumac):\n raise ValidationError(u'%s is not a valid area code'%pnumac)", "def is_valid_passport_id(passport_id: int) -> bool:\n return len(passport_id) == 9 and passport_id.isnumeric()", "def _is_station_valid(station) -> bool:\n if station['stationStatus']['id'] != 'ACTIVE':\n return False\n if station['latitude'] is None or station['longitude'] is None:\n # We can't use a station if it doesn't have a latitude and longitude.\n # pylint: disable=fixme\n # TODO : Decide if a station is valid if we can't determine its ecodivision and/or core fire season\n return False\n return True", "def isValidPeptide(self, sequence):\n for position, disallowedAAs in self._rules.iteritems():\n nextAA = sequence[position - 1].upper()\n if nextAA in disallowedAAs:\n return False\n return True", "def validate_address(address:str) -> bool:\r\n return True", "def validate_exp_year(passport: map) -> bool:\n if passport.get('eyr'):\n if int(passport['eyr']) >= 2020 and int(passport['eyr']) <= 2030:\n return True\n\n return False", "def is_valid_address(address: str) -> bool:\n try:\n Account.validate_address(address)\n except ValueError:\n return False\n return True", "def check_ean(eancode):\n if not eancode:\n return True\n if len(eancode) <> 13:\n return False\n try:\n int(eancode)\n except:\n return False\n return ean_checksum(eancode) == int(eancode[-1])", "def valid(self, a_card: card.Card) -> bool:\n if self._pile:\n return self._pile[-1].tableau_valid(a_card)\n if a_card.value == 12:\n return True\n return False", "def validate_birth_year(passport: map) -> bool:\n if passport.get('byr'):\n if int(passport['byr']) >= 1920 and int(passport['byr']) <= 2002:\n return True\n\n return False", "def is_valid_postal_code(postal_code):\n assert postal_code is not None\n postal_code = postal_code.replace(\" \", \"\")\n postal_code_re = re.compile(r\"\\s*(\\w\\d\\s*){3}\\s*\")\n return postal_code_re.match(postal_code) is not None", "def _is_valid_country(flag: str) -> bool:\n\n return bool(\n COUNTRY_CODE_FLAGS_REGEX.findall(flag)\n )" ]
[ "0.70381415", "0.6726082", "0.64569366", "0.6375067", "0.630065", "0.6263566", "0.6250235", "0.62470305", "0.6238505", "0.62053096", "0.6156845", "0.61343706", "0.61318254", "0.6113142", "0.6089113", "0.6086316", "0.6080452", "0.6008818", "0.5969722", "0.59552896", "0.5907283", "0.5890249", "0.5880435", "0.58551425", "0.58524776", "0.5849668", "0.5804292", "0.5800776", "0.57972115", "0.57876015" ]
0.8865747
0
Return True if country_name valid, False otherwise.
def validateCountry(self, country_name):
    if country_name in self.travel_db.countries:
        return True
    else:
        return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_valid_country():\n assert valid_country(\"Democratic Republic of Lungary\") is True\n assert valid_country(\"Kraznoviklandstan\") is True\n assert valid_country(\"kraznoviklandstan\") is True\n assert valid_country(\"KRAZNOVIKLANDSTAN\") is True\n\n assert valid_country(\"Democratic_Republic982759\") is False\n assert valid_country(\"Kraznoviklandsta\") is False\n assert valid_country(\"Principalities of Fryed\") is False\n assert valid_country(\"FRY\") is False", "def _is_valid_country(flag: str) -> bool:\n\n return bool(\n COUNTRY_CODE_FLAGS_REGEX.findall(flag)\n )", "def _validate_country(country):\n if country == '' or country == '--': # lint-amnesty, pylint: disable=consider-using-in\n raise errors.AccountCountryInvalid(accounts.REQUIRED_FIELD_COUNTRY_MSG)", "def is_valid(name):\n return bool(name)", "def test_country_name_in_countries(self):\n\t\tcountry_code = get_country_code('Andorra')\n\t\tself.assertEqual(country_code, 'ad')", "def valid_country(value: Any) -> str:\n value = cv.string(value)\n all_supported_countries = holidays.list_supported_countries()\n\n try:\n raw_value = value.encode(\"utf-8\")\n except UnicodeError as err:\n raise vol.Invalid(\n \"The country name or the abbreviation must be a valid UTF-8 string.\"\n ) from err\n if not raw_value:\n raise vol.Invalid(\"Country name or the abbreviation must not be empty.\")\n if value not in all_supported_countries:\n raise vol.Invalid(\"Country is not supported.\")\n return value", "def test_country_name_not_in_countries(self):\n\t\tcountry_code = get_country_code('Venezuela, RB')\n\t\tself.assertEqual(country_code, 've')", "def name_valid(name):\n return name.isalpha()", "def validname(name):\r\n return len(name)>0 and (\r\n Context.__invalid_character.search(name) is None)", "def validate_name(name:str) -> bool:\r\n return name.isalpha() and name.count(\" \") == 0 and len(name) >= 2", "def is_valid(postal_code):\n return bool(re.match(UK_POST_CODE_REGEX, postal_code, re.VERBOSE)) if postal_code else False", "def _is_valid_code(self, code):\r\n return code in COUNTRY_CODES", "def is_valid_business_name(self):\n return self.business_name.lower() not in INVALID_BUSINESS_NAME", "def invalid_name(name):\n if any(not item.isalpha() for item in str(name)):\n return True\n return False", "def is_bank_name_valid(name_to_check: str):\n def is_name_short_enough():\n return True if len(name_to_check) <= 12 else False\n\n def is_name_only_letter():\n return True if name_to_check.isalpha() else False\n\n return True if is_name_short_enough() and is_name_only_letter() else False", "def validate_name(self, username: str) -> bool:\n\t\treturn not self.registry.name_taken(username)", "def is_valid_name(self):\n\n if self.whitelist_name == '':\n return True\n\n if len(self.whitelist_name) >= 64:\n LOGGER.debug('invalid name %s; must be less than 64 bytes',\n self.whitelist_name)\n return False\n\n return True", "def _check_is_name_valid(self, name):\n if name in self.forbidden_names or name.endswith(\n self.forbidden_extensions) or self.__check_is_match_regex(name):\n return False\n return True", "def checkPostalCode(self, code, country):\n if country == 'US':\n USZipCodeField().clean(code)", "def isValidDataTypeName(name: unicode) -> bool:\n ...", "def IsVPCNameValid(vpc):\n if len(vpc) < 1 or len(vpc) > 63:\n return False\n return bool(re.match('^[a-z]$|^[a-z][a-z0-9-]*[a-z0-9]$', vpc))", "def test_clean_country_flag(self):\n # country_flag = self.cleaned_data.get('country_flag', None)\n # field = 
self.fields.get(self.country_field_name, None)\n # if not field and hasattr(self, 'computed_fields'):\n # field = self.computed_fields.get(self.country_field_name, None)\n # if field.initial == self.cleaned_data.get(self.country_field_name, None)\n pass", "def _validate_name(name):\r\n\tif HOST_NAME != name and len(name) > 0 and ZOOM_PHRASES[0] not in name and name not in WAITING_ROOM:\r\n\t\treturn True\r\n\treturn False", "def validate_name(name):\n name = name.strip()\n m = re.search('^[a-zA-Z0-9 ]{3,30}$', name)\n if m is None:\n return False\n else:\n return True", "def test_city_country(self):\n formatted_name = make_formatted_name('santiago', 'chile')\n self.assertEqual(formatted_name, 'Santiago, Chile')", "def check_name(name):\n name = sanitize_name(name)\n for letter in name:\n if letter not in all_letters:\n # print(f\"Bad letter = {letter}\")\n return False\n role = extract_role(name)\n # remove group\n name = name.replace(f' - {role}', '')\n try:\n parts = name.split(' ')\n firstname = parts[0].title()\n if firstname[0] not in letters:\n return False\n for letter in firstname[1:]:\n if letter not in LETTERS:\n return False\n familynames = parts[1:]\n for familyname in familynames:\n if familyname[0] not in letters:\n return False\n for letter in familyname[1:]:\n if letter not in LETTERS:\n return False\n return True\n except:\n return False", "def validate_address(address:str) -> bool:\r\n return True", "def is_valid_cname(common_name: str) -> bool:\n return True if Band.band_range(common_name) else False", "def nameIsValid(self, name):\n self.notify.debug('nameIsValid')\n if (name in self.usedNames):\n return OTPLocalizer.ToonAlreadyExists % (name)\n\n problem = NameCheck.checkName(name, font=self.nameEntry.getFont())\n if problem:\n return problem\n\n # name has passed local checks\n return None", "def validUsername(name):\n if validCard(name):\n return False\n if ',' in name or \"?\" in name or \"=\" in name or \";\" in name or \"/\" in name or \"^\" in name or '\"' in name or '@' in name:\n return False\n if len(name) < 3:\n return False\n if \" \" in name:\n return False\n \n return True" ]
[ "0.7013575", "0.700049", "0.6939837", "0.68034947", "0.67918384", "0.6622717", "0.6601707", "0.6583337", "0.6476302", "0.64751047", "0.6429186", "0.64257467", "0.64110726", "0.6369207", "0.63588893", "0.62856215", "0.6275524", "0.62537944", "0.62458146", "0.6229923", "0.6229324", "0.61842495", "0.6144813", "0.6137241", "0.61069685", "0.6079655", "0.606545", "0.6057639", "0.60503554", "0.6034989" ]
0.8314883
0
Return True if currency_code valid, False otherwise.
def validateCurrency(self, currency_code): if currency_code in self.travel_db.currencies: return True else: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_currency(currency_code):\n try:\n rate = rates.get_rates(currency_code)\n return 0\n except:\n flash(f'Error: {currency_code} is not a valid currency')\n return 1", "def _is_valid_code(self, code):\r\n return code in COUNTRY_CODES", "def is_valid(postal_code):\n return bool(re.match(UK_POST_CODE_REGEX, postal_code, re.VERBOSE)) if postal_code else False", "def is_currency(currency: str, locale: Locale | str | None = None) -> bool:\n if not currency or not isinstance(currency, str):\n return False\n try:\n validate_currency(currency, locale)\n except UnknownCurrencyError:\n return False\n return True", "def validate_key_code(self, code):\n\n key = self.connect().query(KeyCode)\\\n .filter(KeyCode.code == code)\\\n .first()\n\n if key and (key.user and key.enabled):\n return True\n return False", "def is_code_valid_checksum(processed_code):\n\n if processed_code.isnumeric():\n list_of_digits = [int(digit) for digit in processed_code]\n else:\n converted_digits = convert_code_to_decimal(processed_code)\n list_of_digits = [int(digit) for digit in converted_digits]\n\n return sum(list_of_digits) > 0 and get_calculated_checksum(list_of_digits) % 11 == 0", "def is_valid(self):\n\t\treturn bool(call_sdk_function('PrlLic_IsValid', self.handle))", "def is_valid_postal_code(postal_code):\n is_code_valid = False\n postcode_regex = re.compile(r'^\\d{2}-\\d{3}$')\n\n if postcode_regex.search(postal_code) is not None:\n is_code_valid = True\n\n return is_code_valid", "def validate(self):\n if self.amount > 0:\n return True\n return False", "def valid_totp(self, code, period=30):\n return valid_code(code) and self.totp(period) == int(code)", "def is_valid_language_code(code):\n try:\n iso639.languages.get(part3=code)\n return True\n except KeyError:\n return False", "def is_valid(self):\n return phonenumbers.is_valid_number(self)", "def is_luhn_valid(card_number):\n is_valid = luhn_checksum(card_number) == 0\n return is_valid", "def _is_amount_valid(self):\n\t\tamount = self.entry_amount.get()\n\n\t\ttry:\n\t\t\tfloat(amount)\n\t\texcept ValueError:\n\t\t\tmessagebox.showerror(\"Invalid Amount\", \"Amount must be a positive number\")\n\t\t\treturn False\n\n\t\tif float(amount) < 0:\n\t\t\tmessagebox.showerror(\"Invalid Amount\", \"Amount must be a positive number\")\n\t\t\treturn False\n\t\telse:\n\t\t\treturn True", "def check_code(item_code):\r\n # RA matches\r\n if re.match(r'^MCRNC[0-9]{4}\\.T$', item_code):\r\n return True\r\n\r\n if re.match(r'^RAN[0-9]{3,4}(\\.[0-9])?C?(\\.T)?$', item_code):\r\n return True\r\n\r\n if re.match(r'^RAS[0-9]{5}$', item_code):\r\n return True\r\n\r\n if re.match(r'^RNC[0-9]{4}\\.T$', item_code):\r\n return True\r\n\r\n if re.match(r'^RU[0-9]{5}(\\.T)?$', item_code):\r\n return True\r\n\r\n # Feature ID (RAN) matches\r\n if re.match(r'^RAN[0-9]{2,5}$', item_code):\r\n return True\r\n\r\n if re.match(r'^(?P<code>RAN[1,2](\\.[0-9]{3,4}))$', item_code):\r\n return True\r\n\r\n return False", "def is_valid_expiration_year(expiration_year: int) -> bool:\n return expiration_year.isnumeric() and 2020 <= int(expiration_year) <= 2030", "def check_code(self, code):\n\n try:\n # Check if the code is valid\n voucher = Voucher.objects.get(code=code)\n\n # Check if the valid code in not completely redeemed\n if voucher.still_valid():\n message = \"Voucher code is valid, your discount = %s\" \\\n % voucher.get_discount_value()\n valid = True\n voucher.redeem_code()\n voucher.save()\n else:\n message = \"Voucher code has been redeemed.\"\n valid = False\n\n except 
Voucher.DoesNotExist as err:\n message = \"Voucher code is invalid\"\n valid = False\n\n return {'valid': valid, 'message': message}", "def is_currency_available(msrp_currencies, currency=None):\n available_currencies = set(AVAILABLE_CURRENCIES.keys()).intersection(msrp_currencies)\n\n if not currency and not available_currencies:\n return False\n \n if currency not in available_currencies:\n return False \n \n return True", "def is_valid(self):\n\n # get company id + filial id (first 12 digits)\n cnpj = self.cnpj[:12]\n \n # and following rules we stabilish some weight to multiply\n def weightlist(s=12):\n x = (list(range(2,10))*2)[:s]\n x.reverse()\n return x\n \n # while cnpj isn't complete\n while len(cnpj) < 14:\n\n # run trought numbers (x) mutiplying by weight (y) and then get\n # sum of rest of division by 11 as interger\n # (we have more than 9 digits so isn't simple as make calcs for CPF)\n r = int(sum([x*y for (x, y) in zip(cnpj, weightlist(len(cnpj)))]) % 11)\n\n # if digit is smaller than 2, turns 0\n if r < 2:\n f = 0\n else:\n f = 11 - r\n\n # append digit to cnpj\n cnpj.append(f)\n\n # if created number is same as original number, cnpj is valid\n return bool(cnpj == self.cnpj)", "def is_valid(self):\n return self is not Sugar.INVALID_SUGAR", "def is_valid_year_number(year_number: int) -> bool:\n if year_number in range(100):\n return True\n else:\n return False", "def validate(self, cnpj):\n return bool(cnpj[-2:] == self.digits(cnpj))", "def is_valid(number):\n try:\n return bool(validate(number))\n except ValidationError:\n return False", "def is_valid(number):\n try:\n return bool(validate(number))\n except ValidationError:\n return False", "def is_valid(number):\n try:\n return bool(validate(number))\n except ValidationError:\n return False", "def is_valid(number):\n try:\n return bool(validate(number))\n except ValidationError:\n return False", "def is_valid(number):\n try:\n return bool(validate(number))\n except ValidationError:\n return False", "def is_valid(number):\n try:\n return bool(validate(number))\n except ValidationError:\n return False", "def validate(self):\n try:\n num = map(int, self.card_number)\n except ValueError:\n raise AuthorizeInvalidError('Credit card number is not valid.')\n if sum(num[::-2] + map(lambda d: sum(divmod(d * 2, 10)), num[-2::-2])) % 10:\n raise AuthorizeInvalidError('Credit card number is not valid.')\n if datetime.now() > self.expiration:\n raise AuthorizeInvalidError('Credit card is expired.')\n if not re.match(r'^[\\d+]{3,4}$', self.cvv):\n raise AuthorizeInvalidError('Credit card CVV is invalid format.')\n if not self.card_type:\n raise AuthorizeInvalidError('Credit card number is not valid.')", "def is_valid_year(year_number):\n\n if (type(year_number) == int) and (START_YEAR <= year_number <= FINAL_YEAR):\n return True\n\n return False" ]
[ "0.759342", "0.70800674", "0.6537341", "0.62727094", "0.62223756", "0.619591", "0.6130835", "0.60933155", "0.6079185", "0.6059149", "0.6046414", "0.60261106", "0.5977034", "0.59302557", "0.5910405", "0.5875142", "0.5865183", "0.5845647", "0.5834872", "0.57638985", "0.57544607", "0.5743286", "0.5742743", "0.5742743", "0.5742743", "0.5742743", "0.5742743", "0.5742743", "0.57110846", "0.57026875" ]
0.85613906
0
Return a dictionary of Currency objects, with key = currency code. Created from info stored in filename
def buildCurrencyDict(filename): currencies = {} with open(os.path.join("input", filename), "rt", encoding="utf8") as f: reader = csv.reader(f) for line in reader: currencies[line[1]] = Currency(line[1], line[0], float(line[2])) return currencies
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def buildCountryDict(filename, currencies_dict):\n # This function requires the currency dictionary to be built already.\n countries = {}\n with open(os.path.join(\"input\", filename), \"rt\", encoding=\"utf8\") as f:\n reader = csv.reader(f)\n for line in reader:\n try:\n countries[line[0]] = Country(line[0], line[14], currencies_dict)\n except KeyError: # If currency isn't found, country won't be added to the dictionary\n continue\n return countries", "def get_currencies():\n currencies = {}\n\n currencies_utilities.fetch_currencies()\n with open(currencies_csv, mode='rU') as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n currencies[row['Code']] = row\n\n return currencies", "def GetCurrencies():\n return GetDataFromCsvFile('currencies.csv')", "def getData(self):\n\n url = 'https://www.ecb.europa.eu/stats/eurofxref/eurofxref-hist.zip'\n try:\n file, _ = urlretrieve(url)\n zip_file_object = zipfile.ZipFile(file, 'r')\n first_file = zip_file_object.namelist()[0]\n file = zip_file_object.open(first_file)\n\n file_handler = []\n for row in file:\n file_handler.append(row.decode())\n\n # getting the currency headers into header_list\n header_list = []\n notFound = True\n x = 0\n while notFound:\n if file_handler[x].startswith('Date'):\n header = file_handler[x].split(',')\n for col in header:\n header_list.append(col.strip())\n notFound = False\n x += 1\n self.currencies = list(filter(None, header_list))\n self.currencies.append('EUR')\n self.currencies = self.currencies[1:] # Removing the \"Date\" entry\n\n data = []\n for row in file_handler[x:]:\n if row.startswith('`\\n'):\n break\n else:\n data.append(list(filter(None, [x.replace('\\n', '') for x in row.split(',')]))) # Removing any empty extra columns at the end of each rows\n\n # filling my self.rates with the currency in the format {CURR: {date: rate, ...}, ...}\n for row in data:\n for i in range(len(self.currencies)):\n try:\n if self.currencies[i] not in self.rates:\n self.rates[self.currencies[i]] = {row[0]: row[i + 1]}\n else:\n self.rates[self.currencies[i]].update({row[0]: row[i + 1]})\n except IndexError:\n # We reached the EUR section\n if self.currencies[i] not in self.rates:\n self.rates[self.currencies[i]] = {row[0]: '1.0000'}\n else:\n self.rates[self.currencies[i]].update({row[0]: '1.0000'})\n\n self.currencies.sort()\n\n except Exception as e:\n print('Failed to process the data')\n print(e)\n finally:\n file.close()", "def load_currencies_codes(data):\n currency_codes = [currency_code for currency_code in data.keys()]\n currency_code_objects = [Currency(data=currency_code)\n for currency_code\n in currency_codes]\n Currency.objects.bulk_create(currency_code_objects)", "def load_currency_info(wf):\n moedas = wf.stored_data(STORED_DATA_CURRENCY_INFO)\n if not moedas:\n log.debug('Loading currency data...')\n moedas = get_currencies()\n wf.store_data(STORED_DATA_CURRENCY_INFO, moedas)\n return moedas", "def getCurrencies():", "def getCityCodeDict():\n\n dictionary = {}\n f = open(filename1,'rb') \n\n dictionary = pickle.load(f)\n f.close()\n return dictionary", "def __call__(self):\n currency_data = getUtility(ICurrencyData)\n currency_data_list = currency_data.currency_data_list()\n results = {}\n for i in currency_data_list:\n results.update({i['code']:i['decimal']})\n return results", "def currencies():\n return _CURRENCIES", "def currency_option(self):\r\n currency = []\r\n with open('MonthlyRate.csv', newline='') as csvfile:\r\n reader = csv.DictReader(csvfile)\r\n for row in reader:\r\n 
currency.append(row['CurrencyCode'])\r\n return currency", "def create_files_dict(csv_file_name: str):\r\n\r\n SKUs = [] # list of SKU's in the csv file\r\n with open(csv_file_name, 'r') as csv_fd:\r\n csv_reader = csv.reader(csv_fd)\r\n for line in csv_reader:\r\n for SKU in line:\r\n SKUs.append(SKU)\r\n\r\n # creating a list of file extensions [.ext, ...]\r\n file_extensions = []\r\n for SKU in SKUs:\r\n for dir_file in os.listdir():\r\n if SKU in os.path.splitext(dir_file)[0]:\r\n dir_file_ext = os.path.splitext(dir_file)[1]\r\n if dir_file_ext not in file_extensions:\r\n file_extensions.append(dir_file_ext)\r\n file_extensions.sort() # sorting by ascii for constant format view\r\n # print(\"debug:::file_extensions\", file_extensions)\r\n\r\n ext_format_dict = {} # base format for creating extension dict (to be copied for each iteration)\r\n for ext in file_extensions:\r\n ext_format_dict[ext] = ''\r\n\r\n files = {}\r\n for filename_base in SKUs:\r\n for dir_file_0 in os.listdir():\r\n current_file_extensions = ext_format_dict.copy() # reset dict values for each file\r\n if filename_base in os.path.splitext(dir_file_0)[0]:\r\n # need to take the dir_file_base and re-iterate over listdir to find all exact name filenames\r\n for dir_file_1 in os.listdir():\r\n if os.path.splitext(dir_file_0)[0] == os.path.splitext(dir_file_1)[0]:\r\n dir_file_base = os.path.splitext(dir_file_1)[0]\r\n dir_file_ext = os.path.splitext(dir_file_1)[1]\r\n if dir_file_ext in list(current_file_extensions.keys()):\r\n current_file_extensions[dir_file_ext] = 'V'\r\n files[dir_file_base] = current_file_extensions\r\n\r\n return files", "def currency_code_mappings():\n return [(a, CURRENCIES[a].name) for a in settings.CURRENCIES]", "def load_cows(filename:str) -> dict:\n dict_of_cow = {}\n\n with open(filename, 'r') as open_file:\n content = open_file.read()\n \n ls_line = content.split('\\n')\n\n for line in ls_line:\n ls_context = line.split(',')\n dict_of_cow[ls_context[0]] = int(ls_context[1])\n\n return dict_of_cow", "def shopping_cost(filename):\n data = []\n with open(filename, \"r\") as f:\n rows = csv.reader(f)\n next(f)\n for i, row in enumerate(rows):\n row[2] = int(row[2])\n row[6] = float(row[6])\n record = {\n 'id': row[0],\n 'account': row[1],\n 'purchased_quantity': row[2],\n 'item_name': row[3],\n 'item_quantity': row[4],\n 'item_unit': row[5],\n 'item_price': row[6],\n 'category': row[7],\n }\n data.append(record)\n\n return data", "def readFile(filename):\n with open(filename) as f:\n name = f.readline().rstrip(\"\\n\")\n d={}\n for line in f:\n line = line.rstrip(\"\\n\")\n (itemName, Quantity, Price)=line.split(\" \")\n d[itemName]=[int(Quantity),int(Price)]\n return name, d", "def getCouponDict(coupon_file):\n file_handle = open(coupon_file,'rb')\n file_reader = csv.DictReader(file_handle)\n\n counter = 0\n coupon_dict = {}\n for row in file_reader:\n coupon_dict[row['COUPON_ID_hash']] = row\n counter += 1\n assert len(coupon_dict.keys()) == counter\n\n file_handle.close()\n return coupon_dict", "def load_cows(filename):\r\n\r\n cow_dict = dict()\r\n\r\n f = open(filename, 'r')\r\n \r\n for line in f:\r\n line_data = line.split(',')\r\n cow_dict[line_data[0]] = int(line_data[1])\r\n return cow_dict", "def load_cows(filename):\r\n\r\n cow_dict = dict()\r\n\r\n f = open(filename, 'r')\r\n\r\n for line in f:\r\n line_data = line.split(',')\r\n cow_dict[line_data[0]] = int(line_data[1])\r\n return cow_dict", "def __init__(self,file_path):\n\t\tdata_reader = 
csv.DictReader(file(file_path,'rU'))\n\t\tfor row in data_reader:\n\t\t\t# we have to turn the strings into floating point numbers.\n\t\t\tc = Compound( name = row['Name'],\n\t\t\t Antoine_params = [float(row['Antoine A']),float(row['Antoine B']),float(row['Antoine C'])],\n\t\t\t mass_density = float(row['Mass Density']),\n\t\t\t MW = float(row['Molecular Weight']),\n\t\t\t #Hvap = float(row['Enthalpy of Vaporization']),\n\t\t\t Cp = float(row['Molar Heat Capacity']) )\n\t\t\t# place it in the dictionary\n\t\t\t#print \"Have just read in \",c\n\t\t\tself[c.name] = c", "def files2dictionary(filename, countryID, supp_dict):\r\n\r\n fh = open(filename)\r\n header = next(fh)\r\n\r\n data_dict = {}\r\n data_dict[countryID] = {}\r\n\r\n numlist = range(1, 36)\r\n agelist = []\r\n for line in fh:\r\n linelist = line.strip().split(\",\")\r\n age = linelist[4]\r\n agelist.append(age)\r\n for icdrep in numlist:\r\n if str(age) not in data_dict[countryID]:\r\n data_dict[countryID][str(age)] = {}\r\n #if str(icdrep) not in data_dict[countryID][str(age)]:\r\n data_dict[countryID][str(age)][str(icdrep)] = float(linelist[icdrep+8]) # look into changing this to one million\r\n else:\r\n data_dict[countryID][str(age)][str(icdrep)] = float(linelist[icdrep+8]) # look into changing this to one million\r\n fh.close()\r\n supp_dict.update(support_counter(header.split(\",\"), agelist, supp_dict))\r\n return data_dict, supp_dict", "def get_source(filename: str) -> dict[str, str]:\n file_path = (\n filename\n or g.ledger.fava_options.default_file\n or g.ledger.beancount_file_path\n )\n source, sha256sum = g.ledger.file.get_source(file_path)\n return {\"source\": source, \"sha256sum\": sha256sum, \"file_path\": file_path}", "def getCityCodeDict():\n \n dictionary = {}\n for input in open(filename1,'r'):\n if input:\n input = input.rstrip() # remove the newline\n input = input.replace('\"','') # replace double quotes with null\n input = input.split(',') # split at the comma \n airport = airlineClasses.Airport() # create new object\n airport.cityCode = input[0] # assign into new object\n airport.city = input[1]\n dictionary[airport.cityCode] = airport # store in dictionary\n return dictionary", "def file_to_dictionary():\n\n return;", "def load_cows(filename):\n\n cow_dict = dict()\n\n f = open(filename, 'r')\n \n for line in f:\n line_data = line.split(',')\n cow_dict[line_data[0]] = int(line_data[1])\n return cow_dict", "def load_cows(filename):\n\n cow_dict = dict()\n\n f = open(filename, 'r')\n \n for line in f:\n line_data = line.split(',')\n cow_dict[line_data[0]] = int(line_data[1])\n return cow_dict", "def getAircraftCodeDict():\n\n dictionary = {}\n f = open(filename2,'rb') \n\n dictionary = pickle.load(f)\n f.close()\n return dictionary", "def load_sample_rates(path: str) -> Dict[str, int]:\n sample_rates = pd.read_csv(path)\n result_dict = {x['filename'].split('.')[0]: x['frame_rate'] for index, x in sample_rates.iterrows()}\n return result_dict", "def get_codecoolers_from_file(cls, file_name):\n constructors = cls.load_data_from_file(file_name)\n\n for constructor in constructors:\n name, surname, login, password, email = constructor\n\n cls(name, surname, login, password, email)", "def get_dictionary(filename):\n asop_dict = {}\n # Defaults for standard observational data\n if 'CMORPH_V1.0.mjodiab_period_3hrmeans.precip.nc' in filename or \\\n 'TRMM_3B42V7A.mjodiab_period_3hrmeans.precip.nc' in filename:\n asop_dict['infile'] = filename\n asop_dict['name'] = ''\n asop_dict['dt'] = 10800\n asop_dict['dx'] 
= 27\n asop_dict['dy'] = 27\n asop_dict['constraint'] = 'precipitation'\n asop_dict['scale_factor'] = 8.0\n asop_dict['legend_name'] = ''\n asop_dict['region'] = [-10,10,60,90]\n asop_dict['box_size'] = 1680\n asop_dict['color'] = 'red'\n asop_dict['region_size'] = 7\n asop_dict['lag_length'] = 6\n asop_dict['grid_type'] = 'native'\n asop_dict['time_type'] = '3hr'\n asop_dict['grid_desc'] = 'native'\n asop_dict['time_desc'] = '3-hourly'\n asop_dict['autocorr_length'] = 60*60*24\n else:\n asop_dict=build_asop_dict(filename)\n return(asop_dict)" ]
[ "0.70550233", "0.6869403", "0.6178886", "0.61106163", "0.59804064", "0.59100264", "0.5908569", "0.59003896", "0.58851796", "0.58286935", "0.58016914", "0.57944894", "0.57516325", "0.5689539", "0.5687478", "0.56543136", "0.56246847", "0.56089044", "0.56031054", "0.5590987", "0.5584396", "0.5555594", "0.5550168", "0.5535903", "0.5505436", "0.5505436", "0.55051535", "0.55051214", "0.5503637", "0.5489732" ]
0.84670454
0
Return a dictionary of Country objects, with key = country name. Created from info stored in filename
def buildCountryDict(filename, currencies_dict): # This function requires the currency dictionary to be built already. countries = {} with open(os.path.join("input", filename), "rt", encoding="utf8") as f: reader = csv.reader(f) for line in reader: try: countries[line[0]] = Country(line[0], line[14], currencies_dict) except KeyError: # If currency isn't found, country won't be added to the dictionary continue return countries
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def buildAirportDict(filename, countries_dict): \n # This function requires the country dictionary to be built already.\n airports = {}\n with open(os.path.join(\"input\", filename), \"rt\", encoding=\"utf8\") as f:\n reader = csv.reader(f)\n for line in reader:\n try:\n airports[line[4]] = Airport(line[4], line[1], line[3], line[2], float(line[6]), float(line[7]), countries_dict)\n except KeyError: # If country isn't found, the airport won't be added to the dictionary\n continue\n return airports", "def get_info_for_country(country: Country) -> List[FileInformation]:\n\n connection_object_dic: Dict[Country, List[FileInformation]] = {\n Country.belgium: [\n FileInformation(\n 'https://epistat.sciensano.be/Data/COVID19BE_CASES_AGESEX.csv',\n PatientCase.positive_to_covid_daily,\n {\n PatientCategory.date: 'DATE',\n PatientCategory.geo_level_1: 'REGION',\n PatientCategory.geo_level_2: 'PROVINCE',\n PatientCategory.age: 'AGEGROUP',\n PatientCategory.sex: 'SEX',\n PatientCategory.total: 'CASES'\n }\n ),\n FileInformation(\n 'https://epistat.sciensano.be/Data/COVID19BE_CASES_MUNI.csv',\n PatientCase.positive_to_covid_daily,\n {\n PatientCategory.date: 'DATE',\n PatientCategory.geo_level_1: 'TX_RGN_DESCR_FR',\n PatientCategory.geo_level_2: 'TX_PROV_DESCR_FR',\n PatientCategory.geo_level_3: 'TX_DESCR_FR',\n PatientCategory.total: 'CASES'\n }\n ),\n FileInformation(\n 'https://epistat.sciensano.be/Data/COVID19BE_HOSP.csv',\n PatientCase.hospitalization_daily_prevalence,\n {\n PatientCategory.date: 'DATE',\n PatientCategory.geo_level_1: 'REGION',\n PatientCategory.geo_level_2: 'PROVINCE',\n PatientCategory.total: 'TOTAL_IN'\n }\n ),\n FileInformation(\n 'https://epistat.sciensano.be/Data/COVID19BE_HOSP.csv',\n PatientCase.hospitalization_respiratory_daily_prevalence,\n {\n PatientCategory.date: 'DATE',\n PatientCategory.geo_level_1: 'REGION',\n PatientCategory.geo_level_2: 'PROVINCE',\n PatientCategory.total: 'TOTAL_IN_RESP'\n }\n ),\n FileInformation(\n 'https://epistat.sciensano.be/Data/COVID19BE_HOSP.csv',\n PatientCase.hospitalization_ecmo_daily_prevalence,\n {\n PatientCategory.date: 'DATE',\n PatientCategory.geo_level_1: 'REGION',\n PatientCategory.geo_level_2: 'PROVINCE',\n PatientCategory.total: 'TOTAL_IN_ECMO'\n }\n ),\n FileInformation(\n 'https://epistat.sciensano.be/Data/COVID19BE_MORT.csv',\n PatientCase.death_daily,\n {\n PatientCategory.date: 'DATE',\n PatientCategory.geo_level_1: 'REGION',\n PatientCategory.sex: 'SEX',\n PatientCategory.age: 'AGEGROUP',\n PatientCategory.total: 'DEATHS'\n }\n )\n ],\n Country.france: [\n FileInformation(\n 'https://www.data.gouv.fr/fr/datasets/r/b4ea7b4b-b7d1-4885-a099-71852291ff20',\n PatientCase.positive_to_covid_daily,\n {\n PatientCategory.date: 'jour',\n PatientCategory.geo_level_1: 'dep',\n PatientCategory.age: 'clage_covid',\n PatientCategory.total: 'nb_pos'\n }\n ),\n FileInformation(\n 'https://www.data.gouv.fr/fr/datasets/r/63352e38-d353-4b54-bfd1-f1b3ee1cabd7',\n PatientCase.hospitalization_ecmo_daily_prevalence,\n {\n PatientCategory.date: 'jour',\n PatientCategory.geo_level_1: 'dep',\n PatientCategory.sex: 'sexe',\n PatientCategory.total: 'hosp'\n }\n )\n ]\n }\n\n return connection_object_dic[country]", "def loadCountryGroupMappingFromFile(file):\n\treturn \\\n\tcompose(\n\t\tdict\n\t , partial(map, lambda line: (line[0], line[2].strip()))\n\t , partial(takewhile, lambda line: len(line) > 2 and line[0] != '')\n\t , lambda t: t[1]\n\t , lambda lines: (pop(lines), lines)\n\t , fileToLines\n \t , partial(join, 
getDataDirectory())\n\t)(file)", "def load_country_code_data():\n name_conversion = {\n 'East Timor': 'Timor-Leste',\n 'Republic of the Congo': 'Congo (Kinshasa)',\n 'Ivory Coast': 'Cote d\\'Ivoire',\n 'Macedonia': 'North Macedonia',\n 'Myanmar': 'Burma',\n 'Republic of Serbia': 'Serbia',\n 'Taiwan': 'Taiwan*',\n 'The Bahamas': 'Bahamas',\n 'United Republic of Tanzania': 'Tanzania',\n 'United States of America': 'US'\n }\n\n shapefile = os.path.join('data', 'ne_110m_admin_0_countries.shp')\n\n gdf = gpd.read_file(shapefile)[['ADMIN', 'ADM0_A3', 'geometry']]\n gdf.columns = ['country', 'country_code', 'geometry']\n\n gdf.loc[gdf['country'].isin(name_conversion.keys()), 'country'] = gdf['country'].map(name_conversion)\n\n return gdf", "def extract_data(file):\n countries = [\n \"Brunei Darussalam\",\n \"Cambodia\",\n \"Indonesia\",\n \"Lao People's Democratic Republic\",\n \"Malaysia\",\n \"Myanmar\",\n \"Philippines\",\n \"Singapore\",\n \"Thailand\",\n \"Viet Nam\",\n ]\n\n data = dict()\n\n with open(file, mode=\"r\") as csv_file:\n csv_reader = csv.DictReader(csv_file)\n\n for row in csv_reader:\n if row[\"Region\"] in countries and row[\"Year\"] == \"2014\":\n value = int(float(row[\"Population\"]))\n data[row[\"Region\"]] = value\n\n return data", "def writeCountryCodeFile(self):\n try:\n geojson = requests.get(self.GEOJSON_URL).json()\n except:\n sys.exit('GeoJSON data unavailable at source.')\n \n country_mapping = {}\n for country in geojson['features']:\n iso_2 = country['properties']['ISO_A2']\n country_name = country['properties']['ADMIN']\n country_mapping.update({country_name: iso_2})\n \n with open('countryNameISO2.json', 'w') as file:\n json.dump(country_mapping, file)", "def files2dictionary(filename, countryID, supp_dict):\r\n\r\n fh = open(filename)\r\n header = next(fh)\r\n\r\n data_dict = {}\r\n data_dict[countryID] = {}\r\n\r\n numlist = range(1, 36)\r\n agelist = []\r\n for line in fh:\r\n linelist = line.strip().split(\",\")\r\n age = linelist[4]\r\n agelist.append(age)\r\n for icdrep in numlist:\r\n if str(age) not in data_dict[countryID]:\r\n data_dict[countryID][str(age)] = {}\r\n #if str(icdrep) not in data_dict[countryID][str(age)]:\r\n data_dict[countryID][str(age)][str(icdrep)] = float(linelist[icdrep+8]) # look into changing this to one million\r\n else:\r\n data_dict[countryID][str(age)][str(icdrep)] = float(linelist[icdrep+8]) # look into changing this to one million\r\n fh.close()\r\n supp_dict.update(support_counter(header.split(\",\"), agelist, supp_dict))\r\n return data_dict, supp_dict", "def getCityCodeDict():\n\n dictionary = {}\n f = open(filename1,'rb') \n\n dictionary = pickle.load(f)\n f.close()\n return dictionary", "def buildCurrencyDict(filename): \n currencies = {}\n with open(os.path.join(\"input\", filename), \"rt\", encoding=\"utf8\") as f:\n reader = csv.reader(f)\n for line in reader:\n currencies[line[1]] = Currency(line[1], line[0], float(line[2]))\n return currencies", "def json_parsing():\n with open('countries.json') as f:\n countries = json.load(f)\n\n return countries", "def get_country_data(\n url=default_url,\n filename=default_data_file,\n force=False,\n corrections=corrections,\n):\n download_country_data(url, filename, force)\n df = read_country_csv(filename)\n df = correct_extremes(df, corrections)\n country_dict = convert_to_dict(df)\n return country_dict", "def load_country_names(data):\n country_names = [country['name'] for country in data]\n country_name_objects = [CountryName(data=country_name)\n for country_name\n in 
country_names]\n CountryName.objects.bulk_create(country_name_objects)", "def associate_timezones_to_countries(self):\n\t\t\n\t\tresult = {}\n\t\twith open(\"/usr/share/zoneinfo/zone.tab\", \"r\") as f:\n\t\t\tfor line in f.readlines():\n\t\t\t\tif line[0] == \"#\": continue\n\t\t\t\t\n\t\t\t\tline = line.replace(\"\\n\",\"\").split(\"\\t\")\n\t\t\t\tif not line[0] in result: result[line[0]] = line[2]\n\t\t\n\t\treturn result", "def test_build_map_dict_by_name():\n gdpinfo = {\n \"gdpfile\": \"isp_gdp.csv\",\n \"separator\": \",\",\n \"quote\": '\"',\n \"min_year\": 1960,\n \"max_year\": 2015,\n \"country_name\": \"Country Name\",\n \"country_code\": \"Country Code\"\n }\n\n # Get pygal country code map\n pygal_countries = {'KEN':'Kenya', 'IDN':'Indonesia'}\n\n # 1960\n res = build_map_dict_by_name(gdpinfo, pygal_countries, \"1960\")\n print(res)", "def convert(dictCountry):\n\treturn Country(dictCountry['name'], dictCountry['code'])", "def _get_info_from_filename(filename: str) -> dict:\n *parts, suffix = filename.split('.')\n dct = re.match(r'^(?P<name>[A-z0-9.]*)(-(?P<num_rows>[0-9]+))?$', '.'.join(parts)).groupdict()\n return {\n 'name': dct['name'],\n 'num_rows': int(dct['num_rows']) if dct['num_rows'] else None,\n 'format': suffix,\n }", "def get_countryes(db_name=_db_indicators, country_txt_file=os.path.join('Source', 'work_countries.txt')):\n imf = cmm.READ_DB(db_name=None)\n country_list = cmm.read_countries(file_name=country_txt_file)\n print('CREATE IMF: reading countries from all neede datasets...', end=' ')\n coni = sa.create_engine('sqlite+pysqlite:///{db_name}'.format(db_name=db_name))\n dbSETS=pd.read_sql('SELECT DISTINCT Dataset from {INDI_NAME}'.format(INDI_NAME=cmm.strINDI_db_name), con=coni)\n\n cntrl=list()\n\n for k, d in dbSETS.iterrows():\n try:\n cntrl.append(pd.DataFrame(imf.get_datastructure_list(d['Dataset'])['Geographical Areas']).set_index('CL_AREA_{}'.format(d['Dataset'])))\n except KeyError:\n pass\n\n # pdfC = pd.concat([pd.DataFrame(imf.get_datastructure_list(d['Dataset'])['Geographical Areas']).set_index('CL_AREA_{}'.format(d['Dataset'])) for k, d in dbSETS.iterrows() ])\n pdfC = pd.concat(cntrl)\n\n pdfC=pdfC[pdfC.index.isin(country_list)]\n pdfC = pdfC[~pdfC.index.duplicated()]\n pdfC.index.name='id'\n pdfC=pdfC.rename(columns={'Geographical Areas':'Country'})\n print('done reading countries', end='\\n')\n return pdfC\n\n\n #print(dbSETS)", "def updateCountryNames(self):\n try:\n with open('countryNameMapping.json', 'r') as file:\n name_mapping = json.loads(file.read())\n except:\n sys.exit('countryNameMapping.json file is unavailable in current directory.')\n \n for key, value in name_mapping.items():\n self.covid_df.replace(key, value, inplace=True)\n \n try:\n with open('countryNameISO2.json', 'r') as file:\n self.name_iso2_mapping = json.loads(file.read())\n except:\n print('countryNameISO2.json file is unavailable in current directory, creating file...')\n self.writeCountryCodeFile()\n print('Re-importing required JSONs...')\n self.updateCountryNames()", "def findCountryCode(self):\n RecordsWithCountry = []\n for state in pycountry.subdivisions:\n #print(state.name)\n for record in self.Records: \n if state.name == record.state:\n #print(state.country, record.state)\n r = RecordCountry(date=record.date,\n country=state.country.alpha_3,\n impressions=record.impressions,\n CTR=record.CTR)\n self.Records.remove(record)\n RecordsWithCountry.append(r)\n for record in self.Records: \n r = RecordCountry(date=record.date,\n country=\"XXX\",\n 
impressions=record.impressions,\n CTR=record.CTR)\n RecordsWithCountry.append(r)\n self.Records = RecordsWithCountry", "def getFlightDict():\n\n dictionary = {}\n f = open(filename3,'rb') \n\n dictionary = pickle.load(f)\n f.close()\n return dictionary", "def pre_lookup(self, file):\n return {}", "def build_filedic(data_path, lanczos_path):\n filedic = {'CERA': sorted(glob.glob(data_path + 'CERA20C/*.nc')),\n 'lanczos(CERA)': sorted(glob.glob(lanczos_path + 'CERA_7*.nc')),\n 'lanczos(20CR)': sorted(glob.glob(lanczos_path + '20CRv3_5*.nc'))}\n return filedic", "def file_to_dictionary():\n\n return;", "def extract_data(file_name):\n population_data = {\n \"gTitle\": \"SAARC Countries Population For Year 2004 - 2014\",\n \"xLabels\": [\n \"2004\",\n \"2005\",\n \"2006\",\n \"2007\",\n \"2008\",\n \"2009\",\n \"2010\",\n \"2011\",\n \"2012\",\n \"2013\",\n \"2014\",\n ],\n \"xText\": \"Years\",\n \"yText\": \"Population in millions\",\n \"data\": [],\n }\n temp = {}\n with open(file_name, mode=\"r\") as csv_file:\n csv_reader = csv.DictReader(csv_file)\n\n for row in csv_reader:\n if (\n row[\"Region\"] in saarc_countries\n and row[\"Year\"] in population_data[\"xLabels\"]\n ):\n value = float(row[\"Population\"])\n temp[row[\"Year\"]] = temp.get(row[\"Year\"], 0) + value\n\n for val in population_data[\"xLabels\"]:\n population_data[\"data\"].append(int((temp[val] / 1000)))\n\n return population_data", "def get_province_info(self, data, filename):\n\n number, name = self.split_file_name(filename)\n number = int(number)\n\n if \"owner\" in data:\n tag = data[\"owner\"]\n if data[\"owner\"] not in self.country_dict:\n self.add_tag(tag)\n self.country_dict[tag][\"province_count\"] += 1\n self.country_dict[tag][\"dev_tax\"] += int(data[\"base_tax\"])\n self.country_dict[tag][\"dev_production\"] += int(data[\"base_production\"])\n self.country_dict[tag][\"dev_manpower\"] += int(data[\"base_manpower\"])\n \n if \"hre\" in data and data[\"hre\"] == \"yes\":\n self.hre_dict[number] = True\n else:\n self.hre_dict[number] = False\n self.name_dict[number] = name", "def create_countries(name_countries,origin='united kingdom',beta=0.2,gamma=0.1,I0=10,Horizon=horizon):\n countries = []\n for country in name_countries:\n if country == origin:\n c = Country(name=country,N=df_countries['population'].loc[country],beta=beta,gamma=gamma,I0=I0,H=Horizon)\n else:\n c = Country(name=country,N=df_countries['population'].loc[country],beta=beta,gamma=gamma,I0=0,H=Horizon)\n countries.append(c)\n return countries", "def populate_countries(self):\n # For each country in population.\n for name, pop in self.population.iterrows():\n p = pop['Population']\n # Get all relevant time series based on country name.\n c = self.raw_confirmed.loc[self.raw_confirmed['Country/Region'] == name].sum(numeric_only=True)\n d = self.raw_deceased.loc[self.raw_deceased['Country/Region'] == name].sum(numeric_only=True)\n r = self.raw_recovered.loc[self.raw_recovered['Country/Region'] == name].sum(numeric_only=True)\n # Create new country object.\n self.countries.append(country.Country(name, p, c, d, r))", "def getCityCodeDict():\n \n dictionary = {}\n for input in open(filename1,'r'):\n if input:\n input = input.rstrip() # remove the newline\n input = input.replace('\"','') # replace double quotes with null\n input = input.split(',') # split at the comma \n airport = airlineClasses.Airport() # create new object\n airport.cityCode = input[0] # assign into new object\n airport.city = input[1]\n dictionary[airport.cityCode] = airport # store in 
dictionary\n return dictionary", "def get_dictionary(filename):\n asop_dict = {}\n # Defaults for standard observational data\n if 'CMORPH_V1.0.mjodiab_period_3hrmeans.precip.nc' in filename or \\\n 'TRMM_3B42V7A.mjodiab_period_3hrmeans.precip.nc' in filename:\n asop_dict['infile'] = filename\n asop_dict['name'] = ''\n asop_dict['dt'] = 10800\n asop_dict['dx'] = 27\n asop_dict['dy'] = 27\n asop_dict['constraint'] = 'precipitation'\n asop_dict['scale_factor'] = 8.0\n asop_dict['legend_name'] = ''\n asop_dict['region'] = [-10,10,60,90]\n asop_dict['box_size'] = 1680\n asop_dict['color'] = 'red'\n asop_dict['region_size'] = 7\n asop_dict['lag_length'] = 6\n asop_dict['grid_type'] = 'native'\n asop_dict['time_type'] = '3hr'\n asop_dict['grid_desc'] = 'native'\n asop_dict['time_desc'] = '3-hourly'\n asop_dict['autocorr_length'] = 60*60*24\n else:\n asop_dict=build_asop_dict(filename)\n return(asop_dict)", "def regions_json(self, filename):\n with open(filename) as f:\n return json.load(f)" ]
[ "0.65897286", "0.64826685", "0.6389784", "0.6383171", "0.6335359", "0.62931234", "0.6252615", "0.62445325", "0.6109666", "0.6080601", "0.6028311", "0.60078806", "0.5918966", "0.588614", "0.5876146", "0.5860082", "0.5792945", "0.576631", "0.57317054", "0.5713805", "0.5710851", "0.5683025", "0.56804025", "0.56711507", "0.5658149", "0.5641446", "0.5625927", "0.5605909", "0.56052184", "0.5604858" ]
0.74422234
0
Return a dictionary of Airport objects, with key = airport code. Created from info stored in filename
def buildAirportDict(filename, countries_dict): # This function requires the country dictionary to be built already. airports = {} with open(os.path.join("input", filename), "rt", encoding="utf8") as f: reader = csv.reader(f) for line in reader: try: airports[line[4]] = Airport(line[4], line[1], line[3], line[2], float(line[6]), float(line[7]), countries_dict) except KeyError: # If country isn't found, the airport won't be added to the dictionary continue return airports
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def triplify(self):\n airports = {}\n with open(self.source_file_path, 'r') as csvfile:\n reader = csv.reader(csvfile, delimiter=\",\")\n for i, row in enumerate(reader):\n if i != 0:\n # even if it says that data is encoded to latin-1, it actually\n # contains a lot of unicode characters\n airport_name = re.sub('[\\s,\\.\\(\\)\\'/\\\\\\]', '_', row[1]).decode('utf-8')\n country = re.sub('[\\s,\\.\\(\\)\\']', '_', row[3])\n location, icao = row[4], row[5]\n lat, long, alt = row[6], row[7], row[8]\n airports_data = dict(country=country,\n icao=icao,\n lat=lat,\n long=long,\n alt=alt)\n if country == \"United_States\":\n airports_data[\"faa\"] = location\n else:\n airports_data[\"iata\"] = location\n airports[airport_name] = {}\n for key, value in airports_data.iteritems():\n if value and value != \"\\\\N\":\n airports[airport_name].update({key: value})\n return airports", "def getCityCodeDict():\n \n dictionary = {}\n for input in open(filename1,'r'):\n if input:\n input = input.rstrip() # remove the newline\n input = input.replace('\"','') # replace double quotes with null\n input = input.split(',') # split at the comma \n airport = airlineClasses.Airport() # create new object\n airport.cityCode = input[0] # assign into new object\n airport.city = input[1]\n dictionary[airport.cityCode] = airport # store in dictionary\n return dictionary", "def read_airports():\n with open('../cities_with_airports.json', 'r') as data:\n airport_file = json.load(data)\n airport_dictionary = {}\n for city in airport_file:\n airport_dictionary[city['city']] = {\n 'lat_lon': city['lat_lon'],\n 'connections': city['destination_cities']\n }\n return airport_dictionary", "def getAircraftCodeDict():\n\n dictionary = {}\n f = open(filename2,'rb') \n\n dictionary = pickle.load(f)\n f.close()\n return dictionary", "def read_airports(airports_source: TextIO) -> AirportDict:\n #AirportDict = Dict[str, List[str]]\n \n airports_list = airports_source.readlines()\n d = {}\n iata_index = AIRPORT_DATA_INDEXES['IATA']\n \n i = 0\n while i < len(airports_list):\n num_comma = 0\n comma_index = 0 \n while num_comma < iata_index:\n comma_index = airports_list[i].find(',', comma_index)\n num_comma += 1\n comma_index += 1\n iata = airports_list[i][comma_index + 1: \\\n airports_list[i].find(',', comma_index) - 1]\n \n if iata != '\"\"' and iata != \"\\\\N\":\n d[iata] = [get_airports_information(airports_list[i], \\\n AIRPORT_DATA_INDEXES['Airport ID']), get_airports_information(airports_list[i], \\\n AIRPORT_DATA_INDEXES['Name']), get_airports_information(airports_list[i], \\\n AIRPORT_DATA_INDEXES['City']), get_airports_information(airports_list[i], \\\n AIRPORT_DATA_INDEXES['Country']), get_airports_information(airports_list[i],\\\n AIRPORT_DATA_INDEXES['IATA']), get_airports_information(airports_list[i], \\\n AIRPORT_DATA_INDEXES['ICAO']), get_airports_information(airports_list[i], \\\n AIRPORT_DATA_INDEXES['Latitude']), get_airports_information(airports_list[i],\\\n AIRPORT_DATA_INDEXES['Longitude']), get_airports_information(airports_list[i],\\\n AIRPORT_DATA_INDEXES['Altitude']), get_airports_information(airports_list[i], \\\n AIRPORT_DATA_INDEXES['Timezone']), get_airports_information(airports_list[i], \\\n AIRPORT_DATA_INDEXES['DST']), get_airports_information(airports_list[i], \\\n AIRPORT_DATA_INDEXES['Tz']), get_airports_information(airports_list[i], \\\n AIRPORT_DATA_INDEXES['Type']), get_airports_information(airports_list[i], \\\n AIRPORT_DATA_INDEXES['Source'])]\n \n i += 1\n \n return d", "def get_airports():\n 
iata_to_city = {}\n with open('./airports.txt') as f:\n for line in f.readlines():\n line = line.strip()\n\n if len(line) < 5:\n continue\n\n r = line.strip().split(',')[0]\n r = r.replace(' ', '')\n iata, city = r.split('-', 1)\n\n if iata_to_city.get(iata) is None:\n iata_to_city[iata] = city\n\n return iata_to_city", "def getAircraftCodeDict():\n table = 'aircraft'\n connection = openConnection()\n curs = connection.cursor()\n sqlcmd = \"SELECT * FROM \" + str(table)\n d = {}\n \n curs.execute(sqlcmd)\n for row in curs.fetchall():\n aircraft = airlineClasses.Aircraft()\n aircraft.aircraftCode = row[0]\n aircraft.name = row[1]\n d[aircraft.aircraftCode] = aircraft\n \n curs.close()\n connection.close()\n return d", "def getFlightDict():\n\n dictionary = {}\n f = open(filename3,'rb') \n\n dictionary = pickle.load(f)\n f.close()\n return dictionary", "def extract_airports(filename, store):\n print filename\n f = open(filename, 'r')\n text = f.read()\n f.close()\n \n if store:\n ## Database connection, db, collection\n conn = pymongo.Connection()\n db=conn.flight_db\n ap = db.airports\n\n airport_list = []\n \n ## extract city,country,airport code\n #match = re.findall(r'<td\\s*class=\\\"city sorted\\\">(.*?)<\\/td>\\s+<td\\s*class=\\\"country\\\">(\\w+?)</td>\\s+<td\\s*class=\\\"code\\\"><a\\s*href=.+\\\">(\\w+?)</a></td>\\s+', text)\n match = re.findall(r'<td\\s*class=\\\"city sorted\\\">(.*?)<\\/td>\\s+<td\\s*class=\\\"country\\\">(\\w+?)</td>\\s+<td\\s*class=\\\"code\\\"><a\\s*href=.+\\\">(\\w+?)</a><span\\s*style=.*', text)\n if not match:\n print 'airport:rank not found...'\n exit(1)\n for tuples in match:\n if store:\n ap.insert({\n 'city':tuples[0],\n 'country':tuples[1],\n 'code':tuples[2]\n })\n airport_list.append(tuples[0] + ', ' + tuples[1] + ' - ' + tuples[2])\n if store:\n conn.disconnect()\n return airport_list", "def buildAircraft(plane):\n aircraftDict = {}\n with open('aircraft.csv', newline='', encoding=\"utf8\") as airplane_file: # opens the csv file\n reader = csv.reader(airplane_file) # reads the cotents to a variable\n next(reader, None) # returns none at the end of the file\n for airplane in reader: # iterates through the reader\n if airplane[0] == plane:\n if airplane[2] == \"imperial\":\n airRange = int(airplane[4]) * 1.609\n else:\n airRange = airplane[4]\n aircraftDict[airplane[0]] = Aircraft.Aircraft(airplane[0], airplane[3], airRange)\n if len(aircraftDict) == 0:\n return False\n else:\n return aircraftDict", "def getCityCodeDict():\n\n dictionary = {}\n f = open(filename1,'rb') \n\n dictionary = pickle.load(f)\n f.close()\n return dictionary", "def get_dictionary(filename):\n asop_dict = {}\n # Defaults for standard observational data\n if 'CMORPH_V1.0.mjodiab_period_3hrmeans.precip.nc' in filename or \\\n 'TRMM_3B42V7A.mjodiab_period_3hrmeans.precip.nc' in filename:\n asop_dict['infile'] = filename\n asop_dict['name'] = ''\n asop_dict['dt'] = 10800\n asop_dict['dx'] = 27\n asop_dict['dy'] = 27\n asop_dict['constraint'] = 'precipitation'\n asop_dict['scale_factor'] = 8.0\n asop_dict['legend_name'] = ''\n asop_dict['region'] = [-10,10,60,90]\n asop_dict['box_size'] = 1680\n asop_dict['color'] = 'red'\n asop_dict['region_size'] = 7\n asop_dict['lag_length'] = 6\n asop_dict['grid_type'] = 'native'\n asop_dict['time_type'] = '3hr'\n asop_dict['grid_desc'] = 'native'\n asop_dict['time_desc'] = '3-hourly'\n asop_dict['autocorr_length'] = 60*60*24\n else:\n asop_dict=build_asop_dict(filename)\n return(asop_dict)", "def parse_file(file_path):\n stations = 
defaultdict(dict) # Spares me 2 lines inside that loop\n for line in open(file_path):\n if '->' in line:\n origin, destiny = map(str.strip, line.split('->'))\n elif '-' in line:\n stations[origin][destiny] = [int(i) for i in line.split('-')]\n return stations", "def file_to_dictionary():\n\n return;", "def create_pokedex(filepath):\n try:\n with open(filepath, 'r') as file:\n reader = csv.DictReader(file)\n pokedex = dict()\n for row in reader:\n pokedex[row[\"Name\"]] = create_entry(row[\"#\"], row[\"Name\"], row[\"Type 1\"], row[\"Type 2\"], row[\"HP\"],\n row[\"Attack\"], row[\"Defense\"], row[\"Sp. Atk\"], row[\"Sp. Def\"],\n row[\"Speed\"], row[\"Generation\"], row[\"Legendary\"])\n\n return pokedex\n\n except FileNotFoundError as e:\n return dict()", "def getIteneraryData(self,itList,allData):\n \n for key in itList[0]: # ------ Tuple's 0 position has airports and 1 position has aircraft\n self.__airports[key] = allData.get(key) # ----- Dictionary with required fields\n return self.__airports", "def getCityCodeDict():\n table = 'airport'\n connection = openConnection()\n curs = connection.cursor()\n sqlcmd = \"SELECT * FROM \" + str(table)\n d = {}\n \n curs.execute(sqlcmd)\n for row in curs.fetchall():\n airport = airlineClasses.Airport()\n airport.cityCode = row[0]\n airport.city = row[1] \n d[airport.cityCode] = airport\n \n curs.close()\n connection.close()\n return d", "def airport_file_to_df(self):\n\t\tdf = pd.read_csv(\n\t\t\tfilepath_or_buffer=os.path.join(ROOT_DIR, \"raw\", \"airports.csv\".format(self.year)),\n\t\t\tsep=\",\", encoding=\"utf-8\",\n\t\t\tusecols=[\"iata\", \"airport\", \"city\", \"state\", \"country\", \"lat\", \"long\"]\n\t\t)\n\n\t\treturn df", "def build_filedic(data_path, lanczos_path):\n filedic = {'CERA': sorted(glob.glob(data_path + 'CERA20C/*.nc')),\n 'lanczos(CERA)': sorted(glob.glob(lanczos_path + 'CERA_7*.nc')),\n 'lanczos(20CR)': sorted(glob.glob(lanczos_path + '20CRv3_5*.nc'))}\n return filedic", "def airport_info(airport_code):\n\n r = requests.get(\"{}AirportBoards\".format(FA_ENDPOINT), auth=(USERNAME,FA_KEY), params={\n \"airport_code\":airport_code,\n \"type\":\"departures\",\n \"howMany\": 100\n })\n\n return r", "def getFlightDict():\n table = 'flights'\n connection = openConnection()\n curs = connection.cursor()\n sqlcmd = \"SELECT * FROM \" + str(table)\n d = {}\n \n curs.execute(sqlcmd)\n for row in curs.fetchall():\n flight = airlineClasses.Flight()\n flight.id = row[0]\n flight.flightnum = row[1]\n flight.departCity = row[2]\n flight.arriveCity = row[3]\n flight.departTime = row[4]\n flight.departDay = row[5]\n flight.arriveTime = row[6]\n flight.arriveDay = row[7]\n flight.cost = row[8]\n flight.code = row[9]\n d[flight.id] = flight\n \n curs.close()\n connection.close()\n return d", "def wac_to_dict(file_path: str) -> dict:\n\n weather_dict = {'longitude': '',\n 'latitude': '',\n 'altitude': '',\n 'time': [],\n 'temperature': [],\n 'relative_humidity': [],\n 'horizontal_global_solar_radiation': [],\n 'diffuse_horizontal_solar_radiation': [],\n 'air_pressure': [],\n 'vertical_rain': [],\n 'wind_direction': [],\n 'wind_speed': [],\n 'cloud_index': [],\n 'atmospheric_counter_horizontal_long_wave_radiation': [],\n 'atmospheric_horizontal_long_wave_radiation': [],\n 'ground_temperature': [],\n 'ground_reflectance': []\n }\n\n file_obj = open(file_path, 'r')\n file_lines = file_obj.readlines()\n file_obj.close()\n\n weather_dict['longitude'] = float(file_lines[4].split('\\t')[0].strip())\n weather_dict['latitude'] = 
float(file_lines[5].split('\\t')[0].strip())\n weather_dict['altitude'] = float(file_lines[6].split('\\t')[0].strip())\n\n for line in file_lines[12:]:\n splitted_line = line.split('\\t')\n weather_dict['time'].append(datetime.datetime.strptime(splitted_line[0].strip(), '%Y-%m-%d %H:%M'))\n weather_dict['temperature'].append(float(splitted_line[1].strip()))\n weather_dict['relative_humidity'].append(float(splitted_line[2].strip()))\n weather_dict['horizontal_global_solar_radiation'].append(float(splitted_line[3].strip()))\n weather_dict['diffuse_horizontal_solar_radiation'].append(float(splitted_line[4].strip()))\n weather_dict['air_pressure'].append(float(splitted_line[5].strip()))\n weather_dict['vertical_rain'].append(float(splitted_line[6].strip()))\n weather_dict['wind_direction'].append(float(splitted_line[7].strip()))\n weather_dict['wind_speed'].append(float(splitted_line[8].strip()))\n weather_dict['cloud_index'].append(float(splitted_line[9].strip()))\n weather_dict['atmospheric_counter_horizontal_long_wave_radiation'].append(float(splitted_line[10].strip()))\n weather_dict['atmospheric_horizontal_long_wave_radiation'].append(float(splitted_line[11].strip()))\n weather_dict['ground_temperature'].append(float(splitted_line[12].strip()))\n weather_dict['ground_reflectance'].append(float(splitted_line[13].strip()))\n\n return weather_dict", "def get_archer(self):\n\n # Format URL\n url = f'http://tropic.ssec.wisc.edu/real-time/adt/archive{self.year}/{self.id[2:4]}{self.id[1]}-list.txt'\n\n # Read in data\n a = requests.get(url).content.decode(\"utf-8\")\n content = [[c.strip() for c in b.split()] for b in a.split('\\n')]\n # data = [[dt.strptime(line[0]+'/'+line[1][:4],'%Y%b%d/%H%M'),-1*float(line[-4]),float(line[-5])] for line in content[-100:-3]]\n archer = {}\n for name in ['time', 'lat', 'lon', 'mnCldTmp']:\n archer[name] = []\n for i, line in enumerate(content):\n try:\n ndx = ('MWinit' in line[-1])\n archer['time'].append(dt.strptime(\n line[0] + '/' + line[1][:4], '%Y%b%d/%H%M'))\n archer['lat'].append(float(line[-5 - ndx]))\n archer['lon'].append(-1 * float(line[-4 - ndx]))\n archer['mnCldTmp'].append(float(line[-9 - ndx]))\n except:\n continue\n self.archer = archer\n\n return archer", "def create_pokedex(filepath):\n try:\n file = open(filepath, 'r')\n except FileNotFoundError:\n return {}\n else:\n pokedex = {}\n for line in file:\n stats = line.rstrip().split(\",\")\n if stats[0].isdigit():\n stats.pop(4)\n if stats[11] == \"True\":\n legendary = True\n else:\n legendary = False\n pokedex[stats[1]] = create_entry(int(stats[0]), stats[1], stats[2], stats[3], int(stats[4]), int(stats[5]), int(stats[6]), int(stats[7]), int(stats[8]), int(stats[9]), int(stats[10]), legendary)\n file.close()\n return pokedex", "def data_petrol_stations():\n petrol_stations = {}\n with codecs.open('azs.txt', 'r', encoding='UTF-8') as file_in:\n for string in file_in.readlines():\n string = string.split()\n station_number = int(string[0])\n queue_length = int(string[1])\n petrol_stations[station_number] = {}\n petrol_stations[station_number]['queue'] = queue_length\n petrol_stations[station_number]['kinds'] = string[2:]\n\n return petrol_stations", "def airports(osm_path): \n return (retrieve(osm_path,'multipolygons',['aeroway'],**{'aeroway':[\"='aerodrome'\"]})).rename(columns={'aeroway': 'asset'})", "def files2dictionary(filename, countryID, supp_dict):\r\n\r\n fh = open(filename)\r\n header = next(fh)\r\n\r\n data_dict = {}\r\n data_dict[countryID] = {}\r\n\r\n numlist = range(1, 36)\r\n 
agelist = []\r\n for line in fh:\r\n linelist = line.strip().split(\",\")\r\n age = linelist[4]\r\n agelist.append(age)\r\n for icdrep in numlist:\r\n if str(age) not in data_dict[countryID]:\r\n data_dict[countryID][str(age)] = {}\r\n #if str(icdrep) not in data_dict[countryID][str(age)]:\r\n data_dict[countryID][str(age)][str(icdrep)] = float(linelist[icdrep+8]) # look into changing this to one million\r\n else:\r\n data_dict[countryID][str(age)][str(icdrep)] = float(linelist[icdrep+8]) # look into changing this to one million\r\n fh.close()\r\n supp_dict.update(support_counter(header.split(\",\"), agelist, supp_dict))\r\n return data_dict, supp_dict", "def read_ics(self, filename, lat_long_data):\n with open(os.path.join(self.zoneinfo_path, filename), \"r\") as zone:\n zoneinfo = zone.readlines()\n\n with open(os.path.join(self.zoneinfo_pure_path, filename), \"r\") as zone:\n zoneinfo_pure = zone.readlines()\n\n ics_data = []\n for i in range(0, len(zoneinfo)):\n line = zoneinfo[i]\n key = line[:line.find(\":\")]\n\n if key == \"BEGIN\":\n if line != \"BEGIN:VCALENDAR\\r\\n\":\n ics_data.append(line)\n elif key == \"END\":\n if line != \"END:VCALENDAR\\r\\n\":\n ics_data.append(line)\n elif key in (\"TZID\", \"TZOFFSETFROM\", \"TZOFFSETTO\", \"TZNAME\", \"DTSTART\"):\n ics_data.append(line)\n elif key == \"RRULE\":\n if line == zoneinfo_pure[i]:\n ics_data.append(line)\n else:\n sys.stderr.write(\"Using pure version of %s\\n\" % filename[:-4])\n ics_data.append(zoneinfo_pure[i])\n\n zone_data = {\n \"ics\": \"\".join(ics_data).rstrip()\n }\n zone_name = filename[:-4]\n if zone_name in lat_long_data:\n zone_data[\"latitude\"] = lat_long_data[zone_name][0]\n zone_data[\"longitude\"] = lat_long_data[zone_name][1]\n\n return zone_data", "def buildCountryDict(filename, currencies_dict):\n # This function requires the currency dictionary to be built already.\n countries = {}\n with open(os.path.join(\"input\", filename), \"rt\", encoding=\"utf8\") as f:\n reader = csv.reader(f)\n for line in reader:\n try:\n countries[line[0]] = Country(line[0], line[14], currencies_dict)\n except KeyError: # If currency isn't found, country won't be added to the dictionary\n continue\n return countries", "def return_restaurant_rating_dictionary(filename):\n\n the_file = open(filename)\n\n for line in the_file:\n line = line.rstrip()\n ratings = line.split(\":\")\n\n restaurant_name = ratings[0]\n rating = ratings[1]\n restaurant_ratings[restaurant_name] = rating\n\n return restaurant_ratings" ]
[ "0.70951945", "0.7048426", "0.68105704", "0.67958844", "0.67303306", "0.660992", "0.6241538", "0.6214554", "0.60528135", "0.5989924", "0.59378284", "0.5857952", "0.58015823", "0.55762887", "0.55647373", "0.554717", "0.5545705", "0.5532614", "0.551982", "0.5506063", "0.54969645", "0.54827505", "0.5481052", "0.54598033", "0.54567987", "0.54193264", "0.53734374", "0.53562224", "0.53535634", "0.5348295" ]
0.7226677
0
Return a list of routes from a file, in the format [name, [airport code list]]. Return None if file not found.
def getRouteInputFile(filename): if filename[-4:] != ".csv": # Make sure the filename is a .csv return None routes = [] try: with open(os.path.join("input", filename), "rt", encoding="utf8") as f: reader = csv.reader(f) for line in reader: try: routes.append([line[0], line[1:]]) except (UnicodeDecodeError, IndexError): # skip blank lines and lines with invalid characters continue except (FileNotFoundError, OSError): return None return routes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_routes_file(route_filename):\n\n list_route_descriptions = []\n tree = ET.parse(route_filename)\n for route in tree.iter(\"route\"):\n route_town = route.attrib['map']\n route_id = route.attrib['id']\n waypoint_list = [] # the list of waypoints that can be found on this route\n for waypoint in route.iter('waypoint'):\n waypoint_list.append(waypoint) # Waypoints is basically a list of XML nodes\n\n list_route_descriptions.append({\n 'id': route_id,\n 'town_name': route_town,\n 'trajectory': waypoint_list\n })\n\n return list_route_descriptions", "def bus_routes():\n route_list = []\n os.chdir(\"../Data\")\n for file in glob.glob(\"*.csv\"):\n print(file)\n reader = csv.reader(open(file))\n for line in reader:\n route=extract_bus_route(line[3]) #Journey ID field\n if route not in route_list and route!=\"\": #error handling for extract_bus_routes function\n route_list.append(route)\n return route_list", "def get_routers(filename):\n with open(filename, 'r') as json_file:\n add_list = json.loads(json_file.read())\n print(\"Addresses :\" + str(add_list))\n routers = [telnet_router.TN_ROUTER(router['device'], router['username'], \\\n router['password'], router['en_password']) for router in add_list]\n return routers", "def retrieveRouteData(filename=\"sampleroutes.txt\"):\n with open(filename, \"r\") as f:\n return f.read()", "def bus_routes_direction():\n route_list = []\n os.chdir(\"../Data\")\n for file in glob.glob(\"*.csv\"):\n print(file) #useful for monitoring progress of function\n reader = csv.reader(open(file))\n for line in reader:\n route = extract_route_and_direction(line[3]) # Journey ID field\n if route not in route_list and route != \"\": # error handling for extract_bus_routes function\n route_list.append(route)\n return route_list", "def load_rooms(self, filename):\n with open(filename, \"r\") as f:\n roomss = []\n for line in f:\n line = line.strip()\n\n # Add id, name and description to each room object\n if line.isdigit():\n id = line\n line = f.readline()\n line = line.strip()\n name = line\n line = f.readline()\n line = line.strip()\n description = line\n room = Room(id, name, description)\n roomss.append(room)\n\n # Add the connected routes to the room\n elif line.isupper():\n line = line.split()\n direction = line[0]\n room_number = line[1]\n\n # Add multiple routes to a direction if needed\n if not direction in roomss[-1].connection:\n roomss[-1].connection[direction] = [room_number]\n else:\n roomss[-1].connection[direction].append(room_number)\n return roomss", "def mbta_route_list():\n f = open('complete_routes.txt', 'r')\n complete_routes = ast.literal_eval(f.read())\n\n #creates list of all route_ids in MBTA system\n subway_route_list = []\n for x in range(len(complete_routes['mode'])):\n if complete_routes['mode'][x]['mode_name'] == 'Subway':\n for y in range(len(complete_routes['mode'][x]['route'])):\n subway_route_list.append(complete_routes['mode'][x]['route'][y]['route_id'])\n\n #removes duplicates from list and returns\n return list(OrderedDict.fromkeys(subway_route_list))", "def get_routes():\n\n return Db().get_line_ids()", "def get_passports(filename):\n with open(filename) as f:\n content = f.read().split(\"\\n\\n\")\n passports = [dict(token_regex.findall(line)) for line in content]\n return passports", "def get_route_with_scariness_from_file(route_file_path):\n route = read_gpx.read_gpx(route_file_path)\n route = read_gpx.pad_gpx_dataframe(route)\n route_bounds = read_gpx.get_route_bounds(route)\n if not 
csp.check_route_bounds_fit_location_data(route_bounds):\n abort(400)\n altitudes_df = csp.get_complete_route_altitude_df(route_bounds)\n route = csp.calculate_route_scariness(route, altitudes_df)\n administer_route_database.insert_route_into_db_table(\n administer_route_database.prepare_route_for_insertion(route, route_file_path),\n administer_route_database.get_route_db_connection(), 'waypoints'\n )\n return route", "def read_file(self, path, route_id):\n\n #uncoded_route_id = route_id.decode(\"utf-8\")\n route_id = str(route_id)\n path += \"/line_\" + route_id + \".txt\"\n with io.open(path, encoding=\"utf-8\") as f:\n lines = f.readlines()\n\n stop_of_graph_list = list()\n\n # So here we're examining the lines of the file\n for line in lines[1:]:\n line = line.strip()\n\n if line != '':\n stop_of_graph_list.append(StopOfGraph.StopOfGraph(line))\n\n # We mustn't forget to give our bus line a name\n self.line_id = lines[0]\n return stop_of_graph_list", "def get_passports(filepath: str = PASSPORT_PATH) -> List[Dict[str, str]]:\n with open(filepath) as file:\n return [\n {field.split(\":\")[0]: field.split(\":\")[1] for field in passport.split()}\n for passport in file.read().split(\"\\n\\n\")\n ]", "def _process_file(self, file: str) -> List[Dict]:\n with open(self.base_path + file, encoding='latin-1') as hosts_file:\n records = []\n for line in hosts_file.readlines():\n line_contents = line.strip().split()\n if line_contents and line_contents[0][0] != \"#\":\n new_record = {\n RecordAttribute.CATEGORY: Category.GENERAL_MALWARE\n }\n if self.is_url_ip_address(line_contents[0]):\n new_record[RecordAttribute.IP] = line_contents[0]\n else:\n new_record[RecordAttribute.URL] = line_contents[0]\n records.append(new_record)\n return records", "def read_cities(file_name):\n stream = open(file_name)\n data = stream.readlines()\n stream.close()\n roadmap = []\n for city_info in data: # For each record in data file\n if city_info != \"\\n\": # Ignore new line characters\n city_info = clean_data(city_info) # Clean the record\n roadmap.append(city_info) # Add each cleaned record to a list\n return roadmap", "def get_resolution(path_name, file_name):\n with open(path_name + 'resolution.txt') as file, \\\n mmap.mmap(file.fileno(), 0, access=mmap.ACCESS_READ) as s:\n file.seek(0, s.find(file_name.encode()))\n line = file.readline().split(' ')\n return [line[1], line[2].rstrip('\\n')]", "def one_time_route_cost_check(routes_file, numbers_file):\n routes = open(routes_file, \"r\")\n read_file = routes.read().split()\n read_file = list(map(lambda x: x.split(\",\"), read_file))\n\n numbers = open(numbers_file, \"r\")\n numbers_read_file = numbers.read()\n\n return read_file", "def read_waypoints():\n\tfilename = \"waypoints.txt\"\n\tfile = open(filename, \"r\")\n\twp_list = []\n\n\tfor line in file:\n\t\t# Get the individual elements, splitting by whitespace\n\t\tdata_list = line.split()\n\t\tcoordinate = {'x': data_list[0], 'y': data_list[1], 'z': data_list[2]}\n\t\twaypoint = {'radius': data_list[3], 'point': coordinate}\n\n\t\twp_list.append (waypoint)\n\n\treturn wp_list", "def get_terraform_sources_from_file(file: str) -> list:\n filecontent = []\n with open(file, 'r') as fh:\n filecontent = fh.readlines()\n\n sources_in_file = []\n for line in filecontent:\n\n if TF_SOURCE_SEARCH_PATTERN in line \\\n and \"source\" in line \\\n and \"#\" not in line:\n\n sources_in_file.append(line.rstrip())\n\n return sources_in_file", "def parse_file(file_path):\n stations = defaultdict(dict) # Spares me 2 lines 
inside that loop\n for line in open(file_path):\n if '->' in line:\n origin, destiny = map(str.strip, line.split('->'))\n elif '-' in line:\n stations[origin][destiny] = [int(i) for i in line.split('-')]\n return stations", "def parse(self, fn, board):\n with open(fn) as f:\n return [(board.get_region(i['name']), i['base']) for i in json.loads(f.read())]", "def read_fixtures(filename):\n fixtures = []\n f = open(filename, 'r')\n routes = {}\n for line in f:\n parsed = parse_fixture_line(line)\n if parsed is None:\n continue\n strand = parsed[\"strand\"]\n address = parsed[\"address\"]\n panel_number = parsed[\"panel_number\"]\n x, y = parsed[\"grid_loc\"]\n pos1 = map_grid_loc_to_pixel((panel_number, x, y),\n panel_dimensions = langton_panel_dimensions)\n pixels = 1\n route_indices = parsed[\"routes\"]\n data = {\"strand\": strand,\n \"address\": address,\n \"pixels\": pixels,\n \"pos1\": pos1,\n \"pos2\": pos1,\n \"grid_loc\": (x, y)}\n fixtures.append(Fixture(data))\n for route_index in route_indices:\n if route_index in routes:\n routes[route_index].fixtures.append(data)\n else:\n routes[route_index] = Route({\"active\": False,\n \"color\": (1, 0, 0),\n \"fixtures\": [data],\n \"index\": route_index})\n f.close()\n return routes, fixtures", "def read_file(filename):\n nodes = {}\n try:\n with open(filename, 'r') as f:\n for line in f:\n params = {}\n device_list = line.split()\n router = device_list[0]\n ip = device_list[1]\n platform = device_list[2]\n params[\"ipv4_address\"] = ip\n params[\"platform\"] = platform\n nodes[router] = params\n except IOError:\n print \"File %s does not exist!\" % filename\n return nodes", "def __read_file(file_path):\n assert os.path.exists(file_path), 'FILE \"{}\" NOT FOUND,' \\\n ' PLEASE GIVE THE CORRECT FILE PATH.'.format(file_path)\n url_list = []\n if file_path == '':\n return url_list\n else:\n my_file = open(file_path, 'r')\n for line in my_file.readlines():\n url_list.append(''.join(line.split('\\n')))\n return url_list", "def get_routes():\n # get from cache if it exists\n routes = cache.get(\"routes\")\n if routes:\n return routes\n\n trips_url = \"https://data.edmonton.ca/api/views/ctwr-tvrd/rows.json?accessType=DOWNLOAD\"\n bus_heading_url = \"https://data.edmonton.ca/resource/atvz-ppyb.json\"\n\n trips_response = requests.get(trips_url)\n bus_heading_response = requests.get(bus_heading_url)\n\n if trips_response.status_code == 200 and bus_heading_response.status_code == 200:\n trips = trips_response.json()\n headings = bus_heading_response.json()\n\n bus_to_headings = {}\n trip_to_bus = {}\n\n for heading in headings:\n if \"route_long_name\" in heading:\n bus_to_headings[heading[\"route_id\"]] = heading[\"route_long_name\"]\n\n for item in trips[\"data\"]:\n trip_id = item[-4]\n bus_number = item[-6]\n if bus_number in bus_to_headings:\n bus_heading = bus_to_headings[bus_number]\n trip_to_bus[trip_id] = [bus_number, bus_heading]\n \n # store the routes in the cache for five minutes\n cache.set(\"routes\", trip_to_bus, timeout=5*60) \n return trip_to_bus", "def routes():\n routeList = []\n for profile in globalProfile():\n routeList.append(profile.route)\n return routeList", "def parseFile(file,rules = None):\n if not rules: rules = RuleCollection()\n buf = \"\"\n for line in open(file,'r'):\n if not line[0]=='#':\n buf += line\n try:\n for (ptree,lo,hi) in ruleNT.scanString(buf):\n rules.add(Parser._convertRule(ptree))\n return rules\n except KeyError:\n print 'error near ',lo,'in',file\n return rules", "def list_route(aircraft_id):\n\n 
utils._validate_id(aircraft_id)\n\n route = _route_call(aircraft_id)\n return _process_listroute_response(route)", "def parse_route_files():\n a_copy = PY_FILES[::]\n for f in a_copy:\n if 'routes' in f:\n URL_FILES.append(f)\n PY_FILES.remove(f)", "def load_rules(path):\n # type: (str) -> List[Tuple[AnyStr, AnyStr, AnyStr]]\n with open(path) as file:\n return parse_rules(file.read(), path)", "def LoadListFile(file):\n\tlst = []\n\ttry:\n\t\twith open(file,'r') as f:\n\t\t\tfor line in f:\n\t\t\t\tline = line.rstrip()\n\t\t\t\tlst.append(line)\n\texcept:\n\t\treturn []\n\treturn lst" ]
[ "0.6682189", "0.61099017", "0.60007584", "0.59836805", "0.59614146", "0.585794", "0.58239037", "0.57957166", "0.5757466", "0.5705301", "0.5684137", "0.5619704", "0.5542456", "0.55303", "0.5512766", "0.5511307", "0.5509003", "0.54854715", "0.5457731", "0.54531974", "0.54459774", "0.538464", "0.53757715", "0.53604007", "0.5348553", "0.5336013", "0.53331393", "0.5323174", "0.5299257", "0.52870226" ]
0.6995897
0
Create a csv input file, given a list of routes. Routes are lists of names and airport codes.
def writeRoutesCSV(filename, routes):
    if filename[-4:] != ".csv": # Make sure the filename is a .csv
        filename += ".csv"
    try:
        with open(os.path.join("input", filename), "w", newline='') as f:
            writer = csv.writer(f, delimiter=",")
            writer.writerows(routes)
    except (OSError, FileNotFoundError):
        return False
    else:
        return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def route_data(route):\n os.chdir(\"../Data/test\") #change to whatever directory your data files are stored in\n with open(\"../Sorted Data/\"+str(route)+\"_data.csv\",\"w\",newline=\"\") as result_file: #storing resulting data in csv file in different directory\n wr=csv.writer(result_file, dialect='excel') #setting up csv writer\n for file in glob.glob(\"*.csv\"): #looping through raw data files\n reader=csv.reader(open(file))\n for line in reader:\n if extract_bus_route(line[3])==route: #extract_bus_route returns the bus route from journey pattern id (col D)\n wr.writerow(line)", "def write_to_csv(all_roads, geo, out_fn):\n\n output_header = [\"road_id\", \"color\", \"origin_lon\",\n \"origin_lat\", \"dest_lon\", \"dest_lat\"]\n\n segments_written = 0\n with open(out_fn, 'w') as fout:\n csvwriter = csv.writer(fout)\n csvwriter.writerow(output_header)\n\n for color in ['green', 'yellow', 'red']:\n roads = all_roads[color]\n for road_id in roads:\n # road is a list of coordinates, {x:___, y:___}.\n # we want to encode each pair of coordinates as its\n # own row in the CSV.\n road = geo[road_id]\n for origin, dest in zip(road, road[1:]):\n origin_lon = origin['x']\n origin_lat = origin['y']\n dest_lon = dest['x']\n dest_lat = dest['y']\n\n row = [road_id, color, origin_lon, origin_lat,\n dest_lon, dest_lat]\n csvwriter.writerow(row)\n\n segments_written += 1\n if segments_written % 100 == 0:\n print(f\"Added {segments_written} segments so far.\")\n\n print(f\"Added all {color} roads.\")", "def write_csv(csv_list, out_csv_path):\n with open(out_csv_path, 'w', newline='') as csv_file:\n csv_writer = csv.writer(csv_file)\n for row in csv_list:\n csv_writer.writerow(row)", "def write_csv(row_list,out_name,*header_strings : str):\n with open(out_name,'w',newline='') as result_file:\n wr = csv.writer(result_file, delimiter='\\t')\n if header_strings:\n wr.writerow([name for name in header_strings])\n if type(row_list[0]) is list:\n wr.writerows(row_list)\n else:\n for row in row_list:\n wr.writerow([row])", "def csv_file_creator(path, list_of_jobs):\n with open(path, \"wb\") as out_file:\n writer = UnicodeWriter(out_file, delimiter=',')\n for row in list_of_jobs:\n writer.writerow(row)", "def generate_csv(lists, output_file):\n if os.path.isfile(output_file):\n with open(output_file, 'a') as file:\n dataset = tablib.Dataset()\n for l in lists:\n dataset.append([l['Original ASIN'], l['Associated ASIN'], l['Title'], l['Price'], l['Currency Code'], l['Relationship']])\n file.write(dataset.csv)\n else:\n with open(output_file, 'w+') as fp:\n dataset = tablib.Dataset(headers=['Original ASIN', 'Associated ASIN', 'Title', 'Price', 'Currency Code', 'Relationship'])\n for l in lists:\n dataset.append([l['Original ASIN'], l['Associated ASIN'], l['Title'], l['Price'], l['Currency Code'], l['Relationship']])\n fp.writelines(dataset.csv)", "def write_csv(list_file, path):\n\n\twith open(path, 'w') as f:\n\t\twriter = csv.writer(f, delimiter=',')\n\t\tfor i in list_file:\n\t\t\twriter.writerow(i)", "def generate_csv(type, json_list, columns_list):\n with open(\"data/\" + type + \"_\" + time.strftime(\"%Y-%m-%d_%H:%M:%S\") +\n \".csv\", 'a+') as f:\n csv_file = csv.DictWriter(f, fieldnames=columns_list,\n extrasaction=\"ignore\")\n csv_file.writeheader()\n for item in json_list:\n csv_file.writerow(item)\n print(\"\\nCSV file saved as data/\" + type + \"_\" +\n time.strftime(\"%Y-%m-%d_%H:%M:%S\") + \".csv\")", "def writeCSV(path,aList):\n\twith open(path,'wb') as w:\n\t\ta = csv.writer(w, delimiter = 
',')\n\t\ta.writerows(aList)\n\tw.close()", "def write_to_csv(list_of_rows, file_name):\n with open(file_name, 'w') as f:\n writer = csv.writer(f)\n for row in list_of_rows:\n if None in row:\n continue\n writer.writerow(row)\n \n f.close()", "def generate_csv(table, header):\n with open(\"%s.csv\" % header, \"w\") as csvfile:\n for i in range(len(table)):\n for j in range(len(table[i])):\n if j != len(table[i])-1:\n tmp = table[i][j] + \",\"\n else:\n tmp = table[i][j] + \"\\n\"\n csvfile.write(tmp)", "def import_route_csv(cf, filename):\n try:\n with open(filename, 'r') as routedata:\n print(\"Reading import file...\")\n routes_to_add = []\n routereader = csv.DictReader(routedata)\n for route in routereader:\n other_fields = {}\n orig = route[\"ORIGIN\"]\n dest = route[\"DEST\"]\n other_fields[\"airline\"] = route[\"OP_UNIQUE_CARRIER\"]\n other_fields[\"no\"] = route[\"OP_CARRIER_FL_NUM\"]\n other_fields[\"duration\"] = route[\"CRS_ELAPSED_TIME\"]\n other_fields[\"distance\"] = route[\"DISTANCE\"]\n\n try:\n other_fields[\"price\"] = route[\"PRICE\"]\n\n except KeyError:\n pass\n routes_to_add.append([orig, dest, other_fields])\n\n cf.add_many_flights(routes_to_add)\n\n print(\"Successfully imported {} flights\"\n .format(len(routes_to_add)))\n routedata.close()\n\n except FileNotFoundError:\n print(\"ERROR: File Not found.\")\n\n except KeyError as e:\n print(\"ERROR: Field not found in CSV: {}\".format(e))", "def generate_csv(inf, outf):\n o = csv.writer(outf)\n o.writerow(COLUMNS)\n for row in reformat_data(inf):\n o.writerow([inf.name] + row)", "def convert2csv(contacts, output_path):\n\n print(\"[!] not implemented yet\")", "def write_csv(file_name, data):\n\n with open(file_name, \"w\") as fp:\n\n writer = RiscvInstructionTraceCsv(fp)\n writer.start_new_trace()\n\n for entry in data:\n writer.write_trace_entry(entry)", "def to_csv(header, rows):\r\n with open('result.csv', 'w') as result:\r\n result_writer = csv.writer(result, delimiter=';')\r\n result_writer.writerow(header)\r\n result_writer.writerows(rows)", "def write_csv_file(csv_output_file, full_data):\n j = 0\n csv_file_path = make_dir(csv_output_file)\n\n # csv_file_path = os.path.join(csv_file_path, csv_output_file)\n try:\n with open(csv_file_path, 'w', newline='') as csvfile:\n csvwriter = csv.writer(csvfile, delimiter=',')\n csvwriter.writerow(['tripId', 'agency_tripId', 'itinerary_nb', 'modes', 'actual_time', 'perceived_time',\n 'start_time', 'end_time', 'walk_time', 'walk_distance','transit_time', 'waiting_time',\n 'boardings', 'bus_lines_numbers', 'boarding_stop_ids', 'debarquer_stop_ids'])\n print(\"======================================\")\n print(\"= Creating CSV file from JSON files =\")\n print(\"======================================\")\n for id in full_data.keys(): # just so we can get all the ids\n data = full_data[id]\n j += 1\n\n printrp('( ' + str(j) + ' / ' + str(len(full_data) - 1) + ' )') if found_CmdPrinter else print(j)\n\n if 'error' in data:\n # if no itineraries were find (ie. there was an error), write the error id and error message\n # note : msg is the short message (eg. 
PATH_NOT_FOUND), message is the long description\n csvwriter.writerow([id] + ['error'] + [str(data['error']['id'])] +\n [str(data['error']['message'])] + [str(data['error']['msg'])])\n else:\n for itinerary_nb in range(len(data['plan']['itineraries'])):\n\n boarding = 0\n busNbs = \"\"\n boarding_stop_ids = \"\"\n debarquer_stop_ids = \"\"\n agency_trip_ids = \"\"\n modes = \"\"\n for leg in data['plan']['itineraries'][itinerary_nb]['legs']:\n modes += leg['mode'] + ';'\n if leg['mode'] == 'BUS':\n # every time a BUS step is included in the itineraries :\n # add 1 to the boarding counter\n # add the bus line number to busNbs\n # add the stop_ids to boarding_stop_ids and debarquer_stop_ids\n boarding += 1\n busNbs += leg['route'] + \";\"\n\n boarding_stop_ids += str(leg['from']['stopCode']) + ';'\n debarquer_stop_ids += str(leg['to']['stopCode']) + ';'\n agency_trip_ids += str(leg['tripId'].split(':')[1]) + ';'\n # we need to .split that line because tripId is given as agencyId:tripId\n\n\n busNbs = busNbs[:-1] # removing the trailing semi-colon\n boarding_stop_ids = boarding_stop_ids[:-1]\n debarquer_stop_ids = debarquer_stop_ids[:-1]\n agency_trip_ids = agency_trip_ids[:-1]\n modes = modes[:-1]\n startTime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(data['plan']['itineraries'][itinerary_nb]['startTime']/1000))\n endTime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(data['plan']['itineraries'][itinerary_nb]['endTime']/1000))\n # those are /1000 because OTP gives Epoch time in milliseconds\n\n walkTime = data['plan']['itineraries'][itinerary_nb]['walkTime']\n transitTime = data['plan']['itineraries'][itinerary_nb]['transitTime']\n waitingTime = data['plan']['itineraries'][itinerary_nb]['waitingTime']\n\n # Write all the information inside a csv file\n csvwriter.writerow([id,\n str(agency_trip_ids),\n str(itinerary_nb+1),\n str(modes),\n str(data['plan']['itineraries'][itinerary_nb]['duration']),\n str(get_perceived_time(walkTime, transitTime, waitingTime)),\n str(startTime),\n str(endTime),\n str(walkTime),\n str(data['plan']['itineraries'][itinerary_nb]['walkDistance']),\n str(transitTime),\n str(waitingTime),\n str(boarding),\n str(busNbs),\n str(boarding_stop_ids),\n str(debarquer_stop_ids)])\n except PermissionError:\n print('ERROR - Cannot write to CSV file. The file might be used by another app.')\n exit()\n except OSError:\n print(\"ERROR - Couldn't open file \" + csv_file_path + \". 
Please verify the file's permissions.\")\n print('( ' + str(j-1) + ' / ' + str(len(full_data) - 1) + ' )')", "def write_routes(output_dir: str, routes: List[Dict[str, Any]]):\n\n routes_file = ROUTES_FILE.format(output_dir=output_dir)\n if not os.path.isdir(output_dir):\n os.mkdir(output_dir)\n\n with open(routes_file, 'w') as f:\n logging.info(f'Wrote {len(routes)} routes to {routes_file}.')\n json.dump(routes, f, indent=4)", "def generate_csv(self, lista):\r\n\t\ts = ''\r\n\t\tsalida = self.get_rel_path() + \"/\" + \"tree_names.csv\"\r\n\t\tfor i in lista:\r\n\t\t\t#st = i[2].split('/')\r\n\t\t\t#newpath = os.path.join(i[1],st)\r\n\t\t\thash = str(i[0])\r\n\t\t\tname_path = str(i[1] + \"/\" + i[2])\r\n\t\t\t#s = s + str(i[0]) + \";\" + i[1] + \"/\" + i[2] + \"\\n\"\r\n\t\t\tself.copy_file(hash,name_path)\r\n\t\t\ts = s + str(hash + \";\" + name_path + \"\\n\")\r\n\r\n\t\tf = open(salida,\"w\")\r\n\t\tf.write(s)\r\n\t\treturn salida", "def writeCSV(list, filename):\n with open(filename, \"w\") as file:\n for row in list:\n for i in range(len(row)):\n file.write(str(row[i]))\n if i != len(row) - 1:\n file.write(\",\")\n else:\n file.write(\"\\n\")\n return", "def write_csv(file_names: list, csv_file_path: str):\n with open(csv_file_path, mode='w') as csv_file:\n writer = csv.writer(csv_file)\n writer.writerows(enumerate(file_names))", "def csv_writer(filepath, seqs):\n with open(filepath, 'w') as f:\n f.write('\\n'.join([','.join(\n ['\"{}\"'.format(r) \n if (' ' in r) or (',' in r) else r\n for r in s])\n for s in seqs]))", "def write_csv(header_row, data_rows, filename, course_id):\n shared.ensure_directory_exists(utils.ANSWERS_DISTRIBUTION_REPORTS_DIRECTORY,\n course_id.org, course_id.course)\n\n\n path = shared.get_safe_file_path(utils.ANSWERS_DISTRIBUTION_REPORTS_DIRECTORY,\n course_id.org, course_id.course,\n filename)\n ## need to encode the unico path in order to open the file in prod env\n path = path.encode('utf-8')\n\n with open(path, \"wb\") as ofile:\n writer = csv.writer(ofile, quoting=csv.QUOTE_ALL)\n writer.writerow(header_row)\n for datarow in data_rows:\n encoded_row = [cleanup_newlines(unicode(s).encode('utf-8'))\n for s in datarow]\n writer.writerow(encoded_row)", "def write_csv(path, lines, headers):\n print \"Opening %s for score output\" % base_name(path)\n\n try:\n f = open(path, 'wb')\n writer = csv.writer(f)\n writer.writerow(headers)\n writer.writerows(lines)\n except IOError:\n print \"Cannot open %s\" % path\n else:\n print \"Scores successfully written to %s\" % path\n f.close()", "def outputapidata_csv(filename, data, headers=None):\n with open(filename,'w',encoding='utf-8',newline = \"\", ) as f:\n if headers:\n writer = csv.DictWriter(f,fieldnames = headers)\n writer.writeheader()\n else:\n writer = csv.DictWriter(f)\n writer.writerows(out)", "def dump_csv(f, rra, out):\n w = writer(out)\n for row in dump(f, rra):\n w.writerow([s.strip() for s in row])", "def csv_file(data,output_dir,filename,order = [],head = True):\n with open(output_dir + filename + '.csv', 'w') as f:\n write = csv.writer(f)\n write.writerows(manip.dic_to_list(data,order,head),)\n return None", "def write_to_csv(agents, filename):\n log.info(\"Writing CSV file '%s'...\" % filename)\n with open(filename, 'w', newline='') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=place_detail_keys)\n writer.writeheader()\n writer.writerows(agents)", "def bus_routes_direction():\n route_list = []\n os.chdir(\"../Data\")\n for file in glob.glob(\"*.csv\"):\n print(file) #useful for monitoring 
progress of function\n reader = csv.reader(open(file))\n for line in reader:\n route = extract_route_and_direction(line[3]) # Journey ID field\n if route not in route_list and route != \"\": # error handling for extract_bus_routes function\n route_list.append(route)\n return route_list", "def prepare_out_csv(output_dir, filename):\n out_columns_pi = ['fasta_file', 'acc.code',\n 'organism', 'EC.code', 'species',\n 'note', 'pi', 'modification', 'category']\n string = ''\n for i in out_columns_pi:\n if i == out_columns_pi[-1]:\n string += i\n else:\n string += i+','\n string += '\\n'\n with open(output_dir+filename, 'w') as f:\n f.write(string)" ]
[ "0.6899799", "0.66672605", "0.6458159", "0.63670135", "0.634914", "0.63457423", "0.63208055", "0.6306807", "0.6239996", "0.62074065", "0.6170774", "0.60570943", "0.6028385", "0.6027313", "0.60171574", "0.60073507", "0.59914494", "0.59842396", "0.5965804", "0.59501034", "0.59156203", "0.59145236", "0.58978146", "0.58921224", "0.58794606", "0.5855713", "0.583203", "0.5831053", "0.5830377", "0.58196855" ]
0.7744267
0
Write output .csv file for list of itineraries. Output file shows cheapest route and its cost.
def writeItineraryOutput(filename, itins):
    if filename[-4:] != ".csv": # Make sure the filename is a .csv
        filename += ".csv"
    try:
        with open(os.path.join("output", filename), "w", newline='') as f:
            writer = csv.writer(f, delimiter=",")
            firstline = ["Name", "Cost", "Home", "Dest 1", "Dest 2", "Dest 3", "Dest 4", "Dest 5", "Dest 6"]
            writer.writerow(firstline)
            for itinerary in itins:
                line = []
                line.append(itinerary.name)
                line.append(itinerary.cheapest_cost)
                line = line + itinerary.cheapest_route.getCodeList()
                writer.writerow(line)
    except (FileNotFoundError, OSError):
        return False
    else:
        return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_csv(net, wires, net_id, chip_id, chip):\n with open('output/output.csv', 'w') as file:\n # Write first line\n output = csv.writer(file)\n output.writerow([\"net\", \"wires\"])\n\n # Index and fill the body\n for step in range(len(wires)):\n output.writerow([net[step],wires[step]])\n\n # End of file\n output.writerow([f\"chip_{chip_id}_net_{net_id}\", chip.cost])", "def writeCSV():\n final_list = get_final_list()\n path_to_csv_File = 'system_metrics.csv'\n\n csv_file = open(path_to_csv_File, 'w+', newline='', encoding=\"utf8\")\n csv_file_writer = csv.writer(csv_file, delimiter=',')\n\n csv_file_writer.writerow(['Subscription', 'Resource', 'MetricType',\n 'Timestamp', 'Unit', 'Minimum', 'Maximum', 'Average'])\n\n for item in final_list:\n csv_file_writer.writerow([item['subscription'], item['resource'], item['metricType'], item['timestamp'],\n item['unit'], item['minimum'], item['maximum'], item['average']])\n\n print('Output written successfully!!')", "def write_to_file(self, results):\n with open(self.outputFilename, \"w\") as csvFile:\n csvWriter = csv.writer(csvFile, delimiter=',') \n title_row = ('asset_id', 'component_id', 'latitude', 'longitude', 'installation_date', 'commissioning_date', 'street_name', 'cabinet_id', 'nominal_wattage', 'current_time', 'current_LogValue', 'current_IsLogValueOff') \n csvWriter.writerow(title_row)\n for record in results:\n csvWriter.writerow(record)", "def csv_output(self):\r\n fh = open(\"output.csv\",'w')\r\n for i in range(len(self.population.columns)):\r\n if i != len(self.population.columns)-1:\r\n fh.write(str(self.population.columns[i]))\r\n fh.write(\",\")\r\n else:\r\n fh.write(str(self.population.columns[i]))\r\n fh.write(\"\\n\")\r\n\r\n for i in range(len(self.population.data)):\r\n for j in range(len(self.population.data[i])):\r\n if j != len(self.population.data[i])-1:\r\n fh.write(str(self.population.data[i][j]))\r\n fh.write(\",\")\r\n else:\r\n fh.write(str(self.population.data[i][j]))\r\n fh.write(\"\\n\")\r\n fh.close()", "def write_file(self):\n rl_df, lift_df = self.create_df()\n\n number = re.findall('\\d+', self.url)[0]\n\n if self.write is True:\n with open('house_{}.csv'.format(number), 'w',\n encoding='utf-8-sig') as file:\n rl_df.to_csv(file, sep=';')\n with open('house_lifts_{}.csv'.format(number), 'w',\n encoding='utf-8-sig') as file2:\n lift_df.to_csv(file2, sep=';')", "def output_to_file(utilist, filepath=\"demo.csv\"):\n os.makedirs(os.path.dirname(filepath), exist_ok=True)\n with open(filepath, \"a\") as f:\n f.write(utilist + \"\\n\")", "def write_csv_file(csv_output_file, full_data):\n j = 0\n csv_file_path = make_dir(csv_output_file)\n\n # csv_file_path = os.path.join(csv_file_path, csv_output_file)\n try:\n with open(csv_file_path, 'w', newline='') as csvfile:\n csvwriter = csv.writer(csvfile, delimiter=',')\n csvwriter.writerow(['tripId', 'agency_tripId', 'itinerary_nb', 'modes', 'actual_time', 'perceived_time',\n 'start_time', 'end_time', 'walk_time', 'walk_distance','transit_time', 'waiting_time',\n 'boardings', 'bus_lines_numbers', 'boarding_stop_ids', 'debarquer_stop_ids'])\n print(\"======================================\")\n print(\"= Creating CSV file from JSON files =\")\n print(\"======================================\")\n for id in full_data.keys(): # just so we can get all the ids\n data = full_data[id]\n j += 1\n\n printrp('( ' + str(j) + ' / ' + str(len(full_data) - 1) + ' )') if found_CmdPrinter else print(j)\n\n if 'error' in data:\n # if no itineraries were find (ie. 
there was an error), write the error id and error message\n # note : msg is the short message (eg. PATH_NOT_FOUND), message is the long description\n csvwriter.writerow([id] + ['error'] + [str(data['error']['id'])] +\n [str(data['error']['message'])] + [str(data['error']['msg'])])\n else:\n for itinerary_nb in range(len(data['plan']['itineraries'])):\n\n boarding = 0\n busNbs = \"\"\n boarding_stop_ids = \"\"\n debarquer_stop_ids = \"\"\n agency_trip_ids = \"\"\n modes = \"\"\n for leg in data['plan']['itineraries'][itinerary_nb]['legs']:\n modes += leg['mode'] + ';'\n if leg['mode'] == 'BUS':\n # every time a BUS step is included in the itineraries :\n # add 1 to the boarding counter\n # add the bus line number to busNbs\n # add the stop_ids to boarding_stop_ids and debarquer_stop_ids\n boarding += 1\n busNbs += leg['route'] + \";\"\n\n boarding_stop_ids += str(leg['from']['stopCode']) + ';'\n debarquer_stop_ids += str(leg['to']['stopCode']) + ';'\n agency_trip_ids += str(leg['tripId'].split(':')[1]) + ';'\n # we need to .split that line because tripId is given as agencyId:tripId\n\n\n busNbs = busNbs[:-1] # removing the trailing semi-colon\n boarding_stop_ids = boarding_stop_ids[:-1]\n debarquer_stop_ids = debarquer_stop_ids[:-1]\n agency_trip_ids = agency_trip_ids[:-1]\n modes = modes[:-1]\n startTime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(data['plan']['itineraries'][itinerary_nb]['startTime']/1000))\n endTime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(data['plan']['itineraries'][itinerary_nb]['endTime']/1000))\n # those are /1000 because OTP gives Epoch time in milliseconds\n\n walkTime = data['plan']['itineraries'][itinerary_nb]['walkTime']\n transitTime = data['plan']['itineraries'][itinerary_nb]['transitTime']\n waitingTime = data['plan']['itineraries'][itinerary_nb]['waitingTime']\n\n # Write all the information inside a csv file\n csvwriter.writerow([id,\n str(agency_trip_ids),\n str(itinerary_nb+1),\n str(modes),\n str(data['plan']['itineraries'][itinerary_nb]['duration']),\n str(get_perceived_time(walkTime, transitTime, waitingTime)),\n str(startTime),\n str(endTime),\n str(walkTime),\n str(data['plan']['itineraries'][itinerary_nb]['walkDistance']),\n str(transitTime),\n str(waitingTime),\n str(boarding),\n str(busNbs),\n str(boarding_stop_ids),\n str(debarquer_stop_ids)])\n except PermissionError:\n print('ERROR - Cannot write to CSV file. The file might be used by another app.')\n exit()\n except OSError:\n print(\"ERROR - Couldn't open file \" + csv_file_path + \". 
Please verify the file's permissions.\")\n print('( ' + str(j-1) + ' / ' + str(len(full_data) - 1) + ' )')", "def outputFunc(filename, resultList):\n #assert len(parks) == 3\n \n f = open(filename, 'wt')\n \n try:\n writer = csv.writer(f)\n for i in range(len(resultList)):\n print resultList[0]\n writer.writerow(resultList[i])\n \n finally:\n f.close()", "def write_to_csv(all_roads, geo, out_fn):\n\n output_header = [\"road_id\", \"color\", \"origin_lon\",\n \"origin_lat\", \"dest_lon\", \"dest_lat\"]\n\n segments_written = 0\n with open(out_fn, 'w') as fout:\n csvwriter = csv.writer(fout)\n csvwriter.writerow(output_header)\n\n for color in ['green', 'yellow', 'red']:\n roads = all_roads[color]\n for road_id in roads:\n # road is a list of coordinates, {x:___, y:___}.\n # we want to encode each pair of coordinates as its\n # own row in the CSV.\n road = geo[road_id]\n for origin, dest in zip(road, road[1:]):\n origin_lon = origin['x']\n origin_lat = origin['y']\n dest_lon = dest['x']\n dest_lat = dest['y']\n\n row = [road_id, color, origin_lon, origin_lat,\n dest_lon, dest_lat]\n csvwriter.writerow(row)\n\n segments_written += 1\n if segments_written % 100 == 0:\n print(f\"Added {segments_written} segments so far.\")\n\n print(f\"Added all {color} roads.\")", "def generate_csv(table, header):\n with open(\"%s.csv\" % header, \"w\") as csvfile:\n for i in range(len(table)):\n for j in range(len(table[i])):\n if j != len(table[i])-1:\n tmp = table[i][j] + \",\"\n else:\n tmp = table[i][j] + \"\\n\"\n csvfile.write(tmp)", "def export(tako_list, filename):\n for tak in tako_list:\n tak = tak[0]\n l1 = [tak.ident, \"a\"]\n for gen in tak.genome.weightchr_a:\n l1.append(gen.ident)\n l1.append(gen.weight)\n l1.append(gen.mut_rate)\n l1.append(gen.dom)\n f = os.path.join(\"Data\", (filename[:-4] + \" gene data.csv\"))\n with open(f, 'a', newline=\"\") as csvfile:\n writ = csv.writer(csvfile)\n writ.writerow(l1)\n if len(tak.genome.weightchr_b) != 0:\n l2 = [tak.ident, \"b\"]\n for gen in tak.genome.weightchr_b:\n l2.append(gen.ident)\n l2.append(gen.weight)\n l2.append(gen.mut_rate)\n l2.append(gen.dom) \n writ.writerow(l2)", "def write_log(self):\n with open(self.trav_stat_file, 'a') as stat_file:\n travel_writer = csv.writer(stat_file)\n # Every row starts with the start and destnation\n row = [self.start, self.dest]\n # This uses a static list so that the order is fixed\n for state in [\"waiting\", \"riding\", \"transferring\"]:\n state_total = sum(self.time_record[state])\n row.append(state_total)\n travel_writer.writerow(row)", "def write_output(self) -> None:\n self.home.round(2).to_csv(var.indicators_base_cumsum + \"home_\" + str(self.year) + \".csv\")\n self.away.round(2).to_csv(var.indicators_base_cumsum + \"away_\" + str(self.year) + \".csv\")", "def write_csv(self):\n with open(paths.CSV_FILE, 'w', newline='') as csv_file:\n writer = csv.writer(csv_file)\n assg = AssignmentConfig().get_assignment()\n writer.writerow([\"Student\"] + assg.get_test_list() + assg.get_programs_list() +\n [\"normalised_test_score\"] + [\"normalised_prog_score\"] + [\"total\"] + [\"total_rounded\"])\n\n for (submitter, submitter_data) in sorted(self.snapshot['results'].items()):\n total_score = submitter_data[\"normalised_test_score\"] + submitter_data[\"normalised_prog_score\"]\n total_rounded = round(total_score * 2) / 2 # total score rounded to nearest 0.5\n writer.writerow([submitter] +\n [submitter_data[\"tests\"][test] for test in sorted(submitter_data[\"tests\"])] +\n [submitter_data[\"progs\"][prog] 
for prog in sorted(submitter_data[\"progs\"])] +\n [submitter_data[\"normalised_test_score\"]] +\n [submitter_data[\"normalised_prog_score\"]] +\n [round(total_score, 2)] +\n [total_rounded])", "def route_information(th_object, topology_info, file_name, node1, node2, path):\n save_path = path + node1 + \"_\" + node2 + \"_vs_t2.csv\"\n route_data = th_object.get_node_len_etx(topology_info, node1, node2)\n with open(save_path, \"w+\") as f_name:\n f_name.write(\"Time,No_hopes,Cost\\n\")\n cc = 0\n for k in file_name:\n f_name.write(str(k)[11:-7] + \",\" + str(route_data[cc]['hopes_count']) + \",\" + str(route_data[cc]['cost']) +\n \"\\n\")\n cc += 1\n print(node1 + \" \" + node2 + \" route information exported\")", "def write_csv(csv_list, out_csv_path):\n with open(out_csv_path, 'w', newline='') as csv_file:\n csv_writer = csv.writer(csv_file)\n for row in csv_list:\n csv_writer.writerow(row)", "def write_output(self):\n with open(self.filename, 'a', newline='', encoding='utf-8') as \\\n csv_file:\n csv_writer = csv.writer(csv_file)\n if os.stat(self.filename).st_size == 0:\n # if the csv file needs a headers\n csv_writer.writerow(Configurations.header)\n for quote in self.quotes_objects:\n csv_writer.writerow(quote.info)", "def writeRoutesCSV(filename, routes):\n if filename[-4:] != \".csv\": # Make sure the filename is a .csv\n filename += \".csv\"\n try:\n with open(os.path.join(\"input\", filename), \"w\", newline='') as f:\n writer = csv.writer(f, delimiter=\",\")\n writer.writerows(routes)\n except (OSError, FileNotFoundError):\n return False\n else:\n return True", "def write_results(filename):\n \n with open(filename, 'w', newline='') as fn:\n writer = csv.writer(fn)\n writer.writerows([('id', 'number_of_places_available'), ''])\n pair: Point = yield # where x is the user id and y is the number of places available\n while pair is not None:\n writer.writerow(pair)\n pair = yield", "def generate_report(self, output_path):\n with open(output_path, 'w', newline='', encoding=\"utf-8\") as csv_fd:\n writer = csv.writer(csv_fd, quoting=csv.QUOTE_NONNUMERIC, doublequote=False, escapechar=\"\\\\\")\n writer.writerow([\"category\", \"level\", \"description\", \"method\", \"parameter\", \"url\", \"body\"])\n writer.writerows(self._vulns)\n writer.writerows(self._anomalies)\n writer.writerows(self._additionals)", "def write_rating_data():\n with open('outward.csv', 'w') as f:\n out = csv.writer(f)\n\n for item in Rating.query.filter(Rating.score != None).all():\n out.writerow([item.user_id, item.book_id, item.score])", "def outputFunc(filename, parks,roading,private):\n #assert len(parks) == 3\n \n f = open(filename, 'wt')\n \n try:\n writer = csv.writer(f)\n writer.writerow(days)\n writer.writerow(parks)\n writer.writerow(roading)\n writer.writerow(private)\n finally:\n f.close()", "def export_fallout(): \n with open('fallout.csv', 'w', newline='') as csvfile:\n wr = csv.writer(csvfile, delimiter=',')\n wr.writerows(environment)", "def print_customers(self):\n output = ''\n for i in range(len(self.customers)):\n output += f'Customer no. 
{self.customers[i].id} is in {self.customers[i].state[0]} section\\n'\n #print(output)\n with open('oneday.csv','a') as outfile:\n for i in range(len(self.customers)):\n outfile.write(f'{self.get_time()};{self.customers[i].id};{self.customers[i].state[0]}\\n')", "def generate_csv(lists, output_file):\n if os.path.isfile(output_file):\n with open(output_file, 'a') as file:\n dataset = tablib.Dataset()\n for l in lists:\n dataset.append([l['Original ASIN'], l['Associated ASIN'], l['Title'], l['Price'], l['Currency Code'], l['Relationship']])\n file.write(dataset.csv)\n else:\n with open(output_file, 'w+') as fp:\n dataset = tablib.Dataset(headers=['Original ASIN', 'Associated ASIN', 'Title', 'Price', 'Currency Code', 'Relationship'])\n for l in lists:\n dataset.append([l['Original ASIN'], l['Associated ASIN'], l['Title'], l['Price'], l['Currency Code'], l['Relationship']])\n fp.writelines(dataset.csv)", "def write_file(self):\n\n running_time = str(self.running_time_end - self.running_time_start)\n rounded_running_time = '{:.10}'.format(running_time)\n output = 'path_to_goal: ' + str(self.path_to_goal) + '\\n'\n output += 'cost_of_path: ' + str(self.cost_of_path) + '\\n'\n output += 'nodes_expanded: ' + str(self.nodes_expanded) + '\\n'\n output += 'fringe_size: ' + str(self.fringe_size) + '\\n'\n output += 'max_fringe_size: ' + str(self.max_fringe_size) + '\\n'\n output += 'search_depth: ' + str(self.search_depth) + '\\n'\n output += 'max_search_depth: ' + str(self.max_search_depth) + '\\n'\n output += 'running_time: ' + rounded_running_time + '\\n'\n\n system_name = system()\n if system_name == 'Windows':\n output += 'max_ram_usage: (Not available on Windows OS)'\n elif system_name == 'Linux':\n output += 'max_ram_usage: ' + \\\n str(getrusage(RUSAGE_SELF).ru_maxrss / 1024) + '\\n'\n\n file = open('output.txt', 'w+')\n file.write(output)\n print(output)", "def WriteOutput(self, rows, fileName, access='wb'):\n \n outputFile = open(fileName, access)\n try: \n outputFile.write(self.GetBanner())\n csv.writer(outputFile, dialect='excel-tab').writerows(rows)\n print 'Wrote secondary output to: %s' %(fileName) \n except IOError:\n print 'Error writing output to: %s' %(fileName) \n finally:\n outputFile.close()", "def generate_csv(self, output_file):\n try: # We are going to \"try\" something\n csv_file = open(output_file, 'w+') # open \"output_file\" as a writable file and return a handle called \"csv_file\"\n except OSError as err: # If something goes wrong with the open, we catch the exception\n fatal(\"{0}\".format(err), -1) # exit with something other than 0 so the shell knows something went wrong\n \n writer = csv.writer(csv_file) # create a CSV writing object that's pointing at our open file handle\n\n writer.writerow([\"Question\",\"Answers\"]) # Let's write the top row\n for k in self.questions.keys(): # Let's walk down the directory by key\n # write the \"key\" (which is the question) and then let's take the list of answers and create a comma delmited list.\n # this is likely totally wrong since you could have an answer in it that also has a comma...\n writer.writerow([k, \",\".join(self.questions[k].answers)]) # insert a key (which is the question) and then let's take the array of \n\n csv_file.close() # close the csv_file file handle", "def export_csv(state, out_file=None):\n\n if out_file is None:\n csvfile = sys.stdout\n else:\n csvfile = open(out_file, 'w')\n\n try:\n writer = csv.writer(csvfile)\n for grade in state.grades:\n writer.writerow([grade.student_name(), grade.score(),\n 
grade.breakdown(state.user_name)])\n finally:\n if out_file is not None:\n csvfile.close()", "def CSVWriter (iterable, outLoc, header=\"\", ):\n if not iterable:\n print (\"nothing to write\")\n return 0\n\n out = open(outLoc, 'w')\n\n if header:\n out.write(header+'\\n')\n\n #Only works if iterable is a nested list\n for member in iterable:\n for item in member:\n out.write(str(item)+',')\n out.write('\\n')\n\n print(\"write to \"+outLoc+\" successful.\")\n return 1" ]
[ "0.654631", "0.64892834", "0.6398828", "0.6333328", "0.6306452", "0.62436354", "0.6238039", "0.6226846", "0.6123271", "0.6066527", "0.6055366", "0.6047722", "0.6046962", "0.6041532", "0.6025302", "0.60197496", "0.6000928", "0.5996336", "0.59933907", "0.5990987", "0.5989872", "0.5989562", "0.59814686", "0.5952156", "0.59456223", "0.5935541", "0.5928818", "0.5921566", "0.59086233", "0.59044" ]
0.7681153
0
Create an input file with randomly generated routes for num_people.
def generateRandomInput(filename, num_people, travel_db):
    import random
    routes = []
    for i in range(num_people):
        route = travel_db.randomRoute()
        route.insert(0,"Person " + str(i)) # Add a name for each route.
        routes.append(route)
    if FileHandler.writeRoutesCSV(filename,routes): # If it's successful writing the file
        print("File {0} created successfully with {1} people.".format(filename, num_people))
    else:
        print("File {0} could not be created.".format(filename))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _prepare_input_file(self, filename, numlines, maxvalue):\n with open(filename, 'a') as f:\n for _ in range(numlines):\n f.write(str(randrange(maxvalue)) + '\\n')\n self.filepath = f.name", "def routes_gen(num) -> Generator[Route, None, None]:\n with open(f'data/route-costs-{num}.txt', 'rb') as routes:\n for route in routes:\n prefix, cost = route[:-1].split(b',')\n yield (prefix, float(cost))", "def generate_seed_file(kb_mapping, seed_file):\n r_file = open(kb_mapping, 'r')\n s_file = open(seed_file, 'w+')\n\n for line in r_file:\n values = line.strip().split(\"\\t\")\n relations = values[1].split(\" \")\n subsumptions = values[2].split(\" \")\n for subsumption in subsumptions:\n if subsumption == \"concept:relatedto\":\n continue\n for relation in relations:\n s_file.write(\"%s\\t%s\\t1.0\\n\" %(relation, subsumption))\n\n r_file.close()\n s_file.close()", "def gen_int(filename):\n random.seed()\n random.randint(-100,100)\n with open(filename, \"w\") as f:\n for i in range(1000):\n f.write(str(random.randint(-100,100)))\n f.write(\" \")\n # f.write(\"hello\")", "def write_numbers(file_path):\n count = random.randint(20, 40)\n try:\n with open(file_path, 'w') as f:\n for _ in range(count):\n f.write(' '.join([str(x) for x in random.sample(range(10, 90), random.randint(4, 12))]))\n f.write('\\n')\n except Exception as err:\n print('Unexpected error:', err)", "def generate_nums(filename, n):\n text = ''\n for i in range(n):\n num = random.randrange(0, 100)\n text += (str(num) + '\\n')\n f = open(filename, 'w')\n f.write(text)\n f.close()\n return", "def build_input_file(self, replica):\n\n file_name = self.inp_basename + \"_\" + \\\n str(replica.id) + \"_\" + \\\n str(replica.cycle) + \".md\"\n\n fo = open(file_name, \"wb\")\n for i in range(1,500):\n fo.write(str(random.randint(i, 500) + i*2.5) + \" \");\n if i % 10 == 0:\n fo.write(str(\"\\n\"));\n fo.close()", "def generate_hosts_file(n, path=\"./tests/fixtures\"):\n if not os.path.isdir(path):\n os.mkdir(path)\n with open(f\"{path}/hosts.txt\", \"w\") as f:\n for i in range(n):\n f.write(f\"{i},localhost,127.0.0.1,{5000+i}\\n\")", "def _make_random_file(self, dir, num_chars=10000):\n filename = os.path.join(dir, \"f-%d\" % random.randint(1, 2**63 - 1))\n content = \"\".join([random.choice(\"0123456789abcdefghijklmnopqrstuvwxyz\\n\") for _ in range(num_chars)])\n with open(filename, \"w\") as f:\n f.writelines(content)\n return filename", "def get_random(filename, out_file, number_of_random_seqs):\n records = Records(Extractor.extract_records(filename))\n random_seqs = records.get_random_seqs(number_of_random_seqs)\n RecordsWriter(random_seqs).write_to(out_file)", "def generate(count):\n lst = []\n with open('data.txt', 'w+') as f:\n for i in range(0, count):\n st = str(random.random())\n f.write(st+\"\\n\")\n lst.append(st)\n return lst", "def build_routes_file(routes, name):\n top = dict()\n top[\"file-type\"] = \"routes\"\n top[\"name\"] = name\n top[\"routes\"] = routes\n return top", "def generate_random_input(n, p, fileName):\n\n\tmax_x = 1000\n\tL = []\n\tH = []\n\tE = []\n\tx = [] #non negative x-coordinate of vertices\n\tfor i in range(n):\n\t\tL.append('location' + str(i))\n\t\trand = round(random.random() * max_x) + 1\n\t\twhile rand in x:\n\t\t\trand = round(random.random() * max_x) + 1\n\t\tx.append(rand)\n\tfor i in range(n):\n\t\tif random.random() < p and len(H) < n / 2: #vertex is a home with probability p\n\t\t\tH.append(i)\n\tfor i in range(n):\n\t\tE.append([])\n\t\tfor j in range(0, 
i):\n\t\t\tE[i].append(abs(x[i] - x[j])) #E[i][j] = absolute value of difference in x-coordinates of vertex i and vertex j as weight to ensure triangular inequality\n\t\tE[i].append('x') #no self-edges\n\tfor i in range(n):\n\t\tfor j in range(i+1, n):\n\t\t\tE[i].append(E[j][i])\n\tstarting_index = int((random.random() * (len(L) - 1)) // 1)\n\ts = L[starting_index]\n\tprint_input(L, E, H, s, fileName)", "def generate():", "def generate_index(file_name):\n count = num_lines(file_name)\n index = random.randint(0, count - 1)\n return index", "def create_room(self):\r\n room_number = randrange(1, AMOUNT) # random number\r\n if isFile(FILENAME): # file Exist\r\n with open(FILENAME, 'rt') as f: # for opening `FILENAME`\r\n for line in f.readlines(): # assign line to lines of the file\r\n _, _, id_number, _, roomnum = line.split(\r\n ' ') # get information from line of file\r\n roomnum = roomnum[-1] # bray pak kardan \"\\n\"\r\n # check if room number of this line from file equal to ranrange(1, ROOM_AMOUNT)\r\n if roomnum == room_number:\r\n self.create_number() # go back to start method `Recursion`\r\n return room_number", "def generate_file(name, size):\n print('=> Generating %s file' % name)\n with open(DATASET_DIR+name+DATASET_EXTENSION, 'wb+') as fout:\n fout.write(os.urandom(size))", "def main():\n if len(sys.argv) < 3:\n message = \"\"\"\n Usage: python generate_dataset.py <dataset_name> <number of files> <size of each file in bytes>\n \"\"\"\n print(message)\n sys.exit(0)\n dataset_name = sys.argv[1]\n file_number = int(sys.argv[2])\n file_size = int(sys.argv[3])\n\n if not os.path.exists(dataset_name):\n os.makedirs(dataset_name)\n\n for i in range(file_number):\n tmp_file = open('./' + dataset_name + '/' + dataset_name + '.file' + str(i), 'w+')\n tmp_file.write(os.urandom(file_size))\n tmp_file.close()", "def _make_files(self, dir, num_files=10):\n for i in range(num_files):\n self._make_random_file(dir)", "def generate(seq_sz, num, offset, filename):\n\n # generator of data\n label = '>test_rand_' \n\n gen_dna = lambda rng: (choice(symbols) \\\n for _ in range(seq_sz + randint(-(rng / 2), rng / 2)))\n data = ((gen_dna(offset)) for _ in range(num))\n\n # write generated data to a file\n with open(filename, 'w') as f:\n for i, t in enumerate(data):\n f.write(label + str(i) + '\\n')\n f.write(''.join(t) + '\\n')", "def generate_file(self, filename, amount):\n with open(filename, \"w\", encoding=\"utf-8\") as fi:\n count = 0\n space = \"\" if self.token is Tokenization.byte or self.token is Tokenization.character else \" \"\n for generate_token in self.generate():\n count += 1\n outputStr = str(generate_token)\n outputStr += space\n fi.write(outputStr)\n if count >= amount:\n break", "def generate_N_doping(path, N_graphitic, N_pyridinic, N_pyrrolic, filename1):\n global bond_list\n bond_list = bond_list_1 + bond_list_3\n atom_list = read_in_graphene(path)\n rings = find_rings(atom_list)\n bond_list = bond_list_1 + bond_list_3\n map_3, map_2, map_2n = filter_carbon_atoms(atom_list, rings)\n graphitic = N_graphitic \n pyridinic = N_pyridinic\n pyrrolic = N_pyrrolic\n attempt = len(atom_list) / 10\n choices = [1, 2, 3]\n while (((N_graphitic > 0) or (N_pyridinic > 0) or (N_pyrrolic > 0)) and (attempt > 0)):\n print(\"Left to add: \", \"N_graphitic \", N_graphitic, \"N_pyridinic \", N_pyridinic, \"N_pyrrolic \", N_pyrrolic)\n if (N_graphitic == 0):\n try:\n choices.remove(1)\n except:\n pass\n if (N_pyridinic == 0):\n try:\n choices.remove(2)\n except:\n pass\n if (N_pyrrolic == 0):\n try:\n 
choices.remove(3)\n except:\n pass\n choice = random.choice(choices)\n if (choice == 1):\n while ((N_graphitic > 0) and (len(map_3) > 0)):\n random_atom = random.choice(map_3)\n N_graphitic -= 1\n N = Atom(random_atom.atom_number, \"N3\", \"N3A\", str(graphitic - N_graphitic), float(\"{0:.3f}\".format(random_atom.x)), float(\"{0:.3f}\".format(random_atom.y)), float(\"{0:.3f}\".format(random_atom.z)))\n if ((len(identify_bonds(random_atom, atom_list)) == 3) and ((identify_bonds(random_atom, atom_list)[0][0].atom_name == \"CX\") or (identify_bonds(random_atom, atom_list)[0][0].atom_name == \"CY\")) and ((identify_bonds(random_atom, atom_list)[1][0].atom_name == \"CX\") or identify_bonds(random_atom, atom_list)[1][0].atom_name == \"CY\") and ((identify_bonds(random_atom, atom_list)[2][0].atom_name == \"CX\") or (identify_bonds(random_atom, atom_list)[2][0].atom_name == \"CY\"))):\n for ring in rings:\n if (random_atom in ring):\n for atom in ring:\n try:\n map_3.remove(atom)\n except:\n pass\n try:\n map_2.remove(atom)\n except:\n pass\n try:\n map_2n.remove(atom)\n except:\n pass\n try:\n atom_list.remove(random_atom)\n except:\n pass\n atom_list.append(N)\n else:\n attempt -= 1\n elif (choice == 2):\n while ((N_pyridinic > 0) and (len(map_2) > 0)): \n random_atom = random.choice(map_2)\n N_pyridinic -= 1\n N = Atom(random_atom.atom_number, \"N2\", \"N2A\", str(pyridinic - N_pyridinic), float(\"{0:.3f}\".format(random_atom.x)), float(\"{0:.3f}\".format(random_atom.y)), float(\"{0:.3f}\".format(random_atom.z)))\n if ((len(identify_bonds(random_atom, atom_list)) == 2) and ((identify_bonds(random_atom, atom_list)[0][0].atom_name == \"CX\") or (identify_bonds(random_atom, atom_list)[0][0].atom_name == \"CY\")) and ((identify_bonds(random_atom, atom_list)[1][0].atom_name == \"CX\") or identify_bonds(random_atom, atom_list)[1][0].atom_name == \"CY\") ):\n found = False\n for ring in rings:\n if (random_atom in ring):\n found = True\n for atom in ring:\n try:\n map_3.remove(atom)\n except:\n pass\n try:\n map_2.remove(atom)\n except:\n pass\n try:\n map_2n.remove(atom)\n except:\n pass\n if (found == False):\n try:\n map_3.remove(random_atom)\n except:\n pass\n try:\n map_2.remove(random_atom)\n except:\n pass\n try:\n map_2n.remove(random_atom)\n except:\n pass\n atom_list.remove(random_atom)\n atom_list.append(N)\n else:\n attempt -= 1\n else: \n attempt -= 1\n elif (choice == 3):\n while ((N_pyrrolic > 0) and (len(map_2n) > 0)):\n random_atom_1 = random.choice(map_2n)\n for neighbour in identify_bonds(random_atom_1, atom_list):\n if (len(identify_bonds(neighbour[0], atom_list)) == 2):\n random_atom_2 = neighbour[0]\n break\n for ring in rings:\n if (random_atom_1 in ring):\n center_6 = {}\n center_6['x'] = 0\n center_6['y'] = 0\n center_6['z'] = 0\n center_4 = {}\n center_4['x'] = 0\n center_4['y'] = 0\n center_4['z'] = 0\n for atom in ring:\n center_6['x'] += atom.x\n center_6['y'] += atom.y\n center_6['z'] += atom.z\n if ((atom != random_atom_1) and (atom != random_atom_2)):\n center_4['x'] += atom.x\n center_4['y'] += atom.y\n center_4['z'] += atom.z\n center_6['x'] /= 6\n center_6['y'] /= 6\n center_6['z'] /= 6\n center_4['x'] /= 4\n center_4['y'] /= 4\n center_4['z'] /= 4\n N_pyrrolic -= 1\n p = 0.6\n limit = 0.3\n if ((-limit < center_4['x'] - center_6['x'] < limit) and (-limit < center_4['y'] - center_6['y'] < limit)): \n N = Atom(random_atom_1.atom_number, \"N1\", \"N2N\", str(pyrrolic - N_pyrrolic), float(\"{0:.3f}\".format(center_6['x'])), float(\"{0:.3f}\".format(center_6['y'])), 
float(\"{0:.3f}\".format(center_6['z']))) \n elif ((-limit < center_4['x'] - center_6['x'] < limit) and (center_4['y'] - center_6['y'] < -limit)):\n N = Atom(random_atom_1.atom_number, \"N1\", \"N2N\", str(pyrrolic - N_pyrrolic), float(\"{0:.3f}\".format(center_6['x'])), float(\"{0:.3f}\".format(center_6['y'] + p/2)), float(\"{0:.3f}\".format(center_6['z']))) \n elif ((-limit < center_4['x'] - center_6['x'] < limit) and (center_4['y'] - center_6['y'] > limit)):\n N = Atom(random_atom_1.atom_number, \"N1\", \"N2N\", str(pyrrolic - N_pyrrolic), float(\"{0:.3f}\".format(center_6['x'])), float(\"{0:.3f}\".format(center_6['y'] - p/2)), float(\"{0:.3f}\".format(center_6['z']))) \n elif ((center_4['x'] - center_6['x'] < -limit) and (-limit < center_4['y'] - center_6['y'] < limit)):\n N = Atom(random_atom_1.atom_number, \"N1\", \"N2N\", str(pyrrolic - N_pyrrolic), float(\"{0:.3f}\".format(center_6['x'] + p)), float(\"{0:.3f}\".format(center_6['y'])), float(\"{0:.3f}\".format(center_6['z']))) \n elif ((center_4['x'] - center_6['x'] < -limit) and (center_4['y'] - center_6['y'] < -limit)):\n N = Atom(random_atom_1.atom_number, \"N1\", \"N2N\", str(pyrrolic - N_pyrrolic), float(\"{0:.3f}\".format(center_6['x'] + p)), float(\"{0:.3f}\".format(center_6['y'] + p/2)), float(\"{0:.3f}\".format(center_6['z']))) \n elif ((center_4['x'] - center_6['x'] < -limit) and (center_4['y'] - center_6['y'] > limit)):\n N = Atom(random_atom_1.atom_number, \"N1\", \"N2N\", str(pyrrolic - N_pyrrolic), float(\"{0:.3f}\".format(center_6['x'] + p)), float(\"{0:.3f}\".format(center_6['y'] - p/2)), float(\"{0:.3f}\".format(center_6['z']))) \n elif ((center_4['x'] - center_6['x'] > limit) and (-limit < center_4['y'] - center_6['y'] < limit)):\n N = Atom(random_atom_1.atom_number, \"N1\", \"N2N\", str(pyrrolic - N_pyrrolic), float(\"{0:.3f}\".format(center_6['x'] - p)), float(\"{0:.3f}\".format(center_6['y'])), float(\"{0:.3f}\".format(center_6['z']))) \n elif ((center_4['x'] - center_6['x'] > limit) and (center_4['y'] - center_6['y'] < -limit)):\n N = Atom(random_atom_1.atom_number, \"N1\", \"N2N\", str(pyrrolic - N_pyrrolic), float(\"{0:.3f}\".format(center_6['x'] - p)), float(\"{0:.3f}\".format(center_6['y'] + p/2)), float(\"{0:.3f}\".format(center_6['z']))) \n elif ((center_4['x'] - center_6['x'] > limit) and (center_4['y'] - center_6['y'] > limit)):\n N = Atom(random_atom_1.atom_number, \"N1\", \"N2N\", str(pyrrolic - N_pyrrolic), float(\"{0:.3f}\".format(center_6['x'] - p)), float(\"{0:.3f}\".format(center_6['y'] - p/2)), float(\"{0:.3f}\".format(center_6['z']))) \n for ring in rings:\n if (random_atom_1 in ring):\n for atom in ring:\n try:\n map_3.remove(atom)\n except:\n pass\n try:\n map_2.remove(atom)\n except:\n pass\n try:\n map_2n.remove(atom)\n except:\n pass\n for mol in identify_bonds(atom, atom_list):\n try:\n map_2n.remove(mol[0])\n except:\n pass\n try:\n atom_list.remove(random_atom_1)\n atom_list.remove(random_atom_2)\n except:\n pass\n atom_list.append(N)\n else:\n attempt -= 1\n attempt -= 1\n writepdb(atom_list, filename1)\n print(\"done.\")\n return 'done.'", "def load_routes():\n\n print (\"routes\")\n\n Route.query.delete()\n\n with open(\"seed_data/routes_seed.psv\") as routes:\n for row in routes:\n route, route_acronym = row.strip().split(\"|\")\n\n # Checks if seed is empty, if so, inserts a Null cell into the db\n acronym = None if route_acronym == 'None' else route_acronym\n\n route = Route(route=route,\n route_acronym=acronym)\n\n\n db.session.add(route)\n\n db.session.commit()", "def 
route_creation():\r\n city_ids = json.loads(open(\"cities.json\").read())\r\n cities = []\r\n for id in city_ids:\r\n cities.append(fetch_weather(id))\r\n return Route(cities)", "def generate_random_training(file_name, nb_points):\n file_name = _format_file_extension(file_name)\n acoustic_data = _generate_random_acoustic(nb_points)\n acoustic_data = np.concatenate((acoustic_data, np.ones((nb_points, 1))), axis=1)\n data = pd.DataFrame(acoustic_data, columns=fmd.COLUMN_NAME)\n data.to_csv(file_name, index=False)", "def create_input_file(fpath):\n with open(fpath, 'w') as f:\n f.write(os.path.basename(fpath).split(\"_\")[1])\n f.write(\"\\n\")", "def make_random_forward(pages):\n\n txt = \"\"\"\\\n <script type=\"text/javascript\">\n\n var urls = new Array(PAGES);\n\n function redirect()\n {\n window.location = urls[Math.floor(urls.length*Math.random())];\n }\n\n redirect()\n </script>\n \"\"\".replace(' ','')\n\n pages = ('\"./../'+page+ '.html\"' for page in pages)\n pages = (utils.to_unicode(page) for page in pages)\n \n rand_file = utils.join(export_path,'_random/index.html')\n mkdir(rand_file,isfile=True,isfull=True)\n \n with open(rand_file,'wb') as F:\n F.write(txt.replace('PAGES',','.join(pages)).encode('utf8'))", "def generate_submission_sample(map_path, sample_path):\n with open(sample_path, 'wb') as output:\n output.write('<?xml version=\"1.0\" encoding=\"UTF-8\"?>\\n')\n output.write('<osm>\\n ')\n\n # Write every 10th top level element\n for i, element in enumerate(get_element(map_path)):\n if i % 10 == 0:\n output.write(ET.tostring(element, encoding='utf-8'))\n\n output.write('</osm>')", "def generate_models(input_file):\n if not os.path.exists(input_file):\n console.print(\n f\":pile_of_poo: [bold red]No file found at the given path:[/bold red] [i yellow]{input_file}[/i yellow]\"\n )\n exit(1)\n\n # TODO: Add try/catch for other possible errors\n collection = postman.load_postman_collection_from_file(input_file)\n folders = postman.map_response_bodies_to_folders(collection)\n written_path = postman.write_collection_models_to_files(folders)\n console.print(\":smiley: SUCCESS!\", style=\"bold green\")\n console.print(\"Models written to:\", list(set(written_path)))", "def generate(random, pid, autogen_tools, n):\n\n generator_path = autogen_tools.get_directory(__file__)\n\n template_path = path.join(generator_path, \"code.txt.template\")\n rendered_template_path = path.join(generator_path, \"code.txt\")\n\n autogen_tools.replace_source_tokens(\n template_path,\n {\"flag\": gen_code(n, \"Aviation House\")},\n rendered_template_path\n )\n\n code_link = autogen_tools.generate_resource_link(pid, \"code.txt\", title=\"Encrypted file\")\n\n return {\n \"resource_files\": {\n \"public\": [\n (rendered_template_path, \"code.txt\"),\n ],\n },\n \"static_files\": {\n },\n \"problem_updates\": {\n \"description\": \"<p>We've updated the system to AES. We heard that this is military grade encryption so that should fix everything</p><p>The team have stored the password in %s. Bet you can't get into it</p>\" % code_link\n }\n }" ]
[ "0.587769", "0.5856231", "0.56174666", "0.55805063", "0.55153143", "0.5446747", "0.54272777", "0.54071355", "0.5406519", "0.5362641", "0.5354671", "0.53122556", "0.5305677", "0.5279175", "0.5232032", "0.5200919", "0.51978534", "0.518729", "0.51755005", "0.5140269", "0.5115986", "0.5107255", "0.5096858", "0.5074159", "0.5068046", "0.50665885", "0.506224", "0.5047501", "0.50464946", "0.5036642" ]
0.84894335
0
Tests API call to fetch multiple NS descriptor resources
def test_get_ns_descriptors(get_ns_descriptors_keys): sonata_nsd = SONATAClient.Nsd(HOST_URL) sonata_auth = SONATAClient.Auth(HOST_URL) _token = json.loads(sonata_auth.auth(username=USERNAME, password=PASSWORD)) _token = json.loads(_token["data"]) response = json.loads(sonata_nsd.get_ns_descriptors( token=_token["token"]["access_token"], limit=1000)) response = json.loads(response["data"]) assert isinstance(response, list) if len(response) > 0: assert set(get_ns_descriptors_keys).issubset( response[0].keys()), "All keys should be in the response"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_many_descriptors(self, uuids):", "def test_multiple_gets(uris):\n\n for uri in uris:\n print('='*10 + ' Try uri : {uri} '.format(uri=uri) + '='*10)\n resp = get_api_url(uri)\n print(resp)\n try:\n pprint(resp.json())\n except Exception as e:\n print(resp.text)", "def test_get_ns_descriptors_nsdinfoid():\r\n sonata_nsd = SONATAClient.Nsd(HOST_URL)\r\n sonata_auth = SONATAClient.Auth(HOST_URL)\r\n _token = json.loads(sonata_auth.auth(username=USERNAME, password=PASSWORD))\r\n _token = json.loads(_token[\"data\"])\r\n _nsd_list = json.loads(sonata_nsd.get_ns_descriptors(\r\n token=_token[\"token\"][\"access_token\"]))\r\n _nsd_list = json.loads(_nsd_list[\"data\"])\r\n Helpers._upload_test_nsd(_token=_token[\"token\"][\"access_token\"])\r\n\r\n for _n in _nsd_list:\r\n if \"sonata-demo\" == _n['nsd']['name']:\r\n _nsd = _n['uuid']\r\n\r\n response = json.loads(sonata_nsd.get_ns_descriptors_nsdinfoid(\r\n token=_token[\"token\"][\"access_token\"], nsdinfoid=_nsd))\r\n\r\n Helpers._delete_test_nsd(_token=_token[\"token\"][\"access_token\"])\r\n if response[\"error\"]:\r\n return True\r\n else:\r\n return False", "def test_get_api_resources(self):\n pass", "def GetResourceSample():\n client = CreateClient()\n for e1 in client.GetResources(limit=5).entry:\n e2 = client.GetResource(e1)\n print 'Refetched: ', e2.title.text, e2.resource_id.text", "def test_discover(self):\n client = Client()\n response = client.get('/discover/authors')\n print 'status code for authors', response.status_code\n self.failUnlessEqual(response.status_code, 200)\n\n response = client.get('/simple_search') \n print 'status code for simple search', response.status_code\n self.failUnlessEqual(response.status_code, 200)\n\n response = client.get('/extended_search') \n print 'status code for extended search', response.status_code\n self.failUnlessEqual(response.status_code, 200)\n \n response = client.get('/discover/languages') \n print 'status code for languages', response.status_code\n self.failUnlessEqual(response.status_code, 200) \n \n response = client.get('/discover/subjects') \n print 'status code for tags', response.status_code\n self.failUnlessEqual(response.status_code, 200)", "async def test_1() -> None:\n LOG.debug(\"Test info endpoint\")\n async with aiohttp.ClientSession() as session:\n async with session.get(\"http://localhost:5050/\") as resp:\n data = await resp.json()\n if \"datasets\" in data and len(data[\"datasets\"]) > 0:\n for data_ids in data[\"datasets\"]:\n # In info endpoint we get all dataset ids be them PUBLIC, REGISTERED or CONTROLLED\n assert data_ids[\"id\"] in DATASET_IDS_LIST, \"Dataset ID Error or not in list.\"\n else:\n sys.exit(\"Info Endpoint Error!\")", "def test_permission_resource_detail(self):\n for i in API_MODELS_LOOKUP:\n url = reverse('api:{}_api_resource'.format(i))\n self.client.login(username='normaluser', password='pass')\n response = self.client.get(url)\n res_list = response.json()\n if res_list: # Object(s) exist.\n obj_id = res_list[0]['id']\n url = reverse('api:{}_api_resource'.format(i), kwargs={'pk': obj_id})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.client.logout()\n self.client.login(username='readonlyuser', password='pass')\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.client.logout()\n response = self.client.get(url) # Anonymous user\n self.assertEqual(response.status_code, 200)\n # The API response is a bit different for these models.\n # TODO: test filtering and 
pagination.\n for i in API_MODELS:\n url = reverse('api:{}_api_resource'.format(i))\n self.client.login(username='normaluser', password='pass')\n response = self.client.get(url)\n res_list = response.json()\n if res_list: # Object(s) exist.\n obj_id = res_list['objects'][0]['id']\n url = reverse('api:{}_api_resource'.format(i), kwargs={'pk': obj_id})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.client.logout()\n self.client.login(username='readonlyuser', password='pass')\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.client.logout()\n response = self.client.get(url) # Anonymous user\n self.assertEqual(response.status_code, 200)", "def test_all_endpoint_status():\n r = client.get('/openapi.json')\n assert r.status_code == 200\n for e in r.json()['paths'].keys():\n r = client.get(e)\n assert r.status_code == 200\n\n for e in ['plot']:\n r = client.get(e)\n assert r.status_code == 200", "def test_get_cloud_resources(self):\n pass", "def test_context_data_info_message_for_multiple_result(self):\n factories.SourceDatasetFactory.create(i_dbgap_description='lorem ipsum')\n factories.SourceDatasetFactory.create(i_dbgap_description='lorem ipsum 2')\n response = self.client.get(self.get_url(), {'description': 'lorem'})\n messages = list(response.wsgi_request._messages)\n self.assertEqual(len(messages), 1)\n self.assertEqual(str(messages[0]), '2 results found.')", "def test_get_resource(self):\n for i in range(11):\n self.app.post(f'/v1/resource/{ResourceTypeName.get()}', data=json.dumps({'actions': ['tr:action1']}),\n headers=admin_headers)\n self._test_paging('/v1/resources', admin_headers, 10, 'resources')", "async def test_datasets_access_call_multiple(self):\n pool = asynctest.CoroutineMock()\n pool.acquire().__aenter__.return_value = Connection(accessData=[{'accesstype': 'CONTROLLED', 'datasetid': 'mock:controlled:id'},\n {'accesstype': 'PUBLIC', 'datasetid': 'mock:public:id'}])\n result = await fetch_datasets_access(pool, None)\n # for now it can return a tuple of empty datasets\n # in order to get a response we will have to mock it\n # in Connection() class\n self.assertEqual(result, (['mock:public:id'], [], ['mock:controlled:id']))", "def test_fetch_all():\n response = requests.get('http://localhost:5000/api/persons')\n data = response.json()\n\n assert response.status_code == 200\n for field in FIELDS:\n assert field in data[0]", "def test_intent_classifier_get_details_all(self):\n pass", "def test_ListResources(self, zipped=False):\n from openflow.dummyom.models import DummyOM\n slice_urn, cred = self.create_ch_slice()\n options = dict(geni_compressed=zipped, geni_available=True)\n rspec = wrap_xmlrpc_call(\n self.am_client.ListResources,\n [cred, options], {}, settings.TIMEOUT)\n \n logger.debug(\"Got Advertisement RSpec: \\n%s\" % rspec)\n \n if zipped:\n import zlib, base64\n rspec = zlib.decompress(base64.b64decode(rspec))\n \n # Create switches and links\n self.switches, self.links = parse_rspec(rspec)\n \n # check the number of switches and links\n num_links = sum([len(d.get_switches()) for d in DummyOM.objects.all()])\n self.assertEqual(len(self.switches),\n num_links)\n self.assertEqual(len(self.links),\n settings.NUM_LINKS_PER_AGG * settings.NUM_DUMMY_OMS)", "def test_get_component_descriptors_by_types_using_get(self):\n pass", "async def test_retrieve_many(self):\n expected = [{\n '_id': 'id',\n 'name': 'name',\n 'version': 4,\n 'status': 'active'\n }]\n rsps = 
respx.get(f'{PROVISIONING_API_URL}/users/current/provisioning-profiles') \\\n .mock(return_value=Response(200, json=expected))\n profiles = await provisioning_client.get_provisioning_profiles(5, 'active')\n assert rsps.calls[0].request.url == \\\n f'{PROVISIONING_API_URL}/users/current/provisioning-profiles?version=5&status=active'\n assert rsps.calls[0].request.headers['auth-token'] == 'header.payload.sign'\n assert profiles == expected", "def Run(self, args):\n identifiers = args.CONCEPTS.api.Parse().AsDict()\n\n result = apigee.APIsClient.Describe(identifiers)\n\n # Must use vars(args) to check whether there's even a revision field in the\n # parsed args namespace. It's only present for ALPHA track.\n requested_revision = None\n if \"revision\" in vars(args):\n requested_revision = args.revision\n\n # If the user didn't ask for revision data, the response from\n # APIsClient.Describe() is good enough.\n if requested_revision is None and not args.verbose:\n return result\n\n rev_nums = result[\"revision\"]\n if requested_revision is not None:\n if requested_revision not in rev_nums:\n message = \"No revision %r among API %s's revisions: %s\"%(\n requested_revision, identifiers[\"apisId\"], rev_nums)\n raise exceptions.InvalidArgumentException(\"--revision\", message)\n # No need to check whether this revision exists within the original list;\n # if there's no such revision, RevisionsClient will raise an appropriate\n # error.\n rev_nums = [requested_revision]\n\n revisions = []\n for revision in rev_nums:\n identifiers[\"revisionsId\"] = revision\n revision_result = apigee.RevisionsClient.Describe(identifiers)\n del revision_result[\"name\"]\n revisions.append(revision_result)\n del result[\"revision\"]\n result[\"revisions\"] = revisions\n\n return result", "def test_get_descriptor1(self):\n # create some Pooled Normals\n poolednormal_filegroup_instance = FileGroup.objects.get(name=\"Pooled Normal\")\n fastq_filetype_instance = FileType.objects.get(name=\"fastq\")\n poolednormal_R1_file_instance = File.objects.create(\n file_type=fastq_filetype_instance,\n file_group=poolednormal_filegroup_instance,\n file_name=\"FROZENPOOLEDNORMAL.R1.fastq\",\n path=\"/FROZENPOOLEDNORMAL.R1.fastq\",\n )\n poolednormal_R1_filemetadata_instance = FileMetadata.objects.create(\n file=poolednormal_R1_file_instance,\n metadata={\n settings.RECIPE_METADATA_KEY: \"IMPACT468\",\n },\n )\n poolednormal_R2_file_instance = File.objects.create(\n file_type=fastq_filetype_instance,\n file_group=poolednormal_filegroup_instance,\n file_name=\"FROZENPOOLEDNORMAL.R2.fastq\",\n path=\"/FROZENPOOLEDNORMAL.R2.fastq\",\n )\n poolednormal_R2_filemetadata_instance = FileMetadata.objects.create(\n file=poolednormal_R2_file_instance,\n metadata={\n settings.RECIPE_METADATA_KEY: \"IMPACT468\",\n },\n )\n pooled_normals = FileMetadata.objects.all()\n\n descriptor = get_descriptor(bait_set=\"IMPACT468_BAITS\", pooled_normals=pooled_normals)\n self.assertEqual(descriptor, \"IMPACT468\")\n\n descriptor = get_descriptor(bait_set=\"IMPACT468\", pooled_normals=pooled_normals)\n self.assertEqual(descriptor, \"IMPACT468\")", "def test_access_all_data_all_endpoints(self):\n\n # Some end points just can't be fetched so we have to ignore them.\n end_point_exceptions = [\n \"/api/help/\",\n \"/api/test_host/\",\n \"/api/system_status/\",\n \"/api/updates_available/\",\n \"/api/session/\",\n \"/api/action/\",\n \"/api/run_stratagem/\",\n \"/api/stratagem_configuration/\",\n ]\n\n end_points = self.get_json_by_uri(\"/api/\", args={\"limit\": 
0})\n\n for end_point in end_points.values():\n if end_point[\"list_endpoint\"] not in end_point_exceptions:\n import sys\n\n sys.stderr.write(\"\\nReading endpoint %s\\n\" % end_point[\"list_endpoint\"])\n self.get_json_by_uri(end_point[\"list_endpoint\"], args={\"limit\": 0})\n sys.stderr.write(\"\\nRead endpoint %s\\n\" % end_point[\"list_endpoint\"])", "def test_get_list(self):\n pass", "def test_get_descriptor2(self):\n # create some Pooled Normals\n poolednormal_filegroup_instance = FileGroup.objects.get(name=\"Pooled Normal\")\n fastq_filetype_instance = FileType.objects.get(name=\"fastq\")\n poolednormal_R1_file_instance = File.objects.create(\n file_type=fastq_filetype_instance,\n file_group=poolednormal_filegroup_instance,\n file_name=\"FROZENPOOLEDNORMAL.R1.fastq\",\n path=\"/FROZENPOOLEDNORMAL.R1.fastq\",\n )\n poolednormal_R1_filemetadata_instance = FileMetadata.objects.create(\n file=poolednormal_R1_file_instance,\n metadata={\n settings.RECIPE_METADATA_KEY: \"foo_IMPACT468_bar\",\n },\n )\n poolednormal_R2_file_instance = File.objects.create(\n file_type=fastq_filetype_instance,\n file_group=poolednormal_filegroup_instance,\n file_name=\"FROZENPOOLEDNORMAL.R2.fastq\",\n path=\"/FROZENPOOLEDNORMAL.R2.fastq\",\n )\n poolednormal_R2_filemetadata_instance = FileMetadata.objects.create(\n file=poolednormal_R2_file_instance,\n metadata={\n settings.RECIPE_METADATA_KEY: \"foo_IMPACT468_bar\",\n },\n )\n pooled_normals = FileMetadata.objects.all()\n\n descriptor = get_descriptor(bait_set=\"IMPACT468\", pooled_normals=pooled_normals)\n self.assertEqual(descriptor, None)\n\n descriptor = get_descriptor(bait_set=\"IMPACT468_bar\", pooled_normals=pooled_normals)\n self.assertEqual(descriptor, None)\n\n descriptor = get_descriptor(bait_set=\"foo_IMPACT468\", pooled_normals=pooled_normals)\n self.assertEqual(descriptor, None)\n\n descriptor = get_descriptor(bait_set=\"foo_IMPACT468_bar\", pooled_normals=pooled_normals)\n self.assertEqual(descriptor, \"foo_IMPACT468_bar\")", "def test_list_all_response_descriptor_variables_library_variable_set_library_variable_set_resource(self):\n pass", "def test_getDigitalObjects(self):\n cases = [\n (self.test_eac + 'NE00001.xml', 0),\n (self.test_eac + 'NE00100.xml', 1),\n (self.test_eac + 'NE01101.xml', 15),\n (self.test_eac + 'NE01400.xml', 1),\n ]\n for case in cases:\n source, expected = case\n doc = EacCpf.EacCpf(source, 'http://www.example.com/metadata.xml', 'http://www.example.com/presentation.html')\n self.assertNotEqual(doc, None)\n result = doc.getDigitalObjects()\n self.assertNotEqual(result, None)\n self.assertEqual(len(result), expected)", "def test_get_list(self):\n result = self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"get\",\n \"vendor.fetchai.connections.p2p_libp2p.config.entry_peers\",\n ],\n standalone_mode=False,\n )\n assert result.exit_code == 0\n assert result.output == \"[]\\n\"", "def test_get(client, example_records, h, prefix):\n id_ = example_records[0].id\n\n res = client.get(f'{prefix}{id_}', headers=h)\n assert res.status_code == 200\n assert res.json['id'] == id_\n # Test links\n assert res.json['links'] == {\n 'self': 'https://127.0.0.1:5000/api/vocabularies/licenses/cc-by'\n }", "def test_fetchSpecific(self):\n d = self.client.fetchSpecific('7')\n self.assertEqual(\n self.transport.value(), b'0001 FETCH 7 BODY[]\\r\\n')\n self.client.lineReceived(b'* 7 FETCH (BODY[] \"Some body\")')\n self.client.lineReceived(b'0001 OK FETCH completed')\n self.assertEqual(\n self.successResultOf(d), {7: [['BODY', 
[], \"Some body\"]]})", "def test_custom_query_response_descriptor_octopus_server_web_api_actions_list_defects_responder(self):\n pass", "def test_context_data_info_message_for_multiple_result(self):\n factories.SourceDatasetFactory.create_batch(2, i_dbgap_description='lorem ipsum',\n source_study_version__study=self.study)\n response = self.client.get(self.get_url(self.study.pk), {'description': 'lorem'})\n messages = list(response.wsgi_request._messages)\n self.assertEqual(len(messages), 1)\n self.assertEqual(str(messages[0]), '2 results found.')" ]
[ "0.6480714", "0.64030355", "0.63878626", "0.633152", "0.61416095", "0.6027618", "0.59268606", "0.5893378", "0.5883436", "0.5867936", "0.5860991", "0.58218473", "0.58013874", "0.5790113", "0.5780573", "0.5772255", "0.57513046", "0.5746608", "0.5731543", "0.5729499", "0.5722015", "0.5696157", "0.5681511", "0.5677031", "0.565596", "0.56494033", "0.56428707", "0.5629865", "0.5628914", "0.5623051" ]
0.65850234
0
Tests API call to read information about an NS descriptor resource
def test_get_ns_descriptors_nsdinfoid(): sonata_nsd = SONATAClient.Nsd(HOST_URL) sonata_auth = SONATAClient.Auth(HOST_URL) _token = json.loads(sonata_auth.auth(username=USERNAME, password=PASSWORD)) _token = json.loads(_token["data"]) _nsd_list = json.loads(sonata_nsd.get_ns_descriptors( token=_token["token"]["access_token"])) _nsd_list = json.loads(_nsd_list["data"]) Helpers._upload_test_nsd(_token=_token["token"]["access_token"]) for _n in _nsd_list: if "sonata-demo" == _n['nsd']['name']: _nsd = _n['uuid'] response = json.loads(sonata_nsd.get_ns_descriptors_nsdinfoid( token=_token["token"]["access_token"], nsdinfoid=_nsd)) Helpers._delete_test_nsd(_token=_token["token"]["access_token"]) if response["error"]: return True else: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_ns_descriptors(get_ns_descriptors_keys):\r\n sonata_nsd = SONATAClient.Nsd(HOST_URL)\r\n sonata_auth = SONATAClient.Auth(HOST_URL)\r\n _token = json.loads(sonata_auth.auth(username=USERNAME, password=PASSWORD))\r\n _token = json.loads(_token[\"data\"])\r\n\r\n response = json.loads(sonata_nsd.get_ns_descriptors(\r\n token=_token[\"token\"][\"access_token\"], limit=1000))\r\n response = json.loads(response[\"data\"])\r\n\r\n assert isinstance(response, list)\r\n if len(response) > 0:\r\n assert set(get_ns_descriptors_keys).issubset(\r\n response[0].keys()), \"All keys should be in the response\"", "def test_get_info(self):\n pass", "def get_discovery_summary():\n pass", "def test_inheritedDescriptors(self):\n sddaemon = self.getDaemon(7, 3)\n self.assertEqual([7, 8, 9], sddaemon.inheritedDescriptors())", "async def test_get_ac_descr(test_db):\n resp = await test_db.get_ac_descr(\"NC_000007.13\")\n assert resp is not None\n\n resp = await test_db.get_ac_descr(\"NC_000007.14\")\n assert resp is None", "def test_get_api_resources(self):\n pass", "def describe():", "def get_ns_descriptors(self, token, _filter=None, host=None, port=None): \n if host is None:\n base_path = self._base_path.format(self._host, self._port)\n else:\n base_path = self._base_path.format(host, port)\n\n query_path = ''\n if _filter:\n query_path = '?_admin.type=' + _filter\n\n _endpoint = \"{0}/nsd/v1/ns_descriptors_content{1}\".format(base_path, query_path)\n result = {'error': True, 'data': ''}\n headers = {\"Content-Type\": \"application/yaml\", \"accept\": \"application/json\",\n 'Authorization': 'Bearer {}'.format(token)}\n\n try:\n r = requests.get(_endpoint, params=None, verify=False, stream=True, headers=headers)\n except Exception as e:\n result['data'] = str(e)\n return result\n\n if r.status_code == requests.codes.ok:\n result['error'] = False\n\n result['data'] = r.text\n return json.dumps(result)", "def GetResourceSample():\n client = CreateClient()\n for e1 in client.GetResources(limit=5).entry:\n e2 = client.GetResource(e1)\n print 'Refetched: ', e2.title.text, e2.resource_id.text", "def test_describe(self):\n target = \"foo\"\n channel = \"#bar\"\n action = \"waves\"\n self.protocol.describe(target, action)\n self.protocol.describe(channel, action)\n expected = [\n \"PRIVMSG {} :\\01ACTION {}\\01\".format(target, action),\n \"PRIVMSG {} :\\01ACTION {}\\01\".format(channel, action),\n \"\",\n ]\n self.assertEqualBufferValue(self.transport.value().split(b\"\\r\\n\"), expected)", "def GetResourcesSample():\n client = CreateClient()\n # Get a feed and print it\n feed = client.GetResources()\n PrintFeed(feed)", "def test_iosxr_netconf_get(nornir):\n nr = nornir.filter(name=DEVICE_NAME)\n filter = \"\"\"\n <interfaces xmlns=\"http://openconfig.net/yang/interfaces\">\n <interface>\n <name>MgmtEth0/0/CPU0/0</name>\n </interface>\n </interfaces>\n \"\"\"\n result = nr.run(netconf_get, filter_type=\"subtree\", path=filter, xmldict=True)\n assert result[DEVICE_NAME].result\n assert result[DEVICE_NAME].result[\"rpc\"].data_xml\n assert result[DEVICE_NAME].result[\"xml_dict\"][\"data\"][\"interfaces\"][\"interface\"][0][\"state\"][\"enabled\"]", "def _describe(self) -> Dict[str, Any]:", "def test_get_descriptor1(self):\n # create some Pooled Normals\n poolednormal_filegroup_instance = FileGroup.objects.get(name=\"Pooled Normal\")\n fastq_filetype_instance = FileType.objects.get(name=\"fastq\")\n poolednormal_R1_file_instance = File.objects.create(\n file_type=fastq_filetype_instance,\n 
file_group=poolednormal_filegroup_instance,\n file_name=\"FROZENPOOLEDNORMAL.R1.fastq\",\n path=\"/FROZENPOOLEDNORMAL.R1.fastq\",\n )\n poolednormal_R1_filemetadata_instance = FileMetadata.objects.create(\n file=poolednormal_R1_file_instance,\n metadata={\n settings.RECIPE_METADATA_KEY: \"IMPACT468\",\n },\n )\n poolednormal_R2_file_instance = File.objects.create(\n file_type=fastq_filetype_instance,\n file_group=poolednormal_filegroup_instance,\n file_name=\"FROZENPOOLEDNORMAL.R2.fastq\",\n path=\"/FROZENPOOLEDNORMAL.R2.fastq\",\n )\n poolednormal_R2_filemetadata_instance = FileMetadata.objects.create(\n file=poolednormal_R2_file_instance,\n metadata={\n settings.RECIPE_METADATA_KEY: \"IMPACT468\",\n },\n )\n pooled_normals = FileMetadata.objects.all()\n\n descriptor = get_descriptor(bait_set=\"IMPACT468_BAITS\", pooled_normals=pooled_normals)\n self.assertEqual(descriptor, \"IMPACT468\")\n\n descriptor = get_descriptor(bait_set=\"IMPACT468\", pooled_normals=pooled_normals)\n self.assertEqual(descriptor, \"IMPACT468\")", "def test_get_cloud_resources(self):\n pass", "def test_info_get(self):\n response = self.client.open(\n '/info',\n method='GET')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_020_describe_by_valid_pid(self):\n # Verify that the checksums retrieved by getChecksum match what listObjects\n # reported.\n for object_list in context.slices:\n for object_info in object_list.objectInfo:\n client = test_client.TestClient(context.node[\"baseurl\"])\n pid = object_info.identifier.value()\n response = client.describe(context.TOKEN, pid)\n headers = response.getheaders()\n # Build dict with lower case keys.\n headers_lower = dict(\n (header.lower(), value) for header, value in headers\n )\n # Check for the required headers.\n assert \"date\" in headers_lower\n assert \"content-type\" in headers_lower\n assert \"content-length\" in headers_lower\n # Verify that the object length reported by describe matches what was\n # reported by listObjects.\n assert int(headers_lower[\"content-length\"]) == object_info.size\n # Verify that date is a valid date.\n assert d1_common.date_time.dt_from_iso8601_str(headers_lower[\"date\"])\n # Verify that date matches what was reported by listObjects.\n # TODO: Fails with: TypeError: can't compare offset-naive and\n # offset-aware datetimes\n # date = d1_common.date_time.from_iso8601(headers_lower['date'])\n # self.assertEqual(date, object_info.dateSysMetadataModified)", "def main():\n for dev in Discover.discover().values():\n print(dev)", "def test_getDigitalObjects(self):\n cases = [\n (self.test_eac + 'NE00001.xml', 0),\n (self.test_eac + 'NE00100.xml', 1),\n (self.test_eac + 'NE01101.xml', 15),\n (self.test_eac + 'NE01400.xml', 1),\n ]\n for case in cases:\n source, expected = case\n doc = EacCpf.EacCpf(source, 'http://www.example.com/metadata.xml', 'http://www.example.com/presentation.html')\n self.assertNotEqual(doc, None)\n result = doc.getDigitalObjects()\n self.assertNotEqual(result, None)\n self.assertEqual(len(result), expected)", "def test_load_response_descriptor_projects_release_release_resource_spaces(self):\n pass", "def get_descriptor(self, uuid):", "def test_load_response_descriptor_projects_release_release_resource(self):\n pass", "def test_discover(self):\n client = Client()\n response = client.get('/discover/authors')\n print 'status code for authors', response.status_code\n self.failUnlessEqual(response.status_code, 200)\n\n response = client.get('/simple_search') \n print 'status 
code for simple search', response.status_code\n self.failUnlessEqual(response.status_code, 200)\n\n response = client.get('/extended_search') \n print 'status code for extended search', response.status_code\n self.failUnlessEqual(response.status_code, 200)\n \n response = client.get('/discover/languages') \n print 'status code for languages', response.status_code\n self.failUnlessEqual(response.status_code, 200) \n \n response = client.get('/discover/subjects') \n print 'status code for tags', response.status_code\n self.failUnlessEqual(response.status_code, 200)", "def test_api_can_get_metadata(self):\n response = self.client.get('/metadata/', format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "async def test_device_info(hass):\n api_discovery = deepcopy(API_DISCOVERY_RESPONSE)\n api_discovery[\"data\"][\"apiList\"].append(API_DISCOVERY_BASIC_DEVICE_INFO)\n\n with patch.dict(API_DISCOVERY_RESPONSE, api_discovery):\n config_entry = await setup_axis_integration(hass)\n device = hass.data[AXIS_DOMAIN][config_entry.unique_id]\n\n assert device.api.vapix.firmware_version == \"9.80.1\"\n assert device.api.vapix.product_number == \"M1065-LW\"\n assert device.api.vapix.product_type == \"Network Camera\"\n assert device.api.vapix.serial_number == \"00408C123456\"", "def test_get_component_descriptors_by_types_using_get(self):\n pass", "def test_get_requests(self):\n response = self.client.open('/api/provisioning/port',\n method='GET')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def test_get_pci_device_list(self):\n pass", "def get_many_descriptors(self, uuids):", "def test_get_devices(self):\n pass" ]
[ "0.64663666", "0.608117", "0.5999047", "0.5969652", "0.59466934", "0.59190524", "0.5892444", "0.5845748", "0.5732233", "0.5668746", "0.5648303", "0.5639067", "0.56217575", "0.56203", "0.56144845", "0.5610858", "0.5607481", "0.560151", "0.5600352", "0.55998194", "0.5585602", "0.5585311", "0.5577875", "0.55508226", "0.55503225", "0.554462", "0.55308837", "0.55233955", "0.5520724", "0.5515232" ]
0.70114577
0
Tests API call to delete NS descriptor resources
def test_delete_ns_descriptors_nsdinfoid(delete_ns_descriptors_nsdinfoid_keys): sonata_vnfpkgm = SONATAClient.VnfPkgm(HOST_URL) sonata_nsd = SONATAClient.Nsd(HOST_URL) sonata_auth = SONATAClient.Auth(HOST_URL) _token = json.loads(sonata_auth.auth(username=USERNAME, password=PASSWORD)) _token = json.loads(_token["data"]) _nsd_list = json.loads(sonata_nsd.get_ns_descriptors( token=_token["token"]["access_token"])) _nsd_list = json.loads(_nsd_list["data"]) _nsd = None for _n in _nsd_list: if "sonata-demo" == _n['nsd']['name']: _nsd = _n['uuid'] time.sleep(10) # Wait for NSD onboarding response = json.loads(sonata_nsd.delete_ns_descriptors_nsdinfoid( token=_token["token"]["access_token"], nsdinfoid=_nsd)) assert isinstance(response, dict) assert response["data"] == "{\"error\":\"The NSD ID None does not exist\"}" time.sleep(2) #Wait for NSD onboarding _vnfd_list = json.loads(sonata_vnfpkgm.get_vnf_packages( token=_token["token"]["access_token"])) _vnfd_list = json.loads(_vnfd_list["data"]) _vnfd = None for _v in _vnfd_list: if "vnfd_example" == _v['uuid']: _vnfd = _v['uuid'] response = None if _vnfd: response = json.loads(sonata_vnfpkgm.delete_vnf_packages_vnfpkgid( token=_token["token"]["access_token"], vnfPkgId=_vnfd)) assert isinstance(response, dict) assert response["data"] == ""
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_delete_on_background_response_descriptor_projects_release_release_resource_spaces(self):\n pass", "def test_delete_on_background_response_descriptor_projects_release_release_resource(self):\n pass", "def test_delete_on_background_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_delete():\n sample_uuid = get_sample_id()\n response = requests.delete(f'http://localhost:5000/api/persons/{sample_uuid}')\n\n assert response.status_code == 200", "def test_delete_api_resource(self, mock_delete: Mock, mock_set_token: Mock) -> None:\n exonet_client = ExonetClient(\"kaSD0ffAD1ldSA92A0KODkaksda02KDAK\")\n exonet_client.delete_api_resource(\n ApiResource({\"type\": \"dns_records\", \"id\": \"qjJWA0Km8xgw\"})\n )\n\n # Check mock calls.\n assert mock_delete.call_count == 1\n assert mock_set_token.call_count == 1\n\n # Check call args.\n assert mock_set_token.call_args[0][0] == \"kaSD0ffAD1ldSA92A0KODkaksda02KDAK\"", "def test_delete_device(self):\n pass", "def test_delete_device(self):\n pass", "def delete():", "def test_delete_nonexistent_resource_rpc(self, mcg_obj):\n response = mcg_obj.send_rpc_query(\n \"pool_api\", \"delete_namespace_resource\", {\"name\": \"notexisting_resource\"}\n )\n assert \"error\" in response.json()", "def test_delete_non_existing(created_test_helper, request):\n # delete all files from listed files\n response = created_test_helper.delete_single(-1)\n\n # Validate returned json contains right error\n created_test_helper.validate_response_json(request.node.name, response)", "def test_delete(client):\n rv = delete(client, 'Michael')\n assert json.loads(rv.data.decode())['code'] == 0\n assert json.loads(rv.data.decode())['owner'] == 'Michael'", "def test_do_delete(delete_resource: MagicMock, response: execution.ResponseInfo):\n delete_resource.return_value = response\n bundle = MagicMock()\n bundle.resources.matching.return_value = [MagicMock(), MagicMock()]\n action = interface.CommandAction(MagicMock(), [], bundle)\n interface.do_delete(action)\n assert delete_resource.call_count == 2", "def test_delete_entity(self):\n\n storage = StringIO.StringIO()\n c = pycurl.Curl()\n c.setopt(c.URL,\"http://127.0.0.1:8090/compute/this_is_bilel\")\n c.setopt(c.HTTPHEADER, ['Content-Type: application/occi+json', 'Accept: application/occi+json'])\n c.setopt(c.CUSTOMREQUEST, 'DELETE')\n c.setopt(c.WRITEFUNCTION, storage.write)\n c.perform()\n content = storage.getvalue()\n print \" ===== Body content =====\\n \" + content + \" ==========\\n\"", "def test_delete(self):\n pass", "def test_delete_resource_group(self):\n pass", "def test_delete_hyperflex_capability_info(self):\n pass", "def test_delete(self):\n client = RestClient(host=self.host, username='')\n rest_url = 'some/url/'\n \n # Mock good response\n with responses.RequestsMock() as rsps:\n rsps.add(responses.DELETE, f'{self.host}/{rest_url}', status=200,\n json={'value':\"good!\"})\n r = client.delete(rest_url)", "def test_delete_provisioning_request(self):\n response = self.client.open('/api/provisioning/port/{requestId}'.format(requestId='requestId_example'),\n method='DELETE')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def test_delete_on_background_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_delete_collection_net_namespace(self):\n pass", "def test_delete_namespaced_route(self):\n pass", "def test_delete_hyperflex_server_model(self):\n pass", "def 
test_delete_file_output(self):\n response = self.client.open(\n '/v1/control/file/{id}'.format(id='id_example'),\n method='DELETE',\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_delete_net_namespace(self):\n pass", "def _delete(self, **kwargs):\n\n resource_name = self._get_resource_name(**kwargs)\n config = misc_utils.resolve_config(\n kwargs.pop('config', None),\n kwargs.pop('config_file', None),\n required=False\n )\n\n return self._make_request(\n uri='%s/%s' % (self._metadata['uri'], resource_name),\n method='DELETE',\n config=config\n )", "def delete(self):\n # type: () -> bool\n headers = Headers({\"content-type\": \"application/json\", \"accept\": \"application/json\"})\n return self.connection.api_call(\"DELETE\", [\"v1\", \"resources\", self.id], headers=headers)", "def test_delete_collection_namespaced_route(self):\n pass", "def test_delete_device_by_id(self):\n pass", "def test_otoroshi_controllers_adminapi_tcp_service_api_controller_delete_entity_action(self):\n pass", "def test_delete_resource_used_in_ns_bucket_rpc(\n self, mcg_obj, cld_mgr, ns_resource_factory, bucket_factory\n ):\n # Create the namespace resources and verify health\n _, resource1 = ns_resource_factory()\n _, resource2 = ns_resource_factory()\n\n # Create the namespace bucket on top of the namespace resource\n bucket_factory(\n amount=1,\n interface=\"mcg-namespace\",\n write_ns_resource=resource1,\n read_ns_resources=[resource1, resource2],\n )\n response = mcg_obj.send_rpc_query(\n \"pool_api\", \"delete_namespace_resource\", {\"name\": resource2}\n )\n assert \"error\" in response.json()" ]
[ "0.70575756", "0.68376297", "0.67838246", "0.66237175", "0.64669347", "0.64376366", "0.64376366", "0.6399817", "0.6381159", "0.63791645", "0.63716006", "0.6369237", "0.6348905", "0.6325135", "0.6323571", "0.62920225", "0.62577426", "0.62550163", "0.6228073", "0.62071025", "0.61685973", "0.6162418", "0.6158214", "0.6137726", "0.61341506", "0.61318886", "0.61253", "0.61205816", "0.61158967", "0.61151266" ]
0.70335805
1