query (string, 9–9.05k chars) | document (string, 10–222k chars) | metadata (dict) | negatives (list, length 30) | negative_scores (list, length 30) | document_score (string, 4–10 chars) | document_rank (string, 2 classes) |
---|---|---|---|---|---|---|
return word embedding from path file | def gen_embedding(path):
word_emb = {}
with open(path, encoding='utf-8') as f:
for line in tqdm(f):
values = line.split()
word_emb[values[0]] = np.asarray(values[1:], dtype='float32')
return word_emb | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_embedding_file(self):\n if self.language == 'en':\n embed_file_dir = self.embedding_path\n wv = KeyedVectors.load_word2vec_format(embed_file_dir, binary=True)\n self.pretrained_embedding = {}\n for word in wv.vocab.keys():\n normalized_word = normalization.process(self.language.upper(), word, letters_to_keep='', letters_to_remove='',\n lowercase=True, remove_repetitions_count=-1, remove_punct=True,\n remove_digits=True, remove_vowels=False, remove_diacritics=True,\n remove_spaces=False, remove_apostrophe=True, copy_through=False,\n keep_romanized_text=False)\n self.pretrained_embedding[normalized_word] = wv[word]\n self.embed_dim = 300\n\n else:\n embed_file_dir = self.embedding_path\n fin = open(embed_file_dir, 'r', encoding='utf-8', newline='\\n', errors='ignore')\n data = {}\n for line in fin:\n if len(line.split()) == 2: # header\n continue\n tokens = line.rstrip().split(' ')\n word = tokens[0]\n normalized_word = normalization.process(self.language.upper(), word, letters_to_keep='', letters_to_remove='',\n lowercase=True, remove_repetitions_count=-1, remove_punct=True,\n remove_digits=True, remove_vowels=False, remove_diacritics=True,\n remove_spaces=False, remove_apostrophe=True, copy_through=False,\n keep_romanized_text=False)\n data[normalized_word] = np.array(tokens[1:])\n self.pretrained_embedding = data\n self.embed_dim = 300",
"def _load_word_embedding(self, lang):\n dict_fold = 'train' # which fold of the data will be used to produce results\n if self.args.task == 'conneau' or self.args.task == 'xling':\n data_dir = os.path.join(self.args.data_dir, 'MUSE')\n lang_path = os.path.join(data_dir, 'wiki.' + lang + '.vec')\n elif self.args.task == 'dinu':\n data_dir = os.path.join(self.args.data_dir, 'dinu')\n lang_path = os.path.join(data_dir, 'embeddings', lang + '.emb.txt')\n elif self.args.task == 'zhang':\n order = [lang,trg]\n if lang == 'en':\n order = order[::-1]\n data_dir = os.path.join(self.args.home_dir,'pkg/UBiLexAT/data/','-'.join(order))\n lang_path = os.path.join(data_dir, 'word2vec.' + lang)\n\n langfile = open(lang_path, encoding=self.args.encoding, errors='surrogateescape')\n words, xs = embeddings.read(langfile, self.args.maxs)\n langfile.close()\n # Build word to index map\n word2ind = {word: i for i, word in enumerate(words)}\n\n return xs, words, word2ind",
"def read_txt_embeddings(path, params):\n word2id = {}\n vectors = []\n\n # load pretrained embeddings\n _emb_dim_file = params.emb_dim\n with io.open(path, 'r', encoding='utf-8', newline='\\n', errors='ignore') as f:\n for i, line in enumerate(f):\n if i == 0:\n split = line.split()\n assert len(split) == 2\n assert _emb_dim_file == int(split[1])\n continue\n word, vect = line.rstrip().split(' ', 1)\n vect = np.fromstring(vect, sep=' ')\n if word in word2id:\n logger.warning(\"Word \\\"%s\\\" found twice!\" % word)\n continue\n if not vect.shape == (_emb_dim_file,):\n logger.warning(\"Invalid dimension (%i) for word \\\"%s\\\" in line %i.\"\n % (vect.shape[0], word, i))\n continue\n assert vect.shape == (_emb_dim_file,)\n word2id[word] = len(word2id)\n vectors.append(vect[None])\n\n assert len(word2id) == len(vectors)\n logger.info(\"Loaded %i pretrained word embeddings from %s\" % (len(vectors), path))\n\n # compute new vocabulary / embeddings\n embeddings = np.concatenate(vectors, 0)\n embeddings = torch.from_numpy(embeddings).float()\n\n assert embeddings.size() == (len(word2id), params.emb_dim)\n return word2id, embeddings",
"def load_embeddings(embedding_path):\n print('loading word embeddings from %s' % embedding_path)\n weight_vectors = []\n word_idx = {}\n with codecs.open(embedding_path, encoding='utf-8') as f:\n for line in f:\n word, vec = line.split(u' ', 1)\n word_idx[word] = len(weight_vectors)\n weight_vectors.append(np.array(vec.split(), dtype=np.float32))\n # Annoying implementation detail; '(' and ')' are replaced by '-LRB-' and\n # '-RRB-' respectively in the parse-trees.\n word_idx[u'-LRB-'] = word_idx.pop(u'(')\n word_idx[u'-RRB-'] = word_idx.pop(u')')\n # Random embedding vector for unknown words.\n weight_vectors.append(np.random.uniform(\n -0.05, 0.05, weight_vectors[0].shape).astype(np.float32))\n return np.stack(weight_vectors), word_idx",
"def load_embedding(fpath, VOCAB):\n print(\"Loading embeddings...\")\n emb = dict()\n wv_from_bin = KeyedVectors.load_word2vec_format(fpath, limit=VOCAB)\n for word, vector in tqdm(zip(wv_from_bin.vocab, wv_from_bin.vectors)):\n coefs = np.asarray(vector, dtype='float32')\n if word not in emb:\n emb[word] = coefs\n return emb",
"def load_embeddings(path, vocab, source_domain, target_domain, emb_name):\n\n pkl = './work/embeddings/%s_%s_%s.pkl' % (source_domain, target_domain, emb_name)\n if os.path.exists(pkl):\n print(\"Load embeddings from existing pkl file %s...\" % pkl)\n # word embeddings weights have been loaded\n embeddings = pickle.load(open(pkl, 'rb'))\n else:\n print(\"Load embedding from %s...\" % path)\n raw_embeddings = {}\n if emb_name == 'yelp_electronics':\n with open(path) as fp:\n for line in fp:\n word_vector = line.split(\",\")[:-1]\n vector_list = []\n for element in word_vector[len(word_vector) - 100:]:\n vector_list.append(float(element))\n word = ','.join(word_vector[:len(word_vector) - 100])\n vector = np.asarray(vector_list)\n if word in vocab:\n raw_embeddings[word] = vector\n else:\n with open(path) as fp:\n for line in fp:\n eles = line.strip().split(' ')\n word = eles[0]\n if word in vocab:\n raw_embeddings[word] = eles[1:]\n\n dim_w = len(raw_embeddings['the'])\n n_words = len(vocab)\n embeddings = np.zeros(shape=(n_words, dim_w))\n for w in vocab:\n wid = vocab[w]\n if w in raw_embeddings:\n embeddings[wid] = np.array([float(ele) for ele in raw_embeddings[w]])\n else:\n # for OOV words, add random initialization\n embeddings[wid] = np.random.uniform(-0.25, 0.25, dim_w)\n print(\"Find %s word embeddings...\" % len(embeddings))\n if not os.path.exists('./work/embeddings'):\n os.mkdir('./work/embeddings')\n emb_path = './work/embeddings/%s_%s_%s.pkl' % (source_domain, target_domain, emb_name)\n # write the embedding weights back to the disk\n pickle.dump(embeddings, open(emb_path, 'wb'))\n embeddings = np.array(embeddings, dtype='float32')\n return embeddings",
"def load_embedding(path=PROJECT_DIR / \"outputs/models/embedding.pkl\"):\n try:\n with open(path, \"rb\") as inp:\n embedding = pickle.load(inp)\n return embedding\n\n except FileNotFoundError:\n logger.error(f\"There is no embedding to load at {path}\")",
"def index_embedding_words(embedding_file):\n words = set()\n with open(embedding_file, encoding='utf-8') as f:\n for line in f:\n w = Dictionary.normalize(line.rstrip().split(' ')[0])\n words.add(w)\n return words",
"def index_embedding_words(embedding_file):\n words = set()\n with open(embedding_file) as f:\n for line in f:\n w = Dictionary.normalize(line.rstrip().split(' ')[0])\n words.add(w)\n return words",
"def get_embeddings():\n embeddings = dict(get_coefs(*o.strip().split()) for o in open(EMBEDDING_FILE))\n return embeddings",
"def load_embeddings(path):\r\n\r\n embeds = dict() # dictionary mapping words to vectors\r\n for line in open(path, encoding='utf-8'):\r\n row = line.strip().split('\\t')\r\n embeds[row[0]] = np.array(row[1:], dtype=np.float32)\r\n\r\n embeddings_dim = embeds[list(embeds)[0]].shape[0]\r\n\r\n return embeds, embeddings_dim",
"def load(cls, filepath) -> 'Word2VecEmbedding':\n with open(filepath, 'rb') as f:\n embedding = pickle.load(f)\n embedding.word2idx = {spell: idx for idx, spell in enumerate(embedding.vocab.idx2word)}\n return embedding",
"def load_words(filename):\n\n url = codeskulptor.file2url(WORDFILE)\n netfile = urllib2.urlopen(url)\n data = netfile.read()\n return data",
"def load_pretrained_words_data(embeddings_filename, vocab):\n words = dict()\n emb_dim = None\n with gzip.open(cached_path(embeddings_filename), 'rb') as embeddings_file:\n for line in embeddings_file:\n fields = line.decode('utf-8').strip().split(' ')\n if len(fields) == 0:\n continue\n word = fields[0]\n if emb_dim is None:\n emb_dim = len(fields) - 1\n if emb_dim < 10: # my pretrained file is poisonous 😭\n emb_dim = None\n else:\n assert emb_dim == len(fields) - 1, \"{}, {}\".format(emb_dim, len(fields) - 1)\n words.update({word: [float(i) for i in fields[1:]]})\n print(\"Embedding dim: {}\".format(emb_dim))\n tokens = vocab.get_index_to_token_vocabulary(\"tokens\")\n n_tokens = len(tokens)\n data = []\n for i in tokens:\n if tokens[i] in words:\n data.append(words[tokens[i]])\n else:\n data.append([0] * emb_dim)\n return torch.tensor(data), emb_dim",
"def load_word2vect(self, file_path):\n self.embeddings = []\n self.word_to_idx = {'<pad>' : 0}\n self.vocab = ['<pad>']\n\n model = w2v.load(file_path)\n self.embedding_size = model.vectors.shape[1]\n pad_embedding = np.zeros(self.embedding_size, \"float32\")\n self.embeddings.append(pad_embedding)\n\n train_words_set = set([word for text in self.train_data for word in\n text[1].split(\" \")])\n\n for w in model.vocab:\n if w in train_words_set:\n self.word_to_idx[w] = len(self.vocab)\n self.vocab.append(w)\n self.embeddings.append(model[w])\n\n del model",
"def load_embeddings(embeddings_path):\n\n embeddings_index = {}\n f = open(embeddings_path, encoding='utf-8')\n for line in tqdm(f):\n values = line.rstrip().split(' ')\n word = values[0]\n coefs = np.asarray(values[1:], dtype='float32')\n embeddings_index[word] = coefs\n f.close()\n print('Found {} word vectors.'.format(len(embeddings_index)))\n return embeddings_index",
"def index_embedding_words(self, embedding_file):\n words = set()\n with open(embedding_file) as f:\n for line in f:\n w = TokenDictionary.normalize(line.rstrip().split(' ')[0])\n words.add(w)\n return words",
"def load_embedding(embedding_file_path, word_index, embedding_dim):\n # Create a Numpy Placeholder for Embedding\n max_features = len(word_index)+1\n embedding_weights = np.random.random([max_features, embedding_dim])\n count = 0\n glove_file = open(embedding_file_path)\n for line in glove_file:\n word, vector = line.split(' ')[0], line.split(' ')[1:]\n if word in word_index and word_index[word] <= max_features:\n count += 1\n vector = list(map(float, vector))\n embedding_weights[word_index[word]] = [float(i) for i in vector]\n\n print('Fraction found in glove {}'.format(count/len(embedding_weights)))\n return embedding_weights",
"def make_embedding(path, words, indices):\n #root = '/'.join(path.split('/')[0:-1])\n #all_paths = [root+'/'+x for x in os.listdir(root)] #'/'.join(path.split('/')[0:-1]))\n #for path in all_paths:\n vec_path = 'data/'+path.split('/')[-1]+'_'+mode\n print(vec_path)\n if os.path.exists(vec_path+'.npy'):\n np_vecs = np.load(vec_path+'.npy')\n else:\n words_len = len(words)\n vecs = []\n if mode == 'word':\n f = load_model('wiki.en.bin')\n for i, w in enumerate(words):\n if mode == 'word':\n vec = f.get_word_vector(w)\n else:\n vec = eye[indices[w]]\n vecs.append(vec) \n if i % 10000 == 0:\n print(\"{} / {}\".format(i, words_len))\n np_vecs = np.asarray(vecs, dtype=np.int8)\n np.save(vec_path, np_vecs)\n return np_vecs",
"def load_embeddings():\n return embedding_utils.PretrainedWordEmbeddings(\n lowercase=FLAGS.lowercase,\n embeddings_path=FLAGS.fasttext_embeddings,\n max_vocab_size=FLAGS.max_vocab_size,\n skip_header=True)",
"def load_word_embed(path: str,\n dimension: int,\n *,\n skip_first: bool = False,\n freeze: bool = False,\n sep: str = ' '\n ) -> Tuple[nn.Embedding, Dict[str, int]]:\n vocab = {'$$$UNK$$$': 0}\n embed_matrix = [[0.0] * dimension]\n with open(path) as r:\n if skip_first:\n r.readline()\n for line in r:\n segments = line.rstrip('\\n').rstrip(' ').split(sep)\n word = segments[0]\n vocab[word] = len(vocab)\n embed = [float(x) for x in segments[1:]]\n embed_matrix.append(embed)\n print('Loaded %d word embeddings' % (len(embed_matrix) - 1))\n \n embed_matrix = torch.FloatTensor(embed_matrix)\n \n word_embed = nn.Embedding.from_pretrained(embed_matrix,\n freeze=freeze,\n padding_idx=0)\n return word_embed, vocab",
"def load_embed(file_name, vocab_size):\n\n with tf.io.gfile.Open(file_name, 'r') as embed_file:\n vocab = []\n embeds = []\n depth = -1\n for index, line in enumerate(embed_file):\n if vocab_size > 0 and index >= vocab_size:\n break\n line = line.strip()\n tokens = line.strip().split(' ')\n word = tokens[0]\n vocab.append(word)\n if depth == -1:\n embed = [float(token) for token in tokens[1:]]\n else:\n embed = [float(token) for token in tokens[-depth:]]\n d = len(embed)\n if depth == -1:\n depth = d\n if d != depth:\n raise ValueError('Inconsistent embedding sizes')\n embeds.append(embed)\n\n embeds = np.stack(embeds)\n\n return vocab, embeds, depth",
"def get_word_embeddings(t, folder, lang=\"en\"):\n vecs_url = f\"https://dl.fbaipublicfiles.com/fasttext/vectors-crawl/cc.{lang}.300.vec.gz\"\n vecs_gz_filename = vecs_url.rpartition(\"/\")[2]\n os.makedirs(folder, exist_ok=True)\n vecs_gz_filepath = os.path.join(folder, vecs_gz_filename)\n\n tokenizer_vocab_size = len(t.vocab)\n\n if wait_for_file_stable(vecs_gz_filepath):\n print(\"Using existing embeddings file\")\n else:\n print(\"Downloading word vectors...\")\n subprocess.run([\" \".join([\"wget\", \"-NP\", folder, vecs_url])], check=True, shell=True)\n\n print(\"Loading into memory...\")\n embeddings_index = dict()\n with gzip.open(vecs_gz_filepath, \"rt\") as zipf:\n firstline = zipf.readline()\n emb_vocab_size, emb_d = firstline.split(\" \")\n emb_vocab_size = int(emb_vocab_size)\n emb_d = int(emb_d)\n for line in zipf:\n values = line.split()\n word = values[0]\n # Only load subset of the embeddings recognised by the tokenizer:\n if word in t.vocab.stoi:\n coefs = np.asarray(values[1:], dtype=\"float32\")\n embeddings_index[word] = coefs\n print(\"Loaded {} of {} word vectors for tokenizer vocabulary length {}\".format(\n len(embeddings_index),\n emb_vocab_size,\n tokenizer_vocab_size,\n ))\n\n # create a weight matrix for words in training docs\n embedding_matrix = np.zeros((tokenizer_vocab_size, emb_d))\n for word, i in t.vocab.stoi.items():\n embedding_vector = embeddings_index.get(word)\n if embedding_vector is not None:\n embedding_matrix[i] = embedding_vector\n\n return embedding_matrix",
"def load_embed_text(embed_file):\n \n emb_dict = dict()\n emb_size = None\n with codecs.getreader(\"utf-8\")(tf.gfile.GFile(embed_file, \"rb\")) as f:\n for line in f:\n tokens = line.strip().split(\" \")\n word = tokens[0]\n vec = list(map(float, tokens[1:]))\n emb_dict[word] = vec\n if emb_size:\n assert emb_size == len(vec), \"All embeddings should be same size\"\n else:\n emb_size = len(vec)\n return emb_dict, emb_size",
"def get_word_embeddings(self):\n embedding_index = {}\n with open('./glove/glove.6B.100d.txt', encoding=\"utf8\") as f:\n for line in f:\n values = line.split()\n word = values[0]\n coefs = np.asarray(values[1:], dtype='float32')\n embedding_index[word] = coefs\n return embedding_index",
"def load_embedding(self, glove_dir='glove.6B/'):\n\n f = open(os.path.join(glove_dir, 'glove.6B.100d.txt'))\n for line in f:\n values = line.split()\n word = values[0]\n self.embeddings_index[word] = np.asarray(values[1:], dtype='float32')\n f.close()",
"def get_vocab(self, filename):\n return read_file(filename) #TODO(tilo): the-FAQ!",
"def getWordEmbeddingsMatrix(script_directory, embedding_file):\n translator = str.maketrans('', '', string.punctuation)\n all_words = []\n print(\"Loading vocab from text files in:\")\n for d in os.listdir(script_directory):\n print(d)\n for fname in os.listdir(\"%s/%s\" % (script_directory, d)):\n with open(\"%s/%s/%s\" % (script_directory, d, fname), 'r') as f:\n words = [w.translate(translator) for w in f.read().split() if w.translate(translator) != \"\"]\n all_words.extend(words)\n\n model = KeyedVectors.load_word2vec_format(embedding_file, binary=True)\n vocab = {\"PAD\" : 0, \"EOS\" : 1}\n vocab.update({w : i + 2 for i,w in enumerate([w1 for w1 in set(all_words) if w1 in model]) })\n inv_dict = vocab.keys()\n ## Take a minute to load...\n\n vocab_size = len(inv_dict)\n emb_size = 300 # or whatever the size of your embeddings\n embeddings = np.zeros((vocab_size + 1, emb_size))\n for k,v in vocab.items():\n embeddings[v] = model[k]\n vocab[\"UNK\"] = len(vocab.keys())\n embeddings[vocab[\"UNK\"]] = np.ones(emb_size)\n del model\n ## Now we have a numpy matrix of embeddings...\n # x_model = tf.placeholder(tf.int32, shape=[None, input_size])\n # with tf.device(\"/cpu:0\"):\n # embedded_x = tf.nn.embedding_lookup(embeddings, x_model)\n return embeddings, vocab",
"def load_glove_embeddings():\n data = open(\"glove.6B.50d.txt\",'r',encoding=\"utf-8\")\n embeddings = []\n word_index_dict = {'UNK':0}\n index = 1\n for lines in data:\n wordVector = lines.split(\" \")\n if(wordVector[0] in string.punctuation or any(char.isdigit() for char in wordVector[0])):\n continue\n embeddings.append(wordVector[1:-1])\n word_index_dict[wordVector[0]] = index\n index+=1\n print(\"done\")\n\n return embeddings, word_index_dict",
"def load_embeddings(filename):\n count = 0\n matrix = []\n word_map = {}\n with open(filename, encoding=\"utf8\") as f:\n # with open(filename) as f:\n for line in f:\n line = line.strip()\n items = line.split()\n word = items[0]\n rest = items[1:]\n # print(\"word:\", word)\n word_map[word] = count\n count += 1\n\n rest = list(map(float, rest))\n matrix.append(rest)\n matrix = np.array(matrix)\n return word_map, matrix"
] | [
"0.714321",
"0.69293404",
"0.69026184",
"0.683942",
"0.67908764",
"0.6763844",
"0.6745005",
"0.6740114",
"0.6731184",
"0.6720369",
"0.6682428",
"0.66791385",
"0.66790795",
"0.6666699",
"0.66630864",
"0.6646522",
"0.66387117",
"0.66284627",
"0.6619337",
"0.66174227",
"0.6582144",
"0.6572052",
"0.6552051",
"0.6540873",
"0.65155494",
"0.6512254",
"0.6492109",
"0.648828",
"0.64755166",
"0.64676166"
] | 0.73965585 | 0 |
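For reference, the positive document above (`gen_embedding`) assumes `numpy` (as `np`) and `tqdm` are imported elsewhere in its source file. A minimal, self-contained sketch of the same text-embedding loader, using a hypothetical `glove.6B.50d.txt` path purely for illustration:

```python
import numpy as np
from tqdm import tqdm

def gen_embedding(path):
    # Map each word to its float32 vector; the file has one "word v1 v2 ..." entry per line.
    word_emb = {}
    with open(path, encoding='utf-8') as f:
        for line in tqdm(f):
            values = line.split()
            word_emb[values[0]] = np.asarray(values[1:], dtype='float32')
    return word_emb

if __name__ == '__main__':
    emb = gen_embedding('glove.6B.50d.txt')  # placeholder path, any word/vector text file works
    print(len(emb), 'vectors loaded, dim =', len(next(iter(emb.values()))))
```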
Returns the right value, b, from cons(a,b) | def cdr(pair):
def right_val(a,b):
return b
return pair(right_val) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cons(a, b):\r\n def pair(f):\r\n return f(a, b)\r\n return pair",
"def cons(first, second):\n pair = Pair(first, second)\n if _can_be_list(pair):\n pair = _pair2list(pair)\n return pair",
"def peek(self):\r\n print(self.a, self.b)\r\n if self.a:\r\n return self.a[0]\r\n return self.b[0]",
"def circlfunc(a,b):\n c = []\n for i in range(0,len(a)):\n c.append(a[b[i]-1])\n\n return c",
"def car(pair):\r\n def left_val(a,b):\r\n return a\r\n return pair(left_val)",
"def cdr(pair):\n\n def second(a, b):\n return b\n return pair(second)",
"def mini(a,b):\n\tif a < b: \n\t\treturn a\n\treturn b",
"def pull_values(A, right, left):\n A_left = A[left]\n A_right = A[right]\n\n return A_right, A_left",
"def second(pair):\n\treturn pair[1]",
"def peek(self):\n self._a_to_b()\n return self.b[-1]",
"def func(a,b):\n a=b\n return a",
"def extra_elem(a,b):\n \"*** YOUR CODE HERE ***\"\n return list( set(a).symmetric_difference(set(b)) )[0] # don't need to turn b into a set\n # the function will accept a list as an arg.",
"def get_value(self, a, b):\r\n if a != strings.EMPTY and b != strings.EMPTY:\r\n x = self.__get_position(a)\r\n y = self.__get_position(b)\r\n\r\n return int(self.__matrix[x][y])\r\n elif a == strings.EMPTY and b == strings.EMPTY:\r\n return 0\r\n return strings.NEGATIVE_INFINITY",
"def second(a, b):",
"def substraction(a, b):\n pass",
"def cons(l, x):\n return l + [x]",
"def mid(arr):\n\n \n arr_ln = len(arr)\n mid = arr_ln//2\n val = arr.pop(mid)\n\n return arr, val",
"def mini(a, b):\n return min(a, b)",
"def get(self, a: int, b: int) -> int:\n result = self.identity()\n q = [(1, 0, self.n2)]\n while q:\n k, left, right = q.pop()\n if a <= left and right <= b:\n result = self.binary(result, self.tree[k])\n continue\n m = (left + right) // 2\n k <<= 1\n if a < m and left < b:\n q.append((k, left, m))\n if a < right and left < m:\n q.append((k + 1, m, right))\n return result",
"def second(xs):\n if not xs:\n return None\n return xs[1]",
"def second(xs):\n if not xs:\n return None\n return xs[1]",
"def match_length(cls, a, b):\n if len(a) == len(b):\n return (a, b)\n elif len(a) < len(b):\n return (cls(a, len(b)), b)\n else:\n return (a, cls(b, len(a)))",
"def pair(first, second):\n return [first, second]",
"def _get_primary(operation, a=None, b=None, treat_a_as_pointer=True, treat_b_as_pointer=True):\n\n bytecode_dictionary = {\n \"sp++\": [\"@SP\", \"M=M+1\"],\n \"sp--\": [\"@SP\", \"M=M-1\"],\n }\n\n if operation == \"*a=*b\":\n load_b_into_d = [f\"@{b}\", \"D=M\"]\n\n if treat_b_as_pointer:\n load_b_into_d.insert(1, \"A=M\")\n\n load_d_into_a = [f\"@{a}\", \"M=D\"]\n\n if treat_a_as_pointer:\n load_d_into_a.insert(1, \"A=M\")\n\n load_b_into_a = load_b_into_d + load_d_into_a\n\n return load_b_into_a\n\n else:\n return bytecode_dictionary[operation]",
"def getval(exp, from_list):\n if from_list.null():\n return SExp(\"NIL\")\n if from_list.car().atom():\n raise error.LispException(\"a-list or d-list in wrong format\")\n if exp == from_list.car().car():\n return from_list.car().cdr()\n return getval(exp, from_list.cdr())",
"def first(pair):\n\treturn pair[0]",
"def car(pair):\n\n def first(a, b):\n return a\n return pair(first)",
"def make_abba(a, b):\n return a + b * 2 + a",
"def functorOtherValue(functor, val):\n range = functorRange(functor)\n assert len(range) == 2\n if val == range[0]:\n return range[1]\n else:\n return range[0]",
"def pop(self):\n self._a_to_b()\n r = self.b[-1]\n self.b.pop()\n return r"
] | [
"0.6104468",
"0.56823295",
"0.55193377",
"0.55020803",
"0.54645276",
"0.54475915",
"0.54198915",
"0.5387183",
"0.5368972",
"0.5313395",
"0.5312042",
"0.5309721",
"0.53004396",
"0.5286296",
"0.52612746",
"0.52436495",
"0.52195436",
"0.5196283",
"0.5162914",
"0.5103177",
"0.5103177",
"0.5094398",
"0.50780016",
"0.5049352",
"0.50273705",
"0.5014894",
"0.50037044",
"0.50026304",
"0.49980587",
"0.4979931"
] | 0.60538065 | 1 |
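The positive document above (`cdr`) only works together with a closure-based `cons`, such as the one in the top-ranked negative. A minimal sketch pairing the two, with `car` included for symmetry:

```python
def cons(a, b):
    # Church-style pair: capture a and b in a closure and expose them through a selector.
    def pair(f):
        return f(a, b)
    return pair

def car(pair):
    # Return the left value, a, from cons(a, b).
    return pair(lambda a, b: a)

def cdr(pair):
    # Return the right value, b, from cons(a, b).
    return pair(lambda a, b: b)

p = cons(1, 2)
print(car(p), cdr(p))  # -> 1 2
```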
3d plot of the given x, y, z beside z2 | def plot3d2(x, y, z, z2, save_fig = True, title = None):
fig = plt.figure(figsize = (12, 7))
ax = fig.add_subplot(121, projection = '3d')
try:
ax.title.set_text(title)
except:
pass
surf = ax.plot_surface(x, y, z, cmap=cm.coolwarm,
linewidth=0, antialiased=False)
ax.set_zlim(-0.10, 1.40)
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
ax.view_init(elev=20., azim=30)
ax = fig.add_subplot(122, projection = '3d')
ax.title.set_text('FrankeFunction')
ax.plot_surface(x, y, z2,
linewidth=0, antialiased=False)
ax.set_zlim(-0.10, 1.40)
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
# Customize the z axis.
ax.set_zlim(-0.10, 1.40)
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
ax.view_init(elev=20., azim=30)
# Add a color bar which maps values to colors.
fig.colorbar(surf, shrink=0.5, aspect=5)
try:
fig.savefig(results_dir + save_fig)
except:
pass
plt.show() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def plot3d(x, y, z, savefig = True):\n\n fig = plt.figure(figsize=(12, 7))\n ax = fig.gca(projection='3d')\n\n # Plot the surface.\n surf = ax.plot_surface(x, y, z, cmap=cm.coolwarm,\n linewidth=0, antialiased=False)\n\n # Customize the z axis.\n ax.zaxis.set_major_locator(LinearLocator(10))\n ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))\n\n # Add a color bar which maps values to colors.\n fig.colorbar(surf, shrink=0.5, aspect=5)\n ax.set_xlabel('Arbitary length x', fontsize = 13)\n ax.set_ylabel('Arbitary length y', fontsize = 13)\n ax.set_zlabel('Arbitary height z', fontsize = 13)\n\n try:\n fig.savefig(results_dir + savefig)\n except:\n pass\n plt.show()",
"def plot_surface_3D(self, length = 30, fps = 30, **kwargs):\n fig = utils.get_figure(scale = 3)\n ax = fig.add_subplot(111, projection = '3d')\n\n # surface_x = self.xi_1_mesh\n # surface_y = self.xi_2_mesh\n # surface_x, surface_y, surface_z = self.surface()\n xyz = self.surface()\n\n # surface_x, surface_y = np.meshgrid(surface_x, surface_y)\n\n # print(np.shape(surface_x))\n # print(np.shape(surface_y))\n # print(np.shape(surface_z))\n\n control_points_x = np.array([control_point[0] for control_point in self.control_net.values()])\n control_points_y = np.array([control_point[1] for control_point in self.control_net.values()])\n control_points_z = np.array([control_point[2] for control_point in self.control_net.values()])\n\n # x_min = min(np.min(surface_x), np.min(control_points_x))\n # x_max = max(np.max(surface_x), np.max(control_points_x))\n # x_range = np.abs(x_max - x_min)\n #\n # y_min = min(np.min(surface_y), np.min(control_points_y))\n # y_max = max(np.max(surface_y), np.max(control_points_y))\n # y_range = np.abs(y_max - y_min)\n #\n # z_min = min(np.min(surface_z), np.min(control_points_z))\n # z_max = max(np.max(surface_z), np.max(control_points_z))\n # z_range = np.abs(z_max - z_min)\n #\n # ax.set_xlim(x_min - 0.05 * x_range, x_max + 0.05 * x_range)\n # ax.set_ylim(y_min - 0.05 * y_range, y_max + 0.05 * y_range)\n # ax.set_zlim(z_min - 0.05 * z_range, z_max + 0.05 * z_range)\n\n ax.scatter(control_points_x, control_points_y, control_points_z, depthshade = False, **CONTROL_POLYGON_KWARGS)\n\n # print(np.max(surface_x), np.max(surface_y), np.max(surface_z))\n # print(np.min(surface_x), np.min(surface_y), np.min(surface_z))\n # print(surface_x)\n # print(surface_y)\n # print(surface_z)\n xyz = np.reshape(xyz, (-1, 3))\n print(xyz.shape)\n x, y, z = xyz[:, 0], xyz[:, 1], xyz[:, 2]\n ax.scatter(x, y, z)\n # ax.plot_trisurf(\n # x, y, z,\n # cmap = plt.get_cmap('viridis'),\n # linewidth = 0,\n # antialiased = True,\n # )\n # ax.plot_surface(surface_x, surface_y, surface_z, rstride = 1, cstride = 1)\n # ax.plot_trisurf(surface_x, surface_y, surface_z)\n # ax.plot_trisurf(surface_x, surface_y, surface_z, **CURVE_KWARGS)\n\n ax.axis('off')\n\n ax.view_init(elev = 45, azim = 0) # note that this resets ax.dist to 10, so we can't use it below\n ax.dist = 7.5 # default is 10, so zoom in a little because there's no axis to take up the rest of the space\n\n plt.show()\n utils.save_current_figure(**kwargs)\n\n ### ANIMATION ###\n\n frames = length * fps\n\n writer = anim.writers['ffmpeg'](fps = fps, bitrate = 2000) # don't need a very high bitrate\n\n def animate(frame):\n print(frame, frames, frame / frames)\n ax.azim = 360 * frame / frames # one full rotation\n return [] # must return the list of artists we modified (i.e., nothing, since all we did is rotate the view)\n\n ani = anim.FuncAnimation(fig, animate, frames = frames, blit = True)\n ani.save(f\"{os.path.join(kwargs['target_dir'], kwargs['name'])}.mp4\", writer = writer)\n\n plt.close()",
"def plot3dproj(x, y, z, *args, color=(0,0,0), shadow_dist=1.0, color_proj=None, \n elev_azim=(39,-47), show_labels=False, **kwargs):\n\n if not color_proj:\n color_proj = lighter(color, .6)\n\n\n if np.isscalar(shadow_dist) == 1:\n sdist_x = shadow_dist\n sdist_y = shadow_dist\n sdist_z = shadow_dist\n else:\n sdist_x, sdist_y, sdist_z = shadow_dist\n\n fig = plt.figure(figsize=(7,7))\n ax = fig.add_subplot(111, projection= '3d')\n \n ax.plot(x, z, *args, zdir='y', zs=sdist_y*np.max(y), color=color_proj, **kwargs)\n ax.plot(y, z, *args, zdir='x', zs=sdist_x*np.min(x), color=color_proj, **kwargs)\n ax.plot(x, y, *args, zdir='z', zs=sdist_z*np.min(z), color=color_proj, **kwargs)\n ax.plot(x, y, z, *args, color=color, **kwargs)\n\n ax.view_init(elev=elev_azim[0], azim=elev_azim[1])\n ax.set_aspect('auto', adjustable='box') \n \n# ratio = 1.0\n# xvals, yvals = ax.get_xlim(), ax.get_ylim()\n# xrange = xvals[1]-xvals[0]\n# yrange = yvals[1]-yvals[0]\n# ax.set_aspect(ratio*(xrange/yrange), adjustable='box')\n fixed_aspect_ratio(1.0)\n\n if not show_labels:\n ax.set_xticklabels([]) \n ax.set_yticklabels([]) \n ax.set_zticklabels([])\n #plt.show()\n\n return ax",
"def plot3d(data):\n assert span1 == span2\n span = span1\n # ---------------------- create the figure and axes ---------------------- #\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n\n # -- discretize the definition space and compute the function's images --- #\n X, Y = discretise_space([defspace1, defspace2], n=span)\n Z = data\n\n # ----------------------- appearance and plotting ------------------------ #\n ax.set_zlim(np.min(Z) - 0.5, np.max(Z) + 0.5)\n ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))\n ax.set(xlabel='$W\\_C$', ylabel='$W\\_W$', zlabel=\"Utilité\")#,\n # title='Utilité à {} ticks en fonction de W_W et W_C'.format(ticks))\n\n # Plot the surface.\n surf = ax.plot_surface(X, Y, Z, alpha=0.8, #, cmap='binary'\n linewidth=0, antialiased=False, zorder=1)\n\n plt.show()",
"def plot_3d(x_data, y_data, Z, df, xlabel, ylabel, xrange=None,\n yrange=None, figsize=(12, 12)):\n fig = pyplot.figure(figsize=figsize)\n ax = fig.add_subplot(111, projection='3d')\n nsamp, nsen = Z.shape\n\n sen_index = df.columns.names.index('sensor')\n senlist = df.columns.levels[sen_index]\n pyplot.yticks(y_data, senlist)\n ax.plot_surface(\n np.repeat(x_data,\n nsen, axis=1),\n np.repeat(np.matrix(y_data), nsamp, axis=0),\n df.values,\n cmap=cm.coolwarm)\n pyplot.xlabel(xlabel)\n pyplot.ylabel('Sensor name')\n ax.set_zlabel(ylabel)\n ax.view_init(elev=45., azim=-130)\n ax.tick_params(axis='y', which='major', labelsize=4)\n pyplot.show()",
"def drawCurve3D(xlist, ylist, zlist):\n dislin.curv3d(xlist,ylist,zlist,len(xlist))",
"def plot_3d(x, y):\n # Create grid coordinates\n x_axis = np.linspace(-10, 10, 50)\n y_axis = np.linspace(-1, 4, 50)\n xx, yy = np.meshgrid(x_axis, y_axis, indexing='xy')\n z = np.zeros((x_axis.size, y_axis.size))\n\n # Calculate z-values based on grid coefficients\n for (i, j), v in np.ndenumerate(z):\n z[i, j] = compute_cost(x, y, theta=[[xx[i, j]], [yy[i, j]]])\n\n # Construct plot\n fig = plt.figure(figsize=(12, 10))\n ax = fig.add_subplot(111, projection='3d')\n ax.plot_surface(xx, yy, z, rstride=1, cstride=1, alpha=0.6, cmap=plt.cm.jet)\n ax.set_zlabel('Cost')\n ax.set_zlim(z.min(), z.max())\n ax.view_init(elev=15, azim=230)\n plt.title('X vs. Y vs. Cost')\n ax.set_xlabel(r'$\\theta_0$', fontsize=17)\n ax.set_ylabel(r'$\\theta_1$', fontsize=17)\n plt.show()\n plt.close()",
"def plot_surface(self):\n X, Y = np.meshgrid(self.x, self.y)\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.plot_surface(X=X, Y=Y, Z=self.z)\n plt.show()",
"def visualize_3d(grbdir,x, y, z, t, thetax, thetay, name):\n # Set ax.azim and ax.elev to ra, dec\n global runconf\n\n from mpl_toolkits.mplot3d import Axes3D\n fig = plt.figure()\n plt.suptitle(r\"Visualisation of {name} in 3d:$\\theta_x$={tx:0.1f},$\\theta_y$={ty:0.1f}\".format(name=name, tx=thetax, ty=thetay))\n # Z\n ax = plt.subplot(2, 2, 1, projection='3d')\n plot_xyzt(grbdir,ax, x, y, z, t)\n ax.azim = z.ra.deg\n ax.elev = z.dec.deg\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_zticklabels([])\n add_satellite(ax, x, y, z)\n ax.set_title(\"View from CZTI pointing (z)\")\n\n # Transient\n ax = plt.subplot(2, 2, 2, projection='3d')\n plot_xyzt(grbdir,ax, x, y, z, t)\n ax.azim = t.ra.deg\n ax.elev = t.dec.deg\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_zticklabels([])\n add_satellite(ax, x, y, z)\n ax.set_title(\"View from nominal \\n transient direction\")\n\n # X\n ax = plt.subplot(2, 2, 3, projection='3d')\n plot_xyzt(grbdir,ax, x, y, z, t)\n ax.azim = x.ra.deg\n ax.elev = x.dec.deg\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_zticklabels([])\n add_satellite(ax, x, y, z)\n ax.set_title(\"View from CZTI X axis\")\n\n # Z\n ax = plt.subplot(2, 2, 4, projection='3d')\n plot_xyzt(grbdir,ax, x, y, z, t)\n ax.azim = y.ra.deg\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_zticklabels([])\n add_satellite(ax, x, y, z)\n ax.set_title(\"View from CZTI Y axis\")\n\n return",
"def plot_results_traj_3d(p_x, p_y, p_z, xmin, xmax, ymin, ymax, zmin, zmax):\n fig, ax = plt.subplots(2 , 2, figsize = (10, 10))\n \n for p in np.arange(0, p_x.shape[0], step = 1): \n for t in np.arange(0, p_x.shape[1], step = 1): \n ax[0,0].plot(t, p_x[p, t], 'rx') \n ax[0,1].plot(t, p_y[p, t], 'gx') \n ax[1,0].plot(t, p_z[p, t], 'bx') \n ax[1,1].plot(t, p_x[p, t], 'rx') \n ax[1,1].plot(t, p_y[p, t], 'gx') \n ax[1,1].plot(t, p_z[p, t], 'bx') \n for a in ax.flat: \n a.set(xlabel = 'Time steps', ylabel = 'Position')\n ax[0,0].set_title('X (pix)') \n ax[0,0].set_ylim([xmin, xmax]) \n ax[0,1].set_title('Y (pix)') \n ax[0,1].set_ylim([ymin, ymax]) \n ax[1,0].set_title('Z (pix)') \n ax[1,0].set_ylim([zmin, zmax])\n ax[1,1].set_title('Positions combined') \n ax[1,1].set_ylim([np.array([xmin, ymin, zmin]).min(), np.array([xmax, ymax, zmax]).max()])",
"def plot_bivariate_3d(X, Y, Z, bounds, title, **kwargs):\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.set_xticks(np.linspace(bounds[0],bounds[1],6))\n ax.set_yticks(np.linspace(bounds[0],bounds[1],6))\n ax.set_xlim(bounds)\n ax.set_ylim(bounds)\n ax.plot_surface(X,Y,Z, **kwargs)\n plt.title(title)\n plt.show()",
"def axis3D(xlow,xhigh,xfirst,xstep,ylow,yhigh,yfirst,ystep,\\\n zlow,zhigh,zfirst,zstep):\n dislin.graf3d(xlow,xhigh,xfirst,xstep,ylow,yhigh,yfirst,ystep,\\\n zlow,zhigh,zfirst,zstep)",
"def visualize_in_3d(self,**kwargs):\n fig = plt.figure(figsize=(7,7))\n ax = fig.add_subplot(111, projection='3d')\n\n points = np.vstack([\n c.to_matrix() for c in self.contours if c.inclusion\n ])\n points[:,:2] = points[:,:2] * self.scan.pixel_spacing\n\n # Center the points at the origin for \n # spherical coordinates conversion.\n points = points - points.mean(axis=0)\n\n # Triangulate the azimuth and zenith transformation.\n azimuth = np.arctan2(points[:,1],points[:,0])\n zenith = np.arccos(points[:,2] / np.linalg.norm(points,axis=1))\n azi_zen = np.c_[azimuth.flatten(),zenith.flatten()]\n triangles = Delaunay(azi_zen).simplices\n\n # Start the points at 0 on every axis.\n # This lets the axis ticks to be interpreted as length in mm.\n points = points - points.min(axis=0)\n\n ax.set_xlabel('length (mm)')\n ax.set_ylabel('length (mm)')\n ax.set_zlabel('length (mm)')\n\n # Plot the points.\n ax.plot_trisurf(points[:,0], points[:,1], points[:,2],\n triangles=triangles, **kwargs)\n plt.show()",
"def drawLine3D(x0,y0,z0,x1,y1,z1):\n dislin.strt3d(x0,y0,z0)\n dislin.conn3d(x1,y1,z1)",
"def get_3d_plot(three_d_matrix, ax, title, length):\r\n x, y, z = np.where(three_d_matrix != 0)\r\n ax.scatter(x, y, z, c='blue')\r\n ax.set_xlabel('x')\r\n ax.set_ylabel('y')\r\n ax.set_xlim(0, length)\r\n ax.set_ylim(0, length)\r\n ax.set_title(title)",
"def plot3d(self):\n plot_rupture_wire3d(self)",
"def plot_results_3d(p_x, p_y, p_z, h_exp = 0.5):\n plt.figure(figsize = (10, 10))\n ax3d = plt.axes(projection = '3d') \n\n color=iter(cm.rainbow(np.linspace(0,1,p_x.shape[0]))) # (1)\n labels = ['Particle ' + str(pl+1) for pl in np.arange(0, p_x.shape[0], step = 1)]\n \n for p in np.arange(0, p_x.shape[0], step = 1): \n c = next(color) # (1)\n for t in np.arange(0, p_x.shape[1], step = 1): \n ax3d.plot3D(p_x[p, t], p_y[p, t], p_z[p, t], 'x', c = c, label = labels[p]) \n legend_without_duplicate_labels(ax3d)\n ax3d.set_xlabel('X (pixels)') \n ax3d.set_ylabel('Y (pixels') \n ax3d.set_zlabel('Z (pixels)') \n ax3d.set_xlim([origin-150,origin+150])\n ax3d.set_ylim([origin-150,origin+150])\n ax3d.set_zlim([origin-150,origin+150])\n ax3d.set_title('3D particle trajectories - H = ' + str(h_exp))",
"def show_skeletons(self, skel_2d, z_out, z_gt=None):\n fig = plt.figure(figsize=(20, 20))\n ax1 = fig.add_subplot(1, 2, 1)\n ax2 = fig.add_subplot(1, 2, 2, projection='3d')\n edges = np.array([[1, 0], [0, 2],[2, 3], [3, 4], [0, 5], [5, 6], [6, 7]])\n\n ax_2d = ax1\n ax_3d = ax2\n\n # draw 3d\n for edge in edges:\n ax_3d.plot(skel_2d[0, edge], z_out[edge], skel_2d[1, edge], color='r')\n if z_gt is not None:\n ax_3d.plot(skel_2d[0, edge], z_gt[edge], skel_2d[1, edge], color='g')\n\n ax_3d.set_aspect('equal')\n ax_3d.set_xlabel(\"x\"), ax_3d.set_ylabel(\"z\"), ax_3d.set_zlabel(\"y\")\n ax_3d.set_xlim3d([-2, 2]), ax_3d.set_ylim3d([2, -2]), ax_3d.set_zlim3d([2, -2])\n ax_3d.view_init(elev=10, azim=-45)\n\n # draw 2d\n for edge in edges:\n ax_2d.plot(skel_2d[0, edge], skel_2d[1, edge], color='r')\n\n ax_2d.set_aspect('equal')\n ax_2d.set_xlabel(\"x\"), ax_2d.set_ylabel(\"y\")\n ax_2d.set_xlim([-2, 2]), ax_2d.set_ylim([2, -2])\n\n plt.show()",
"def newplot3(*args, **kwargs):\n\n if 'linewidth' and 'lw' not in kwargs.keys():\n kwargs['linewidth'] = 2\n\n fig = plt.figure(figsize=FIGURE_SIZE, dpi=FIGURE_DPI)\n ax = fig.add_subplot(111, projection='3d')\n\n x = np.asarray(args[0], dtype=float)\n y = np.asarray(args[1], dtype=float)\n z = np.asarray(args[2], dtype=float)\n\n if z.ndim == 2:\n if x.ndim < 2:\n x = np.tile(x, z.shape[1]).reshape(z.T.shape).T\n if y.ndim < 2:\n y = np.tile(y, z.shape[0]).reshape(z.shape)\n\n # Plot each array independently\n for n in range(len(z)):\n ax.plot(x[n], y[n], z[n], *args[3:], **kwargs)\n else:\n ax.plot(*args, **kwargs)",
"def plot_3d(self, ax_3d: Axes3D, lims_x: array_like = (-1, 1), lims_y: array_like = (-1, 1), **kwargs) -> None:\n X, Y, Z = self.to_mesh(lims_x, lims_y)\n\n ax_3d.plot_surface(X, Y, Z, **kwargs)",
"def plot_3D_compare(true_lab, pred_lab):\n ref_shape = [true_lab.shape[1], true_lab.shape[2], true_lab.shape[3]]\n true_loc = np.where(true_lab == 1)\n pred_loc = np.where(pred_lab == 1)\n fig = plt.figure()\n ax = plt.axes(projection=\"3d\")\n axl = plt.gca()\n axl.set_xlim3d([0, ref_shape[0]])\n axl.set_ylim3d([0, ref_shape[1]])\n axl.set_zlim3d([0, ref_shape[2]])\n ax.scatter3D(true_loc[0], true_loc[1], true_loc[2], marker=\".\", alpha=0.9)\n ax.scatter3D(pred_loc[0], pred_loc[1], pred_loc[2], marker=\".\", alpha=0.05)\n\n fig.set_facecolor('black')\n ax.set_facecolor('black')\n ax.grid(False)\n ax.w_xaxis.pane.fill = False\n ax.w_yaxis.pane.fill = False\n ax.w_zaxis.pane.fill = False\n\n ax.set_xlabel('Width', c='white')\n ax.set_ylabel('Depth', c='white')\n ax.set_zlabel('Height', c='white')\n\n plt.show()",
"def plot3d(data_x, data_y, data_z, vol):\n fig = go.Figure(\n data = [\n go.Mesh3d(\n x = data_x,\n y = data_y,\n z = data_z,\n i = [7, 0, 0, 0, 4, 4, 6, 6, 4, 0, 3, 2], # These are needed, numbers from documentation\n j = [3, 4, 1, 2, 5, 6, 5, 2, 0, 1, 6, 3],\n k = [0, 7, 2, 3, 6, 7, 1, 1, 5, 5, 7, 6],\n colorscale=[[0, 'darkblue'],\n [0.5, 'lightskyblue'],\n [1, 'darkblue']],\n intensity = np.linspace(0, 1, 8, endpoint=True),\n showscale=False,\n opacity = 0.6\n )\n ],\n layout = go.Layout(\n title = \"Le volume est: \" + str(vol),\n autosize = True\n )\n )\n\n # This prints it\n pyo.iplot(fig, filename='Determinant-Volume')",
"def full_3d(self, quantity):\n # The data just tells you what integer grid point you are on. Not what actual x,y coordinate you\n # are at\n x = np.arange(0, self.period, self.dx)\n y = np.arange(0, self.period, self.dy)\n z = np.arange(0, self.height + self.dz, self.dz)\n points = np.array(list(itertools.product(z, x, y)))\n # Get the scalar\n scalar = self.get_scalar_quantity(quantity)\n labels = ('X [um]', 'Y [um]', 'Z [um]', quantity)\n # Now plot!\n self.scatter3d(points[:, 1], points[:, 2], points[\n :, 0], scalar.flatten(), labels, 'full_3d')",
"def plot3surface( pot, **kwargs ): \n \n fig = plt.figure( figsize = (8., 8.) ) \n gs = matplotlib.gridspec.GridSpec( 3,2, wspace=0.2) \n \n # Make a list with three perpendicular directions which \n # will define the three surface cuts \n perp = [(np.pi/2., 0.), (np.pi/2., -np.pi/2.), (0., -1.*np.pi/2.) ]\n \n # Iterate to plot the three surface cuts\n yMin = 1e16\n yMax = -1e16 \n Ims = []\n for i in range(3):\n ax0 = fig.add_subplot( gs[i,0], projection='3d')\n ax1 = fig.add_subplot( gs[i,1]) \n \n T0, T1, X, Y, Z = surfcut_points( normal = perp[i], \\\n ax0=ax0, **kwargs ) \n \n EVAL = pot.evalpotential(X,Y,Z)\n im = ax1.pcolormesh( T0, T1, EVAL, \\\n cmap=plt.get_cmap('jet') ) \n plt.axes( ax1 ) \n cbar = plt.colorbar(im)\n cbar.set_label( pot.unitlabel, rotation=0) \n \n ymin = EVAL.min()\n ymax = EVAL.max()\n \n Ims.append(im) \n if ymin < yMin : yMin = ymin\n if ymax > yMax : yMax = ymax \n \n for im in Ims:\n im.set_clim( vmin=yMin, vmax=yMax)",
"def plot_3D(Y_data, num_area):\n ref_shape = [Y_data.shape[0], Y_data.shape[1], Y_data.shape[2]]\n fig = plt.figure()\n ax = plt.axes(projection=\"3d\")\n axl = plt.gca()\n axl.set_xlim3d([0, ref_shape[0]])\n axl.set_ylim3d([0, ref_shape[1]])\n axl.set_zlim3d([0, ref_shape[2]])\n\n fig.set_facecolor('black')\n ax.set_facecolor('black')\n ax.grid(False)\n ax.w_xaxis.pane.fill = False\n ax.w_yaxis.pane.fill = False\n ax.w_zaxis.pane.fill = False\n\n ax.set_xlabel('Width', c='white')\n ax.set_ylabel('Depth', c='white')\n ax.set_zlabel('Height', c='white')\n\n for a in np.arange(1, num_area+1):\n loc = np.where(Y_data == a)\n ax.scatter3D(loc[0], loc[1], loc[2], marker=\".\", alpha=0.9)\n\n plt.show()",
"def plot3D(self, diaphragmpoints=None, lungpoints=None, fig=None, ax=None, diaphragmcolor='r', lungcolor='g', size=2, howplot=0, dots=0):\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.set_xlabel('X axis')\n ax.set_ylabel('Y axis')\n ax.set_zlabel('Z axis')\n\n if diaphragmpoints is not None and lungpoints is not None:\n points = diaphragmpoints + lungpoints\n elif diaphragmpoints is not None:\n points = diaphragmpoints\n elif lungpoints is not None:\n points = lungpoints\n\n xpts, ypts, zpts = list(), list(), list()\n for i in range(len(points)):\n xpts.append(points[i][0])\n ypts.append(points[i][1])\n zpts.append(points[i][2])\n\n X = np.asarray(xpts)\n Y = np.asarray(ypts)\n Z = np.asarray(zpts)\n\n if howplot == 'wireframe':\n xpts, ypts, zpts = list(), list(), list()\n for i in range(len(pts)):\n xpts.append(pts[i][0])\n ypts.append(pts[i][1])\n zpts.append(pts[i][2])\n\n X = np.asarray([xpts])\n Y = np.asarray([ypts])\n Z = np.asarray([zpts])\n\n if dots == 1:\n ax.scatter(X, Y, Z, s=size, c='r', marker='o')\n\n ax.plot_wireframe(X, Y, Z)\n elif howplot == 1:\n ax.scatter(X, Y, Z, s=size, c=diaphragmcolor, marker='o')\n else:\n ax.scatter(X, Y, Z, s=size, c=diaphragmcolor, marker='o')\n ax.plot_trisurf(X, Y, Z, linewidth=0.2, antialiased=True)\n\n # Create cubic bounding box to simulate equal aspect ratio\n max_range = np.array([X.max() - X.min(), Y.max() - Y.min(), Z.max() - Z.min()]).max()\n Xb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][0].flatten() + 0.5 * (X.max() + X.min())\n Yb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][1].flatten() + 0.5 * (Y.max() + Y.min())\n Zb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][2].flatten() + 0.5 * (Z.max() + Z.min())\n\n # Comment or uncomment following both lines to test the fake bounding box:\n for xb, yb, zb in zip(Xb, Yb, Zb):\n ax.plot([xb], [yb], [zb], 'w')\n\n plt.show()\n # fig.savefig('{}/diaphragm/{}.png'.format(DIR_RESULT))",
"def surf_plot(x, y, z, filename, title = None, xlabel = None, ylabel = None, zlabel = None, elev = 0, azim = 0, **surf_kwargs):\n # Checking that the x- and y- and z- inputs are equal in length \n if len(x) != len(y) != len(z):\n raise LengthError()\n\n fig = plt.figure() # Creates blank figure\n ax = fig.gca(projection='3d') # Creating 3-dimensional axes\n fig.set_size_inches(18, 10) # Sets figure size\n\n # Plotting the surface - specifying the colormap, and setting the surface to opaque (with antialiased = False)\n ax.plot_trisurf(x, y, z, cmap = cm.coolwarm, linewidth=0, antialiased=False, **surf_kwargs) \n\n # Setting plot parameters\n ax.set_title(title, fontsize = 24, pad = 15)\n ax.set_xlabel(xlabel, fontsize=18, labelpad = 15)\n ax.set_ylabel(ylabel, fontsize=18, labelpad = 15)\n ax.set_zlabel(zlabel, fontsize=18, labelpad = 15)\n ax.tick_params(axis='both', which='major', pad=10)\n ax.set_zlim(0, 1.0) # z-axis limits set to [0,1] as the z-axis refers to probability in our case.\n\n ax.view_init(elev=elev, azim=azim) # Sets 'camera angle' of surface plot, for saving\n # f-string allows save filepath to be set inside the plt.savefig() function\n plt.savefig(f'{os.path.join(plot_path,filename)}.pdf', dpi = 200) # Saving the plot in the 'plots' folder",
"def plot3d(self, data, axis2, axis3, mesh, data_type='solution', colormap='blue-red', axes=False,\n cartesian_coordinates=False, interp_size=None, ax_names=None, style=0, *args, **kwargs):\n # if type(axis2) is not Axis1d or type(axis3) is not Axis1d:\n # raise NotImplementedError(\"3D plots with such combination of axes are not supported.\")\n # x_grid, y_grid, z_grid = self.cartesian_coordinates(axis2, axis3)\n # Title\n if data_type == 'solution':\n title = util.text.solution_caption(cartesian_coordinates, self, axis2, axis3).replace('$', '') \\\n .replace('{', '').replace('}', '')\n elif data_type == 'detector_geometry' or data_type == 'detector_geometry_n':\n title = ' ' + re.sub('[${}]', '', util.text.detector_caption(mesh, data_type, cartesian_coordinates))\n if axes:\n if ax_names is None:\n axes = ('{}, {}'.format(self.name, self.units),\n '{}, {}'.format(axis2.name, axis2.units),\n '{}, {}'.format(axis3.name, axis3.units))\n else:\n axes = ax_names\n # Voxel style\n if style == 0:\n if cartesian_coordinates:\n vertices, faces = self.cell_edges3d_cartesian(axis2, axis3)\n else:\n vertices, faces = self.cell_edges3d(axis2, axis3)\n vertices = np.array(vertices)\n if data_type == 'solution':\n x = []\n y = []\n z = []\n new_data = []\n new_faces = []\n shift = 0\n for i, a1 in enumerate(vertices):\n for j, a2 in enumerate(a1):\n for k, a3 in enumerate(a2):\n vert_faces = np.array(faces[i][j][k]) + shift\n for f in vert_faces:\n new_faces.append(f)\n for vert in a3:\n x.append(vert[0])\n y.append(vert[1])\n z.append(vert[2])\n new_data.append(data[i][j][k])\n shift += 1\n plot3d.voxel_plot(new_data, x, y, z, new_faces, title=title, axes=axes, colormap=colormap,\n *args, **kwargs)\n elif data_type == 'detector_geometry' or data_type == 'detector_geometry_n':\n new_data = []\n for det in data:\n x = []\n y = []\n z = []\n det_data = []\n new_faces = []\n shift = 0\n for i, a1 in enumerate(vertices):\n for j, a2 in enumerate(a1):\n for k, a3 in enumerate(a2):\n vert_faces = np.array(faces[i][j][k]) + shift\n for f in vert_faces:\n new_faces.append(f)\n for vert in a3:\n x.append(vert[0])\n y.append(vert[1])\n z.append(vert[2])\n det_data.append(det[i][j][k])\n shift += 1\n new_data.append(det_data)\n plot3d.detector_voxel_plot(new_data, x, y, z, new_faces, title=title, axes=axes, colormap=colormap,\n *args, **kwargs)\n else:\n raise ValueError('data type {} is unknown'.format(data_type))\n return 0, 0\n\n if cartesian_coordinates:\n x_grid, y_grid, z_grid = self.cartesian_coordinates(axis2, axis3)\n else:\n coord = [self.coordinates, axis2.coordinates, axis3.coordinates]\n x_grid, y_grid, z_grid = np.array(np.meshgrid(*coord, indexing='ij'))\n\n if data_type == 'solution':\n # irregular or non-cartesian axes\n if not all((self.regular, axis2.regular, axis3.regular)) or \\\n (cartesian_coordinates and not all(type(x) == cartesian.Axis1d for x in (self, axis2, axis3))):\n if interp_size is None:\n interp_size = 50\n warnings.warn(\"Since axes are not regular, linear interpolation with {} points used. 
\"\n \"You can change interpolation size with interp_size attribute.\"\n .format(interp_size ** 3))\n x_grid, y_grid, z_grid, new_data = \\\n util.geometry3d_basic.make_regular(data, x_grid, y_grid, z_grid, interp_size)\n new_data = np.nan_to_num(new_data)\n new_data = np.clip(new_data, np.amin(data), np.amax(data))\n mask = mesh.is_in_grid(self.from_cartesian([x_grid, y_grid, z_grid], axis2, axis3), self, axis2, axis3)\n new_data *= mask\n else:\n new_data = data\n # plot\n plot3d.contour3d(new_data, x_grid, y_grid, z_grid,\n title=title, colormap=colormap, axes=axes, style=style, *args, **kwargs)\n\n elif data_type == 'detector_geometry' or data_type == 'detector_geometry_n':\n # irregular axes\n if not all((self.regular, axis2.regular, axis3.regular)) or \\\n (cartesian_coordinates and not all(type(x) == cartesian.Axis1d for x in (self, axis2, axis3))):\n if interp_size is None:\n interp_size = 50\n warnings.warn(\"Since axes are not regular, linear interpolation with {} points used. \"\n \"You can change interpolation size with interp_size attribute.\"\n .format(interp_size ** 3))\n x_grid_n, y_grid_n, z_grid_n = x_grid, y_grid, z_grid\n new_data = np.zeros((data.shape[0], interp_size, interp_size, interp_size))\n # interpolate data for each detector\n print(\"Start interpolation.\")\n mask = None\n for i, d in enumerate(data):\n x_grid, y_grid, z_grid, new_data[i] \\\n = util.geometry3d_basic.make_regular(d, x_grid_n, y_grid_n, z_grid_n, interp_size)\n if mask is None:\n mask = mesh.is_in_grid(self.from_cartesian([x_grid, y_grid, z_grid], axis2, axis3), self, axis2,\n axis3)\n new_data[i] = np.nan_to_num(new_data[i])\n new_data[i] = np.clip(new_data[i], np.amin(data[i]), np.amax(data[i]))\n new_data[i] *= mask\n print('\\r', end='')\n print(\"...\", str((i + 1) * 100 // data.shape[0]) + \"% complete\", end='')\n print('\\r \\r', end='')\n\n else:\n new_data = data\n plot3d.detector_contour3d(new_data, x_grid, y_grid, z_grid,\n title=title, colormap=colormap, axes=axes, style=style, *args, **kwargs)\n else:\n raise ValueError('data type {} is unknown'.format(data_type))\n\n return 0, 0",
"def scatter3d(self, x, y, z, filename=None, spot_cols=None, label=False, stem=False, \n label_font_size=6, rotation=134, elevation=48, interactive=False, squish_scales=False, \n spot_size=40, **kargs):\n assert filename, \"scatter(): Must provide a filename\" \n \n xdata = self.__v[x-1]\n ydata = self.__v[y-1]\n zdata = self.__v[z-1]\n \n fig = self.__draw.getfigure(**kargs)\n ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=elevation, azim=rotation)\n \n cols = self.cols\n if spot_cols:\n cols = spot_cols \n \n ax.scatter(xdata, ydata, zdata, edgecolors=\"none\", c=cols, s=spot_size)\n if label:\n for i, lab in enumerate(self.labels):\n ax.text(xdata[i], ydata[i], zdata[i], lab, size=label_font_size, ha=\"center\", va=\"bottom\")\n \n if stem: # stem must go after scatter for sorting. Actually, not true right? matplotlib uses zorder for that...\n z_min = min(zdata)\n for x_, y_, z_ in zip(xdata, ydata, zdata): \n line = art3d.Line3D(*list(zip((x_, y_, z_min), (x_, y_, z_))), marker=None, c=\"grey\", alpha=0.1)\n ax.add_line(line)\n \n ax.set_xlabel(\"PC%s\" % (x,)) # can be overridden via do_common_args()\n ax.set_ylabel(\"PC%s\" % (y,))\n ax.set_zlabel(\"PC%s\" % (z,))\n \n if \"logx\" in kargs and kargs[\"logx\"]:\n ax.set_xscale(\"log\", basex=kargs[\"logx\"])\n if \"logy\" in kargs and kargs[\"logy\"]:\n ax.set_yscale(\"log\", basey=kargs[\"logy\"])\n \n if squish_scales: \n # Don't worry about kargs, do_common_args will overwrite.\n ax.set_xlim([min(xdata), max(xdata)])\n ax.set_ylim([min(ydata), max(ydata)])\n ax.set_zlim([min(zdata), max(zdata)])\n \n self.__draw.do_common_args(ax, **kargs)\n if \"zlims\" in kargs:\n ax.set_zlim([kargs[\"zlim\"][0], kargs[\"zlim\"][1]])\n \n if interactive:\n fig.show() # hope you are not on a cluster!\n \n real_filename = self.__draw.savefigure(fig, filename)\n \n config.log.info(\"scatter3d(): Saved 'PC%s' vs 'PC%s' vs 'PC%s' scatter to '%s'\" % (x, y, z, real_filename))",
"def plot3D(x):\n cycol = cycle('bgrcmk')\n fig = plt.figure()\n ax = Axes3D(fig)\n for i in range(5):\n ax.scatter(x[:, i, 0], x[:, i, 1], x[:, i, 2], c=next(cycol),\n marker='.')\n plt.show()"
] | [
"0.73884255",
"0.72141445",
"0.71854126",
"0.71801126",
"0.71716255",
"0.71536535",
"0.7151224",
"0.7124016",
"0.707871",
"0.70730287",
"0.707246",
"0.7070368",
"0.7057608",
"0.70523095",
"0.70174587",
"0.7009494",
"0.70016164",
"0.6999338",
"0.69921994",
"0.685559",
"0.6846879",
"0.68254447",
"0.6797342",
"0.6766533",
"0.6715691",
"0.67122895",
"0.6708738",
"0.66907007",
"0.6652135",
"0.6619722"
] | 0.76071006 | 0 |
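The positive document above (`plot3d2`) depends on names defined elsewhere in its source (`FrankeFunction`, `results_dir`, `cm`, `LinearLocator`, `FormatStrFormatter`). A minimal, self-contained sketch of the same side-by-side 3D surface plot, with stand-in surfaces used in place of the Franke function:

```python
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 -- registers the 3d projection on older matplotlib

# Build a small grid and two example surfaces to place side by side.
x = np.linspace(0, 1, 50)
y = np.linspace(0, 1, 50)
x, y = np.meshgrid(x, y)
z = np.exp(-((x - 0.5) ** 2 + (y - 0.5) ** 2) * 10)   # stand-in for a fitted surface
z2 = z + 0.05 * np.random.randn(*z.shape)             # stand-in for the reference surface

fig = plt.figure(figsize=(12, 7))

# Left panel: colormapped surface with a fixed z-range and formatted ticks.
ax = fig.add_subplot(121, projection='3d')
surf = ax.plot_surface(x, y, z, cmap=cm.coolwarm, linewidth=0, antialiased=False)
ax.set_zlim(-0.10, 1.40)
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
ax.view_init(elev=20., azim=30)

# Right panel: the comparison surface, same axis limits and viewing angle.
ax = fig.add_subplot(122, projection='3d')
ax.plot_surface(x, y, z2, linewidth=0, antialiased=False)
ax.set_zlim(-0.10, 1.40)
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
ax.view_init(elev=20., azim=30)

# Shared color bar mapping values to colors for the left panel.
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.show()
```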
Not used, but it would be a shame to delete it: finds the MSE for different n×n sized data sets and plots the MSE as a function of n. | def MSE_plots(n_min, n_max, save_fig, k = [5], method = 'OLS', lamb = 1, split = False, train = 0.7, N = 1, method2 = 'OLS'):
n = np.linspace(n_min, n_max, n_max - n_min + 1)
errors = np.zeros((4, len(k), len(n))) # First index MSE for real FrankeFunction, MSE for the data, R2 for the real FrankeFunction, R2 for the data
#Second index is the max order of polynomial, third index is for the n-value
if type(k) != type([2]):
k = [k]
for j in range(N):
#print(j)
for i in range(len(n)):
#print(i)
x = np.random.uniform(0, 1, size = int(n[i]))
y = np.random.uniform(0, 1, size = int(n[i]))
x, y = np.meshgrid(x, y)
z = FrankeFunction(x, y) + np.random.normal(0, 1, size = x.shape)
z_real = FrankeFunction(x, y)
for poly in range(len(k)):
a = regression(x, y, z, k = k[poly], split = split, train = train)
if method == 'OLS':
beta = a.OLS()
elif method == 'Ridge':
beta = a.Ridge(lam = lamb)
elif method == 'Lasso':
beta = a.Lasso(alpha = lamb)
elif method == 'K-fold':
beta = a.k_cross(fold = 25, method2 = method2, lam = lamb)[0]
if split == True:
X = a.design_matrix(k = k[poly])
X_train, X_test, z_real_train, z_real_test = a.train_test(X = X, z = z_real, train = train)
z_tilde = a.z_tilde(X = X_test, beta = beta)
errors[0, poly, i] += a.MSE(z_tilde, z_real_test)
errors[1, poly, i] += a.MSE(z_tilde, a.z_test)
errors[2, poly, i] += a.R_squared(z_tilde = z_tilde, z = z_real_test)
errors[3, poly, i] += a.R_squared(z_tilde = z_tilde, z = a.z_test)
else:
z_tilde = a.z_tilde(beta = beta)
errors[0, poly, i] += a.MSE(z_tilde, z_real)
errors[1, poly, i] += a.MSE(z_tilde, z)
errors[2, poly, i] += a.R_squared(z_tilde = z_tilde, z = z_real)
errors[3, poly, i] += a.R_squared(z_tilde = z_tilde, z = z)
n_mid = int(len(n)/2)
title = ['MSE FrankeFunction', 'MSE data', 'R2 FrankeFunction', 'R2 data']
y_label = ['MSE', 'MSE', 'R^2', 'R^2']
errors /= N
save_name = ['franke', 'data', 'franke', 'data']
if method == 'Ridge':
method += ' with lambda = ' + str(lamb)
if method == 'K-fold':
method += ' using ' + method2
if method2 == 'Ridge' or method2 == 'Lasso':
method += ' with lambda = ' + str(lamb)
for i in range(4):
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 7))
for j in range(len(k)):
ax1.plot(n[:n_mid], errors[i, j, :n_mid], label = 'k = ' + str(k[j]))
ax2.plot(n[n_mid:], errors[i, j, n_mid:], label = 'k = ' + str(k[j]))
ax1.set_ylabel(y_label[i]); ax2.set_ylabel(y_label[i])
ax1.set_xlabel('n'); ax2.set_xlabel('n')
if split == True:
fig.suptitle(title[i] + ' with ' + str(method) + ' with test/training split at ' + str(train) + ' and mean of ' + str(N) + ' runs.')
else:
fig.suptitle(title[i] + ' with ' + str(method) + ' without test/training split' + ' and mean of ' + str(N) + ' runs.')
ax1.legend(); ax2.legend()
#fig.savefig(results_dir + save_fig + method + save_name[i] + y_label[i] + '.png')
plt.show() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def MSE(actual, noisy):\n mean_squared_error(actual, noisy)",
"def calc_mse(data, ax=0):\n return ((data[:, 0] - data[:, 1]) ** 2).mean(axis=ax)",
"def compute_MSE(e):\n\n return 1/2*np.mean(e**2)",
"def plot(self) -> None:\n cw_l2_data_list = list(); cw_linf_data_list = list()\n\n for model in self.model_list:\n cw_l2_data_list.append(joblib.load(model + \"/stat/mse-rmse-si-mae-cw_l2_1.pkl\"))\n\n cw_l2_attack = list(zip(self.model_list, cw_l2_data_list))\n\n for model in self.model_list:\n cw_linf_data_list.append(joblib.load(model + \"/stat/mse-rmse-si-mae-cw_inf_1.pkl\"))\n\n cw_linf_attack = list(zip(self.model_list, cw_linf_data_list))\n\n # RMSE v.s. MAE over change budget\n # There will be one graph for each manipulation\n # CW_L2 ATTACK\n for datum in cw_l2_attack:\n ran_color_list = self._random_color_picker(2)\n fig, axis_1 = plt.subplots()\n\n # Generate x_axis\n x_axis = list()\n for key in datum[1]:\n if float(key) not in x_axis:\n x_axis.append(float(key))\n\n x_axis.sort()\n\n # Sort data in datum[1]\n data_dict = self._sort_dict(x_axis, datum[1])\n\n # PLOT RMSE ON AXIS 1\n # Generate y_axis ticks for RMSE\n rmse_values = list()\n for key in data_dict:\n rmse_values.append(data_dict[key][\"rmse\"])\n\n # Generate 10 ticks for the y_axis\n y_axis_ticks = np.linspace(0.0, 0.6, num=10, endpoint=True)\n\n # Plot RMSE\n axis_1.plot(x_axis, rmse_values, color=ran_color_list[0], linestyle=\"solid\")\n axis_1.set_xlabel(\"Perturbation Budget\")\n axis_1.set_ylabel(\"Root Mean Squared Error (RMSE)\", color=ran_color_list[0])\n axis_1.set_yticks(y_axis_ticks)\n \n for tick_label, tick_line in zip(axis_1.get_yticklabels(), axis_1.get_yticklines()):\n tick_label.set_color(ran_color_list[0])\n tick_line.set_color(ran_color_list[0])\n\n # PLOT MAE ON AXIS 2\n axis_2 = axis_1.twinx()\n\n # Generate y-axis ticks for MAE\n mae_values = list()\n for key in data_dict:\n mae_values.append(data_dict[key][\"mae\"])\n\n\n # Plot MAE\n axis_2.plot(x_axis, mae_values, color=ran_color_list[1], linestyle=\"solid\")\n axis_2.set_ylabel(\"Mean Absolute Error (MAE)\", color=ran_color_list[1])\n axis_2.set_yticks(y_axis_ticks)\n \n for tick_label, tick_line in zip(axis_2.get_yticklabels(), axis_2.get_yticklines()):\n tick_label.set_color(ran_color_list[1])\n tick_line.set_color(ran_color_list[1])\n\n model_tag = datum[0].split(\"/\"); model_tag = model_tag[-1]\n plt.savefig(self.save_path + \"/{}_rmse-and-mae-as-perturbation-budget-increases-for-cw_l2-attack-on-model-{}.png\".format(self.plot_name, model_tag), \n bbox_inches=\"tight\")\n plt.close()\n\n # CW_Linf ATTACK\n for datum in cw_linf_attack:\n ran_color_list = self._random_color_picker(2)\n fig, axis_1 = plt.subplots()\n\n # Generate x_axis\n x_axis = list()\n for key in datum[1]:\n if float(key) not in x_axis:\n x_axis.append(float(key))\n\n x_axis.sort()\n\n # Sort data in datum[1]\n data_dict = self._sort_dict(x_axis, datum[1])\n\n # PLOT RMSE ON AXIS 1\n # Generate y_axis ticks for RMSE\n rmse_values = list()\n for key in data_dict:\n rmse_values.append(data_dict[key][\"rmse\"])\n\n # Plot RMSE\n axis_1.plot(x_axis, rmse_values, color=ran_color_list[0], linestyle=\"solid\")\n axis_1.set_xlabel(\"Perturbation Budget\")\n axis_1.set_ylabel(\"Root Mean Squared Error (RMSE)\", color=ran_color_list[0])\n axis_1.set_yticks(y_axis_ticks)\n\n for tick_label, tick_line in zip(axis_1.get_yticklabels(), axis_1.get_yticklines()):\n tick_label.set_color(ran_color_list[0])\n tick_line.set_color(ran_color_list[0])\n\n # PLOT MAE ON AXIS 2\n axis_2 = axis_1.twinx()\n\n # Generate y-axis ticks for MAE\n mae_values = list()\n for key in data_dict:\n mae_values.append(data_dict[key][\"mae\"])\n\n # Plot MAE\n axis_2.plot(x_axis, mae_values, 
color=ran_color_list[1], linestyle=\"solid\")\n axis_2.set_ylabel(\"Mean Absolute Error (MAE)\", color=ran_color_list[1])\n axis_2.set_yticks(y_axis_ticks)\n \n for tick_label, tick_line in zip(axis_2.get_yticklabels(), axis_2.get_yticklines()):\n tick_label.set_color(ran_color_list[1])\n tick_line.set_color(ran_color_list[1])\n \n model_tag = datum[0].split(\"/\"); model_tag = model_tag[-1]\n plt.savefig(self.save_path + \"/{}_rmse-and-mae-as-perturbation-budget-increases-for-cw_linf-attack-on-model-{}.png\".format(self.plot_name, model_tag),\n bbox_inches=\"tight\")\n plt.close()\n \"RMSE and MAE as Perturbation Budget increases for CW_Linf attack on model {}\".format(model_tag)\n \n # Scattter Index over the change budget\n # All the manipulations will be put on the same graph.\n # CW_L2 ATTACK\n plt.figure()\n plt.xlabel(\"Perturbation Budget\"); plt.ylabel(\"Scatter Index\")\n ran_color_list = self._random_color_picker(len(cw_l2_attack)); i = 0\n\n # Find maximum scatter index value\n scatter_values = list()\n for datum in cw_l2_attack:\n for key in datum[1]:\n scatter_values.append(datum[1][key][\"scatter_index\"])\n\n # Generate y_axis ticks; generate 10 ticks\n y_axis_ticks = np.linspace(0.0, float(Decimal(str(max(scatter_values))) + Decimal(\"0.1\")), num=10, endpoint=True)\n plt.yticks(y_axis_ticks)\n\n # Generate x_axis\n x_axis = list()\n for datum in cw_l2_attack:\n for key in datum[1]:\n if float(key) not in x_axis:\n x_axis.append(float(key))\n\n x_axis.sort()\n\n formal_names = FormalNameMap()\n for datum in cw_l2_attack:\n values = list()\n data_dict = self._sort_dict(x_axis, datum[1])\n for key in data_dict:\n values.append(data_dict[key][\"scatter_index\"])\n\n # Append values to the plot\n line_name = datum[0].split(\"/\"); line_name = line_name[-1]\n formal_name = formal_names.getformalname(line_name) if formal_names.hasname(line_name) else line_name\n if \"vanilla\" in line_name:\n plt.plot(x_axis, values, color=ran_color_list[i], linewidth=3, linestyle=self._random_linestyle(), label=formal_name)\n\n else:\n plt.plot(x_axis, values, color=ran_color_list[i], linestyle=self._random_linestyle(), label=formal_name)\n \n i += 1\n\n plt.legend()\n plt.savefig(self.save_path + \"/{}_scatter-index-as-perturbation-budget-increases-for-cw_l2-attack.png\".format(self.plot_name),\n bbox_inches=\"tight\")\n plt.close()\n\n # CW_Linf ATTACK\n plt.figure()\n plt.xlabel(\"Perturbation Budget\"); plt.ylabel(\"Scatter Index\")\n ran_color_list = self._random_color_picker(len(cw_linf_attack)); i = 0\n\n # Find maximum scatter index value\n scatter_values = list()\n for datum in cw_linf_attack:\n for key in datum[1]:\n scatter_values.append(datum[1][key][\"scatter_index\"])\n\n # Generate y_axis ticks; generate 10 ticks\n y_axis_ticks = np.linspace(0.0, float(Decimal(str(max(scatter_values))) + Decimal(\"0.1\")), num=10, endpoint=True)\n plt.yticks(y_axis_ticks)\n\n # Generate x_axis\n x_axis = list()\n for datum in cw_l2_attack:\n for key in datum[1]:\n if float(key) not in x_axis:\n x_axis.append(float(key))\n\n x_axis.sort()\n\n formal_names = FormalNameMap()\n for datum in cw_linf_attack:\n values = list()\n data_dict = self._sort_dict(x_axis, datum[1])\n for key in data_dict:\n values.append(data_dict[key][\"scatter_index\"])\n\n # Append values to the plot\n line_name = datum[0].split(\"/\"); line_name = line_name[-1]\n formal_name = formal_names.getformalname(line_name) if formal_names.hasname(line_name) else line_name\n if \"vanilla\" in line_name:\n plt.plot(x_axis, values, 
color=ran_color_list[i], linewidth=3, linestyle=self._random_linestyle(), label=formal_name)\n\n else: \n plt.plot(x_axis, values, color=ran_color_list[i], linestyle=self._random_linestyle(), label=formal_name)\n \n i += 1\n\n plt.legend()\n plt.savefig(self.save_path + \"/{}_scatter-index-as-perturbation-budget-increases-for-cw_linf-attack.png\".format(self.plot_name),\n bbox_inches=\"tight\")\n plt.close()",
"def _plot_rmse(self, val=False):\n _, ax = plt.subplots()\n ax.plot(self.global_rmse, linewidth=3, color='blue', label='Train RMSE')\n ax.set_title('RMSE vs. Number of Iterations')\n if val is not None:\n ax.plot(self.validation_rmse, linewidth=3, color='green', label='Validation RMSE')\n ax.legend()\n plt.show()",
"def main():\n Nrep = 8 # number of repetition of EM steps\n nm = 3 # number of mixed gaussians.\n ns = 300 # number of samples.\n \n mu, sg, lm, lm_ind, smp, L_true = generate_synthetic_data(nm, ns)\n plt.figure(1, figsize=(5,4))\n plt.clf()\n plot_synthetic_data(smp, mu, sg, lm, lm_ind, nm, ns)\n \n mue, sge, lme = generate_initial_state(nm, ns)\n axi = 0 # subplot number\n plt.figure(2, figsize=(12,9))\n plt.clf()\n for rep in range(Nrep):\n # E-step\n r, L_infer = e_step(smp, mue, sge, lme, nm, ns)\n axi += 1 \n ax = plt.subplot(Nrep/2, 6, axi)\n plot_em_steps(smp, r, mue, sge, lme, nm, ns)\n ax.set_title('E-step : %d' % (rep + 1))\n ax.set_yticklabels([])\n ax.set_xticklabels([])\n ax.set_ylim((-0.1, 0.3))\n\n # M-step\n mue, sge, lme = m_step(smp, r, nm, ns)\n axi += 1 \n ax = plt.subplot(Nrep/2, 6, axi)\n plot_em_steps(smp, r, mue, sge, lme, nm, ns)\n ax.set_title('M-step : %d' % (rep + 1))\n ax.set_yticklabels([])\n ax.set_xticklabels([])\n ax.set_ylim((-0.1, 0.3))\n\n # plot the ground truth for comparison\n axi += 1 \n ax = plt.subplot(Nrep/2, 6, axi)\n plot_synthetic_data(smp, mu, sg, lm, lm_ind, nm, ns)\n ax.set_title('grn_truth')\n ax.set_yticklabels([])\n ax.set_xticklabels([])\n ax.set_ylim((-0.1, 0.3))\n\n print('L_infer = %2.6f , L_true = %2.6f' % (L_infer, L_true))",
"def hist_scores(self, fontsize=16, **kargs):\n pylab.clf()\n scores = self.scores\n opt = self.scores.min()\n N = len(set(scores))\n print(\"There are %s different MSE found amongst %s models\" % (N,len(scores)))\n res = pylab.hist(scores, **kargs)\n pylab.title(\"MSEs Distribution of the %s best models \" % len(scores),\n fontsize=fontsize)\n pylab.grid()\n pylab.plot([opt,opt], [0,max(res[0])], \"r--\",lw=2)\n pylab.xlabel(\"Mean Square Error of all models\", fontsize=fontsize)\n pylab.ylabel(\"#\", fontsize=fontsize)",
"def plot_mcse(\n idata,\n var_names=None,\n filter_vars=None,\n coords=None,\n errorbar=False,\n grid=None,\n figsize=None,\n textsize=None,\n extra_methods=False,\n rug=False,\n rug_kind=\"diverging\",\n n_points=20,\n labeller=None,\n ax=None,\n rug_kwargs=None,\n extra_kwargs=None,\n text_kwargs=None,\n backend=None,\n backend_kwargs=None,\n show=None,\n **kwargs\n):\n mean_mcse = None\n sd_mcse = None\n\n if coords is None:\n coords = {}\n if \"chain\" in coords or \"draw\" in coords:\n raise ValueError(\"chain and draw are invalid coordinates for this kind of plot\")\n\n if labeller is None:\n labeller = BaseLabeller()\n\n data = get_coords(convert_to_dataset(idata, group=\"posterior\"), coords)\n var_names = _var_names(var_names, data, filter_vars)\n\n probs = np.linspace(1 / n_points, 1 - 1 / n_points, n_points)\n mcse_dataset = xr.concat(\n [mcse(data, var_names=var_names, method=\"quantile\", prob=p) for p in probs], dim=\"mcse_dim\"\n )\n\n plotters = filter_plotters_list(\n list(xarray_var_iter(mcse_dataset, var_names=var_names, skip_dims={\"mcse_dim\"})),\n \"plot_mcse\",\n )\n length_plotters = len(plotters)\n rows, cols = default_grid(length_plotters, grid=grid)\n\n if extra_methods:\n mean_mcse = mcse(data, var_names=var_names, method=\"mean\")\n sd_mcse = mcse(data, var_names=var_names, method=\"sd\")\n\n mcse_kwargs = dict(\n ax=ax,\n plotters=plotters,\n length_plotters=length_plotters,\n rows=rows,\n cols=cols,\n figsize=figsize,\n errorbar=errorbar,\n rug=rug,\n data=data,\n probs=probs,\n kwargs=kwargs,\n extra_methods=extra_methods,\n mean_mcse=mean_mcse,\n sd_mcse=sd_mcse,\n textsize=textsize,\n labeller=labeller,\n text_kwargs=text_kwargs,\n rug_kwargs=rug_kwargs,\n extra_kwargs=extra_kwargs,\n idata=idata,\n rug_kind=rug_kind,\n backend_kwargs=backend_kwargs,\n show=show,\n )\n\n if backend is None:\n backend = rcParams[\"plot.backend\"]\n backend = backend.lower()\n\n # TODO: Add backend kwargs\n plot = get_plotting_function(\"plot_mcse\", \"mcseplot\", backend)\n ax = plot(**mcse_kwargs)\n return ax",
"def plot_test_vs_validation_set(output_name, C, sigma, M, xlim=None, ylim=None, xticks=None, yticks=None):\n import matplotlib.pyplot as plt\n plt.ioff()\n plt.style.use('./latex-paper.mplstyle')\n plt.figure()\n ax = plt.axes()\n ax.yaxis.grid(True)\n\n d = pickler.load(f'categorical_K2_C{C}_sigma{sigma:.2f}_M{M}'.replace('.','_'))\n x_values = [x.n_train for x in d.xs]\n plt.plot(x_values, d.results[:,0], 'C0-', label=r'$\\mathrm{e}_{\\mathrm{val}}(m=n)$')\n plt.plot(x_values, d.results[:,1], 'C1-', label=r'$\\mathrm{e}_{\\mathrm{gen}}(m=n)$')\n\n d = pickler.load(f'categorical_LOO_C{C}_sigma{sigma:.2f}_M{M}'.replace('.','_'))\n x_values = [x.n_train for x in d.xs]\n plt.plot(x_values, d.results[:,0], 'C0--', label=r'$\\mathrm{e}_{\\mathrm{val}}(m=1)$')\n plt.plot(x_values, d.results[:,1], 'C1--', label=r'$\\mathrm{e}_{\\mathrm{gen}}(m=1)$')\n\n plt.xlabel('n')\n plt.ylabel('MSE')\n if xlim is not None:\n plt.xlim(xlim)\n if ylim is not None:\n plt.ylim(ylim)\n if xticks is not None:\n plt.xticks(xticks)\n if yticks is not None:\n plt.yticks(yticks)\n \n plt.legend(loc='best')\n simulations_framework.save_figure(output_name + f'_reps{d.n_repetitions}')",
"def mse(self, data, *args, **kwargs):\n return self._mse(np.array(data), *args, **kwargs)",
"def plot_mse(mse, lambda0, lambda1, scale, loc='lower right'):\n import matplotlib.pyplot as plt\n\n fig = plt.figure()\n ax = fig.add_subplot(211)\n ax.plot(*zip(*mse))\n plt.xlabel('$\\lambda$')\n plt.ylabel('MSE')\n plt.yticks(scale)\n\n ax = fig.add_subplot(212)\n ax.plot(*zip(*lambda0), label='$\\lambda=0$')\n plt.xlabel('Episode')\n plt.ylabel('MSE')\n ax.plot(*zip(*lambda1), label='$\\lambda=1$')\n plt.legend(loc=loc)\n\n plt.show()",
"def calculate_mse(e):\n return 1/2*np.mean(e.dot(e))",
"def calculate_mse(e):\r\n return 1/2*np.mean(e**2)",
"def mse(self, x_tensors=None):\n\n return self.se(x_tensors)/self.y.size",
"def putting_it_all_together_2(design_matrix, response_vector):\n res = []\n X_train, X_test, y_train, y_test = train_test_split(design_matrix, response_vector, test_size=0.25)\n num_rows_in_X_train = X_train.shape[0]\n for p in range(1, 101):\n num_rows = int(num_rows_in_X_train * (p / 100)) + 1\n coefficients_vector, s = fit_linear_regression(X_train[:num_rows, :], y_train[:num_rows, :])\n prediction_vector = predict(X_test, coefficients_vector)\n res.append(mse(prediction_vector, y_test))\n\n fig, ax = create_fig(\"MSE over the test set as a function of p%\", \"p\", \"MSE over the test set\", (-1, 101),\n (2 * math.pow(10, 10), max(res)))\n ax.plot(np.arange(1, 101), res, color='g')\n fig.show()",
"def MSE(y,yhat):\r\n #\r\n y = np.asarray(y)\r\n yhat = np.asarray(yhat)\r\n if y.size != yhat.size:\r\n raise(ValueError(\"y and yhat should be of same size now\\n\\\r\n size(y) = %d and size(yhat) = %d\"%(y.size,yhat.size)))\r\n N = yhat.size\r\n y = y.reshape(N,)\r\n yhat = yhat.reshape(N,)\r\n \r\n res = y - yhat\r\n sse = np.sum(res**2) #sum squared errors\r\n MSE = sse/N\r\n return(MSE)",
"def fig_2_11V2(x, y, z, first_poly = 4, complexity = 10, N = 7, method = 'OLS', seed = 42, lam = 0, folds = 5, save_fig = ''):\n errors = np.zeros((4, complexity + 1))\n bias = np.zeros(complexity + 1)\n variance = np.zeros(complexity + 1)\n z_real = FrankeFunction(x, y)\n\n complx = np.arange(first_poly, first_poly + complexity + 1, 1)\n\n MSE = np.zeros(complexity + 1)\n\n for i in range(complexity + 1):\n print(i)\n model = regression(x, y, z, k = first_poly + i, split = False, seed = seed)\n\n for j in range(N):\n _, MSE_R2D2, _, _, _, _ = model.k_cross(fold = folds, method2 = method, lam = lam, random_num = True)\n errors[:, i] += np.mean(MSE_R2D2, axis = 0)\n\n errors /= N\n\n print(errors)\n\n\n plt.title(method + ' Test vs Train error in k-fold with ' + str(folds) + '-folds')\n plt.plot(complx, errors[0], 'go--', label = 'Test', color = 'blue')\n plt.plot(complx, errors[2], 'go--', label = 'Training', color = 'red')\n #plt.ylim([np.min(errors_R2[2]*1.2), np.max(errors_R2[0]*1.2)])\n plt.legend()\n plt.xlabel('Polynomial maximum order', fontsize = 14)\n plt.ylabel('MSE', fontsize = 14)\n plt.savefig(results_dir + 'tradeoff2MSE' + method + save_fig + '.png')\n\n plt.show()\n\n plt.title(method + ' Test vs Train error in k-fold with ' + str(folds) + '-folds')\n plt.xlabel('Polynomial maximum order', fontsize = 14)\n plt.ylabel('R2', fontsize = 14)\n plt.plot(complx, errors[1], 'go--', label = 'Test', color = 'blue')\n plt.plot(complx, errors[3], 'go--', label = 'Training', color = 'red')\n #plt.ylim([np.min(errors_R2[3]*1.2), np.max(errors_R2[1]*1.2)])\n plt.legend()\n plt.tight_layout()\n plt.savefig(results_dir + 'tradeoff2R2' + method + save_fig + '.png')\n plt.show()",
"def plot_reduce_dimension(model):\n\n outputs = []\n n = 8\n paths = 'data/grimace'\n dirs = np.random.choice(os.listdir(paths), n)\n\n for d in dirs:\n p = paths + '/' + str(d)\n files = os.listdir(p)\n if files:\n for f in files:\n img = os.path.join(p, f)\n image = cv2.imread(img)\n image = process_image(image)\n output = model.predict(image)[0]\n outputs.append(output)\n\n embedded = TSNE(2).fit_transform(outputs)\n\n colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k', 'w']\n\n for i in range(n):\n m, n = i * 20, (i + 1) * 20\n plt.scatter(embedded[m: n, 0], embedded[m: n, 1],\n c=colors[i], alpha=0.5)\n\n plt.title('T-SNE')\n plt.grid(True)\n plt.show()",
"def plot_losses(train, test, mode):\n\tplt.figure()\n\tplt.plot(range(len(train)), train, 'r', label='Training')\n\tplt.plot(range(len(test)), test, 'b', label='Testing')\n\tplt.title('MSE Loss (batch type: ' + mode + ')')\n\tplt.legend()\n\tplt.show()",
"def mse(actual,expected):\n return np.mean(se(actual,expected))",
"def visualization(epochs, mse_tr, mse_te):\n plt.semilogx(epochs, mse_tr, marker=\".\", color='b', label='train error')\n plt.semilogx(epochs, mse_te, marker=\".\", color='r', label='test error')\n plt.xlabel(\"k\")\n plt.ylabel(\"rmse\")\n plt.title(\"cross validation\")\n plt.legend(loc=2)\n plt.grid(True)\n plt.savefig(\"cross_validation\")",
"def plot_average_MAE(train_datagen, val_datagen, model, gender = None):\n ages = np.arange(15,41)\n y_true, y_pred, true_pred_df = get_ytrue_ypred(model, val_datagen)\n mae_average = get_average_MAE(true_pred_df)\n count_train = get_count_train(train_datagen)\n print(count_train)\n \n fig, ax = plt.subplots(figsize = (12,5))\n ax.plot(ages, mae_average, label = 'Average MAE', linewidth = 2)\n ax.scatter(ages, mae_average)\n ax2 = ax.twinx()\n ax2.plot(ages, count_train, color = 'steelblue',label = 'Count of Images')\n ax2.fill_between(ages,count_train,alpha = 0.1, color='steelblue')\n ax2.set_ylabel('Count of Images per age')\n ax.set_xticks(ages)\n ax.set_xlabel('Age')\n ax.set_ylabel('MAE')\n ax.set_xlim(left=14.5, right=40.5)\n ax.set_ylim(bottom = 0)\n ax2.set_ylim(bottom = 0)\n ax2.grid(None)\n ax2.legend(loc = 'upper center')\n ax.legend()\n if gender == 'M':\n ax.set_title('Average MAE per age - Male')\n elif gender == 'F':\n ax.set_title('Average MAE per age - Female')\n else:\n ax.set_title('Average MAE per age')",
"def _mse(self):\n error = self._input * self._weights - self._label\n sum_ = 0.0\n for i in range(self._input.shape[0]):\n sum_ += error[i, 0]**2\n return sum_/self._input.shape[0]",
"def rmse(x, y):\n return mse(x, y) ** .5",
"def d_mse(x, y):\n\n return 2 * (x - y) / x.size(0) / x.size(1)",
"def plotErrors(losses, model_title ='Shallow Network, SGD, Batch Size = 10'):\n fig, axes = plt.subplots()\n\n x = np.arange(len(losses))\n\n axes.plot(x, losses)\n axes.set_ylabel(\"Loss (cross entropy)\")\n axes.set_xlabel(\"Number of iterations\")\n axes.set_title(model_title) \n\n plt.show() \n\n return None",
"def mse(x, y):\n\n return (x - y).pow(2).sum(dim=1, keepdim=True).mean() / x.size(1)",
"def MeanSquaredError(y_data, y_model):\n\tn = np.size(y_model)\n\tMSE = (1/n)*np.sum((y_data-y_model)**2)\n\n\treturn MSE",
"def visualize(self):\n\t\tplt.figure(1)\n\t\tax1 = plt.add_suplot(1,2,1)\n\t\t# Plot free energy error\n\t\tax1.plot(self.FE_errors_GMM_CV_, linewidth=4, label='GMM with cross-validation')\n\t\tax1.plot(self.FE_errors_GMM_mix_models_, linewidth=4, label='GMM with mixture of models')\n\t\tplt.legend()\n\n\t\t# Plot density error\n\n\t\t# Plot log-likelihood of test set\n\n\t\t# Plot clustering score\n\n\t\tplt.show()\n\n\t\treturn",
"def mse(self):\n xs, ys = self.R.nonzero()\n predicted = self.full_matrix()\n error = 0\n for x, y in zip(xs, ys):\n # print(predicted[x, y], self.R[x, y] )\n error += pow(self.R[x, y] - predicted[x, y], 2)\n return np.sqrt(error)"
] | [
"0.66081226",
"0.645139",
"0.6398277",
"0.6397658",
"0.63548166",
"0.6339845",
"0.6230781",
"0.6199714",
"0.6194183",
"0.61925435",
"0.6190124",
"0.6163094",
"0.6145237",
"0.61395943",
"0.61211854",
"0.6069992",
"0.60633487",
"0.60624605",
"0.60621196",
"0.6047089",
"0.6033375",
"0.5995224",
"0.5986673",
"0.59786093",
"0.5948802",
"0.5907563",
"0.5899746",
"0.5889419",
"0.58888865",
"0.5871137"
] | 0.73682535 | 0 |
Varies lambda between lambda_min and lambda_max and plots the MSE and R2 for the test data as a function of lambda. k-fold is not used here, but I should have used the lambda_best_fit from regression.py; that was, however, added later. | def varying_lamda(x, y, z, lambda_min, lambda_max, n_lambda, k, save_fig = None, method = 'Ridge', split = True, train = 0.7, seed = 42, max_iter = 1001, l_min = False, plot_indexes = [0,1,2]):
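    # Parameters, as used below: lambda_min and lambda_max are base-10 exponents handed to
    # np.logspace (a lambda of exactly 0 is prepended), n_lambda is the number of penalties tried,
    # and k is a list of polynomial degrees. The function returns, per degree, the argmin index
    # into the lambda grid rather than the lambda value itself.
    # Hypothetical usage sketch (argument values are illustrative only, not taken from the source):
    #   idx = varying_lamda(x, y, z, lambda_min=-6, lambda_max=-1, n_lambda=100, k=[5, 7, 9], method='Ridge')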
lambdas = np.array([0] + np.logspace(lambda_min, lambda_max, n_lambda).tolist())
polynomials = np.array(k)
X, Y = np.meshgrid(lambdas, polynomials)
MSE = np.zeros(np.shape(X))
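    # MSE[j, i] will hold the test-set MSE for polynomial degree polynomials[j] and penalty lambdas[i].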
j = 0
for k in polynomials:
print(k)
model = regression(x, y, z, k = int(k), split = split, train = train, seed = seed)
if method == 'Ridge':
model.SVD()
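            # The SVD is computed once per design matrix, presumably so every Ridge fit in the
            # lambda loop below can reuse the same factorisation.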
i = 0
for lam in lambdas:
if method == 'Ridge':
beta = model.Ridge(lam = lam)
elif method == 'Lasso':
beta = model.Lasso(lam = lam, max_iter = max_iter)
z_tilde = model.z_tilde(beta = beta, X = model.X_test)
MSE[j, i] = model.MSE(z_tilde = z_tilde, z = model.z_test)
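            # The error is measured on the held-out split (model.X_test / model.z_test) produced when split=True.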
i += 1
j += 1
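    # Report, for each polynomial degree, the lambda that gives the smallest test MSE.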
print('Method = ', method)
lambdas_min = []
for i in range(len(polynomials)):
minimum_index = MSE[i].argmin()
print('Minimum lambda for polynomial %.i: ' %(polynomials[i]), lambdas[minimum_index], MSE[i].min())
lambdas_min.append(int(minimum_index))
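    # Note: lambdas_min collects argmin *indices* into `lambdas` (one per degree), not lambda values;
    # the value is recovered later as lambdas[index].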
#plt.pcolormesh(lambdas.tolist() + [lambdas[-1] + lambdas[1]], polynomials.tolist() + [polynomials[-1] + 1], MSE)
#plt.colorbar()
#plt.show()
plt.title('MSE for the test data with ' + method)
plt.contourf(lambdas, polynomials, MSE)
plt.colorbar()
plt.ylabel('Polynomial order', fontsize = 14)
plt.xlabel('Lambda', fontsize = 14)
    # Save only when an output filename was given.
    if save_fig is not None:
        plt.savefig(results_dir + save_fig + 'contour' + '.png')
plt.show()
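    # Line plots of test MSE against lambda for the polynomial degrees selected by plot_indexes.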
plt.title('MSE for the test data with ' + method)
    for idx in plot_indexes:
        plt.plot(lambdas, MSE[idx, :], label = 'k = ' + str(polynomials[idx]))
    if l_min:
        # Mark the best lambda found above for the second degree in k (hard-coded index 1).
        plt.plot(lambdas[lambdas_min[1]], MSE[1, lambdas_min[1]], 'ro', label = 'Lambda min = %.4g' %(lambdas[lambdas_min[1]]))
plt.legend()
plt.xlabel('Lambda', fontsize = 14)
plt.ylabel('MSE', fontsize = 14)
plt.tight_layout()
    # Save only when an output filename was given.
    if save_fig is not None:
        plt.savefig(results_dir + save_fig + '.png')
plt.show()
return lambdas_min | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def tune_lambda(Xtrain, ytrain, Xval, yval):\n #####################################################\n # TODO 5: Fill in your code here #\n #####################################################\n bestlambda = None\n err = 1\n\n for v in range(-19,20):\n if v>=0:\n val = float(\"1e+\"+str(v))\n else:\n val = float(\"1e\"+str(v))\n w = regularized_linear_regression(Xtrain,ytrain, val)\n error = mean_absolute_error(w, Xval,yval)\n if err > error:\n err = error\n bestlambda = val\n return bestlambda",
"def tune_lambda(Xtrain, ytrain, Xval, yval):\n #####################################################\n # TODO 5: Fill in your code here #\n #####################################################\n\n bestlambda = None\n mean_abs_err = 10000000\n power = -19\n while power < 20:\n lambda0 = 10 ** (power)\n w = regularized_linear_regression(Xtrain, ytrain, lambda0)\n err = mean_absolute_error(w, Xval, yval)\n if err < mean_abs_err:\n mean_abs_err = err\n bestlambda = lambda0\n power = power + 1\n return bestlambda",
"def trend_filter(rets_data, lambda_value):\r\n #USING CVXPY convex optimiser\r\n n_periods = rets_data.shape[0]\r\n rets = rets_data.to_numpy()\r\n\r\n D_full = np.diag([1]*n_periods) - np.diag([1]*(n_periods-1), 1)\r\n D = D_full[0:n_periods-1,]\r\n beta = cp.Variable(n_periods)\r\n lambd = cp.Parameter(nonneg=True)\r\n lambd.value = lambda_value\r\n\r\n def lasso_min(betas, rets, lambd):\r\n return cp.norm(rets-betas, 2)**2 + lambd*cp.norm(cp.matmul(D, betas), 1)\r\n\r\n problem = cp.Problem(cp.Minimize(lasso_min(beta, rets, lambd)))\r\n problem.solve()\r\n\r\n # NOT WORKING\r\n # n_periods = rets_data.shape[0]\r\n # D_full = np.diag([1] * n_periods) - np.diag([1] * (n_periods - 1), 1)\r\n # D = D_full[0:n_periods - 1, ]\r\n # def lasso_min(betas, rets, D, lambda_value):\r\n # return np.linalg.norm(rets-betas)**2 + lambda_value*np.linalg.norm(D@betas,1)\r\n #\r\n # init_guess = np.repeat(1/n_periods, n_periods)\r\n # bounds = Bounds(lb=0.0, ub=1.0)\r\n # results = minimize(fun=lasso_min,\r\n # args=(rets_data, D, lambda_value),\r\n # x0=init_guess,\r\n # bounds=bounds,\r\n # method='SLSQP',\r\n # options={'disp':False})\r\n # betas = pd.Series(results.x, index=rets_data.index)\r\n # return betas\r\n betas = pd.DataFrame(beta.value, index=rets_data.index.to_timestamp(), columns=['drift'])\r\n return betas",
"def demo(n, y, cov_fun, loo, K, ylim, figsize, seed):\n raise NotImplementedError(\n \"Not up-to-date with new proposed estimators for MinVar shrinkage\")\n np.random.seed(seed)\n\n T = y * N\n cov_fun, cov_kwargs = cov_fun_kwargs\n Sigma, tau = cov_functions[cov_fun](N)\n\n sim = Simulation(Sigma, T)\n\n fig, (ax0, ax1) = plt.subplots(figsize=figsize, ncols=2)\n ax0.plot(annualize_vol(tau / n), label='true')\n ax1.plot(annualize_vol(tau / n), label='true')\n ax0.plot(annualize_vol(lam / n), label='sample')\n ax1.plot(annualize_vol(lam / n), label='sample')\n\n # Oracle LW NLS shrinkage\n d_lw_oracle = nls_oracle(sim)\n d_isolw_oracle = nls_oracle(sim, isotonic=True)\n ax0.plot(annualize_vol(d_lw_oracle / n), label='lw oracle')\n ax1.plot(annualize_vol(d_isolw_oracle / n), label='lw oracle')\n\n # LW NLS shrinkage\n d_lw = nls_asymptotic(sim)\n ax1.plot(annualize_vol(d_lw / n), label='lw')\n\n if loo:\n # LOO LW NLS shrinkage\n d_loo = nls_loo(sim)\n d_isoloo = nls_loo(sim, isotonic=True)\n ax0.plot(annualize_vol(d_loo / n), label='noisy-loo')\n ax1.plot(annualize_vol(d_isoloo / n), label='isoloo')\n\n # K-fold LW NLS shrinkage\n d_kfold = nls_kfold(sim, K)\n d_isokfold = nls_kfold(sim, K, isotonic=True)\n ax0.plot(annualize_vol(d_kfold / n), label='noisy-kfold')\n ax1.plot(annualize_vol(d_isokfold / n), label='isokfold')\n\n # MinVar NLS shrinkage\n # d_mv_oracle = minvar_nls_oracle(sim, Sigma)\n # d_isomv_oracle = isotonic_regression(\n # d_mv_oracle, y_min=lam_N, y_max=lam_1)\n # d_isonlsq_mv_oracle = minvar_nls_oracle(\n # X, S, lam, U, Sigma, isotonic=True)\n # ax0.plot(annualize_vol(d_mv_oracle / n), label='noisy-mv_oracle')\n # ax1.plot(annualize_vol(d_isomv_oracle / n), label='buggy-iso-mv_oracle')\n # ax1.plot(annualize_vol(d_isonlsq_mv_oracle / n), label='isolsq-mv_oracle')\n\n ax0.legend()\n ax1.legend()\n ax0.set_ylim(*ylim)\n ax1.set_ylim(*ylim)\n plt.show()",
"def make_plot_for_different_thresholds(\n lambda_2,\n lambda_1,\n mu,\n num_of_servers,\n num_of_trials,\n seed_num=None,\n measurement_type=None,\n runtime=1440,\n max_threshold=None,\n):\n all_ambulance_patients_mean_times = []\n all_other_patients_mean_times = []\n all_total_mean_times = []\n if max_threshold == None:\n max_threshold = num_of_servers\n for threshold in range(1, max_threshold + 1):\n current_ambulance_patients_mean_times = []\n current_other_patients_mean_times = []\n current_total_mean_times = []\n for _ in range(num_of_trials):\n times = get_times_for_patients(\n lambda_2,\n lambda_1,\n mu,\n num_of_servers,\n threshold,\n seed_num,\n measurement_type,\n runtime,\n )\n current_ambulance_patients_mean_times.append(np.nanmean(times[0]))\n current_other_patients_mean_times.append(np.nanmean(times[1]))\n current_total_mean_times.append(np.nanmean(times[0] + times[1]))\n all_ambulance_patients_mean_times.append(\n np.nanmean(current_ambulance_patients_mean_times)\n )\n all_other_patients_mean_times.append(\n np.nanmean(current_other_patients_mean_times)\n )\n all_total_mean_times.append(np.nanmean(current_total_mean_times))\n\n x_axis = [thres for thres in range(1, max_threshold + 1)]\n x_axis_label, y_axis_label, title = get_plot_for_different_thresholds_labels(\n measurement_type\n )\n plt.figure(figsize=(23, 10))\n diff_threshold_plot = plt.plot(\n x_axis,\n all_ambulance_patients_mean_times,\n \"solid\",\n x_axis,\n all_other_patients_mean_times,\n \"solid\",\n x_axis,\n all_total_mean_times,\n \"solid\",\n )\n plt.title(title, fontsize=13, fontweight=\"bold\")\n plt.xlabel(x_axis_label, fontsize=13, fontweight=\"bold\")\n plt.ylabel(y_axis_label, fontsize=13, fontweight=\"bold\")\n plt.legend(\n [\"Ambulance Patients\", \"Other Patients\", \"All times\"], fontsize=\"x-large\"\n )\n\n return diff_threshold_plot",
"def make_plot_of_confidence_intervals_over_runtime(\n lambda_2,\n lambda_1,\n mu,\n num_of_servers,\n threshold,\n warm_up_time=100,\n num_of_trials=10,\n min_r=720,\n max_r=2880,\n seed_num=None,\n measurement_type=None,\n):\n mean_time = []\n x_axis = []\n runtime_range = np.linspace(min_r, max_r, 20)\n for runtime in runtime_range:\n res = get_multiple_runs_results(\n lambda_2,\n lambda_1,\n mu,\n num_of_servers,\n threshold,\n seed_num,\n warm_up_time,\n num_of_trials,\n runtime,\n )\n current_mean_time, title, y_axis_label = get_times_and_labels(\n res, measurement_type\n )\n mean_time.append(current_mean_time)\n x_axis.append(round(runtime))\n\n plt.figure(figsize=(23, 10))\n plot = plt.boxplot(mean_time, labels=x_axis, showfliers=False)\n plt.title(title)\n plt.xlabel(\"Simulation runtime\")\n plt.ylabel(y_axis_label)\n\n return plot",
"def plotdFvsLambda1():\n x = numpy.arange(len(df_allk))\n if x[-1]<8:\n fig = pl.figure(figsize = (8,6))\n else:\n fig = pl.figure(figsize = (len(x),6))\n width = 1./(len(P.methods)+1)\n elw = 30*width\n colors = {'TI':'#C45AEC', 'TI-CUBIC':'#33CC33', 'DEXP':'#F87431', 'IEXP':'#FF3030', 'GINS':'#EAC117', 'GDEL':'#347235', 'BAR':'#6698FF', 'UBAR':'#817339', 'RBAR':'#C11B17', 'MBAR':'#F9B7FF'}\n lines = tuple()\n for name in P.methods:\n y = [df_allk[i][name]/P.beta_report for i in x]\n ye = [ddf_allk[i][name]/P.beta_report for i in x]\n line = pl.bar(x+len(lines)*width, y, width, color=colors[name], yerr=ye, lw=0.1*elw, error_kw=dict(elinewidth=elw, ecolor='black', capsize=0.5*elw))\n lines += (line[0],)\n pl.xlabel('States', fontsize=12, color='#151B54')\n pl.ylabel('$\\Delta G$ '+P.units, fontsize=12, color='#151B54')\n pl.xticks(x+0.5*width*len(P.methods), tuple(['%d--%d' % (i, i+1) for i in x]), fontsize=8)\n pl.yticks(fontsize=8)\n pl.xlim(x[0], x[-1]+len(lines)*width)\n ax = pl.gca()\n for dir in ['right', 'top', 'bottom']:\n ax.spines[dir].set_color('none')\n ax.yaxis.set_ticks_position('left')\n for tick in ax.get_xticklines():\n tick.set_visible(False)\n\n leg = pl.legend(lines, tuple(P.methods), loc=3, ncol=2, prop=FP(size=10), fancybox=True)\n leg.get_frame().set_alpha(0.5)\n pl.title('The free energy change breakdown', fontsize = 12)\n pl.savefig(os.path.join(P.output_directory, 'dF_state_long.pdf'), bbox_inches='tight')\n pl.close(fig)\n return",
"def cross_val_lambda(self, X, y, n_fold, n_iter, lambda_range, model=None):\n best_lambda = 0\n error = np.inf\n for lambda_cur in np.arange(lambda_range[0], lambda_range[1], 0.1):\n avg_error,_ = self.cross_val(X, y, n_fold, n_iter, lambda_cur, model=model)\n if avg_error < error:\n error = avg_error\n best_lambda = lambda_cur\n logging.debug(\"Best lambda= %s for model: %s\", best_lambda, model)\n return best_lambda",
"def cross_validation_visualization(lambdas, loss_train, loss_test):\n plt.semilogx(lambdas, loss_train, marker=\".\", color='b', label='train error')\n plt.semilogx(lambdas, loss_test, marker=\".\", color='r', label='test error')\n plt.xlabel(\"lambda\")\n plt.ylabel(\"rmse\")\n plt.title(\"cross validation\")\n plt.legend(loc=2)\n plt.grid(True)\n plt.savefig(\"cross_validation_mse\")",
"def test_reiher_df_lambda():\n\n THRESH = 0.00125\n NAME = path.join(path.dirname(__file__), '../integrals/eri_reiher.h5')\n _, mf = load_casfile_to_pyscf(NAME, num_alpha=27, num_beta=27)\n eri_rr, LR, L, Lxi = df.factorize(mf._eri, thresh=THRESH)\n total_lambda = df.compute_lambda(mf, LR)\n assert eri_rr.shape[0] * 2 == 108\n assert L == 360\n assert Lxi == 13031\n assert np.isclose(np.round(total_lambda, decimals=1), 294.8)",
"def fold3_CV_for_lambda(x,y,index):\n # Construct any set of lambda to be tested\n set_of_lambda = np.logspace(-2,5,10)\n # for each lambda, test 3 folds and average error rate \n # to pick lowest one for corresponding lambda\n avg_error = []\n for lambd_val in set_of_lambda: \n #loop over 3 lambda values\n error = []\n for n in range(3):\n #calculate average error rate over 3 folds\n beta_vals, obj_vals = mylinearsvm(beta=np.zeros(x.shape[1]),\n lambd = lambd_val,\n x = x[np.ix_(index != n)],\n y = y[np.ix_(index != n)],\n step_size_init=1,\n max_iter=100)\n \n y_hat = 2*(beta_vals.dot(x[np.ix_(index == n)].T)>0)-1\n \n \n error_rate = np.mean(y_hat != y[np.ix_(index == n)])\n \n error.append(error_rate)\n \n avg_error.append(np.mean(error))\n \n return avg_error, set_of_lambda",
"def MF_ALS_fit(R_train,\n R_test,\n K,\n reg_lambda,\n max_epoch,\n min_RMSE_threshold=0.1,\n loss_estimate=MF_RMSE):\n assert isinstance(R_train, np.ndarray)\n n_users, n_items = R_train.shape\n\n # 留意矩阵的维度\n P, Q = np.random.random((K, n_users)), np.random.random((K, n_items))\n\n N_U = np.sum(R_train != 0, axis=1) # 用户评分的次数 shape: (n_users)\n\n N_M = np.sum(R_train != 0, axis=0) # 电影被评分的次数 shape: (n_items)\n # 用一部电影的平均评分作为该电影的第0个隐含特征的分数\n Q[0, :] = np.sum(R_train, axis=0) / N_M\n # 有些电影在该数据集中没有被打分,相除后平均分数为无穷大,需要处理这些值\n for i in range(n_items):\n # if M[0, i] == np.nan:\n if not (0 <= Q[0, i] <= 5):\n Q[0, i] = 0\n\n losses_train, losses_test = [], []\n\n for epoch in range(max_epoch):\n\n # 把M_Ui, U_Mj这些小型矩阵抽解出来,而不是在庞大的原始矩阵上进行补0操作\n # 不然后面矩阵求逆会很麻烦,效率会很低,甚至因为0项太多,矩阵是不可逆的\n\n for i in range(n_users):\n M_Ui = None # 把U[i]评价过的电影的特征列都挑选出来,组成一个(K * N_U[i])的小型矩阵\n R_Ui = None # 把M_Ui对应的评分都挑选出来,组成一个(1 * N_U[i])的行向量\n for j in range(n_items):\n if R_train[i, j]:\n if M_Ui is not None:\n M_Ui = np.hstack((M_Ui, Q[:, j:j + 1]))\n R_Ui = np.hstack((R_Ui, R_train[i:i + 1, j:j + 1]))\n else:\n M_Ui = Q[:, j:j + 1]\n R_Ui = R_train[i:i + 1, j:j + 1]\n\n # 有些用户在该数据集中没有评价任何电影\n if M_Ui is None:\n continue\n\n Ai = M_Ui.dot(M_Ui.T) + reg_lambda * N_U[i] * np.eye(K)\n Vi = M_Ui.dot(R_Ui.T)\n\n P[:, i:i + 1] = np.dot(np.matrix(Ai).I.getA(), Vi)\n\n for j in range(n_items):\n U_Mj = None # 把评价过电影M[j]的用户的喜好特征行挑选出来,组成一个(K * N_M[i])的小型矩阵\n R_Mj = None # 把U_Mj对应的评分挑选出来 -- 一个(N_M[i] * 1)的列向量\n for i in range(n_users):\n if R_train[i, j]:\n if U_Mj is not None:\n U_Mj = np.hstack((U_Mj, P[:, i:i + 1]))\n R_Mj = np.vstack((R_Mj, R_train[i:i + 1, j:j + 1]))\n else:\n U_Mj = P[:, i:i + 1]\n R_Mj = R_train[i:i + 1, j:j + 1]\n\n # 有些电影在该数据集中没有被任何用户评价\n if U_Mj is None:\n continue\n\n Aj = np.dot(U_Mj, U_Mj.T) + reg_lambda * N_M[j] * np.eye(K)\n Vj = U_Mj.dot(R_Mj)\n\n Q[:, j:j + 1] = np.dot(np.matrix(Aj).I.getA(), Vj)\n\n curr_loss_train = loss_estimate(R_train, P, Q)\n curr_loss_test = loss_estimate(R_test, P, Q)\n losses_train.append(curr_loss_train)\n losses_test.append(curr_loss_test)\n\n if curr_loss_train <= min_RMSE_threshold:\n break\n\n R_pred = np.dot(P.T, Q)\n\n losses_dict = {\n 'losses_train': losses_train,\n 'losses_test': losses_test,\n }\n\n return R_pred, losses_dict",
"def best_fit(x, y, z, z_real, p = list(range(3, 15)), folds = 4, train = 0.7, seed = 42, n_lambda = 2001, n = 1, m = 1):\n lambdas = np.array([0] + np.logspace(-5.5, -1, n_lambda).tolist())\n polynomials = np.array(p)\n X, Y = np.meshgrid(lambdas, polynomials)\n MSE = np.zeros(np.shape(X))\n lambda_min_ridge = np.zeros(len(polynomials))\n lambda_min_lasso = np.zeros(len(polynomials))\n R2 = np.zeros((3, len(polynomials)))\n MSE = np.zeros((3, len(polynomials)))\n\n R2_data = np.zeros((3, len(polynomials)))\n MSE_data = np.zeros((3, len(polynomials)))\n\n\n for i in range(len(polynomials)):\n print(i + polynomials[0])\n ridge_sum = 0\n lasso_sum = 0\n model = regression(x, y, z, split = True, train = train, seed = seed, k = polynomials[i])\n z_test = np.ravel(np.copy(model.z_test))\n for j in range(n): #The mean of n times\n ridge_sum += model.lambda_best_fit(method = 'Ridge', fold = folds, random_num = True, n_lambda = n_lambda)[0]\n for j in range(m): #The mean of m times\n lasso_sum += model.lambda_best_fit(method = 'Lasso', fold = folds, n_lambda = n_lambda)[0]\n lambda_min_ridge[i] = ridge_sum/n\n lambda_min_lasso[i] = lasso_sum/m\n\n _,_, a, z_real_test = model.train_test(X = model.X_full, z = z_real, train = 0.7, seed = seed) #Both the training set and the test set for z_real in that order in list/tuple\n\n Beta_ols = model.OLS()\n Beta_ridge = model.Ridge(lam = lambda_min_ridge[i])\n Beta_lasso = model.Lasso(lam = lambda_min_lasso[i], max_iter = 1001)\n\n z_tilde_OLS = model.z_tilde(Beta_ols, X = model.X_test)\n z_tilde_Ridge = model.z_tilde(Beta_ridge, X = model.X_test)\n z_tilde_Lasso = model.z_tilde(Beta_lasso, X = model.X_test)\n\n R2[0, i] = model.R_squared(z_tilde = z_tilde_OLS, z = z_real_test)\n R2[1, i] = model.R_squared(z_tilde = z_tilde_Ridge, z = z_real_test)\n R2[2, i] = model.R_squared(z_tilde = z_tilde_Lasso, z = z_real_test)\n\n MSE[0, i] = model.MSE(z_tilde = z_tilde_OLS, z = z_real_test)\n MSE[1, i] = model.MSE(z_tilde = z_tilde_Ridge, z = z_real_test)\n MSE[2, i] = model.MSE(z_tilde = z_tilde_Lasso, z = z_real_test)\n\n R2_data[0, i] = model.R_squared(z_tilde = z_tilde_OLS, z = z_test)\n R2_data[1, i] = model.R_squared(z_tilde = z_tilde_Ridge, z = z_test)\n R2_data[2, i] = model.R_squared(z_tilde = z_tilde_Lasso, z = z_test)\n\n MSE_data[0, i] = model.MSE(z_tilde = z_tilde_OLS, z = z_test)\n MSE_data[1, i] = model.MSE(z_tilde = z_tilde_Ridge, z = z_test)\n MSE_data[2, i] = model.MSE(z_tilde = z_tilde_Lasso, z = z_test)\n\n _, _, lambdas = model.lambda_best_fit(method = 'Ridge', fold = folds, random_num = True)\n\n min_MSE = [[np.argmin(MSE[0]), np.argmin(MSE[1]), np.argmin(MSE[2])], [np.argmin(MSE_data[0]), np.argmin(MSE_data[1]), np.argmin(MSE_data[2])]]\n min_R2 = [[np.argmin(MSE[0]), np.argmin(MSE[1]), np.argmin(MSE[2])], [np.argmin(MSE_data[0]), np.argmin(MSE_data[1]), np.argmin(MSE_data[2])]]\n\n print('Minimum MSE with Frank, OLS: ', np.min(MSE[0]), ' Ridge: ', np.min(MSE[1]), ' Lasso: ', np.min(MSE[2]))\n print('With polynoms: ', np.argmin(MSE[0]) + polynomials[0], np.argmin(MSE[1]) + polynomials[0], np.argmin(MSE[2]) + polynomials[0])\n print('----------------------------------------------------------------------------------------------')\n print('Minimum MSE with Data, OLS: ', np.min(MSE_data[0]), ' Ridge: ', np.min(MSE_data[1]), ' Lasso: ', np.min(MSE_data[2]))\n print('With polynoms: ', np.argmin(MSE_data[0]) + polynomials[0], np.argmin(MSE_data[1]) + polynomials[0], np.argmin(MSE_data[2]) + polynomials[0])\n 
print('----------------------------------------------------------------------------------------------')\n print('Maximum R2 with Frank, OLS: ', np.max(R2[0]), ' Ridge: ', np.max(R2[1]), ' Lasso: ', np.max(R2[2]))\n print('With polynoms: ', np.argmax(R2[0]) + polynomials[0], np.argmax(R2[1]) + polynomials[0], np.argmax(R2[2]) + polynomials[0])\n print('----------------------------------------------------------------------------------------------')\n print('Maximum R2 with Frank, OLS: ', np.max(R2_data[0]), ' Ridge: ', np.max(R2_data[1]), ' Lasso: ', np.max(R2_data[2]))\n print('With polynoms: ', np.argmax(R2_data[0]) + polynomials[0], np.argmax(R2_data[1]) + polynomials[0], np.argmax(R2_data[2]) + polynomials[0])\n print('----------------------------------------------------------------------------------------------')\n\n error_mins = np.array([[np.min(MSE[0]), np.min(MSE[1]), np.min(MSE[2])],\n [np.min(MSE_data[0]), np.min(MSE_data[1]), np.min(MSE_data[2])],\n [np.max(R2[0]), np.max(R2[1]) , np.max(R2[2])],\n [np.max(R2_data[0]), np.max(R2_data[1]), np.max(R2_data[2])],\n [np.argmin(MSE[0]) + polynomials[0], np.argmin(MSE[1]) + polynomials[0], np.argmin(MSE[2]) + polynomials[0]],\n [np.argmin(MSE_data[0]) + polynomials[0], np.argmin(MSE_data[1]) + polynomials[0], np.argmin(MSE_data[2]) + polynomials[0]],\n [np.argmax(R2[0]) + polynomials[0], np.argmax(R2[1]) + polynomials[0], np.argmax(R2[2]) + polynomials[0]],\n [np.argmax(R2_data[0]) + polynomials[0], np.argmax(R2_data[1]) + polynomials[0], np.argmax(R2_data[2]) + polynomials[0]]]).T\n\n text = ['MSE Franke', 'MSE Data','R\\(^2\\) Franke', 'R\\(^2\\) Data']\n print(latex_print(error_mins, text = text))\n\n print('Ridge lambda, lowest indexes for Franke: ', np.argmin(MSE[2]))\n print('Ridge lambda, lowest indexes for Data: ', np.argmin(MSE_data[2]))\n print(lambda_min_ridge)\n print('Lasso lambda, lowest indexes for Franke: ', np.argmin(MSE[2]))\n print('Lasso lambda, lowest indexes for Data: ', np.argmin(R2_MSE[2]))\n print(lambda_min_lasso)\n #Real Franke\n\n plt.plot(polynomials, R2[0], 'go--', label = 'OLS', color = 'red')\n plt.plot(polynomials, R2[1], 'go--', label = 'Ridge', color = 'blue')\n plt.plot(polynomials, R2[2], 'go--', label = 'Lasso', color = 'green')\n plt.title('R2 error between the model and FrankeFunction', fontsize = 14)\n plt.ylabel('R2')\n plt.xlabel('Polynomial degree')\n plt.legend()\n plt.tight_layout()\n\n plt.savefig(results_dir + 'ridge_lasso_high_order_poly.png')\n\n plt.show()\n\n plt.plot(polynomials, MSE[0], 'go--', label = 'OLS', color = 'red')\n plt.plot(polynomials, MSE[1], 'go--', label = 'Ridge', color = 'blue')\n plt.plot(polynomials, MSE[2], 'go--', label = 'Lasso', color = 'green')\n plt.title('MSE for test data between the model and FrankeFunction', fontsize = 14)\n plt.ylabel('MSE')\n plt.xlabel('Polynomial degree')\n plt.legend()\n plt.tight_layout()\n\n plt.savefig(results_dir + 'ridge_lasso_high_order_polyMSE.png')\n\n plt.show()\n\n #Noise Franke\n\n plt.plot(polynomials, R2_data[0], 'go--', label = 'OLS', color = 'red')\n plt.plot(polynomials, R2_data[1], 'go--', label = 'Ridge', color = 'blue')\n plt.plot(polynomials, R2_data[2], 'go--', label = 'Lasso', color = 'green')\n plt.title('R2 error between the model and data', fontsize = 14)\n plt.ylabel('R2')\n plt.xlabel('Polynomial degree')\n plt.legend()\n plt.tight_layout()\n\n plt.savefig(results_dir + 'ridge_lasso_high_order_poly_data.png')\n\n plt.show()\n\n plt.plot(polynomials, MSE_data[0], 'go--', label = 'OLS', color = 'red')\n 
plt.plot(polynomials, MSE_data[1], 'go--', label = 'Ridge', color = 'blue')\n plt.plot(polynomials, MSE_data[2], 'go--', label = 'Lasso', color = 'green')\n plt.title('MSE for test data between the model and data', fontsize = 14)\n plt.ylabel('MSE')\n plt.xlabel('Polynomial degree')\n plt.legend()\n plt.tight_layout()\n\n plt.savefig(results_dir + 'ridge_lasso_high_order_polyMSE_data.png')\n\n plt.show()\n\n #Polynomial and lambda\n\n plt.plot(polynomials, lambda_min_ridge, 'go--', label = 'Ridge', color = 'blue')\n plt.plot(polynomials, lambda_min_lasso, 'go--', label = 'Lasso', color = 'green')\n\n plt.title('The \\'best\\' lambda pr polynomial')\n plt.ylabel('Lambda')\n plt.xlabel('Polynomial degree')\n plt.legend()\n plt.tight_layout()\n plt.savefig(results_dir + 'ridge_lasso_lambda_poly.png')\n plt.show()",
"def optimizer_function(self, num_features, lambda_user, lambda_item, gamma):\n cv = 5\n kf = KFold(n_splits=cv, random_state=self.random_state, shuffle=True)\n test_RMSE_list = []\n for train_indices, test_indices in kf.split(self.indices):\n train, test = self.get_train_test_matrix(train_indices, test_indices)\n self.fit(train, int(num_features), lambda_user, lambda_item, gamma)\n test_rmse = self.get_test_rmse(test)\n test_RMSE_list.append(test_rmse)\n mean_test_rmse = np.mean(test_RMSE_list)\n return -mean_test_rmse",
"def plot_mse(mse, lambda0, lambda1, scale, loc='lower right'):\n import matplotlib.pyplot as plt\n\n fig = plt.figure()\n ax = fig.add_subplot(211)\n ax.plot(*zip(*mse))\n plt.xlabel('$\\lambda$')\n plt.ylabel('MSE')\n plt.yticks(scale)\n\n ax = fig.add_subplot(212)\n ax.plot(*zip(*lambda0), label='$\\lambda=0$')\n plt.xlabel('Episode')\n plt.ylabel('MSE')\n ax.plot(*zip(*lambda1), label='$\\lambda=1$')\n plt.legend(loc=loc)\n\n plt.show()",
"def evaluation_error(y_real, y_pred, max_rating, min_rating):\n mae = mean_absolute_error(y_real, y_pred)\n nmae = normalized_mean_absolute_error(y_real, y_pred,\n max_rating, min_rating)\n rmse = root_mean_square_error(y_real, y_pred)\n\n return mae, nmae, rmse",
"def MSE_plots(n_min, n_max, save_fig, k = [5], method = 'OLS', lamb = 1, split = False, train = 0.7, N = 1, method2 = 'OLS'):\n n = np.linspace(n_min, n_max, n_max - n_min + 1)\n errors = np.zeros((4, len(k), len(n))) # First index MSE for real FrankeFunction, MSE for the data, R2 for the real FrankeFunction, R2 for the data\n #Second index is the max order of polynomial, third index is for the n-value\n if type(k) != type([2]):\n k = [k]\n\n for j in range(N):\n #print(j)\n for i in range(len(n)):\n #print(i)\n x = np.random.uniform(0, 1, size = int(n[i]))\n y = np.random.uniform(0, 1, size = int(n[i]))\n x, y = np.meshgrid(x, y)\n\n z = FrankeFunction(x, y) + np.random.normal(0, 1, size = x.shape)\n z_real = FrankeFunction(x, y)\n\n for poly in range(len(k)):\n a = regression(x, y, z, k = k[poly], split = split, train = train)\n\n if method == 'OLS':\n beta = a.OLS()\n elif method == 'Ridge':\n beta = a.Ridge(lam = lamb)\n elif method == 'Lasso':\n beta = a.Lasso(alpha = lamb)\n elif method == 'K-fold':\n beta = a.k_cross(fold = 25, method2 = method2, lam = lamb)[0]\n\n if split == True:\n X = a.design_matrix(k = k[poly])\n X_train, X_test, z_real_train, z_real_test = a.train_test(X = X, z = z_real, train = train)\n z_tilde = a.z_tilde(X = X_test, beta = beta)\n errors[0, poly, i] += a.MSE(z_tilde, z_real_test)\n errors[1, poly, i] += a.MSE(z_tilde, a.z_test)\n errors[2, poly, i] += a.R_squared(z_tilde = z_tilde, z = z_real_test)\n errors[3, poly, i] += a.R_squared(z_tilde = z_tilde, z = a.z_test)\n else:\n z_tilde = a.z_tilde(beta = beta)\n errors[0, poly, i] += a.MSE(z_tilde, z_real)\n errors[1, poly, i] += a.MSE(z_tilde, z)\n errors[2, poly, i] += a.R_squared(z_tilde = z_tilde, z = z_real)\n errors[3, poly, i] += a.R_squared(z_tilde = z_tilde, z = z)\n\n n_mid = int(len(n)/2)\n title = ['MSE FrankeFunction', 'MSE data', 'R2 FrankeFunction', 'R2 data']\n y_label = ['MSE', 'MSE', 'R^2', 'R^2']\n errors /= N\n save_name = ['franke', 'data', 'franke', 'data']\n\n if method == 'Ridge':\n method += ' with lambda = ' + str(lamb)\n if method == 'K-fold':\n method += ' using ' + method2\n if method2 == 'Ridge' or method2 == 'Lasso':\n method += ' with lambda = ' + str(lamb)\n\n for i in range(4):\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 7))\n for j in range(len(k)):\n ax1.plot(n[:n_mid], errors[i, j, :n_mid], label = 'k = ' + str(k[j]))\n ax2.plot(n[n_mid:], errors[i, j, n_mid:], label = 'k = ' + str(k[j]))\n\n ax1.set_ylabel(y_label[i]); ax2.set_ylabel(y_label[i])\n ax1.set_xlabel('n'); ax2.set_xlabel('n')\n\n if split == True:\n fig.suptitle(title[i] + ' with ' + str(method) + ' with test/training split at ' + str(train) + ' and mean of ' + str(N) + ' runs.')\n else:\n fig.suptitle(title[i] + ' with ' + str(method) + ' without test/training split' + ' and mean of ' + str(N) + ' runs.')\n\n ax1.legend(); ax2.legend()\n #fig.savefig(results_dir + save_fig + method + save_name[i] + y_label[i] + '.png')\n plt.show()",
"def eva_regress(y_true, y_pred):\n\n mape = MAPE(y_true, y_pred)\n vs = metrics.explained_variance_score(y_true, y_pred)\n mae = metrics.mean_absolute_error(y_true, y_pred)\n mse = metrics.mean_squared_error(y_true, y_pred)\n r2 = metrics.r2_score(y_true, y_pred)\n print('explained_variance_score:%f' % vs)\n print('mape:%f%%' % mape)\n print('mae:%f' % mae)\n print('mse:%f' % mse)\n print('rmse:%f' % np.sqrt(mse))\n print('r2:%f' % r2)",
"def demo(N, y, cov_fun_kwargs, loo, K, ylim, figsize, seed, trace, upper_bound):\n\n T = y * N\n cov_fun, cov_kwargs = cov_fun_kwargs\n Sigma, tau = cov_functions[cov_fun](N, seed=seed, **cov_kwargs)\n\n np.random.seed(seed)\n sim = Simulation(Sigma, T)\n\n fig, (ax0, ax1) = plt.subplots(figsize=figsize, ncols=2)\n # ax0.plot(annualize_vol(tau / N), label='true')\n # ax1.plot(annualize_vol(tau / N), label='true')\n # ax0.plot(annualize_vol(lam / N), label='sample')\n # ax1.plot(annualize_vol(lam / N), label='sample')\n\n # Oracle LW NLS shrinkage\n # d_lw_oracle = nls_oracle(sim)\n # d_isolw_oracle = nls_oracle(sim, isotonic=True)\n # ax0.plot(annualize_vol(d_lw_oracle / N), label='lw oracle')\n # ax1.plot(annualize_vol(d_isolw_oracle / N), label='lw iso oracle')\n\n # # LW NLS shrinkage\n # S_lw = nlshrink_covariance(X, centered=True)\n # d_lw = eig(S_lw, return_eigenvectors=False)\n # ax1.plot(annualize_vol(d_lw / N), label='lw')\n\n # if loo:\n # # LOO LW NLS shrinkage\n # _, d_loo = nls_loo_cv(X, S, U)\n # d_isoloo = isotonic_regression(d_loo)\n # ax0.plot(annualize_vol(d_loo / N), label='noisy-loo')\n # ax1.plot(annualize_vol(d_isoloo / N), label='isoloo')\n\n # K-fold LW NLS shrinkage\n # d_lw_loo = nls_loo(sim)\n # d_lw_isoloo = nls_loo(sim, isotonic=True)\n # ax0.plot(annualize_vol(d_lw_loo / N), label='lw_kfold')\n # ax1.plot(annualize_vol(d_lw_isoloo / N), label='lw_isoloo')\n\n d_lw_kfold = nls_kfold(sim, K)\n d_lw_isokfold = nls_kfold(sim, K, isotonic=True)\n ax0.plot(annualize_vol(d_lw_kfold / N), label='lw_kfold')\n ax1.plot(annualize_vol(d_lw_isokfold / N), label='lw_isokfold')\n\n # MinVar NLS shrinkage\n d_mv_oracle = minvar_oracle(\n sim, monotonicity=None, trace=trace, upper_bound=upper_bound)\n d_mv_mono_oracle = minvar_oracle(\n sim, monotonicity='constraint', trace=trace, upper_bound=upper_bound)\n d_mv_iso_oracle = minvar_oracle(\n sim, monotonicity='isotonic', trace=trace, upper_bound=upper_bound)\n\n ax0.plot(annualize_vol(d_mv_oracle / N), label='mv_oracle')\n ax1.plot(annualize_vol(d_mv_mono_oracle / N), label='mv_mono_oracle')\n ax1.plot(annualize_vol(d_mv_iso_oracle / N), label='mv_iso_oracle')\n\n d_mv_loo = minvar_loo(\n sim, monotonicity=None, trace=trace, upper_bound=upper_bound)\n d_mv_mono_loo = minvar_loo(\n sim, monotonicity='constraint', trace=trace, upper_bound=upper_bound)\n d_mv_iso_loo = minvar_loo(\n sim, monotonicity='isotonic', trace=trace, upper_bound=upper_bound)\n\n ax0.plot(annualize_vol(d_mv_loo / N), label='mv_loo')\n ax1.plot(annualize_vol(d_mv_mono_loo / N), label='mv_mono_loo')\n ax1.plot(annualize_vol(d_mv_iso_loo / N), label='mv_iso_loo')\n\n ax0.legend()\n ax1.legend()\n # ax0.set_ylim(*ylim)\n # ax1.set_ylim(*ylim)\n plt.show()",
"def plotdFvsLambda2(nb=10):\n x = numpy.arange(len(df_allk))\n if len(x) < nb:\n return\n xs = numpy.array_split(x, len(x)/nb+1)\n mnb = max([len(i) for i in xs])\n fig = pl.figure(figsize = (8,6))\n width = 1./(len(P.methods)+1)\n elw = 30*width\n colors = {'TI':'#C45AEC', 'TI-CUBIC':'#33CC33', 'DEXP':'#F87431', 'IEXP':'#FF3030', 'GINS':'#EAC117', 'GDEL':'#347235', 'BAR':'#6698FF', 'UBAR':'#817339', 'RBAR':'#C11B17', 'MBAR':'#F9B7FF'}\n ndx = 1\n for x in xs:\n lines = tuple()\n ax = pl.subplot(len(xs), 1, ndx)\n for name in P.methods:\n y = [df_allk[i][name]/P.beta_report for i in x]\n ye = [ddf_allk[i][name]/P.beta_report for i in x]\n line = pl.bar(x+len(lines)*width, y, width, color=colors[name], yerr=ye, lw=0.05*elw, error_kw=dict(elinewidth=elw, ecolor='black', capsize=0.5*elw))\n lines += (line[0],)\n for dir in ['left', 'right', 'top', 'bottom']:\n if dir == 'left':\n ax.yaxis.set_ticks_position(dir)\n else:\n ax.spines[dir].set_color('none')\n pl.yticks(fontsize=10)\n ax.xaxis.set_ticks([])\n for i in x+0.5*width*len(P.methods):\n ax.annotate('$\\mathrm{%d-%d}$' % (i, i+1), xy=(i, 0), xycoords=('data', 'axes fraction'), xytext=(0, -2), size=10, textcoords='offset points', va='top', ha='center')\n pl.xlim(x[0], x[-1]+len(lines)*width + (mnb - len(x)))\n ndx += 1\n leg = ax.legend(lines, tuple(P.methods), loc=0, ncol=2, prop=FP(size=8), title='$\\mathrm{\\Delta G\\/%s\\/}\\mathit{vs.}\\/\\mathrm{lambda\\/pair}$' % P.units, fancybox=True)\n leg.get_frame().set_alpha(0.5)\n pl.savefig(os.path.join(P.output_directory, 'dF_state.pdf'), bbox_inches='tight')\n pl.close(fig)\n return",
"def make_tuning_plot_rmse(df, error_col_name=\"rmse\",\n error_title = \"Top 10% RMSE\",\n cutoff = 0.10):\n\n df = df.copy()\n\n # Get the regularizer and reset coeff\n coeff = [float(i.split(\"evidence_new_reg_\")[1]) if \"evidence\" in i else i for i in df['method_name']]\n df[\"method_name\"] = coeff\n df[\"Data\"] = convert_dataset_names(df[\"dataset\"])\n df[\"Method\"] = df[\"method_name\"]\n\n # Get appropriate datasets\n trials = 'trial_number'\n methods = 'Method'\n\n # Make area plot\n uniq_methods = set(df[\"Method\"].values)\n method_order = sorted(uniq_methods,\n key=lambda x : x if isinstance(x, float) else -1)\n method_df = []\n datasets = set()\n for data, sub_df in df.groupby(\"Data\"):\n # Add datasets\n datasets.add(data)\n rmse_sub = sub_df[error_col_name]\n methods_sub = sub_df[\"Method\"]\n trials_sub= sub_df['trial_number']\n for method_idx, method in enumerate(method_order):\n # Now summarize these lines\n bool_select = (methods_sub == method)\n\n rmse_method = rmse_sub[bool_select]\n trials_temp = trials_sub[bool_select]\n areas = []\n # create area!\n for trial, rmse_trial in zip(trials_sub, rmse_method):\n num_tested = len(rmse_trial)\n cutoff_index = int(cutoff * num_tested) - 1\n rmse_val = rmse_trial[-cutoff_index]\n to_append = {error_title: rmse_val,\n \"Regularizer Coeff, $\\lambda$\": method,\n \"method_name\": method,\n \"Data\": data,\n \"Trial\" : trial}\n method_df.append(to_append)\n method_df = pd.DataFrame(method_df)\n\n # Filter out dropout\n method_df = method_df[[i != \"dropout\" for i in\n method_df['method_name']]].reset_index()\n\n # Normalize by dataset\n for dataset in datasets:\n # Make a divison vector of ones and change it to a different value only\n # for the correct dataset of interest to set max rmse to 1\n division_factor = np.ones(len(method_df))\n indices = (method_df[\"Data\"] == dataset)\n\n # Normalize with respect to the ensemble so that this is 1\n max_val = method_df[indices].query(\"method_name == 'ensemble'\").mean()[error_title]\n\n # Take the maximum of the AVERAGE so it's normalized to 1\n division_factor[indices] = max_val\n method_df[error_title] = method_df[error_title] / division_factor\n\n method_df_evidence = method_df[[isinstance(i, float) for i in\n method_df['method_name']]].reset_index()\n method_df_ensemble = method_df[[\"ensemble\" in str(i) for i in\n method_df['method_name']]].reset_index()\n\n data_colors = {\n dataset : sns.color_palette()[index]\n for index, dataset in enumerate(datasets)\n }\n\n min_x = np.min(method_df_evidence[\"Regularizer Coeff, $\\lambda$\"])\n max_x= np.max(method_df_evidence[\"Regularizer Coeff, $\\lambda$\"])\n\n sns.lineplot(x=\"Regularizer Coeff, $\\lambda$\", y=error_title,\n hue=\"Data\", alpha=0.8, data=method_df_evidence,\n palette = data_colors)\n\n for data, subdf in method_df_ensemble.groupby(\"Data\"):\n\n color = data_colors[data]\n area = subdf[error_title].mean()\n std = subdf[error_title].std()\n plt.hlines(area, min_x, max_x, linestyle=\"--\", color=color, alpha=0.8)\n\n # Add ensemble baseline\n ensemble_line = plt.plot([], [], color='black', linestyle=\"--\",\n label=\"Ensemble\")\n # Now make ensemble plots\n plt.legend(bbox_to_anchor=(1.1, 1.05))",
"def MF_SGD_fit(R_train,\n R_test,\n K,\n learning_rate,\n max_epoch,\n reg_lambda_p,\n reg_lambda_q,\n min_loss_threshold=0.1,\n loss_estimate=MF_RMSE,\n epoch_cnt_per_loss_estimate=1000):\n\n n_users, n_items = R_train.shape\n\n P, Q = np.random.rand(K, n_users), np.random.rand(K, n_items)\n\n losses_train = []\n losses_test = []\n\n # acquire observed rating (u, i) pairs to support random selecting efficently\n observed_rating_ui_pairs = []\n for u in range(n_users):\n for i in range(n_items):\n if R_train[u, i]:\n observed_rating_ui_pairs.append((u, i))\n\n random.shuffle(observed_rating_ui_pairs)\n\n for epoch in range(max_epoch):\n\n u, i = random.choice(observed_rating_ui_pairs)\n\n e_ui = R_train[u, i] - P[:, u] @ Q[:, i]\n # P[:, u] += learning_rate * (2*e_ui*Q[:, i] - reg_lambda_p*P[:, u])\n # Q[:, i] += learning_rate * (2*e_ui*P[:, u] - reg_lambda_q*Q[:, i])\n\n P[:, u], Q[:, i] = P[:, u] + learning_rate * (2*e_ui*Q[:, i] - reg_lambda_p*P[:, u]), \\\n Q[:, i] + learning_rate * (2*e_ui*P[:, u] - reg_lambda_q*Q[:, i])\n\n if epoch % epoch_cnt_per_loss_estimate == 0:\n curr_train_loss = loss_estimate(R_train, P, Q)\n losses_train.append(curr_train_loss)\n\n curr_val_loss = loss_estimate(R_test, P, Q)\n losses_test.append(curr_val_loss)\n\n if curr_train_loss < min_loss_threshold:\n break\n\n R_pred = P.T @ Q\n\n losses_dict = {\n 'losses_train': losses_train,\n 'losses_test': losses_test,\n }\n\n return R_pred, losses_dict",
"def make_plot_of_confidence_intervals_over_warm_up_time(\n lambda_2,\n lambda_1,\n mu,\n num_of_servers,\n threshold,\n num_of_trials,\n min_w=0,\n max_w=500,\n seed_num=None,\n measurement_type=None,\n runtime=1440,\n):\n mean_time = []\n x_axis = []\n warm_up_range = np.linspace(min_w, max_w, 20)\n for warm_up_time in warm_up_range:\n res = get_multiple_runs_results(\n lambda_2,\n lambda_1,\n mu,\n num_of_servers,\n threshold,\n seed_num,\n warm_up_time,\n num_of_trials,\n runtime,\n )\n current_mean_time, title, y_axis_label = get_times_and_labels(\n res, measurement_type\n )\n mean_time.append(current_mean_time)\n x_axis.append(round(warm_up_time))\n\n plt.figure(figsize=(23, 10))\n plot = plt.boxplot(mean_time, labels=x_axis, showfliers=False)\n plt.title(title, fontsize=13, fontweight=\"bold\")\n plt.xlabel(\"Warm-up time\", fontsize=13, fontweight=\"bold\")\n plt.ylabel(y_axis_label, fontsize=13, fontweight=\"bold\")\n\n return plot",
"def crossvalidation(X, y, folds, lambdavals):\n n = X.shape[0]\n Errors = np.empty((0, len(lambdavals)))\n index = (list(range(folds)) * (n//folds+1))[0:n]\n np.random.shuffle(index)\n index = np.array(index)\n for i in range(folds):\n X_train_CV = X[index != i, :]\n X_test_CV = X[index == i, :]\n y_train_CV = y[index != i]\n y_test_CV = y[index == i]\n Errorsinter = []\n for lam in lambdavals:\n betas, _ = mylinearsvm(lam, 0.1, 100, X_train_CV, y_train_CV)\n y_pred = np.dot(X_test_CV, betas[-1])\n Errorsinter.append(mean_squared_error(y_test_CV, y_pred))\n Errors = np.vstack((Errors, Errorsinter))\n mean_errors = np.mean(Errors, axis = 0)\n minimum_val = np.max(np.where(mean_errors == mean_errors.min()))\n lambda_best = lambdavals[minimum_val]\n print(\"The best value of lambda is:\", lambda_best)\n return lambda_best",
"def cross_validation(lambd_values = [0.1], maxfun_values = [200]):\n \n n_lambd, n_maxfun = len(lambd_values), len(maxfun_values)\n \n # Creation of the DataFrame where the results are to be stored\n df_results = pd.DataFrame(index = range(n_lambd * n_maxfun))\n df_results['Maxfun'], df_results['Lambd'] = list(maxfun_values) * n_lambd, list(lambd_values) * n_maxfun\n df_results['Hidden layers'] = num_of_hidden_layers\n nodes_avg = np.mean(layers[1:-1])\n df_results['Nodes per hidden layer (avg)'] = nodes_avg\n accuracy_col = []\n \n for lambd in lambd_values:\n \n for maxfun in maxfun_values:\n \n start = time() # start of the timer\n \n res = opt.fmin_l_bfgs_b(costFunction, nn_weights, fprime = backwards, args = (layers, images_validation, labels_training, num_labels, lambd), maxfun = maxfun, factr = 1., disp = True)\n Theta = roll_params(res[0], layers)\n \n # input('\\nProgram paused. Press enter to continue!!!')\n \n # print(\"\\nTesting Neural Network... \\n\")\n \n pred = predict(Theta, images_test)\n end = time() # end of the timer\n accuracy = np.mean(labels_test == pred) * 100\n print('\\nLambda =', lambd)\n print('Maxfun =', maxfun)\n time_complexity = end - start\n print('Time:', time_complexity, 'seconds')\n print('Accuracy =', accuracy, '%')\n \n # Modification of the 'Accuracy' column\n accuracy_col.append(accuracy)\n \n # Accuracy values stored into the dataframe\n df_results['Accuracy'] = accuracy_col\n \n return df_results",
"def validation_curve(x, y, x_val, y_val):\n lambda_vec = np.array([0, 0.001, 0.003, 0.01, 0.03, 0.1, 0.3, 1, 3, 10])\n error_train = np.zeros(len(lambda_vec))\n error_val = np.zeros(len(lambda_vec))\n m = x.shape[0]\n m_val = x_val.shape[0]\n for i in range(len(lambda_vec)):\n l = lambda_vec[i]\n theta = train_linear_reg(x, y, l)\n error_train[i] = 1.0 / (2 * m) * np.sum(np.square(x.dot(theta) - y))\n error_val[i] = 1.0 / (2 * m_val) * np.sum(np.square(x_val.dot(theta) - y_val))\n\n return lambda_vec, error_train, error_val",
"def ridge_cross_validation_visualization(lambdas, accuracies):\n colors = ['r', 'b', 'y', 'g']\n labels = ['group_0', 'group_1', 'group_2', 'group_3']\n for i in range(len(accuracies)):\n plt.semilogx(lambdas, accuracies[i], marker=\".\", color=colors[i], label=labels[i])\n plt.xlabel(\"lambda\")\n plt.ylabel(\"accuracy\")\n plt.xlim(1e-4, 1)\n plt.title(\"cross validation\")\n plt.legend(loc=2)\n plt.grid(True)\n plt.savefig(\"./img/ridge_cross_validation\")",
"def find_best_rmse(name, x_train, y_train, x_test, y_test, k_max=23, metric='euclidean', plot=True, debug=False):\n \"\"\"\n :param k_max: maximum number of k to test for\n :param plot: True if matplotlib plot should be created\n :param debug: print additional info about accuracy test results\n :return: best k and best accuracy for given training and test set with features (k < k_max)\n \"\"\"\n\n # Go through all k between k=1 and k=k_max-1 and find best_k and best_a\n # rsmes = np.zeros(k_max) # Write rsmes for each k into here for plot to work...\n\n rmse_val = [] # to store rmse values for different k\n for k in range(1, k_max):\n model = neighbors.KNeighborsRegressor(n_neighbors=k, metric=metric)\n model.fit(x_train, y_train) # fit the model\n pred = model.predict(x_test) # make prediction on test set\n rsme = sqrt(mean_squared_error(y_test, pred)) # calculate rmse\n\n if k == 1:\n best_rmse = rsme\n best_k = k\n elif rsme < best_rmse:\n best_rmse = rsme\n best_k = k\n\n rmse_val.append(rsme) # store rmse values\n if debug:\n print('RMSE value for k=', k, 'is:', rsme)\n\n if plot:\n t = range(1, k_max)\n plt.plot(t, rmse_val[0:k_max - 1], '--', label=name)\n plt.xticks(t)\n plt.xlabel('# neighbours (k)')\n plt.ylabel('Root Mean Squared Error')\n plt.scatter(best_k, best_rmse)\n plt.legend()\n return best_rmse, best_k",
"def __bestLambda(self):\n\t\t\n\t\t# Determine starting value for brent-method (to avoid local minimum).\n\t\tself.startValue = self.__findStartValue()\n\t\t\t\n\t\t# Check if there exists a minimum within the range of self.lamStart. \n\t\t# Otherwise, use fmin because we cannot provide an interval. \n\t\tif (self.startIdx != 0 and self.startIdx != self.nStartValues-1):\n\t\t\ts = scipy.optimize.brent(self.__minBayesianEvidence, brack=(self.logLamStart[self.startIdx-1], self.logLamStart[self.startIdx], self.logLamStart[self.startIdx+1]))\n\t\telse:\n\t\t\ts = scipy.optimize.fmin(self.__minBayesianEvidence, self.startValue, disp=False)[0]\n\t\t\n\t\treturn 10**s",
"def plot_cv_train_test(test_avg, train_avg, lambdas, path):\n\n plt.plot(lambdas, test_avg, marker = \"o\", color=\"green\", label=\"validating cv error\")\n plt.plot(lambdas, train_avg, marker = \"v\", color=\"blue\", label=\"training cv error\" )\n \n print(train_avg[0])\n print(test_avg[0])\n \n plt.title(\"Cross Validation Error for Different Regularization Parameters\")\n plt.ylabel(\"10f cv RMSE\")\n plt.ylim(0.86 , 0.99)\n plt.xlabel(\"$\\lambda$\")\n plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\n plt.grid()\n plt.savefig(\"../results/\"+path)\n plt.show()"
] | [
"0.663487",
"0.6570577",
"0.5965127",
"0.57875526",
"0.5774969",
"0.57696885",
"0.57435316",
"0.5715972",
"0.5654938",
"0.5644364",
"0.56375545",
"0.5616511",
"0.5613352",
"0.5586539",
"0.5506",
"0.5466689",
"0.54639953",
"0.5441683",
"0.54297066",
"0.5401699",
"0.53822684",
"0.53636414",
"0.5363356",
"0.5351001",
"0.5347221",
"0.5339488",
"0.53392667",
"0.53248644",
"0.53192097",
"0.5304949"
] | 0.7175527 | 0 |
Recreates figure 2.11 from the book, as asked in exercise c, by running k-fold cross-validation N times with randomly shuffled fold indices. The plot shows the mean error estimates over the N runs. | def fig_2_11V2(x, y, z, first_poly = 4, complexity = 10, N = 7, method = 'OLS', seed = 42, lam = 0, folds = 5, save_fig = ''):
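    # Minimal usage sketch (hypothetical inputs; assumes FrankeFunction and the
    # regression class defined elsewhere in this project are importable):
    #   x, y = np.meshgrid(np.random.uniform(0, 1, 20), np.random.uniform(0, 1, 20))
    #   z = FrankeFunction(x, y) + np.random.normal(0, 1, x.shape)
    #   fig_2_11V2(x, y, z, first_poly=1, complexity=8, N=5, method='OLS')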
errors = np.zeros((4, complexity + 1))
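    # Rows of errors (averaged over the folds and the N repetitions, cf. the plots below):
    # 0 = test MSE, 1 = test R2, 2 = training MSE, 3 = training R2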
bias = np.zeros(complexity + 1)
variance = np.zeros(complexity + 1)
z_real = FrankeFunction(x, y)
complx = np.arange(first_poly, first_poly + complexity + 1, 1)
MSE = np.zeros(complexity + 1)
for i in range(complexity + 1):
print(i)
model = regression(x, y, z, k = first_poly + i, split = False, seed = seed)
for j in range(N):
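            # Each repetition re-runs k-fold CV with a reshuffled fold assignment
            # (random_num = True) and accumulates the mean error over the folds.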
_, MSE_R2D2, _, _, _, _ = model.k_cross(fold = folds, method2 = method, lam = lam, random_num = True)
errors[:, i] += np.mean(MSE_R2D2, axis = 0)
errors /= N
print(errors)
plt.title(method + ' Test vs Train error in k-fold with ' + str(folds) + '-folds')
plt.plot(complx, errors[0], 'go--', label = 'Test', color = 'blue')
plt.plot(complx, errors[2], 'go--', label = 'Training', color = 'red')
#plt.ylim([np.min(errors_R2[2]*1.2), np.max(errors_R2[0]*1.2)])
plt.legend()
plt.xlabel('Polynomial maximum order', fontsize = 14)
plt.ylabel('MSE', fontsize = 14)
plt.savefig(results_dir + 'tradeoff2MSE' + method + save_fig + '.png')
plt.show()
plt.title(method + ' Test vs Train error in k-fold with ' + str(folds) + '-folds')
plt.xlabel('Polynomial maximum order', fontsize = 14)
plt.ylabel('R2', fontsize = 14)
plt.plot(complx, errors[1], 'go--', label = 'Test', color = 'blue')
plt.plot(complx, errors[3], 'go--', label = 'Training', color = 'red')
#plt.ylim([np.min(errors_R2[3]*1.2), np.max(errors_R2[1]*1.2)])
plt.legend()
plt.tight_layout()
plt.savefig(results_dir + 'tradeoff2R2' + method + save_fig + '.png')
plt.show() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def problem1():\n n_i = 10\n k = 5\n num_samples = 1000\n total_draws = 50\n\n plt.figure()\n for num_samples in [100, 1000, 10000]:\n experiment_results = []\n for samples in range(num_samples):\n # N = np.random.randint(1, k + 1, n_i * k)\n N = np.array([[i] * n_i for i in range(1, k+1)]).flatten()\n random.shuffle(N)\n experiment_results_for_sample = []\n for n_draws in range(1, total_draws + 1):\n draw = N[:n_draws]\n experiment_result = check_if_all_nums_in_draw(draw, k)\n experiment_results_for_sample.append(experiment_result)\n experiment_results.append(experiment_results_for_sample)\n experiment_results = np.array(experiment_results)\n\n plt.plot(range(1, total_draws + 1), np.sum(experiment_results, axis=0)/num_samples, label=num_samples)\n\n plt.plot([1, total_draws+1], [0.9, 0.9])\n plt.xlabel('Total Draws')\n plt.ylabel('Probability')\n plt.xlim(1, total_draws)\n plt.legend()\n plt.show()",
"def problem2():\n k = 4\n total_draws = 20\n total_balls = 50\n\n plt.figure()\n for _ in range(50):\n for num_samples in [10000]:\n experiment_results = []\n for samples in range(num_samples):\n N = np.random.randint(1, k, total_balls - 1)\n N = np.append(N, k)\n N = np.array(N).flatten()\n random.shuffle(N)\n draw = N[:total_draws]\n experiment_result = np.any(draw == 4)\n experiment_results.append(experiment_result)\n plt.plot(np.cumsum(experiment_results) / np.arange(1, num_samples + 1))\n old_result = experiment_results[:]\n\n plt.xlabel('Total Draws')\n plt.ylabel('Probability')\n plt.show()",
"def kmean_test_n_clusters(data, n_clusters):\n n_clusters += 1\n kmeans_per_k = [KMeans(n_clusters=k, random_state=42).fit(data)for k in range(1, n_clusters)]\n inertias = [model.inertia_ for model in kmeans_per_k]\n silhouette_scores = [silhouette_score(data, model.labels_)\n for model in kmeans_per_k[1:]]\n\n fig, (ax1, ax2) = plt.subplots(2,1, figsize=(8, 3.5))\n\n ax1.plot(range(1, n_clusters), inertias, \"bo-\")\n ax1.set_xlabel(\"$k$\", fontsize=14)\n ax1.set_ylabel(\"Inertia\", fontsize=14)\n #ax1.annotate('Elbow',\n # xy=(4, inertias[3]),\n # xytext=(0.55, 0.55),\n # textcoords='figure fraction',\n # fontsize=16,\n # arrowprops=dict(facecolor='black', shrink=0.1)\n # )\n ax2.plot(range(2, n_clusters), silhouette_scores, \"bo-\")\n ax2.set_xlabel(\"$k$\", fontsize=14)\n ax2.set_ylabel(\"Silhouette score\", fontsize=14)\n #plt.axis([2, 8, 0.3, 0.475])\n plt.show()",
"def experiment(improved):\n\n N_list = [5 ,10 ,20]\n K_list = [3 , 7 ,9]\n P_list = [0.3 , 0.4 , 0.5 ,0.6 ,0.7]\n\n data = pandas.read_csv('train.csv')\n\n avg_list = []\n for i in range(0,len(N_list) * len(K_list) * len(P_list)):\n avg_list.append([0 , None])\n\n kf = KFold(n_splits=5, shuffle=True, random_state=209418441)\n rotation_index = 1\n for train_index, test_index in kf.split(data):\n\n train = data.iloc[train_index]\n test = data.iloc[test_index]\n index = 0\n for n in N_list:\n for k in K_list:\n for p in P_list:\n\n #print('testing for N= ',n,', K = ',k, 'P = ',p)\n KNN = forest.KNN_forest(N=n, K=k, P=p, data = train , improved=improved)\n success_rate = utls.tests.succ_rate_test.test(test,KNN.Classify)\n avg_list[index][0] += success_rate\n avg_list[index][1] = (n,k,p)\n #print(' rate is: ',avg_list[index][0]/rotation_index)\n index += 1\n rotation_index +=1\n\n\n\n best_option = max(avg_list,key= lambda x:x[0])\n #print(' ****** DONE ******')\n #print('best n,k,p are : ' , best_option[1] , ' with success rate: ' , best_option[0])\n\n return best_option[1]",
"def MSE_plots(n_min, n_max, save_fig, k = [5], method = 'OLS', lamb = 1, split = False, train = 0.7, N = 1, method2 = 'OLS'):\n n = np.linspace(n_min, n_max, n_max - n_min + 1)\n errors = np.zeros((4, len(k), len(n))) # First index MSE for real FrankeFunction, MSE for the data, R2 for the real FrankeFunction, R2 for the data\n #Second index is the max order of polynomial, third index is for the n-value\n if type(k) != type([2]):\n k = [k]\n\n for j in range(N):\n #print(j)\n for i in range(len(n)):\n #print(i)\n x = np.random.uniform(0, 1, size = int(n[i]))\n y = np.random.uniform(0, 1, size = int(n[i]))\n x, y = np.meshgrid(x, y)\n\n z = FrankeFunction(x, y) + np.random.normal(0, 1, size = x.shape)\n z_real = FrankeFunction(x, y)\n\n for poly in range(len(k)):\n a = regression(x, y, z, k = k[poly], split = split, train = train)\n\n if method == 'OLS':\n beta = a.OLS()\n elif method == 'Ridge':\n beta = a.Ridge(lam = lamb)\n elif method == 'Lasso':\n beta = a.Lasso(alpha = lamb)\n elif method == 'K-fold':\n beta = a.k_cross(fold = 25, method2 = method2, lam = lamb)[0]\n\n if split == True:\n X = a.design_matrix(k = k[poly])\n X_train, X_test, z_real_train, z_real_test = a.train_test(X = X, z = z_real, train = train)\n z_tilde = a.z_tilde(X = X_test, beta = beta)\n errors[0, poly, i] += a.MSE(z_tilde, z_real_test)\n errors[1, poly, i] += a.MSE(z_tilde, a.z_test)\n errors[2, poly, i] += a.R_squared(z_tilde = z_tilde, z = z_real_test)\n errors[3, poly, i] += a.R_squared(z_tilde = z_tilde, z = a.z_test)\n else:\n z_tilde = a.z_tilde(beta = beta)\n errors[0, poly, i] += a.MSE(z_tilde, z_real)\n errors[1, poly, i] += a.MSE(z_tilde, z)\n errors[2, poly, i] += a.R_squared(z_tilde = z_tilde, z = z_real)\n errors[3, poly, i] += a.R_squared(z_tilde = z_tilde, z = z)\n\n n_mid = int(len(n)/2)\n title = ['MSE FrankeFunction', 'MSE data', 'R2 FrankeFunction', 'R2 data']\n y_label = ['MSE', 'MSE', 'R^2', 'R^2']\n errors /= N\n save_name = ['franke', 'data', 'franke', 'data']\n\n if method == 'Ridge':\n method += ' with lambda = ' + str(lamb)\n if method == 'K-fold':\n method += ' using ' + method2\n if method2 == 'Ridge' or method2 == 'Lasso':\n method += ' with lambda = ' + str(lamb)\n\n for i in range(4):\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 7))\n for j in range(len(k)):\n ax1.plot(n[:n_mid], errors[i, j, :n_mid], label = 'k = ' + str(k[j]))\n ax2.plot(n[n_mid:], errors[i, j, n_mid:], label = 'k = ' + str(k[j]))\n\n ax1.set_ylabel(y_label[i]); ax2.set_ylabel(y_label[i])\n ax1.set_xlabel('n'); ax2.set_xlabel('n')\n\n if split == True:\n fig.suptitle(title[i] + ' with ' + str(method) + ' with test/training split at ' + str(train) + ' and mean of ' + str(N) + ' runs.')\n else:\n fig.suptitle(title[i] + ' with ' + str(method) + ' without test/training split' + ' and mean of ' + str(N) + ' runs.')\n\n ax1.legend(); ax2.legend()\n #fig.savefig(results_dir + save_fig + method + save_name[i] + y_label[i] + '.png')\n plt.show()",
"def part_one(test_data, train_data):\n print \"Part 1.1\"\n range_k = range(1,71, 2)\n train_acc = []\n test_acc = []\n cv_acc = []\n train_neighbors_dists = knn_algo(train_data, train_data)\n test_neighbors_dists = knn_algo(test_data, train_data)\n cv_dists = knn_algo_cross_validate(train_data)\n\n for k in range_k:\n train_neighbors = find_k(train_neighbors_dists, k)\n train_err = (calculate_error(train_neighbors))\n\n test_neighbors = find_k(test_neighbors_dists, k)\n test_err = (calculate_error(test_neighbors))\n\n cv_neighbors = find_k(cv_dists, k)\n cv_err = (calculate_error(cv_neighbors))\n\n ### TODO: cross validation here\n train_acc.append(train_err)\n test_acc.append(test_err)\n cv_acc.append(cv_err)\n\n # part 1.1\n print \"K range: \"\n print range_k\n print \"Train acc: \"\n print train_acc\n print \"Test acc: \"\n print test_acc\n print \"CV acc: \"\n print cv_acc\n\n print \"Part 1.2: \"\n # part 1.2\n plt.plot(range_k, train_acc, label = \"train\")\n plt.plot(range_k, test_acc, label = \"test\")\n plt.plot(range_k, cv_acc, label = \"CV\")\n plt.ylabel(\"percent error\")\n plt.xlabel(\"k\")\n plt.legend()\n plt.show()\n return (range_k, train_acc, test_acc, cv_acc)",
"def get_k_fold(examples, labels, k=10):\n example_fold = []\n label_fold = []\n interval = int(len(examples)/k)\n for i in range(k):\n \t#f_examples = [examples[j] for j in range(len(examples)) if j%k == i]\n #f_labels = [labels[j] for j in range(len(labels)) if j%k == i]\n f_examples = [examples[j] for j in range(interval*i,interval*(i+1))]\n f_labels = [labels[j] for j in range(interval*i,interval*(i+1))]\n example_fold.append(f_examples)\n label_fold.append(f_labels)\n return example_fold, label_fold",
"def k_fold_linear(data: pd.DataFrame, algorithm, folds: int = 5) -> (float, float):\n d = data.sample(frac=1)\n segments = np.array_split(d, folds)\n acc_test = []\n\n acc_train = []\n for i in range(folds):\n temp = segments.copy()\n\n test = temp.pop(i)\n train = pd.concat(temp)\n test_labels = list(test['Labels'])\n train_labels = list(train['Labels'])\n\n model = algorithm(train)\n test_predictions = [round(x, 1) for x in predict_linear_regression(test.drop(['Labels'], axis=1), model)]\n train_predictions = [round(x, 1) for x in predict_linear_regression(train.drop(['Labels'], axis=1), model)]\n\n Confusion_Matrix(test_predictions, test_labels)\n\n acc_test.append(accuracy(test_predictions, test_labels))\n acc_train.append(accuracy(train_predictions, train_labels))\n\n return avg(acc_train), avg(acc_test)",
"def k_fold_tree(data: pd.DataFrame, algorithm, folds: int = 5) -> (float, float):\n d = data.sample(frac=1)\n segments = np.array_split(d, folds)\n acc_test = []\n acc_train = []\n for i in range(folds):\n temp = segments.copy()\n test = temp.pop(i)\n\n test_labels = list(test['Labels'])\n\n train = pd.concat(temp)\n\n train_labels = list(train['Labels'])\n\n model = algorithm(train)\n\n test_predictions = predict_data(test, model)\n train_predictions = predict_data(train, model)\n\n acc_test.append(accuracy(test_predictions, test_labels))\n acc_train.append(accuracy(train_predictions, train_labels))\n\n return avg(acc_train), avg(acc_test)",
"def cv_5_fold(dataFrame):\n dataframe_collection = {}\n i = 0\n j = 0\n l = 0\n guessed_right = 0\n k = [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 33, 35, 37, 39]\n\n k_values = []\n # array to store the accuracy evaluation for each number of K\n accuracy_values = {}\n\n myDict = {}\n for j in range(len(k)): # for all values of K neighbour\n\n print(k[j])\n predicted_right = 0\n total_number = 0\n five_accuracies = []\n for i in range(0, 5):\n #aggregating dataframes by fold - e.g. 1 fold becomes test dataframe; 2,3,4,5 folds become one training dataframe\n trainingDataFrame = dataFrame.loc[dataFrame[15] != (i / 4.00)]\n trainingDataFrame = trainingDataFrame.drop([15], axis=1).reset_index(drop=True)\n testDataFrame = dataFrame.loc[dataFrame[15] == (i / 4.00)]\n testDataFrame = testDataFrame.drop([15], axis=1).reset_index(drop=True)\n\n # output is an array of predicted income values for testDataFrame\n output = knn(trainingDataFrame, testDataFrame, k[j])\n\n # for every fold validation loop calculate the accuracy:\n for instance in range(len(testDataFrame)):\n # checking number of right predictions\n if (output[instance] == testDataFrame[14].iloc[instance]):\n predicted_right += 1.00\n total_number += 1.00\n\n # calculate accuracy as percentage of number of prediction divided by total\n accuracy = (predicted_right / total_number) * 100.0\n # add acccuracies for each of the 5 fold tests to an array\n five_accuracies.append(accuracy)\n\n # PROVIDE FINAL EVALUATION FOR K = J, BY FINDING OUT AVERAGE ACCURACY OF THE FIVE FOLD LOOPS:\n evaluation = 0.0\n for accuracy in range(len(five_accuracies)):\n evaluation += five_accuracies[accuracy]\n\n evaluation = evaluation / 5\n\n accuracy_values.update({k[j]: evaluation})\n\n accuracy_values = collections.OrderedDict(sorted(accuracy_values.items()))\n\n # compute which number of neigbors garners greatest accuracy:\n maxAccuracy = 0\n best_neighbour = 0\n # loop through dictionary values:\n for v in accuracy_values.items():\n # if the value is greater than the current maximum, make it the maximum\n if (v[1] > maxAccuracy):\n maxAccuracy = v[1]\n best_neighbour = v[0]\n\n print(\"Max accuracy \", maxAccuracy)\n print(\"Best Neighbor: \", best_neighbour)\n\n # make a text file containing the K-number and associated accuracy:\n str_x = \"k value | accuracy\" + \"\\n\"\n for k, v in accuracy_values.items():\n str_x += str(k) + \" | \" + str(v) + \"\\n\"\n print(str_x)\n\n text_file = open(\"grid.results.txt\", 'w')\n text_file.write(str_x)\n text_file.close()",
"def fig5(X_r, Y, TRAIN_SIZE=6000):\n \n # Normalize X_r\n X_n = preprocessing.normalize(X_r)\n \n #kNN weighting and k\n weights = [ \"uniform\", \"distance\" ]\n ks = [2,4,8,16,32,64] \n \n # Little lambda functions to standardize feature extraction\n pca = lambda X,Y: PCA(n_components=128).fit(X).transform(X)\n lda = lambda X,Y: LDA().fit(X, Y).transform(X)\n idn = lambda X,Y: X\n \n # Start the plot\n fig, ax = plt.subplots()\n plt.ylabel(\"Error %\")\n plt.xlabel(\"k\")\n \n \n # Try every combination (product) of weights, feature extraction and normalization\n for weight, feat_reduce, X_ in itertools.product(\n weights, [pca, lda, idn], [X_r, X_n]):\n \n # Reset error rate\n errors = []\n \n #Flags to make things easier\n reduction = \"PCA\" if feat_reduce == pca else \"LDA\"\n normalized = \"n\" if X_ is X_n else \"r\"\n \n #Initialize a black (i.e. key - cmy_K_) line\n linestyle = \"k\"\n \n # Match the point style used in Vailaya\n if weight == \"uniform\":\n if X_ is X_n:\n linestyle += \"x\"\n else:\n linestyle += \"*\"\n if weight == \"distance\":\n if X_ is X_n:\n linestyle += \"o\"\n else:\n linestyle += \"+\"\n \n # As well as the line style\n if feat_reduce is pca:\n linestyle += \":\" # Dotted\n elif feat_reduce is lda:\n linestyle += \"--\" # Solid\n else:\n linestyle += \"-\" # Dashed\n \n # Loop through all k's \n for k in ks:\n #Initialized classifier parameters\n knn = neighbors.KNeighborsClassifier(warn_on_equidistant=False)\n knn.n_neighbors = k\n knn.weights = weight\n \n #Here's where the lambda's come in handy.\n X = feat_reduce(X_,Y)\n \n # Fit the training set\n knn.fit(X[:TRAIN_SIZE], Y[:TRAIN_SIZE])\n \n # Again ugly code for the predictions\n predictions = []\n for i in range(TRAIN_SIZE, len(X)):\n predictions += [ knn.predict(X[i])[0] ] \n \n # Calculate error rate and append it to error rate list\n error = 1.- float(sum(predictions == Y[TRAIN_SIZE:])) / len(predictions)\n errors += [error]\n \n # Print it just for fun. Also in case error rates need to be exported.\n print weight, reduction, normalized, k, error\n \n # Plot the line for all k values \n ax.plot(ks, errors, linestyle)\n \n # Couldn't specify legends properly\n #ax.legend()",
"def get_folds(X, y, k):\n # temporarily change the 1/-1 nature of y to 1/0\n _y = (y + 1) / 2\n # partition the examples into postive and negative sets\n positive_indices = np.where(_y)[0]\n negative_indices = np.where(_y - 1)[0]\n assert len(positive_indices) + len(negative_indices) == len(y)\n\n # shuffle both lists\n np.random.shuffle(positive_indices)\n np.random.shuffle(negative_indices)\n\n # create k buckets of indices of (approximately) equal size\n positive_folds_indices = \\\n np.array(np.array_split(positive_indices, k))\n negative_folds_indices = \\\n np.array(np.array_split(negative_indices, k))\n\n train_X, train_y, test_X, test_y = [], [], [], []\n for i in range(k):\n train_folds = np.concatenate((np.arange(0, i), np.arange(i+1, k)))\n pos_train_indices = np.concatenate(positive_folds_indices[train_folds])\n neg_train_indices = np.concatenate(negative_folds_indices[train_folds])\n pos_test_indices = positive_folds_indices[i]\n neg_test_indices = negative_folds_indices[i]\n\n train_X.append(\n np.concatenate((X[pos_train_indices], X[neg_train_indices]))\n )\n train_y.append(\n np.concatenate((y[pos_train_indices], y[neg_train_indices]))\n )\n test_X.append(\n np.concatenate((X[pos_test_indices], X[neg_test_indices]))\n )\n test_y.append(\n np.concatenate((y[pos_test_indices], y[neg_test_indices]))\n )\n\n return zip(train_X, train_y, test_X, test_y)",
"def eigs(m, n, y, cov_fun, lw, loo, k, ylim, figsize, seed):\n # Setup covariance\n np.random.seed(seed)\n T = y * n\n\n names = ['true', 'sample', 'lw_oracle', 'isolw_oracle', 'kfold', 'isokfold',\n 'mv_oracle', 'isonlsq_mv_oracle', 'isonlsq_mv_kfold']\n if lw:\n names += ['lw']\n if loo:\n names += ['loo', 'isoloo']\n dfs = {\n name: pd.DataFrame(np.zeros((m, n)))\n for name in names\n }\n\n pbar = tqdm(total=m)\n for j in range(m):\n # Build Model\n if cov_fun in ['slr', 'factor']:\n fm_seed = np.random.randint(1, 2**32 - 1)\n Sigma, tmp = cov_functions[cov_fun](n, seed=fm_seed)\n else:\n Sigma, tmp = cov_functions[cov_fun](n)\n dfs['true'].iloc[j, :] = tau = annualize_vol(tmp / n)\n\n if ylim is None:\n ylim = (0., 2 * np.max(tau))\n\n # Generate data\n X = sample(Sigma, T)\n S = cov(X)\n lam, U = eig(S)\n\n # Note: eigenvalues need to be scaled by 1 / n to convert to variance\n # Sample covariance\n dfs['sample'].iloc[j, :] = annualize_vol(lam / n)\n\n # Oracle LW NLS shrinkage\n _, tmp = nls_oracle(X, S, U, Sigma)\n dfs['lw_oracle'].iloc[j, :] = annualize_vol(tmp / n)\n tmp = isotonic_regression(tmp)\n dfs['isolw_oracle'].iloc[j, :] = annualize_vol(tmp / n)\n\n # LW NLS shrinkage\n if lw:\n S_lw = nlshrink_covariance(X, centered=True)\n tmp = eig(S_lw, return_eigenvectors=False)\n dfs['lw'].loc[j, :] = annualize_vol(tmp / n)\n\n # LOO LW NLS shrinkage\n if loo:\n _, tmp = nls_loo_cv(X, S, U)\n dfs['loo'].iloc[j, :] = annualize_vol(tmp / n)\n tmp = isotonic_regression(tmp)\n dfs['isoloo'].iloc[j, :] = annualize_vol(tmp / n)\n\n # K-fold LW NLS shrinkage\n _, tmp = nls_kfold_cv(X, S, U, k)\n dfs['kfold'].iloc[j, :] = annualize_vol(tmp / n)\n tmp = isotonic_regression(tmp)\n dfs['isokfold'].iloc[j, :] = annualize_vol(tmp / n)\n\n # MinVar NLS shrinkage\n _, tmp = minvar_nls_oracle(X, S, lam, U, Sigma)\n dfs['mv_oracle'].iloc[j, :] = annualize_vol(tmp / n)\n # Note: Applying isotonic regression after solving for the oracle values\n # is consistently way worse than solving the constrained LS problem so\n # it is omitted.\n # lam_1, lam_n = lam[0], lam[-1]\n # tmp = isotonic_regression(tmp, y_min=lam_n, y_max=lam_1)\n # dfs['isomv_oracle'].iloc[j, :] = annualize_vol(tmp / n)\n _, tmp = minvar_nls_oracle(X, S, lam, U, Sigma, isotonic=True)\n dfs['isonlsq_mv_oracle'].iloc[j, :] = annualize_vol(tmp / n)\n\n _, tmp = minvar_nls_kfold(X, S, lam, U, k)\n dfs['isonlsq_mv_kfold'].iloc[j, :] = annualize_vol(tmp / n)\n\n pbar.update()\n\n # Generate band plots for various shrinkage methods\n fig, (ax0, ax1, ax2) = plt.subplots(figsize=figsize, ncols=3)\n band_plot(dfs['true'], ax0, 'true')\n band_plot(dfs['true'], ax1, 'true')\n band_plot(dfs['true'], ax2, 'true')\n\n band_plot(dfs['sample'], ax0, 'sample')\n band_plot(dfs['sample'], ax1, 'sample')\n band_plot(dfs['sample'], ax2, 'sample')\n\n if lw:\n band_plot(dfs['lw'], ax1, 'lw')\n\n if loo:\n band_plot(dfs['loo'], ax0, 'loo')\n band_plot(dfs['isoloo'], ax1, 'isoloo')\n\n band_plot(dfs['kfold'], ax0, 'kfold')\n band_plot(dfs['isokfold'], ax1, 'isokfold')\n\n band_plot(dfs['mv_oracle'], ax0, 'mv_oracle')\n # band_plot(dfs['isomv_oracle'], ax1, 'isomv_oracle')\n band_plot(dfs['isonlsq_mv_oracle'], ax2, 'isonlsq_mv_oracle')\n band_plot(dfs['isonlsq_mv_kfold'], ax2, 'isonlsq_mv_kfold')\n\n ax0.legend()\n ax1.legend()\n ax2.legend()\n ax0.set_ylim(*ylim)\n ax1.set_ylim(*ylim)\n ax2.set_ylim(*ylim)\n\n plt.show()",
"def for_fun():\n k = 10\n total_draws = 35\n total_balls = 40\n n_experiments = 100\n old_result = None\n\n rand_color = randomcolor.RandomColor()\n fig = plt.figure(constrained_layout=False, frameon=False)\n ax = fig.add_axes([0, 0, 1, 1])\n ax.set_facecolor((0.07, 0.07, 0.05))\n\n # for total_draws, color in zip([20, 25, 30], ['red', 'red', 'red']):\n # for total_draws, color in zip([20, 25, 30], ['purple', 'yellow', 'purple']): # mardi gras argyle\n # for total_draws, color in zip([5, 25, 27, 23, 40], ['purple', 'purple', 'blue', 'blue', 'purple']):\n for total_draws, color in zip([20, 3, 5, 10, 35], ['blue', 'red', 'blue', 'purple', 'blue']): # this one is good\n for _ in range(n_experiments):\n for num_samples in [10000]:\n experiment_results = []\n for samples in range(num_samples):\n N = np.random.randint(1, k, total_balls - 1)\n N = np.append(N, k)\n N = np.array(N).flatten()\n random.shuffle(N)\n draw = N[:np.random.randint(total_draws - 3, total_draws + 3)]\n experiment_result = np.any(draw == k)\n experiment_results.append(experiment_result)\n if old_result:\n if np.random.uniform(0, 1) > 0.8:\n luminosity = None\n if color == 'green':\n luminosity = 'bright'\n if color == 'yellow':\n luminosity = 'dark'\n tmp_rgb_color = np.array(rand_color.generate(\n hue=color, luminosity=luminosity, count=1, format_='Array_rgb')) / 256.\n tmp_rgb_color = tmp_rgb_color[0]\n alpha = np.min([np.random.beta(0.01, 0.2), 0.9])\n ax.fill_between(np.arange(1, num_samples + 1),\n np.cumsum(experiment_results) / np.arange(1, num_samples + 1),\n np.cumsum(old_result) / np.arange(1, num_samples + 1),\n alpha=alpha,\n color=tmp_rgb_color)\n if np.random.uniform(0, 1) > 0.95:\n tmp_rgb_color = np.array(rand_color.generate(\n hue=color, luminosity='dark', count=1, format_='Array_rgb')) / 256.\n tmp_rgb_color = tmp_rgb_color[0]\n alpha = np.min([np.random.beta(0.1, 0.2), 0.9])\n linewidth = np.min([np.random.exponential(5.0), 0.9])\n ax.semilogx(np.arange(1, num_samples + 1),\n np.cumsum(experiment_results) / np.arange(1, num_samples + 1),\n alpha=alpha,\n linewidth=linewidth,\n c=tmp_rgb_color)\n old_result = experiment_results[:]\n\n plt.show()",
"def sgd(iterations):\n for iteration in range(0,iterations):\n error = []\n for user_id in range(0,latent_user_preferences.shape[0]):\n for item_id in range(0,latent_item_features.shape[0]):\n rating = user_ratings[user_id][item_id]\n if rating != 99:\n err = train(user_id, item_id, rating)\n error.append(err)\n mse = (np.array(error) ** 2).mean() \n if(iteration%1 == 0):#000 == 0 ):\n print(mse)\n return error",
"def Naive_Byse_k_folds(train_p):\n all_ex, att = make_examples(copy.deepcopy(train_p))\n F2I = parseAttributes(train_p[0])\n k=5\n accuracy = 0\n data = dev_train_sep(k,data=all_ex)\n for i in range(k):\n dev = data[i]\n train =[]\n for j in range(k):\n if not j == i:\n train += data[j]\n naive_bayes = NaiveBayes(train, dev, attributes=att, F2I=F2I)\n acc= naive_bayes.naiveBayes()\n accuracy +=acc\n avg_acu = \"{0:.2f}\".format(accuracy / k)\n print(\"Naive Byse : \" + str(avg_acu))\n return avg_acu",
"def cross_validation(self):\r\n kfold = KFold(10, shuffle=True, random_state=1)\r\n data = self.read_data()\r\n # error from each kth iteration\r\n errors = []\r\n for train, test in kfold.split(data):\r\n\r\n #Splitting into test and training data\r\n X_test, Y_test = data[test][:, 1], data[test][:, 2]\r\n X_train, Y_train = data[train][:, 1], data[train][:, 2]\r\n\r\n #Training on the split data\r\n weights, design_matrix = self.train(X_train, Y_train)\r\n\r\n y_pred = self.make_prediction(X_test, weights)\r\n self.plot(y_true=Y_test, y_pred=y_pred, x=X_test)\r\n\r\n #error matrix\r\n errors.append(np.mean(y_pred - Y_test) ** 2)\r\n\r\n #cross-validation parameter taken as mean of errors obtained from each iteration\r\n print(\"%0.10f mean with a standard deviation of %0.10f across the k-folds\" % (np.mean(errors), np.std(errors)))",
"def show_result_fold(fold_results, all_exp_setting, other_details):\n header = [\"\", \"MSE\", \"CRPS\", \"Interval Error\"]\n table = []\n\n all_time_step_loss = []\n all_crps_loss = []\n list_exp_setting = others.create_legacy_exp_setting(all_exp_setting)\n\n flatten_dataset = list(itertools.chain(*[\n exp_setting[\"task\"][\"dataset\"]\n for exp_setting in list_exp_setting \n ]))\n clus_num, output_name, method_name, is_show_cluster = other_details\n\n\n for i, fold_result in enumerate(fold_results):\n all_error_ind, all_error_intv = [], []\n all_error_crps = []\n for result in fold_result:\n loss_detail = result.loss_detail\n all_error_ind += loss_detail[\"time_step_error\"]\n all_error_intv += [\n loss for _, _, loss in loss_detail[\"intv_loss\"]\n ]\n all_error_crps.append(loss_detail[\"all_crps\"])\n\n dataset = flatten_dataset[i]\n task_prop = dataset[\"out_feat_tran_lag\"]\n metal = dataset.gen_name()\n\n cal_mean_std = lambda x: (\n np.mean(x), np.std(x)/np.sqrt(len(x))\n )\n\n time_step_mean, time_step_std = cal_mean_std(all_error_ind)\n all_time_step_loss.append((time_step_mean, time_step_std))\n\n intv_mean, intv_std = cal_mean_std(all_error_intv)\n\n crps_mean, crps_std = cal_mean_std(all_error_crps)\n all_crps_loss.append((crps_mean, crps_std))\n\n num_round = 7\n\n cluster_text = f\"(Cluster {clus_num[i]})\\n \" if is_show_cluster else \"\"\n\n table.append([\n f\"Task {i+1} \" + cluster_text + f\" Algorithm: {method_name[i]}\\n Commodity: {output_name[i]}\\n Lag: {task_prop[0]}\", \n f\"{time_step_mean:.{num_round}} ± {time_step_std:.{num_round}}\", \n f\"{crps_mean:.{num_round}} ± {crps_std:.{num_round}}\", \n f\"{intv_mean:.{num_round}} ± {intv_std:.{num_round}}\"\n ])\n\n print(tabulate(table, headers=header, tablefmt=\"grid\"))\n return all_time_step_loss, all_crps_loss",
"def generate_k_folds(dataset, k):\n\n # TODO: finish this.\n folds = []\n dataset = np.concatenate((dataset[0], np.array(dataset[1]).reshape(-1,1)), axis=1)\n dataset_shape = dataset.shape\n shape_test_set = int(round(dataset_shape[0]/k,0))\n split_dataset = np.array_split(dataset,k,axis=0)\n for i in range(k):\n test_set = split_dataset[i]\n c = [k for j,k in enumerate(split_dataset) if j!=i]\n training_set = np.concatenate(c,axis=0)\n if test_set.shape[0] != shape_test_set:\n step = test_set.shape[0] - shape_test_set\n test_set = test_set[:-step,:]\n training_set = np.concatenate((training_set, test_set[-step:,:]), axis=0)\n r_test_set = (test_set[:,:-1], list(test_set[:,-1]))\n r_train_set = (training_set[:,:-1], list(training_set[:,-1]))\n folds.append((r_train_set, r_test_set))\n return folds",
"def test_k_rank_approximate(corpus):\n return",
"def train_KMean(data: np.array, labels: np.array, n_clusters: int)->None:\n n_examples = np.size(data, 0)\n n_features = np.size(data, 1)\n\n # Scale the data so that Euclidian distance makes sense\n means = np.mean(data, axis = 0)\n stddevs = np.std(data, axis = 0, ddof = 1)\n\n #print(means)\n #print(stddevs)\n\n data_scaled = np.zeros((n_examples, n_features))\n\n for i in range(n_features):\n data_scaled[:, i] = (data[:,i] - means[i]) / stddevs[i]\n\n study_correlation(data_scaled)\n\n # Initialize the centroids\n idx = np.random.randint(n_examples, size = n_clusters)\n centroids = data_scaled[idx, :]\n\n counter = 0\n\n while True:\n\n distances = np.array([[np.sqrt(np.sum(np.square(example-centroid))) for centroid in centroids] for example in data_scaled])\n centroid_idx = np.argmin(distances, axis = 1)\n old_centroids = centroids\n centroids = update_centroids(data_scaled, centroid_idx, n_examples)\n #displacement = get_displacement(old_centroids, centroids)\n displacement = np.linalg.norm(np.array([old - new for old, new in zip(old_centroids, centroids)]))\n\n #assert np.linalg.norm(np.array([old - new for old, new in zip([1, 2, 3, 4], [5, 6, 7, 8])])) == 8\n\n if counter == 0:\n# print(\"Initial displacement = {}\".format(displacement))\n initial_displacement = displacement\n\n counter += 1\n\n if displacement < (initial_displacement / 10000): break\n\n #print(\"Total number of loops before ending : {}\".format(counter))\n converted_predictions = convert_predictions(centroid_idx)\n accuracy = np.mean([p == l for p, l in zip(converted_predictions, labels)])\n print(\"Accuracy = {}\".format(accuracy))\n\n pass",
"def generate_statistics_for_recommends(mat,k=20):\r\n dict_userid_to_recommends = create_dict_user_id_to_recommends_from_mat(mat)\r\n dict_userid_to_moviesliked = load_or_create('/DICT/UserIdToLikedMovies.dict', create_dict_user_id_to_liked_items)\r\n dict_ecc = load_or_create('/DICT/MovieIdToItemEccentricity.dict', create_dict_ecc)\r\n user_to_ecc = load_or_create('/DICT/UserIdToUserEccentricity.dict',createDictUserIdToUserEccentricity)\r\n\r\n top_items_ecc_all=[]\r\n user_ecc=[]\r\n user_avg_rec_ecc=[]\r\n to_iter=[i for i in dict_userid_to_recommends]\r\n print(\"starting to calculate plot data...\")\r\n counter_ecc=0\r\n counter_none_ecc=0\r\n print(\"length dict:\",len(dict_userid_to_recommends))\r\n for user in tqdm(to_iter):\r\n\r\n #delete vals which user alreay liked\r\n list_recommends_not_liked_yet=[[i,j]for i,j in dict_userid_to_recommends[user] if i not in dict_userid_to_moviesliked[user]]\r\n list_recommends_not_liked_yet=sorted(list_recommends_not_liked_yet, key=itemgetter(1))\r\n #only take top k\r\n top_items=list_recommends_not_liked_yet[-20:]\r\n top_items_ecc=[dict_ecc[item] for item,val in top_items]\r\n #append ecc vals to plot list\r\n counter_ignored=0\r\n if len(top_items_ecc) > 0:\r\n user_ecc.append(user_to_ecc[user])\r\n if user_to_ecc[user]>0:\r\n counter_ecc+=1\r\n else:\r\n counter_none_ecc+=1\r\n user_avg_rec_ecc.append(mean(top_items_ecc))\r\n else:\r\n print('ignored')\r\n counter_ignored+=1\r\n for i in top_items_ecc:\r\n top_items_ecc_all.append(i)\r\n if user==0:\r\n print(50*\"THIS SHOULD NOT HAPPEN\\n\")\r\n regr = linear_model.LinearRegression()\r\n a=np.array(user_ecc).reshape((len(user_ecc),1))\r\n b=np.array(user_avg_rec_ecc)\r\n print(a.shape,b.shape)\r\n user_ecc_np=np.array(user_ecc).reshape((len(user_ecc),1))\r\n user_avg_rec_ecc_np=np.array(user_avg_rec_ecc)\r\n print(len(user_ecc_np),len(user_avg_rec_ecc_np))\r\n print(user_ecc_np.shape,user_avg_rec_ecc_np.shape)\r\n regr.fit(user_ecc_np, user_avg_rec_ecc_np)\r\n y_pred = regr.predict(user_ecc_np)\r\n print(y_pred[:],user_avg_rec_ecc[:10])\r\n print('Coefficients: \\n', regr.coef_)\r\n # The mean squared error\r\n print(\"Mean squared error: %.2f\"\r\n % mean_squared_error(user_ecc_np, y_pred))\r\n # Explained variance score: 1 is perfect prediction\r\n print('Variance score: %.2f' % r2_score(user_avg_rec_ecc_np, y_pred))\r\n print(\"Pearson relation:\",stats.pearsonr(np.array(user_ecc), np.array(user_avg_rec_ecc)))\r\n # Plot outputs\r\n print('Starting to plot:')\r\n print(\"ecc users:\",counter_ecc)\r\n print(\"none ecc users:\",counter_none_ecc)\r\n print(\"ignored users:\",counter_ignored)\r\n #Now plot box plot of all ecc\r\n print(user_ecc_np.shape, y_pred.shape)\r\n plt.scatter(x=user_ecc,y=user_avg_rec_ecc,s=0.3)\r\n plt.text(-2.9, 1.3, \"Mean squared error: %.2f\"\r\n % mean_squared_error(user_avg_rec_ecc_np, y_pred),\r\n color='black', fontsize=12)\r\n plt.text(-2.9, 1.6, \"Correlation:\"+str(stats.pearsonr(np.array(user_ecc), np.array(user_avg_rec_ecc))),\r\n color='black', fontsize=12)\r\n plt.plot(user_ecc_np.tolist(), y_pred.tolist(), color='red')\r\n\r\n plt.ylim([-3, +3])\r\n plt.xlim([-3, +3])\r\n plt.xlabel(\"User Eccentricity\")\r\n plt.ylabel(\"Avg. Item Eccentricity in top-20 recommendations\")\r\n plt.show()\r\n print('Overall avg ecc of users in box:',mean(user_ecc))\r\n plt.boxplot(top_items_ecc_all)\r\n plt.show()",
"def cross_validation_experiment(train_data, train_labels):\n accuracies = []\n for i in range(1, 200):\n avg = cross_validation(train_data, train_labels, i, 10)\n accuracies.append(avg)\n fig = plt.figure()\n dim = np.arange(1,len(accuracies)+1)\n plt.plot(dim,accuracies, label='Accuracy')\n plt.xlabel('k')\n plt.ylabel('accuracy')\n plt.grid()\n plt.legend()\n plt.tight_layout()\n fig.savefig('knn_cross_validation.png')\n best_k = np.argmax(accuracies)+1\n return best_k",
"def ks_permutation(df, test, depend, N=100):\n df_miss = df.assign(test_is_null=df[test].isnull()) # Convert to True/False\n \n gpA = df.loc[df_miss['test_is_null'], depend]\n gpB = df.loc[~df_miss['test_is_null'], depend]\n obs_ks, p_val = ks_2samp(gpA, gpB) # Get the ks observed value\n \n kslist = []\n for _ in range(N):\n\n # shuffle the dependent column\n shuffled_dep = (\n df_miss[depend]\n .sample(replace=False, frac=1)\n .reset_index(drop=True)\n )\n\n # \n shuffled = (\n df_miss\n .assign(**{'Shuffled ' + depend: shuffled_dep})\n )\n\n ks, _ = ks_2samp(\n shuffled.loc[shuffled['test_is_null'], 'Shuffled ' + depend],\n shuffled.loc[~shuffled['test_is_null'], 'Shuffled ' + depend]\n )\n\n # add it to the list of results\n kslist.append(ks)\n \n # pd.Series(kslist).plot(kind='hist', density=True, alpha=0.8)\n # plt.scatter(obs_ks, 0, color='red', s=40);\n \n # return np.min([np.count_nonzero(kslist >= obs_ks) / len(kslist), np.count_nonzero(kslist <= obs_ks) / len(kslist)])\n return np.count_nonzero(np.array(kslist) >= obs_ks) / len(kslist) #, np.count_nonzero(kslist <= obs_ks) / len(kslist)])",
"def cv_multiclass_fold(Y,num_fold=10):\n\t\n (K,N) = Y.shape\n indices = dict()\n Nk = dict()\n for k in range(K):\n # select indices belonging to class k\n indices[k] = list((Y[k,:]==1).nonzero()[0])\n rand.shuffle(indices[k])\n Nk[k] = len(indices[k])/num_fold\n\t\n index_list = []\n\n for k in range(K):\n for i in range(num_fold-1):\n # split class-k indices into num_fold random sets\n try:\n index_list[i].extend(indices[k][Nk[k]*i:Nk[k]*(i+1)])\n except IndexError:\n index_list.append([])\n index_list[i].extend(indices[k][Nk[k]*i:Nk[k]*(i+1)])\n try:\n index_list[num_fold-1].extend(indices[k][Nk[k]*(num_fold-1):])\n except IndexError:\n index_list.append([])\n index_list[num_fold-1].extend(indices[k][Nk[k]*(num_fold-1):])\n\n return index_list",
"def error_profile(y_true: Dict[str, List[np.ndarray]], y_pred: Dict[str, List[np.ndarray]], model_type: str) -> None:\r\n num_folds = len(y_pred[\"train\"])\r\n\r\n acc = {\"train\": [], \"test\": []}\r\n test_predictions = np.array([])\r\n test_labels = np.array([])\r\n\r\n for k in range(num_folds):\r\n y_train_true = y_true[\"train\"][k]\r\n y_train_pred = y_pred[\"train\"][k]\r\n y_test_true = y_true[\"test\"][k]\r\n y_test_pred = y_pred[\"test\"][k]\r\n\r\n # Accuracies\r\n train_acc = np.sum(np.equal(y_train_true, y_train_pred)) / np.shape(y_train_true)[0]\r\n test_acc = np.sum(np.equal(y_test_true, y_test_pred)) / np.shape(y_test_true)[0]\r\n acc[\"train\"].append(train_acc)\r\n acc[\"test\"].append(test_acc)\r\n\r\n test_labels = np.concatenate((test_labels, y_test_true))\r\n test_predictions = np.concatenate((test_predictions, y_test_pred))\r\n\r\n pd.DataFrame(acc).plot().set_title(\"Accuracies for \" + model_type)\r\n plt.xlabel(\"Cross validation fold\")\r\n plt.ylabel(\"Accuracy (max = 1)\")\r\n plt.xticks(list(range(num_folds)))\r\n plt.tight_layout()\r\n plt.savefig(\"Figures/\" + model_type + \"_acc\")\r\n\r\n classes = np.unique(test_labels)\r\n\r\n # Confusion matrix\r\n # we only care for the confusion matrix of the testing set\r\n conf_mat = confusion_matrix(test_labels, test_predictions)\r\n fig, ax = plt.subplots(1, 2, sharey=\"all\", figsize=(16, 9))\r\n sn.heatmap(\r\n conf_mat,\r\n cmap=\"Oranges\",\r\n annot=True,\r\n xticklabels=classes,\r\n yticklabels=classes,\r\n ax=ax[0],\r\n )\r\n ax[0].set_title(\"Confusion matrix\")\r\n conf_mat2 = np.array(conf_mat)\r\n np.fill_diagonal(conf_mat2, -1)\r\n sn.heatmap(\r\n conf_mat2,\r\n cmap=\"Oranges\",\r\n annot=True,\r\n xticklabels=classes,\r\n yticklabels=classes,\r\n ax=ax[1],\r\n )\r\n ax[1].set_title(\"Confusion matrix (ignoring diagonal)\")\r\n fig.suptitle(\"Confusion matrices for \" + model_type)\r\n plt.savefig(\"Figures/\" + model_type + \"cfx_mat\")\r\n\r\n # Evaluate metrics for each class\r\n metrics = {}\r\n total = np.sum(conf_mat)\r\n for class_num in range(np.shape(conf_mat)[0]):\r\n class_metrics = {}\r\n tp = conf_mat[class_num, class_num]\r\n fn = np.sum(conf_mat[class_num, :]) - tp\r\n fp = np.sum(conf_mat[:, class_num]) - tp\r\n tn = total - tp - fn - fp\r\n\r\n class_metrics[\"sens\"] = tp / (tp + fn) # specificity (recall)\r\n class_metrics[\"spes\"] = tn / (tn + fp) # sensitivity\r\n class_metrics[\"ppv\"] = tp / (tp + fp) # positive predictive value (precision)\r\n class_metrics[\"npv\"] = tn / (tn + fn) # negative predictive value\r\n class_metrics[\"F1\"] = (2 * tp) / (2 * tp + fn + fp) # F1 score\r\n class_metrics[\"auc\"] = roc_auc_score( # Area under ROC\r\n test_labels == classes[class_num], test_predictions == classes[class_num]\r\n )\r\n\r\n metrics[classes[class_num]] = class_metrics\r\n\r\n print(\"-\" * 100)\r\n print(\"## Error profile for \" + model_type)\r\n print(\"Cross validated accuracy = {}%\".format(np.mean(acc[\"test\"]) * 100))\r\n print(pd.DataFrame(metrics).to_markdown())\r\n print(\"-\" * 100)",
"def cross_validation(self, k=5):\n test_errors, train_errors = [], []\n\n # For leave out cross validation k = n\n if k == -1:\n k = len(data)\n\n for _ in range(k):\n shuffled_data = data.copy()\n\n # Do not shuffle data for leave one out cross validation\n if k != -1:\n # Copy the original data and shuffle it\n np.random.shuffle(shuffled_data)\n\n # Divide it into k folds\n split = int(((k - 1) / k) * len(data))\n train_data = shuffled_data[:split]\n test_data = shuffled_data[split:]\n\n # Find train_X, train_Y, test_X, test_Y\n # KNN requires access to training data to determine error\n self.train_X = np.asarray(train_data[:, :6])\n self.train_Y = np.asarray(train_data[:, 7])\n\n test_X = np.asarray(test_data[:, :6])\n test_Y = np.asarray(test_data[:, 7])\n\n # Use (k-1) part for training and 1 part for testing\n self.fit(self.train_X, self.train_Y)\n test_error = self.compute_error(test_X, test_Y)\n train_error = self.compute_error(self.train_X, self.train_Y)\n test_errors.append(test_error)\n train_errors.append(train_error)\n\n # Average the error\n avg_train_error = np.round(np.average(np.asarray(train_errors), axis=0), 3)\n avg_test_error = np.round(np.average(np.asarray(test_errors), axis=0), 3)\n print(\"The average error of {} is - Train : {}\\tTest : {} Overfit : {}\".format(self.method,\n avg_train_error, avg_test_error,\n avg_test_error > avg_train_error))\n return avg_test_error, avg_train_error",
"def KMeansClustering(VSA, A, DuplicateSectionsArray, labels,K=None):\r\n Q=labels.max()\r\n NewLabelsInOne=[0]*len(DuplicateSectionsArray)\r\n centroids=[]\r\n AllSeperatedLabels=[]\r\n for o in range(len(DuplicateSectionsArray)):\r\n NewLabelsList=[]\r\n p = DuplicateSectionsArray[o] \r\n plt.figure()\r\n plt.imshow(labels==p)\r\n plt.title(f'Problematic Section {p}')\r\n if K==None:\r\n K=round(A[p-1]/mean(VSA)) #p is the number of the section, but has the index p-1 (python starts at 0)\r\n print(A[p-1]/mean(VSA))\r\n print(K)\r\n y,x=np.where(labels==p)\r\n Coord=np.array([x,y]).T\r\n n_clusters=int(K)\r\n kmeans=KMeans(n_clusters)\r\n kmeans=kmeans.fit(Coord)\r\n kmeans_labels = kmeans.predict(Coord)\r\n centroids=kmeans.cluster_centers_\r\n \r\n plt.figure()\r\n plt.imshow(labels==p)\r\n plt.plot(centroids[:,0],centroids[:,1],'r+', mew=2)\r\n plt.title(f'Problematic Section {p} with K-means clustering centroids displayed')\r\n \r\n plt.figure()\r\n plt.imshow(labels==p)\r\n seperatedlabels=[]\r\n for n in range(max(kmeans_labels)+1):\r\n validlabelx=[]\r\n validlabely=[]\r\n newlabel=np.zeros(np.shape(labels))\r\n for m in range(len(x)):\r\n if kmeans_labels[m]==n:\r\n validlabelx.append(x[m])\r\n validlabely.append(y[m])\r\n newlabel[y[m]][x[m]]=n+1+Q\r\n seperatedlabels.append(newlabel)\r\n plt.plot(validlabelx,validlabely)\r\n plt.title(f'Proposed seperation of problematic section {p}')\r\n AllSeperatedLabels.append(seperatedlabels)\r\n NewLabelsList=NewLabelsList+seperatedlabels\r\n NewLabelsInOne[o]=sum(NewLabelsList).astype(np.int32)\r\n Q=NewLabelsInOne[o].max()\r\n print(Q)\r\n \r\n #skimage slic for k means clustering\r\n return (AllSeperatedLabels,NewLabelsInOne,K)",
"def main():\n # Read in trainingSet and testSet as a DataFrame\n trainingOriginal = pd.read_csv(\n filepath_or_buffer=\"~/Desktop/KNN Implementation/adult.train.5fold.csv\")\n testOriginal = pd.read_csv(filepath_or_buffer=\"~/Desktop/KNN Implementation/adult.test.csv\")\n\n # Select only the numeric data\n training = pd.DataFrame(trainingOriginal.select_dtypes(['number']))\n training = pd.concat([training.reset_index(drop=True),\n trainingOriginal['earns'].reset_index(drop=True)], axis=1)\n\n # Select only the numeric data\n test = pd.DataFrame(testOriginal.select_dtypes(['number']))\n test = pd.concat([test.reset_index(drop=True),\n testOriginal['earns'].reset_index(drop=True)], axis=1)\n\n # Normalize the columns for training and test\n # print training['age'].min()\n # print training['age'].max()\n # print training.head()\n\n # Run max-min normalization on numerical columns for testing and training data\n for i in range(6):\n training.iloc[:, i] = (training.iloc[:, i]- training.iloc[:, i].min())/(training.iloc[:, i].max() - training.iloc[:, i].min())\n test.iloc[:, i] = (test.iloc[:, i]- test.iloc[:, i].min())/(test.iloc[:, i].max() - test.iloc[:, i].min())\n\n # Convert the 'earns' column to boolean as follows\n training['earns'] = training['earns'] == '>50K'\n test['earns'] = test['earns'] == ' >50K'\n\n # Group the training set by the fold attribute as given by the dataset\n trainingForFinal = training\n training = training.groupby('fold')\n\n # Since we want to consider odd k-values from 1 to 39, construct a list with these values\n kList = []\n for i in range(40):\n if i % 2 == 1:\n kList.append(i)\n\n # Empty dictionary to hold performance of each k-values and its accuracy\n performance = {}\n\n # Compute the performance for each k-value\n for k in kList:\n performance = crossValidation(training, k, performance)\n\n # Sort the performance dictionary by its accuracy (value)\n performance = sorted(performance.items(), key=operator.itemgetter(1), reverse=True)\n\n # Open file to write results\n file = open('grid.results.txt', 'w')\n # Write the results to file\n file.write(\"K | Accuracy\\n\")\n for item in performance:\n if item[0] < 10:\n file.write(str(item[0]) + ' | ' + str(item[1]) + '\\n')\n else:\n file.write(str(item[0]) + ' | ' + str(item[1]) + '\\n')\n # Close file\n file.close()\n\n # The best K is the one at the top of the list after the sorting\n bestK = performance[0][0]\n\n print 'Running Test Set with K = ' + str(bestK)\n\n applyModel(test,trainingForFinal,bestK)",
"def run_kohonen(data, size_k: int=6, sigma: float=2.0, eta: int=0.9, \n tmax: int=5000, convergence=0):\n dim = 28*28\n data_range = 255.0\n dy, dx = data.shape\n \n #convergence criteria\n eps = 1E-6\n eps_2 = 0.1\n \n #initialise the centers randomly\n centers = np.random.rand(size_k**2, dim) * data_range\n \n #build a neighborhood matrix\n neighbor = np.arange(size_k**2).reshape((size_k, size_k))\n \n #set the random order in which the datapoints should be presented\n i_random = np.arange(tmax) % dy\n np.random.shuffle(i_random)\n \n #error for convergence criterion\n error = [np.inf]\n \n print('start iteration')\n for t, i in enumerate(i_random):\n old_centers = copy(centers)\n som_step(centers, data[int(i),:],neighbor,eta,sigma)\n \n if t % 1E4 == 0:\n print('iteration {}'.format(t))\n \n if convergence == 1:\n #convergence: distance between samples and best matching prototypes \n error.append(calculate_error(centers,data))\n# if np.abs((error[-2]-error[-1])/error[1]) < eps :\n# break\n \n elif convergence == 2:\n #convergence: non significant weight update\n err = np.linalg.norm(centers-old_centers)\n error.append(err)\n# if err < eps_2:\n# break\n \n \"\"\" # for visualization, you can use this:\n for i in range(size_k**2):\n plb.subplot(size_k,size_k,i)\n \n plb.imshow(np.reshape(centers[i,:], [28, 28]),interpolation='bilinear')\n plb.axis('off')\n \n # leave the window open at the end of the loop\n plb.show()\n plb.draw() \"\"\"\n \n print('Total iteration : {}'.format(t))\n return centers, error[1:]"
] | [
"0.642672",
"0.62238246",
"0.606139",
"0.5966277",
"0.5918029",
"0.58358914",
"0.5790589",
"0.5751784",
"0.5710527",
"0.57082236",
"0.56991595",
"0.56929916",
"0.5687621",
"0.56728876",
"0.5656668",
"0.56505597",
"0.5638749",
"0.56250715",
"0.56044215",
"0.5599866",
"0.5587948",
"0.5573161",
"0.5560366",
"0.5550217",
"0.5548502",
"0.55466056",
"0.55085224",
"0.549911",
"0.54726475",
"0.546965"
] | 0.6231291 | 1 |
Finds the best lambda for each polynomial order in p on the training data using k-fold cross-validation and makes multiple plots. Very slow for large n and m. | def best_fit(x, y, z, z_real, p = list(range(3, 15)), folds = 4, train = 0.7, seed = 42, n_lambda = 2001, n = 1, m = 1):
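    # Minimal usage sketch (hypothetical inputs; assumes FrankeFunction and the
    # regression class defined elsewhere in this project are importable):
    #   x, y = np.meshgrid(np.random.uniform(0, 1, 20), np.random.uniform(0, 1, 20))
    #   z_real = FrankeFunction(x, y)
    #   z = z_real + np.random.normal(0, 1, x.shape)
    #   best_fit(x, y, z, z_real, p=list(range(3, 8)), n_lambda=101)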
lambdas = np.array([0] + np.logspace(-5.5, -1, n_lambda).tolist())
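    # Lambda grid: 0 (no regularization) followed by n_lambda log-spaced values
    # between 10^-5.5 and 10^-1.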
polynomials = np.array(p)
X, Y = np.meshgrid(lambdas, polynomials)
MSE = np.zeros(np.shape(X))
lambda_min_ridge = np.zeros(len(polynomials))
lambda_min_lasso = np.zeros(len(polynomials))
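    # Cross-validated 'best' lambda per polynomial degree, for Ridge and Lasso respectively.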
R2 = np.zeros((3, len(polynomials)))
MSE = np.zeros((3, len(polynomials)))
R2_data = np.zeros((3, len(polynomials)))
MSE_data = np.zeros((3, len(polynomials)))
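    # Metric arrays: row 0 = OLS, row 1 = Ridge, row 2 = Lasso; R2/MSE compare the model
    # to the noise-free Franke values, R2_data/MSE_data to the noisy data.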
for i in range(len(polynomials)):
print(i + polynomials[0])
ridge_sum = 0
lasso_sum = 0
model = regression(x, y, z, split = True, train = train, seed = seed, k = polynomials[i])
z_test = np.ravel(np.copy(model.z_test))
for j in range(n): #The mean of n times
ridge_sum += model.lambda_best_fit(method = 'Ridge', fold = folds, random_num = True, n_lambda = n_lambda)[0]
for j in range(m): #The mean of m times
lasso_sum += model.lambda_best_fit(method = 'Lasso', fold = folds, n_lambda = n_lambda)[0]
lambda_min_ridge[i] = ridge_sum/n
lambda_min_lasso[i] = lasso_sum/m
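        # Split the noise-free z_real with the same seed as the model so that
        # z_real_test corresponds to the same rows as model.X_test / model.z_test.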
        _,_, a, z_real_test = model.train_test(X = model.X_full, z = z_real, train = train, seed = seed) #Both the training set and the test set for z_real in that order in list/tuple
Beta_ols = model.OLS()
Beta_ridge = model.Ridge(lam = lambda_min_ridge[i])
Beta_lasso = model.Lasso(lam = lambda_min_lasso[i], max_iter = 1001)
z_tilde_OLS = model.z_tilde(Beta_ols, X = model.X_test)
z_tilde_Ridge = model.z_tilde(Beta_ridge, X = model.X_test)
z_tilde_Lasso = model.z_tilde(Beta_lasso, X = model.X_test)
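        # Score each method on the test split, both against the true Franke surface
        # (R2, MSE) and against the noisy observations (R2_data, MSE_data).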
R2[0, i] = model.R_squared(z_tilde = z_tilde_OLS, z = z_real_test)
R2[1, i] = model.R_squared(z_tilde = z_tilde_Ridge, z = z_real_test)
R2[2, i] = model.R_squared(z_tilde = z_tilde_Lasso, z = z_real_test)
MSE[0, i] = model.MSE(z_tilde = z_tilde_OLS, z = z_real_test)
MSE[1, i] = model.MSE(z_tilde = z_tilde_Ridge, z = z_real_test)
MSE[2, i] = model.MSE(z_tilde = z_tilde_Lasso, z = z_real_test)
R2_data[0, i] = model.R_squared(z_tilde = z_tilde_OLS, z = z_test)
R2_data[1, i] = model.R_squared(z_tilde = z_tilde_Ridge, z = z_test)
R2_data[2, i] = model.R_squared(z_tilde = z_tilde_Lasso, z = z_test)
MSE_data[0, i] = model.MSE(z_tilde = z_tilde_OLS, z = z_test)
MSE_data[1, i] = model.MSE(z_tilde = z_tilde_Ridge, z = z_test)
MSE_data[2, i] = model.MSE(z_tilde = z_tilde_Lasso, z = z_test)
_, _, lambdas = model.lambda_best_fit(method = 'Ridge', fold = folds, random_num = True)
min_MSE = [[np.argmin(MSE[0]), np.argmin(MSE[1]), np.argmin(MSE[2])], [np.argmin(MSE_data[0]), np.argmin(MSE_data[1]), np.argmin(MSE_data[2])]]
    min_R2 = [[np.argmax(R2[0]), np.argmax(R2[1]), np.argmax(R2[2])], [np.argmax(R2_data[0]), np.argmax(R2_data[1]), np.argmax(R2_data[2])]]
    print('Minimum MSE with Franke, OLS: ', np.min(MSE[0]), ' Ridge: ', np.min(MSE[1]), ' Lasso: ', np.min(MSE[2]))
print('With polynoms: ', np.argmin(MSE[0]) + polynomials[0], np.argmin(MSE[1]) + polynomials[0], np.argmin(MSE[2]) + polynomials[0])
print('----------------------------------------------------------------------------------------------')
print('Minimum MSE with Data, OLS: ', np.min(MSE_data[0]), ' Ridge: ', np.min(MSE_data[1]), ' Lasso: ', np.min(MSE_data[2]))
print('With polynoms: ', np.argmin(MSE_data[0]) + polynomials[0], np.argmin(MSE_data[1]) + polynomials[0], np.argmin(MSE_data[2]) + polynomials[0])
print('----------------------------------------------------------------------------------------------')
    print('Maximum R2 with Franke, OLS: ', np.max(R2[0]), ' Ridge: ', np.max(R2[1]), ' Lasso: ', np.max(R2[2]))
print('With polynoms: ', np.argmax(R2[0]) + polynomials[0], np.argmax(R2[1]) + polynomials[0], np.argmax(R2[2]) + polynomials[0])
print('----------------------------------------------------------------------------------------------')
    print('Maximum R2 with Data, OLS: ', np.max(R2_data[0]), ' Ridge: ', np.max(R2_data[1]), ' Lasso: ', np.max(R2_data[2]))
print('With polynoms: ', np.argmax(R2_data[0]) + polynomials[0], np.argmax(R2_data[1]) + polynomials[0], np.argmax(R2_data[2]) + polynomials[0])
print('----------------------------------------------------------------------------------------------')
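    # error_mins: one row per method (OLS, Ridge, Lasso); the columns hold the best
    # MSE/R2 values followed by the polynomial degree at which each optimum occurs.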
error_mins = np.array([[np.min(MSE[0]), np.min(MSE[1]), np.min(MSE[2])],
[np.min(MSE_data[0]), np.min(MSE_data[1]), np.min(MSE_data[2])],
[np.max(R2[0]), np.max(R2[1]) , np.max(R2[2])],
[np.max(R2_data[0]), np.max(R2_data[1]), np.max(R2_data[2])],
[np.argmin(MSE[0]) + polynomials[0], np.argmin(MSE[1]) + polynomials[0], np.argmin(MSE[2]) + polynomials[0]],
[np.argmin(MSE_data[0]) + polynomials[0], np.argmin(MSE_data[1]) + polynomials[0], np.argmin(MSE_data[2]) + polynomials[0]],
[np.argmax(R2[0]) + polynomials[0], np.argmax(R2[1]) + polynomials[0], np.argmax(R2[2]) + polynomials[0]],
[np.argmax(R2_data[0]) + polynomials[0], np.argmax(R2_data[1]) + polynomials[0], np.argmax(R2_data[2]) + polynomials[0]]]).T
text = ['MSE Franke', 'MSE Data','R\(^2\) Franke', 'R\(^2\) Data']
print(latex_print(error_mins, text = text))
    print('Ridge lambda, lowest indexes for Franke: ', np.argmin(MSE[1]))
    print('Ridge lambda, lowest indexes for Data: ', np.argmin(MSE_data[1]))
print(lambda_min_ridge)
print('Lasso lambda, lowest indexes for Franke: ', np.argmin(MSE[2]))
    print('Lasso lambda, lowest indexes for Data: ', np.argmin(MSE_data[2]))
print(lambda_min_lasso)
#Real Franke
plt.plot(polynomials, R2[0], 'go--', label = 'OLS', color = 'red')
plt.plot(polynomials, R2[1], 'go--', label = 'Ridge', color = 'blue')
plt.plot(polynomials, R2[2], 'go--', label = 'Lasso', color = 'green')
plt.title('R2 error between the model and FrankeFunction', fontsize = 14)
plt.ylabel('R2')
plt.xlabel('Polynomial degree')
plt.legend()
plt.tight_layout()
plt.savefig(results_dir + 'ridge_lasso_high_order_poly.png')
plt.show()
plt.plot(polynomials, MSE[0], 'go--', label = 'OLS', color = 'red')
plt.plot(polynomials, MSE[1], 'go--', label = 'Ridge', color = 'blue')
plt.plot(polynomials, MSE[2], 'go--', label = 'Lasso', color = 'green')
plt.title('MSE for test data between the model and FrankeFunction', fontsize = 14)
plt.ylabel('MSE')
plt.xlabel('Polynomial degree')
plt.legend()
plt.tight_layout()
plt.savefig(results_dir + 'ridge_lasso_high_order_polyMSE.png')
plt.show()
#Noise Franke
plt.plot(polynomials, R2_data[0], 'go--', label = 'OLS', color = 'red')
plt.plot(polynomials, R2_data[1], 'go--', label = 'Ridge', color = 'blue')
plt.plot(polynomials, R2_data[2], 'go--', label = 'Lasso', color = 'green')
plt.title('R2 error between the model and data', fontsize = 14)
plt.ylabel('R2')
plt.xlabel('Polynomial degree')
plt.legend()
plt.tight_layout()
plt.savefig(results_dir + 'ridge_lasso_high_order_poly_data.png')
plt.show()
plt.plot(polynomials, MSE_data[0], 'go--', label = 'OLS', color = 'red')
plt.plot(polynomials, MSE_data[1], 'go--', label = 'Ridge', color = 'blue')
plt.plot(polynomials, MSE_data[2], 'go--', label = 'Lasso', color = 'green')
plt.title('MSE for test data between the model and data', fontsize = 14)
plt.ylabel('MSE')
plt.xlabel('Polynomial degree')
plt.legend()
plt.tight_layout()
plt.savefig(results_dir + 'ridge_lasso_high_order_polyMSE_data.png')
plt.show()
#The 'best' lambda at each polynomial degree for Ridge and Lasso
plt.plot(polynomials, lambda_min_ridge, 'go--', label = 'Ridge', color = 'blue')
plt.plot(polynomials, lambda_min_lasso, 'go--', label = 'Lasso', color = 'green')
plt.title('The \'best\' lambda per polynomial')
plt.ylabel('Lambda')
plt.xlabel('Polynomial degree')
plt.legend()
plt.tight_layout()
plt.savefig(results_dir + 'ridge_lasso_lambda_poly.png')
plt.show() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def varying_lamda(x, y, z, lambda_min, lambda_max, n_lambda, k, save_fig = None, method = 'Ridge', split = True, train = 0.7, seed = 42, max_iter = 1001, l_min = False, plot_indexes = [0,1,2]):\n\n lambdas = np.array([0] + np.logspace(lambda_min, lambda_max, n_lambda).tolist())\n polynomials = np.array(k)\n X, Y = np.meshgrid(lambdas, polynomials)\n MSE = np.zeros(np.shape(X))\n\n j = 0\n for k in polynomials:\n print(k)\n\n model = regression(x, y, z, k = int(k), split = split, train = train, seed = seed)\n if method == 'Ridge':\n model.SVD()\n i = 0\n for lam in lambdas:\n\n if method == 'Ridge':\n beta = model.Ridge(lam = lam)\n elif method == 'Lasso':\n beta = model.Lasso(lam = lam, max_iter = max_iter)\n\n z_tilde = model.z_tilde(beta = beta, X = model.X_test)\n MSE[j, i] = model.MSE(z_tilde = z_tilde, z = model.z_test)\n i += 1\n j += 1\n\n print('Method = ', method)\n lambdas_min = []\n for i in range(len(polynomials)):\n minimum_index = MSE[i].argmin()\n print('Minimum lambda for polynomial %.i: ' %(polynomials[i]), lambdas[minimum_index], MSE[i].min())\n lambdas_min.append(int(minimum_index))\n\n #plt.pcolormesh(lambdas.tolist() + [lambdas[-1] + lambdas[1]], polynomials.tolist() + [polynomials[-1] + 1], MSE)\n #plt.colorbar()\n #plt.show()\n\n plt.title('MSE for the test data with ' + method)\n plt.contourf(lambdas, polynomials, MSE)\n plt.colorbar()\n plt.ylabel('Polynomial order', fontsize = 14)\n plt.xlabel('Lambda', fontsize = 14)\n try:\n plt.savefig(results_dir + save_fig + 'contour' + '.png')\n except:\n pass\n plt.show()\n\n plt.title('MSE for the test data with ' + method)\n plt.plot(lambdas, MSE[plot_indexes[0], :], label = 'k = ' + str(polynomials[plot_indexes[0]]))\n plt.plot(lambdas, MSE[plot_indexes[1], :], label = 'k = ' + str(polynomials[plot_indexes[1]]))\n plt.plot(lambdas, MSE[plot_indexes[2], :], label = 'k = ' + str(polynomials[plot_indexes[2]]))\n if l_min:\n plt.plot(lambdas[lambdas_min[1]], MSE[1, lambdas_min[1]], 'ro', label = 'Lambda min = %.4g' %(lambdas[lambdas_min[1]]))\n else:\n pass\n plt.legend()\n plt.xlabel('Lambda', fontsize = 14)\n plt.ylabel('MSE', fontsize = 14)\n plt.tight_layout()\n try:\n plt.savefig(results_dir + save_fig + '.png')\n except:\n pass\n plt.show()\n return lambdas_min",
"def find_knn_hyperparams():\n n_neighbors = np.arange(5, 10)\n ps = np.arange(1, 10)\n results = []\n\n for p in ps:\n result = []\n for _ in range(10):\n data = FaceDataset(\"embeddings/known\", n=50)\n train_data, train_labels = data.train()\n test_data, test_labels = data.test()\n accs = []\n for n in n_neighbors:\n clf = KNeighborsClassifier(n_neighbors=n, weights=\"distance\", p=p)\n clf, _ = train(clf, train_data, train_labels)\n acc, _ = test(clf, test_data, test_labels)\n accs.append(acc)\n result.append(accs)\n result = np.mean(result, axis=0)\n results.append(result)\n\n plots = []\n for i in range(len(ps)):\n p = plotly.graph_objs.Scatter(x=n_neighbors, y=results[i], name=\"p={}\".format(ps[i]))\n plots.append(p)\n\n plotly.offline.plot(plots, filename=\"knn.html\")\n print(\"C={}\".format(n_neighbors[np.argmax(results)]))",
"def Naive_Byse_k_folds(train_p):\n all_ex, att = make_examples(copy.deepcopy(train_p))\n F2I = parseAttributes(train_p[0])\n k=5\n accuracy = 0\n data = dev_train_sep(k,data=all_ex)\n for i in range(k):\n dev = data[i]\n train =[]\n for j in range(k):\n if not j == i:\n train += data[j]\n naive_bayes = NaiveBayes(train, dev, attributes=att, F2I=F2I)\n acc= naive_bayes.naiveBayes()\n accuracy +=acc\n avg_acu = \"{0:.2f}\".format(accuracy / k)\n print(\"Naive Byse : \" + str(avg_acu))\n return avg_acu",
"def run():\n \n #0. DEFINE PARAMETERS FOR OUR RUN\n seed = 1\n \n #not possible yet to run polynomial degrees at the same time.\n degrees = np.array([[10,11],[10,11,12],[14,15],[12,13]])\n k_fold = 4\n lambdas = [np.logspace(-2,2,50),np.logspace(-2,2,50),np.logspace(-2,2,50),np.logspace(-2,2,50)]\n\n export_file=\"test_split_data_param_5\"\n #1. LOAD THE DATA\n print('LOADING THE DATA: ',end=\" \")\n DATA_TRAIN_PATH = '../data/train.csv' # TODO: download train data and supply path here \n y, tX, ids = load_csv_data(DATA_TRAIN_PATH)\n print('DONE')\n \n #2. SPLITTING THE DATA\n \n print('SPLITTING THE DATA: ',end=\" \") \n degree_split = list()\n weight_split = list()\n error_split = list()\n lambda_split = list()\n median_split = list()\n mean_split = list()\n std_split = list()\n \n y_split,tx_split,id_split = split_dataset(y,tX,ids)\n print('DONE') \n #3. RUN CROSS VALIDATION TO GET BEST LAMBDA\n \n for split,(y_s,tx_s,id_s) in enumerate(zip(y_split,tx_split,id_split)):\n #To make sure they are arrays of the correct dimension\n y_s = np.squeeze(y_s)\n tx_s = np.squeeze(tx_s)\n print('\\n\\tCROSS VALIDATION FOR SPLIT NUMBER',split)\n #Perform cross validation and save best output\n best_degree, best_lambda_, best_error = cross_validation(y_s,tx_s,degrees[split],lambdas[split],k_fold,seed,split)\n degree_split.append(best_degree)\n lambda_split.append(best_lambda_) \n error_split.append(best_error)\n \n #4. TRAIN THE MODELS\n #Let us now clean the input\n tx_s = count_NaN(tx_s)\n tx_s,median_tr = sanitize_NaN(tx_s)\n tx_s,mean_tr,std_tr = standardize(tx_s)\n tx_s = build_poly(tx_s,best_degree)\n print('Size of the vectors',y_s.shape,tx_s.shape)\n weights = ridge_regression(y_s, tx_s, best_lambda_)\n \n #Save the calculation of the weights,median,mean,std for each model\n weight_split.append(weights)\n median_split.append(median_tr)\n mean_split.append(mean_tr)\n std_split.append(std_tr)\n \n print('Degrees',degree_split)\n print('Lambdas',lambda_split)\n print('Errors',error_split)\n #5. TEST THE MODEL AND EXPORT THE RESULTS\n prediction_data(median_split,mean_split,std_split,degree_split,weight_split,export_file)",
"def main():\n housing = pd.read_csv(\"Data/train_original.csv\")\n housing[\"TotalSF\"] = (\n housing[\"TotalBsmtSF\"] + housing[\"1stFlrSF\"] + housing[\"2ndFlrSF\"]\n )\n training_features, testing_features, training_target, testing_target = impute_dummify_and_split(\n housing, drop_target=False\n )\n\n p_values = [\n (c, pearsonr(training_features[\"SalePrice\"], training_features[c])[1])\n for c in training_features.columns\n ]\n\n p_value_limits = [0.05]\n\n result = []\n ps_and_cols = {}\n\n for p_value_limit in p_value_limits:\n\n high_ps = list(\n map(lambda t: t[0], sorted(p_values, key=lambda t1: t1[1])[:15])\n )\n\n print(training_features[high_ps].corr())\n\n columns = [p[0] for p in p_values if p[1] < p_value_limit]\n\n training_features_restricted = training_features[columns].drop(\n \"SalePrice\", axis=\"columns\"\n )\n\n testing_features_restricted = testing_features[columns].drop(\n \"SalePrice\", axis=\"columns\"\n )\n\n for model in (\n linear_model.Lasso(alpha=2.1),\n linear_model.Ridge(alpha=2.1),\n ):\n\n model.fit(training_features_restricted, training_target)\n\n train_score = model.score(\n training_features_restricted, training_target\n )\n\n test_score = model.score(\n testing_features_restricted, testing_target\n )\n\n name = str(model).split(\"(\")[0]\n\n result = result + [\n (\n \"_2_restrict_features\",\n name,\n \"p value limit: {:.3f}, alpha: 2.1\".format(p_value_limit),\n train_score,\n test_score,\n )\n ]\n\n print(ps_and_cols)\n return training_features[high_ps].corr()",
"def estimatewithMBAR(u_kln, N_k, reltol, regular_estimate=False):\n\n def plotOverlapMatrix(O):\n \"\"\"Plots the probability of observing a sample from state i (row) in state j (column).\n For convenience, the neigboring state cells are fringed in bold.\"\"\"\n max_prob = O.max()\n fig = pl.figure(figsize=(K/2.,K/2.))\n fig.add_subplot(111, frameon=False, xticks=[], yticks=[])\n\n for i in range(K):\n if i!=0:\n pl.axvline(x=i, ls='-', lw=0.5, color='k', alpha=0.25)\n pl.axhline(y=i, ls='-', lw=0.5, color='k', alpha=0.25)\n for j in range(K):\n if O[j,i] < 0.005:\n ii = ''\n elif O[j,i] > 0.995:\n ii = '1.00'\n else:\n ii = (\"%.2f\" % O[j,i])[1:]\n alf = O[j,i]/max_prob\n pl.fill_between([i,i+1], [K-j,K-j], [K-(j+1),K-(j+1)], color='k', alpha=alf)\n pl.annotate(ii, xy=(i,j), xytext=(i+0.5,K-(j+0.5)), size=8, textcoords='data', va='center', ha='center', color=('k' if alf < 0.5 else 'w'))\n\n if P.bSkipLambdaIndex:\n ks = [int(l) for l in P.bSkipLambdaIndex.split('-')]\n ks = numpy.delete(numpy.arange(K+len(ks)), ks)\n else:\n ks = range(K)\n for i in range(K):\n pl.annotate(ks[i], xy=(i+0.5, 1), xytext=(i+0.5, K+0.5), size=10, textcoords=('data', 'data'), va='center', ha='center', color='k')\n pl.annotate(ks[i], xy=(-0.5, K-(j+0.5)), xytext=(-0.5, K-(i+0.5)), size=10, textcoords=('data', 'data'), va='center', ha='center', color='k')\n pl.annotate('$\\lambda$', xy=(-0.5, K-(j+0.5)), xytext=(-0.5, K+0.5), size=10, textcoords=('data', 'data'), va='center', ha='center', color='k')\n pl.plot([0,K], [0,0], 'k-', lw=4.0, solid_capstyle='butt')\n pl.plot([K,K], [0,K], 'k-', lw=4.0, solid_capstyle='butt')\n pl.plot([0,0], [0,K], 'k-', lw=2.0, solid_capstyle='butt')\n pl.plot([0,K], [K,K], 'k-', lw=2.0, solid_capstyle='butt')\n\n cx = sorted(2*range(K+1))\n cy = sorted(2*range(K+1), reverse=True)\n pl.plot(cx[2:-1], cy[1:-2], 'k-', lw=2.0)\n pl.plot(numpy.array(cx[2:-3])+1, cy[1:-4], 'k-', lw=2.0)\n pl.plot(cx[1:-2], numpy.array(cy[:-3])-1, 'k-', lw=2.0)\n pl.plot(cx[1:-4], numpy.array(cy[:-5])-2, 'k-', lw=2.0)\n\n pl.xlim(-1, K)\n pl.ylim(0, K+1)\n pl.savefig(os.path.join(P.output_directory, 'O_MBAR.pdf'), bbox_inches='tight', pad_inches=0.0)\n pl.close(fig)\n return\n\n if regular_estimate:\n print \"\\nEstimating the free energy change with MBAR...\"\n MBAR = pymbar.mbar.MBAR(u_kln, N_k, verbose = P.verbose, relative_tolerance = reltol, initialize = P.init_with)\n # Get matrix of dimensionless free energy differences and uncertainty estimate.\n (Deltaf_ij, dDeltaf_ij, theta_ij ) = MBAR.getFreeEnergyDifferences(uncertainty_method='svd-ew', return_theta = True)\n if P.verbose:\n print \"Matrix of free energy differences\\nDeltaf_ij:\\n%s\\ndDeltaf_ij:\\n%s\" % (Deltaf_ij, dDeltaf_ij)\n if regular_estimate:\n if P.overlap:\n print \"The overlap matrix is...\"\n O = MBAR.computeOverlap()[2]\n for k in range(K):\n line = ''\n for l in range(K):\n line += ' %5.2f ' % O[k, l]\n print line\n plotOverlapMatrix(O)\n print \"\\nFor a nicer figure look at 'O_MBAR.pdf'\"\n return (Deltaf_ij, dDeltaf_ij)\n return (Deltaf_ij[0,K-1]/P.beta_report, dDeltaf_ij[0,K-1]/P.beta_report)",
"def degree_average(n0,l,nt,m,display=False):\t\n\n# initialise variables\n\n P = np.zeros((n0+nt,m))\n Pave = np.zeros((n0+nt,m))\n PD = np.zeros((n0+nt,m))\n \n# call stats to assing qnetm\n \n qnetm, _, _ = ns.stats(n0,l,nt,m)\n\n# create Pave \n# extract the unique list of qnet values from n=1->m realizations \n# take the average degree over the n realizations \n\n for n in range(m):\n P,Q = np.unique(qnetm[:,n],return_counts=True)\n for k in range(len(P)):\n PD[P[k],n] = Q[k]\n\n# normalize Pave and remove zeros\n \n Pave = np.mean(PD, axis=1)/(n0+nt)\n Pave = Pave[Pave>0]\n \n# declare our domain of 1->Pave realizations\n\n x = np.arange(1,np.size(Pave)+1)\n\n# seek to solve for k and a satisfying Pave = a*x**(-b)\n# reduce problem to log(Pave) = c - k*log(x) (c = log(a), and flip sgn(b) for now)\n\n b,c = np.polyfit(np.log(x), np.log(Pave), 1)\n\n# create log-log plot for when display is true\n\n if display:\n plt.figure()\n plt.plot(np.log(x), np.log(Pave), 'b')\n plt.plot(np.log(x), c + b*np.log(x), 'r')\n plt.xlabel('log(x) x=1->size(Pave)')\n plt.ylabel('log(Pave)')\n plt.title('log-log plot of x against Pave with power law fit')\n plt.legend(loc='best')\n plt.show()\n\n return -b",
"def plotBestFitOfAllData(x_samples, y_samples, x_poly, y_poly, order, plotFlag= True):\n train(x_samples, y_samples, x_poly, y_poly, order, plotFlag= True) \n plt.title(\"Polynomial function regression\")\n plt.grid()\n plt.plot(x_poly, y_poly, c='black', label='true function')\n plt.scatter(x_samples, y_samples, s=20, c='green', label='sample')\n plt.legend()\n plt.show()",
"def calPFP(n, m, k):\n return pow(1-math.exp(-k*(n+0.5)/(m-1)), k)",
"def MSE_plots(n_min, n_max, save_fig, k = [5], method = 'OLS', lamb = 1, split = False, train = 0.7, N = 1, method2 = 'OLS'):\n n = np.linspace(n_min, n_max, n_max - n_min + 1)\n errors = np.zeros((4, len(k), len(n))) # First index MSE for real FrankeFunction, MSE for the data, R2 for the real FrankeFunction, R2 for the data\n #Second index is the max order of polynomial, third index is for the n-value\n if type(k) != type([2]):\n k = [k]\n\n for j in range(N):\n #print(j)\n for i in range(len(n)):\n #print(i)\n x = np.random.uniform(0, 1, size = int(n[i]))\n y = np.random.uniform(0, 1, size = int(n[i]))\n x, y = np.meshgrid(x, y)\n\n z = FrankeFunction(x, y) + np.random.normal(0, 1, size = x.shape)\n z_real = FrankeFunction(x, y)\n\n for poly in range(len(k)):\n a = regression(x, y, z, k = k[poly], split = split, train = train)\n\n if method == 'OLS':\n beta = a.OLS()\n elif method == 'Ridge':\n beta = a.Ridge(lam = lamb)\n elif method == 'Lasso':\n beta = a.Lasso(alpha = lamb)\n elif method == 'K-fold':\n beta = a.k_cross(fold = 25, method2 = method2, lam = lamb)[0]\n\n if split == True:\n X = a.design_matrix(k = k[poly])\n X_train, X_test, z_real_train, z_real_test = a.train_test(X = X, z = z_real, train = train)\n z_tilde = a.z_tilde(X = X_test, beta = beta)\n errors[0, poly, i] += a.MSE(z_tilde, z_real_test)\n errors[1, poly, i] += a.MSE(z_tilde, a.z_test)\n errors[2, poly, i] += a.R_squared(z_tilde = z_tilde, z = z_real_test)\n errors[3, poly, i] += a.R_squared(z_tilde = z_tilde, z = a.z_test)\n else:\n z_tilde = a.z_tilde(beta = beta)\n errors[0, poly, i] += a.MSE(z_tilde, z_real)\n errors[1, poly, i] += a.MSE(z_tilde, z)\n errors[2, poly, i] += a.R_squared(z_tilde = z_tilde, z = z_real)\n errors[3, poly, i] += a.R_squared(z_tilde = z_tilde, z = z)\n\n n_mid = int(len(n)/2)\n title = ['MSE FrankeFunction', 'MSE data', 'R2 FrankeFunction', 'R2 data']\n y_label = ['MSE', 'MSE', 'R^2', 'R^2']\n errors /= N\n save_name = ['franke', 'data', 'franke', 'data']\n\n if method == 'Ridge':\n method += ' with lambda = ' + str(lamb)\n if method == 'K-fold':\n method += ' using ' + method2\n if method2 == 'Ridge' or method2 == 'Lasso':\n method += ' with lambda = ' + str(lamb)\n\n for i in range(4):\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 7))\n for j in range(len(k)):\n ax1.plot(n[:n_mid], errors[i, j, :n_mid], label = 'k = ' + str(k[j]))\n ax2.plot(n[n_mid:], errors[i, j, n_mid:], label = 'k = ' + str(k[j]))\n\n ax1.set_ylabel(y_label[i]); ax2.set_ylabel(y_label[i])\n ax1.set_xlabel('n'); ax2.set_xlabel('n')\n\n if split == True:\n fig.suptitle(title[i] + ' with ' + str(method) + ' with test/training split at ' + str(train) + ' and mean of ' + str(N) + ' runs.')\n else:\n fig.suptitle(title[i] + ' with ' + str(method) + ' without test/training split' + ' and mean of ' + str(N) + ' runs.')\n\n ax1.legend(); ax2.legend()\n #fig.savefig(results_dir + save_fig + method + save_name[i] + y_label[i] + '.png')\n plt.show()",
"def get_fitted_k(col_p, ref_p, ias, ps, no_cloud_mask, k):\n\n from scipy.optimize import curve_fit\n\n # fitting function\n def func(x, a, b):\n return x[0, :]/x[1, :]-k-(a*(1.0/x[2, :])+b*np.log10(x[3, :]))\n\n ix = np.where(no_cloud_mask == 1)[0]\n xdata = np.vstack([np.array(col_p)[ix, :].ravel(),\n np.array(ref_p)[ix, :].ravel(),\n np.array(ias)[ix, :].ravel(),\n np.array(ps)[ix, :].ravel()])\n popt, pcov = curve_fit(func, xdata, xdata[0, :]*0.0)\n return (k+(popt[0]*(1.0/ias)+popt[1]*np.log10(ps)), popt)",
"def brute_leastsquare_fit(fun, x_data, y_data,weight_data=None,p_names=None,p_min_max_steps_dict=None,\r\n const_params=[], visualize=False):\r\n \r\n if p_names == None or p_min_max_steps_dict==None:\r\n raise Exception ('p_names and p_min_max_steps must be given!'+ \r\n 'structure of p_min_max_steps_dict: {\"pname0\":[min0,max0,brute_steps0]}')\r\n \r\n params = Parameters() ### initialize LMfit parameters\r\n for p_name in p_names:\r\n min_val=p_min_max_steps_dict[p_name][0]\r\n max_val=p_min_max_steps_dict[p_name][1]\r\n steps=p_min_max_steps_dict[p_name][2]\r\n params.add(p_name,value=min_val,\r\n min=min_val,\r\n max=max_val,\r\n brute_step=(max_val-min_val)/(steps-1))\r\n \r\n ### define function to be minimized for fit \r\n \r\n def cost_function_fit(p=params):\r\n def minimize_fun(pars):\r\n \r\n v=pars.valuesdict()\r\n arglist=[]\r\n for p_name in p_names:\r\n arglist.append(v[p_name])\r\n \r\n for const_param in const_params:\r\n arglist.append(const_param)\r\n \r\n ret=np.array((fun(x_data,*arglist)-y_data),dtype=float)\r\n if weight_data is not None:\r\n ret=ret*np.sqrt(weight_data)\r\n return(ret)\r\n brute_result=lmfit.minimize(minimize_fun,params,method='brute',nan_policy='omit')\r\n best_result=copy.deepcopy(brute_result)\r\n for candidate in brute_result.candidates[0:5]:\r\n trial = lmfit.minimize(minimize_fun, params=candidate.params,method='leastsq',nan_policy='omit')\r\n if trial.chisqr < best_result.chisqr:\r\n best_result = trial\r\n \r\n return((best_result,brute_result))\r\n \r\n best_result,brute_result = cost_function_fit()\r\n arg_list=[]\r\n for p_name in p_names:\r\n arg_list.append(best_result.params.valuesdict()[p_name])\r\n for const_param in const_params:\r\n arg_list.append(const_param)\r\n \r\n \r\n if visualize == True:\r\n plot_brute_leastsquares_results(brute_result,leastsq_fit_result=best_result)\r\n plt.figure()\r\n plt.plot(x_data,y_data,label='data',color='blue')\r\n plt.plot(x_data,fun(x_data,*arg_list),label='Fit',color='red')\r\n plt.title(best_result.params.valuesdict())\r\n plt.show()\r\n return (arg_list[0:len(p_names)])",
"def trainNN():\n\n yTrain = [] # holds y vals of curves/lines\n trainLabels = [] # holds center labels\n\n tryCenters = np.linspace(1, 9, 45)\n\n for i in range(len(tryCenters)):\n x = np.linspace(tryCenters[i]-.2, tryCenters[i]+.2, 18)\n for j in range(1000):\n centers = round(random.uniform(tryCenters[i]-.05,\n tryCenters[i]+.05), 1)\n y = gauss_func(x, .05, centers, 1)\n yTrain.append(y)\n trainLabels.append(1)\n\n y = gauss_func(x, .05,\n round(random.uniform(tryCenters[i]-.3,\n tryCenters[i]-.17), 1), 1)\n yTrain.append(y)\n trainLabels.append(0)\n\n y = gauss_func(x, .05,\n round(random.uniform(tryCenters[i]+.17,\n tryCenters[i]+.3), 1), 1)\n yTrain.append(y)\n trainLabels.append(0)\n\n y = 0*x\n yTrain.append(y)\n trainLabels.append(0)\n clf = MLPClassifier(solver='lbfgs')\n clf.fit(yTrain, trainLabels)\n return clf",
"def predict(x):\n file_train = open('trains.pkl', \"rb\")\n train = pkl.load(file_train)\n y = []\n k = 5\n x_train = train[0]\n y_train = train[1]\n for q in range(100):\n distance = []\n for i in range(800):\n distance.append(np.linalg.norm(x[q] - x_train[i]))\n\n # distance.append(np.sqrt(sum((x[q] - x_train[i]) ** 2)))\n # u = (x[0] - x_train) ** 2\n # print(distance)\n # distance = np.sqrt([sum(b) for b in u])\n # print(distance)\n minarg = np.argsort(distance)\n i = np.array(np.zeros(10))\n j = 0\n while k not in i:\n i[y_train[minarg[j]]] += 1\n j += 1\n y.append(np.argmax(i))\n return y",
"def fig_2_11V2(x, y, z, first_poly = 4, complexity = 10, N = 7, method = 'OLS', seed = 42, lam = 0, folds = 5, save_fig = ''):\n errors = np.zeros((4, complexity + 1))\n bias = np.zeros(complexity + 1)\n variance = np.zeros(complexity + 1)\n z_real = FrankeFunction(x, y)\n\n complx = np.arange(first_poly, first_poly + complexity + 1, 1)\n\n MSE = np.zeros(complexity + 1)\n\n for i in range(complexity + 1):\n print(i)\n model = regression(x, y, z, k = first_poly + i, split = False, seed = seed)\n\n for j in range(N):\n _, MSE_R2D2, _, _, _, _ = model.k_cross(fold = folds, method2 = method, lam = lam, random_num = True)\n errors[:, i] += np.mean(MSE_R2D2, axis = 0)\n\n errors /= N\n\n print(errors)\n\n\n plt.title(method + ' Test vs Train error in k-fold with ' + str(folds) + '-folds')\n plt.plot(complx, errors[0], 'go--', label = 'Test', color = 'blue')\n plt.plot(complx, errors[2], 'go--', label = 'Training', color = 'red')\n #plt.ylim([np.min(errors_R2[2]*1.2), np.max(errors_R2[0]*1.2)])\n plt.legend()\n plt.xlabel('Polynomial maximum order', fontsize = 14)\n plt.ylabel('MSE', fontsize = 14)\n plt.savefig(results_dir + 'tradeoff2MSE' + method + save_fig + '.png')\n\n plt.show()\n\n plt.title(method + ' Test vs Train error in k-fold with ' + str(folds) + '-folds')\n plt.xlabel('Polynomial maximum order', fontsize = 14)\n plt.ylabel('R2', fontsize = 14)\n plt.plot(complx, errors[1], 'go--', label = 'Test', color = 'blue')\n plt.plot(complx, errors[3], 'go--', label = 'Training', color = 'red')\n #plt.ylim([np.min(errors_R2[3]*1.2), np.max(errors_R2[1]*1.2)])\n plt.legend()\n plt.tight_layout()\n plt.savefig(results_dir + 'tradeoff2R2' + method + save_fig + '.png')\n plt.show()",
"def part_3(l, m, n):\n # initilize variables\n size = 50000\n samples, loops, R = size / 10, 20, None\n lrs = [1.5, 0.25, 0.03, 0.005, 0.001]\n alphas = [1.1, 1.01, 1.005, 1.0005, 1.0001]\n gammas = [2.0, 0.3, 0.04, 0.006, 0.001]\n \n # divide dataset: 10% - training, 10% - test.\n # y, x = [], []\n (y, x) = gen(l, m, n, 10, False)\n (y_train, x_train) = gen(l, m, n, 50000, True)\n (y_test, x_test) = gen(l, m, n, 10000, False)\n \n # build online learning algorithms\n perceptron = Perceptron(R, x_train, x_test, y_train, y_test, n, size, size)\n perceptron_margin = Perceptron_Margin(R, x, x_train, x_test, y, y_train, y_test, n, samples, size, loops, lrs)\n winnon = Winnon(R, x, x_train, x_test, y, y_train, y_test, n, samples, size, loops, alphas)\n winnon_margin = Winnon_Margin(R, x, x_train, x_test, y, y_train, y_test, n, samples, size, loops, alphas, gammas)\n adagrad = AdaGrad(R, x, x_train, x_test, y, y_train, y_test, n, samples, size, loops, lrs)",
"def find_best_free_param_configuration_LOO_corpus_smoothing(p,alpha_range,beta_range,lambda_range):\n measures_res = linux_base_path+ \"/measures_res_corpus_smoothing_corpus_beta_\"+str(corpus_beta)+\"/\"\n# measures_res = \"/IBM_STORAGE/USERS_DATA/liorab/baseline_ret/sen_ret_corpus_smoothing_res/nDCG_MAP_res_corpus_smoothing/\"\n# measures_res = base_path +\"\\\\measures_res\\\\\"\n claim_dict = read_pickle(\"claim_dict\")\n claim_num_list = [4,7,17,21,36,37,39,40,41,42,45,46,47,50,51,53,54,55,57,58,59,60,61,62,66,69,70,79,80]\n# claim_num_list = [4,47,53,58,7,54]\n best_configuration_for_nDCG_AP_prec_at_k_left_out_res = {} #key is left out claim and and value is the alpha,beta,lambda configuration that led to best measures - avg nDCG and AP across the train claims\n nDCG_AP_prec_at_k_res_of_left_out_in_its_best_conf = {} #key - left out claim num, and value is the nDCG and AP of it, in the best configuration without it.\n \n k_val = 50\n prec_at_k_train = rcdtype.recordtype('prec_at_k_train', 'at_5 at_10')\n max_prec_at_k = rcdtype.recordtype('max_prec_at_k', 'max_val max_conf')\n \n for left_out_claim_indx in range(len(claim_num_list)):\n max_nDCG = 0\n max_MAP = 0\n max_nDCG_conf = []\n max_MAP_conf = []\n max_prec_at_5 = max_prec_at_k(0,\"\")\n max_prec_at_10 = max_prec_at_k(0,\"\")\n \n left_out_claim_num = claim_num_list[left_out_claim_indx]\n temp_claim_num_list = claim_num_list[:]\n temp_claim_num_list.remove(left_out_claim_num)\n for alpha in range(alpha_range.start,alpha_range.end,1): #change just for test!\n for beta in range(beta_range.start,beta_range.end,1):\n for lambda_int in range(lambda_range.start,lambda_range.end,1):\n lambda_f = turn_to_float([lambda_int])\n (alpha_f,beta_f) = turn_to_float([alpha,beta])\n AP_all_claims_curr_param_values = read_pickle(measures_res+\"AP_all_claims_alpha_\"+str(alpha_f)+\"_beta_\"+str(beta_f)+\"_top_k_docs_\"+str(k_val)+\"_lambda_\"+str(lambda_f))\n nDCG_all_claims_curr_param_values = read_pickle(measures_res+\"NDCG_all_claims_alpha_\"+str(alpha_f)+\"_beta_\"+str(beta_f)+\"_top_k_docs_\"+str(k_val)+\"_lambda_\"+str(lambda_f)+\"_at_\"+str(p))\n prec_at_k_all_claims_params_values = read_pickle(measures_res+\"prec_at_k_all_claims_\"+str(alpha_f)+\"_beta_\"+str(beta_f)+\"_top_k_docs_\"+str(k_val)+\"_lambda_\"+str(lambda_f))\n avg_nDCG_on_train = 0\n MAP_on_train = 0\n p_at_k_train_avg = prec_at_k_train(0,0)\n for clm_num_train in temp_claim_num_list:\n# avg_nDCG_on_train += nDCG_AP_all_claims_all_param_values[str(clm_num_train),alpha_f,beta_f,lambda_f][0]\n# MAP_on_train += nDCG_AP_all_claims_all_param_values[str(clm_num_train),alpha_f,beta_f,lambda_f][1]\n avg_nDCG_on_train += nDCG_all_claims_curr_param_values[str(clm_num_train)]\n MAP_on_train += AP_all_claims_curr_param_values[str(clm_num_train)] #in this config' -> get the measures\n p_at_k_train_avg.at_5 += prec_at_k_all_claims_params_values[str(clm_num_train)][0]\n p_at_k_train_avg.at_10 += prec_at_k_all_claims_params_values[str(clm_num_train)][1]\n avg_nDCG_on_train = float(float(avg_nDCG_on_train)/float(len(temp_claim_num_list)))\n MAP_on_train = float(float(MAP_on_train)/float(len(temp_claim_num_list)))\n p_at_k_train_avg.at_5 = float(float(p_at_k_train_avg.at_5)/float(len(temp_claim_num_list)))\n p_at_k_train_avg.at_10 = float(float(p_at_k_train_avg.at_10)/float(len(temp_claim_num_list)))\n \n if avg_nDCG_on_train > max_nDCG:\n max_nDCG = avg_nDCG_on_train\n max_nDCG_conf = (alpha_f,beta_f,lambda_f)\n if MAP_on_train > max_MAP:\n max_MAP = MAP_on_train\n max_MAP_conf = 
(alpha_f,beta_f,lambda_f)\n if p_at_k_train_avg.at_5 > max_prec_at_5.max_val:\n max_prec_at_5.max_val = p_at_k_train_avg.at_5\n max_prec_at_5.max_conf = (alpha_f,beta_f,lambda_f)\n if p_at_k_train_avg.at_10 > max_prec_at_10.max_val:\n max_prec_at_10.max_val = p_at_k_train_avg.at_10\n max_prec_at_10.max_conf = (alpha_f,beta_f,lambda_f)\n best_configuration_for_nDCG_AP_prec_at_k_left_out_res[left_out_claim_num] = [(max_nDCG,max_nDCG_conf),(max_MAP,max_MAP_conf),(max_prec_at_5.max_val,max_prec_at_5.max_conf),(max_prec_at_10.max_val,max_prec_at_10.max_conf)]\n #finished leaving out,\n #now calculate the nDCG and MAP of the left out claims with its best configuration results\n avg_nDCG_on_left_out = 0\n MAP_on_left_out = 0\n avg_prec_at_5_on_left_out = 0\n avg_prec_at_10_on_left_out = 0\n #17/11/14 update\n avg_nDCG_on_left_out_based_on_best_AP_conf = 0\n avg_prec_at_5_on_left_out_based_on_best_AP_conf = 0\n avg_prec_at_10_on_left_out_based_on_best_AP_conf = 0\n #end update\n for clm_num in claim_num_list:\n (best_alpha_nDCG,best_beta_nDCG,best_lambda_nDCG) = best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][0][1]\n (best_alpha_MAP,best_beta_MAP,best_lambda_MAP) = best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][1][1]\n (best_alpha_prec_at_5,best_beta_prec_at_5,best_lambda_prec_at_5) = best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][2][1]\n (best_alpha_prec_at_10,best_beta_prec_at_10,best_lambda_prec_at_10) = best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][3][1]\n \n# nDCG_AP_prec_at_k_res_of_left_out_in_its_best_conf[clm_num] = (nDCG_AP_all_claims_all_param_values[str(clm_num),best_alpha_nDCG,best_beta_nDCG,best_lambda_nDCG][0],nDCG_AP_all_claims_all_param_values[str(clm_num),best_alpha_MAP,best_beta_MAP,best_lambda_MAP][1])\n# avg_nDCG_on_left_out += nDCG_AP_prec_at_k_res_of_left_out_in_its_best_conf[clm_num][0]\n# MAP_on_left_out += nDCG_AP_prec_at_k_res_of_left_out_in_its_best_conf[clm_num][1]\n #read the best config' dict\n best_config_nDCG_dict = read_pickle(measures_res+\"NDCG_all_claims_alpha_\"+str(best_alpha_nDCG)+\"_beta_\"+str(best_beta_nDCG)+\"_top_k_docs_\"+str(k_val)+\"_lambda_\"+str(best_lambda_nDCG)+\"_at_\"+str(p))\n best_config_AP_dict = read_pickle(measures_res+\"AP_all_claims_alpha_\"+str(best_alpha_MAP)+\"_beta_\"+str(best_beta_MAP)+\"_top_k_docs_\"+str(k_val)+\"_lambda_\"+str(best_lambda_MAP))\n best_config_prec_at_5_dict = read_pickle(measures_res+\"prec_at_k_all_claims_\"+str(best_alpha_prec_at_5)+\"_beta_\"+str(best_beta_prec_at_5)+\"_top_k_docs_\"+str(k_val)+\"_lambda_\"+str(best_lambda_prec_at_5)) #take only the first item in the tuple\n best_config_prec_at_10_dict = read_pickle(measures_res+\"prec_at_k_all_claims_\"+str(best_alpha_prec_at_10)+\"_beta_\"+str(best_beta_prec_at_10)+\"_top_k_docs_\"+str(k_val)+\"_lambda_\"+str(best_lambda_prec_at_10)) #take only the second item in the tuple\n # 17/11/14 update - report the p@5,p@10, and nDCG in the configuration that is the best for AP\n nDCG_from_best_AP_conf_dict = read_pickle(measures_res+\"NDCG_all_claims_alpha_\"+str(best_alpha_MAP)+\"_beta_\"+str(best_beta_MAP)+\"_top_k_docs_\"+str(k_val)+\"_lambda_\"+str(best_lambda_MAP)+\"_at_\"+str(p))\n prec_at_k_from_best_AP_conf_dict = read_pickle(measures_res+\"prec_at_k_all_claims_\"+str(best_alpha_MAP)+\"_beta_\"+str(best_beta_MAP)+\"_top_k_docs_\"+str(k_val)+\"_lambda_\"+str(best_lambda_MAP))\n # 17/11/14 update - add the last three values ->the nDCG and prec@k from the conf best for AP\n 
nDCG_AP_prec_at_k_res_of_left_out_in_its_best_conf[clm_num] = (best_config_nDCG_dict[str(clm_num)],best_config_AP_dict[str(clm_num)],best_config_prec_at_5_dict[str(clm_num)][0],best_config_prec_at_10_dict[str(clm_num)][1],nDCG_from_best_AP_conf_dict[str(clm_num)],prec_at_k_from_best_AP_conf_dict[str(clm_num)][0],prec_at_k_from_best_AP_conf_dict[str(clm_num)][1])\n avg_nDCG_on_left_out += best_config_nDCG_dict[str(clm_num)]\n MAP_on_left_out += best_config_AP_dict[str(clm_num)]\n avg_prec_at_5_on_left_out += best_config_prec_at_5_dict[str(clm_num)][0]\n avg_prec_at_10_on_left_out += best_config_prec_at_10_dict[str(clm_num)][1]\n #17/11/14 update\n avg_nDCG_on_left_out_based_on_best_AP_conf += nDCG_from_best_AP_conf_dict[str(clm_num)]\n avg_prec_at_5_on_left_out_based_on_best_AP_conf += prec_at_k_from_best_AP_conf_dict[str(clm_num)][0]\n avg_prec_at_10_on_left_out_based_on_best_AP_conf += prec_at_k_from_best_AP_conf_dict[str(clm_num)][1]\n #end update\n save_pickle(measures_res+\"nDCG_AP_res_of_left_out_in_its_best_conf_k_top_docs_\"+str(k_val)+\"_at_\"+str(p), nDCG_AP_prec_at_k_res_of_left_out_in_its_best_conf)\n save_pickle(measures_res+\"best_configuration_for_nDCG_AP_prec_at_k_left_out_res_k_top_docs_\"+str(k_val)+\"_at_\"+str(p),best_configuration_for_nDCG_AP_prec_at_k_left_out_res)\n #report the avg\n avg_nDCG_on_left_out = float(float(avg_nDCG_on_left_out)/float(len(claim_num_list))) \n MAP_on_left_out = float(float(MAP_on_left_out)/float(len(claim_num_list))) \n avg_prec_at_5_on_left_out = float(float(avg_prec_at_5_on_left_out)/float(len(claim_num_list)))\n avg_prec_at_10_on_left_out = float(float(avg_prec_at_10_on_left_out)/float(len(claim_num_list)))\n #17/11/14 update\n avg_nDCG_on_left_out_based_on_best_AP_conf = float(float(avg_nDCG_on_left_out_based_on_best_AP_conf)/float(len(claim_num_list)))\n avg_prec_at_5_on_left_out_based_on_best_AP_conf = float(float(avg_prec_at_5_on_left_out_based_on_best_AP_conf)/float(len(claim_num_list)))\n avg_prec_at_10_on_left_out_based_on_best_AP_conf = float(float(avg_prec_at_10_on_left_out_based_on_best_AP_conf)/float(len(claim_num_list)))\n #end update\n \n #write res to file:\n # claim text, the best nDCG conf and result on train, the nDCG it really has, and the same for AP\n with open(measures_res+\"nDCG_AP_prec_at_k_res_of_left_out_in_its_best_conf_k_top_docs_\"+str(k_val)+\"_at_\"+str(p)+\".csv\", 'wb') as csvfile:\n w = csv.writer(csvfile, delimiter='&', dialect='excel')\n row = \"claim&best_conf_nDCG&best_conf_for_nDCG&best_conf_AP&best_conf_for_AP&best_prec_at_5&best_prec_at_5&best_prec_at_10&best_conf_for_prec_at_10&nDCG_in_best_AP_conf&prec_at_5_in_best_AP_conf&prec_at_10_in_best_AP_conf\"\n w.writerow([row])\n for (clm_num,(nDCG,AP,prec_at_5,prec_at_10,nDCG_based_on_best_AP_conf,p_at_5_based_on_best_AP_conf,p_at_10_based_on_best_AP_conf)) in nDCG_AP_prec_at_k_res_of_left_out_in_its_best_conf.items():\n row = claim_dict[str(clm_num)]+\"&\"+'%.3f'%nDCG+\"&\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][0][1][0])+\",\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][0][1][1])+\",\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][0][1][2])\n row += \"&\"+'%.3f'%AP+\"&\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][1][1][0])+\",\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][1][1][1])+\",\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][1][1][2])\n row += \"&\"+'%.3f'%prec_at_5+ 
\"&\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][2][1][0])+\",\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][2][1][1])+\",\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][2][1][2])\n row += \"&\"+'%.3f'%prec_at_10+ \"&\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][3][1][0])+\",\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][3][1][1])+\",\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][3][1][2])\n row += \"&\"+'%.3f'%nDCG_based_on_best_AP_conf+\"&\"+'%.3f'%p_at_5_based_on_best_AP_conf+\"&\"+'%.3f'%p_at_10_based_on_best_AP_conf\n w.writerow([row])\n w.writerow([\"avg_nDCG_on_left_out: \"+ '%.4f'%avg_nDCG_on_left_out ])\n w.writerow([\"MAP_on_left_out: \"+ '%.4f'%MAP_on_left_out])\n w.writerow([\"avg_prec_at_5_on_left_out: \"+ '%.4f'%avg_prec_at_5_on_left_out])\n w.writerow([\"avg_prec_at_10_on_left_out: \"+ '%.4f'%avg_prec_at_10_on_left_out])\n w.writerow([\"avg_nDCG_on_left_out_based_on_best_AP_conf: \"+ '%.4f'%avg_nDCG_on_left_out_based_on_best_AP_conf])\n w.writerow([\"avg_prec_at_5_on_left_out_based_on_best_AP_conf: \"+ '%.4f'%avg_prec_at_5_on_left_out_based_on_best_AP_conf])\n w.writerow([\"avg_prec_at_10_on_left_out_based_on_best_AP_conf: \"+ '%.4f'%avg_prec_at_10_on_left_out_based_on_best_AP_conf])",
"def test_pl_many(k, l, th_list):\n patterns = build_pattern_dict(k, l)\n best_factor = 0\n for x_th, y_th in th_list:\n try:\n factor = test_pl(k, l, x_th, y_th, patterns=patterns)\n if factor > best_factor:\n print(\"New best factor {} @ (x_th,y_th) = ({},{})\".format(factor, x_th, y_th))\n best_factor = factor\n except:\n print(\"An error occured while solving the LP\")\n print(\"Final best factor {}\".format(best_factor))",
"def find_best_k(vector_array, save_plot_dir, max_k=100):\n\n cost = []\n dim = vector_array.shape[1]\n for i in range(1, max_k):\n kmeans = KMeans(n_clusters=i, random_state=0)\n kmeans.fit(vector_array)\n cost.append(kmeans.inertia_)\n\n # plot the cost against K values\n plt.plot(range(1, max_k), cost, color='g', linewidth='3')\n plt.xlabel(\"Value of K\")\n plt.ylabel(\"Squared Error (Cost)\")\n plt.savefig(save_plot_dir + '/cost_' + str(dim) + 'D.png')\n plt.close()",
"def work(i, kfolds, alphas):\n\t# load data\n\tTheta = np.loadtxt('Regression_Data/Theta.txt')\n\tdadt = np.loadtxt('Regression_Data/a_dot.txt')\n\tnsamples, nfeatures = Theta.shape\n\tnn = dadt.shape[1]\n \n\t# average mean square error across the folds\n\tMSE_mean = np.zeros(len(alphas))\n\tMSE_std = np.zeros(len(alphas))\n\tMSE_full = np.zeros(len(alphas))\n\tMSE_full_rel = np.zeros(len(alphas))\n\n\t# number of nonzero coefficients\n\tnnz = np.zeros(len(alphas))\n\tcomm = MPI.COMM_WORLD\n\t# coefficients\n\tcoeffs = np.zeros((len(alphas), nfeatures))\n\n\tfor j, alpha in enumerate(alphas):\n\t\tmodel = linear_model.LassoCV(cv=kfolds,\n\t\t\t\t\t\talphas=[alpha],\n\t\t\t\t\t\tfit_intercept=False,\n\t\t\t\t\t\tmax_iter=3000,\n\t\t\t\t\t\ttol=1e-4).fit(Theta, dadt[:, i])\n \n\t\n\t\tprint('Worker %d :: doing alpha=%.2e :: completed %.2f %%\\n' % (comm.Get_rank(), model.alpha_, 100.0*float(j+1)/len(alphas)))\n\n\t\tsys.stdout.flush()\n\t\t# apparently this mse_path is already taking into\n\t\t# account the whole dataset, so we do not need to multiply by kfolds\n\t\tcoeffs[j] = model.coef_\n\t\tMSE_mean[j] = np.sqrt(nsamples*np.mean(model.mse_path_))\n\t\tMSE_std[j] = np.sqrt(np.std(nsamples*model.mse_path_))\n\n\t\t#MSE_full_rel[j] = np.mean(((np.dot(Theta, model.coef_) - dadt[:, i])**2)/np.linalg.norm(dadt[:, i])**2)\n\t\tMSE_full_rel[j] = np.mean(np.linalg.norm(np.dot(Theta, model.coef_) - dadt[:, i])/np.linalg.norm(dadt[:, i]))\t\t\n\t\t\n\t\t#MSE_full[j] = np.mean((np.dot(Theta, model.coef_) - dadt[:, i])**2)\t\t\n\t\tMSE_full[j] = np.mean(np.linalg.norm(np.dot(Theta, model.coef_) - dadt[:, i]))\n\t\t\n\t\tnnz[j] = np.count_nonzero(model.coef_)\n\n\t\t# save data\n\t\ttry:\n\t\t\t#shutil.rmtree('Regression_Results')\n\t\t\tos.mkdir('Regression_Results')\n\t\texcept OSError:\n\t\t\tpass\n\n\t\t\n\t\tnp.savetxt('Regression_Results/MSE_mean_%03d' % i, MSE_mean,delimiter=' ')\n\t\tnp.savetxt('Regression_Results/MSE_std_%03d' % i, MSE_std,delimiter=' ')\n\t\tnp.savetxt('Regression_Results/MSE_full_%03d' % i, MSE_full,delimiter= ' ')\n\t\tnp.savetxt('Regression_Results/MSE_full_rel_%03d' % i, MSE_full_rel,delimiter= ' ')\n\t\tnp.savetxt('Regression_Results/coeffs_%03d' % i, coeffs,delimiter = ' ')\n\t\tnp.savetxt('Regression_Results/nnz_%03d' % i, nnz,delimiter = ' ')\n\n\t\tprint('Done i = %03d\\n' % i)\n\treturn True",
"def leaveKout_CV(X, y, n_scz_te, rep, perms, classifiers, parameters, count,\n freq_bands, x_size, auc, nz_coef_idx, nz_coef_val, n_BAitaSig = None):\n \n skf = StratifiedKFold(n_splits=int(sum(y==0)//n_scz_te),shuffle=True, random_state = rep)\n count_plt = 0\n fig, ax = plt.subplots(2,3 , figsize=(10,6.5))\n for tr_idx, te_idx in skf.split(X,y):\n # Compute test and train targets\n y_tr = np.ravel(y[tr_idx])\n y_te = np.ravel(y[te_idx])\n \n # Make gridsearch function\n clf_name = list(classifiers.keys())[0]\n count += 1\n sns.set(font_scale=1.5)\n for i in range(1): #range(len(freq_bands)):\n if count_plt == 6:\n plt.suptitle('Example of line search for the regularization parameter', fontsize= 18)\n plt.tight_layout()\n plt.subplots_adjust(top = 0.84, bottom = 0.15, hspace = 0.5, wspace = 0.45)\n fig.legend(['Train', 'Validation'], bbox_to_anchor = (0.5, 0.89), \n borderaxespad = 0., loc = 'upper center', ncol = 2)\n \n plt.show()\n fig.savefig('/share/FannyMaster/PythonNew/Figures/LineSearchEx.jpg', bbox_inches = 'tight')\n sns.reset_orig()\n raise NameError('This is just a dumb way of stopping the code after 6 iterations')\n \n i = 1\n clf = GridSearchCV(classifiers[clf_name], {'alpha' :parameters[freq_bands[i]]}, \n cv = StratifiedKFold(n_splits = int(sum(y_tr==0)//n_scz_te)), \n scoring = 'roc_auc', n_jobs = -1, return_train_score=True)\n # Compute test and train sets \n if n_BAitaSig == None:\n X_tr = X[tr_idx, x_size*i:x_size*(i+1)]\n X_te = X[te_idx, x_size*i:x_size*(i+1)]\n else:\n if x_size == sum(n_BAitaSig):\n X_tr = X[tr_idx, :]\n X_te = X[te_idx, :]\n else:\n n_temp = [0]\n n_temp.extend(np.cumsum(n_BAitaSig))\n X_tr = X[tr_idx, n_temp[i]:n_temp[i+1]]\n X_te = X[te_idx, n_temp[i]:n_temp[i+1]]\n \n \n # Standardize\n scaler_out = preprocessing.StandardScaler().fit(X_tr)\n X_tr = scaler_out.transform(X_tr)\n X_te = scaler_out.transform(X_te)\n\n # Fit data and save auc scores\n fit = clf.fit(X_tr, y_tr)\n auc[freq_bands[i]][count] = fit.score(X_te, y_te)\n \n # Make parameter plot\n #plot_grid_search(clf.cv_results_, 'score', parameters[freq_bands[i]], 'log($\\lambda$) ' + freq_bands[i])\n cv_results = clf.cv_results_\n metric = 'score'\n grid_param_1 = parameters[freq_bands[i]]\n \n scores_mean = cv_results[('mean_test_' + metric)]\n # scores_sd = cv_results[('std_test_' + metric)]\n scores_mean_tr = cv_results[('mean_train_' + metric)]\n \n # Set plot style\n #plt.style.use('seaborn')\n \n # Plot Grid search scores\n\n sns.set(font_scale=1.5)\n df1 = pd.DataFrame({'log($\\lambda$)':[math.log(i) for i in grid_param_1], 'CV Average AUC' : scores_mean_tr, 'type' : ['train']*len(scores_mean_tr)})\n df2 = pd.DataFrame({'log($\\lambda$)':[math.log(i) for i in grid_param_1], 'CV Average AUC' : scores_mean, 'type' : ['test']*len(scores_mean_tr)})\n sns.lineplot(x = 'log($\\lambda$)', y = 'CV Average AUC', style='type', legend = False, markers = \"o\", data = df1, ax = ax[count_plt//3][count_plt%3])\n sns.lineplot(x = 'log($\\lambda$)', y = 'CV Average AUC', style='type', legend = False, markers = \"o\", data = df2, ax = ax[count_plt//3][count_plt%3])\n\n ax[count_plt//3][count_plt%3].set_xlabel('log($\\lambda$)', fontsize=14)\n ax[count_plt//3][count_plt%3].set_ylabel('CV Average AUC' , fontsize=14) \n \n #pprint(clf.cv_results_)\n #pdb.set_trace() # Type \"exit\" to get out, type \"c\" to continue\n count_plt += 1\n if len(perms) == 1:\n coef_idx = np.nonzero(fit.best_estimator_.coef_)\n nz_coef_idx[freq_bands[i]].append(coef_idx)\n 
nz_coef_val[freq_bands[i]].append(fit.best_estimator_.coef_[coef_idx])\n\n return auc, nz_coef_idx, nz_coef_val, count",
"def gridSearch(xTrain, yTrain, xTest, yTest, model, modelParameters, hyperParameters, \n nFolds = 1, reTrain = True, plotGraphs = False):\n leastLoss = None\n bestModel = None\n bestHyperParams = None\n \n \"\"\"Generate the parameter grid\"\"\"\n parameterGrid = []\n gridKeys = []\n \n parameterGrid = list(product(*hyperParameters.values()))\n hyperParameterKeys = hyperParameters.keys()\n \n \"\"\"For plottong graphs\"\"\"\n if plotGraphs:\n plt.close()\n plotHeight = 10\n plotWidth = 20\n index = 0\n fig, axs = plt.subplots(len(parameterGrid), 2, figsize=(plotWidth, plotHeight * len(parameterGrid)))\n fig = plt.figure()\n fig.set_figheight(15)\n fig.set_figwidth(15)\n ax = fig.add_subplot(111, projection='3d')\n \n\n \"\"\"Grid search for cartesian product of hyperParameters\"\"\" \n for parameterMesh in parameterGrid:\n hyperParameterMesh = {}\n for k,v in zip(hyperParameterKeys, parameterMesh):\n hyperParameterMesh[k] = v\n \n \"\"\"Combine model Parameters\"\"\"\n updatedParam = modelParameters.copy()\n updatedParam.update(hyperParameterMesh)\n \n \"\"\"Perform grid search with cross validation\"\"\"\n if nFolds > 1:\n modelParams, trainLossList, testLossList, analysisMetricList = kFoldAnalysis(model = model,\n xTrain = xTrain,\n yTrain = yTrain,\n nFolds = nFolds,\n modelParameters = updatedParam) \n \n \n \"\"\"For storing best model\"\"\"\n avg = np.average(analysisMetricList)\n if leastLoss == None or avg < leastLoss:\n leastLoss = avg\n bestModel = modelParams\n bestHyperParams = hyperParameterMesh\n \n \"\"\"For plotting\"\"\"\n if plotGraphs:\n foldIndex = 1\n\n ax.scatter(hyperParameterMesh['alpha'], hyperParameterMesh['regularizationParameter'], \n avg, marker = 'o', label = str(hyperParameterMesh))\n \n\n for train, test in zip(trainLossList, testLossList):\n axs[index][0].plot(train, label = \"Fold-\" + str(foldIndex))\n axs[index][1].plot(test, label = \"Fold-\" + str(foldIndex))\n foldIndex = foldIndex + 1\n \n axs[index][0].legend()\n axs[index][0].grid()\n \n axs[index][1].legend()\n axs[index][1].grid()\n \n axs[index][0].set_title(\"Train set for \" + str(hyperParameterMesh))\n axs[index][1].set_title(\"Validation set for \" + str(hyperParameterMesh))\n \n index = index + 1\n \n \n \"\"\"Perform only grid search and no cross validation. 
Test set will be used for validation\"\"\" \n else:\n trainedModel, trainLoss, testLoss = model(xTrain, yTrain, xTest, yTest, **updatedParam)\n \n \"\"\"For storing best model\"\"\"\n if leastLoss == None or testLoss[-1] < leastLoss:\n leastLoss = testLoss[-1]\n bestModel = trainedModel\n bestHyperParams = hyperParameterMesh\n \n \"\"\"For plotting graphs\"\"\"\n if plotGraphs:\n axs[index][0].plot(trainLoss, label = \"Training set Loss for \" + str(hyperParameterMesh))\n axs[index][0].legend()\n axs[index][0].grid()\n axs[index][1].plot(testLoss, label = \"Test set Loss for \" + str(hyperParameterMesh))\n axs[index][1].legend()\n axs[index][1].grid()\n index = index + 1\n \n if plotGraphs:\n ax.legend()\n ax.set_xlabel('alpha')\n ax.set_ylabel('regularizationParameter')\n ax.set_zlabel('RMSE')\n\n plt.show()\n plt.close()\n \n if reTrain:\n \n \"\"\"Combine model Parameters\"\"\"\n updatedParam = modelParameters.copy()\n updatedParam.update(bestHyperParams)\n\n bestModel, trainLoss, testLoss = model(xTrain, yTrain, xTest, yTest, **updatedParam)\n print trainLoss[-1]\n print testLoss[-1]\n \n if plotGraphs:\n plt.close()\n plotHeight = 10\n plotWidth = 20\n fig, axs = plt.subplots(1, 2, figsize = (plotWidth, plotHeight)) \n \n plt.suptitle(\"Best model\")\n\n axs[0].plot(trainLoss, label = \"Training set Loss for \" + str(bestHyperParams))\n axs[0].legend()\n axs[0].grid()\n axs[1].plot(testLoss, label = \"Test set Loss for \" + str(bestHyperParams))\n axs[1].legend()\n axs[1].grid()\n \n plt.show()\n \n \n \n return bestModel, bestHyperParams",
"def fit_lorentzian(comp_key,p0,time_step,conn,func = fitting.fun_lorentzian,fig=None,wind=3):\n\n (fin,) = conn.execute(\"select fout from comps where comp_key = ?\",(comp_key,)).fetchone()\n Fin = h5py.File(fin,'r')\n g = Fin[fd('vanHove',comp_key)]\n\n temp = g.attrs['temperature']\n dtime = g.attrs['dtime']\n j = int(time_step/dtime)\n count = g[fd('step',j)]['y']['disp_count'][:]\n count += g[fd('step',j)]['x']['disp_count'][:]\n edges = g[fd('step',j)]['y']['disp_edges'][:]\n\n \n edges = np.array([np.mean(edges[j:j+wind]) for j in range(1,len(count)-wind,wind)])\n count = np.array([np.sum(count[j:j+wind]) for j in range(1,len(count)-wind,wind)])\n \n \n edges = edges[count>30]\n count = count[count>30]\n \n\n out = fitting.fit_curve(edges,count,p0,func)\n fig = fitting.display_fit(edges,count,out.beta,func,fig)\n print out.beta\n return out,fig",
"def run_various_Ks(x, K):\n m = len(x) # length of data points\n min_list = [] # list that will contain minimum costs\n Ks = [i for i in range(1,K+1)] # values of K's\n\n for i in range(1, K+1):\n # runs algorithm with different values of K\n kmeans = KMeans(n_clusters=i, random_state=0).fit(x)\n minval = kmeans.inertia_\n print(minval)\n min_list.append(minval) # appends minimum cost \n\n # Plotting J vs. K to choose best value of K\n plt.plot(Ks, min_list)\n plt.plot(Ks, min_list, '-o')\n plt.xlabel('K (# of clusters)')\n plt.ylabel('Cost function J')\n plt.title('J vs. K plot')\n plt.show()",
"def notebook_01():\n\n freq_list, volt_list = las.load_freq_volt()\n\n n_steps, n_det, n_f, _ = np.shape(volt_list)\n\n #y_sym_mat_o = ds.by_sym_mat(volt_list, det_ind=0)\n #y_sym_mat_i = ds.by_sym_mat(volt_list, det_ind=1)\n\n # print(np.shape(y_sym_mat_o))\n # print(np.shape(y_sym_mat_i))\n # (mu_o, sigma_o) = stats.norm.fit(y_sym_mat_o[:,0])\n # (mu_i, sigma_i) = stats.norm.fit(y_sym_mat_i[:,0])\n # print(mu_o, sigma_o)\n # print(mu_i, sigma_i)\n # print(mu_o*89000, mu_i*89000.0, -mu_i*89000.0, -mu_o*89000.0)\n\n volt_list_sym = ds.volt_list_sym_calc(volt_list)\n\n fit_params_mat = fp.fit_params(ff.f_b_field, volt_list_sym)\n\n fit_params_mat_s = fp.fit_params(ff.f_b_field_off, volt_list_sym)\n\n # pbd.plot_bare_signal_and_fit_norm_shifted(0, volt_list_sym, freq_list, fit_params_mat_s, ff.f_b_field_off)\n\n # pfp.plot_fit_sym_comp(volt_list_sym, fit_params_mat, fit_params_mat_s, freq_list)\n\n\n # pfp.plot_fit_sym_comp_2(volt_list_sym, fit_params_mat_s, freq_list)\n\n #pfp.plot_symmetry_along_z(volt_list_sym, freq_list, fit_params_mat_s, ff.f_b_field_off)\n\n fp.fit_params_FH_data(ff.f_b_field)\n\n # pbd.plot_rel_diff_bare_signal_and_fit_norm_shifted(0, volt_list_sym, freq_list, fit_params_mat_s, ff.f_b_field_off)",
"def question27():\n global conv_residuals\n def catch(r):\n \"\"\"Helper function to retrieve residual + steps to convergence for\n GMRES operation in Scipy. Used as a callback function for\n scipy.sparse.linalg.gmres\n \"\"\"\n global conv_residuals\n conv_residuals.append(r)\n return\n\n def iterate(rk):\n \"\"\" Preconditioner Function for GMRES.\"\"\"\n y = scipy.sparse.linalg.spsolve(P1, rk)\n RHS = scipy.sparse.csr_matrix.dot(P4, y) + rk\n zk = scipy.sparse.linalg.spsolve(P3, RHS)\n return zk\n\n\n N_search = np.array([20, 40, 60, 80, 100, 120, 140, 160, 180])\n steps_till_conv_N = np.zeros(N_search.size)\n\n fig271 = plt.figure(figsize=(13, 8))\n\n for i, n in enumerate(N_search):\n n2 = n**2\n A = construct_matrix_A(n)\n b = np.random.randn(n2)\n M, N = construct_M_N(n)\n mu_max = scipy.sparse.linalg.eigs(M, k=1, which='LM', return_eigenvectors=False)[0].real\n mu_min = scipy.sparse.linalg.eigs(M, k=1, which='SM', return_eigenvectors=False)[0].real\n gamma = np.sqrt(mu_max*mu_min)\n gammaI = scipy.sparse.diags((gamma,), (0,), shape=(n2, n2), format=\"csr\")\n P1 = gammaI + M\n P2 = gammaI - N\n P3 = gammaI + N\n P4 = gammaI - M\n M = scipy.sparse.linalg.LinearOperator((n2, n2), matvec=iterate)\n conv_residuals = []\n x = scipy.sparse.linalg.gmres(A, b, M=M, callback=catch)\n steps_till_conv_N[i] += len(conv_residuals)\n n_steps = len(conv_residuals)\n plt.semilogy(range(n_steps), conv_residuals, label=f\"N = {n}\")\n\n plt.xlabel(\"Steps Required for Convergence\")\n plt.ylabel(\"Residuals\")\n plt.title(\"Figure 271 - GMRES + Preconditioner Residuals for Varying N\", fontsize=13)\n plt.legend()\n plt.grid()\n plt.savefig(f\"figures/figure271.png\")\n plt.show()\n\n\n fig270 = plt.figure(figsize=(13, 8))\n plt.plot(N_search, steps_till_conv_N)\n plt.xlabel(\"N\")\n plt.ylabel(\"Steps until convergence\")\n plt.title(\"Figure 270 - GMRES + Preconditioner Convergence Required for Varying N\", fontsize=13)\n plt.grid()\n plt.savefig(f\"figures/figure270.png\")\n plt.show()\n return",
"def convergence(n0,l,nt,m,numthreads,display=False):\n# call stats and initialise variables\n qnetm,qmaxm,qvarm = ns.stats(n0,l,nt,m)\n qmax_ave = np.zeros(m)\n qmax_vec = np.zeros(m)\n\n# assign qmax_vec the qmax of qnetn value for n=1->m realizations\n# assign qmax_ave the value of the avegerage over the n realizations of qmax \n\n for n in range(1,m+1):\n qmax_vec[n-1] = float(np.amax(qnetm[:,n-1]))\n qmax_ave[n-1] = np.sum(qmax_vec)/(n)\n \n x = np.arange(1,m+1)\n\n# use polyfit to solve for k and a satisfying qmax_ave = a*m**(-k)\n# reduce problem to log(qmax_ave) = c - k*log(m) (c = log(a), and flip sgn(k) for now)\n\n k, c = np.polyfit(np.log(x),np.log(qmax_ave),1)\n\n# if display flag is true, create log-log plot of qmax_ave vs x=1->m \n\n if display:\n #plt.figure()\n #plt.loglog(x,qmax_ave,'b')\n #plt.loglog(x,np.exp(b+k*x),'r')\n #plt.show()\n \n plt.figure()\n plt.plot(np.log(x),np.log(qmax_ave),'b')\n plt.plot(np.log(x),c + k*np.log(x),'r')\n plt.xlabel('log(x) x=1->m')\n plt.ylabel('log(qmax_ave)')\n plt.title('log-log plot of m against qmax_ave with rate of convergence fit')\n plt.legend(loc='best')\n plt.show()\n\n return -k",
"def convergence():\n fig, axes = plt.subplots(nrows=2, figsize=figsize(aspect=1.2))\n\n # label names\n label1 = str(league.lambda1)\n label2_list = [str(lambda2) for lambda2 in league.lambda2_list]\n\n # point spread and point total subplots\n subplots = [\n (False, [-0.5, 0.5], league.spreads, 'probability spread > 0.5'),\n (True, [200.5], league.totals, 'probability total > 200.5'),\n ]\n\n for ax, (commutes, lines, values, ylabel) in zip(axes, subplots):\n\n # train margin-dependent Elo model\n melo = Melo(lines=lines, commutes=commutes, k=1e-4)\n melo.fit(league.times, league.labels1, league.labels2, values)\n\n line = lines[-1]\n\n for label2 in label2_list:\n\n # evaluation times and labels\n times = np.arange(league.times.size)[::1000]\n labels1 = times.size * [label1]\n labels2 = times.size * [label2]\n\n # observed win probability\n prob = melo.probability(times, labels1, labels2, lines=line)\n ax.plot(times, prob)\n\n # true (analytic) win probability\n if ax.is_first_row():\n prob = skellam.sf(line, int(label1), int(label2))\n ax.axhline(prob, color='k')\n else:\n prob = poisson.sf(line, int(label1) + int(label2))\n ax.axhline(prob, color='k')\n\n # axes labels\n if ax.is_last_row():\n ax.set_xlabel('Iterations')\n ax.set_ylabel(ylabel)\n\n set_tight(w_pad=.5)",
"def plotdFvsLambda2(nb=10):\n x = numpy.arange(len(df_allk))\n if len(x) < nb:\n return\n xs = numpy.array_split(x, len(x)/nb+1)\n mnb = max([len(i) for i in xs])\n fig = pl.figure(figsize = (8,6))\n width = 1./(len(P.methods)+1)\n elw = 30*width\n colors = {'TI':'#C45AEC', 'TI-CUBIC':'#33CC33', 'DEXP':'#F87431', 'IEXP':'#FF3030', 'GINS':'#EAC117', 'GDEL':'#347235', 'BAR':'#6698FF', 'UBAR':'#817339', 'RBAR':'#C11B17', 'MBAR':'#F9B7FF'}\n ndx = 1\n for x in xs:\n lines = tuple()\n ax = pl.subplot(len(xs), 1, ndx)\n for name in P.methods:\n y = [df_allk[i][name]/P.beta_report for i in x]\n ye = [ddf_allk[i][name]/P.beta_report for i in x]\n line = pl.bar(x+len(lines)*width, y, width, color=colors[name], yerr=ye, lw=0.05*elw, error_kw=dict(elinewidth=elw, ecolor='black', capsize=0.5*elw))\n lines += (line[0],)\n for dir in ['left', 'right', 'top', 'bottom']:\n if dir == 'left':\n ax.yaxis.set_ticks_position(dir)\n else:\n ax.spines[dir].set_color('none')\n pl.yticks(fontsize=10)\n ax.xaxis.set_ticks([])\n for i in x+0.5*width*len(P.methods):\n ax.annotate('$\\mathrm{%d-%d}$' % (i, i+1), xy=(i, 0), xycoords=('data', 'axes fraction'), xytext=(0, -2), size=10, textcoords='offset points', va='top', ha='center')\n pl.xlim(x[0], x[-1]+len(lines)*width + (mnb - len(x)))\n ndx += 1\n leg = ax.legend(lines, tuple(P.methods), loc=0, ncol=2, prop=FP(size=8), title='$\\mathrm{\\Delta G\\/%s\\/}\\mathit{vs.}\\/\\mathrm{lambda\\/pair}$' % P.units, fancybox=True)\n leg.get_frame().set_alpha(0.5)\n pl.savefig(os.path.join(P.output_directory, 'dF_state.pdf'), bbox_inches='tight')\n pl.close(fig)\n return",
"def outliers(df):\r\n # LocalOutlierFactor.\r\n start_time = time.time()\r\n print('\\n'+ '# '*10+'[Training] Local Outlier Factor Model (LOF):'+ ' #'*10)\r\n clf = LocalOutlierFactor()\r\n y_pred = clf.fit_predict(df.drop(['label', 'label_encoded'], axis=1))\r\n print('> '*2+'Training and prediction time: %.4f seconds.'%(time.time()-start_time))\r\n # Dataframe with various metrics.\r\n metrics = ['fliers', 'Q1', 'Q3', 'IQR', 'min', 'max', 'median', 'LOF_inliers', 'LOF_outliers', 'LOF_outlier_factor']\r\n df_outliers = pd.DataFrame()\r\n df_outliers['Feature'] = metrics\r\n bp = plt.boxplot([df[i] for i in df.drop(['label', 'label_encoded'], axis=1).columns])\r\n for i in range(len(df.drop(['label', 'label_encoded'], axis=1).columns)):\r\n vals = []\r\n # Fliers.\r\n vals.append(len(bp['fliers'][i].get_ydata()))\r\n # Q1.\r\n vals.append(df[df.drop(['label', 'label_encoded'], axis=1).columns[i]].quantile(0.25))\r\n # Q3.\r\n vals.append(df[df.drop(['label', 'label_encoded'], axis=1).columns[i]].quantile(0.75))\r\n # IQR.\r\n vals.append(df[df.drop(['label', 'label_encoded'], axis=1).columns[i]].quantile(0.75) - df[df.drop(['label', 'label_encoded'], axis=1).columns[i]].quantile(0.25))\r\n # Min.\r\n vals.append(df[df.drop(['label', 'label_encoded'], axis=1).columns[i]].min())\r\n # Max.\r\n vals.append(df[df.drop(['label', 'label_encoded'], axis=1).columns[i]].max())\r\n # Median.\r\n vals.append(df[df.drop(['label', 'label_encoded'], axis=1).columns[i]].median())\r\n # Local Outlier Factor.\r\n vals.append(y_pred.tolist().count(1)) # Inliers.\r\n vals.append(y_pred.tolist().count(-1)) # Outliers.\r\n vals.append(clf.negative_outlier_factor_)\r\n # Add column and data.\r\n df_outliers[df.columns[i]] = vals\r\n plt.close()\r\n return df_outliers"
] | [
"0.61967266",
"0.5894779",
"0.58709",
"0.5770856",
"0.5696888",
"0.56871897",
"0.56855875",
"0.56651926",
"0.5657436",
"0.5621841",
"0.5600335",
"0.5566454",
"0.5563774",
"0.55629843",
"0.5555587",
"0.5543707",
"0.5534561",
"0.5516074",
"0.5515341",
"0.5506598",
"0.5501438",
"0.54948854",
"0.54699177",
"0.5452492",
"0.5446856",
"0.54467547",
"0.5431992",
"0.5423619",
"0.54184335",
"0.5418139"
] | 0.6229848 | 0 |
A plot of the bias and variance tradeoff by recreating the noise N times for polynomial orders between first_poly and first_poly + complexity | def bias_var(x, y, z, first_poly = 4, complexity = 10, N = 100, method = 'OLS', seed = 42, lam = 0, train = 0.7, folds = 5):
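    #For each polynomial degree: refit the model on N fresh noisy realisations of the Franke data and estimate the bias and variance of its predictions on a fixed test split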
    bias = np.zeros(complexity + 1)
    variance = np.zeros(complexity + 1)
    z_real = FrankeFunction(x, y)
    complx = np.arange(first_poly, first_poly + complexity + 1, 1)
    for i in range(complexity + 1):
        print(i)
        model = regression(x, y, z, k = first_poly + i, split = True, train = train, seed = seed)
        _, _, _, z_real_test = model.train_test(X = model.X_full, z = np.ravel(z_real), train = train, seed = seed)
        counter = 0
        z_tildes = np.zeros((np.size(z_real_test), N))
for j in range(N):
z_new = FrankeFunction(x, y) + np.random.normal(0, 1, size = x.shape)
_, _, z_train, _ = model.train_test(X = model.X_full, z = np.ravel(z_new), train = train)
if method == 'OLS':
beta = model.OLS(z = z_train)
elif method == 'Ridge':
beta = model.Ridge(lam = lam, z = z_train)
elif method == 'Lasso':
beta = model.Lasso(lam = lam, z = z_train)
z_tilde = model.z_tilde(beta, X = model.X_test)
z_tildes[:, j] = np.ravel(z_tilde)
bias[i] = np.mean((np.ravel(z_real_test).reshape(-1, 1) - np.mean(z_tildes, axis = 1, keepdims = True))**2)
variance[i] = np.mean(np.var(z_tildes, axis = 1, keepdims = True))
plt.title(method + ' with N = ' + str(N) + ' times pr complexity')
plt.plot(complx, bias, 'go--', label = 'Bias', color = 'blue')
plt.plot(complx, variance, 'go--', label = 'Variance', color = 'red')
#plt.ylim([np.min(errors_R2[2]*1.2), np.max(errors_R2[0]*1.2)])
plt.legend()
plt.xlabel('Polynomial maximum order', fontsize = 14)
plt.ylabel('Bias/variance', fontsize = 14)
plt.tight_layout()
plt.savefig(results_dir + 'bias_variance' + method + '.png')
plt.show() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def demo(N, y, cov_fun_kwargs, loo, K, ylim, figsize, seed, trace, upper_bound):\n\n T = y * N\n cov_fun, cov_kwargs = cov_fun_kwargs\n Sigma, tau = cov_functions[cov_fun](N, seed=seed, **cov_kwargs)\n\n np.random.seed(seed)\n sim = Simulation(Sigma, T)\n\n fig, (ax0, ax1) = plt.subplots(figsize=figsize, ncols=2)\n # ax0.plot(annualize_vol(tau / N), label='true')\n # ax1.plot(annualize_vol(tau / N), label='true')\n # ax0.plot(annualize_vol(lam / N), label='sample')\n # ax1.plot(annualize_vol(lam / N), label='sample')\n\n # Oracle LW NLS shrinkage\n # d_lw_oracle = nls_oracle(sim)\n # d_isolw_oracle = nls_oracle(sim, isotonic=True)\n # ax0.plot(annualize_vol(d_lw_oracle / N), label='lw oracle')\n # ax1.plot(annualize_vol(d_isolw_oracle / N), label='lw iso oracle')\n\n # # LW NLS shrinkage\n # S_lw = nlshrink_covariance(X, centered=True)\n # d_lw = eig(S_lw, return_eigenvectors=False)\n # ax1.plot(annualize_vol(d_lw / N), label='lw')\n\n # if loo:\n # # LOO LW NLS shrinkage\n # _, d_loo = nls_loo_cv(X, S, U)\n # d_isoloo = isotonic_regression(d_loo)\n # ax0.plot(annualize_vol(d_loo / N), label='noisy-loo')\n # ax1.plot(annualize_vol(d_isoloo / N), label='isoloo')\n\n # K-fold LW NLS shrinkage\n # d_lw_loo = nls_loo(sim)\n # d_lw_isoloo = nls_loo(sim, isotonic=True)\n # ax0.plot(annualize_vol(d_lw_loo / N), label='lw_kfold')\n # ax1.plot(annualize_vol(d_lw_isoloo / N), label='lw_isoloo')\n\n d_lw_kfold = nls_kfold(sim, K)\n d_lw_isokfold = nls_kfold(sim, K, isotonic=True)\n ax0.plot(annualize_vol(d_lw_kfold / N), label='lw_kfold')\n ax1.plot(annualize_vol(d_lw_isokfold / N), label='lw_isokfold')\n\n # MinVar NLS shrinkage\n d_mv_oracle = minvar_oracle(\n sim, monotonicity=None, trace=trace, upper_bound=upper_bound)\n d_mv_mono_oracle = minvar_oracle(\n sim, monotonicity='constraint', trace=trace, upper_bound=upper_bound)\n d_mv_iso_oracle = minvar_oracle(\n sim, monotonicity='isotonic', trace=trace, upper_bound=upper_bound)\n\n ax0.plot(annualize_vol(d_mv_oracle / N), label='mv_oracle')\n ax1.plot(annualize_vol(d_mv_mono_oracle / N), label='mv_mono_oracle')\n ax1.plot(annualize_vol(d_mv_iso_oracle / N), label='mv_iso_oracle')\n\n d_mv_loo = minvar_loo(\n sim, monotonicity=None, trace=trace, upper_bound=upper_bound)\n d_mv_mono_loo = minvar_loo(\n sim, monotonicity='constraint', trace=trace, upper_bound=upper_bound)\n d_mv_iso_loo = minvar_loo(\n sim, monotonicity='isotonic', trace=trace, upper_bound=upper_bound)\n\n ax0.plot(annualize_vol(d_mv_loo / N), label='mv_loo')\n ax1.plot(annualize_vol(d_mv_mono_loo / N), label='mv_mono_loo')\n ax1.plot(annualize_vol(d_mv_iso_loo / N), label='mv_iso_loo')\n\n ax0.legend()\n ax1.legend()\n # ax0.set_ylim(*ylim)\n # ax1.set_ylim(*ylim)\n plt.show()",
"def demo(n, y, cov_fun, loo, K, ylim, figsize, seed):\n raise NotImplementedError(\n \"Not up-to-date with new proposed estimators for MinVar shrinkage\")\n np.random.seed(seed)\n\n T = y * N\n cov_fun, cov_kwargs = cov_fun_kwargs\n Sigma, tau = cov_functions[cov_fun](N)\n\n sim = Simulation(Sigma, T)\n\n fig, (ax0, ax1) = plt.subplots(figsize=figsize, ncols=2)\n ax0.plot(annualize_vol(tau / n), label='true')\n ax1.plot(annualize_vol(tau / n), label='true')\n ax0.plot(annualize_vol(lam / n), label='sample')\n ax1.plot(annualize_vol(lam / n), label='sample')\n\n # Oracle LW NLS shrinkage\n d_lw_oracle = nls_oracle(sim)\n d_isolw_oracle = nls_oracle(sim, isotonic=True)\n ax0.plot(annualize_vol(d_lw_oracle / n), label='lw oracle')\n ax1.plot(annualize_vol(d_isolw_oracle / n), label='lw oracle')\n\n # LW NLS shrinkage\n d_lw = nls_asymptotic(sim)\n ax1.plot(annualize_vol(d_lw / n), label='lw')\n\n if loo:\n # LOO LW NLS shrinkage\n d_loo = nls_loo(sim)\n d_isoloo = nls_loo(sim, isotonic=True)\n ax0.plot(annualize_vol(d_loo / n), label='noisy-loo')\n ax1.plot(annualize_vol(d_isoloo / n), label='isoloo')\n\n # K-fold LW NLS shrinkage\n d_kfold = nls_kfold(sim, K)\n d_isokfold = nls_kfold(sim, K, isotonic=True)\n ax0.plot(annualize_vol(d_kfold / n), label='noisy-kfold')\n ax1.plot(annualize_vol(d_isokfold / n), label='isokfold')\n\n # MinVar NLS shrinkage\n # d_mv_oracle = minvar_nls_oracle(sim, Sigma)\n # d_isomv_oracle = isotonic_regression(\n # d_mv_oracle, y_min=lam_N, y_max=lam_1)\n # d_isonlsq_mv_oracle = minvar_nls_oracle(\n # X, S, lam, U, Sigma, isotonic=True)\n # ax0.plot(annualize_vol(d_mv_oracle / n), label='noisy-mv_oracle')\n # ax1.plot(annualize_vol(d_isomv_oracle / n), label='buggy-iso-mv_oracle')\n # ax1.plot(annualize_vol(d_isonlsq_mv_oracle / n), label='isolsq-mv_oracle')\n\n ax0.legend()\n ax1.legend()\n ax0.set_ylim(*ylim)\n ax1.set_ylim(*ylim)\n plt.show()",
"def toyData(w,sigma,N): \n #Degree of polynomial \n degree=w.size; \n \n #generate x values \n x=np.linspace(0, 1,N);\n \n poly=preprocessing.PolynomialFeatures(degree-1,include_bias=True)\n \n PHI=poly.fit_transform(x.reshape(N,1)) \n \n y=np.dot(PHI,w);\n \n target=y+np.random.normal(0, sigma, N);\n \n Out=[x,y,PHI, target]\n\n return Out",
"def figure_10_12_b():\n xs = np.arange(-6,6,0.1)\n plt.plot(xs,sigmoid(xs))\n x=2.5\n plt.scatter(x,sigmoid(x))\n plt.plot(xs,logistic_lower_bound(xs,x))\n plt.show()",
"def main() -> None:\n seed = 0\n samples = 100_000\n\n np.random.seed(seed)\n\n s = np.random.binomial(1, 0.6, samples)\n\n x_1f = np.random.normal(0, 0.5, samples)\n x_1 = x_1f\n x_2 = np.random.normal(-1, 3, samples)\n x_2f = x_2\n\n y_1 = np.random.binomial(1, sigmoid(x_1))\n y_1f = np.random.binomial(1, sigmoid(x_1f))\n y_2 = np.random.binomial(1, sigmoid(x_2))\n y_2f = y_2\n p = x_1 + x_2\n pf = x_1f + x_2f\n y_3 = np.random.binomial(1, sigmoid(p))\n y_3f = np.random.binomial(1, sigmoid(pf))\n\n noise_1 = np.random.normal(0, 4, samples)\n noise_2 = np.random.normal(3, 7, samples)\n\n print(\n s.mean(),\n x_1.mean(),\n x_1f.mean(),\n x_2.mean(),\n x_2f.mean(),\n y_1.mean(),\n y_1f.mean(),\n y_2.mean(),\n y_2f.mean(),\n y_3.mean(),\n y_3f.mean(),\n )\n\n df = pd.DataFrame(\n data={\n \"x1\": x_1,\n \"x1f\": x_1f,\n \"x2\": x_2,\n \"x2f\": x_2f,\n \"s\": s,\n \"y1\": y_1,\n \"y1f\": y_1f,\n \"y2\": y_2,\n \"y2f\": y_2f,\n \"y3\": y_3,\n \"y3f\": y_3f,\n \"n1\": noise_1,\n \"n2\": noise_2,\n }\n )\n\n # Shuffle the data,\n df = df.sample(frac=1.0, random_state=seed).reset_index(drop=True)\n\n # Save the CSV\n df.to_csv(str(Path(__file__).parent / \"synthetic_scenario_1.csv\"), index=False)",
"def test_variance_of_slope_sums():\n\n ticker = 'GOOG'\n main_df = pd.read_pickle(settings.settings_dict['stock_data_path'])\n\n main_df = sample_slopes.create_slope_sum(main_df)\n\n slope_sums = main_df[ticker + \"slope_sum\"]\n\n print np.mean(main_df[ticker + \"slope_sum\"])\n print np.std(main_df[ticker + \"slope_sum\"])\n\n std = pd.rolling_std(slope_sums, window=20)\n\n _, ax2 = plt.subplots()\n\n ax2.plot(slope_sums)\n ax2.plot(slope_sums + std)\n ax2.plot(slope_sums - std)\n plt.legend(['Slope_Sum ', 'Slope_Sum +1 Std', 'Slope_Sum -1 Std'])\n plt.title(ticker + ' varrience of slope sum')\n plt.show()",
"def _test1():\n import matplotlib.pyplot as plt\n from math import pi, cos, sin\n n = 800\n PI2 = 2.0*pi\n angle = PI2 / n\n for i in range(n):\n pts = []\n beta = i * angle\n for r in range(1, 10):\n x = r*cos(beta)\n y = r*sin(beta)\n pts.append((x, y))\n are_zero = are_residuals_near_zero(pts)\n # print beta, are_zero\n assert are_zero\n plt.scatter([pt[0] for pt in pts], [pt[1] for pt in pts], c='red',\n marker='s', label = 'input')\n # nxs, nys = residuals(pts)\n #plt.scatter(xsadj, ysadj, c='green', marker='o', label = 'adjusted')\n # plt.scatter(nxs, nys, label = 'new')\n #plt.legend()\n plt.show()",
"def noise_generator(n, mean, std, fractindex):\n if fractindex not in VALID_FRACT:\n raise ValueError(\"results: status must be one of %r.\" % VALID_FRACT)\n \n stdev = std\n \n b = 2*fractindex-1\n print('beta: ', b)\n \n bdis = np.zeros(n)\n\n bdis[0] = 1\n for i in range(1,n):\n bdis[i] = bdis[i-1] * (0.5 * b + (i-1))/i # note that b is the shape parementer (b)\n\n plt.plot(bdis)\n plt.show\n\n wnt = np.random.normal(mean, stdev, size = n)\n print('WhiteNoise Stdev: ', np.std(wnt))\n plt.plot(wnt)\n plt.show()\n\n bdis_freq = np.fft.fft(bdis)\n wnt_freq = np.fft.fft(wnt)\n\n bdis_freq = bdis_freq[1:n+1]\n wnt_freq = wnt_freq[1:n+1]\n\n freq_total = bdis_freq * wnt_freq\n \n NumUniquePts = n/2 + 1\n NumUniquePts = int(NumUniquePts)\n j = np.arange(1, NumUniquePts)\n \n if fractindex > 1.0:\n j = j\n elif fractindex <= 1.0:\n j = j**0.5\n \n ft_half1 = freq_total[1:NumUniquePts]/j\n\n real = np.real(freq_total[1:NumUniquePts+1])\n real = np.flip(real, axis=0)\n\n imaginary = np.imag(freq_total[1:NumUniquePts+1])\n imaginary = np.flip(imaginary, axis=0)\n imaginary = 1j * imaginary\n\n ft_half2 = real - imaginary\n\n ft = np.hstack((ft_half1, ft_half2))\n \n x = np.fft.ifft(ft)\n x = np.real(x[:n])\n\n mean_diff = mean - np.mean(x)\n x = mean_diff + x\n print(np.mean(x))\n print(np.std(x))\n plt.plot(x)\n plt.show()\n \n return x",
"def PCO1S12Noise():\n #Get data\n wdir = '/home/rallured/Dropbox/AXRO/Metrology/' \\\n 'NoiseStudy/TestOptics_PCO1S12/'\n d1,dx1 = met.read4DFits(wdir+'161202_PCO1S12_4InchCut_Avg8_Meas1.fits')\n d2,dx2 = met.read4DFits(wdir+'161202_PCO1S12_4InchCut_Avg8_Meas2.fits')\n d3,dx3 = met.read4DFits(wdir+'161202_PCO1S12_4InchCut_Avg8_Meas3.fits')\n\n #Construct power spectra\n f12,pow12 = fourier.meanPSD((d1-d2)[:,100:-100],\\\n win=np.hanning,dx=dx1,irregular=True)\n f23,pow23 = fourier.meanPSD((d2-d3)[:,100:-100],\\\n win=np.hanning,dx=dx1,irregular=True)\n f13,pow13 = fourier.meanPSD((d1-d3)[:,100:-100],\\\n win=np.hanning,dx=dx1,irregular=True)\n\n #Mid frequency\n midfreq = [1000*np.sqrt(np.sum(p[np.logical_and(f>.1,f<1.)])) \\\n for f,p in zip([f12,f23,f13],[pow12,pow23,pow13])]\n\n #Plot\n plt.loglog(f12,pow12/f12[0],label='1-2: %.2f' % midfreq[0])\n plt.loglog(f23,pow23/f23[0],label='2-3: %.2f' % midfreq[1])\n plt.loglog(f13,pow13/f13[0],label='1-3: %.2f' % midfreq[2])\n plt.legend(loc='lower left')\n plt.grid()\n plt.title('4D Repeatability: PCO1S12')\n plt.xlabel('Frequency (1/mm)')\n plt.ylabel('Power ($\\mu$m$^2$ mm)')\n\n print midfreq\n\n return f12,pow12",
"def plot(self):\n \n \n x_ibs=[] \n x_gss=[]\n y_ibs=[] \n y_gss=[]\n x_pso=[]\n x_bgd=[]\n y_bgd=[]\n y_pso=[]\n x_gd=[]\n y_gd=[]\n \n i=0.0000001\n \n # for k in range(1,51):\n # i= random.uniform(0.00000001, 1)\n # t_avg_ibs=[]\n # t_avg_gss=[]\n # for j in range(1,51):\n #L=random.randint(-100, 0)\n #U=random.randint(0, 100)\n max_iter=self.Max_iter \n L=self.Lower_bound\n U=self.Upper_bound\n \n minima=self.gss(L,U,i,1000)\n #print(\"minima at X = \",minima[1])\n x_ibs.append(self.I_bisection(L,U,minima[1],max_iter)[0])\n x_gss.append(self.gss(L,U,i,max_iter)[0])\n x_pso.append(self.particle_Swarm(self.func, L, U, 2, max_iter)[0])\n x_gd.append(self.gradient_descent(X=U ,eta=0.01, tol=minima[1],iter= max_iter)[0])\n x_bgd.append(self.b_gradient_descent(LB=L,UB=U ,eta=0.01, tol=minima[1],iter=max_iter)[0])\n #print(x_pso)\n for i in x_ibs[0]:\n #print(self.Func(i)) \n y_ibs.append(self.Func(i))\n for i in x_gss[0]:\n y_gss.append(self.Func(i)) \n for i in x_pso[0]:\n y_pso.append(self.Func(i)) \n for i in x_gd[0]:\n y_gd.append(self.Func(i)) \n for i in x_bgd[0]:\n y_bgd.append(self.Func(i)) \n #print(y_gss)\n\n plt.plot(x_ibs[0], y_ibs, 'r.')\n plt.plot(x_gss[0], y_gss, '.')\n plt.plot(x_pso[0], y_pso, 'y.')\n #plt.plot(x_gd[0], y_gd, 'y.')\n #plt.plot(x_bgd[0], y_bgd, 'k.')\n plt.xlabel('x')\n plt.ylabel('y')\n \n plt.suptitle('Interval Bisection Search (Red) vs Golden Section Search (Blue) vs Particle swarm optimization (Green)')\n #plt.axis([0, 100, 0.00000001, 1]) \n plt.show()\n plt.plot(x_gd[0], y_gd, 'r.')\n plt.plot(x_bgd[0], y_bgd, 'k.')\n plt.xlabel('x')\n plt.ylabel('y') \n plt.suptitle('Gradient Descent (Red) vs Batch Gradient Descent (Black) ')\n \n plt.show()\n \n start_time = timeit.default_timer()\n ibs=self.I_bisection(L,U,minima[1],max_iter)\n print(\" Execution time for Interval bisection Method is\", timeit.default_timer() - start_time,\"s\")\n start_time = timeit.default_timer()\n gss=self.gss(L,U,i,max_iter)\n print(\" Execution time for Golden Section Search is\", timeit.default_timer() - start_time,\"s\")\n start_time = timeit.default_timer()\n pso=self.particle_Swarm(self.func, L, U, 2, max_iter)\n print(\" Execution time for Particle swarm optimization is\", timeit.default_timer() - start_time,\"s\")\n start_time = timeit.default_timer()\n gd=self.gradient_descent(X=U ,eta=0.01, tol=minima[1],iter= max_iter)\n print(\" Execution time for Gradient Descent is\", timeit.default_timer() - start_time,\"s\")\n start_time = timeit.default_timer()\n bgd=self.b_gradient_descent(LB=L,UB=U ,eta=0.01, tol=minima[1],iter=max_iter)\n print(\" Execution time for Batch Gradient Descent is\", timeit.default_timer() - start_time,\"s\")\n plt.plot(ibs[1], ibs[2], 'r.')\n plt.text(ibs[1], ibs[2],\"IB\")\n plt.plot(gss[1], gss[2], '.')\n plt.text(gss[1], gss[2],\" GSS\")\n plt.plot(pso[1], pso[2], 'y.')\n plt.text(pso[1], pso[2],\" PSO\")\n plt.plot(gd[1], gd[2], 'g.')\n plt.text(gd[1], gd[2],\" GD \")\n plt.plot(bgd[1],bgd[2], 'k.')\n plt.text(bgd[1], bgd[2],\" Batch_GD\")\n \n plt.xlabel('Value of X')\n plt.ylabel('NUmber of iteration') \n plt.suptitle('Number of iterations vs minimum value of x')\n \n plt.show()",
"def prob3(N):\n x, y, n = sy.symbols('x, y, n')\n expr = sy.summation(x**n/sy.factorial(n), (n, 0, N))\n f = sy.lambdify(y, expr.subs(x, -y**2), \"numpy\")\n domain = np.linspace(-2, 2, 100)\n plt.ion()\n plt.plot(domain, np.exp(-1*domain**2), label=\"original function\")\n plt.plot(domain, f(domain), label=\"Maclaurin series\")\n plt.legend()\n plt.show()",
"def plotLoss():\n # ssr\n ssr = np.log(gradientDescent(X, y)[1])\n # number of iterations \n iterations = np.log(np.arange(1, len(ssr) + 1, 1))\n # plot reduction of ssr\n plt.plot(iterations, ssr)\n # xlabel\n plt.xlabel(\"Iteration\")\n # ylabel\n plt.ylabel(\"SSR\")\n # title\n plt.title(\"Reduction of SSR by number of Iterations\")\n # show plot \n plt.show()",
"def plot_bias_variation(self, data_lst, head_lst, time_key='FRAME'):\n \n mean_lst, std_lst = [], []\n time_lst = []\n center_lst = []\n for ifile, (data, head) in enumerate(zip(data_lst, head_lst)):\n mean = np.zeros((3,3), dtype=np.float64)\n std = np.zeros((3,3), dtype=np.float64)\n for y, x in [(y, x) for y in range(3) for x in range(3)]:\n yc = y*500 + 500\n xc = x*500 + 500\n smalldata = data[yc-100:yc+100,xc-100:xc+100]\n mean[y,x] = smalldata.mean(dtype=np.float64)\n std[y,x] = smalldata.std(dtype=np.float64, ddof=1)\n if ifile == 0:\n center_lst.append((xc,yc))\n mean_lst.append(mean)\n std_lst.append(std)\n time_lst.append(head[time_key])\n mean_lst = np.array(mean_lst)\n std_lst = np.array(std_lst)\n \n # convert time string to delta minutes relative to the first image\n date_lst = [dateutil.parser.parse(t) for t in time_lst]\n datenums = mdates.date2num(date_lst)\n minutes = [(d - datenums[0])*24.*60. for d in datenums]\n \n # write the bias levels into run log\n message = ['Variation of bias level with time:',\n 'time, delta_minutes, mean values']\n for k in range(len(time_lst)):\n info = ['%s'%time_lst[k], '%7.3f'%minutes[k]]\n for j, i in [(j, i) for j in range(3) for i in range(3)]:\n info.append('%7.2f'%mean_lst[k,j,i])\n message.append(' '.join(info))\n logger.info((os.linesep+' ').join(message))\n \n # create figure\n fig = plt.figure(figsize=(8,6), dpi=150)\n z1, z2 = 999, -999\n for j, i in [(j, i) for j in range(3) for i in range(3)]:\n ax = fig.add_axes([0.1+i*0.3, 0.7-j*0.3, 0.26, 0.26])\n #ax.plot(minutes, mean_lst[:,i],'o-',alpha=0.7)\n #if i <= 5:\n # ax.set_xticklabels([])\n ax.plot(mean_lst[:,j,i],'o-',alpha=0.7)\n y1,y2 = ax.get_ylim()\n # searching for the minumum and maximum y display ranges\n z1 = min(y1, z1)\n z2 = max(y2, z2)\n for j, i in [(j, i) for j in range(3) for i in range(3)]:\n k = j*3 + i\n ax = fig.get_axes()[k]\n ax.set_ylim(z1,z2)\n x1, x2 = ax.get_xlim()\n ax.text(0.7*x1+0.3*x2, 0.2*z1+0.8*z2,\n 'x,y=(%4d,%4d)'%(center_lst[k][0],center_lst[k][1]),\n fontsize=9)\n if j == 2:\n #ax.set_xlabel('Time (min)')\n ax.set_xlabel('Frame',fontsize=11)\n if i == 0:\n ax.set_ylabel('Mean ADU',fontsize=11)\n for tick in ax.xaxis.get_major_ticks():\n tick.label1.set_fontsize(10)\n for tick in ax.yaxis.get_major_ticks():\n tick.label1.set_fontsize(10)\n\n # save the figure\n figfile = os.path.join(self.paths['report'], 'bias_variation.png')\n self.report_file.write(' <img src=\"%s\">'%figfile\n +os.linesep)\n fig.savefig(figfile)\n logger.info('Plot variation of bias with time in figure: \"%s\"'%figfile)\n plt.close(fig)",
"def polytropicProfile(n=[1.5]):\n\t\tdx = 0.001\n\t\txs = []\n\t\tthetas = []\n\t\tfor j, index in enumerate(n):\n\t\t\ttheta = [1]\n\t\t\tdtheta_dx = [0]\n\t\t\tx = [1E-6]\n\t\t\ti=0\n\t\t\twhile theta[i] > 0 and i<5000:\n\t\t\t\tdtheta_dx.append(dtheta_dx[i] - (2*dtheta_dx[i]/x[i] + theta[i]**index)*dx)\n\t\t\t\ttheta.append(theta[i] + dtheta_dx[i]*dx)\n\t\t\t\tx.append(x[i] + dx)\n\t\t\t\ti+=1\n\t\t\txs.append(np.array(x))\n\t\t\tthetas.append(np.array(theta))\n\t\treturn xs, thetas",
"def _test5():\n# import matplotlib.pyplot as plt\n from math import pi, cos, sin\n n = 800\n PI2 = 2.0*pi\n angle = PI2 / n\n pts = []\n r = 10.0\n for i in range(n):\n beta = i * angle\n x = r*cos(beta)\n y = r*sin(beta)\n pts.append((x, y))\n print (regress(pts))\n are_zero = are_residuals_near_zero(pts)",
"def get_data_poly_noise(start, stop, noise_rel=0.1, num=50, order=1):\n \n x = (stop - start) * np.random.random_sample(size=num) + start \n #coefficients for the polynomial in [-5,5]\n poly_coeff = 10 * np.random.random_sample(size=order+1) - 5\n \n #create polynomial\n y = np.zeros(x.shape)\n for i in range(order+1):\n y += poly_coeff[i] * x**i\n \n noise_mag = noise_rel * np.abs((np.max(y) - np.min(y)))\n #add noise in [-noise_mag/2, noise_mag/2]\n y += noise_mag * np.random.random_sample(size=num) - noise_mag/2\n \n return (x, y)",
"def shadow_plot(x, y, **kwargs):\n n_samples = y.size # Number of samples\n smooth_factor = 0.5 # Default smooth factor\n label = None\n semilogy = False\n clr = None\n shadow_std = False # Whether to subtend the shadow between -std and +std or not.\n\n ax = plt.gca()\n if get_prop_cycle() is None:\n cmap = plt.cm.get_cmap(name=\"Vega20\", lut=64)\n ax.set_prop_cycle(cycler('color', cmap(range(64))))\n\n for key, value in kwargs.items():\n if key == \"label\":\n label = value\n elif key == \"smooth\": # Smooth factor\n smooth_factor = value\n if smooth_factor < 0 or smooth_factor > 1:\n raise ValueError(\"The smooth factor must lie between 0 and 1.\")\n elif key == \"semilogy\":\n semilogy = value\n if not isinstance(semilogy, bool):\n raise ValueError(\"semilogy can only be True or False.\")\n elif key == \"color\":\n clr = value\n elif key == \"shadowstd\":\n if not isinstance(semilogy, bool):\n raise ValueError(\"shadowstd can only be True or False.\")\n shadow_std = value\n\n n = int(math.ceil(smooth_factor * 0.25 * n_samples)) # Window size for averaging (at most, 25% of all samples)\n mu = np.full(n_samples, np.nan)\n sigma = np.full(n_samples, np.nan)\n perc5 = np.full(n_samples, np.nan)\n perc95 = np.full(n_samples, np.nan)\n for s in range(n_samples):\n s0 = max(s - n + 1, 0)\n mu[s] = np.mean(y[s0:s + 1])\n sigma[s] = np.std(y[s0:s + 1])\n perc5[s] = np.percentile(y[s0:s + 1], 5)\n perc95[s] = np.percentile(y[s0:s + 1], 95)\n assert np.all(sigma >= 0), \"Negative standard deviation\"\n\n if semilogy:\n base_line, = ax.semilogy(x, mu, lw=2, label=label, color=clr)\n else:\n base_line, = ax.plot(x, mu, lw=2, label=label, color=clr)\n\n if shadow_std:\n higher_border = mu + sigma\n lower_border = mu - sigma\n else:\n higher_border = perc95\n lower_border = perc5\n\n if semilogy:\n # If the lower border of the shadowed region is 0 or negative, clip its minimum value. The minimum value shall\n # be equidistant from the average in log scale.\n factor = higher_border / mu\n lower_border = np.clip(lower_border, mu / factor, mu)\n\n ax.fill_between(x, higher_border, lower_border, facecolor=base_line.get_color(), alpha=0.2)",
"def f4():\n n = 4\n v = np.arange(n)**0.75 * 0.2\n e = (np.arange(n)+1)**0.7 * 1e-1\n\n n = 12\n v = np.arange(n)\n e = np.array([0.1]*n) * 10e-0\n\n print(Sumb(v,e))\n\n f = plt.figure()\n a = f.add_subplot(111)\n\n dx = 0.0001\n x = np.arange(-1,v[-1]+1,dx)\n y = x.copy()\n y[:] = 0.\n for i in range(n):\n yx = lg(x,v[i],e[i])\n a.plot(x,np.exp(yx),label='{:d}'.format(i))\n y += yx\n y = np.exp((y - np.max(y))/n**2)\n y /= np.sum(y) * dx \n a.plot(x,y,label='sum')\n s = np.argsort(y)[::-1]\n ys = np.cumsum(y[s]) * dx\n yi = np.argwhere(ys > 0.682689492137)[0][0]\n print('mean = {:2f}'.format(x[s[0]]))\n print('sigma = {:2f}'.format(yi*dx/2))\n xy = np.ndarray((yi+2,2))\n i0,i1 = min(s[:yi]), max(s[:yi])\n xy[:yi,0] = x[i0:i1+1]\n xy[:yi,1] = y[i0:i1+1]\n xy[yi:,1] = 0\n xy[yi:,0] = x[[i1,i0]]\n a.add_patch(Polygon(xy,fill=True,color='green',ec='none',alpha=0.25))\n \n leg = plt.legend()\n plt.draw()",
"def maker(N,n_vars,p):\n x = [] #an empty list to hold the data\n y = np.zeros(N) #an array to hold the dependent variable\n b = [] #an empty list to hold the true bs\n i = 1\n while i <= n_vars: #loop over the variables we want to create\n x_i = np.random.normal(loc = 0.0, scale = 1.0, size = N) #generate the data\n x.append(x_i) #add it to the list of data\n if np.random.uniform(0,1) < p: #if the variable matters...\n b_i = np.random.normal(loc = 0.0, scale = 1.0) #draw a random effect for this variable\n else:\n b_i = 0 #otherwise set it's true effect equal to 0.\n b.append(b_i) #add it to the list of effects\n y = y + b_i*x_i #add the variable effect to the dependent variable\n i += 1 #index up i\n \n b_i = np.random.normal(loc = 0.0, scale = 1.0) #draw a random intercept\n b.append(b_i) #append this intercept to the effects\n y = b_i + y + np.random.normal(loc = 0.0, scale = 1.0, size = N) #add the normally distributed error term and the intercept\n return [np.array(x),np.array(y),np.array(b)]",
"def flatNoisePellicle():\n #Get data\n wdir = '/home/rallured/Dropbox/AXRO/Metrology/' \\\n 'NoiseStudy/SolarBwPellicle/'\n d1,dx1 = met.read4DFits(wdir+'161209_Avg8_Meas1.fits')\n d2,dx2 = met.read4DFits(wdir+'161209_Avg8_Meas2.fits')\n d3,dx3 = met.read4DFits(wdir+'161209_Avg8_Meas3.fits')\n d4,dx4 = met.read4DFits(wdir+'161209_Avg8_Meas4.fits')\n\n #Construct power spectra\n f12,pow12 = fourier.meanPSD((d1-d2)[:,100:-100],\\\n win=np.hanning,dx=dx1,irregular=True)\n f23,pow23 = fourier.meanPSD((d2-d3)[:,100:-100],\\\n win=np.hanning,dx=dx1,irregular=True)\n f34,pow34 = fourier.meanPSD((d3-d4)[:,100:-100],\\\n win=np.hanning,dx=dx1,irregular=True)\n f14,pow14 = fourier.meanPSD((d1-d4)[:,100:-100],\\\n win=np.hanning,dx=dx1,irregular=True)\n\n #Mid frequency\n midfreq = [1000*np.sqrt(np.sum(p[np.logical_and(f>.1,f<1.)])) \\\n for f,p in zip([f12,f23,f34,f14],[pow12,pow23,pow34,pow14])]\n\n #Plot\n plt.loglog(f12,pow12/f12[0],label='1-2: %.2f' % midfreq[0])\n plt.loglog(f23,pow23/f23[0],label='2-3: %.2f' % midfreq[1])\n plt.loglog(f34,pow34/f34[0],label='3-4: %.2f' % midfreq[2])\n plt.loglog(f14,pow14/f14[0],label='1-4: %.2f' % midfreq[3])\n plt.legend(loc='lower left')\n plt.grid()\n plt.title('4D Repeatability: SolarB Flat+Pellicle')\n plt.xlabel('Frequency (1/mm)')\n plt.ylabel('Power ($\\mu$m$^2$ mm)')\n\n print midfreq\n\n return f12,pow12",
"def varying_lamda(x, y, z, lambda_min, lambda_max, n_lambda, k, save_fig = None, method = 'Ridge', split = True, train = 0.7, seed = 42, max_iter = 1001, l_min = False, plot_indexes = [0,1,2]):\n\n lambdas = np.array([0] + np.logspace(lambda_min, lambda_max, n_lambda).tolist())\n polynomials = np.array(k)\n X, Y = np.meshgrid(lambdas, polynomials)\n MSE = np.zeros(np.shape(X))\n\n j = 0\n for k in polynomials:\n print(k)\n\n model = regression(x, y, z, k = int(k), split = split, train = train, seed = seed)\n if method == 'Ridge':\n model.SVD()\n i = 0\n for lam in lambdas:\n\n if method == 'Ridge':\n beta = model.Ridge(lam = lam)\n elif method == 'Lasso':\n beta = model.Lasso(lam = lam, max_iter = max_iter)\n\n z_tilde = model.z_tilde(beta = beta, X = model.X_test)\n MSE[j, i] = model.MSE(z_tilde = z_tilde, z = model.z_test)\n i += 1\n j += 1\n\n print('Method = ', method)\n lambdas_min = []\n for i in range(len(polynomials)):\n minimum_index = MSE[i].argmin()\n print('Minimum lambda for polynomial %.i: ' %(polynomials[i]), lambdas[minimum_index], MSE[i].min())\n lambdas_min.append(int(minimum_index))\n\n #plt.pcolormesh(lambdas.tolist() + [lambdas[-1] + lambdas[1]], polynomials.tolist() + [polynomials[-1] + 1], MSE)\n #plt.colorbar()\n #plt.show()\n\n plt.title('MSE for the test data with ' + method)\n plt.contourf(lambdas, polynomials, MSE)\n plt.colorbar()\n plt.ylabel('Polynomial order', fontsize = 14)\n plt.xlabel('Lambda', fontsize = 14)\n try:\n plt.savefig(results_dir + save_fig + 'contour' + '.png')\n except:\n pass\n plt.show()\n\n plt.title('MSE for the test data with ' + method)\n plt.plot(lambdas, MSE[plot_indexes[0], :], label = 'k = ' + str(polynomials[plot_indexes[0]]))\n plt.plot(lambdas, MSE[plot_indexes[1], :], label = 'k = ' + str(polynomials[plot_indexes[1]]))\n plt.plot(lambdas, MSE[plot_indexes[2], :], label = 'k = ' + str(polynomials[plot_indexes[2]]))\n if l_min:\n plt.plot(lambdas[lambdas_min[1]], MSE[1, lambdas_min[1]], 'ro', label = 'Lambda min = %.4g' %(lambdas[lambdas_min[1]]))\n else:\n pass\n plt.legend()\n plt.xlabel('Lambda', fontsize = 14)\n plt.ylabel('MSE', fontsize = 14)\n plt.tight_layout()\n try:\n plt.savefig(results_dir + save_fig + '.png')\n except:\n pass\n plt.show()\n return lambdas_min",
"def plotdFvsLambda2(nb=10):\n x = numpy.arange(len(df_allk))\n if len(x) < nb:\n return\n xs = numpy.array_split(x, len(x)/nb+1)\n mnb = max([len(i) for i in xs])\n fig = pl.figure(figsize = (8,6))\n width = 1./(len(P.methods)+1)\n elw = 30*width\n colors = {'TI':'#C45AEC', 'TI-CUBIC':'#33CC33', 'DEXP':'#F87431', 'IEXP':'#FF3030', 'GINS':'#EAC117', 'GDEL':'#347235', 'BAR':'#6698FF', 'UBAR':'#817339', 'RBAR':'#C11B17', 'MBAR':'#F9B7FF'}\n ndx = 1\n for x in xs:\n lines = tuple()\n ax = pl.subplot(len(xs), 1, ndx)\n for name in P.methods:\n y = [df_allk[i][name]/P.beta_report for i in x]\n ye = [ddf_allk[i][name]/P.beta_report for i in x]\n line = pl.bar(x+len(lines)*width, y, width, color=colors[name], yerr=ye, lw=0.05*elw, error_kw=dict(elinewidth=elw, ecolor='black', capsize=0.5*elw))\n lines += (line[0],)\n for dir in ['left', 'right', 'top', 'bottom']:\n if dir == 'left':\n ax.yaxis.set_ticks_position(dir)\n else:\n ax.spines[dir].set_color('none')\n pl.yticks(fontsize=10)\n ax.xaxis.set_ticks([])\n for i in x+0.5*width*len(P.methods):\n ax.annotate('$\\mathrm{%d-%d}$' % (i, i+1), xy=(i, 0), xycoords=('data', 'axes fraction'), xytext=(0, -2), size=10, textcoords='offset points', va='top', ha='center')\n pl.xlim(x[0], x[-1]+len(lines)*width + (mnb - len(x)))\n ndx += 1\n leg = ax.legend(lines, tuple(P.methods), loc=0, ncol=2, prop=FP(size=8), title='$\\mathrm{\\Delta G\\/%s\\/}\\mathit{vs.}\\/\\mathrm{lambda\\/pair}$' % P.units, fancybox=True)\n leg.get_frame().set_alpha(0.5)\n pl.savefig(os.path.join(P.output_directory, 'dF_state.pdf'), bbox_inches='tight')\n pl.close(fig)\n return",
"def plot_VTx_variance(self, ax):\n V = self.V\n A = self.A\n b = self.b\n Ax = self.Ax\n x_ls_no = solve(A,b)\n x_ls = solve(A,Ax)\n\n ax.plot(dot(V.T, x_ls), 'r-', label='clean', lw=2.0)\n ax.plot(dot(V.T, x_ls_no), 'ko-', label='noisy')\n ax.set_xlabel(r'$i$')\n ax.set_title(r'$\\vec{v}_i^T \\vec{x}_{LS}$')\n ax.grid()\n leg = ax.legend(loc='upper center')\n leg.get_frame().set_alpha(0.5)",
"def flatNoiseCGH():\n #Get data\n wdir = '/home/rallured/Dropbox/AXRO/Metrology/NoiseStudy/FlatMeasurements/'\n d1,dx1 = met.read4DFits(wdir+'161205_RefFlat_Avg8_Meas1.fits')\n d2,dx2 = met.read4DFits(wdir+'161205_RefFlat_Avg8_Meas2.fits')\n p1,px1 = met.read4DFits(wdir+'161205_RefFlat_ParrotingTestPitch_Meas1.fits')\n p2,px2 = met.read4DFits(wdir+'161205_RefFlat_ParrotingTestPitch_Meas2.fits')\n p3,px3 = met.read4DFits(wdir+'161205_RefFlat_ParrotingTestRoll_Meas1.fits')\n p4,px4 = met.read4DFits(wdir+'161205_RefFlat_ParrotingTestRoll_Meas2.fits')\n\n #Construct baseline power spectra\n f1,pow1 = fourier.meanPSD(d1-d2,win=np.hanning,dx=dx1)\n f2,pow2 = fourier.meanPSD(d1-d2,win=np.hanning,dx=dx1,axis=1)\n \n #Construct parroted power spectra\n f3,pow3 = fourier.meanPSD(p1-p2,win=np.hanning,dx=dx1)\n f4,pow4 = fourier.meanPSD(p1-p2,win=np.hanning,dx=dx2,axis=1)\n f5,pow5 = fourier.meanPSD(p3-p4,win=np.hanning,dx=dx1)\n f6,pow6 = fourier.meanPSD(p3-p4,win=np.hanning,dx=dx2,axis=1)\n\n #Plot\n plt.loglog(f1,pow1/f1[0],label='Axial Baseline')\n plt.loglog(f2,pow2/f2[0],label='Azimuthal Baseline')\n plt.loglog(f3,pow3/f3[0],label='Pitch Axial')\n plt.loglog(f4,pow4/f4[0],label='Pitch Azimuthal')\n plt.loglog(f5,pow5/f5[0],label='Roll Axial')\n plt.loglog(f6,pow6/f6[0],label='Roll Azimuthal')\n plt.title('Residual Fringe Repeatability Impact')\n plt.xlabel('Frequency (1/mm)')\n plt.ylabel('Power ($\\mu$m$^2$ mm)')\n plt.grid()\n plt.legend(loc='lower left')\n\n return f1,pow1",
"def fig_2_11V2(x, y, z, first_poly = 4, complexity = 10, N = 7, method = 'OLS', seed = 42, lam = 0, folds = 5, save_fig = ''):\n errors = np.zeros((4, complexity + 1))\n bias = np.zeros(complexity + 1)\n variance = np.zeros(complexity + 1)\n z_real = FrankeFunction(x, y)\n\n complx = np.arange(first_poly, first_poly + complexity + 1, 1)\n\n MSE = np.zeros(complexity + 1)\n\n for i in range(complexity + 1):\n print(i)\n model = regression(x, y, z, k = first_poly + i, split = False, seed = seed)\n\n for j in range(N):\n _, MSE_R2D2, _, _, _, _ = model.k_cross(fold = folds, method2 = method, lam = lam, random_num = True)\n errors[:, i] += np.mean(MSE_R2D2, axis = 0)\n\n errors /= N\n\n print(errors)\n\n\n plt.title(method + ' Test vs Train error in k-fold with ' + str(folds) + '-folds')\n plt.plot(complx, errors[0], 'go--', label = 'Test', color = 'blue')\n plt.plot(complx, errors[2], 'go--', label = 'Training', color = 'red')\n #plt.ylim([np.min(errors_R2[2]*1.2), np.max(errors_R2[0]*1.2)])\n plt.legend()\n plt.xlabel('Polynomial maximum order', fontsize = 14)\n plt.ylabel('MSE', fontsize = 14)\n plt.savefig(results_dir + 'tradeoff2MSE' + method + save_fig + '.png')\n\n plt.show()\n\n plt.title(method + ' Test vs Train error in k-fold with ' + str(folds) + '-folds')\n plt.xlabel('Polynomial maximum order', fontsize = 14)\n plt.ylabel('R2', fontsize = 14)\n plt.plot(complx, errors[1], 'go--', label = 'Test', color = 'blue')\n plt.plot(complx, errors[3], 'go--', label = 'Training', color = 'red')\n #plt.ylim([np.min(errors_R2[3]*1.2), np.max(errors_R2[1]*1.2)])\n plt.legend()\n plt.tight_layout()\n plt.savefig(results_dir + 'tradeoff2R2' + method + save_fig + '.png')\n plt.show()",
"def plot_variables(self, n, show=False, diagnostics=False):\n\n if diagnostics:\n fig, ax = plt.subplots(5, 1, sharex = True, figsize = (10, 10))\n else:\n fig, ax = plt.subplots(2, 1, sharex = True, figsize = (10, 10))\n\n plt.subplots_adjust(hspace = 0)\n end = len(n.history[\"det F\"])\n epochs = np.arange(end)\n a, = ax[0].plot(epochs, n.history[\"det F\"], label = 'Training data')\n b, = ax[0].plot(epochs, n.history[\"det test F\"], label = 'Test data')\n # ax[0].axhline(y=5,ls='--',color='k')\n ax[0].legend(frameon = False)\n ax[0].set_ylabel(r'$|{\\bf F}_{\\alpha\\beta}|$')\n ax[0].set_title('Final Fisher info on test data: %.3f'%n.history[\"det test F\"][-1])\n ax[1].plot(epochs, n.history[\"loss\"])\n ax[1].plot(epochs, n.history[\"test loss\"])\n # ax[1].set_xlabel('Number of epochs')\n ax[1].set_ylabel(r'$\\Lambda$')\n ax[1].set_xlim([0, len(epochs)]);\n \n if diagnostics:\n ax[2].plot(epochs, n.history[\"det C\"])\n ax[2].plot(epochs, n.history[\"det test C\"])\n # ax[2].set_xlabel('Number of epochs')\n ax[2].set_ylabel(r'$|{\\bf C}|$')\n ax[2].set_xlim([0, len(epochs)]);\n \n # Derivative of first summary wrt to theta1 theta1 is 3rd dimension index 0\n ax[3].plot(epochs, np.array(n.history[\"dμdθ\"])[:,0,0]\n , color = 'C0', label=r'$\\theta_1$',alpha=0.5)\n \n \"\"\"\n # Derivative of first summary wrt to theta2 theta2 is 3rd dimension index 1\n ax[3].plot(epochs, np.array(n.history[\"dμdθ\"])[:,0,1]\n , color = 'C0', ls='dashed', label=r'$\\theta_2$',alpha=0.5)\n \"\"\"\n\n # Test Derivative of first summary wrt to theta1 theta1 is 3rd dimension index 0\n ax[3].plot(epochs, np.array(n.history[\"test dμdθ\"])[:,0,0]\n , color = 'C1', label=r'$\\theta_1$',alpha=0.5)\n \n \"\"\"\n # Test Derivative of first summary wrt to theta2 theta2 is 3rd dimension index 1\n ax[3].plot(epochs, np.array(n.history[\"test dμdθ\"])[:,0,1]\n , color = 'C1', ls='dashed', label=r'$\\theta_2$',alpha=0.5)\n ax[3].legend(frameon=False)\n \"\"\"\n\n ax[3].set_ylabel(r'$\\partial\\mu/\\partial\\theta$')\n # ax[3].set_xlabel('Number of epochs')\n ax[3].set_xlim([0, len(epochs)])\n\n # Mean of network output summary 1\n ax[4].plot(epochs, np.array(n.history[\"μ\"])[:,0],alpha=0.5)\n # Mean of test output network summary 1\n ax[4].plot(epochs, np.array(n.history[\"test μ\"])[:,0],alpha=0.5)\n ax[4].set_ylabel('μ')\n ax[4].set_xlabel('Number of epochs')\n ax[4].set_xlim([0, len(epochs)])\n \n\n print ('Maximum Fisher info on train data:',np.max(n.history[\"det F\"]))\n print ('Final Fisher info on train data:',(n.history[\"det F\"][-1]))\n \n print ('Maximum Fisher info on test data:',np.max(n.history[\"det test F\"]))\n print ('Final Fisher info on test data:',(n.history[\"det test F\"][-1]))\n\n if np.max(n.history[\"det test F\"]) == n.history[\"det test F\"][-1]:\n print ('Promising network found, possibly more epochs needed')\n\n plt.tight_layout()\n plt.savefig(f'{self.figuredir}variables_vs_epochs_{self.modelversion}.png')\n if show: plt.show()\n plt.close()",
"def test_exercise_2():\n dirname = os.path.dirname(os.path.realpath(__file__))\n df = pd.read_pickle(f\"{dirname}/material/data-consumption-function.pkl\")\n\n def construct_predicted_values(income, alpha, beta, gamma):\n return alpha + beta * income ** gamma\n\n mock_rslt = [-91.1933, 0.5691, 1.0204]\n income = df[\"realgdp\"].values\n df[\"realcons_pred\"] = construct_predicted_values(income, *mock_rslt)\n\n x = df.index.get_level_values(\"Year\")\n fig, ax = plt.subplots()\n ax.plot(x, df[\"realcons_pred\"], label=\"Predicted\")\n ax.plot(x, df[\"realcons\"], label=\"Observed\")",
"def build_sample_ace_problem_breiman85(N=200):\n x3 = numpy.random.standard_normal(N)\n x = scipy.special.cbrt(x3)\n noise = numpy.random.standard_normal(N)\n y = numpy.exp((x ** 3.0) + noise)\n return [x], y",
"def generate_polynomial():\n degree = numpy.random.choice(range(3, 7))\n x = numpy.linspace(-10, 10, 1000)\n coefficients = numpy.random.chisquare(3, size=degree) + 1\n coefficients *= numpy.random.choice([-1, 1], size=coefficients.shape)\n coefficients *= 0.5\n y = numpy.polyval(coefficients, x)\n add_noise(y, 0.1)\n return x, y",
"def plot_variance(self, ax):\n sigma = self.sigma\n S = self.S\n\n ax.plot(sigma/S**2, 'ko-', label='variance', lw=2.0)\n ax.set_yscale('log')\n ax.set_title(r'Variance $\\sigma^2/\\sigma_i^2$')\n ax.set_xlabel(r'$i$')\n ax.grid()"
] | [
"0.6384267",
"0.61533856",
"0.59631354",
"0.58542526",
"0.57613903",
"0.570605",
"0.57036245",
"0.5686213",
"0.56431866",
"0.56123537",
"0.55745536",
"0.5549876",
"0.5538616",
"0.55181545",
"0.5514749",
"0.55057865",
"0.5466694",
"0.5443141",
"0.5441746",
"0.5433322",
"0.54145753",
"0.5354365",
"0.5306916",
"0.5300054",
"0.5297128",
"0.52946544",
"0.5275056",
"0.5258131",
"0.5257122",
"0.5248868"
] | 0.66276705 | 0 |
Downloads the MD5 digests associated with the files in a resource. These are saved with the downloaded files in the cache and used to check if the files have been updated on the server | def get_digests(cls, resource):
result = resource.xnat_session.get(resource.uri + '/files')
if result.status_code != 200:
raise NiAnalysisError(
"Could not download metadata for resource {}"
.format(resource.id))
return dict((r['Name'], r['digest'])
for r in result.json()['ResultSet']['Result']) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def local_blob_digests(self):\n pass",
"def local_blob_digests(self):\n pass",
"def CalculateMd5OfEachFile(self, filedic):\n #for eachfiledic in self.fileTobeUploaded:\n fileobj = open(filedic[\"filepath\"], 'rb')\n buf = fileobj.read()\n hash = hashlib.md5()\n hash.update(buf)\n\n digest = hashlib.md5(buf).digest()\n md5enc = base64.b64encode(digest)\n md5tostr = md5enc.decode('utf-8')\n filedic[\"md5\"] = md5tostr\n fileobj.close()\n\n #pprint.pprint(self.fileTobeUploaded)",
"def get_file_hash (fullpath) : \n\n # This bit was sourced from Stack Overflow via Google, specifically:\n # http://stackoverflow.com/questions/1131220/get-md5-hash-of-a-files-without-open-it-in-python\n\n md5 = hashlib.md5()\n with open(fullpath,'rb') as f: \n for chunk in iter(lambda: f.read(512*md5.block_size), ''): \n md5.update(chunk)\n # Hexdigest is the safe varchar(32) style output\n return md5.hexdigest()",
"def blob_digests(self):\n pass",
"def blob_digests(self):\n pass",
"def compute_digest(path):\n hash = hashlib.sha512()\n for part in DiskCrawler.partial_reader(path, 4 * 1024 * 1024):\n hash.update(part)\n return hash.digest()",
"def downloadfile(self):\n req = requests.get(self.url, stream=True)\n mdsha256 = hashlib.sha256()\n with gzip.open(self.file_path, \"wb\") as gfile:\n for line in req.iter_lines():\n if line:\n gfile.write(line + b\"\\n\")\n mdsha256.update(line + b\"\\n\")\n\n with open(self.sha_file_name, \"wb\") as sfile:\n sfile.write(mdsha256.digest())\n\n sha256 = mdsha256.digest()\n if self.sha256 != sha256:\n self.sha256 = sha256\n print(\"File updated!\")\n else:\n print(\"File not updated!\")",
"def checksum(self):\n def stat_string(path):\n stat = os.stat(path)\n return '%s,%s' % (str(stat.st_size), str(stat.st_mtime))\n\n return dict((path, stat_string(path))\n for path in self.crawl()\n if os.path.exists(path))",
"def calculate(d):\r\n\r\n # Set correct slashes for the OS\r\n if sys.platform == 'windows':\r\n slash = '\\\\'\r\n elif sys.platform == 'linux':\r\n slash = '/'\r\n else:\r\n print('#Error. Unknown platform.')\r\n return\r\n\r\n print('Files in the current directory and their md5-hashes:\\n')\r\n i = 0\r\n assert i == 0, '#Error. Variable i != 0.'\r\n\r\n for i in range(len(d[2])): # Go through the list of files\r\n full_path = d[0]+slash+d[2][i]\r\n print(full_path) # Get the list of files with full paths\r\n print(md5(full_path))\r\n size(full_path)",
"def get_md5_hash(file_path: str) -> str:\n from hashlib import md5\n\n # local file\n if file_path.startswith('/'):\n return md5(open(file_path, 'rb').read()).hexdigest()\n\n # remote file\n httpresponse = url_is_alive(file_path)\n if not httpresponse:\n error_open_mess(file_path)\n return ''\n\n md5hash = md5()\n max_file_size = 100 * 1024 * 1024\n total_read = 0\n while True:\n data = httpresponse.read(4096)\n total_read += 4096\n\n if not data or total_read > max_file_size:\n break\n\n md5hash.update(data)\n\n httpresponse.close()\n return md5hash.hexdigest()",
"def digest(dirname, glob=None):\n md5 = hashlib.md5()\n if glob is None:\n fnames = [fname for _, fname in list_files(Path(dirname))]\n for fname in sorted(fnames):\n fname = os.path.join(dirname, fname)\n md5.update(open(fname, 'rb').read())\n else:\n fnames = Path(dirname).glob(glob)\n for fname in sorted(fnames):\n md5.update(fname.open('rb').read())\n return md5.hexdigest()",
"def hashes(cls, dir_path, filenames): \n block_size = 65536\n hasher = hashlib.md5()\n hashes = []\n for filename in filenames:\n try:\n with open(dir_path + '/' + filename, 'rb') as afile:\n buf = afile.read(block_size)\n while len(buf) > 0:\n hasher.update(buf)\n buf = afile.read(block_size)\n hashes.append(hasher.hexdigest())\n except (IOError, Exception):\n hashes.append(None)\n return hashes",
"def hash_file(self, filename_or_bytestream):\n\n try:\n for data in self._read_blocks(filename_or_bytestream):\n self._update(data)\n except OSError as e:\n print('digest: ', filename_or_bytestream, ': ', e.strerror, sep='', file=sys.stderr)\n return None\n return self._hexdigests()",
"def get_snapshot(self, absolute_path=\"\", files_subset=None):\n abs_paths = self._get_paths(absolute_path, files_subset)\n return {filepath: md5sum(filepath) for filepath in abs_paths}",
"def md5sum(fileSrc):\n md5 = hashlib.md5()\n try:\n with open(fileSrc, \"rb\") as fd:\n while True:\n content = fd.read(2**20)\n if not content:\n break\n md5.update(content)\n except IOError:\n print(fileSrc + \" Not found\")\n exit(1)\n return md5.hexdigest()",
"def calculate_md5_of_dir(self, verbose=0):\n directory = self.cfg['sharing_path']\n if verbose:\n start = time.time()\n md5Hash = hashlib.md5()\n if not os.path.exists(directory):\n self.stop(1, 'Error during calculate md5! Impossible to find \"{}\" in user folder'.format(directory))\n\n for root, dirs, files in os.walk(directory, followlinks=False):\n for names in files:\n filepath = os.path.join(root, names)\n rel_path = self.relativize_path(filepath)\n if rel_path in self.client_snapshot:\n md5Hash.update(self.client_snapshot[rel_path][1])\n md5Hash.update(hashlib.md5(filepath).hexdigest())\n else:\n hashed_file = self.hash_file(filepath)\n if hashed_file:\n md5Hash.update(hashed_file)\n md5Hash.update(hashlib.md5(filepath).hexdigest())\n else:\n print \"can't hash file: \", filepath\n\n if verbose:\n stop = time.time()\n print stop - start\n return md5Hash.hexdigest()",
"def svn_fs_file_md5_checksum(*args):\r\n return _fs.svn_fs_file_md5_checksum(*args)",
"def apkdownloadmirror_get_md5_sum(soup, **_):\n return soup.find(text=re.compile(r'File APK Md5:')).next.text.strip()",
"def md5_sum_file(path):\n with open(path, 'rb') as f:\n m = hashlib.md5()\n while True:\n data = f.read(8192)\n if not data:\n break\n m.update(data)\n return m.hexdigest()",
"def get_contents_hash(self):\n md5 = hashlib.md5()\n with closing(self.open()) as handle:\n for chunk in handle.chunks():\n md5.update(chunk)\n return md5.hexdigest()",
"def set_file_md5(self):\n md5 = hashlib.md5()\n with open(self.file_path, 'rb') as f:\n while True:\n block = f.read(2 ** 10) # One-megabyte blocks\n if not block:\n break\n md5.update(block)\n self.md5 = md5.hexdigest()",
"def file_digest(path, algo=hashlib.md5):\n checksum = algo()\n with open(path, 'rb') as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n checksum.update(chunk)\n return checksum.hexdigest()",
"def checksumFile(filename):\n return md5File(filename)",
"def _CalculateDigestHash(self, file_entry, data_stream_name):\n file_object = file_entry.GetFileObject(data_stream_name=data_stream_name)\n if not file_object:\n return\n\n try:\n file_object.seek(0, os.SEEK_SET)\n\n hasher_object = hashers_manager.HashersManager.GetHasher(u'sha256')\n\n data = file_object.read(self._READ_BUFFER_SIZE)\n while data:\n hasher_object.Update(data)\n data = file_object.read(self._READ_BUFFER_SIZE)\n\n finally:\n file_object.close()\n\n return hasher_object.GetStringDigest()",
"def calc_file_md5(file_path):\n hash_md5 = str()\n method = hashlib.md5()\n if not os.path.exists(file_path):\n logger.error(\"File(%s) don not exist, can not calculation file hash\" % file_path)\n return hash_md5\n\n with open(file_path, 'rb') as f:\n for chunk in read_chunks(f, 1024 * 1024):\n method.update(chunk)\n return method.hexdigest()",
"def fingerprint():\n files = (glob.glob(base_dir + '**/*.html') +\n glob.glob(base_dir + '*.html') +\n glob.glob(base_dir + 'core.js'))\n\n md5s = OrderedDict()\n\n for fil in sorted(files):\n name = fil[len(base_dir):]\n with open(fil) as fp:\n md5 = hashlib.md5(fp.read().encode('utf-8')).hexdigest()\n md5s[name] = md5\n\n template = \"\"\"\\\"\\\"\\\"DO NOT MODIFY. Auto-generated by script/fingerprint_frontend.\\\"\\\"\\\"\n\nFINGERPRINTS = {}\n\"\"\"\n\n result = template.format(json.dumps(md5s, indent=4))\n\n with open(fingerprint_file, 'w') as fp:\n fp.write(result)",
"def _get_local_md5(self, blocksize=2**20):\n m = hashlib.md5()\n with open(self.dst, \"rb\") as f:\n buf = f.read(blocksize)\n while buf:\n m.update(buf)\n buf = f.read(blocksize)\n return m.hexdigest()",
"def md5get(filename):\n with open(filename, mode='rb') as f:\n d = hashlib.md5()\n for buf in iter(partial(f.read, 128), b''):\n d.update(buf)\n return d.hexdigest()",
"def md5_checksum(file_path):\n with open(file_path, 'rb') as fh:\n m = hashlib.md5()\n while True:\n data = fh.read(8192)\n if not data:\n break\n m.update(data)\n return m.hexdigest()"
] | [
"0.6428023",
"0.6428023",
"0.6357223",
"0.63461673",
"0.6337604",
"0.6337604",
"0.6319121",
"0.62722975",
"0.62688446",
"0.6263971",
"0.6251614",
"0.6246528",
"0.6199179",
"0.61964184",
"0.61926603",
"0.6190321",
"0.61285555",
"0.6124874",
"0.6120538",
"0.60825723",
"0.6071798",
"0.606961",
"0.6046028",
"0.6039274",
"0.6028127",
"0.6015804",
"0.60007495",
"0.5995717",
"0.5980831",
"0.5979509"
] | 0.66637766 | 0 |
This creates a derived session in a way that respects whether the acquired session has been shared into another project or not. If we weren't worried about this, we could just use session = xnat_login.classes.MrSessionData(label=proc_session_id, parent=subject) | def _create_session(self, xnat_login, subject_id, visit_id):
uri = ('/data/archive/projects/{}/subjects/{}/experiments/{}'
.format(self.inputs.project_id, subject_id, visit_id))
query = {'xsiType': 'xnat:mrSessionData', 'label': visit_id,
'req_format': 'qa'}
response = xnat_login.put(uri, query=query)
if response.status_code not in (200, 201):
raise NiAnalysisError(
"Could not create session '{}' in subject '{}' in project '{}'"
" response code {}"
.format(visit_id, subject_id, self.inputs.project_id,
response))
return xnat_login.classes.MrSessionData(uri=uri,
xnat_session=xnat_login) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_session(self, **params):\n raise NotImplementedError('Should be implemented by a sub-class.')",
"def create_session(self):\n # TODO refactor bids_import pipeline to use same functions as dcm2bids below. To be done in different PR though\n if self.verbose:\n print(\"Creating visit \" + self.visit_label\n + \" for CandID \" + self.cand_id)\n\n column_names = ('CandID', 'Visit_label', 'CenterID', 'Current_stage')\n values = (self.cand_id, self.visit_label, str(self.center_id), 'Not Started')\n\n if self.project_id:\n column_names = column_names + ('ProjectID',)\n values = values + (str(self.project_id),)\n\n if self.cohort_id:\n column_names = column_names + ('CohortID',)\n values = values + (str(self.cohort_id),)\n\n self.db.insert(\n table_name='session',\n column_names=column_names,\n values=values\n )\n\n loris_session_info = self.get_session_info_from_loris()\n\n return loris_session_info",
"def _createSessionObject(self, request):\n # Preload necessary data items\n user = endpoints.get_current_user()\n if not user:\n raise endpoints.UnauthorizedException('Authorization required')\n user_id = user.email()\n # Get the conference entity\n conf = _getEntityByWebsafeKey(request.websafeConferenceKey,\n 'Conference')\n # Ensure that the current user is the conference organizer\n if user_id != conf.organizerUserId:\n raise endpoints.UnauthorizedException(\n 'Only the conference organizer can create a new session')\n # Verify that the speaker exists\n speaker = _getEntityByWebsafeKey(request.websafeSpeakerKey, 'Speaker')\n # Ensure that the user submitted the required name property\n if not request.name:\n raise endpoints.BadRequestException(\n \"Session 'name' field required\")\n # Copy SessionForm/ProtoRPC Message into dict\n data = {\n field.name: getattr(request, field.name) for field in\n request.all_fields()\n }\n # Remove data that isn't destined for the Session entity\n del data['websafeConferenceKey']\n del data['websafeSpeakerKey']\n del data['websafeKey']\n # Add default values for those missing in the data model\n for df in SESSION_DEFAULTS:\n if data[df] in (None, []):\n data[df] = SESSION_DEFAULTS[df]\n # Ensure the string version of typeOfSession is what is stored\n # in the NDB model\n data['typeOfSession'] = str(data['typeOfSession'])\n # Convert date from string to Date object\n if data['date'] is not None:\n try:\n data['date'] = datetime.strptime(\n data['date'][:10], '%Y-%m-%d').date()\n except:\n raise endpoints.BadRequestException(\n \"Invalid 'date' value\")\n # Convert startTime from string to Time object\n if data['startTime'] is not None:\n try:\n data['startTime'] = datetime.strptime(\n data['startTime'], '%H:%M').time()\n except:\n raise endpoints.BadRequestException(\n \"Invalid 'startTime' value\")\n # Create Session\n session = Session(**data)\n session.conference = conf.key\n session.speaker = speaker.key\n session.put()\n # Add the session key to the speaker's sessions list\n speaker.sessions.append(session.key)\n speaker.put()\n # Add a task to task queue which checks if the speaker of this session\n # should be the new featured speaker\n taskqueue.add(params={'websafeSpeakerKey': request.websafeSpeakerKey,\n 'websafeConferenceKey': request.websafeConferenceKey},\n url='/tasks/update_featured_speaker'\n )\n # Return SessionForm object\n return self._copySessionToForm(session)",
"def test_childSession(self):\n session = self.mdk.session()\n session.setProperty(\"other\", 123)\n session._context.tick()\n session._context.tick()\n session._context.tick()\n session.setTimeout(13.0)\n session2 = self.mdk.derive(session.externalize())\n self.assertNotEqual(session._context.traceId,\n session2._context.traceId)\n self.assertEqual(session2.getRemainingTime(), None)\n self.assertSessionHas(session2, session2._context.traceId, [1],\n other=123)",
"def perform_session_create(self, environ: str, session_parameters: dict) -> Session:\n session_parameters[\"mounts\"] = []\n attach_context = self.client.start_session(environ, session_parameters)\n\n # TODO should we record some of the request\n # headers e.g. `REMOTE_ADDR`, `HTTP_USER_AGENT`, `HTTP_REFERER` for analytics?\n\n return Session.objects.create(\n project=self.project,\n url=attach_context.url,\n execution_id=attach_context.execution_id,\n client_class_id=self.client.class_id,\n )",
"def create_new_session(sessions, segmeta):\n # Find an available session id\n new_sid = 0\n while new_sid in [s[0].meta.sessionid for s in sessions.values()]:\n new_sid += 1\n # Create meta and fill in information of the file\n meta = MetaInfo(segmeta.filename, segmeta.segmentid, new_sid)\n sp = snc_parameters(meta.segsize, 0.01, 16, 64, 1280, BAND_SNC, 1, 1, 0, -1)\n meta.set_snc_params(sp)\n # Fork a child process and build pipe between parent and child\n session = Session(meta)\n (fdp, fdc) = mp.Pipe()\n session.fdp = fdp\n session.fdc = fdc\n logging.info(\"New session created, ID: %d \" % (new_sid,))\n print(session.meta)\n # Fork a process to serve the clients of the session\n child = mp.Process(target=session.main)\n child.start()\n session.fdc.close() # Close parent's fdc\n sessions[(segmeta.filename, segmeta.segmentid)] = (session, child)\n return session",
"def test_newSession(self):\n session = self.mdk.session()\n session2 = self.mdk.session()\n self.assertSessionHas(session, session._context.traceId, [0])\n self.assertSessionHas(session2, session2._context.traceId, [0])\n self.assertNotEqual(session._context.traceId,\n session2._context.traceId)",
"def request_session(self):\n if not hasattr(self, \"_request_session\"):\n rqsid = self.shared_vars.pop(\"rqsid\", \"\")\n rqses = self.request_session_manager.pop_request_session(rqsid)\n\n if not rqses:\n if self.is_action():\n del session['VDOM_API_SESSIONS']\n raise RequestSessionDoesntExist\n\n rqses = self.request_session_manager.create_request_session()\n\n else:\n uuid = rqses[\"rqsid_uuid\"]\n if not self.verify_request_session_key(rqsid, uuid):\n del session['VDOM_API_SESSIONS']\n raise RequestSessionInvalidKey\n\n self._request_session = rqses\n\n return self._request_session",
"def new_session_loaded(self):\n session = self.parent.session\n if session is None: return None\n #logger.debug(\"LOADING NEW SESSION\")\n self.figure.new_session(session)\n self.refresh_table()\n self.summarize_current_table()\n self.refresh_plots()\n self.update_fitting_options()\n return None",
"def create_session(self, transport):\n session = self.SESSION_CLS(self, transport, 0, self.message_mgr)\n self.session = session\n return session",
"def create_session(self, transport):\n session_id = self.session_id_allocator.allocate()\n session = self.SESSION_CLS(self, transport, session_id, self.message_mgr)\n self.sessions[session.id] = session\n return session",
"def _set_session(self):\n self.__session = sessionmaker(bind=self.__engine)()",
"def create_session(self, transport):\n session = self.SESSION_CLS(self, transport, 0)\n self.session = session\n return session",
"def _start_session(self, msg, trigger):\n form = trigger.xform\n context = trigger.context\n language = context.get('_lang') or (msg.contact.language if msg.contact else None)\n select_text_mode = context.get('_select_text_mode')\n now = datetime.utcnow()\n \n # start session in touchforms\n config = XFormsConfig(form_path=form.file.path, \n language=language,\n session_data=context)\n try:\n session_id, responses = tfsms.start_session(config)\n except Exception:\n # this is super ghetto, but currently touchforms fails\n # hard if it can't find the language. So as a fallback just\n # try without specifying. If this failed for any other \n # reason it will just fail again.\n config.language = \"\"\n session_id, responses = tfsms.start_session(config)\n \n # save session in our data models\n session = XFormsSession(start_time=now, modified_time=now, \n session_id=session_id,\n connection=msg.connection, ended=False, \n trigger=trigger, select_text_mode=select_text_mode)\n session.save()\n router_factory.set(session_id, self.router)\n return session, responses",
"def create_session(self, transport):\n session_id = self.session_id_allocator.allocate()\n session = self.SESSION_CLS(self, transport, session_id)\n self.sessions[session.id] = session\n return session",
"def _create_session_data(self, abs_path, sess_root):\n sess_path = os.path.join(abs_path, sess_root)\n if not os.path.exists(sess_path):\n os.makedirs(sess_path)\n sess_id = len(os.listdir(sess_path))\n sess_path = os.path.join(sess_path, str(sess_id))\n print(\"SESSION PATH:\", sess_path)\n print(\"SESSION ID:\", sess_id) \n return sess_id, sess_path",
"def init_session(self) -> Tuple[str, str]:\n study_id = self.storage.create_study(sample_study_spec())\n session = sample_session(study_id=study_id)\n self.storage.create_session(session)\n return study_id, session.id",
"def new_session(self):\n return self.Session()",
"def _new_session(self, username_key=None, **attributes):\n for key in ['username', 'token', 'tenant_id']:\n if attributes.get(key, None) is None:\n attributes[key] = key + \"_\" + text_type(uuid4())\n if 'expires' not in attributes:\n attributes['expires'] = (\n datetime.utcfromtimestamp(self._clock.seconds())\n + timedelta(days=1)\n )\n session = Session(**attributes)\n if username_key is None:\n username_key = session.username\n self._username_to_token[username_key] = session.token\n self._token_to_session[session.token] = session\n self._tenant_to_token[session.tenant_id] = session.token\n return session",
"def session(self, request: HttpRequest) -> Job:\n job = Job.objects.create(\n project=self,\n creator=request.user if request.user.is_authenticated else None,\n method=JobMethod.session.name,\n params=dict(container_image=self.container_image),\n description=f\"Session for project '{self.name}'\",\n )\n job.add_user(request)\n return job",
"def create_session(self):\n self._session = self.create_scoped_session()\n self.session = self._session()",
"def select_session(base='/groups/svoboda/svobodalab/users/Aaron', session_class='Spine'):\n if session_class == 'Spine':\n from prep.SpineSession import SpineSession as Session\n elif session_class == 'Base':\n from prep.Session import Session as Session\n elif session_class == 'Vis':\n from prep.VisSession import VisSession as Session\n\n def list_dirs(path):\n return [name for name in os.listdir(path) if os.path.isdir(os.path.join(path, name))]\n\n def list_sessions(animal):\n dates.options = sorted([x[1] for x in dir_list if x[2] == animal])\n if dates.options is not None:\n dates.value = dates.options[0]\n list_runs(dates.options[0])\n\n def list_runs(date):\n animal = animals.value\n if animal is not None:\n dir1 = [x[0] for x in dir_list if x[1] == date and x[2] == animal]\n if dir1 is not None:\n dir1 = dir1[0]\n runs.options = sorted(list_dirs(os.path.join(base, dir1)))\n\n def list_files(run):\n animal = animals.value\n date = dates.value\n dir1 = [x[0] for x in dir_list if x[1] == date and x[2] == animal]\n if dir1 is not None:\n dir1 = dir1[0]\n s = os.path.join(base, dir1, '')\n files.options = sorted(map(lambda y: os.path.basename(y)[len(run):-2], glob.glob(s + run + '*.p')))\n files.options = ['None'] + list(files.options)\n\n dirs = list_dirs(base)\n r = re.compile(r\"([0-9]{6})_(.*)\")\n dir_list = [(m.group(0), m.group(1), m.group(2)) for l in dirs for m in [r.search(l)] if m]\n animal_list = sorted(list(set(map(lambda x: x[2], dir_list))))\n animals = widgets.Dropdown(options=animal_list)\n wA = interactive(list_sessions, animal=animals, button_style='danger')\n dates = widgets.Dropdown(options=[])\n wS = interactive(list_runs, date=dates)\n runs = widgets.Dropdown(options=[], )\n wR = interactive(list_files, run=runs)\n files = widgets.Dropdown(options=[], )\n wF = interactive(lambda my_file: my_file, my_file=files)\n load = widgets.Button(description='Load', button_style='danger')\n ipython_display(wA, wS, wR, wF, load)\n\n def on_button_clicked(b):\n animal = wA.kwargs['animal']\n date = wS.kwargs['date']\n run = wR.kwargs['run']\n file_name = wF.kwargs['my_file']\n b.session = Session(basePath=base, animalID=animal, date=date, run=run)\n if file_name is not None and not file_name == 'None':\n b.session = b.session.load(file_name)\n\n load.on_click(on_button_clicked)\n return load",
"def make_new_session():\n session = Session.objects.create(uuid=str(uuid4()), container_id=None)\n return session.id",
"def start_session(session, domain, contact, app, form, case_id=None, yield_responses=False):\n # NOTE: this call assumes that \"contact\" will expose three\n # properties: .raw_username, .get_id, and .get_language_code\n session_data = CaseSessionDataHelper(domain, contact, case_id, app, form).get_session_data(\n COMMCONNECT_DEVICE_ID)\n\n kwargs = {}\n if is_commcarecase(contact):\n kwargs['restore_as_case_id'] = contact.case_id\n else:\n kwargs['restore_as'] = contact.raw_username\n\n if app and form:\n session_data.update(get_cloudcare_session_data(domain, form, contact))\n\n language = contact.get_language_code()\n config = XFormsConfig(form_content=form.render_xform().decode('utf-8'),\n language=language,\n session_data=session_data,\n domain=domain,\n **kwargs)\n\n session_start_info = tfsms.start_session(config)\n session.session_id = session_start_info.session_id\n session.save()\n responses = session_start_info.first_responses\n\n if len(responses) > 0 and responses[0].status == 'http-error':\n session.mark_completed(False)\n raise TouchformsError('Cannot connect to touchforms.')\n\n # Prevent future update conflicts by getting the session again from the db\n # since the session could have been updated separately in the first_responses call\n session = SQLXFormsSession.objects.get(pk=session.pk)\n if yield_responses:\n return (session, responses)\n else:\n return (session, _responses_to_text(responses))",
"def session(self):\n ssn = pn_session(self._impl)\n if ssn is None:\n raise (SessionException(\"Session allocation failed.\"))\n else:\n return Session(ssn)",
"def _session(self):\n if self.session is None:\n self.session = create_session(self.config, self.auth)\n return self.session",
"def new_session(self):\n body = yield from self._fetch_json(URL_LOGIN, self._new_session_data)\n self.sma_sid = jmespath.search('result.sid', body)\n if self.sma_sid:\n return True\n\n msg = 'Could not start session, %s, got {}'.format(body)\n\n if body.get('err'):\n if body.get('err') == 503:\n _LOGGER.error(\"Max amount of sesions reached\")\n else:\n _LOGGER.error(msg, body.get('err'))\n else:\n _LOGGER.error(msg, \"Session ID expected [result.sid]\")\n return False",
"def session(self):",
"def test_new_session_promotion(self):\r\n cursor = self.db.cursor()\r\n incookie = Cookie()\r\n incookie['trac_session'] = '123456'\r\n outcookie = Cookie()\r\n req = Mock(authname='john', base_path='/', incookie=incookie,\r\n outcookie=outcookie)\r\n session = Session(self.env, req)\r\n self.assertEqual('john', session.sid)\r\n session.save()\r\n\r\n cursor.execute(\"SELECT sid,authenticated FROM session\")\r\n self.assertEqual(('john', 1), cursor.fetchone())\r\n self.assertEqual(None, cursor.fetchone())",
"def test_create_session(self):\n study_id = self.storage.create_study(sample_study_spec())\n\n session = sample_session(study_id=study_id)\n self.storage.create_session(session)\n\n self.assertEqual(self.storage.get_session(study_id, session.id), session)"
] | [
"0.6008737",
"0.59029317",
"0.5520083",
"0.55035335",
"0.5414367",
"0.536214",
"0.53301024",
"0.5318129",
"0.5237338",
"0.5228435",
"0.52107745",
"0.516103",
"0.5145125",
"0.51413447",
"0.5140846",
"0.5119847",
"0.51185715",
"0.5112657",
"0.5101649",
"0.50704604",
"0.50569475",
"0.50530136",
"0.50447285",
"0.5012363",
"0.49796885",
"0.4970847",
"0.49708095",
"0.49694005",
"0.49683565",
"0.49668452"
] | 0.6869065 | 0 |
Caches a single dataset (if the 'path' attribute is accessed and it has not been previously cached, for example) | def cache(self, dataset, prev_login=None):
if dataset.archive is not self:
raise NiAnalysisError(
"{} is not from {}".format(dataset, self))
assert dataset.uri is not None
with self.login(prev_login=prev_login) as xnat_login:
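            # Extract the session and scan IDs from the dataset's XNAT URI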
sess_id, scan_id = re.match(
r'/data/experiments/(\w+)/scans/(.*)',
dataset.uri).groups()
xsession = xnat_login.experiments[sess_id]
xdataset = xsession.scans[scan_id]
xresource = XnatSource.get_resource(xdataset, dataset)
cache_path = self.cache_path(dataset)
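            # Download the scan resource into the local cache via a temporary directory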
XnatSource.download_dataset(
tempfile.mkdtemp(), xresource, xdataset, dataset,
xsession.label, cache_path)
return cache_path | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_cached(cache_path, in_dir):\n\n print(\"Creating dataset from the files in: \" + in_dir)\n\n # If the object-instance for DataSet(in_dir=data_dir) already\n # exists in the cache-file then reload it, otherwise create\n # an object instance and save it to the cache-file for next time.\n\n cache=Cache()\n dataset = cache.cache_data(cache_path=cache_path,\n fn=Dataset, in_dir=in_dir)\n\n return dataset",
"def _retrieveCachedData(self):",
"def test_cache_dataset():\n train = (\n (\"Lorem ipsum dolor sit amet\", 3, 4.5),\n (\"Sed ut perspiciatis unde\", 5, 5.5),\n (\"Lorem ipsum dolor sit amet\", 3, 4.5),\n (\"Sed ut perspiciatis unde\", 5, 5.5),\n (\"Lorem ipsum dolor sit amet\", 3, 4.5),\n (\"Sed ut perspiciatis unde\", 5, 5.5),\n (\"Lorem ipsum dolor sit amet\", 3, 4.5),\n (\"Sed ut perspiciatis unde\", 5, 5.5),\n (\"Lorem ipsum dolor sit amet\", 3, 4.5),\n (\"Sed ut perspiciatis unde\", 5, 5.5))\n\n t = TabularDataset(train, cache=True)\n\n assert len(t.train.cached_data) == 0\n for i, _ in enumerate(t.train):\n assert len(t.train.cached_data) == i + 1",
"def cache(ds, directory=None, batch_size=1, cache_key=None):\n if directory is None:\n logger.warning(\"Caching dataset in batches of size %d into memory.\", batch_size)\n cache_file = ''\n else:\n if cache_key is None:\n cache_key = str(int(time.time()))\n os.makedirs(directory, exist_ok=True)\n cache_file = os.path.join(directory, cache_key)\n if os.path.exists(cache_file + \".index\"):\n logger.info(\"Loading elements from existing cache in directory '%s' with key '%s'.\", directory, cache_key)\n else:\n logger.info(\"Caching dataset in batches of size %d to directory '%s' with key '%s'.\", batch_size, directory, cache_key)\n\n return (ds.batch(batch_size)\n .prefetch(TF_AUTOTUNE)\n .cache(cache_file)\n .prefetch(TF_AUTOTUNE)\n .unbatch())",
"def _cache(self, path):\n # We import these here because importing them is slow and\n # a significant fraction of numpy's total import time.\n import shutil\n from urllib.request import urlopen\n\n upath = self.abspath(path)\n\n # ensure directory exists\n if not os.path.exists(os.path.dirname(upath)):\n os.makedirs(os.path.dirname(upath))\n\n # TODO: Doesn't handle compressed files!\n if self._isurl(path):\n with urlopen(path) as openedurl:\n with _open(upath, 'wb') as f:\n shutil.copyfileobj(openedurl, f)\n else:\n shutil.copyfile(path, upath)\n return upath",
"def cache_data(name, data):\n cache_path = get_cachefile('%s.cache' % name)\n with open(cache_path, 'wb') as f:\n pickle.dump(data, f)",
"def cache_matrio_data(filename):\n prefix = \"https://data.matr.io/3/api/v1/file\"\n key = MATRIO_DATA_KEYS[filename]\n if not os.path.isfile(filename):\n cache_download(\"{}/{}/download\".format(prefix, key), filename)",
"def cache_dataset(dataset, prefix):\n if not os.path.exists(nmt._constants.CACHE_PATH):\n os.makedirs(nmt._constants.CACHE_PATH)\n src_data = np.concatenate([e[0] for e in dataset])\n tgt_data = np.concatenate([e[1] for e in dataset])\n src_cumlen = np.cumsum([0]+[len(e[0]) for e in dataset])\n tgt_cumlen = np.cumsum([0]+[len(e[1]) for e in dataset])\n np.savez(os.path.join(nmt._constants.CACHE_PATH, prefix + '.npz'),\n src_data=src_data, tgt_data=tgt_data,\n src_cumlen=src_cumlen, tgt_cumlen=tgt_cumlen)",
"def set_to_cache(self, url, data):\n cache_key, cache_lookup = self.get_cacheable_info(url)\n MEM_CACHE[cache_key][cache_lookup] = (data, time.time())",
"def cache_path(self):",
"def cache_path(self):",
"def get_data(self, name):\n assert name, \"Must input a valid dataset name.\"\n try:\n return self.data[\"dataset\"][name]\n except KeyError:\n raise KeyError(\"The dataset \\'{}\\' does not exist in the cache.\".format(name))",
"def _load_cached_2to3(self, path, cache):\n try:\n cache_stats = os.stat(cache)\n source_stats = os.stat(path)\n except OSError as e:\n if e.errno == errno.ENOENT: # FileNotFoundError\n self.logger.debug('Cache miss: %s' % cache)\n return None\n else:\n raise\n\n if cache_stats.st_mtime <= source_stats.st_mtime:\n self.logger.debug('Cache miss (stale): %s' % cache)\n return None\n\n self.logger.debug(\"Cache hit: %s\" % cache)\n return super().get_data(cache)",
"def getData(self, local_cache):",
"def get_data(self, path):\n\n if path == self.original_path:\n cache = self._2to3_cache_path(path)\n data = self._load_cached_2to3(path, cache)\n if data is None:\n output, encoding = self._refactor_2to3(path)\n data = bytearray(output, encoding or sys.getdefaultencoding())\n self.set_data(cache, data)\n return data\n\n else:\n return super().get_data(path)",
"def cache_item(self, index: int, data):\n # Unfortunately, we can't put tensors directly in mongo so we must\n # pickle them...\n data_to_store = io.BytesIO()\n torch.save(data, data_to_store)\n data_to_store.seek(0)\n\n self.mongo_database.cache.insert_one(\n {\n \"session_id\": self.session_id,\n \"sample_identifier\": index,\n \"sample\": data_to_store.read(),\n }\n )",
"def get(self, path):\n\t\treturn self.cache.get(path)",
"def read_data_cache(self):\n if os.path.exists(self.cache_filename):\n return self.read_data_cache_file()\n else:\n data = self._empty_data()\n self.write_data_cache(data)\n return data",
"def fetch_and_cache(data_url, file, data_dir=\"data\", force=False):\n data_dir = Path(data_dir)\n data_dir.mkdir(exist_ok = True)\n file_path = data_dir / Path(file)\n if force and file_path.exists():\n file_path.unlink()\n if force or not file_path.exists():\n print('Downloading...', end=' ')\n resp = requests.get(data_url)\n with file_path.open('wb') as f:\n f.write(resp.content)\n print('Done!')\n last_modified_time = time.ctime(file_path.stat().st_mtime)\n else:\n last_modified_time = time.ctime(file_path.stat().st_mtime)\n print(\"Using cached version that was downloaded (UTC):\", last_modified_time)\n return file_path",
"def __cached(self):\n # already cached stuff\n if self._cached is None:\n self._cached = Cached(self.resource)\n return self._cached",
"def save_cache(self, data, URL):\n\n cache_file = self.get_cache_file_path(URL)\n\n if cache_file.parent.exists():\n with open(cache_file, 'wb') as f:\n f.write(data)\n else:\n os.mkdir(cache_file.parent)\n with open(cache_file, 'wb') as f:\n f.write(data)",
"def reload_cache(self):\n self.data = self.read_data_cache()",
"def use_cached_files(self, cache_key):\r\n pass",
"def cache(cache_path):\n def cache_decorator(generator):\n def wrapper():\n return cached(cache_path, generator)\n return wrapper\n return cache_decorator",
"def get_cached_dataset_entry(dataset_id, # type: str\n format='csv', # type: str\n platform_id='public', # type: str\n base_url=None, # type: str\n cache_root=None # type: Union[str, Path]\n ):\n # type: (...) -> CacheEntry\n client = ODSClient(platform_id=platform_id, base_url=base_url)\n return client.get_cached_dataset_entry(dataset_id=dataset_id, format=format, cache_root=cache_root)",
"def cache(self):\n\n if self.url and not self.photo:\n result = urllib.urlretrieve(self.url)\n self.photo.save(\n os.path.basename(self.url),\n File(open(result[0]))\n )\n self.save()",
"def get_cache_path(self):",
"def get_cache_path(self):",
"def get_cached(factory, cache_file_name, **kwargs):\n if os.path.exists(cache_file_name):\n _logger.info('Loading {}'.format(cache_file_name))\n cached = deserialize(cache_file_name)\n return cached\n\n _logger.info('Creating {}'.format(cache_file_name))\n data = factory()\n serialize(cache_file_name, data, **kwargs)\n return data",
"def cache_data(self):\n # Initialize key variables\n result = self.data['cache_data']\n return result"
] | [
"0.7209974",
"0.6803537",
"0.674969",
"0.66327983",
"0.65216607",
"0.6515712",
"0.65049577",
"0.6504847",
"0.649996",
"0.6457507",
"0.6457507",
"0.6327937",
"0.63223124",
"0.6298402",
"0.626675",
"0.62393725",
"0.62176406",
"0.62156224",
"0.62143654",
"0.61547595",
"0.6144451",
"0.6137971",
"0.6136507",
"0.61280435",
"0.61071056",
"0.61019963",
"0.6096949",
"0.6096949",
"0.603",
"0.6008549"
] | 0.73272073 | 0 |
Return the tree of subject and session information within a project in the XNAT archive | def get_tree(self, subject_ids=None, visit_ids=None):
# Convert subject ids to strings if they are integers
if subject_ids is not None:
subject_ids = [
('{}_{:03d}'.format(self.project_id, s)
if isinstance(s, int) else s) for s in subject_ids]
# Add derived visit IDs to list of visit ids to filter
if visit_ids is not None:
visit_ids = visit_ids + [i + self.PROCESSED_SUFFIX
for i in visit_ids]
subjects = []
sessions = defaultdict(list)
with self.login() as xnat_login:
xproject = xnat_login.projects[self.project_id]
visit_sessions = defaultdict(list)
# Create list of subjects
for xsubject in xproject.subjects.itervalues():
# This assumes that the subject ID is prepended with
# the project ID
subj_id = xsubject.label[(len(self.project_id) + 1):]
if subj_id == XnatArchive.SUMMARY_NAME:
continue
if not (subject_ids is None or subj_id in subject_ids):
continue
logger.debug("Getting info for subject '{}'"
.format(subj_id))
sessions = {}
proc_sessions = []
# Get per_session datasets
for xsession in xsubject.experiments.itervalues():
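                    # The visit ID is the remainder of the session label after the project and subject parts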
visit_id = '_'.join(xsession.label.split('_')[2:])
if visit_id == XnatArchive.SUMMARY_NAME:
continue
if not (visit_ids is None or visit_id in visit_ids):
continue
derived = xsession.label.endswith(
self.PROCESSED_SUFFIX)
session = Session(subj_id, visit_id,
datasets=self._get_datasets(
xsession, 'per_session',
subject_id=subj_id,
visit_id=visit_id,
derived=derived),
fields=self._get_fields(
xsession, 'per_session',
subject_id=subj_id,
visit_id=visit_id,
derived=derived),
derived=None)
if derived:
proc_sessions.append(session)
else:
sessions[visit_id] = session
visit_sessions[visit_id].append(session)
for proc_session in proc_sessions:
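                    # Match each derived session to its acquired counterpart by stripping the processed suffix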
visit_id = proc_session.visit_id[:-len(
self.PROCESSED_SUFFIX)]
try:
sessions[visit_id].derived = proc_session
except KeyError:
raise NiAnalysisError(
"No matching acquired session for derived "
"session '{}_{}_{}'".format(
self.project_id,
proc_session.subject_id,
proc_session.visit_id))
# Get per_subject datasets
subj_summary_name = self.get_labels(
'per_subject', self.project_id, subj_id)[1]
try:
xsubj_summary = xsubject.experiments[
subj_summary_name]
except KeyError:
subj_datasets = []
subj_fields = []
else:
subj_datasets = self._get_datasets(
xsubj_summary, 'per_subject',
subject_id=subj_id)
subj_fields = self._get_fields(
xsubj_summary, 'per_subject',
subject_id=subj_id)
subjects.append(Subject(subj_id,
sorted(sessions.values()),
datasets=subj_datasets,
fields=subj_fields))
# Create list of visits
visits = []
for visit_id, v_sessions in visit_sessions.iteritems():
(_, visit_summary_sess_name) = self.get_labels(
'per_visit', self.project_id, visit_id=visit_id)
# Get 'per_visit' datasets
try:
xvisit_summary = xproject.experiments[
visit_summary_sess_name]
except KeyError:
visit_datasets = []
visit_fields = {}
else:
visit_datasets = self._get_datasets(
xvisit_summary, 'per_visit', visit_id=visit_id)
visit_fields = self._get_fields(
xvisit_summary, 'per_visit', visit_id=visit_id)
visits.append(Visit(visit_id, sorted(v_sessions),
datasets=visit_datasets,
fields=visit_fields))
# Get 'per_project' datasets
(proj_summary_subj_name,
proj_summary_sess_name) = self.get_labels('per_project',
self.project_id)
try:
xproj_summary = xproject.subjects[
proj_summary_subj_name].experiments[proj_summary_sess_name]
except KeyError:
proj_datasets = []
proj_fields = []
else:
proj_datasets = self._get_datasets(xproj_summary,
'per_project')
proj_fields = self._get_fields(xproj_summary,
'per_project')
if not subjects:
raise NiAnalysisError(
"Did not find any subjects matching the IDs '{}' in "
"project '{}' (found '{}')"
.format(
("', '".join(subject_ids)
if subject_ids is not None else ''),
self.project_id,
"', '".join(
s.label for s in xproject.subjects.values())))
if not sessions:
raise NiAnalysisError(
"Did not find any sessions matching the IDs '{}'"
"(in subjects '{}') for project '{}'"
.format(
("', '".join(visit_ids)
if visit_ids is not None else ''),
"', '".join(
s.label for s in xproject.experiments.values()),
self.project_id))
return Project(sorted(subjects), sorted(visits),
datasets=proj_datasets, fields=proj_fields) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def gen_subject_info_tree(subject_info_pyxb, authn_subj, include_duplicates=False):\n\n class State:\n pass\n\n state = State()\n\n state.subject_info_pyxb = subject_info_pyxb\n state.include_duplicates = include_duplicates\n state.visited_set = set()\n state.tree = SubjectInfoNode(\"Root\", TYPE_NODE_TAG)\n\n _add_subject(state, state.tree, authn_subj)\n symbolic_node = state.tree.add_child(\"Symbolic\", TYPE_NODE_TAG)\n _add_subject(state, symbolic_node, d1_common.const.SUBJECT_AUTHENTICATED)\n _trim_tree(state)\n\n return state.tree",
"def index(self):\n return {'projects': [p for p in self.server.projects.values()]}",
"def _get_project_tina_entries(self,pool='archive',refresh=False,path_folder=None):\n\t\tif not path_folder: path_folder = self.catalog_path\n\t\tif not refresh:\n\t\t\ttry:\n\t\t\t\treturn self.tina_archive_entries\n\t\t\texcept: pass \n\t\tself.tina_archive_entries = Tina.tina_find(\n\t\t\tpath_folder=path_folder,\n\t\t\tapplication=self.application,\n\t\t\tstrat='A',\n\t\t\tskip_filter=self.skip_filter)\n\t\treturn self.tina_archive_entries",
"def query_subject_ids(self, subject_id, nodes=None):\n if nodes == None:\n nodes = self.list_nodes()\n elif isinstance(nodes, str):\n nodes = [nodes]\n\n if \"case\" in nodes:\n subject_node, subject_prop = \"case\", \"case_ids\"\n else:\n subject_node, subject_prop = \"subject\", \"subject_ids\"\n\n # if projects == None: #if no projects specified, get node for all projects\n # projects = list(json_normalize(self.sub.query(\"\"\"{project (first:0){project_id}}\"\"\")['data']['project'])['project_id'])\n # elif isinstance(projects, str):\n # projects = [projects]\n\n query_args = '{}:\"{}\"'.format(subject_prop, subject_id)\n results = {}\n for node in nodes:\n res = self.paginate_query(\n node=node, props=[\"project_id\", \"id\", \"submitter_id\"], args=query_args\n )\n if len(res[\"data\"][node]) > 0:\n results[node] = res[\"data\"][node]\n\n data = {}\n for node in list(results):\n # uuids = [rec['id'] for rec in results[node]]\n dfs = []\n for rec in results[node]:\n project_id = rec[\"project_id\"]\n uuid = rec[\"id\"]\n program, project = project_id.split(\"-\", 1)\n rec = self.sub.export_record(\n program=program,\n project=project,\n uuid=uuid,\n fileformat=\"tsv\",\n filename=None,\n )\n # str_list = rec.split('\\r\\n')\n # headers = str_list[0].split('\\t')\n # data = str_list[1].split('\\t')\n # df = pd.DataFrame(data,columns=headers)\n dfs.append(pd.read_csv(StringIO(rec), sep=\"\\t\", header=0))\n df = pd.concat(dfs, ignore_index=True, sort=False)\n data[node] = df\n\n return data\n\n # visits = list(set([item for sublist in [list(set(list(df['visit_id']))) for df in data.values()] for item in sublist if not pd.isnull(item)]))",
"def get_projects(selection):\n project=[]\n\n # project with keyname\n\n if 'project' in selection.facets:\n project+=selection.facets['project']\n\n\n # project without keyname\n\n # WARNING\n #\n # The code below uses sdinference and query the database to retrieve ESGF parameters.\n # Doing those things here may raise circular dependencies\n # as well as making the whole thing very complex.\n #\n # We do this to make this syntax work (i.e. project value without key)\n # synda search GeoMIP\n #\n # Note that this syntax always works (i.e. load the project level default file), even without this code.\n # synda search project=GeoMIP\n #\n #\n pending_projects=sdearlystreamutils.get_facet_values_early([selection.facets],'project',extract_item=True) # project without keyname or project as part of an identifier.\n\n li=pending_projects+project\n\n li=list(set(li)) # remove duplicate\n\n return li",
"def queryFlywheel(project):\n\n # Create info dict with entries for each subject.\n info = dict()\n\n # Loop through subjects in project\n #for sub in subjects:\n for sub in project.subjects():\n\n # Loop through sessions in subject\n for ses in sub.sessions():\n ses = ses.reload()\n\n # Loop through acquisitions in session\n for acq in ses.acquisitions():\n acq = acq.reload()\n\n # Loop through files in acquisition\n for f in acq.files:\n \n # Skip over non-nifti files\n if f.type != 'nifti':\n next\n\n # Get Flywheel fileId to use as unique identifier\n fileId = f.id\n\n # Try to get timestamp (sometimes DateTime field isn't present.) \n try:\n timestamp = f.info['AcquisitionDateTime']\n except KeyError:\n try:\n timestamp = f.info['AcquisitionDate']\n # Set to None if field isn't present\n except:\n timestamp = pd.NaT\n \n # Try to get series number (sometimes field isn't present.) \n try:\n seriesNum = f.info['SeriesNumber']\n # Set to None if field isn't present\n except:\n np.NaN \n # Add the folowing metadata to study info dict:\n # fileID: [subId, sesId, acqLabel, fileName, seriesNum, timestamp]\n info[fileId] = [sub.label, ses.label, acq.label, f.name, seriesNum, timestamp]\n \n # Return project info dict\n return info",
"def project_node():",
"def projectInfo(userFolder):\n outstring = '<h2>{}</h2>'.format(projectname)\n\n if userFolder==\"user_admin\":\n outstring += '<h3>Users:</h3><p class=\"data\">'\n usernames = re.split(\",\", userstate.list())\n for uname in usernames:\n outstring += ' - {}<br />'.format(uname)\n outstring += '</p>'\n outstring += '<p><a href=\"javascript:getPath(\\'N\\')\">Create New User</a></p>'\n return outstring\n\n outstring += '<ul class=\"topmenu\"><li class=\"topitem\"><a href=\"javascript:getPath(\\'C\\')\">Create new worktable</a></li>'\n outstring += '<li class=\"topitem\"><a href=\"javascript:getPath(\\'S\\')\">Create new service</a></li>'\n outstring += '<li class=\"topitem\"><a href=\"javascript:getPath(\\'U\\')\">Browse user files</a></li>'\n outstring += '<li class=\"topitem\"><a href=\"javascript:getPath(\\'m\\')\">Merge tables</a></li></ul>'\n wtmap, parents, children = getnetwork(glob.glob(os.path.join(targetFolder, userFolder, \"*.wtx\")))\n for servfile in glob.glob(os.path.join(targetFolder, userFolder, \"*.service\")):\n servfilename = os.path.split(servfile)[1]\n if \".action\" in servfilename:\n parents[servfilename] = []\n with open(servfile, \"r\") as s:\n stype = s.readline().strip()\n starget = s.readline().strip()\n children[servfilename] = starget\n for i, column in enumerate(wtmap):\n if starget in column:\n coln = i\n if coln==0:\n wtmap = [[servfilename]] + wtmap\n else:\n wtmap[coln-1].append(servfilename)\n\n else:\n children[servfilename] = []\n with open(servfile, \"r\") as s:\n ssource = s.readline().strip()\n if ssource in children:\n parents[servfilename] = [ssource]\n children[ssource].append(servfilename)\n for i,column in enumerate(wtmap):\n if ssource in column:\n if i==len(wtmap)-1:\n wtmap.append([])\n wtmap[i+1].append(servfilename)\n\n fixwidth = len(wtmap)*130 + (len(wtmap)-1)*125\n outstring += '<p><table class=\"wttab\"><tbody style=\"display: table;\">'\n\n nrows = 0\n irn = np.random.randint(9999)\n for n in range(len(wtmap)):\n nrows = max(len(wtmap[n]), nrows)\n for r in range(nrows):\n cells = \"\"\n for c in range(len(wtmap)):\n if len(wtmap[c])>r:\n if c>0:\n for i in range(len(wtmap[c-1])):\n if wtmap[c-1][i] in parents[wtmap[c][r]]:\n break\n arrowfile_list = glob.glob(os.path.join(targetFolder, userFolder,\"log/linkarrow_{}_{}*.svg\".format(c,i)))\n if not arrowfile_list:\n arrowfile = os.path.join(userFolder,\"log/linkarrow_{}_{}_{}.svg\".format(c,i,irn)) \n svgbar(arrowfile, i, arrow=True)\n else:\n arrowfile = os.path.join(userFolder, \"log\", os.path.split(arrowfile_list[0])[1])\n linkbar = '<img class=\"linkbar\" width=\"25\" src=\"/file/{}\" />'.format(arrowfile)\n cells += '<td class=\"spacecell\">{}</td>'.format(linkbar)\n wtfile = wtmap[c][r]\n wtpath = os.path.join(targetFolder, userFolder, wtfile)\n cells += '<td class=\"wtcell\">'\n command = 't' if '.wtx' in wtpath else 's'\n cells += '<a href=\"javascript:getPath(\\'{}{}\\')\">'.format(command, wtpath)\n if \".service\" in wtfile:\n icon = \"service\"\n wtfile = wtfile[:-8]\n if \".action\" in wtfile:\n wtfile = wtfile[:-7]\n icon = \"clock\"\n else:\n icon = \"worktable\"\n wtfile = wtfile[:-4]\n cells += '<img class=\"wtimage\" width=\"50px\" height=\"50px\" src=\"static/{}.svg\" />'.format(icon)\n cells += '<p class=\"wttext\">{}</p>'.format(wtfile)\n cells += '</a></td>' \n else:\n if c>0:\n cells += '<td class=\"spacecell\"> </td>'\n cells += '<td class=\"wtcell\"> </td>'\n if c<(len(wtmap)-1):\n barfile_list = 
glob.glob(os.path.join(userFolder,\"log/linkbar_{}_{}*.svg\".format(c,r)))\n if not barfile_list:\n barfile = os.path.join(userFolder,\"log/linkbar_{}_{}_{}.svg\".format(c,r,irn))\n svgbar(barfile, r)\n else:\n barfile = barfile_list[0]\n linkbar = '<img class=\"lineel\" src=\"/file/{}\" />'.format(barfile)\n if len(wtmap[c])>r:\n if len(children[wtmap[c][r]])==0:\n linkbar = ' '\n cells += '<td class=\"spacecell\">{}</td>'.format(linkbar if len(wtmap[c])>r else '')\n svgfilename_list = glob.glob(os.path.join(userFolder,\"log/line_{}_{}*.svg\".format(c,r)))\n if not svgfilename_list: \n svgfilename = os.path.join(userFolder,\"log/line_{}_{}_{}.svg\".format(c,r,irn))\n lmap = []\n for parent in wtmap[c]:\n lmaplist = []\n index = 0\n for child in wtmap[c+1]:\n if child in children[parent]:\n lmaplist.append(index) \n index += 1\n lmap.append(lmaplist)\n svgline(svgfilename, r, lmap)\n else:\n svgfilename = svgfilename_list[0]\n linkline = '<img class=\"lineel\" src=\"/file/{}\" />'.format(svgfilename)\n cells += '<td class=\"linecell\">{}</td>'.format(linkline)\n outstring += '<tr>'+cells+'</tr>' #outstring += '<tr style=\"width: {}px !important;\">'.format(fixwidth)+cells+'</tr>'\n #print(cells+\"\\n\\n\")\n outstring += '</tbody></table></p>'\n return outstring",
"def print_starlingx_projects(G):\n count = 0\n for n, attrdict in G.node.items():\n if attrdict['ntype'] == 'stx_patched':\n count = count + 1\n print(n, attrdict)\n print('nodes with stx_patched attribute: {}'.format(count))",
"def get_nodes(wf_results):\n return {node.fullname: node for node in wf_results.nodes}",
"def get_user_projects(username):\n\n tx = cypher_transaction()\n query = \"\"\"\n MATCH (p:project)-[:OWNED_BY]->(u:user {username:{uname}})\n RETURN p\n \"\"\"\n tx.append(query, parameters={'uname': username})\n results = _first(tx.commit())\n projects = []\n for r in results:\n proj, = r.values\n print(\"* {0}\".format(proj['name']))\n projects.append(proj)\n return projects",
"def project_node_list(project):\n q = client.project.nodes_in(project)\n sys.stdout.write('Nodes allocated to %s: ' % project + \" \".join(q) + '\\n')",
"def get_projects(session):\n cursuses = [1, 21] # cursus ids from which to get the projects\n project_names = []\n\n for cursus in cursuses:\n # Get all the projects from 1 cursus, very slow process because projects endpoint contains\n # a lot of information\n projects = get_all_pages(session, f'/cursus/{cursus}/projects', 100, {'filter[exam]': False})\n for project in projects:\n # Create dictionary containing project id and project name ans set in bigger dict\n project_names.append({'id': project['id'], 'name': project['name']})\n\n return project_names",
"def get_project_templates(session=konfuzio_session()) -> List[dict]:\n url = get_project_url()\n r = session.get(url=url)\n r.raise_for_status()\n sorted_templates = sorted(r.json()['section_labels'], key=itemgetter('id'))\n return sorted_templates",
"def tree(self):\n keys = [\n 'FirstTimeSeen',\n 'LastTimeSeen',\n 'channel',\n 'Speed',\n 'Privacy',\n 'Cipher',\n 'Authentication',\n 'Power',\n 'beacons',\n 'IV',\n 'LANIP',\n 'IDlength',\n 'ESSID',\n 'Key']\n\n c_keys = [\n 'Station MAC',\n 'FirstTimeSeen',\n 'LastTimeSeen',\n 'Power',\n 'Packets',\n 'BSSID',\n 'ProbedESSIDs'\n ]\n\n self.update_results()\n aps = {}\n for ap_ in self._aps:\n bssid = ap_.pop(0)\n aps[bssid] = dict(zip(keys, ap_))\n aps[bssid]['clients'] = []\n\n for client in self.clients:\n if client[0] == bssid:\n aps[bssid]['clients'].append(dict(zip(c_keys, client)))\n return aps",
"def get_projects_xml(indir=\"../projects\"):\n return to_xml(get_projects(indir))",
"def _get_results(tree, year):\n projects = []\n \n for t in tree.findAll('li', {'class': 'mdl-list__item mdl-list__item--one-line'}): \n org = _clean(t.findChildren('a')[0].text)\n a = t.findChildren('a')[0]['href']\n\n org_url = 'https://www.google-melange.com' + a\n org_tree = _get_tree(org_url)\n\n for t1 in org_tree.findAll('span', {'class': 'mdl-list__item-primary-content'}):\n a1 = t1.findChildren('a')\n projs = [a['href'] for a in a1]\n\n for p in projs:\n proj_url = 'https://www.google-melange.com' + p\n proj_tree = _get_tree(proj_url)\n \n title = _clean(proj_tree.findAll('h3')[0].text)\n p = proj_tree.findAll('p')\n bio = _clean(p[0].text)\n \n student = bio.split('by')[-1].split('for')[0]\n description = _clean(p[1].text)\n projects.append((title, org, student, description))\n\n _save_results(projects, year)",
"def extract_subjects(subject_info_xml, primary_str):\n subject_info_pyxb = deserialize_subject_info(subject_info_xml)\n subject_info_tree = d1_common.cert.subject_info.gen_subject_info_tree(\n subject_info_pyxb, primary_str\n )\n return subject_info_tree.get_subject_set()",
"def _get_project_names(self):\n for folder in os.listdir(self.root):\n if folder[0].isdigit():\n try:\n self.new_source_paths[folder]\n pass\n except:\n self.new_source_paths[folder] = {}\n return self.new_source_paths",
"def infotoids(seqsinfo, outdir):\n allids = [x.patient_id for x in seqsinfo]\n # TODO: check all patient_ids are the same\n s = allids[0]\n\n return({'subject': \"sub-\" + IDLOOKUP.get(s, 'UNKNOWN'),\n 'locator': None, 'session': None})",
"def get_project_info(self):\n return self.project_info",
"def details(thread, path):\n parts = [int(x) for x in path.split('.')]\n node = DATA.threads[thread].tree[parts[0]]\n for part in parts[1:]:\n node = node.children[int(part)]\n result = formatNode(node)\n result['children'] = [formatNode(child) for child in node.children]\n return json.dumps(result)",
"def get_org_projects_info(org_link):\n response = get_response(org_link)\n if response.ok:\n soup = BeautifulSoup(response.text, 'html.parser')\n projects_li = soup.find_all(\n 'li', attrs={'layout': True}\n )\n project_info = []\n for proj_html in projects_li:\n proj_info = {}\n proj_title = proj_html.get('aria-label').replace('\\n', '')\n proj_desc = proj_html.find(\n 'div', attrs={'class': 'archive-project-card__content'}).text.replace('\\t', '')\n proj_relative_link = proj_html.select('a')[0].get('href')\n proj_full_link = HOME_PAGE + proj_relative_link\n proj_info['title'] = proj_title\n proj_info['description'] = proj_desc\n proj_info['link'] = proj_full_link\n project_info.append(proj_info)\n return project_info",
"def projects(self):\r\n return p.Projects(self)",
"def subjects_parser(root, study, outdir):\n # Welcome\n print(\"Starting subjects parsing...\")\n\n # Parameters\n subjects = defaultdict(\n lambda: defaultdict(dict))\n participants_file = os.path.join(root, \"participants.tsv\")\n\n # Parse the paticipants tsv file\n with open(participants_file) as csvfile:\n reader = csv.DictReader(csvfile, delimiter=\"\\t\")\n for row in reader:\n subject = row[\"participant_id\"].replace(\"sub-\", \"\")\n center = row.get(\"site\", DEFAULT_CENTER)\n subjects[center][subject] = {\n \"identifier\": md5_sum(subject),\n \"code_in_study\": subject,\n \"handedness\": row.get(\"handedness\", \"unknown\"),\n \"gender\": row.get(\"gender\", \"unknown\")}\n\n # Save the results\n print(\"Saving data in '{0}'...\".format(outdir))\n save_parsing(subjects, outdir, study, \"subjects\")\n\n # Goodbye\n print(\"Done.\")\n\n return subjects",
"def getSubProjects(self):\n logger.debug(\"Func: getSubProjects\")\n\n return self._subProjectsList",
"def list_projects():\n\n cmd = dict()\n cmd[\"type_\"] = \"list_projects\"\n cmd[\"name_\"] = \"\"\n \n s = comm.send_and_receive_socket(cmd)\n\n msg = comm.recv_string(s)\n\n if msg != \"Success!\":\n raise Exception(msg)\n \n json_str = comm.recv_string(s) \n \n s.close() \n\n return json.loads(json_str)[\"projects\"]",
"def get_project_info():\n\n title = request.args.get('project')\n\n project_info_list = hackbright.get_project_by_title(title)\n\n html = render_template(\"project_info.html\",\n project_info_list=project_info_list)\n return html",
"def search_key_for_project(project):\n elements = []\n elements.append(project['name'])\n elements.append(project['client'])\n elements.append(project['project_state'])\n elements.append(str(project['project_code']))\n return u' '.join(elements)",
"def get_project(self, project):\n return Dict(self.projects.get_entry(pk=project, _fields=[\"_all\"]).result())"
] | [
"0.6218537",
"0.5201899",
"0.51904243",
"0.5146706",
"0.51441693",
"0.51321507",
"0.5081233",
"0.50617266",
"0.5056689",
"0.5044258",
"0.49721885",
"0.4900442",
"0.48878756",
"0.48863026",
"0.48748198",
"0.4855289",
"0.48542503",
"0.48475945",
"0.4798768",
"0.47958493",
"0.47921586",
"0.47912034",
"0.47663957",
"0.4759937",
"0.47570875",
"0.47183412",
"0.47173098",
"0.47169167",
"0.47150457",
"0.46901146"
] | 0.6834698 | 0 |
Returns the labels for the XNAT subject and sessions given the frequency and provided IDs. | def get_labels(cls, frequency, project_id, subject_id=None,
visit_id=None):
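        # Build '<project>_<subject>' and '<project>_<subject>_<visit>' labels, substituting the
        # summary name for any level not applicable to the given frequency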
if frequency == 'per_session':
assert visit_id is not None
assert subject_id is not None
subj_label = '{}_{}'.format(project_id, subject_id)
sess_label = '{}_{}_{}'.format(project_id, subject_id,
visit_id)
elif frequency == 'per_subject':
assert subject_id is not None
subj_label = '{}_{}'.format(project_id, subject_id)
sess_label = '{}_{}_{}'.format(project_id, subject_id,
cls.SUMMARY_NAME)
elif frequency == 'per_visit':
assert visit_id is not None
subj_label = '{}_{}'.format(project_id, cls.SUMMARY_NAME)
sess_label = '{}_{}_{}'.format(project_id, cls.SUMMARY_NAME,
visit_id)
elif frequency == 'per_project':
subj_label = '{}_{}'.format(project_id, cls.SUMMARY_NAME)
sess_label = '{}_{}_{}'.format(project_id, cls.SUMMARY_NAME,
cls.SUMMARY_NAME)
else:
raise NiAnalysisError(
"Unrecognised frequency '{}'".format(frequency))
return (subj_label, sess_label) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_nodes(self, ids):\n return [self.node_labels[i] for i in ids]",
"def get_labels():\n\n logging.info(\"Getting metadata about labels\")\n\n labels = []\n\n if len(args.labels) == 0:\n logging.warning(\"No labels specified, assuming all labels. If you have a lot of labels in your inbox you could hit API limits quickly.\")\n results = GMAIL_CLIENT.users().labels().list(userId='me').execute()\n\n labels = results.get('labels', [])\n else:\n logging.info('Using labels: %s ', args.labels)\n\n for label in args.labels:\n labels.append({'id': label})\n\n if not labels:\n logging.info('No labels found.')\n sys.exit()\n\n return labels",
"def get_labels(self) -> np.ndarray:\n return self._dataset.get_labels()[self._ids]",
"def evaluate(predictions, ids, label_identifiers):\n\n labels = []\n #For every prediction\n for i in range(len(predictions)):\n sentence_predictions = predictions[i]\n id_sequence = ids[i]\n sequence_labels = []\n counter = 0\n #For every predicted token\n for j in range(len(id_sequence)):\n word_prediction = sentence_predictions[j]\n id = id_sequence[j]\n #Take only the lemmas that have to be disambiguated\n if not id == '0':\n #Extract the identifiers of the sensekeys associated to the lemma\n indexes = label_identifiers[i][counter]\n new_predictions = []\n #Check if the identifier is a number \n for elem in indexes:\n try:\n index = int(elem)\n new_predictions.append(predictions[i][j][index])\n except ValueError:\n #If is not, MFS was applied\n new_predictions.append(elem)\n #Do the argmax on the extracted prediction indexes\n argmax = np.argmax(new_predictions)\n label = label_identifiers[i][counter][argmax]\n sequence_labels.append(label)\n counter += 1\n labels.append(sequence_labels)\n\n return labels",
"def get_xreaded_label(xbibs):\n\t\n\t# our array length\n\tnxb = len(xbibs)\n\t\n\t# initialisation IDs et NOs\n\txml_ids_map = [None for j in range(nxb)]\n\txml_no_strs_map = [None for j in range(nxb)]\n\txml_no_ints_map = [None for j in range(nxb)]\n\n\t# remplissage selon xpaths @id et @n\n\tfor j, xbib in enumerate(xbibs):\n\t\t\n\t\t# il devrait toujours y avoir un ID, mais en réalité parfois absent\n\t\txbib_id_nodes = xbib.xpath(\"@xml:id\") ;\n\t\t\n\t\t# si présent, l'attribut \"@n\" reprend le label (devrait toujours être numérique ?)\n\t\txbib_no_nodes = xbib.xpath(\"@n\") ;\n\t\t\n\t\tfound_id = (len(xbib_id_nodes) == 1)\n\t\tfound_no = (len(xbib_no_nodes) == 1 \n\t\t\t\t\t and re.match(r'^[\\[\\]\\.0-9 ]+$', str(xbib_no_nodes[0])))\n\t\t\n\t\t# récup id et numérotation\n\t\tif found_id and found_no:\n\t\t\t# lecture attributs XML\n\t\t\tthisbib_id = xbib_id_nodes.pop()\n\t\t\tthisbib_no_str = xbib_no_nodes.pop()\n\t\t\tthisbib_no_int = int(re.sub(\"[^0-9]+\",\"\", thisbib_no_str))\n\t\t\n\t\t# récup id et astuce numérotation attendue en fin d'ID \n\t\telif found_id and not found_no:\n\t\t\tthisbib_id = xbib_id_nodes.pop()\n\t\t\t\n\t\t\t# on cherche le dernier nombre pour mettre dans xml_nos_map\n\t\t\t# par ex: 1,2 ou 3 dans DDP278C1 DDP278C2 DDP278C3\n\t\t\tpostfix_num_match = re.search(r\"[0-9]+$\", thisbib_id)\n\t\t\t\n\t\t\t# todo pour les cas comme \"1.\", \"2.\" \n\t\t\t# => re.search(r\"([0-9]+)[^0-9]*$\")\n\t\t\t# avec match.group(1)\n\t\t\t\n\t\t\tif postfix_num_match:\n\t\t\t\tthisbib_no_str = postfix_num_match.group(0)\n\t\t\t\tthisbib_no_int = int(postfix_num_match.group(0))\n\t\t\telse:\n\t\t\t\t# rien trouvé pour le no: on mettra None dans xml_nos_map\n\t\t\t\tthisbib_no_str = None\n\t\t\t\tthisbib_no_int = None\n\t\t\n\t\t# les 2 cas restants: trouvé no sans id OU trouvé aucun\n\t\telse:\n\t\t\tthisbib_id = None\n\t\t\tthisbib_no_str = None\n\t\t\tthisbib_no_int = None\n\t\t\n\t\t# stockage des readed_label/bibids trouvés\n\t\t# -----------------------------------\n\t\txml_ids_map[j] = thisbib_id\n\t\txml_no_strs_map[j] = thisbib_no_str\n\t\txml_no_ints_map[j] = thisbib_no_int\n\t\t\n\n\t# check consecutivity\n\t# (ce diagnostic pourrait aussi être fait dès la boucle) \n\tflag_std_map = True # temporaire\n\tfor j, no in enumerate(xml_no_ints_map):\n\t\tif (no is None) or (int(no) != j+1):\n\t\t\tflag_std_map = False\n\t\n\t\n\treturn (xml_ids_map, xml_no_strs_map, xml_no_ints_map, flag_std_map)",
"def get_labels(pr_id):\n label_json = get_status_json(pr_id, 'labels')\n current_labels = [l['name'] for l in label_json]\n return current_labels",
"def labels(self):\n return self.label2cc.keys()",
"def extractLabelIdentifier(sentences, ids, lemmas_mapping, vocab_identifier, wordnetCompression):\n\n identifiers_list = []\n for i in range(len(sentences)):\n sentence = sentences[i]\n id_sequence = ids[i]\n sentence_ids = []\n for j in range(len(sentence)):\n word = sentence[j]\n id = id_sequence[j]\n word_ids = []\n #Check if the current word was met during training\n if not id == '0':\n if word in lemmas_mapping:\n #If it is, ectract the sensekeys associated to the lemma\n sensekeys = lemmas_mapping[word]\n #Then search for all the sensekeys their identifier\n for sensekey in sensekeys:\n word_ids.append(vocab_identifier[sensekey])\n else:\n #Take the most frequent sense from wordnet\n mfs = str(wn.synsets(word)[0])[8:-2]\n #Retrieve the correspondent sensekey\n sensekey = wn.synset(mfs).lemmas()[0].key()\n if wordnetCompression:\n #Transform the senekey into a wordnet synset\n pos = wn.lemma_from_key(sensekey).synset().pos()\n offset = wn.lemma_from_key(sensekey).synset().offset()\n wn_synset = \"wn:\" + str(offset).zfill( 8) + pos\n word_ids.append(wn_synset)\n else:\n word_ids.append(sensekey)\n if word_ids:\n sentence_ids.append(word_ids)\n identifiers_list.append(sentence_ids)\n\n return identifiers_list",
"def get_labels(self):\n return [token.label for token in self.tokens]",
"def decode(self, labels):\n if len(labels.shape) == 1:\n return self.sp.DecodeIds([int(l) for l in labels])\n\n sentences = list()\n for batch in labels:\n sentence = str()\n for label in batch:\n sentence = self.sp.DecodeIds([int(l) for l in label])\n sentences.append(sentence)\n return sentences",
"def get_labels(self):\n resp = self._client.scan(TableName=self.LABELS_TABLE)\n return [self._item_to_label(item) for item in resp['Items']]",
"def _extract_labels(self, samples: List):\n targets = [\n self.sp_model.encode(sample[2].lower().replace(\"<unk>\", \"<garbage>\").replace(\"\\n\", \"\"))\n for sample in samples\n ]\n targets = [\n [ele if ele != 4 else self.sp_model.unk_id() for ele in target] for target in targets\n ] # map id of <unk> token to unk_id\n lengths = torch.tensor([len(elem) for elem in targets]).to(dtype=torch.int32)\n targets = torch.nn.utils.rnn.pad_sequence(\n [torch.tensor(elem) for elem in targets],\n batch_first=True,\n padding_value=1.0,\n ).to(dtype=torch.int32)\n return targets, lengths",
"def get_labels():\n return if_found(dao.get_labels())",
"def labels(self):\n return self._get_labels(self.label_vector)",
"def labels(self):\n\n param=self\n\n l=len(param)\n\n sweep_label=[]\n\n for index,name in enumerate(param.names):\n\n sweep_label.append((\\\n ''.join([c for c in name if c.isupper()]))\\\n .replace(\"IDT\",\"\")\\\n .replace(\"S\",\"\")\\\n .replace(\"M\",\"\"))\n\n stringout=[]\n\n unique={name:list(dict.fromkeys(values)) for name,values in zip(param.names,param.values)}\n\n for i in range(l):\n\n tmp_lab=''\n\n for lab,name in zip(sweep_label,self.names):\n\n tmp_lab=tmp_lab+lab+str(unique[name].index(param()[name][i]))\n\n stringout.append(tmp_lab)\n\n return stringout",
"def _get_labels(x_label, y_label, title, xlabel_str):\n if x_label is None:\n x_label = xlabel_str\n\n if y_label is None:\n y_label = \"Degree of membership\"\n\n if title is None:\n title = \"Degrees of membership of the samples to each cluster\"\n\n return x_label, y_label, title",
"def get_class_label_names(class_idx_predictions, class_to_idx, class_to_label):\r\n # Flatten to 1D tensor and convert to ndarray\r\n class_idx_predictions = np.array(np.squeeze(class_idx_predictions))\r\n\r\n # Switch key to value and value to key\r\n idx_to_class = {idx: cls for cls, idx in class_to_idx.items()}\r\n\r\n # class_idx_predictions represents an class index, e.g. provided by model prediction\r\n # Get the label from the class that matches the index\r\n class_labels_predictions = [class_to_label.get(idx_to_class.get(idx, None), idx) for idx in class_idx_predictions]\r\n\r\n # Return list\r\n return class_labels_predictions",
"def lookup_label_id(service, labels):\n if not labels:\n return\n\n labelids = {} # label name => label id\n results = service.users().labels().list(userId='me').execute()\n mylabs = results.get('labels', [])\n for lab in mylabs:\n if len(labelids) == len(labels):\n break\n if lab['name'] in labels:\n labelids[lab['name']] = lab['id']\n return labelids",
"def labels(self) -> List[str]:\n\n return list(self.t0.keys())",
"def get_labels(self):\n return get_labels(self.api_key)",
"def gen_labels(self, nidxs=None, condense_labels=False):\n\n if nidxs is None:\n nidxs = self.nidx_train\n\n y = []\n\n for r in nidxs:\n y.append(self.node_labels[r])\n\n if condense_labels:\n # This should be improved, since this will fail if there are labels with exactly the same number of samples\n # Current solution use a bit of noise to minimize conflicts/favors\n y = self.encode_labels(y)\n lab_weights = 1. - np.mean(y, axis=0)\n noise = np.random.normal(loc=0, scale=0.0001, size=np.shape(y))\n y_condensed = np.argmax(minmax_scale(y * lab_weights + noise, axis=1), axis=1)\n return y_condensed\n\n return self.encode_labels(y)",
"def make_labels_common(self, results):\n labels = []\n keys = self.sort_keys( results )\n for label in keys:\n labels.append( str(label) )\n labels.reverse()\n return labels",
"def get_tpx_labels():\n\t\n\tlabels_abs = get_tpx_labels_abs()\n\tlabels_rel = get_tpx_labels_rel()\n\tlabels_prop = get_tpx_labels_prop()\n\tlabels_special = get_tpx_labels_special()\n\tlabels = copy.copy(labels_abs) + copy.copy(labels_rel) + copy.copy(labels_prop) + copy.copy(labels_special)\n\t\n\treturn labels",
"def get_data(ids):\n\n sequences = FileManager.read_fasta(FileSetter.fasta_file())\n max_length = ProteinInformation.determine_max_length(sequences, ids)\n labels = ProteinInformation.get_labels(ids, sequences)\n\n return sequences, max_length, labels",
"def get_labels():\n json_request = request.json # get the json from the server\n keys = sort_keys(json_request.keys()) # sort the keys (i.e. the token ids)\n labels = []\n for k in keys:\n # get the labels that the user input to the UI\n val = (json_request[k]['text'], json_request[k]['value'])\n labels.append(val)\n return labels",
"def get_labels(self):\n raise NotImplementedError",
"def get_video_labels(RekognitionJobID):\n\n ImageClient = boto3.client('rekognition')\n\n AnalysisResult = ImageClient.get_label_detection(JobId = RekognitionJobID)\n Labels = AnalysisResult['Labels']\n\t\n while 'NextToken' in AnalysisResult.keys(): # If there are more results to get there will be a NextToken parameter\n next_token = AnalysisResult['NextToken']\n AnalysisResult = ImageClient.get_label_detection(JobId = RekognitionJobID, NextToken = next_token)\n Labels.extend(AnalysisResult['Labels']) # extend() extends the list, append() would just add a single item\n\t\n return Labels",
"def node_labels(self, n_id=None):\n if n_id is None:\n return frozenset(self._nodes_by_label.keys())\n else:\n try:\n node_entry = self._nodes[n_id]\n except KeyError:\n return None\n else:\n return node_entry.labels",
"def get_labels(ids, sequences, file_prefix=None):\n labels = dict()\n\n if file_prefix is None:\n metal_residues = FileManager.read_binding_residues(FileSetter.binding_residues_by_ligand('metal'))\n nuclear_residues = FileManager.read_binding_residues(FileSetter.binding_residues_by_ligand('nuclear'))\n small_residues = FileManager.read_binding_residues(FileSetter.binding_residues_by_ligand('small'))\n else:\n metal_residues = FileManager.read_binding_residues('{}_metal.txt'.format(file_prefix))\n nuclear_residues = FileManager.read_binding_residues('{}_nuclear.txt'.format(file_prefix))\n small_residues = FileManager.read_binding_residues('{}_small.txt'.format(file_prefix))\n\n for prot_id in ids:\n prot_length = len(sequences[prot_id])\n binding_tensor = np.zeros([prot_length, 3], dtype=np.float32)\n\n metal_res = nuc_res = small_res = []\n\n if prot_id in metal_residues.keys():\n metal_res = metal_residues[prot_id]\n if prot_id in nuclear_residues.keys():\n nuc_res = nuclear_residues[prot_id]\n if prot_id in small_residues.keys():\n small_res = small_residues[prot_id]\n\n metal_residues_0_ind = ProteinInformation._get_zero_based_residues(metal_res)\n nuc_residues_0_ind = ProteinInformation._get_zero_based_residues(nuc_res)\n small_residues_0_ind = ProteinInformation._get_zero_based_residues(small_res)\n\n binding_tensor[metal_residues_0_ind, 0] = 1\n binding_tensor[nuc_residues_0_ind, 1] = 1\n binding_tensor[small_residues_0_ind, 2] = 1\n\n labels[prot_id] = binding_tensor\n\n return labels",
"def labeler(self, labels, tokens):\n encoded = []\n for idx, document in enumerate(tqdm(tokens)):\n tmp = [0 for char in range(len(document))]\n for name in labels[idx]:\n if re.match(r\"[^a-zA-Z]\", name):\n pattern = list(name)\n else:\n pattern = name\n # for indexes in re.finditer(name, document):\n # tmp[indexes.span()[0]:indexes.span()[1]] = [1 for _ in range(indexes.span()[1] - indexes.span()[0])]\n for i in range(len(document)):\n if document[i] == pattern[0] and document[i:i+len(pattern)] == pattern:\n tmp[i:i+len(pattern)] = [1 for _ in range(len(pattern))]\n encoded.append(tmp)\n\n # # Sanity check\n # for doc, enc in zip(tokens, encoded):\n # print(f\"{len(doc)}, {len(enc)}\")\n\n return encoded"
] | [
"0.60455763",
"0.58538216",
"0.5673425",
"0.56661934",
"0.55925333",
"0.5571956",
"0.553905",
"0.55372113",
"0.5479797",
"0.5430343",
"0.5406244",
"0.53669673",
"0.53579235",
"0.5339231",
"0.5337022",
"0.53341526",
"0.5321965",
"0.5311045",
"0.5294377",
"0.5279869",
"0.5278892",
"0.52738416",
"0.5255996",
"0.5252411",
"0.5237829",
"0.52185065",
"0.5212548",
"0.5208907",
"0.5204223",
"0.51977867"
] | 0.654024 | 0 |
Downloads a single dataset from an XNAT server | def download_dataset(download_path, server, user, password, session_id,
dataset_name, data_format=None):
with xnat.connect(server, user=user, password=password) as xnat_login:
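        # Look up the experiment (session) on the server, then the named scan within it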
try:
session = xnat_login.experiments[session_id]
except KeyError:
raise NiAnalysisError(
"Didn't find session matching '{}' on {}".format(session_id,
server))
try:
dataset = session.scans[dataset_name]
except KeyError:
raise NiAnalysisError(
"Didn't find dataset matching '{}' in {}".format(dataset_name,
session_id))
if data_format is None:
data_format = guess_data_format(dataset)
download_resource(download_path, dataset, data_format, session.label) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def download_dataset(self):\n raise NotImplementedError",
"def x_download():\n\t#_loadconfig()\n\tconf = _get_config()\n\t#print conf['xplane']\n\tdownload_url = conf['xplane']['download']\n\tlocal(\"wget -P %s %s\" % (navimport.conf.work_dir(\"/xplane_zips\"), download_url))",
"def download(args):\n with_dataset(args, Dataset._download)",
"def _download_mnist_realval(dataset):\n origin = (\n 'http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz'\n )\n print 'Downloading data from %s' % origin\n urllib.urlretrieve(origin, dataset)",
"def fetch_save(url):\n\n name = url.split(\"/\")[-1]\n response = requests.get(url, stream=True)\n if response.status_code == 200:\n with open(f\"{DATA_PATH}/{name}\", \"wb\") as f:\n f.write(response.raw.read())\n else:\n logging.info(f\"Failed {url} download\")",
"def download():\n toydata = requests.get(DATA_URL).json()\n return toydata",
"def download_entire_dataset(dataset_name, num_data, labels, method, cache_dir):\n\n print('Downloading {}...'.format(dataset_name))\n preprocessor = preprocess_method_dict[method]()\n\n # Select the first `num_data` samples from the dataset.\n target_index = numpy.arange(num_data) if num_data >= 0 else None\n dataset_parts = D.molnet.get_molnet_dataset(dataset_name, preprocessor,\n labels=labels,\n target_index=target_index)\n dataset_parts = dataset_parts['dataset']\n\n # Cache the downloaded dataset.\n if not os.path.exists(cache_dir):\n os.makedirs(cache_dir)\n\n for i, part in enumerate(['train', 'valid', 'test']):\n filename = dataset_part_filename(part, num_data)\n path = os.path.join(cache_dir, filename)\n NumpyTupleDataset.save(path, dataset_parts[i])\n return dataset_parts",
"def download_dataset(url=DATASET_URL):\n # disable insecure https warning\n urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\n c = urllib3.PoolManager()\n with c.request(\"GET\", url, preload_content=False) as res, open(\n LOCAL_FILE_NAME, \"wb\"\n ) as out_file:\n shutil.copyfileobj(res, out_file)\n logging.info(\"Download completed.\")",
"def downloadDataset(datasetName, url):\n\n baseFolder = os.path.dirname(os.path.abspath(__file__))\n destinationFolder = os.path.join(baseFolder, \"DataSets\", datasetName)\n testFolder = os.path.join(destinationFolder, \"test\")\n trainFolder = os.path.join(destinationFolder, \"train\")\n\n if not os.path.exists(os.path.join(destinationFolder, \"test\")):\n filename = os.path.join(destinationFolder, \"NISTSpecialDatabase4GrayScaleImagesofFIGS.zip\")\n if not os.path.exists(filename):\n print(\"Downloading data from \" + url + \"...\")\n urlretrieve(url, filename)\n\n try:\n print(\"Extracting \" + filename + \"...\")\n with zipfile.ZipFile(filename) as myzip:\n myzip.extractall(destinationFolder)\n print(\"Distributing the Dataset...\")\n distributeDataset(destinationFolder, testFolder, trainFolder)\n print(\"Renaming the files...\")\n renameFiles(testFolder)\n renameFiles(trainFolder)\n finally:\n os.remove(filename)\n print(\"Done.\")\n else:\n print(\"Data already available at \" + baseFolder + \"/\" + datasetName)",
"def download(self):\n\n with open(self.dataset_path) as dataset_file:\n dataset = json.load(dataset_file)\n\n path = \"\".join([POST_HIT_PATH, dataset[\"dataset\"][\"data_path\"]])\n if not os.path.exists(path):\n os.makedirs(path)\n\n protocole = dataset[\"dataset\"][\"protocole\"]\n\n download_links = []\n\n for resource in dataset[\"dataset\"][\"resources\"]:\n file_path = \"\".join([path, resource[\"filename\"]])\n\n #Check if the the download link has not been used before (One download link for all)\n if resource[\"download_link\"] not in download_links:\n \n print(\"DOWNLOADING : {}\".format(resource[\"filename\"]))\n f = urllib.request.urlopen(resource[\"download_link\"])\n data = f.read()\n with open(file_path, \"wb\") as donwload_file:\n donwload_file.write(data)\n\n download_links.append(resource[\"download_link\"])\n\n \n #Extract all files from the tar archives if necessary\n if tarfile.is_tarfile(file_path):\n tf = tarfile.open(file_path)\n tf.exractall()",
"def download(data_root, version):\n if version not in GroceriesReal.GROCERIES_REAL_DATASET_TABLES.keys():\n raise ValueError(\n f\"A valid dataset version is required. Available versions are:\"\n f\"{GroceriesReal.GROCERIES_REAL_DATASET_TABLES.keys()}\"\n )\n dest_path = os.path.join(\n data_root, GroceriesReal.LOCAL_PATH, f\"{version}.zip\"\n )\n expected_checksum = GroceriesReal.GROCERIES_REAL_DATASET_TABLES[\n version\n ].checksum\n extract_folder = os.path.join(data_root, GroceriesReal.LOCAL_PATH)\n if os.path.exists(dest_path):\n logger.info(\"The dataset file exists. Skip download.\")\n try:\n validate_checksum(dest_path, expected_checksum)\n except ChecksumError:\n logger.info(\n \"The checksum of the previous dataset mismatches. \"\n \"Delete the previously downloaded dataset.\"\n )\n os.remove(dest_path)\n if not os.path.exists(dest_path):\n source_uri = GroceriesReal.GROCERIES_REAL_DATASET_TABLES[\n version\n ].source_uri\n GroceriesReal._download_http(source_uri, dest_path, version)\n GroceriesReal._extract_file(dest_path, extract_folder)",
"def test_download_dataset_full(tmp_path):\n\n pennylane.data.data_manager._download_dataset(\n \"dataset/path\",\n tmp_path / \"dataset\",\n attributes=None,\n )\n\n with open(tmp_path / \"dataset\", \"rb\") as f:\n assert f.read() == b\"This is binary data\"",
"def download_compressed_dataset(url):\n raise NotImplementedError",
"def download_dataset(self):\n dataset_name = ADE20K_URL.split(\"/\")[-1].split(\".\")[0]\n req = urllib.request.Request(ADE20K_URL, method=\"HEAD\")\n size_file = urllib.request.urlopen(req).headers[\"Content-Length\"]\n download = \"n\"\n while download != \"y\":\n if not self.yes_all:\n download = input(f\"You are about to download {dataset_name} ({size_file} bytes) to the temporary folder {self.tmp_path}. Do you want to continue? [y/n] \\n\")\n if self.yes_all or download == \"y\":\n logger.info(f\"Downloading dataset {dataset_name} at {ADE20K_URL} to temporary folder {self.tmp_path}...\")\n zip_path, hdrs = urllib.request.urlretrieve(ADE20K_URL, f\"{self.tmp_path}/{dataset_name}.zip\")\n logger.info(f\"Extracting {zip_path} to temporary folder {self.tmp_path}...\")\n with zipfile.ZipFile(f\"{zip_path}\", 'r') as z:\n z.extractall(f\"{self.tmp_path}\")\n self.input_data_path = zip_path[:-4]\n break\n elif download == \"n\":\n logger.error(f\"Cannot pursue without downloading the dataset.\")\n sys.exit()\n else:\n logger.error(\"Please enter a valid answer (y or n).\")",
"def download():\n basedir = os.path.dirname(os.path.dirname(__file__))\n print(basedir)\n datadir = os.path.join(basedir,\"data/NeonTreeEvaluation/\")\n print(\"Downloading data files to {}\".format(datadir)) \n eval_url = zenodo_url(concept_rec_id=\"3723356\", datadir=datadir)",
"def download():\r\n reader = GSODDataReader()\r\n year_list = range(2001, 2012)\r\n austin = reader.collect_data(year_list, exact_station=True,\r\n station_name='AUSTIN CAMP MABRY', state='TX', country='US')\r\n houston = reader.collect_data(year_list, exact_station=True,\r\n station_name='HOUSTON/D.W. HOOKS', state='TX', country='US')\r\n new_york = reader.collect_data(year_list, exact_station=True,\r\n station_name='NEW YORK/LA GUARDIA', state='NY', country='US')\r\n newark = reader.collect_data(year_list, exact_station=True,\r\n station_name='NEWARK INTL AIRPORT', state='NJ', country='US')\r\n punta_arenas = reader.collect_data(year_list, exact_station=True,\r\n station_name='PUNTA ARENAS', country='CH')\r\n wellington = reader.collect_data(year_list, exact_station=True,\r\n station_name='WELLINGTON AIRPORT', country='NZ')\r\n store = HDFStore('weather.h5')\r\n store['austin'] = austin\r\n store['houston'] = houston\r\n store['nyc'] = new_york\r\n store['newark'] = newark\r\n store['punta_arenas'] = punta_arenas\r\n store['wellington'] = wellington\r\n store.close()",
"def download_mnist (data='training'):\n assert data in ['training', 'testing']\n \n if data == 'training':\n images_url = 'http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz'\n labels_url = 'http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz'\n else:\n images_url = 'http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz'\n labels_url = 'http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz'\n \n (images_fn_gz, _) = urllib.urlretrieve ('http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz')\n (labels_fn_gz, _) = urllib.urlretrieve ('http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz')\n return (images_fn_gz, labels_fn_gz)",
"def fetch_zenodo(self):\n\n # retrieve content from URL\n try:\n logging.info(f\"Downloading example data from {self.url}\")\n r = requests.get(self.url, stream=True)\n with io.BytesIO() as stream:\n with tqdm.wrapattr(\n stream,\n 'write',\n file=sys.stdout,\n miniters=1,\n desc=self.url,\n total=int(r.headers.get('content-length', 0))\n ) as file:\n for chunk in r.iter_content(chunk_size=4096):\n file.write(chunk)\n with zipfile.ZipFile(stream) as zipped:\n # extract each file in the zipped dir to the project\n for f in zipped.namelist():\n logging.info(\"Unzipped: {}\".format(os.path.join(self.destination, f)))\n zipped.extract(f, self.destination)\n\n logging.info(\"Download and install complete.\")\n\n self.close_logger()\n\n except requests.exceptions.MissingSchema:\n msg = f\"Unable to download data from {self.url}\"\n logging.exception(msg)\n self.close_logger()\n raise",
"def getData(constrain):\n\n dat_AGS = chunks(AGS, 100)\n for num, ags_c in enumerate(dat_AGS):\n to_download = DOWNLOAD_LINK.format(ags_id=ags_c, constrain=constrain)\n to_download = to_download.replace(\" \", \"\")\n download_name = \"../Data/Gemeinden/{}-{}.csv\".format(\n constrain, num)\n\n url.urlretrieve(to_download, filename=download_name)\n\n sleep(1) # be nice\n\n return(num)",
"def download_hess_dr1_data():\n download_data_files(FILENAMES_HESS_DR1)",
"def download(dataset_name,dataset_url):\n directory = \"tmp\"\n if not os.path.exists(os.path.join(directory,dataset_name)):\n os.makedirs(os.path.join(directory,dataset_name))\n for url, filename in get_all_data(dataset_url):\n if not os.path.exists(os.path.join(directory,dataset_name,filename)):\n print(\"Downloading \"+filename+\":\",)\n ul.urlretrieve(url,os.path.join(directory,dataset_name,filename),reporthook)\n unzip_ecco_tcp_xmls(os.path.join(directory, dataset_name), os.path.join(directory, dataset_name + \"_unzipped\"))\n shutil.rmtree(os.path.join(directory, dataset_name))\n shutil.move(os.path.join(directory, dataset_name + \"_unzipped\"), os.path.join(directory, dataset_name))\n headers_to_csv(directory, dataset_name)\n corpus_to_csv(directory, dataset_name)\n erase_all_files_with_extension(directory, dataset_name, \".hdr\")\n erase_all_files_with_extension(directory, dataset_name, \".xml\")",
"def get(log, session, args):\n url = \"{}datasets/{}\".format(\n http.get_api_url(args.url, args.project),\n args.id)\n log.debug('GET: {}'.format(url))\n response_json = http.get(session, url)\n log.print_json(response_json, \"dataset\", \"get\")",
"def download_and_prepare_dmipy_example_dataset(self):\r\n subject_ID = 100307\r\n self.download_subject(subject_ID)\r\n self.prepare_example_slice(subject_ID)",
"def download_source():\n \n #if os.path.exists(UNFCC_FILE): \n # os.rename(UNFCC_FILE,'old_'+UNFCC_FILE)\n #if os.path.exists(EBAL_FILE):\n # os.rename(EBAL_FILE,'old_'+EBAL_FILE)\n\n try:\n unsd = sdmx.Request('UNSD')\n sdmx.logger.setLevel(logging.INFO)\n \n logger.info('Loading UNFCC Data')\n resp_unfcc = unsd.data('DF_UNData_UNFCC')\n\n logger.info('Loading UN Energy Balance Data')\n resp_ebal = unsd.data('DF_UNData_EnergyBalance')\n except Exception as e:\n logger.error('Error!! Please look at SDMX logs to troubleshoot' + str(e))\n traceback.print_exc(file = sys.stdout)\n\n try:\n df_ebal = resp_ebal.to_pandas()\n df_unfcc = resp_unfcc.to_pandas()\n\n df_unfcc.reset_index().to_csv(UNFCC_FILE,index=False)\n logger.info('UNFCC Greenhouse Data stored as {}'.format(UNFCC_FILE))\n\n df_ebal.reset_index().to_csv(EBAL_FILE,index=False)\n logger.info('UN Energy Balance Data stored as {}'.format(EBAL_FILE))\n except Exception as e:\n logger.error('Error!! While saving data from SDMX to CSV ' + str(e))\n traceback.print_exc(file = sys.stdout)",
"def _download_data_from_nfs_connection(self) -> 'DataFrame':\n\n # note: as we need to load a data into the memory,\n # we are using pure requests and helpers from the WML client\n data_path = self.location.path\n connection_id = self.connection.asset_id\n\n return self._download_data_from_nfs_connection_using_id_and_path(connection_id, data_path)",
"def download():\n raise NotImplementedError",
"def __getFile_urllib(self, _src, _dst):\n\n #-------------------- \n # Open the local destination file \n # so that it can start reading in the buffers.\n #-------------------- \n try:\n dstDir = os.path.dirname(_dst) \n if not os.path.exists(dstDir):\n os.makedirs(dstDir)\n dstFile = open(_dst, \"wb\")\n except Exception as e:\n self.__downloadFailed(_src, _dst, dstFile, str(e))\n return\n\n\n\n #-------------------- \n # Construct the request and authentication handler\n #-------------------- \n xnatUrl = Xnat.path.makeXnatUrl(self.host, _src)\n request = urllib.request.Request(xnatUrl)\n request.add_header(\"Authorization\", \n self.authHeader['Authorization'])\n\n\n\n #-------------------- \n # Get the response from the XNAT host.\n #-------------------- \n try:\n response = urllib.request.urlopen(request)\n\n\n\n\n #-------------------- \n # If the urllib.request version fails then use http.client.\n # See get_http.client for more details.\n #-------------------- \n #except urllib.request.HTTPError, e:\n except Exception as e:\n #print(str(e))\n #print(f\"{_src} {_dst}\")\n #print(d)\n self.__downloadFailed(_src, _dst, dstFile, str(e))\n return\n\n\n #-------------------- \n # Get the content size, first by checking log, then by reading \n # header\n #-------------------- \n self.downloadTracker['downloadedSize']['bytes'] = 0 \n self.downloadTracker['totalDownloadSize'] = \\\n self.getFileSize(xnatUrl)\n if not self.downloadTracker['totalDownloadSize']['bytes']:\n # If not in log, read the header\n if response.headers and \"Content-Length\" in response.headers:\n self.downloadTracker['totalDownloadSize']['bytes'] = \\\n int(response.headers[\"Content-Length\"]) \n self.downloadTracker['totalDownloadSize']['MB'] = \\\n Xnat.utils.bytesToMB(\\\n self.downloadTracker['totalDownloadSize']['bytes'])\n\n\n #-------------------- \n # Start the buffer reading cycle by\n # calling on the buffer_read function above.\n #-------------------- \n bytesRead = self.__bufferRead(xnatUrl, dstFile, response)\n dstFile.close()",
"def ferry_data_download(URL):\n explanation = 'File exists'\n file_downloaded = True\n # Request if the thredds server is working, add .html to URL\n req = requests.get(URL + '.html')\n if req.status_code == 200:\n \"\"\"File exists and is good for download, so write file\"\"\"\n print('File is ok')\n explanation = 'Good URL, File downloaded'\n file_downloaded = True\n ferry = xr.open_dataset(URL)\n else:\n print('File not found or unavailable')\n explanation = ' File not found or unavailable'\n file_downloaded = False\n ferry = np.nan\n return (ferry, file_downloaded, explanation)",
"def maybe_download():\n\n print(\"Downloading Inception 5h Model ...\")\n download.maybe_download_and_extract(url=data_url, download_dir=data_dir)",
"def download():\n\treturn response.download(request, db)"
] | [
"0.6454303",
"0.645106",
"0.64145815",
"0.622972",
"0.6204956",
"0.6166584",
"0.60750073",
"0.5986293",
"0.5952546",
"0.5930593",
"0.59178406",
"0.58890903",
"0.58786213",
"0.58727086",
"0.5863751",
"0.5862628",
"0.58624923",
"0.5855549",
"0.5848393",
"0.58119965",
"0.5810328",
"0.5781682",
"0.5770772",
"0.5755618",
"0.5742303",
"0.5741513",
"0.5733845",
"0.5729881",
"0.5726934",
"0.57191795"
] | 0.66168547 | 0 |
Return the total amount of calories across all items in the _calories dictionary. | def adding_total_calories(total_calories: int) -> int:
for item in _calories:
total_calories = total_calories + _calories[item]
return total_calories | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calories(foods, foods_used):\n calories = 0.0\n for i, count in foods_used.items():\n calories += (foods[i]['calories'] * count)\n return calories",
"def get_total_expenses(self):\n return sum(self.expenses.values())",
"def calories(self) -> List[RecipeObjectNutrientsCalories]:\n return self._calories",
"def total_calories(self, weight=75):\n return weight * 0.862911 * self.total_distance",
"def total(self):\n return sum(self.d.values())",
"def total(self):\n total = sum(self.d.values())\n return total",
"def calories(self, calories: List[RecipeObjectNutrientsCalories]):\n\n self._calories = calories",
"def total_wc(d):\n return sum(d.values())",
"def total(evictiondata):\r\n total = 0\r\n for index, row in evictiondata.iterrows():\r\n total += row['filings_2020']",
"def printing_food_and_calories(food_item_names: list, total_calories: int) -> None:\n avg_calories = total_calories / len(_calories)\n print(\"\\nFood Items:\", sorted(food_item_names))\n print(\"Total Calories:\", total_calories,\n \"Average Calories: %0.1f\\n\" % avg_calories)",
"def totalCount(self):\n return sum(self.values())",
"def totalCount(self):\n return sum(self.values())",
"def totalCount(self):\n return sum(self.values())",
"def amount_gathering(user_recipe):\r\n #Forms Dictionary\r\n sales_stats = dictionary_formation()\r\n amount_list = []\r\n month_list = [\"Nov\", \"Dec\", \"Jan\", \"Feb\", \"Mar\", \"Apr\", \"May\",\r\n \"Jun\", \"Jul\", \"Aug\", \"Sep\", \"Oct\"]\r\n for month in month_list:\r\n bottles_amount = 0\r\n dicts_read = 2\r\n for dicts_read in sales_stats:\r\n analyse_dict = sales_stats[str(dicts_read)]\r\n if month in analyse_dict[\"date_required\"]:\r\n if analyse_dict[\"recipe\"] == user_recipe:\r\n bottles_amount += analyse_dict[\"quantity_ordered\"]\r\n amount_list.append(bottles_amount)\r\n return amount_list",
"def get_total(self):\n total = 0.00\n\n for _drink in self.drinks:\n total = total + _drink.get_price()\n\n for _food in self.food:\n total = total + _food.get_price()\n\n return total",
"def total(self):\r\n total = 0\r\n in_currency = None\r\n for key in self.attr_dict.keys():\r\n cur = Currency(self.attr_dict[key])\r\n if cur.amount > 0:\r\n # Add currency to the total\r\n total += cur.amount * cur.value\r\n if in_currency is None:\r\n # Use the first currency we find\r\n in_currency = cur\r\n elif cur.value < in_currency.value:\r\n # We want the lowest currency value\r\n in_currency = cur\r\n\r\n # Now we want to return what we have collected\r\n if in_currency is None:\r\n return {'total': 0, 'name': ''}\r\n else:\r\n return {'total': total, 'name': in_currency.name}",
"def sum(self):\n return sum(self.items())",
"def carbs(foods, foods_used):\n carbs = 0.0\n for i, count in foods_used.items():\n carbs += (foods[i]['carbs'] * count)\n return carbs",
"def summarize(allowances):\n total_allowances = 0\n if isinstance(allowances, dict):\n for key, value in allowances.items():\n total_allowances = total_allowances + int(value)\n #end for\n else:\n total_allowances = allowances\n return total_allowances",
"def get_total(self) -> float:\n if self.__open:\n raise RuntimeError(\"Cash drawer must be closed to count.\")\n total: float = 0.0\n for denom in CashDenomination:\n total += self.__contents[denom] * denom.amount\n return total",
"def total_clearance(self):\n total_clearances = 0\n debit = 0 #variable to track the remaining debit\n clearances = self.clearance_set.all() #grab all the previous clerances\n for clearance in clearances:\n total_clearances += clearance.paid_value\n return total_clearances",
"def calculate_total(self):\n if self.total_price == 0:\n for discount in self.discounts:\n for item in self.items:\n item.add_discount(discount)\n\n for item in self.items:\n self.total_price += item.final_price()\n\n return self.total_price",
"def total_sales(self):\n total_sales = 0\n items = self.item_set.filter(status=\"sold\")\n for item in items:\n total_sales += item.price\n return total_sales",
"def dp_calories_only(foods, cal_goal):\n macros = init_one_d_array(cal_goal, 999999999)\n foods_used = init_one_d_array(cal_goal, {})\n for i in range(cal_goal):\n for j in range(len(foods)):\n food = foods[j]\n if int(food['calories']) <= i:\n if macros[i - int(food['calories'])] == 999999999:\n prev_cost = 0\n prev_foods_used = {}\n else:\n prev_cost = macros[i - int(food['calories'])]\n prev_foods_used = foods_used[i -\n int(food['calories'])].copy()\n if macros[i] > prev_cost + food['serving_cost']:\n macros[i] = prev_cost + food['serving_cost']\n try:\n prev_foods_used[j] += 1\n except KeyError:\n prev_foods_used[j] = 1\n foods_used[i] = prev_foods_used\n return foods_used[cal_goal - 1]",
"def get_total_appliance(self):\n total = 0\n for appliance in self.get_appliances():\n total += appliance.get_total()\n return total",
"def __len__(self):\n return sum(item[\"quantity\"] for item in self.carro.values())",
"def get_salario_total(self):\n s = 0\n for e in self.empleados:\n s += e.get_salario()\n return s",
"def totals_map():\n totals_map = [*map(sum,poke_stats)]\n\n return(totals_map)",
"def collect_adjustment_totals(team_dict):\n franchise = team_dict['team'].franchise.id\n year = team_dict['team'].year\n adjustments = Payroll.objects.filter(\n Q(paying=franchise) | Q(receiving=franchise)\n )\n adjustments = adjustments.filter(year__gte=year).order_by('note', 'year')\n adjustments_list = [0, 0, 0, 0, 0]\n for a in adjustments:\n if a.receiving is not None:\n if a.receiving.id == franchise:\n adjustments_list[a.year - year] -= a.money\n if a.paying is not None:\n if a.paying.id == franchise:\n adjustments_list[a.year - year] += a.money\n return adjustments_list",
"def fat(foods, foods_used):\n fat = 0.0\n for i, count in foods_used.items():\n fat += (foods[i]['fat'] * count)\n return fat"
] | [
"0.7841667",
"0.6843834",
"0.6591413",
"0.6374997",
"0.6353281",
"0.6308069",
"0.627658",
"0.6155628",
"0.615305",
"0.61347705",
"0.61346525",
"0.61346525",
"0.61346525",
"0.6096279",
"0.6092271",
"0.59906465",
"0.5984482",
"0.5926618",
"0.5919394",
"0.5877268",
"0.5825999",
"0.5810855",
"0.57998663",
"0.57892364",
"0.5762975",
"0.5757599",
"0.5749801",
"0.57428557",
"0.5737804",
"0.5722138"
] | 0.798169 | 0 |
Insert a new food item and its calorie count into the _calories dictionary. | def insert_calorie_value(new_item: str) -> None:
new_item_calories = int(input("Enter calories for " + new_item + ": "))
_calories[new_item] = new_item_calories | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _insert_item(self, key: _KT, value: _VT) -> None:\n dict.__setitem__(self, key, value)",
"def __setitem__(self, key, value):\n self.insert(key, value)",
"def insert(self, key, val):\n self.dict.setdefault(key, []).append(val)",
"def add(self, key, value, location):\r\n i = self.index_for_location(location)\r\n if i is not None:\r\n self.insert(i, key, value)\r\n else:\r\n self.__setitem__(key, value)",
"def _insert(self, key, value):\n entry = self._lookup(key)\n if entry.value is None:\n self.used += 1\n if entry.key is not dummy:\n self.filled += 1\n entry.key = key\n entry.hash = self.first_hash(key)\n entry.value = value",
"def add(self, key, value):",
"def add_dict_entry(dictionary: dict, key: Any, value: Any) -> None:\n try:\n dictionary[key].append(value)\n except KeyError:\n dictionary[key] = [value]",
"def __setitem__(self,k,v):\n self.insert(k,v)",
"def add(self, key, value):\n self._data.add_last(self._Item(key, value))",
"def add(self, key, value):\n self._store[key] = value",
"def add_value(self, thing_key, dkey, value):\n if thing_key in self.things_dict:\n dic = self.things_dict[thing_key]\n if type(dic) != type({}):\n dic = {}\n dic[dkey] = value\n self.things_dict[thing_key] = dic\n self.changed.append(thing_key)\n return True\n return False",
"def add(self, item):\n self._dict[item] = item",
"def insert(self, key, value):\n\t\tself.__insert(key, value, key[1:])",
"def add_value(self, indep, key, value):\n self.add_dict(indep, {key: value})",
"def add(self, val):\n self.lookup[val] = self.lookup.get(val, 0) + 1",
"def LocalAddOrUpdateEntry(self, key, value):\r\n self.localData[key] = value",
"def add(self, key, value):\n if not key in self:\n self.keys.append(key)\n self.dict[key] = value",
"def insert(self, e):\n try:\n self.vals[e] += 1\n except:\n self.vals[e] = 1",
"def insert(self, pos, value):\n\t\titems = self.__dict__.values()\n\t\tif not isinstance(pos, int) or pos < 0:\n\t\t\traise ValueError(\"'pos' value is not positive integer.\")\n\t\telif pos > len(items):\n\t\t\traise ValueError(\"'pos' value is not a position in self.__dict__\")\n\t\titems.insert(pos, value)\n\t\tnew_dict = {}\n\t\tfor x, y in enumerate(items):\n\t\t\tnew_dict.update({x: y})\n\t\tself.__dict__ = new_dict",
"def add_value(dict_, key, value):\n values = dict_.get(key)\n if not values:\n dict_[key] = [value]\n else:\n values.append(value)",
"def __setitem__(self, key, value):\n\t\tself.__dStore[key] = value",
"def add(enforcer_dict, key, value):\n enforcer_dict['d'] = 4\n assert other.keystring == 'abcd'\n assert other.valuesum == 10\n\n enforcer_dict.update(dict(e=5, f=6))\n assert other.keystring == 'abcdef'\n assert other.valuesum == 21",
"def __setitem__(self, key, value):\r\n self.setdefault(key, []).append(value)",
"def __setitem__(self, key, value):\n self.setdefault(key, []).append(value)",
"def __setitem__(self, key, value):\n self.setdefault(key, []).append(value)",
"def addItem(self, key):\n if key in self.dictionary:\n raise Exception(\"Key already exist in dictionary\")\n self.dictionary[key] = WordInformation(self.MAX_RATING)",
"def __setitem__(self, key, value):\n self._maps[0][key] = value",
"def add(self, key, value):\n newest = self._Item(key, value) # make new item instance\n walk = self._data.last() # walk backward looking for smaller key\n while walk is not None and newest < walk.element():\n walk = self._data.before(walk)\n if walk is None:\n self._data.add_first(newest) # new key is smallest\n else:\n self._data.add_after(walk, newest) # newest goes after walk",
"def add(self, key, value):\n self.data.append((key, value))",
"def insert(self, key, value):\n tags = self.__all_tags()\n if value not in tags:\n tags.insert(key, value)\n self.__post_changes(tags)"
] | [
"0.6804416",
"0.65591997",
"0.644645",
"0.64100003",
"0.6371622",
"0.63484925",
"0.62549883",
"0.6218331",
"0.62002844",
"0.618095",
"0.6173622",
"0.6158189",
"0.6128213",
"0.6082548",
"0.60798395",
"0.60317045",
"0.601796",
"0.60129565",
"0.60097075",
"0.5991703",
"0.59874636",
"0.5984133",
"0.5977535",
"0.5975113",
"0.5975113",
"0.59672433",
"0.59530574",
"0.5907364",
"0.588334",
"0.5874757"
] | 0.7667434 | 0 |
Print the food items, total calories and average calories. | def printing_food_and_calories(food_item_names: list, total_calories: int) -> None:
avg_calories = total_calories / len(_calories)
print("\nFood Items:", sorted(food_item_names))
print("Total Calories:", total_calories,
"Average Calories: %0.1f\n" % avg_calories) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def print_food(self):\n for dish in self.food:\n print(dish.get_name())",
"def calories() -> None:\n new_item = input(\"Enter food item to add, or ’q’ to exit: \")\n while new_item != \"q\":\n insert_calorie_value(new_item)\n total_calories = 0\n total_calories = adding_total_calories(total_calories)\n food_item_names = []\n appending_food_item_names(food_item_names)\n printing_food_and_calories(food_item_names, total_calories)\n new_item = input(\"Enter food item to add, or ’q’ to exit: \")",
"def calories(foods, foods_used):\n calories = 0.0\n for i, count in foods_used.items():\n calories += (foods[i]['calories'] * count)\n return calories",
"def view(self):\n\n print('Here\\'s your expense and income records:\\n'+' '*3+'Category'+' '*7+\\\n 'Description'+' '*4+'Amount\\n'+'='*40)\n line = 1\n amount = self._initial_money\n for n in self._records:\n m = n.split() # m is a list in the form ['category', 'item', 'money']\n print(f'{line:<3}{m[0]:<15}{m[1]:<15}{m[2]}')\n amount += int(m[2])\n line += 1\n print('='*40 + f'\\nNow you have {amount} dollars.')",
"def food_eaten(self):\r\n # get values from GUI\r\n\r\n foodList = \"\"\r\n foodCost=0\r\n if self.is_eggs.get():\r\n foodList += \"eggs $2.00\\n\"\r\n foodCost+=2\r\n if self.is_bacon.get():\r\n foodList += \"bacon $4.00\\n\"\r\n foodCost += 4\r\n if self.is_sausage.get():\r\n foodList += \"sausage $4.00\\n\"\r\n foodCost += 4\r\n if self.is_oj.get():\r\n foodList += \"OrangeJuice $3.00\\n\"\r\n foodCost += 3\r\n foodCost = ('%.2f' % foodCost)\r\n\r\n # Create the output to screen of foodlist\r\n story = (\"\\nThank you for joining us here at Order Up!\\n\\nThe foods that you ate are as follows:\\n\\n\\n\"+foodList+\"\\nThe total amount owed is: $\"+foodCost)\r\n # display the summary\r\n self.story_txt.delete(0.0, END)\r\n self.story_txt.insert(0.0, story)",
"def report():\n print(\"Donor Name | Total Given | Num Gifts | Average Gift\")\n print(\"------------------------------------------------------------------\")\n for key, val in data.items():\n print(f\"{key:25} $ {float(sum(val)):>12.2f} {len(val):>8} $ {float(sum(val))/len(val):>11.2f}\")",
"def inventory_report(products):\r\n names = set()\r\n total_price = 0\r\n total_weight = 0\r\n total_flammability = 0\r\n for product in products:\r\n names.add(product.name)\r\n total_price += product.price\r\n total_weight += product.weight\r\n total_flammability += product.flammability\r\n\r\n print(\"ACME CORPORATION OFFICIAL INVENTORY REPORT\")\r\n print(\"Unique product names: {}\".format(len(names)))\r\n print(\"Average price: {}\".format(total_price / len(products)))\r\n print(\"Average weight: {}\".format(total_weight / len(products)))\r\n print(\"Average flammability:{}\".format(\r\n total_flammability / len(products)))\r\n\r\n print(\"Following is useful starting code for acme_report.py:\")",
"def inventory_report(products, prices, weights, flammabilities):\n num_product = len(products)\n avg_price = mean(prices)\n avg_weight = mean(weights)\n avg_flam = mean(flammabilities)\n\n print(\"ACME CORPORATION OFFICIAL INVENTORY REPORT\")\n print(\"Unique product names: {}\".format(num_product))\n print(\"Average price: {}\".format(avg_price))\n print(\"Average weight: {}\".format(avg_weight))\n print(\"Average flammability: {}\".format(avg_flam))",
"def basic ( ) :\n \n die_file_paths = _sort_all_apropriate_files(options.input)\n \n for die_file_path in die_file_paths :\n \n print\n #print (\"loading die information from file: \" + die_file_path)\n die_description, die_roll_dict = _read_die_file (die_file_path)\n \n print (\"data for die with description: \" + die_description.strip())\n \n print \n \n print (\"raw roll data:\")\n for roll_value in sorted(die_roll_dict.keys()) :\n print (\"rolled \\t\" + str(roll_value) + \"\\t on the die \\t\"\n + str(die_roll_dict[roll_value]) + \"\\t time(s)\")\n \n print \n \n print (\"simple roll histogram:\")\n for roll_value in sorted(die_roll_dict.keys()) :\n bar_text = \"*\" * die_roll_dict[roll_value]\n print (str(roll_value) + \"\\t\" + bar_text)\n \n print \n \n side_val = numpy.array(die_roll_dict.keys( ), dtype=numpy.float)\n rolls = numpy.array(die_roll_dict.values( ), dtype=numpy.float)\n num_rolls = float(numpy.sum(rolls))\n avg_result = numpy.sum(rolls * side_val) / num_rolls\n \n print (\"average roll: \" + str(avg_result))\n \n print (\"------------\")",
"def print_items(self,items):\n self.n_char(SN_RULER,PAPER_MAX_WIDTH,w=1,h=1)\n self.esprint(LINE_BREAK)\n self.esprint(ITEM_TITLE)\n self.n_char(SPACE_CHAR,ITEM_TITLE_SPACE_AFTER)\n self.esprint(QTY_TITLE)\n self.n_char(SPACE_CHAR,QTY_TITLE_SPACE_AFTER)\n self.esprint(SP_TITLE)\n self.n_char(SPACE_CHAR,SP_TITLE_SPACE_AFTER)\n self.esprint(TOTAL_TITLE)\n self.n_char(SPACE_CHAR,TOTAL_TITLE_SPACE_AFTER)\n self.esprint(LINE_BREAK)\n self.n_char(DO_RULER,PAPER_MAX_WIDTH,w=1,h=2)\n self.esprint(LINE_BREAK)\n sum_total = 0\n for item in items:\n product = item[ITEM] if len(item[ITEM]) < ITEM_MAX_WIDTH else item[ITEM][:ITEM_MAX_WIDTH-ELIPSES_WIDTH]+ELIPSES\n qty = item[QTY] if isinstance(item[QTY],int) else round(str_2_num(item[QTY]),2)\n price = round(str_2_num(item[SP]),2)\n total = round(qty*price,2)\n sum_total += total\n self.print_space(product,ITEM_MAX_WIDTH)\n self.print_space(str(qty),QTY_MAX_WIDTH)\n self.print_space(str(price),SP_MAX_WIDTH)\n self.print_space(str(total),TOTAL_MAX_WIDTH)\n self.esprint(LINE_BREAK)\n self.n_char(SN_RULER,PAPER_MAX_WIDTH,w=1,h=1)\n self.esprint(LINE_BREAK)\n self.esprint(TOTAL_TXT)\n leave = TOTAL_TXT_WIDTH\n self.n_char(SPACE_CHAR,PAPER_MAX_WIDTH-TOTAL_MAX_WIDTH-leave,w=1,h=1)\n self.esprint(str(sum_total))\n self.esprint(LINE_BREAK)\n self.n_char(DO_RULER,PAPER_MAX_WIDTH,w=1,h=2)\n self.esprint(LINE_BREAK)",
"def displayInventory(bag):\n print(\"Inventory:\")\n item_total = 0\n for k, v in bag.items():\n print(str(v) + ' ' + str(k))\n item_total += v\n print(\"Total number of items: \" + str(item_total))\n print('\\n')",
"def feed(self):\n if not self.food_bag or sum(self.food_bag.values()) == 0:\n # if the food_bag is empty:\n print('You ran out of food! Go back to the shop to buy new animal food')\n else:\n print('You currently have:\\n')\n for k, v in self.food_bag.items():\n print(\"{:<8} {:<10}\".format(v,k))\n print(\"\\n\")\n response = input(f'What would you like to feed the {self.animals[self.park_location]}?')\n while response not in self.food_bag.keys():\n response = input(f\"Please enter one of:B {' '.join(list(self.food_bag.keys()))}:\")\n print(f\"The {self.animals[self.park_location]} are eating your {response}.\")",
"def print_report():\n width = 68\n print(\"-\" * width)\n header = (\"Donor Name\", \"Total Given\", \"Num Gifts\", \"Average Gift\")\n print(\"{:20} | {:15} | {:10} | {:12}\".format(*header))\n print(\"-\" * width)\n for index, donor in enumerate(donors_data):\n name = donor[\"name\"]\n total = sum(donor[\"donations\"])\n num_gift = len(donor[\"donations\"])\n average = total/num_gift\n print(\"{:22} ${:12,.2f} {:12d} ${:12,.2f}\".format(name, total, num_gift, average ))\n print(\"-\" * width)",
"def inventory_report(products):\n name_list = set()\n price_list = []\n wt_list = []\n flamablity_list = []\n\n for p in products:\n name_list.add(p.name)\n price_list.append(p.price)\n wt_list.append(p.weight)\n flamablity_list.append(p.flammability)\n# Calculating average for report\n unique_names = len(name_list)\n avg_price = sum(price_list)/len(price_list)\n avg_weight = sum(wt_list)/len(wt_list)\n avg_flammability = sum(flamablity_list)/len(flamablity_list)\n# Printing\n print(\"$ python acme_report.py \")\n print(\"ACME CORPORATION OFFICIAL INVENTORY REPORT\")\n print(\"Unique product names:\", unique_names)\n print(\"Average price:\", round(avg_price, 2))\n print(\"Average weight:\", avg_weight)\n print(\"Average flammability:\", avg_flammability)",
"def print_average_open():\n days = 0\n incidents = 0\n\n with open('service_now_ticket_sample.csv') as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader: \n days = days + int(row['calendar_duration (days)'])\n incidents = 1 + incidents\n \n u_item_dict[row['u_category_gear']][0] = int(u_item_dict[row['calendar_duration (days)']])\n u_item_dict[row['u_category_gear']][0] += 1\n \n print(u_item_dict)\n for item in u_item_dict:\n print (item, item[0], item[1])",
"def receipt(basket):\n\n cprint(\"\"\"\\n\\n Item Price Discount Final Price\n------------------------------------------------------------------\"\"\")\n sigma_all = sum([e[1] for e in basket])\n sigma_discount = 0\n for name, price, discount in basket:\n discounted_price = (100 - discount) / 100 * price\n cprint(\"| %16s | £%10.2f | %3d\" % (name, price, discount) + \"%\" + f\" | £%10.2f |\" % discounted_price)\n sigma_discount += discounted_price\n cprint(\"|________________________________________________________________|\")\n\n cprint(\"\\n\\nTotal Price: £%.2f\" % sigma_all)\n cprint(\"Total Discount: £%.2f\" % (sigma_all - sigma_discount))\n cprint(\"Final Price: £%.2f\" % sigma_discount)\n\n cprint(\"\\nThank you for shopping at \" + SHOP_NAME)",
"def print_inventory(self):\n\t\tfor item, amount in self.inventoryDictionary.items():\n\t\t\tprint (\"Item: \" + item.name + \" Quantity: \" + str(amount))\n\t\t\tprint (item.description + \"\\n\")\n\n\t\tprint(\"Currently equipped: \")\n\t\tprint(\"Main Hand: \" + self.equippedMainHand.name)\n\t\tprint(\"Armor: \" + self.equippedArmor.name)",
"def display(self):\n print(\n f'\\t\\t {self.name.upper()} {self.potency[0]}{self.potency[1]}\\t\\t'\n f' {self.dose_qty[0]} {self.dose_qty[1]} {self.dose[0]} {self.dose[1].upper()}')",
"def func(self):\n from commands.base_commands.guest import census_of_fealty\n\n fealties = census_of_fealty()\n table = PrettyTable([\"{wFealty{n\", \"{w#{n\"])\n for fealty in fealties:\n table.add_row([fealty, fealties[fealty]])\n self.msg(table)",
"def inventory_report(products):\n unique_names = []\n total_price = 0\n total_weight = 0\n total_flammability = 0\n num_products = len(products)\n for i in range(num_products):\n if products[i].name not in unique_names:\n unique_names.append(products[i].name) \n total_price += products[i].price\n total_weight += products[i].weight\n total_flammability += products[i].flammability\n mean_price = total_price / num_products\n mean_weight = total_weight / num_products\n mean_flammability = total_flammability / num_products\n print('ACME CORPORATION OFFICIAL INVENTORY REPORT')\n print(f'Unique product names: {len(unique_names)}')\n print(f'Average price: {mean_price}')\n print(f'Average weight {mean_weight}')\n print(f'Average flammabilitiy {mean_flammability}')\n return unique_names, mean_price, mean_weight, mean_flammability",
"def print_summary_fuel_reactor(fs):\n print(\"\\nResults:\")\n print(\"==========================================\")\n print(\"---Moving Bed Fuel Reactor---\") \n \n print(\"\\nInlet gas: \", \n \"\\nCO2: \", value(fs.MB_fuel.F[0,'CO2']), \"mol/s\",\n \"\\nH20: \", value(fs.MB_fuel.F[0,'H2O']), \"mol/s\",\n \"\\nCH4: \", value(fs.MB_fuel.F[0,'CH4']), \"mol/s\",\n \"\\nCO2: \", value(fs.MB_fuel.Gas_M[0,'CO2']), \"kg/s\",\n \"\\nH20: \", value(fs.MB_fuel.Gas_M[0,'H2O']), \"kg/s\",\n \"\\nCH4: \", value(fs.MB_fuel.Gas_M[0,'CH4']), \"kg/s\")\n print(\"\\nOutlet gas: \", \n \"\\nCO2: \", value(fs.MB_fuel.F[1,'CO2']), \"mol/s\",\n \"\\nH20: \", value(fs.MB_fuel.F[1,'H2O']), \"mol/s\", \n \"\\nCH4: \", value(fs.MB_fuel.F[1,'CH4']), \"mol/s\",\n \"\\nCO2: \", value(fs.MB_fuel.Gas_M[1,'CO2']), \"kg/s\",\n \"\\nH20: \", value(fs.MB_fuel.Gas_M[1,'H2O']), \"kg/s\", \n \"\\nCH4: \", value(fs.MB_fuel.Gas_M[1,'CH4']), \"kg/s\")\n print(\"\\nInlet solids: \", \n \"\\nFe2O3: \", value(fs.MB_fuel.Solid_F[1,'Fe2O3']), \"mol/s\",\n \"\\nFe3O4: \", value(fs.MB_fuel.Solid_F[1,'Fe3O4']), \"mol/s\", \n \"\\nAl: \", value(fs.MB_fuel.Solid_F[1,'Al2O3']), \"mol/s\",\n \"\\nFe2O3: \", value(fs.MB_fuel.Solid_M[1,'Fe2O3']), \"kg/s\",\n \"\\nFe3O4: \", value(fs.MB_fuel.Solid_M[1,'Fe3O4']), \"kg/s\", \n \"\\nAl: \", value(fs.MB_fuel.Solid_M[1,'Al2O3']), \"kg/s\")\n print(\"\\nOutlet solids: \", \n \"\\nFe2O3: \", value(fs.MB_fuel.Solid_F[0,'Fe2O3']), \"mol/s\",\n \"\\nFe3O4: \", value(fs.MB_fuel.Solid_F[0,'Fe3O4']), \"mol/s\", \n \"\\nAl: \", value(fs.MB_fuel.Solid_F[0,'Al2O3']), \"mol/s\",\n \"\\nFe2O3: \", value(fs.MB_fuel.Solid_M[0,'Fe2O3']), \"kg/s\",\n \"\\nFe3O4: \", value(fs.MB_fuel.Solid_M[0,'Fe3O4']), \"kg/s\", \n \"\\nAl: \", value(fs.MB_fuel.Solid_M[0,'Al2O3']), \"kg/s\") \n \n print(\"\\nGas inlet velocity: \", value(fs.MB_fuel.vg[0]), \"m/s\")\n print(\"Gas outlet velocity: \", value(fs.MB_fuel.vg[1]), \"m/s\")\n print(\"Solids velocity: \", value(fs.MB_fuel.vs), \"m/s\") \n \n print(\"\\nHeat of reaction @ z=0: \", \n value(fs.MB_fuel.DH_rxn_s[0]), \"J/(mol reaction)\")\n print(\"Heat of reaction @ z=1: \", \n value(fs.MB_fuel.DH_rxn_s[1]), \"J/(mol reaction)\")\n \n print(\"\\nCH4 conversion: \", value(fs.MB_fuel.X_gas)*100, \" %\")\n print(\"Fe2O3 conversion: \", value(fs.MB_fuel.X_OC)*100, \" %\")\n \n print('\\nPressure @inlet: ', value(fs.MB_fuel.P[0]))\n print('Pressure @outlet: ', value(fs.MB_fuel.Gas_Out_P))\n \n print(\"\\nReactor bed height:\", value(fs.MB_fuel.L), \" m\")\n print(\"Reactor bed diameter:\", value(fs.MB_fuel.Dr), \" m\")\n# print(\"Refractory wall thickness\", value(fs.MB.refractory_th), \" m\")\n \n print(\"\\nInlet gas flow:\", value(fs.MB_fuel.Gas_In_F), \" mol/s\")\n print(\"Outlet gas flow:\", value(fs.MB_fuel.Ftotal[1]), \" mol/s\")\n print(\"Inlet solids flow:\", value(fs.MB_fuel.Solid_In_M), \" kg/s\")\n print(\"Outlet solids flow:\", value(fs.MB_fuel.Solid_Out_M), \" kg/s\")\n print(\"Inlet solids temperature:\", value(fs.MB_fuel.Solid_In_Ts), \" K\")\n print(\"Outlet solids temperature:\", value(fs.MB_fuel.Solid_Out_Ts), \" K\")\n \n print(\"Inlet gas temperature:\", value(fs.MB_fuel.Tg[0]), \" K\")\n print(\"Outlet gas temperature:\", value(fs.MB_fuel.Tg[1]), \" K\") \n \n print(\"\\nInlet solid mass fractions: \", \n \"\\nFe2O3: \", value(fs.MB_fuel.x[1,'Fe2O3']),\n \"\\nFe3O4: \", value(fs.MB_fuel.x[1,'Fe3O4']), \n \"\\nAl2O3: \", value(fs.MB_fuel.x[1,'Al2O3']))\n print(\"Outlet solid mass fractions: \", \n \"\\nFe2O3: \", value(fs.MB_fuel.x[0,'Fe2O3']),\n \"\\nFe3O4: 
\", value(fs.MB_fuel.x[0,'Fe3O4']), \n \"\\nAl2O3: \", value(fs.MB_fuel.x[0,'Al2O3']))",
"def ExamineAllEvents(self, do_print):\n total = 0.0\n for purno in self.data:\n event = self.data[purno]\n randomcountry = event.keys()[0]\n randomrow = event[randomcountry]\n total += self.GetTotal(randomrow)\n if do_print:\n print purno, randomrow[0], randomrow[2], randomrow[6]\n for country in event:\n print \" %s: %.2f%%\" % (\n country, self.GetCountryPercentage(event[country], country) * 100)\n return total",
"def print_items(self):\n for items in inventory:\n print(f\"- {items.upper()}\")",
"def inventory_report(self):\n mean_price = sum(Product.price for Product in sample) / len(sample)\n mean_weight = sum(Product.weight for Product in sample) / len(sample)\n mean_flam = sum(Product.flammability for Product in sample) / len(sample)\n return 'Unique Product Names: ', sample.unique, '/n Average Price: ', mean_price, \n '/n Average Weight: ', mean_weight, '/n Average Flammability: ', mean_flam",
"def printCurrent():\n print (\"Total:\", calc_get_total())",
"def print_results(weeks, days, bus_days, total):\n print(\"{} remaining.\\n\".format(total))\n print(\"{} weeks and {} days.\".format(weeks, days - (weeks * 7)))\n print(\"{} business days.\".format(bus_days))",
"def fat(foods, foods_used):\n fat = 0.0\n for i, count in foods_used.items():\n fat += (foods[i]['fat'] * count)\n return fat",
"def printValues():\r\n grand_prod_cost = []\r\n grand_album_sales = []\r\n for x in d:\r\n print(\"----------------------------------------------\")\r\n print(\"Statistics for Band '\" + x + \"'\")\r\n thisDict = d[x]\r\n print(\"1)What is the total production cost of the album? :\", round(sum(thisDict[ProdCost]), 2))\r\n print(\"2)What is the total sales of the album? :\", round(sum(thisDict[AlbumSales]), 2))\r\n print(\"3)What is the average production cost of the album?:\", round(mean(thisDict[ProdCost]), 2))\r\n print(\"4)What is the average of the album sale? :\", round(mean(thisDict[AlbumSales]), 2))\r\n print(\"5)Net Profit/Loss :\", round(sum(thisDict[AlbumSales]) - sum(thisDict[ProdCost]), 2))\r\n\r\n grand_prod_cost +=thisDict[ProdCost]\r\n grand_album_sales +=(thisDict[AlbumSales])\r\n\r\n print('**********************************************************************************')\r\n print('Statistics of all albums')\r\n print('1)What is the total production cost of all albums? :', round(sum(grand_prod_cost), 2))\r\n print('2)What is the total sales of all albums? :', round(sum(grand_album_sales), 2))\r\n print('3)What is the average production cost of all albums?:', round(mean(grand_prod_cost),2))\r\n print('4)What is the average of all album sales? :', round(mean((grand_album_sales)),2))\r\n print('5)Net Profit/Loss :', round((sum(grand_album_sales) - sum(grand_prod_cost)),2))\r\n print('**********************************************************************************')",
"def caloriesCmd(bot, trigger):\n replyFmt = '\"%s\" has %i kcal per 100 g'\n lookup = lookupFoodAndReply(bot, trigger, replyFmt, nutrient=CALORIE)",
"def summarize_food_data(unprocessed_food_list: List[str]) -> List[Dict[str, str]]:\n summary: List[Dict[str, str]] = []\n item_count_data: Dict[str, int] = {}\n\n for item in unprocessed_food_list:\n if item not in item_count_data:\n item_count_data[item] = 1\n else:\n item_count_data[item] += 1\n \n for product in item_count_data:\n item_information: Dict[str, str] = {}\n item_information[\"name\"] = product\n item_information[\"quantity\"] = str(item_count_data[product])\n item_information[\"units\"] = \"-\"\n summary.append(item_information)\n \n return summary"
] | [
"0.7097891",
"0.6548885",
"0.64881444",
"0.6290056",
"0.62568957",
"0.62152535",
"0.6121451",
"0.6117783",
"0.6099719",
"0.6090612",
"0.6011658",
"0.59537023",
"0.5920604",
"0.5900643",
"0.58737713",
"0.5869495",
"0.586078",
"0.5847227",
"0.58046967",
"0.5803036",
"0.5749345",
"0.5722241",
"0.57005703",
"0.567472",
"0.56734765",
"0.5646785",
"0.56440824",
"0.5636894",
"0.5625723",
"0.55959076"
] | 0.88680345 | 0 |
Ask the user for a food item and add it to the list, then calculate the total calories and the average calories. Press q to quit. | def calories() -> None:
new_item = input("Enter food item to add, or ’q’ to exit: ")
while new_item != "q":
insert_calorie_value(new_item)
total_calories = 0
total_calories = adding_total_calories(total_calories)
food_item_names = []
appending_food_item_names(food_item_names)
printing_food_and_calories(food_item_names, total_calories)
new_item = input("Enter food item to add, or ’q’ to exit: ") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def printing_food_and_calories(food_item_names: list, total_calories: int) -> None:\n avg_calories = total_calories / len(_calories)\n print(\"\\nFood Items:\", sorted(food_item_names))\n print(\"Total Calories:\", total_calories,\n \"Average Calories: %0.1f\\n\" % avg_calories)",
"def buy_animal_food(self):\n if self.location == \"Shop\":\n response = input(\"How many bananas do you want to buy?\")\n while response not in [\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\"]:\n response = input(\"Please specify the number of beers\")\n money = self.money - int(response)\n if money >= 0:\n self.food_bag['Bananas'] += int(response)\n self.money = money\n print(f'You now got {response} more bananas than before, be careful not to drop one - you might slip!')\n else:\n print(\"You idiot apparently spent all your money on beers! You can\\'t buy animal food anymore - \"\n \"better go gambling!\")\n else:\n print('Your are not at the Shop right now, hard to find animal food here.')",
"def feed(self):\n if not self.food_bag or sum(self.food_bag.values()) == 0:\n # if the food_bag is empty:\n print('You ran out of food! Go back to the shop to buy new animal food')\n else:\n print('You currently have:\\n')\n for k, v in self.food_bag.items():\n print(\"{:<8} {:<10}\".format(v,k))\n print(\"\\n\")\n response = input(f'What would you like to feed the {self.animals[self.park_location]}?')\n while response not in self.food_bag.keys():\n response = input(f\"Please enter one of:B {' '.join(list(self.food_bag.keys()))}:\")\n print(f\"The {self.animals[self.park_location]} are eating your {response}.\")",
"def food_eaten(self):\r\n # get values from GUI\r\n\r\n foodList = \"\"\r\n foodCost=0\r\n if self.is_eggs.get():\r\n foodList += \"eggs $2.00\\n\"\r\n foodCost+=2\r\n if self.is_bacon.get():\r\n foodList += \"bacon $4.00\\n\"\r\n foodCost += 4\r\n if self.is_sausage.get():\r\n foodList += \"sausage $4.00\\n\"\r\n foodCost += 4\r\n if self.is_oj.get():\r\n foodList += \"OrangeJuice $3.00\\n\"\r\n foodCost += 3\r\n foodCost = ('%.2f' % foodCost)\r\n\r\n # Create the output to screen of foodlist\r\n story = (\"\\nThank you for joining us here at Order Up!\\n\\nThe foods that you ate are as follows:\\n\\n\\n\"+foodList+\"\\nThe total amount owed is: $\"+foodCost)\r\n # display the summary\r\n self.story_txt.delete(0.0, END)\r\n self.story_txt.insert(0.0, story)",
"def average_calc():\n addCount = 0\n addSum = 0\n \n while True:\n addInput = input(\"Please provide a number to add. Quit with 'done' \")\n if addInput == 'done':\n break\n \n addCount += 1\n addSum = addSum + int(addInput)\n addAveradge = addSum / addCount\n \n print(\"Total sum: \", addSum)\n print(\"Average: \", addAveradge)",
"def shopping_cart(self):\r\n self.customer_id = int(input(\"Enter your customer id: \"))\r\n self.item_name = input(\"Enter item name: \")\r\n self.price = int(input(f\"Enter price of {self.item_name}: \"))\r\n self.qty = int(input(f\"Enter quantity of {self.item_name}: \"))\r\n self.calculate_discount()\r\n self.get_total_amount()",
"def restaurant():\n\n # Initialize variables.\n menu = {'egusi': 150, \n 'akpu': 150, \n 'onugbu': 200, \n 'okro': 150, \n 'garri': 150, \n 'nsala': 300, \n 'rice': 150, \n 'stew': 150, \n 'isiewu': 1000\n }\n total = 0.0\n\n print()\n # Request input from user. Exit program if blank line is entered.\n while True:\n order = input(\"Order: \").strip().lower()\n if not order:\n break\n \n # Check if customer order is available in the menu. Increment total\n # if order is available and display appropriate message.\n if order in menu:\n total += menu[order]\n print(f'{order} cost {menu[order]}, total is {total}')\n else:\n print(f'Sorry, we are fresh out of {order} today.')\n\n # print(f'Your total is {total}')\n\n return total",
"def main():\n\n sum_of_values = 0.0\n count = 0\n\n x = input('Enter the next number in list to average. (\"q\" to exit) ')\n while 'q' not in x.lower():\n print ('x =', x)\n sum_of_values = sum_of_values + float(x)\n count += 1\n x = input('Enter number in list to avg. (\"q\" to exit) ')\n\n print(\"The average of {0} values is {1}\".format(\n count,\n sum_of_values / count))",
"def add_to_list(my_list):\n\n list_item = raw_input(\"What would you like to add to the list? \")\n my_list.append(list_item)",
"def insert_calorie_value(new_item: str) -> None:\n new_item_calories = int(input(\"Enter calories for \" + new_item + \": \"))\n _calories[new_item] = new_item_calories",
"def action_fill(self):\n print('Write how many ml of water do you want to add:')\n self.water += int(input().strip())\n\n print('Write how many ml of milk do you want to add:')\n self.milk += int(input().strip())\n\n print('Write how many grams of coffee beans do you want to add:')\n self.beans += int(input().strip())\n\n print('Write how many disposable cups of coffee do you want to add:')\n self.cups += int(input().strip())",
"def calories(foods, foods_used):\n calories = 0.0\n for i, count in foods_used.items():\n calories += (foods[i]['calories'] * count)\n return calories",
"def addnewitem():\n\n itemcode = input(\"Enter item code: \")\n itemdescription = input(\"Enter item description: \")\n itemrentalprice = input(\"Enter item rental price: \")\n\n # Get price from the market prices module\n itemprice = get_latest_price(itemcode)\n\n isfurniture = input(\"Is this item a piece of furniture? (Y/N): \")\n if isfurniture.lower() == \"y\":\n add_furniture(itemcode, itemdescription, itemprice, itemrentalprice)\n else:\n iselectricappliance = input(\"Is this item an electric appliance?\"\n \" (Y/N): \")\n if iselectricappliance.lower() == \"y\":\n add_appliance(itemcode, itemdescription, itemprice, itemrentalprice)\n add_non_furniture_nor_appliance(itemcode, itemdescription, itemprice,\n itemrentalprice)\n print(\"New inventory item added\")",
"def buyEssentials(self, amount=5): \n ls = self.location.getEssentialList()\n print(f\"{bcolors.WHITE}\\nGreat job so far! Now, it's time to arm yourself with some essentials you would need to survive in the {self.location.worldType}.{bcolors.ENDC}\")\n print(f\"\\n{bcolors.CYAN}Following are the essential items for {self.location.worldName}. Please choose a minimum of 3 items to proceed.{bcolors.ENDC}\")\n outputList = [str(i+1) + \". \" + ls[i] + \"\\n\" for i in range(len(ls))]\n print(f\"\\n{bcolors.CYAN}{''.join(outputList)}{bcolors.ENDC}\")\n sizeEssentialList = len(ls)\n essentialsList = []\n\n \n choiceInput = False\n while choiceInput is False:\n choices = input(f\"{bcolors.CYAN}Input your selection as numbers 1, 2, 3, 4, or 5 separated by comma: {bcolors.ENDC}\")\n choiceInput = True\n choices = choices.split(',')\n for choice in choices:\n if choice not in ('1', '2', '3', '4', '5', 'quit', 'QUIT', 'Quit'):\n print(f\"\\n{bcolors.PINK}Please enter a valid Input{bcolors.ENDC}\\n\")\n choiceInput = False\n break\n \n for choice in choices:\n if choice.capitalize() == \"Quit\":\n # User input \"Quit\" at this stage. So, just quit the game.\n return choices\n \n\n try:\n # Convert input to integer for index in essentialList item\n choices = [int(i) for i in choices]\n except ValueError:\n # If input is not a number, Quit gracefully!\n print(\"Input is not a number. Quit\")\n return essentialsList\n\n if max(choices) > sizeEssentialList:\n print(f\"Invalid input! Input is not in essentialList\")\n return essentialsList\n\n for j in choices:\n if self.spendCoin(amount):\n essentialsList.append(ls[j-1])\n else:\n print(f\"You don't have enough money to buy {j}. You only have {self.coins} coins left.\")\n break\n self.assets = essentialsList\n print(f\"\\n{bcolors.WHITE}Thank you for buying the essentials. Now you are officially ready to enter into the {self.location.worldType}.\\nHere is your current asset bag with essential items and the available coins.{bcolors.ENDC}\")\n print(f\"\\n{bcolors.YELLOW}Asset Bag Contents: {self.assets}\\nCoins: {self.coins}{bcolors.ENDC}\")\n\n return self.assets",
"def user_prompt():\n\n # JSON VARIABLES FROM MENU\n espresso_water = MENU[\"espresso\"][\"ingredients\"][\"water\"]\n espresso_coffee = MENU[\"espresso\"][\"ingredients\"][\"coffee\"]\n espresso_cost = MENU[\"latte\"][\"cost\"]\n\n latte_water = MENU[\"latte\"][\"ingredients\"][\"water\"]\n latte_coffee = MENU[\"latte\"][\"ingredients\"][\"coffee\"]\n latte_milk = MENU[\"latte\"][\"ingredients\"][\"milk\"]\n latte_cost = MENU[\"latte\"][\"cost\"]\n\n cappuccino_water = MENU[\"cappuccino\"][\"ingredients\"][\"water\"]\n cappuccino_coffee = MENU[\"cappuccino\"][\"ingredients\"][\"coffee\"]\n cappuccino_milk = MENU[\"cappuccino\"][\"ingredients\"][\"milk\"]\n cappuccino_cost = MENU[\"cappuccino\"][\"cost\"]\n\n total_water = espresso_water + latte_water + cappuccino_water\n total_coffee = espresso_coffee + latte_coffee + cappuccino_coffee\n total_milk = latte_milk + cappuccino_milk\n total_cost = espresso_cost + latte_cost + cappuccino_cost\n\n acrued_money = 0\n\n repeat = True\n while repeat:\n choice = user_question()\n if choice == \"espresso\":\n if espresso_water > total_water:\n print(\"Sorry there's not enough water\")\n elif espresso_coffee > total_coffee:\n print(\"Sorry there's not enough coffee\")\n else:\n total_pennies = collect_money()\n if total_pennies > espresso_cost or total_pennies == espresso_cost:\n acrued_money += espresso_cost\n change = float(total_pennies - espresso_cost)\n change_dec = \"%.2f\" % change\n total_water -= espresso_water\n total_coffee -= espresso_coffee\n print(f\"Here is ${change_dec} in change\")\n print(\"Here's your order of ☕ Espresso\")\n else:\n print(\"Sorry that's not enough money. Money refunded.\")\n elif choice == 'latte':\n if latte_water > total_water:\n print(\"Sorry there's not enough water\")\n elif latte_coffee > total_coffee:\n print(\"Sorry there's not enough coffee\")\n elif latte_milk > total_milk:\n print(\"Sorry there's not enough milk\")\n else:\n total_pennies = collect_money()\n if total_pennies > latte_cost or total_pennies == latte_cost:\n acrued_money += latte_cost\n change = float(total_pennies - latte_cost)\n change_dec = \"%.2f\" % change\n total_water -= latte_water\n total_coffee -= latte_coffee\n total_milk -= latte_milk\n print(f\"Here is ${change_dec} in change\")\n print(\"Here's your order of ☕ Latte\")\n else:\n print(\"Sorry that's not enough money. Money refunded.\")\n\n elif choice == 'cappuccino':\n if cappuccino_water > total_water:\n print(\"Sorry there's not enough water\")\n elif cappuccino_coffee > total_coffee:\n print(\"Sorry there's not enough coffee\")\n elif cappuccino_milk > total_milk:\n print(\"Sorry there's not enough milk\")\n else:\n total_pennies = collect_money()\n if total_pennies > latte_cost or total_pennies == cappuccino_cost:\n acrued_money += cappuccino_cost\n change = float(total_pennies - cappuccino_cost)\n change_dec = \"%.2f\" % change\n total_water -= cappuccino_water\n total_coffee -= cappuccino_coffee\n total_milk -= cappuccino_milk\n print(f\"Here is ${change_dec} in change\")\n print(\"Here's your order of ☕ Cappuccino\")\n else:\n print(\"Sorry that's not enough money. Money refunded.\")\n\n elif choice == 'report':\n print(f\"Water: {total_water}ml\\nMilk: {total_milk}ml\\nCoffee: {total_coffee}g\\nMoney: ${acrued_money}\")\n elif choice == 'off':\n repeat = False\n else:\n print(\"Wrong Input, Start Application again\")\n repeat = False",
"def processmoney(coffee):\n total = int(input(\"How many quarters?: \")) * 0.25\n total += int(input(\"How many cents?: \")) * 0.1\n total += int(input(\"How many nickles?: \")) * 0.05\n total += int(input(\"How many pennies?: \")) * 0.01\n return total",
"def add_to_list(item):\n show_list()\n\n if len(shopping_list):\n position = input(\"Where should I add {}?\\n\"\n \"Press ENTER to add to the end of the list\\n\"\n \"> \".format(item))\n else:\n position = 0\n\n try:\n position = abs(int(position))\n except ValueError:\n position = None\n if position is not None:\n shopping_list.insert(position - 1, item)\n else:\n shopping_list.append(item)\n\n show_list()",
"def heal(self):\n # Creates a list of consumables from the players inventory\n consumables = [item for item in self.inventory\n if isinstance(item, wp.Consumable)]\n # If there are no consumables then tells player he has not healing item\n if not consumables:\n print(\"You don't have any items to heal you!\")\n return\n elif self.hp == 100:\n print('Your Full HP!')\n return\n # Shows an item that can heal you\n for i, item in enumerate(consumables, 1):\n print(\"Choose an item to use to heal: \")\n print(\"{}. {}\".format(i, item))\n\n valid = False\n while not valid:\n print(\"type the number associated with the item to use otherw\\\nise type q to not use\")\n # Gets user input of what item they want to use to heal\n choice = input(\"\")\n # Checks to see if user typed in q\n if choice == 'q':\n # Deny the heal of that particular item/cancel the heal\n break\n # Any other option\n else:\n # Uses the item and heals the player and then removes the\n # item from the players inventory\n try:\n to_eat = consumables[int(choice) - 1]\n self.hp = min(100, self.hp + to_eat.healing_value)\n self.inventory.remove(to_eat)\n print(\"Current HP: {}\".format(self.hp))\n valid = True\n except (ValueError, IndexError):\n print(\"Invalid choice, try again.\")",
"def add_food(add, value):\n global HUNGERVAL\n\n if add == True:\n HUNGERVAL = HUNGERVAL + value\n if HUNGERVAL > 150:\n HUNGERVAL = 150\n else:\n HUNGERVAL = HUNGERVAL - value",
"def dish_get_info() -> Dish:\r\n return Dish(input(\"Please enter the dish's name: \"),\r\n float(input(\"Please enter the price of the dish: \")),\r\n int(input(\"Please enter the number of calories of the dish: \")))",
"def data_update(fname_list,lname_list,favorite_number,freq) :\n\ttemp = []\n\tnum = 0\n\tcounter = 1\n\n\t#Taking the employee name and checking if it is not blank.\n\twhile 1 :\n\n\t\tfname = raw_input(\"Enter your first name:\")\n\t\tlname = raw_input(\"Enter your last name:\")\n\t\t\n\t\tif fname == \"\" or lname == \"\" : \n\t\t\tprint \"Enter a valid Name\"\n\t\t\tcontinue\n\n\t\telse :\t\n\t\t\tfname_list.append(fname)\n\t\t\tlname_list.append(lname)\n\t\t\tbreak\t\n\n\t#Taking in the input for the favorite numbers.\n\twhile counter < 7 :\n\t\ttry :\n\n\t\t\t#Slot #1\n\t\t\tif counter==1 :\n\t\t\t\n\t\t\t\tnum = int(raw_input(\"select 1st # (1 thru 69):\"))\n\n\t\t\t#Slot #2\n\t\t\telif counter==2 : \n\t\t\t\t\n\t\t\t\tnum = int(raw_input(\"select 2nd # (1 thru 69 excluding \"+ str(temp[0]) + \"):\"))\n\n\t\t\t#Slot #3\n\t\t\telif counter==3 : \n\t\t\t\t\t\t\t\n\t\t\t\tnum = int(raw_input(\"select 3rd # (1 thru 69 excluding \" + str(temp[0]) + \" and \"+ str(temp[1]) + \"):\"))\n\n\t\t\t#Slot #4\n\t\t\telif counter==4 : \n\t\t\t\t\t\t\t\n\t\t\t\tnum = int(raw_input(\"select 4th # (1 thru 69 excluding \" + str(temp[0]) + \", \" + str(temp[1]) + \" and \" + str(temp[2]) + \"):\"))\n\n\t\t\t#Slot #5\n\t\t\telif counter==5 : \n\t\t\t\t\t\t\t\n\t\t\t\tnum = int(raw_input(\"select 5th # (1 thru 69 excluding \" + str(temp[0]) + \", \" + str(temp[1]) + \", \" + str(temp[2]) + \" and \" + str(temp[3]) + \"):\"))\n\n\t\t\t#Powerball slot\n\t\t\telif counter==6 :\n\t\t\t\n\t\t\t\tnum = int(raw_input(\"select Power Ball # (1 thru 26):\"))\n\n\t\t#Non integer input\n\t\texcept ValueError :\t\n\t\t\t\t\n\t\t\t\tprint \"Enter Valid Input\"\n\t\t\t\tcontinue\t\n\n\t\t#Powerball number is out of the specified range.\n\t\tif (counter==6) and (num < 1 or num > 26) :\n\t\t\t\t\n\t\t\t\tprint \"Enter Powerball No. in range\"\n\t\t\t\tcontinue\t\n\n\t\t#One of the first five numbers is either repeated or out of the specified range.\n\t\tif (counter != 6) and ((num < 1 or num > 69) or (num in temp)) :\n\t\t\t\n\t\t\tprint \"Enter Valid Input, Number out of range or duplicate\"\n\t\t\tcontinue\n\n\t\tfreq[counter - 1].append(num)\n\t\tcounter += 1\n\t\ttemp.append(num)\n\t\t\n\t#Adding the current employee's favorite number list to the list for all employees.\n\tfavorite_number.append(temp)\n\treturn",
"def price_food():\r\n try:\r\n price = input(\"What is your budget? Enter a dollar amount(5.50) >\")\r\n price = price.replace('$', '')\r\n price = float(price)\r\n print('\\n')\r\n if price < 6:\r\n if price >= 5:\r\n print(\"Canteen 1 and Quad Cafe have food under 5 dollars\\n\\n\")\r\n\r\n if price >= 4:\r\n print(\"Canteen 2, Canteen 11, Canteen 16 and North Spine Food Court have food under 4 dollars\\n\\n\")\r\n\r\n if price >= 3:\r\n print(\"Canteen 9, Canteen 13, Canteen 14 and South Spine Food Court have food under 3 dollars\\n\\n\")\r\n\r\n else:\r\n print(\"Price is too low, please try another value.\\n\\n\")\r\n price_food()\r\n\r\n else:\r\n print(\"Any canteen's available for you!\\n\\n\")\r\n print('\\n')\r\n\r\n except ValueError:\r\n print(\"Please enter a dollar value\\n\\n\")\r\n price_food()",
"def inp_item_price(self) -> List[str]:\n \n return [str(input(\"Enter desired price for item: \"))]",
"def add_food(self, _food):\n self.food.append(_food)",
"def getItemFromAisle(self):\n category_items = {\"0\": None}\n choice = None\n\n # While not exit\n while choice != \"0\": \n self.cart.refreshCartDF()\n self.updateAisleData()\n \n # Add items from a category into a dictionary to refer to.\n for i, item in enumerate(self.aisle_data.values):\n category_items[f\"{i+1}\"] = [item[0], item[1], int(item[2])] #[Item, price, stock]\n clear()\n\n \"\"\"\n 0) Don't add item to cart\n\n Items Price In stock\n 1) Chicken $5.20 14\n \"\"\"\n print(print_banner(self.name, self.aisle_name))\n print(\"The items on the shelves stare back at you...\")\n print(\"0) Don't add item to cart\\n\")\n print(\" Items Price In stock\")\n for i, item in enumerate(self.aisle_data.values):\n # option_num) Item, price, stock\n print(f\"{i+1}) {item[0]}{get_spaces(12-len(item[0]))} ${item[1]}{get_spaces(7-len(str(item[1])))} {int(item[2])}\") \n\n choice = input(\"\\nAdd an item to cart?\\n\")\n clear()\n print(print_banner(self.name, self.aisle_name))\n if choice == \"\":\n print(\"Please enter something!\")\n elif choice == \"0\":\n break\n elif choice in category_items: # Item chosen to add to cart\n while True: # Check if valid number added to cart\n clear()\n print(print_banner(self.name, self.aisle_name))\n print(f\"Selected item: \\033[1;33;40m{category_items[choice][0]} ({category_items[choice][2]})\\033[0;37;40m\\n\")\n amt = input(\"Number to add (0 to stop): \").strip()\n\n if amt == \"\" :\n print(\"Please enter an amount!\")\n enter_to_continue()\n continue\n elif amt.isnumeric():\n amt = int(amt)\n else:\n amt = -1\n if amt > category_items[choice][2]:\n print(\"That's too many!\")\n enter_to_continue()\n continue\n elif amt >= 0:\n break\n print(\"Invalid option!\")\n enter_to_continue()\n if amt == 0: # Don't add anything\n pass\n else:\n category_items[choice][2] -= amt\n self.cart.addItemToCart(category_items[choice][0], amt, category_items[choice][1]*amt, get_time())\n print(f\"Added {amt} {category_items[choice][0]} to cart\")\n enter_to_continue()\n else:\n print(\"Invalid option!\")\n enter_to_continue()",
"def main():\n catalogue = Catalogue()\n\n # book1 = Book(\"title1\", 22323, \"author\", 4)\n # dvd1 = Dvd(\"dvdt\", 1111, \"jurassic\", 2, \"sept 2\", \"japan\")\n # catalogue.add_item(book1)\n # catalogue.add_item(dvd1)\n\n while True:\n print(\"\"\" ======LIBRARY MENU=======\n 1. Add Item\n 2. Remove item \n 3. Display all items\n 4. Checkout item \n 5. Return item \n 6. Find item \n 7. Exit\n \"\"\")\n choice = int(input(\"Enter Choice:\"))\n if choice == 1:\n catalogue.add_item(catalogue)\n elif choice == 2:\n user_input = int(input(\"enter call number: \"))\n catalogue.remove_item(user_input)\n elif choice == 3:\n catalogue.display_available_items()\n elif choice == 4:\n user_input = int(input(\"enter call number: \"))\n catalogue.check_out(user_input)\n elif choice == 5:\n user_input = int(input(\"enter call number: \"))\n catalogue.return_item(user_input)\n elif choice == 6:\n user_input = input(\"enter title to search: \").capitalize()\n catalogue.search(user_input)\n if choice == 7:\n sys.exit()",
"def coffee_machine():\n MENU = {\n \"espresso\": {\n \"ingredients\": {\n \"water\": 50,\n \"coffee\": 18,\n },\n \"cost\": 1.5,\n },\n \"latte\": {\n \"ingredients\": {\n \"water\": 200,\n \"milk\": 150,\n \"coffee\": 24,\n },\n \"cost\": 2.5,\n },\n \"cappuccino\": {\n \"ingredients\": {\n \"water\": 250,\n \"milk\": 100,\n \"coffee\": 24,\n },\n \"cost\": 3.0,\n }\n }\n my_ingredients= {\"water\":300,\"milk\":200,\"coffee\":100}\n flag=True\n can_make=True\n while flag:\n sum_money=0\n coffee=input(\"What would you want?(espresso/latte/cappuccino):\\n\").lower()\n if coffee==\"off\":\n flag=False\n break\n try:\n for item in my_ingredients:\n if coffee != \"espresso\":\n if my_ingredients[\"water\"] >= MENU[coffee][\"ingredients\"][\"water\"]:\n if my_ingredients[\"milk\"] >= MENU[coffee][\"ingredients\"][\"milk\"]:\n if my_ingredients[\"coffee\"] >= MENU[coffee][\"ingredients\"][\"coffee\"]:\n my_ingredients[\"water\"]-=MENU[coffee][\"ingredients\"][\"water\"]\n my_ingredients[\"milk\"]-=MENU[coffee][\"ingredients\"][\"milk\"]\n my_ingredients[\"coffee\"]-=MENU[coffee][\"ingredients\"][\"coffee\"]\n can_make = True\n break\n can_make, flag = False, False\n print(\"Sorry, there are not enough ingredients\")\n break\n else:\n if my_ingredients[\"water\"] >= MENU[coffee][\"ingredients\"][\"water\"]:\n if my_ingredients[\"coffee\"] >= MENU[coffee][\"ingredients\"][\"coffee\"]:\n my_ingredients[\"water\"] -= MENU[coffee][\"ingredients\"][\"water\"]\n my_ingredients[\"coffee\"] -= MENU[coffee][\"ingredients\"][\"coffee\"]\n can_make = True\n break\n can_make,flag = False,False\n print(\"Sorry, there are not enough ingredients\")\n break\n if can_make:\n print(\"Please insert coins\")\n num_quarters=int(input(\"How many quarters?: \"))\n num_dimes = int(input(\"How many dimes?: \"))\n num_nickles = int(input(\"How many nickles?: \"))\n num_pennies = int(input(\"How many pennies?: \"))\n sum_money=num_quarters*0.25+num_dimes*0.1+num_nickles*0.05+num_pennies*0.1**2\n if sum_money<MENU[coffee][\"cost\"]: print(\"Sorry,that's not enough money. Money refunded.\")\n elif sum_money==MENU[coffee][\"cost\"]:\n print(\"Here is your {}, Enjoy!\".format(coffee))\n else:\n print(\"Here is ${} in change\".format(round(sum_money-MENU[coffee][\"cost\"],2)))\n print(\"Here is your {}, Enjoy!\".format(coffee))\n except:\n print(\"Fix your input please\")",
"def budget_for_necessities():\n print(\"========== Displaying hotel options ==========\")\n for i in range(len(hotel_list)):\n print(\" -- Enter\", i+1, \"to choose -- \")\n hotel_list[i].print_hotel()\n print(\" \")\n while True:\n try:\n hotel_num = int(input(\"Please choose your hotel option (Enter a number between 1 to 7): \"))\n hotel_num -= 1\n if hotel_num in range(len(hotel_list)): break\n except ValueError:\n print(\"Please enter an positive integer. Try again... \")\n while True:\n try:\n hotel_stay = int(input(\"Please enter the duration (in days) of your stay: \"))\n if hotel_stay > 0: break\n except ValueError:\n print(\"Please enter an positive integer. Try again... \")\n user_hotel = hotel_list[hotel_num]\n user_hotel_price = user_hotel.get_price()\n user_hotel_name = user_hotel.get_name()\n # display car option and ask for user input\n print(\"\\n======== Displaying rental car options =========\")\n for i in range(len(car_list)):\n print(\" -- Enter\", i+1, \"to choose -- \")\n car_list[i].print_car()\n print(\" \")\n while True:\n try:\n car_num = int(input(\"Please choose your car rental option (Enter a number between 1 to 6): \"))\n car_num -= 1\n if car_num in range(len(hotel_list)): break\n except ValueError:\n print(\"Please enter an positive integer. Try again... \")\n while True:\n try:\n car_rental_day = int(input(\"Please enter the duration (in days) of your car rental: \"))\n if car_rental_day > 0: break\n except ValueError:\n print(\"Please enter an positive integer. Try again... \")\n # calculate user's total cost for car rental and hotel\n user_car = car_list[car_num]\n user_car_price = user_car.get_price()\n user_car_name = user_car.get_name()\n total_hotel_cost = hotel_stay * user_hotel_price\n total_car_rental_cost = car_rental_day * user_car_price\n print(\"\\n=== Displaying your hotel and car rental information ===\")\n print(\"Hotel: \", user_hotel.get_name())\n print(\"Hotel total cost: $\", total_hotel_cost)\n print(\"Car Rental: \", user_car.get_name())\n print(\"Car rental total cost: $\", total_car_rental_cost)\n print(\" \")\n # calculate remaining budget based on hotel and car's cost and/or ask for higher budget\n user_budget.calculate_new_budget(total_hotel_cost + total_car_rental_cost)\n print(\" \")\n return total_hotel_cost, total_car_rental_cost, user_hotel_name, user_car_name",
"def fill(self): # filling\n print('\\nWrite how many ml of water do you want to add:')\n self.water += int(input())\n print('Write how many ml of milk do you want to add:')\n self.milk += int(input())\n print('Write how many grams of coffee beans do you want to add:')\n self.beans += int(input())\n print('Write how many disposable cups of coffee do you want to add:\\n')\n self.cups += int(input())",
"def display_main_menu(my_list1):\n\n user_options = \"\"\"\n \\nWould you like to:\n A. Add a new item\n B. View list\n C. Delete first item in list\n D. Quit the program\n \"\"\"\n\n while True:\n # Collect input and include your if/elif/else statements here.\n print user_options\n user_input = raw_input(\">>> \").upper()\n\n if user_input == \"A\":\n add_to_list(my_list1)\n elif user_input == \"B\":\n view_list(my_list1)\n elif user_input == \"C\":\n delete_first_item(my_list1)\n elif user_input == \"D\":\n break\n else:\n print \"Sorry, I don't know what you mean. Please try again.\""
] | [
"0.6616214",
"0.6435766",
"0.6312016",
"0.6178241",
"0.61382043",
"0.6100695",
"0.6054668",
"0.6004036",
"0.5994595",
"0.5939111",
"0.58855563",
"0.5878791",
"0.5840907",
"0.58006424",
"0.5771299",
"0.57589585",
"0.57553786",
"0.57508004",
"0.57074237",
"0.5698907",
"0.56823206",
"0.565333",
"0.56388986",
"0.56005204",
"0.55536276",
"0.5547263",
"0.5545459",
"0.55354387",
"0.5515347",
"0.54899085"
] | 0.8009774 | 0 |
Computes the weighted F1_score for each label | def F1_score(y_t, y_p, weights):
P = Precision()
    R = Recall()  # per-label evaluation
F1_score_per_label = [] #store per label
P_per_label = []
R_per_label = []
F1_tot = 0 #weighted sum
for i in range(8):
P.update_state( y_t[:,i], y_p[:,i] )
R.update_state( y_t[:,i], y_p[:,i] )
p = P.result().numpy()
r = R.result().numpy()
P.reset_states()
R.reset_states()
if p+r == 0:
f1 = 0
else:
f1 = 2*p*r/ (p+r)
F1_score_per_label.append(f1)
P_per_label.append(p)
R_per_label.append(r)
F1_tot += f1*weights[i]
return F1_score_per_label, P_per_label, R_per_label, F1_tot | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def f1_score(true, pred, n_classes=2, pos_label=1, average=None, weights=None):\n\n if n_classes == 2:\n if np.sum(true * pred) == 0:\n f1 = 0.0\n else:\n f1 = skl_f1_score(true, pred, average='binary', labels=range(n_classes), pos_label=pos_label, sample_weight=weights)\n else:\n if average is None:\n f1 = skl_f1_score(true, pred, average='micro', labels=range(n_classes), pos_label=None, sample_weight=weights)\n else:\n f1 = skl_f1_score(true, pred, average=average, labels=range(n_classes), pos_label=None, sample_weight=weights)\n return f1",
"def f1_score_per_label(y_true, y_pred, threshold, eps=1e-9):\n \n y_pred = torch.ge(y_pred.float(), threshold).float()\n\n y_true = y_true.float()\n\n tp_l = (y_pred * y_true).sum(0).float()\n\n fp_l = (y_pred * (1 - y_true)).sum(0).float()\n\n fn_l = ((1 - y_pred) * y_true).sum(0).float()\n\n precision_label = tp_l.div(tp_l + fp_l + eps)\n\n recall_label = tp_l.div(tp_l + fn_l + eps)\n\n f1_label = (precision_label * recall_label).div(precision_label + recall_label + eps) * 2\n\n return f1_label, precision_label, recall_label",
"def f1_score(self):\n self.overall_f1_score = f1_score(\n self.y_true, self.y_pred, average = self.average_type).round(self.digits_count_fp)\n self.classes_f1_score = f1_score(\n self.y_true, self.y_pred, average = None).round(self.digits_count_fp)",
"def f1score(prediction,ytest):\n truepos = np.sum(ytest[prediction == 1])\n predpos = np.sum(prediction)\n actpos = np.sum(ytest)\n precision = truepos/predpos\n recall = truepos/actpos\n return 2*precision*recall/(precision + recall)",
"def binary_f1_score(preds, y):\n rounded_predictions = torch.round(torch.sigmoid(preds))\n\n return f1_score(y.cpu().numpy(), rounded_predictions.cpu().numpy(), average='weighted')",
"def f1_score(self, y_true=None, y_pred=None, labels=None, average=\"macro\", decimal=None, **kwargs):\n y_true, y_pred, binary, representor, decimal = self.get_processed_data(y_true, y_pred, decimal)\n matrix, imap, imap_count = cu.calculate_confusion_matrix(y_true, y_pred, labels, normalize=None)\n metrics = cu.calculate_single_label_metric(matrix, imap, imap_count)\n\n list_f1 = np.array([item[\"f1\"] for item in metrics.values()])\n list_weights = np.array([item[\"n_true\"] for item in metrics.values()])\n\n if average == \"micro\":\n tp_global = np.sum(np.diag(matrix))\n fp_global = fn_global = np.sum(matrix) - tp_global\n precision = tp_global / (tp_global + fp_global)\n recall = tp_global / (tp_global + fn_global)\n f1 = (2 * precision * recall) / (precision + recall)\n elif average == \"macro\":\n f1 = np.mean(list_f1)\n elif average == \"weighted\":\n f1 = np.dot(list_weights, list_f1) / np.sum(list_weights)\n else:\n f1 = dict([(label, np.round(item[\"f1\"], decimal)) for label, item in metrics.items()])\n return f1 if type(f1) == dict else np.round(f1, decimal)",
"def f1_score_loss(predicted_probs: tf.Tensor, labels: tf.Tensor) -> tf.Tensor:\n # Apply a sharpened sigmoid function to approximate the threshold\n thresholded_predictions = predicted_probs - ONE_HALF\n level_predictions = 1.0 / (1.0 + tf.exp(BETA * thresholded_predictions)) # [B, L]\n # predictions = tf.reduce_prod(level_predictions, axis=-1, keepdims=True) # [B, 1]\n predictions = tf.exp(tf.reduce_sum(tf.log(level_predictions), axis=-1, keepdims=True)) # [B, 1]\n\n # Compute the (approximate) F1 score\n f1_score = 2 * tf.reduce_sum(predictions * labels) / (tf.reduce_sum(predictions) + tf.reduce_sum(labels))\n return 1.0 - f1_score",
"def f1_score(confusion):\n sens = sensitivity(confusion)\n prec = precision(confusion)\n return 2 * sens * prec / (sens + prec)",
"def F1Score(labels, gold_file, wordnetCompression):\n\n gold_labels = []\n with open(gold_file, 'r') as file:\n for line in file:\n gold_labels.append(line.split()[1])\n\n flat_decoded_label_list = [item for sublist in labels for item in sublist]\n\n total = 0\n correct = 0\n if wordnetCompression:\n for i in range(len(gold_labels)):\n total += 1\n current_gold = gold_labels[i]\n pos = wn.lemma_from_key(current_gold).synset().pos()\n offset = wn.lemma_from_key(current_gold).synset().offset()\n wn_synset = \"wn:\" + str(offset).zfill( 8) + pos\n current_prediction = flat_decoded_label_list[i]\n #if len(current_prediction) > 1:\n # for elem in current_prediction:\n # if elem == current_gold:\n # correct += 1\n #else:\n if wn_synset == current_prediction:\n correct += 1\n else:\n for i in range(len(gold_labels)):\n if gold_labels[i] == flat_decoded_label_list[i]:\n correct += 1\n total += 1\n \n print(\"F1 SCORE: \", (correct/total)*100 )\n print(\"Total number of labels: \", total)\n print(\"Total correct: \", correct)\n print()",
"def f1_score_model(self, model, X, y):\n\n prediction = model.predict_classes(X)\n f1_macro = f1_score(y, prediction, average='macro')\n f1_micro = f1_score(y, prediction, average='macro')\n print(\"f1_macro: \", f1_score(y, prediction, average='macro'))\n print(\"f1_micro: \", f1_score(y, prediction, average=\"micro\"))\n print(\"f1_weighted: \", f1_score(y, prediction, average=\"weighted\"))\n return f1_macro, f1_micro",
"def F1_score(y, model):\n\tp = precision(y, model)\n\tr = recall(y, model)\n\tf = 2*((p*r)/(p+r))\n\treturn f",
"def f1_loss(y_true, y_pred):\n return 1.0 - f1_score(y_true, y_pred, average='weighted')",
"def f1_score(confusion):\n s = np.power(sensitivity(confusion), -1)\n p = np.power(precision(confusion), -1)\n return 2 / (s + p)",
"def f1_score(prediction, ground_truth):\n return precision_recall_f1(prediction, ground_truth)[2]",
"def per_class_f1(y, pred):\n \n num_classes = len(set(y))\n y = to_array(y, num_classes)\n pred = to_array(pred, num_classes)\n \n results = []\n for j in range(num_classes):\n class_y = y[:,j]\n class_pred = pred[:,j]\n f1 = f1_score(class_y, class_pred, average='binary')\n results.append([f1])\n return np.array(results)",
"def custom_scoring(y_te, y_pred):\n #weights computed with training data set\n w = np.array([0.02409584, 0.00787456, 0.03685528, 0.01760536, 0.04589969, 0.8483942 , 0.01724058, 0.00203449]);\n \n ## F1 SCORES\n #evaluate F1 score, precision and recall for each label, \n #along with custom proportionally weighted F1 score\n #and built in weighted and macro F1 scores\n F1_tab, Ptab, Rtab, pf1 = F1_score(y_te, y_pred, w)\n f = F1Score(8, threshold = 0.5, average = 'weighted')\n f.update_state(y_te, y_pred)\n wf1 = f.result().numpy() #weighted f1 score\n f.reset_states()\n f = F1Score(8, threshold = 0.5, average = 'macro')\n f.update_state(y_te, y_pred)\n mf1 = f.result().numpy() #macro f1 score\n f.reset_states()\n\n ##EDIT DISTANCE\n #edit_dist_av = LevDistMultilabels(y_true, y_pred)\n\n ##ACCURACY\n #evaluate accuracy per label\n acc_tab = Acc(y_te, y_pred)\n\n return wf1, mf1, pf1, F1_tab, Ptab, Rtab, acc_tab",
"def _f1_score_(prediction, ground_truth):\n prediction_tokens = _normalize_answer(prediction).split()\n ground_truth_tokens = _normalize_answer(ground_truth).split()\n common = Counter(prediction_tokens) & Counter(ground_truth_tokens)\n num_same = sum(common.values())\n if num_same == 0:\n return 0\n precision = 1.0 * num_same / len(prediction_tokens)\n recall = 1.0 * num_same / len(ground_truth_tokens)\n f1 = (2 * precision * recall) / (precision + recall)\n return f1",
"def get_f1(self, predictions):\n preds = []\n ground_truth = []\n for index in range(len(self.test_data)):\n preds.append(predictions[index])\n ground_truth.append(self.test_data[index][self.target_attribute])\n f1 = metrics.f1_score(ground_truth,preds,labels=['win','loss','draw'],average='macro')\n return f1",
"def f1_score(y_true, y_pred):\n num = 2 * precision_score(y_true, y_pred) * recall_score(y_true, y_pred)\n deno = (precision_score(y_true, y_pred) + recall_score(y_true, y_pred))\n return num / deno",
"def get_f1_score(actual_labels, preds_labels, binary_classifcation, pos_label=\"malaria\", confusion_matrix_title=\"\"):\n # demonstration of calculating metrics for a neural network model using sklearn\n if not binary_classifcation:\n # For multiclass classification.\n accuracy = accuracy_score(actual_labels, preds_labels)\n precision = precision_score(actual_labels, preds_labels, average=\"macro\")\n recall = recall_score(actual_labels, preds_labels, average=\"macro\")\n f1 = f1_score(actual_labels, preds_labels, average=\"macro\")\n print('Accuracy: %f' % accuracy)\n print('Precision: %f' % precision)\n print('Recall: %f' % recall)\n print('F1 score: %f' % f1)\n\n else:\n accuracy = accuracy_score(actual_labels, preds_labels)\n print('Accuracy: %f' % accuracy)\n # # precision tp / (tp + fp)\n precision = precision_score(actual_labels, preds_labels, pos_label=pos_label)\n print('Precision: %f' % precision)\n # recall: tp / (tp + fn)\n recall = recall_score(actual_labels, preds_labels, pos_label=pos_label)\n print('Recall: %f' % recall)\n # f1: 2 tp / (2 tp + fp + fn)\n f1 = f1_score(actual_labels, preds_labels, pos_label=pos_label)\n print('F1 score: %f' % f1)\n # ROC AUC\n # auc = roc_auc_score(test_labels, basic_cnn_preds_labels)\n # print('ROC AUC: %f' % auc)\n\n # confusion matrix\n disp = plot_confusion_matrix(y_true=actual_labels, y_pred=preds_labels,\n display_labels=list(np.unique(actual_labels)),\n cmap=plt.cm.Blues,\n normalize=None)\n disp.ax_.set_title(confusion_matrix_title)\n plt.show()\n matrix = confusion_matrix(actual_labels, preds_labels)\n print(matrix)\n # if plot_confusion_matrix:\n # show_confusion_matrix(matrix=matrix, labels=list(np.unique(actual_labels)))",
"def f1_class(target, prediction, params):\n f1_array = f1_score(target, prediction, labels=np.arange(params['classnum']), average=None)\n return np.round(f1_array, 4)",
"def f1_score(y_true, y_pred):\n\ty_true = tf.cast(y_true, \"float32\")\n\ty_pred = tf.cast(tf.round(y_pred), \"float32\") # implicit 0.5 threshold via tf.round\n\ty_correct = y_true * y_pred\n\n\n\tsum_true = tf.reduce_sum(y_true, axis=1)\n\tsum_pred = tf.reduce_sum(y_pred, axis=1)\n\tsum_correct = tf.reduce_sum(y_correct, axis=1)\n\n\n\tprecision = sum_correct / sum_pred\n\trecall = sum_correct / sum_true\n\tf_score = 2 * precision * recall / (precision + recall)\n\tf_score = tf.where(tf.is_nan(f_score), tf.zeros_like(f_score), f_score)\n\n\n\treturn tf.reduce_mean(f_score)",
"def get_f1_scores(labels_true: np.ndarray, labels_pred: np.ndarray, return_precision_recall: bool = False) \\\n -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray, np.ndarray]]:\n confusion = get_confusion_matrix(labels_true, labels_pred)\n n_labels = confusion.shape[0]\n counts_correct = confusion.diagonal()\n counts_true = confusion.dot(np.ones(n_labels))\n counts_pred = confusion.T.dot(np.ones(n_labels))\n mask = counts_true > 0\n recalls = np.zeros(n_labels)\n recalls[mask] = counts_correct[mask] / counts_true[mask]\n precisions = np.zeros(n_labels)\n mask = counts_pred > 0\n precisions[mask] = counts_correct[mask] / counts_pred[mask]\n f1_scores = np.zeros(n_labels)\n mask = (counts_true > 0) & (counts_pred > 0)\n f1_scores[mask] = 2 / (1 / precisions[mask] + 1 / recalls[mask])\n if return_precision_recall:\n return f1_scores, precisions, recalls\n else:\n return f1_scores",
"def f1_score(confusion):\n p = precision(confusion)\n r = sensitivity(confusion)\n F1 = (2 * p * r) / (p + r)\n return F1",
"def f1_score(self):",
"def f1(gold_labels, predicted_labels):\n \n precision_value = float(precision(gold_labels, predicted_labels))\n recall_value = float(recall(gold_labels, predicted_labels))\n top = float(2 * precision_value * recall_value)\n bottom = precision_value + recall_value\n\n if(top == 0):\n return 0\n else:\n return float(top / bottom)",
"def f1(y_true, y_pred):\n p = precision(y_true, y_pred)\n r = recall(y_true, y_pred)\n score = 2 * p * r / (p + r)\n return score",
"def performance_metric(y_true, y_pred):\n f1_arr = []\n for i in range(np.shape(y_pred)[1]):\n f1 = f1_score(np.array(y_true)[:, i], y_pred[:, i], average='weighted')\n f1_arr.append(f1)\n \n score = np.mean(f1_arr)\n return score",
"def score(self, df: pd.DataFrame, label_column: str) -> float:\n assert label_column not in self.feature_columns, 'Label column is in the feature list.'\n assert label_column in df.columns, 'Label column is not in the dataframe.'\n\n rounded_preds = self.predict(df).round()\n return f1_score(df[label_column].values, rounded_preds)",
"def calculate_f1_score(predictions, actuals):\n predictions = predictions > 0.5\n # fbeta_score throws a confusing error if inputs are not numpy arrays\n predictions, actuals, = np.array(predictions), np.array(actuals)\n # We need to use average='samples' here, any other average method will generate bogus results\n return fbeta_score(actuals, predictions, beta=1, average='samples')"
] | [
"0.7590878",
"0.74019974",
"0.7304507",
"0.71994865",
"0.7189998",
"0.71749127",
"0.71248233",
"0.691543",
"0.68868256",
"0.6838204",
"0.68092096",
"0.679124",
"0.6774323",
"0.6744536",
"0.67118233",
"0.6662244",
"0.6661274",
"0.66467386",
"0.6592144",
"0.6565224",
"0.6547959",
"0.6517513",
"0.649741",
"0.64757293",
"0.64612716",
"0.64584464",
"0.64376795",
"0.6433072",
"0.64260346",
"0.6423095"
] | 0.7838961 | 0 |
Rescales array of images to specified dimensions. | def resize_array(images, dim=None):
size = images.shape[0]
imgs = np.zeros((size, dim, dim))
for i in range(size):
imgs[i, :, :] = skimage_resize(images[i, :, :], (dim, dim))
return imgs | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def scale_images(images, new_shape):\n\n images_list = list()\n\n for image in images:\n new_image = resize(image, new_shape)\n images_list.append(new_image)\n \n return np.asarray(images_list)",
"def rescale_images(original_images):\n mobile_net_possible_dims = [128, 160, 192, 224]\n dim_goal = 128\n \n for dim in mobile_net_possible_dims:\n if original_images.shape[1] <= dim:\n dim_goal = dim\n break;\n print(f\"Image rescaled from dimension {original_images.shape[1]} to {dim_goal} for MobileNet\")\n scale = dim_goal/original_images.shape[1]\n images = np.empty((original_images.shape[0], dim_goal, dim_goal))\n for i, original_image in enumerate(original_images):\n images[i] = rescale(original_image, (scale, scale), multichannel=False)\n return images",
"def _batch_resize(self, ims, res, strategy='nearest'):\n order = {'nearest': 0, 'bilinear': 1, 'bicubic': 3}[strategy]\n bs = ims.shape[0]\n out = []\n log.info('Input ims shape: %s' % repr(ims.shape))\n has_extra_dim = len(ims.shape) == 4\n if not has_extra_dim:\n ims = ims[..., np.newaxis]\n h, w = ims.shape[1:3]\n for i in range(bs):\n o = interpolation.zoom(\n ims[i, ...], [res[0] / h, res[1] / w, 1.0], np.float32, order=order)\n out.append(o)\n out = np.stack(out)\n if not has_extra_dim:\n out = np.reshape(out, out.shape[:-1])\n return out",
"def resize_images(imgs, size=(720, 720)):\n res = []\n for img in imgs:\n\n factor = max(1, size[0]/float(img.shape[0]), size[1]/float(img.shape[1]))\n if factor != 1: img = scale_image(img, factor)\n\n img = center_crop(img, size)\n res.append(img)\n\n return res",
"def rescale_images(images, rescale='basic', rng=(0,255), dtype='uint8'):\n # Error check\n if rescale not in [None, False, 'basic', 'all', 'average']:\n raise TypeError(\"rescale must be None, 'basic', 'all', or 'average'\")\n\n # Rescale / clip if requested\n if rescale:\n # Apply preliminary rescaling if necessary\n if rescale in ['all', 'average']:\n trg_min, trg_max = rng\n\n # rescale min\n if rescale == 'all':\n _min = np.min(images)\n else: # 'average'\n _min = np.mean([im.min() for im in images])\n\n # Temporarily rescale min to zero\n for im in images:\n im -= _min\n\n # rescale max\n if rescale == 'all':\n _max = np.max(images)\n else: # 'average'\n _max = np.mean([np.max(im) for im in images])\n\n # Rescale max, accounting for target min\n for im in images:\n im *= (trg_max - trg_min) / _max\n im += trg_min\n\n # Clip images to range, cast to requested datatype\n images = [np.clip(im, *rng).astype(dtype) for im in images]\n\n # If rescale is None, cast images to requested datatype without clipping\n else:\n images = [im.astype(dtype) for im in images]\n\n # Return\n return images",
"def resize_images(self, images):\n \n img_list = []\n \n for img in images:\n \n yield np.resize(img, (64, 64, 3))",
"def resize_images(images, size):\n num_images = images.shape[0]\n num_channels = images.shape[3]\n output_images = numpy.empty((num_images, size[0], size[1], num_channels), dtype=images.dtype)\n with tqdm.tqdm(total=num_images) as progress:\n for i in range(num_images):\n output_images[i, :, :, :] = resize_image(images[i, :, :, :], size)\n progress.update(1)\n\n return output_images",
"def resize_batch(images : List[np.ndarray], size : Tuple[int,int,int,int], resize_kind='stretch') :\n assert resize_kind in ['stretch'] and len(size) == 4\n n, w, h, c = size if size[-1]==3 else tuple(size[i] for i in [0,3,1,2])\n resize = lambda x: BaseRuntime.resize_stretch(x, (h,w))\n dtype = images[0].dtype\n n_pad = n - len(images)\n batch_pad = [np.zeros((h,w,c),dtype=dtype)] * n_pad\n batch_image = list(map(resize, images))\n batch_image = batch_image + batch_pad\n return np.stack(batch_image)",
"def reshape_raw_images(raw_im, \n resize_shape,\n ):\n _reshaped_im = np.array([cv2.resize(_lr, tuple(resize_shape[-2:]), \n interpolation=cv2.INTER_AREA) for _lr in raw_im])\n return _reshaped_im",
"def rescale_image(image: np.ndarray, scale: float) -> np.ndarray:\n (height, width) = image.shape[:2]\n new_dims = (int(width * scale), int(height * scale))\n return cv2.resize(image, new_dims, interpolation=cv2.INTER_CUBIC)",
"def resample(image_array, target_pixel_dims_list, is_seg=False):\n # if dealing with segmentation images, do not interpolate\n # this is done by choosing interpolation order == 0\n order = 0 if is_seg == True else 3\n\n original_dims_in_pixels = [image_array.shape[d]\n for d in range(len(image_array.shape))]\n compression_list = [target_pixel_dims_list[d] / original_dims_in_pixels[d]\n for d in range(len(image_array.shape))]\n\n if (is_seg):\n resized_image = ndimage.interpolation.zoom(\n image_array, zoom=compression_list, order=order, cval=0)\n return resized_image\n else:\n resized_image = ndimage.interpolation.zoom(\n image_array, zoom=compression_list, order=order, cval=0)\n return resized_image",
"def _resize(img, max_dim=128):\n if max(img.shape[:3]) <= max_dim:\n return img\n else:\n new_size = [max_dim / s if s >= max_dim else 1.0 for s in img.shape[:3]]\n new_size.append(1.0) # for channel\n return scipy.ndimage.zoom(img, new_size, order=2)",
"def resize_image(arr, img_height, img_width):\n arr_pil = Image.fromarray((arr * 255).astype(np.uint8)) # Convert to a PIL Python Image Library format\n out = arr_pil.resize((img_height, img_width))\n a = image.img_to_array(out)\n return a",
"def rescale_img(image: Matrix, scales: List[float]) -> List[Matrix]:\n rescaled_imgs = []\n for scale in scales:\n if scale == 1:\n rescaled_imgs.append(np.copy(image))\n else:\n rescaled_imgs.append(rescale(image, scale))\n return rescaled_imgs",
"def transform_images(img, size):\n return tf.image.resize(img, (size, size)) / 255",
"def transform_images(x_image: tf.Tensor, size: int) -> tf.Tensor:\n x_image = tf.image.resize(x_image, (size, size))\n x_image = x_image / 255\n return x_image",
"def _resize_masks(self, results):\n for key in results.get('mask_fields', []):\n if results[key] is None:\n continue\n if self.keep_ratio:\n results[key] = results[key].rescale(results['scale'])\n else:\n results[key] = results[key].resize(results['img_shape'][:2])",
"def _resize_masks(self, results):\n for key in results.get('mask_fields', []):\n if results[key] is None:\n continue\n if self.keep_ratio:\n results[key] = results[key].rescale(results['scale'])\n else:\n results[key] = results[key].resize(results['img_shape'][:2])",
"def resize_image_data(data, resize_shape):\n if data is None or len(resize_shape) == 0:\n return data\n\n if len(data.shape) > 1 and np.array_equal(data[0].shape, resize_shape):\n return data\n\n output_data = []\n for im in data:\n output_data.append(zoom(input=im, zoom=np.divide(resize_shape, im.shape)))\n\n return np.array(output_data)",
"def resize(image, size):\n return np.array(Image.fromarray(image).resize(size))",
"def _resize_img(self, results):\n for key in results.get('img_fields', ['img']):\n if self.keep_ratio:\n img, scale_factor = general_ocr.imrescale(\n results[key],\n results['scale'],\n return_scale=True,\n backend=self.backend)\n # the w_scale and h_scale has minor difference\n # a real fix should be done in the general_ocr.imrescale in the future\n new_h, new_w = img.shape[:2]\n h, w = results[key].shape[:2]\n w_scale = new_w / w\n h_scale = new_h / h\n else:\n img, w_scale, h_scale = general_ocr.imresize(\n results[key],\n results['scale'],\n return_scale=True,\n backend=self.backend)\n results[key] = img\n\n scale_factor = np.array([w_scale, h_scale, w_scale, h_scale],\n dtype=np.float32)\n results['img_shape'] = img.shape\n # in case that there is no padding\n results['pad_shape'] = img.shape\n results['scale_factor'] = scale_factor\n results['keep_ratio'] = self.keep_ratio",
"def process_batch(self, image_batch):\n images = []\n for image_data in image_batch:\n image_resize = cv2.resize(image_data, (0,0), fx=0.5, fy=0.5) #NOTE\n images.append(image_resize)\n\n return np.array(images)",
"def rescale_tif(arr, clamp_low=262, clamp_high=1775):\n img_array = exposure.rescale_intensity(arr, in_range=(clamp_low, clamp_high)) # Landsat 5 ARD .25 and 97.75 percentile range used in training\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n img_array = img_as_ubyte(img_array)\n return img_array",
"def read_orig_dim_images_from_list_of_numpy_arrays(list_of_images, target_img_height, target_img_width):\n out = []\n for arr in list_of_images:\n x = resize_image(arr / np.max(arr), target_img_height, target_img_width)\n x = np.expand_dims(x, axis=0)\n out.append(x)\n return np.vstack(out)",
"def rescale(self, img):\n\n if self.scale != 1:\n return imutils.resize(img, width=int(img.shape[1] * self.scale))\n else:\n return img",
"def resizeImages(img_path,size=(50,50)):\n image = cv2.imread(img_path)\n resized = cv2.resize(image, size, interpolation=cv2.INTER_CUBIC)\n img=np.array([resized.flatten()])\n #print('resized image')\n return img",
"def _resize_img(self, results):\n for key in results.get('img_fields', ['img']):\n if self.keep_ratio:\n img, scale_factor = mmcv.imrescale(\n results[key],\n results['scale'],\n return_scale=True,\n interpolation=self.interpolation,\n backend=self.backend)\n # the w_scale and h_scale has minor difference\n # a real fix should be done in the mmcv.imrescale in the future\n new_h, new_w = img.shape[:2]\n h, w = results[key].shape[:2]\n w_scale = new_w / w\n h_scale = new_h / h\n else:\n img, w_scale, h_scale = mmcv.imresize(\n results[key],\n results['scale'],\n return_scale=True,\n interpolation=self.interpolation,\n backend=self.backend)\n results[key] = img\n\n scale_factor = np.array([w_scale, h_scale, w_scale, h_scale],\n dtype=np.float32)\n results['img_shape'] = img.shape\n # in case that there is no padding\n results['pad_shape'] = img.shape\n results['scale_factor'] = scale_factor\n results['keep_ratio'] = self.keep_ratio",
"def make_multiscale(image, resolutions,\n resize_method=tf.image.ResizeMethod.BICUBIC,\n num_channels=3):\n scaled_images = []\n for height in resolutions:\n scaled_image = tf.image.resize_images(\n image,\n size=[height, height], # assuming that height = width\n method=resize_method)\n scaled_image = tf.to_int64(scaled_image)\n scaled_image.set_shape([height, height, num_channels])\n scaled_images.append(scaled_image)\n\n return scaled_images",
"def preprocess_image(images,\n height=INCEPTION_DEFAULT_IMAGE_SIZE,\n width=INCEPTION_DEFAULT_IMAGE_SIZE,\n scope=None):\n is_single = images.shape.ndims == 3\n with ops.name_scope(scope, 'preprocess', [images, height, width]):\n if not images.dtype.is_floating:\n images = math_ops.to_float(images)\n if is_single:\n images = array_ops.expand_dims(images, axis=0)\n resized = image_ops.resize_bilinear(images, [height, width])\n resized = (resized - 128.0) / 128.0\n if is_single:\n resized = array_ops.squeeze(resized, axis=0)\n return resized",
"def resize_real_images(self, images):\n block_idx = (self.growth_idx + 1) // 2\n height, width = self.params[\"generator_projection_dims\"][0:2]\n resized_image = tf.image.resize(\n images=images,\n size=[\n height * (2 ** block_idx), width * (2 ** block_idx)\n ],\n method=\"nearest\",\n name=\"resized_real_image_{}\".format(self.growth_idx)\n )\n\n return resized_image"
] | [
"0.7039275",
"0.6845864",
"0.67618686",
"0.6744246",
"0.6735477",
"0.6676968",
"0.6545257",
"0.65400434",
"0.64330506",
"0.64327246",
"0.6339051",
"0.6335927",
"0.6315591",
"0.63030136",
"0.62993336",
"0.6288982",
"0.6275241",
"0.6275241",
"0.62733996",
"0.62527394",
"0.6202848",
"0.617878",
"0.61720926",
"0.6147985",
"0.6138468",
"0.6130152",
"0.6121778",
"0.6109838",
"0.6060793",
"0.6056317"
] | 0.73159486 | 0 |
Resizes raw output of frifriidownload. Takes the output h5py file of frifriidownload.ipynb and returns an h5py file with the exact same format, but with resized images. Arguments | def initial_resizing(fr_raw_data_path, fr_data_path, dim=300):
with h5py.File(fr_raw_data_path, 'r') as data:
images = resize_array(np.asarray(data['images'].value), dim=dim)
labels = data['labels'].value
with h5py.File(fr_data_path, 'w') as f:
f.create_dataset('images', data=images)
with h5py.File(fr_raw_data_path, 'r') as data:
f.copy(data['fri_data'], 'fri_data')
f.copy(data['frii_data'], 'frii_data')
f.copy(data['labels'], 'labels') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def main(inputfile, output, size):\n if not output:\n output = join(dirname(inputfile), str(size))\n if not isdir(output):\n os.mkdir(output)\n\n logger.info('Resizing images from: %s' % inputfile)\n inputfile = realpath(inputfile)\n #/usr/share/datasets/KSCGR_Original/data1/boild-egg/0.jpg 0\n nb_lines = count_lines(inputfile)\n pbar = pb.ProgressBar(nb_lines)\n with open(inputfile) as fin:\n for line in fin:\n path, tl = line.strip().split()\n newpath = create_path(path, output)\n img = cv2.imread(path)\n img = resize_image(img, size)\n cv2.imwrite(newpath, img)\n pbar.update()\n logger.info('Total of images resized: %d' % nb_lines)",
"def convert_tile(fname, out_fname, compression, filter_opts):\n with h5py.File(out_fname, 'w') as fid:\n with rasterio.open(fname) as ds:\n # global attributes\n attach_attributes(fid, ds.tags())\n\n # find and convert every subsdataset (sds)\n for sds_name in ds.subdatasets:\n with rasterio.open(sds_name) as sds:\n ds_name = Path(sds_name.replace(':', '/')).name\n\n # create empty or copy the user supplied filter options\n if not filter_opts:\n f_opts = dict()\n else:\n f_opts = filter_opts.copy()\n\n # use sds native chunks if none are provided\n if 'chunks' not in f_opts:\n f_opts['chunks'] = list(sds.block_shapes[0])\n\n # modify to have 3D chunks if we have a multiband sds\n if sds.count == 3:\n # something could go wrong if a user supplies\n # a 3D chunk eg (2, 256, 340)\n f_opts['chunks'].insert(0, 1)\n f_opts['chunks'] = tuple(f_opts['chunks'])\n else:\n f_opts['chunks'] = tuple(f_opts['chunks'])\n\n # subdataset attributes and spatial attributes\n attrs = sds.tags()\n attrs['geotransform'] = sds.transform.to_gdal()\n attrs['crs_wkt'] = sds.crs.wkt\n\n # ensure single band sds is read a 2D not 3D\n data = sds.read() if sds.count == 3 else sds.read(1)\n\n # write to disk as an IMAGE Class Dataset\n write_h5_image(data, ds_name, fid, attrs=attrs,\n compression=compression,\n filter_opts=f_opts)",
"def hdfpath_to_nifti1image(file_path, h5path):\n with h5py.File(file_path, 'r') as f:\n return hdfgroup_to_nifti1image(f[h5path])",
"def convert_vrt(fname, out_fname, dataset_name='dataset',\n compression=H5CompressionFilter.LZF, filter_opts=None,\n attrs=None):\n with h5py.File(out_fname) as fid:\n with rasterio.open(fname) as rds:\n # set default chunks and set dimensions\n if rds.count == 3:\n chunks = (3, 256, 256)\n dims = (3, rds.height, rds.width)\n else:\n chunks = (256, 256)\n dims = (rds.height, rds.width)\n\n # create empty or copy the user supplied filter options\n if not filter_opts:\n filter_opts = dict()\n filter_opts['chunks'] = chunks\n else:\n filter_opts = filter_opts.copy()\n\n\n if 'chunks' not in filter_opts:\n filter_opts['chunks'] = chunks\n\n # modify to have 3D chunks if we have a multiband vrt\n if rds.count == 3 and len(filter_opts['chunks']) != 3:\n # copy the users original 2D chunk and insert the third\n chunks = list(filter_opts['chunks'])\n chunks.insert(0, 3)\n filter_opts['chunks'] = chunks\n\n # dataset attributes\n if attrs:\n attrs = attrs.copy()\n else:\n attrs = {}\n\n attrs['geotransform'] = rds.transform.to_gdal()\n attrs['crs_wkt'] = rds.crs.wkt\n\n # dataset creation options\n kwargs = compression.config(**filter_opts).dataset_compression_kwargs()\n kwargs['shape'] = dims\n kwargs['dtype'] = rds.dtypes[0]\n\n dataset = fid.create_dataset(dataset_name, **kwargs)\n attach_image_attributes(dataset, attrs)\n\n # tiled processing (all cols by chunked rows)\n ytile = filter_opts['chunks'][1] if rds.count == 3 else filter_opts['chunks'][0]\n tiles = generate_tiles(rds.width, rds.height, rds.width, ytile)\n\n for tile in tiles:\n # numpy index\n if rds.count == 3:\n idx = (\n slice(None),\n slice(tile[0][0], tile[0][1]),\n slice(tile[1][0], tile[1][1])\n )\n else:\n idx = (\n slice(tile[0][0], tile[0][1]),\n slice(tile[1][0], tile[1][1])\n )\n\n # ensure single band rds is read as 2D not 3D\n data = rds.read(window=tile) if rds.count == 3 else rds.read(1, window=tile)\n\n # write\n dataset[idx] = data",
"def read_h5_file(folder, filen):\n \n ### file path\n \n fpath = folder + filen + '.h5'\n assert os.path.exists(fpath), \"The out.h5 file does NOT exist for \" + fpath\n fl = h5py.File(fpath, 'r')\n \n ### cell information\n \n xu = np.array(fl['/cells/comu'], dtype=np.float32)\n \n ### simulation information\n \n lx = fl['/info/box/x'][...]\n ly = fl['/info/box/y'][...]\n dt = fl['/info/dt'][...]\n nsteps = fl['/info/nsteps'][...]\n nfils = fl['/info/nfils'][...]\n nbeads = fl['/info/nbeads'][...]\n nsamp = fl['/info/nsamp'][...]\n nbpf = fl['/info/nbpf'][...]\n \n ### simulation parameters\n \n density = fl['/param/density'][...]\n kappa = fl['/param/kappa'][...]\n km = fl['/param/km'][...]\n pa = fl['/param/pa'][...]\n pp = fl['/param/pp'][...]\n bl = fl['/param/bl'][...]\n sigma = fl['/param/sigma'][...]\n \n fl.close()\n \n ### generate classes to submerge data\n \n sim = misc_tools.Simulation(lx, ly, dt, nsteps, nfils, nbeads, nsamp, nbpf, \\\n density, kappa, km, pa, pp, bl, sigma)\n fils = misc_tools.Cells(xu, nbpf, sim)\n \n return sim, fils",
"def ImageOutput(name, out_ds, tile_size, resampling, init_dest, output_dir, verbose,mbtiles):\n\n resampler = Resampler(resampling)\n\n if name == \"hybrid\":\n return HybridImageOutput(out_ds, tile_size, resampler, init_dest, output_dir, verbose)\n\n if name == \"png\":\n image_format = \"PNG\"\n elif name == \"jpeg\":\n image_format = \"JPEG\"\n\n return SimpleImageOutput(out_ds, tile_size, resampler, init_dest, output_dir, verbose, [image_format],mbtiles)",
"def read_h5_file_arvind_format(folder, filen):\n \n ### file path\n \n fpath = folder + filen + '.h5'\n assert os.path.exists(fpath), \"The out.h5 file does NOT exist for \" + fpath\n fl = h5py.File(fpath, 'r')\n \n ### cell information\n \n xu = np.array(fl['/positions/xu'], dtype=np.float32)\n pol = np.array(fl['/positions/ori'], dtype=np.float32)\n pol = np.array([xt.T for xt in pol[:]])\n \n ### simulation information\n \n lx = fl['/info/box/x'][...]\n ly = fl['/info/box/y'][...]\n dt = fl['/info/dt'][...]\n nsteps = fl['/info/nsteps'][...]\n nfils = fl['/info/nfils'][...]\n nbeads = fl['/info/nbeads'][...]\n nsamp = fl['/info/nsamp'][...]\n nbpf = fl['/info/nbpf'][...]\n \n ### simulation parameters\n \n density = fl['/param/density'][...]\n kappa = fl['/param/kappa'][...]\n km = fl['/param/km'][...]\n pa = fl['/param/pa'][...]\n pp = fl['/param/pp'][...]\n bl = fl['/param/bl'][...]\n sigma = fl['/param/sigma'][...]\n \n fl.close()\n \n ### generate classes to submerge data\n \n sim = misc_tools.Simulation(lx, ly, dt, nsteps, nfils, nbeads, nsamp, nbpf, \\\n density, kappa, km, pa, pp, bl, sigma)\n fils = misc_tools.Cells(xu, pol, nbpf, sim)\n \n return sim, fils",
"def rewrite_hdf5(\n self, job_name=None, info=False, exclude_groups=None, exclude_nodes=None\n ):\n if job_name is not None:\n state.logger.warning(\n \"Specifying job_name is deprecated and ignored! Future versions will change signature.\"\n )\n file_name = self.file_name\n new_file = file_name + \"_rewrite\"\n\n self_hdf = FileHDFio(file_name=file_name)\n hdf_new = FileHDFio(file_name=new_file, h5_path=\"/\")\n\n old_logger_level = state.logger.level\n state.logger.level = 50\n hdf_new = self.hd_copy(self_hdf, hdf_new)\n state.logger.level = old_logger_level\n\n if info:\n print(\n \"compression rate from old to new: {}\".format(\n self.file_size(self_hdf) / self.file_size(hdf_new)\n )\n )\n print(\n \"data size vs file size: {}\".format(\n self.get_size(hdf_new) / self.file_size(hdf_new)\n )\n )\n self.remove_file()\n os.rename(hdf_new.file_name, file_name)",
"def read_h5(self):\n infile = h5py.File(self.inf_name,'r')\n\n vardict = self.labdict\n #store data with the correct labels\n for k in infile['plasma/1d'].keys():\n try:\n vardict[k] = infile[self.labdict[k]].value\n except:\n vardict[k] = []\n\n vardict['a_ions']=infile['/plasma/anum'].value\n vardict['znum']=infile['/plasma/znum'].value\n \n\n self.rho_in = vardict['rho']\n self._rho_vol = infile['distributions/rhoDist/abscissae/dim1'].value[1:]\n self._volumes = infile['distributions/rhoDist/shellVolume'].value\n self.nrho_in = np.size(self.rho_in)\n\n if vardict['a_ions'][0]!='/':\n self.nspec = len(vardict['a_ions'])\n else:\n self.nspec = vardict['ni'].shape[1]\n print(\"Number of ions: \", self.nspec)\n if len(vardict['a_ions'])!=len(vardict['znum']):\n print(\"ERROR! array of A and Z don't have the same length\")\n\n self.A = vardict['a_ions']\n self.Z = vardict['znum']\n self.nion = self.nspec\n \n self.te_in = vardict['te'][:]\n self.ne_in = vardict['ne'][:]\n self.ti_in = vardict['ti'][:]\n ni1_in = vardict['ni'][:,0]\n self.ni_in = np.zeros((self.nion, self.nrho_in),dtype=float)\n self.ni_in[0,:] = ni1_in\n if self.nion==2:\n ni2_in = vardict['ni'][:,1]\n self.ni_in[1,:] = ni2_in\n elif self.nion==3:\n ni2_in = vardict['ni'][:,1]\n ni3_in = vardict['ni'][:,2]\n self.ni_in[1,:] = ni2_in\n self.ni_in[2,:] = ni3_in\n\n try:\n self.vt_in = vardict['vtor']\n except:\n self.vt_in = np.zeros(self.nrho_in,dtype=float)\n\n try:\n self.zeff_in = vardict['zeff'][:]\n except:\n self.zeff_in = np.zeros(self.nrho_in,dtype=float)\n\n self.ni = np.zeros((self.nion, self.nrho),dtype = float)\n self.spline()",
"def fileResizeObscure(new_filepath):\n # Resize\n img1 = Image.open(new_filepath)\n img2=image_reduce(img1)\n *** Stopped working here\n newpath=\"toupload\\\\%s\" % new_filepath\n # Block ID\n width=img2.size[0]\n height=img2.size[1]\n # Obscuring params were decided by trial and error using fraction of width and height\n x1=int(0.16*width)\n x2=int(0.28*width)\n y1=int(0.94*height)\n y2=int(0.98*height) \n # Faster but easier to snoop? should not be since it changes the pixels\n draw = ImageDraw.Draw(img2)\n draw.rectangle([(x1,y1),(x2,y2)],fill=\"white\")\n del draw\n \n img2.save(newpath,optimize=True,quality=95)",
"def convert(filepath, duration=100):\n\n # Getting images from HDF5 file\n h5_file = h5py.File(filepath, 'r')\n images = h5_file['entry']['data']['data']\n # Converting to PIL.Image\n images = [Image.fromarray(i).convert() for i in images]\n # Saving as GIF\n images[0].save(filepath.split('/')[-1] + '.gif',\n save_all=True, append_images=images[1:],\n duration=duration, loop=0)\n return",
"def photo2web_process_hattenbach():\n\n os.chdir('/Volumes/SSD External/Hattenbach_v2')\n \n dir_base = os.getcwd()\n \n dir_p2w = '/Users/throop/photos/Trips/'\n \n dirs = sorted(glob.glob(os.path.join(dir_base, '*')))\n \n quality_out = '60'\n size_out = '2000x2000'\n \n for i,dir in enumerate(dirs):\n if os.path.isdir(dir):\n os.chdir(dir)\n dir_originals = os.path.join(dir, 'originals')\n dir_originals_fullres = os.path.join(dir, 'originals_fullres')\n\n# For HH files, copy the 'actual' originals into a 'fullres' folder, for safekeeping\n\n if not os.path.isdir(dir_originals_fullres):\n os.rename(dir_originals, dir_originals_fullres)\n os.mkdir(dir_originals)\n \n files = glob.glob(os.path.join(dir_originals_fullres, '*'))\n\n# Get a list of all the images\n\n# For each image, make a low-res, low-quality image. This is just because the scanned files\n# are huge and high-quality, and not useful for online. They are much larger than necessary. \n# So we use 'convert' to shrink them in size and quality, and put the output into 'originals' directory \n# for photo2web.\n\n for file in files:\n file_short = os.path.basename(file)\n file_in = os.path.join(dir_originals_fullres,file_short)\n file_out = os.path.join(dir_originals,file_short)\n if not os.path.isfile(file_out):\n cmd = (f'convert -resize {size_out} -quality {quality_out}' +\n f' {file_in}' +\n f' {file_out}')\n print(f'{cmd}')\n \n subprocess.run(['convert', '-resize', size_out, '-quality', quality_out,\n file_in,\n file_out])\n\n# Now, finally, go thru and do photo2web on all of them.\n \n print(f'\\nProcessing directory {i}/{len(dirs)} {dir}\\n')\n subprocess.run(['cp', '-r', os.path.join(dir_p2w, 'header.txt'), '.'])\n subprocess.run(['cp', '-r', os.path.join(dir_p2w, 'photos.css'), '.'])\n if not os.path.exists('captions.txt'):\n subprocess.run(['captions_photo2web']) \n subprocess.run(['photo2web_old'])\n subprocess.run(['photo2web'])",
"def process_image(fl):\n #print(\"processing of images\")\n print(fl)\n img = cv2.imread(fl, cv2.IMREAD_COLOR)\n resized_img = cv2.resize(img, (146, 243), interpolation=cv2.INTER_CUBIC)\n return resized_img",
"def save_as_hdf5(self, filename):",
"def test_correct_image_size(location):\n chunkloc = resave_to_chunks(root=location[\"dir\"],\n n_imgs=10,\n output_stem=location[\"stem\"])\n\n loaded = np.load(chunkloc)\n assert len(loaded.files) > 0\n\n first = loaded[loaded.files[0]]\n assert first.shape != ()\n assert first.shape == (520, 696)",
"def visualize_data(filename, width=72, height=48, depth=3, cnn_model=None):\n data = pd.DataFrame.from_csv(filename) \n\n for i in range(30):\n cur_img = data['image'][i]\n cur_steer = int(data['servo'][i])\n cur_throttle = int(data['motor'][i])\n \n # [1:-1] is used to remove '[' and ']' from string \n cur_img_array = deserialize_image(cur_img)\n # cur_img_array = cv2.resize(cur_img_array, (480, 320), interpolation=cv2.INTER_CUBIC)\n image = cv2.cvtColor(cur_img_array, cv2.COLOR_RGB2BGR)\n print(i)\n cv2.imwrite('test'+str(i)+'.jpg', image)",
"def _make_resized_data(self, odata):\n\n\n nmax = odata['file_id'].shape[1]\n new_nmax = odata['ncutout'].max()\n if new_nmax < 2:\n new_nmax = 2\n temp_obj_data = odata\n\n nobj = temp_obj_data.size\n\n new_data = meds.util.get_meds_output_struct(\n nobj,\n new_nmax,\n extra_fields=self._get_fields(new_nmax),\n )\n new_data = self._add_cat_fields(new_data, copy=False)\n\n for name in new_data.dtype.names:\n if name in temp_obj_data.dtype.names:\n\n shape = new_data[name].shape\n lshape = len(shape)\n\n if lshape > 1 and shape[1] == new_nmax:\n new_data[name][:,:] = temp_obj_data[name][:,0:new_nmax]\n else:\n new_data[name][:] = temp_obj_data[name][:]\n\n del temp_obj_data\n\n return new_data",
"def run_image_viewer( self ):\n\n # XXX: hardcoded program name and image size.\n subprocess.Popen( [\"feh\", \"-dZ\", \"-g\", \"800x600\", self.record[\"filename\"]] )",
"def load_h5(fname, surfmap=True):\n filenames = glob.glob(fname)\n print(\"Files found: {}\".format(filenames))\n fin = h5py.File(filenames[0])\n meas = fin['measurement0'] # Wavefront data located in 'measurement0'\n opdsets = meas['genraw']\n wvl = opdsets.attrs['wavelength'][:]\n wvl = float(wvl[:-3])\n # Get the x pixel spacing\n try:\n iscale = float(opdsets.attrs['xpix'][:-3])\n except TypeError:\n iscale = 0.0\n print(\"No Calibration Dimensioning Found in H5 file\")\n # Return either surface map or fringe map\n if surfmap is True:\n data = np.asarray(opdsets['data'])\n data[data > 1e10] = np.nan # Eliminates \"bad\" data sets to NAN\n data *= wvl * mask_data(filenames[0])\n else:\n data = np.asarray(meas['reserve_interferogram']['frame4']['data'])\n return data, wvl, iscale",
"def display_file(epd, file_name):\n\n image = Image.open(file_name)\n image = ImageOps.grayscale(image)\n\n # crop to the middle\n w,h = image.size\n x = w / 2 - epd.width / 2\n y = h / 2 - epd.height / 2\n\n cropped = image.crop((x, y, x + epd.width, y + epd.height))\n bw = cropped.convert(\"1\", dither=Image.FLOYDSTEINBERG)\n\n epd.display(bw)\n epd.update()\n\n\n time.sleep(3) # delay in seconds\n\n rs = image.resize((epd.width, epd.height))\n bw = rs.convert(\"1\", dither=Image.FLOYDSTEINBERG)\n\n epd.display(bw)\n epd.update()\n\n time.sleep(3) # delay in seconds",
"def resultInHDF5(self, iStep):\n filePath = os.path.expanduser('~/LBMResults')\n resultFile = filePath + '/SimulationResults.h5'\n dataFile = tb.open_file(resultFile, 'a')\n #output the densities of fluids\n for i in sp.arange(self.typesFluids):\n dataFile.create_array('/FluidMacro', 'FluidDensityType%gin%g' % (i, iStep), \\\n self.fluidsDensity[i])\n dataFile.create_array('/FluidVelocity', 'FluidVelocityXAt%g' % iStep, \\\n self.physicalVX)\n dataFile.create_array('/FluidVelocity', 'FluidVelocityYAt%g' % iStep, \\\n self.physicalVY)\n dataFile.close()",
"def run(basedir, resize_height=288):\n print(\"initialize\")\n\n img0 = [os.path.join(basedir, 'images', f) \\\n for f in sorted(os.listdir(os.path.join(basedir, 'images'))) \\\n if f.endswith('JPG') or f.endswith('jpg') or f.endswith('png')][0]\n sh = cv2.imread(img0).shape\n height = resize_height\n factor = sh[0] / float(height)\n width = int(round(sh[1] / factor))\n _minify(basedir, resolutions=[[height, width]])\n\n print(\"finished\")",
"def parseSFh5File_v01_old(\n files, memlimit_0D_MB=5, memlimit_mD_MB=132, createEscArrays=True\n):\n if (type(files) is str) or (not np.iterable(files)):\n files = [files]\n datasets_all = []\n for fina in files:\n fina = Path(fina)\n fh = h5py.File(fina.resolve(), mode=\"r\")\n datasets = utilities.findItemnamesGroups(fh, [\"data\", \"pulse_id\"])\n logger.info(\"Successfully parsed file %s\" % fina.resolve())\n datasets_all.append(datasets)\n\n names = set()\n dstores = {}\n\n for datasets in datasets_all:\n tnames = set(datasets.keys())\n newnames = tnames.difference(names)\n oldnames = names.intersection(tnames)\n for name in newnames:\n if datasets[name][0].size == 0:\n logger.debug(\"Found empty dataset in {}\".format(name))\n else:\n size_data = (\n np.dtype(datasets[name][0].dtype).itemsize\n * datasets[name][0].size\n / 1024 ** 2\n )\n size_element = (\n np.dtype(datasets[name][0].dtype).itemsize\n * np.prod(datasets[name][0].shape[1:])\n / 1024 ** 2\n )\n if datasets[name][0].chunks:\n chunk_size = list(datasets[name][0].chunks)\n else:\n chunk_size = list(datasets[name][0].shape)\n if chunk_size[0] == 1:\n chunk_size[0] = int(memlimit_mD_MB // size_element)\n dstores[name] = {}\n dstores[name][\"data\"] = []\n dstores[name][\"data\"].append(datasets[name][0])\n dstores[name][\"data_chunks\"] = chunk_size\n dstores[name][\"eventIds\"] = []\n dstores[name][\"eventIds\"].append(datasets[name][1])\n dstores[name][\"stepLengths\"] = []\n dstores[name][\"stepLengths\"].append(len(datasets[name][0]))\n names.add(name)\n for name in oldnames:\n if datasets[name][0].size == 0:\n logger.debug(\"Found empty dataset in {}\".format(name))\n # dirty hack for inconsitency in writer\n elif not len(datasets[name][0].shape) == len(\n dstores[name][\"data\"][0].shape\n ):\n logger.debug(\"Found inconsistent dataset in {}\".format(name))\n else:\n dstores[name][\"data\"].append(datasets[name][0])\n dstores[name][\"eventIds\"].append(datasets[name][1])\n dstores[name][\"stepLengths\"].append(len(datasets[name][0]))\n if createEscArrays:\n escArrays = {}\n containers = {}\n for name, dat in dstores.items():\n containers[name] = LazyContainer(dat)\n escArrays[name] = Array(\n containers[name].get_data,\n index=containers[name].get_eventIds,\n step_lengths=dat[\"stepLengths\"],\n scan=None,\n )\n return escArrays\n else:\n return dstores",
"def get_fame(self, w, h):\n frame = self.get_frame_read()\n frame = frame.frame\n frame = cv2.resize(frame, (w, h))\n\n return frame",
"def roi_data_from_hdf(self, h5file, roi_wildcard, data_type, run = [], postFix = ['mcf','sgtf'],combined = False, prf = False):\n\t\t\n\t\tif combined == False:\n\t\t\tthis_run_group_name = os.path.split(self.runFile(stage = 'processed/mri', run = run, postFix = postFix))[1]\n\t\telse:\n\t\t\tthis_run_group_name = os.path.split(self.runFile(stage = 'processed/mri/', extension = '_combined'))[1]\n\n\t\tif prf == True:\n\t\t\tthis_run_group_name = 'prf'\t\n\t\n\t\ttry:\n\t\t\tthisRunGroup = h5file.get_node(where = '/', name = this_run_group_name, classname='Group')\n\t\t\t# self.logger.info('group ' + self.runFile(stage = 'processed/mri', run = run, postFix = postFix) + ' opened')\n\t\t\troi_names = []\n\t\t\tfor roi_name in h5file.iterNodes(where = '/' + this_run_group_name, classname = 'Group'):\n\t\t\t\tif len(roi_name._v_name.split('.')) == 2:\n\t\t\t\t\themi, area = roi_name._v_name.split('.')\n\t\t\t\t\tif roi_wildcard == area:\n\t\t\t\t\t\troi_names.append(roi_name._v_name)\n\t\t\t\t#if len(roi_name._v_name.split('.')) == 3:\n\t\t\t\t#\themi, area, do_nothing = roi_name._v_name.split('.')\n\t\t\t\t#\tif roi_wildcard == area:\n\t\t\t\t#\t\troi_names.append(roi_name._v_name)\n\t\t\tif len(roi_names) == 0:\n\t\t\t\tself.logger.info('No rois corresponding to ' + roi_wildcard + ' in group ' + this_run_group_name)\n\t\t\t\treturn None\n\t\texcept NoSuchNodeError:\n\t\t\t# import actual data\n\t\t\tself.logger.info('No group ' + this_run_group_name + ' in this file')\n\t\t\treturn None\n\t\t\n\t\tall_roi_data = []\n\t\tfor roi_name in roi_names:\n\t\t\tthisRoi = h5file.get_node(where = '/' + this_run_group_name, name = roi_name, classname='Group')\n\t\t\tall_roi_data.append( eval('thisRoi.' + data_type + '.read()') )\n\t\tall_roi_data_np = np.hstack(all_roi_data).T\n\t\treturn all_roi_data_np",
"def process(self):\n self.output_image = cv.resize(\n self.input_image,\n (self.WIDTH, self.HEIHGT),\n )\n return self.output_image",
"def nReconBmpSequence_to_Hdf5(dataPath, fileNamePrefix):\n print dataPath\n h5Filename = fileNamePrefix + '_' + dataset + '.h5'\n #lazy loading of the files from the same dir as the script from the same image directory at the moment\n PATH = dataPath\n \n # pluck out the BMP images that match the nRecon reconstructed slice data naming conventions\n images = [f for f in os.listdir(PATH) if re.search(r'.*_rec(\\d).*(bmp)', f)]\n # make a numpy array of the images\n threeDarray = np.array([np.array(Image.open(PATH+'\\\\'+slice_image)) for slice_image in images])\n \n # add in a fourth dummy dimension to turn the zyx slices into zyxc -- c is just a 1d empty axis\n fourDarray = np.expand_dims(threeDarray, axis=3)\n \n # set up the hdf5 file for writing\n h5File = h5py.File(h5Filename, 'w')\n # add the ndarray of zyxc dimensions to the hdf5 file\n h5File.create_dataset(stackName,data=fourDarray)\n # hdf5 file written, so close it\n h5File.close()",
"def read_h5(fpath):\n _check_h5_installed()\n import h5py as h5\n\n f = h5.File(fpath, 'r')\n res = dict()\n\n if 'est_n_dips' in f.keys():\n res['est_n_dips'] = list(f['est_n_dips'][:])\n else:\n res['est_n_dips'] = 'Not available.'\n\n if 'exponents' in f.keys():\n res['exponents'] = f['exponents'][:]\n else:\n res['exponents'] = 'Not available.'\n\n if 'ch_names' in f.keys():\n _temp = list(f['ch_names'][:].flatten())\n res['ch_names'] = list(x.decode('utf-8', 'ignore') for x in _temp)\n del _temp\n else:\n res['ch_names'] = 'Not available.'\n\n for _k in ['prob_map', 'est_locs', 'model_sel', 'est_dip_mom_std']:\n if _k in f.keys():\n res[_k] = list(f[_k][_key][:] for _key in sorted(f[_k].keys(),\n key=lambda x: int(x)))\n else:\n res[_k] = 'Not available.'\n\n for _k in ['final_dip_mom_std', 'tmin', 'tmax', 'fmin', 'fmax', 'subsample']:\n if _k in f.keys():\n res[_k] = f[_k][()]\n else:\n res[_k] = None\n\n for _k in ['lambda', 'noise_std', 'dip_mom_std', 'max_n_dips',\n 'subject', 'subject_viz', 'data_path', 'fwd_path',\n 'cov_path', 'src_path', 'lf_path', 'fwd_fixed_ori']:\n if _k in f.keys():\n res[_k] = f[_k][()]\n else:\n res[_k] = 'Not available.'\n\n if 'est_dip_moms' in f.keys():\n est_dip_moms_temp = np.asarray(list(f['est_dip_moms'][_key][:] for _key in sorted(f['est_dip_moms'].keys(),\n key=lambda x: int(x))))\n if f['fwd_fixed_ori'][()]:\n est_dip_moms_aux = np.zeros((res['est_locs'][-1].shape[0], est_dip_moms_temp.shape[0]))\n for i in range(est_dip_moms_temp.shape[0]):\n _temp = est_dip_moms_temp[i, :].reshape(-1, 1)\n for j in range(res['est_locs'][-1].shape[0]):\n est_dip_moms_aux[j, i] += _temp[j]\n elif f['fwd_fixed_ori'][()] == 'Not available.':\n print('Uknown forward source orientation. Skipping dipole moments.')\n else:\n est_dip_moms_aux = np.zeros((res['est_locs'][-1].shape[0], est_dip_moms_temp.shape[0], 3))\n for i in range(est_dip_moms_temp.shape[0]):\n _temp = est_dip_moms_temp[i, :].reshape(-1, 3)\n for j in range(res['est_locs'][-1].shape[0]):\n est_dip_moms_aux[j, i, :] += _temp[j]\n res['est_dip_moms'] = est_dip_moms_aux\n f.close()\n return res",
"def numpy_to_h5py(in_dir=config.dir_npy, split = config.split):\n\n in_files=[x[:-13] for x in os.listdir(in_dir) if x.endswith('_voc_stft.npy') and not x.startswith('._')]\n\n random.shuffle(in_files)\n\n\n num_files = len(in_files)\n\n split_idx = int(num_files*split)\n\n trn_files = in_files[:split_idx]\n\n val_files = in_files[split_idx:]\n\n num_val_files = len(val_files)\n\n print('Processing %d training files' % split_idx)\n logger.info('Processing %d training files' % split_idx)\n\n logger.info('Training file: %s' % config.h5py_file_train)\n\n voc_shape_trn = [split_idx, 5170,config.input_features]\n\n mix_shape_trn = [split_idx, 5170,config.input_features]\n\n feats_shape_trn = [split_idx, 5170,config.output_features]\n\n hdf5_file = h5py.File(config.h5py_file_train, mode='w')\n\n hdf5_file.create_dataset(\"voc_stft\", voc_shape_trn, np.float32)\n\n hdf5_file.create_dataset(\"back_stft\", voc_shape_trn, np.float32)\n\n hdf5_file.create_dataset(\"mix_stft\", mix_shape_trn, np.float32)\n\n hdf5_file.create_dataset(\"feats\", feats_shape_trn, np.float32)\n\n\n i = 0\n\n for f in trn_files:\n\n voc_stft = np.load(in_dir+f+'_voc_stft.npy')\n\n voc_stft = voc_stft.astype('float32')\n\n mix_stft = np.load(in_dir+f+'_mix_stft.npy')\n\n mix_stft = mix_stft.astype('float32')\n\n back_stft = np.load(in_dir+f+'_back_stft.npy')\n\n back_stft = back_stft.astype('float32')\n\n synth_feats = np.load(in_dir+f+'_synth_feats.npy')\n\n synth_feats = synth_feats.astype('float32')\n\n hdf5_file[\"voc_stft\"][i,...] = voc_stft\n\n hdf5_file[\"mix_stft\"][i,...] = mix_stft\n\n hdf5_file[\"back_stft\"][i,...] = back_stft\n\n hdf5_file[\"feats\"][i,...] = synth_feats\n\n i+=1\n utils.progress(i, split_idx)\n\n logger.info('Processed training file: %s' % f)\n\n hdf5_file.close()\n\n print('Processing %d validation files' % num_val_files)\n logger.info('Processing %d validation files' % num_val_files)\n\n logger.info('Validation file: %s' % config.h5py_file_val)\n\n voc_shape_trn = [num_val_files, 5170,config.input_features]\n\n mix_shape_trn = [num_val_files, 5170,config.input_features]\n\n feats_shape_trn = [num_val_files, 5170,config.output_features]\n\n hdf5_file = h5py.File(config.h5py_file_val, mode='w')\n\n hdf5_file.create_dataset(\"voc_stft\", voc_shape_trn, np.float32)\n\n hdf5_file.create_dataset(\"mix_stft\", mix_shape_trn, np.float32)\n\n hdf5_file.create_dataset(\"back_stft\", voc_shape_trn, np.float32)\n\n hdf5_file.create_dataset(\"feats\", feats_shape_trn, np.float32)\n\n\n i = 0\n\n for f in val_files:\n\n voc_stft = np.load(in_dir+f+'_voc_stft.npy')\n\n voc_stft = voc_stft.astype('float32')\n\n mix_stft = np.load(in_dir+f+'_mix_stft.npy')\n\n mix_stft = mix_stft.astype('float32')\n\n synth_feats = np.load(in_dir+f+'_synth_feats.npy')\n\n synth_feats = synth_feats.astype('float32')\n\n back_stft = np.load(in_dir+f+'_back_stft.npy')\n\n back_stft = back_stft.astype('float32')\n\n hdf5_file[\"voc_stft\"][i,...] = voc_stft\n\n hdf5_file[\"mix_stft\"][i,...] = mix_stft\n\n hdf5_file[\"back_stft\"][i,...] = back_stft\n\n hdf5_file[\"feats\"][i,...] = synth_feats\n\n i+=1\n utils.progress(i, num_val_files)\n\n logger.info('Processed validation file: %s' % f)\n\n hdf5_file.close()\n # return original_ffts",
"def saveHDF5(ifile, cs, xi, yi, phi, cidx, idx, size, comx, comy, rgysq, pl, st, sw, ens, fils):\n \n ## Groups\n size_grp = ifile.create_group('size')\n beads_grp = ifile.create_group('beads')\n props_grp = ifile.create_group('props')\n \n ## Datasets\n size_grp.create_dataset('size', data=cs, compression='gzip')\n \n beads_grp.create_dataset('x', data=xi, compression='gzip')\n beads_grp.create_dataset('y', data=yi, compression='gzip')\n beads_grp.create_dataset('phi', data=phi, compression='gzip')\n beads_grp.create_dataset('idx', data=cidx, compression='gzip')\n \n idx_h5 = np.asarray(idx, dtype=int)\n size_h5 = np.asarray(size, dtype=int)\n comx_h5 = np.asarray(comx, dtype=float)\n comy_h5 = np.asarray(comy, dtype=float)\n rgysq_h5 = np.asarray(rgysq, dtype=float)\n planarity_h5 = np.asarray(pl, dtype=float)\n straightness_h5 = np.asarray(st, dtype=float)\n swirliness_h5 = np.asarray(sw, dtype=float)\n enstrophy_h5 = np.asarray(ens, dtype=float)\n \n props_grp.create_dataset('idx', data=idx_h5, compression='gzip')\n props_grp.create_dataset('size', data=size_h5, compression='gzip')\n props_grp.create_dataset('comx', data=comx_h5, compression='gzip')\n props_grp.create_dataset('comy', data=comy_h5, compression='gzip')\n props_grp.create_dataset('rgysq', data=rgysq_h5, compression='gzip')\n props_grp.create_dataset('planarity', data=planarity_h5, compression='gzip')\n props_grp.create_dataset('straightness', data=straightness_h5, compression='gzip')\n props_grp.create_dataset('swirliness', data=swirliness_h5, compression='gzip')\n props_grp.create_dataset('enstrophy', data=enstrophy_h5, compression='gzip')\n \n ## Filament list\n fil_grp = props_grp.create_group('filament_list')\n for sz_idx in np.arange(len(size_h5)):\n fil_list = np.asarray(fils[sz_idx], dtype=int)\n fil_grp.create_dataset(str(sz_idx), data=fil_list, compression='gzip')\n \n return"
] | [
"0.579831",
"0.5543579",
"0.54814976",
"0.5304206",
"0.52979577",
"0.52545416",
"0.522313",
"0.5215465",
"0.514157",
"0.51299906",
"0.512998",
"0.5110999",
"0.50995994",
"0.50909275",
"0.50872463",
"0.5072801",
"0.50724435",
"0.50657225",
"0.5059999",
"0.50595",
"0.5047526",
"0.50469244",
"0.5028167",
"0.5020159",
"0.4983489",
"0.49804783",
"0.49709654",
"0.4962665",
"0.49537688",
"0.49491778"
] | 0.6192724 | 0 |
Dumps random sources and FR sources into an h5py file. Empty images and images with NaNs are removed from the input random radio images. The output h5py file has the exact file structure of the input fr_data_path file. The remaining random images are concatenated with the FR images. FRI labels are set to 0.0, FRII labels are set to 1.0, and random source labels are set to 2.0. Arguments | def add_random(fr_data_path, random_path, output_path):
with h5py.File(random_path, 'r') as data:
random = np.asarray(data['images'].value)
means = np.mean(np.mean(random, axis=-1), axis=-1)
empty = means == 0.0
error = np.isnan(means)
discard = empty | error
random_i = np.where(~discard)
random = random[random_i]
random = center_on_brightest(random)
with h5py.File(fr_data_path, 'r') as data:
images = np.asarray(data["images"].value)
images = center_on_brightest(images)
labels = np.where(np.asarray(data['labels']), 2, 1)
images = np.concatenate((images, random), axis=0)
labels = np.concatenate((labels, np.full((random.shape[0],),
fill_value=0)), axis=0)
with h5py.File(output_path, 'w') as f:
f.create_dataset('images', data=images)
f.create_dataset('labels', data=labels)
with h5py.File(fr_data_path, 'r') as data:
            f.copy(data['fri_data'], 'fri_data')
            f.copy(data['frii_data'], 'frii_data') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def append_random(everything_path, random_path):\n with h5py.File(random_path, 'r') as data:\n random = np.asarray(data['images'].value)\n \n means = np.mean(np.mean(random, axis=-1), axis=-1)\n empty = means == 0.0\n error = np.isnan(means)\n discard = empty | error\n\n random_i = np.where(~discard)\n random = random[random_i]\n\n random = center_on_brightest(random)\n\n with h5py.File(everything_path, 'a') as data:\n data[\"images\"].resize((data[\"images\"].shape[0] + random.shape[0]), axis=0)\n data[\"images\"][-random.shape[0]:] = random\n \n data[\"labels\"].resize((data[\"labels\"].shape[0] + random.shape[0]), axis=0)\n data[\"labels\"][-random.shape[0]:] = np.full((random.shape[0],), \n fill_value=0)",
"def gene_h5_train_file(data_path, h5_name):\n img = []\n y_cls_mask = []\n y_reg_cls_mask = []\n os.chdir(data_path)\n jpgfiles = glob.glob('*.jpg')\n idx = 1\n # the position of generator objector is very important\n gene_obj = image_output_pair(data_path, 1/255.0)\n while True:\n if idx == len(jpgfiles):\n break\n print '\\t{0}/{1}'.format(idx, len(jpgfiles))\n # the position of generator objector is very important\n # gene_obj = image_output_pair(data_path, 1/255.0)\n img_it, y_cls_mask_it, y_reg_cls_mask_it = gene_obj.next()\n img.append(img_it)\n y_cls_mask.append(y_cls_mask_it)\n y_reg_cls_mask.append(y_reg_cls_mask_it)\n idx += 1\n\n # img => (320, 320, 3)\n # after np.stack => (19041, 320, 320, 3)\n img_input = np.stack(img, axis=0)\n y_cls = np.stack(y_cls_mask, axis=0)\n y_reg = np.stack(y_reg_cls_mask, axis=0)\n print 'input data shape is {0}'.format(img_input.shape)\n print 'y_cls data shape is {0}'.format(y_cls.shape)\n print 'y_reg data shape is {0}'.format(y_reg.shape)\n \n # wirte data\n h5 = '/home/yuquanjie/Documents/train_' + h5_name\n file_write = h5py.File(h5, 'w')\n file_write.create_dataset('X_train', data=img_input)\n file_write.create_dataset('Y_train_cls', data=y_cls)\n file_write.create_dataset('Y_train_merge', data=y_reg)\n file_write.close()",
"def make_libfile():\n # wfc3_obsmodes_uvis\n wfc3_uvis = [\n \"f218w\",\n \"f225w\",\n \"f275w\",\n \"f336w\",\n \"f390m\",\n \"f390w\",\n \"f410m\",\n \"f438w\",\n \"f467m\",\n \"f475w\",\n \"f547m\",\n \"f555w\",\n \"f606w\",\n \"f621m\",\n \"f625w\",\n \"f689m\",\n \"f763m\",\n \"f775w\",\n \"f814w\",\n \"f845m\",\n ]\n\n wfc3_ir = [\n \"f098m\",\n \"f105w\",\n \"f110w\",\n \"f125w\",\n \"f127m\",\n \"f139m\",\n \"f140w\",\n \"f153m\",\n \"f160w\",\n ]\n\n wfpc2 = [\n \"f122m\",\n \"f157w\",\n \"f336w\",\n \"f410m\",\n \"f467m\",\n \"f547m\",\n \"f439w\",\n \"f569w\",\n \"f675w\",\n \"f791w\",\n \"f170w\",\n \"f185w\",\n \"f218w\",\n \"f255w\",\n \"f300w\",\n \"f380w\",\n \"f555w\",\n \"f622w\",\n \"f450w\",\n \"f606w\",\n \"f702w\",\n \"f814w\",\n ]\n\n acs_wfc = [\n \"f435w\",\n \"f475w\",\n \"f550m\",\n \"f555w\",\n \"f606w\",\n \"f625w\",\n \"f775w\",\n \"f814w\",\n ]\n # galex\n galex = [\"fuv\", \"nuv\"]\n\n # Open hd5 file for writing\n hf = h5py.File(__ROOT__ + \"filters.hd5\", \"w\")\n\n # Create group for nice hierarchical structure\n f = hf.create_group(\"filters\")\n\n # Define arrays for \"contents\" / descriptive information\n tablenames = []\n observatories = []\n instruments = []\n names = []\n norms = []\n cwaves = []\n pwaves = []\n comments = []\n\n # Loop through WFC3_UVIS filters\n for filt in wfc3_uvis:\n\n # define uvis 1 and uvis2 modes\n mode_1 = \"wfc3, uvis1, \" + filt\n mode_2 = \"wfc3, uvis2, \" + filt\n\n # pull bandpasses from stsynphot for the two uvis modes\n bp_1 = stsyn.band(mode_1)\n bp_2 = stsyn.band(mode_2)\n\n # extract the wavelength array\n wave = bp_1.waveset\n\n # compute the average bandpass between uvis1 and uvis2\n bp_avg = np.average([bp_1(wave), bp_2(wave)], axis=0)\n\n # define the filter name\n filter_name = \"HST_WFC3_\" + filt.upper()\n\n # build array of wavelength and throughput\n arr = np.array(\n list(zip(wave.value.astype(np.float64), bp_avg.astype(np.float64))),\n dtype=[(\"WAVELENGTH\", \"float64\"), (\"THROUGHPUT\", \"float64\")],\n )\n\n # append dataset to the hdf5 filters group\n f.create_dataset(filter_name, data=arr)\n\n # generate filter instance to compute relevant info\n newfilt = phot.Filter(wave, bp_avg, name=filt.upper())\n\n # populate contents lists with relevant information\n tablenames.append(filter_name)\n observatories.append(\"HST\")\n instruments.append(\"WFC3\")\n names.append(newfilt.name)\n norms.append(newfilt.norm.value)\n cwaves.append(newfilt.cl.value)\n pwaves.append(newfilt.lpivot.value)\n comments.append(\"avg of uvis1 and uvis2\")\n\n # Loop through WFC3_IR filters\n for filt in wfc3_ir:\n\n # define ir mode\n mode = \"wfc3, ir, \" + filt\n\n # pull bandpasses from stsynphot for the two uvis modes\n bp = stsyn.band(mode)\n\n # extract the wavelength array\n wave = bp.waveset\n\n # define the filter name\n filter_name = \"HST_WFC3_\" + filt.upper()\n\n # build array of wavelength and throughput\n arr = np.array(\n list(zip(wave.value.astype(np.float64), bp(wave).astype(np.float64))),\n dtype=[(\"WAVELENGTH\", \"float64\"), (\"THROUGHPUT\", \"float64\")],\n )\n\n # append dataset to the hdf5 filters group\n f.create_dataset(filter_name, data=arr)\n\n # generate filter instance to compute relevant info\n newfilt = phot.Filter(wave, bp(wave), name=filt.upper())\n\n # populate contents lists with relevant information\n tablenames.append(filter_name)\n observatories.append(\"HST\")\n instruments.append(\"WFC3\")\n names.append(newfilt.name)\n norms.append(newfilt.norm.value)\n 
cwaves.append(newfilt.cl.value)\n pwaves.append(newfilt.lpivot.value)\n comments.append(\"\")\n\n # Loop through WFPC2 filters\n for filt in wfpc2:\n\n # define chips 1, 2, 3, 4 modes\n mode_1 = \"wfpc2, 1, \" + filt\n mode_2 = \"wfpc2, 2, \" + filt\n mode_3 = \"wfpc2, 3, \" + filt\n mode_4 = \"wfpc2, 4, \" + filt\n\n # pull bandpasses from stsynphot for the two uvis modes\n bp_1 = stsyn.band(mode_1)\n bp_2 = stsyn.band(mode_2)\n bp_3 = stsyn.band(mode_3)\n bp_4 = stsyn.band(mode_4)\n\n # extract the wavelength array\n wave = bp_1.waveset\n\n # compute the average bandpass between uvis1 and uvis2\n bp_avg = np.average([bp_1(wave), bp_2(wave), bp_3(wave), bp_4(wave)], axis=0)\n\n # define the filter name\n filter_name = \"HST_WFPC2_\" + filt.upper()\n\n # build array of wavelength and throughput\n arr = np.array(\n list(zip(wave.value.astype(np.float64), bp_avg.astype(np.float64))),\n dtype=[(\"WAVELENGTH\", \"float64\"), (\"THROUGHPUT\", \"float64\")],\n )\n\n # append dataset to the hdf5 filters group\n f.create_dataset(filter_name, data=arr)\n\n # generate filter instance to compute relevant info\n newfilt = phot.Filter(wave, bp_avg, name=filt.upper())\n\n # populate contents lists with relevant information\n tablenames.append(filter_name)\n observatories.append(\"HST\")\n instruments.append(\"WFPC2\")\n names.append(newfilt.name)\n norms.append(newfilt.norm.value)\n cwaves.append(newfilt.cl.value)\n pwaves.append(newfilt.lpivot.value)\n comments.append(\"avg of 1, 2, 3, 4\")\n\n # Loop through ACS filters\n for filt in acs_wfc:\n\n # define wfc1, wfc2 modes\n mode_1 = \"acs, wfc1, \" + filt\n mode_2 = \"acs, wfc2, \" + filt\n\n # pull bandpasses from stsynphot for the two uvis modes\n bp_1 = stsyn.band(mode_1)\n bp_2 = stsyn.band(mode_2)\n\n # extract the wavelength array\n wave = bp_1.waveset\n\n # compute the average bandpass between uvis1 and uvis2\n bp_avg = np.average([bp_1(wave), bp_2(wave)], axis=0)\n\n # define the filter name\n filter_name = \"HST_ACS_WFC_\" + filt.upper()\n\n # build array of wavelength and throughput\n arr = np.array(\n list(zip(wave.value.astype(np.float64), bp_avg.astype(np.float64))),\n dtype=[(\"WAVELENGTH\", \"float64\"), (\"THROUGHPUT\", \"float64\")],\n )\n\n # append dataset to the hdf5 filters group\n f.create_dataset(filter_name, data=arr)\n\n # generate filter instance to compute relevant info\n newfilt = phot.Filter(wave, bp_avg, name=filt.upper())\n\n # populate contents lists with relevant information\n tablenames.append(filter_name)\n observatories.append(\"HST\")\n instruments.append(\"ACS_WFC\")\n names.append(newfilt.name)\n norms.append(newfilt.norm.value)\n cwaves.append(newfilt.cl.value)\n pwaves.append(newfilt.lpivot.value)\n comments.append(\"avg of wfc1 and wfc2\")\n\n # Loop through GALEX filters:\n for filt in galex:\n # define ir mode\n mode = \"galex,\" + filt\n\n # pull bandpasses from stsynphot for the two uvis modes\n bp = stsyn.band(mode)\n\n # extract the wavelength array\n wave = bp.waveset\n\n # define the filter name\n filter_name = \"GALEX_\" + filt.upper()\n\n # build array of wavelength and throughput\n arr = np.array(\n list(zip(wave.value.astype(np.float64), bp(wave).astype(np.float64))),\n dtype=[(\"WAVELENGTH\", \"float64\"), (\"THROUGHPUT\", \"float64\")],\n )\n\n # append dataset to the hdf5 filters group\n f.create_dataset(filter_name, data=arr)\n\n # generate filter instance to compute relevant info\n newfilt = phot.Filter(wave, bp(wave), name=filt.upper())\n\n # populate contents lists with relevant 
information\n tablenames.append(filter_name)\n observatories.append(\"GALEX\")\n instruments.append(\"GALEX\")\n names.append(newfilt.name)\n norms.append(newfilt.norm.value)\n cwaves.append(newfilt.cl.value)\n pwaves.append(newfilt.lpivot.value)\n comments.append(\"\")\n\n # smash the contents arrays together\n contents = np.array(\n list(\n zip(\n tablenames,\n observatories,\n instruments,\n names,\n norms,\n cwaves,\n pwaves,\n comments,\n )\n ),\n dtype=[\n (\"TABLENAME\", \"S40\"),\n (\"OBSERVATORY\", \"S30\"),\n (\"INSTRUMENT\", \"S30\"),\n (\"NAME\", \"S10\"),\n (\"NORM\", \"<f8\"),\n (\"CWAVE\", \"<f8\"),\n (\"PWAVE\", \"<f8\"),\n (\"COMMENT\", \"S100\"),\n ],\n )\n\n # add the contents array as an hd5 dataset\n hf.create_dataset(\"content\", data=contents)\n\n # close the file\n hf.close()",
"def numpy_to_h5py(in_dir=config.dir_npy, split = config.split):\n\n in_files=[x[:-13] for x in os.listdir(in_dir) if x.endswith('_voc_stft.npy') and not x.startswith('._')]\n\n random.shuffle(in_files)\n\n\n num_files = len(in_files)\n\n split_idx = int(num_files*split)\n\n trn_files = in_files[:split_idx]\n\n val_files = in_files[split_idx:]\n\n num_val_files = len(val_files)\n\n print('Processing %d training files' % split_idx)\n logger.info('Processing %d training files' % split_idx)\n\n logger.info('Training file: %s' % config.h5py_file_train)\n\n voc_shape_trn = [split_idx, 5170,config.input_features]\n\n mix_shape_trn = [split_idx, 5170,config.input_features]\n\n feats_shape_trn = [split_idx, 5170,config.output_features]\n\n hdf5_file = h5py.File(config.h5py_file_train, mode='w')\n\n hdf5_file.create_dataset(\"voc_stft\", voc_shape_trn, np.float32)\n\n hdf5_file.create_dataset(\"back_stft\", voc_shape_trn, np.float32)\n\n hdf5_file.create_dataset(\"mix_stft\", mix_shape_trn, np.float32)\n\n hdf5_file.create_dataset(\"feats\", feats_shape_trn, np.float32)\n\n\n i = 0\n\n for f in trn_files:\n\n voc_stft = np.load(in_dir+f+'_voc_stft.npy')\n\n voc_stft = voc_stft.astype('float32')\n\n mix_stft = np.load(in_dir+f+'_mix_stft.npy')\n\n mix_stft = mix_stft.astype('float32')\n\n back_stft = np.load(in_dir+f+'_back_stft.npy')\n\n back_stft = back_stft.astype('float32')\n\n synth_feats = np.load(in_dir+f+'_synth_feats.npy')\n\n synth_feats = synth_feats.astype('float32')\n\n hdf5_file[\"voc_stft\"][i,...] = voc_stft\n\n hdf5_file[\"mix_stft\"][i,...] = mix_stft\n\n hdf5_file[\"back_stft\"][i,...] = back_stft\n\n hdf5_file[\"feats\"][i,...] = synth_feats\n\n i+=1\n utils.progress(i, split_idx)\n\n logger.info('Processed training file: %s' % f)\n\n hdf5_file.close()\n\n print('Processing %d validation files' % num_val_files)\n logger.info('Processing %d validation files' % num_val_files)\n\n logger.info('Validation file: %s' % config.h5py_file_val)\n\n voc_shape_trn = [num_val_files, 5170,config.input_features]\n\n mix_shape_trn = [num_val_files, 5170,config.input_features]\n\n feats_shape_trn = [num_val_files, 5170,config.output_features]\n\n hdf5_file = h5py.File(config.h5py_file_val, mode='w')\n\n hdf5_file.create_dataset(\"voc_stft\", voc_shape_trn, np.float32)\n\n hdf5_file.create_dataset(\"mix_stft\", mix_shape_trn, np.float32)\n\n hdf5_file.create_dataset(\"back_stft\", voc_shape_trn, np.float32)\n\n hdf5_file.create_dataset(\"feats\", feats_shape_trn, np.float32)\n\n\n i = 0\n\n for f in val_files:\n\n voc_stft = np.load(in_dir+f+'_voc_stft.npy')\n\n voc_stft = voc_stft.astype('float32')\n\n mix_stft = np.load(in_dir+f+'_mix_stft.npy')\n\n mix_stft = mix_stft.astype('float32')\n\n synth_feats = np.load(in_dir+f+'_synth_feats.npy')\n\n synth_feats = synth_feats.astype('float32')\n\n back_stft = np.load(in_dir+f+'_back_stft.npy')\n\n back_stft = back_stft.astype('float32')\n\n hdf5_file[\"voc_stft\"][i,...] = voc_stft\n\n hdf5_file[\"mix_stft\"][i,...] = mix_stft\n\n hdf5_file[\"back_stft\"][i,...] = back_stft\n\n hdf5_file[\"feats\"][i,...] = synth_feats\n\n i+=1\n utils.progress(i, num_val_files)\n\n logger.info('Processed validation file: %s' % f)\n\n hdf5_file.close()\n # return original_ffts",
"def _setup_h5(self, data_gen_parms):\n\n '''\n Build the group structure down to the channel group\n '''\n # Set up the basic group structure\n root_grp = VirtualGroup('')\n root_parms = dict()\n root_parms['translator'] = 'FAKEBEPS'\n root_parms['data_type'] = data_gen_parms['data_type']\n root_grp.attrs = root_parms\n\n meas_grp = VirtualGroup('Measurement_')\n chan_grp = VirtualGroup('Channel_')\n\n meas_grp.attrs.update(data_gen_parms)\n\n # Create the Position and Spectroscopic datasets for the Raw Data\n ds_pos_inds, ds_pos_vals, ds_spec_inds, ds_spec_vals = self._build_ancillary_datasets()\n\n raw_chunking = calc_chunks([self.n_pixels,\n self.n_spec_bins],\n np.complex64(0).itemsize,\n unit_chunks=[1, self.n_bins])\n\n ds_raw_data = VirtualDataset('Raw_Data', data=None,\n maxshape=[self.n_pixels, self.n_spec_bins],\n dtype=np.complex64,\n compression='gzip',\n chunking=raw_chunking,\n parent=meas_grp)\n\n chan_grp.add_children([ds_pos_inds, ds_pos_vals, ds_spec_inds, ds_spec_vals,\n ds_raw_data])\n meas_grp.add_children([chan_grp])\n root_grp.add_children([meas_grp])\n\n hdf = HDFwriter(self.h5_path)\n hdf.delete()\n h5_refs = hdf.write(root_grp)\n\n # Delete the MicroDatasets to save memory\n del ds_raw_data, ds_spec_inds, ds_spec_vals, ds_pos_inds, ds_pos_vals\n\n # Get the file and Raw_Data objects\n h5_raw = get_h5_obj_refs(['Raw_Data'], h5_refs)[0]\n h5_chan_grp = h5_raw.parent\n\n # Get the Position and Spectroscopic dataset objects\n h5_pos_inds = get_h5_obj_refs(['Position_Indices'], h5_refs)[0]\n h5_pos_vals = get_h5_obj_refs(['Position_Values'], h5_refs)[0]\n h5_spec_inds = get_h5_obj_refs(['Spectroscopic_Indices'], h5_refs)[0]\n h5_spec_vals = get_h5_obj_refs(['Spectroscopic_Values'], h5_refs)[0]\n\n # Link the Position and Spectroscopic datasets as attributes of Raw_Data\n link_as_main(h5_raw, h5_pos_inds, h5_pos_vals, h5_spec_inds, h5_spec_vals)\n\n '''\n Build the SHO Group\n '''\n sho_grp = VirtualGroup('Raw_Data-SHO_Fit_', parent=h5_chan_grp.name)\n\n # Build the Spectroscopic datasets for the SHO Guess and Fit\n sho_spec_starts = np.where(h5_spec_inds[h5_spec_inds.attrs['Frequency']].squeeze() == 0)[0]\n sho_spec_labs = get_attr(h5_spec_inds, 'labels')\n ds_sho_spec_inds, ds_sho_spec_vals = build_reduced_spec_dsets(h5_spec_inds,\n h5_spec_vals,\n keep_dim=sho_spec_labs != 'Frequency',\n step_starts=sho_spec_starts)\n\n sho_chunking = calc_chunks([self.n_pixels,\n self.n_sho_bins],\n sho32.itemsize,\n unit_chunks=[1, 1])\n ds_sho_fit = VirtualDataset('Fit', data=None,\n maxshape=[self.n_pixels, self.n_sho_bins],\n dtype=sho32,\n compression='gzip',\n chunking=sho_chunking,\n parent=sho_grp)\n ds_sho_guess = VirtualDataset('Guess', data=None,\n maxshape=[self.n_pixels, self.n_sho_bins],\n dtype=sho32,\n compression='gzip',\n chunking=sho_chunking,\n parent=sho_grp)\n\n sho_grp.add_children([ds_sho_fit, ds_sho_guess, ds_sho_spec_inds, ds_sho_spec_vals])\n\n # Write the SHO group and datasets to the file and delete the MicroDataset objects\n h5_sho_refs = hdf.write(sho_grp)\n del ds_sho_fit, ds_sho_guess, ds_sho_spec_inds, ds_sho_spec_vals\n\n # Get the dataset handles for the fit and guess\n h5_sho_fit = get_h5_obj_refs(['Fit'], h5_sho_refs)[0]\n h5_sho_guess = get_h5_obj_refs(['Guess'], h5_sho_refs)[0]\n\n # Get the dataset handles for the SHO Spectroscopic datasets\n h5_sho_spec_inds = get_h5_obj_refs(['Spectroscopic_Indices'], h5_sho_refs)[0]\n h5_sho_spec_vals = get_h5_obj_refs(['Spectroscopic_Values'], h5_sho_refs)[0]\n\n # Link the Position and Spectroscopic 
datasets as attributes of the SHO Fit and Guess\n link_as_main(h5_sho_fit, h5_pos_inds, h5_pos_vals, h5_sho_spec_inds, h5_sho_spec_vals)\n link_as_main(h5_sho_guess, h5_pos_inds, h5_pos_vals, h5_sho_spec_inds, h5_sho_spec_vals)\n\n '''\n Build the loop group\n '''\n loop_grp = VirtualGroup('Fit-Loop_Fit_', parent=h5_sho_fit.parent.name)\n\n # Build the Spectroscopic datasets for the loops\n loop_spec_starts = np.where(h5_sho_spec_inds[h5_sho_spec_inds.attrs['DC_Offset']].squeeze() == 0)[0]\n loop_spec_labs = get_attr(h5_sho_spec_inds, 'labels')\n ds_loop_spec_inds, ds_loop_spec_vals = build_reduced_spec_dsets(h5_sho_spec_inds,\n h5_sho_spec_vals,\n keep_dim=loop_spec_labs != 'DC_Offset',\n step_starts=loop_spec_starts)\n\n # Create the loop fit and guess MicroDatasets\n loop_chunking = calc_chunks([self.n_pixels, self.n_loops],\n loop_fit32.itemsize,\n unit_chunks=[1, 1])\n ds_loop_fit = VirtualDataset('Fit', data=None,\n maxshape=[self.n_pixels, self.n_loops],\n dtype=loop_fit32,\n compression='gzip',\n chunking=loop_chunking,\n parent=loop_grp)\n\n ds_loop_guess = VirtualDataset('Guess', data=None,\n maxshape=[self.n_pixels, self.n_loops],\n dtype=loop_fit32,\n compression='gzip',\n chunking=loop_chunking,\n parent=loop_grp)\n\n # Add the datasets to the loop group then write it to the file\n loop_grp.add_children([ds_loop_fit, ds_loop_guess, ds_loop_spec_inds, ds_loop_spec_vals])\n h5_loop_refs = hdf.write(loop_grp)\n\n # Delete the MicroDatasets\n del ds_loop_spec_vals, ds_loop_spec_inds, ds_loop_guess, ds_loop_fit\n\n # Get the handles to the datasets\n h5_loop_fit = get_h5_obj_refs(['Fit'], h5_loop_refs)[0]\n h5_loop_guess = get_h5_obj_refs(['Guess'], h5_loop_refs)[0]\n h5_loop_spec_inds = get_h5_obj_refs(['Spectroscopic_Indices'], h5_loop_refs)[0]\n h5_loop_spec_vals = get_h5_obj_refs(['Spectroscopic_Values'], h5_loop_refs)[0]\n\n # Link the Position and Spectroscopic datasets to the Loop Guess and Fit\n link_as_main(h5_loop_fit, h5_pos_inds, h5_pos_vals, h5_loop_spec_inds, h5_loop_spec_vals)\n link_as_main(h5_loop_guess, h5_pos_inds, h5_pos_vals, h5_loop_spec_inds, h5_loop_spec_vals)\n\n self.h5_raw = USIDataset(h5_raw)\n self.h5_sho_guess = USIDataset(h5_sho_guess)\n self.h5_sho_fit = USIDataset(h5_sho_fit)\n self.h5_loop_guess = USIDataset(h5_loop_guess)\n self.h5_loop_fit = USIDataset(h5_loop_fit)\n self.h5_spec_vals = h5_spec_vals\n self.h5_spec_inds = h5_spec_inds\n self.h5_sho_spec_inds = h5_sho_spec_inds\n self.h5_sho_spec_vals = h5_sho_spec_vals\n self.h5_loop_spec_inds = h5_loop_spec_inds\n self.h5_loop_spec_vals = h5_loop_spec_vals\n self.h5_file = h5_raw.file\n\n return",
"def make_hdf5file(self, dataset_paths: list = None, dataset_values: list = None) -> None:\n\n assert dataset_paths is not None and len(dataset_paths) != 0, (\"`dataset_path` not valid. Expected at least \" \\\n f\"one list element, got {len(dataset_paths)}.\")\n\n assert dataset_values is not None and len(dataset_values) != 0, (\"`dataset_values` not valid. Expected at least \" \\\n f\"one list element, got {len(dataset_values)}.\")\n\n\n # Remove file if already exists and create a new one\n if os.path.isfile(os.path.join(self.FOFDirectory, self.filename)):\n os.remove(os.path.join(self.FOFDirectory, self.filename))\n print(f'[ FOFOutput ]\\t==> Removed old {self.filename} file.')\n\n # Create file and optional groups within it\n FOFfile = h5py.File(os.path.join(self.FOFDirectory, self.filename), 'w')\n print(f'[ FOFOutput ]\\t==> Created new {self.filename} file.')\n\n # Push the attributes to file, if any\n if self.attrs is not None and len(self.attrs.keys()) > 0:\n for key, text in zip(self.attrs.keys(), self.attrs.values()):\n FOFfile.attrs[key] = text\n\n for internal_path, dataset_content in zip(dataset_paths, dataset_values):\n\n assert not internal_path.endswith('/'), \"Invalid hdf5 internal path\"\n assert type(dataset_content) is np.ndarray, \"Can only push numpy.ndarrays into hdf5 files.\"\n\n nested_groups = self.groups_from_path(internal_path)\n if len(nested_groups) == 1:\n FOFfile.create_dataset(nested_groups[0], data=dataset_content)\n else:\n for nested_group in nested_groups[:-1]:\n g = FOFfile.create_group(nested_group)\n g.create_dataset(nested_groups[-1], data = dataset_content)\n\n print(f'[ FOFOutput ]\\t==> Created {internal_path} dataset in {self.filename} file.')\n\n FOFfile.close()",
"def extract_data(filename, images_dir, output_dir, trials_idx, block_nums, goal_dict):\n num_images = len(trials_idx) * len(block_nums)\n f = h5py.File(os.path.join(output_dir, filename), 'w')\n X = f.create_dataset('X', (num_images, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS*NUM_FRAMES), dtype=TYPE)\n Y = f.create_dataset('Y', (num_images, 2), dtype=TYPE)\n\n image_count = 0\n for trial_num in trials_idx:\n for block_num in block_nums:\n print('Blocks ' + str(block_num) + ' Trial ' + str(trial_num))\n for frame_num in xrange(0, NUM_FRAMES):\n temp = imread(images_dir+'RTr_Bl'+str(block_num)+'_'+str(trial_num)+'_'+str(frame_num)+IMAGE_FORMAT)\n temp = imresize(temp, [temp.shape[0]//DOWN_SAMPLE, temp.shape[1]//DOWN_SAMPLE, temp.shape[2]])\n X[image_count, 0:temp.shape[0], 0:temp.shape[1], frame_num*NUM_CHANNELS:(frame_num+1)*NUM_CHANNELS] = temp\n label = goal_dict['RTr_Bl'+str(block_num)+'_'+str(trial_num)]\n Y[image_count, :] = [label, 1-label]\n image_count += 1\n\n f.close()\n\n # TODO Use pixel depth normalization???\n #data = (data - (PIXEL_DEPTH / 2.0)) / PIXEL_DEPTH",
"def gen_fps():\n global data_src ,output_dir \n logger = TaskFileLogger(\"GenFP\")\n\n h_vars = load_hydro_var()\n if not os.path.exists(output_dir):\n os.mkdir(output_dir)\n\n for h_type,var_d in h_vars.items():\n print \"considering %s\" %h_type\n\n t_output_dir = os.path.join(output_dir,h_type)\n if not os.path.exists(t_output_dir):\n print \"creating path %s\" %t_output_dir\n os.mkdir(t_output_dir)\n logger.log(\"%s started\" %(h_type))\n\n for fname in glob.glob(data_src):\n complex_id = os.path.basename(fname).split('.')[0] \n fp_path = os.path.join(t_output_dir,complex_id + \".fp\" )\n if os.path.exists(fp_path):\n #print \"%s processed\" %complex_id\n continue\n print \"processing %s,fp saved as %s\" %(fname , fp_path )\n c = Complex(fname,hydro_dict = var_d)\n c.get_fp()\n c.write_fp_to_file(fp_path)\n\n logger.log(\"%s finished\" %(h_type))",
"def initial_resizing(fr_raw_data_path, fr_data_path, dim=300):\n with h5py.File(fr_raw_data_path, 'r') as data:\n images = resize_array(np.asarray(data['images'].value), dim=dim)\n labels = data['labels'].value\n \n with h5py.File(fr_data_path, 'w') as f:\n f.create_dataset('images', data=images)\n\n with h5py.File(fr_raw_data_path, 'r') as data: \n f.copy(data['fri_data'], 'fri_data')\n f.copy(data['frii_data'], 'frii_data')\n f.copy(data['labels'], 'labels')",
"def write_readouts(path, dataset_dict, image_list, datasettype, mask_part,\n do_wt1_signal, do_dach1_signal, do_stereology_pred, do_stereology_gt):\n\n titles = []\n for i in range(len(image_list)):\n image_name = os.path.split(image_list[i])[1]\n titles.append(image_name[:-4])\n\n # Segmentation of only 1 class was applied (e.g. glomerulus or podocytes)\n if len(mask_part) == 1:\n mask_el = mask_part.pop()\n\n if mask_el == \"glomerulus\":\n network_area = \"glomerulus_area\"\n # Add a column if GET_WT1_SIGNAL_FOR_GLOMERULUS = True\n if do_wt1_signal:\n df = pd.DataFrame(\n {'image_name': pd.Series(titles),\n network_area: pd.Series(dataset_dict['area_preds_%s' % mask_el]),\n 'mean_WT1_signal_in_glom': pd.Series(dataset_dict['mean_WT1_glom_preds']),\n 'var_WT1_signal_in_glom': pd.Series(dataset_dict['var_WT1_glom_preds']),\n 'median_WT1_signal_in_glom': pd.Series(dataset_dict['median_WT1_glom_preds']),\n 'min_WT1_signal_in_glom': pd.Series(dataset_dict['min_WT1_glom_preds']),\n 'max_WT1_signal_in_glom': pd.Series(dataset_dict['max_WT1_glom_preds']),\n 'perc25_WT1_signal_in_glom': pd.Series(dataset_dict['perc25_WT1_glom_preds']),\n 'perc75_WT1_signal_in_glom': pd.Series(dataset_dict['perc75_WT1_glom_preds'])})\n else:\n df = pd.DataFrame({'image_name': pd.Series(titles),\n network_area: pd.Series(dataset_dict['area_preds_%s' % mask_el])})\n\n elif mask_el == \"podocytes\":\n network_count = \"podocyte_count\"\n network_area = \"podocyte_nuclear_area\"\n # Add a column if GET_DACH1_SIGNAL_FOR_PODOCYTES = True\n if do_dach1_signal:\n df = pd.DataFrame({'image_name': pd.Series(titles),\n network_count: pd.Series(dataset_dict['count_preds_%s' % mask_el]),\n network_area: pd.Series(dataset_dict['area_preds_%s' % mask_el]),\n 'mean_DACH1_signal_in_podo': pd.Series(dataset_dict['mean_DACH1_podo_preds']),\n 'var_DACH1_signal_in_podo': pd.Series(dataset_dict['var_DACH1_podo_preds']),\n 'median_DACH1_signal_in_podo': pd.Series(dataset_dict['median_DACH1_podo_preds']),\n 'min_DACH1_signal_in_podo': pd.Series(dataset_dict['min_DACH1_podo_preds']),\n 'max_DACH1_signal_in_podo': pd.Series(dataset_dict['max_DACH1_podo_preds']),\n 'perc25_DACH1_signal_in_podo': pd.Series(dataset_dict['perc25_DACH1_podo_preds']),\n 'perc75_DACH1_signal_in_podo': pd.Series(dataset_dict['perc75_DACH1_podo_preds'])\n })\n else:\n df = pd.DataFrame({'image_name': pd.Series(titles),\n network_count: pd.Series(dataset_dict['count_preds_%s' % mask_el]),\n network_area: pd.Series(dataset_dict['area_preds_%s' % mask_el])})\n\n else:\n raise ValueError('The name of the segmentation is not known:', mask_el)\n\n savepath = str(os.path.join(path, datasettype + '_Dataframe_' + mask_el))\n df.to_csv(savepath + '.csv')\n df.to_excel(savepath + '.xlsx')\n\n # Segmentation of 2 classes were applied (e.g. 
glomerulus and podocytes)\n elif len(mask_part) == 2:\n df = pd.DataFrame(\n {'image_name': pd.Series(titles),\n \"glomerulus_area\": pd.Series(dataset_dict['area_preds_%s' % mask_part[0]]),\n \"podocyte_count\": pd.Series(dataset_dict['count_preds_%s' % mask_part[1]]),\n \"podocyte_nuclear_area\": pd.Series(dataset_dict['area_preds_%s' % mask_part[1]])})\n\n # Add a column if GET_WT1_SIGNAL_FOR_GLOMERULUS = True\n if do_wt1_signal:\n df['mean_WT1_signal_in_glom'] = dataset_dict['mean_WT1_glom_preds']\n df['var_WT1_signal_in_glom'] = dataset_dict['var_WT1_glom_preds']\n df['median_WT1_signal_in_glom'] = dataset_dict['median_WT1_glom_preds']\n df['min_WT1_signal_in_glom'] = dataset_dict['min_WT1_glom_preds']\n df['max_WT1_signal_in_glom'] = dataset_dict['max_WT1_glom_preds']\n df['perc25_WT1_signal_in_glom'] = dataset_dict['perc25_WT1_glom_preds']\n df['perc75_WT1_signal_in_glom'] = dataset_dict['perc75_WT1_glom_preds']\n\n # Add a column if GET_DACH1_SIGNAL_FOR_PODOCYTES = True\n if do_dach1_signal:\n df['mean_DACH1_signal_in_podo'] = dataset_dict['mean_DACH1_podo_preds']\n df['var_DACH1_signal_in_podo'] = dataset_dict['var_DACH1_podo_preds']\n df['median_DACH1_signal_in_podo'] = dataset_dict['median_DACH1_podo_preds']\n df['min_DACH1_signal_in_podo'] = dataset_dict['min_DACH1_podo_preds']\n df['max_DACH1_signal_in_podo'] = dataset_dict['max_DACH1_podo_preds']\n df['perc25_DACH1_signal_in_podo'] = dataset_dict['perc25_DACH1_podo_preds']\n df['perc75_DACH1_signal_in_podo'] = dataset_dict['perc75_DACH1_podo_preds']\n\n if do_stereology_pred:\n stereo_dict = get_stereology_readouts(dataset_dict, mask_part, titles, mode='pred')\n # Add it to df\n df['stereology_on_prediction-glomerular_volume_per_million'] = stereo_dict[\"glomerular_volume_per_million\"]\n df['stereology_on_prediction-podocyte_count'] = stereo_dict[\"podocyte_count\"]\n df['stereology_on_prediction-podocyte_density'] = stereo_dict[\"podocyte_density\"]\n\n if do_stereology_gt:\n stereo_dict = get_stereology_readouts(dataset_dict, mask_part, titles, mode='gt')\n # Add it to df\n df['stereology_on_groundtruth-glomerular_volume_per_million'] = stereo_dict[\"glomerular_volume_per_million\"]\n df['stereology_on_groundtruth-podocyte_count'] = stereo_dict[\"podocyte_count\"]\n df['stereology_on_groundtruth-podocyte_density'] = stereo_dict[\"podocyte_density\"]\n\n savepath = str(os.path.join(path, datasettype + '_Dataframe_' + mask_part[0] + mask_part[1]))\n df.to_csv(savepath + '.csv')\n df.to_excel(savepath + '.xlsx')\n return",
"def save_frame_to_hdf5_file(fsrc, key = 'images', compression = 0):\n preparation = \"\"\n preparation += \"from h5py import File;\"\n preparation += \"from tempfile import gettempdir;\"\n preparation += \"import os;\"\n preparation += \"root = gettempdir()\"\n preparation += \"filename_dst = os.path.join(root,'test_destination.hdf5')\"\n preparation += \"filename_dst = os.path.join(root,'test_destination.hdf5')\"\n testcode = ''",
"def make_data_hf(input_, label_, config):\n # Check the check dir, if not, create one\n if not os.path.isdir(os.path.join(os.getcwd(),config.checkpoint_dir)):\n os.makedirs(os.path.join(os.getcwd(),config.checkpoint_dir))\n\n if config.is_train:\n savepath = os.path.join(os.getcwd(), config.checkpoint_dir + '/train.h5')\n else:\n savepath = os.path.join(os.getcwd(), config.checkpoint_dir + '/test.h5')\n\n with h5py.File(savepath, 'w') as hf:\n #checkimage(input_[1])\n hf.create_dataset('input', data=input_)\n hf.create_dataset('label', data=label_)",
"def load_png_data():\n m=1 #训练文件个数\n n=1 #测试文件个数\n train_set_x=[]#训练数据集\n train_set_y=[]#训练标签集\n\n test_set_x=[]#测试数据集\n test_set_y=[]#测试标签集\n\n train_data={}\n\n train_path=r\".\\dataset\\train_label\\\\\"\n dirs=os.listdir(train_path)\n\n for file in dirs:\n srcImg=cv2.imread(train_path+file)\n #将label数据集保存为numpy格式并保存\n npImg=np.array(srcImg)\n np.save(train_path+str(m)+'.npy',npImg)\n train_set_x.append(npImg)\n\n\n NoiseImg = GaussianNoise(srcImg, 25, 4, 0.8)\n npNoiseImg = np.array(NoiseImg)\n cv2.imwrite(r\".\\dataset\\trainset\\\\\"+str(m)+'.png', NoiseImg, [int(cv2.IMWRITE_PNG_STRATEGY_DEFAULT)])\n np.save(r\".\\dataset\\trainset\\\\\" + str(m) + '.npy', npNoiseImg)\n train_set_y.append(npNoiseImg)\n m=m+1\n train_data['train_set_x']=train_set_x\n train_data['train_set_y']=train_set_y\n\n test_path = r\".\\dataset\\test_label\\\\\"\n dirs_test = os.listdir(test_path)\n for file in dirs_test:\n srcImg=cv2.imread(test_path+file)\n #将label数据集保存为numpy格式并保存\n npImg=np.array(srcImg)\n np.save(test_path+str(n)+'.npy',npImg)\n test_set_x.append(npImg)\n\n\n NoiseImg = GaussianNoise(srcImg, 25, 4, 0.8)\n npNoiseImg = np.array(NoiseImg)\n cv2.imwrite(r\".\\dataset\\testset\\\\\"+str(n)+'.png', NoiseImg, [int(cv2.IMWRITE_PNG_STRATEGY_DEFAULT)])\n np.save(r\".\\dataset\\testset\\\\\" + str(n) + '.npy', npNoiseImg)\n test_set_y.append(npNoiseImg)\n n=n+1\n train_data['test_set_x']=test_set_x\n train_data['test_set_y']=test_set_y\n\n np.savez(r\"E:\\DeepLearning\\CNNDenoiser\\dataset\\train_data.npz\",**train_data)",
"def create_devh5(self):\n if os.path.exists(self.dev_h5_path):\n print(\"[LOGGING]: \" + self.dev_h5_path + \" exists!\")\n return\n\n with h5py.File(self.dev_h5_path, 'w') as f:\n\n # create a group: f['train']\n train = f.create_group('train')\n self.extract_fea_for_datagroup(train, mode='train')\n\n # f['test']\n test = f.create_group('test')\n self.extract_fea_for_datagroup(test, mode='test')\n\n f.close()",
"def save_as_hdf5(self, filename):",
"def get_files(self):\n train_images = glob(os.path.join(self.images_dir, '*%s' % self.im_extension)) \n train_labels = [x.replace(self.im_extension, '.npy').replace('images', 'groundTruth') for x in train_images]\n val_images = glob(os.path.join(self.val_images_dir, '*%s' % self.im_extension))\n val_labels = [x.replace(self.im_extension, '.npy').replace('images', 'groundTruth') for x in val_images]\n train_images = np.array(train_images)\n train_labels = np.array(train_labels)\n val_images = np.array(val_images)\n val_labels = np.array(val_labels)\n test_images = np.array(\n glob('/media/data_cifs/pytorch_projects/datasets/BSDS500_crops/data/images/test_nocrop/*.jpg'))\n test_labels = np.array(\n [x.replace('images', 'groundTruth').replace('.jpg', '.npy') for x in test_images])\n test_labels = np.array(\n [np.load(x) for x in test_labels])\n keep_idx = np.array([True if x.shape[0] > x.shape[1] else False for x in test_labels])\n test_images = test_images[keep_idx]\n test_labels = test_labels[keep_idx]\n test_images = np.stack([misc.imread(x) for x in test_images], 0)\n test_labels = np.stack(test_labels, 0)\n test_labels = test_labels[..., None]\n\n # Add constant padding to bottom/right\n if self.pad:\n test_images = util.pad(test_images, ((0, 0), (self.pad // 2, self.pad - self.pad // 2), (self.pad // 2, self.pad - self.pad // 2), (0, 0)), mode='linear_ramp')\n test_labels = util.pad(test_labels, ((0, 0), (self.pad // 2, self.pad - self.pad // 2), (self.pad // 2, self.pad - self.pad // 2), (0, 0)), mode='constant', constant_values=0)\n\n # Select images for training\n sort_idx = np.argsort(train_images)\n train_images = train_images[sort_idx[:self.train_size]]\n train_labels = train_labels[sort_idx[:self.train_size]]\n\n # Build CV dict\n cv_files, cv_labels = {}, {}\n cv_files[self.folds['train']] = train_images\n cv_files[self.folds['val']] = val_images\n cv_files[self.folds['test']] = test_images\n cv_labels[self.folds['train']] = train_labels\n cv_labels[self.folds['val']] = val_labels\n cv_labels[self.folds['test']] = test_labels\n return cv_files, cv_labels",
"def pack_audio_files_to_hdf5(args):\n\n # Arguments & parameters\n dataset_dir = args.dataset_dir\n workspace = args.workspace\n data_type = args.data_type\n mini_data = args.mini_data\n\n sample_rate = config.sample_rate\n audio_length = config.audio_length\n classes_num = config.classes_num\n lb_to_idx = config.lb_to_idx\n frames_per_second = config.frames_per_second\n frames_num = frames_per_second * config.audio_duration\n\n has_strong_target = data_type in ['testing', 'evaluation']\n\n # Paths\n audios_dir = os.path.join(dataset_dir, data_type)\n weak_label_csv_path = os.path.join(dataset_dir, 'metadata', \n get_weak_csv_filename(data_type))\n\n if data_type == 'testing':\n strong_label_csv_path = os.path.join(dataset_dir, 'metadata', \n 'groundtruth_strong_label_testing_set.csv')\n elif data_type == 'evaluation':\n strong_label_csv_path = os.path.join(dataset_dir, 'metadata', \n 'groundtruth_strong_label_evaluation_set.csv')\n\n if mini_data:\n packed_hdf5_path = os.path.join(workspace, 'features', \n 'minidata_{}.waveform.h5'.format(data_type))\n else:\n packed_hdf5_path = os.path.join(workspace, 'features', \n '{}.waveform.h5'.format(data_type))\n create_folder(os.path.dirname(packed_hdf5_path))\n\n # Read metadata\n weak_meta_list = read_weak_csv(weak_label_csv_path, data_type)\n\n # Use a small amount of data for debugging\n if mini_data:\n random.seed(1234)\n random.shuffle(weak_meta_list)\n weak_meta_list = weak_meta_list[0 : 100]\n\n audios_num = len(weak_meta_list)\n\n feature_time = time.time()\n with h5py.File(packed_hdf5_path, 'w') as hf:\n hf.create_dataset(\n name='audio_name', \n shape=(audios_num,), \n dtype='S80')\n\n hf.create_dataset(\n name='waveform', \n shape=(audios_num, audio_length), \n dtype=np.int32)\n\n hf.create_dataset(\n name='weak_target', \n shape=(audios_num, classes_num), \n dtype=np.float32)\n\n if has_strong_target:\n strong_meta_dict = read_strong_csv(strong_label_csv_path) \n \n hf.create_dataset(\n name='strong_target', \n shape=(0, frames_num, classes_num), \n maxshape=(None, frames_num, classes_num), \n dtype=np.bool)\n\n for n in range(audios_num):\n print(n)\n weak_meta_dict = weak_meta_list[n]\n audio_name = weak_meta_dict['audio_name']\n audio_path = os.path.join(audios_dir, audio_name)\n (audio, fs) = librosa.core.load(audio_path, sr=sample_rate, mono=True)\n audio = pad_truncate_sequence(audio, audio_length)\n\n hf['audio_name'][n] = audio_name.encode()\n hf['waveform'][n] = float32_to_int16(audio)\n hf['weak_target'][n] = weak_target = get_weak_target(\n weak_meta_dict['labels'], lb_to_idx)\n\n if has_strong_target:\n strong_target = get_strong_target(\n weak_meta_dict['audio_name'][1:], strong_meta_dict, \n frames_num, frames_per_second, lb_to_idx)\n \n hf['strong_target'].resize((n + 1, frames_num, classes_num))\n hf['strong_target'][n] = strong_target\n\n print('Write hdf5 to {}'.format(packed_hdf5_path))\n print('Time: {:.3f} s'.format(time.time() - feature_time))",
"def main(args):\n gt_path = args.ground_truth\n djdd_path = args.djdd\n bjdd_path = args.bjdd\n\n mse_fn = th.nn.MSELoss()\n psnr_fn = PSNR()\n\n device = \"cpu\"\n # if th.cuda.is_available():\n # device = \"cuda\"\n\n pdf = pd.DataFrame(columns=[\"filename\",\"imgid\", \"PSNR_for_DJDD\", \"MSE_for_DJDD\", \"PSNR_for_BJDD\", \"MSE_for_BJDD\"])\n\n count = 0\n msedjdd = 0.0\n psnrdjdd = 0.0\n\n msebjdd = 0.0\n psnrbjdd = 0.0\n\n for root, _, files in os.walk(gt_path):\n for idx, name in enumerate(files):\n \n # djdd image\n output_djdd = np.array(imread(os.path.join(djdd_path, name+\"_0_output.png\"))).astype(np.float32) / (2**8-1)\n output_djdd = th.from_numpy(np.transpose(output_djdd, [2,0,1])).to(device).unsqueeze(0)\n\n #bjdd image\n output_bjdd = np.array(imread(os.path.join(bjdd_path, name.split('.')[0]+\"_sigma_0_bayer_PIPNet.png\"))).astype(np.float32) / (2**8-1)\n output_bjdd = th.from_numpy(np.transpose(output_bjdd, [2,0,1])).to(device).unsqueeze(0)\n\n # gt image\n target = np.array(imread(os.path.join(root, name))).astype(np.float32) / (2**8-1)\n target = th.from_numpy(np.transpose(target, [2, 0, 1])).to(device).unsqueeze(0)\n\n\n target_djdd = crop_like(target, output_djdd)\n target_bjdd = crop_like(target, output_bjdd)\n\n psnr_djdd = psnr_fn(output_djdd, target_djdd).item()\n mse_djdd = mse_fn(output_djdd, target_djdd).item()\n\n psnr_bjdd = psnr_fn(output_bjdd, target_bjdd).item()\n mse_bjdd = mse_fn(output_bjdd, target_bjdd).item()\n\n psnrdjdd += psnr_djdd\n msedjdd += mse_djdd\n psnrbjdd += psnr_bjdd\n msebjdd += mse_bjdd\n\n count += 1\n\n LOG.info(f\"imgid: {idx}, PSNR_BJDD: {psnr_bjdd}, MSE_BJDD: {mse_bjdd}, PSNR_DJDD: {psnr_djdd}, MSE_DJDD: {mse_djdd}\")\n pdf = pdf.append({\n \"filename\": name,\n \"imgid\": idx,\n \"PSNR_for_DJDD\": psnr_djdd,\n \"MSE_for_DJDD\": mse_djdd,\n \"PSNR_for_BJDD\": psnr_bjdd,\n \"MSE_for_BJDD\": mse_bjdd\n }, ignore_index=True)\n # pdb.set_trace()\n\n msebjdd /= count\n psnrbjdd /= count\n\n msedjdd /= count\n psnrdjdd /= count\n\n LOG.info(\"--------------BJDD---------------------\")\n LOG.info(\"Average, PSNR = %.1f dB, MSE = %.5f\", psnrbjdd, msebjdd)\n\n LOG.info(\"--------------DJDD---------------------\")\n LOG.info(\"Average, PSNR = %.1f dB, MSE = %.5f\", psnrdjdd, msedjdd)\n pdb.set_trace()\n pdf.to_csv(\"/workspace/presentation_compare.csv\")",
"def combine(files, output):\n # read all files\n bxrs = [h5py.File(f,'r') for f in files]\n # some paths we might care about & will copy\n metadata_paths = [\n '3BRecInfo/3BRecVars/MaxVolt',\n '3BRecInfo/3BRecVars/MinVolt',\n '3BRecInfo/3BRecVars/BitDepth',\n '3BRecInfo/3BRecVars/SignalInversion',\n '3BRecInfo/3BRecVars/SamplingRate',\n '3BRecInfo/3BRecVars/ExperimentType',\n '3BRecInfo/3BMeaChip/NRows',\n '3BRecInfo/3BMeaChip/NCols',\n '3BRecInfo/3BMeaChip/Layout',\n '3BRecInfo/3BMeaChip/MeaType',\n '3BRecInfo/3BMeaSystem/FwVersion',\n '3BRecInfo/3BMeaSystem/HwVersion',\n '3BRecInfo/3BMeaSystem/System'\n ]\n\n # count n_frames, n_samples from each file\n # also verify that key metadata matches\n n_frames = bxrs[0]['3BRecInfo/3BRecVars/NRecFrames'][0]\n n_samples = [bxrs[0]['3BData/Raw'].shape[0]]\n sampling_rate = bxrs[0]['3BRecInfo/3BRecVars/SamplingRate'][0]\n print(\"checking that all brw files have matching metadata\")\n for b in bxrs[1:]:\n for m in metadata_paths:\n try:\n if len(bxrs[0][m])==1:\n assert bxrs[0][m][:] == b[m][:]\n else:\n assert np.all(bxrs[0][m][:] == b[m][:])\n except Exception as E:\n logger.warn(f\"\"\"metadata does not match for {m}:\n found {bxrs[0][m]} and {b[m]}\n \"\"\")\n n_frames += b['3BRecInfo/3BRecVars/NRecFrames'][0]\n n_samples.append(b[\"3BData/Raw\"].shape[0])\n print(f\"combined duration: {n_frames/sampling_rate/60:.2f} minutes\")\n\n out_bxr = h5py.File(output, \"w\")\n # copy metadata\n bxrs[0].visititems(partial(glia.copy_metadata, copy_to=out_bxr))\n\n # copy data\n out_bxr['3BRecInfo/3BRecVars/NRecFrames'] = [n_frames]\n out_bxr['nSamplesPerRecording'] = n_samples\n tot_samples = sum(n_samples)\n assert np.isclose(tot_samples/n_frames, 4096) #4096 channels\n \n # copy raw data\n raw_dtype = bxrs[0][\"3BData/Raw\"].dtype\n dset = out_bxr.create_dataset(\"3BData/Raw\", (tot_samples,),\n dtype=raw_dtype)\n start_sample = 0\n max_chunk = int(1e8) # <1GiB \n for i, b in enumerate(bxrs):\n print(f\"Copying {files[i]}\")\n end_sample = start_sample+n_samples[i]\n for s in tqdm(range(0,n_samples[i],max_chunk)):\n e = min(s+max_chunk, end_sample)\n dset[start_sample+s:start_sample+e] = b[\"3BData/Raw\"][s:e]\n start_sample = end_sample\n\n # cleanup\n out_bxr.close()\n [b.close() for b in bxrs]",
"def generate_images(\n network_pkl,\n seeds,\n truncation_psi,\n noise_mode,\n outdir \n):\n\n print('Loading networks from \"%s\"...' % network_pkl)\n # device = torch.device('cuda')\n device = torch.device('cpu')\n with dnnlib.util.open_url(network_pkl) as f:\n G = legacy.load_network_pkl(f)['G_ema'].to(device) # type: ignore\n\n os.makedirs(outdir, exist_ok=True)\n\n \n\n # if seeds is None:\n # ctx.fail('--seeds option is required when not using --projected-w')\n\n # Labels.\n label = torch.zeros([1, G.c_dim], device=device)\n\n # Generate images.\n file_list = []\n for seed_idx, seed in enumerate(seeds):\n print('Generating image for seed %d (%d/%d) ...' % (seed, seed_idx, len(seeds)))\n z = torch.from_numpy(np.random.RandomState(seed).randn(1, G.z_dim)).to(device)\n # img = G(z, label, truncation_psi=truncation_psi, noise_mode=noise_mode)\n img = G(z, label, truncation_psi=truncation_psi, noise_mode=noise_mode, force_fp32=True)\n img = (img.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8)\n filename = f'{outdir}/seed{seed:04d}_{str(truncation_psi)}.png'\n PIL.Image.fromarray(img[0].cpu().numpy(), 'RGB').save(filename)\n file_list.append(filename)\n return file_list",
"def prepare_hdf5_file(hdf5_file, n_train, n_valid, n_test):\n n_total = n_train + n_valid + n_test\n splits = create_splits(n_train, n_valid, n_test)\n hdf5_file.attrs['split'] = H5PYDataset.create_split_array(splits)\n vlen_dtype = h5py.special_dtype(vlen=numpy.dtype('uint8'))\n hdf5_file.create_dataset('encoded_images', shape=(n_total,),\n dtype=vlen_dtype)\n hdf5_file.create_dataset('targets', shape=(n_total, 1), dtype=numpy.int16)\n hdf5_file.create_dataset('filenames', shape=(n_total, 1), dtype='S32')",
"def createDataset(outputPath, imagePathList, labelList, lexiconList=None, checkValid=True):\n assert(len(imagePathList) == len(labelList))\n nSamples = len(imagePathList)\n # env = lmdb.open(outputPath, map_size=1099511627776)\n num = 0\n with open(outputPath+'gt.txt', 'w') as txt:\n for i in range(nSamples):\n imagePath = imagePathList[i]\n label = labelList[i]\n\n if not os.path.exists(imagePath):\n print('%s does not exist' % imagePath)\n continue\n try:\n cv2.imread(imagePath)\n except:\n print('%s is not a valid image' % imagePath)\n continue\n num += 1\n txt.write(imagePath+' '+label+'\\n')\n # cache = {}\n # cnt = 1\n # for i in range(nSamples):\n # imagePath = imagePathList[i]\n # label = labelList[i]\n #\n # if not os.path.exists(imagePath):\n # print('%s does not exist' % imagePath)\n # continue\n #\n # # if checkValid:\n # # if not checkImageIsValid(imageBin):\n # # print('%s is not a valid image' % imagePath)\n # # continue\n # try:\n # cv2.imread(imagePath)\n # except:\n # print('%s is not a valid image' % imagePath)\n # continue\n # with open(imagePath, 'rb') as f:\n # imageBin = f.read()\n #\n # imageKey = b'image-%09d' % cnt\n # labelKey = b'label-%09d' % cnt\n # cache[imageKey] = imageBin\n # cache[labelKey] = label.encode()\n # if lexiconList:\n # lexiconKey = 'lexicon-%09d' % cnt\n # cache[lexiconKey] = ' '.join(lexiconList[i])\n # if cnt % 1000 == 0:\n # writeCache(env, cache)\n # cache = {}\n # print('Written %d / %d' % (cnt, nSamples))\n # cnt += 1\n # nSamples = cnt-1\n # cache[b'num-samples'] = str(nSamples).encode()\n # writeCache(env, cache)\n print('Created dataset with %d samples' % nSamples)",
"def load_h5py_file(fname, offsets = [0, 0, 0]):\n # Load the data\n f = h5py.File(fname, 'r') # r for read only\n print(\"Available fields: \", list(f.keys())) # f is a dictionary. Let's look at the keys\n\n # Create variables from loaded dictionary\n neural_data = f['ripple_data'][:,0:32]\n emg_data = f['ripple_data'][:,32:]\n force_data = f['data'][0:6,:].transpose()\n fs = f['mySampleRate'][:]\n\n # Transform matrix for force data\n TF = [[1.117\t, -0.096747,\t 1.7516, 0.03441, -0.88072, 0.042127, -0.89026],\n [0.3134, 0.0041349, 0.0045219, -0.055942, 1.5273, 0.037719,-1.5227],\n [0.135\t, 1.4494, -0.061075, 1.6259, 0.083867, 1.5999, 0.0058155]]\n TF = np.array(TF)\n\n # Read force data\n force_data = np.concatenate((np.ones((len(force_data),1)), force_data), axis=1)\n force_data = force_data @ TF.transpose()\n\n # Make baseband zero\n force_data[:,0] = force_data[:,0] - offsets[0]\n force_data[:,1] = force_data[:,1] - offsets[1]\n force_data[:,2] = force_data[:,2] - offsets[2]\n\n # Use sent and received pulse signals to allign DAQ and RIPPLE data\n pulse_sent = f['data'][6,:].transpose()\n ps_ind, = np.nonzero(pulse_sent>1)\n ps_ind = ps_ind[0]\n\n pulse_received = f['ttl_data'][:,0]\n pr_ind, = np.nonzero(pulse_received>2000)\n pr_ind = pr_ind[0]\n\n p_diff = ps_ind - pr_ind\n\n # Align data\n if p_diff > 0:\n pulse_sent = np.concatenate((pulse_sent[p_diff:], np.zeros((p_diff,))), axis=0)\n trailing = np.mean(force_data[-int(fs*0.1):], axis=0) * np.ones((p_diff,1))\n force_data = np.concatenate((force_data[p_diff:,:], trailing))\n else:\n pulse_sent = np.concatenate((np.zeros((-p_diff,)), pulse_sent[:p_diff]), axis=0)\n leading = np.mean(force_data[:int(fs * 0.1)], axis=0) * np.ones((-p_diff, 1))\n force_data = np.concatenate((leading, force_data[:p_diff,:]))\n\n # Choose force channel for analysis\n force_data = force_data[:,1]\n force_data = -force_data # Invert the sign (increased as applied force increased)\n\n # Choose EMG data\n emg_data = emg_data[:,(5,15)]-emg_data[:,(23,25)]\n\n # Re-order EMG data so that 1. Dorsal 2. Biceps 3. Ventral 4. Triceps\n positions3 = (0,1)\n emg_data = emg_data[:,positions3]\n\n # Corresponding time vectors\n time = f['ripple_time'][:]\n return neural_data, emg_data, force_data, time, fs",
"def add_dataset(dset_fp, dset_rel_fp, up3d_fp, # pylint: disable=too-many-locals, too-many-arguments, too-many-statements, too-many-branches\n train_list_f, val_list_f, test_list_f,\n train_spec, val_spec, test_spec,\n target_person_size, partspec, crop, running_idx,\n only_missing=False):\n test_ids = [int(id_[1:6]) for id_ in test_spec]\n train_ids = [int(id_[1:6]) for id_ in train_spec]\n val_ids = [int(id_[1:6]) for id_ in val_spec]\n LOGGER.info(\"Split: %d train, %d val, %d test.\",\n len(train_ids), len(val_ids), len(test_ids))\n LOGGER.info(\"Writing dataset...\")\n for im_idx in tqdm.tqdm(train_ids + val_ids + test_ids):\n image = scipy.misc.imread(path.join(up3d_fp, '%05d_image.png' % (im_idx)))\n with open(path.join(up3d_fp, '%05d_fit_crop_info.txt' % (im_idx)), 'r') as inf:\n cropinfo = [int(val) for val in inf.readline().strip().split()]\n assert image.ndim == 3\n out_exists = (path.exists(path.join(dset_fp, '%05d_image.png' % (running_idx))) and\n path.exists(path.join(dset_fp, '%05d_ann.png' % (running_idx))) and\n path.exists(path.join(dset_fp, '%05d_ann_vis.png' % (running_idx))) and\n path.exists(path.join(dset_fp, '%05d_render.png' % (running_idx))) and\n path.exists(path.join(dset_fp, '%05d_render_light.png' % (running_idx))))\n if not (only_missing and out_exists):\n rendering = uncrop(render_body_impl(path.join(up3d_fp, '%05d_body.pkl' % (im_idx)),\n resolution=(cropinfo[1],\n cropinfo[0]),\n quiet=True,\n use_light=False)[0],\n image.shape[:2],\n cropinfo)\n rendering_l = uncrop(render_body_impl(path.join(up3d_fp, '%05d_body.pkl' % (im_idx)),\n resolution=(cropinfo[1],\n cropinfo[0]),\n quiet=True,\n use_light=True)[0],\n image.shape[:2],\n cropinfo)\n joints = np.load(path.join(up3d_fp, '%05d_joints.npy' % (im_idx)))\n joints = np.vstack((joints, np.all(joints > 0, axis=0)[None, :]))\n person_size = robust_person_size(joints)\n norm_factor = float(target_person_size) / person_size\n if not (only_missing and out_exists):\n image = scipy.misc.imresize(image, norm_factor, interp='bilinear')\n rendering = scipy.misc.imresize(rendering, norm_factor, interp='nearest')\n rendering_l = scipy.misc.imresize(rendering_l, norm_factor, interp='bilinear')\n if image.shape[0] > crop or image.shape[1] > crop:\n LOGGER.debug(\"Image (original %d, here %d) too large (%s)! 
Cropping...\",\n im_idx, running_idx, str(image.shape[:2]))\n person_center = np.mean(joints[:2, joints[2, :] == 1], axis=1) * norm_factor\n crop_y, crop_x = get_crop(image, person_center, crop)\n image = image[crop_y[0]:crop_y[1],\n crop_x[0]:crop_x[1], :]\n rendering = rendering[crop_y[0]:crop_y[1],\n crop_x[0]:crop_x[1], :]\n rendering_l = rendering_l[crop_y[0]:crop_y[1],\n crop_x[0]:crop_x[1], :]\n assert image.shape[0] == crop or image.shape[1] == crop, (\n \"Error cropping image (original %d, here %d)!\" % (im_idx,\n running_idx))\n assert image.shape[0] <= crop and image.shape[1] <= crop and image.shape[2] == 3, (\n \"Wrong image shape (original %d, here %d)!\" % (im_idx, running_idx))\n class_groups = six_region_groups if partspec == '6' else None\n annotation = regions_to_classes(rendering, class_groups, warn_id=str(im_idx))\n if partspec == '1':\n annotation = (annotation > 0).astype('uint8')\n assert np.max(annotation) <= int(partspec), (\n \"Wrong annotation value (original %d, here %d): %s!\" % (\n im_idx, running_idx, str(np.unique(annotation))))\n if running_idx == 0:\n assert np.max(annotation) == int(partspec), (\n \"Probably an error in the number of parts!\")\n scipy.misc.imsave(path.join(dset_fp, '%05d_image.png' % (running_idx)), image)\n scipy.misc.imsave(path.join(dset_fp, '%05d_ann.png' % (running_idx)), annotation)\n scipy.misc.imsave(path.join(dset_fp, '%05d_ann_vis.png' % (running_idx)),\n apply_colormap(annotation, vmax=int(partspec)))\n scipy.misc.imsave(path.join(dset_fp, '%05d_render.png' % (running_idx)), rendering)\n scipy.misc.imsave(path.join(dset_fp, '%05d_render_light.png' % (running_idx)), rendering_l) # pylint: disable=line-too-long\n if im_idx in train_ids:\n list_f = train_list_f\n elif im_idx in val_ids:\n list_f = val_list_f\n elif im_idx in test_ids:\n list_f = test_list_f\n list_f.write(\"/%s/%05d_image.png /%s/%05d_ann.png %f\\n\" % (\n dset_rel_fp, running_idx, dset_rel_fp, running_idx, norm_factor))\n list_f.flush()\n running_idx += 1\n return running_idx",
"def create_random_data(output_path: str, num_images: int = 5) -> None:\n train_path = os.path.join(output_path, \"train\")\n class1_train_path = os.path.join(train_path, \"class1\")\n class2_train_path = os.path.join(train_path, \"class2\")\n\n val_path = os.path.join(output_path, \"val\")\n class1_val_path = os.path.join(val_path, \"class1\")\n class2_val_path = os.path.join(val_path, \"class2\")\n\n test_path = os.path.join(output_path, \"test\")\n class1_test_path = os.path.join(test_path, \"class1\")\n class2_test_path = os.path.join(test_path, \"class2\")\n\n paths = [\n class1_train_path,\n class1_val_path,\n class1_test_path,\n class2_train_path,\n class2_val_path,\n class2_test_path,\n ]\n\n for path in paths:\n try:\n os.makedirs(path)\n except FileExistsError:\n pass\n\n for i in range(num_images):\n pixels = numpy.random.rand(64, 64, 3) * 255\n im = Image.fromarray(pixels.astype(\"uint8\")).convert(\"RGB\")\n im.save(os.path.join(path, f\"rand_image_{i}.jpeg\"))\n\n process_images(output_path)",
"def dump_signal_hdf(args):\n # construct & open output HDF5:\n outfile = args.out if (args.out is not None) else \"./samples.hdf5\"\n hdf = h5py.File(outfile, 'w-') # (throw error if file already exists)\n scaled_gp = hdf.create_group('scaled')\n if args.segmentation:\n states_gp = hdf.create_group('states')\n\n # loop thru polya calls output file and append samples to HDF5:\n curr_read = None\n curr_samples = []\n if args.segmentation:\n curr_states = []\n for row in tqdm(PolyaIterator(args.polya)):\n # create a new read dataset based on current samples if detect a switch:\n if row['readname'] != curr_read:\n if curr_read is not None:\n try:\n scaled_gp.create_dataset(curr_read, data=np.array(curr_samples, dtype=np.float32))\n if args.segmentation:\n states_gp.create_dataset(curr_read, data=np.array(curr_states, dtype='S10'))\n except:\n pass\n # reset current read & samples\n curr_read = row['readname']\n curr_samples = []\n if args.segmentation:\n curr_states = []\n hdf.flush()\n # otherwise append raw sample:\n curr_samples.append(float(row['scaled']))\n if args.segmentation:\n curr_states.append(row['state'])\n # append final read & close HDF5 file handle:\n try:\n scaled_gp.create_dataset(curr_read, data=np.array(curr_samples, dtype=np.float32))\n if args.segmentation:\n states_gp.create_dataset(curr_read, data=np.array(curr_states, dtype='S10'))\n except:\n pass\n hdf.flush()\n hdf.close()\n\n # print finishing message:\n print(\"[dump_signal.py] HDF5 file of (scaled) picoampere signals written to: {}\".format(outfile))",
"def explore_data():\n labels = [\"vehicles\", \"non-vehicles\"]\n labelmap = {0: \"vehicles\", 1: \"non-vehicles\"}\n vehicles_glob = os.path.join(data_dir, \"vehicles\", \"**\", \"*.png\")\n nonvehicles_glob = os.path.join(data_dir, \"non-vehicles\", \"**\", \"*.png\")\n class_fnames = [\n glob.glob(vehicles_glob, recursive = True),\n glob.glob(nonvehicles_glob, recursive = True)]\n n_samples = [len(fnames) for fnames in class_fnames]\n shapes = []\n samples = []\n print(table_format([\"label\", \"size\", \"shape\"], header = True))\n for label, fnames in enumerate(class_fnames):\n indices = np.random.choice(len(fnames), 4*10, replace = False)\n for i in indices:\n fname = fnames[i]\n img = cv2.imread(fname)\n samples.append(img)\n shape = img.shape\n shapes.append(shape)\n print(table_format([labels[label], n_samples[label], shapes[label]]))\n\n samples = np.stack(samples)\n samples = tile(samples, 2*4, 10)\n cv2.imwrite(os.path.join(out_dir, \"datasamples.png\"), samples)\n\n return class_fnames, labelmap",
"def create_png_images(self):\n if self.subject is None:\n print Console.WARNING + 'You need to specify a subject first' + Console.ENDC\n return\n\n check_dir_of = self.locations.check_dir_of\n check_dir_of(self.locations.HISTO_PNG_U)\n check_dir_of(self.locations.HISTO_PNG)\n check_dir_of(self.locations.SOURCE_PNG)\n\n\n\n fmap_img = ImageUtils.load_nifti_image(self.locations.HIST_FMAP) #loading subject nifti files\n volumes = []\n try:\n for s in self.locations.SOURCES:\n volumes.append(ImageUtils.load_nifti_image(s))\n except IOError as e:\n print Console.FAIL + 'There are errors loading nifi files for subject %s'%self.subject + Console.ENDC\n return False\n \n\n num_slices = volumes[0].shape[2] #use first volume to check expected number of slices\n\n self.locations.create_empty_dir(self.locations.IMAGES_DIR)\n\n print 'Creating input PNGs for %s'%self.subject\n for k, vol in enumerate(volumes):\n for i in range(num_slices):\n imslice = ImageUtils.data_to_bytescale_rgb(vol[:, :, i])\n im = Image.fromarray(imslice)\n im.save(self.locations.SOURCE_PNG % (self.locations.LABELS[k],i))\n\n \n print 'Creating histology PNGs for %s'%self.subject\n for i in range(num_slices):\n\n im_unscaled = ImageUtils.data_to_unscaled_rgb(fmap_img[:, :, i]); #keeps the original values\n im_unscaled = Image.fromarray(im_unscaled)\n im_unscaled = im_unscaled.filter(ImageFilter.GaussianBlur(radius=2)) #Filter requested by Ali Khan\n im_unscaled.save(self.locations.HISTO_PNG_U % i)\n\n im_scaled = ImageUtils.data_to_bytescale_rgb(fmap_img[:,:,i]); # bytescaled histology\n im_scaled = Image.fromarray(im_scaled)\n im_scaled = im_scaled.filter(ImageFilter.GaussianBlur(radius=2)) #Filter requested by Ali Khan\n im_scaled.save(self.locations.HISTO_PNG % i)\n\n print\n return True",
"def generate_data(n_samples=5, n_objects=3, objects_pattern=\"data/objects/*\",\n backgronds_pattern=\"data/Casting_Similar_95/*\", show=False,\n output_dir=\"data/noisy/noisy_img\"):\n\n # Environment global variables\n global N_SAMPLES\n global BACKGROUNDS_PATTERN\n global OBJECTS_PATTERN\n global OUTPUT_DIR\n global N_OBJECTS\n global SHOW\n\n # Setup global variables\n SHOW = show\n N_SAMPLES = n_samples\n BACKGROUNDS_PATTERN = backgronds_pattern\n OBJECTS_PATTERN = objects_pattern\n OUTPUT_DIR = output_dir\n N_OBJECTS = n_objects\n\n # creates an empty folder\n os.makedirs(OUTPUT_DIR, exist_ok=True)\n\n setup_environment(OBJECTS_PATTERN, BACKGROUNDS_PATTERN, N_SAMPLES)",
"def run(self):\n if self.camera.connected.value():\n self.settings.movie_on.update_value(True)\n \n \n num_of_chan=self.daq_ai.settings.num_of_chan.value()\n self.buffer = np.zeros((10000,num_of_chan+2), dtype=float)\n self.buffer[0:self.settings.tdelay.value(),3]=100;\n '''\n initialize position\n '''\n position = 0\n '''\n initialize number of water drops given\n '''\n total_drops=0\n self.settings.total_drops.update_value(total_drops)\n \n \n '''\n Decide whether to create HDF5 file or not\n '''\n # first, create a data file\n if self.settings['save_h5']:\n # if enabled will create an HDF5 file with the plotted data\n # first we create an H5 file (by default autosaved to app.settings['save_dir']\n # This stores all the hardware and app meta-data in the H5 file\n file_name_index=0\n file_name=os.path.join(self.app.settings.save_dir.value(),self.app.settings.sample.value())+'_'+str(file_name_index)+'.h5'\n while os.path.exists(file_name):\n file_name_index+=1\n file_name=os.path.join(self.app.settings.save_dir.value(),self.app.settings.sample.value())+'_'+str(file_name_index)+'.h5'\n self.h5file = h5_io.h5_base_file(app=self.app, measurement=self,fname = file_name)\n \n # create a measurement H5 group (folder) within self.h5file\n # This stores all the measurement meta-data in this group\n self.h5_group = h5_io.h5_create_measurement_group(measurement=self, h5group=self.h5file)\n \n # create an h5 dataset to store the data\n self.buffer_h5 = self.h5_group.create_dataset(name = 'buffer', \n shape = self.buffer.shape,\n dtype = self.buffer.dtype,\n maxshape=(None,self.buffer.shape[1]))\n \n # We use a try/finally block, so that if anything goes wrong during a measurement,\n # the finally block can clean things up, e.g. close the data file object.\n '''\n start actual protocol\n '''\n try:\n '''\n initialize counter ticks\n '''\n i = 0 #counter tick for loading buffer\n j = 0 #counter tick for saving hdf5 file\n self.k=0 #number of seconds saved\n water_tick=0 #\n step_size=self.daq_ai.settings.buffer_size.value()\n \n '''\n Start DAQ, Default at 1kHz\n '''\n self.daq_ai.start()\n \n # Will run forever until interrupt is called.\n '''\n Expand HDF5 buffer when necessary\n '''\n while not self.interrupt_measurement_called:\n i %= self.buffer.shape[0]\n if self.settings['save_h5']:\n if j>(self.buffer_h5.shape[0]-step_size):\n self.buffer_h5.resize((self.buffer_h5.shape[0]+self.buffer.shape[0],self.buffer.shape[1]))\n self.k +=10\n \n\n '''\n Update Progress Bar\n '''\n self.settings['progress'] = i * 100./self.buffer.shape[0]\n \n \n \n '''\n update water status\n '''\n if (water_tick<(self.settings.lick_interval.value()*1000)):\n water_tick+=1\n else:\n self.settings.water_reward.update_value(True)\n water_tick=0\n \n\n \n '''\n Generate a random odor\n '''\n #no addition\n \n \n \n '''\n Read DAQ sensor data(0:lick_left, 1:lick_right, 2:flowmeter)\n '''\n # Fills the buffer with sine wave readings from func_gen Hardware\n self.buffer[i:(i+step_size),0:num_of_chan] = self.daq_ai.read_data()\n\n lick_0 = (self.buffer[i,1]<4)\n lick_1 = (self.buffer[i,2]<4)\n self.buffer[i,1]=lick_0 #convert lick sensor into 0(no lick) and 1(lick)\n self.buffer[i,2]=lick_1\n# ask if the animal licked in this interval\n\n# print(self.buffer[i,0:1])\n lick = (lick_0 or lick_1)\n \n '''\n Decide whether water will be given, based on the status of reward and lick\n '''\n if self.settings.water_reward.value():\n if lick:\n if lick_0:\n side = 0\n else:\n side = 1\n self.water.give_water(side)\n 
self.settings.water_reward.update_value(False)\n \n '''\n save water given (5:If water given 6:water opened time)\n '''\n self.buffer[i,num_of_chan+side]=1\n #self.buffer[i,num_of_chan+2]=self.water.open_time[side].value()\n total_drops+=1\n self.settings.total_drops.update_value(total_drops)\n \n else:\n '''\n The mouse gets a timeout if it licks repetitively or hold the water port (when it is not suppose to lick)\n '''\n if lick:\n water_tick = 0\n '''\n Read and save Position and Speed at 100Hz(default) (3:position 4:speed)\n '''\n # to be implemented\n '''\n Read odor value from the odor generator, otherwise fill with clean air(default)\n '''\n \n '''\n write odor value to valve\n '''\n self.arduino_sol.write()\n '''\n write odor value to display (7:clean air 8:odor1 9:odor2 10:odor3)\n '''\n #to be implemented\n '''\n Save hdf5 file\n '''\n if self.settings['save_h5']:\n # if we are saving data to disk, copy data to H5 dataset\n self.buffer_h5[j:(j+step_size),:] = self.buffer[i:(i+step_size),:]\n # flush H5\n self.h5file.flush()\n \n \n # wait between readings.\n # We will use our sampling_period settings to define time\n #time.sleep(self.settings['sampling_period'])\n \n i += step_size\n j += step_size\n \n \n if self.interrupt_measurement_called:\n # Listen for interrupt_measurement_called flag.\n # This is critical to do, if you don't the measurement will\n # never stop.\n # The interrupt button is a polite request to the \n # Measurement thread. We must periodically check for\n # an interrupt request\n self.daq_ai.stop()\n break\n\n finally: \n if self.settings['save_h5']:\n # make sure to close the data file\n self.h5file.close()\n \n if self.camera.connected.value():\n self.settings.movie_on.update_value(False)"
] | [
"0.58822685",
"0.58631015",
"0.58495665",
"0.5796117",
"0.57869375",
"0.57436776",
"0.5726677",
"0.56164074",
"0.5588292",
"0.5530099",
"0.55273885",
"0.5512033",
"0.54644233",
"0.5449375",
"0.5439521",
"0.5407889",
"0.5386092",
"0.537235",
"0.53329575",
"0.53090507",
"0.52929425",
"0.5251734",
"0.5232278",
"0.52253276",
"0.52219325",
"0.5221056",
"0.52113336",
"0.5210808",
"0.51912117",
"0.5184183"
] | 0.7266753 | 0 |
Appends new random sources to a data h5py file. Adds new random sources to an h5py file that is, or has the same format as, the output file of add_random_data (see the h5py sketch after this record). | def append_random(everything_path, random_path):
with h5py.File(random_path, 'r') as data:
random = np.asarray(data['images'].value)
means = np.mean(np.mean(random, axis=-1), axis=-1)
empty = means == 0.0
error = np.isnan(means)
discard = empty | error
random_i = np.where(~discard)
random = random[random_i]
random = center_on_brightest(random)
with h5py.File(everything_path, 'a') as data:
data["images"].resize((data["images"].shape[0] + random.shape[0]), axis=0)
data["images"][-random.shape[0]:] = random
data["labels"].resize((data["labels"].shape[0] + random.shape[0]), axis=0)
data["labels"][-random.shape[0]:] = np.full((random.shape[0],),
fill_value=0) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_random(fr_data_path, random_path, output_path):\n with h5py.File(random_path, 'r') as data:\n random = np.asarray(data['images'].value)\n \n means = np.mean(np.mean(random, axis=-1), axis=-1)\n empty = means == 0.0\n error = np.isnan(means)\n discard = empty | error\n\n random_i = np.where(~discard)\n random = random[random_i]\n\n random = center_on_brightest(random)\n \n with h5py.File(fr_data_path, 'r') as data:\n images = np.asarray(data[\"images\"].value) \n images = center_on_brightest(images)\n \n labels = np.where(np.asarray(data['labels']), 2, 1)\n\n images = np.concatenate((images, random), axis=0)\n labels = np.concatenate((labels, np.full((random.shape[0],), \n fill_value=0)), axis=0)\n\n with h5py.File(output_path, 'w') as f:\n f.create_dataset('images', data=images)\n f.create_dataset('labels', data=labels)\n\n with h5py.File(fr_data_path, 'r') as data: \n f.copy(data, 'fri_data')\n f.copy(data, 'frii_data')",
"def generate_data(self):\n self.remove_hdf5_file()\n hdf5_handler = self.create_hdf5_file()\n self.populate_hdf5_file(hdf5_handler, self.dataset)",
"def init_hdf5_file(datasets, init_start, init_end, hdf5_file):\n schema = tokio.connectors.hdf5.SCHEMA.get(SCHEMA_VERSION)\n for dataset_name, dataset in datasets.items():\n hdf5_dataset_name = schema.get(dataset_name)\n if hdf5_dataset_name is None:\n if '/_' not in dataset_name:\n warnings.warn(\"Dataset key %s is not in schema\" % dataset_name)\n continue\n if hdf5_dataset_name not in hdf5_file:\n # attempt to convert dataset into a timeseries\n timeseries = hdf5_file.to_timeseries(dataset_name=hdf5_dataset_name)\n\n # if dataset -> timeseries failed, create and commit a new, empty timeseries\n if timeseries is None:\n timeseries = tokio.timeseries.TimeSeries(dataset_name=hdf5_dataset_name,\n start=init_start,\n end=init_end,\n timestep=dataset.timestep,\n num_columns=dataset.dataset.shape[1])\n hdf5_file.commit_timeseries(timeseries=timeseries)\n print(\"Initialized %s in %s with size %s\" % (\n hdf5_dataset_name,\n hdf5_file.name,\n timeseries.dataset.shape))",
"def add_experiment(hdf5_filename, exp_filename):\n # handling input errors\n if not isinstance(hdf5_filename, str):\n raise TypeError('Passed value of `hdf5_filename` is not a string! Instead, it is: '\n + str(type(hdf5_filename)))\n if not hdf5_filename.split('/')[-1].split('.')[-1] == 'hdf5':\n raise TypeError('`hdf5_filename` is not type = .hdf5! Instead, it is: '\n + hdf5_filename.split('/')[-1].split('.')[-1])\n if not isinstance(exp_filename, str):\n raise TypeError('Passed value of `data_filename` is not a string! Instead, it is: '\n + str(type(exp_filename)))\n # confirm exp_filename is correct format (can handle additional decimals in exp_filename\n label = '.'.join(exp_filename.split('/')[-1].split('.')[:-1])\n if len(label.split('_')) < 2:\n raise ValueError(\"\"\"Passed value of `exp_filename` inapproprate. exp_filename must contain\n at least one '_', preferably of the format somename_temp_time.xlsx (or .csv)\"\"\")\n # r+ is read/write mode and will fail if the file does not exist\n exp_file = h5py.File(hdf5_filename, 'r+')\n if exp_filename.split('.')[-1] == 'xlsx':\n data = pd.read_excel(exp_filename, header=None, names=('wavenumber', 'counts'))\n elif exp_filename.split('.')[-1] == 'csv':\n data = pd.read_csv(exp_filename, header=None, names=('wavenumber', 'counts'))\n else:\n print('data file type not recognized')\n # ensure that the data is listed from smallest wavenumber first\n if data['wavenumber'][:1].values > data['wavenumber'][-1:].values:\n data = data.iloc[::-1]\n data.reset_index(inplace=True, drop=True)\n else:\n pass\n # peak detection and data fitting\n fit_result, residuals = spectrafit.fit_data(data['wavenumber'].values, data['counts'].values)\n # extract experimental parameters from filename\n specs = exp_filename.split('/')[-1].split('.')[-2]\n if len(specs) > 1:\n spec = ''\n for _, element in enumerate(specs):\n spec = str(spec+element)\n specs = spec\n specs = specs.split('_')\n time = specs[-1]\n temp = specs[-2]\n # write data to .hdf5\n exp_file['{}/{}/wavenumber'.format(temp, time)] = data['wavenumber']\n exp_file['{}/{}/counts'.format(temp, time)] = data['counts']\n exp_file['{}/{}/residuals'.format(temp, time)] = residuals\n for i, result in enumerate(fit_result):\n # create custom datatype\n my_datatype = np.dtype([('fraction', np.float),\n ('center', np.float),\n ('sigma', np.float),\n ('amplitude', np.float),\n ('fwhm', np.float),\n ('height', np.float),\n ('area under the curve', np.float)])\n if i < 9:\n dataset = exp_file.create_dataset('{}/{}/Peak_0{}'.format(temp, time, i+1),\n (1,), dtype=my_datatype)\n else:\n dataset = exp_file.create_dataset('{}/{}/Peak_{}'.format(temp, time, i+1),\n (1,), dtype=my_datatype)\n # apply data to tuple\n data = tuple(result[:7])\n data_array = np.array(data, dtype=my_datatype)\n # write new values to the blank dataset\n dataset[...] = data_array\n print(\"\"\"Data from {} fit with compound pseudo-Voigt model.\n Results saved to {}.\"\"\".format(exp_filename, hdf5_filename))\n exp_file.close()",
"def _test_output_created(self):\n TestHarness._test_output_created(self)\n source = glob.glob(os.path.join(os.getcwd(), 'source.*'))\n assert len(source) == 1, 'Either multiple or no source files ' \\\n 'exist.'\n assert source[0].endswith('h5'), \\\n 'Source file is not a HDF5 file.'",
"def add_to_hdf5file(self, dataset_paths: list = None, dataset_values: list = None) -> None:\n\n assert dataset_paths is not None and len(dataset_paths) != 0, (\"`dataset_path` not valid. Expected at least \" \\\n f\"one list element, got {len(dataset_paths)}.\")\n\n assert dataset_values is not None and len(dataset_values) != 0, (\"`dataset_values` not valid. Expected at least \" \\\n f\"one list element, got {len(dataset_values)}.\")\n\n assert os.path.isfile(os.path.join(self.FOFDirectory, self.filename)), f\"Target hdf5 file must exist in {self.FOFDirectory}\"\n\n # Open file and optional groups within it\n FOFfile = h5py.File(os.path.join(self.FOFDirectory, self.filename), 'r+')\n print(f'[ FOFOutput ]\\t==> Opening {self.filename} file.')\n\n for internal_path, dataset_content in zip(dataset_paths, dataset_values):\n\n assert not internal_path.endswith('/'), \"Invalid hdf5 internal path\"\n assert type(dataset_content) is np.ndarray, \"Can only push numpy.ndarrays into hdf5 files.\"\n\n nested_groups = self.groups_from_path(internal_path)\n if len(nested_groups) == 1:\n FOFfile.create_dataset(nested_groups[0], data=dataset_content)\n else:\n for nested_group in nested_groups[:-1]:\n g = FOFfile.create_group(nested_group)\n g.create_dataset(nested_groups[-1], data=dataset_content)\n\n print(f'[ FOFOutput ]\\t==> Created {internal_path} dataset in {self.filename} file.')\n\n FOFfile.close()",
"def onestatfile():\n with hp.File('StatsFile.h5', 'w') as onefile:\n alldata = np.empty((600, 4, 3, 500), dtype=np.float32)\n for j in range(600):\n for i in range(3):\n msd, vol, rms, asp = getstats(i, j+1)\n alldata[j, 0, i, :] = msd\n alldata[j, 1, i, :] = vol\n alldata[j, 2, i, :] = rms\n alldata[j, 3, i, :] = asp\n onefile.create_dataset('Stats', data=alldata, chunks=(1, 4, 3, 500),\n compression='gzip', compression_opts=9)",
"def MakeFiles(arguments):\n # Unpack arguments\n process, counter, path, start, stop = arguments\n\n log.info(\"[{}] Importing data from {}\".format(process,path))\n hf = h5py.File(path, \"r\")\n\n data = np.empty((0,len(column_names)), float)\n\n # Total number of events in batch\n n_events = stop-start\n\n for i, event in enumerate(np.arange(start,stop)):\n # Print information on progress\n if i%100==0:\n log.info(\"[{}] {} of {} events examined\".format(process,i,n_events))\n\n # Number of muons in event\n nPho = np.shape(hf[ 'pho_truthType' ][ event ])[0]\n\n for pho in range(nPho):\n #log.info(\"[{}] Number of muons is {} \".format(process,nMuo))\n\n data_temp = np.zeros((1,len(column_names)))\n\n\n # Add event variables to array\n data_temp[ 0, column_names.index( 'NvtxReco' ) ] = np.int(hf['NvtxReco'][event])\n data_temp[ 0, column_names.index( 'correctedScaledAverageMu' ) ] = hf[ 'correctedScaledAverageMu' ][ event ]\n data_temp[ 0, column_names.index( 'correctedScaledActualMu' ) ] = hf[ 'correctedScaledActualMu' ][ event ]\n # Add muon variables to array\n\n addPhotonVariables(hf, event, data_temp, pho)\n\n data = np.append(data, data_temp, axis=0)\n\n\n return data",
"def _setup_h5(self, data_gen_parms):\n\n '''\n Build the group structure down to the channel group\n '''\n # Set up the basic group structure\n root_grp = VirtualGroup('')\n root_parms = dict()\n root_parms['translator'] = 'FAKEBEPS'\n root_parms['data_type'] = data_gen_parms['data_type']\n root_grp.attrs = root_parms\n\n meas_grp = VirtualGroup('Measurement_')\n chan_grp = VirtualGroup('Channel_')\n\n meas_grp.attrs.update(data_gen_parms)\n\n # Create the Position and Spectroscopic datasets for the Raw Data\n ds_pos_inds, ds_pos_vals, ds_spec_inds, ds_spec_vals = self._build_ancillary_datasets()\n\n raw_chunking = calc_chunks([self.n_pixels,\n self.n_spec_bins],\n np.complex64(0).itemsize,\n unit_chunks=[1, self.n_bins])\n\n ds_raw_data = VirtualDataset('Raw_Data', data=None,\n maxshape=[self.n_pixels, self.n_spec_bins],\n dtype=np.complex64,\n compression='gzip',\n chunking=raw_chunking,\n parent=meas_grp)\n\n chan_grp.add_children([ds_pos_inds, ds_pos_vals, ds_spec_inds, ds_spec_vals,\n ds_raw_data])\n meas_grp.add_children([chan_grp])\n root_grp.add_children([meas_grp])\n\n hdf = HDFwriter(self.h5_path)\n hdf.delete()\n h5_refs = hdf.write(root_grp)\n\n # Delete the MicroDatasets to save memory\n del ds_raw_data, ds_spec_inds, ds_spec_vals, ds_pos_inds, ds_pos_vals\n\n # Get the file and Raw_Data objects\n h5_raw = get_h5_obj_refs(['Raw_Data'], h5_refs)[0]\n h5_chan_grp = h5_raw.parent\n\n # Get the Position and Spectroscopic dataset objects\n h5_pos_inds = get_h5_obj_refs(['Position_Indices'], h5_refs)[0]\n h5_pos_vals = get_h5_obj_refs(['Position_Values'], h5_refs)[0]\n h5_spec_inds = get_h5_obj_refs(['Spectroscopic_Indices'], h5_refs)[0]\n h5_spec_vals = get_h5_obj_refs(['Spectroscopic_Values'], h5_refs)[0]\n\n # Link the Position and Spectroscopic datasets as attributes of Raw_Data\n link_as_main(h5_raw, h5_pos_inds, h5_pos_vals, h5_spec_inds, h5_spec_vals)\n\n '''\n Build the SHO Group\n '''\n sho_grp = VirtualGroup('Raw_Data-SHO_Fit_', parent=h5_chan_grp.name)\n\n # Build the Spectroscopic datasets for the SHO Guess and Fit\n sho_spec_starts = np.where(h5_spec_inds[h5_spec_inds.attrs['Frequency']].squeeze() == 0)[0]\n sho_spec_labs = get_attr(h5_spec_inds, 'labels')\n ds_sho_spec_inds, ds_sho_spec_vals = build_reduced_spec_dsets(h5_spec_inds,\n h5_spec_vals,\n keep_dim=sho_spec_labs != 'Frequency',\n step_starts=sho_spec_starts)\n\n sho_chunking = calc_chunks([self.n_pixels,\n self.n_sho_bins],\n sho32.itemsize,\n unit_chunks=[1, 1])\n ds_sho_fit = VirtualDataset('Fit', data=None,\n maxshape=[self.n_pixels, self.n_sho_bins],\n dtype=sho32,\n compression='gzip',\n chunking=sho_chunking,\n parent=sho_grp)\n ds_sho_guess = VirtualDataset('Guess', data=None,\n maxshape=[self.n_pixels, self.n_sho_bins],\n dtype=sho32,\n compression='gzip',\n chunking=sho_chunking,\n parent=sho_grp)\n\n sho_grp.add_children([ds_sho_fit, ds_sho_guess, ds_sho_spec_inds, ds_sho_spec_vals])\n\n # Write the SHO group and datasets to the file and delete the MicroDataset objects\n h5_sho_refs = hdf.write(sho_grp)\n del ds_sho_fit, ds_sho_guess, ds_sho_spec_inds, ds_sho_spec_vals\n\n # Get the dataset handles for the fit and guess\n h5_sho_fit = get_h5_obj_refs(['Fit'], h5_sho_refs)[0]\n h5_sho_guess = get_h5_obj_refs(['Guess'], h5_sho_refs)[0]\n\n # Get the dataset handles for the SHO Spectroscopic datasets\n h5_sho_spec_inds = get_h5_obj_refs(['Spectroscopic_Indices'], h5_sho_refs)[0]\n h5_sho_spec_vals = get_h5_obj_refs(['Spectroscopic_Values'], h5_sho_refs)[0]\n\n # Link the Position and Spectroscopic 
datasets as attributes of the SHO Fit and Guess\n link_as_main(h5_sho_fit, h5_pos_inds, h5_pos_vals, h5_sho_spec_inds, h5_sho_spec_vals)\n link_as_main(h5_sho_guess, h5_pos_inds, h5_pos_vals, h5_sho_spec_inds, h5_sho_spec_vals)\n\n '''\n Build the loop group\n '''\n loop_grp = VirtualGroup('Fit-Loop_Fit_', parent=h5_sho_fit.parent.name)\n\n # Build the Spectroscopic datasets for the loops\n loop_spec_starts = np.where(h5_sho_spec_inds[h5_sho_spec_inds.attrs['DC_Offset']].squeeze() == 0)[0]\n loop_spec_labs = get_attr(h5_sho_spec_inds, 'labels')\n ds_loop_spec_inds, ds_loop_spec_vals = build_reduced_spec_dsets(h5_sho_spec_inds,\n h5_sho_spec_vals,\n keep_dim=loop_spec_labs != 'DC_Offset',\n step_starts=loop_spec_starts)\n\n # Create the loop fit and guess MicroDatasets\n loop_chunking = calc_chunks([self.n_pixels, self.n_loops],\n loop_fit32.itemsize,\n unit_chunks=[1, 1])\n ds_loop_fit = VirtualDataset('Fit', data=None,\n maxshape=[self.n_pixels, self.n_loops],\n dtype=loop_fit32,\n compression='gzip',\n chunking=loop_chunking,\n parent=loop_grp)\n\n ds_loop_guess = VirtualDataset('Guess', data=None,\n maxshape=[self.n_pixels, self.n_loops],\n dtype=loop_fit32,\n compression='gzip',\n chunking=loop_chunking,\n parent=loop_grp)\n\n # Add the datasets to the loop group then write it to the file\n loop_grp.add_children([ds_loop_fit, ds_loop_guess, ds_loop_spec_inds, ds_loop_spec_vals])\n h5_loop_refs = hdf.write(loop_grp)\n\n # Delete the MicroDatasets\n del ds_loop_spec_vals, ds_loop_spec_inds, ds_loop_guess, ds_loop_fit\n\n # Get the handles to the datasets\n h5_loop_fit = get_h5_obj_refs(['Fit'], h5_loop_refs)[0]\n h5_loop_guess = get_h5_obj_refs(['Guess'], h5_loop_refs)[0]\n h5_loop_spec_inds = get_h5_obj_refs(['Spectroscopic_Indices'], h5_loop_refs)[0]\n h5_loop_spec_vals = get_h5_obj_refs(['Spectroscopic_Values'], h5_loop_refs)[0]\n\n # Link the Position and Spectroscopic datasets to the Loop Guess and Fit\n link_as_main(h5_loop_fit, h5_pos_inds, h5_pos_vals, h5_loop_spec_inds, h5_loop_spec_vals)\n link_as_main(h5_loop_guess, h5_pos_inds, h5_pos_vals, h5_loop_spec_inds, h5_loop_spec_vals)\n\n self.h5_raw = USIDataset(h5_raw)\n self.h5_sho_guess = USIDataset(h5_sho_guess)\n self.h5_sho_fit = USIDataset(h5_sho_fit)\n self.h5_loop_guess = USIDataset(h5_loop_guess)\n self.h5_loop_fit = USIDataset(h5_loop_fit)\n self.h5_spec_vals = h5_spec_vals\n self.h5_spec_inds = h5_spec_inds\n self.h5_sho_spec_inds = h5_sho_spec_inds\n self.h5_sho_spec_vals = h5_sho_spec_vals\n self.h5_loop_spec_inds = h5_loop_spec_inds\n self.h5_loop_spec_vals = h5_loop_spec_vals\n self.h5_file = h5_raw.file\n\n return",
"def add_source(self, model, x, y):\n\n x_sample = numpy.min([x.shape[1], self.ccd.shape[0]])\n y_sample = numpy.min([x.shape[0], self.ccd.shape[1]])\n\n if x_sample == self.ccd.shape[0] and y_sample == self.ccd.shape[1]:\n\n source_data = model(x, y)\n noise = source_data - numpy.random.poisson(source_data)\n\n self.signal += source_data.astype(self.dtype)\n self.noise += noise.astype(self.dtype)\n\n else:\n\n x_mean = model.x_mean \\\n if not isinstance(model.x_mean, astropy.modeling.Parameter) else model.x_mean.value\n y_mean = model.y_mean \\\n if not isinstance(model.x_mean, astropy.modeling.Parameter) else model.y_mean.value\n\n x_offset = int(x_mean) - int(x_sample / 2) if x_mean > x_sample / 2 else 0\n y_offset = int(y_mean) - int(y_sample / 2) if y_mean > y_sample / 2 else 0\n\n model.x_mean = x_mean - x_offset\n model.y_mean = y_mean - y_offset\n\n source_data = model(x, y)\n\n noise = source_data - numpy.random.poisson(source_data)\n\n self.signal[y_offset:y_offset + y_sample,\n x_offset:x_offset + x_sample] += source_data.astype(self.dtype)\n self.noise[y_offset:y_offset + y_sample,\n x_offset:x_offset + x_sample] += noise.astype(self.dtype)\n\n model.x_mean += x_offset\n model.y_mean += y_offset\n\n self.sources.append(model)",
"def _create_guess_datasets(self):\n self.h5_guess = create_empty_dataset(self.h5_loop_metrics, loop_fit32, 'Guess')\n write_simple_attrs(self._h5_group, {'guess method': 'pycroscopy statistical'})\n\n # This is necessary comparing against new runs to avoid re-computation + resuming partial computation\n write_simple_attrs(self.h5_guess, self._parms_dict)\n write_simple_attrs(self.h5_guess, {'Loop_fit_method': \"pycroscopy statistical\", 'last_pixel': 0})\n\n self.h5_main.file.flush()",
"def _generate_testdata_h5(cls, test_filepath):\n # Generate some test data\n data = numpy.indices( (10, 100, 200, 3) )\n assert data.shape == (4, 10, 100, 200, 3)\n data = data.astype( numpy.uint32 )\n cls.original_data = data\n\n # Choose names\n cls.dvid_dataset = \"datasetA\"\n cls.data_uuid = \"abcde\"\n cls.data_name = \"indices_data\"\n cls.volume_location = \"/datasets/{dvid_dataset}/volumes/{data_name}\".format( **cls.__dict__ )\n cls.node_location = \"/datasets/{dvid_dataset}/nodes/{data_uuid}\".format( **cls.__dict__ )\n cls.voxels_metadata = voxels.VoxelsMetadata.create_default_metadata(data.shape, data.dtype, \"cxyzt\", 1.0, \"\")\n\n # Write to h5 file\n with H5MockServerDataFile( test_filepath ) as test_h5file:\n test_h5file.add_node( cls.dvid_dataset, cls.data_uuid )\n test_h5file.add_volume( cls.dvid_dataset, cls.data_name, data, cls.voxels_metadata )\n\n test_h5file.add_node( \"datasetB\", \"12345\" )\n test_h5file.add_volume( \"datasetB\", cls.data_name, data, cls.voxels_metadata )",
"def generate_data(path=resource_filename('locals', 'data/fake/'), mag_range=(11.13,18)):\n # Get some random spectra\n try:\n files = glob.glob('/user/jfilippazzo/Models/ACES/default/*.fits')[::50]\n except:\n files = glob.glob('/Users/jfilippazzo/Documents/Modules/_DEPRECATED/limb_dark_jeff/limb/specint/*.fits')[::20]\n \n # Make a fake source catalog (with only essential columns for now)\n catpath = os.path.join(path,'fake_source_catalog.ecsv')\n ids = list(range(len(files)))\n coords = SkyCoord([89.7455]*len(ids), [-29.05744]*len(ids), unit='deg', frame='icrs')\n cat = at.QTable([ids,coords], names=('id','icrs_centroid'))\n cat.write(catpath)\n \n # Open the x1d file\n header = fits.getheader(resource_filename('locals', 'data/template_x1d.fits'))\n \n # Make Spectrum objects from models at R=150\n wavelength = np.arange(0.05,2.6,0.0001)[::66]*q.um\n \n # Normalize the spectra to a random F200W magnitude\n spectra = []\n f200w = Bandpass('NIRISS.F200W')\n f200w.wave_units = q.um\n for file in files:\n \n # Create Spectrum\n flux = fits.getdata(file)[-1][::66]*q.erg/q.s/q.cm**2/q.AA\n unc = flux/50.\n spec = Spectrum(wavelength, flux, unc)\n \n # Normalize to F200W\n mag = np.random.uniform(*mag_range)\n norm_spec = spec.renormalize(mag, f200w)\n spectra.append(norm_spec)\n \n # Make a separate x1d file and photometry file for each bandpass\n # containing data for each source\n for band in NIRISS_bands:\n \n try:\n \n # Get the Bandpass object\n bp = Bandpass(band)\n bp.wave_units = q.um\n \n # Make x1d file for spectra\n x1d_file = os.path.join(path,'{}_x1d.fits'.format(band))\n x1d_hdu = fits.HDUList(fits.PrimaryHDU(header=header))\n \n # Make csv file for photometry\n phot_file = os.path.join(path,'{}_phot.csv'.format(band))\n phot_data = at.Table(names=('id','band','magnitude','magnitude_unc'), dtype=(int,'S20',float,float))\n \n # Iterate over spectra\n for id,(f,spec) in enumerate(zip(files,spectra)):\n \n # Trim spectrum to bandpass for x1d file\n spec = Spectrum(*spec.spectrum, trim=[(0*q.um,bp.WavelengthMin*1E-4*q.um),(bp.WavelengthMax*1E-4*q.um,10*q.um)])\n \n # Calculate magnitude and add to photometry table\n mag, mag_unc = spec.synthetic_magnitude(bp, force=True)\n phot_data.add_row([id, band, mag, mag_unc])\n \n # Add source spectrum params for verification\n params = f.split('/')[-1].split('-')\n header['TEFF'] = int(params[0].replace('lte',''))\n header['LOGG'] = float(params[1][:4])\n header['FEH'] = float(params[-6][:-8].split('+')[-1])\n header['FILEPATH'] = f\n header['PUPIL'] = band\n\n # Put spectrum in x1d fits file\n data = fits.BinTableHDU(data=np.rec.array(list(zip(*spec.data)),\n formats='float32,float32,float32',\n names='WAVELENGTH,FLUX,ERROR'),\n header=header)\n data.name = 'EXTRACT1D'\n \n x1d_hdu.append(data)\n \n # Write the photometry file\n phot_data.write(phot_file, format='ascii.csv')\n del phot_data\n \n # Write the x1d file\n x1d_hdu.writeto(x1d_file, overwrite=True)\n del x1d_hdu\n \n except IOError:\n pass",
"def insert_volumes_in_one_dataset(file_path, h5path, file_list, newshape=None,\n concat_axis=0, dtype=None, append=True):\n\n def isalambda(v):\n return isinstance(v, type(lambda: None)) and v.__name__ == '<lambda>'\n\n mode = 'w'\n if os.path.exists(file_path):\n if append:\n mode = 'a'\n\n #loading the metadata into spatialimages\n imgs = [nib.load(vol) for vol in file_list]\n\n #getting the shapes of all volumes\n shapes = [np.array(img.get_shape()) for img in imgs]\n\n #getting the reshaped shapes\n if newshape is not None:\n if isalambda(newshape):\n nushapes = np.array([newshape(shape) for shape in shapes])\n else:\n nushapes = np.array([shape for shape in shapes])\n\n #checking if concat_axis is available in this new shapes\n for nushape in nushapes:\n assert(len(nushape) - 1 < concat_axis)\n\n #calculate the shape of the new dataset\n n_dims = nushapes.shape[1]\n ds_shape = np.zeros(n_dims, dtype=np.int)\n for a in list(range(n_dims)):\n if a == concat_axis:\n ds_shape[a] = np.sum(nushapes[:, concat_axis])\n else:\n ds_shape[a] = np.max(nushapes[:, a])\n\n #get the type of the new dataset\n #dtypes = [img.get_data_dtype() for img in imgs]\n if dtype is None:\n dtype = imgs[0].get_data_dtype()\n\n with h5py.File(file_path, mode) as f:\n try:\n ic = 0\n h5grp = f.create_group(os.path.dirname(h5path))\n h5ds = h5grp.create_dataset(os.path.basename(h5path),\n ds_shape, dtype)\n for img in imgs:\n\n #get the shape of the current image\n nushape = nushapes[ic, :]\n\n def append_to_dataset(h5ds, idx, data, concat_axis):\n \"\"\"\n @param h5ds: H5py DataSet\n @param idx: int\n @param data: ndarray\n @param concat_axis: int\n @return:\n \"\"\"\n shape = data.shape\n ndims = len(shape)\n\n if ndims == 1:\n if concat_axis == 0:\n h5ds[idx] = data\n\n elif ndims == 2:\n if concat_axis == 0:\n h5ds[idx ] = data\n elif concat_axis == 1:\n h5ds[idx ] = data\n\n elif ndims == 3:\n if concat_axis == 0:\n h5ds[idx ] = data\n elif concat_axis == 1:\n h5ds[idx ] = data\n elif concat_axis == 2:\n h5ds[idx ] = data\n\n #appending the reshaped image into the dataset\n append_to_dataset(h5ds, ic,\n np.reshape(img.get_data(), tuple(nushape)),\n concat_axis)\n\n ic += 1\n\n except ValueError as ve:\n raise Exception('Error creating group {} in hdf file {}'.format(h5path, file_path)) from ve",
"def augment_by_additive_noise(ds, noise_datadir, snr_list, copy_noise_files_to_tmpdir=False):\n logger.info(\"Augmenting dataset with additive noise from '%s'.\", noise_datadir)\n if not os.path.isdir(noise_datadir):\n logger.error(\"Noise source dir '%s' does not exist.\", noise_datadir)\n return\n\n id2type = dict(lidbox.iter_metadata_file(os.path.join(noise_datadir, \"id2label\"), 2))\n type2paths = collections.defaultdict(list)\n for noise_id, path in lidbox.iter_metadata_file(os.path.join(noise_datadir, \"id2path\"), 2):\n type2paths[id2type[noise_id]].append(path)\n del id2type\n\n if copy_noise_files_to_tmpdir:\n tmpdir = os.path.join(os.environ.get(\"TMPDIR\", \"/tmp\"), \"lidbox_noise_signals\")\n logger.info(\"Copying all noise files to TMPDIR '%s'\", tmpdir)\n for noise_type, paths in list(type2paths.items()):\n new_paths = []\n for src in paths:\n dst = os.path.join(tmpdir, noise_type, os.path.basename(src))\n logger.debug(\"%s -> %s\", src, dst)\n os.makedirs(os.path.dirname(dst), exist_ok=True)\n shutil.copyfile(src, dst)\n new_paths.append(dst)\n type2paths[noise_type] = new_paths\n\n type2paths = {t: tf.constant(paths, tf.string) for t, paths in type2paths.items()}\n\n def _update_element_meta(new_id, mixed_signal, x):\n return dict(x, id=new_id, signal=mixed_signal)\n\n def _add_random_noise_and_flatten(x):\n \"\"\"\n Using snr_list, choose len(snr_list) noise signals randomly and create new signal samples by mixing the chosen noise signals with x[\"signal\"] using random SNR dB levels.\n \"\"\"\n # Random noise path indexes and random snr levels\n rand_noise = [\n (noise_type,\n tf.random.uniform([], 0, tf.size(type2paths[noise_type]), tf.int32),\n tf.random.uniform([], snr_low, snr_high, tf.float32))\n for noise_type, snr_low, snr_high in snr_list]\n # Select random noise signals by drawn indexes and read contents from files\n rand_noise = [\n (audio_features.read_wav(type2paths[noise_type][rand_index]), snr)\n for noise_type, rand_index, snr in rand_noise]\n\n # Assert sample rates\n # TODO maybe add inline resampling of noise signals so they match the speech sr\n for (noise, sample_rate), snr in rand_noise:\n tf.debugging.assert_equal(sample_rate, x[\"sample_rate\"], message=\"Invalid noise signals are being used, all noise signals must have same sample rate as speech signals that are being augmented\")\n\n # Fix noise signal length to match x[\"signal\"] by repeating the noise signal if it is too short and then slicing it\n rand_noise = [\n # How many multiples of `noise` fits in x[\"signal\"]\n (tf.cast(tf.size(x[\"signal\"]) / tf.size(noise), tf.int32), noise, snr)\n for (noise, _), snr in rand_noise]\n rand_noise = [\n # Repeat noise and slice\n (tf.tile(noise, [1 + noise_length_ratio])[:tf.size(x[\"signal\"])], snr)\n for noise_length_ratio, noise, snr in rand_noise]\n\n # Mix x[\"signal\"] and chosen noise signals\n mixed_signals = [audio_features.snr_mixer(x[\"signal\"], noise, snr)[2] for noise, snr in rand_noise]\n # Create new utterance ids that contain the mixed noise type and SNR level\n new_ids = [\n tf.strings.join((\n \"augmented\",\n x[\"id\"],\n noise_type,\n tf.strings.join((\"snr\", tf.strings.as_string(snr, precision=2)))),\n separator=\"-\")\n for (noise_type, _, _), (_, snr) in zip(snr_list, rand_noise)]\n\n # Create new elements from the mixed signals and return as dataset\n return (tf.data.Dataset\n .zip((tf.data.Dataset.from_tensor_slices(new_ids),\n tf.data.Dataset.from_tensor_slices(mixed_signals),\n 
tf.data.Dataset.from_tensors(x).repeat(len(mixed_signals))))\n .map(_update_element_meta))\n\n return ds.interleave(\n _add_random_noise_and_flatten,\n block_length=len(snr_list),\n num_parallel_calls=TF_AUTOTUNE)",
"def add_new_data(self, inputs, targets):\n n_new, _ = inputs.shape\n n_new_holdout = int(self.holdout_ratio * n_new)\n n_new_train = n_new - n_new_holdout\n self.n_holdout += n_new_holdout\n self.n_holdout = min(self.n_holdout, self.max_n_holdout)\n new_bootstrap_idxs = self.get_bootstrap_idxs(0, n_new_train, n_new_train)\n\n if self.train_inputs is None:\n new_train, new_holdout = self._random_split(inputs, targets)\n self.train_inputs, self.train_targets = new_train\n self.bootstrap_idxs = new_bootstrap_idxs\n\n else:\n new_train, new_holdout = self._random_split(\n np.concatenate([self.holdout_inputs, inputs]),\n np.concatenate([self.holdout_targets, targets]),\n )\n self.train_inputs, self.train_targets = (\n np.concatenate([self.train_inputs, new_train[0]]),\n np.concatenate([self.train_targets, new_train[1]])\n )\n self.bootstrap_idxs = np.concatenate(\n [self.bootstrap_idxs, self.n_train + new_bootstrap_idxs],\n axis=-1\n )\n\n [self.input_welford.add_data(x) for x in new_train[0]]\n [self.target_welford.add_data(y) for y in new_train[1]]\n self.n_train, _ = self.train_inputs.shape\n self.holdout_inputs, self.holdout_targets = new_holdout",
"def write_data_files(self):\n \n logging.info('\\n Start writing data files \\n')\n \n for i, (data_file, label_file) in enumerate(self.files):\n data_file, label_file = Path(data_file), Path(label_file)\n logging.info('Writing .hdf5 file for : [{}]'.format(str(data_file)))\n \n file_name = self.save_data_folder / '{}.hdf5'.format(label_file.name[:-4])\n if file_name.exists():\n continue\n \n with h5py.File(str(file_name), 'w') as writer:\n self.serialize_samples(\n writer, data_file, label_file)",
"def append_to_csv(self):\n appended_data = pd.concat([self.existing_data, self.new_data], axis = 1)\n appended_data.to_csv(filename_main, index = False)\n warnings.warn(\"Add new graphs to .vsz files to show the new data\")",
"def update_seed_parameters(parameters, samples):\n\n with open(\"../../output/seed.tmp\", \"w\") as f:\n f.write(f\"{parameters[0]+parameters[1]}\\n\")\n f.write(f\"{samples}\")",
"def appendToFile(fname, data, column_names, column_dtype):\n log.info(\"Appending to {}\".format(args.outdir + fname))\n with h5py.File(args.outdir + fname, 'a') as hf:\n for var in column_names:\n\n array = data[:,column_names.index(f'{var}')]\n hf[f'{var}'].resize((hf[f'{var}'].shape[0] + array.shape[0]), axis = 0)\n hf[f'{var}'][-array.shape[0]:] = array.astype(column_dtype[f'{var}'])",
"def generate_datasets(self, rand=None, *args, **kwargs):\n raise NotImplementedError()",
"def import_scatterdata(self):\n\n datalist = [name for name in os.listdir(self.dir_line.text()) if\n os.path.isfile(os.path.join(self.dir_line.text(), name)) and name.endswith('.h5')]\n\n data_os = os.path.join(self.dir_line.text(), random.choice(datalist))\n with h5py.File(data_os, 'r') as f:\n labelset = f['r_vs_thresh'].attrs['best'][0].decode()\n labels = pd.array(f['labels/' + labelset][()]).astype(int)\n columns = f['data'].attrs['datacolumns'] - 1\n columns = columns.tolist()\n dataset = pd.DataFrame(f['data'][()]).iloc[:, columns]\n dataset = dataset.divide(1000)\n dataset['labels'] = labels\n\n return dataset\n\n # TODO: implement a way to rotate over all datafiles",
"def registerSampleData():\n # It is always recommended to provide sample data for users to make it easy to try the module,\n # but if no sample data is available then this method (and associated startupCompeted signal connection) can be removed.\n\n import SampleData\n iconsPath = os.path.join(os.path.dirname(__file__), 'Resources/Icons')\n\n # To ensure that the source code repository remains small (can be downloaded and installed quickly)\n # it is recommended to store data sets that are larger than a few MB in a Github release.\n\n # RegularizedFastMarching1\n SampleData.SampleDataLogic.registerCustomSampleDataSource(\n # Category and sample name displayed in Sample Data module\n category='RegularizedFastMarching',\n sampleName='RegularizedFastMarching1',\n # Thumbnail should have size of approximately 260x280 pixels and stored in Resources/Icons folder.\n # It can be created by Screen Capture module, \"Capture all views\" option enabled, \"Number of images\" set to \"Single\".\n thumbnailFileName=os.path.join(iconsPath, 'RegularizedFastMarching1.png'),\n # Download URL and target file name\n uris=\"https://github.com/Slicer/SlicerTestingData/releases/download/SHA256/998cb522173839c78657f4bc0ea907cea09fd04e44601f17c82ea27927937b95\",\n fileNames='RegularizedFastMarching1.nrrd',\n # Checksum to ensure file integrity. Can be computed by this command:\n # import hashlib; print(hashlib.sha256(open(filename, \"rb\").read()).hexdigest())\n checksums = 'SHA256:998cb522173839c78657f4bc0ea907cea09fd04e44601f17c82ea27927937b95',\n # This node name will be used when the data set is loaded\n nodeNames='RegularizedFastMarching1'\n )\n\n # RegularizedFastMarching2\n SampleData.SampleDataLogic.registerCustomSampleDataSource(\n # Category and sample name displayed in Sample Data module\n category='RegularizedFastMarching',\n sampleName='RegularizedFastMarching2',\n thumbnailFileName=os.path.join(iconsPath, 'RegularizedFastMarching2.png'),\n # Download URL and target file name\n uris=\"https://github.com/Slicer/SlicerTestingData/releases/download/SHA256/1a64f3f422eb3d1c9b093d1a18da354b13bcf307907c66317e2463ee530b7a97\",\n fileNames='RegularizedFastMarching2.nrrd',\n checksums = 'SHA256:1a64f3f422eb3d1c9b093d1a18da354b13bcf307907c66317e2463ee530b7a97',\n # This node name will be used when the data set is loaded\n nodeNames='RegularizedFastMarching2'\n )",
"def gene_h5_train_file(data_path, h5_name):\n img = []\n y_cls_mask = []\n y_reg_cls_mask = []\n os.chdir(data_path)\n jpgfiles = glob.glob('*.jpg')\n idx = 1\n # the position of generator objector is very important\n gene_obj = image_output_pair(data_path, 1/255.0)\n while True:\n if idx == len(jpgfiles):\n break\n print '\\t{0}/{1}'.format(idx, len(jpgfiles))\n # the position of generator objector is very important\n # gene_obj = image_output_pair(data_path, 1/255.0)\n img_it, y_cls_mask_it, y_reg_cls_mask_it = gene_obj.next()\n img.append(img_it)\n y_cls_mask.append(y_cls_mask_it)\n y_reg_cls_mask.append(y_reg_cls_mask_it)\n idx += 1\n\n # img => (320, 320, 3)\n # after np.stack => (19041, 320, 320, 3)\n img_input = np.stack(img, axis=0)\n y_cls = np.stack(y_cls_mask, axis=0)\n y_reg = np.stack(y_reg_cls_mask, axis=0)\n print 'input data shape is {0}'.format(img_input.shape)\n print 'y_cls data shape is {0}'.format(y_cls.shape)\n print 'y_reg data shape is {0}'.format(y_reg.shape)\n \n # wirte data\n h5 = '/home/yuquanjie/Documents/train_' + h5_name\n file_write = h5py.File(h5, 'w')\n file_write.create_dataset('X_train', data=img_input)\n file_write.create_dataset('Y_train_cls', data=y_cls)\n file_write.create_dataset('Y_train_merge', data=y_reg)\n file_write.close()",
"def seed_audio_file(dataset_id, data):\n\n for item in data:\n try:\n audiofile = AudioFile()\n audiofile.id_dataset = dataset_id\n audiofile.audio_path = item['file']\n audiofile.date_created = datetime.datetime.now()\n session.add(audiofile)\n session.commit()\n print(f'AudioFile: {audiofile.id} inserted')\n\n # get the data relative to the currenr audiofile\n items = [d for d in data if d['file'] == item['file']][0]\n\n # seed the peak table\n Seed.seed_peak(audiofile.id, items)\n\n # seed the rms table\n Seed.seed_rms(audiofile.id, items)\n\n except Exception as e:\n print(f'seed_audio_file: {e}')",
"def make_hdf5file(self, dataset_paths: list = None, dataset_values: list = None) -> None:\n\n assert dataset_paths is not None and len(dataset_paths) != 0, (\"`dataset_path` not valid. Expected at least \" \\\n f\"one list element, got {len(dataset_paths)}.\")\n\n assert dataset_values is not None and len(dataset_values) != 0, (\"`dataset_values` not valid. Expected at least \" \\\n f\"one list element, got {len(dataset_values)}.\")\n\n\n # Remove file if already exists and create a new one\n if os.path.isfile(os.path.join(self.FOFDirectory, self.filename)):\n os.remove(os.path.join(self.FOFDirectory, self.filename))\n print(f'[ FOFOutput ]\\t==> Removed old {self.filename} file.')\n\n # Create file and optional groups within it\n FOFfile = h5py.File(os.path.join(self.FOFDirectory, self.filename), 'w')\n print(f'[ FOFOutput ]\\t==> Created new {self.filename} file.')\n\n # Push the attributes to file, if any\n if self.attrs is not None and len(self.attrs.keys()) > 0:\n for key, text in zip(self.attrs.keys(), self.attrs.values()):\n FOFfile.attrs[key] = text\n\n for internal_path, dataset_content in zip(dataset_paths, dataset_values):\n\n assert not internal_path.endswith('/'), \"Invalid hdf5 internal path\"\n assert type(dataset_content) is np.ndarray, \"Can only push numpy.ndarrays into hdf5 files.\"\n\n nested_groups = self.groups_from_path(internal_path)\n if len(nested_groups) == 1:\n FOFfile.create_dataset(nested_groups[0], data=dataset_content)\n else:\n for nested_group in nested_groups[:-1]:\n g = FOFfile.create_group(nested_group)\n g.create_dataset(nested_groups[-1], data = dataset_content)\n\n print(f'[ FOFOutput ]\\t==> Created {internal_path} dataset in {self.filename} file.')\n\n FOFfile.close()",
"def run(self):\n\n # Setup hdf5 file and datasets\n self.vw_f = h5py.File(self.name,'w')\n self.vw,self.vwts = [],[]\n for i in range(self.n_cams):\n x,y = self.resolution[i]\n vw = self.vw_f.create_dataset('mov{}'.format(i), (self.hdf_resize, y, x), maxshape=(None, y, x), dtype='uint8', compression='lzf') \n vwts = self.vw_f.create_dataset('ts{}'.format(i), (self.hdf_resize,2), maxshape=(None,2), dtype=np.float64, compression='lzf')\n self.vw.append(vw)\n self.vwts.append(vwts)\n \n # Counters and buffers\n _sav_idx = [0]*self.n_cams # index within hdf5 dataset\n _buf_idx = [0]*self.n_cams # index of in-memory buffer that is periodicially dumped to hdf5 dataset\n _saving_buf,_saving_ts_buf = [],[]\n for i in range(self.n_cams):\n x,y = self.resolution[i]\n sb = np.empty((self.buffer_size,y,x), dtype=np.uint8)\n stb = np.empty((self.buffer_size,2), dtype=np.float64)\n _saving_buf.append(sb)\n _saving_ts_buf.append(stb)\n\n cams_running = [True for i in range(self.n_cams)]\n # Main loop\n while any(cams_running):\n # For all datasets: if there's not enough room to dump another buffer's worth into dataset, extend it\n # Then read new frames, and save/query as desired\n for di in range(self.n_cams):\n if not cams_running[di]:\n continue\n \n if self.vw[di].shape[0]-_sav_idx[di] <= self.buffer_size:\n assert self.vw[di].shape[0] == self.vwts[di].shape[0], 'Frame and timestamp dataset lengths are mismatched.'\n self.vw[di].resize((self.vw[di].shape[0]+self.hdf_resize, self.vw[di].shape[1], self.vw[di].shape[2]))\n self.vwts[di].resize((self.vwts[di].shape[0]+self.hdf_resize,self.vwts[di].shape[1]))\n \n # Get new frames from buffer, breaking out if empty and kill flag has been raised\n ts=temp=bsave=None\n try:\n ts,temp,bsave = self.frame_buffer[di].get(block=False)\n except Queue.Empty:\n if self.kill_flag.value:\n cams_running[di] = False\n continue\n\n if self.kill_flag.value==True:\n logging.info('Final flush for camera {}: {} frames remain.'.format(di, self.frame_buffer[di].qsize()))\n \n if di==self.query_idx and self.query_flag.value:\n self.query_queue[:] = temp.copy()\n self.query_queue_ts.value = ts[1]\n self.query_flag.value = False\n \n if bsave: # flag that this frame was added to queue during a saving period\n\n # add new data to in-memory buffer\n x,y = self.resolution[di]\n _saving_buf[di][_buf_idx[di]] = temp.reshape([y,x])\n _saving_ts_buf[di][_buf_idx[di]] = ts\n _buf_idx[di] += 1\n # if necessary, flush out buffer to hdf dataset\n if (self.flushing.value and _buf_idx[di]>=self.min_flush) or _buf_idx[di] >= self.buffer_size:\n if _buf_idx[di] >= self.buffer_size:\n logging.warning('Dumping camera b/c reached max buffer (buffer={}, current idx={})'.format(self.buffer_size, _buf_idx[di]))\n self.vw[di][_sav_idx[di]:_sav_idx[di]+_buf_idx[di],:,:] = _saving_buf[di][:_buf_idx[di]]\n self.vwts[di][_sav_idx[di]:_sav_idx[di]+_buf_idx[di],:] = _saving_ts_buf[di][:_buf_idx[di]]\n _sav_idx[di] += _buf_idx[di]\n _buf_idx[di] = 0\n\n # final flush:\n for di in range(self.n_cams):\n self.vw[di][_sav_idx[di]:_sav_idx[di]+_buf_idx[di],:,:] = _saving_buf[di][:_buf_idx[di]]\n self.vwts[di][_sav_idx[di]:_sav_idx[di]+_buf_idx[di]] = _saving_ts_buf[di][:_buf_idx[di]]\n _sav_idx[di] += _buf_idx[di]\n # cut off all unused allocated space \n self.vw[di].resize([_sav_idx[di],self.vw[di].shape[1],self.vw[di].shape[2]])\n self.vwts[di].resize([_sav_idx[di],2])\n\n self.vw_f.close()\n self.saving_complete.value = True",
"def generate_dataset(self):\n\t\timg_set = []\n\t\tqa_set = []\n\t\tfor i in range(self.config.dataset_size):\n\t\t\timg, r = self.generate_image()\n\t\t\tq = self.generate_question()\n\t\t\ta = self.generate_answer(r, q)\n\t\t\timg_sample = {\n\t\t\t\t'id': i,\n\t\t\t\t'image': img.tolist()\n\t\t\t}\n\t\t\timg_set.append(img_sample)\n\t\t\tfor j in range(len(q)):\n\t\t\t\tqa_sample = {\n\t\t\t\t\t'id': i,\n\t\t\t\t\t'question': q[j].tolist(),\n\t\t\t\t\t'answer': a[j].tolist()\n\t\t\t\t}\n\t\t\t\tqa_set.append(qa_sample)\n\t\tprint('Finished creating smaples')\n\t\tdataset = {\n\t\t\t'image':\timg_set,\n\t\t\t'qa':\tqa_set\n\t\t}\n\t\twith open(self.path, 'w') as f:\n\t\t\tjson.dump(dataset, f)",
"def load_hdf5_replace_and_dump(filename: str, output_filename_extra=\"duplicated\"):\n\n existing_particle_types, id_array_list = zip(\n *read_particle_ids_from_file(f\"{filename}.hdf5\").items()\n )\n id_array, insertion_points = combine_arrays(id_array_list)\n\n new_id_array, old_position_dict, new_position_dict = find_and_replace_non_unique_ids(\n id_array\n )\n\n new_id_array_list = split_arrays(new_id_array, insertion_points)\n\n duplicated_filename = f\"{filename}_{output_filename_extra}.yml\"\n write_data(duplicated_filename, old_position_dict, new_position_dict)\n\n write_all_id_arrays(f\"{filename}.hdf5\", new_id_array_list, existing_particle_types)\n\n return",
"def test_add_to_hdf5_cmd(tmpdir, datadir):\n filename_original = datadir.join(\"test_sensitivity_cube.fits\").strpath\n \n # Make some files to input\n scube_fn1 = tmpdir.join(\"20181203v013_multi_324_062_055.fits\").strpath\n scube_fn2 = tmpdir.join(\"20181203v013_multi_013_103_019.fits\").strpath\n copy(filename_original, scube_fn1)\n copy(filename_original, scube_fn2)\n\n output = tmpdir.join(\"test_output.h5\").strpath\n\n # Run with command line arguments passed\n args = [\"--regex\", \".*(2[0-9]{7}v[0-9]{3})_multi_[0-9]{3}_([0-9]{3})\",\n scube_fn1, scube_fn2, output] \n add_sensitivity_cube_to_hdf5(args=args)\n \n assert isfile(output)"
] | [
"0.67602044",
"0.6064266",
"0.54224694",
"0.5420276",
"0.54171365",
"0.54065704",
"0.5364606",
"0.53565514",
"0.53282213",
"0.5310068",
"0.5291916",
"0.52736014",
"0.52707237",
"0.52698874",
"0.52663946",
"0.5238524",
"0.5230575",
"0.5226644",
"0.5225333",
"0.5223067",
"0.52210927",
"0.5205476",
"0.51813376",
"0.51386374",
"0.51257086",
"0.51185274",
"0.5115813",
"0.51070553",
"0.5077936",
"0.50737005"
] | 0.6450735 | 1 |
Gets train and test labels for FRI vs Random classification. Arguments | def generate_labels_fri(train_i, test_i, labels):
train = labels[train_i]
test = labels[test_i]
train_y = train == 1
test_y = test == 1
return train_y, test_y | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_labels_frii(train_i, test_i, labels):\n train = labels[train_i]\n test = labels[test_i]\n\n train_y = train == 2\n test_y = test == 2\n\n return train_y, test_y",
"def test_text_classifier_get_labels(self):\n pass",
"def get_test_labels(self):\n raise NotImplementedError",
"def get_train_labels(self):\n raise NotImplementedError",
"def test_intent_classifier_get_labels(self):\n pass",
"def get_train_labels(self, window, scene):\n pass",
"def test_labels(self):\n return self._test_labels",
"def get_two_class_labels_fortest(csv_file_path_test, stratify_percentage=1):\n \n test_dataframe = pd.read_csv(csv_file_path_test)\n #filtered_input_dataframe = input_dataframe[input_dataframe['ID'].apply(lambda x : 'any' in x) ]\n files_with_ids_fortest = []\n \n # print(input_dataframe.columns.values)\n X_test = list(test_dataframe['ID'])\n \n print(\"Testing sample\",X_test[0])\n \n y_test_df = test_dataframe.drop(test_dataframe.columns[[0,6]], axis = 1)\n \n print(\"testing y samples\")\n \n \n #y = [y_dataframe.columns.values.tolist()] + y_dataframe.values.tolist()\n y_test = y_test_df.values.tolist()\n #print (y[0])\n #print(len(X))\n #print(len(y))\n \n num_samples_train = int(stratify_percentage * len(X))\n num_samples_test = int(stratify_percentage * len(X_test))\n print(\"Num Samples in Training :\", num_samples_train)\n print(\"Num Samples in Testing :\", num_samples_test)\n \n for k,v in list(zip(X_test, y_test)) :\n files_with_ids_fortest.append( (\"_\".join(k.split('_')[:2]), v))\n \n return files_with_ids_fortest",
"def get_train_labels(self, window: Box, scene: Scene) -> Labels:\n raise NotImplementedError()",
"def test_model (self, text_test, labels_test):\n print(classification_report(labels_test, self.classify(text_test)))",
"def classification(trainData, trainLabels, testData, method):\n\n nClass = 2\n classLabels = [0,1]\n\n trainLabelsUnqArr = np.unique(trainLabels)\n\n if method == 'NaiveBayes':\n classifier = GaussianNB()\n model = classifier.fit(trainData, trainLabels)\n result = model.predict(testData)\n proba = model.predict_proba(testData)\n proba = fillinMatrix(proba, trainLabelsUnqArr, nClass)\n probaDf = pd.DataFrame(data=proba, columns=classLabels)\n elif method == 'knnVoting':\n\n classifier = KNeighborsClassifier(5)\n model = classifier.fit(trainData, trainLabels)\n\n result = model.predict(testData)\n\n proba = model.predict_proba(testData)\n proba = fillinMatrix(proba, trainLabelsUnqArr, nClass)\n probaDf = pd.DataFrame(data=proba, columns=classLabels)\n\n elif method == 'RandomForests':\n\n classifier = RandomForestClassifier(max_depth=10, random_state=0)\n model = classifier.fit(trainData, trainLabels)\n\n result = model.predict(testData)\n\n proba = model.predict_proba(testData)\n proba = fillinMatrix(proba, trainLabelsUnqArr, nClass)\n probaDf = pd.DataFrame(data=proba, columns=classLabels)\n ############################################\n importances = model.feature_importances_\n std = np.std([tree.feature_importances_ for tree in model.estimators_],\n axis=0)\n indices = np.argsort(importances)[::-1]\n # Print the feature ranking\n print(\"Feature ranking:\")\n for f in range(trainData.shape[1]):\n print(\"%d. feature %d (%f)\" % (f + 1, indices[f], importances[indices[f]]))\n # Plot the feature importances of the forest\n plt.figure()\n plt.title(\"Feature importances\")\n plt.bar(range(trainData.shape[1]), importances[indices],\n color=\"r\", yerr=std[indices], align=\"center\")\n plt.xticks(range(trainData.shape[1]), indices)\n plt.xlim([-1, trainData.shape[1]])\n plt.show()\n\n elif method == 'SVM':\n\n classifier = svm.SVC(C=3, gamma=0.003, probability=True)\n model = classifier.fit(trainData, trainLabels)\n\n result = model.predict(testData)\n\n proba = model.predict_proba(testData)\n proba = fillinMatrix(proba, trainLabelsUnqArr, nClass)\n probaDf = pd.DataFrame(data=proba, columns=classLabels)\n\n elif method == 'AdaBoost':\n\n classifier = AdaBoostClassifier()\n model = classifier.fit(trainData, trainLabels)\n\n result = model.predict(testData)\n\n proba = model.predict_proba(testData)\n proba = fillinMatrix(proba, trainLabelsUnqArr, nClass)\n probaDf = pd.DataFrame(data=proba, columns=classLabels)\n ############################################\n importances = model.feature_importances_\n std = np.std([tree.feature_importances_ for tree in model.estimators_],\n axis=0)\n indices = np.argsort(importances)[::-1]\n # Print the feature ranking\n print(\"Feature ranking:\")\n for f in range(trainData.shape[1]):\n print(\"%d. 
feature %d (%f)\" % (f + 1, indices[f], importances[indices[f]]))\n # Plot the feature importances of the forest\n plt.figure()\n plt.title(\"Feature importances\")\n plt.bar(range(trainData.shape[1]), importances[indices],\n color=\"r\", yerr=std[indices], align=\"center\")\n plt.xticks(range(trainData.shape[1]), indices)\n plt.xlim([-1, trainData.shape[1]])\n plt.show()\n\n elif method == 'NeuralNetwork':\n classifier = MLPClassifier(alpha=1)\n model = classifier.fit(trainData, trainLabels)\n\n result = model.predict(testData)\n\n proba = model.predict_proba(testData)\n proba = fillinMatrix(proba, trainLabelsUnqArr, nClass)\n probaDf = pd.DataFrame(data=proba, columns=classLabels)\n\n elif method == 'LogisticRegression':\n classifier = LogisticRegression()\n model = classifier.fit(trainData, trainLabels)\n\n result = model.predict(testData)\n\n proba = model.predict_proba(testData)\n proba = fillinMatrix(proba, trainLabelsUnqArr, nClass)\n probaDf = pd.DataFrame(data=proba, columns=classLabels)\n\n elif method == 'LinearSVM':\n classifier = LinearSVC(random_state=0)\n model = classifier.fit(trainData, trainLabels)\n\n result = model.predict(testData)\n\n ############################################\n importances = model.coef_\n # std = np.std([tree.feature_importances_ for tree in model.estimators_],\n plt.plot(importances.shape[1])\n plt.ylabel('some numbers')\n plt.show()\n elif method == 'kNN':\n\n # logger.info(model.coef_)\n # proba = model.predict_proba(testData)\n # proba = fillinMatrix(proba, trainLabelsUnqArr, nClass)\n # probaDf = pd.DataFrame(data=proba, columns=classLabels)\n neigh = KNeighborsClassifier(n_neighbors=3)\n neigh.fit(trainData, trainLabels)\n\n result=neigh.predict(testData)\n probaDf=neigh.predict_proba(testData)\n\n # logger.info(method)\n\n return result, probaDf",
"def _fetch_labels(self, list_fams, no_imgs, num_samples) -> Tuple[np.ndarray, List]:\n y_train = np.zeros(num_samples)\n pos = 0\n label = 0\n indexes = []\n for i in no_imgs:\n indexes.append(i)\n print(\"Label:%2d\\tFamily: %15s\\tNumber of images: %d\" % (label, list_fams[label], i))\n for j in range(i):\n y_train[pos] = label\n pos += 1\n label += 1\n return y_train, indexes",
"def main():\n\n trainData = os.getcwd() + '/data/traindata.txt'\n trainLabels = os.getcwd() + '/data/trainlabels.txt'\n\n #testData = os.getcwd() + '/data/traindata.txt'\n #testLabels = os.getcwd() + '/data/trainlabels.txt'\n\n testData = os.getcwd() + '/data/testdata.txt'\n testLabels = os.getcwd() + '/data/testlabels.txt'\n\n #trainData = os.getcwd() + '/data/toyData.txt'\n #trainLabels = os.getcwd() + '/data/toyLabel.txt'\n #testData = os.getcwd() +'/data/toyTestData.txt'\n #testLabels = os.getcwd() + '/data/toyTestLabel.txt'\n\n #print(trainData, trainLabels)\n myClassifier = NBClassifier.new(NBClassifier.MODE_BERNOULI)\n myClassifier.setTrainData(trainData, trainLabels)\n #print(myClassifier)\n\n #singleTestData = ['Chinese', 'Chinese', 'Chinese', 'Tokyo', 'Japan']\n #prediction = myClassifier.predict(singleTestData)\n #print(f'{singleTestData} >>> {prediction}')\n predictions = myClassifier.predictSet(testData)\n accuracy = myClassifier.reportAccuracy(testLabels)\n\n #print(predictions)\n print(accuracy)",
"def _get_classification_data(self, real, synthetic, real_label, synthetic_label):\n split_index = int(self.train_test_split * len(real))\n X_train = synthetic[:split_index]\n y_train = synthetic_label[:split_index]\n X_test = real[split_index:]\n y_test = real_label[split_index:]\n return X_train, y_train, X_test, y_test",
"def predictAuthors(training_fvs, labels, test_fvs):\n clf = MultinomialNB()\n clf.fit(training_fvs, labels)\n return clf.predict(test_fvs)",
"def test_classifiers(train_docs, train_target, test_docs, test_target, min_docs, K, K2, removeStopWords):\n # test_classifiers(train_docs, train_target, test_docs, test_targets, i, 3)\n X_train_counts, X_train_tfidf, X_test_counts, X_test_tfidf = extract_text_features(train_docs, test_docs, min_docs, removeStopWords)\n \n \n num_docs, vocab_size = X_train_counts.shape\n print('Number of (training) documents =',num_docs)\n print('Vocabulary size =',vocab_size)\n \n\n # Now evaluate the classifiers on the test data\n # Print out the accuracy as a percentage for each classifier.\n # np.mean() can be used to calculate the accuracy. Round the accuracy to 2 decimal places.\n\n #predict according to different classifier--evaluate results \n predicted_multNB = fit_and_predict_multinomialNB(X_train_tfidf, train_target, X_test_tfidf)\n predicted_bernNB = fit_and_predict_BernoulliNB(X_train_tfidf, train_target, X_test_tfidf)\n predicted_LR = fit_and_predict_LR(X_train_tfidf, train_target, X_test_tfidf)\n predicted_LR = fit_and_predict_LR(X_train_counts, train_target, X_test_counts)\n predicted_KNN = fit_and_predict_KNN(X_train_tfidf, train_target, X_test_tfidf, K)\n predicted_KNN2 = fit_and_predict_KNN(X_train_tfidf, train_target, X_test_tfidf, K2)\n \n predicted_base = np.array([FreqDist(test_target).most_common(1)[0][0]]*len(test_target))\n\n # count num of correct predictions / total\n np_test_target = np.array(test_target)\n base = np.sum(predicted_base == np_test_target)/len(np_test_target)*100\n multNB = np.sum(predicted_multNB == np_test_target)/len(np_test_target)*100\n bernNB = np.sum(predicted_bernNB == np_test_target)/len(np_test_target)*100\n LR = np.sum(predicted_LR == np_test_target)/len(np_test_target)*100\n KN = np.sum(predicted_KNN == np_test_target)/len(np_test_target)*100\n KN2 = np.sum(predicted_KNN2 == np_test_target)/len(np_test_target)*100\n\n \n print('\\tBase Accuracy: {:.3f}'.format(base))\n print('\\tAccuracy with multinomial naive Bayes: {:.2f}'.format(multNB))\n print('\\tAccuracy with Bernoulli naive Bayes: {:.2f}'.format(bernNB))\n print('\\tAccuracy with logistic regression: {:.2f}'.format(LR))\n print('\\tAccuracy with kNN, k={} classifier: {:2f}'.format(K, KN))\n print('\\tAccuracy with kNN, k={} classifier: {:.2f}'.format(K2, KN2))",
"def test_text_classifier_get_training_samples(self):\n pass",
"def check_classifier():\n content = []\n labels = []\n file = 'COMP3074-CW1-Dataset.csv'\n content, labels = get_tag(file, \"question_book\", content, labels)\n file = 'name.csv'\n content, labels = get_tag(file, \"question_book\", content, labels)\n file = 'Small_talk.csv'\n content, labels = get_tag(file, \"small_talk\", content, labels, )\n x_train, x_test, y_train, y_test = train_test_split(content, # Sample feature set to be divided\n labels, # The sample result to be divided (label)\n stratify=labels, # Keep the category proportions\n # the same in training and testing\n test_size=0.25, # Refers to the proportion of\n # samples reserved for testing\n random_state=22) # Random seed\n count_vect = CountVectorizer(stop_words=stopwords.words('english'))\n x_train_counts = count_vect.fit_transform(x_train)\n tfidf_transformer = TfidfTransformer(use_idf=True, # Tf_idf\n sublinear_tf=True).fit(x_train_counts)\n x_train_tf = tfidf_transformer.transform(x_train_counts) # Standardize the inherent attributes of the training set,\n # reduce dimensionality and normalize\n classify = LogisticRegression(random_state=0).fit(x_train_tf, y_train) # Logistic regression\n return classify, tfidf_transformer, count_vect",
"def train_labels(self):\n return self._train_labels",
"def predict(self, testData=[]):\n result = []\n for classValue in self._classAttrs:\n #print(f'Computing Label: {classValue}, {self._classLabelMap[classValue]}')\n result.append(self._computeCondProb(testData, classValue))\n return self._classLabelMap[result.index(max(result))]",
"def load_data_and_labels(data_file=train_file):\n \"\"\"\n There are 7 categories - \n 1. DEMO\n 2. DISE\n 3. TRMT\n 4. GOAL\n 5. PREG\n 6. FMLY\n 7. SOCL\n \"\"\"\n d = {}\n d['DEMO'] = [1, 0, 0, 0, 0, 0, 0]\n d['DISE'] = [0, 1, 0, 0, 0, 0, 0]\n d['TRMT'] = [0, 0, 1, 0, 0, 0, 0]\n d['GOAL'] = [0, 0, 0, 1, 0, 0, 0]\n d['PREG'] = [0, 0, 0, 0, 1, 0, 0]\n d['FAML'] = [0, 0, 0, 0, 0, 1, 0]\n d['SOCL'] = [0, 0, 0, 0, 0, 0, 1]\n\n max_len = -1\n\n #Load data from files\n samples = []\n with open(data_file, 'rb') as csvfile:\n spamreader = csv.reader(csvfile, delimiter='\\t', quotechar='|')\n for i, row in enumerate(spamreader):\n if (row[0] == \"Category\"):\n continue\n print (i, row[1])\n #samples.append([row[0], row[2]])\n #getting class and title = row[0] and row[1] respectively\n samples.append([row[1], row[2], row[0]])\n #split by words\n\n return samples",
"def classify(self, features):\n \n class_labels = []\n # TODO: finish this.\n features = np.array(features)\n feat_shape = features.shape\n for indx in range(feat_shape[0]):\n# print list(features[indx,:]), features[indx,:]\n decision = self.root.decide(list(features[indx,:]))\n class_labels.append(decision)\n return class_labels",
"def test(self):\n\t\treturn classification_report(self.test_labels, self.predict(self.test_data), target_names=self.le.classes_)",
"def _classifyROMs(self, classifier, features, clusterFeatures):\n # the actual classifying algorithms is the unSupervisedEnging of the QDataMining of the PP Model\n ## get the instance\n classifier = classifier.interface.unSupervisedEngine\n # update classifier features\n classifier.updateFeatures(features)\n # make the clustering instance)\n classifier.train(clusterFeatures)\n # label the training data\n labels = classifier.evaluate(clusterFeatures)\n return labels",
"def get_imagenet_classnames():\r\n return np.loadtxt(open(path_data+'/ilsvrc_2012_labels.txt'), dtype=object, delimiter='\\n')",
"def get_fashion_mnist_labels(labels): #@save\n text_labels = ['t-shirt', 'trouser', 'pullover', 'dress', 'coat',\n 'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot']\n return [text_labels[int(i)] for i in labels]",
"def feature_label(self, train, val):\n self.train_features = {name: np.array(value) for name, value in train.items()}\n self.train_labels = {name: self.train_features.pop(name) for name in self.label_names}\n\n self.val_features = {name: np.array(value) for name, value in val.items()}\n self.val_labels = {name: self.val_features.pop(name) for name in self.label_names}\n\n return \"feature and label for training has been created\"",
"def get_labels(info):\n return info.features[\"labels\"].names",
"def get_labels(info):\n return info.features[\"labels\"].names",
"def classify(train=None, test=None, data=None, res_dir=\"res/\", disp=True, outfilename=None):\n utils.print_success(\"Comparison of differents classifiers\")\n if data is not None:\n train_features = data[\"train_features\"]\n train_groundtruths = data[\"train_groundtruths\"]\n test_features = data[\"test_features\"]\n test_groundtruths = data[\"test_groundtruths\"]\n else:\n train = utils.abs_path_file(train)\n test = utils.abs_path_file(test)\n train_features, train_groundtruths = read_file(train)\n test_features, test_groundtruths = read_file(test)\n if not utils.create_dir(res_dir):\n res_dir = utils.abs_path_dir(res_dir)\n classifiers = {\n \"RandomForest\": RandomForestClassifier()\n # \"RandomForest\": RandomForestClassifier(n_estimators=5),\n # \"KNeighbors\":KNeighborsClassifier(3),\n # \"GaussianProcess\":GaussianProcessClassifier(1.0 * RBF(1.0), warm_start=True),\n # \"DecisionTree\":DecisionTreeClassifier(max_depth=5),\n # \"MLP\":MLPClassifier(),\n # \"AdaBoost\":AdaBoostClassifier(),\n # \"GaussianNB\":GaussianNB(),\n # \"QDA\":QuadraticDiscriminantAnalysis(),\n # \"SVM\":SVC(kernel=\"linear\", C=0.025),\n # \"GradientBoosting\":GradientBoostingClassifier(),\n # \"ExtraTrees\":ExtraTreesClassifier(),\n # \"LogisticRegression\":LogisticRegression(),\n # \"LinearDiscriminantAnalysis\":LinearDiscriminantAnalysis()\n }\n for key in classifiers:\n utils.print_success(key)\n clf = classifiers[key]\n utils.print_info(\"\\tFit\")\n clf.fit(train_features, train_groundtruths)\n utils.print_info(\"\\tPredict\")\n predictions = clf.predict(test_features)\n\n if outfilename is not None:\n with open(outfilename, \"w\") as filep:\n for gt, pred in zip(test_groundtruths, predictions):\n filep.write(gt + \",\" + pred + \"\\n\")\n\n # Global\n data = [key]\n data.append(str(precision_score(test_groundtruths, predictions, average='weighted')))\n data.append(str(recall_score(test_groundtruths, predictions, average='weighted')))\n data.append(str(f1_score(test_groundtruths, predictions, average='weighted')))\n data = \",\".join(data)\n if disp:\n print(data)\n else:\n with open(res_dir + \"global.csv\", \"a\") as filep:\n filep.write(data + \",\\n\")\n # Local\n for index, tag in enumerate(list(set(train_groundtruths))):\n precision = precision_score(test_groundtruths, predictions, average=None)\n recall = recall_score(test_groundtruths, predictions, average=None)\n f1 = f1_score(test_groundtruths, predictions, average=None)\n line = key + \",\" + str(precision[index]) + \",\" + str(recall[index]) + \",\" + str(f1[index])\n if disp:\n print(line)\n else:\n with open(res_dir + \"tag_\" + tag + \".csv\", \"a\") as filep:\n filep.write(line + \",\\n\")\n return predictions"
] | [
"0.7286609",
"0.70569706",
"0.6933562",
"0.6903602",
"0.6713493",
"0.6697266",
"0.6591856",
"0.6465765",
"0.64569855",
"0.6450537",
"0.64132303",
"0.6358648",
"0.6337812",
"0.6301233",
"0.62951237",
"0.625972",
"0.6249722",
"0.6241342",
"0.6239845",
"0.6233966",
"0.62224144",
"0.62178457",
"0.6189355",
"0.6170317",
"0.6153322",
"0.6145257",
"0.61218745",
"0.6103668",
"0.6103668",
"0.6102869"
] | 0.73998797 | 0 |
Gets train and test labels for FRII vs Random classification. Arguments | def generate_labels_frii(train_i, test_i, labels):
train = labels[train_i]
test = labels[test_i]
train_y = train == 2
test_y = test == 2
return train_y, test_y | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_labels_fri(train_i, test_i, labels):\n train = labels[train_i]\n test = labels[test_i]\n\n train_y = train == 1\n test_y = test == 1\n\n return train_y, test_y",
"def test_text_classifier_get_labels(self):\n pass",
"def get_test_labels(self):\n raise NotImplementedError",
"def get_train_labels(self):\n raise NotImplementedError",
"def test_intent_classifier_get_labels(self):\n pass",
"def get_train_labels(self, window, scene):\n pass",
"def test_labels(self):\n return self._test_labels",
"def get_train_labels(self, window: Box, scene: Scene) -> Labels:\n raise NotImplementedError()",
"def main():\n\n trainData = os.getcwd() + '/data/traindata.txt'\n trainLabels = os.getcwd() + '/data/trainlabels.txt'\n\n #testData = os.getcwd() + '/data/traindata.txt'\n #testLabels = os.getcwd() + '/data/trainlabels.txt'\n\n testData = os.getcwd() + '/data/testdata.txt'\n testLabels = os.getcwd() + '/data/testlabels.txt'\n\n #trainData = os.getcwd() + '/data/toyData.txt'\n #trainLabels = os.getcwd() + '/data/toyLabel.txt'\n #testData = os.getcwd() +'/data/toyTestData.txt'\n #testLabels = os.getcwd() + '/data/toyTestLabel.txt'\n\n #print(trainData, trainLabels)\n myClassifier = NBClassifier.new(NBClassifier.MODE_BERNOULI)\n myClassifier.setTrainData(trainData, trainLabels)\n #print(myClassifier)\n\n #singleTestData = ['Chinese', 'Chinese', 'Chinese', 'Tokyo', 'Japan']\n #prediction = myClassifier.predict(singleTestData)\n #print(f'{singleTestData} >>> {prediction}')\n predictions = myClassifier.predictSet(testData)\n accuracy = myClassifier.reportAccuracy(testLabels)\n\n #print(predictions)\n print(accuracy)",
"def get_two_class_labels_fortest(csv_file_path_test, stratify_percentage=1):\n \n test_dataframe = pd.read_csv(csv_file_path_test)\n #filtered_input_dataframe = input_dataframe[input_dataframe['ID'].apply(lambda x : 'any' in x) ]\n files_with_ids_fortest = []\n \n # print(input_dataframe.columns.values)\n X_test = list(test_dataframe['ID'])\n \n print(\"Testing sample\",X_test[0])\n \n y_test_df = test_dataframe.drop(test_dataframe.columns[[0,6]], axis = 1)\n \n print(\"testing y samples\")\n \n \n #y = [y_dataframe.columns.values.tolist()] + y_dataframe.values.tolist()\n y_test = y_test_df.values.tolist()\n #print (y[0])\n #print(len(X))\n #print(len(y))\n \n num_samples_train = int(stratify_percentage * len(X))\n num_samples_test = int(stratify_percentage * len(X_test))\n print(\"Num Samples in Training :\", num_samples_train)\n print(\"Num Samples in Testing :\", num_samples_test)\n \n for k,v in list(zip(X_test, y_test)) :\n files_with_ids_fortest.append( (\"_\".join(k.split('_')[:2]), v))\n \n return files_with_ids_fortest",
"def test_model (self, text_test, labels_test):\n print(classification_report(labels_test, self.classify(text_test)))",
"def _fetch_labels(self, list_fams, no_imgs, num_samples) -> Tuple[np.ndarray, List]:\n y_train = np.zeros(num_samples)\n pos = 0\n label = 0\n indexes = []\n for i in no_imgs:\n indexes.append(i)\n print(\"Label:%2d\\tFamily: %15s\\tNumber of images: %d\" % (label, list_fams[label], i))\n for j in range(i):\n y_train[pos] = label\n pos += 1\n label += 1\n return y_train, indexes",
"def get_fashion_mnist_labels(labels): #@save\n text_labels = ['t-shirt', 'trouser', 'pullover', 'dress', 'coat',\n 'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot']\n return [text_labels[int(i)] for i in labels]",
"def classification(trainData, trainLabels, testData, method):\n\n nClass = 2\n classLabels = [0,1]\n\n trainLabelsUnqArr = np.unique(trainLabels)\n\n if method == 'NaiveBayes':\n classifier = GaussianNB()\n model = classifier.fit(trainData, trainLabels)\n result = model.predict(testData)\n proba = model.predict_proba(testData)\n proba = fillinMatrix(proba, trainLabelsUnqArr, nClass)\n probaDf = pd.DataFrame(data=proba, columns=classLabels)\n elif method == 'knnVoting':\n\n classifier = KNeighborsClassifier(5)\n model = classifier.fit(trainData, trainLabels)\n\n result = model.predict(testData)\n\n proba = model.predict_proba(testData)\n proba = fillinMatrix(proba, trainLabelsUnqArr, nClass)\n probaDf = pd.DataFrame(data=proba, columns=classLabels)\n\n elif method == 'RandomForests':\n\n classifier = RandomForestClassifier(max_depth=10, random_state=0)\n model = classifier.fit(trainData, trainLabels)\n\n result = model.predict(testData)\n\n proba = model.predict_proba(testData)\n proba = fillinMatrix(proba, trainLabelsUnqArr, nClass)\n probaDf = pd.DataFrame(data=proba, columns=classLabels)\n ############################################\n importances = model.feature_importances_\n std = np.std([tree.feature_importances_ for tree in model.estimators_],\n axis=0)\n indices = np.argsort(importances)[::-1]\n # Print the feature ranking\n print(\"Feature ranking:\")\n for f in range(trainData.shape[1]):\n print(\"%d. feature %d (%f)\" % (f + 1, indices[f], importances[indices[f]]))\n # Plot the feature importances of the forest\n plt.figure()\n plt.title(\"Feature importances\")\n plt.bar(range(trainData.shape[1]), importances[indices],\n color=\"r\", yerr=std[indices], align=\"center\")\n plt.xticks(range(trainData.shape[1]), indices)\n plt.xlim([-1, trainData.shape[1]])\n plt.show()\n\n elif method == 'SVM':\n\n classifier = svm.SVC(C=3, gamma=0.003, probability=True)\n model = classifier.fit(trainData, trainLabels)\n\n result = model.predict(testData)\n\n proba = model.predict_proba(testData)\n proba = fillinMatrix(proba, trainLabelsUnqArr, nClass)\n probaDf = pd.DataFrame(data=proba, columns=classLabels)\n\n elif method == 'AdaBoost':\n\n classifier = AdaBoostClassifier()\n model = classifier.fit(trainData, trainLabels)\n\n result = model.predict(testData)\n\n proba = model.predict_proba(testData)\n proba = fillinMatrix(proba, trainLabelsUnqArr, nClass)\n probaDf = pd.DataFrame(data=proba, columns=classLabels)\n ############################################\n importances = model.feature_importances_\n std = np.std([tree.feature_importances_ for tree in model.estimators_],\n axis=0)\n indices = np.argsort(importances)[::-1]\n # Print the feature ranking\n print(\"Feature ranking:\")\n for f in range(trainData.shape[1]):\n print(\"%d. 
feature %d (%f)\" % (f + 1, indices[f], importances[indices[f]]))\n # Plot the feature importances of the forest\n plt.figure()\n plt.title(\"Feature importances\")\n plt.bar(range(trainData.shape[1]), importances[indices],\n color=\"r\", yerr=std[indices], align=\"center\")\n plt.xticks(range(trainData.shape[1]), indices)\n plt.xlim([-1, trainData.shape[1]])\n plt.show()\n\n elif method == 'NeuralNetwork':\n classifier = MLPClassifier(alpha=1)\n model = classifier.fit(trainData, trainLabels)\n\n result = model.predict(testData)\n\n proba = model.predict_proba(testData)\n proba = fillinMatrix(proba, trainLabelsUnqArr, nClass)\n probaDf = pd.DataFrame(data=proba, columns=classLabels)\n\n elif method == 'LogisticRegression':\n classifier = LogisticRegression()\n model = classifier.fit(trainData, trainLabels)\n\n result = model.predict(testData)\n\n proba = model.predict_proba(testData)\n proba = fillinMatrix(proba, trainLabelsUnqArr, nClass)\n probaDf = pd.DataFrame(data=proba, columns=classLabels)\n\n elif method == 'LinearSVM':\n classifier = LinearSVC(random_state=0)\n model = classifier.fit(trainData, trainLabels)\n\n result = model.predict(testData)\n\n ############################################\n importances = model.coef_\n # std = np.std([tree.feature_importances_ for tree in model.estimators_],\n plt.plot(importances.shape[1])\n plt.ylabel('some numbers')\n plt.show()\n elif method == 'kNN':\n\n # logger.info(model.coef_)\n # proba = model.predict_proba(testData)\n # proba = fillinMatrix(proba, trainLabelsUnqArr, nClass)\n # probaDf = pd.DataFrame(data=proba, columns=classLabels)\n neigh = KNeighborsClassifier(n_neighbors=3)\n neigh.fit(trainData, trainLabels)\n\n result=neigh.predict(testData)\n probaDf=neigh.predict_proba(testData)\n\n # logger.info(method)\n\n return result, probaDf",
"def train_labels(self):\n return self._train_labels",
"def _classifyROMs(self, classifier, features, clusterFeatures):\n # the actual classifying algorithms is the unSupervisedEnging of the QDataMining of the PP Model\n ## get the instance\n classifier = classifier.interface.unSupervisedEngine\n # update classifier features\n classifier.updateFeatures(features)\n # make the clustering instance)\n classifier.train(clusterFeatures)\n # label the training data\n labels = classifier.evaluate(clusterFeatures)\n return labels",
"def get_labels(info):\n return info.features[\"labels\"].names",
"def get_labels(info):\n return info.features[\"labels\"].names",
"def test_classifiers(train_docs, train_target, test_docs, test_target, min_docs, K, K2, removeStopWords):\n # test_classifiers(train_docs, train_target, test_docs, test_targets, i, 3)\n X_train_counts, X_train_tfidf, X_test_counts, X_test_tfidf = extract_text_features(train_docs, test_docs, min_docs, removeStopWords)\n \n \n num_docs, vocab_size = X_train_counts.shape\n print('Number of (training) documents =',num_docs)\n print('Vocabulary size =',vocab_size)\n \n\n # Now evaluate the classifiers on the test data\n # Print out the accuracy as a percentage for each classifier.\n # np.mean() can be used to calculate the accuracy. Round the accuracy to 2 decimal places.\n\n #predict according to different classifier--evaluate results \n predicted_multNB = fit_and_predict_multinomialNB(X_train_tfidf, train_target, X_test_tfidf)\n predicted_bernNB = fit_and_predict_BernoulliNB(X_train_tfidf, train_target, X_test_tfidf)\n predicted_LR = fit_and_predict_LR(X_train_tfidf, train_target, X_test_tfidf)\n predicted_LR = fit_and_predict_LR(X_train_counts, train_target, X_test_counts)\n predicted_KNN = fit_and_predict_KNN(X_train_tfidf, train_target, X_test_tfidf, K)\n predicted_KNN2 = fit_and_predict_KNN(X_train_tfidf, train_target, X_test_tfidf, K2)\n \n predicted_base = np.array([FreqDist(test_target).most_common(1)[0][0]]*len(test_target))\n\n # count num of correct predictions / total\n np_test_target = np.array(test_target)\n base = np.sum(predicted_base == np_test_target)/len(np_test_target)*100\n multNB = np.sum(predicted_multNB == np_test_target)/len(np_test_target)*100\n bernNB = np.sum(predicted_bernNB == np_test_target)/len(np_test_target)*100\n LR = np.sum(predicted_LR == np_test_target)/len(np_test_target)*100\n KN = np.sum(predicted_KNN == np_test_target)/len(np_test_target)*100\n KN2 = np.sum(predicted_KNN2 == np_test_target)/len(np_test_target)*100\n\n \n print('\\tBase Accuracy: {:.3f}'.format(base))\n print('\\tAccuracy with multinomial naive Bayes: {:.2f}'.format(multNB))\n print('\\tAccuracy with Bernoulli naive Bayes: {:.2f}'.format(bernNB))\n print('\\tAccuracy with logistic regression: {:.2f}'.format(LR))\n print('\\tAccuracy with kNN, k={} classifier: {:2f}'.format(K, KN))\n print('\\tAccuracy with kNN, k={} classifier: {:.2f}'.format(K2, KN2))",
"def get_imagenet_classnames():\r\n return np.loadtxt(open(path_data+'/ilsvrc_2012_labels.txt'), dtype=object, delimiter='\\n')",
"def get_labels(rf_pipeline):\n return rf_pipeline.stages[0].labels",
"def _get_labels(x_label, y_label, title, xlabel_str):\n if x_label is None:\n x_label = xlabel_str\n\n if y_label is None:\n y_label = \"Degree of membership\"\n\n if title is None:\n title = \"Degrees of membership of the samples to each cluster\"\n\n return x_label, y_label, title",
"def classify_image(img_pil):\n results = tpu.ClassifyWithImage(img_pil, top_k=1)\n if len(results) == 0:\n return None, None\n i, score = results[0]\n label = labels[i]\n # print(label + \": \" + str(score))\n return label, score",
"def get_labels(fasta_file):\n\t\tbase_name = basename(fasta_file)\n\t\tname = splitext(base_name)[0]\n\t\tlabel = name.split(\"_\")[-1]\n\t\tassert label == \"pos\" or label == \"hard\", \"AssertionError: label {} not found, possible labels pos, hard.\"\n\t\tif label == \"pos\":\n\t\t\treturn \"Toxin\"\n\t\telif label == \"hard\":\n\t\t\treturn \"No_toxin\"",
"def predictAuthors(training_fvs, labels, test_fvs):\n clf = MultinomialNB()\n clf.fit(training_fvs, labels)\n return clf.predict(test_fvs)",
"def classify(self, features):\n \n class_labels = []\n # TODO: finish this.\n features = np.array(features)\n feat_shape = features.shape\n for indx in range(feat_shape[0]):\n# print list(features[indx,:]), features[indx,:]\n decision = self.root.decide(list(features[indx,:]))\n class_labels.append(decision)\n return class_labels",
"def get_labels(self):\r\n raise NotImplementedError()",
"def load_data_and_labels(data_file=train_file):\n \"\"\"\n There are 7 categories - \n 1. DEMO\n 2. DISE\n 3. TRMT\n 4. GOAL\n 5. PREG\n 6. FMLY\n 7. SOCL\n \"\"\"\n d = {}\n d['DEMO'] = [1, 0, 0, 0, 0, 0, 0]\n d['DISE'] = [0, 1, 0, 0, 0, 0, 0]\n d['TRMT'] = [0, 0, 1, 0, 0, 0, 0]\n d['GOAL'] = [0, 0, 0, 1, 0, 0, 0]\n d['PREG'] = [0, 0, 0, 0, 1, 0, 0]\n d['FAML'] = [0, 0, 0, 0, 0, 1, 0]\n d['SOCL'] = [0, 0, 0, 0, 0, 0, 1]\n\n max_len = -1\n\n #Load data from files\n samples = []\n with open(data_file, 'rb') as csvfile:\n spamreader = csv.reader(csvfile, delimiter='\\t', quotechar='|')\n for i, row in enumerate(spamreader):\n if (row[0] == \"Category\"):\n continue\n print (i, row[1])\n #samples.append([row[0], row[2]])\n #getting class and title = row[0] and row[1] respectively\n samples.append([row[1], row[2], row[0]])\n #split by words\n\n return samples",
"def check_classifier():\n content = []\n labels = []\n file = 'COMP3074-CW1-Dataset.csv'\n content, labels = get_tag(file, \"question_book\", content, labels)\n file = 'name.csv'\n content, labels = get_tag(file, \"question_book\", content, labels)\n file = 'Small_talk.csv'\n content, labels = get_tag(file, \"small_talk\", content, labels, )\n x_train, x_test, y_train, y_test = train_test_split(content, # Sample feature set to be divided\n labels, # The sample result to be divided (label)\n stratify=labels, # Keep the category proportions\n # the same in training and testing\n test_size=0.25, # Refers to the proportion of\n # samples reserved for testing\n random_state=22) # Random seed\n count_vect = CountVectorizer(stop_words=stopwords.words('english'))\n x_train_counts = count_vect.fit_transform(x_train)\n tfidf_transformer = TfidfTransformer(use_idf=True, # Tf_idf\n sublinear_tf=True).fit(x_train_counts)\n x_train_tf = tfidf_transformer.transform(x_train_counts) # Standardize the inherent attributes of the training set,\n # reduce dimensionality and normalize\n classify = LogisticRegression(random_state=0).fit(x_train_tf, y_train) # Logistic regression\n return classify, tfidf_transformer, count_vect",
"def test_text_classifier_get_training_samples(self):\n pass"
] | [
"0.756389",
"0.7096626",
"0.69290054",
"0.69257945",
"0.6868091",
"0.6732344",
"0.65533715",
"0.65446967",
"0.65136015",
"0.64089143",
"0.6353218",
"0.63307375",
"0.6273301",
"0.6269273",
"0.6239302",
"0.62379026",
"0.6231137",
"0.6231137",
"0.6198764",
"0.61917514",
"0.61780524",
"0.61217517",
"0.6121299",
"0.6102165",
"0.6091634",
"0.60877407",
"0.6086347",
"0.6084768",
"0.6074591",
"0.6072886"
] | 0.7583044 | 0 |
Create ground truth array that allows multiple labels per point | def createGTMulti(classesDict, length, gtList):
""" Create array containing label for sample point: """
n_maxLabels = 5 #maximum number of labels that can be assign to one point
y_GT = np.empty([length,n_maxLabels])
y_GT.fill(-1) #-1 corresponds to no label given
classesNotTrained = []
for i in range(len(gtList)):
""" Fill array from start to end of each ground truth label with the correct label: """
if gtList[i][2] == "start":
tmpContext = gtList[i][1]
start = getIndex(float(gtList[i][0]))
# Find the end time of this context:
for j in range(i,len(gtList)):
if ((gtList[j][1] == tmpContext) and (gtList[j][2] == "end")):
end = getIndex(float(gtList[j][0]))
if end >= y_GT.shape[0]:
end = y_GT.shape[0] - 1
""" Fill ground truth array, and check if our classifier was
trained with all labels of the test file, if not give warning: """
if (gtList[i][1] not in classesDict.keys()):
classesNotTrained.append(gtList[i][1])
else:
# Check if we can write into the first column of the y_GT array:
if ((len(np.unique(y_GT[start:end+1,0])) == 1) and
(np.unique(y_GT[start:end+1,0])[0] == -1)):
y_GT[start:end+1,0].fill(classesDict[gtList[i][1]])
# Check if we can write into the second column of the y_GT array:
elif ((len(np.unique(y_GT[start:end+1,1])) == 1) and
(np.unique(y_GT[start:end+1,1])[0] == -1)):
y_GT[start:end+1,1].fill(classesDict[gtList[i][1]])
# Check if we can write into the third column of the y_GT array:
elif ((len(np.unique(y_GT[start:end+1,2])) == 1) and
(np.unique(y_GT[start:end+1,2])[0] == -1)):
y_GT[start:end+1,2].fill(classesDict[gtList[i][1]])
                # Check if we can write into the fourth column of the y_GT array:
elif ((len(np.unique(y_GT[start:end+1,3])) == 1) and
(np.unique(y_GT[start:end+1,3])[0] == -1)):
y_GT[start:end+1,3].fill(classesDict[gtList[i][1]])
                # Check if we can write into the fifth column of the y_GT array:
elif ((len(np.unique(y_GT[start:end+1,4])) == 1) and
(np.unique(y_GT[start:end+1,4])[0] == -1)):
y_GT[start:end+1,4].fill(classesDict[gtList[i][1]])
else:
pdb.set_trace()
print("Problem occurred when filling ground truth array!")
break
if classesNotTrained:
for el in set(classesNotTrained):
print("The classifier wasn't trained with class '" +
el + "'. It will not be considered for testing.")
return y_GT | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_labels(n_samples):\n return np.ones([n_samples, 1]), np.zeros([n_samples, 1])",
"def create_TargetLabel(dataset):\n label_Array = dataset['close_-1_r'].shift(-1)\n label_Array = label_Array.apply(lambda x:1 if x>0.0000 else 0)\n return label_Array",
"def generate_patch_labels(batch_size : int, patch_shape : int, label : int = 1) -> np.ndarray:\n labels = np.full(shape=(batch_size, patch_shape, patch_shape, 1), fill_value=label)\n return labels",
"def make_multilabel_target(num_classes, classes):\n target = np.zeros(num_classes, dtype=np.uint8)\n target[classes] = 1\n return target",
"def create_labels(graphs : List[SpatialGraph] ) -> np.ndarray:\n if not graphs: # None or empty\n return np.array([])\n\n N = graphs[0].num_nodes\n p = graphs[0].dim\n n_batch = len(graphs)\n\n labels = np.zeros((n_batch, N, p))\n \n for i in range(n_batch):\n labels[i,:,:] = graphs[i].get_coords()\n\n return labels",
"def make_fixed_labels(self):\n fixed_labels = []\n for dim in range(self.opt.c_dim):\n t = [0] * self.opt.c_dim\n t[dim] = 1\n t = torch.FloatTensor(t).expand([self.opt.batch_size, self.opt.c_dim])\n fixed_labels.append(t)\n return fixed_labels",
"def encode_labels(labels, nclass=5):\n y = np.zeros((len(labels), nclass)).astype('float32')\n for j, yj in enumerate(labels):\n for i in range(nclass):\n if i+1 == np.floor(yj) + 1:\n y[j, i] = yj - np.floor(yj)\n if i+1 == np.floor(yj):\n y[j, i] = np.floor(yj) - yj + 1\n return y",
"def init_expected_outputs(data, no_labels=26):\n expected_outputs = np.zeros((data.shape[0], no_labels))\n \n for i in range(0,data.shape[0]): \n expected_outputs[i, data[i].astype(int)]=1\n\n return expected_outputs",
"def labels_array(self):\n return _build_label_vector_rows(\n [[(label, 1)] for label in self.labels], self.training_labels)[1:].T",
"def encode_labels(labels, nclass=5):\n Y = np.zeros((len(labels), nclass)).astype('float32')\n for j, y in enumerate(labels):\n for i in range(nclass):\n if i+1 == np.floor(y) + 1:\n Y[j,i] = y - np.floor(y)\n if i+1 == np.floor(y):\n Y[j,i] = np.floor(y) - y + 1\n return Y",
"def binary_labels(output_labels, return_index=False, label_list=None):\n\n # Populate label list if required, otherwise input is used (e.g. for\n # evaluationd data to follow same format as training)\n if label_list == None:\n label_list = [\"OTHER#OTHER\"]\n\n for element in output_labels:\n for quality in element:\n if quality[0] not in label_list:\n label_list.append(quality[0])\n\n labels_binary = []\n\n empty_label = []\n\n for element in label_list:\n empty_label.append(0)\n\n\n # TODO: Array of single aspect variable arrays.\n for element in output_labels:\n labels_binary.append(empty_label[:])\n for quality in element:\n if quality[0] in label_list:\n labels_binary[-1][label_list.index(quality[0])] = 1\n else:\n labels_binary[-1][label_list.index(\"OTHER#OTHER\")] = 1\n # label_index[quality[0]] = label_index['max'] + 1\n # label_index['max'] += 1\n # labels_binary[-1][label_index[quality[0]]] = 1\n\n if return_index:\n # label list acts as a lookup incase of printing classification results\n return np.array(labels_binary), label_list\n else:\n return np.array(labels_binary)",
"def encode_ST_labels(labels):\n return np.array([1 if sentiment == 'bullish' else 0 for sentiment in labels])",
"def prepare_labels(labels, class_mask):\n mask = [1 if elt else -1 for elt in class_mask]\n mask = np.array(mask)\n return labels.dot(mask)",
"def preprocess_labels(label, number_slices):\n labels = [[] for i in range(np.array(label).shape[0])]\n\n for j in range(np.array(label).shape[0]):\n if type(label) is not np.ndarray:\n for i in range(number_slices):\n labels[j].append(np.array(Image.open(label[0][i]), dtype=np.uint8))\n\n label = np.array(labels[0])\n label = label.transpose((1, 2, 0))\n max_mask = np.max(label) * 0.5\n label = np.greater(label, max_mask)\n label = np.expand_dims(label, axis=0)\n\n return label",
"def fake_data(num_images):\n data = np.ndarray(\n shape=(num_images, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS),\n dtype=np.float32)\n labels = np.zeros(shape=(num_images,), dtype=np.int64)\n for image in xrange(num_images):\n label = image % 2\n data[image, :, :, 0] = label - 0.5\n labels[image] = label\n return data, labels",
"def initialization_based(input_array):\n\n # search for the unique labels in the array\n oh_array = np.unique(input_array, return_inverse=True)[1]\n # set the predicted class on 1, and all the other classes on 0\n out = np.zeros((oh_array.shape[0], oh_array.max() + 1), dtype=int)\n out[np.arange(out.shape[0]), oh_array.ravel()] = 1\n return out",
"def fake_data(num_images):\n data = np.ndarray(\n shape=(num_images, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS),\n dtype=np.float32)\n labels = np.zeros(shape=(num_images,), dtype=np.int64)\n for image in xrange(num_images):\n label = image % 2\n data[image, :, :, 0] = label - 0.5\n labels[image] = label\n return data, labels",
"def generate_labels_fri(train_i, test_i, labels):\n train = labels[train_i]\n test = labels[test_i]\n\n train_y = train == 1\n test_y = test == 1\n\n return train_y, test_y",
"def array2(self):\r\n profbox(whoami())\r\n # research\r\n inputLabelID = self.__needleLabelSelector.currentNode().GetID()\r\n labelnode = slicer.mrmlScene.GetNodeByID(inputLabelID)\r\n i = labelnode.GetImageData()\r\n shape = list(i.GetDimensions())\r\n shape.reverse()\r\n a = vtk.util.numpy_support.vtk_to_numpy(i.GetPointData().GetScalars()).reshape(shape)\r\n labels = []\r\n val = [[0, 0, 0] for i in range(a.max() + 1)]\r\n for i in xrange(2, a.max() + 1):\r\n w = numpy.transpose(numpy.where(a == i))\r\n # labels.append(w.mean(axis=0))\r\n val[i] = [0, 0, 0]\r\n val[i][0] = w[int(round(w.shape[0] / 2))][2]\r\n val[i][1] = w[int(round(w.shape[0] / 2))][1]\r\n val[i][2] = w[int(round(w.shape[0] / 2))][0]\r\n if val[i] not in self.previousValues:\r\n labels.append(val[i])\r\n self.previousValues.append(val[i])\r\n return labels",
"def test_basic_labeling(self):\n # data with only 1 feature\n data = array([[-1], [1], [0.5], [0.25], [-0.33], [0]])\n # give 1 if feature value >= 0; otherwise 0\n labels = array([0, 1, 1, 1, 0, 1])\n cdata = LabeledCData(data, labels)\n\n # ensure that labelling is correct\n assert array_equal(cdata.labels, labels)",
"def encode_label(label: np.array, nb_classes: int):\n encoded = np.zeros(nb_classes)\n encoded[int(label)] = 1.\n return encoded",
"def encode_labels(self, y, num_labels):\n onehot = np.zeros((num_labels, y.shape[0]))\n for i in range(y.shape[0]):\n onehot[y[i], i] = 1.0\n return onehot",
"def categorical2mask(X, labels):\n X_shape = X.shape[0:2]\n if type(X_shape) == tuple:\n X_shape = list(X_shape)\n Y = np.zeros(X_shape + [3], dtype=\"uint8\")\n for i, key in enumerate(labels):\n print(X.shape,Y.shape)\n Y[...,0] = np.where(X==i, labels[key][0], Y[...,0])\n Y[...,1] = np.where(X==i, labels[key][1], Y[...,1])\n Y[...,2] = np.where(X==i, labels[key][2], Y[...,2])\n return Y",
"def y_to_z_mapping(self, Y):\n if len(Y[0])!=self.label_width**2:\n print('input labels have different dimension')\n Z = []\n for label in Y:\n z_label = np.array(label)\n for i in range(self.label_width**2):\n z_label = np.concatenate((z_label, (label[i+1:]==label[i]).astype(int)))\n Z.append(z_label)\n return Z",
"def get_pY(pY_true, y_fake):\n nof_objects = len(pY_true)\n\n all_labels = numpy.unique(y_fake)\n label_dict = {i: a for i, a in enumerate(all_labels)}\n nof_labels = len(all_labels)\n\n pY = numpy.zeros([nof_objects, nof_labels])\n\n for o in range(nof_objects):\n for c_idx, c in enumerate(all_labels):\n if y_fake[o] == c:\n pY[o, c_idx] = pY_true[o]\n else:\n pY[o, c_idx] = float(1 - pY_true[o]) / (nof_labels - 1)\n\n return pY, label_dict",
"def compute_labels(pos, neg):\n labels = np.zeros(len(pos) + len(neg), dtype=np.int8)\n labels[:len(pos)] = 1\n labels[len(pos):] = 0\n return labels",
"def create_label_array(el):\n num_digits = len(el) # first element of array holds the count\n labels_array = np.ones([MAX_LABELS+1], dtype=int) * 10\n labels_array[0] = num_digits\n for n in range(num_digits):\n if el[n] == 10: el[n] = 0 # reassign 0 as 10 for one-hot encoding\n labels_array[n+1] = el[n]\n return labels_array",
"def create_label_array(el):\n num_digits = len(el) # first element of array holds the count\n labels_array = np.ones([MAX_LABELS+1], dtype=int) * 10\n labels_array[0] = num_digits\n for n in range(num_digits):\n if el[n] == 10: el[n] = 0 # reassign 0 as 10 for one-hot encoding\n labels_array[n+1] = el[n]\n return labels_array",
"def one_hot_encode(label, label_values):\n semantic_map = []\n for colour in label_values:\n equality = np.equal(label, colour)\n class_map = np.all(equality, axis = -1)\n semantic_map.append(class_map)\n semantic_map = np.stack(semantic_map, axis=-1)\n\n return semantic_map",
"def one_hot_encoding((uri, label), all_labels):\n labels = [0]*NUM_LABELS\n for i, l in enumerate(all_labels):\n if label == l:\n labels[i] = 1\n yield uri, labels"
] | [
"0.6753623",
"0.6318638",
"0.62710094",
"0.6227057",
"0.61745167",
"0.6169001",
"0.616126",
"0.6144299",
"0.61347485",
"0.6133566",
"0.60322136",
"0.60288686",
"0.60180634",
"0.6002986",
"0.5985894",
"0.5984476",
"0.59774005",
"0.5975838",
"0.59698594",
"0.59608054",
"0.5927142",
"0.59105545",
"0.5906748",
"0.58984715",
"0.5891674",
"0.5890673",
"0.58854157",
"0.58854157",
"0.5875875",
"0.5853121"
] | 0.6395419 | 1 |
The method first checks, for every 2s window, whether all amplitude values lie below the silence threshold and returns silence for those intervals. After that, a majority vote of 2s length is applied. | def majorityVoteSilence(y_Raw, amps, silenceClassNum):
y_raw = y_Raw.copy()
silenceThreshold = 1000
majVotWindowLength = 2.0 #in seconds
windowLength = 0.032
frameLengthFloat = math.ceil(majVotWindowLength/windowLength)
frameLength = int(frameLengthFloat)
resArray = np.empty(y_raw.shape)
n_frames = int(math.ceil(y_raw.shape[0]/frameLengthFloat))
for i in range(n_frames):
if ((i+1) * frameLength) < y_raw.shape[0]:
tmpAmps = amps[(i * frameLength):(((i+1) * frameLength))]
if tmpAmps.max() >= silenceThreshold:
#if True:
tmpArray = y_raw[(i * frameLength):(((i+1) * frameLength))]
""" Get most frequent number in that frames: """
count = np.bincount(tmpArray)
tmpMostFrequent = np.argmax(count)
""" Fill all elements with most frequent number: """
tmpArray.fill(tmpMostFrequent)
""" Write it into our result array: """
resArray[(i * frameLength):(((i+1) * frameLength))] = tmpArray
else:
"""If all amplitudes are below threshold, the
sample is considered silent:"""
resArray[(i * frameLength):(((i+1) * frameLength))] = silenceClassNum
else:
tmpAmps = amps[(i * frameLength):y_raw.shape[0]]
if tmpAmps.max() >= silenceThreshold:
#if True:
tmpArray = y_raw[(i * frameLength):y_raw.shape[0]]
""" Get most frequent number in that frames and fill
all elements in the frame with it: """
count = np.bincount(tmpArray)
tmpMostFrequent = np.argmax(count)
""" Fill all elements with most frequent number: """
tmpArray.fill(tmpMostFrequent)
""" Write it into our result array: """
resArray[(i * frameLength):y_raw.shape[0]] = tmpArray
else:
"""If all amplitudes are below threshold, the
sample is considered silent:"""
resArray[(i * frameLength):y_raw.shape[0]] = silenceClassNum
return resArray | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def determine_silence_threshold(self):\n loudest_sound_cohort_size = 0.2 # Top 20% are counted in the loudest sound group.\n silence_threshold_multiplier = 1.6 # Sounds must be at least 1.6x as loud as the loudest silence\n\n rospy.loginfo(\"Getting intensity values from mic.\")\n self.open_stream()\n tss = self.total_silence_samples\n values = [math.sqrt(abs(audioop.avg(self.stream.read(self.chunk_size), self.audio_format_width)))\n for _ in range(tss)]\n values = sorted(values, reverse=True)\n sum_of_loudest_sounds = sum(values[:int(tss * loudest_sound_cohort_size)])\n total_samples_in_cohort = int(tss * loudest_sound_cohort_size)\n average_of_loudest_sounds = sum_of_loudest_sounds / total_samples_in_cohort\n rospy.loginfo(\"Average audio intensity is %d\" % average_of_loudest_sounds)\n self.silence_threshold = average_of_loudest_sounds * silence_threshold_multiplier\n rospy.loginfo(\"Silence threshold set to %d \" % self.silence_threshold)\n self.close_stream()",
"def silence_intervals(file_path,file_name):\r\n nsil_start_time=[]\r\n nsil_end_time=[]\r\n sil_start_time=[]\r\n sil_end_time=[]\r\n #read file \r\n audio, sample_rate = librosa.load(os.path.join(file_path,file_name))\r\n \r\n #silence extraction using librosa\r\n nsil_intv=librosa.effects.split(audio, top_db=30).astype('float32') / sample_rate\r\n \r\n #silence extraction using pyAudioanalysis\r\n # [Fs, x] = aIO.readAudioFile(os.path.join(file_path,file_name))\r\n # nsil_intv = np.array(aS.silenceRemoval(x, Fs, 0.020, 0.020, smoothWindow = 0.7, Weight = 0.3, plot = False))\r\n # print \"non-sil segments=\"+str(nsil_intv)\r\n\r\n #silence detection using webrtcvad (voice activity detection)\r\n #nsil_intv=np.array(vad_webrtcvad(file_path,file_name))\r\n\r\n\r\n dur=librosa.get_duration(y=audio, sr=sample_rate)\r\n print nsil_intv\r\n print dur\r\n print sample_rate\r\n curr_sil_start=0.0\r\n curr_sil_end=0.0\r\n for i in range(nsil_intv.shape[0]):\r\n nsil_start_time.append(nsil_intv[i][0])\r\n #sil_start_time=list(np.array(sil_start_time)/sample_rate)\r\n\r\n nsil_end_time.append(nsil_intv[i][1])\r\n #sil_end_time=list(np.array(sil_end_time)/sample_rate)\r\n\r\n for i in range(len(nsil_start_time)):\r\n curr_sil_end=nsil_start_time[i]\r\n sil_start_time.append(str(curr_sil_start))\r\n sil_end_time.append(str(curr_sil_end))\r\n curr_sil_start=nsil_end_time[i]\r\n\r\n print sil_start_time\r\n print sil_end_time\r\n return sil_start_time,sil_end_time",
"def silence(score0, score1):\n return silence",
"def split_on_silence_threshold(wav_file, dest_dir):\n # Read the file\n audioSegment = AudioSegment.from_wav(wav_file)\n # Calculating the silence threshold\n # Normalizing the audio file belfore finding the threshold\n full_audio_wav = normalize(audioSegment)\n loudness_ms_list = [] # Save the audio levels of all the chunks\n for ms_chunk in full_audio_wav:\n loudness_ms_list.append(round(ms_chunk.dBFS))\n print(\"Audio levels are recorded\", file=sys.stderr)\n # Using pandas df for easier manipulation\n df = pd.DataFrame(loudness_ms_list)\n df[0] = df[df[0] != float(\"-inf\")] # Remove the very low levels\n st = df[0].mean()\n st = st if st < -16 else -16 # Because -16db is default\n # Splits the audio if silence duration is MSL long\n MSL = 500 # minimum silence length in ms\n chunks = split_on_silence(\n full_audio_wav, \n # split on silences longer than 500ms (500ms)\n min_silence_len=MSL, \n # anything under -16 dBFS is considered silence\n silence_thresh=st, \n # keep 200 ms of leading/trailing silence\n keep_silence=200, \n )\n # Saving all the chunks\n print(\"Writing all the files, this may take some time!\", file=sys.stderr)\n for index, chunk in enumerate(chunks):\n chunk_file_name = os.path.join(dest_dir, \"sample_{}.wav\".format(str(index).zfill(10)))\n print(\"Saving the file to \" + chunk_file_name, file=sys.stderr)\n # You can export as mp3 etc, note that it has dependency on ffmpeg\n chunk.export(chunk_file_name, format=\"wav\")",
"def split( self, rSilenceTresholdPercent = 0.1, rSilenceMinDuration = 0.3, nExtractJustFirsts = -1 ):\n nLimit = int( self.getSampleMaxValue() * rSilenceTresholdPercent / 100 ) \n print( \"INF: sound.Wav.split: splitting a sound of %5.3fs, using silence limits at %d for %5.3fs\" % (self.rDuration, nLimit, rSilenceMinDuration) ) \n aSplitted = []\n \n precalcWavIsNotSilence = np.abs(self.data)>nLimit\n\n #~ print self\n \n nCurrentPos = 0 # in data index (not sample)\n nSilenceMinLenData = rSilenceMinDuration * self.nAvgBytesPerSec * 8 / self.nNbrBitsPerSample\n while( nCurrentPos < len(self.data) ):\n \n # first find the beginning of a sound \n nFirstNonSilenceIndex = findFirstTrueValue( precalcWavIsNotSilence[nCurrentPos:] )\n #~ print( \"nFirstNonSilenceIndex (brut): %d\" % nFirstNonSilenceIndex )\n if( nFirstNonSilenceIndex == -1 ):\n # all remaining sound are silence!\n break\n nFirstNonSilenceIndex += nCurrentPos\n nNumFirstSample = nFirstNonSilenceIndex/self.nNbrChannel\n print( \"INF: sound.Wav.split: found a sound at sample %d\" % nNumFirstSample )\n nCurrentPos = nFirstNonSilenceIndex # so at the end, we're stopping\n \n # then find end\n nEndOfSilence = nNumFirstSample*self.nNbrChannel # init of the loop\n while( nEndOfSilence < len(self.data) ):\n #nFirstSilenceIndex = np.argmax( np.abs(self.data[nEndOfSilence:])<=nLimit )\n nFirstSilenceIndex = findFirstFalseValue( precalcWavIsNotSilence[nEndOfSilence:] ) \n #~ print( \"nFirstSilenceIndex (brut): %d (from %d)\" % (nFirstSilenceIndex, nEndOfSilence) )\n if( nFirstSilenceIndex == -1 ):\n break\n nFirstSilenceIndex += nEndOfSilence\n # ensure there's enough silence\n nEndOfSilence = findFirstTrueValue( precalcWavIsNotSilence[nFirstSilenceIndex:] )\n #~ print( \"nEndOfSilence (brut): %d (data: %d) (offset: %d)\" % (nEndOfSilence, self.data[nFirstSilenceIndex+nEndOfSilence],nEndOfSilence + nFirstSilenceIndex) )\n # positionnate onto the end of the silence for next time\n if( nEndOfSilence == -1 ):\n nCurrentPos = len(self.data)\n else:\n nCurrentPos = nEndOfSilence + nFirstSilenceIndex\n \n if( nEndOfSilence > nSilenceMinLenData or nEndOfSilence == -1 ):\n break\n nEndOfSilence += nFirstSilenceIndex\n # while - end\n \n # each time we're out, we've got a silence or we're at the end => new split\n if( nFirstSilenceIndex == -1 ):\n break\n nNumLastSample = nFirstSilenceIndex/self.nNbrChannel\n print( \"INF: sound.Wav.split: found the end of that sound at sample %d\" % nNumLastSample )\n if( nNumLastSample - nNumFirstSample > 4000 ):\n w = Wav()\n w.copyHeader( self )\n w.data = np.copy(self.data[nNumFirstSample*self.nNbrChannel:nNumLastSample*self.nNbrChannel])\n nPeakMax = max( max( w.data ), -min( w.data ) )\n if( nPeakMax > self.getSampleMaxValue() / 8 ): # remove glitch sound\n w.updateHeaderSizeFromDataLength()\n print( \"INF: sound.Wav.split: new split of %5.2fs\" % w.rDuration )\n aSplitted.append( w )\n #~ print( \"nCurLocalVs: %s\" % nCurLocalVs )\n if( nExtractJustFirsts != -1 and nExtractJustFirsts == len(aSplitted) ):\n print( \"WRN: sound.Wav.split: got enough split (%d), leaving...\" % len(aSplitted) )\n break\n # while - end\n print( \"INF: sound.Wav.split: created %d wav(s)\" % len( aSplitted ) )\n return aSplitted",
"def removeGlitch( self, rGlitchMaxTresholdPercent = 5., rGlitchMaxDurationSec = 0.01, rSilenceTresholdPercent = 1., rSilenceMinDurationSec = 0.020 ):\n timeBegin = time.time()\n nGlitchLimit = int( self.getSampleMaxValue() * rGlitchMaxTresholdPercent / 100 )\n nSilenceLimit = int( self.getSampleMaxValue() * rSilenceTresholdPercent / 100 )\n \n nGlitchNumSampleMaxDuration = int( rGlitchMaxDurationSec * self.nSamplingRate )\n nSilenceNumSampleMinDuration = int( rSilenceMinDurationSec * self.nSamplingRate )\n \n rMarginAroundSilenceBlanking = 0.1 # in sec\n nSilenceAroundSilenceBlanking = int( rMarginAroundSilenceBlanking * self.nSamplingRate )\n \n logging.debug( \"nSilenceLimit: %d, nGlitchLimit: %d, nGlitchNumSampleMaxDuration: %d, nSilenceNumSampleMinDuration: %d\" % ( nSilenceLimit, nGlitchLimit, nGlitchNumSampleMaxDuration, nSilenceNumSampleMinDuration ) )\n \n aPosGlitchBegin = [0]*self.nNbrChannel # for each channel, the position of beginning glitch\n aPosSilenceBegin = [0]*self.nNbrChannel # for each channel, the position of beginning silence\n aPosLastSoundEnd = [0]*self.nNbrChannel # for each channel, the last time with sound\n anState = [0]*self.nNbrChannel # for each channel: the nature of current sound: 0: real silence, 1: glitch, 2: sound, 3: short silence after glitch, 4: short silence after sound\n\n nNbrGlitch = 0\n nNumSample = 0\n nNbrSampleReplace = 0\n while( True ):\n for nNumChannel in range( self.nNbrChannel ):\n val = self.data[(nNumSample*self.nNbrChannel)+nNumChannel]\n val = abs(val)\n nCurrentState = anState[nNumChannel]\n newState = nCurrentState\n \n if( nCurrentState == 0 ):\n if( val > nGlitchLimit ):\n newState = 2\n elif( val > nSilenceLimit ):\n newState = 1\n aPosGlitchBegin[nNumChannel] = nNumSample\n elif( nCurrentState == 1 ):\n if( val > nGlitchLimit ):\n newState = 2\n elif( val < nSilenceLimit ):\n newState = 3\n aPosSilenceBegin[nNumChannel] = nNumSample\n elif( nNumSample - aPosGlitchBegin[nNumChannel] >= nGlitchNumSampleMaxDuration ):\n # too long => sound\n newState = 2\n elif( nCurrentState == 2 ):\n if( val < nSilenceLimit ):\n newState = 4\n aPosSilenceBegin[nNumChannel] = nNumSample\n aPosLastSoundEnd[nNumChannel] = nNumSample\n elif( nCurrentState == 3 ):\n if( val > nGlitchLimit ):\n newState = 2\n elif( val > nSilenceLimit ):\n newState = 1\n elif( nNumSample - aPosSilenceBegin[nNumChannel] >= nSilenceNumSampleMinDuration ):\n newState = 0\n # erase this glitch\n logging.info( \"Channel%d: Erasing glitch between %s (%5.3fs) and %s (%5.3fs)\" % (nNumChannel, aPosGlitchBegin[nNumChannel],aPosGlitchBegin[nNumChannel]/float(self.nSamplingRate), nNumSample, nNumSample/float(self.nSamplingRate) ) )\n nNbrGlitch += 1\n self.data[ (aPosGlitchBegin[nNumChannel]*self.nNbrChannel)+nNumChannel:(nNumSample*self.nNbrChannel)+nNumChannel:self.nNbrChannel]=[0]*(nNumSample-aPosGlitchBegin[nNumChannel])\n elif( nCurrentState == 4 ):\n if( val > nSilenceLimit ):\n newState = 2\n elif( nNumSample - aPosSilenceBegin[nNumChannel] >= nSilenceNumSampleMinDuration ):\n newState = 0\n # nothing to do!\n \n if( newState != nCurrentState ):\n if( nNumSample < 300000 ):\n logging.debug( \"Channel%d: sample: %d (%5.3fs), new state: %d, data: %d\" % (nNumChannel,nNumSample,nNumSample/float(self.nSamplingRate), newState,val) )\n anState[nNumChannel] = newState\n if( newState == 2 ):\n # we add a small respiration to leave sound trail and attacks\n if( aPosLastSoundEnd[nNumChannel] == 0 ):\n nBegin = 0\n else:\n nBegin = aPosLastSoundEnd[nNumChannel] + 
nSilenceAroundSilenceBlanking\n nEnd = nNumSample - nSilenceAroundSilenceBlanking\n if( nBegin < nEnd ):\n logging.debug( \"Channel%d: Blanking silence between %s (%5.3fs) and %s (%5.3fs)\" % ( nNumChannel, nBegin, nBegin/float(self.nSamplingRate), nEnd, nEnd/float(self.nSamplingRate) ) )\n self.data[ (nBegin*self.nNbrChannel)+nNumChannel:(nEnd*self.nNbrChannel)+nNumChannel:self.nNbrChannel]=[0]*(nEnd-nBegin)\n \n # for each chan - end\n nNumSample += 1\n if( nNumSample % 10000 == 0 ):\n #TODO: unpack to be able to modify just a bit of the chain OR look how to remove a bit of the chain without compy everything (super long)\n logging.debug( \"nNumSample: %d (state[0]: %d)\" % (nNumSample, anState[0]) ) \n \n if( nNumSample >= self.nNbrSample ):\n break\n # while - end\n \n rDuration = time.time()-timeBegin\n \n logging.info( \"removeGlitch: nNbrGlitch: %d, (time taken: %5.3fs)\" % (nNbrGlitch, rDuration ) )\n \n return True",
"def detect_silences(sound, sil_threshold, sil_duration):\n textgrid = call(sound, 'To TextGrid (silences)', 100, 0.0, sil_threshold, sil_duration, 0.1, 'silence', 'speech')\n\n return textgrid",
"def silence_handler(wav, sr, fl=320, fs=80, \n max_thres_below=30, \n min_thres=-55, \n shortest_len_in_ms=50,\n flag_output=0):\n assert fs < fl, \"Frame shift should be smaller than frame length\"\n \n frames = buffering(wav, fl, fl - fs, 'nodelay')\n windowed_frames = windowing(frames)\n \n frame_energy = 20*np.log10(np.std(frames, axis=1)+np.finfo(np.float32).eps)\n frame_energy_max = np.max(frame_energy)\n \n frame_tag = np.bitwise_and(\n (frame_energy > (frame_energy_max - max_thres_below)),\n frame_energy > min_thres)\n frame_tag = np.asarray(frame_tag, dtype=np.int)\n \n seg_len_thres = shortest_len_in_ms * sr / 1000 / fs\n \n \n def ignore_short_seg(frame_tag, seg_len_thres):\n frame_tag_new = np.zeros_like(frame_tag) + frame_tag\n # boundary of each segment\n seg_bound = np.diff(np.concatenate(([0], frame_tag, [0])))\n # start of each segment\n seg_start = np.argwhere(seg_bound == 1)[:, 0]\n # end of each segment\n seg_end = np.argwhere(seg_bound == -1)[:, 0]\n assert seg_start.shape[0] == seg_end.shape[0], \\\n \"Fail to extract segment boundaries\"\n \n # length of segment\n seg_len = seg_end - seg_start\n seg_short_ids = np.argwhere(seg_len < seg_len_thres)[:, 0]\n for idx in seg_short_ids:\n start_frame_idx = seg_start[idx]\n end_frame_idx = seg_end[idx]\n frame_tag_new[start_frame_idx:end_frame_idx] = 0\n return frame_tag_new\n \n # work on non-speech, 1-frame_tag indicates non-speech frames\n frame_process_sil = ignore_short_seg(1-frame_tag, seg_len_thres)\n # reverse the sign\n frame_process_sil = 1 - frame_process_sil\n \n # work on speech\n frame_process_all = ignore_short_seg(frame_process_sil, seg_len_thres)\n \n # separate non-speech and speech segments\n # do overlap and add\n frame_tag = frame_process_all\n # buffer for speech segments\n spe_buf = np.zeros([np.sum(frame_tag) * fs + fl], dtype=wav.dtype)\n # buffer for non-speech segments\n sil_buf = np.zeros([np.sum(1-frame_tag) * fs + fl], dtype=wav.dtype)\n spe_fr_pt = 0\n non_fr_pt = 0\n for frame_idx, flag_speech in enumerate(frame_tag):\n if flag_speech:\n spe_buf[spe_fr_pt*fs:spe_fr_pt*fs+fl] += windowed_frames[frame_idx]\n spe_fr_pt += 1\n else:\n sil_buf[non_fr_pt*fs:non_fr_pt*fs+fl] += windowed_frames[frame_idx]\n non_fr_pt += 1\n \n if flag_output == 1: \n return spe_buf\n elif flag_output == 2:\n return sil_buf\n else:\n return spe_buf, sil_buf, frame_tag",
"def remove_silence(y, threshold=-50, nb_sample=4096): \r\n from scipy.ndimage.filters import maximum_filter1d \r\n \r\n if np.max(y) != 1.0:\r\n raise ValueError(\"Input signal is expected to be normalised to 1\")\r\n \r\n # Ignore log(0) warnings\r\n np.seterr(divide = 'ignore') \r\n y_db = 20 * np.log10(np.abs(y))\r\n np.seterr(divide = 'warn') \r\n \r\n y_envelope = maximum_filter1d(y_db, nb_sample) \r\n mask = y_envelope >= threshold\r\n y_out = y[mask]\r\n \r\n return(y_out)",
"def split_vad(silence_probs: List[float], p_silence_threshold: float, len_threshold: int) -> List[Tuple[int, int]]:\n segments = []\n\n start = None\n i = 0\n n = len(silence_probs)\n\n while i < len(silence_probs) and silence_probs[i] > p_silence_threshold:\n i += 1\n # supported invariants: `start` points to the frame where speech starts, i >= start\n start = i\n\n while i < n:\n # scroll until first silence frame\n if silence_probs[i] < p_silence_threshold:\n i += 1\n continue\n\n # now i points to the first silence frame\n # look ahead: do we have at least len_threshold silence frames?\n all_silence = True\n for j in range(i + 1, min(i + len_threshold, n)):\n all_silence = all_silence and silence_probs[j] > p_silence_threshold\n if not all_silence:\n break\n\n if not all_silence:\n # no we don't: disregard the silence, go further\n # starting from the first non-silence frame\n i = j\n else:\n # we do have enough silence for a split\n if i - start > len_threshold:\n segments.append((start, i))\n\n while i < n and silence_probs[i] > p_silence_threshold:\n i += 1\n start = i\n i += 1\n\n if i - start > len_threshold and start < n:\n segments.append((start, i))\n\n return segments",
"def get_silence_threshold(sound, lower_quantile):\n soundint = sound.to_intensity()\n max_intensity = call(soundint, 'Get quantile', 0.0, 0.0, 1)\n sil_intensity = call(soundint, 'Get quantile', 0.0, 0.0, lower_quantile)\n return sil_intensity - max_intensity",
"def silence_removal(signal, sampling_rate, st_win, st_step, smooth_window=0.5,\n weight=0.5, plot=False):\n\n if weight >= 1:\n weight = 0.99\n if weight <= 0:\n weight = 0.01\n\n # Step 1: feature extraction\n signal = audioBasicIO.stereo_to_mono(signal)\n st_feats, _ = stf.feature_extraction(signal, sampling_rate,\n st_win * sampling_rate,\n st_step * sampling_rate)\n\n # Step 2: train binary svm classifier of low vs high energy frames\n # keep only the energy short-term sequence (2nd feature)\n st_energy = st_feats[1, :]\n en = np.sort(st_energy)\n # number of 10% of the total short-term windows\n st_windows_fraction = int(len(en) / 10)\n\n # compute \"lower\" 10% energy threshold\n low_threshold = np.mean(en[0:st_windows_fraction]) + 1e-15\n\n # compute \"higher\" 10% energy threshold\n high_threshold = np.mean(en[-st_windows_fraction:-1]) + 1e-15\n\n # get all features that correspond to low energy\n low_energy = st_feats[:, np.where(st_energy <= low_threshold)[0]]\n\n # get all features that correspond to high energy\n high_energy = st_feats[:, np.where(st_energy >= high_threshold)[0]]\n\n # form the binary classification task and ...\n features = [low_energy.T, high_energy.T]\n # normalize and train the respective svm probabilistic model\n\n # (ONSET vs SILENCE)\n features_norm, mean, std = at.normalize_features(features)\n svm = at.train_svm(features_norm, 1.0)\n\n # Step 3: compute onset probability based on the trained svm\n prob_on_set = []\n for index in range(st_feats.shape[1]):\n # for each frame\n cur_fv = (st_feats[:, index] - mean) / std\n # get svm probability (that it belongs to the ONSET class)\n prob_on_set.append(svm.predict_proba(cur_fv.reshape(1, -1))[0][1])\n prob_on_set = np.array(prob_on_set)\n\n # smooth probability:\n prob_on_set = smooth_moving_avg(prob_on_set, smooth_window / st_step)\n\n # Step 4A: detect onset frame indices:\n prog_on_set_sort = np.sort(prob_on_set)\n\n # find probability Threshold as a weighted average\n # of top 10% and lower 10% of the values\n nt = int(prog_on_set_sort.shape[0] / 10)\n threshold = (np.mean((1 - weight) * prog_on_set_sort[0:nt]) +\n weight * np.mean(prog_on_set_sort[-nt::]))\n\n max_indices = np.where(prob_on_set > threshold)[0]\n # get the indices of the frames that satisfy the thresholding\n index = 0\n seg_limits = []\n time_clusters = []\n\n # Step 4B: group frame indices to onset segments\n while index < len(max_indices):\n # for each of the detected onset indices\n cur_cluster = [max_indices[index]]\n if index == len(max_indices)-1:\n break\n while max_indices[index+1] - cur_cluster[-1] <= 2:\n cur_cluster.append(max_indices[index+1])\n index += 1\n if index == len(max_indices)-1:\n break\n index += 1\n time_clusters.append(cur_cluster)\n seg_limits.append([cur_cluster[0] * st_step,\n cur_cluster[-1] * st_step])\n\n # Step 5: Post process: remove very small segments:\n min_duration = 0.2\n seg_limits_2 = []\n for s_lim in seg_limits:\n if s_lim[1] - s_lim[0] > min_duration:\n seg_limits_2.append(s_lim)\n seg_limits = seg_limits_2\n\n if plot:\n time_x = np.arange(0, signal.shape[0] / float(sampling_rate), 1.0 /\n sampling_rate)\n\n plt.subplot(2, 1, 1)\n plt.plot(time_x, signal)\n for s_lim in seg_limits:\n plt.axvline(x=s_lim[0], color='red')\n plt.axvline(x=s_lim[1], color='red')\n plt.subplot(2, 1, 2)\n plt.plot(np.arange(0, prob_on_set.shape[0] * st_step, st_step), \n prob_on_set)\n plt.title('Signal')\n for s_lim in seg_limits:\n plt.axvline(x=s_lim[0], color='red')\n plt.axvline(x=s_lim[1], color='red')\n 
plt.title('svm Probability')\n plt.show()\n\n return seg_limits",
"def _check_for_noise(self) -> None:\n safety_stop = 5\n while self._has_noise() and safety_stop > 0:\n self.filter(size=3)\n safety_stop -= 1",
"def __thresholdInput(self,samples):\n absSamples = np.abs(samples) # 1 ms\n thresh = self.peakThresholdScale*np.mean(absSamples) # 0.2 ms\n i = np.where(absSamples>thresh)[0] # 1e-5 s\n samples[i] = thresh * (samples[i]/absSamples[i]) # 8e-5 s\n # Do it again in case the spikes were really loud\n absSamples[i] = np.abs(samples[i])\n thresh = self.peakThresholdScale*np.mean(absSamples)\n i = np.where(absSamples>thresh)[0]\n self.clippedPeakIPure = i # All peaks that are clipped at first round are clipped again. Requires that the peaks in first round are not set to 0\n samples[i] = thresh * (samples[i]/absSamples[i])\n # Mark peaks close to each other\n if len(self.clippedPeakIPure)>0:\n # t = time.time()\n # Mark peaks close to each other as continuous\n diffPeaks = np.diff(self.clippedPeakIPure)\n gapsAll = np.where(diffPeaks>1)[0]\n self.peakMinGap = 100\n gaps = np.where(diffPeaks[gapsAll] < self.peakMinGap)[0] # find gaps smaller than 100\n gapsLen = diffPeaks[gapsAll[gaps]] # length of the gaps\n gapsIdx = gapsAll[gaps] # Index of all gaps\n\n\n # fill the gaps smaller than self.peakMinGap\n pp = np.zeros(self.Nfft,dtype=np.int8)\n pp[self.clippedPeakIPure] = 1\n for i in range(len(gapsLen)):\n pp[self.clippedPeakIPure[gapsIdx[i]]:self.clippedPeakIPure[gapsIdx[i]]+gapsLen[i]] = 1\n\n self.clippedPeakI = np.where(pp==1)[0]\n else:\n self.clippedPeakI = self.clippedPeakIPure.copy()\n if log.level == logging.DEBUG:\n log.debug('clipped peaks ' + str(len(self.clippedPeakIPure)))",
"def trim_silence(audio, noise_threshold=150):\n start = None\n end = None\n\n for idx, point in enumerate(audio):\n if abs(point) > noise_threshold:\n start = idx\n break\n\n # Reverse the array for trimming the end\n for idx, point in enumerate(audio[::-1]):\n if abs(point) > noise_threshold:\n end = len(audio) - idx\n break\n\n return audio[start:end]",
"def peakRecognition(y, sg_window, threshold):\n\n corrected_sg2 = savgol_filter(\n y, window_length=sg_window, polyorder=3, deriv=2)\n\n peaks_all = []\n\n for row in corrected_sg2:\n peaks = argrelmin(row)[0]\n peaks = [peak for peak in peaks if row[peak] < -threshold] # Remove peaks below threshold\n\n # Combine peaks w/o positive 2nd derivative between them\n peak_condensing = []\n peaks_condensed = []\n for j in range(len(row)):\n if j in peaks:\n peak_condensing.append(j)\n if row[j] > 0 and len(peak_condensing) > 0:\n peaks_condensed.append(int(np.mean(peak_condensing)))\n peak_condensing = []\n if len(peak_condensing) > 0:\n peaks_condensed.append(int(np.mean(peak_condensing)))\n\n peaks_all.append(peaks_condensed)\n bar3.update(bar3.value + 1)\n\n return peaks_all",
"def remove_silence_audio() -> None:\n # Read the wav file and get rate and list of data\n rate, data = scipy.io.wavfile.read('Test.wav')\n\n # Create list for data of amended wav file\n data2 = []\n\n # Loop through data of original file and add data that doesn't meed condition: values >= -10 and <= 10\n for i in range(len(data)):\n if data[i][0] >= -10 and data[i][0] <= 10:\n pass\n else:\n data2.append(data[i])\n\n # Create NumPy array from revised data\n data2 = np.asarray(data2, dtype=np.int16)\n\n # Write new data to wav file\n scipy.io.wavfile.write('Test.wav', rate, data2)\n\n return None",
"def snip_audio(data, snip_length=4, cutoff=0.25, min_snips=None, max_snips=None,\n num_jitters=None, jitter=0.25,\n rate=44100, log=False):\n if max_snips is None:\n if min_snips is None:\n min_snips = 1\n max_snips = max(min_snips, int((float(len(data))/rate)/3.0))\n # Pad data with (snip_length * rate / 2) zeros.\n chop = np.lib.pad(data, int(snip_length*rate/2), 'constant')\n if log:\n logging.info(\"Data padded with %.1f s of zeros.\" %\n (float(snip_length)/2))\n snips = []\n logs = []\n max_sum = 0\n count = 0\n sum_ratio = 1\n\n while True:\n current_sum, start_idx, end_idx = find_loudest_subset(chop, snip_length,\n rate=rate)\n max_sum = max(max_sum, current_sum)\n sum_ratio = float(current_sum) / max_sum\n if sum_ratio < cutoff:\n break\n collection = []\n if num_jitters is None:\n collection.append(np.copy(chop[start_idx : end_idx]))\n else:\n for j in xrange(num_jitters):\n offset = int(jitter * rate * random.uniform(-1, 1))\n try:\n collection.append(np.copy(chop[start_idx+offset : end_idx+offset]))\n except IndexError:\n collection.append(np.copy(chop[start_idx : end_idx]))\n logs.append((sum_ratio, max_sum, start_idx, end_idx))\n chop[start_idx : end_idx] = 0\n snips.append(collection)\n count += 1\n if count >= max_snips:\n break\n return snips, logs",
"def noise(self, freq: int, /) -> None:",
"def detect_freqs(self):\n n_fft_bins = self._config[\"audio_config\"][\"N_FFT_BINS\"]\n channel_avgs = []\n differences = []\n \n for i in range(n_fft_bins):\n channel_avgs.append(sum(self.freq_channels[i])/len(self.freq_channels[i]))\n differences.append(((self.freq_channels[i][0]-channel_avgs[i])*100)//channel_avgs[i])\n for i in [\"beat\", \"low\", \"mid\", \"high\"]:\n if any(differences[j] >= self.min_percent_diff[i]\\\n and self.freq_channels[j][0] >= self.min_detect_amplitude[i]\\\n for j in range(*self.detection_ranges[i]))\\\n and (time.time() - self.prev_freq_detects[i] > 0.2)\\\n and len(self.freq_channels[0]) == self.freq_channel_history:\n self.prev_freq_detects[i] = time.time()\n self.current_freq_detects[i] = True\n else:\n self.current_freq_detects[i] = False",
"def noise_despiker(self, win=3, nlim=12.):\n if ~isinstance(win, int):\n win = int(win)\n if not hasattr(self, 'despiked'):\n self.data['despiked'] = {}\n for a, vo in self.focus.items():\n v = vo.copy()\n if 'time' not in a.lower():\n # calculate rolling mean using convolution\n kernel = np.ones(win) / win\n rmean = np.convolve(v, kernel, 'same')\n\n # with warnings.catch_warnings():\n # to catch 'empty slice' warnings\n # warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n # rmean = \\\n # np.apply_along_axis(np.nanmean, 1,\n # rolling_window(v, win,\n # pad=np.nan))\n # rmean = \\\n # np.apply_along_axis(np.nanmean, 1,\n # rolling_window(v, win,\n # pad=np.nan))\n # calculate rolling standard deviation\n # (count statistics, so **0.5)\n rstd = rmean**0.5\n\n # find which values are over the threshold\n # (v > rmean + nlim * rstd)\n over = v > rmean + nlim * rstd\n if sum(over) > 0:\n # get adjacent values to over - limit values\n neighbours = \\\n np.hstack([v[np.roll(over, -1)][:, np.newaxis],\n v[np.roll(over, 1)][:, np.newaxis]])\n # calculate the mean of the neighbours\n replacements = np.apply_along_axis(np.nanmean, 1,\n neighbours)\n # and subsitite them in\n v[over] = replacements\n self.data['despiked'][a] = v\n self.setfocus('despiked')\n return",
"def test_noise_equiv_bandwidth():\n win = windows.blackmanharris(2000)\n assert np.isclose(2, 1.0 / utils.noise_equivalent_bandwidth(win), rtol=1e-2)",
"def disaggregate_chunk(self, chunk):\n\n # An resistive element has active power equal to apparent power.\n # Checking power units.\n units = self.__physical_quantity(chunk)\n\n # EVENTS OUT OF THE CHUNK:\n # Delta values:\n column_name = 'diff_' + units[1]\n chunk[column_name] = chunk.loc[:, units].diff()\n\n # Filter the noise.\n chunk['onpower'] = (chunk[column_name] > self.powerNoise)\n chunk['offpower'] = (chunk[column_name] < -self.powerNoise)\n events = chunk[(chunk.onpower == True) | (chunk.offpower == True)]\n\n detection_list = []\n singleOnevent = 0\n # Max Likelihood algorithm (optimized):\n for onevent in events[events.onpower == True].iterrows():\n # onTime = onevent[0]\n # deltaOn = onevent[1][1]\n # windowning:\n offevents = events[(events.offpower == True) & (events.index > onevent[0]) & (\n events.index < onevent[0] + timedelta(seconds=self.timeWindow))]\n # Filter paired events:\n offevents = offevents[\n abs(onevent[1][1] - offevents[column_name].abs()) < self.powerPair]\n\n # Max likelihood computation:\n if not offevents.empty:\n # pon = self.__pdf(self.onpower, onevent[1][1])\n for offevent in offevents.iterrows():\n # offTime = offevent[0]\n # deltaOff = offevent[1][1]\n # poff = self.__pdf(self.offpower, offevent[1][1])\n # duration = offevent[0] - onTime\n # pduration = self.__pdf(self.duration, (offevent[0] - onTime).total_seconds())\n likelihood = self.__pdf(self.onpower, onevent[1][1]) * \\\n self.__pdf(self.offpower, offevent[1][1]) * \\\n self.__pdf(self.duration, (offevent[0] - \\\n onevent[0]).total_seconds())\n detection_list.append(\n {'likelihood': likelihood, 'onTime': onevent[0], \n 'offTime': offevent[0], 'deltaOn': onevent[1][1]})\n else:\n singleOnevent += 1\n\n # Passing detections to a pandas.DataFrame\n detections = pd.DataFrame(\n columns=('onTime', 'offTime', 'likelihood', 'deltaOn'))\n\n for i in range(len(detection_list)):\n detections.loc[i] = [detection_list[i]['onTime'], detection_list[i][\n 'offTime'], detection_list[i]['likelihood'], detection_list[i]['deltaOn']]\n\n detections = detections[detections.likelihood >= self.thLikelihood]\n\n # Constructing dis_chunk (power of disaggregated appliance)\n dis_chunk = pd.DataFrame(\n index=chunk.index, columns=[str(units[0]) + '_' + str(units[1])])\n dis_chunk.fillna(0, inplace=True)\n\n # Ruling out overlapped detecttions ordering by likelihood value.\n detections = detections.sort('likelihood', ascending=False)\n for row in detections.iterrows():\n # onTime = row[1][0] offTime = row[1][1] deltaOn = row[1][3]\n #import ipdb\n #ipdb.set_trace()\n if ((dis_chunk[(dis_chunk.index >= row[1][0]) &\n (dis_chunk.index < row[1][1])].sum().values[0]) == 0):\n # delta = chunk[chunk.index == onTime][column_name].values[0]\n dis_chunk[(dis_chunk.index >= row[1][0]) & (\n dis_chunk.index < row[1][1])] = row[1][3]\n\n # Stat information:\n print(str(len(events)) + \" events found.\")\n print(str(len(events[events.onpower == True])) + \" onEvents found\")\n print(str(singleOnevent) + \" onEvents no paired.\")\n\n return dis_chunk",
"def audioEpochFeats(cur,uid,timestamp):\n\tuidA = uid +'audio'\n\n\tvar_stats = []\n\tstd_stats = []\n\tnoise = []\n\tvoiceToSilenceRatio = []\n\n\tfor i in range(1,24):\n\t\ths_timestamp = timestamp-86400+(i-1)*hour\n\t\the_timestamp = timestamp-86400+i*hour\n\t\t# Determining if start/end time of given hour is in the night\n\t\t# If yes, proceed with feature calculation, if not skip\n\t\ts_epoch = epochCalc(hs_timestamp)\n\t\te_epoch = epochCalc(he_timestamp)\n\n\t\tif s_epoch[0][0]=='night' or e_epoch[0][0]=='night':\n\t\t\tcur.execute('SELECT audio FROM {0} WHERE time_stamp >= {1} AND time_stamp<= {2}'\n\t\t\t\t.format(uidA,timestamp-86400+(i-1)*hour,timestamp-86400+i*hour))\n\t\t\trecords = cur.fetchall()\n\n\t\t\tvar_stats.append(np.var(records))\n\t\t\tstd_stats.append(np.std(records))\n\n\t\t\t# Calculating number of silence and voice/noise occurences\n\t\t\tsilence = len([item for item in records if item==0])\n\t\t\tvoice = len([item for item in records if item==1 or item==2])\n\t\t\tnoise.append(len([item for item in records if item==3]))\n\t\t\tif silence>0:\n\t\t\t\tvoiceToSilenceRatio.append(float(voice) / silence)\n\t\t\telse:\n\t\t\t\tvoiceToSilenceRatio.append(0)\n\treturn(np.nan_to_num(np.hstack((voiceToSilenceRatio,var_stats,std_stats,noise))))\n\t\"\"\"\ndef main():\n\tcon = psycopg2.connect(database='dataset', user='tabrianos')\n\tcur = con.cursor()\n\t#warnings.simplefilter(\"error\")\n\t#centers = np.load('visualizations/clustercenters.npy')\n\n# ------------TEST CASE-----------------------------\n\tfor loso in uids1:\n\t\tytest=[]\n\t\taccuracies =[]\n\t\tacc=0\n\t\tmaxminAcc =[]\n\t\tXbig = np.zeros([1,132])\t\n\t\tYbig = np.zeros([1])\n\t\tlabels=[]\n\t\tlabels.append(19)\n\t\t# loso means leave one student out: forest is trained on other users data\n\t\t# then tests are run on 'loso' student \n\t\tuids2.remove(loso)\n\t\tuids2.append(loso)\n\t\tprint('LOSO: {0}'.format(loso))\n\t\tfor testUser in uids2:\n\t\t\tprint(testUser)\n\t\t\t# lists that temporary store features before concatenation\n\t\t\t\n\t\t\tcolocationList =[]\n\t\t\tconversationList =[]\n\t\t\tactivityList=[]\n\t\t\taudioList = []\n\n\t\t\t# loading stress labels from database (currently on 0-5 scale)\n\t\t\trecords = loadSleepLabels(cur,testUser) \n\t\t\n\n\t\t\t\n\t\t\t#X,Y store initially the dataset and the labels accordingly\n\t\t\tY = np.zeros(len(records))\n\t\t\tX = np.array(records)\n\n\t\n\n\n\t\t\tfor i in range(0,len(records)):\n\t\t\t\tcolocationList.append( colocationEpochFeats(cur,testUser,X[i][0]))\n\t\t\t\tconversationList.append( convEpochFeats(cur,testUser,X[i][0]))\n\t\t\t\tactivityList.append(activityEpochFeats(cur,testUser,X[i][0]))\n\t\t\t#\tScreenList.append( screenStatFeatures(cur,testUser,X[i][0],day) )\n\t\t\t\taudioList.append(audioEpochFeats(cur,testUser,X[i][0]))\n\t\t\n\t\t\t\tif testUser==loso:\n\t\t\t\t\tytest.append(X[i][1])\n\t\t\t\t#labels list holds user ids to be used in LeaveOneOut pipeline\n\t\t\t\tlabels.append(testUser[-2:])\n\t\t\t\tY[i] = X[i][2]\n\n\t\t\t\n\t\t\t#concatenating features in one array \n\n\t\t\tXtt = np.concatenate((np.array(activityList),np.array(conversationList),np.array(colocationList),np.array(audioList)),axis=1)\n\t\t\tprint(Xtt.shape)\n\n\t\t\t#initiating and training forest, n_jobs indicates threads, -1 means all available\n\t\t\t# while the test student is not reached, training data are merged into one big matrix\n\t\t\tXbig = np.concatenate((Xbig,Xtt),axis=0)\n\t\t\tYbig = np.concatenate((Ybig,Y),axis=0)\n\n\t\t\tdel 
colocationList[:]\n\t\t\tdel conversationList[:]\n\t\t\tdel activityList[:]\n\t\t\tdel audioList[:]\n\n\n\n\t\t\tif testUser!=loso:\n\t\t\t\tXbig = Xbig.astype(np.float64)\n\t\t\t\tprint(Xbig.dtype)\n\t\t\t\t\n\n\t\t\t# when loso, tests are run\n\t\t\telif testUser==loso:\n\t\t\t\t#Xbig = preprocessing.scale(Xbig)\n\t\t\t\tnp.save('numdata/withgps/sleephourlyX.npy',Xbig)\n\t\t\t\tnp.save('numdata/withgps/sleephourlyY.npy',Ybig)\n\t\t\t\tnp.save('numdata/withgps/sleephourlyLOO.npy',np.array(labels))\n\t\t\t\tprint(Xbig.shape[0],Ybig.shape[0],len(labels))\n\t\t\t\tprint('train matrix saved')\n\t\t\t\ta = raw_input()\n\t\t\t\tforest = RandomForestClassifier(n_estimators=100, n_jobs = -1)\n\t\t\t\tforest.fit(Xbig,Ybig)\n\t\t\t\tef = forest.score(Xtt,ytest)\n\t\t\t\tprint(ef*100)\n\n\t\t\t\toutput = np.array(forest.predict(Xtt))\n\t\t\t\tscored = output - np.array(ytest)\n\n\t\t\t\t# Counting as correct predictions the ones which fall in +/-1, not only exact\n\t\t\t\t# I call it the 'Tolerance technique'\n\t\t\t\tcorrect=0\n\t\t\t\tc = Counter(scored)\n\t\t\t\tfor k in c.keys():\n\t\t\t\t\tif k<2 and k>-2:\n\t\t\t\t\t\tcorrect += c[k]\n\t\t\t\t\n\t\t\t\tscore = float(correct)/len(scored)\n\t\t\t\tprint(score*100)\n\n\n\n\t\tprint(Xbig.shape)\n\t\n\t\t\n\n\n\nif __name__ == '__main__':\n\tmain()\n\n\n\n\t\"\"\"",
"def rmsilence(sample):\n ns, ne = sample.wordseq[0][0][0], sample.wordseq[-1][0][1]\n return sample.signal[ns:ne]",
"def detect(self, short, long, nstds=1, trigger_len=5000):\n trigger_values = []\n\n long_win_len = int(long / self.interval)\n short_win_len = int(short / self.interval)\n iter_len = 1\n # Convert from ms to number of obs\n trigger_len = int(trigger_len / self.interval)\n debug(\"Window lengths: {}, {} ({}s, {}s)\".format(\n long_win_len, short_win_len, long/1000, short/1000))\n i = long_win_len + short_win_len\n triggered = False\n off_threshold = 0\n triggered_obs = 0\n while i + short_win_len < len(self.trace):\n long_window = self.abs[\n i - long_win_len - short_win_len:\n i - short_win_len\n ]\n long_win_mean = np.mean(long_window)\n long_win_std = np.std(long_window)\n short_win_mean = np.mean(\n self.abs[i - short_win_len:i])\n trigger_values.append((\n i * self.interval,\n short_win_mean,\n long_win_mean,\n long_win_std * nstds\n ))\n if not triggered:\n trigger_val = long_win_mean + long_win_std * nstds\n if short_win_mean > trigger_val:\n off_threshold = long_win_mean\n triggered = True\n triggered_obs = 1\n else: # if triggered\n triggered_obs += iter_len\n if short_win_mean < off_threshold: # trigger over\n triggered = False\n if triggered_obs > trigger_len:\n yield(\n int(i - triggered_obs - (short_win_len / 2)) * self.interval,\n i * self.interval\n )\n triggered_obs = 0\n i += iter_len\n # Get trigger values\n self.trigger_values = pd.DataFrame(\n trigger_values,\n columns=(\"t\", \"sm\", \"lm\", \"trigger\"),\n )",
"def _getWavelet(self, ch='dos1rate', thresh=0.1, maxWidth=1, SIGNIF_LEVEL=0.25):\n # Feed the counts into the wavelet microburst finder\n validDataIdt = np.where(self.d[ch] != -1E31)[0]\n waveletAnalysis.WaveletDetector.__init__(self, self.d[ch][validDataIdt], \n self.d['dateTime'][validDataIdt], 0.1, mother='DOG', siglvl=0.95)\n self.waveletTransform() # Get wavelet space\n self.waveletFilter(self.s0, maxWidth, SIGNIF_LEVEL=SIGNIF_LEVEL) # Do a band pass and significance filter.\n self.degenerateInvWaveletTransform() # Inverse transform filtered data.\n # Indicies where the error-filetered data is greater than thresh\n self.burstIdt = np.where(self.dataFlt > thresh)[0] \n self._getPeaks(ch, validDataIdt) # Find peaks\n return",
"def getSectionsOfNewVideo (silences, duration):\n return [0.0] + silences + [duration]",
"def check_peak_win(self):\n if self.peak_win[0] < 0.0:\n self.peak_win[0] = 0.0\n if self.logger is not None:\n self.logger.warning(('Start of peak window < 0 sec for cond: {}. ' +\n 'Setting to 0.').format(self.cond))\n if self.peak_win[1] > self.psc_dur:\n self.peak_win[1] = self.psc_dur\n if self.logger is not None:\n logger.warning(('End of peak window is longer than trial HRF ' +\n 'for cond: {}. Truncating.').format(self.cond))\n return",
"def initial_sampling(y):\n samples = list(np.random.randint(0, len(y), 2))\n while len(np.unique(y[samples] > 0.5)) != 2:\n samples = list(np.random.randint(0, len(y), 2))\n return samples"
] | [
"0.71466213",
"0.6409475",
"0.6371328",
"0.6293573",
"0.6193076",
"0.5912924",
"0.5897721",
"0.5817821",
"0.5812242",
"0.57155514",
"0.5647271",
"0.56060433",
"0.5580342",
"0.5548751",
"0.5509576",
"0.54549193",
"0.5436235",
"0.5432081",
"0.53647876",
"0.53432316",
"0.53431046",
"0.5306017",
"0.5304138",
"0.52976894",
"0.52743936",
"0.5269249",
"0.5215721",
"0.52061164",
"0.5204143",
"0.51961917"
] | 0.67833835 | 1 |
Read a line from self.serial, ignoring all of the debug lines. You must have the self.serialLock before calling this function. Returns None if we couldn't read a non-debug line in a reasonable amount of time. | def readNonDebugLine(self):
line = "D"
linesRead = 0
try:
while line == None or line == "" or line[0] == 'D':
linesRead += 1
if linesRead == 100:
return None
try:
line = self.serial.readline()
sys.stdout.write(".")
sys.stdout.flush()
except serial.SerialTimeoutException:
sys.stdout.write("T")
sys.stdout.flush()
#print repr(line)
return line
except KeyboardInterrupt:
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def readline(self):\n try:\n output = self.ser.readline()\n return output\n except SerialException as se:\n log.debug('Serial connection read error: {}'.format(se))\n return None",
"def get_valid_line(port):\r\n while True:\r\n try:\r\n # note in the case of serial port overflow some characters left in \r\n # it decode will then throw an exception\r\n return port.readline().decode(encoding='ASCII').strip()\r\n \r\n except:\r\n pass # ignore the exception and try again with the next line\r",
"def _get_line(self):\n if len(self.lines) > 0:\n # Get the next line\n return self.lines.pop(0)\n # packets are 8192 bytes in size\n # for packet in self.s3File :\n while self.packet_counter * CsvAbstractReader.BUFFER_SIZE <= self._get_file_size():\n\n success, packet = self._get_next_packet()\n if not success:\n break\n self.packet_counter += 1\n\n # Get the current lines\n current_bytes = self.unprocessed + packet\n self.lines = _split_lines(current_bytes)\n\n # edge case if the packet was filled with newlines only try again\n if len(self.lines) == 0:\n continue\n\n # last line still needs processing save and reuse\n self.unprocessed = self.lines.pop()\n if len(self.lines) > 0:\n # Get the next line\n return self.lines.pop(0)\n self.is_finished = True\n\n if len(self.unprocessed) < 5:\n # Got an extra line from a line break on the last line\n self.extra_line = True\n return self.unprocessed",
"def _readline(self, a_serial, eol=b'\\r\\r\\n'):\n leneol = len(eol)\n line = bytearray()\n while True:\n c = a_serial.read(1)\n if c:\n line += c\n if line[-leneol:] == eol:\n break\n else:\n break\n return bytes(line)",
"def read(self):\n try:\n while self.Serial.in_waiting:\n c = self.Serial.read().decode()\n if c != \"\\n\":\n self.buffer += c\n else:\n return self.parse_telemetry()\n except:\n pass\n\n return None",
"def reader(self):\n try:\n line = ''\n while self.alive:\n data = self.serial.read(1)\n if data == '\\r':\n continue\n\n line += data\n if data == '\\n':\n self.log.print_distant(datetime.now().strftime(\n \"%d/%m/%Y %H:%M:%S> \"))\n if line.startswith('ALARM:'):\n self.log.alert(line)\n elif line.startswith('EVENT:') or line.startswith('INFO'):\n self.log.warn(line)\n else:\n self.log.print_distant(line)\n self.parse(line.strip())\n line = ''\n\n sys.stdout.flush()\n\n except serial.SerialException:\n self.alive = False\n # would be nice if the console reader could be interruptted at this\n # point...\n raise",
"def readline( self ):\n self.readbuf += self.read( 1024 )\n if '\\n' not in self.readbuf:\n return None\n pos = self.readbuf.find( '\\n' )\n line = self.readbuf[ 0 : pos ]\n self.readbuf = self.readbuf[ pos + 1: ]\n return line",
"def readline(self) -> Optional[bytes]:\n ...",
"def _read_line(self, input: TextIO) -> Optional[str]:\n if self.next_line:\n line: str = self.next_line\n self.next_line = None\n return line\n\n while True:\n line = input.readline()\n\n # EOF\n if line == '':\n return None\n\n # remove trailing comments\n i: int = line.find('#')\n if i >= 0:\n line = line[:i]\n\n # strip any trailing whitespaces\n line = line.rstrip()\n\n # skip any blank lines after stripping\n if not line:\n continue\n\n return line",
"def _read_line(self):\n line = ''\n while True:\n c = self.s.read()\n if c == b'':\n raise EvseTimeoutError\n line += c.decode('ascii')\n if c == b'\\r':\n break\n return line",
"def getLine(self):\n\t\tif len(self._completeLines) > 0:\n\t\t\treturn self._completeLines.pop(0)\n\t\telse:\n\t\t\treturn None",
"def read_line():\n # try reading a line, removing any extra whitespace\n try:\n line = sys.stdin.readline().strip()\n # i3status sends EOF, or an empty line\n if not line:\n sys.exit(3)\n return line\n # exit on ctrl-c\n except KeyboardInterrupt:\n sys.exit()",
"def read_line():\n # try reading a line, removing any extra whitespace\n try:\n line = sys.stdin.readline().strip()\n # i3status sends EOF, or an empty line\n if not line:\n sys.exit(3)\n return line\n # exit on ctrl-c\n except KeyboardInterrupt:\n sys.exit()",
"def readline(self):\n sep = b'\\n'\n seplen = len(sep)\n try:\n line = yield from self.readuntil(sep)\n except IncompleteReadError as e:\n return e.partial\n except LimitOverrunError as e:\n if self._buffer.startswith(sep, e.consumed):\n del self._buffer[:e.consumed + seplen]\n else:\n self._buffer.clear()\n self._maybe_resume_transport()\n raise ValueError(e.args[0])\n return line",
"def _read_line(self):\r\n line = \"\"\r\n while not line[-2:] == \"\\r\\n\":\r\n char = self.sock.recv(1)\r\n if not char:\r\n raise SocketClosedException\r\n line += char\r\n return line.strip()",
"def read_from_serial(self):\n self.running = True\n while self.running:\n data = self.serial.readline().decode()\n if \"DATA\" not in data:\n print(\"ERREUR : {}\".format(data))\n continue\n\n try:\n _, hum, tem = data.split(\" \")\n hum = float(hum)\n tem = float(tem)\n except Exception as e:\n print(\"ERREUR : ligne nulle {}\".format(e))\n else:\n self.data.append([datetime.datetime.now(), hum, tem])",
"def _read(self):\n \n try:\n d = self._get_byte()\n ts = time.time()\n while d != self.HDLC_FLAG_BYTE:\n d = self._get_byte()\n ts = time.time()\n packet = [d]\n d = self._get_byte()\n if d == self.HDLC_FLAG_BYTE:\n d = self._get_byte()\n ts = time.time()\n else:\n packet.append(d)\n while d != self.HDLC_FLAG_BYTE:\n d = self._get_byte()\n packet.append(d)\n if self._debug == True:\n print \"Serial:_read: unescaped\", packet\n packet = self._unescape(packet)\n \n crc = self._crc16(0, packet[1:-3])\n packet_crc = self._decode(packet[-3:-1])\n \n if crc != packet_crc:\n print \"Warning: wrong CRC! %x != %x %s\" % (crc, packet_crc, [\"%2x\" % i for i in packet])\n if self._debug:\n if self._ts == None:\n self._ts = ts\n else:\n print \"Serial:_read: %.4f (%.4f) Recv:\" % (ts, ts - self._ts), self._format_packet(packet[1:-3])\n self._ts = ts\n return RawPacket(ts, packet[1:-3], crc == packet_crc)\n except socket.timeout:\n return None",
"def _get_line(self):\n line = self.file.readline(self.maxline + 1)\n if len(line) > self.maxline:\n print(f\"ERROR: got more than {self.maxline} bytes\")\n if not line:\n print(\"Received EOF\")\n if line[-2:] == CRLF:\n line = line[:-2]\n elif line[-1:] in CRLF:\n line = line[:-1]\n return line + CRLF",
"def readline(self):\n try:\n return self.queue.get_nowait()\n except Empty:\n return None",
"def readline(self) -> bytes | None:",
"def get_line(self, path, line):\n\t\tlines = self.find_source(path)\n\t\tif lines == None:\n\t\t\treturn None\n\t\telse:\n\t\t\ttry:\n\t\t\t\treturn lines[line - 1]\n\t\t\texcept IndexError:\n\t\t\t\treturn None",
"def _readline(self):\n return self.ser.readline().decode(\"ASCII\").strip()",
"def _readline(self):\n\n eol = b'\\r'\n leneol = len(eol)\n line = bytearray()\n while True:\n c = self.ser.read(1)\n if c:\n line += c\n if line[-leneol:] == eol:\n break\n else:\n break\n return bytes(line)",
"def readline( shell ):\n global readbuf\n readbuf += read( shell, 1024 )\n if '\\n' not in readbuf:\n return None\n pos = readbuf.find( '\\n' )\n line = readbuf[ 0: pos ]\n readbuf = readbuf[ pos + 1: ]\n return line",
"def read(self) -> bytes:\n line = self.device.readline()\n if len(line) > 0 and line[-1] == 10:\n line += self.device.readline()\n return line",
"def readline(self) -> Optional[str]:",
"def read_serial_data(serial):\n serial.flushInput()\n \n serial_data = []\n readings_left = True\n timeout_reached = False\n \n while readings_left and not timeout_reached:\n serial_line = serial.readline()\n if serial_line == '':\n timeout_reached = True\n else:\n serial_data.append(serial_line)\n if len(serial_data) == max_num_readings:\n readings_left = False\n \n return serial_data",
"def readline(self) -> str | None:",
"def read_from_serial(self):\n output = b''\n time.sleep(self._sleep_time)\n while self._ser.inWaiting() > 0:\n output = output + self._ser.read(1)\n #A default ten powercycle delay means that some measurements may still be processing\n #by the time the read function is called. This slows down the read but ensures that\n #it will finish (per my testing). There is probably a better way to do this. TODO\n time.sleep(0.06)\n return output.decode('utf-8').strip()",
"def __get_response(serial_port):\n read_data = \"\"\n while not read_data.endswith(\"\\n>> \"):\n ready = select.select([serial_port], [], [], 25)[0]\n if ready:\n read_data += serial_port.read(serial_port.inWaiting()).decode(\n \"utf-8\", \"replace\")\n else:\n raise errors.DeviceError(\n \"Device cambrionix get response failed. \"\n \"Read timeout on serial port: {}\".format(serial_port))\n\n return read_data.splitlines()"
] | [
"0.675768",
"0.64719343",
"0.6133813",
"0.6089158",
"0.59813446",
"0.5949382",
"0.5907831",
"0.5847358",
"0.58420366",
"0.58226496",
"0.5808249",
"0.57025224",
"0.57025224",
"0.5692863",
"0.5597065",
"0.55887336",
"0.55807936",
"0.55561167",
"0.55356073",
"0.5519128",
"0.5516207",
"0.5486602",
"0.5485922",
"0.5450695",
"0.54439384",
"0.5421079",
"0.54155624",
"0.5393563",
"0.53901595",
"0.53439367"
] | 0.80467755 | 0 |
appends the response, then redirects to the next question | def handle_answer():
extracted_answer = request.form.get('answers')
responses.append(extracted_answer)
length = len(responses)
return redirect(f"/questions/{length}") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def redirect_to_question():\n # responses variable will go on to store all of the user's answers to the questions\n session[ANSWERS_KEY] = []\n return redirect(f\"/questions/{len(session[ANSWERS_KEY])}\")",
"def add_answer():\n\n ans = request.form['answer']\n responses.append(ans)\n\n responses = session['responses']\n responses.append(ans)\n session['responses'] = responses\n \n # redirect...\n q_num = len(responses)\n \n if q_num >= len(satisfaction_survey.questions):\n return redirect(\"/finished\")\n\n else:\n return redirect(f\"/questions/{q_num}\")",
"def handle_answer():\n choice = request.form['answer']\n text = request.form.get(\"text\", \"\")\n\n # add this response to the list in the session\n responses = session[RESPONSES_KEY]\n responses.append({\"choice\": choice, \"text\": text})\n\n # add this response to the session\n session[RESPONSES_KEY] = responses\n survey_code = session[CURRENT_SURVEY_KEY]\n survey = surveys[survey_code]\n\n if (len(responses) == len(survey.questions)):\n # survey is complete\n return redirect(\"/complete\")\n\n else:\n return redirect(f\"/questions/{len(responses)}\")",
"def start_survey():\n session[RESPONSES_KEY] = []\n\n return redirect(\"/questions/0\")",
"def start_survey():\n\n session[RESPONSES_KEY] = []\n\n return redirect(\"/questions/0\")",
"def question_page(q_num): \n if q_num == len(responses) and q_num < len(satisfaction_survey.questions):\n title=satisfaction_survey.title\n question = satisfaction_survey.questions[q_num].question\n choices = satisfaction_survey.questions[q_num].choices\n return render_template(\"question.html\", survey_title=title, question=question, choices=choices, q_num=q_num)\n\n elif q_num != len(responses) and len(responses) < len(satisfaction_survey.questions):\n flash(\"Please answer the questions in order\")\n return redirect(f\"/questions/{len(responses)}\")\n \n elif q_num != len(responses) and len(responses) == len(satisfaction_survey.questions):\n flash(\"If you want to edit your response, please contact us\")\n return redirect(\"/finished\")\n \n else:\n \"\"\" default?\"\"\"\n return render_template(\"thanks.html\")",
"def next_question(self):\n self.user_answers = []\n self.curent_question = choice(self.to_ask)",
"def store_answer():\n #breakpoint()\n answer = request.form['answer']\n response = session[ANSWERS_KEY]\n response.append(answer)\n session[ANSWERS_KEY] = response\n return redirect(f\"/questions/{len(session[ANSWERS_KEY])}\")",
"async def respond(self, ctx, index, *, response):\n try:\n config = self.bot.db['questions'][str(ctx.guild.id)][str(ctx.channel.id)]\n except KeyError:\n return\n if not response:\n await hf.safe_send(ctx, \"You need to type something for your response.\")\n return\n if len(response.split()) == 1:\n try:\n msg = await ctx.channel.fetch_message(int(response))\n await ctx.message.add_reaction('⤴')\n ctx.message = msg\n ctx.author = msg.author\n response = msg.content\n except (discord.NotFound, ValueError):\n pass\n if index not in config['questions']:\n await hf.safe_send(ctx, \"Invalid question index. Make sure you're typing this command in the channel \"\n \"the question was originally made in.\")\n return\n\n try:\n log_channel = ctx.guild.get_channel(config['log_channel'])\n except discord.NotFound:\n await hf.safe_send(ctx, \"The original log channel can't be found (type `;q setup`)\")\n return\n try:\n log_message = await log_channel.fetch_message(config['questions'][index]['log_message'])\n except discord.NotFound:\n await hf.safe_send(ctx, \"The original question log message could not be found. Type `;q a <index>` to \"\n \"close the question and clear it.\")\n return\n\n emb: discord.Embed = log_message.embeds[0]\n value_text = f\"\\n[Jump URL]({ctx.message.jump_url})\"\n emb.add_field(name=f\"Response by {ctx.author.name}#{ctx.author.discriminator}\",\n value=value_text.replace('', response[:1024-len(value_text)]))\n await log_message.edit(embed=emb)\n config['questions'][index].setdefault('responses', []).append(ctx.message.jump_url)\n await self._delete_log(ctx)\n await self._post_log(ctx)\n await ctx.message.add_reaction('✅')",
"def question_page():\n question_start_time = session.get('start_time')\n question_last_answer = session.get('last_answer')\n counter = session.get('counter')\n if not counter:\n session['counter'] = 0\n if session['counter'] < 5:\n if session['counter'] > 0:\n check_answer(question_start_time, question_last_answer, counter)\n session['counter'] += 1\n question = _get_question()\n session['start_time'] = time()\n session['last_answer'] = question.pop()\n answers = {}\n letters = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I']\n for k, a in enumerate(question):\n if k > 0:\n answers[letters[k-1]] = a\n # answers = zip(letters, question[1:-1]\n return render_template('question.html', question=question[0], answers=answers, counter=session['counter']),\n else:\n check_answer(question_start_time, question_last_answer, counter)\n return redirect(url_for('result_page'))",
"def api_next_question(request):\n # if request.method == 'GET':\n try:\n request_id: int = int(request.GET[\"reqid\"])\n reponse_id: int = int(request.GET['repid'])\n\n if reponse_id is -1:\n # Convention pour la requete\n return JsonResponse({\n # Temporary cheat, always first question\n 'question_id': 1\n })\n\n id_only: bool = bool(int(request.GET.get('id_only', '0')))\n o_reponse = Reponse.objects.get(repid=reponse_id)\n o_request = Requete.objects.get(reqid=request_id)\n method = getattr(met, f'question{o_reponse.question_id}')\n next_question_id = method(requete=o_request, reponse=o_reponse)\n\n if not next_question_id:\n return JsonResponse({})\n\n if next_question_id == -1:\n return JsonResponse({\n 'question_id': next_question_id\n })\n\n o_question = Question.objects.get(qid=next_question_id)\n\n if id_only:\n return JsonResponse({\n 'question_id': o_question.qid\n })\n\n serializer = QuestionSerializer(o_question)\n return JsonResponse(serializer.data)\n except AttributeError:\n raise NotImplementedError('')\n pass",
"def load_next_response(self):\n if self.response_info['next']:\n self.response = requests.get(self.response_info['next'])\n self.response_info = self.response.json()",
"def complete_questionnaire(self):\n while True:\n data = {\n \"question\": \"questionnaire\",\n \"number\": 1,\n \"response\": json.dumps(self.question_responses),\n }\n url = \"{host}/question/{self.participant_id}\".format(\n host=self.host, self=self\n )\n try:\n result = requests.post(url, data=data)\n result.raise_for_status()\n except RequestException:\n self.stochastic_sleep()\n continue\n return True",
"def action_next(self, cr, uid, ids, context=None):\n survey_obj = self.pool.get('survey')\n search_obj = self.pool.get('ir.ui.view')\n if context is None: context = {}\n\n this = self.browse(cr, uid, ids, context=context)[0]\n survey_id = this.survey_id.id\n context.update({'survey_id': survey_id, 'sur_name_id': this.id})\n cr.execute('select count(id) from survey_history where user_id=%s\\\n and survey_id=%s' % (uid,survey_id))\n\n res = cr.fetchone()[0]\n sur_rec = survey_obj.browse(cr,uid,survey_id,context=context)\n if sur_rec.response_user and res >= sur_rec.response_user:\n raise osv.except_osv(_('Warning!'),_(\"You cannot give response for this survey more than %s times.\") % (sur_rec.response_user))\n\n if sur_rec.max_response_limit and sur_rec.max_response_limit <= sur_rec.tot_start_survey:\n raise osv.except_osv(_('Warning!'),_(\"You cannot give more responses. Please contact the author of this survey for further assistance.\"))\n\n search_id = search_obj.search(cr,uid,[('model','=','survey.question.wiz'),('name','=','Survey Search')])\n return {\n 'view_type': 'form',\n \"view_mode\": 'form',\n 'res_model': 'survey.question.wiz',\n 'type': 'ir.actions.act_window',\n 'target': 'new',\n 'search_view_id': search_id[0],\n 'context': context\n }",
"def redirectSubmit():\n postRequest = request.json or request.form or request.args\n print postRequest\n\n rawText = str(postRequest.items()[0][1])\n collist = key_words_filter(rawText)\n if len(collist) != 0:\n dna.db.fileter_cato(collist,0)\n if dna.currentquestion.qid == -1:\n print \"error got\"\n SESSION_INFO.result = dna.currentList\n q = Question()\n q.qid = \"-1\"\n SESSION_INFO.question = q\n SESSION_INFO.answerlist = dna.answerList\n\n\n\n\n return render_template('question.html', session_info=json.dumps(SESSION_INFO.toJson()))",
"def test_add_questions_view(self):\n target_url = url_for('quiz.questions')\n redirect_url = url_for('users.login', next=target_url)\n response = self.client.get(target_url)\n self.assertEqual(response.status_code, 302)\n self.assertRedirects(response, redirect_url)",
"def next_question(update: Update, context: CallbackContext) -> None:\n update.callback_query.answer()\n if not isinstance(context.chat_data, dict):\n raise AssertionError\n if context.chat_data['question_number'] < (\n len(context.chat_data['qlist']) - 1):\n context.chat_data['question_number'] += 1\n context.chat_data['question_attempted_by'] = []\n msg_text, option_keyboard = Quiz.parse_question(\n context.chat_data['qlist'][\n context.chat_data['question_number']])\n option_keyboard.append([\n InlineKeyboardButton(\"Next (Admin Only)\", callback_data=\"next\")\n ])\n context.chat_data['message'] = context.bot.edit_message_text(\n text=msg_text,\n chat_id=context.chat_data['message'].chat.id,\n message_id=context.chat_data['message'].message_id,\n reply_markup=InlineKeyboardMarkup(option_keyboard),\n parse_mode=ParseMode.MARKDOWN)\n else:\n Quiz.send_scoreboard(context=context)",
"def show_question(q_id):\n responses = session[ANSWERS_KEY]\n if len(responses) < len(survey.questions):\n current_question = survey.questions[len(responses)]\n return render_template(\"question.html\", \n question = current_question)\n else:\n return redirect(\"/completion\")",
"def redirect_to_next(self, request):\n\n if 'next' in request.GET:\n next_page = request.GET['next']\n return HttpResponseRedirect(next_page)\n else:\n return redirect('index')",
"def next_question(self):\n # Grab the old question\n wid = self.lay.itemAt(1)\n if wid is not None and isinstance(wid.widget(), QuickQuestion) :\n # Save off the answer\n idx = self.q_order[self.cur_q_idx]\n # self.answers.append(wid.widget().get_value())\n self.answers[idx] = wid.widget().get_value()\n # print(self.answers[-1])\n\n # Delete the old question\n wid.widget().deleteLater()\n self.lay.removeItem(wid)\n\n\n # Update the index and progressbar\n self.cur_q_idx += 1\n self.pbar.setValue(100 * self.cur_q_idx / self.pbar_end)\n\n\n # Check if you've reached the last question\n if self.cur_q_idx >= self.pbar_end:\n self.show_results()\n # self.populate_img()\n return\n\n\n # populate the next question\n idx = self.q_order[self.cur_q_idx]\n q = self.questions[idx]\n wid = QuickQuestion(text=\"\\n\"+q[\"text\"]+\"\\n\", answers=q[\"answers\"])\n wid.procDone.connect(self.next_question)\n\n self.lay.insertWidget(1, wid)",
"def answer_question(self, ques, ans):\n res = Response(Answer(ans, len(self.response_list)), ques, self)\n self.response_list.append(res)\n return res",
"def biostar_question_redirect( self, trans, payload={} ):\n return self.biostar_redirect( trans, payload=payload, biostar_action='new' )",
"def game_post():\n game = Game.current_game\n question = game.current_question\n \n answer_keys = request.form[\"answer_keys\"].split('++')\n print(answer_keys)\n question['user_response'] = answer_keys\n \n return redirect(url_for(\"show_answer\"))",
"def process_question_answer_page(self, response):\n self.results_page_count += 1\n self.classification_file.write(\"results, {}\\n\".format(response.url))\n logging.info('results: {}'.format(response.url))\n print(\"results: {}\".format(response.url))\n\n # Filters\n if not self.page_contains_answers(response):\n return []\n\n # Process posts\n question_answer_list = []\n question_answer = QuestionAnswer()\n question_answer = self.fill_question(response, question_answer)\n # cycle through answers and build Q/A pairs\n answers = response.xpath(self.gt.css_to_xpath('.answercell .post-text')).extract()\n for answer_number in range(len(answers)):\n question_answer_copy = question_answer.copy()\n question_answer_copy = self.fill_answer(response, question_answer_copy, answer_number)\n question_answer_list.append(question_answer_copy)\n return question_answer_list",
"def check_responses(self, button):\n with self.out:\n clear_output()\n\n for i, question in enumerate(self.questions):\n self.create_feedback(i+1, question.correct())",
"def post_answer(request):\n if request.method == 'GET':\n response_data = list(range(10))\n # geodata = response.json()\n return Response(\n data=response_data\n )\n # snippets = Snippet.objects.all()\n # serializer = SnippetSerializer(snippets, many=True)\n # return Response(serializer.data)\n\n elif request.method == 'POST':\n data = request.data\n print(type(data))\n userResponse.append(data)\n if int(data['questionCode']) in userQuestions:\n user_question = userQuestions[int(data['questionCode'])]\n print(user_question)\n\n # get response and movie list\n updatedMovieList = imdb.get_imdb_movies()\n robotMessage = assistant.ask_assistant(user_question)\n responseData = {\"nextQuestionString\": robotMessage,\"nextQuestionCode\": int(data['questionCode'])+1,\"updatedMovieList\" : updatedMovieList}\n return Response(\n data=responseData\n )",
"def next_question(self):\n while True:\n current_question = self.question_list[self.question_number]\n user_answer = input(f\"Q.{self.question_number + 1}: {current_question.text} (True\"\n f\"/False)?: \")\n if user_answer.lower() in ['true', 'false']:\n break\n else:\n print(\"Incorrect choice. Please retry\")\n self.question_number += 1\n self.check_answer(user_answer, current_question.answer)",
"def next_question(self):\n current_question = self.question_list[self.question_number]\n number = self.question_number + 1\n\n user_answer = input(\"Q.\" + str(number)+ \": \" + current_question.text + \"(True/False : ?)\")\n self.question_number += 1\n\n self.check_answer(user_answer , current_question.answer)",
"def next(self):\n question = self._get_item(self._current_page['questions'], 'position', self._current_question['position'] + 1)\n if question is None:\n page = self._get_item(self._current_survey['pages'], 'position', self._current_page['position'] + 1)\n if page is None: # no more question\n raise StopIteration()\n\n self._current_page = page\n question = self._get_item(self._current_page['questions'], 'position', 1)\n\n self._current_question = self._format_question(question)",
"def gonext():\n next = cherrypy.request.params.get('next', '')\n if next != '':\n redirect(next)"
] | [
"0.75370693",
"0.7309631",
"0.69628924",
"0.6704508",
"0.6669402",
"0.6529099",
"0.65054333",
"0.6428117",
"0.6416601",
"0.63072085",
"0.62077636",
"0.6194221",
"0.61758107",
"0.6150198",
"0.61199987",
"0.60824966",
"0.6030395",
"0.59834605",
"0.58911955",
"0.58771056",
"0.5837625",
"0.58370095",
"0.5835953",
"0.58180887",
"0.5804654",
"0.5771668",
"0.5753857",
"0.575078",
"0.57425034",
"0.57078576"
] | 0.74533087 | 1 |
after the survey is done, land on a thank-you page | def thank_you():
return redirect('/thankyou') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def thank_you(request):\n return render(request, 'core/thank_you.html')",
"def thanks(request):\n if 'contacted' in request.session:\n return render(request, 'thanks.html')\n else:\n return redirect('contact_us')",
"def finish_survey():\n return render_template(\"thanks.html\")",
"def say_thanks():\n survey_id = session[CURRENT_SURVEY_KEY]\n survey = surveys[survey_id]\n responses = session[RESPONSES_KEY]\n\n html = render_template(\"thanks.html\", survey=survey, responses=responses)\n\n # Set cookie noting this survey is done so they can't retake it\n response = make_response(html)\n response.set_cookie(f\"completed_{survey_id}\", \"yes\", max_age=60)\n return response",
"def add_thankyou():\n\n final_thanks = \"Thank you for taking our survey\"\n\n return render_template(\n \"thankyou.html\",\n thanks=final_thanks\n )",
"def thanks(request):\n assert isinstance(request, HttpRequest)\n\n return render(\n request,\n 'AscensionESports_Baseline/Thanks.html',\n {\n 'background': getSiteBackground(),\n 'color': getSiteColor(),\n 'title':\"You are now signed up!\",\n 'year': datetime.now().year,\n }\n )",
"def thank_you():\n donor = get_donor()\n if donor != 'q':\n donation = get_donation()\n add_donation(donor, donation, DONORS)\n display_email(donor, donation)\n handle_user_input(get_user_input())",
"def account_activation_sent(request):\r\n\treturn render(request, 'account_activation_sent.html')",
"def thanks(request):\n return render(request, 'SNP_Feature_View/thanks.html')",
"def waiting_confirmation(self):",
"def registration_done(request):\n dc_settings = request.dc.settings\n dc1_settings = DefaultDc().settings\n text_blocks = [\n _('Thank you for registering at %s.') % dc_settings.SITE_NAME,\n _('You should receive an email shortly. Please click on the link in the email to activate your account.'),\n _('If you don\\'t receive an email, please check your spam folder.'),\n ]\n\n if dc1_settings.SMS_REGISTRATION_ENABLED:\n text_blocks.append(_('Once your account is active, you will receive a text message (SMS) with your password.'))\n\n return render(request, 'gui/note.html', {\n 'header': _('Registration almost complete!'),\n 'blocks': text_blocks,\n })",
"def say_thanks():\n return render_template(\"completion.html\")",
"def awaiting_payment(self):",
"def get_finished_url(self):\n return reverse('thank_you_for_your_order')",
"def thanks():\n return render_template('submitted.html')",
"def i_check_that_the_form_has_been_subimtted():\n driver.find_element_by_id(\"submit_message\").click()\n assert \"Contact Confirmation\" in driver.title",
"def send_thank_you(fullname):\n print(f'Thank you {fullname} for your generous donation!')",
"def account_activation_sent(request):\n current_user = request.user\n if current_user.is_authenticated():\n return HttpResponseRedirect('/')\n return render(request, 'registration/activation_complete.html')",
"def thank_you(request):\n purchase = request.session.get('purchase')\n purchase_items = []\n for id in purchase:\n product = get_object_or_404(Product, pk=id)\n purchase_items.append(product)\n\n request.session['purchase'] = {}\n\n context = {'purchase_items': purchase_items}\n return render(request, \"thank_you.html\", context)",
"def serve(self, request, *args, **kwargs):\n\n template = self.get_template(request)\n\n if request.method == 'POST':\n\n form = self.get_form(request.POST, page=self, user=request.user)\n\n if form.is_valid():\n self.process_form_submission(form)\n return HttpResponseRedirect(self.url + '?thank=you')\n\n else:\n\n thanks = request.GET.get('thank', False)\n if thanks:\n form = None\n template = self.get_landing_page_template(request)\n if self.thanks_page_title:\n self.title = self.thanks_page_title\n else:\n form = self.get_form(page=self, user=request.user)\n\n context = self.get_context(request)\n context['form'] = form\n if form:\n context['conditional_rules'] = json.dumps(form.conditional_rules)\n\n return render(\n request,\n template,\n context\n )",
"def thanks(request):\n # Let's grab that temporary authorization code Slack's sent us from\n # the request's parameters.\n code_arg = request.GET['code']\n # The bot's auth method to handles exchanging the code for an OAuth token\n pyBot.auth(code_arg)\n return render(request, \"thanks.html\")",
"def requestSubmitted(request):",
"def action_my_payslip_sent(self):\n self.ensure_one()\n template = self.env.ref('payroll_email.email_template_for_my_payroll')\n if template:\n self.env['mail.template'].browse(template.id).send_mail(self.id,force_send=True)\n self.flag = True",
"def you_win(request):\n # filter originals by paid is false and auction is closed\n now = datetime.now()\n auction_items_to_be_paid = Original.objects.exclude(end_date_time__lte=now, paid=\"True\")\n if not auction_items_to_be_paid:\n redirect(reverse(originals)) \n # if it is empty, then redirect to auctions.html\n\n return render(request, 'youwin.html', { 'auction_items_to_be_paid': auction_items_to_be_paid })",
"def credit_deliverer():\n return True",
"def contact_linkup(self, request, pk):\n obj_api = api()\n title_contact = \"Tu contacto Linkup\"\n token = request.session['token']\n resp = obj_api.get(slug='sellers/' + pk + \"/\", token=token)\n return render(request, 'frontend/actors/client/my_account.html', {'data_user': resp, \n 'title_contact': title_contact})",
"def send_single_thank_you():\n update_lists()\n donor_name = get_name_input()\n\n if donor_name == \"quit\":\n print(\"No donor name entered, exiting to menu\")\n else:\n donor_amount = check_number_input()\n\n if donor_name not in donor_totals_list:\n firstname, lastname = donor_name.split(\" \")\n add_donor(firstname, lastname, donor_name)\n add_donation(donor_name, donor_amount)\n else:\n for donor in donor_totals_list:\n if donor.fullname == donor_name:\n add_donation(donor_name, donor_amount)\n print('\\nDear {},'.format(donor_name))\n print('''\\tThank you for your generous donation of ${:,.2f}\\n\n Sincerely, \\nThe ChickTech Donations Department\\n'''.format(\n donor_amount))\n update_lists()",
"def proceed(self):\n pass",
"def test_first_page_passes(self):\n\n self.page.open_site(PageLocators.PREVIOUS_LINK)\n self.page.fill_all_fields()\n self.page.send_the_data()",
"def test_submit_reason_success(self):\n data = {'other_reason': 'I dont like the color.'}\n response = self.uriel.post('/submit_reason', data, follow=True)\n self.assertRedirects(response, '/', 302)"
] | [
"0.6788264",
"0.6595573",
"0.6461199",
"0.6460274",
"0.6422279",
"0.63809615",
"0.6344948",
"0.622954",
"0.6163851",
"0.6097609",
"0.60253763",
"0.59629446",
"0.59608006",
"0.5955534",
"0.5943139",
"0.5873326",
"0.57648176",
"0.57403934",
"0.5713635",
"0.5680469",
"0.56595933",
"0.5610204",
"0.5600539",
"0.55973065",
"0.5525217",
"0.5495591",
"0.5474843",
"0.545982",
"0.5447106",
"0.5440474"
] | 0.66740227 | 1 |
renders thank you HTML to page | def add_thankyou():
final_thanks = "Thank you for taking our survey"
return render_template(
"thankyou.html",
thanks=final_thanks
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def thank_you(request):\n return render(request, 'core/thank_you.html')",
"def thanks():\n return render_template('submitted.html')",
"def thanks(request):\n assert isinstance(request, HttpRequest)\n\n return render(\n request,\n 'AscensionESports_Baseline/Thanks.html',\n {\n 'background': getSiteBackground(),\n 'color': getSiteColor(),\n 'title':\"You are now signed up!\",\n 'year': datetime.now().year,\n }\n )",
"def thanks(request):\n return render(request, 'SNP_Feature_View/thanks.html')",
"def say_thanks():\n return render_template(\"completion.html\")",
"def finish_survey():\n return render_template(\"thanks.html\")",
"def credits():\n return render_template('credits.html')",
"def feedback():\n return render_template(\"feedback.html\")",
"def hello_hbnb():\n return render_template('contact_us.html')",
"def say_thanks():\n survey_id = session[CURRENT_SURVEY_KEY]\n survey = surveys[survey_id]\n responses = session[RESPONSES_KEY]\n\n html = render_template(\"thanks.html\", survey=survey, responses=responses)\n\n # Set cookie noting this survey is done so they can't retake it\n response = make_response(html)\n response.set_cookie(f\"completed_{survey_id}\", \"yes\", max_age=60)\n return response",
"def thank_you():\n\n return redirect('/thankyou')",
"def thanks(request):\n # Let's grab that temporary authorization code Slack's sent us from\n # the request's parameters.\n code_arg = request.GET['code']\n # The bot's auth method to handles exchanging the code for an OAuth token\n pyBot.auth(code_arg)\n return render(request, \"thanks.html\")",
"def contact_us():\n return render_template('home/contact-us.html')",
"def account_activation_sent(request):\r\n\treturn render(request, 'account_activation_sent.html')",
"def send_thank_you(fullname):\n print(f'Thank you {fullname} for your generous donation!')",
"def serve(self, request, *args, **kwargs):\n\n template = self.get_template(request)\n\n if request.method == 'POST':\n\n form = self.get_form(request.POST, page=self, user=request.user)\n\n if form.is_valid():\n self.process_form_submission(form)\n return HttpResponseRedirect(self.url + '?thank=you')\n\n else:\n\n thanks = request.GET.get('thank', False)\n if thanks:\n form = None\n template = self.get_landing_page_template(request)\n if self.thanks_page_title:\n self.title = self.thanks_page_title\n else:\n form = self.get_form(page=self, user=request.user)\n\n context = self.get_context(request)\n context['form'] = form\n if form:\n context['conditional_rules'] = json.dumps(form.conditional_rules)\n\n return render(\n request,\n template,\n context\n )",
"def thanks(request):\n if 'contacted' in request.session:\n return render(request, 'thanks.html')\n else:\n return redirect('contact_us')",
"def success_view(request):\n return render(request, 'contact/contact_success.html')",
"def contact():\n return render_template(\n 'contact.html',\n nav=nav,\n title='Contact me',\n year=datetime.now().year,\n message='The following are ways to contact me'\n )",
"def createThankYouEmail(self):\n result = (\"\\nDear {:s},\\n\\n\"\n \"\\tThank you so much for your generous donation of ${:,.2f}!\\n\\n\"\n \"\\tIt will be put to very good use.\\n\\n\"\n \"\\t\\tSincerely,\\n\\t\\t\\t- The Team\".format(self.name, self.getTotDonation())\n )\n return result",
"def contact():\n return render_template(\n 'contact.html',\n title='Contact',\n year=datetime.now().year,\n message='My contact page.'\n )",
"def contact():\r\n return render_template(\r\n 'contact.html',\r\n title='Contact',\r\n year=datetime.now().year,\r\n message='Your contact page.'\r\n )",
"def contact():\r\n return render_template(\r\n 'contact.html',\r\n title='Contact',\r\n year=datetime.now().year,\r\n message='Your contact page.'\r\n )",
"def contact():\n return render_template('contact.html')",
"def contact():\n return render_template('contact.html',\n title='联系',\n year=datetime.now().year,\n message='如果需要联系我')",
"def contact():\n return render_template(\n 'contact.html',\n title='Contact',\n year=datetime.now().year,\n img_tichonet = '/static/pics/tichonet.png',\n message='Your contact page.'\n )",
"def thank_you_message(self, name, donation_amount):\n\t\tthank_you_message = \"\\nThank you {0:s} for you generous donation of ${1:.2f}.\\n\".format(name, round(donation_amount,2))\n\t\treturn thank_you_message",
"def contact():\n return render_template(\n 'contact.html',\n title='Contact',\n year=\"2020\",\n message='Your contact page.'\n )",
"def contact():\n return render_template(\n 'contact.html',\n title='Contact',\n year=datetime.now().year,\n message='Your contact page.'\n )",
"def contact():\n return render_template(\n 'contact.html',\n title='Contact',\n year=datetime.now().year,\n message='Your contact page.'\n )"
] | [
"0.8062041",
"0.77734506",
"0.75290006",
"0.74883515",
"0.7457509",
"0.6948474",
"0.6882326",
"0.6873065",
"0.6788622",
"0.6718006",
"0.66567886",
"0.659958",
"0.6525142",
"0.64785516",
"0.6360265",
"0.62707627",
"0.62213707",
"0.6219066",
"0.61733526",
"0.6143223",
"0.61296624",
"0.6107885",
"0.6107885",
"0.6099836",
"0.60991526",
"0.60773313",
"0.60618603",
"0.60526",
"0.60325193",
"0.60325193"
] | 0.79702383 | 1 |
Computes sacreBLEU score for current submission. | def _compute_score(self):
sgml_path = str(self.sgml_file.name)
text_path = sgml_path.replace('.sgm', '.txt')
ref_path = 'testsets/wmt18.ende.ref.txt'
from sacrebleu import process_to_text, corpus_bleu
from pathlib import Path
if not Path(text_path).exists():
process_to_text(sgml_path, text_path)
hyp_stream = [x for x in open(text_path, encoding='utf-8')]
ref_stream = [r for r in open(ref_path, encoding='utf-8')]
bleu = corpus_bleu(hyp_stream, [ref_stream])
self.score = bleu.score
self.save() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compute_stability_scores(self):\n self.mutations, self.scores, self.matrix = stability(\n self.seq,\n alphabet='ACGU',\n fold_vectorize=self.fold_vectorize)",
"def calculScore(self):\n for cell in self.notComputeRouter:\n if(cell.isCovered==True):\n self.score += 1000\n self.score += self.budget",
"def _calculate_score(self):\n mul = self._check_board()\n if mul > 0:\n inc = 100 * mul + ((mul - 1) * 25)\n self.score += inc",
"def getSubmissionScore(submission):\r\n return submission.score",
"def scoring(self):\n pass",
"def score_solution(g, s):\n pass",
"def evaluate(self):\n self.matrix = pair_matrix(self)\n score = 0\n for x in range(len(self.seq)):\n for y in range(x, len(self.seq)):\n if self.matrix[x, y] == 1:\n if abs(x - y) < 5:\n score -= 7\n if self.seq[x] == complementary(self.seq[y]):\n score += 2\n elif self.seq[x] == 'U' and self.seq[y] == 'G' or self.seq[x] == 'G' and self.seq[y] == 'U':\n score += 1\n else:\n score -= 5\n return score",
"def score( self ):\r\n result = 0.0\r\n for rr in self.ee.getRsrcs( ):\r\n value = self.scoreRsrc( rr )\r\n result += value\r\n print( \"INFO: Value for the schedule is %s \" % ( rr, result ) )\r\n return( result )",
"def get_score(self, solution: np.array) -> float:\n pass",
"def update_score():\n pass",
"def score(self):",
"def _scoring(self):\n val = 0 #score will be totaled here\n\n for c in range(0, self.width): #for every column in the board\n for r in range(0, self.height): #for every row of a column\n #see if we can move...\n possible_up = (r + 3 < self.height) #up?\n possible_left = (c - 3 > 0) #left?\n possible_right = (c + 3 < self.width) #right?\n\n #diagonally up, left\n if possible_up and possible_left:\n val+= self._up_left(c, r)\n\n #up\n if possible_up:\n val+= self._up(c,r)\n\n #diagonally up, right\n if possible_up and possible_right:\n val+= self._up_right(c,r)\n\n #right\n if possible_right:\n val+= self._right(c,r)\n\n\n return val",
"def score(self):\n # loop over aminoacids in protein and calculate how often H and C are surrounded by H and C\n for aminoacid in self.aminoacids:\n if aminoacid.aminoacid_type == \"H\":\n self.stability = self.stability + (-1 * self.surrounded_by(aminoacid.location, \"H\", aminoacid.aminoacid_number)) + (-1 * self.surrounded_by(aminoacid.location, \"C\", aminoacid.aminoacid_number))\n elif aminoacid.aminoacid_type == \"C\":\n self.stability = self.stability + (-5 * self.surrounded_by(aminoacid.location, \"C\", aminoacid.aminoacid_number)) + (-1 * self.surrounded_by(aminoacid.location, \"H\", aminoacid.aminoacid_number))\n self.stability = self.stability/2\n return int(self.stability)",
"def score(self):\n result = 0\n\n idx = self.cups.index(1)\n idx += 1\n if idx >= len(self.cups):\n idx = 0\n # ok, keep adding things until we get back to 1\n while 1 != self.cups[idx]:\n # add this value..\n result *= 10\n result += self.cups[idx]\n # and on to the next one..\n idx += 1\n if idx >= len(self.cups):\n idx = 0\n\n return result",
"def score(self, s):\n fv = s.feature_vector\n product = fv.dot(self.params.T)[0, 0]\n return s.score(lmwt=self.lmwt) + product",
"def _compute_score(img_binary: np.ndarray, s: float) -> float:\n img_sheared = _shear_img(img_binary, s, 0)\n h = img_sheared.shape[0]\n\n img_sheared_mask = img_sheared > 0\n first_fg_px = np.argmax(img_sheared_mask, axis=0)\n last_fg_px = h - np.argmax(img_sheared_mask[::-1], axis=0)\n num_fg_px = np.sum(img_sheared_mask, axis=0)\n\n dist_fg_px = last_fg_px - first_fg_px\n col_mask = np.bitwise_and(num_fg_px > 0, dist_fg_px == num_fg_px)\n masked_dist_fg_px = dist_fg_px[col_mask]\n\n score = sum(masked_dist_fg_px ** 2)\n return score",
"def scoring(self):\n return -100 if self.loss_condition() else 0",
"def score(self):\n result = 1\n one_node = self.cups.locate_node(1)\n a = one_node.next()\n b = a.next()\n\n result = a.value * b.value\n\n return result",
"def calculScore(affectation, carac, dicoCaracServeur):\n \n # Initialisation du score minimal\n score = capaciteGarantie(affectation, 0, carac, dicoCaracServeur)\n \n # rq : l'initialisation etant faite, on part de 1 et non de zero\n for p in range(1, carac[\"P\"]):\n tmpScore = capaciteGarantie(affectation, p, carac, dicoCaracServeur)\n if tmpScore < score:\n score = tmpScore\n \n return score",
"def update_score(self):\n self.score = TurboMQ.calculate_fitness(self.result, self.graph)",
"def score_sc1(self, prediction_file):\n fh = TempFile()\n gs1, _ = self.download_gs()\n script = self.classpath + os.sep + \"DREAM_Olfaction_scoring_Q1.pl\"\n cmd = \"perl %s %s %s %s\"\n cmd = cmd % (script, prediction_file, fh.name, gs1)\n shellcmd(cmd)\n df = pd.read_csv(fh.name, sep='\\t', index_col=None).ix[0]\n fh.delete()\n return df\n\n\n # score sub1 = (zint +zple +zdec)/3\n # sigma_int = 0.0787\n # sigma_ple = 0.176\n # signa_dec = 0.0042\n\n # final is average of zscores",
"def judge(name):\n score = 0\n for scoreID, scorer, weight in weights:\n subscore = scorer(name)\n score += subscore * weight\n name.scores[scoreID] = subscore\n name.score = score\n return score",
"def score(self) -> int:\n return self.__state.score()",
"def workout_score(a, scal):\r\n # Check if score is empty\r\n if check_if_empty(a):\r\n return np.nan\r\n if scal == 0:\r\n # Some people sign up for Rx then enter scaled scores...\r\n if check_if_scaled(a):\r\n return np.nan\r\n if a.split(\" \")[-1] == 's':\r\n return extract_score(a.replace(\" - s\",\"\"))\r\n return extract_score(a)",
"def score(entry):\n score = time_seconds(entry['Swim'])\n score += time_seconds(entry['Run'])\n score -= (int(entry['Push-ups']) * 2)\n score -= int(entry['Sit-ups'])\n score -= int(entry['Pull-ups']) * 6\n return score",
"def calculate_score(player_cards):\n score = sum(player_cards)\n return score",
"def updateScore(score):\n return score + 1",
"def f1_score(self):",
"def get_score(self) -> int:\n return self.rstate.score()",
"def calculate(self):\n\n s_sum = 0\n class_num = len(self.scores)\n \n for i in range(class_num):\n s_sum += self.scores[i]\n\n av = float(s_sum)/class_num\n if av >= 90:\n return 'O'\n elif av >= 80:\n return 'E'\n elif av >= 70:\n return 'A'\n elif av >= 55:\n return 'P'\n elif av >= 40:\n return 'D'\n else:\n return 'T'"
] | [
"0.6732517",
"0.65753704",
"0.6566397",
"0.64567846",
"0.6423692",
"0.6418522",
"0.6341895",
"0.6328618",
"0.6322662",
"0.62882644",
"0.6285559",
"0.62766033",
"0.6234183",
"0.6184874",
"0.61445343",
"0.6140803",
"0.6136644",
"0.6134206",
"0.6118318",
"0.61123616",
"0.6053035",
"0.60519034",
"0.6040781",
"0.60332966",
"0.59941685",
"0.59641045",
"0.5951034",
"0.59363514",
"0.5928451",
"0.5920002"
] | 0.66659355 | 1 |
Create a sparse representation of ``sequences``. | def sparse_tuple_from(sequences):
indices = []
values = []
for n, seq in enumerate(sequences):
indices.extend(zip([n] * len(seq), range(len(seq))))
values.extend(seq)
indices = np.asarray(indices, dtype=np.int64)
values = np.asarray(values, dtype=np.int32)
shape = np.asarray([len(sequences), indices.max(0)[1] + 1], dtype=np.int64)
# return tf.SparseTensor(indices=indices, values=values, shape=shape)
return indices, values, shape | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sparse_tuple_from(sequences, dtype=np.int32):\n indices = []\n values = []\n\n for n, seq in enumerate(sequences):\n indices.extend(zip([n]*len(seq), range(len(seq))))\n values.extend(seq)\n\n indices = np.asarray(indices, dtype=np.int64)\n values = np.asarray(values, dtype=dtype)\n shape = np.asarray([len(sequences), np.asarray(indices).max(0)[1]+1], dtype=np.int64)\n\n return (indices, values, shape)",
"def sparse_tuple_from(sequences, dtype=np.int32):\n indices = []\n values = []\n\n for n, seq in enumerate(sequences):\n indices.extend(zip([n]*len(seq), range(len(seq))))\n values.extend(seq)\n\n indices = np.asarray(indices, dtype=np.int64)\n values = np.asarray(values, dtype=dtype)\n shape = np.asarray([len(sequences), np.asarray(indices).max(0)[1]+1], dtype=np.int64)\n\n return indices, values, shape",
"def sparse_tuple_from_label(self, sequences, dtype=np.int32):\n indices = []\n values = []\n \n for n, seq in enumerate(sequences):\n indices.extend(zip([n] * len(seq), range(len(seq))))\n values.extend(seq)\n \n indices = np.asarray(indices, dtype=np.int64)\n values = np.asarray(values, dtype=dtype)\n shape = np.asarray([len(sequences), np.asarray(indices).max(0)[1] + 1], dtype=np.int64)\n \n return indices, values, shape",
"def buildTrainingSequences(voc, maxlen=50, step=3):\n \n text, sym_indices, _ = voc\n sentences = []\n next_syms = []\n \n syms = set(text) # unique symbols (chars or words)\n \n for i in range(0, len(text) - maxlen, step):\n sentences.append(text[i: i + maxlen])\n next_syms.append(text[i + maxlen])\n print('nb sequences:', len(sentences))\n \n X = np.zeros((len(sentences), maxlen), dtype=np.int)\n y = np.zeros((len(sentences), len(syms)), dtype=np.bool)\n\n for i, sentence in enumerate(sentences):\n for j, sym in enumerate(sentence):\n X[i,j] = sym_indices[sym] \n \n y[i, sym_indices[next_syms[i]]] = 1 # one-hot enconding\n\n return (X,y)",
"def sparse_tuple_form(sequences, dtype=np.int32):\n indices = []\n values = []\n\n for n, seq in enumerate(sequences):\n indices.extend(zip([n] * len(seq), range(len(seq))))\n values.extend(seq)\n\n indices = np.asarray(indices, dtype=np.int32)\n values = np.asarray(values, dtype=dtype)\n shape = np.asarray([len(sequences), np.asarray(indices).max(0)[1] + 1], dtype=np.int32)\n\n return indices, values, shape",
"def sparse_tuple_from_label(sequences, dtype=np.int32):\n indices = []\n values = []\n\n for n, seq in enumerate(sequences):\n indices.extend(zip([n] * len(seq), range(len(seq))))\n values.extend(seq)\n\n indices = np.asarray(indices, dtype=np.int64)\n values = np.asarray(values, dtype=dtype)\n shape = np.asarray([len(sequences), np.asarray(indices).max(0)[1] + 1], dtype=np.int64)\n\n return indices, values, shape",
"def sparse_tuple_from_label(sequences, dtype=np.int32):\n indices = []\n values = []\n\n for n, seq in enumerate(sequences):\n indices.extend(zip([n] * len(seq), range(len(seq))))\n values.extend(seq)\n\n indices = np.asarray(indices, dtype=np.int64)\n values = np.asarray(values, dtype=dtype)\n shape = np.asarray([len(sequences), np.asarray(indices).max(0)[1] + 1], dtype=np.int64)\n\n return indices, values, shape",
"def vectorize_sequences(sequences, dimension=word_dict_size):\n results = np.zeros((len(sequences), dimension))\n for ii, sequence in enumerate(sequences):\n results[ii, sequence] = 1\n return results",
"def create_one_hot(sequence):\n\n transformed = []\n for x in sequence:\n transformed.append(cs.DNA[x])\n return np.array(transformed)",
"def test_return_sparse():\n X = Vectorizer(strategy=\"bow\", return_sparse=True).fit_transform(X_text, y10)\n assert all(pd.api.types.is_sparse(X[c]) for c in X.columns)",
"def seq2Vec(sequences):\r\n global dict_words_n_vectors\r\n for sent in sequences:\r\n for i in range(len(sent)):\r\n if sent[i] in dict_words_n_vectors:\r\n sent[i] = dict_words_n_vectors[sent[i]]\r\n else:\r\n sent[i] = np.zeros(300)\r\n return np.array(sequences, dtype=\"float32\")",
"def build_seq_embeddings(self, input_seqs):\n with tf.variable_scope(\"seq_embedding\"), tf.device(\"/cpu:0\"):\n embedding_map = tf.get_variable(\n name=\"map\",\n shape=[self.config.vocab_size, self.config.embedding_size],\n initializer=self.initializer)\n seq_embeddings = tf.nn.embedding_lookup(embedding_map, input_seqs)\n\n return seq_embeddings",
"def sequence_to_onehot(seq):\n seq_one_hot = []\n for i in range(MAX_SEQ_LENGTH):\n if i < len(seq):\n if seq[i] == 'A':\n seq_one_hot.append([1,0,0,0])\n elif seq[i] == 'T':\n seq_one_hot.append([0,1,0,0])\n elif seq[i] == 'G':\n seq_one_hot.append([0,0,1,0])\n elif seq[i] == 'C':\n seq_one_hot.append([0,0,0,1])\n else:\n seq_one_hot.append([0,0,0,0])\n else:\n seq_one_hot.append([0,0,0,0])\n seq_one_hot = np.array(seq_one_hot).reshape(MAX_SEQ_LENGTH,4)\n return seq_one_hot",
"def _identity_sparse(d, stype=\"csr\", dtype=complex):\n return sp.eye(d, dtype=dtype, format=stype)",
"def __init__(self, sequences, config=Config()):\n self.sequences = sequences\n self.config = config\n self.matrix = self.initializeMatrix(sequences)\n self.enterelement([0]*len(self.sequences), Score([0]*len(self.sequences), 0)) # Set the origin of the matrix to 0\n self.counter = [1] + ([0] * (len(sequences)-1)) # Creates a counter which is used to transverse a matrix of arbitrary size",
"def to_sparse(self):\n if self.rep.fmt == 'sparse':\n return self\n\n return self.from_rep(self.rep.to_sdm())",
"def hot1_dna(seqs_1hot):\n\n singleton = False\n if seqs_1hot.ndim == 2:\n singleton = True\n seqs_1hot = np.expand_dims(seqs_1hot, 0)\n\n seqs = []\n for si in range(seqs_1hot.shape[0]):\n seq_list = ['A'] * seqs_1hot.shape[1]\n for li in range(seqs_1hot.shape[1]):\n if seqs_1hot[si, li, 0] == 1:\n seq_list[li] = 'A'\n elif seqs_1hot[si, li, 1] == 1:\n seq_list[li] = 'C'\n elif seqs_1hot[si, li, 2] == 1:\n seq_list[li] = 'G'\n elif seqs_1hot[si, li, 3] == 1:\n seq_list[li] = 'T'\n else:\n seq_list[li] = 'N'\n\n seqs.append(''.join(seq_list))\n\n if singleton:\n seqs = seqs[0]\n\n return seqs",
"def build_seq_embeddings(self):\n with tf.variable_scope(\"seq_embedding\"), tf.device(\"/cpu:0\"):\n embedding_map = tf.get_variable(\n name=\"map\",\n shape=[self.config.vocab_size, self.config.word_embedding_size],\n initializer=self.initializer)\n \n # We need to store the normalized lookup table for efficient mapping of embedding vectors to closest words\n self.normed_embedding_map = tf.nn.l2_normalize(embedding_map, dim=1)\n \n seq_embeddings = tf.nn.embedding_lookup(embedding_map, self.input_seqs) \n # seq_embeddings has the shape (batch_size, sequence_length, sentence_length, embedding_size)\n # meaning, for each index in input_seqs (with shape (batch_size, sequence_length, sentence_length)) it stores an embedding vector\n\n #print('Shape seq_embeddings: ' + str(seq_embeddings.get_shape()))\n\n self.seq_embeddings = seq_embeddings",
"def convert_to_one_hot(sequences, sequence_length, chars_length, char_to_index, labels):\n x = np.zeros((len(sequences), sequence_length, chars_length), dtype=np.bool)\n y = np.zeros((len(sequences), chars_length), dtype=np.bool)\n for i, sentence in enumerate(sequences):\n for t, char in enumerate(sentence):\n x[i, t, char_to_index[char]] = 1\n y[i, char_to_index[labels[i]]] = 1\n\n return x, y",
"def sparse_constructor(self, indices: 'np.ndarray', values: 'np.ndarray', shape: List[int]) -> 'SparseTensor':\n return SparseTensor(indices, values, shape)",
"def to_input_variable(sequences, vocab, tgt_len=-1, training=True, append_boundary_sym=False,\n batch_first=False, shuffle=False):\n from .tensor_ops import get_long_tensor\n if not isinstance(sequences[0], list):\n sequences = [sequences]\n if append_boundary_sym:\n sequences = [['<s>'] + seq + ['</s>'] for seq in sequences]\n\n pad_sents = padding_input(sequences, tgt_len=tgt_len)\n seqs = word2id(pad_sents, vocab)\n\n if not training:\n with torch.no_grad():\n seqs_var = Variable(get_long_tensor(seqs), requires_grad=False)\n else:\n seqs_var = Variable(get_long_tensor(seqs), requires_grad=False)\n if not batch_first:\n seqs_var = seqs_var.transpose(1, 0).contiguous()\n shuffle_dim = -1\n else:\n shuffle_dim = 0\n\n if shuffle:\n from .tensor_ops import shuffle_2d\n return shuffle_2d(inputs=seqs_var, dim=shuffle_dim)\n\n return seqs_var",
"def _build(self, sequences, verbose=False):\n if verbose:\n print(f\"Creating {type(self).__name__}...\")\n\n # Type\n self._sequence_type = type(sequences[0])\n \n # Check if hashable\n if not isinstance(sequences[0], Hashable):\n sequences = [tuple(val) for val in sequences]\n\n # Make sure sequences are list-type and sorted\n sequences = sorted(list(set(sequences)))\n\n # Insert all sequences\n i = 0\n start = time()\n for sequence in sequences:\n i += 1\n self._insert(sequence)\n\n self._finish()\n if verbose:\n print(\n f\"Creation of {type(self).__name__} with {self._sequence_count} sequences \"\n f\"took {time() - start:.3f}s.\\n\")",
"def create_sparseDB():\n datas = data.Kmercount_to_matrix()\n datas.run()\n print('***Sparse matrix created***')",
"def prepare_labels(state_mapping, sequences):\n encoded_labels = [[state_mapping[state] for state in label] for label in sequences]\n \n depth = len(state_mapping)\n one_hot_labels = [[one_hot_encode(label, depth) for label in sequence] for sequence in encoded_labels]\n one_hot_labels = [np.asarray(ls) for ls in one_hot_labels]\n return one_hot_labels",
"def preprocess_inde(self, sequence, src_seq):\r\n sequence = sequence + [len(src_seq) - 1] # add sen\r\n sequence = torch.Tensor(sequence)\r\n return sequence",
"def generate_sparse(n, s):\n x = np.zeros(n)\n I = np.random.randint(0, n, s)\n x[I] = 1\n return x",
"def makesparse(matrix):\n n = matrix[0].size\n elements = []\n for i in range(n):\n for j in range(n):\n if matrix[i][j] != 0 :\n temp = MatrixElement(i, j, matrix[i][j])\n elements.append(temp)\n return SparseMatrix(n, elements)",
"def encode_sequence(seq, mapping=None):\n if not mapping:\n mapping = GetNucleotideMap()\n\n n = len(seq)\n embedding = np.zeros(n\n , dtype=int)\n for i in range(n):\n if seq[i] in mapping:\n embedding[i] = mapping[seq[i]]\n else:\n raise Exception('%s is not a valid nucleotide!' % i)\n\n return embedding",
"def list_to_sparse(inputs):\n\n data = list(itertools.chain(*inputs))\n row = list(itertools.chain(\n *[itertools.repeat(i, len(x)) for i, x in enumerate(inputs)]))\n col = list(itertools.chain(*[range(len(x)) for x in inputs]))\n\n s = coo_matrix((data, (row, col)), shape=(\n len(inputs), np.max([len(x) for x in inputs])))\n\n return s",
"def keep_only_positive_unique_sequences(self) -> None:\n\n already_visited_sequences = set()\n for s1, current_state_sequence in enumerate(self.mcts_statesequences):\n assert current_state_sequence.exctract_and_do_hash_analysis is False\n\n # Ignore empty sequences\n if current_state_sequence.status != -1:\n seq_key = \"_\".join([x.state_id for x in current_state_sequence.states])\n\n if seq_key not in already_visited_sequences:\n already_visited_sequences.add(seq_key)\n current_state_sequence.exctract_and_do_hash_analysis = True"
] | [
"0.64814466",
"0.6476671",
"0.6423379",
"0.63320357",
"0.63310236",
"0.6280472",
"0.6280472",
"0.6027212",
"0.5975199",
"0.59632",
"0.5899897",
"0.5783154",
"0.57495433",
"0.57459867",
"0.57062215",
"0.5704625",
"0.56536067",
"0.5650805",
"0.564712",
"0.56219155",
"0.5614227",
"0.5612356",
"0.5601246",
"0.55924904",
"0.55809194",
"0.5554045",
"0.5550693",
"0.5515628",
"0.5500544",
"0.5498615"
] | 0.69004315 | 0 |
Return the harm'th harmonic of the freq'th frequency | def HarmonicCurrent(self, freq, harm):
f = harm*self.freqs[freq].Wph
return self.Ip(f) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def harmonic(num):\n if num < 2:\n return 1\n else:\n return 1 / num + (harmonic(num-1))",
"def getFWHM(antenna, freq):\n diameter = getDiameter(antenna)\n lam = 299792458.0 / (freq * 1e9)\n fwhmo = lam / math.pi * 180.0 * 60.0\n fwhm = 1.22 * fwhmo / diameter\n return fwhm",
"def harmonic(dist,spring,d=1):\n if dist <= d:\n return 0\n \n return 0.5*spring*(dist-1)",
"def harmonic_mean(self):\n return self.count() / sum(1/number for number in self.numbers)",
"def Harmonic_potential(x):\n k=1\n return 0.5*k*(x**2)",
"def harmonicOscillator_d_heatCapacity_d_freq(T, freq):\n x = freq / (0.695039 * T) # kB = 0.695039 cm^-1/K\n exp_x = math.exp(x)\n one_minus_exp_x = 1.0 - exp_x\n return x * exp_x / one_minus_exp_x / one_minus_exp_x * (2.0 + x + 2.0 * x * exp_x / one_minus_exp_x) * x / freq",
"def frequency(self, w, s=1.0):\n x = w * s\n # Heaviside mock\n Hw = np.array(w)\n Hw[w <= 0] = 0\n Hw[w > 0] = 1\n return np.pi ** -0.25 * Hw * np.exp((-((x - self.w0) ** 2)) / 2)",
"def alt_harmonic(i):\n return sum([((-1)**(n + 1)) / n for n in range(1, i + 1)])",
"def harmonic(n, s=1):\n res = 0.0\n for i in xrange(1, n+1):\n res += 1 / (float(i) ** s)\n return res",
"def perceptual_amplitude_dbb(frequency: float) -> float:\n # See http://www.sengpielaudio.com/BerechnungDerBewertungsfilter.pdf\n\n num = 12200.0 ** 2. * frequency ** 3\n den = (frequency ** 2. + 20.6) * (frequency ** 2. + 12200. ** 2.) * np.sqrt(frequency ** 2. + 158.5 ** 2.)\n return num / den",
"def harmonic_mean(numbers):\n return 2 * numbers[0] * numbers[1] / sum(numbers)",
"def fharmonicmean(items):\n if len(items) == 0:\n return 0.\n\n # create a list with 1/xi values\n s = 0.\n for item in items:\n s += 1./item\n\n return float(len(items)) / s",
"def harmonicOscillator_heatCapacity(T, freq):\n x = freq / (0.695039 * T) # kB = 0.695039 cm^-1/K\n exp_x = math.exp(x)\n one_minus_exp_x = 1.0 - exp_x\n return x * x * exp_x / one_minus_exp_x / one_minus_exp_x",
"def is_monic(f):\n return dmp_monic_p(f.rep, f.lev, f.dom)",
"def test_FourierAnalyser_freq_odd_case(self): \n P = PSignal.PSignal(np.array([-2, 8, 6, 4, 1, 0, 3, 5,8], dtype=float), i_samp_freq = 0.1 )\n spectrum = PeriodogramEngine.FourierAnalyser(P)\n self.assertTrue( np.allclose(spectrum.periodogram_freq(), np.array([ 1.11111111, 2.22222222, 3.33333333, 4.44444444])) )",
"def harmonics(y, n, /, axis=-1):\n # Get fourier transform\n y = np.moveaxis(y, axis, -1)\n fft = np.fft.fft(y, axis=-1)\n\n # Remove frequencies outside range. The FFT will have some error and give\n # non-zero imaginary components, but we can get magnitude or naively cast to real\n fft[..., 0] = 0\n fft[..., n + 1:-n] = 0\n yf = np.real(np.fft.ifft(fft, axis=-1))\n # yf = np.abs(np.fft.ifft(fft, axis=-1))\n return np.moveaxis(yf, -1, axis)",
"def freq(self, frequency: Optional[int]):",
"def sinh(x):\n return 0.0",
"def get_cw_freq(self):\n return self.get_frequency(self.synth)",
"def lharmonicmean (inlist):\r\n sum = 0\r\n for item in inlist:\r\n sum = sum + 1.0/item\r\n return len(inlist) / sum",
"def wavelength(self,freq):\n return self.phase_velocity()/freq",
"def psd(self, frequency):\n #if frequency < 1 / self.obstime: return np.nan\n #if frequency > 1 / self.cadence: return np.nan\n outs = np.ones(len(frequency))\n outs[frequency < 1/self.obstime] = np.nan\n outs[frequency > 1/self.cadence] = np.nan\n return (2 * 1./self.cadence * self.rms**2)*outs",
"def DSS28_beamwidth(freq):\n return 0.54/freq",
"def freq(self, freq: Optional[int] = None) -> Optional[int]:\n ...",
"def freq():",
"def noise(self, freq: int, /) -> None:",
"def filter_harmonic(self, n):\n tvec = np.arange(self.nsamp)/self.sr\n hf = np.interp(tvec, self.harmonic_times(n), self.harmonic_amplitudes(n))\n idx = (self.f0<self.fmin) | (self.f0>self.fmax) | (self.f0*n>self.sr/2.2)\n rmsmin = np.max(np.abs(hf))*self.ampthr\n idx = idx | (np.abs(hf)<rmsmin)\n hf[idx] = 0\n return hf",
"def tone_to_freq(tone):\n return math.pow(2, (tone - 69.0) / 12.0) * 440.0",
"def filter_harmonic(self, n):\n tvec = np.arange(self.nsamp)/self.sr\n hf = np.interp(tvec, self.harmonic_times(n), self.harmonic_amplitudes(n))\n idx = (self.f<self.fmin) | (self.f>self.fmax) | (self.f*n>self.sr/2.2)\n rmsmin = np.max(np.abs(hf))*self.ampthr\n idx = idx | (np.abs(hf)<rmsmin)\n hf[idx] = 0\n return hf",
"def freq2erb(freq_hz):\n return 9.265 * np.log(1 + freq_hz / (24.7 * 9.265))"
] | [
"0.7025545",
"0.69666004",
"0.67388034",
"0.66742104",
"0.66665643",
"0.65769804",
"0.65501016",
"0.64974946",
"0.64428353",
"0.6394897",
"0.63119864",
"0.6299196",
"0.62838924",
"0.6217714",
"0.6166127",
"0.61557734",
"0.61395794",
"0.61143035",
"0.60838854",
"0.6046865",
"0.6006524",
"0.5992481",
"0.5990123",
"0.5976472",
"0.5953061",
"0.59309447",
"0.5924513",
"0.5910435",
"0.59082115",
"0.58987963"
] | 0.7094828 | 0 |
Deabbreviates the given label_str as a Drake tools/workspace label. If the label_str is None, returns None. If the label_str is relative, interprets it relative to the "//tools/workspace/{name}/" package and returns an absolute label. Otherwise, returns the label_str unchanged. | def _resolve_drake_abbreviation(name, label_str):
if label_str == None:
return None
if label_str.startswith(":"):
return "@drake//tools/workspace/" + name + label_str
return label_str | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse_cmdline(label_string):\n if not label_string.startswith('//'):\n label_string = '//' + label_string\n return Label.parse(label_string, None)",
"def parse_buildfile(label_string):\n if not isinstance(label_string, str):\n raise iga.fargparse.ParseError()\n return Label.parse(label_string, iga.context.current()['package'])",
"def parse(label_string, current_package):\n if label_string.startswith('//'):\n package_start = 2\n package_end = (_find_or_none(label_string, ':', package_start) or\n len(label_string))\n package = label_string[package_start:package_end]\n if not package:\n raise IgaError('empty package of %r' % label_string)\n else:\n package_start = package_end = 0\n package = None\n\n if label_string[package_end:package_end+1] == ':':\n target_start = package_end + 1\n else:\n target_start = package_end\n target = label_string[target_start:]\n\n if not package and not target:\n raise IgaError('cannot parse %r' % label_string)\n package = package or current_package\n if not package:\n raise IgaError('cannot parse package part from %r' % label_string)\n target = target or _default_target(package)\n if not target:\n raise IgaError('cannot parse target part from %r' % label_string)\n\n return Label(\n package=PurePosixPath(package),\n target=PurePosixPath(target),\n )",
"def suggest_label_name(runtime_addr, context, move_id):\n\n assert context is not None\n runtime_addr = memorymanager.RuntimeAddr(runtime_addr)\n #if runtime_addr == 0x6a7:\n # print(\"BBB\", hex(context), move_id)\n\n # Work out the best move_id to use for the label.\n #\n # The basic idea is that if we can't assign a move ID based on a\n # matching of context and runtime_addr move IDs, *if* there is any\n # existing label for any of those move IDs we will use it rather\n # than forcing a new label in base_move_id to be created.\n #\n # The actual rules are:\n #\n # 1. If a move ID is already specified, use that.\n # 2. Choose the move ID of the binary_address if it's the same\n # as a move ID of the runtime address.\n # 3. Look for any label at the runtime address that has a\n # `move_id` that matches one at either the runtime address or\n # binary address. If found, we have got our ideal label so\n # return that ((name, move ID), False) tuple.\n # We look in order through the local labels, then the explicit\n # labels, then the expressions.\n # 4. If there is only one valid move_id at the runtime address,\n # select that move ID. (It makes more sense to create a label\n # there than in the base move ID).\n # 5. All else has failed, so use the base move ID.\n\n # Rule 1\n if move_id is None:\n move_id = movemanager.move_id_for_binary_addr[context]\n move_ids2 = movemanager.move_ids_for_runtime_addr(runtime_addr)\n #if runtime_addr == 0x6a7:\n # print(\"CCC\", move_id, move_ids2)\n\n # Rule 2\n if move_id not in move_ids2:\n move_ids3 = [move_id] + list(move_ids2)\n\n # Get the label\n label = labelmanager.labels.get(runtime_addr)\n for candidate_move_id in move_ids3:\n # Rule 3: Local labels\n if candidate_move_id in label.local_labels:\n for (name, start_addr, end_addr) in label.local_labels[candidate_move_id]:\n if start_addr <= context < end_addr:\n return ((name, candidate_move_id), False)\n\n # Rule 3: Explicit labels\n if candidate_move_id in label.explicit_names:\n for name in label.explicit_names[candidate_move_id]:\n return ((name.name, candidate_move_id), False)\n\n # Rule 3: Expressions\n if candidate_move_id in label.expressions:\n for expression in label.expressions[candidate_move_id]:\n return ((expression, candidate_move_id), False)\n\n if len(move_ids2) == 1:\n # Rule 4\n move_id = min(move_ids2)\n else:\n # Rule 5\n move_id = movemanager.base_move_id\n\n label = labelmanager.labels.get(runtime_addr)\n #print(\"YYY %04x\" % runtime_addr)\n assert label is not None\n\n # If the runtime address has a label name, choose the first one.\n # Check the local labels, then explicit names then expressions in\n # our chosen move ID. If that fails try the base move ID.\n\n # TODO: We might want to move this logic into the Label object, and\n # it could potentially pick one of its own explicit names out based\n # on context. 
For now we prefer the first one, since that's how the\n # code used to behave and we're trying to gradually refactor.\n\n # We are just returning the first name arbitrarily, since we\n # have no basis to choose anything else.\n\n # We look for a local label, explicit label or expression in the\n # current move_id, or failing that in the base_move_id.\n for (name, start_addr, end_addr) in label.local_labels[move_id]:\n if start_addr <= context < end_addr:\n return ((name, move_id), False)\n for name in label.explicit_names[move_id]:\n return ((name.name, move_id), False)\n for expression in label.expressions[move_id]:\n return ((expression, move_id), False)\n\n for (name, start_addr, end_addr) in label.local_labels[movemanager.base_move_id]:\n if start_addr <= context < end_addr:\n return ((name, move_id), False)\n for name in label.explicit_names[movemanager.base_move_id]:\n return ((name.name, None), False)\n for expression in label.expressions[movemanager.base_move_id]:\n return ((expression, move_id), False)\n\n # If no explicit label or expression is suitable, try the optional\n # labels.\n if runtime_addr in optional_labels:\n s, base_addr = optional_labels[runtime_addr]\n if base_addr is not None:\n # TODO: If our \"suggestion\" is not acted on, we will have\n # added this base label unnecessarily. I don't think this\n # is a big deal, but ideally we wouldn't do it.\n add_label(base_addr, optional_labels[base_addr][0], None)\n return ((s, None), False) # TODO: optional labels don't have a move_id at the moment?\n\n # Make up a brand new label name.\n #\n # if the binary address is not code, then call it \"lXXXX\"\n # TODO: Is this runtime->binary stuff correct?\n binary_addr, _ = movemanager.r2b(runtime_addr)\n if binary_addr is None or not is_code(binary_addr):\n label = utils.force_case(\"l%04x\" % runtime_addr)\n else:\n # TODO: Should probably be user-configurable, but maybe the \"c\"\n # prefix here is not ideal because I personally tend to mix it\n # up with the following hex digits - a letter > 'f' would be\n # better - perhaps \"x\" for \"executable\"? (should be\n # user-configurable as I say, but I am inclined to change the\n # default)\n\n # Assume label is \"cXXXX\", c for code, but may change to\n # \"sub_cXXXX\" or \"loop_cXXXX\"\n label = utils.force_case(\"c%04x\" % runtime_addr)\n addr_refs = trace.references.get(binary_addr, [])\n if all(trace.cpu.is_subroutine_call(runtime_addr) for runtime_addr in addr_refs):\n # Found a subroutine, label is \"sub_XXXX\"\n label = \"sub_\" + label\n else:\n # Look for loops\n #\n # If there is one reference, and it's a branch backwards to\n # the target address, and within loop_limit bytes of the\n # current address then it's a \"loop_cXXXX\" name.\n if len(addr_refs) == 1:\n addr_ref = list(addr_refs)[0]\n\n # TODO: Maybe check if the instruction at runtime_addr\n # is an RTS and don't use loop_ prefix if it is - or\n # getting fancier, check if there's a straight line\n # sequence terminating in RTS at runtime_addr and don't\n # use loop_ prefix in that case either\n if trace.cpu.is_branch_to(addr_ref, runtime_addr) and 0 <= movemanager.b2r(addr_ref) - runtime_addr < config.get_loop_limit():\n label = \"loop_\" + label\n if config.get_indent_loops():\n while binary_addr <= addr_ref:\n c = classifications[binary_addr]\n if c is not None:\n if c.is_code(binary_addr):\n c.indent(binary_addr)\n binary_addr += c.length()\n else:\n binary_addr += 1\n return ((label, move_id), True)",
"def get_final_label(addr, context, move_id):\n #if addr == 0x6a7:\n # print(\"FFF\", hex(context), move_id)\n assert trace.cpu.trace_done\n assert memorymanager.is_valid_binary_addr(addr)\n assert memorymanager.is_valid_binary_addr(context)\n assert move_id is None or movemanager.is_valid_move_id(move_id)\n name, move_id = label_maker(addr, context, move_id)\n if is_simple_name(name):\n labelmanager.labels[addr].add_explicit_name(name, move_id)\n\n return name",
"def addr(label_name):\n\n if not utils.is_string_type(label_name):\n return None\n\n return labelmanager.addr(label_name)",
"def delete_manifest_label(label_uuid, tag_manifest):\n\n # Find the label itself.\n label = get_manifest_label(label_uuid, tag_manifest)\n if label is None:\n return None\n\n if not label.source_type.mutable:\n raise DataModelException(\"Cannot delete immutable label\")\n\n # Delete the mapping records and label.\n (TagManifestLabelMap.delete().where(TagManifestLabelMap.label == label).execute())\n\n deleted_count = TagManifestLabel.delete().where(TagManifestLabel.label == label).execute()\n if deleted_count != 1:\n logger.warning(\"More than a single label deleted for matching label %s\", label_uuid)\n\n deleted_count = ManifestLabel.delete().where(ManifestLabel.label == label).execute()\n if deleted_count != 1:\n logger.warning(\"More than a single label deleted for matching label %s\", label_uuid)\n\n label.delete_instance(recursive=False)\n return label",
"def sanitize_label(label: str) -> str:\n if \"-\" in label:\n prefix, suffix = label.split(\"-\", 1)\n suffix = suffix.split(\"(\")[-1]\n return f\"{prefix}-{suffix}\"\n else:\n return label",
"def make_label(self, node):\n\t\tcurstring = str(node.__class__)[13:-2]\n\t\tif isinstance(node, ast.Name):\n\t\t\tcurstring = node.id\n\t\telif isinstance(node, ast.Num):\n\t\t\tcurstring = str(node.n)\n\t\telif isinstance(node, ast.Str):\n\t\t\tcurstring = node.s\n\n\t\tif isinstance(node, ast.Load) or isinstance(node, ast.Store) or \\\n\t\t\tisinstance(node, ast.Param) or isinstance(node, ast.Add) or \\\n\t\t\tisinstance(node, ast.Sub) or isinstance(node, ast.Mult):\n\t\t\treturn None\n\n\t\ttry:\n\t\t\tself.labels[str(node)] = curstring\n\t\t\treturn str(node)\n\t\texcept AttributeError:\n\t\t\treturn None",
"def consolidate_label(label):\n return label.split(\"-\")[0] if label.startswith(\"O\") else label",
"def consolidate_label(label):\n return label.split(\"-\")[0] if label.startswith(\"O\") else label",
"def get_label(runtime_addr, context, move_id=None):\n\n runtime_addr = int(runtime_addr)\n context = memorymanager.BinaryAddr(context)\n\n assert 0 <= runtime_addr <= 0x10000 # 0x10000 is valid for labels, not code/data TODO?\n assert memorymanager.is_valid_binary_addr(context)\n assert move_id is None or movemanager.is_valid_move_id(move_id)\n\n # We need to ensure the labelmanager knows there's a label at this\n # address so it can emit a definition. It's tempting to try to\n # defer this until get_final_label() is called, but it's good to\n # have the label exist as early as possible - for example, this\n # means post-tracing code analysis can see where labels exist and\n # what references them.\n # TODO: It is a bit clunky to have to do the \"ensure it exists\" via\n # this dummy dictionary lookup though.\n dummy = labelmanager.labels[runtime_addr]\n\n return utils.LazyString(\"%s\", lambda: get_final_label(runtime_addr, context, move_id))",
"def normalisesym(self, label):\n return label",
"def normalize_label(label: str) -> str:\n label = re.sub(r\"['\\\"`]+\", \"\", label) # remove apostrophes\n label = re.sub(r\"[-/\\\\ \\t_]+\", \" \", label) # normalize separators\n lower_count = sum(map(str.islower, label))\n upper_count = sum(map(str.isupper, label))\n if \" \" not in label and lower_count > 0 and upper_count > 0:\n # camel case to \"normal case\"\n label = re.sub(r\"([a-z])([A-Z])\", r\"\\g<1> \\g<2>\", label)\n label = re.sub(r\"(^[Tt]he |^[Aa] )\", \"\", label) # drop determiner\n return label.lower()",
"def label_to_name(label):\n return \"Tree\"",
"def label_to_name(self, label):\n return self.labels[label]",
"def label(self, name: str) -> Optional[str]:\n _args = [\n Arg(\"name\", name),\n ]\n _ctx = self._select(\"label\", _args)\n return _ctx.execute_sync(Optional[str])",
"def short_branch_name(branch):\n return branch.replace('refs/heads/', '')",
"def normalize_label(label):\n label = normalize('NFKD', label)\n label = re.sub('/[^a-z0-9-_:.]/g', '-', label)\n label = label.lower()\n return label",
"def label_to_name(self, label):\n\t\treturn self.labels[label]",
"def _GetRelativeLabelPath(self, label):\n\n if self._AreLabelsPaths():\n return label\n\n path = \"\"\n components = self.GetLabelComponents(label)\n if not components:\n return path\n \n for c in components[:-1]:\n path = os.path.join(path, c + self.suite_extension)\n path = os.path.join(path, components[-1])\n return path",
"def label_to_name(self, label):\n\t\t\treturn self.labels[label]",
"def cleanLabel(label):\n if label.startswith('^'):\n label = label[1:] + '_prexisting'\n label = label.replace('?', 'Q') # happens with mRnaCompare filter labels\n return label",
"def extract_label(node):\n if (isinstance(node, UnaryOp) and\n isinstance(node.op, USub) and\n isinstance(node.operand, UnaryOp) and\n isinstance(node.operand.op, USub) and\n isinstance(node.operand.operand, Name)):\n return node.operand.operand.id\n else:\n return None",
"def ex_label(self,label,argl):\n if len(label) > 0 and label[0] != '_':\n return label\n comment = ''\n for i in argl:\n phrase = ''\n if i == 'l':\n phrase = label\n elif i in self._labels.keys():\n phrase = self._labels[i]\n comment += phrase\n return comment",
"def get_label_name(label):\n\tindex = np.argmax(label)\n\tlabels = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']\n\treturn labels[int(index)]",
"def delete_label(self, label_id: str):\n return delete_label(self.api_key, label_id)",
"def _GetRelativeLabelPath(self, label):\n\n if self._AreLabelsPaths():\n return label\n\n return os.path.join(*self.GetLabelComponents(label))",
"def get_target_group_fully_qualified_name(self, short_name):\n return '{}-{}'.format(\n self.get_balancer_name(),\n short_name,\n )",
"def extract_label(selector):\n return selector.split('=')[-1][:-1]"
] | [
"0.5493452",
"0.53157383",
"0.5308865",
"0.52162045",
"0.5032356",
"0.49955347",
"0.49076423",
"0.48855197",
"0.48234028",
"0.47723868",
"0.47723868",
"0.4730411",
"0.47035912",
"0.46912217",
"0.4640282",
"0.46265084",
"0.4619898",
"0.4597021",
"0.45949158",
"0.45918074",
"0.459001",
"0.45688376",
"0.45279312",
"0.45173037",
"0.4504252",
"0.4486377",
"0.4472783",
"0.4436175",
"0.43939593",
"0.43877485"
] | 0.75893784 | 0 |
Download an archive of the provided GitHub repository and commit to the output path and extract it. | def github_download_and_extract(
repository_ctx,
repository,
commit,
mirrors,
output = "",
sha256 = "0" * 64,
extra_strip_prefix = "",
commit_pin = None):
urls = _urls(
repository = repository,
commit = commit,
mirrors = mirrors,
)
repository_ctx.download_and_extract(
urls,
output = output,
sha256 = _sha256(sha256),
type = "tar.gz",
stripPrefix = _strip_prefix(repository, commit, extra_strip_prefix),
)
# Create a summary file for Drake maintainers.
generate_repository_metadata(
repository_ctx,
repository_rule_type = "github",
repository = repository,
commit = commit,
version_pin = commit_pin,
sha256 = sha256,
urls = urls,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _download(url, outpath=None, dirname=None, branch='master', release=None):\n six.print_('downloading...')\n outfolder = outpath or os.getcwd()\n file, archive_url = get_archive_url(url, branch, release)\n six.print_(archive_url)\n if dirname:\n outfolder = \"{}/{}.zip\".format(outfolder, dirname)\n return file, wget.download(archive_url, out=outfolder)",
"def download(repo_url, sha, working_dir):\n print 'Downloading %s ...' % (sha)\n sf_zip = os.path.join(working_dir, 'sf.gz')\n with open(sf_zip, 'wb+') as f:\n f.write(requests.get(github_api(repo_url) + '/zipball/' + sha).content)\n zip_file = ZipFile(sf_zip)\n zip_file.extractall(working_dir)\n zip_file.close()\n\n for name in zip_file.namelist():\n if name.endswith('/src/'):\n src_dir = name\n break\n\n return os.path.join(working_dir, src_dir)",
"def rupture(url, outpath=None, branch='master', dirname=None, release=None):\n try:\n file, filename = _download(\n url, outpath=outpath, \n dirname=dirname, branch=branch, \n release=release\n )\n base, cs = _unzip(filename)\n _delete(filename)\n if release or branch != 'master':\n return\n to_find = \"{}/{}-{}\".format(base, file, branch)\n _newname = dirname or file\n shutil.move(to_find, base+\"/\"+_newname)\n except Exception as e:\n six.print_(traceback.format_exc())\n six.print_(\"Cannot download the repo. Could you check the repo url ?\")",
"def download_from_github(self):\n logging.debug('download_from_github called')\n self.response = requests.get(f'{self.full_url}{self.filename}')\n status_code = self.response.status_code\n if status_code == 200:\n logging.debug('Success response gave status code 200')\n with open(f'{self.out_dir}docs/downloaded/{self.filename}',\n 'wb') as csv_written:\n csv_written.write(self.response.content)\n else:\n logging.debug('Error in requests download'\n 'status_code=%d', status_code)\n sys.exit()\n\n return self.response",
"def download(repo_name, root_dir, download_dir, hashalgo, github_token=None):\n\n if github_token:\n github_release._github_token_cli_arg = github_token\n\n if not os.path.isdir(download_dir):\n os.mkdir(download_dir)\n\n hashalgo_dir = os.path.join(root_dir, hashalgo)\n if not os.path.isdir(hashalgo_dir):\n os.mkdir(hashalgo_dir)\n\n hashalgo_csv = download_fileindex_csv(\n repo_name, hashalgo_dir, hashalgo, github_token\n )\n fileindex = read_fileindex_csv(hashalgo_csv)\n\n logging.debug(hashalgo + \": downloading release assets\")\n # Find out which filenames are present in multiple versions (need to give them unique names)\n filenames = [checksum_filename[1] for checksum_filename in fileindex]\n from collections import Counter\n\n # Sort based on filename and filedate\n fileindex.sort(key=lambda a: (a[COLUMN_FILENAME].casefold(), a[COLUMN_FILEDATE]))\n\n filenames_counter = Counter(filenames)\n # download saves files to current working directory, so we need to temporarily\n # change working dir to hashalgo_dir folder\n with cd(hashalgo_dir):\n fileindex_with_local_filename = []\n for fileindex_item in fileindex:\n checksum = fileindex_item[COLUMN_CHECKSUM]\n filename = fileindex_item[COLUMN_FILENAME]\n filedate = fileindex_item[COLUMN_FILEDATE] if len(fileindex_item) > COLUMN_FILEDATE else \"\"\n filepath = os.path.join(hashalgo_dir, checksum)\n if not os.path.isfile(filepath):\n if not github_release.gh_asset_download(repo_name, hashalgo, checksum):\n logging.error(\n hashalgo\n + \": failed to download \"\n + filename\n + \" (\"\n + checksum\n + \")\"\n )\n continue\n logging.debug(\n hashalgo + \": downloaded \" + filename + \" (\" + checksum + \")\"\n )\n\n # determine local filename\n if filenames_counter[filename] == 1:\n # unique filename\n local_filename = filename\n else:\n # multiple versions of the filename with different content\n # add checksum as suffix to distinguish them\n local_filename = filename + \".\" + checksum\n local_filepath = os.path.join(download_dir, local_filename)\n\n # set file name and date from index\n copyfile(filepath, local_filepath)\n set_filedate(local_filepath, date_from_utc_string(filedate if filedate else DEFAULT_FILE_DATE_UTC_STRING))\n\n # save local fileindex\n fileindex_with_local_filename.append([checksum, filename, filedate, local_filename])\n\n # Create new hashalgo.csv from existing and incoming files\n write_fileindex_csv(hashalgo_csv, fileindex)\n hashalgo_local_md = os.path.join(download_dir, hashalgo + \"_local.md\")\n write_fileindex_md(hashalgo_local_md, fileindex_with_local_filename, repo_name, hashalgo, include_local_filename=True)",
"def github_archive(\n name,\n repository = None,\n commit = None,\n commit_pin = None,\n sha256 = \"0\" * 64,\n build_file = None,\n patches = None,\n extra_strip_prefix = \"\",\n local_repository_override = None,\n mirrors = None,\n **kwargs):\n if repository == None:\n fail(\"Missing repository=\")\n if commit == None:\n fail(\"Missing commit=\")\n if mirrors == None:\n fail(\"Missing mirrors=; see mirrors.bzl\")\n\n build_file = _resolve_drake_abbreviation(name, build_file)\n patches = [\n _resolve_drake_abbreviation(name, one_patch)\n for one_patch in (patches or [])\n ]\n\n if local_repository_override != None:\n path = local_repository_override\n if extra_strip_prefix:\n path += \"/\" + extra_strip_prefix\n if build_file == None:\n native.local_repository(\n name = name,\n path = path,\n )\n else:\n native.new_local_repository(\n name = name,\n build_file = build_file,\n path = path,\n )\n return\n\n # Once we've handled the \"local_repository_override\" sidestep, we delegate\n # to a rule (not a macro) so that we have more leeway in the actions we can\n # take (i.e., so we can do more than just a simple download-and-extract).\n _github_archive_real(\n name = name,\n repository = repository,\n commit = commit,\n commit_pin = commit_pin,\n sha256 = sha256,\n build_file = build_file,\n patches = patches,\n extra_strip_prefix = extra_strip_prefix,\n mirrors = mirrors,\n **kwargs\n )",
"def fetch(self) -> None:\n archive_path = os.path.join(self._output_dir, self._archive_name)\n self._download_file(self._parsed_url.original_url, archive_path)\n try:\n with zipfile.ZipFile(archive_path, \"r\") as zip_file:\n zip_file.extractall(path=self._output_dir)\n except zipfile.BadZipfile:\n raise REANAFetcherError(\"The provided zip file is not valid\")\n\n os.remove(archive_path)\n\n if not self._discover_workflow_specs():\n top_level_entries = [\n os.path.join(self._output_dir, entry)\n for entry in os.listdir(self._output_dir)\n ]\n # Some zip archives contain a single directory with all the files.\n if len(top_level_entries) == 1 and os.path.isdir(top_level_entries[0]):\n top_level_dir = top_level_entries[0]\n # Move all entries inside the top level directory\n # to the output directory.\n for entry in os.listdir(top_level_dir):\n shutil.move(os.path.join(top_level_dir, entry), self._output_dir)\n os.rmdir(top_level_dir)",
"def download_dependency_github(name, repo, tag, temp_path, build_path, config):\n wp = os.getcwd()\n os.chdir(temp_path)\n # Clone into the repo, pull the specified tag\n clone_cmd = f\"git clone https://github.com/{repo}.git\"\n tag_cmd = f\"git checkout master && git fetch && git fetch --tags && git checkout {tag}\"\n os.system(clone_cmd)\n os.chdir(name)\n os.system(tag_cmd)\n os.chdir(wp)\n # Move the contents of GameData into the build directory\n shutil.copytree(os.path.join(temp_path, name, \"GameData\", name), os.path.join(build_path, \"GameData\", name))",
"def _Download( self ):\n self._DownloadPipe += PackageUtil.ExecuteSimpleCommand( \"git\", [\"clone\", \"[email protected]:mastbaum/avalanche.git\", self.GetInstallPath()], None, os.getcwd() )\n return",
"def maybe_download_and_extract():\n dest_directory = FLAGS['model_dir']\n if not os.path.exists(dest_directory):\n os.makedirs(dest_directory)\n filename = DATA_URL.split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n if not os.path.exists(filepath):\n\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' %\n (filename, float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n\n filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)\n print()\n statinfo = os.stat(filepath)\n print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')\n\n tarfile.open(filepath, 'r:gz').extractall(dest_directory)",
"def maybe_download_and_extract():\n dest_directory = FLAGS.model_dir\n if not os.path.exists(dest_directory):\n os.makedirs(dest_directory)\n filename = DATA_URL.split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n if not os.path.exists(filepath):\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' % (\n filename, float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)\n print()\n statinfo = os.stat(filepath)\n print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')\n tarfile.open(filepath, 'r:gz').extractall(dest_directory)",
"def maybe_download_and_extract():\n dest_directory = FLAGS.model_dir\n if not os.path.exists(dest_directory):\n os.makedirs(dest_directory)\n filename = DATA_URL.split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n if not os.path.exists(filepath):\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' % (\n filename, float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)\n print()\n statinfo = os.stat(filepath)\n print('Succesfully downloaded', filename, statinfo.st_size, 'bytes.')\n tarfile.open(filepath, 'r:gz').extractall(dest_directory)",
"def maybe_download_and_extract():\n dest_directory = FLAGS.model_dir\n if not os.path.exists(dest_directory):\n os.makedirs(dest_directory)\n filename = DATA_URL.split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n if not os.path.exists(filepath):\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' % (\n filename, float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath,\n reporthook=_progress)\n print()\n statinfo = os.stat(filepath)\n print('Succesfully downloaded', filename, statinfo.st_size, 'bytes.')\n tarfile.open(filepath, 'r:gz').extractall(dest_directory)",
"def download():\n try:\n cli.run(\n [URL, '--output', TEMP_DIR],\n )\n except SystemExit:\n return None",
"def __download(self):\n\n # Use the default repository if set to True\n if self.repository is True:\n self.repository = self.__default_repository\n\n if not self.repository and not self.url:\n tarball = 'ucx-{}.tar.gz'.format(self.__version)\n self.url = '{0}/v{1}/{2}'.format(self.__baseurl, self.__version,\n tarball)",
"def maybe_download_and_extract():\n dest_directory = MODEL_DIR\n if not os.path.exists(dest_directory):\n os.makedirs(dest_directory)\n filename = DATA_URL.split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n if not os.path.exists(filepath):\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' % (\n filename, float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)\n print()\n statinfo = os.stat(filepath)\n print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')\n tarfile.open(filepath, 'r:gz').extractall(dest_directory)",
"def _download(self, url, rel_path):\n \n tmp_dir = \"TMP_DIR=`mktemp -d`;\"\n wget_cmd = [ tmp_dir, \"wget\", \"-nv\", \"-O\", \"$TMP_DIR/archive.tgz\", url, \";\" ]\n wget_cmd = ' '.join(wget_cmd)\n \n mkdir_cmd = \"mkdir -p %s ;\" % (\"./remote_resources/\" + rel_path)\n \n cleandir_cmd = \"rm -Rf %s/* ;\" % (\"./remote_resources/\" + rel_path)\n \n untar_cmd = [ \"tar\", \"xf\", \"$TMP_DIR/archive.tgz\", \"-C\", \"./remote_resources/%s\" % rel_path, \";\" ]\n untar_cmd = ' '.join(untar_cmd)\n \n remove_cmd = \"rm -Rf $TMP_DIR;\"\n \n return self._ssh(' '.join([ wget_cmd, mkdir_cmd, cleandir_cmd, untar_cmd, remove_cmd ]))",
"def fetch_repo(root, repo, url, destination_temp):\n\n print \"Fetching %s from %s\" % (repo, url)\n\n if root.exists('repos/%s' % repo):\n print \"Repo %s exists, issuing a git pull...\" % repo\n call('cd repos/%s; git pull' % repo, shell=True)\n else:\n print \"Repo %s does not exist, issuing a git clone...\" % repo\n\n # explicitely create dir as implicit creation fails on server\n root.makedir('%s/%s' % (destination_temp, repo))\n call('cd repos; git clone %s %s' % (url, repo), shell=True)\n # call('git clone %s %s/%s > /dev/null 2>&1' % (repo['url'], source, repo['id']), shell=True)",
"def download_from_uri(uri: str, dst: utils.ReadWritePath) -> str:\n if uri.startswith('github://'):\n raise NotImplementedError('Github sources not supported yet')\n\n path = utils.as_path(uri)\n if not path.exists():\n raise ValueError(f'Unsuported source: {uri}')\n\n # Download the main file\n python_module = path / f'{path.name}.py'\n python_module.copy(dst / python_module.name)\n\n # TODO(tfds): Should also support download on the extra files (e.g. label.txt,\n # util module,...)\n\n # Add the `__init__` file\n (dst / '__init__.py').write_text('')\n return python_module.stem",
"def download():\n \"\"\"\n \"The book p.79 have error.\n \"https://github.com/login/oauth/authorize?client_id=7e0a3cd836d3e544dbd9&redirect_uri=https%3A%2F%2Fgist.github.com%2Fauth%2Fgithub%2Fcallback%3Freturn_to%3Dhttps%253A%252F%252Fgist.github.com%252Fyoungsoul%252Ffc69665c5d08e189c57c0db0e93017a6&response_type=code&state=9b385430ee7cd1a75ca91c1d1cb6c565111f6b81e54a71f42ae9b22035241b9b\n \"\"\"\n subprocess.call([\n 'wget',\n 'https://github.com/amplab/datascience-sp14/raw/master/lab7/mldata/mnist-original.mat', \n '-P',\n 'origin_data/'\n ])\n logger.info('Download success!')",
"def download_untar(url, download_path, extract_path=None):\n file_name = url.split('/')[-1]\n if extract_path is None:\n extract_path = download_path\n tar_file_path = os.path.join(download_path, file_name)\n download(tar_file_path, url)\n sys.stdout.flush()\n print('Extracting {} archive into {}'.format(tar_file_path, extract_path))\n untar(tar_file_path, extract_path)\n os.remove(tar_file_path)",
"def download():\n response = requests.get(URL, stream=True)\n\n file = open(FILE_NAME, 'wb')\n file.write(response.content)\n\n with zipfile.ZipFile(FILE_NAME, 'r') as zip_ref:\n zip_ref.extractall()\n\n file.close()\n os.remove(FILE_NAME)",
"def fetch(self) -> None:\n try:\n repository = Repo.clone_from(\n self._parsed_url.original_url,\n self._output_dir,\n depth=1,\n no_single_branch=True,\n env={\"GIT_TERMINAL_PROMPT\": \"0\"},\n )\n except Exception:\n raise REANAFetcherError(\n \"Cannot clone the given Git repository. Please check that the provided \"\n \"URL is correct and that the repository is publicly accessible.\"\n )\n\n if self._git_ref:\n try:\n repository.remote().fetch(self._git_ref, depth=1)\n repository.git.checkout(self._git_ref)\n except Exception:\n raise REANAFetcherError(\n f'Cannot checkout the given Git reference \"{self._git_ref}\"'\n )\n\n shutil.rmtree(os.path.join(self._output_dir, \".git\"))",
"def download_and_unzip(url, zip_path, csv_path, data_folder):\n\n download_from_url(url, zip_path)\n\n unzip(zip_path, csv_path, data_folder)\n\n print('Done.')",
"def download_and_extract(down_dir=download_dir, url=tuda_url):\n\n wget.download(url, down_dir) \n tar_filepath = os.path.join(down_dir, \"german-speechdata-package-v2.tar.gz\")\n #with tarfile.open(tar_filepath, \"r\") as tar:\n # tar.extractall(down_dir)",
"def fetch(uri, output, b64=False):\n output = os.path.abspath(output)\n distdir, name = os.path.split(output)\n if os.path.exists(output):\n logging.info('Using existing download: %s', name)\n return\n\n logging.info('Downloading %s to %s', uri, output)\n os.makedirs(distdir, exist_ok=True)\n\n # Use kokoro build cache or Gentoo distdir if available.\n for envvar in ('KOKORO_GFILE_DIR', 'DISTDIR'):\n cache_dir = os.getenv(envvar)\n if cache_dir:\n cache_file = os.path.join(cache_dir, name)\n if os.path.exists(cache_file):\n logging.info(' Cache hit via %s', envvar)\n symlink(cache_file, output)\n return\n\n # Don't be verbose if running on CI systems.\n verbose = os.isatty(sys.stdout.fileno())\n\n # We use urllib rather than wget or curl to avoid external utils & libs.\n # This seems to be good enough for our needs.\n tmpfile = output + '.tmp'\n for _ in range(0, 5):\n try:\n with open(tmpfile, 'wb') as outfp:\n fetch_data(uri, outfp, verbose=verbose, b64=b64)\n break\n except ConnectionError as e:\n time.sleep(1)\n logging.warning('Download failed; retrying: %s', e)\n else:\n logging.error('Unabled to download; giving up')\n unlink(tmpfile)\n sys.exit(1)\n\n # Clear the progress bar.\n if verbose:\n print(' ' * 80, end='\\r')\n\n os.rename(tmpfile, output)",
"def main(file_url, file_path):\n\n # extract file from the link\n\n if not os.path.exists(file_path):\n os.makedirs(file_path, exist_ok=True)\n \n r = requests.get(str(file_url))\n\n #unzip the zip file\n z = zipfile.ZipFile(io.BytesIO(r.content))\n z.extractall(path = file_path)",
"def _maybe_download_and_extract(self, filename):\n if not os.path.exists(self.work_dir):\n os.mkdir(self.work_dir)\n filepath = os.path.join(self.work_dir, filename)\n if not os.path.exists(filepath):\n filepath, _ = urllib.urlretrieve(self.url + filename, filepath)\n statinfo = os.stat(filepath)\n log.info('Successfully downloaded', filename, statinfo.st_size,\n 'bytes.')\n log.info('Extracting zip file ... ')\n f = zipfile.ZipFile(filepath)\n f.extractall(path=self.work_dir)\n log.info('Extraction finished ... ')",
"def _download(data_folder): # pragma: no cover\n\n logger.info(f\"Downloading {SOURCE_URL}.\")\n\n with urlopen(SOURCE_URL) as zipresp:\n with zipfile.ZipFile(io.BytesIO(zipresp.read())) as zfile:\n zfile.extractall(data_folder)",
"def maybe_download_and_extract():\n dest_directory = FLAGS.data_dir\n if not os.path.exists(dest_directory):\n os.makedirs(dest_directory)\n filename = DATA_URL.split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n if not os.path.exists(filepath):\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' % (filename,\n float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n\n filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)\n print()\n statinfo = os.stat(filepath)\n print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')\n extracted_dir_path = os.path.join(dest_directory, 'cifar-10-batches-bin')\n if not os.path.exists(extracted_dir_path):\n tarfile.open(filepath, 'r:gz').extractall(dest_directory)"
] | [
"0.7299827",
"0.6996785",
"0.6851019",
"0.66118145",
"0.6502438",
"0.6454363",
"0.6326854",
"0.63167197",
"0.63033384",
"0.62513226",
"0.62509066",
"0.6237316",
"0.6226956",
"0.6197997",
"0.6196102",
"0.6155643",
"0.61142826",
"0.61126155",
"0.60822076",
"0.60154104",
"0.59854084",
"0.59773564",
"0.5953971",
"0.59463924",
"0.5943826",
"0.59142935",
"0.590513",
"0.59034467",
"0.58815825",
"0.58489704"
] | 0.77642673 | 0 |
Compute the strip prefix for a downloaded archive of the provided GitHub repository and commit. | def _strip_prefix(repository, commit, extra_strip_prefix):
repository_split = repository.split("/")
if len(repository_split) != 2:
fail("repository must be formatted as organization/project")
_, project = repository_split
# GitHub archives omit the "v" in version tags, for some reason.
if commit[0] == "v":
strip_commit = commit[1:]
else:
strip_commit = commit
result = project + "-" + strip_commit.replace("/", "-")
if extra_strip_prefix:
result += "/" + extra_strip_prefix
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def trim_repo_url(url):\n return url.replace(\".git\", \"\")",
"def removeprefix(self, x) -> String:\n pass",
"def github_archive(\n name,\n repository = None,\n commit = None,\n commit_pin = None,\n sha256 = \"0\" * 64,\n build_file = None,\n patches = None,\n extra_strip_prefix = \"\",\n local_repository_override = None,\n mirrors = None,\n **kwargs):\n if repository == None:\n fail(\"Missing repository=\")\n if commit == None:\n fail(\"Missing commit=\")\n if mirrors == None:\n fail(\"Missing mirrors=; see mirrors.bzl\")\n\n build_file = _resolve_drake_abbreviation(name, build_file)\n patches = [\n _resolve_drake_abbreviation(name, one_patch)\n for one_patch in (patches or [])\n ]\n\n if local_repository_override != None:\n path = local_repository_override\n if extra_strip_prefix:\n path += \"/\" + extra_strip_prefix\n if build_file == None:\n native.local_repository(\n name = name,\n path = path,\n )\n else:\n native.new_local_repository(\n name = name,\n build_file = build_file,\n path = path,\n )\n return\n\n # Once we've handled the \"local_repository_override\" sidestep, we delegate\n # to a rule (not a macro) so that we have more leeway in the actions we can\n # take (i.e., so we can do more than just a simple download-and-extract).\n _github_archive_real(\n name = name,\n repository = repository,\n commit = commit,\n commit_pin = commit_pin,\n sha256 = sha256,\n build_file = build_file,\n patches = patches,\n extra_strip_prefix = extra_strip_prefix,\n mirrors = mirrors,\n **kwargs\n )",
"def get_repo_sha(base_dir):\n try:\n sha = subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=base_dir)\n return sha.decode('utf-8').strip()\n except Exception as e:\n print(\"Failed to get repo sha for '%s': %s\" % (base_dir, e))\n return \"\"",
"def remove_prefix(z, prefix):\n if z.startswith(prefix):\n return re.sub(r\"^{}\".format(prefix), \"\", z)\n else:\n return z",
"def strip_prefix(string, prefix):\n assert string.startswith(prefix), \"{!r} is not a prefix of {!r}\".format(prefix, string)\n return string[len(prefix):]",
"def stripPrefix(prefix, string):\n\n if string.startswith(prefix):\n return string[len(prefix):]\n\n return string",
"def test_head_name(repository: Repository) -> None:\n head = repository._repository.references[\"HEAD\"]\n name = head.target.removeprefix(\"refs/heads/\")\n assert name == repository.head.name",
"def short_branch_name(branch):\n return branch.replace('refs/heads/', '')",
"def trim_repo_path(self, path):\n # get the repo first\n repo = self.find_repo(path)\n\n if not repo:\n return path\n\n # then try to trim the path\n if path.startswith(repo.path):\n return path[len(repo.path) :]\n elif path.startswith(repo.windows_path):\n return path[len(repo.windows_path) :]\n elif path.startswith(repo.linux_path):\n return path[len(repo.linux_path) :]\n elif path.startswith(repo.osx_path):\n return path[len(repo.osx_path) :]\n return path",
"def get_short_id(owner, repo, branch):\n from .main import app\n repo_parts = []\n if owner != app.config['DEFAULT_USER']:\n repo_parts.append('%s' % owner)\n if repo_parts or repo != app.config['DEFAULT_REPO']:\n repo_parts.append(repo)\n repo_id = '/'.join(repo_parts)\n #\n if repo_id == '':\n return branch\n elif branch == 'default':\n return repo_id\n elif repo_id == branch:\n return repo_id # e.g., pypy/extradoc has a branch extradoc, just return 'extradoc'\n else:\n return '%s[%s]' % (repo_id, branch)\n return branch",
"def show_prefix(self, current_path):\n p = Popen(\n [\"git\", \"rev-parse\", \"--show-prefix\"],\n stdout=PIPE,\n stderr=PIPE,\n cwd=os.path.join(self.root_dir, current_path),\n )\n my_output, my_error = p.communicate()\n if p.returncode == 0:\n result = {\n \"code\": p.returncode,\n \"under_repo_path\": my_output.decode(\"utf-8\").strip(\"\\n\"),\n }\n return result\n else:\n return {\n \"code\": p.returncode,\n \"command\": \"git rev-parse --show-prefix\",\n \"message\": my_error.decode(\"utf-8\"),\n }",
"def StripPrefix(string, prefix):\n assert string.startswith(prefix)\n return string[len(prefix):]",
"def git_sha1_commit():\n return local('git rev-parse --short HEAD', capture=True)",
"def _remove_prefix(self, path, prefix):\n expression = f\"_remove_prefix({repr(path)}, {repr(prefix)})\"\n return eval(expression, self.bzl_globals)",
"def gitstr():\n try:\n return \"%s\" % (open('.git/refs/heads/master').read().strip()[0:10])\n except FileNotFoundError:\n return \"\"\n except IndexError:\n return \"\"",
"def get_git_hash() -> Optional[str]:\n rv = _git('rev-parse', 'HEAD')\n if rv:\n return rv[:6]",
"def remove_prefix(val: str, prefix: str) -> str:\n return val[len(prefix):] if val.startswith(prefix) else val",
"def get_archive_url(url, branch='master', release=None):\n git_url = trim_repo_url(url)\n fragment = None\n file = git_url.split(\"/\")[-1]\n \n if release:\n fragment = \"/archive/{}.zip\".format(release)\n else:\n fragment = \"/archive/{}.zip\".format(branch)\n \n return file, git_url+fragment",
"def strip_optional_prefix(string, prefix):\n if string.startswith(prefix):\n string = string[len(prefix):]\n return string",
"def strip_dot_git(url: str) -> str:\n \"\"\"Strip trailing .git\"\"\"\n return url[: -len(\".git\")] if url.endswith(\".git\") else url",
"def GetPrefix():\n m = BRANCH_REGEX.match(RCS_FILE)\n if m:\n return m.group(1)\n return DEFAULT_DEPOT",
"def repo_name(git_url):\n name = git_url.split('/')[-1]\n\n if name.endswith('.git'):\n name = name[:-4]\n\n return name.lower()",
"def _strip_suffix(suffix, path_parts, rev, pathtype, repos, view_func):\n if not path_parts:\n return None\n suffix_len = len(suffix)\n if path_parts[-1][-suffix_len:] == suffix:\n path_parts = path_parts[:]\n if len(path_parts[-1]) == suffix_len:\n del path_parts[-1]\n else:\n path_parts[-1] = path_parts[-1][:-suffix_len]\n t = _repos_pathtype(repos, path_parts, rev)\n if pathtype == t:\n return path_parts, t, view_func\n return None",
"def remove_prefix(text, prefix):\n if text.startswith(prefix):\n return text[len(prefix):]\n return text",
"def remove_prefix(text, prefix):\n if text.startswith(prefix):\n return text[len(prefix):]\n return text",
"def remove_prefix(text, prefix):\n if text.startswith(prefix):\n return text[len(prefix):]\n return text",
"def svn_branch():\n return svn_url().split('/')[-1]",
"def remove_prefix(text, prefix):\n\n if text.startswith(prefix):\n return text[len(prefix):]\n return text",
"def parse_repository_tag(repo_path):\n tag_separator = \":\"\n digest_separator = \"@\"\n\n if digest_separator in repo_path:\n repo, tag = repo_path.rsplit(digest_separator, 1)\n return repo, tag, digest_separator\n\n repo, tag = repo_path, \"\"\n if tag_separator in repo_path:\n repo, tag = repo_path.rsplit(tag_separator, 1)\n if \"/\" in tag:\n repo, tag = repo_path, \"\"\n\n return repo, tag, tag_separator"
] | [
"0.5652172",
"0.5554605",
"0.54442376",
"0.5387567",
"0.5379905",
"0.5352716",
"0.53320515",
"0.53320336",
"0.53294367",
"0.5319218",
"0.52457154",
"0.5232823",
"0.52119076",
"0.51832646",
"0.5151672",
"0.5133377",
"0.5076513",
"0.5054134",
"0.50435024",
"0.50215286",
"0.502025",
"0.50169533",
"0.49750584",
"0.49693546",
"0.4950765",
"0.4950765",
"0.4950765",
"0.49469718",
"0.492918",
"0.4927143"
] | 0.7977387 | 0 |
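An illustrative aside (not part of the dataset): the _strip_prefix helper in the record above derives the top-level directory name that GitHub embeds in its archive tarballs, so the caller can strip it when extracting. A minimal plain-Python sketch of the same logic, with hypothetical example inputs, is:

def strip_prefix(repository, commit, extra_strip_prefix=""):
    # Mirrors the Starlark helper above; ValueError stands in for Starlark's fail().
    parts = repository.split("/")
    if len(parts) != 2:
        raise ValueError("repository must be formatted as organization/project")
    _, project = parts
    # GitHub archives omit the leading "v" of version tags.
    strip_commit = commit[1:] if commit.startswith("v") else commit
    result = project + "-" + strip_commit.replace("/", "-")
    if extra_strip_prefix:
        result += "/" + extra_strip_prefix
    return result

# Example (hypothetical inputs):
#   strip_prefix("RobotLocomotion/drake", "v1.0.0")  -> "drake-1.0.0"
#   strip_prefix("org/project", "abc123", "src")     -> "project-abc123/src"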
Returns true iff the commit is a hexadecimal string of length 40. | def _is_commit_sha(commit):
return len(commit) == 40 and all([
ch.isdigit() or (ch >= "a" and ch <= "f")
for ch in commit.elems()
]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_git_sha(text):\n # Handle both the full sha as well as the 7-character abbreviation\n if len(text) in (40, 7):\n try:\n int(text, 16)\n return True\n except ValueError:\n pass\n return False",
"def __isHexString(self, text):\n return all(map(lambda c: c in \"0123456789abcdefABCDEF\", text))",
"def is_valid_git_sha1(hash):\r\n\r\n if len(hash) != 40:\r\n return False\r\n try:\r\n value = int(hash, 16)\r\n except ValueError:\r\n return False\r\n\r\n return True",
"def isHex(string, needHexPrefix):\n return (True)",
"def ishex(data: str) -> bool:\n return bool(re.fullmatch(r\"^0[x|X][0-9a-fA-F]+\", data)) or bool(re.fullmatch(r\"^[0-9a-fA-F]+[h|H]$\", data))",
"def test_is_valid_hex(self):\n self.assertTrue(is_valid_hex('#aabb11'))\n self.assertTrue(is_valid_hex('#000'))\n self.assertTrue(is_valid_hex('#aaa'))\n self.assertFalse(is_valid_hex('black'))\n self.assertFalse(is_valid_hex('bl(ack'))",
"def is_sha(self):\n return self.name.startswith('SHA')",
"def is_commit_id_valid(commit_id, wit_path):\n\n if not is_branch(wit_path, commit_id):\n if commit_id.isalnum() and len(commit_id) == 40:\n\n if commit_id in _get_all_saves_names(wit_path):\n return True\n\n else:\n logging.error(f'No commit named {commit_id}.')\n\n else:\n logging.error('branch or commit does not exist. commit id must be 40 digits long and hexadecimal.')\n else:\n return True",
"def is_hex(n):\n hex_test = (1 + sqrt(1 + 8*n))/4\n if hex_test == int(hex_test):\n return True\n return False",
"def ishex(char: chr) -> bool:\n return char.isdigit() or char in \"abcdef\"",
"def is_valid_hex(hex_code: str) -> bool:\n\n match = re.search(r'^#(?:[0-9a-fA-F]{3}){1,2}$', hex_code)\n\n if match:\n return True\n else:\n return False",
"def validate_hash(h):\n if len(h) not in (32, 40, 64, 128):\n return False\n\n return bool(re.match(\"[0-9a-fA-F]*$\", h))",
"def ascii_hexchar(s: str) -> bool:\n return frozenset(s).issubset(_ascii_h)",
"def is_hex(s): \n # if it can be converted to a base 16 int then it is hex\n try:\n int(s, 16)\n return True\n \n except ValueError:\n # it could not be converted therefore is not hex\n return False\n # end try",
"def is_hex_bytes(hex_string):\n try:\n bytearray.fromhex(hex_string)\n except ValueError:\n return False\n\n return True",
"def comparehex(hex1: str, hex2: str) -> bool:\n if int(str(hex1), 16) == int(str(hex2), 16):\n return True\n return False",
"def is_hex_color(color, verbose='info'):\n # Set the logger\n set_logger(verbose=verbose)\n\n if not isinstance(color, str):\n logger.info('Hex [%s] should be of type string' %(str(color)))\n\n return False\n\n if color.startswith('#'):\n color = color[1:]\n else:\n logger.info('Hex [%s] should start with \"#\"' %(str(color)))\n return False\n\n if len(color) != 6:\n logger.info('Hex [%s] should be of length 7 incl \"#\"' %(str(color)))\n return False\n\n try:\n int(color, 16)\n return True\n except ValueError:\n return False",
"def is_commit_signature_valid(self, commit: git.Commit) -> bool:\n commit_status = self.get_commit_signature_status(commit)\n if commit_status in VALID_SIGNATURE_STATUSES:\n logger.debug(f\"Commit {commit.hexsha!r} signature is valid.\")\n return True\n\n logger.warning(f\"Commit {commit.hexsha!r} signature is not valid.\")\n return False",
"def line_part_of_commit(file, line, commit):\n if line == '0': return False\n\n line_val = git(\"blame\", \"-l\", \"-L{0},{0}\".format(line), file)\n return line_val.split(\" \", 1)[0] == commit",
"def valid_for(obj):\n\n if not obj.filedata:\n return False\n\n #hexstring = \"cffaedfe07000001030000800200\"\n return True",
"def is_binary_format(content, maxline=20):\n for lc in content[:maxline]:\n if b'format' in lc:\n if b'binary' in lc:\n return True\n return False\n return False",
"def is_hash(fhash):\n\n # Intentionally doing if/else statement for ease of testing and reading\n if re.match(re_md5, fhash):\n return True\n elif re.match(re_sha1, fhash):\n return True\n elif re.match(re_sha256, fhash):\n return True\n elif re.match(re_sha512, fhash):\n return True\n elif re.match(re_ssdeep, fhash):\n return True\n else:\n return False",
"def is_valid_commits(args):\n if args.commits is not None:\n return True\n return False",
"def is_valid_node_id(val):\n if not val:\n return False\n if not isinstance(val, bytes) and not isinstance(val, bytearray):\n return False\n\n length = len(val)\n if length != SHA1_BIN_LEN and length != SHA2_BIN_LEN and \\\n length != SHA3_BIN_LEN:\n return False\n\n return True",
"def is_crc(self):\n return self.name.startswith('CRC')",
"def IsBinaryData(self, data):\r\n # Derived from how Mercurial's heuristic, see\r\n # http://selenic.com/hg/file/848a6658069e/mercurial/util.py#l229\r\n return bool(data and \"\\0\" in data)",
"def isValid(self) :\n try :\n pos = 0\n while self.firstblock[pos] == chr(0) :\n pos += 1\n except IndexError : \n return False\n else : \n firstblock = self.firstblock[pos:]\n if firstblock.startswith(\"\\033E\\033\") or \\\n firstblock.startswith(\"\\033%1BBPIN;\") or \\\n ((pos == 11000) and firstblock.startswith(\"\\033\")) or \\\n (firstblock.startswith(\"\\033*rbC\") and (not self.lastblock[-3:] == \"\\f\\033@\")) or \\\n firstblock.startswith(\"\\033*rB\\033\") or \\\n firstblock.startswith(\"\\033%8\\033\") or \\\n (firstblock.find(\"\\033%-12345X\") != -1) or \\\n (firstblock.find(\"@PJL ENTER LANGUAGE=PCL\\012\\015\\033\") != -1) or \\\n (firstblock.startswith(chr(0xcd)+chr(0xca)) and (firstblock.find(\"\\033E\\033\") != -1)) :\n return True\n else : \n return False",
"def have_hash_symbol(l):\r\n if \"#\" in str(l):\r\n return 1\r\n else:\r\n return 0",
"def isBinaryFormat(content, maxline=20):\n for lc in content[:maxline]:\n if b'format' in lc:\n if b'binary' in lc:\n return True\n return False\n return False",
"def check_address_format(address):\n if len(address) != 42 or address[:2] != '0x':\n return False\n\n for ch in address[2:]:\n if ch not in \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890\":\n return False\n\n return True"
] | [
"0.75310045",
"0.6916779",
"0.6751016",
"0.66939974",
"0.6577284",
"0.64339066",
"0.64078575",
"0.6291537",
"0.62212795",
"0.6217626",
"0.6196688",
"0.6097613",
"0.60645914",
"0.6018439",
"0.59760046",
"0.59714776",
"0.5865132",
"0.5767874",
"0.57330835",
"0.57021797",
"0.56573784",
"0.56421506",
"0.5632466",
"0.562026",
"0.5616663",
"0.56156534",
"0.5567345",
"0.55668163",
"0.55102915",
"0.5510244"
] | 0.79437214 | 0 |
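As another illustrative aside (not part of the dataset), the _is_commit_sha check in the record above can be expressed in plain Python with a regular expression; this is a sketch under the same assumption as the Starlark original, namely that only 40 lowercase hexadecimal characters count as a commit sha:

import re

def is_commit_sha(commit: str) -> bool:
    # True iff commit is exactly 40 lowercase hexadecimal characters.
    return re.fullmatch(r"[0-9a-f]{40}", commit) is not None

# Example:
#   is_commit_sha("a" * 40)   -> True
#   is_commit_sha("v1.22.0")  -> False  (treated as a tag, not a git sha)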
Given a URL pattern for github.com or a Drake-specific mirror, substitutes in the given repository and commit (tag or git sha). | def _format_url(*, pattern, repository, commit):
is_commit_sha = _is_commit_sha(commit)
is_tag = not is_commit_sha
substitutions = {
"repository": repository,
"commit": commit,
"tag_name": commit if is_tag else None,
"commit_sha": commit if is_commit_sha else None,
}
for name, value in substitutions.items():
if value == None:
needle = "{" + name + "}"
if needle in pattern:
# If the pattern uses a substitution that we do not have,
# report that to our caller as "None"; don't return a URL
# string with a literal "None" in it!
return None
return pattern.format(**substitutions) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_repository(post):\n pattern = re.compile(constants.REPOSITORY_REGEX)\n if \"links\" in post.json_metadata.keys():\n for link in post.json_metadata[\"links\"]:\n if link.startswith(\"/exit?url=\"):\n link = link[len(\"/exit?url=\"):]\n\n try:\n result = pattern.search(link).group(0)\n return result\n except AttributeError:\n continue\n else:\n for line in post.body.split():\n try:\n result = pattern.search(line).group(0)\n return result\n except AttributeError:\n continue\n\n return \"\"",
"def _transform_github_url(self):\n self.url = (\n self.url\n .replace('/blob/', '/')\n .replace(self.GITHUB_NETLOC, self.GITHUB_RAW_NETLOC)\n )",
"def create_link(repository, project_name):\n beginning_url = \"https://api.github.com/repos/\"\n separator_url = \"/\"\n end_url = \"/commits\"\n\n base_url = beginning_url+repository+separator_url+project_name+end_url\n return base_url",
"def repo_link(repo):\n return \"https://github.com/\" + repo",
"def get_url_tag_commit(self, git_sha):\n\n url = 'https://{}/{}/{}/commit/{}'.format(\n HOST_GITHUB,\n self.repo,\n self.product,\n git_sha\n )\n return url",
"def git_url(fp: str) -> str:\n return f\"https://github.com/pantsbuild/pants/blob/release_{PANTS_SEMVER}/{fp}\"",
"def github_download_and_extract(\n repository_ctx,\n repository,\n commit,\n mirrors,\n output = \"\",\n sha256 = \"0\" * 64,\n extra_strip_prefix = \"\",\n commit_pin = None):\n urls = _urls(\n repository = repository,\n commit = commit,\n mirrors = mirrors,\n )\n\n repository_ctx.download_and_extract(\n urls,\n output = output,\n sha256 = _sha256(sha256),\n type = \"tar.gz\",\n stripPrefix = _strip_prefix(repository, commit, extra_strip_prefix),\n )\n\n # Create a summary file for Drake maintainers.\n generate_repository_metadata(\n repository_ctx,\n repository_rule_type = \"github\",\n repository = repository,\n commit = commit,\n version_pin = commit_pin,\n sha256 = sha256,\n urls = urls,\n )",
"def test_fix_repo_url():\n repo_url_git = 'git://github.com/Tinche/bower-cache'\n repo_url_https = 'https://github.com/Tinche/bower-cache'\n fixed_url_https = 'https://:@github.com/Tinche/bower-cache'\n assert repo_url_git == gitwrapper._fix_repo_url(repo_url_git)\n assert fixed_url_https == gitwrapper._fix_repo_url(repo_url_https)",
"def get_github_url(package_name: str, user_name: str):\n # Will keep ssh version for reference.\n # '%s @ git+ssh://[email protected]/draustin/%s.git'%(name, name)\n return '%s @ git+https://github.com/%s/%s.git'%(package_name, user_name, package_name)",
"def url_for(self: Self, commit_sha: str, path: str, lnum: int | None = None) -> str:\n # Default to main branch\n url = f\"https://github.com/{self.org}/{self.repo}/blob/{commit_sha}/{path}\"\n if lnum:\n url += f\"#L{lnum}\"\n return url",
"def render_content_urls(repo, yum_vars, baseDest):\n patterns = [\n (re.compile('\\$basearch'), 'basearch'),\n (re.compile('\\$arch'), 'arch'),\n (re.compile('\\$uuid'), 'uuid')\n ]\n for repository_id, content_url in repo:\n new_content_url = content_url # create a new content_url that we will transform\n for pattern in patterns: # apply all patterns to our content_url\n new_content_url = re.sub(pattern[0], yum_vars[pattern[1]], new_content_url) # Replace any instances of yum variables with the real value\n \n # Brue force set the releasever. Will likely work 99.9% of the time\n # If a repo isn't for 7Workstation or 7Server, then this will fail\n if 'workstation' in repository_id.lower():\n new_content_url = re.sub('\\$releasever', '7Workstation', new_content_url)\n else:\n new_content_url = re.sub('\\$releasever', '7Server', new_content_url)\n \n new_content_url = os.path.join(baseDest, new_content_url[1:]) # Strip first '/' character so join works properly\n yield (repository_id, new_content_url) # Return tuple with valid dest",
"def change_url_format(repo, out_type='ssh'):\n url = repo.url\n url_parts = re.split('[/:]', url)\n in_type = url_parts[0]\n url_fmts = {\n 'https': ('.com/', 'https://'),\n 'ssh': ('.com:', 'git@'),\n }\n url_fmts['git'] = url_fmts['ssh']\n new_repo_url = url\n for old, new in zip(url_fmts[in_type], url_fmts[out_type]):\n new_repo_url = new_repo_url.replace(old, new)\n # Inplace change\n repo.url = new_repo_url\n print('new format repo.url = {!r}'.format(repo.url))",
"def get_archive_url(url, branch='master', release=None):\n git_url = trim_repo_url(url)\n fragment = None\n file = git_url.split(\"/\")[-1]\n \n if release:\n fragment = \"/archive/{}.zip\".format(release)\n else:\n fragment = \"/archive/{}.zip\".format(branch)\n \n return file, git_url+fragment",
"def add_repo_url(image, repository, repositories):\n try:\n path = repositories[repository]\n path = path.strip(\"/\").replace(\"https://\", \"\").replace(\"http://\", \"\")\n image = \"/\".join([path, image])\n except KeyError:\n raise KeyError(f\"Repository {repository} not defined!\")\n return image",
"def RepositoryName(url, add_if_missing=False):\n if url.endswith('.git'):\n url = url[:-4]\n\n repositories = Repository.query(Repository.urls == url).fetch()\n if repositories:\n return repositories[0].key.id()\n\n if add_if_missing:\n return _AddRepository(url)\n\n raise KeyError('Unknown repository URL: %s' % url)",
"def get_version(git_repo, commit):\n version = git_repo.rev_parse(commit, short=7)\n try:\n version = \"%s@%s\" % (git_repo.find_tag(commit), version)\n except GitRepositoryError:\n pass\n\n return version",
"def repo_value(url):\n if url == '^':\n return url\n tup = urlsplit(url)\n if tup.scheme or tup.netloc:\n return urlunsplit(tup[:3]+('', ''))\n raise ValueError('URL %(url)r doesn\\'t contain a scheme '\n 'nor a hostname'\n % locals())",
"def process_git_tag(regex, inputtag):\n\ttry: \n\t\tgitre = re.compile(regex)\n\t\tmatch = gitre.search(inputtag)\n\t\tgroups = match.groupdict()\n\t\tversion = groups.get('version', '.unknown')\n\t\tdate = groups.get('date', '')\n\t\tgitmeta = groups.get('gitmeta', '')\n\t\tif date:\n\t\t\tversion = '.'.join([version, ''.join(date.split('-'))])\n\texcept (AttributeError, EnvironmentError, OSError):\n\t\tversion, gitmeta = '.unknown', ''\n\n\treturn version, gitmeta",
"def build_github_homepage_url(purl):\n purl_data = PackageURL.from_string(purl)\n\n namespace = purl_data.namespace\n name = purl_data.name\n version = purl_data.version\n subpath = purl_data.subpath\n\n if not (name and namespace):\n return\n\n url = \"https://github.com/{namespace}/{name}\".format(namespace=namespace, name=name)\n\n if version:\n url = \"{url}/tree/{version}\".format(url=url, version=version)\n\n if subpath:\n url = \"{url}/{subpath}\".format(url=url, subpath=subpath)\n\n return url",
"def cli(ctx, url):\n for u in url:\n m = re.fullmatch(\n r\"(?:https?://)?(?:www\\.)?github\\.com\"\n r\"/(?P<owner>[^/]+)\"\n r\"/(?P<repo>[^/]+)\"\n r\"/(?:issues|pull)\"\n r\"/(?P<issue>\\d+)\"\n r\"(?:#issuecomment-(?P<comment>\\d+))?\",\n u,\n )\n if not m:\n click.echo(f\"{ctx.command_path}: could not parse {u!r}\", err=True)\n continue\n endpoint = ctx.obj.repos[m.group(\"owner\")][m.group(\"repo\")].issues\n if m.group(\"comment\") is None:\n endpoint = endpoint[m.group(\"issue\")].reactions\n else:\n endpoint = endpoint.comments[m.group(\"comment\")].reactions\n endpoint.post(json={\"content\": \"+1\"})",
"def get_commit_by_url(commit_url):\n commit_sql = \"SELECT * FROM github_commit WHERE url=?\"\n return dbutils.execute_query(commit_sql, (commit_url,), DATABASE_FILE)",
"def linkcode_resolve(domain, info):\n if domain != 'py' or not info['module']:\n return None\n filename = info['module'].replace('.', '/')\n return \"https://github.com/mathcamp/flywheel/blob/%s/%s.py\" % (version_data['ref'], filename)",
"def pr_link(repo, id):\n\n return '[#{id}](https://github.com/{repo}/pull/{id})'.format(id=id, repo=repo)",
"def github_svn_rev2hash(tag: str, rev): # pragma: no cover\n uri = f'https://github.com/wikimedia/{tag}/!svn/vcc/default'\n request = fetch(uri, method='PROPFIND',\n data=\"<?xml version='1.0' encoding='utf-8'?>\"\n '<propfind xmlns=\\\"DAV:\\\"><allprop/></propfind>',\n headers={'label': str(rev),\n 'user-agent': 'SVN/1.7.5 {pwb}'})\n dom = xml.dom.minidom.parse(BytesIO(request.content))\n hsh = dom.getElementsByTagName('C:git-commit')[0].firstChild.nodeValue\n date = dom.getElementsByTagName('S:date')[0].firstChild.nodeValue\n date = time.strptime(date[:19], '%Y-%m-%dT%H:%M:%S')\n return hsh, date",
"def __get_repo_url_by_name(self, name, repos_list):\n for repo in repos_list:\n if repo['name'] == name:\n return repo['commits_url'].split('{')[0]",
"def add(name, url):\n click.echo(\"registered repo {} at url {}\".format(name, url))",
"def github_archive(\n name,\n repository = None,\n commit = None,\n commit_pin = None,\n sha256 = \"0\" * 64,\n build_file = None,\n patches = None,\n extra_strip_prefix = \"\",\n local_repository_override = None,\n mirrors = None,\n **kwargs):\n if repository == None:\n fail(\"Missing repository=\")\n if commit == None:\n fail(\"Missing commit=\")\n if mirrors == None:\n fail(\"Missing mirrors=; see mirrors.bzl\")\n\n build_file = _resolve_drake_abbreviation(name, build_file)\n patches = [\n _resolve_drake_abbreviation(name, one_patch)\n for one_patch in (patches or [])\n ]\n\n if local_repository_override != None:\n path = local_repository_override\n if extra_strip_prefix:\n path += \"/\" + extra_strip_prefix\n if build_file == None:\n native.local_repository(\n name = name,\n path = path,\n )\n else:\n native.new_local_repository(\n name = name,\n build_file = build_file,\n path = path,\n )\n return\n\n # Once we've handled the \"local_repository_override\" sidestep, we delegate\n # to a rule (not a macro) so that we have more leeway in the actions we can\n # take (i.e., so we can do more than just a simple download-and-extract).\n _github_archive_real(\n name = name,\n repository = repository,\n commit = commit,\n commit_pin = commit_pin,\n sha256 = sha256,\n build_file = build_file,\n patches = patches,\n extra_strip_prefix = extra_strip_prefix,\n mirrors = mirrors,\n **kwargs\n )",
"def build_bitbucket_homepage_url(purl):\n purl_data = PackageURL.from_string(purl)\n\n namespace = purl_data.namespace\n name = purl_data.name\n version = purl_data.version\n subpath = purl_data.subpath\n\n if not (name and namespace):\n return\n\n url = \"https://bitbucket.org/{namespace}/{name}\".format(\n namespace=namespace, name=name\n )\n if version:\n url = \"{url}/src/{version}\".format(url=url, version=version)\n\n if subpath:\n url = \"{url}/{subpath}\".format(url=url, subpath=subpath)\n\n return url",
"def patch(text):\n if match := re.search(r'\\d+__\\d+__\\d+', text):\n tag = match.group(0)\n if tag not in ocds_tags:\n if ocds_version or not use_development_version:\n text = text.replace(tag, ocds_tag)\n else:\n text = text.replace(ocds_schema_base_url + tag, development_base_url)\n return text",
"def _format_to_link(self, commit):\n return os.path.join(self.mount, \"commits-by-hash\", self._hash_updir(commit), commit) + \"/\""
] | [
"0.59759855",
"0.58624494",
"0.5857148",
"0.5793736",
"0.57574517",
"0.5607502",
"0.55679005",
"0.55396855",
"0.54959273",
"0.5490272",
"0.54473674",
"0.54330915",
"0.5402495",
"0.53789747",
"0.5333615",
"0.52597594",
"0.52568436",
"0.5239093",
"0.52205473",
"0.5201553",
"0.51674914",
"0.5165408",
"0.51570565",
"0.5129165",
"0.51120514",
"0.5111826",
"0.50963384",
"0.50760514",
"0.50524",
"0.50522536"
] | 0.66242903 | 0 |
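One more illustrative aside (not part of the dataset): _format_url in the record above fills {repository}, {commit}, {tag_name}, or {commit_sha} placeholders in a mirror URL pattern and returns None when the pattern needs a substitution that is unavailable. A self-contained plain-Python sketch of that behavior, using a hypothetical mirror pattern in the example:

import re

def format_url(pattern, repository, commit):
    # Same 40-lowercase-hex test as sketched for _is_commit_sha above.
    is_sha = re.fullmatch(r"[0-9a-f]{40}", commit) is not None
    substitutions = {
        "repository": repository,
        "commit": commit,
        "tag_name": None if is_sha else commit,
        "commit_sha": commit if is_sha else None,
    }
    for name, value in substitutions.items():
        if value is None and ("{" + name + "}") in pattern:
            # The pattern needs a field we cannot supply; report None rather
            # than returning a URL with a literal "None" embedded in it.
            return None
    return pattern.format(**substitutions)

# Example with a hypothetical tag-based mirror pattern:
#   format_url("https://example.org/{repository}/archive/{tag_name}.tar.gz",
#              "org/project", "v1.2.3")
#   -> "https://example.org/org/project/archive/v1.2.3.tar.gz"
#   With a 40-hex sha instead of a tag, the same call returns None because
#   {tag_name} cannot be filled in.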
Compute the URLs from which an archive of the provided GitHub repository and commit may be downloaded. | def _urls(*, repository, commit, mirrors):
result_with_nulls = [
_format_url(
pattern = x,
repository = repository,
commit = commit,
)
for x in mirrors.get("github")
]
return [
url
for url in result_with_nulls
if url != None
] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getUrls(self):\n # in case you need to move from a read only Url to a writeable one, here it gets replaced\n repopath = self.repositoryUrl().replace(\"[git]\", \"\")\n repoString = utils.replaceVCSUrl(repopath)\n [repoUrl, repoBranch, repoTag] = utils.splitVCSUrl(repoString)\n if not repoBranch and not repoTag:\n repoBranch = \"master\"\n print(\"|\".join([repoUrl, repoBranch, repoTag]))\n return True",
"def github_download_and_extract(\n repository_ctx,\n repository,\n commit,\n mirrors,\n output = \"\",\n sha256 = \"0\" * 64,\n extra_strip_prefix = \"\",\n commit_pin = None):\n urls = _urls(\n repository = repository,\n commit = commit,\n mirrors = mirrors,\n )\n\n repository_ctx.download_and_extract(\n urls,\n output = output,\n sha256 = _sha256(sha256),\n type = \"tar.gz\",\n stripPrefix = _strip_prefix(repository, commit, extra_strip_prefix),\n )\n\n # Create a summary file for Drake maintainers.\n generate_repository_metadata(\n repository_ctx,\n repository_rule_type = \"github\",\n repository = repository,\n commit = commit,\n version_pin = commit_pin,\n sha256 = sha256,\n urls = urls,\n )",
"def get_archive_url(url, branch='master', release=None):\n git_url = trim_repo_url(url)\n fragment = None\n file = git_url.split(\"/\")[-1]\n \n if release:\n fragment = \"/archive/{}.zip\".format(release)\n else:\n fragment = \"/archive/{}.zip\".format(branch)\n \n return file, git_url+fragment",
"def extract_urls(genome):\n itemid = genome.get('metadata').get('identifier')\n urls = set([url for url in genome['urls'] if 'archive.org' not in url])\n db_urls_found(itemid, urls)",
"def get_archive_urls(self):\n\n return ['https://zlib.net/']",
"def download(repo_name, root_dir, download_dir, hashalgo, github_token=None):\n\n if github_token:\n github_release._github_token_cli_arg = github_token\n\n if not os.path.isdir(download_dir):\n os.mkdir(download_dir)\n\n hashalgo_dir = os.path.join(root_dir, hashalgo)\n if not os.path.isdir(hashalgo_dir):\n os.mkdir(hashalgo_dir)\n\n hashalgo_csv = download_fileindex_csv(\n repo_name, hashalgo_dir, hashalgo, github_token\n )\n fileindex = read_fileindex_csv(hashalgo_csv)\n\n logging.debug(hashalgo + \": downloading release assets\")\n # Find out which filenames are present in multiple versions (need to give them unique names)\n filenames = [checksum_filename[1] for checksum_filename in fileindex]\n from collections import Counter\n\n # Sort based on filename and filedate\n fileindex.sort(key=lambda a: (a[COLUMN_FILENAME].casefold(), a[COLUMN_FILEDATE]))\n\n filenames_counter = Counter(filenames)\n # download saves files to current working directory, so we need to temporarily\n # change working dir to hashalgo_dir folder\n with cd(hashalgo_dir):\n fileindex_with_local_filename = []\n for fileindex_item in fileindex:\n checksum = fileindex_item[COLUMN_CHECKSUM]\n filename = fileindex_item[COLUMN_FILENAME]\n filedate = fileindex_item[COLUMN_FILEDATE] if len(fileindex_item) > COLUMN_FILEDATE else \"\"\n filepath = os.path.join(hashalgo_dir, checksum)\n if not os.path.isfile(filepath):\n if not github_release.gh_asset_download(repo_name, hashalgo, checksum):\n logging.error(\n hashalgo\n + \": failed to download \"\n + filename\n + \" (\"\n + checksum\n + \")\"\n )\n continue\n logging.debug(\n hashalgo + \": downloaded \" + filename + \" (\" + checksum + \")\"\n )\n\n # determine local filename\n if filenames_counter[filename] == 1:\n # unique filename\n local_filename = filename\n else:\n # multiple versions of the filename with different content\n # add checksum as suffix to distinguish them\n local_filename = filename + \".\" + checksum\n local_filepath = os.path.join(download_dir, local_filename)\n\n # set file name and date from index\n copyfile(filepath, local_filepath)\n set_filedate(local_filepath, date_from_utc_string(filedate if filedate else DEFAULT_FILE_DATE_UTC_STRING))\n\n # save local fileindex\n fileindex_with_local_filename.append([checksum, filename, filedate, local_filename])\n\n # Create new hashalgo.csv from existing and incoming files\n write_fileindex_csv(hashalgo_csv, fileindex)\n hashalgo_local_md = os.path.join(download_dir, hashalgo + \"_local.md\")\n write_fileindex_md(hashalgo_local_md, fileindex_with_local_filename, repo_name, hashalgo, include_local_filename=True)",
"def urls(gh, user):\n return [repo.url for repo in getuserrepos(gh, user)]",
"def _fetch_base_urls(repository_url):\n repo_config = _url_as_ini_file(repository_url)\n config = configparser.ConfigParser()\n config.read_file(repo_config)\n\n base_urls = list()\n for repo in config.sections():\n base_urls.append((config.get(repo, 'name'),\n config.get(repo, 'baseurl')))\n\n return base_urls",
"def _parse_url(repo_url: str) -> List[str]:\n try:\n return re.findall(r\"github\\.com/([^/]+)/([^\\/?]+)\", repo_url, re.I)[0]\n except IndexError:\n raise AnalyzerError(\"Incorrect repository URL\")",
"def download_from_github(self):\n logging.debug('download_from_github called')\n self.response = requests.get(f'{self.full_url}{self.filename}')\n status_code = self.response.status_code\n if status_code == 200:\n logging.debug('Success response gave status code 200')\n with open(f'{self.out_dir}docs/downloaded/{self.filename}',\n 'wb') as csv_written:\n csv_written.write(self.response.content)\n else:\n logging.debug('Error in requests download'\n 'status_code=%d', status_code)\n sys.exit()\n\n return self.response",
"def _download(url, outpath=None, dirname=None, branch='master', release=None):\n six.print_('downloading...')\n outfolder = outpath or os.getcwd()\n file, archive_url = get_archive_url(url, branch, release)\n six.print_(archive_url)\n if dirname:\n outfolder = \"{}/{}.zip\".format(outfolder, dirname)\n return file, wget.download(archive_url, out=outfolder)",
"def download(repo_url, sha, working_dir):\n print 'Downloading %s ...' % (sha)\n sf_zip = os.path.join(working_dir, 'sf.gz')\n with open(sf_zip, 'wb+') as f:\n f.write(requests.get(github_api(repo_url) + '/zipball/' + sha).content)\n zip_file = ZipFile(sf_zip)\n zip_file.extractall(working_dir)\n zip_file.close()\n\n for name in zip_file.namelist():\n if name.endswith('/src/'):\n src_dir = name\n break\n\n return os.path.join(working_dir, src_dir)",
"def download_and_expand(self):\n top_dir_name = None\n if self.git_branch:\n # Download a source by git clone.\n top_dir_name = self._download_and_expand_by_git()\n else:\n # Download a source from the arcihve URL.\n # Downloading the compressed archive is better than \"git clone\",\n # because it is faster.\n # If download failed due to URL not found, try \"git clone\".\n try:\n top_dir_name = self._download_and_expand_from_archive_url()\n except RemoteFileNotFoundError:\n Log.info('Try to download by git clone.')\n top_dir_name = self._download_and_expand_by_git()\n return top_dir_name",
"def parse_urls(data):\n testing = [0] * len(data[\"Commit_URL\"])\n build = [0] * len(data[\"Commit_URL\"])\n maintenance = [0] * len(data[\"Commit_URL\"])\n for ii in range(len(data[\"Commit_URL\"])):\n try:\n html = urlopen(data[\"Commit_URL\"].iloc[ii])\n bsObj = BeautifulSoup(html, \"html.parser\")\n paths = bsObj.findAll(\"a\", {\"href\": re.compile(r\"#diff-[a-z0-9]+\")})\n for path in paths:\n if len(path.attrs) == 1:\n if re.match(r\".*(build|pom).*\", str(path)):\n build[ii] = 1\n if re.match(r\".*(test|tests|tester).*\", str(path)):\n testing[ii] = 1\n if re.match(r\".*(u|U)til.*\", str(path)) or re.match(r\".*(h|H)elper.*\", str(path)):\n maintenance[ii] = 1\n except HTTPError as e:\n print(data[\"Commit_ID\"].iloc[ii])\n except URLError as e:\n print(\"The server could not be found!\")\n data[\"Testing\"] = testing\n data[\"Build\"] = build\n data[\"Maintenance\"] = maintenance\n return data",
"def download_files(valid_links: list) -> list:\n print('Starting process...')\n print('')\n\n year_month_filepath = []\n\n for link_info in valid_links:\n\n # Get file extension\n extension = link_info[0].split('.')[-1]\n\n # Link to download\n link_to_download = link_info[0]\n\n # Get month\n month = link_info[1]\n\n # Get year\n year = link_info[2]\n\n # Create a standard filename to save\n file_name = f'{year}-{month}.{extension}'\n\n print(f'Downloading... {link_to_download} Saving... {file_name}')\n\n # Create a link to save into ./file directory\n link_to_save = f'./file/{file_name}'\n\n # Download file and save it\n wget.download(link_to_download, out=link_to_save)\n\n\n # Special treatment to zip and xlsx file\n if extension == 'zip':\n\n # Get right link to save (.csv) from zip function\n link_to_save = get_file_into_zip(link_to_save)\n\n elif extension == 'xlsx':\n # Get right link to save (.csv) from xlsx function\n link_to_save = excel2csv(link_to_save)\n\n # Include the tuple into a list\n year_month_filepath.append((year, month, link_to_save))\n\n print('Finishing process...')\n\n return year_month_filepath",
"def download_artifacts(token, artifacts):\n zipfiles = []\n for a in artifacts:\n updated_at = datetime.fromisoformat(a[\"updated_at\"][:-1])\n datename = a[\"name\"]+updated_at.strftime(\"-%Y-%m-%d\")\n filename = datename + \".zip\"\n if os.path.exists(filename):\n zipfiles.append((a[\"name\"], datename, filename))\n print(f\"{filename} skipped, already downloaded\")\n continue\n\n print(f\"Fetching {filename}\")\n ok = run_curl(token, a[\"archive_download_url\"], filename)\n if not ok:\n continue\n\n zipfiles.append((a[\"name\"], datename, filename))\n\n return zipfiles",
"def getURLs():",
"def fetch_all_snapshots(archive_dir, wayback_filename, target_url):\n # Read the list of snapshots.\n with open(wayback_filename) as f:\n data = f.read()\n\n url_template = \"http://web.archive.org/web/{timestamp}/{target_url}\"\n snapshots = data.split(\"\\n\")\n pages_downloaded = 0\n pages_failed = 0\n pages_skipped = 0\n for snapshot in snapshots:\n fields = snapshot.split()\n if len(fields) < 1:\n print(\"Bad fields. End of data?\")\n break\n date_string = fields[1]\n assert 14 == len(date_string)\n ymd = date_string[:8]\n year = int(date_string[:4])\n month = int(date_string[4:6])\n day = int(date_string[6:8])\n assert 1900 < year < 2100 and 1 <= month <= 12 and 1 <= day <=31\n date_of_fire = datetime.date(year,month, day)\n filename = F\"firedata_{year}_{month:02}_{day:02}.html\"\n path = os.path.join(archive_dir, filename)\n if os.path.exists(path):\n print(\"Not replacing \", path)\n pages_skipped += 1\n continue\n else:\n print(\"Downloading for \", path)\n url = url_template.format(timestamp=date_string, target_url=target_url)\n print(url)\n\n page = fetch(url)\n if page is None:\n print(\"Fetching above url failed.\")\n pages_failed +=1\n continue\n\n pages_downloaded += 1\n with open(path, \"wb\") as f:\n f.write(page)\n print(\"Page saved\")\n sleep(2)\n return pages_downloaded, pages_failed, pages_skipped",
"def get_url_tag_commit(self, git_sha):\n\n url = 'https://{}/{}/{}/commit/{}'.format(\n HOST_GITHUB,\n self.repo,\n self.product,\n git_sha\n )\n return url",
"def get_track_urls(year):\r\n # assert int(year) >= 2023, f\"only support year >= 2023, but get {year}!!!\"\r\n project_root_folder = os.path.abspath(\r\n os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\r\n dat_file_pathname = os.path.join(\r\n project_root_folder, 'urls', f'track_archive_url_AAAI_{year}.dat'\r\n )\r\n proceeding_th_dict = {\r\n 1980: 1,\r\n 1902: 2,\r\n 1983: 3,\r\n 1984: 4,\r\n 1986: 5,\r\n 1987: 6,\r\n 1988: 7,\r\n 1990: 8,\r\n 1991: 9,\r\n 1992: 10,\r\n 1993: 11,\r\n 1994: 12,\r\n 1996: 13,\r\n 1997: 14,\r\n 1998: 15,\r\n 1999: 16,\r\n 2000: 17,\r\n 2002: 18,\r\n 2004: 19,\r\n 2005: 20,\r\n 2006: 21,\r\n 2007: 22,\r\n 2008: 23\r\n }\r\n if year >= 2023:\r\n base_url = r'https://ojs.aaai.org/index.php/AAAI/issue/archive'\r\n headers = {\r\n 'User-Agent': user_agents[-1],\r\n 'Host': 'ojs.aaai.org',\r\n 'Referer': \"https://ojs.aaai.org\",\r\n 'GET': base_url\r\n }\r\n if os.path.exists(dat_file_pathname):\r\n with open(dat_file_pathname, 'rb') as f:\r\n content = pickle.load(f)\r\n else:\r\n req = urllib.request.Request(url=base_url, headers=headers)\r\n content = urllib.request.urlopen(req).read()\r\n # content = open(f'..\\\\AAAI_{year}.html', 'rb').read()\r\n with open(dat_file_pathname, 'wb') as f:\r\n pickle.dump(content, f)\r\n soup = BeautifulSoup(content, 'html5lib')\r\n tracks = soup.find('ul', {'class': 'issues_archive'}).find_all('li')\r\n track_urls = dict()\r\n for tr in tracks:\r\n h2 = tr.find('h2')\r\n this_track = slugify(h2.a.text)\r\n if this_track.startswith(f'aaai-{year-2000}'):\r\n this_track += slugify(h2.div.text) + '-' + this_track\r\n this_url = h2.a.get('href')\r\n track_urls[this_track] = this_url\r\n print(f'find track: {this_track}({this_url})')\r\n else:\r\n if year >= 2010:\r\n proceeding_th = year - 1986\r\n elif year in proceeding_th_dict:\r\n proceeding_th = proceeding_th_dict[year]\r\n else:\r\n print(f'ERROR: AAAI proceeding was not held in year {year}!!!')\r\n return\r\n\r\n base_url = f'https://aaai.org/proceeding/aaai-{proceeding_th:02d}-{year}/'\r\n headers = {\r\n 'User-Agent': user_agents[-1],\r\n 'Host': 'aaai.org',\r\n 'Referer': \"https://aaai.org\",\r\n 'GET': base_url\r\n }\r\n if os.path.exists(dat_file_pathname):\r\n with open(dat_file_pathname, 'rb') as f:\r\n content = pickle.load(f)\r\n else:\r\n req = urllib.request.Request(url=base_url, headers=headers)\r\n content = urllib.request.urlopen(req).read()\r\n # content = open(f'..\\\\AAAI_{year}.html', 'rb').read()\r\n with open(dat_file_pathname, 'wb') as f:\r\n pickle.dump(content, f)\r\n soup = BeautifulSoup(content, 'html5lib')\r\n tracks = soup.find('main', {'class': 'content'}).find_all('li')\r\n track_urls = dict()\r\n for tr in tracks:\r\n this_track = slugify(tr.a.text)\r\n this_url = tr.a.get('href')\r\n track_urls[this_track] = this_url\r\n print(f'find track: {this_track}({this_url})')\r\n return track_urls",
"def repo_link(repo):\n return \"https://github.com/\" + repo",
"def _get_github_fetcher(\n parsed_url: ParsedUrl, output_dir: str, spec: Optional[str] = None\n) -> WorkflowFetcherBase:\n # There are four different GitHub URLs we are interested in:\n # 1. URL to a repository: /<user>/<repo>\n # 2. Git URL: /<user>/<repo>.git\n # 3. URL to a branch/commit/tag: /<user>/<repo>/tree/<git_ref>\n # 4. URL to a zip snapshot: /<user>/<repo>/archive/.../<git_ref>.zip\n components = _match_url(\n parsed_url,\n [\n \"/<username>/<repository>/\",\n \"/<username>/<repository>.git/\",\n \"/<username>/<repository>/tree/<path:git_ref>\",\n \"/<username>/<repository>/archive/<path:zip_path>\",\n ],\n )\n\n username = components[\"username\"]\n repository = components[\"repository\"]\n git_ref = components.get(\"git_ref\")\n zip_path = components.get(\"zip_path\")\n\n if zip_path:\n # The name of the zip file is the git commit/branch/tag\n git_ref = parsed_url.basename_without_extension\n workflow_name = f\"{repository}-{git_ref}\"\n return WorkflowFetcherZip(parsed_url, output_dir, spec, workflow_name)\n else:\n repository_url = ParsedUrl(f\"https://github.com/{username}/{repository}.git\")\n return WorkflowFetcherGit(repository_url, output_dir, git_ref, spec)",
"def url_for(self: Self, commit_sha: str, path: str, lnum: int | None = None) -> str:\n # Default to main branch\n url = f\"https://github.com/{self.org}/{self.repo}/blob/{commit_sha}/{path}\"\n if lnum:\n url += f\"#L{lnum}\"\n return url",
"def _fetch_srcs(opts, cache_dir, revision, desc=None, refspecs=None):\n\n git_dir = '--git-dir=' + cache_dir\n\n if not desc:\n desc = 'repository: {}'.format(opts.name)\n\n log('fetching most recent sources')\n prepared_fetch_cmd = [\n git_dir,\n 'fetch',\n '--progress',\n '--prune',\n 'origin',\n ]\n\n # limit fetch depth\n target_depth = 1\n if opts._git_depth is not None:\n target_depth = opts._git_depth\n limited_fetch = (target_depth and 'releng.git.no_depth' not in opts._quirks)\n\n depth_cmds = [\n '--depth',\n str(target_depth),\n ]\n\n # if a revision is provided, first attempt to do a revision-specific fetch\n quick_fetch = 'releng.git.no_quick_fetch' not in opts._quirks\n if revision and quick_fetch:\n ls_cmd = [\n 'ls-remote',\n '--exit-code',\n 'origin',\n ]\n debug('checking if tag exists on remote')\n if GIT.execute(ls_cmd + ['--tags', 'refs/tags/{}'.format(revision)],\n cwd=cache_dir, quiet=True):\n debug('attempting a tag reference fetch operation')\n fetch_cmd = list(prepared_fetch_cmd)\n fetch_cmd.append('+refs/tags/{0}:refs/tags/{0}'.format(revision))\n if limited_fetch:\n fetch_cmd.extend(depth_cmds)\n\n if GIT.execute(fetch_cmd, cwd=cache_dir):\n debug('found the reference')\n return True\n\n debug('checking if reference exists on remote')\n if GIT.execute(ls_cmd + ['--heads', 'refs/heads/{}'.format(revision)],\n cwd=cache_dir, quiet=True):\n debug('attempting a head reference fetch operation')\n fetch_cmd = list(prepared_fetch_cmd)\n fetch_cmd.append(\n '+refs/heads/{0}:refs/remotes/origin/{0}'.format(revision))\n if limited_fetch:\n fetch_cmd.extend(depth_cmds)\n\n if GIT.execute(fetch_cmd, cwd=cache_dir):\n debug('found the reference')\n return True\n\n # fetch standard (and configured) refspecs\n std_refspecs = [\n '+refs/heads/*:refs/remotes/origin/*',\n '+refs/tags/*:refs/tags/*',\n ]\n prepared_fetch_cmd.extend(std_refspecs)\n\n # allow fetching addition references if configured (e.g. pull requests)\n if refspecs:\n for ref in refspecs:\n prepared_fetch_cmd.append(\n '+refs/{0}:refs/remotes/origin/{0}'.format(ref))\n\n fetch_cmd = list(prepared_fetch_cmd)\n if limited_fetch:\n fetch_cmd.extend(depth_cmds)\n\n if not GIT.execute(fetch_cmd, cwd=cache_dir):\n err('unable to fetch branches/tags from remote repository')\n return False\n\n if revision:\n verbose('verifying target revision exists')\n exists_state = revision_exists(git_dir, revision)\n if exists_state in REVISION_EXISTS:\n pass\n elif (exists_state == GitExistsType.MISSING_HASH and\n limited_fetch and opts._git_depth is None):\n warn('failed to find hash on depth-limited fetch; fetching all...')\n\n fetch_cmd = list(prepared_fetch_cmd)\n fetch_cmd.append('--unshallow')\n\n if not GIT.execute(fetch_cmd, cwd=cache_dir):\n err('unable to unshallow fetch state')\n return False\n\n if revision_exists(git_dir, revision) not in REVISION_EXISTS:\n err('unable to find matching revision in {}\\n'\n ' (revision: {})', desc, revision)\n return False\n else:\n err('unable to find matching revision in {}\\n'\n 'revision: {})', desc, revision)\n return False\n\n return True",
"def download(\n urls,\n output_dir,\n *,\n existing=\"error\",\n jobs=6,\n develop_debug=False,\n authenticate=False, # Seems to work just fine for public stuff\n recursive=True,\n):\n urls = flattened([urls])\n if len(urls) > 1:\n raise NotImplementedError(\"multiple URLs not supported\")\n if not urls:\n # if no paths provided etc, we will download dandiset path\n # we are at, BUT since we are not git -- we do not even know\n # on which instance it exists! Thus ATM we would do nothing but crash\n raise NotImplementedError(\"No URLs were provided. Cannot download anything\")\n url = urls[0]\n girder_server_url, asset_type, asset_id = parse_dandi_url(url)\n\n # We could later try to \"dandi_authenticate\" if run into permission issues.\n # May be it could be not just boolean but the \"id\" to be used?\n client = girder.get_client(\n girder_server_url,\n authenticate=authenticate,\n progressbars=True, # TODO: redo all this\n )\n\n lgr.info(f\"Downloading {asset_type} with id {asset_id} from {girder_server_url}\")\n\n # there might be multiple asset_ids, e.g. if multiple files were selected etc,\n # so we will traverse all of them\n files = flatten(\n _get_asset_files(\n asset_id_, asset_type, output_dir, client, authenticate, existing, recursive\n )\n for asset_id_ in set(flattened([asset_id]))\n )\n\n Parallel(n_jobs=jobs, backend=\"threading\")(\n delayed(client.download_file)(\n file[\"id\"],\n op.join(output_dir, file[\"path\"]),\n existing=existing,\n attrs=file[\"attrs\"],\n # TODO: make it less \"fluid\" to not breed a bug where we stop verifying\n # for e.g. digests move\n digests={\n d: file.get(\"metadata\")[d]\n for d in metadata_digests\n if d in file.get(\"metadata\", {})\n },\n )\n for file in files\n )",
"def _download(self, url, rel_path):\n \n tmp_dir = \"TMP_DIR=`mktemp -d`;\"\n wget_cmd = [ tmp_dir, \"wget\", \"-nv\", \"-O\", \"$TMP_DIR/archive.tgz\", url, \";\" ]\n wget_cmd = ' '.join(wget_cmd)\n \n mkdir_cmd = \"mkdir -p %s ;\" % (\"./remote_resources/\" + rel_path)\n \n cleandir_cmd = \"rm -Rf %s/* ;\" % (\"./remote_resources/\" + rel_path)\n \n untar_cmd = [ \"tar\", \"xf\", \"$TMP_DIR/archive.tgz\", \"-C\", \"./remote_resources/%s\" % rel_path, \";\" ]\n untar_cmd = ' '.join(untar_cmd)\n \n remove_cmd = \"rm -Rf $TMP_DIR;\"\n \n return self._ssh(' '.join([ wget_cmd, mkdir_cmd, cleandir_cmd, untar_cmd, remove_cmd ]))",
"def get_github_commits():\n utcnow = datetime.datetime.utcnow()\n yesterday = utcnow - datetime.timedelta(hours=24)\n yesterday = yesterday.replace(hour=12, minute=0, second=0)\n iso = yesterday.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n\n txt = [\"> IEM Code Pushes <to branch> on Github\\n\"]\n html = [\"<h3>IEM Code Pushes <to branch> on Github</h3>\"]\n\n # get branches, main is first!\n branches = [\"main\"]\n req = exponential_backoff(requests.get, IEM_BRANCHES, timeout=30)\n for branch in req.json():\n if branch[\"name\"] == \"main\":\n continue\n branches.append(branch[\"name\"])\n\n hashes = []\n links = []\n for branch in branches:\n uri = (\n f\"https://api.github.com/repos/akrherz/iem/commits?since={iso}&\"\n f\"sha={branch}\"\n )\n req2 = exponential_backoff(requests.get, uri, timeout=30)\n # commits are in reverse order\n for commit in req2.json()[::-1]:\n if commit[\"sha\"] in hashes:\n continue\n hashes.append(commit[\"sha\"])\n timestring = commit[\"commit\"][\"author\"][\"date\"]\n utcvalid = datetime.datetime.strptime(\n timestring, \"%Y-%m-%dT%H:%M:%SZ\"\n )\n valid = utcvalid.replace(tzinfo=pytz.utc).astimezone(\n pytz.timezone(\"America/Chicago\")\n )\n data = {\n \"stamp\": valid.strftime(\"%b %-d %-2I:%M %p\"),\n \"msg\": commit[\"commit\"][\"message\"],\n \"htmlmsg\": htmlize(commit[\"commit\"][\"message\"])\n .replace(\"\\n\\n\", \"\\n\")\n .replace(\"\\n\", \"<br />\\n\"),\n \"branch\": branch,\n \"url\": commit[\"html_url\"][:-20], # chomp to make shorter\n \"i\": len(links) + 1,\n }\n links.append(\"[%(i)s] %(url)s\" % data)\n txt.append(\n mywrap(\" %(stamp)s[%(i)s] <%(branch)s> %(msg)s\" % data)\n )\n html.append(\n (\n '<li><a href=\"%(url)s\">%(stamp)s</a> '\n \"<%(branch)s> %(htmlmsg)s</li>\\n\"\n )\n % data\n )\n\n if len(txt) == 1:\n txt = txt[0] + \" No code commits found in previous 24 Hours\"\n html = html[0] + (\n \"<strong>No code commits found \" \"in previous 24 Hours</strong>\"\n )\n else:\n txt = \"\\n\".join(txt) + \"\\n\\n\" + \"\\n\".join(links)\n html = html[0] + \"<ul>\" + \"\\n\".join(html[1:]) + \"</ul>\"\n\n return txt + \"\\n\\n\", html + \"<br /><br />\"",
"def create_link(repository, project_name):\n beginning_url = \"https://api.github.com/repos/\"\n separator_url = \"/\"\n end_url = \"/commits\"\n\n base_url = beginning_url+repository+separator_url+project_name+end_url\n return base_url",
"def test_download_file(token):\n\n # github => repo => release => asset_list => asset => url => download\n\n g_h = github.Github(token, per_page=100)\n repo = g_h.get_repo(TEST_SLUG, lazy=False)\n release = repo.get_release(TEST_TAG)\n asset_list = release.get_assets()\n sha_filename = Template(Arguments.HASH_FILE).safe_substitute({\n 'platform': platform.system().lower()\n })\n\n assets_calculated_sha = 'notasha'\n sha_dict = {}\n\n for check_asset in asset_list:\n # look through list of assets for uploaded file and sha file\n\n if check_asset.name == os.path.basename(TEST_FILENAME):\n\n # the uploaded asset\n request = requests.get(check_asset.browser_download_url)\n open(TEST_DOWNLOAD, 'wb').write(request.content)\n\n # recalc hash of downloaded file\n assets_calculated_sha = Arguments.get_hash(TEST_DOWNLOAD)\n\n elif check_asset.name == sha_filename:\n\n # the sha hash file\n request = requests.get(check_asset.browser_download_url)\n sha_dict = request.json()\n\n assert assets_calculated_sha == sha_dict[os.path.basename(TEST_FILENAME)]",
"def get_links(match_set, sha_validation=validate_sha_github):\n links = []\n for ticket in match_set.tickets:\n links.append(ticket_url % ticket)\n for PR in match_set.github_PRs:\n links.append(github_PR_url % PR)\n\n # validate github changeset SHA's\n for c in match_set.github_changesets:\n if sha_validation and sha_validation(c):\n links.append(github_changeset_url % c)\n\n return links"
] | [
"0.6456689",
"0.6333289",
"0.6226612",
"0.6225718",
"0.6178328",
"0.617227",
"0.60984576",
"0.6067744",
"0.598783",
"0.5919076",
"0.58724535",
"0.5857349",
"0.5800136",
"0.5778897",
"0.5738447",
"0.5725445",
"0.5716222",
"0.5699203",
"0.56966025",
"0.56940144",
"0.56504464",
"0.5633359",
"0.56117153",
"0.55871624",
"0.5570266",
"0.554827",
"0.55157274",
"0.5493995",
"0.5491159",
"0.54731846"
] | 0.68042713 | 0 |
Handle requests to /appointments route Retrieve & render all appointments in the db | def read_appointments():
if current_user.is_admin is False:
appointments = Appointment.query.filter_by(user_id=current_user.id).all()
else:
appointments = Appointment.query.all()
return render_template('appointments/index.html.j2', appointments=appointments, title='appointments') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def appointment_list(self, request, **dict):\n\t\tdata = self.get_serializer(self.get_queryset(), many=True).data\n\t\treturn Response(data, status.HTTP_200_OK)",
"def appointments(request):\n now = timezone.localtime(timezone.now())\n data = {}\n tables = {}\n rows = []\n seen = Appointment.objects.filter(seen_time__isnull=False).filter(\n checkin_date__iexact=now.date())\n # Today's COMPLETE patients\n complete = seen.filter(finish_time__isnull=False)\n for a in complete:\n d = {}\n d['id'] = a.id\n d['name'] = a.first_name + ' ' + a.last_name\n h, m, s = to_hms(get_waiting_time(a, now.time()))\n wait_time = \"\" + str(h) + \":\" + str(m) + \":\" + str(s)\n d['wait_time'] = wait_time\n rows.append(d)\n tables['Completed'] = rows\n rows = []\n # Today's IN_SESSION patients\n in_session = seen.filter(finish_time__isnull=True)\n for a in in_session:\n d = {}\n d['id'] = a.id\n d['name'] = a.first_name + ' ' + a.last_name\n h, m, s = to_hms(get_waiting_time(a, now.time()))\n wait_time = \"\" + str(h) + \":\" + str(m) + \":\" + str(s)\n d['wait_time'] = wait_time\n rows.append(d)\n tables['In Session'] = rows\n data['tables'] = tables\n return render(request, 'doctor/appointments.html', data)",
"def appointments(request):\n try:\n doctor = Doctor.objects.get(user=request.user)\n except Doctor.DoesNotExist:\n raise Http404(\"Doctor with current user instance not found!\")\n\n #now get the doctors appointments with the most recent first.\n appointments = get_appointments_list(doctor.doctor_appointments.all().order_by(\"-date\"))\n\n return JsonResponse({\n \"appointments\": appointments\n })",
"def appointments(self, request, **kwargs):\n\n appointments = AppointmentSelector.user_appointments(request.user)\n return Response(AppointmentSerializer(appointments, many=True).data)",
"def view_appointments(request):\n\n appointments = Appointment.objects.all().order_by('date')\n\n if request.user.userprofile.is_patient():\n appointments = Appointment.objects.filter(patient=request.user.id).order_by('date')\n\n elif request.user.userprofile.is_doctor():\n appointments = Appointment.objects.filter(doctor=request.user.id).order_by('date')\n\n return render(request, 'view_appointments.html', {'appointments': appointments,\n 'the_user': request.user})",
"def completed_appointments(request):\n appointments = AppointmentRequests.objects.all().filter(completed=True)\n return render(request,\"completed_appointments.html\",{\"appointments\":appointments})",
"def dashboard(request):\n appointments = AppointmentRequests.objects.all().filter(completed=False)\n return render(request,\"dashboard.html\",{\"appointments\":appointments})",
"def get_appointments(self):\n current_date = timezone.now().date()\n filled_slots = self.slots.filter(\n appointment__isnull=False,\n date__gte=current_date\n )\n appointment_ids = filled_slots.values_list('appointment', flat=True)\n qs = Appointment.objects.filter(id__in=appointment_ids)\n return qs",
"def base_dashboard(request):\n appointments = None\n\n if request.user.userprofile.is_patient():\n appointments = Appointment.objects.filter(patient=request.user.id).order_by('date')\n elif request.user.userprofile.is_doctor():\n appointments = Appointment.objects.filter(doctor=request.user.id).order_by('date')\n else:\n appointments = Appointment.objects.all().order_by('date')\n\n return render(request, 'base_dashboard.html', {'appointments': appointments, 'the_user': request.user})",
"def calendar_view(request, calendar_id):\n calendar_obj = Calendar.objects.get(pk=calendar_id)\n try:\n appointments = Appointment.objects.all().filter(calendar=calendar_obj)\n appointments = jsonify(appointments)\n except:\n appointments = []\n calendar_obj = calendar_obj.serialize()\n calendar_obj[\"non_working_days\"] = [day for day in [0, 1, 2, 3, 4, 5, 6] if day not in calendar_obj[\"working_days\"]]\n return render(request, 'calendar_view.html', {'calendar_obj': calendar_obj, 'appointments': appointments})",
"def apt_list(cal, c_id, start, end):\n\n # Get the appointments returning it as list of dictionaries\n appointments_result = cal.events().list(\n calendarId=c_id,\n timeMin=start,\n timeMax=end,\n singleEvents=True,\n orderBy='startTime'\n ).execute()\n appointments = appointments_result.get('items', [])\n return appointments",
"def complete_appointment(request, calendar_id):\n calendar = Calendar.objects.get(pk=calendar_id)\n return render(request, 'complete_appointment.html', {'calendar': calendar})",
"def index():\n # return render_template('index.html', events=get_calendar_events_today(CALENDAR_URL))\n return render_template('index.html', events=get_calendar_events_limit(CALENDAR_URL), events_sorted=True)",
"def create_patient_appointment():\n if request.method == 'POST':\n patient_email = request.form['patient_email']\n doctor_email = request.form['doctor_email']\n date = request.form['date']\n time = request.form['time']\n\n response = requests.post(server_url + 'patient/create_appointment', json={\n 'patient_email': patient_email,\n 'doctor_email': doctor_email,\n 'date': date,\n 'time': time\n })\n\n response = response.json()\n\n if response.get('Status') == \"DOCTOR_HAS_AN_APPOINTMENT_SELECTED_TIME_SLOT\":\n return render_template('patients/appointment_failed.html')\n elif response.get('Status') == \"DOCTOR_IS_NOT_AVAILABLE_AT_THAT_TIME\":\n return render_template('patients/appointment_failed.html')\n elif response.get('Status') == \"INVALID_PATIENT_EMAIL\":\n return render_template('patients/appointment_failed.html')\n elif response.get('Status') == \"INVALID_DOCTOR_EMAIL\":\n return render_template('patients/appointment_failed.html')\n else:\n referer = request.referrer\n return redirect(referer, code=302)\n else:\n return render_template('patients/dashboard.html')",
"def touragenda(request):\n active_events = TourAgendaModel.objects.order_by('number')\n friday_events = TourAgendaModel.objects.all().filter(day='FRIDAY')\n saturday_events = TourAgendaModel.objects.all().filter(day='SATURDAY')\n sunday_events = TourAgendaModel.objects.all().filter(day='SUNDAY')\n\n context = {\n 'active_events': active_events,\n 'friday_events': friday_events,\n 'saturday_events': saturday_events,\n 'sunday_events': sunday_events,\n }\n\n return render(request, 'tourAgenda.html', context=context)",
"def get_made_appointments(iso_datetime):\n appointments = []\n request_d_time = datetime.fromisoformat(iso_datetime)\n request_date = datetime(request_d_time.year,\n request_d_time.month,\n request_d_time.day)\n try:\n query = db.session.query(Appointment).filter(\n Appointment.d_time >= request_date\n ).all()\n appointments = list(map(lambda appointment: appointment.d_time, query))\n except Exception as e:\n app.logger.error(str(e))\n raise\n else:\n return appointments",
"def refresh_checkins(request):\n if request.method == 'GET':\n data = {}\n # today's checked in appointments\n data['appointments'] = get_checked_in_appointments()\n return JsonResponse(data) # JsonResponse; returned to $.ajax",
"def index(request):\n template = 'index.html'\n hospitals = Hospital.objects.all()\n return render_to_response(template, {'hospitals': hospitals}, context_instance=RequestContext(request))",
"def appointment():\r\n return render_template(\r\n 'about.html',\r\n title='About',\r\n year=datetime.now().year,\r\n message='Your application description page.'\r\n )",
"def appointments(resources_slots, from_date, to_date, resources=[], status_all=[], resources_all={}):\n\n query = \"\"\"\n SELECT A.STARTTIME, A.ENDTIME, V.APPOINTMENTTYPEID, V.TYPE, \\\n A.RESOURCEID, APPOINTMENTDATE, S.STATUS, S.APPOINTMENTSTATUSID\n FROM PATIENT P\n JOIN PATIENT_APPOINTMENTS AS A ON P.PATIENTID = A.PATIENTID\n JOIN APPOINTMENTTYPE AS V ON a.APPOINTMENTTYPEID = v.APPOINTMENTTYPEID\n LEFT OUTER JOIN APPOINTMENTSTATUS AS S ON A.APPOINTMENTSTATUSID = S.APPOINTMENTSTATUSID\n left join (PATIENTINSURANCE PAI\n join INSURANCE_TYPE IT on IT.INSURANCE_TYPE_ID=PAI.INSURANCE_TYPEID\n join INSURANCE_COMPANY IC on IC.INSURANCE_COMPANY_ID=PAI.INSURANCE_COMPANY_ID)\n on P.PatientID=PAI.PATIENTID and PAI.INSURANCE_TYPEID=1 and PAI.ACTIVE = 1\n WHERE V.APPOINTMENTTYPEID = A.APPOINTMENTTYPEID AND P.PATIENTID = A.PATIENTID\n AND A.ACTIVE = 1\n \"\"\"\n\n if from_date and to_date:\n query += \" AND APPOINTMENTDATE >= '%s' AND APPOINTMENTDATE <= '%s' \" % (from_date, to_date)\n\n if resources:\n query += \" AND A.RESOURCEID IN (%s)\" % ','.join([str(r) for r in resources])\n\n query += \" ORDER BY A.STARTTIME\"\n results = []\n if not EMRSQLServer.connection():\n return results\n\n rows = EMRSQLServer.execute_query(query)\n\n output = defaultdict(list)\n for row in rows:\n output[row['RESOURCEID']].append(row)\n for item, value in output.items():\n studies = defaultdict(list)\n for i, v in enumerate(output[item]):\n studies_start_date = v['APPOINTMENTDATE'].strftime('%Y-%m-%d')\n studies[item].append({\n 'name': v['TYPE'],\n 'start_time': v['STARTTIME'],\n 'end_time': v['ENDTIME'],\n 'studies_start_date': studies_start_date,\n 'status': v['STATUS'],\n 'APPOINTMENTSTATUSID': v['APPOINTMENTSTATUSID']\n })\n\n studies_by_date = defaultdict(list)\n studies_seen = defaultdict(list)\n for st in studies[item]:\n studies_by_date[st['studies_start_date']].append({\n 'name': st['name'],\n 'start_time': st['start_time'].strftime('%H:%M:%S'),\n 'end_time': st['end_time'].strftime('%H:%M:%S'),\n 'status': st['status']\n })\n studies_seen[st['APPOINTMENTSTATUSID']].append({\n 'name': st['name'],\n 'start_time': st['start_time'].strftime('%H:%M:%S'),\n 'end_time': st['end_time'].strftime('%H:%M:%S'),\n 'status': st['status']\n })\n\n number_of_confirmed_studies = sum([len(studies_seen[int(i)]) for i in status_all])\n days_taken_for_studies = len(studies_by_date)\n total_slots_for_days = resources_slots[item] * days_taken_for_studies\n utilization = (number_of_confirmed_studies * 100) // total_slots_for_days\n\n if utilization <= 79:\n color_code, text_color = '#d9534f', 'white'\n elif (utilization >= 80) and (utilization <= 89):\n color_code, text_color = '#ffe14b', 'black'\n elif utilization >= 90:\n color_code, text_color = '#3c903d', 'white'\n\n results.append({\n 'ResourceID': item,\n 'ResourceName': resources_all[item],\n 'TotalStudies': len(value),\n 'Studies': studies[item],\n 'studies_by_date': studies_by_date,\n 'utilization': '{0}%'.format(utilization),\n 'scheduled_percentage': '{0}%'.format((len(value) * 100) // total_slots_for_days),\n 'number_of_confirmed_studies': number_of_confirmed_studies,\n 'seen_percentage': '{0}%'.format((number_of_confirmed_studies * 100) // len(value)),\n 'total_slots_in_a_day': total_slots_for_days,\n 'color_code': color_code,\n 'text_color': text_color\n })\n return results",
"def view_attendance(request):\n\n\tcontext_dict = {\n\t\t'title': 'All Attendance',\n\t}\n\treturn render(request, \"viewAttendance.html\", context_dict)",
"def index(request):\n try:\n doctor = Doctor.objects.get(user=request.user)\n except Doctor.DoesNotExist:\n raise Http404(\"Doctor with current user instance unavailable!\")\n\n #counting patients who have recovered or are asymptomatic\n asymptomatic_patients = doctor.patients.filter(asymptomatic=True).count()\n\n #call function to calculate doctor age\n age = age_calculation(doctor.user.date_of_birth)\n\n #call method to filter for valid appointments\n appointments = get_appointments_list(doctor.doctor_appointments.all())\n\n context = {\n \"hospital\": doctor.hospital,\n \"doctor\": doctor,\n \"appointments_number\": len(appointments),\n \"patients_number\": doctor.patients.filter(asymptomatic=False).count(),\n \"recovered_number\": asymptomatic_patients,\n \"age\": age \n }\n #return information\n return render(request, \"doctors/index.html\", context)",
"def archives(request):\n template_var = base_template_vals(request)\n try:\n template_var[\"events\"] = Event.objects.all().filter(\n is_approved=True).order_by(\"-event_time\")\n except Event.DoesNotExist:\n raise Http404\n return render_to_response(\"event/event_listview.html\", template_var,\n context_instance=RequestContext(request))",
"def meetings(request):\n meeting_list = Meeting.objects.order_by('held_date')\n context = {'meeting_list': meeting_list}\n return render(request, 'sacms/meetings.html', context)",
"def clerk_create_appointment():\n if request.method == 'POST':\n patient_email = request.form['patient_email']\n doctor_email = request.form['doctor_email']\n date = request.form['date']\n time = request.form['time']\n\n response_clerk_create_appointment = requests.post(server_url + 'medical_clerk/create_appointment', json={\n 'patient_email': patient_email,\n 'doctor_email': doctor_email,\n 'date': date,\n 'time': time\n })\n response_clerk_create_appointment = response_clerk_create_appointment.json()\n\n if response_clerk_create_appointment.get('Status') == \"INVALID_DOCTOR_EMAIL\":\n return render_template('clerks/clerk_appointment_failed.html')\n elif response_clerk_create_appointment.get('Status') == \"INVALID_PATIENT_EMAIL\":\n return render_template('clerks/clerk_appointment_failed.html')\n elif response_clerk_create_appointment.get('Status') == \"DOCTOR_IS_NOT_AVAILABLE_AT_THAT_TIME\":\n return render_template('clerks/clerk_appointment_failed.html')\n elif response_clerk_create_appointment.get('Status') == \"DOCTOR_HAS_AN_APPOINTMENT_SELECTED_TIME_SLOT\":\n return render_template('clerks/clerk_appointment_failed.html')\n else:\n referer = request.referrer\n return redirect(referer, code=302)\n else:\n return render_template('clerks/home.html')",
"def analysis():\n\n response_all_doctors_and_appointments = requests.post(server_url + 'doctor/all_doctors_and_all_appointments')\n doctors_and_appointments = response_all_doctors_and_appointments.json()\n\n return render_template('clerks/analysis.html', doctors_and_appointments=doctors_and_appointments)",
"def clerk_home():\n response_doctors = requests.post(server_url + 'doctor/all_doctors_and_all_appointments', json={})\n doctors = response_doctors.json()\n\n response_get_all_patients = requests.get(server_url + 'patient/get_all_patients')\n patients = response_get_all_patients.json()\n\n return render_template('clerks/home.html', patients=patients, doctors=doctors)",
"def show_events(request):\n event_list = Event.objects.order_by('-date')\n\n event_form = EventForm()\n\n context = {'events': event_list, 'form': event_form}\n return render(request, 'metro_app/events_view.html', context)",
"def look_vacant_offices(request):\n if request.GET:\n if request.GET['datetime_from'] and request.GET['datetime_to']:\n offices = NumberOffice.objects.all()\n reservations = Reservation.objects.all()\n post_from = request.GET['datetime_from']\n post_to = request.GET['datetime_to']\n filteroffice = reservations.all().filter(\n datetime_from__gte=post_from, datetime_to__lte=post_to\n )\n reservednumberoffice = set()\n # set reserved office for corect time\n for i in filteroffice:\n reservednumberoffice.add(i.number_office)\n context = {'offices': offices, \"reservednumberoffice\": reservednumberoffice}\n return render(request, 'coworkings/vacant_offices.html', context)\n else:\n text = 'Enter the correct data or fill in all fields.'\n context = {'text': text}\n return render(request, 'coworkings/look_vacant_offices.html', context)\n else:\n return render(request, 'coworkings/look_vacant_offices.html')",
"def employees():\n # gather data from db about all employees\n return render_template(\"employees.html\")"
] | [
"0.74196094",
"0.73475456",
"0.71870583",
"0.6975466",
"0.6908662",
"0.6689468",
"0.643109",
"0.62312776",
"0.6093619",
"0.60288614",
"0.59134096",
"0.5899959",
"0.5825844",
"0.5782626",
"0.57654375",
"0.5696964",
"0.5693876",
"0.5687839",
"0.56864846",
"0.5657706",
"0.56550545",
"0.56285834",
"0.5603306",
"0.5585834",
"0.55696046",
"0.55567724",
"0.55482924",
"0.5544931",
"0.5542222",
"0.5538536"
] | 0.7767089 | 0 |
Handle requests to /appointments route Create & save a new appointment | def create_appointment():
form = AppointmentForm()
if form.validate_on_submit():
appointment = Appointment(
title = form.title.data,
description = form.description.data,
location = form.location.data,
start = form.start.data,
client = form.client.data,
user = current_user
)
try:
db.session.add(appointment)
db.session.commit()
flash('Successfully created the appointment.')
return redirect(url_for('appointment.read_appointments'))
except:
flash('Error creating the appointment')
return render_template('appointments/form.html.j2', form=form, title='Create appointment') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_patient_appointment():\n if request.method == 'POST':\n patient_email = request.form['patient_email']\n doctor_email = request.form['doctor_email']\n date = request.form['date']\n time = request.form['time']\n\n response = requests.post(server_url + 'patient/create_appointment', json={\n 'patient_email': patient_email,\n 'doctor_email': doctor_email,\n 'date': date,\n 'time': time\n })\n\n response = response.json()\n\n if response.get('Status') == \"DOCTOR_HAS_AN_APPOINTMENT_SELECTED_TIME_SLOT\":\n return render_template('patients/appointment_failed.html')\n elif response.get('Status') == \"DOCTOR_IS_NOT_AVAILABLE_AT_THAT_TIME\":\n return render_template('patients/appointment_failed.html')\n elif response.get('Status') == \"INVALID_PATIENT_EMAIL\":\n return render_template('patients/appointment_failed.html')\n elif response.get('Status') == \"INVALID_DOCTOR_EMAIL\":\n return render_template('patients/appointment_failed.html')\n else:\n referer = request.referrer\n return redirect(referer, code=302)\n else:\n return render_template('patients/dashboard.html')",
"def create_appointment_form(request, post):\n # string_date = \"{0}-{1}-{2}\".format(year, month, day)\n # date = datetime.datetime.strptime(string_date, '%Y-%m-%d').date()\n new_appointment = None\n date_string = post.get(\"date\") + \"-\" + post.get(\"time\")\n date = datetime.datetime.strptime(date_string, '%Y-%m-%d-%H:%M')\n the_user = request.user\n notes = post.get(\"notes\")\n\n if the_user.userprofile.is_doctor():\n patient_id = int(post.get(\"patient\", the_user.pk))\n patient = User.objects.get(pk=patient_id)\n doctor = User.objects.get(pk=the_user.id)\n new_appointment = Appointment.objects.create(date=date, doctor=doctor, patient=patient, notes=notes)\n\n elif request.user.userprofile.is_patient():\n doctor_id = int(post.get(\"doctor\", the_user.pk))\n doctor = User.objects.get(pk=doctor_id)\n patient = User.objects.get(pk=the_user.id)\n new_appointment = Appointment.objects.create(date=date, doctor=doctor, patient=patient, notes=notes)\n\n return new_appointment",
"def clerk_create_appointment():\n if request.method == 'POST':\n patient_email = request.form['patient_email']\n doctor_email = request.form['doctor_email']\n date = request.form['date']\n time = request.form['time']\n\n response_clerk_create_appointment = requests.post(server_url + 'medical_clerk/create_appointment', json={\n 'patient_email': patient_email,\n 'doctor_email': doctor_email,\n 'date': date,\n 'time': time\n })\n response_clerk_create_appointment = response_clerk_create_appointment.json()\n\n if response_clerk_create_appointment.get('Status') == \"INVALID_DOCTOR_EMAIL\":\n return render_template('clerks/clerk_appointment_failed.html')\n elif response_clerk_create_appointment.get('Status') == \"INVALID_PATIENT_EMAIL\":\n return render_template('clerks/clerk_appointment_failed.html')\n elif response_clerk_create_appointment.get('Status') == \"DOCTOR_IS_NOT_AVAILABLE_AT_THAT_TIME\":\n return render_template('clerks/clerk_appointment_failed.html')\n elif response_clerk_create_appointment.get('Status') == \"DOCTOR_HAS_AN_APPOINTMENT_SELECTED_TIME_SLOT\":\n return render_template('clerks/clerk_appointment_failed.html')\n else:\n referer = request.referrer\n return redirect(referer, code=302)\n else:\n return render_template('clerks/home.html')",
"def create_appointment(request):\n dates = get_dates()\n users = User.objects.all()\n\n if request.POST:\n new_appointment = create_appointment_form(request, request.POST)\n if new_appointment:\n messages.add_message(request, messages.SUCCESS, 'Your appointment as been created successfully.')\n else:\n messages.add_message(request, messages.ERROR, 'An error occurred. Your appointment could not be created.'\n 'If this error persists, try contacting our service desk at'\n '1-800-RIX-AJAZ')\n return redirect('view_appointments')\n\n return render(request, 'create_appointment.html', {'the_user': request.user,\n 'dates': dates,\n 'users': users,\n 'hours': range(1, 13),\n 'minutes': range(1, 60)})",
"def create_appointment():\n\n msg = render_template('date')\n return question(msg)",
"def save_appointment_details(request, calendar_id):\n def schedule_mail(reminder_date, appointment):\n # Configure our scheduler for reminder\n try:\n trigger = DateTrigger(run_date=reminder_date)\n scheduler.add_job(send_appointment_mail, args=[appointment], trigger=trigger)\n except Exception as exp:\n print(exp)\n \n def schedule_sms(reminder_date, appointment):\n # Configure our scheduler for reminder\n try:\n trigger = DateTrigger(run_date=reminder_date)\n scheduler.add_job(send_appointment_sms, args=[appointment], trigger=trigger)\n except Exception as exp:\n print(exp)\n \n start_time = request.GET['start_time'][:19]\n end_time = request.GET['end_time'][:19]\n \n start_time = datetime.strptime(start_time, \"%Y-%m-%dT%H:%M:%S\")\n end_time=datetime.strptime(end_time, \"%Y-%m-%dT%H:%M:%S\")\n \n calendar_obj = Calendar.objects.get(pk=calendar_id)\n # if this is a POST request we need to process the form data\n if request.method == 'POST':\n\n # create a form instance and populate it with data from the request:\n form = AppointmentForm(request.POST)\n\n # check whether it's valid and save it\n if form.is_valid():\n # Save appointment details\n \n mobilephone = form.data['mobilephone']\n email = form.data['email']\n first_name = form.data['first_name']\n last_name = form.data['last_name']\n notes = form.data['notes']\n\n appointment = Appointment(start_time=start_time, end_time=end_time, first_name=first_name, \n last_name=last_name, email=email, mobilephone=mobilephone, notes=notes)\n \n appointment.calendar = calendar_obj\n appointment.save()\n\n try:\n send_appointment_mail(appointment) # send appointment details email\n except Exception as exp:\n print(exp)\n \n try:\n send_appointment_sms(appointment) # send appointment details sms\n except Exception as exp:\n print(exp)\n \n # Calculate reminder schedule dates\n reminder1 = start_time - timedelta(hours=2)\n reminder2 = start_time - timedelta(hours=24)\n reminder3 = start_time - timedelta(days=7)\n\n # Schedule mails\n schedule_mail(reminder1, appointment)\n schedule_mail(reminder2, appointment)\n schedule_mail(reminder3, appointment)\n \n # Schedule sms\n schedule_sms(reminder1, appointment)\n schedule_sms(reminder2, appointment)\n schedule_sms(reminder3, appointment)\n \n return redirect(reverse('appointment:complete_appointment', args=[calendar_id]))\n \n # if a GET (or any other method) we'll create a blank form\n else:\n form = AppointmentForm()\n return render(request, 'appointment_form.html', {'form': form, 'start_time': start_time, 'end_time': end_time,\n 'office_location': calendar_obj.office_location})",
"def post(self, request): # FIRST EXAMPLE\n model = self._create_booking(\n request=request) # when _create_booking is invoked, historio Client will log model\n print('save me')",
"def create_appointments(\n data: AppointmentCreate,\n background_tasks: BackgroundTasks, \n user: User = Depends(deps.get_user),\n db: Session = Depends(deps.get_db),\n rdc: RedisCache = Depends(deps.get_redis)\n) -> Any:\n db_provider = crud_user.get_user_by_id(db, str(data.provider_id))\n if not db_provider:\n raise HTTPException(\n status_code=404, \n detail=\"Cabeleireiro não encontrado\"\n )\n\n current_date = datetime.now()\n compare_date = data.date.replace(tzinfo=None)\n if compare_date < current_date:\n raise HTTPException(\n status_code=400, \n detail=\"Você não pode marcar agendamento em datas passadas\"\n )\n \n if data.date.hour < 8 or data.date.hour > 17:\n raise HTTPException(\n status_code=400, \n detail=\"Você só pode cria agendamentos entre 8:00 e 17:00\"\n )\n\n if data.provider_id == user.id:\n raise HTTPException(\n status_code=400, \n detail=\"Você não pode marca agendamento consigo mesmo\"\n )\n\n validate_date = crud_appointment.get_appointment_by_date(db, data.provider_id, data.date)\n if validate_date:\n raise HTTPException(status_code=400, detail=\"Este horario já esta agendado\")\n\n appointment = crud_appointment.create(db, data, user)\n msg = f\"Novo agendamento de {user.name} {user.surname} para o {date.format_date(data.date)}\"\n background_tasks.add_task(crud_notification.create, str(data.provider_id), msg)\n date_time = data.date\n rdc.invalidate_cache(\n f\"providers-appointments:{data.provider_id}:{date_time.year}:{date_time.month}:{date_time.day}\"\n )\n rdc.invalidate_cache(f\"user-appointments:{user.id}\")\n\n return appointment",
"def post(self):\n\n try:\n\n controller = self.controller()\n kwargs = controller.date_time_parser(request.json)\n schema = self.schema(many=False)\n raw_data = controller.create(**kwargs)\n data = schema.dump(raw_data)\n\n return ResponseHandler.render_response(data=data)\n\n except Exception as ex:\n\n return ResponseHandler.render_response(status=ERR, message=traceback.format_exc())",
"def post(self):\n\n params = request.get_json()\n\n # application point of contact email\n if not 'contact' in params:\n resp = {'message': 'Request must include \\'contact\\' parameter.'}\n return resp, 400\n\n # application name\n if not 'name' in params:\n resp = {'message': 'Request must include \\'name\\' parameter.'}\n return resp, 400\n\n # have they \"signed\" the honor code?\n # see `HONOR-CODE.md`\n if not 'honorcode' in params:\n resp = {'message': 'Request must include \\'honorcode\\' parameter '\n 'signifying that you will abide by the Olin API '\n 'Honor Code found at '\n 'https://github.com/DakotaNelson/olin-api'\n '/blob/master/HONOR-CODE.md'}\n return resp, 400\n\n # TODO conversion to bool is gross and mistake-prone\n if bool(params['honorcode']) != True:\n # TODO include link to honor code\n resp = {'message': 'You must agree to the Olin API Honor Code'\n ' in order to use the Olin API.'}\n return resp, 400\n\n try:\n desc = params['description']\n except KeyError:\n desc = None\n\n try:\n homepage = params['homepage']\n except KeyError:\n homepage = None\n\n # get the basics saved to the db\n app = Application(contact=params['contact'], name=params['name'])\n\n try:\n app.save()\n except NotUniqueError:\n resp = {\"message\": \"Application with that name and \"\n \"contact email already exists.\"}\n return resp, 400\n except ValidationError:\n resp = {\"message\": \"Application contact must be a properly \"\n \"formatted email address, and application name \"\n \"must be 140 characters or less.\"}\n return resp, 400\n\n # if the app successfully made it to the DB, add in its details\n # (double-save is inefficient but the code is simpler to grok)\n app.desc = desc\n app.homepage = homepage\n\n try:\n app.save()\n except ValidationError:\n resp = {\"message\": \"Application homepage must be a properly \"\n \"formatted URL. (Make sure to include \"\n \"\\'http://\\')\"}\n app.delete()\n return resp, 400\n\n # generate and return application auth token\n token = app.generate_token()\n\n # if creator's email isn't validated, send email with validation token\n if not app.validated:\n validation_token = app.generate_validation_token()\n\n validation_url = api.url_for(ValidateApp,\n token=validation_token,\n _external=True)\n\n # TODO actual email template that isn't terrible\n send_email(params['contact'],\n \"Here's your Olin-API validation token\",\n \"<a href=\\\"{}\\\">Click here</a>\".format(validation_url))\n\n resp = {'message': 'Success! Application will be valid once email '\n 'has been proven.',\n 'token': token,\n 'validated': False}\n else:\n # token is already valid\n resp = {'message': 'Success! Email has already been proven, so '\n 'you\\'re good to go.',\n 'token': token,\n 'validated': True}\n\n return resp, 200",
"def post(self, request):\n data = dict(request.data)\n ser = _CreateScheduleSerializer(data=data)\n if ser.is_valid(raise_exception=False):\n ser.save()\n return send_200(\n data={\"data\": ser.data}, message=\"schedule created/updated successfully\"\n )\n else:\n return send_400(\n status=\"FAILURE\",\n data={\"errors\": ser.errors},\n message=ser.extract_error_msg(),\n )",
"def createAgenda():\n try:\n responseWrapper: ResponseWrapper = connectMongo.createNewAgenda(request.json)\n except ValueError as valueError:\n print(valueError)\n return jsonify(response=400, msg=\"you didn't sent all the necessary information\")\n except TypeError as typeError:\n return jsonify(response=400, msg=\"you didn't contain the right\")\n\n if responseWrapper.operationDone:\n return jsonify(response=200, agenda=responseWrapper.object.makeJson())\n else:\n # can't get through here\n return jsonify(response=500, msg=\"Creation Failed\")",
"def update_appointment(request,pk):\n appointment = AppointmentRequests.objects.get(id=pk)\n form = AppointmentUpdate(instance=appointment)\n if request.method == \"POST\":\n form = AppointmentUpdate(request.POST,instance=appointment)\n if form.is_valid():\n form.save()\n return redirect(\"dashboard\")\n else:\n messages.info(request,\"Invalid Data sent, Make sure you provided right data.\")\n return redirect(\"update_appointment\",pk=pk)\n else:\n return render(request,\"update_appointment.html\",{\"form\":form})",
"def appointment_list(self, request, **dict):\n\t\tdata = self.get_serializer(self.get_queryset(), many=True).data\n\t\treturn Response(data, status.HTTP_200_OK)",
"def _handle_app(self, _opt, _req_id, _req_body):\n\n resource = None\n app = None\n\n # Validate request.\n if _opt == \"create\":\n app = self.ahandler.validate_for_create(_req_id, _req_body)\n elif _opt == \"update\":\n app = self.ahandler.validate_for_update(_req_id, _req_body)\n elif _opt == \"delete\":\n app = self.ahandler.validate_for_delete(_req_id, _req_body)\n elif _opt == \"confirm\":\n app = self.ahandler.validate_for_confirm(_req_id, _req_body)\n elif _opt == \"rollback\":\n app = self.ahandler.validate_for_rollback(_req_id, _req_body)\n\n if app is None:\n return None\n elif app.status != \"ok\":\n return app\n\n # Check if datacenter is locked.\n # Set the expired time of current lock.\n lock_status = self.lock.is_my_turn(app.datacenter_id)\n if lock_status is None:\n return None\n elif lock_status == \"no\":\n app.status = \"locked\"\n return app\n\n # Load valet rules.\n if self.rh.load_group_rules_from_db() is None:\n return None\n\n if _opt == \"create\":\n # Make placement decisions for newly created servers in stack.\n\n # Load resource (hosts, racks, metadata, groups) from DB.\n if not self.rh.load_resource(_req_body.get(\"datacenter\")):\n return None\n\n resource = self.rh.resource_list[0]\n\n # Sync rsource status with platform (OpenStack Nova).\n if not resource.sync_with_platform():\n self.logger.error(\"fail to sync resource status\")\n app.status = \"fail to sync resource status\"\n return app\n\n app.set_resource(resource)\n\n self.ahandler.set_for_create(app)\n if app is None:\n return None\n elif app.status != \"ok\":\n return app\n\n self.optimizer.place(app)\n if app.status != \"ok\":\n return app\n\n elif _opt == \"update\":\n # TODO(Gueyoung): assume only image update and\n # Valet does not deal with this update.\n\n self.ahandler.set_for_update(app)\n if app is None:\n return None\n elif app.status != \"ok\":\n return app\n\n return app\n\n elif _opt == \"delete\":\n # Mark delete state in stack and servers.\n\n # Load resource (hosts, racks, metadata, groups) from DB\n if not self.rh.load_resource(_req_body.get(\"datacenter\")):\n return None\n\n resource = self.rh.resource_list[0]\n\n # Sync rsource status with platform\n if not resource.sync_with_platform():\n self.logger.error(\"fail to sync resource status\")\n app.status = \"fail to sync resource status\"\n return app\n\n app.set_resource(resource)\n\n self.optimizer.update(app)\n if app.status != \"ok\":\n return app\n\n elif _opt == \"confirm\":\n # Confirm prior create, delete, or update request.\n\n datacenter_info = {\"id\": app.datacenter_id, \"url\": \"none\"}\n\n # Load resource (hosts, racks, metadata, groups) from DB\n # No sync with platform.\n if not self.rh.load_resource(datacenter_info):\n return None\n\n resource = self.rh.resource_list[0]\n\n app.set_resource(resource)\n\n self.optimizer.confirm(app)\n if app.status != \"ok\":\n return app\n\n elif _opt == \"rollback\":\n # Rollback prior create, delete, or update request.\n\n datacenter_info = {\"id\": app.datacenter_id, \"url\": \"none\"}\n\n # Load resource (hosts, racks, metadata, groups) from DB\n # No sync with platform.\n if not self.rh.load_resource(datacenter_info):\n return None\n\n resource = self.rh.resource_list[0]\n\n app.set_resource(resource)\n\n self.optimizer.rollback(app)\n if app.status != \"ok\":\n return app\n\n # Check timeout before store data.\n if self.lock.expired < now():\n app.status = \"timeout\"\n return app\n\n # Store app info into DB.\n if not self.ahandler.store_app(app):\n return None\n 
self.logger.info(\"requested app(\" + app.app_name + \") is stored\")\n\n # Store resource into DB.\n if not resource.store_resource(opt=_opt, req_id=_req_id):\n return None\n self.logger.info(\"resource status(\" + resource.datacenter_id + \") is stored\")\n\n # TODO(Gueyoung): if timeout happened at this moment,\n # Rollback data change.\n\n return app",
"def edit_appointment(request, id):\n users = User.objects.all()\n appointment = get_object_or_404(Appointment, pk=id)\n if request.POST:\n post = request.POST\n date_string = post.get(\"date\") + \"-\" + post.get(\"time\")\n try:\n date = datetime.datetime.strptime(date_string, '%Y-%m-%d-%H:%M')\n appointment.date = date\n except ValueError:\n pass\n the_user = request.user\n notes = post.get(\"notes\")\n appointment.notes = notes\n\n if the_user.userprofile.is_doctor():\n try:\n patient_id = int(post.get(\"patient\", the_user.pk))\n patient = User.objects.get(pk=patient_id)\n appointment.patient = patient\n except ValueError:\n pass\n\n elif request.user.userprofile.is_patient():\n try:\n doctor_id = int(post.get(\"doctor\", the_user.pk))\n doctor = User.objects.get(pk=doctor_id)\n appointment.doctor = doctor\n except ValueError:\n pass\n\n if appointment:\n messages.add_message(request, messages.SUCCESS, 'Your changes have been saved.')\n else:\n messages.add_message(request, messages.ERROR, 'An error occurred. Please contact an admin for assistance.')\n appointment.save()\n return redirect('view_appointments')\n return render(request, 'edit_appointment.html', {'appointment': appointment,\n 'the_user': request.user,\n 'users': users})",
"def post(self, request, pk, format=None):\n serializer = TimeReminderSerializer(data=request.data)\n if (serializer.is_valid()):\n user = self.get_user(pk=pk)\n tr = serializer.save()\n user.time_reminders.add(tr)\n return Response(serializer.data, status.HTTP_201_CREATED)\n return Response(serializer.errors, status.HTTP_400_BAD_REQUEST)",
"def test_admin_alarm_admin_add(self):\n response = self.client.get(\"/admin/appointment/alarm/add/\")\n self.assertEqual(response.status_code, 200)",
"def create_new_reservation():\n if not request.json:\n return jsonify({'error': 'no body supplied'}), 400\n\n # look up by date to see if any availability\n res_date = request.json.get('date', None)\n if not res_date:\n error = 'no reservation date supplied'\n flash(error, 'error')\n return jsonify({'error': error}), 400\n\n # check if res time present, if found, convert to DT object\n res_time = request.json.get('time', None)\n if not res_time:\n error = 'no reservation time supplied'\n flash(error, 'error')\n return jsonify({'error': error}), 400\n res_time = time_str_to_obj(res_time)\n\n open_inventory = session.query(Inventory).filter_by(date=res_date).all()\n if not open_inventory:\n error = 'no open inventory for date {}'.format(res_date)\n flash(error, 'error')\n return jsonify({'error': error})\n\n error = 'reservation invalid'\n for inv in open_inventory:\n for window in inv.windows:\n if window.current_res_count < window.max_res_count:\n # check if res date falls in current window\n window_start = time_str_to_obj(window.start_time)\n window_end = time_str_to_obj(window.end_time)\n\n # if requested res time is valid, update res count and save res\n if window_start <= res_time < window_end:\n window.current_res_count = window.current_res_count + 1\n session.add(window)\n\n res = Reservation(**request.json)\n session.add(res)\n resp = session.commit()\n if not resp:\n # send message to flask for creation by name\n flash('reservation for {} created'.format(request.json.get('name')), 'success')\n return jsonify({'message': 'reservation for {} created'.format(request.json.get('name'))})\n else:\n error = 'requested reservation time is not available in current inventory'\n else:\n error = 'current inventory window cannot accept additional reservations, please select different time'\n flash(error, 'error')\n return jsonify({'error': error}), 400",
"def add_event(request, owner_type, owner_id):\n\n # Like before, get the request's context.\n context = RequestContext(request)\n event_added = False\n\n user = request.user\n # If it's a HTTP POST, we're interested in processing form data.\n if request.method == 'POST':\n verified_obj = verified_calendar(context, owner_type, owner_id, user)\n if not isinstance(verified_obj, HttpResponse):\n calendar, edit_priv = verified_obj\n else:\n return verified_obj\n\n # Attempt to grab information from the raw form information.\n event_form = EventForm(data=request.POST)\n if event_form.is_valid():\n # Save the event's form data to the database.\n event = event_form.save(commit=False)\n event.cal = calendar\n event.creator = user\n\n event.save()\n\n event_added = True\n\n #notify the subscribers\n created_event.send(sender=None, owner_type=owner_type, owner_id=owner_id,\n event=event, user=user)\n\n\n # Invalid form or forms - mistakes or something else?\n # Print problems to the terminal.\n # They'll also be shown to the user.\n else:\n print event_form.errors\n\n # Not a HTTP POST, so we render our form using the EventForm.\n # These forms will be blank, ready for user input.\n else:\n event_form = EventForm()\n\n # Render the template depending on the context.\n return render_to_response(\n 'scheduler/add_event.html', {'event_form': event_form, 'user' : user,\n 'event_added': event_added},\n context)",
"def appointments(request):\n now = timezone.localtime(timezone.now())\n data = {}\n tables = {}\n rows = []\n seen = Appointment.objects.filter(seen_time__isnull=False).filter(\n checkin_date__iexact=now.date())\n # Today's COMPLETE patients\n complete = seen.filter(finish_time__isnull=False)\n for a in complete:\n d = {}\n d['id'] = a.id\n d['name'] = a.first_name + ' ' + a.last_name\n h, m, s = to_hms(get_waiting_time(a, now.time()))\n wait_time = \"\" + str(h) + \":\" + str(m) + \":\" + str(s)\n d['wait_time'] = wait_time\n rows.append(d)\n tables['Completed'] = rows\n rows = []\n # Today's IN_SESSION patients\n in_session = seen.filter(finish_time__isnull=True)\n for a in in_session:\n d = {}\n d['id'] = a.id\n d['name'] = a.first_name + ' ' + a.last_name\n h, m, s = to_hms(get_waiting_time(a, now.time()))\n wait_time = \"\" + str(h) + \":\" + str(m) + \":\" + str(s)\n d['wait_time'] = wait_time\n rows.append(d)\n tables['In Session'] = rows\n data['tables'] = tables\n return render(request, 'doctor/appointments.html', data)",
"def event_process():\n title = request.args.get(\"title\")\n description = request.args.get(\"des\")\n location = request.args.get(\"location\")\n start_date_time = request.args.get(\"start_date_time\")\n end_date_time = request.args.get(\"end_date_time\")\n user_id = session['user']\n sport_id = request.args.get(\"sport\")\n\n event = Event(title = title, description = description,\n location = location,date = start_date_time, time = end_date_time,\n user_id=user_id, sport_id=sport_id)\n\n db.session.add(event)\n db.session.commit()\n return redirect('/')",
"async def add_reservation_endpoint(request):\n hotel_id = request.args[\"hotel_id\"][0]\n room_type = request.args[\"room_type\"][0]\n arrival_date = request.args[\"arrival_date\"][0]\n departure_date = request.args[\"departure_date\"][0]\n status = request.args[\"status\"][0]\n reservation_id = model.add_reservation(hotel_id, room_type, arrival_date, departure_date, status)\n if reservation_id == model.OPERATION_ERROR_RETURN_CODE:\n return json({\"success\": False})\n return json({\"success\": True, \"reservation_id\": reservation_id})",
"def book(request):\n if request.method == \"POST\": # getting all fields\n first_name = request.POST.get(\"first_name\") \n last_name = request.POST.get(\"last_name\")\n email_address = request.POST.get(\"email_address\")\n phone_code = request.POST.get(\"phone_code\")\n phone_number = request.POST.get(\"phone_number\")\n countries = request.POST.getlist(\"countries\")\n company = request.POST.get(\"company\")\n objective = request.POST.get(\"objective\")\n details = request.POST.get(\"details\")\n print(first_name,last_name,email_address,phone_code,phone_number,countries,company,objective,details)\n # if all fields not None and have value\n if first_name and last_name and email_address and phone_code and phone_number and countries and company and objective and details:\n try: # to check that phone number is not text, try to convert it to integar\n phone_number = int(phone_number)\n except: # if failed to be converted to integar\n messages.info(request,\"Phone number field must be filled with numbers only.\") # display this message for user\n return redirect(\"book\") # reload the page\n mobile_number = phone_code + str(phone_number) # getting complete mobile number as string\n selected_countries = \", \".join(countries) # converting countries list to be saved as string\n print(selected_countries)\n if not AppointmentRequests.objects.filter(phone_number=mobile_number): # if a user tries to request an appointment with new info of mobile number and email address (not already exist in database)\n if not AppointmentRequests.objects.filter(email_address=email_address):\n\n AppointmentRequests.objects.create(first_name=first_name,last_name=last_name,email_address=email_address,phone_number=mobile_number,\n countries=selected_countries,company= company,objective=objective, details=details) # create an appointment\n\n\n # send email to user\n send_mail( \n subject=f\"Service Provider Appointment\",\n message=f\"\"\"\n Dear {first_name} {last_name},\n [+] Your Info provided:\n 1- First name: {first_name}.\n 2- Last name: {last_name}.\n 3- Email address: {email_address}.\n 4- Phone number: {mobile_number}.\n 5- Countries: {selected_countries}.\n 6- Company: {company}.\n 7- Objective: {objective}.\n 8- Details:\n {details}\n \\n\n We will communicate with you as soon as possible.\n \"\"\",\n recipient_list=[email_address,],from_email=\"[email protected]\",fail_silently=False,\n )\n # send email to service provider agent\n send_mail(\n subject=f\"A new requested Appointment by {first_name} {last_name}\",\n message=f\"\"\"\n [+] Info provided:\n 1- First name: {first_name}.\n 2- Last name: {last_name}.\n 3- Email address: {email_address}.\n 4- Phone number: {mobile_number}.\n 5- Countries: {selected_countries}.\n 6- Company: {company}.\n 7- Objective: {objective}.\n 8- Details:\n {details}\n \"\"\",\n recipient_list=[\"[email protected]\",],from_email=\"[email protected]\",fail_silently=False,\n )\n return redirect(\"confirm\")\n\n else:\n messages.info(request,\"You have already sent a request, we will communicate you as soon as possible, we will handle any changes you want (if exist) when contact.\")\n return redirect(\"book\") # reload the page\n\n else: # if user tries to request a new appointment using same mobile number\n messages.info(request,\"You have already sent a request, we will communicate you as soon as possible, we will handle any changes you want (if exist) when contact.\")\n return redirect(\"book\") # reload the page\n \n\n\n else: # if any field is empty or None\n 
messages.info(request,\"Please, fill empty fields\")\n return redirect(\"book\") # reload the page\n \n return render(request,\"book_appointment.html\")",
"def add_event():\n\n business = request.form.get('bus_name')\n name_evt = request.form.get('name_evt')\n\n start = request.form.get('start')\n end = request.form.get('end')\n description = request.form.get('description')\n\n #TODO might run into service option problems\n # service = request.form.get('service')\n\n #business = get bus_id from session?\n\n # new_evt = crud.create_event(name_evt, start, end, description, service, business)\n\n # return redirect('/')\n \n return render_template('add_evts.html')",
"def post(self, request, *args, **kwargs):\n application = self.get_object()\n app_complete = Application.objects.filter(\n pk=self.kwargs['app_complete']\n ).first()\n if is_application_owner(self.request.user, application) and (\n application.questionnaire.status != 'complete'\n ) and app_complete is not None and (\n app_complete.authorized_email is not None\n ) and app_complete.questionnaire.completed_by_candidate and (\n app_complete.questionnaire.status == 'complete'\n ):\n\n \"\"\"Attach authorized email & questionnaire to application\"\"\"\n application.authorized_email = app_complete.authorized_email\n application.questionnaire = app_complete.questionnaire\n application.save()\n\n \"\"\"Submit application if nomination is complete too\"\"\"\n if application.nomination.status == 'complete':\n submit_application(application)\n\n return redirect(self.get_success_url())\n else:\n raise Http404(_(\"No application found matching the query\"))",
"def post(self, request, pk, format=None):\n serializer = LocationReminderSerializer(data=request.data)\n if (serializer.is_valid()):\n user = self.get_user(pk=pk)\n lr = serializer.save()\n user.location_reminders.add(lr)\n return Response(serializer.data, status.HTTP_201_CREATED)\n return Response(serializer.errors, status.HTTP_400_BAD_REQUEST)",
"def post(self):\n response = \"Null\"\n\n data = request.json\n self.logger.info(\"########## Events API Called\")\n self.logger.info(data)\n\n if data.get('type') == \"INSERT\":\n name = data.get('name')\n date = data.get('event_date')\n event_type = data.get('event_type')\n location = data.get('event_location')\n\n print(name + \" \" + date + \" \" + event_type + \" \" + location + \" \\n\\n\\n\\n\")\n\n if name is None or date is None or event_type is None or location is None:\n response = \"Couldnt perfomr action: Missing data\"\n else:\n response = EVENTS.insert_event(name, date, event_type, location)\n\n return jsonify({\n 'events': response\n }), 201",
"def create(self, request, *args, **kwargs):\n if request.user.role < User.ORGANIZER:\n return Response(\n {'error_message': _('Only Organizer can create events.')},\n status=status.HTTP_400_BAD_REQUEST\n )\n if 'title' not in request.data:\n return Response(\n {'error_message': _('No title provided.')},\n status=status.HTTP_400_BAD_REQUEST\n )\n if 'city_id' not in request.data:\n return Response(\n {'error_message': _('No city provided.')},\n status=status.HTTP_400_BAD_REQUEST\n )\n if 'currency_id' not in request.data:\n return Response(\n {'error_message': _('No currency provided.')},\n status=status.HTTP_400_BAD_REQUEST\n )\n if 'min_price' not in request.data and 'max_price' not in request.data:\n return Response(\n {'error_message': _('Min_price or max_price should be provided.')},\n status=status.HTTP_400_BAD_REQUEST\n )\n if 'datetimes' not in request.data:\n if 'start_datetime' not in request.data:\n return Response(\n {'error_message': _('No start_datetime provided.')},\n status=status.HTTP_400_BAD_REQUEST\n )\n if 'end_datetime' not in request.data:\n return Response(\n {'error_message': _('No end_datetime provided.')},\n status=status.HTTP_400_BAD_REQUEST\n )\n start_datetime = dateutil.parser.parse(request.data.pop('start_datetime'))\n end_datetime = dateutil.parser.parse(request.data.pop('end_datetime'))\n\n start_time = datetime.datetime.strftime(start_datetime, settings.TIME_STRING_FIELD_FORMAT)\n end_time = datetime.datetime.strftime(end_datetime, settings.TIME_STRING_FIELD_FORMAT)\n request.data['datetimes'] = [{\n 'date': date_to_string(date, settings.DATE_STRING_FIELD_FORMAT),\n 'start_time': start_time,\n 'end_time': end_time,\n } for date in daterange(start_datetime.date(), end_datetime.date())]\n else:\n for item in request.data['datetimes']:\n if 'date' not in item or \\\n 'start_time' not in item or \\\n 'end_time' not in item:\n return Response(\n {'error_message': _('Wrong datetimes format provided.')},\n status=status.HTTP_400_BAD_REQUEST\n )\n try:\n string_to_date(str(item['date']), settings.DATE_STRING_FIELD_FORMAT)\n string_to_time(item['start_time'], settings.TIME_STRING_FIELD_FORMAT)\n string_to_time(item['end_time'], settings.TIME_STRING_FIELD_FORMAT)\n except:\n return Response(\n {'error_message': _('Wrong datetimes format provided.')},\n status=status.HTTP_400_BAD_REQUEST\n )\n\n serializer = self.get_serializer(data=request.data)\n if serializer.is_valid():\n self.perform_create(serializer)\n headers = self.get_success_headers(serializer.data)\n\n return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)\n else:\n return Response(\n serializer.errors,\n status=status.HTTP_400_BAD_REQUEST\n )",
"def create_event():\n event = None\n form = CreateEventForm()\n if form.validate_on_submit():\n venue = Venue.query.filter_by(address=form.address.data).first()\n if venue is None: # venue not already in db, need to add it\n venue_data = form.data\n venue_data[\"name\"] = venue_data[\"venue_name\"]\n venue_data[\"state\"] = CreateEventForm.convert_choice_to_value(form.state.data, \"STATES\")\n venue = Venue.create(**venue_data)\n event_type = EventType.query.get(form.event_type.data)\n event_category = EventCategory.query.get(form.category.data)\n start_time = CreateEventForm.convert_choice_to_value(form.start_time.data, \"TIMES\")\n end_time = CreateEventForm.convert_choice_to_value(form.end_time.data, \"TIMES\")\n event = Event(\n title=form.title.data,\n start_datetime=datetime.combine(form.start_date.data, start_time),\n end_datetime=datetime.combine(form.end_date.data, end_time),\n venue=venue,\n event_type=event_type,\n event_category=event_category,\n user=current_user._get_current_object(),\n )\n db.session.commit()\n return redirect(url_for(\"events.event_details\", id=event.id))\n return render_template(\"events/create_event.html\", form=form, event=event)"
] | [
"0.72616637",
"0.684108",
"0.6836346",
"0.66382486",
"0.65843296",
"0.63732463",
"0.62283784",
"0.6220162",
"0.6175483",
"0.60073113",
"0.60027397",
"0.58442044",
"0.5837622",
"0.5805806",
"0.57763404",
"0.5776016",
"0.574254",
"0.56820095",
"0.5660373",
"0.56394625",
"0.5607393",
"0.5603617",
"0.55678064",
"0.5566342",
"0.5563907",
"0.5537104",
"0.55342555",
"0.5524704",
"0.5513104",
"0.549971"
] | 0.73936397 | 0 |
Maximum likelihood labels for some distribution over y's | def label(self, p_y_given_x):
return np.argmax(p_y_given_x, axis=2).T | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def NLL(self,y):\n return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])",
"def negative_log_likelihood(self, y):\r\n # y.shape[0] is (symbolically) the number of rows in y, i.e., number of examples (call it n) in the minibatch\r\n # T.arange(y.shape[0]) is a symbolic vector which will contain [0,1,2,... n-1]\r\n # T.log(self.p_y_given_x) is a matrix of Log-Probabilities (call it LP) with one row per example and one column per class\r\n # LP[T.arange(y.shape[0]),y] is a vector v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ..., LP[n-1,y[n-1]]]\r\n # and T.mean(LP[T.arange(y.shape[0]),y]) is the mean (across minibatch examples) of the elements in v,\r\n # i.e., the mean log-likelihood across the minibatch.\r\n return T.log(self.p_y_given_x)[T.arange(y.shape[0]),y]",
"def negative_log_likelihood(self, y):\r\n return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])",
"def negative_log_likelihood(self, y):\r\n return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])",
"def get_y_logl(self, y_list):",
"def loglikelihood(self, y):\n raise NotImplementedError",
"def negative_log_likelihood(self, y):\r\n \r\n return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])\r\n # end-snippet-2\r",
"def negative_log_likelihood(self, y):\r\n # start-snippet-2\r\n # y.shape[0] is (symbolically) the number of rows in y, i.e. number of examples (call it n) in the minibatch\r\n # T.arange(y.shape[0]) is a symbolic vector which will contain [0,1,2,... n-1]\r\n # T.log(self.p_y_given_x) is a matrix of Log-Probabilities (call it LP) with one row per example and one column per class\r\n # LP[T.arange(y.shape[0]),y] is a vector v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ..., LP[n-1,y[n-1]]] and\r\n # T.mean(LP[T.arange(y.shape[0]),y]) is the mean (across minibatch examples) of the elements in v, i.e., the mean log-likelihood across the minibatch.\r\n\r\n #print \"y.ndim = \",y.ndim\r\n return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])\r\n # end-snippet-2\r",
"def negative_log_likelihood(self, y):\n return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])",
"def negative_log_likelihood(self, y):\n return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])",
"def negative_log_likelihood(self, y):\r\n # y.shape[0] is (symbolically) the number of rows in y, i.e.,\r\n # number of examples (call it n) in the minibatch\r\n # T.arange(y.shape[0]) is a symbolic vector which will contain\r\n # [0,1,2,... n-1] T.log(self.p_y_given_x) is a matrix of\r\n # Log-Probabilities (call it LP) with one row per example and\r\n # one column per class LP[T.arange(y.shape[0]),y] is a vector\r\n # v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ...,\r\n # LP[n-1,y[n-1]]] and T.mean(LP[T.arange(y.shape[0]),y]) is\r\n # the mean (across minibatch examples) of the elements in v,\r\n # i.e., the mean log-likelihood across the minibatch.\r\n return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])",
"def max_y_arg(self):\n return max((self(0).y,0), (self(1).y,1))[1]",
"def LL(y, yhat):\n\n return -np.sum(norm.logpdf(y, loc=yhat, scale=np.std(y)))",
"def labels(self):\n return self.label(self.p_y_given_x)",
"def loss_maxL(y, tx, w):\n # to avoid problems when 0 in log\n epsilon=0.00000001\n sig=sigmoid(tx.dot(w))\n # calculated probability\n p=y.T.dot(np.log(sig+epsilon)) + (1-y).T.dot(np.log(1-sig+epsilon))\n #divides with number of samples so that learning rate is not dependant on number of samples\n p=p/len(y)\n return np.squeeze(- p)",
"def fit(self, X, y):\n proportions = y.value_counts()/y.value_counts().sum()\n self.labels = proportions.index.values.astype(bool)\n self.guess = np.argmax(proportions)\n self.fitted = True",
"def log_marginal_likelihood(X_train,y_train,phi,tau=1.,Ve=1.e-10):",
"def max_log_likelihood(data):\n # Assume data is given as counts\n tot = sum(data)\n return _np.sum([nlogp(n, n / tot) for n in data if n > 0])",
"def _get_max_likelihood_genus(self, words,\n word_posteriors, word_idxs):\n #Argmax prod( p(vi|G) )\n row_idxs = filter(not_none, map(word_idxs.get, words))\n likelihoods = word_posteriors[row_idxs].prod(0)\n # avoid .argmax() to solve tie problem.\n return (likelihoods == likelihoods.max()).nonzero()[0]",
"def y_to_label(self, data, Y):\n pass",
"def negative_log_likelihood(self):\n # y.shape[0] is (symbolically) the number of rows in y, i.e.,\n # number of examples (call it n) in the minibatch\n # T.arange(y.shape[0]) is a symbolic vector which will contain\n # [0,1,2,... n-1] T.log(self.p_y_given_x) is a matrix of\n # Log-Probabilities (call it LP) with one row per example and\n # one column per class LP[T.arange(y.shape[0]),y] is a vector\n # v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ...,\n # LP[n-1,y[n-1]]] and T.mean(LP[T.arange(y.shape[0]),y]) is\n # the mean (across minibatch examples) of the elements in v,\n # i.e., the mean log-likelihood across the minibatch.\n return -T.log(self.p_y_given_x)[T.arange(self.y.shape[0]), self.y]",
"def log_likelihood(y_true, y_pred):\n ll = np.sum(y_true * np.log(y_pred) - y_pred)\n return ll",
"def _calculate_probs_and_entropy_y(self):\n #calculate y probabilities and H(Y)\n #H(Y) = Sum(y € Y)(-P(Y=y) * log(P(Y=y)))\n self.lab_entropy = 0\n s = sum(self.lab_counts.values())\n for label, count in self.lab_counts.items():\n self.lab_probs[label] = count / s\n self.lab_entropy -= self.lab_probs[label] * self.log(self.lab_probs[label])",
"def softmax_cross_entropy(y, label):\r\n losses = np.sum((- np.log(y + g_epsilon) * label), axis=1)\r\n return losses\r\n pass",
"def choice(Y):\n m,n = Y.shape\n max_entropy = - m*math.log(1.0/n)\n log_Y = np.ma.log(Y)\n return - np.sum(Y * log_Y) / max_entropy",
"def lnprob(theta, model, priors, x, y, yerr):\n lp = lnprior(theta, priors)\n if not np.isfinite(lp):\n return -np.inf\n return lp + lnlike(theta, model, x, y, yerr)",
"def lnlike(theta, model, x, y, yerr):\n return -np.nansum(0.5 * np.log([2 * np.pi] * len(y)))\\\n -np.nansum(np.log(yerr))\\\n -0.5*np.nansum(((y-model(x, *theta))/yerr)**2)",
"def comp_probs_and_labels(self, x_feats):\n scores = sparse.dot(x_feats, self.W) + self.Wb # [l, d] x [d, m] + [m] => [l, m]\n relation_probs = T.nnet.softmax(scores)\n labels = T.argmax(scores, axis=1) # [l, m] => [l]\n return labels, relation_probs",
"def nr_labels(self):\n return None if self.pY is None else self.Y.shape[1]",
"def softmax_loss(x, y):\n\n eps = 1e-5\n \n N,C = x.shape\n p = softmax(x)\n llikelihood = -np.log(p[range(N),y] + eps)\n# print(llikelihood)\n loss = np.sum(llikelihood) / N\n\n dx = p\n dx[range(N),y] -= 1\n dx = dx/N\n \n return loss, dx"
] | [
"0.6788078",
"0.65894145",
"0.6543456",
"0.6543456",
"0.6543217",
"0.6507876",
"0.64932126",
"0.6468587",
"0.6444979",
"0.6444979",
"0.6328523",
"0.6295609",
"0.6202991",
"0.61464185",
"0.61247444",
"0.611593",
"0.6109292",
"0.60936534",
"0.60499567",
"0.60223633",
"0.5948092",
"0.5911787",
"0.59058017",
"0.59014636",
"0.5852467",
"0.5837943",
"0.58378166",
"0.58358777",
"0.5832428",
"0.5830224"
] | 0.710404 | 0 |
Estimate marginals, log p(xi|yj) for each possible type. | def marginal_p(self, xi, thetai):
if self.marginal_description == 'gaussian':
mu, sig = thetai # mu, sig have size m by k
xi = xi.reshape((-1, 1, 1))
return (-(xi - mu)**2 / (2. * sig) - 0.5 * np.log(2 * np.pi * sig)).transpose((1, 0, 2)) # log p(xi|yj)
elif self.marginal_description == 'discrete':
# Discrete data: should be non-negative integers starting at 0: 0,...k. k < 32 because of np.choose limits
logp = [theta[np.newaxis, ...] for theta in thetai] # Size dim_visible by n_hidden by dim_hidden
return np.choose(xi.reshape((-1, 1, 1)), logp).transpose((1, 0, 2))
else:
print('Marginal description "%s" not implemented.' % self.marginal_description)
sys.exit() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_marginal_likelihood(self):\n data = np.repeat([1, 0], [50, 50])\n marginals = []\n a_prior_0, b_prior_0 = 1.0, 1.0\n a_prior_1, b_prior_1 = 20.0, 20.0\n\n for alpha, beta in ((a_prior_0, b_prior_0), (a_prior_1, b_prior_1)):\n with pm.Model() as model:\n a = pm.Beta(\"a\", alpha, beta)\n y = pm.Bernoulli(\"y\", a, observed=data)\n trace = pm.sample_smc(2000, chains=2, return_inferencedata=False)\n # log_marginal_likelihood is found in the last value of each chain\n lml = np.mean([chain[-1] for chain in trace.report.log_marginal_likelihood])\n marginals.append(lml)\n\n # compare to the analytical result\n assert abs(np.exp(marginals[1] - marginals[0]) - 4.0) <= 1",
"def set_marginals(self, bw_method=None):\n \n # Log density\n def kde(sample):\n k = gaussian_kde(np.transpose(sample), bw_method=bw_method)\n return lambda X: k.logpdf(np.array(X))[0]\n \n for para in [\"theta\", \"q\"]:\n for typ in [\"prior\", \"post\"]:\n sample = getattr(self, typ)[para][\"sample\"]\n \n if sample is None:\n getattr(self, typ)[para][\"marginal\"] = [\n None\n for I in util.marg_1_2]\n continue\n \n getattr(self, typ)[para][\"marginal\"] = [\n kde(sample[:, I])\n for I in util.marg_1_2]\n \n if self.hyperpara[0] == 3:\n if self.hyperpara[1] == \"i\":\n qu_diff_dist = [\n TruncatedDistribution(\n Normal(self.para[i, 0], self.para[i, 1]),\n 0.0,\n TruncatedDistribution.LOWER)\n for i in range(3)]\n qu_dist = [\n qu_diff_dist[0],\n qu_diff_dist[0] + qu_diff_dist[1],\n qu_diff_dist[0] + qu_diff_dist[1] + qu_diff_dist[2]]\n \n self.prior[\"q\"][\"marginal\"][:3] = [\n qu_dist[i].computeLogPDF\n for i in range(3)]\n elif self.hyperpara[1] == \"me\":\n self.prior[\"q\"][\"marginal\"][:3] = [\n TruncatedDistribution(\n Normal(self.para[i, 0], self.para[i, 1]),\n 0.0,\n TruncatedDistribution.LOWER).computeLogPDF\n for i in range(3)]",
"def log_marginal(self):\n #\n # Predictive covariance of x is sum of covariance of phi a and covariance of x|a\n x_Sigma = self.phi @ self.phi.T + np.diag(self.sigma_n**2 * np.ones(self.M))\n #\n # Predictive mean is 0 by symmetry\n # so given that x is distributed as a MVN, the exact marginal is\n lp_exact = st.multivariate_normal.logpdf(self.x, cov=x_Sigma)\n #\n return lp_exact",
"def calculate_marginals_on_samples(self, theta, Xm, return_ratio=True):\n n_samples, n_visible = Xm.shape\n log_marg_x = np.zeros((self.n_hidden, n_samples, n_visible, self.dim_hidden)) #, dtype=np.float32)\n if n_visible > 1 and self.pool is not None:\n args = zip([self] * len(theta), Xm.T, theta)\n log_marg_x = np.array(self.pool.map(unwrap_f, args)).transpose((1, 2, 0, 3))\n else:\n for i in range(n_visible):\n log_marg_x[:, :, i, :] = self.calculate_p_xi_given_y(Xm[:, i], theta[i])\n if return_ratio: # Return log p(xi|y)/p(xi) instead of log p(xi|y)\n # Again, I use the same p(y) here for each x_i, but for missing variables, p(y) on obs. sample may be different.\n # log_marg_x -= logsumexp(log_marg_x + self.log_p_y.reshape((self.n_hidden, 1, 1, self.dim_hidden)), axis=3)[..., np.newaxis]\n log_marg_x += self.log_p_y.reshape((self.n_hidden, 1, 1, self.dim_hidden))\n if self.pool is not None:\n log_marg_x -= np.array(self.pool.map(logsumexp2, log_marg_x))[..., np.newaxis]\n else:\n log_marg_x -= logsumexp(log_marg_x, axis=3)[..., np.newaxis]\n log_marg_x -= self.log_p_y.reshape((self.n_hidden, 1, 1, self.dim_hidden))\n return log_marg_x",
"def get_marginals(word, model):\n # forward and backward message at once\n char_count, _ = word.shape\n alpha = np.zeros((char_count, model.dimY)) # alphas\n lbeta = np.zeros((char_count, model.dimY)) # log version of betas\n\n first_term = np.dot(word, model.getW(model.labels))\n second_term_a = model._T\n second_term_b = model._T.T\n for i in range(1, char_count):\n sum_term_a = (first_term[i-1] + alpha[i-1]) + second_term_a\n sum_term_b = (first_term[char_count-i] +lbeta[char_count-i]) + second_term_b\n alpha[i] = np.apply_along_axis(logsumexp_trick, 1, sum_term_a) \n lbeta[char_count-i-1] = np.apply_along_axis(logsumexp_trick, 1, sum_term_b)\n\n marginal_Y = np.zeros((char_count, model.dimY))\n marginal_Y_Y1 = np.zeros((char_count-1, model.dimY, model.dimY)) \n \n for i in range(char_count):\n sum_term = first_term[i] + alpha[i] + lbeta[i]\n log_marginal_y = sum_term - logsumexp_trick(sum_term)\n marginal_Y[i] = np.exp(log_marginal_y)\n # calculate other marginal dist as well\n if i < char_count-1:\n transition = model._T.transpose() # T_{yi, yi+1}\n outer_sum_w = np.add.outer(first_term[i], first_term[i+1]).reshape(model.dimY,model.dimY)\n outer_sum_m = np.add.outer(alpha[i], lbeta[i+1])\n sum_term_all = outer_sum_w + transition + outer_sum_m\n log_marginal_y_y1 = sum_term_all - logsumexp_trick(sum_term_all)\n marginal_Y_Y1[i] = np.exp(log_marginal_y_y1)\n # Got Denominator same as Zx , which is correct\n return marginal_Y, marginal_Y_Y1",
"def _object_func_marginals_c_log(log_params, *args, **kwargs):\n\t return _object_func_marginals_c(numpy.exp(log_params), *args, **kwargs)",
"def calculate_marginal(self):\n self.marginal_ray=beam_field()\n m=self.marginal_ray\n m.U=np.array([[[0,0,1]]])\n m.Q_p=np.array([[[0,self.entrance_pupil,0]]])\n m.propagate(self.surfaces)",
"def _object_func_marginals_log(log_params, *args, **kwargs):\n return _object_func_marginals(numpy.exp(log_params), *args, **kwargs)",
"def marginal_2D(self, wrt_x, wrt_y, figure_name=None):\n res = self.marginalize_wrt_x_y(wrt_x, wrt_y)\n tagx= np.array([tup[0] for tup in res], dtype=np.int16)\n tagy= np.array([tup[1] for tup in res], dtype=np.int16)\n z = np.array([tup[2] for tup in res], dtype=np.float32)\n\n if wrt_x == 'logD':\n x = tagx[:]\n else:\n x = self.convert_tags_to_features(tagx, wrt_x)\n if wrt_y == 'logD':\n y = tagy[:]\n else:\n y = self.convert_tags_to_features(tagy, wrt_y)\n\n nx = ny = 101\n xi = np.linspace(min(x), max(x), nx)\n yi = np.linspace(min(y), max(y), ny)\n xi_ = xi[None,:]\n yi_ = yi[:,None]\n # zi0 = mlab.griddata(x, y, z, xi, yi, interp='linear')\n zi = interpolate.griddata((x, y), z, (xi_, yi_), method='cubic')\n zi[np.isnan(zi)] = 0.0\n max_zi = np.max(zi)\n zi_low = max_zi / 100.0\n ind_zero = zi <= zi_low\n zi[zi <= ind_zero] = 0\n\n if figure_name is None: return (x, y, xi, yi, zi)\n\n # Continue making the figure\n fig, ax = plt.subplots(1, figsize=(4,4), dpi=100, tight_layout=True)\n\n lev = marg_contour_levels\n # c = ax.contour(xi, yi, zi, lev, linestyle='dotted', linewidth=0.5, color='k')\n cf = ax.contourf(xi, yi, zi, lev, zorder=1, cmap=plt.get_cmap('Greys'),\n norm=plt.Normalize(vmin=0, vmax=abs(zi).max())) \n ax.scatter(x, y, marker=',', facecolor='grey', edgecolor='grey', s=1, zorder=2)\n cb = fig.colorbar(cf, ax=ax, shrink=1.00)\n\n ax.set_xlabel(utils.feature_name_in_latex(wrt_x))\n ax.set_ylabel(utils.feature_name_in_latex(wrt_y))\n\n if wrt_x == 'logD': \n ax.xaxis.set_ticks(range(5))\n ax.set_xticklabels(logD_ticks, rotation=45, fontsize='small')\n\n if wrt_y == 'logD': \n ax.yaxis.set_ticks(range(5))\n ax.set_yticklabels(logD_ticks, rotation=45, fontsize='small')\n\n if figure_name is not None:\n plt.savefig(figure_name)\n logger.info('marginal_2D: saved {0}'.format(figure_name))\n plt.close()\n\n return (x, y, xi, yi, zi)",
"def log_marginal_likelihood(X_train,y_train,phi,tau=1.,Ve=1.e-10):",
"def marginal(self):\n m = np.zeros(len(self.domain))\n for fnode in self.neighbors:\n m += self.received[fnode]\n return np.exp(normalize(m))",
"def marginalize(counts):\n\t# Sum columns\n\tx_counts = np.sum(counts, axis=0)\n\t# Sum rows\n\ty_counts = np.sum(counts, axis=1)\n\n\tmarginalized = {'theta_1': x_counts, 'theta_2': y_counts}\n\n\treturn marginalized",
"def log_marginal_likelihood(self) -> tf.Tensor:\n X, Y = self.data\n Y = Y[..., :-1]\n K = self.kernel(X)\n ks = self._add_noise_cov(K)\n L = tf.linalg.cholesky(ks)\n m = self.mean_function(X)\n\n # [R,] log-likelihoods for each independent dimension of Y\n log_prob = gpflow.logdensities.multivariate_normal(Y, m, L)\n return tf.reduce_sum(log_prob)",
"def log_likelihood(X, Z, variable_types):\n\tk = Z['pi_unconstrained'].shape[1]+1 # the number of mixture components\n\t## We gather the log probabilities of each indiv in batch for each mixture component into\n\t## a matrix of size (B x k), where B is the batch size.\n\tlogps = torch.zeros([len(X), k])\n\t## First insert the mixture weight contribution to the array\n\tlogps += logsoftmax(Z['pi_unconstrained'], dim=-1)\n\t## Next loop over the features and sum the contributions to logps\n\tfor i, (key, z) in enumerate(Z.items()):\n\t\tif key not in ['pi_unconstrained']:\n\t\t\tdata = torch.Tensor(X[key].values).unsqueeze(-1)\n\t\t\tdist = variable_types[key]\n\t\t\tif dist == 'Categorical':\n\t\t\t\talpha = softmax(z, dim=-1, additional=-50.)\n\t\t\t\tlogps += Categorical(probs = alpha).log_prob(data)\n\t\t\telif dist == 'Bernoulli':\n\t\t\t\ttheta = z\n\t\t\t\tlogps += Bernoulli(logits = theta).log_prob(data)\n\t\t\telif dist == 'Beta':\n\t\t\t\talpha, beta = torch.exp(z).transpose(0,1)\n\t\t\t\tlogps += Beta(alpha, beta).log_prob(data)\n\t## Compute logsumexp over the mixture components and return the sum over data elements.\n\tlogp = torch.logsumexp(logps, dim=-1)\n\treturn logp.sum()",
"def get_marginals(self, x):\n # get conditional probability\n conditions = self.get_conditional(x)\n\n # multiply conditional probability with pi_{k}\n culmulate = conditions.dot(self._pi)\n return culmulate.flatten()",
"def log_marginal_likelihood_normal_cdf(self):\n #we define the loop for the batchsize\n num_batches = int(np.ceil(self.W.shape[0] / self.batchsize_dim))\n slices=np.array_split(np.arange(0,self.W.shape[0]),num_batches)\n def batch_indices(iter):\n idx = iter \n return slice(slices[idx][0],slices[idx][-1]+1)\n \n batch_slices=[batch_indices(iter) for iter in range(num_batches)]\n #print(batch_slices,num_batches,self.batchsize_dim)\n def innerloop(slices):\n if type(slices)!=list:\n slices=[slices]\n #print(slices)\n ml=[]\n for idx in slices:\n if self.type_y=='affine':\n γp, Γp, _, _ = self.compute_gammas_affine(self.params,self.X,self.W[idx,:],self.Z[idx,:])\n elif self.type_y=='mixed':\n γp, Γp, _, _, _ = self.compute_gammas_mixed(self.params,self.X,self.Y,self.C,self.W[idx,:],self.Z[idx,:])\n #print(y1.shape)\n res = gaussianCDF(Γp,-np.ones((γp.shape[0],1))*np.inf,γp)\n ml.append(res)\n return ml\n \n if self.type_y=='affine':\n \n results = Parallel(n_jobs=self.num_cores )(delayed(innerloop)(b) for b in batch_slices)\n #print(results)\n res1=np.sum(results)\n \n _, _, γ, Γ = self.compute_gammas_affine(self.params,self.X,self.W[[0],:],self.Z[[0],:])#we only need γ, Γ\n #print()\n if self.latent_dim>0:\n res2 = gaussianCDF(Γ+self.jitter*np.eye(Γ.shape[0]),-np.ones((γ.shape[0],1))*np.inf,γ)\n logres2 = np.log(res2+1e-200)\n else:\n logres2 = 0.0\n #print( np.log(res1+1e-300),logres2)\n res= np.log(res1+1e-300)-logres2 \n elif self.type_y=='regression':\n if self.latent_dim>0:\n γp, Γp, γ, Γ = self.compute_gammas_regression(self.params,self.X,self.Y,self.C)\n res2 = gaussianCDF(Γ+self.jitter*np.eye(Γ.shape[0]),-np.ones((γ.shape[0],1))*np.inf,γ)\n #from scipy.stats import multivariate_normal\n try:\n res1 = gaussianCDF(Γp,-np.ones((γp.shape[0],1))*np.inf,γp)\n res= np.log(res1+1e-300)-np.log(res2+1e-300)\n except:\n #print(self.params, Γp)\n res=-10.0**300\n else:\n return 0.0\n elif self.type_y=='mixed':\n results = Parallel(n_jobs=self.num_cores )(delayed(innerloop)(b) for b in batch_slices)\n res1=np.sum(results)\n _, _, γ, Γ = self.compute_gammas_affine(self.params,self.X,self.W[[0],:],self.Z[[0],:])#we only need γ, Γ\n if self.latent_dim>0:\n res2 = gaussianCDF(Γ+self.jitter*np.eye(Γ.shape[0]),-np.ones((γ.shape[0],1))*np.inf,γ)\n logres2 = np.log(res2+1e-200)\n else:\n logres2 = 0.0\n res= np.log(res1+1e-300)-logres2\n if np.isnan(res):\n return -10.0**300 \n else:\n return res",
"def obtain_consistent_marginals(self, priv_marginal_config, priv_split_method) -> Marginals:\n\n # generate_all_pub_marginals() generates all the one and two way marginals of the public set which is implemented in DataLoader.py\n if self.data.pub_ref:\n pub_marginals = self.data.generate_all_pub_marginals()\n \n # get_noisy_marginals() is in synthesizer.py\n # which first calls generate_..._by_config(), and computes on priv_data to return marginal_sets, epss\n # (note that 'marginal_key' could be 'priv_all_one_way' or 'priv_all_two_way')\n # later it calls anonymize() which add noises to marginals\n # (what decides noises is 'priv_split_method') \n # priv_split_method[set_key]='lap' or....\n # Step 1: generate noisy marginals\n noisy_marginals = self.get_noisy_marginals(priv_marginal_config, priv_split_method)\n\n # since calculated on noisy marginals\n # we use mean function to estimate the number of synthesized records\n num_synthesize_records = np.mean([np.sum(x.values) for _, x in noisy_marginals.items()]).round().astype(np.int)\n print(\"------------------------> now we get the estimate of records' num by averaging from nosiy marginals:\", num_synthesize_records)\n \n \n \n # the list of all attributes' name(str) except the identifier attribute\n self.attr_list = self.data.obtain_attrs()\n # domain_list is an array recording the count of each attribute's candidate values\n self.domain_list = np.array([len(self.data.encode_schema[att]) for att in self.attr_list])\n \n # map the attribute str to its index in attr_list, for possible use\n # use enumerate to return Tuple(index, element) \n self.attr_index_map = {att: att_i for att_i, att in enumerate(self.attr_list)}\n\n\n # views are wrappers of marginals with additional functions for consistency\n # if there exist public dataset to refer to\n if self.data.pub_ref:\n pub_onehot_view_dict, pub_attr_view_dict = self.construct_views(pub_marginals)\n # Step 2: create some data structures\n noisy_onehot_view_dict, noisy_attr_view_dict = self.construct_views(noisy_marginals)\n \n # all_views is one-hot to view dict, views_dict is attribute to view dict\n # they have different format to satisfy the needs of consistenter and synthesiser\n # to fit in code when we do not have public things to utilize \n if not self.data.pub_ref:\n pub_onehot_view_dict = noisy_onehot_view_dict\n pub_attr_view_dict = noisy_attr_view_dict\n\n self.onehot_view_dict, self.attrs_view_dict = self.normalize_views(\n pub_onehot_view_dict,\n pub_attr_view_dict,\n noisy_attr_view_dict,\n self.attr_index_map,\n num_synthesize_records)\n\n # consist the noisy marginals to submit to some rules\n consistenter = Consistenter(self.onehot_view_dict, self.domain_list)\n consistenter.consist_views()\n\n # consistenter uses unnormalized counts;\n # after consistency, synthesizer uses normalized counts\n for _, view in self.onehot_view_dict.items():\n view.count /= sum(view.count)\n\n return noisy_marginals, num_synthesize_records",
"def marginalDistribution(self, x, variable):\n return self._distribution.marginal(x, variable)",
"def marginal(self, variables):\n indx = self.index(variables)\n mu = self.mu.extract(indx, [-1])\n cov = self.cov.extract(indx, indx)\n marginalized = self.copy()\n marginalized._variables = variables\n marginalized._mean = mu\n marginalized._cov = cov\n return marginalized",
"def _object_func_marginals(params, data_vec, model_func, pts, \n lower_bound=None, upper_bound=None, \n verbose=0, multinom=True, flush_delay=0,\n func_args=[], func_kwargs={}, fixed_params=None, ll_scale=1,\n output_stream=sys.stdout, store_thetas=False):\n \n nmarginals= len(data_vec)\n #print \"nmarginals in marginal_optimization\"\n #print nmarginals\n \n global _counter\n _counter += 1\n\n #if nmarginals <2:\n #\tprint \"error: number of marginals less than two, but optimization function for multiple marginals is used!\"\n # \treturn dadi.Inference._out_of_bounds_val\n\n # Deal with fixed parameters\n params_up = dadi.Inference._project_params_up(params, fixed_params)\n\n # Check our parameter bounds\n if lower_bound is not None:\n for pval,bound in zip(params_up, lower_bound):\n if bound is not None and pval < bound:\n #print \"failure in bounds!, pval<lower_bound\"\n return -dadi.Inference._out_of_bounds_val/ll_scale\n if upper_bound is not None:\n for pval,bound in zip(params_up, upper_bound):\n if bound is not None and pval > bound:\n return -dadi.Inference._out_of_bounds_val/ll_scale\n \n \n all_ns = [data_vec[marg_num].sample_sizes for marg_num in range(nmarginals)]\n #print \"in marginal_optimization, all_ns is\"\n #print all_ns\n \n \n all_args = [params_up, all_ns] + list(func_args)\n # Pass the pts argument via keyword, but don't alter the passed-in \n # func_kwargs\n func_kwargs = func_kwargs.copy()\n func_kwargs['pts'] = pts\n \n #print all_args\n #print func_kwargs\n all_sfs = model_func(*all_args, **func_kwargs)\n #this supposes that the two thetas are equal. This should be verified in the end! \n if multinom:\n\tresult=numpy.sum([ll_multinom(all_sfs[marg_num], data_vec[marg_num]) for marg_num in range(nmarginals)])\n else:\n result = numpy.sum([ll(all_sfs[marg_num], data_vec[marg_num]) for marg_num in range(nmarginals)])\n\n if store_thetas:\n global _theta_store\n dadi.Inference._theta_store[tuple(params)] = numpy.mean([optimal_sfs_scaling(all_sfs[marg_num], data_vec[marg_num]) for marg_num in range(nmarginals)])\n \n # Bad result\n if numpy.isnan(result):\n result = dadi.Inference._out_of_bounds_val\n \n if (verbose > 0) and (_counter % verbose == 0):\n param_str = 'array([%s])' % (', '.join(['%- 12g'%v for v in params_up]))\n output_stream.write('%-8i, %-12g, %s%s' % (_counter, result, param_str,\n os.linesep))\n Misc.delayed_flush(delay=flush_delay)\n\n return -result/ll_scale",
"def get_marginal(\n target=None\n ):\n pass",
"def _marginals(*contingency):\n raise NotImplementedError(\"The contingency table is not available\"\n \"in the general ngram case\")",
"def all_marginal_1D(self, figure_name):\n if not self.marginalize_done: self.marginalize()\n results = self.get('marginal_results')\n names = self.get('feature_names')\n n_dim = len(results)\n n_rows = 2\n n_cols = n_dim // n_rows if n_dim % n_rows == 0 else n_dim // n_rows + 1\n\n fig, tup_ax = plt.subplots(n_rows, n_cols, figsize=(6.5, 5), dpi=100, tight_layout=True)\n arr_ax = tup_ax.reshape(-1)\n\n for i_ax in range(n_dim):\n wrt = names[i_ax]\n ax = arr_ax[i_ax]\n _res = results[i_ax]\n x_tag = np.array([_tup[0] for _tup in _res])\n prob = np.array([_tup[1] for _tup in _res])\n\n x_marg= self.convert_tags_to_features(x_tag, wrt) if wrt != 'logD' else x_tag\n\n ax.plot(x_marg, prob, marker='o', linestyle='solid', color='grey', ms=4)\n\n # ax.set_xlabel(utils.feature_name_in_latex(wrt))\n ax.set_xlabel(utils.feature_name_in_layman(name=wrt, short=False))\n\n # Cosmetics\n if wrt == 'logD': \n ax.xaxis.set_ticks(range(5))\n ax.set_xticklabels(logD_ticks, rotation=45, fontsize='small')\n\n if wrt == 'fov': ax.xaxis.set_ticks(np.unique(x_marg)[::2])\n if wrt == 'Z': ax.xaxis.set_ticks(np.unique(x_marg)) \n if wrt == 'Xc': \n ax.xaxis.set_ticks(np.linspace(0, 0.71, 5))\n for item in ax.get_xticklabels(): item.set_rotation(45)\n\n if self.exclude_eta_column:\n ax = arr_ax[-1]\n ax.set_axis_off()\n\n plt.savefig(figure_name)\n logger.info('all_marginal_1D: saved {0}'.format(figure_name))\n plt.close()",
"def pmi(cls, *marginals):\n return (_log2(marginals[NGRAM] * marginals[TOTAL] ** (cls._n - 1)) -\n _log2(_product(marginals[UNIGRAMS])))",
"def log_marginal_likelihood(self, X, W):\n phi_X = self.phi(X, W)\n S_n = phi_X.T @ phi_X + np.eye(self.M)\n mu_n = np.linalg.inv(S_n) @ phi_X.T @ self.Y\n a_n = self.gamma_a0 + self.N / 2\n A = np.diag(self.Y.T @ self.Y)\n C = np.diag(mu_n.T @ S_n @ mu_n)\n b_n = self.gamma_b0 + 0.5 * (A - C)\n\n # Compute Lambda term.\n sign, logdet = np.linalg.slogdet(S_n)\n lambda_term = -0.5 * sign * logdet\n\n # Compute b_n term.\n b_term = self.gamma_a0 * np.log(self.gamma_b0) - a_n * np.log(b_n)\n\n # Compute a_n term.\n gamma_term = gammaln(a_n) - gammaln(self.gamma_a0)\n\n # Compute sum over all y_n.\n return np.sum(gamma_term + b_term + lambda_term)",
"def _object_func_marginals_c(params, data_vec, model_func, pts, \n\t lower_bound=None, upper_bound=None, \n\t verbose=0, multinom=True, flush_delay=0,\n\t func_args=[], func_kwargs={}, fixed_params=None, ll_scale=1,\n\t output_stream=sys.stdout, store_thetas=False,nmarginals=2):\n\t #print \"data vec is\"\n\t #print data_vec.shape\n\t \n\t \n\t global _counter\n\t _counter += 1\n\t\n\t if nmarginals <2 and not warningiIssued:\n\t \tprint \"Warning: number of marginals less than two, but optimization function for multiple marginals is used!\"\n\t \twarningIssued=True\n\t \t#return dadi.Inference._out_of_bounds_val\n\t\n\t # Deal with fixed parameters\n\t params_up = dadi.Inference._project_params_up(params, fixed_params)\n\t\n\t # Check our parameter bounds\n\t if lower_bound is not None:\n\t for pval,bound in zip(params_up, lower_bound):\n\t if bound is not None and pval < bound:\n\t return -dadi.Inference._out_of_bounds_val/ll_scale\n\t if upper_bound is not None:\n\t for pval,bound in zip(params_up, upper_bound):\n\t if bound is not None and pval > bound:\n\t return -dadi.Inference._out_of_bounds_val/ll_scale\n\t \n\t \n\t all_ns = [data_vec[marg_num].sample_sizes for marg_num in range(nmarginals)]\n\t #print \"in marginal_optimization, all_ns is\"\n\t #print all_ns\n\t \n\t \n\t all_args = [params_up, all_ns] + list(func_args)\n\t # Pass the pts argument via keyword, but don't alter the passed-in \n\t # func_kwargs\n\t func_kwargs = func_kwargs.copy()\n\t func_kwargs['pts'] = pts\n\t all_sfs = model_func(*all_args, **func_kwargs)\n\t allcoarse=[coarsen.flatten(coarsen.split(all_sfs[i],coarsenings[i])) for i in range(len(all_sfs))]\n\t #this supposes that the two thetas are equal. This should be verified in the end! \n\t if multinom:\n\t\tresult=numpy.sum([ll_multinom(all_sfs[marg_num], data_vec[marg_num]) for marg_num in range(nmarginals)])\n\t else:\n\t result = numpy.sum([ll(all_sfs[marg_num], data_vec[marg_num]) for marg_num in range(nmarginals)])\n\t\n\t if store_thetas:\n\t global _theta_store\n\t dadi.Inference._theta_store[tuple(params)] = numpy.mean([optimal_sfs_scaling(all_sfs[marg_num], data_vec[marg_num]) for marg_num in range(nmarginals)])\n\t \n\t # Bad result\n\t if numpy.isnan(result):\n\t result = dadi.Inference._out_of_bounds_val\n\t \n\t if (verbose > 0) and (_counter % verbose == 0):\n\t param_str = 'array([%s])' % (', '.join(['%- 12g'%v for v in params_up]))\n\t output_stream.write('%-8i, %-12g, %s%s' % (_counter, result, param_str,\n\t os.linesep))\n\t Misc.delayed_flush(delay=flush_delay)\n\t\n\t return -result/ll_scale",
"def _responsibility_matrix(y, mean, covariance, weight, covariance_type):\n\n precision = _compute_precision_cholesky(covariance, covariance_type)\n weighted_log_prob = np.log(weight) + \\\n _estimate_log_gaussian_prob(y, mean, precision, covariance_type)\n\n log_likelihood = scipy.misc.logsumexp(weighted_log_prob, axis=1)\n with np.errstate(under=\"ignore\"):\n log_responsibility = weighted_log_prob - log_likelihood[:, np.newaxis]\n\n responsibility = np.exp(log_responsibility).T\n \n return (responsibility, log_likelihood)",
"def marginalize_2d(x,y,axobj,*args,**kwargs):\n\n # Get values of various possible kwargs\n bins = kwargs.pop('bins',50)\n levs = kwargs.pop('levs',[1.,2.,3.])\n extent = kwargs.pop('extent',[x.min(),x.max(),y.min(),y.max()])\n cmap = kwargs.pop('cmap','Greys')\n\n cmap = cm.get_cmap(cmap.capitalize())\n cmap = cmap(np.linspace(0,1,np.asarray(levs).size))\n \n Xbins = np.linspace(extent[0],extent[1],bins+1)\n Ybins = np.linspace(extent[2],extent[3],bins+1)\n\n # Bin up the samples. Will fail if x or y has no dynamic range\n try:\n H,X,Y = np.histogram2d(x.flatten(),y.flatten(),bins=(Xbins,Ybins))\n except ValueError: return ValueError(\"One of your columns has no dynamic range... check it.\")\n\n # Generate contour levels, sort probabilities from most to least likely\n V = 1.0 - np.exp(-0.5*np.asarray(levs)**2.)\n # Here we slightly smooth the contours to account for the finite number\n # of MCMC samples. can adjust the 0.7 below, but too small is artificial\n # and looks like shit.\n H = scipy.ndimage.filters.gaussian_filter(H,0.2*np.log10(x.size))\n Hflat = H.flatten()\n inds = np.argsort(Hflat)[::-1]\n Hflat = Hflat[inds]\n sm = np.cumsum(Hflat)\n sm /= sm[-1]\n\n # Find the probability levels that encompass each sigma's worth of likelihood\n for i,v0 in enumerate(V):\n try: V[i] = Hflat[sm <= v0][-1]\n except: V[i] = Hflat[0]\n\n V = V[::-1]\n clevs = np.append(V,Hflat.max())\n X1, Y1 = 0.5*(X[1:] + X[:-1]), 0.5*(Y[1:]+Y[:-1])\n\n if kwargs.get('plotfilled',True): axobj.contourf(X1,Y1,H.T,clevs,colors=cmap)\n axobj.contour(X1,Y1,H.T,clevs,colors=kwargs.get('colors','k'),linewidths=kwargs.get('linewidths',1.5),\\\n linestyles=kwargs.get('linestyles','solid'))\n axobj.set_xlim(extent[0],extent[1])\n axobj.set_ylim(extent[2],extent[3])\n \n return axobj",
"def marginalize_pose_test():\n # create a temporary hessian\n hessian = np.arange(400).reshape(20, 20)\n num_poses = 4\n pose_size = 3\n num_lm = 8\n lm_size = 1\n pose_id = 1\n lm_ids = np.array([0, 1, 2])\n mg.marginalize_pose(hessian, num_poses, pose_size, num_lm, lm_size,\n pose_id, lm_ids)",
"def get_marginal(self, var):\n return self.vs[var].marginal()"
] | [
"0.6380683",
"0.63611037",
"0.627736",
"0.61449707",
"0.5994979",
"0.59877026",
"0.59743273",
"0.59641844",
"0.59251755",
"0.5905187",
"0.5860727",
"0.5847705",
"0.58418274",
"0.5805165",
"0.57915455",
"0.5769771",
"0.5734626",
"0.57292646",
"0.57107455",
"0.5693731",
"0.5669806",
"0.5641033",
"0.5621262",
"0.55951667",
"0.5584048",
"0.55738425",
"0.5571854",
"0.5518222",
"0.5450712",
"0.5447152"
] | 0.6595073 | 0 |
Takes a resource and a search term and returns a list of countries or a country. | def _get_country_list(cls, resource, term="", filters=None):
# create the filter string
filters_uri_string = ""
if filters:
filter_string = cls.QUERY_SEPARATOR.join(filters)
filters_uri_string = "fields={}".format(filter_string)
# build uri
if term and not resource.endswith("="):
# add the forward slash only when there is a term
# and it is not specifying the value part of a query string
term = "/{}".format(term)
uri = "{}{}{}".format(cls.BASE_URI, resource, term)
if filters:
prefix = "?"
if "?" in uri:
prefix = "&"
uri += "{}{}".format(prefix, filters_uri_string)
response = requests.get(uri)
if response.status_code == 200:
result_list = []
data = json.loads(response.text) # parse json to dict
if type(data) == list:
for (
country_data
) in (
data
): # in case it is a list create python list with country instances
country = Country(country_data)
result_list.append(country)
else:
return Country(data)
return result_list
elif response.status_code == 404:
raise requests.exceptions.InvalidURL
else:
raise requests.exceptions.RequestException | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def search_places(self, search, country=\"True\", city=\"True\"):\n params = {}\n params[\"query\"] = search\n params[\"includeCities\"] = city\n params[\"includeCountries\"] = country\n placeRequestPath = \"/apiservices/autosuggest/v1.0/\"\n browsePlacesURL = self.rootURL + placeRequestPath + self.originCountry + \"/\" + self.currency + \"/\" + self.locale + \"/\"\n response = self.session.get(browsePlacesURL, params=params)\n resultJSON = json.loads(response.text)\n return resultJSON",
"def get_countries():\n call = build_call('attr', 'country')\n return request_data(call)",
"def get_search_suggestions(Resource=None, SuggestionQuery=None):\n pass",
"def search(self, name, country):\n # we only want exact matches, and we only want one possible match.\n return self._api_call('GET', 'search', name_equals=name, country=country, maxRows=1)",
"def search():\n # q is the name of the http parameter\n request.args.get(\"q\")\n\n #check for missing arguments\n if not(request.args.get(\"q\")):\n raise RuntimeError(\"Missing geo!\")\n\n #\"%\":match any number of characters\n q=request.args.get(\"q\") + \"%\"\n\n #retrieve data from database\n rows=db.execute(\"SELECT * from places WHERE postal_code LIKE :pc OR place_name LIKE :city OR admin_name1 LIKE :state\", pc=q,city=q,state=q)\n\n return jsonify(rows)",
"def search():\n\n # Store the 'q' part of the URL as a string called 'q'. Check 'q' loaded, and produce runtime error if not.\n # e.g. '12589'\n q = request.args.get(\"q\")\n if not q:\n raise RuntimeError(\"missing location\")\n\n # Rewrites user input as lowercase\n q = str.lower(q)\n\n # Select the entire row from database 'places' that at least contains the value of 'q' in one of the 'postal_code', 'place_name', or 'admin_name1' fields.\n # e.g. [{'country_code':'US','postal_code':'12589'}]\n q_info = db.execute(\"SELECT * FROM places WHERE postal_code LIKE :q OR LOWER(place_name) LIKE :q OR LOWER(admin_name1) LIKE :q LIMIT 10\", q='%'+q+'%')\n\n # Run 'q_info' dict through 'jsonify()' function to convert some elements to JSON compatible(?)\n return jsonify(q_info)",
"def search(api_key, term, location, categories, offset, price):\n\n url_params = {\n 'term': term.replace(' ', '+'),\n 'location': location.replace(' ', '+'),\n 'limit': int(params['limit']),\n 'offset': offset,\n 'categories': categories,\n 'price':price\n }\n \n find_locs = request(API_HOST, SEARCH_PATH, api_key, url_params=url_params)\n \n return json_normalize(find_locs['businesses'])",
"def countries(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"countries\")",
"def countries(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"countries\")",
"def searchTerm(self, resource, search, mode='default'):\n\n search = urlparse_quote(search)\n if mode == 'exact':\n url = self.fullUrl(resource) + '?$filter=Term%20eq%20%27{0}%27'.format(search)\n else:\n url = self.fullUrl(resource) + '?$filter=startswith(Term,%27{0}%27)%20eq%20true'.format(search)\n\n req = self.getRequest(url)\n return req\n #return req.json()",
"def get_countries_by_region(cls, region, filters=None):\n resource = \"/region\"\n return cls._get_country_list(resource, region, filters=filters)",
"def query_api(term, location):\n response = search(term, location)\n\n businesses = response.get('businesses')\n\n if not businesses:\n print 'No businesses for {0} in {1} found.'.format(term, location)\n return\n\n business_id = businesses[0]['id']\n \n print '{0} businesses found, querying business info for the top result \"{1}\" ...'.format(\n len(businesses),\n business_id\n )\n \n response=[]\n for biz in range(len(businesses)):\n response.append(get_business(businesses[biz]['id']))\n #response = get_business(business_id)\n return response",
"def search_by_term():\n body = request.get_json()\n term = body.get('searchTerm', '')\n current_category = None\n\n if term == '':\n abort(422)\n\n questions = Question.query.filter(Question.question.ilike('%'+term+'%')).all()\n formatted_questions = [question.format() for question in questions]\n\n if len(formatted_questions) > 0:\n current_category = formatted_questions[0]['category']\n\n return jsonify({\n 'success': True,\n 'questions': formatted_questions,\n 'total_questions': len(formatted_questions),\n 'current_category': current_category\n })",
"def countries(fixtures: Path) -> List[Dict[str, Any]]:\n raw_patterns = srsly.read_json(fixtures / \"countries.json\")\n fuzzy_patterns = [\n {\n \"label\": \"COUNTRY\",\n \"pattern\": pattern[\"name\"],\n \"type\": \"fuzzy\",\n \"id\": pattern[\"name\"],\n }\n for pattern in raw_patterns\n ]\n return fuzzy_patterns",
"def shortsearch(term,location):\n results = search(term,location)['listings']\n result = []\n for business in results:\n result.append([business['id'],business['name'],\"Yellow Pages\"])\n return result",
"def country(self, *flags: tuple) -> list:\n\n # If the countries are valid, return those countries \n # in capital letters. if not, ignore\n flags = [\n flag.upper() for flag in flags \n if Froxy._is_valid_country(flag)\n ]\n\n # If there are no flags, returns an empty list to not perform a linear search\n if not flags:\n return []\n\n return self._base_proxies_filter(category='country', filters=flags)",
"def search(api_key, term, location):\n\n\n\n url_params = {\n\n 'term': term.replace(' ', '+'),\n\n 'location': location.replace(' ', '+'),\n\n 'limit': SEARCH_LIMIT\n\n }\n\n return request(API_HOST, SEARCH_PATH, api_key, url_params=url_params)",
"def _derive_country_IE(place):\n derived = []\n if _COUNTY_REGEX.search(place.name):\n stripped = _COUNTY_REGEX.sub(\"\", place.name.lower())\n derived += [\"co \" + stripped, \"county \" + stripped]\n\n #\n # Alternative name cases that aren't as straightforward as the above.\n #\n try:\n derived += {\n \"loch garman\": [\"co wexford\"],\n \"uíbh fhailí\": [\"co offaly\"],\n \"maigh eo\": [\"co mayo\"],\n \"an iarmhí\": [\"co westmeath\"],\n }[place.name.lower()]\n except KeyError:\n pass\n\n return [DerivedName(text, \"en\") for text in derived]",
"def search(api_key, term, location):\r\n\r\n url_params = {\r\n 'term': term.replace(' ', '+'),\r\n 'location': location.replace(' ', '+'),\r\n 'limit': SEARCH_LIMIT\r\n }\r\n return request(API_HOST, SEARCH_PATH, api_key, url_params=url_params)",
"def queryset(self, request, queryset):\n if self.value() is None:\n return queryset\n return queryset.filter(data__qg_location__0__country__icontains=self.value())",
"def search(query: str):\n try:\n # Search with user query.\n # TODO: Handle list with multiple data.\n data: List = pycountry.countries.search_fuzzy(query)\n\n # extract alpha2 value\n _, _, alpha_2, _ = utils.extract_fuzzy_country_data(data)\n\n # Get a list of timezone names.\n result = utils.get_timezones(alpha_2)\n\n payload: List = []\n\n # If length is greater than one, show terminal menu.\n if len(result) > 1:\n entry = utils.handle_interaction(result)\n\n payload.append(entry)\n\n return utils.get_local_time(payload)\n except LookupError:\n return console.print(\n \"Couldn't resolve your query, please try other keywords.:x:\"\n )\n\n return utils.get_local_time(result)",
"def lookup(self, term):\n results = []\n lookup_term = term.lower()\n for char, latex, description, user_description in self.entries:\n if (char == term or\n latex.startswith(lookup_term) or\n latex[1:].startswith(lookup_term) or\n lookup_term in description.lower() or\n (user_description and lookup_term in user_description)):\n results.append((char, latex, description, user_description))\n return results",
"def get_countries(**kwargs):\n\n instance = Ceic._get_instance()\n\n get_dictionaries_method = instance._dictionary_facade.get_countries\n result = instance._make_request(get_dictionaries_method, **kwargs)\n\n return result",
"def search(api_key, term, location):\n\n url_params = {\n 'term': term.replace(' ', '+'),\n 'location': location.replace(' ', '+'),\n 'limit': SEARCH_LIMIT\n }\n return request(API_HOST, SEARCH_PATH, api_key, url_params=url_params)",
"def search_recipes(request):\n\n string_to_find = request.GET.get(\"term\", None)\n\n if string_to_find is None:\n return HttpResponse(status=400)\n\n matching_recipes = Recipe.objects.filter(title__icontains=string_to_find)\n\n context = {}\n for r in matching_recipes:\n context[r.title] = reverse('recipes:recipe', kwargs={'recipe_slug': r.slug})\n\n return HttpResponse(json.dumps(context), content_type='application/json')",
"def lookfor(term=\"\", field=[], filter=[], method='GET', pretty_print='0'):\n params = {\n 'lookfor': [term],\n 'filter[]': [\n 'building:\"0/AALTO/\"',\n ] + filter,\n 'field[]': field,\n 'prettyPrint': [pretty_print],\n 'lng': ['en-gb']\n }\n \n signal.signal(signal.SIGALRM, timeout_handler)\n # Allow 4 seconds to get a response back from finna api\n signal.alarm(5)\n\n sess = requests.Session()\n sess.headers.update(__headers)\n sess.params.update(params)\n\n r = sess.request(url=__url + 'search', method=method)\n sess.close()\n\n signal.alarm(0)\n\n res = {'status_code': r.status_code, 'json': r.json()}\n return res",
"def search():\n query = request.args['query']\n # find instances of the entered word in title, tags or ingredients\n results = mongo.db.places.find({\n '$or': [\n {'name': {'$regex': query, '$options': 'i'}},\n {'tags': {'$regex': query, '$options': 'i'}},\n {'city': {'$regex': query, '$options': 'i'}},\n ]\n })\n return render_template('search.html', query=query, results=results)",
"def search_using_magento_region(cls, region, country):\n subdivisions = cls.search([\n ('name', 'ilike', region),\n ('country', '=', country.id),\n ])\n\n # TODO: Exception need be created if subdivison does not exist.\n\n return subdivisions and subdivisions[0] or None",
"def _perContinentChoiceSelector(self, params):\n\n entity = params['entity']\n choices = soc.models.countries.COUNTRIES_TO_CONTINENT\n\n if 'fields' in params:\n fields = params['fields']\n\n for field in fields:\n entity = entity.__getattribute__(field)\n\n return choices[entity.res_country]",
"def get_doctors(term, location, category=\"\"):\n headers = {\n \"Authorization\": 'Bearer ' + api_key\n }\n\n payload = {\n \"term\": \"doctors\", \n \"location\": location, \n \"sort_by\": \"rating\", \n \"categories\":category\n }\n\n r = requests.get(\n f\"{base_url}businesses/search\",\n headers=headers, \n params=payload\n ).json()\n \n return r[\"businesses\"]"
] | [
"0.60441583",
"0.59755903",
"0.5808493",
"0.57474273",
"0.5598684",
"0.55965686",
"0.5556006",
"0.553263",
"0.553263",
"0.54796916",
"0.53904295",
"0.5386155",
"0.5330523",
"0.5315031",
"0.5309764",
"0.530596",
"0.53020495",
"0.5272663",
"0.52635056",
"0.52504873",
"0.5243132",
"0.52404636",
"0.52389884",
"0.5224738",
"0.52243525",
"0.5222588",
"0.52204967",
"0.5196221",
"0.51952386",
"0.5194666"
] | 0.724008 | 0 |
Returns a `Country` object by alpha code. | def get_country_by_country_code(cls, alpha, filters=None):
resource = "/alpha"
return cls._get_country_list(resource, alpha, filters=filters) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def country(alpha_2_code: str) -> None:",
"def get_countries_by_country_codes(cls, codes, filters=None):\n resource = \"/alpha?codes=\"\n codes = cls.QUERY_SEPARATOR.join(codes)\n return cls._get_country_list(resource, codes, filters=filters)",
"def get_country_from_code(country_code):\n country = []\n countries = [(country.alpha_2, country.name) for country in pycountry.countries]\n for country_index in range(len(countries)):\n # index 0 is the country code selected from the form\n if(countries[country_index][0] == country_code):\n country.append(countries[country_index])\n return country[0][1]",
"def get_country_by_country_code(country_code):\n # string is default converter for dynamic routes\n # get_or_404() is like get() but aborts with 404 if not found\n # instead of returning None.\n return jsonify(Country.query.get_or_404(country_code).to_dict())",
"def country_codes(country):\n countryObject = None\n try:\n countryObject = pycountry.countries.search_fuzzy(country)\n return countryObject[0].alpha_2\n except LookupError:\n pass\n try:\n splittedCountry = country.split(',')[0]\n countryObject = pycountry.countries.search_fuzzy(splittedCountry)\n return countryObject[0].alpha_2\n except LookupError:\n return 'No Code'",
"def get_countries_by_calling_code(cls, calling_code, filters=None):\n resource = \"/callingcode\"\n return cls._get_country_list(resource, calling_code, filters=filters)",
"def get_country_code(contry_name):\n for code, name in COUNTRIES.items():\n if name == contry_name:\n return code\n return None",
"def country(name):\n return location_db().find(name=name)[\"country\"]",
"def search_using_magento_code(cls, code):\n countries = cls.search([('code', '=', code)])\n\n if not countries:\n return cls.raise_user_error(\n \"country_not_found\", error_args=(code, )\n )\n\n return countries[0]",
"def country_letter_code(self):\n if \"countryLetterCode\" in self._prop_dict:\n return self._prop_dict[\"countryLetterCode\"]\n else:\n return None",
"def get_country_code(country_name) :\n for code, name in COUNTRIES.items() :\n if name==country_name :\n return code\n # if the country wasn't found, return None\n return None",
"def convert(dictCountry):\n\treturn Country(dictCountry['name'], dictCountry['code'])",
"def get_country_code(country_name):\n for code, name in COUNTRIES.items():\n if name == country_name:\n return code\n elif country_name == 'Yemen, Rep.':\n return 'ye'\n elif country_name == 'Vietnam':\n return 'vn'\n elif country_name == 'Tanzania':\n return 'tz'\n elif country_name == 'Moldova':\n return 'md'\n elif country_name == 'Macao SAR, China':\n return 'mo'\n elif country_name == 'Macedonia, FYR':\n return 'mk'\n elif country_name == 'Libya':\n return 'ly'\n elif country_name == 'Lao PDR':\n return 'la'\n elif country_name == 'Korea, Dem. Rep.':\n return 'kp'\n elif country_name == 'Korea, Rep.':\n return 'kr'\n elif country_name == 'Gambia':\n return 'gm'\n elif country_name == 'Iran, Islamic Rep.':\n return 'ir'\n elif country_name == 'Hong Kong SAR':\n return 'hk'\n elif country_name == 'Congo, Dem. Rep.':\n return 'cd'\n elif country_name == 'Congo, Rep.':\n return 'cf'\n elif country_name == 'Macao SAR, China':\n return 'mo'\n elif country_name == 'Macedonia, FYR':\n return 'mk'\n elif country_name == 'Libya':\n return 'ly'\n elif country_name == 'Lao PDR':\n return 'la'\n elif country_name == 'Korea, Dem. Rep.':\n return 'kp'\n elif country_name == 'Korea, Rep.':\n return 'kr'\n elif country_name == 'Gambia':\n return 'gm'\n # If the country wasn't found, return None.\n return None",
"def country_identifier(name):\n if name.lower() in _country_dict.keys():\n return _country_dict[name.lower()]\n else:\n return name",
"def resolveCountryCode(country_code):\n country_name = None\n if len(country_code) > 2:\n country_name = country_code\n country_code = next((cc for cc, country in countries.items() if country == country_code), None)\n if country_code not in countries:\n logger.error(\"Country code %s unknown. For a list of know codes execute:\")\n logger.error(sys.argv[0] + ' --list-countries \\tList all available countries that can be blocked.')\n sys.exit(255)\n if not country_name:\n country_name = countries[country_code]\n return [country_code, country_name]",
"def get_country_code(country_name):\n\n for code, name in COUNTRIES.items():\n if name == country_name:\n return code\n\n # If country was not found, return nothing\n return None",
"def get_country_code(self):\n\n try:\n sub_div = next(sub_div for sub_div in pycountry.subdivisions if sub_div.name == self.location)\n country = next(country for country in pycountry.countries if country.alpha_2 == sub_div.country_code)\n return country.alpha_3\n except StopIteration as exc:\n print(\"Cannot find subdivision in\" + str(exc))\n return 'XXX'",
"def get_country_code(country_name):\n # values = list(COUNTRIES.values())\n # keys = list(COUNTRIES.keys())\n #\n # try:\n # index = values.index(country_name)\n # except ValueError:\n # # Not found\n # return None\n #\n # return keys[index]\n\n for code, name in COUNTRIES.items():\n if name == country_name:\n return code\n\n return None",
"def get_country_code(country_name):\n # worldmap_chart = pygal.maps.world.World()\n # for code, name in worldmap_chart:\n\n for code, name in i18n.COUNTRIES:\n\n # for code, name in COUNTRIES.items():\n if name == country_name:\n print(code)\n return code\n # If the country wasn't found, return None.\n return None",
"def get_country_code(country_name):\n for code, name in COUNTRIES.items():\n if name == country_name:\n return code\n # if string isn't found returns None\n else:\n continue",
"def by_code(cls, code):\n return dbsession.query(cls).filter_by(_code=code).first()",
"def load_country_code_data():\n name_conversion = {\n 'East Timor': 'Timor-Leste',\n 'Republic of the Congo': 'Congo (Kinshasa)',\n 'Ivory Coast': 'Cote d\\'Ivoire',\n 'Macedonia': 'North Macedonia',\n 'Myanmar': 'Burma',\n 'Republic of Serbia': 'Serbia',\n 'Taiwan': 'Taiwan*',\n 'The Bahamas': 'Bahamas',\n 'United Republic of Tanzania': 'Tanzania',\n 'United States of America': 'US'\n }\n\n shapefile = os.path.join('data', 'ne_110m_admin_0_countries.shp')\n\n gdf = gpd.read_file(shapefile)[['ADMIN', 'ADM0_A3', 'geometry']]\n gdf.columns = ['country', 'country_code', 'geometry']\n\n gdf.loc[gdf['country'].isin(name_conversion.keys()), 'country'] = gdf['country'].map(name_conversion)\n\n return gdf",
"def country():\r\n\r\n cursor.execute('SELECT country_names FROM countries \\\r\n order by RANDOM() limit 1;')\r\n return cursor.fetchone()[0]",
"def mock_country_code_by_addr(self, ip_addr):\r\n ip_dict = {\r\n '1.0.0.0': 'CU',\r\n '2.0.0.0': 'IR',\r\n '3.0.0.0': 'SY',\r\n '4.0.0.0': 'SD',\r\n '5.0.0.0': 'AQ', # Antartica\r\n }\r\n return ip_dict.get(ip_addr, 'US')",
"def get_country(self, country):\n if country == \"United Kingdom\": return \"en\"\n if country == \"Portugal\": return \"pt\"\n\n result = self.session.get(\"https://en.ogame.gameforge.com\")\n soup = BeautifulSoup(result.content, \"html.parser\")\n\n code_list = soup.find(\"ul\", {\"id\": \"mmoList1\"})\n countries = {}\n for tag in code_list.find_all(\"li\"):\n link = tag.find(\"a\")[\"href\"]\n name = tag.string.strip() # name of the country\n code = link.split(\".\")[0].replace(\"//\", \"\")\n countries[name] = code # save to the dict\n\n # check if input was ok\n if not country in countries.keys():\n self.crash(\"Country\", country, \"was not found on the list.\")\n if len(countries[country]) != 2:\n self.crash(\"Can't fetch code for country\", country)\n\n return countries[country]",
"def test_country_name_in_countries(self):\n\t\tcountry_code = get_country_code('Andorra')\n\t\tself.assertEqual(country_code, 'ad')",
"def country_code(self):\n return self.__country_code",
"def country_code(self):\n return self.__country_code",
"def get_language_name(iso_code):\n if iso_code not in LANGUAGES_BY_CODE:\n try:\n lang = iso639.languages.get(part3=iso_code)\n except KeyError:\n lang = None\n\n if lang:\n # we only show up to the first semi or paren\n lang = re.split(r\";|\\(\", lang.name)[0].strip()\n\n LANGUAGES_BY_CODE[iso_code] = lang\n\n return LANGUAGES_BY_CODE[iso_code]",
"def findCountryCode(self):\n RecordsWithCountry = []\n for state in pycountry.subdivisions:\n #print(state.name)\n for record in self.Records: \n if state.name == record.state:\n #print(state.country, record.state)\n r = RecordCountry(date=record.date,\n country=state.country.alpha_3,\n impressions=record.impressions,\n CTR=record.CTR)\n self.Records.remove(record)\n RecordsWithCountry.append(r)\n for record in self.Records: \n r = RecordCountry(date=record.date,\n country=\"XXX\",\n impressions=record.impressions,\n CTR=record.CTR)\n RecordsWithCountry.append(r)\n self.Records = RecordsWithCountry"
] | [
"0.7089632",
"0.7018981",
"0.68360543",
"0.68055433",
"0.6680105",
"0.62955844",
"0.6288947",
"0.62801284",
"0.6251623",
"0.62398905",
"0.60558957",
"0.6047952",
"0.5955333",
"0.5927633",
"0.59150565",
"0.59130657",
"0.5884987",
"0.5858456",
"0.58371097",
"0.5823054",
"0.57946956",
"0.5704622",
"0.5691163",
"0.5636928",
"0.5575162",
"0.55690515",
"0.5549335",
"0.5549335",
"0.5548976",
"0.55451334"
] | 0.81850433 | 0 |
Check if the log file exists and can be opened | def log_file_exists(path):
try:
f = open(path)
f.close()
except IOError:
return False
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_log_file_path(log_file_path):\n try:\n if not os.path.exists(log_file_path):\n print(\"Log file path not exists\")\n return False\n else:\n return True\n except:\n print(\"Log file path generic error: \" + log_file_path)\n return False",
"def log_file_exist(self, file_path_name):\n return os.path.isfile(file_path_name)",
"def log_file_exists(file_name: str):\n if os.path.isfile(get_complete_file_name(file_name)):\n return True\n return False",
"def check_file_exist(self):\n return False",
"def _verify_logging(self):\n log_file = self.device.log_file_name\n self.assertTrue(os.path.exists(log_file),\n f\"{self.device.name}'s log file {log_file} does not exist\")\n self.assertTrue(os.path.getsize(log_file),\n f\"{self.device.name}'s log file {log_file} is empty\")",
"def file_exist() -> bool:\n pass",
"def is_file_exists(self):\n pass",
"def check_file_existence(self, filename):\n try:\n for sample in TimeoutingSampler(\n config.GAHOOKS_TIMEOUT, 1, self.machine.fs.exists,\n \"/tmp/%s\" % filename\n ):\n if sample:\n return True\n except APITimeout:\n return False",
"def check_dir(self):\n\n if not os.path.isdir(self.directory):\n os.mkdir(self.directory, 755)\n if not os.path.exists(self.log_file):\n from pathlib import Path\n Path(self.log_file).touch()",
"def checkFileExistance(filePath):\n\n try:\n with open(filePath, 'r') as f:\n logger.info(\"Se encontro {}\".format(filePath))\n return True\n except FileNotFoundError as e:\n return False\n except IOError as e:\n return False",
"def check_file_validity(self):\n # Initialize key variables\n file_ = self.tailed_file\n\n # Check if exists\n if os.path.exists(file_) is False:\n log_message = 'File {} does not exist.'.format(file_)\n log.log2die(1018, log_message)\n\n # Check if file\n if os.path.isfile(file_) is False:\n log_message = '{} is not a file.'.format(file_)\n log.log2die(1035, log_message)\n\n # Check if readable\n if not os.access(file_, os.R_OK):\n log_message = 'File {} is not readable.'.format(file_)\n log.log2die(1036, log_message)",
"def ensure_file_exists(file_path):\n\n if not (os.path.exists(file_path) and os.access(file_path, os.R_OK)):\n # This is bad.\n raise CronException(\"Path {0} does not exist or can not be read.\".format(file_path))",
"def check_file_creation(today_filename):\r\n\r\n if os.path.exists(today_filename):\r\n print(\"Today's file {} has been created\".format(today_filename))\r\n else:\r\n print(\"Today's file was not created\")",
"def file_exists(self):\r\n if os.path.exists(self.file_path):\r\n return True\r\n else:\r\n return False",
"def isLogTransfer(logPath):\n\n if logPath != \"\":\n status = True\n else:\n status = False\n return status",
"def test_passing_log_fname(self):\n\n log_env_file = \"test.log\"\n log_file = \"test_2.log\"\n whole_env_log_file = os.path.join(LOG_FOLDER, log_env_file)\n whole_log_file = os.path.join(LOG_FOLDER, log_file)\n\n # remove both files if they exist\n for file in (whole_env_log_file, whole_log_file):\n if os.path.exists(file):\n os.remove(file)\n\n os.environ[ENV_WORK_DIR] = TMP_DIR\n os.environ[ENV_LOG_FNAME] = log_env_file\n\n logger = pgo_logger.get_logger(log_file_name=log_file)\n assert logger is not None\n\n logger.info(\"test\")\n\n assert os.path.exists(whole_log_file) is True\n assert os.path.isfile(whole_log_file) is True\n assert os.path.exists(whole_env_log_file) is False",
"def _check_file_exists(self, filename):\n if not os.path.exists(filename):\n print('\\n[-] ERROR: %s is not at the specified path! \\\n Please check the filepath and filename...' \n %filename)\n return False\n return True",
"def file_exists(path):\n\n try:\n with open(path):\n return True\n except IOError:\n return False",
"def check_file(self) -> bool:\n log.info(\"Checking if the database file exists.\")\n try:\n with open(self.host, 'r'):\n log.info(\"Database file exists. Checking it's integrity\")\n except FileNotFoundError:\n log.error(\"The database file doesn't exists.\")\n return False\n else:\n try:\n self.setup_tables()\n return True\n except Exception:\n log.critical(\"An exception was raised.\")\n raise",
"def test_log_filenames_file_not_found(self):\n\n filename = '/%s' % (uuid.uuid4())\n while os.path.exists(filename): # pragma: no cover\n filename = '/%s' % (uuid.uuid4())\n\n with self.assertRaises(Exception):\n self.app.log_filenames([filename])\n self.assertEqual(self.get_track_count(), 0)",
"def loggingToFile(self):\n return not self.fileObject is None",
"def db_file_exist(path: str):\n if os.path.exists(path):\n logger.debug('>> DB-file found')\n else:\n open(path, 'a').close()\n logger.debug('>> DB-file not found. Creating...')",
"def validate_file_handler(self):\n if self.fh.closed:\n try:\n self.fh = open(self.path, \"r\")\n self.fh.seek(0, 2)\n except OSError as err:\n logging.error(\"Could not reopen file: {}\".format(err))\n return False\n\n open_stat = os.fstat(self.fh.fileno())\n try:\n file_stat = os.stat(self.path)\n except OSError as err:\n logging.error(\"Could not stat file: {}\".format(err))\n return False\n\n if open_stat != file_stat:\n self.log\n self.fh.close()\n return False\n\n return True",
"def start_check():\n if not os.path.exists(outfancy_temp_files):\n os.mkdir(outfancy_temp_files)\n if not os.path.exists(outfancy_temp_files + log_file):\n os.system('touch ' + outfancy_temp_files + log_file)",
"def FileCheck(fn):\n try:\n open(fn, \"r\")\n return 1\n except IOError:\n print(\"Error: File does not exist.\")\n return 0",
"def does_file_exist(self, fn):\n if True:\n print(f\"-=- {fn} found.\")\n return True\n else:\n print(f\"-!- {fn} not found. Try again\")\n return False",
"def exists(self):\n log.warning('Could not determine whether %s exists due to unhandled scheme.', self.file_name)",
"def checkExists(fileName):\n if fileName == '' or not pathlib.Path(fileName).exists():\n print('Error: {} is not found !!!'.format(fileName))\n exit()",
"def _check_file_exists(self, filepath, should_exist=True):\n _, _, stderr = self.execute_command(CommandBuilder.list(filepath))\n compare = operator.ne if should_exist else operator.eq\n if compare(len(stderr.readlines()), 0):\n msg = \"not found\" if should_exist else \"already exists\"\n raise OSError(f\"{filepath} {msg} on server\")",
"def check_log():\r\n errors = 0\r\n warnings = 0\r\n log_handle = open(CurrentLogPath.path)\r\n for line in log_handle:\r\n if 'ERROR:' in line:\r\n errors += 1\r\n elif 'WARNING' in line:\r\n warnings += 1\r\n log_handle.close()\r\n if errors or warnings:\r\n print '!'*80\r\n if errors:\r\n print '{0} errors occurred during the run! See log file for more ' \\\r\n 'details!'.format(errors)\r\n if warnings:\r\n print '{0} warnings occurred during the run! See log file for ' \\\r\n 'more details!'.format(warnings)\r\n print 'Run was completed anyway.'\r\n print '\\n\\n'"
] | [
"0.7649238",
"0.7530432",
"0.7347039",
"0.715572",
"0.71378726",
"0.6962555",
"0.68749493",
"0.6834695",
"0.66220766",
"0.66092384",
"0.65755713",
"0.6534093",
"0.6484807",
"0.6470693",
"0.641887",
"0.64120764",
"0.64054453",
"0.64047426",
"0.6366032",
"0.6365602",
"0.6356147",
"0.63527465",
"0.63504386",
"0.6348428",
"0.62963194",
"0.6273308",
"0.62492514",
"0.6248021",
"0.62259233",
"0.6225758"
] | 0.796735 | 0 |
Log the location requests and write them to a log file | def log_request(forecast):
from time import gmtime, strftime
current_day = strftime("%d/%m/%Y", gmtime())
current_time = strftime("%H:%M", gmtime())
with open('locations.log', 'a') as log:
# print(request.remote_addr, file=log) # IP address
# print(request.user_agent, file=log) # User agent
# print(where.title(), current, forecast, file=log, sep=' | ')
location = forecast['location']
temperature = str(forecast['temperature']) + '°C'
weather = forecast['weather']
print(location.title(), current_day, current_time, temperature, weather.title(), file=log, sep=' | ') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _log_request(self):\n log = self.server.log\n if log:\n if hasattr(log, \"info\"):\n log.info(self.format_request() + '\\n')\n else:\n log.write(self.format_request() + '\\n')",
"def log(self):\n\n\t\theader_dict = dict(request.headers)\n\n\t\ttry:\n\t\t\ttracker_id = header_dict[\"tracker_id\"]\n\t\texcept Exception:\n\t\t\ttracker_id = None\n\t\t\n\t\ttry:\n\t\t\tuser_agent = header_dict[\"User-Agent\"]\n\t\texcept Exception:\n\t\t\tuser_agent = None\n\n\t\ttry:\n\t\t\tlanguage = header_dict[\"Accept-Language\"]\n\t\texcept Exception:\n\t\t\tlanguage = None\n\n\t\ttry:\n\t\t\treferer = header_dict[\"Referer\"]\n\t\texcept Exception:\n\t\t\treferer = None\n\n\t\ttry:\n\t\t\torigin = header_dict[\"Origin\"]\n\t\texcept Exception:\n\t\t\torigin = None\n\n\t\ttry:\n\t\t\tjson_data = request.json\n\t\texcept Exception:\n\t\t\tjson_data = None\n\n\t\ttry:\n\t\t\tplatform = request.user_agent.platform.title()\n\t\texcept Exception:\n\t\t\tplatform = None\n\n\t\ttry:\n\t\t\tbrowser = request.user_agent.browser.title()\n\t\texcept Exception:\n\t\t\tbrowser = None\n\n\t\ttry:\n\t\t\tauth_header_token = header_dict[\"Authorization\"].split(\" \")[1]\n\t\texcept Exception:\n\t\t\tauth_header_token = None\n\t\t\n\t\t## If set to run before a request: This is the default setting\n\t\tif self.pre_request:\n\t\t\[email protected]_request()\n\t\t\tdef run():\n\t\t\t\t## If the path accessed is in the do_not_log list, it is skipped\n\t\t\t\tif request.path in self.do_not_log:\n\t\t\t\t\treturn\n\t\t\t\t## If the path accessed is not in the do_not_log list, it is posted\n\t\t\t\telse:\n\t\t\t\t\tpost_data = {\n\t\t\t\t\t\t\"error\": None,\n\t\t\t\t\t\t\"stack_trace\": None,\n\t\t\t\t\t\t\"method\": request.method,\n\t\t\t\t\t\t\"source_ip\": request.remote_addr,\n\t\t\t\t\t\t\"url\": request.url,\n\t\t\t\t\t\t\"status_code\": 200, ## Assumed to be 200 due to the nature of the function\n\t\t\t\t\t\t\"headers\": str(header_dict),\n\t\t\t\t\t\t\"user_agent\": user_agent,\n\t\t\t\t\t\t\"language\": language,\n\t\t\t\t\t\t\"platform\": platform,\n\t\t\t\t\t\t\"browser\": browser,\n\t\t\t\t\t\t\"referer\": referer,\n\t\t\t\t\t\t\"origin\": origin,\n\t\t\t\t\t\t\"auth_header\": auth_header_token,\n\t\t\t\t\t\t\"access_time\": datetime.now().strftime(\"%A, %d %B %Y %H:%M:%S\"),\n\t\t\t\t\t\t\"logging_access_key\": self.accessKey,\n\t\t\t\t\t\t\"json\": json_data,\n\t\t\t\t\t\t\"request_params\": str(dict(request.args))\n\t\t\t\t\t}\n\n\t\t\t\t\tself.startPost(post_data)\n\n\t\t\t\t\treturn\n\n\t\t\treturn run\n\t\t\n\t\t## If set to as a wrapper to a function\n\t\telse:\n\t\t\tdef log_decorator(func):\n\n\t\t\t\t@wraps(func)\n\t\t\t\tdef execute(*args, **kwargs):\n\t\t\t\t\ttry:\n\t\t\t\t\t\tresult = func(*args, **kwargs)\n\n\t\t\t\t\t\tresult_response = make_response(result)\n\n\t\t\t\t\t\tpost_data = {\n\t\t\t\t\t\t\t\"error\": None,\n\t\t\t\t\t\t\t\"stack_trace\": None,\n\t\t\t\t\t\t\t\"method\": request.method,\n\t\t\t\t\t\t\t\"source_ip\": request.remote_addr,\n\t\t\t\t\t\t\t\"url\": request.url,\n\t\t\t\t\t\t\t\"status_code\": result_response.status_code,\n\t\t\t\t\t\t\t\"headers\": str(header_dict),\n\t\t\t\t\t\t\t\"user_agent\": user_agent,\n\t\t\t\t\t\t\t\"language\": language,\n\t\t\t\t\t\t\t\"platform\": platform,\n\t\t\t\t\t\t\t\"browser\": browser,\n\t\t\t\t\t\t\t\"referer\": referer,\n\t\t\t\t\t\t\t\"origin\": origin,\n\t\t\t\t\t\t\t\"auth_header\": auth_header_token,\n\t\t\t\t\t\t\t\"access_time\": datetime.now().strftime(\"%A, %d %B %Y %H:%M:%S\"),\n\t\t\t\t\t\t\t\"logging_access_key\": self.accessKey,\n\t\t\t\t\t\t\t\"json\": json_data,\n\t\t\t\t\t\t\t\"request_params\": str(dict(request.args))\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tself.startPost(post_data)\n\n\t\t\t\t\texcept 
Exception as e:\n\t\t\t\t\t\tresult = func(*args, **kwargs)\n\t\t\t\t\t\t\n\t\t\t\t\t\ttrace = traceback.format_exc()\n\n\t\t\t\t\t\tkwargs = {\n\t\t\t\t\t\t\t\"trace\": trace,\n\t\t\t\t\t\t\t\"exception\": str(e)\n\t\t\t\t\t\t}\n\t\t\t\t\t\t\n\t\t\t\t\t\tpost_data = {\n\t\t\t\t\t\t\t\"error\": str(e),\n\t\t\t\t\t\t\t\"stack_trace\": trace,\n\t\t\t\t\t\t\t\"method\": request.method,\n\t\t\t\t\t\t\t\"source_ip\": request.remote_addr,\n\t\t\t\t\t\t\t\"url\": request.url,\n\t\t\t\t\t\t\t\"status_code\": 500,\n\t\t\t\t\t\t\t\"headers\": str(header_dict),\n\t\t\t\t\t\t\t\"user_agent\": user_agent,\n\t\t\t\t\t\t\t\"language\": language,\n\t\t\t\t\t\t\t\"platform\": platform,\n\t\t\t\t\t\t\t\"browser\": browser,\n\t\t\t\t\t\t\t\"referer\": referer,\n\t\t\t\t\t\t\t\"origin\": origin,\n\t\t\t\t\t\t\t\"auth_header\": auth_header_token,\n\t\t\t\t\t\t\t\"access_time\": datetime.now().strftime(\"%A, %d %B %Y %H:%M:%S\"),\n\t\t\t\t\t\t\t\"logging_access_key\": self.accessKey,\n\t\t\t\t\t\t\t\"json\": json_data,\n\t\t\t\t\t\t\t\"request_params\": str(dict(request.args))\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tself.startPost(post_data)\n\t\t\t\t\t\n\t\t\t\t\treturn result\n\t\t\t\t\n\t\t\t\treturn execute\n\t\t\t\n\t\t\treturn log_decorator",
"def _log_response(*, log_path: Path, ip_dict: Dict[str, int], response: Response) -> None:\n LOGGER.info(f\"logged request: {response.url}\")\n with log_path.open(mode=\"a\", encoding=\"utf-8\") as f:\n all_responses = [response]\n\n # Poll and wait for operations, if applicable\n is_operation_request = bool(\n re.match(re.compile(\".*/api/versioned/v1/operations/.*\"), response.url)\n )\n is_get_request = response.request.method == \"GET\"\n if is_get_request and is_operation_request:\n wait_resp = _collect_operation_calls(response=response)\n all_responses.extend(wait_resp)\n\n all_json = [_response_to_json(r, ip_dict) for r in all_responses]\n f.writelines([f\"{j}\\n\" for j in all_json])",
"def log(self, client_addr, request):\n with codecs.open(self.log_path, \"a\", 'UTF-8') as fh_out:\n print >> fh_out, (time.strftime('%Y-%m-%d %H:%M:%S') + \"\\t\" +\n ':'.join([str(i) for i in client_addr]) + \"\\t\" +\n request)",
"def dump_to_log(self):\n # self._send_request(\"/dumpToLog\")\n pass",
"def log_request(task_request, request):\n msg = \"{0.method} {0.url}: {0.body}\".format(request)\n log_info(task_request, msg)",
"def log_request_response(task_request, response):\n log_request(task_request, response.request)\n log_response(task_request, response)",
"def log_access():\n # todo use project prefix\n tail('/var/log/nginx/access.log')",
"def log_route():\n return send_file(path.join('..', 'app.log'), as_attachment=True)",
"def _addLogEntry(request, action, pagename, filename):\n from MoinMoin.logfile import editlog\n t = wikiutil.timestamp2version(time.time())\n fname = wikiutil.url_quote(filename)\n\n # Write to global log\n log = editlog.EditLog(request)\n log.add(request, t, 99999999, action, pagename, request.remote_addr, fname)\n\n # Write to local log\n log = editlog.EditLog(request, rootpagename=pagename)\n log.add(request, t, 99999999, action, pagename, request.remote_addr, fname)",
"def log_request(ingredient_list):\n logPath = getLogPath()\n ingredient_string = \"\".join([str(i) for i in ingredient_list])\n with open(logPath, 'a') as log:\n log.write(ingredient_string)\n log.write(\"\\n\")",
"def log_request(self, r):\n\n token = r.headers.get(self.header, None)\n r.token = token\n self.requests.append(r)\n if r.token:\n self.log.debug('[%s] %s', token or '/', r.url)",
"def log_requests(response):\n ts = strftime('[%Y-%b-%d %H:%M-%S]')\n\n logger.info('Flask: {0} {1} {2} {3} {4} {5}'.\n format(ts, request.remote_addr, request.method, request.scheme, request.full_path, response.status))\n\n return response",
"def attach_request_log(response):\n allure.attach(\n dump.dump_all(response).decode(\"utf-8\"),\n name=\"Full request log\",\n extension=\"txt\",\n )",
"def log():\n data = {}\n log = {}\n log['dia'] = date.today().strftime(\"%d/%m/%Y\")\n log['info'] = ('Rooms IP: %s %s %s')%(request.remote_addr,request.method, request.url)\n data['data'] = log\n try:\n r = requests.post(uri, json=data)\n except requests.exceptions.RequestException as e:\n print(e)\n print(\"\\n\\nThe microservice Log is unvailable. The Log is %s.\"%(log['info']))\n else:\n if r.status_code == 200:\n print(\"Register Log was a success\")\n else:\n print(\"Register Log was an unsuccess\")",
"def main():\n\n logfile = open(LOGNAME, \"r\")\n datafile = open(DATANAME, \"w\")\n\n logfile.readline() # first line is always a date\n print(\"fetching addresses...\")\n\n line = logfile.readline()\n while not line.startswith(\"***\") and line.strip():\n cat, lat, lng = line.split(';')\n\n latlng = \"%s,%s\" % (lat, lng)\n params = {\n 'latlng': latlng\n }\n\n req = requests.get(GOOGLE_MAPS_API_URL, params=params)\n res = req.json()\n print(res)\n result = res['results'][0]\n address = result['formatted_address']\n\n datafile.write(\"%s en %s |%s,%s\" % (cat, address.partition(\",\")[0], lat, lng))\n\n line = logfile.readline()\n\n logfile.close()\n datafile.close()\n\n print(\"done.\")",
"def loggGps(lat, lon, name):\r\n\r\n loggfil = dataDir + 'gpslogg.txt' # Log file\r\n\r\n # File names of FeatureCollections containing ALL valid data\r\n gpspunkt = dataDir + 'gpspunkt.geojson' # last valid data point\r\n gpskurve = dataDir + 'gpskurve.geojson' # curve with newest data points\r\n\r\n # This is javascript-friendly formatted date string\r\n tid = datetime.datetime.now().strftime(\"%a %b %d %Y %H:%M:%S\") + ' GMT+0000'\r\n\r\n # Logg all incoming requests to file\r\n with open(loggfil ,'a') as outfile:\r\n outfile.write(tid + \"\\t\" + str(lon)+ \"\\t\" + str(lat)+ \"\\t\" + name + \"\\n\")\r\n\r\n # Sanity check\r\n if not name or not lat or not lon:\r\n return \"NOT OK - one or more parameters missing!\"\r\n try:\r\n lon = float(str(lon))\r\n lat = float(str(lat))\r\n except ValueError:\r\n return \"lon, lat not recognized as numeric values\"\r\n\r\n # Sanity check of lat, lon values\r\n if not -90 < lat < 90:\r\n return \"latitude outside accepted range\"\r\n if not -180 < lon < 180:\r\n return \"longitude outside accepted range\"\r\n\r\n # Stripping \"name\" for anything not alphanumeric!\r\n name2 = re.sub( '\\W+', '', name)\r\n if name2 != name:\r\n return \"Non alphanumeric characters not accepted in name!\"\r\n\r\n # Template for GeoJson feature\r\n pointdata = {\r\n \"type\": \"Feature\",\r\n \"geometry\": {\r\n \"type\": \"Point\",\r\n \"coordinates\": [float(str(lon)), float(str(lat))]\r\n },\r\n \"properties\": {\r\n \"id\": str(name),\r\n \"time\" : tid\r\n }\r\n }\r\n\r\n # (over)writing single feature to file determined by name parameter\r\n with open(dataDir + name + '.geojson' ,'w') as outfile:\r\n json.dump(pointdata, outfile)\r\n\r\n # Adding point to featureCollection of points\r\n # If there exist a feature with correct ID in this collection we replace it\r\n # If not we append to it.\r\n gjdata, idx = readGeoJson( gpspunkt, name )\r\n if idx >= 0:\r\n gjdata['features'][idx] = pointdata\r\n\r\n else:\r\n gjdata['features'].append( pointdata)\r\n\r\n with open(gpspunkt, \"w\") as outfile:\r\n json.dump(gjdata, outfile)\r\n\r\n # Appending new point to the LineString-feature found in featureCollection\r\n # of LineString's If no matching ID is found in collection we will append\r\n # a new LineString-feature to it.\r\n gjdata, idx = readGeoJson( gpskurve, name)\r\n if idx >= 0:\r\n gjdata['features'][idx]['geometry']['coordinates'].append(\r\n [float(str(lon)), float(str(lat))]\r\n )\r\n\r\n # Removing ancient history\r\n gjdata['features'][idx]['geometry']['coordinates'] = \\\r\n gjdata['features'][idx]['geometry']['coordinates'][-historyPoints:]\r\n\r\n # Updating timestamp\r\n gjdata['features'][idx]['properties']['time'] = tid\r\n\r\n newCurve = gjdata['features'][idx]\r\n\r\n else:\r\n # Creating a new lineString feature.\r\n newCurve = pointdata\r\n newCurve['geometry']['type'] = \"LineString\"\r\n pos = newCurve['geometry']['coordinates']\r\n # To have a valid lineString we repeat coordinates\r\n newCurve['geometry']['coordinates'] = [ pos, pos]\r\n gjdata['features'].append( newCurve)\r\n\r\n with open(gpskurve, \"w\") as outfile:\r\n json.dump(gjdata, outfile)\r\n\r\n # (over)writes track with recent history to file whose name is derived\r\n # from name parameter\r\n newCurve['geometry']['coordinates'] = \\\r\n newCurve['geometry']['coordinates'][-historyPoints:]\r\n with open( dataDir + name + '_kurve.geojson' ,'w') as outfile:\r\n json.dump(newCurve, outfile)\r\n\r\n # Return to caller.\r\n return( 'ok' )",
"def log(msg):\n\tfrom http_request import req\n\tif not req: return\n\t\t\n\tif not req.out.get('_log'):\n\t\treq.out['_log'] = []\n\treq.out['_log'].append(msg)",
"def _log_request(res: SpamResult) -> None:\n _log.info(f\"requestId=[{request.id}] result=[{res.label}] reason=[{res.reason}]\")",
"def task_records_to_log(self, records: List[Any]) -> None:\n for r in records:\n self._logger.info(\n \"[%s]: %s - %s - %s/%s\",\n r[\"createdDateTime\"],\n r[\"userPrincipalName\"],\n r[\"ipAddress\"],\n r[\"location\"][\"countryOrRegion\"],\n r[\"location\"][\"city\"],\n )",
"def process_request(self, request):\n logger.info(request.get_full_path)",
"def log_response(task_request, response):\n msg = \"{0.status_code} {0.reason} for {0.url}: {0.content}\".format(response)\n log_info(task_request, msg)",
"def debug_requests_on():\n HTTPConnection.debuglevel = 2\n\n logging.basicConfig(filename='example1.log', filemode='w', level=logging.INFO, format='%(asctime)s %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S %p')\n logging.getLogger().setLevel(logging.DEBUG)\n\n requests_log = logging.getLogger(\"requests.packages.urllib3\")\n requests_log.setLevel(logging.DEBUG)\n requests_log.propagate = True",
"def main(logfile, location_id, units, api_key, interval, mode):\n\n logging.basicConfig(\n filename=logfile,\n filemode=\"a\",\n format=\"%(created)f %(message)s\",\n level=logging.INFO,\n )\n\n url = build_url(location_id, api_key, units)\n\n while True:\n result = get_data(url)\n weather = parse_data(result)\n\n if mode == \"once\":\n print(weather)\n break\n\n logging.info(weather)\n time.sleep(interval)",
"def logs_directory(self):",
"def __call__(self, request):\n request.start_time = time.time()\n\n response = self.get_response(request)\n\n log_data = self.extract_log_info(request=request, response=response)\n logger.info(log_data)\n\n return response",
"def _serve_logdir(self, request):\n # TODO(chihuahua): Remove this method once the frontend instead uses the\n # /data/environment route (and no deps throughout Google use the\n # /data/logdir route).\n return http_util.Respond(\n request, {'logdir': self._logdir}, 'application/json')",
"def log_request(self, code='-', size='-'):\n if self.server.log_requests:\n BaseHTTPServer.BaseHTTPRequestHandler.log_request(self, code, size)",
"def logging_data(self):\n with open('sensor_data.log','w') as f:\n json.dump(self.read_continuous_data, f)",
"def log_info(request, piece_id):\n\n asset = AssetMap.objects.get(piece=piece_id)\n url = '/'.join([FTP_URL, asset.folder, asset.name+'.log',])\n context = {\n 'keyform': KeySearchForm(auto_id=False),\n 'piece' : Piece.objects.get(pk=piece_id),\n 'logfile': requests.get(url),\n }\n return render(request, 'mutopia/piece_log.html', context)"
] | [
"0.64807194",
"0.6284928",
"0.6123197",
"0.61184096",
"0.6048109",
"0.6014808",
"0.5960959",
"0.5927083",
"0.58963996",
"0.58346736",
"0.57979995",
"0.57882774",
"0.57657814",
"0.5727881",
"0.5725551",
"0.5710415",
"0.5706792",
"0.56919837",
"0.56617963",
"0.56570566",
"0.56485695",
"0.56268907",
"0.5579296",
"0.55485845",
"0.54763484",
"0.5464198",
"0.54536957",
"0.54358524",
"0.54318655",
"0.54149574"
] | 0.7201425 | 0 |
Set trait values by using the keyword arguments. Use cls.create() to create a new instance of this model. | def __init__(self, **kwargs):
super(Model, self).__init__(**kwargs)
for (key, value) in kwargs.iteritems():
# use setattr so that validation is triggered
setattr(self, key, value) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, **attributes):\n self.set(**attributes)",
"def set(self, **kwargs):\n raise NotImplementedError",
"def set(self, **kwargs):\n field_names = self.get_field_names()\n for name, value in kwargs.iteritems():\n if name in field_names:\n setattr(self, name, value)",
"def set_params(self,**kwargs):\n for key in kwargs:\n setattr(self, key, kwargs[key])",
"def setup(self, **kwargs):\n\n for k, v in kwargs.items():\n setattr(self, k, v)",
"def set ( self, trait_change_notify = True, **traits ):\n if not trait_change_notify:\n self._trait_change_notify( False )\n try:\n for name, value in traits.items():\n setattr( self, name, value )\n finally:\n self._trait_change_notify( True )\n else:\n for name, value in traits.items():\n setattr( self, name, value )\n\n return self",
"def set_attr(self, **kwargs):\n # set specified values\n for key in kwargs:\n if key in self.variables:\n data = self.get_attr(key)\n if kwargs[key] is None:\n data.set_attr(is_set=False)\n try:\n data.set_attr(is_var=False)\n except KeyError:\n pass\n continue\n\n try:\n float(kwargs[key])\n is_numeric = True\n except (TypeError, ValueError):\n is_numeric = False\n\n # dict specification\n if (isinstance(kwargs[key], dict) and\n not isinstance(data, dc_simple)):\n data.set_attr(**kwargs[key])\n\n # value specification for component properties\n elif isinstance(data, dc_cp) or isinstance(data, dc_simple):\n if is_numeric:\n if np.isnan(kwargs[key]):\n data.set_attr(is_set=False)\n if isinstance(data, dc_cp):\n data.set_attr(is_var=False)\n\n else:\n data.set_attr(val=kwargs[key], is_set=True)\n if isinstance(data, dc_cp):\n data.set_attr(is_var=False)\n\n elif (kwargs[key] == 'var' and\n isinstance(data, dc_cp)):\n data.set_attr(is_set=True, is_var=True)\n\n elif isinstance(data, dc_simple):\n data.set_attr(val=kwargs[key], is_set=True)\n\n # invalid datatype for keyword\n else:\n msg = (\n 'Bad datatype for keyword argument ' + key +\n ' at ' + self.label + '.')\n logger.error(msg)\n raise TypeError(msg)\n\n elif isinstance(data, dc_cc) or isinstance(data, dc_cm):\n # value specification for characteristics\n if (isinstance(kwargs[key], CharLine) or\n isinstance(kwargs[key], CharMap)):\n data.char_func = kwargs[key]\n\n # invalid datatype for keyword\n else:\n msg = (\n 'Bad datatype for keyword argument ' + key +\n ' at ' + self.label + '.')\n logger.error(msg)\n raise TypeError(msg)\n\n elif isinstance(data, dc_gcp):\n # value specification of grouped component parameter method\n if isinstance(kwargs[key], str):\n data.method = kwargs[key]\n\n # invalid datatype for keyword\n else:\n msg = (\n 'Bad datatype for keyword argument ' + key +\n ' at ' + self.label + '.')\n logger.error(msg)\n raise TypeError(msg)\n\n elif key in ['design', 'offdesign']:\n if not isinstance(kwargs[key], list):\n msg = (\n 'Please provide the ' + key + ' parameters as list '\n 'at ' + self.label + '.')\n logger.error(msg)\n raise TypeError(msg)\n if set(kwargs[key]).issubset(list(self.variables.keys())):\n self.__dict__.update({key: kwargs[key]})\n\n else:\n msg = (\n 'Available parameters for (off-)design specification '\n 'are: ' + str(list(self.variables.keys())) + ' at ' +\n self.label + '.')\n logger.error(msg)\n raise ValueError(msg)\n\n elif key in ['local_design', 'local_offdesign',\n 'printout', 'char_warnings']:\n if not isinstance(kwargs[key], bool):\n msg = (\n 'Please provide the parameter ' + key + ' as boolean '\n 'at component ' + self.label + '.')\n logger.error(msg)\n raise TypeError(msg)\n\n else:\n self.__dict__.update({key: kwargs[key]})\n\n elif key == 'design_path' or key == 'fkt_group':\n if isinstance(kwargs[key], str):\n self.__dict__.update({key: kwargs[key]})\n elif kwargs[key] is None:\n self.design_path = None\n elif np.isnan(kwargs[key]):\n self.design_path = None\n else:\n msg = (\n 'Please provide the design_path parameter as string. '\n 'For unsetting use np.nan or None.')\n logger.error(msg)\n raise TypeError(msg)\n\n self.new_design = True\n\n # invalid keyword\n else:\n msg = (\n 'Component ' + self.label + ' has no attribute ' +\n str(key) + '.')\n logger.error(msg)\n raise KeyError(msg)",
"def __init__(self, **kwargs):\n for key, value in kwargs.items():\n setattr(self, key, value)",
"def __init__(self, **kwargs):\n for key, val in kwargs.items():\n setattr(self, key, val)",
"def __init__(self, **kwargs):\n for key, val in kwargs.items():\n setattr(self, key, val)",
"def __init__(self, **kwargs):\n for key, val in kwargs.items():\n setattr(self, key, val)",
"def __init__(self, **kwargs: Any):\n for name, value in kwargs.items():\n setattr(self, name, value)",
"def __init__(self, **kwargs):\n # loop over the given kwargs\n for key, value in kwargs.items():\n # treat them like attribute assignments\n setattr(self, key, value)",
"def update(self, **kwargs):\n print(\"Updating model\")\n print(kwargs)\n for key in kwargs:\n setattr(self, key, kwargs[key])",
"def __init__(self, **kwds ):\n super(Model, self).__init__()\n self.__key = None \n for name, value in kwds.items():\n self[name] = value",
"def __init__(self, **kwargs):\n\n args = {\n 'nobs': None, # Number of observations\n 'npred': None, # Number of predictors\n 'nrelpred': None, # Number of relevant predictors\n 'relpos': None, # Position of relevant predictor components\n 'gamma': None, # Decay factor of eigenvalue of predictor\n 'rsq': None, # Coefficient of determination\n 'sim_type': None, # Type of simulation: univariate, bivariate, multivariate\n }\n for key, value in args.items():\n setattr(self, key, value)\n\n for key, value in kwargs.items():\n setattr(self, key, value)",
"def set(self, **inputs):\r\n for property, value in inputs.items():\r\n try:\r\n setattr(self,property,value)\r\n except:\r\n raise Exception(property + \" keyword argument not recognized\")\r\n\r\n # update values\r\n self._check_attributes()\r\n self._set_functions()",
"def update(self, **kwargs):\n for k, v in kwargs.iteritems():\n if hasattr(self, k):\n setattr(self, k, v)",
"def update(self, **kwargs):\n for key, val in kwargs.items():\n setattr(self, key, val)",
"def update(self, *args, **kwargs):\n if args is not () and args is not None:\n attr_names = [\"id\", \"size\", \"x\", \"y\"]\n for index, attr in enumerate(args):\n setattr(self, attr_names[index], attr)\n else:\n for key, value in kwargs.items():\n if hasattr(self, key):\n setattr(self, key, value)",
"def update(self, *args, **kwargs):\n if args:\n arg_order = [\"id\", \"size\", \"x\", \"y\"]\n for index, arg in enumerate(args):\n setattr(self, arg_order[index], arg)\n elif kwargs:\n for key, value in kwargs.items():\n if hasattr(self, key):\n setattr(self, key, value)",
"def __init__(self, **attrs):\n \n # set given attributes\n for name, value in attrs.items():\n if hasattr(self, name):\n setattr(self, name, value)\n else:\n raise AttributeError(\"Attribute not found! --> %s\" % name)",
"def __init__(self, **kwargs):\n super(Model, self).__init__(**kwargs)\n self._params = self.find_params()",
"def update(self, *args, **kwargs):\n if kwargs is not None:\n for key, value in kwargs.items():\n setattr(self, key, value)",
"def fill(self, **kwargs):\r\n for name in kwargs.keys():\r\n setattr(self, name, kwargs[name])\r\n return self",
"def update(self, *args, **kwargs):\n attributes = [\"id\", \"size\", \"x\", \"y\"]\n if len(args) > 0:\n for i in range(len(args)):\n setattr(self, attributes[i], args[i])\n else:\n self.id = kwargs.get(\"id\", self.id)\n self.size = kwargs.get(\"size\", self.size)\n self.x = kwargs.get(\"x\", self.x)\n self.y = kwargs.get(\"y\", self.y)",
"def set(self, **kwargs: Any) -> None: # nosec\n attributes = {}\n user_id: int = int(kwargs[\"user_id\"])\n user = self.first(id_int=user_id)\n\n for k, v in kwargs.items():\n if k in user.__attr_searchable__:\n attributes[k] = v\n\n if kwargs.get(\"email\", None):\n user.email = kwargs[\"email\"]\n elif kwargs.get(\"role\", None):\n user.role = kwargs[\"role\"]\n elif kwargs.get(\"name\", None):\n user.name = kwargs[\"name\"]\n elif kwargs.get(\"budget\", None):\n user.budget = kwargs[\"budget\"]\n elif kwargs.get(\"website\", None):\n user.website = kwargs[\"website\"]\n elif kwargs.get(\"institution\", None):\n user.institution = kwargs[\"institution\"]\n else:\n raise Exception\n\n attributes[\"__blob__\"] = _serialize(user, to_bytes=True)\n\n self.update_one(query={\"id_int\": user_id}, values=attributes)",
"def __init__(self, **attributes):\n for key, value in attributes.items():\n setattr(self, key, value)",
"def set(self, **kwargs):\n for key in kwargs:\n if key in self.bool_params:\n self.bool_params[key] = kwargs[key]\n elif key in self.int_params:\n self.int_params[key] = kwargs[key]\n elif key in self.str_params:\n self.str_params[key] = kwargs[key]\n elif key in self.float_params:\n self.float_params[key] = kwargs[key]\n else:\n raise RuntimeError('MOPAC calculator: unknown keyword: ' + key)",
"def update_from_kwargs(self, **kwargs):\n for (key, value) in kwargs.items():\n setattr(self, key, value)"
] | [
"0.67126644",
"0.65768415",
"0.6475461",
"0.6397765",
"0.6382912",
"0.6368563",
"0.6310652",
"0.6293842",
"0.622368",
"0.622368",
"0.622368",
"0.6221523",
"0.6202023",
"0.61928356",
"0.60710394",
"0.6064031",
"0.6038382",
"0.602038",
"0.6000997",
"0.59984493",
"0.599653",
"0.59900546",
"0.5985465",
"0.59746784",
"0.5946623",
"0.59223515",
"0.59182036",
"0.59156644",
"0.5910228",
"0.59053475"
] | 0.7016906 | 0 |
Create a new instance of this model. The trait values on this new instance will be the same values as on the original instance, with the exception of immutable types like integers. | def __copy__(self):
trait_data = self.__getstate__()
inst = self.__class__.create(trait_data)
return inst | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def copy(self):\n new = object.__new__(type(self))\n new.required = self.required\n new.title = self.title\n new.type = self.type\n values = self.values\n if (values is not None):\n values = (*values,)\n new.values = values\n return new",
"def __new__(cls, *args, **kwargs):\n # This is needed because in python 2.6 object.__new__ only accepts the\n # cls argument.\n new_meth = super(Model, cls).__new__\n if new_meth is object.__new__:\n inst = new_meth(cls)\n else:\n inst = new_meth(cls, **kwargs)\n\n inst._trait_values = {}\n inst._trait_dyn_inits = {}\n\n # Make the TraitType instances set their default values on the\n # instance.\n for key in dir(cls):\n # Some attributes raise AttributeError like zope.interface's\n # __provides__ attributes even though they exist. This causes\n # AttributeErrors even though they are listed in dir(cls).\n try:\n value = getattr(cls, key)\n except AttributeError:\n pass\n else:\n if isinstance(value, TraitType):\n value.instance_init(inst)\n\n return inst",
"def clone(self, **kwargs):\n return attr.evolve(self, **kwargs)",
"def clone(self):\n return attr.evolve(self)",
"def new(self, **kwargs):\n return self.__model__(**self._preprocess_params(kwargs))",
"def clone(self) -> Mutator:\n raise NotImplementedError",
"def clone(self):\n return attr.evolve(self, env=dict(self._env))",
"def new_object(cls):\n return cls.for_value([])",
"def create(self, validated_data):\n ModelClass = self.Meta.model\n instance = ModelClass()\n self.instance = instance\n for key, value in validated_data.items():\n setattr(instance, key, value)\n return super().create(validated_data)",
"def new_value(self):\n return data_value_factory(self.type)",
"def copy(self):\n return self.__class__(**vars(self))",
"def copy(self):\n new = self.__class__(integration=None, data=None)\n for attribute, value in self.__dict__.items():\n if attribute in self.referenced_attributes:\n setattr(new, attribute, value)\n elif hasattr(value, 'copy'):\n setattr(new, attribute, value.copy())\n else:\n setattr(new, attribute, deepcopy(value))\n return new",
"def copy(self):\n return self.__class__(self.value, self.is_cloud)",
"def make_immutable(self):\n # just set the flag to make object immutable and hashable\n self.immutable = True",
"def copy(self):\n new = self.__class__()\n do_not_copy_by_ref = {\"alleles\", \"strains\", \"base_cobra_model\", \"notes\",\n \"annotation\"}\n for attr in self.__dict__:\n if attr not in do_not_copy_by_ref:\n new.__dict__[attr] = self.__dict__[attr]\n new.notes = deepcopy(self.notes)\n new.annotation = deepcopy(self.annotation)\n\n new.alleles = DictList()\n do_not_copy_by_ref = {\"_strains\", \"_model\"}\n for allele in self.alleles:\n new_allele = allele.__class__()\n for attr, value in iteritems(allele.__dict__):\n if attr not in do_not_copy_by_ref:\n new_allele.__dict__[attr] = copy(\n value) if attr == \"formula\" else value\n new_allele._model = new\n new.alleles.append(new_allele)\n\n new.strains = DictList()\n do_not_copy_by_ref = {\"_model\", \"_alleles\", \"_base_cobra_model\"}\n for strain in self.strains:\n new_strain = strain.__class__()\n for attr, value in iteritems(strain.__dict__):\n if attr not in do_not_copy_by_ref:\n new_strain.__dict__[attr] = copy(value)\n new_strain._model = new\n new.strains.append(new_strain)\n # update awareness\n for allele, stoic in iteritems(strain._alleles):\n new_allele = new.alleles.get_by_id(allele.id)\n new_strain._alleles[new_allele] = stoic\n new_allele._strain.add(new_strain)\n # it doesn't make sense to retain the context of a copied model so\n # assign a new empty context\n new._contexts = list()",
"def clone_traits ( self, traits = None, memo = None, copy = None,\n **metadata ):\n if memo is None:\n memo = {}\n memo[ 'traits_copy_mode' ] = copy\n new = self.__new__( self.__class__ )\n memo[ id( self ) ] = new\n new.copy_traits( self, traits, memo, copy, **metadata )\n return new",
"def copy(self):\n new_model = Model(\n name=self.name,\n functions=copy.deepcopy(self.functions),\n domain=self.domain.copy(),\n density=self.density.copy(),\n )\n new_model.update()\n\n return new_model",
"def new_value(self):\n return data_value_factory(self.type, self.name)",
"def new_value(self):\n return data_value_factory(self.type, self.name)",
"def copy(self):\n return Struct(self)",
"def clone(self):\n return _libsbml.ModelCreator_clone(self)",
"def clone(self):\n raise NotImplementedError",
"def copy(self):\n cls = self.__class__\n result = cls.__new__(cls)\n result.__dict__.update(self.__dict__)\n return result",
"def copy(self):\n new = object.__new__(type(self))\n new.approximate_online_count = self.approximate_online_count\n new.approximate_user_count = self.approximate_user_count\n new.description = self.description\n new.discovery_splash_hash = self.discovery_splash_hash\n new.discovery_splash_type = self.discovery_splash_type\n new.emojis = self.emojis.copy()\n features = self.features\n if (features is not None):\n features = (*features,)\n new.features = features\n new.icon_hash = self.icon_hash\n new.icon_type = self.icon_type\n new.id = self.id\n new.invite_splash_hash = self.invite_splash_hash\n new.invite_splash_type = self.invite_splash_type\n new.stickers = self.stickers.copy()\n new.name = self.name\n return new",
"def __new__(cls, value: int):\n new_object = super(CalibrationStatus, cls).__new__(cls, value)\n new_object._value = value # type: ignore[attr-defined]\n new_object._binary = new_object._to_binary() # type: ignore[attr-defined]\n return new_object",
"def copy(self):\n return object.__new__(type(self))",
"def MakeModel(self):\n pass",
"def clone(self):\n return _libsbml.Model_clone(self)",
"def __copy__(self):\n return self.__class__(self.m, self.n, self.data)",
"def __copy__(self):\n return type(self)(self.number)"
] | [
"0.6676559",
"0.6509449",
"0.6344153",
"0.6336235",
"0.631485",
"0.6277068",
"0.5996439",
"0.5996178",
"0.59518605",
"0.5939236",
"0.5934095",
"0.5916765",
"0.5915471",
"0.5902185",
"0.5884394",
"0.5878762",
"0.58628356",
"0.58510655",
"0.58510655",
"0.5834655",
"0.5824118",
"0.5819322",
"0.5802363",
"0.5786273",
"0.57855093",
"0.5782583",
"0.578076",
"0.57717085",
"0.5771116",
"0.5765727"
] | 0.7120307 | 0 |
Build a dictionary of traits and their current values. | def __getstate__(self):
# grab all the traits
traits = self.traits()
# filter out transient traits
traits = [trait for trait in traits if traits[trait].get_metadata("transient") in [None, False]]
# build a dictionary
# TODO: use self.__dict__ instead of self._trait_values ?
result = dict([(traitname, self._trait_values[traitname]) for traitname in traits])
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def traits(self):\n members = [member for member in getmembers(self.__class__) if isinstance(member[1], TraitType)]\n traits = dict(members)\n return traits",
"def traits ( self, **metadata ):\n traits = self.__base_traits__.copy()\n for name in self.__dict__.keys():\n if name not in traits:\n trait = self.trait( name )\n if trait is not None:\n traits[ name ] = trait\n\n if len( metadata ) == 0:\n return traits\n\n for meta_name, meta_eval in metadata.items():\n if type( meta_eval ) is not FunctionType:\n metadata[ meta_name ] = _SimpleTest( meta_eval )\n\n result = {}\n for name, trait in traits.items():\n for meta_name, meta_eval in metadata.items():\n if not meta_eval( getattr( trait, meta_name ) ):\n break\n else:\n result[ name ] = trait\n\n return result",
"def add_traits_meta_data ( self, bases, class_dict, base_traits,\n class_traits, instance_traits, prefix_traits,\n view_elements ):\n class_dict[ BaseTraits ] = base_traits\n class_dict[ ClassTraits ] = class_traits\n class_dict[ InstanceTraits ] = instance_traits\n class_dict[ PrefixTraits ] = prefix_traits\n class_dict[ ViewTraits ] = view_elements",
"def traits(self):\n return instance_traits.get(self.inst)",
"def to_dict(self):\n state = self.__getstate__()\n\n for (trait_name, trait_value) in state.iteritems():\n if isinstance(trait_value, list):\n new_list = []\n for element in trait_value:\n if isinstance(element, Model):\n element_dict = element.to_dict()\n new_list.append(element_dict)\n else:\n new_list.append(element)\n state[trait_name] = new_list\n elif isinstance(trait_value, dict):\n new_dict = {}\n for (key, value) in trait_value.iteritems():\n if isinstance(value, Model):\n value_dict = value.to_dict()\n new_dict[key] = value_dict\n else:\n new_dict[key] = value\n state[trait_name] = new_dict\n elif isinstance(trait_value, Model):\n value_dict = trait_value.to_dict()\n state[trait_name] = value_dict\n\n return state",
"def class_traits(cls):\n members = [member for member in getmembers(cls) if isinstance(member[1], TraitType)]\n traits = dict(members)\n return traits",
"def facts(self): # pylint: disable=invalid-overridden-method\n return {}",
"def get_efo_traits_data():\n data = []\n # Use set() to avoid duplication when an entry belongs to several categories\n traits_list = set()\n for category in TraitCategory.objects.all().prefetch_related('efotraits__associated_scores','efotraits__traitcategory').order_by('label'):\n cat_scores_count = 0\n cat_id = category.parent.replace(' ', '_')\n\n cat_traits = []\n\n for trait in category.efotraits.all():\n trait_scores_count = trait.scores_count\n if trait_scores_count == 0:\n continue\n cat_scores_count += trait_scores_count\n trait_entry = {\n \"name\": trait.label,\n \"size\": trait_scores_count,\n \"id\": trait.id\n }\n cat_traits.append(trait_entry)\n # Traits table\n traits_list.add(trait)\n\n if cat_scores_count == 0:\n continue\n\n cat_traits.sort(key=lambda x: x[\"name\"].lower())\n\n cat_data = {\n \"name\": category.label,\n \"colour\" : category.colour,\n \"id\" : cat_id,\n \"size_g\": cat_scores_count,\n \"children\": cat_traits\n }\n data.append(cat_data)\n\n traits_list = list(traits_list)\n traits_list.sort(key=lambda x: x.label)\n\n return [traits_list, data]",
"def _trait_to_json(x, self):\n return x",
"def __init__ ( self, cls, class_name, bases, class_dict, is_category ):\n # Create the various class dictionaries, lists and objects needed to\n # hold trait and view information and definitions:\n base_traits = {}\n class_traits = {}\n prefix_traits = {}\n prefix_list = []\n view_elements = ViewElements()\n\n # Create a list of just those base classes that derive from HasTraits:\n hastraits_bases = [ base for base in bases\n if base.__dict__.get( ClassTraits ) is not None ]\n\n # Create a list of all inherited trait dictionaries:\n inherited_class_traits = [ base.__dict__.get( ClassTraits )\n for base in hastraits_bases ]\n\n # Move all trait definitions from the class dictionary to the\n # appropriate trait class dictionaries:\n for name, value in class_dict.items():\n rc = isinstance( value, CTrait )\n if (not rc) and isinstance( value, TraitFactory ):\n value = trait_factory( value )\n rc = isinstance( value, CTrait )\n if (not rc) and isinstance( value, ForwardProperty ):\n rc = True\n validate = _property_method( class_dict, '_validate_' + name )\n if validate is None:\n validate = value.validate\n value = Property(\n _property_method( class_dict, '_get_' + name ),\n _property_method( class_dict, '_set_' + name ),\n validate, True, value.handler, **value.metadata )\n if rc:\n del class_dict[ name ]\n if name[-1:] != '_':\n base_traits[ name ] = class_traits[ name ] = value\n handler = value.handler\n if handler is not None:\n if handler.has_items:\n items_trait = handler.items_event()\n if value.instance_handler=='_list_changed_handler':\n items_trait = _clone_trait( items_trait )\n items_trait.instance_handler = \\\n '_list_items_changed_handler'\n class_traits[ name + '_items' ] = items_trait\n if handler.is_mapped:\n class_traits[ name + '_' ] = _mapped_trait_for(\n value )\n else:\n name = name[:-1]\n prefix_list.append( name )\n prefix_traits[ name ] = value\n elif isinstance( value, FunctionType ):\n _check_method( cls, class_dict, name, value )\n elif isinstance( value, property ):\n class_traits[ name ] = generic_trait\n\n # Handle any view elements found in the class:\n elif isinstance( value, ViewElement ):\n\n # Add the view element to the class's 'ViewElements' if it is\n # not already defined (duplicate definitions are errors):\n if name in view_elements.content:\n raise TraitError, \\\n \"Duplicate definition for view element '%s'\" % name\n view_elements.content[ name ] = value\n\n # Replace all substitutable view sub elements with 'Include'\n # objects, and add the sustituted items to the 'ViewElements':\n value.replace_include( view_elements )\n\n # Remove the view element from the class definition:\n del class_dict[ name ]\n else:\n for ct in inherited_class_traits:\n if name in ct:\n class_traits[ name ] = value = ct[ name ]( value )\n del class_dict[ name ]\n handler = value.handler\n if (handler is not None) and handler.is_mapped:\n class_traits[ name + '_' ] = _mapped_trait_for(\n value )\n break\n\n # Process all HasTraits base classes:\n migrated_properties = {}\n for base in hastraits_bases:\n base_dict = base.__dict__\n\n # Merge base traits:\n for name, value in base_dict.get( BaseTraits ).items():\n if name not in base_traits:\n property_info = value.property()\n if property_info is not None:\n key = id( value )\n migrated_properties[ key ] = value = \\\n self.migrate_property( name, value, property_info,\n class_dict )\n base_traits[ name ] = value\n elif is_category:\n raise TraitError, (\"Cannot override '%s' trait \"\n \"definition in a category\" % 
name)\n\n # Merge class traits:\n for name, value in base_dict.get( ClassTraits ).items():\n if name not in class_traits:\n property_info = value.property()\n if property_info is not None:\n new_value = migrated_properties.get( id( value ) )\n if new_value is not None:\n value = new_value\n else:\n value = self.migrate_property( name, value,\n property_info, class_dict )\n class_traits[ name ] = value\n elif is_category:\n raise TraitError, (\"Cannot override '%s' trait \"\n \"definition in a category\" % name)\n\n # Merge prefix traits:\n base_prefix_traits = base_dict.get( PrefixTraits )\n for name in base_prefix_traits['*']:\n if name not in prefix_list:\n prefix_list.append( name )\n prefix_traits[ name ] = base_prefix_traits[ name ]\n elif is_category:\n raise TraitError, (\"Cannot override '%s_' trait \"\n \"definition in a category\" % name)\n\n # If the base class has a 'ViewElements' object defined, add it to\n # the 'parents' list of this class's 'ViewElements':\n parent_view_elements = base_dict.get( ViewTraits )\n if parent_view_elements is not None:\n view_elements.parents.append( parent_view_elements )\n\n # Make sure there is a definition for 'undefined' traits:\n if (prefix_traits.get( '' ) is None) and (not is_category):\n prefix_list.append( '' )\n prefix_traits[''] = Python\n\n # Save a link to the prefix_list:\n prefix_traits['*'] = prefix_list\n\n # Make sure the trait prefixes are sorted longest to shortest\n # so that we can easily bind dynamic traits to the longest matching\n # prefix:\n prefix_list.sort( lambda x, y: len( y ) - len( x ) )\n\n # Get the list of all possible 'Instance'/'List(Instance)' handlers:\n instance_traits = _get_instance_handlers( class_dict, hastraits_bases )\n\n # If there is an 'anytrait_changed' event handler, wrap it so that\n # it can be attached to all traits in the class:\n anytrait = _get_def( class_name, class_dict, bases,\n '_anytrait_changed' )\n if anytrait is not None:\n anytrait = StaticAnyTraitChangeNotifyWrapper( anytrait )\n\n # Save it in the prefix traits dictionary so that any dynamically\n # created traits (e.g. 
'prefix traits') can re-use it:\n prefix_traits['@'] = anytrait\n\n # Make one final pass over the class traits dictionary, making sure\n # all static trait notification handlers are attached to a 'cloned'\n # copy of the original trait:\n cloned = {}\n for name in class_traits.keys():\n trait = class_traits[ name ]\n handlers = [ anytrait,\n _get_def( class_name, class_dict, bases,\n '_%s_changed' % name ),\n _get_def( class_name, class_dict, bases,\n '_%s_fired' % name ) ]\n\n # Check for an 'Instance' or 'List(Instance)' trait with defined\n # handlers:\n instance_handler = trait.instance_handler\n if ((instance_handler is not None) and\n (name in instance_traits) or\n ((instance_handler == '_list_items_changed_handler') and\n (name[-6:] == '_items') and\n (name[:-6] in instance_traits))):\n handlers.append( getattr( HasTraits, instance_handler ) )\n\n events = trait.event\n if events is not None:\n if isinstance(events, basestring):\n events = [ events ]\n for event in events:\n handlers.append( _get_def( class_name, class_dict, bases,\n '_%s_changed' % event ) )\n handlers.append( _get_def( class_name, class_dict, bases,\n '_%s_fired' % event ) )\n\n handlers = [ h for h in handlers if h is not None ]\n default = _get_def( class_name, class_dict, bases,\n '_%s_default' % name )\n if (len( handlers ) > 0) or (default is not None):\n if name not in cloned:\n cloned[ name ] = None\n class_traits[ name ] = trait = _clone_trait( trait )\n if len( handlers ) > 0:\n _add_notifiers( trait._notifiers( 1 ), handlers )\n if default is not None:\n trait.default_value( 8, default )\n\n # Handle the case of properties whose value depends upon the value\n # of other traits:\n if (trait.type == 'property') and (trait.depends_on is not None):\n cached = trait.cached\n if cached is True:\n cached = '_' + name\n depends_on = trait.depends_on\n if isinstance( depends_on, basestring ):\n depends_on = [ depends_on ]\n for name2 in depends_on:\n dot_name = None\n col = name2.find( '.' )\n if col >= 0:\n dot_name = name2[ col + 1: ].strip()\n name2 = name2[ : col ].strip()\n trait2 = class_traits.get( name2 )\n if trait2 is not None:\n if name2 not in cloned:\n cloned[ name2 ] = None\n class_traits[ name2 ] = trait2 = _clone_trait(\n trait2 )\n handlers = [ self._property_dependency_handler( name,\n cached, dot_name ) ]\n _add_notifiers( trait2._notifiers( 1 ), handlers )\n\n # Handle the case of a list dependency:\n if dot_name is None:\n handler = trait2.handler\n if ((handler is not None) and\n (handler.default_value_type == 5)):\n name2 += '_items'\n trait2 = class_traits.get( name2 )\n if trait2 is not None:\n if name2 not in cloned:\n cloned[ name2 ] = None\n class_traits[ name2 ] = trait2 = \\\n _clone_trait( trait2 )\n _add_notifiers( trait2._notifiers( 1 ),\n handlers )\n\n # Add the traits meta-data to the class:\n self.add_traits_meta_data(\n bases, class_dict, base_traits, class_traits, instance_traits,\n prefix_traits, view_elements )",
"def metadata(self):\n metadata = dict([(key,{}) for key in self.keys])\n for day in self.days:\n metadata[\"Days\"].append(day.attrs)\n for period in day.period:\n metadata[\"Periods\"].append(period.attrs)\n for course in period.courses:\n metadata[\"Courses\"].append(course.attrs)\n for instructor in course.instructor:\n metadata[\"Instructors\"].append(instructor.attrs)\n return metadata",
"def _facts(facts):\n return {'swift_facts': facts}",
"def class_traits ( cls, **metadata ):\n if len( metadata ) == 0:\n return cls.__base_traits__.copy()\n\n result = {}\n\n for meta_name, meta_eval in metadata.items():\n if type( meta_eval ) is not FunctionType:\n metadata[ meta_name ] = _SimpleTest( meta_eval )\n\n for name, trait in cls.__base_traits__.items():\n for meta_name, meta_eval in metadata.items():\n if not meta_eval( getattr( trait, meta_name ) ):\n break\n else:\n result[ name ] = trait\n\n return result",
"def asDict(self) -> dict:\n return {\n \"predominant_occlusion\": self.predominantOcclusion.value,\n \"estimations\": {\n \"chin\": self.chin,\n \"mouth\": self.mouth,\n \"clear\": self.clear,\n \"correct\": self.correct,\n \"partially\": self.partially,\n \"full\": self.full,\n },\n }",
"def trait_names ( self, **metadata ):\n return self.traits( **metadata ).keys()",
"def get_unit_map(self):\n units = dict()\n for t in META:\n for c in META[t]:\n for i in META[t][c]:\n unit = DEFAULT_UNIT\n if (isinstance(i, (tuple, list))):\n val, unit = i\n else:\n val = i\n # category/metric\n n = \"/\".join((c, val))\n units[n] = (unit, t)\n return units",
"def observation_spec(self) -> Dict[str, Any]:",
"def get_composition(self) -> Dict[KappaAgent, int]:\n agent_types = self.get_agent_types_present()\n composition = dict(zip(agent_types, [0] * len(agent_types)))\n for agent_type in agent_types:\n for kappa_complex, abundance in self.get_all_complexes_and_abundances():\n complex_composition = kappa_complex.get_complex_composition()\n local_abundance = complex_composition[agent_type] if agent_type in complex_composition else 0\n composition[agent_type] += abundance * local_abundance\n return composition",
"def as_ctrait(self):\n from .traits import CTrait\n\n metadata = getattr(self, \"_metadata\", {})\n getter = getattr(self, \"get\", None)\n setter = getattr(self, \"set\", None)\n if (getter is not None) or (setter is not None):\n if getter is None:\n getter = _write_only\n metadata.setdefault(\"transient\", True)\n elif setter is None:\n setter = _read_only\n metadata.setdefault(\"transient\", True)\n trait = CTrait(4)\n n = 0\n validate = getattr(self, \"validate\", None)\n if validate is not None:\n n = _arg_count(validate)\n trait.property(\n getter,\n _arg_count(getter),\n setter,\n _arg_count(setter),\n validate,\n n,\n )\n metadata.setdefault(\"type\", \"property\")\n else:\n type = getattr(self, \"ctrait_type\", None)\n if type is None:\n type = trait_types.get(metadata.get(\"type\"), 0)\n trait = CTrait(type)\n\n validate = getattr(self, \"fast_validate\", None)\n if validate is None:\n validate = getattr(self, \"validate\", None)\n if validate is None:\n validate = getattr(self, \"is_valid_for\", None)\n if validate is not None:\n validate = self._is_valid_for\n else:\n validate = getattr(self, \"value_for\", None)\n if validate is not None:\n validate = self._value_for\n\n if validate is not None:\n trait.set_validate(validate)\n\n post_setattr = getattr(self, \"post_setattr\", None)\n if post_setattr is not None:\n trait.post_setattr = post_setattr\n trait.is_mapped(self.is_mapped)\n\n # Note: The use of 'rich_compare' metadata is deprecated; use\n # 'comparison_mode' metadata instead:\n rich_compare = metadata.get(\"rich_compare\")\n if rich_compare is not None:\n trait.rich_comparison(rich_compare is True)\n\n comparison_mode = metadata.get(\"comparison_mode\")\n if comparison_mode is not None:\n trait.comparison_mode(comparison_mode)\n\n metadata.setdefault(\"type\", \"trait\")\n\n trait.default_value(*self.get_default_value())\n\n trait.value_allowed(metadata.get(\"trait_value\", False) is True)\n\n trait.handler = self\n\n trait.__dict__ = metadata.copy()\n\n return trait",
"def values(self) -> Dict[str, Any]:\n all_values = {}\n for name in self.names():\n idx = self.hyperparams[name][1]\n hp_type = self.hyperparams[name][0]\n if hp_type == 'object':\n all_values[name] = self.hyperparams[name][2][idx]\n else:\n all_values[name] = idx\n\n return all_values",
"def test_to_rich_dict(self):\n F81().to_rich_dict()\n HKY85().to_rich_dict()\n GN().to_rich_dict()\n # TODO need to assess ability to reconstruct from this",
"def to_dict(self):\n\n # base features\n dict_ = {\n 'name': self.name,\n 'desc_short': self.desc_short,\n 'desc_long': self.desc_long,\n 'rating_fun': self.rating_fun,\n 'rating_scenic': self.rating_scenic,\n 'rating_aerobic': self.rating_aerobic,\n 'rating_technical': self.rating_technical\n }\n \n # composition features\n for component_name, val in self.composition.items():\n dict_['composition_' + component_name] = val\n\n return dict_",
"def createTraits(self,fileName,startLine,stopLine):\n traits_file = open(fileName,'r')\n \n \n read_file = ''\n temp_dict = {}\n temp_line = ''\n while read_file[:-2].lower() != startLine.lower():\n read_file = traits_file.readline()\n \n for line in traits_file:\n if line == \"\\n\":\n pass\n elif line[:-2] == stopLine or line[:-1] == stopLine:\n traits_file.close()\n return temp_dict \n elif len(line) > 0 and \":\" in line:\n temp_line = line[:line.index(\":\")] \n temp_dict[line[:line.index(\":\")]] = ''\n \n elif len(line) > 0:\n if len(temp_dict) == 0:\n pass\n else:\n temp_dict[temp_line] = line[:-1]",
"def _make_observation(self) -> Dict[str, np.ndarray]:\n return {\n \"cur_pos\": np.array([self.cur_pos], dtype=int),\n }",
"def dict(self) -> dict():\n\n dict_reg_hive = {}\n\n for _attribute in self.attributes.__dict__.items():\n if isinstance(_attribute[1], str):\n if not True in [_attribute[1].startswith(prefix) for prefix in ['<', 'providers.', 'None']]:\n _attribute_value = getattr(self, _attribute[1])\n dict_reg_hive.update({_attribute[1]: _attribute_value})\n\n return dict_reg_hive",
"def data(self):\n data = {}\n if self.base_rule:\n data.update(self.base_rule.data)\n for condition in list(chain.from_iterable(self._conditions.itervalues())):\n data.setdefault(condition.key, []).append(condition)\n for action in list(chain.from_iterable(self._actions.itervalues())):\n data[action.key] = [action] # you can only take a given action _once_\n return data",
"def init_metric_dict(self, metrics=[\"\"], phases=[\"train\", \"val\"]):\n metric_dict = {phase: {metric: [] for metric in metrics} for phase in phases}\n return metric_dict",
"def makeDictionary(self):\n self.dictionary = {}\n for i in range(len(self.movie)):\n if self.movie[i] in self.dictionary:\n vectors = self.dictionary[self.movie[i]]\n vectors[self.user[i]] = self.rating[i]\n self.dictionary[self.movie[i]] = vectors\n else:\n newMovie = dict([(self.user[i], self.rating[i])])\n self.dictionary[self.movie[i]] = newMovie\n return self.dictionary",
"def add_traits(self, **traits):\n super().add_traits(**traits)\n for name, trait in traits.items():\n if trait.get_metadata('sync'):\n self.keys.append(name)\n self.send_state(name)",
"def get_dict(self):\n subt_map = {}\n for seqkey,seqs in self._seqdict.iteritems():\n for seq,seqentry in seqs.iteritems():\n subt_map[seqentry['name']] = {\n 'subtype': seqentry['subtype'],\n 'accessions': seqentry['accessions'],\n 'loci': seqentry['loci']\n }\n\n return subt_map"
] | [
"0.7039768",
"0.63538194",
"0.6289249",
"0.6105193",
"0.5906057",
"0.56297034",
"0.560206",
"0.5465477",
"0.54623127",
"0.5447713",
"0.5443782",
"0.538566",
"0.5379683",
"0.537879",
"0.53405625",
"0.53195786",
"0.5283414",
"0.5221009",
"0.5196004",
"0.51920414",
"0.5186289",
"0.51705295",
"0.5169406",
"0.51672816",
"0.5156743",
"0.51378924",
"0.5125579",
"0.5116795",
"0.5112867",
"0.51101416"
] | 0.7150706 | 0 |
Loads a pickled data file if the file exists; returns False otherwise | def pickle_load(path):
if os.path.isfile(path):
file = pickle.load(open(path, "rb"))
return file
else:
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _load_data(self, filename):\n if not os.path.isfile(filename):\n return False\n\n with open(filename) as f:\n data = pickle.load(f)\n if data:\n self.profiles = data['profiles']\n self.user_data = data['user_data']\n self.api_data = data['api_data']\n else:\n return False",
"def load_pickle_data(filename):\n path = \"../tmp/{}.pckl\".format(filename)\n if os.path.exists(path):\n print(\"LOADING PCKL FILE FROM {}\".format(path))\n f = open(path, 'rb')\n obj = pickle.load(f)\n f.close()\n return obj",
"def load(self,filename=None): # return True\r\n pass",
"def check_pickle() -> list:\n try:\n with open(\"data.pkl\", mode='r+b') as open_pickle:\n data = pickle.load(open_pickle)\n except FileNotFoundError as _:\n data = load_data()\n with open(\"data.pkl\", mode='w+b') as open_pickle:\n pickle.dump(data, open_pickle)\n return data",
"def pickle_load(file_path):\n if not os.path.isfile(file_path):\n return None\n\n with open(file_path, 'rb') as f:\n return pickle.load(f)",
"def load(self):\n filename = self._filename\n if not os.path.exists(filename):\n self.service.log.store('Cannot load %s, does not exist' % filename)\n return False\n \n # Read from file\n self.service.log.store('Loading %s' % filename)\n f = open(filename, 'r')\n raw = f.read()\n f.close()\n \n self.from_json(raw)\n return True",
"def safe_pickle_load(file_name):\n try:\n f = open(file_name, \"r\")\n try:\n data = pickle.load(f)\n except EOFError:\n data = None\n finally:\n f.close()\n except IOError:\n data = None\n\n return data",
"def load(filename):\n try:\n with open(filename, 'rb') as f:\n data = pickle.load(f)\n return data\n except FileNotFoundError:\n return None",
"def load(self):\n if os.path.isfile(self.save_filename):\n data = cloudpickle.load(open(self.save_filename, \"rb\"))\n (\n self.Joints,\n self.Links,\n self.joint_syms,\n self.global_syms,\n self.name,\n self.sym_prefix,\n ) = data\n return True\n return False",
"def pickle_from_file(fname):\n\ttry:\n\t\tfh = open(fname, 'r')\n\t\tdata = cPickle.load(fh)\n\t\tfh.close()\n\texcept:\n\t\t#raise\n\t\tprint \"Loading pickled data failed!\", sys.exc_info()[0]\n\t\tdata = None\n \n\treturn data",
"def Load_Data(savedfilename):\n import pickle\n\n try:\n\n with open(savedfilename, 'rb') as handle:\n loaded_data = pickle.load(handle)\n print\n 'loaded successfully, fileloaded as as:\\nloaded_data'\n return loaded_data\n except:\n import numpy as np\n loaded_data = np.load(savedfilename)\n return loaded_data",
"def can_load(cls, filename):\n return False",
"def load(self):\n result = bolt.PickleDict.load(self)\n if not result and self.oldPath.exists():\n ins = None\n try:\n ins = self.oldPath.open('r')\n self.data.update(compat.uncpickle(ins))\n ins.close()\n result = 1\n except EOFError:\n if ins: ins.close()\n #--Done\n return result",
"def load(self):\n f = self.open(\"rb\")\n try:\n import pickle\n\n return error.checked_call(pickle.load, f)\n finally:\n f.close()",
"def load_data(file_name: str) -> Optional[Any]:\n with open(file_name, \"rb\") as input_data:\n data = pickle.load(input_data)\n return data",
"def test_pickle_load(self):\n l = [1, 2, 3, 4, 5]\n self.plugin.save_data(l)\n\n l = self.plugin.load_data()\n self.assertIn(4, l)",
"def load_data():\n with open('data.pickle', 'rb') as f:\n data = pickle.load(f)\n return data",
"def load(filename):\n with open(filename, 'rb') as f:\n return pickle.load(f)",
"def load_data():\r\n print ('Loadng all the file one time......')\r\n if not os.path.exists('cifar.pkl'):\r\n set_data()\r\n with open('cifar.pkl', 'rb') as cifar_pickle:\r\n data = six.moves.cPickle.load(cifar_pickle)\r\n return data",
"def load(self):\n if self.loaded or not self.has_data:\n return self.data\n if self.filename:\n try:\n with lzma.open(os.path.join('resources', self.game, 'dumps', self.filename), 'rb') as df:\n return self.load_from_open_file(df)\n except Exception as e:\n return ['ERROR! Could not load data: {}'.format(str(e))]",
"def load_object(self, filename):\n with open(filename, 'rb') as inp: # Overwrites any existing file.\n data = pickle.load(inp)\n return data",
"def load(fname):\r\n with open(fname, 'rb') as f:\r\n data = pickle.load(f)\r\n return data",
"def load(self, path: str) -> bool:\n path = replace_standard_paths(path)\n if os.path.isfile(path):\n return self.load_state(torch.load(path))\n return False",
"def load_pickle(path):\n try:\n debug(\"trying to load pickle data\")\n with open(path, mode='rb') as file:\n debug(\"opened file %s for reading\", path)\n return pickle.load(file, encoding='utf-8')\n except (pickle.UnpicklingError, OSError) as err:\n debug(\"error in pickling from %s, error: %s\", path, err)\n return None",
"def load(self):\n return True",
"def load(filename):\n import pickle\n return pickle.load(open(filename, 'r'))",
"def load_pickle(path):\n assert osp.exists(path)\n with open(path, 'r') as f:\n ret = pickle.load(f)\n return ret",
"def load_file(self, filename):\n with open(filename, \"rb\") as pickle_handle:\n return pickle.load(pickle_handle)",
"def loadPickle(pickle_file):\n print(\"Loading pickle data from file: \"+pickle_file)\n\n data = None\n try:\n with open(pickle_file, \"rb\") as fd:\n data = pickle.load(fd)\n except EOFError:\n pass\n except pickle.UnpicklingError as upe:\n print(\"Failed: Loading Pickle Data\")\n except IOError:\n data = {}\n\n return data",
"def load(filename):\n with open(filename,'rb') as f:\n return pickle.load(self,f)"
] | [
"0.7287219",
"0.7144231",
"0.7137119",
"0.70844233",
"0.70248264",
"0.6880995",
"0.6860018",
"0.6859217",
"0.6858166",
"0.6843264",
"0.6794191",
"0.6782974",
"0.67791075",
"0.66986644",
"0.6648589",
"0.6612741",
"0.6609128",
"0.6606487",
"0.66049695",
"0.6603827",
"0.6598058",
"0.6591046",
"0.65620816",
"0.6561499",
"0.655917",
"0.6544169",
"0.65411353",
"0.6487416",
"0.64738536",
"0.6453786"
] | 0.7589998 | 0 |
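A minimal usage sketch for the pickle_load helper shown in the row above (not part of the dataset row itself); the cache filename and the rebuilt dictionary are hypothetical placeholders.

import os
import pickle

def pickle_load(path):
    # as in the row above: returns the unpickled object, or False if the file is missing
    if os.path.isfile(path):
        file = pickle.load(open(path, "rb"))
        return file
    else:
        return False

cached = pickle_load("features_cache.pkl")   # hypothetical cache file
if cached is False:
    cached = {"rebuilt": True}               # rebuild whatever the cache should hold
    with open("features_cache.pkl", "wb") as fh:
        pickle.dump(cached, fh)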
Counts trigrams, bigrams and unigrams for words. This function appends them straight to the data structures they will be used in. This populates features and raw counts for each industry. | def countize(word, ind, count_words, features):
word = clean(word)
word = word.split()
if len(word)>1:
for i in range(1,len(word)):
bigram = (word[i-1],word[i])
count_words[ind].append(bigram)
features.append(bigram)
if len(word)>2:
for i in range(2,len(word)):
trigram = (word[i-2],word[i-1], word[i])
count_words[ind].append(trigram)
features.append(trigram)
for i in range(len(word)):
unigram = word[i]
count_words[ind].append((unigram))
features.append((unigram))
return count_words, features | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def train(self, corpus): \n for sentence in corpus.corpus:\n prev_word = None\n for datum in sentence.data:\n word = datum.word\n self.unigram_count[word] += 1\n if prev_word != None:\n self.bigram_count[prev_word][word] += 1\n prev_word = word\n \n self.vocabulary_size = len(self.unigram_count)\n self.num_words = sum(self.unigram_count.values())",
"def qualify_words():\n config = get_config()\n\n all_feature_matrices = []\n all_opinion_matrices = []\n\n # first 5 parts are labeled, thus are useful\n all_feature_label_vectors = []\n all_opinion_label_vectors = []\n\n for fname in config.file_names:\n feature_X, feature_dims = load_feature_matrices(fname)\n opinion_X, opinion_dims = load_opinion_matrices(fname)\n feature_y = load_feature_labels(fname)\n opinion_y = load_opinion_labels(fname)\n\n # append to all collector\n all_feature_matrices.append(feature_X)\n all_feature_label_vectors.append(feature_y)\n all_opinion_matrices.append(opinion_X)\n all_opinion_label_vectors.append(opinion_y)\n # use first 5 for training\n # stack first 5\n feature_training_X = []\n feature_training_y = []\n opinion_training_X = []\n opinion_training_y = []\n for i in range(5):\n feature_training_X.append(all_feature_matrices[i])\n feature_training_y.append(all_feature_label_vectors[i])\n opinion_training_X.append(all_opinion_matrices[i])\n opinion_training_y.append(all_opinion_label_vectors[i])\n\n feature_training_X = np.hstack(feature_training_X)\n feature_training_y = np.hstack(feature_training_y)\n opinion_training_X = np.hstack(opinion_training_X)\n opinion_training_y = np.hstack(opinion_training_y)\n\n # using combination of rule and ranking score as features\n feature_model = MultinomialNB()\n opinion_model = MultinomialNB()\n\n # training\n feature_model.fit(np.transpose(feature_training_X), feature_training_y.ravel())\n opinion_model.fit(np.transpose(opinion_training_X), opinion_training_y.ravel())\n\n # predicting on candidate aspects and opinions, extracted from amazon reviews\n for i in range(5, len(config.file_names)):\n fname = config.file_names[i]\n feature_pred = feature_model.predict_proba(\n np.transpose(all_feature_matrices[i]))[:,1]\n opinion_pred = opinion_model.predict_proba(\n np.transpose(all_opinion_matrices[i]))[:,1]\n # pickle the prediction results\n with open('../results/' + fname + '_feature_pred_score.pickle', 'wb') as f:\n pickle.dump(feature_pred, f)\n with open('../results/' + fname + '_opinion_pred_score.pickle', 'wb') as f:\n pickle.dump(opinion_pred, f)",
"def featurize(self, data):\n \n features = []\n\n # tokens = data.split()\n\n #Modification 1: Normalization: All lowercase\n #Removing this did not seem to have any performance boost\n #but it did nothing negative either\n data = data.lower()\n\n #Modification 2: Normalization: Tokenizing using NLTK\n #Keep this\n # tokens = word_tokenize(data)\n tokens = data.split()\n\n #Modification 3: Word List: Removing stop words using NLTK\n #Keep this\n stop_words = set(stopwords.words('english'))\n tokens_filtered = []\n\n for t in tokens:\n if t not in stop_words:\n tokens_filtered.append(t)\n\n tokens = tokens_filtered\n\n #Modification 4: Pre-Processing Lemmization using NLTK\n #Surprisingly does not appear to impact performance\n # for t in tokens:\n # t = self.wordnet_lemmatizer.lemmatize(t)\n\n capital = 0\n average_word_length = 5 #It's 4.7, but we'll use 5\n short_words = 0\n long_words = 0\n\n for t in tokens:\n\n #Feature 1: Bag of words\n features.append((t, True))\n\n if(t.isupper()):\n capital += 1\n\n #Feature 3: Long or short word counter, intentionally ignoring length 4\n #and 5 as those are close to average\n #Very important that stop words were removed\n if(len(t) > average_word_length):\n long_words += 1\n elif(len(t) < average_word_length - 1):\n short_words += 1\n \n #Feature 2: Lots of capital\n #Remove this. It only appears to be a rough count of sentence number vs.\n #Capturing any sentiment. Does not impact F1 score in given train/dev sets\n # if(capital > 2):\n # features.append((\"LOTS_OF_CAPITAL\", True))\n\n #Feature 3: Long or short words\n # if(long_words > short_words):\n # features.append((\"LOTS_OF_LONG_WORDS\", True))\n\n\n\n return features",
"def train(self, corpus):\n for sentence in corpus.corpus:\n for datum in sentence.data: \n self.unigramCounts[datum.word] += 1\n self.totalCount += 1",
"def train(self, corpus):\n for sentence in corpus.corpus:\n cleanSentence = sentence.cleanSentence()\n for datum in cleanSentence.data:\n token = datum.word\n self.unigramCounts[token] = self.unigramCounts[token] + 1\n self.total += 1\n\n i = 0\n while i < len(sentence.data) - 1:\n token = str(cleanSentence.get(i))\n self.followingWords[token].add(str(cleanSentence.get(i+1)))\n i += 1\n\n i = 1\n while i < len(sentence.data):\n bigram = str(cleanSentence.get(i-1)) + \" \" + str(cleanSentence.get(i))\n self.bigramCounts[bigram] = self.bigramCounts[bigram] + 1\n\n self.precedingWords[str(cleanSentence.get(i))].add(str(cleanSentence.get(i-1)))\n i += 1\n self.precedingWordsTotal = sum(map(lambda x: len(x), self.precedingWords.values()))\n\n i = 2\n while i < len(sentence.data):\n trigram = str(cleanSentence.get(i-2)) + \" \" + str(cleanSentence.get(i-1)) + \" \" + str(cleanSentence.get(i))\n self.trigramCounts[trigram] = self.trigramCounts[trigram] + 1\n i += 1\n\n #print('precedingWords')\n #print(self.precedingWords)\n #print('followingWords')\n #print(self.followingWords)\n #print('unigrams')\n #print(self.unigramCounts)\n #print('bigrams')\n #print(self.bigramCounts)\n\n #self.discount(self.trigramCounts)\n #self.discount(self.bigramCounts)\n #self.discount(self.unigramCounts)",
"def train(self, corpus):\n\n\n temp = \"\"\n for sentence in corpus.corpus:\n\n i = 0\n for datum in sentence.data:\n # print str(sentence.data)\n self.total=self.total+1\n token = datum.word\n self.unigramCounts[token] = self.unigramCounts[token] + 1\n if (i == 0):\n temp = datum.word\n i = i + 1\n continue\n\n i = i + 1\n\n key = temp + \",\" + token\n self.bigramCounts[key] = self.bigramCounts[key] + 1\n # print token\n temp = token\n\n pass",
"def trainInternal():\n\n con_counts = Counter()\n deflike = Counter()\n\n for record in records:\n data = [re.split(\"\\t\", d) for d in re.split(\"\\n\", record)]\n tokens, tags = zip(*data)\n\n for i, token in enumerate(tokens):\n denom = len(token)\n for indices, f in fqs(token, 0.5): #perform analysis on one word at a time\n context, numer = internalContext(indices, token)\n if tags[i] != \"O\": #only want the named entities\n deflike[context] += f * numer/denom #need to normalize by word length\n con_counts[context] += f * numer/denom\n\n deflike = Counter({context: deflike[context]/con_counts[context] for context in deflike}) #perform division on each entry\n\n return deflike",
"def train(self, corpus):\n lastToken = \"#\"\n for sentence in corpus.corpus:\n for datum in sentence.data:\n token = datum.word\n self.reverseBigramCount[token][lastToken] += 1\n self.bigramCount[lastToken][token] += 1\n self.unigramCount[token] += 1\n self.total += 1\n lastToken = token",
"def fit(self, text):\n\n if self.lowercase:\n text = text.lower()\n\n print(\"Tokenize sentences...\")\n tokens = word_tokenize(text)\n\n self.words_set_size = len(set(tokens))\n\n print(\"Collecting of ngram counters...\")\n\n self.unigram_counts = Counter(tokens)\n self.bigram_counts = Counter(bigrams(tokens))\n\n return self",
"def extract_features(self, docs_train, docs_test, word_ngram_range=(1, 3), dim_reduce=False):\n\n\t\t# Build a vectorizer that splits strings into sequences of i to j words\n\t\tword_vectorizer = TfidfVectorizer(preprocessor=self.preprocess_tweet,\n\t\t\t\t\t\t\t\t\t analyzer='word', ngram_range=word_ngram_range,\n\t\t\t\t\t\t\t\t\t min_df=2, use_idf=True, sublinear_tf=True)\n\t\t# Build a vectorizer that splits strings into sequences of 3 to 5 characters\n\t\tchar_vectorizer = TfidfVectorizer(preprocessor=self.preprocess_tweet,\n\t\t\t\t\t\t\t\t\t analyzer='char', ngram_range=(3, 5),\n\t\t\t\t\t\t\t\t\t min_df=2, use_idf=True, sublinear_tf=True)\n\n\t\t# Build a transformer (vectorizer) pipeline using the previous analyzers\n\t\t# *FeatureUnion* concatenates results of multiple transformer objects\n\t\tself.ngrams_vectorizer = Pipeline([('feats', FeatureUnion([('word_ngram', word_vectorizer),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t ('char_ngram', char_vectorizer),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t ])),\n\t\t\t\t\t\t\t\t # ('clff', LinearSVC(random_state=42))\n\t\t\t\t\t\t\t\t ])\n\n\t\t# Fit (learn vocabulary and IDF) and transform (transform documents to the TF-IDF matrix) the training set\n\t\tX_train_ngrams_tfidf = self.ngrams_vectorizer.fit_transform(docs_train)\n\t\t'''\n\t\t↳ Check the following attributes of each of the transformers (analyzers)—*word_vectorizer* and *char_vectorizer*:\n\t\tvocabulary_ : dict. A mapping of terms to feature indices.\n\t\tstop_words_ : set. Terms that were ignored\n\t\t'''\n\t\tprint(\"%.2f seconds: Finished fit_transforming the training dataset\" % time.process_time())\n\t\tprint(\"Training set word & character ngrams .shape = \", X_train_ngrams_tfidf.shape)\n\n\t\tfeature_names_ngrams = [word_vectorizer.vocabulary_, char_vectorizer.vocabulary_]\n\n\t\t'''\n\t\tExtract the features of the test set (transform test documents to the TF-IDF matrix)\n\t\tOnly transform is called on the transformer (vectorizer), because it has already been fit to the training set.\n\t\t'''\n\t\tX_test_ngrams_tfidf = self.ngrams_vectorizer.transform(docs_test)\n\t\tprint(\"%.2f seconds: Finished transforming the test dataset\" % time.process_time())\n\t\tprint(\"Test set word & character ngrams .shape = \", X_test_ngrams_tfidf.shape)\n\n\t\t# • Dimensionality reduction using truncated SVD (aka LSA)\n\t\tif dim_reduce:\n\t\t\t# Build a truncated SVD (LSA) transformer object\n\t\t\tself.svd_reducer = TruncatedSVD(n_components=300, random_state=43)\n\t\t\t# Fit the LSI model and perform dimensionality reduction\n\t\t\tX_train_ngrams_tfidf_reduced = self.svd_reducer.fit_transform(X_train_ngrams_tfidf)\n\t\t\tprint(\"@ %.2f seconds: Finished dimensionality reduction (LSA) on the training dataset\", time.process_time())\n\t\t\tX_test_ngrams_tfidf_reduced = self.svd_reducer.transform(X_test_ngrams_tfidf)\n\t\t\tprint(\"@ %.2f seconds: Finished dimensionality reduction (LSA) on the test dataset\", time.process_time())\n\n\t\t\tX_train = X_train_ngrams_tfidf_reduced\n\t\t\tX_test = X_test_ngrams_tfidf_reduced\n\t\telse:\n\t\t\tX_train = X_train_ngrams_tfidf\n\t\t\tX_test = X_test_ngrams_tfidf\n\n\t\treturn X_train, X_test, feature_names_ngrams",
"def featurize(self, tokens):\n features = []\n \n nrc_hashtag_emotion_features = self.nrc_hashtag_emotion(tokens)\n nrc_affect_intensity_features = self.nrc_affect_intensity(tokens)\n nrc_hashtag_sentiment_lexicon_unigrams_features = self.nrc_hashtag_sentiment_lexicon_unigrams(tokens)\n nrc_hashtag_sentiment_lexicon_bigrams_features = self.nrc_hashtag_sentiment_lexicon_bigrams(tokens)\n sentiment140_unigrams_features = self.sentiment140_unigrams(tokens)\n sentiment140_bigrams_features = self.sentiment140_bigrams(tokens)\n senti_wordnet_features = self.senti_wordnet(tokens)\n bing_lui_sentiment_lexicons_features = self.bing_lui_sentiment_lexicons(tokens)\n nrc_expanded_lexicon_features = self.nrc_10_expanded(tokens)\n negating_word_list_features = self.negating_words_list(tokens)\n total_number_of_words_features = self.get_total_number_of_words(tokens)\n mpqa_subjectivity_lexicon_features = self.mpqa_subjectivity_lexicon(tokens)\n afinn_sentiment_features = self.afinn_sentiment_scores(tokens)\n # senti_strength_features = self.get_sentistrength(\" \".join(tokens))\n\n features.extend(nrc_hashtag_emotion_features.values()) # 10 features\n features.extend(nrc_affect_intensity_features.values()) # 10 features\n features.extend(nrc_hashtag_sentiment_lexicon_unigrams_features.values()) # 4 features\n features.extend(nrc_hashtag_sentiment_lexicon_bigrams_features.values()) # 4 features\n features.extend(sentiment140_unigrams_features.values()) # 4 features \n features.extend(sentiment140_bigrams_features.values()) # 4 features\n features.extend(senti_wordnet_features.values()) # 4 features\n features.extend(bing_lui_sentiment_lexicons_features.values()) # 2 features\n features.extend(nrc_expanded_lexicon_features.values()) # 10 features\n features.extend(negating_word_list_features.values()) # 1 feature\n features.extend(total_number_of_words_features.values()) # 1 feature\n features.extend(mpqa_subjectivity_lexicon_features.values()) # 2 features\n features.extend(afinn_sentiment_features.values()) # 2 features\n # features.extend(senti_strength_features.values()) # 2 features\n\n return features",
"def train_ngrams(dataset):\n trigram_counts = dict()\n bigram_counts = dict()\n unigram_counts = dict()\n token_count = 0\n ### YOUR CODE HERE\n raise NotImplementedError\n ### END YOUR CODE\n return trigram_counts, bigram_counts, unigram_counts, token_count",
"def get_feature_set_SB(tweet):\n #pos-tag frequencies\n# print \"Tagged words in tweet: \", tweet.tagged_words\n pos_tag_freq = {}\n additional_freq = {}\n for phrase in tweet.tagged_words:\n for word in phrase:\n try:\n tag = word['pos']\n pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# if tag=='PRtinf':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='ADJS':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='ADJ':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='NP':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='DET':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='P':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n if tag in ADJECTIVES:\n additional_freq['adjectives'] = additional_freq.get(tag, 0) + 1\n elif tag in ADVERBS: \n additional_freq['adverbs'] = additional_freq.get(tag, 0) + 1\n elif tag in PRONOUNS:\n additional_freq['pronoun'] = 1\n except KeyError:\n continue\n# print \"Tag frequencies: \", pos_tag_freq\n for key in pos_tag_freq.keys():\n pos_tag_freq[key] = pos_tag_freq[key]*1.0\n #number of adjectives in sentence, number of adverbs in sentence(except ikke), pronoun in sentence(binary) \n #Number of exclamation marks, number of emoticons,\n emoticons = tweet.nrof_happyemoticons+tweet.nrof_sademoticons\n if emoticons>0:\n additional_freq['emoticons'] = emoticons*1.0\n if tweet.nrof_exclamations>0:\n additional_freq['exclamations'] = tweet.nrof_exclamations*1.0\n \n# print \"Additional frequencies: \", additional_freq\n# raw_input(\"Continue?\")\n \n #Concatenate the dicts\n features= dict(pos_tag_freq.items() + additional_freq.items())\n# print \"All features: \", features\n# raw_input(\"Continue?\")\n return features",
"def train(self, corpus): \n # TODO your code here\n # Tip: To get words from the corpus, try\n # for sentence in corpus.corpus:\n # for datum in sentence.data: \n # word = datum.word\n for sentence in corpus:\n prevWord = \"\"\n prevPrevWord = \"\"\n for word in sentence:\n word = word.strip(STRIP_CHARS)\n word = word.lower()\n currentWord = word\n self.unigramCounts[currentWord] += 1\n self.total += 1\n if prevWord != \"\":\n if prevPrevWord != \"\":\n trigram = (prevPrevWord, prevWord, currentWord)\n if trigram not in self.trigramCounts:\n self.continuationCounts[currentWord] += 1\n self.followingCounts[(prevPrevWord, prevWord)] += 1\n self.trigramCounts[trigram] += 1\n self.bigramCounts[(prevWord, currentWord)] += 1\n self.totalBigramCounts += 1\n else:\n self.bigramCounts[(prevWord, currentWord)] += 1\n self.totalBigramCounts += 1\n prevPrevWord = prevWord\n prevWord = currentWord\n else:\n prevWord = currentWord\n self.total += len(self.unigramCounts)",
"def featurize(movies):\n ###TODO \n movies['features'] = \"\" \n get_h = set() \n vocab_dict = {}\n df_dict_return = {}\n tup_list = []\n index_dict = {}\n index_dict_1 = {}\n movie_len = len(movies) \n #print(\"MovieLength::\",movie_len)\n #print(\"MOVIES:::\",movies)\n \n get_h = cal_unique_features(movies) # num_features\n\n vocab_dict = cal_unique_vocab(get_h) # vocab complete\n\n len_vocab = len(get_h)\n \n df_dict_return = cal_unique_docs(get_h,movies) # df(i)\n\n for token in get_h :\n #tup_list.clear()\n #print(\"token_GOTTTTT:::\",token)\n for index,row in movies.iterrows(): \n #print(\"row_got::\",row)\n gen_list = row['tokens']\n #print(\"gen_list::\",gen_list)\n #mov_id = row['movieId'] \n #print(\"mov_id::\",mov_id)\n token_count_1 = Counter(gen_list).most_common()[:1]\n tok = token_count_1[0]\n index_dict_1[index] = tok[1]\n token_count = gen_list.count(token)\n #print(\"token_count::\",token_count)\n tup = (index,token_count)\n #print(\"tuple::\",tup)\n tup_list.append(tup)\n #print(\"LIST_PRINT:::::::::::::\",tup_list)\n index_dict[token] = tup_list\n tup_list = []\n \n \n #print(\"INDEX_DICT:::\",index_dict) # tf(i,d)\n #print(\"INDEX_DICT_1:::\",index_dict_1) # max_k dict per docx\n \n \n for ind, row in movies.iterrows():\n data_list = []\n rows_list = []\n columns_list = []\n gen_list = row['tokens']\n #print(\"TOKENS GOTTT::\",gen_list) \n for gen in gen_list:\n tf = get_tf_value(index_dict,gen,ind)\n #print(\"TF GOTTT::\",tf) \n tf_weight = float( tf / index_dict_1[ind])\n #print(\"tf_weight::\",tf_weight)\n df_weight = float( math.log10( movie_len / df_dict_return[gen] ) )\n #print(\"df_weight::\",df_weight)\n final_tfidf = tf_weight * df_weight\n #print(\"final_tfidf::\",final_tfidf)\n data_list.append(final_tfidf)\n columns_list.append(vocab_dict[gen])\n rows_list.append(0) \n csr = csr_matrix((data_list, (rows_list,columns_list)), shape=(1,len_vocab))\n #print(\"TYPE of CSR GOTT::\",type(csr))\n #print(\"CSR GOTT:::\",csr) \n movies.set_value(ind, 'features', csr)\n \n #print(\"UPDATE movies::\",movies) \n\n return(movies,vocab_dict)\n \n\n pass",
"def _count_vocab(self,raw_documents, fixed_vocab=False):\n if fixed_vocab:\n vocabulary = self.vocabulary_\n else:\n # Add a new value when a new vocabulary item is seen\n vocabulary = defaultdict()\n vocabulary.default_factory = vocabulary.__len__\n\n analyze = super().build_analyzer()\n \n j_indices = []\n indptr = []\n\n values = array.array(str('f'))\n indptr.append(0)\n for doc in raw_documents:\n #doc = tupla[0]\n feature_counter = {}\n #texttlist = doc.split(sep=\" \")\n for feature in analyze(doc):#texttlist:\n try:\n \n # Ignore out-of-vocabulary items for fixed_vocab=True\n feature_idx = vocabulary[feature]\n #print(feature_idx)\n #fti_feature = calc_fti(feature,raw_documents)\n \n if feature_idx not in feature_counter:\n feature_counter[feature_idx] = 1\n else:\n feature_counter[feature_idx] += 1\n #print(feature_counter[feature_idx])\n except KeyError:\n # Ignore out-of-vocabulary items for fixed_vocab=True\n continue\n\n\n j_indices.extend(feature_counter.keys())\n values.extend(feature_counter.values())\n indptr.append(len(j_indices))\n\n if not fixed_vocab:\n # disable defaultdict behaviour\n vocabulary = dict(vocabulary)\n if not vocabulary:\n raise ValueError(\"empty vocabulary; perhaps the documents only\"\n \" contain stop words\")\n\n if indptr[-1] > np.iinfo(np.int32).max: # = 2**31 - 1\n if _IS_32BIT:\n raise ValueError(('sparse CSR array has {} non-zero '\n 'elements and requires 64 bit indexing, '\n 'which is unsupported with 32 bit Python.')\n .format(indptr[-1]))\n indices_dtype = np.int64\n\n else:\n indices_dtype = np.int32\n \n j_indices = np.asarray(j_indices, dtype=indices_dtype)\n indptr = np.asarray(indptr, dtype=indices_dtype)\n \n #print (vocabulary)\n X = sp.csr_matrix((values, j_indices, indptr),\n shape=(len(indptr) - 1, len(vocabulary)),\n dtype=np.float32)\n X.sort_indices() \n \n self.vocabulary_calculated = vocabulary\n\n return vocabulary, X",
"def build_vocab(self, corpus):\n if self.vocabulary_counts != None:\n logger.debug(\"building vocabulary from provided frequency map\")\n vocab = self.vocabulary_counts\n else:\n logger.debug(\"default vocabulary building\")\n super(Skipgram, self).build_vocab(corpus)\n return\n\n # assign a unique index to each word\n self.vocab, self.index2word = {}, []\n\n for word, count in vocab.iteritems():\n v = Vocab()\n v.count = count\n if v.count >= self.min_count:\n v.index = len(self.vocab)\n self.index2word.append(word)\n self.vocab[word] = v\n\n logger.debug(\"total %i word types after removing those with count<%s\" % (len(self.vocab), self.min_count))\n\n if self.hs:\n # add info about each word's Huffman encoding\n self.create_binary_tree()\n if self.negative:\n # build the table for drawing random words (for negative sampling)\n self.make_table()\n # precalculate downsampling thresholds\n self.precalc_sampling()\n self.reset_weights()",
"def preparation(self):\n self.word_freq = defaultdict(int)\n\n for sentence in self.corpus:\n for word in sentence:\n self.word_freq[word] += 1\n\n # self.words decide the index of all the words\n self.words = list(self.word_freq.keys())\n self.T = len(self.words)\n\n # word_index will give index for a given word and vice versa for index_word\n self.word_index = dict([[word, i] for i, word in enumerate(self.words)])\n self.index_word = dict([[i, word] for i, word in enumerate(self.words)])",
"def count_words(self,top_only=True):\n if top_only:\n self.top_skill_list()\n else:\n self.all_skill_list()\n word_counts = Counter(self.skill_list)\n top_n = word_counts.most_common(len(word_counts))\n self.feature = []\n proportion = []\n for i in top_n:\n self.feature.append(i[0])\n proportion.append(i[1])\n self.coff = 1./(np.log(proportion)+1)\n return",
"def _collect_counts(self, instance_list):\n \"\"\" Based on each instance, I augment empirical counts for every word and its BIO label in feature_count_table and for every transition from previous label to current label in transition_count_table.\n All \"rare words\" (those words that appear less than 3 times) are replaced by <UNK>.\n I also add label|START counts.\n \"\"\"\n # Build feature_count_table of V x labels and transition_count_table of labels x labels\n for instance in instance_list: # Set of <(w, pos), l>\n index = 0\n for t in instance.data: # Tuple of (w, pos)\n index = instance.data.index(t)\n # print t[0] # word\n # print instance.label[index] # label\n if t in self.V:\n self.feature_count_table[self.V.index(t)][self.labels.index(instance.label[index])] +=1\n else:\n self.feature_count_table[self.V.index('<UNK>')][self.labels.index(instance.label[index])] +=1\n if index > 0:\n self.transition_count_table[self.labels.index(instance.label[index-1])][self.labels.index(instance.label[index])] += 1\n else:\n self.transition_count_table[len(self.labels)][self.labels.index(instance.label[index])] += 1",
"def train_ngrams(dataset):\n trigram_counts = dict()\n bigram_counts = dict()\n unigram_counts = dict()\n token_count = 0\n\n ### YOUR CODE HERE\n def enterDic(phrase, dict):\n if phrase in dict:\n dict[phrase] += 1\n else:\n dict[phrase] = 1\n\n unigram_counts[word_to_num['UUUNKKK']] = 0\n\n for sentence in dataset:\n enterDic(sentence[1], unigram_counts) # count number of start of sentences\n enterDic((sentence[0], sentence[1]), bigram_counts) # count number of start of sentences\n token_count += 2\n for i in range(2, len(sentence)):\n token_count += 1\n enterDic(sentence[i], unigram_counts)\n enterDic((sentence[i - 1], sentence[i]), bigram_counts)\n enterDic((sentence[i - 2], sentence[i - 1], sentence[i]), trigram_counts)\n ### END YOUR CODE\n return trigram_counts, bigram_counts, unigram_counts, token_count",
"def featurize(self, data):\n \n bag_of_words = []\n\n tokens = data.split()\n\n for i in tokens:\n bag_of_words.append((i, True))\n\n return bag_of_words",
"def bag_of_words(files_data_train,files_data_test):\n\tcount_vector = sklearn.feature_extraction.text.CountVectorizer()\n\t#print count_vector.fit(files_data)\n\tword_train = count_vector.fit_transform(files_data_train)\n\tword_test = count_vector.transform(files_data_test)\n\tprint len(count_vector.get_feature_names())\n\treturn word_train,word_test",
"def propername_featurize(input_data,N, MinFreq,model_choice =\"NGram\"):\n def to_lowercase(text):\n return text.lower()\n\n def remove_URL(text):\n return re.sub(r\"http\\S+\", \"\", text)\n def remove_non_ascii(words):\n new_words = []\n for word in words:\n new_word = unicodedata.normalize('NFKD', word).encode('ascii', 'ignore').decode('utf-8', 'ignore')\n new_words.append(new_word)\n return new_words\n\n def tokenize(text):\n return text.split()\n def remove_stopwords(words):\n new_words = []\n for word in words:\n if word not in stop_word:\n new_words.append(word)\n return new_words\n def detokenize_words(words):\n separator = ' '\n return separator.join(words)\n def preprocess_text(df):\n df['text'] = df['text'].apply(to_lowercase)\n df['text'] = df['text'].apply(remove_URL)\n df['text'] = df['text'].apply(tokenize)\n df['text'] = df['text'].apply(remove_non_ascii)\n df['text'] = df['text'].apply(detokenize_words) \n return df\n def character_ngram(text_matrix, N, MinFreq): #array of non-tokenized text\n #tokenize\n all_tokenized_text = []\n #build all token\n flatten_tokenized_text = []\n for j in text_matrix:\n cur_text = \"\".join(j.split())\n cur_feature = []\n \n for i in range(N[0]-1,N[1]): \n \n for l in range(len(cur_text) - i):\n cur_feature.append(cur_text[l:l+i+1])\n \n all_tokenized_text.append(cur_feature)\n flatten_tokenized_text.extend(cur_feature)\n charfreq = {}\n for i in flatten_tokenized_text:\n if i not in charfreq.keys():\n charfreq[i] = 1\n else:\n charfreq[i] += 1\n selected_feature = []\n for i, item in charfreq.items():\n if item >= MinFreq:\n selected_feature.append(i)\n dim = len(selected_feature)\n encoded_matrix = []\n selected_feature = np.array(selected_feature)\n for i in all_tokenized_text:\n cur_text = np.array(i)\n cur_encoded = np.zeros(dim)\n cur_idx = []\n for j in range(len(cur_text)):\n idx = np.where(selected_feature == cur_text[j]) \n if len(idx[0]) != 0: \n cur_idx.append(idx[0][0])\n #binary character presence \n cur_encoded[cur_idx] = 1\n\n encoded_matrix.append(cur_encoded)\n encoded_matrix = np.array(encoded_matrix)\n\n return encoded_matrix, selected_feature\n def task_specific_featurize(feature_value):\n feature_dic = {\"contain_numerics\":[], \"contain_special_punc\":[],\"contain_inc\":[],\"Small_token_length\":[]}\n special_pun = \"&\\?-:%\"\n company_col = [\"co.\",\"inc.\"]\n def hasNumbers(string):\n return any(char.isdigit() for char in string)\n for i in text_feature:\n if hasNumbers(i):\n feature_dic[\"contain_numerics\"].append(1)\n else:\n feature_dic[\"contain_numerics\"].append(0)\n Spec_Punc = False\n for l in special_pun:\n if i.find(l) != -1:\n feature_dic[\"contain_special_punc\"].append(1)\n Spec_Punc = True\n break\n if Spec_Punc == False:\n feature_dic[\"contain_special_punc\"].append(0)\n Contain_Com = False\n for l in company_col:\n if i.find(l) != -1:\n feature_dic[\"contain_inc\"].append(1)\n Contain_Com = True\n break\n if Contain_Com == False:\n feature_dic[\"contain_inc\"].append(0)\n token_length = len(i.split())\n if token_length <= 1:\n feature_dic[\"Small_token_length\"].append(1)\n else:\n feature_dic[\"Small_token_length\"].append(0)\n\n encoded_matrix = pd.DataFrame(feature_dic).values\n selected_feature = list(feature_dic.keys()) \n return encoded_matrix, selected_feature\n # TODO: Implement featurization of input.\n matrix_processed = preprocess_text(input_data)\n text_feature = matrix_processed[[\"text\"]].values.flatten() \n if model_choice == \"NGram\":\n \n encoded_matrix, selected_feature = 
character_ngram(text_feature, N, MinFreq)\n elif model_choice == \"TS\":\n encoded_matrix, selected_feature = task_specific_featurize(text_feature)\n elif model_choice == \"Combined\":\n\n encoded_matrix_specific, selected_feature_specific = task_specific_featurize(text_feature) \n encoded_matrix_bow, selected_feature_bow = character_ngram(text_feature, N, MinFreq)\n encoded_matrix = np.hstack((encoded_matrix_bow,encoded_matrix_specific))\n selected_feature = list(selected_feature_bow)\n selected_feature.extend(selected_feature_specific)\n \n return encoded_matrix,selected_feature",
"def extractWords(self, inputDataset):\n reviewFile = open(inputDataset, \"r\", encoding=\"utf-8-sig\")\n for record in reviewFile:\n record = record.strip().split(\"\\t\") # tab-delimited .txt file\n self.addUnigrams(int(record[0]), record[1])\n reviewFile.close()",
"def train(self):\n # >>> YOUR ANSWER HERE\n\n fake_docs = []\n fake_words = []\n fake_words_freq = {}\n real_docs = []\n real_words = []\n real_words_freq = {}\n\n # load fake data of the training dataset, store the docs and words\n fake_data = open(self.train_data['fake']).readlines()\n for sentence in fake_data:\n preprocess_sentence = sentence.strip()\n fake_docs.append(preprocess_sentence)\n fake_words.extend(preprocess_sentence.split())\n\n # load real data of the training dataset, store the docs, words and word frequencies.\n real_data = open(self.train_data['real']).readlines()\n for sentence in real_data:\n preprocess_sentence = sentence.strip()\n real_docs.append(preprocess_sentence)\n real_words.extend(preprocess_sentence.split())\n\n # remove stop words if necessary\n if self.REMOVE_STOPWORDS:\n fake_words = [word for word in fake_words if word not in self.stopwords]\n real_words = [word for word in real_words if word not in self.stopwords]\n\n # calculate all words' frequency\n for word in fake_words:\n self.vocabulary.add(word)\n fake_words_freq[word] = fake_words_freq.get(word, 0) + 1\n for word in real_words:\n self.vocabulary.add(word)\n real_words_freq[word] = real_words_freq.get(word, 0) + 1\n\n # pre-calculate the number of all docs, the number of docs per class and words frequency per class for\n # calculation in the training loop.\n n_doc = len(fake_docs) + len(real_docs)\n n_class = {'fake': len(fake_docs), 'real': len(real_docs)}\n big_doc_dict = {'fake': fake_words_freq, 'real': real_words_freq}\n fake_words_num = 0\n real_words_num = 0\n for w in self.vocabulary:\n fake_words_num += fake_words_freq.get(w, 0)\n real_words_num += real_words_freq.get(w, 0)\n words_frequency_per_class = {'fake': fake_words_num, 'real': real_words_num}\n\n # Training\n for c in self.classes:\n self.logprior[c] = math.log(n_class[c] / n_doc)\n for w in self.vocabulary:\n count_w_c = big_doc_dict[c].get(w, 0)\n log_likelihood = math.log((count_w_c + 1) / (len(self.vocabulary) + words_frequency_per_class[c]))\n self.loglikelihood[(w, c)] = log_likelihood\n # >>> END YOUR ANSWER",
"def train(self, corpus):\n # TODO your code here\n # Tip: To get words from the corpus, try\n # for sentence in corpus.corpus:\n # for datum in sentence.data:\n # word = datum.word\n unigramCounts = {}\n total = 0\n for sentence in corpus.corpus:\n for datum in sentence.data:\n token = datum.word\n unigramCounts[token] = unigramCounts.get(token, 0) + 1\n total += 1 # token number\n\n # save word count and total for add-one in the next test part\n self.count = unigramCounts\n self.total = total",
"def word_counts(\n df:pd.DataFrame, \n free_text_col:str, \n out_prefix:str,\n words_to_keep:int=0,\n min_count:int=0):\n \n vectorisor = CountVectorizer(strip_accents=\"unicode\", lowercase=True)\n x = vectorisor.fit_transform(df[free_text_col])\n counts = x.sum(axis=0).tolist()[0]\n words = vectorisor.get_feature_names()\n\n# Creates a df from bow index, words and count\n bow_reference = (pd.DataFrame(zip(words, counts), columns=[\"words\", \"counts\"])\n .reset_index()\n .rename(columns={\"index\":\"bow_key\"})\n .sort_values(\"counts\",ascending=False))\n if words_to_keep==min_count==0:\n min_count = 2\n elif min_count==0:\n min_count = bow_reference.iloc[words_to_keep][\"counts\"]\n elif words_to_keep > bow_reference.shape[0]:\n min_count = -1\n else:\n min_count = min(min_count, bow_reference.iloc[words_to_keep][\"counts\"])\n \n bow_reference[\"top_n\"] = bow_reference[\"counts\"] >= min_count\n\n # appends top_n word columns to df\n keys = bow_reference[bow_reference[\"top_n\"]][\"bow_key\"]\n \n out = df.merge(\n pd.DataFrame(\n x[:, keys].toarray(),\n columns=[f\"{out_prefix}{key}\" for key in keys]),\n left_index=True,\n right_index=True)\n \n return out, bow_reference[bow_reference[\"top_n\"]]",
"def build_dataset(words):\n count = []\n # count.extend(collections.Counter(words).most_common(n_words - 1))\n count.extend(collections.Counter(words).most_common())\n dictionary = dict()\n for word, _ in count:\n dictionary[word] = len(dictionary)\n data = list()\n # unk_count = 0\n for word in words:\n index = dictionary.get(word, 0)\n # if index == 0: # dictionary['UNK']\n # unk_count += 1\n data.append(index)\n # count[0][1] = unk_count\n reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))\n data = [data[::2],data[1::2]]\n new_data = list()\n for i in range(len(data[0])):\n new_data.append([data[0][i],data[1][i]])\n data = new_data\n vocabulary_size = len(dictionary)\n print(\"\\n\\ndictionary size = \")\n print(len(dictionary))\n return data, count, dictionary, reversed_dictionary, vocabulary_size",
"def make_word_to_freq(self):\n\t\tword_to_freq = {}\n\t\tdocuments = self.tokenized_documents[\"train\"]\n\t\tfor document in documents:\n\t\t\tfor word in document:\n\t\t\t\tif not word in self.worddict: # make sure we have not found one of the pre-defined words\n\t\t\t\t\tword_to_freq[word] = word_to_freq.get(word, 0) + 1\n\t\t\n\t\treturn word_to_freq"
] | [
"0.67787427",
"0.6616555",
"0.6556645",
"0.63850594",
"0.63547343",
"0.6336458",
"0.63022137",
"0.6277686",
"0.62525123",
"0.6210935",
"0.61823857",
"0.6177107",
"0.61616325",
"0.6132681",
"0.6123291",
"0.6087143",
"0.60707706",
"0.60529006",
"0.6035648",
"0.6004986",
"0.59987444",
"0.5945014",
"0.59323806",
"0.5932378",
"0.5928413",
"0.59150726",
"0.5911156",
"0.5909867",
"0.590619",
"0.58874565"
] | 0.67658335 | 1 |
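A small, self-contained sketch of how the countize function from the row above might be driven; the clean() stub, the industry label "software" and the sample phrase are hypothetical stand-ins, since the clean() helper used by the original is not shown in this row.

import collections

def clean(text):
    # hypothetical stand-in for the clean() helper the row's code assumes
    return text.lower()

def countize(word, ind, count_words, features):
    # as in the row above: appends unigrams, bigrams and trigrams per industry
    word = clean(word)
    word = word.split()
    if len(word) > 1:
        for i in range(1, len(word)):
            bigram = (word[i-1], word[i])
            count_words[ind].append(bigram)
            features.append(bigram)
    if len(word) > 2:
        for i in range(2, len(word)):
            trigram = (word[i-2], word[i-1], word[i])
            count_words[ind].append(trigram)
            features.append(trigram)
    for i in range(len(word)):
        unigram = word[i]
        count_words[ind].append(unigram)
        features.append(unigram)
    return count_words, features

word_counts = collections.defaultdict(list)
feature_list = []
word_counts, feature_list = countize("machine learning engineer", "software", word_counts, feature_list)
# word_counts["software"] now holds the n-grams; feature_list accumulates them across calls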
Gathers and pickles vectors from a given csv file. | def gather_and_save_vectors(path, words_vec = collections.defaultdict(list), features = []):
with open(path, 'rt', encoding='mac_roman') as csvfile:
csvreader = csv.reader(csvfile, delimiter=' ', quotechar='"')
for row in csvreader:
words_vec, features = countize(row[3], row[2], words_vec, features)
try:
words_vec, features = countize(row[6], row[2], words_vec, features)
except:
pass
pickle.dump(words_vec, open("ind_vectors.data", "wb"))
pickle.dump(features, open("i_features.data", "wb"))
return words_vec, features | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def loadCSV(input_file):",
"def csvToVec(filename):\n X = csvToArray(filename)\n assert X.shape[0] == 1, 'file %s must have 1 row' % filename\n y = X[0,:]\n return y",
"def load_file(csv_file):\n vehicles = []\n # Opens the file and reads it row for row\n with open(csv_file, 'rb') as csv_open_file:\n reader = csv.reader(csv_open_file)\n for row in reader:\n # Reads in vehicles\n if len(row) != 1:\n name, x, y, orientation, length = row\n vehicles.append(Vehicle(name, int(x), int(y), orientation, int(length)))\n # Read size of the grid\n else:\n n = int(row[0])\n return Grid(set(vehicles), n)",
"def from_csv(cls, path: typing.Union[str, Path]) -> \"VLEPoints\":\n frame = pandas.read_csv(path)\n\n if list(frame.columns) != VLE_COLUMNS:\n raise ValueError(\"Incorrect columns: %s\" % list(frame.columns))\n\n points = []\n\n d = frame.iloc[0].to_dict()\n components = [getattr(Components, d[\"first_component\"]),\n getattr(Components, d[\"second_component\"])]\n\n for _, row in frame.iterrows():\n d = row.to_dict()\n points.append(VLEPoint.from_dict(d))\n\n return VLEPoints(components=components, data=points)",
"def fromCSV(self, filename):\n with open(filename, newline = '') as csvfile:\n read = csv.reader(csvfile, delimiter = ',')\n param = []\n for row in read:\n param.append(row)\n self.c = complex(param[0][0])\n self.xmin = int(param[1][0])\n self.xmax = int(param[2][0])\n self.xlen = int(param[3][0])\n self.ymin = int(param[4][0])\n self.ymax = int(param[5][0])\n self.ylen = int(param[6][0])\n self.fs = np.vectorize(param[7][0])\n self.plane = np.loadtxt(\"plane.csv\", delimiter = ',', dtype = 'int') #Read plane from plane.csv file ",
"def load_d(prefix):\n vel_x = np.genfromtxt(file('%s_x.csv' % prefix), delimiter=',')\n vel_y = np.genfromtxt(file('%s_y.csv' % prefix), delimiter=',')\n\n # make a 3d height x width x 2 matrix to hold the vectors\n vel = np.zeros(list(vel_x.shape) + [2])\n vel[:, :, 0] = vel_y # note, this y here is correct--and it's important it be this order\n vel[:, :, 1] = vel_x\n return vel",
"def open_file(file_path):\r\n\r\n global vector_X\r\n global training_data_matrix\r\n global row_number_of_data_matrix\r\n global single_row\r\n\r\n global training_g1_list\r\n global training_g2_list\r\n global training_g3_list\r\n\r\n global test_g1_list\r\n global test_g2_list\r\n global test_g3_list\r\n\r\n # open file\r\n with open(file_path, \"r\") as csvfile:\r\n\r\n line_number = 0\r\n index_of_training_matrix = 0\r\n\r\n # read all rows of csv file\r\n reader = csv.reader(csvfile)\r\n\r\n next(reader, None) # skip the headers\r\n\r\n for row in reader:\r\n\r\n row = row[0]\r\n\r\n # read line split by comma and convert into float numbers\r\n single_row = [float(x) for x in row.split(\";\")]\r\n\r\n # take the first 20% of the data as test data\r\n # and the remaining as the training data\r\n if line_number < row_number_of_test_data_matrix:\r\n\r\n test_data_matrix[line_number] = [1.0] + single_row[:-3]\r\n\r\n test_g1_list[line_number] = single_row[-3]\r\n test_g2_list[line_number] = single_row[-2]\r\n test_g3_list[line_number] = single_row[-1]\r\n\r\n else:\r\n training_data_matrix[index_of_training_matrix] = [1.0] + single_row[:-3]\r\n\r\n training_g1_list[index_of_training_matrix] = single_row[-3]\r\n training_g2_list[index_of_training_matrix] = single_row[-2]\r\n training_g3_list[index_of_training_matrix] = single_row[-1]\r\n\r\n index_of_training_matrix += 1\r\n\r\n if line_number == (row_number_of_data_matrix - 1):\r\n break\r\n\r\n line_number += 1\r\n\r\n return test_data_matrix, training_data_matrix, \\\r\n test_g1_list, test_g2_list, test_g3_list, \\\r\n training_g1_list, training_g2_list, training_g3_list",
"def from_csv(self, filename):\n\t\tpoints = np.genfromtxt(filename, delimiter=\",\")\n\t\tassert points.shape[1] == 2\n\n\t\tself.N = points.shape[0]\n\t\tself.points = points\n\t\tself.original_points = points",
"def loadCSVSeeds(self, csvFilePath):\n labels = []\n with open(csvFilePath) as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='|')\n for row in reader:\n labels.append([row[0], row[1], [float(row[2]), float(row[3]), float(row[4]) ]])\n print(csvFilePath + \": labels loaded\")\n return labels",
"def collect_data(self):\n categories = []\n list_of_feature_lists = []\n feature_sets = set()\n with open(self.csv_path, \"r\") as f:\n reader = csv.DictReader(f)\n # collecting some stats for FDR calculation:\n self.PSM_count = 0\n self.decoy_count = 0\n\n if self[\"dump_svm_matrix\"]:\n self.init_svm_matrix_dump()\n additional_matrix_info = []\n\n for i, row in enumerate(\n sorted(\n reader,\n reverse=self[\"bigger_scores_better\"],\n key=lambda d: float(d[self.col_for_sorting]),\n )\n ):\n\n features = self.row_to_features(row)\n\n if tuple(features) in feature_sets:\n continue\n feature_sets.add(tuple(features))\n\n category, psm_FDR = self.get_psm_category(row)\n\n list_of_feature_lists.append(features)\n categories.append(category)\n\n if self[\"dump_svm_matrix\"]:\n label = -1 if row_is_decoy(row) else 1\n sequence = \"{0}.{1}#{2}.{3}\".format(\n row[\"Sequence Pre AA\"].strip(),\n row[\"Sequence\"].strip(),\n row[\"Modifications\"].strip(),\n row[\"Sequence Post AA\"].strip(),\n )\n additional_matrix_info.append(\n {\n \"psm_id\": row[\"Spectrum Title\"].strip(),\n \"label\": label,\n \"scannr\": row[\"Spectrum Title\"].strip().split(\".\")[-2],\n \"peptide\": sequence,\n \"proteins\": self.parse_protein_ids(row[\"Protein ID\"]),\n }\n )\n\n if i % 1000 == 0:\n score_val = float(row[self.col_for_sorting])\n msg = (\n \"Generating feature matrix from input csv \"\n \"(line ~{0}) with score {1} and FDR \"\n \"{2}\".format(i, score_val, psm_FDR)\n )\n print(msg, end=\"\\r\")\n\n # All data points are collected in one big matrix, to make standardization possible\n print(\"\\nConverting feature matrix to NumPy array...\")\n X_raw = np.array(list_of_feature_lists, dtype=float)\n\n print(\"Replacing empty/NaN values with the mean of each column...\")\n self.nan_replacer = Imputer()\n self.nan_replacer.fit(X_raw)\n X_raw = self.nan_replacer.transform(X_raw)\n # Standardize input matrix to ease machine learning! Scaled data has zero mean and unit variance\n print(\"Standardizing input matrix...\")\n self.scaler = SCALER.fit(X_raw)\n self.X = self.scaler.transform(X_raw)\n self.categories = np.array(categories)\n print()\n\n if self[\"dump_svm_matrix\"]:\n print(\"Dumping SVM matrix to\", self[\"dump_svm_matrix\"])\n\n for i, matrix_row in enumerate(self.X):\n matrix_row_info = additional_matrix_info[i]\n self.dump_svm_matrix_row(\n row=list(matrix_row),\n psm_id=matrix_row_info[\"psm_id\"],\n label=matrix_row_info[\"label\"],\n scannr=matrix_row_info[\"scannr\"],\n peptide=matrix_row_info[\"peptide\"],\n proteins=matrix_row_info[\"proteins\"],\n )\n\n print(\"Dumped SVM matrix to\", self[\"dump_svm_matrix\"])\n return",
"def get_data(self, csv_file):\n pass",
"def buildFromCSV(self, filepath):\r\n\t\t# TODO: Implement\r\n\t\traise NotImplementedError('This function has not yet been implemented.')\r\n\t\t# with open(filepath, 'r') as scheduleFile:\r\n\t\t# \t# Reusing Parser.parseCSVs(), but not in the intended way; ok because validation is not yet implemented\r\n\t\t# \t# TODO: Split Parser.parseCSVs() into separate people/set file parsers \r\n\t\t# \tn, people, setConstraints = Parser.parseCSVs(-1, scheduleFile, [])\r",
"def read_csv():",
"def load_from_csv(self, file_name, parameters_size):\n \n with open(file_name) as csvDataFile:\n csvReader = csv.reader(csvDataFile)\n for row in csvReader:\n simulation_name = str(row[0:1])[2:-2] # Remove brakets and quotation marks \n result_name = str(row[1:2])[2:-2] # Remove brakets and quotation marks \n params = np.float64(row[2:parameters_size+2])\n res = np.float64(row[parameters_size+2:])\n self.add_sample(parameters = params, simulation_name = simulation_name, result = res, result_name = result_name)",
"def from_csv(self, path_to_load):\n import pandas as pd\n\n df = pd.read_csv(path_to_load)\n df = df.loc[:, ~df.columns.str.contains('^Unnamed')] # Remove unnnamed\n\n self.results['cids'] = list()\n self.results['differences'] = list()\n self.results['experimental_values'] = list()\n\n pd_dict = df.to_dict()\n length = len(pd_dict['cids'])\n for cid in [pd_dict['cids'][i] for i in range(0, length)]:\n self._results['cids'].append(cid)\n for cid in [pd_dict['differences'][i] for i in range(0, length)]:\n self._results['differences'].append(cid)\n for cid in [pd_dict['experimental_values'][i]\n for i in range(0, length)]:\n self._results['experimental_values'].append(cid)",
"def load_csv_model(filename) -> tuple:\n dat_sci = pd.read_csv(resources_folder(filename), index_col=0)\n commenter('data from ' + filename, lambda: print(dat_sci))\n\n ind = dat_sci.index\n # commenter('index', lambda: print(ind))\n col = dat_sci.columns\n # commenter('columns', lambda: print(col))\n # self.data = np.asmatrix(dat_sci.values)\n # commenter('data', lambda: print(self.data))\n # print(type(dat_sci))\n\n return dat_sci, ind, col",
"def loadSensitivity(tradeTbl, sensiTbl, filepath, vectorField):\n\n df = pd.read_csv(filepath)\n df[\"AsOfDate\"] = pd.to_datetime(df[\"AsOfDate\"]).dt.date\n df[vectorField] = getArrayValue(df[vectorField])\n\n if \"CashflowKey\" not in df.columns:\n df[\"CashflowKey\"] = \"-\"\n\n tradeTbl.load_pandas(df[tradeTbl.columns])\n sensiTbl.load_pandas(df[sensiTbl.columns])",
"def load_data(filename):\n #Admittedly copy-pasted from Heredity project cuz I'm resourceful like that\n #Makes 2 lists, one for evidence and one for labels\n evidence = []\n labels = []\n #Open csv file\n with open(\"shopping.csv\") as f:\n reader = csv.reader(f)\n next(reader)\n #Iterate through user rows of file\n for row in reader:\n i = 0\n tmp_list = []\n for column in row:\n if i in [0,2,4,11,12,13,14]:\n column = int(column)\n if i in [1,3,5,6,7,8,9]:\n column = float(column)\n if i == 10:\n if column == \"Jan\":\n column = 0\n if column == \"Feb\":\n column = 1\n if column == \"Mar\":\n column = 2\n if column == \"Apr\":\n column = 3\n if column == \"May\":\n column = 4\n if column == \"June\":\n column = 5\n if column == \"Jul\":\n column = 6\n if column == \"Aug\":\n column = 7\n if column == \"Sep\":\n column = 8\n if column == \"Oct\":\n column = 9\n if column == \"Nov\":\n column = 10\n if column == \"Dec\":\n column = 11\n if i in [15,16]:\n if column == \"Returning_Visitor\" or column == \"TRUE\":\n column = 1\n else:\n column = 0\n if i == 17:\n if column == \"TRUE\":\n column = 1\n else:\n column = 0\n labels.append(column)\n else:\n tmp_list.append(column)\n i+=1\n evidence.append(tmp_list)\n \n return (evidence,labels)",
"def from_csv(filename: str) -> List['Parameter']:\n parameters = []\n lines = FileUtiles.csvLoad(filename)\n keys = lines[0]\n for line_idx in range(1, len(lines)):\n values = lines[line_idx]\n parameters.append(Parameter(*((keys[idx], Parameter.eval(values[idx])) for idx in range(len(keys)))))\n return parameters",
"def load_data(filename):\n with open(\"./shopping.csv\", \"r\") as f:\n reader = csv.reader(f)\n next(reader)\n evidence_raw = []\n labels_raw = []\n for row in reader:\n evidence_raw.append(row[:-1])\n labels_raw.append(row[-1])\n evidence = []\n labels = []\n for row1, row2 in zip(evidence_raw, labels_raw):\n evidence.append(oneHotEncode_Evi(row1))\n labels.append(oneHotEncode_labels(row2))\n return (evidence, labels)",
"def get_training_data():\n features = []\n labels = []\n\n with open('data.csv') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n\n rows = [line for line in csv_reader]\n random.shuffle(rows)\n\n for vector in rows:\n feature_vector = [float(vector[i]) for i in range(4)]\n features.append(feature_vector)\n labels.append(encode_label(vector[4]))\n\n normalise_features(features)\n\n return features, labels",
"def printFeatureVector():\r\n #fp.close()\r\n \"\"\"\r\n fp = csv.reader(open('corpus.csv', 'r'), delimiter=',')\r\n for row in fp:\r\n tweet = row[1]\r\n processedTweet = processTweet(tweet)\r\n featureVector = getFeatureVector(processedTweet)\r\n print featureVector\r\n\"\"\"",
"def __init__(self, csv_path):\r\n # Transforms\r\n self.to_tensor = transforms.ToTensor()\r\n # Read the csv file\r\n self.data_info = pd.read_csv(csv_path, header=None)\r\n # First column contains the image paths\r\n self.image_arr = np.asarray(self.data_info.iloc[:, 0])\r\n # Second column is the labels\r\n self.label_arr = [np.asarray(self.data_info.iloc[:, 1])]\r\n # Third column is for an operation indicator\r\n #self.operation_arr = np.asarray(self.data_info.iloc[:, 2])\r\n # Calculate len\r\n self.data_len = len(self.data_info.index)",
"def load_vecs():\n global VECTORIZER\n global CECTORIZER\n\n v_file = os.path.join(TMP_DIR, 'vectorizer.pickle')\n d_file = os.path.join(TMP_DIR, 'dectorizer.pickle')\n f_file = os.path.join(TMP_DIR, 'freq.pickle')\n\n if os.path.isfile(v_file) and os.path.isfile(d_file):\n with open(v_file, 'rb') as f:\n VECTORIZER = pickle.load(f)\n with open(d_file, 'rb') as f:\n CECTORIZER = pickle.load(f)\n return True\n\n return False",
"def load_from_file_csv(cls):\n if path.exists(cls.__name__ + \".csv\") is False:\n return []\n with open(cls.__name__ + \".csv\", \"r\", newline='') as f:\n listofinstances = []\n reader = csv.DictReader(f)\n for row in reader:\n for key, value in row.items():\n row[key] = int(value)\n listofinstances.append(cls.create(**row))\n return listofinstances",
"def csvRowToVector(self, csvRow, questionIds, csvTitles):\n if len(csvTitles) != len(csvRow): \n raise ValueError(\"Length of titles list is different to that of csvRow\")\n \n numFields = len(questionIds)\n egoRow = numpy.zeros(numFields) \n\n for i in range(0, numFields): \n try: \n fieldIndex = csvTitles.index(questionIds[i][0])\n except: \n logging.debug((\"Field not found: \" + questionIds[i][0]))\n raise \n \n if questionIds[i][1] == 0:\n try: \n egoRow[i] = float(csvRow[fieldIndex])\n except: \n print((\"Field has missing values: \" + questionIds[i][0]))\n raise \n elif questionIds[i][1] == 1:\n egoRow[i] = self.__markMissingValues(csvRow[fieldIndex], 0)\n #This is a missing value we do not want replaced with mean or mode\n #e.g. with alters. \n elif questionIds[i][1] == 2: \n egoRow[i] = self.__markMissingValues(csvRow[fieldIndex], -1)\n else:\n raise ValueError(\"Problem with questionIds field: \" + str(questionIds[i][0]))\n \n return egoRow",
"def load_data(csv_filename):\n data = np.genfromtxt(csv_filename, delimiter=\";\", skip_header=1, usecols=range(11))\n return data",
"def loadCalibrationPoints(self):\n\n with open('cali_points.csv', 'rb') as csvfile:\n csvreader = csv.reader(csvfile, delimiter=\",\", quotechar=\"|\")\n i = 0\n\n for row in csvreader:\n j = 0\n for col in row:\n \n if i < 5:\n self.rgb_click_points[i][j] = int(col)\n j += 1\n if j == 2:\n j = 0\n elif i > 4 :\n self.depth_click_points[i-5][j] = int(col)\n j += 1\n if j ==2:\n j = 0\n i+=1\n self.cameraCalibration()\n pass",
"def load_products_data(connection, csvfile):\n insert_sql = 'insert into products (id, description, genres) ' \\\n 'values (%s, %s, %s)'\n load_data(connection, insert_sql, get_data_from_file(csvfile))",
"def loadVector(vector):\n expVecCmmd = 'v.out.ascii format=standard input=' + vector\n# JL p = Popen(expVecCmmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)\n p = Popen(expVecCmmd, shell=True, stdin=PIPE, stdout=PIPE,\n stderr=STDOUT, close_fds=False)\n vectorAscii = p.stdout.read().strip('\\n').split('\\n')\n l = 0\n while 'ORGANIZATION' not in vectorAscii[l]:\n l += 1\n while ':' in vectorAscii[l]:\n l += 1\n v = []\n while l < len(vectorAscii):\n line = vectorAscii[l].split()\n if line[0] in ['L', 'B', 'A']:\n skip = len(line) - 2\n vertices = int(line[1])\n l += 1\n v.append([])\n for i in range(vertices):\n v[-1].append(map(float, vectorAscii[l].split()[:2]))\n l += 1\n l += skip\n elif line[0] in ['P', 'C', 'F', 'K']:\n skip = len(line) - 2\n vertices = int(line[1])\n l += 1\n for i in range(vertices):\n l += 1\n l += skip\n else:\n grass.fatal(_(\"Problem with line: <%s>\") % vectorAscii[l])\n if len(v) < 1:\n grass.fatal(_(\"Zero lines found in vector map <%s>\") % vector)\n return v"
] | [
"0.6888591",
"0.6667055",
"0.6447633",
"0.6441823",
"0.6416773",
"0.638443",
"0.63685536",
"0.62376577",
"0.6235755",
"0.62333155",
"0.6210472",
"0.6201278",
"0.6198714",
"0.6145814",
"0.6137962",
"0.6124991",
"0.61010146",
"0.6021219",
"0.60208184",
"0.60184103",
"0.6010493",
"0.60100734",
"0.60063094",
"0.5934973",
"0.59273714",
"0.5885003",
"0.58822215",
"0.5877646",
"0.58712393",
"0.5857909"
] | 0.67931163 | 1 |
Returns ngrams of a given role. Role can be a string or a tuple of strings. (In the latter case they are already assumed to be stripped of unnecessary punctuation, certain non-alphanumeric characters and capitalisation). | def n_grammize(role):
ngrams = []
if isinstance(role,str):
role = role.lower()
role = role.split()
if len(role)>2:
for i in range(2, len(role)):
ngrams.append((role[i-2], role[i-1], role[i]))
if len(role)>1:
for i in range(1, len(role)):
ngrams.append((role[i-1], role[i]))
for i in range(len(role)):
ngrams.append(role[i])
return ngrams | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _ngrams(self, string_):\n def find_ngrams(input_list, n):\n return zip(*[input_list[i:] for i in range(n)])\n\n ngrams = []\n tokens = string_.split()\n\n for size in range(1, self._ngram_range + 1):\n tuples = find_ngrams(tokens, size)\n concatenated = [\"_\".join(tuple_) for tuple_ in tuples]\n ngrams.extend(concatenated)\n\n return \" \".join(ngrams)",
"def ngrams(name_string, n=3):\n\n string = re.sub(r'[,-./]|\\sBD', r'', name_string)\n n_grams = zip(*[string[i:] for i in range(n)])\n return [''.join(n_gram) for n_gram in n_grams]",
"def ngram(self,phrase,n,unity=\"w\"):\n return self._support.ngram(phrase,n,unity)",
"def get_ngrams(seq, n):\n return",
"def get_ngrams(s, ngram_range=1):\n # tokens = s.split()\n # return filter(lambda token: len(token)>1, tokens)\n # return bigrams(s.split()) # NLTK bigrams method\n words = s.split()\n return [' '.join(words[i:i+ngram_range]) for i in range(len(words)-1)]",
"def ngrams(text, n):\n grams = zip(*[text[i:] for i in range(n)])\n return [''.join(gram) for gram in grams]",
"def ngramize(items: List[str], ngram_range=(1, 1)) -> Generator[List[str], Any, None]:\n\n ngrams = []\n ranges = [(0, i) for i in range(ngram_range[0], ngram_range[1] + 1)]\n for r in ranges:\n ngrams += list(zip(*[items[j:] for j in range(*r)]))\n\n formatted_ngrams = [' '.join(item) for item in ngrams]\n\n yield formatted_ngrams",
"def ngrams(self, n=1, uncased=False, filter_fn=None, as_strings=True):\n\n def _skip(gram):\n if not filter_fn:\n return False\n return filter_fn(gram)\n\n words = self.words(True)\n ngrams = [(s, e + 1)\n for s in range(len(words))\n for e in range(s, min(s + n, len(words)))\n if not _skip(words[s:e + 1])]\n\n # Concatenate into strings\n if as_strings:\n ngrams = ['{}'.format(' '.join(words[s:e])) for (s, e) in ngrams]\n\n return ngrams",
"def ngrams(words, n=1):\n return [tuple(words[j:j + n]) for j in range(len(words) - n + 1)]",
"def ngrams(self, n=1, uncased=False, filter_fn=None, as_strings=True):\n def _skip(gram):\n if not filter_fn:\n return False\n return filter_fn(gram)\n\n words = self.words(uncased)\n ngrams = [(s, e + 1)\n for s in range(len(words))\n for e in range(s, min(s + n, len(words)))\n if not _skip(words[s:e + 1])]\n\n # Concatenate into strings\n if as_strings:\n ngrams = ['{}'.format(' '.join(words[s:e])) for (s, e) in ngrams]\n\n return ngrams",
"def ngramas(n, string):\n\n ngrams = []\n i = 0\n while i + n < len(string):\n ngrams.append(string[i:i + n])\n i += 1\n\n return ngrams",
"def ngrams(self):\n return self.__ngrams(())",
"def getgrams(text, tokendict):\n n = len(text)\n grams = []\n # Get lower-case of words\n if n >= 1:\n grams.append(tokendict[text[0]].lemma.lower())\n grams.append(tokendict[text[-1]].lemma.lower())\n grams.append(tokendict[text[0]].pos)\n grams.append(tokendict[text[-1]].pos)\n if n >= 2:\n token = tokendict[text[0]].lemma.lower() \\\n + ' ' + tokendict[text[1]].lemma.lower()\n grams.append(token)\n token = tokendict[text[-2]].lemma.lower() \\\n + ' ' + tokendict[text[-1]].lemma.lower()\n grams.append(token)\n return grams",
"def ngrams_(text, n):\n return zip(*[text[i:] for i in range(n)])",
"def get_grams(candidate, n):\n words = candidate.split(' ')\n # print(words)\n grams = list()\n for i in range(len(words) - n + 1):\n # print(words[i:i+n])\n grams.append(' '.join(words[i:i+n]))\n return grams",
"def iter_ngrams(self, sentence, n):\n return [tuple(sentence[i : i+n]) for i in range(len(sentence)-n+1)]",
"def ngrams(text, n):\n return chain(*[ngrams_(text, i) for i in range(n + 1)])",
"def word_ngrams(sent, n):\n\t\n\t# EXAMPLES \n\t# > word_ngrams(tokenize('hello world'), 1)\n\t# [('hello',), ('world',)]\n\t# > word_ngrams(tokenize('hello world'), 2)\n\t# [('<s>', 'hello'), ('hello', 'world'), ('world', '</s>')]\n\n\t# YOUR CODE HERE\n\ttokenized_sent = tokenize(sent)\n\tif n != 1:\n\t\ttokenized_sent.insert(0, '<s>')\n\t\ttokenized_sent.append('</s>')\n\treturn [tuple(tokenized_sent[i:i + n]) for i in range(0, len(tokenized_sent)-n+1)]",
"def sent_to_n_grams(sent: str, n: int) -> Iterator[Tuple[str, ...]]:\n\n words = [word for word in sent.split() if word not in string.punctuation]\n\n rows = [words[i:] for i in range(n)]\n\n return zip(*rows)",
"def n_grams(tokens, n=1):\n shiftToken = lambda i: (el for j,el in enumerate(tokens) if j>=i)\n shiftedTokens = (shiftToken(i) for i in range(n))\n tupleNGrams = zip(*shiftedTokens)\n return tupleNGrams",
"def n_grams(tokens, n):\n return zip(*[tokens[i:] for i in range(n)])",
"def make_ngrams(texts,n,ngram_mod):\r\n return [turnmod(doc,n,ngram_mod) for doc in texts]",
"def range_ngrams(tokens, ngramRange=(1,2)):\n return chain(*(n_grams(tokens, i) for i in range(*ngramRange)))",
"def everygrams(seq):\n for n in range(1, len(seq) + 1):\n for ng in nltk.util.ngrams(seq, n):\n yield ng",
"def create_ngrams(self, tokens):\n ngrams = []\n for i in range(len(tokens)- self.N + 1):\n ngrams.append(tuple(tokens[i:i+self.N]))\n return ngrams",
"def get_lemma_ngrams(s, t, i, ngram_size):\n ngrams = {\"uni\":1, \"bi\":2, \"tri\":3}\n if i < s.length-(ngrams[ngram_size]-1):\n lemma_ngrams = []\n for j in range(ngrams[ngram_size]):\n if s.nodes[i+j].lemma:\n lemma_ngrams.append(s.nodes[i+j].lemma[0]) #only keeps the first lemma\n else:\n lemma_ngrams.append(s.nodes[i+j].word + \"*\") #no lemma, word form* instead\n if len(lemma_ngrams) > 1:\n return tuple(lemma_ngrams)\n return lemma_ngrams[0] #or better to keep them all?\n else:\n return []",
"def n_grama(sentence, n):\n t = tokenize(sentence)\n n_grams = zip(*[t[i:] for i in range(n)])\n return list(map(lambda n_gram: ' '.join(n_gram), n_grams))",
"def ngrams(sequence, n):\n # credit: http://stackoverflow.com/questions/2380394/simple-implementation-of-n-gram-tf-idf-and-cosine-similarity-in-python\n sequence = list(sequence)\n count = max(0, len(sequence) - n + 1)\n return [tuple(sequence[i:i+n]) for i in range(count)]",
"def get_ngrams(tokens, min_n, max_n):\n all_ngrams = list()\n n_tokens = len(tokens)\n for i in range(n_tokens):\n for j in range(i + min_n, min(n_tokens, i + max_n) + 1):\n all_ngrams.append(\" \".join(tokens[i:j]))\n return all_ngrams",
"def get_ngrams(stats,s,t,i):\n #lemma ngrams\n ngram_sizes = [\"bi\", \"tri\"]\n for ngram_size in ngram_sizes:\n lm_ngram = get_lemma_ngrams(s, t, i, ngram_size)\n if lm_ngram:\n put_feature_value_list(stats,\"lemma_\" + ngram_size + \"gr\", lm_ngram)\n\n #POS and deprel bigrams\n if i < s.length-1:\n put_feature_value_list(stats,\"deprels_bigr\", (t.deprel,s.nodes[i+1].deprel))\n put_feature_value_list(stats,\"pos_bigr\", (t.pos,s.nodes[i+1].pos))\n \n #POS and deprel trigrams\n if i < s.length-2:\n put_feature_value_list(stats,\"deprels_trigr\", (t.deprel, s.nodes[i+1].deprel, s.nodes[i+2].deprel))\n put_feature_value_list(stats,\"pos_trigr\", (t.pos, s.nodes[i+1].pos, s.nodes[i+2].pos))\n\n return stats"
] | [
"0.61402243",
"0.6019998",
"0.60045886",
"0.59341073",
"0.59319186",
"0.58735085",
"0.58025056",
"0.5800501",
"0.57737285",
"0.57707524",
"0.5729101",
"0.5727403",
"0.5667416",
"0.5612965",
"0.55376065",
"0.5521605",
"0.5495334",
"0.5429406",
"0.54199857",
"0.54048324",
"0.5387045",
"0.5372496",
"0.53720886",
"0.53475124",
"0.53392833",
"0.532566",
"0.53255844",
"0.52864474",
"0.52654815",
"0.5258329"
] | 0.8349167 | 0 |
Uses the featurize function on a vector. | def feature_vector(features, vector):
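    # drop duplicate feature names, then let featurize build the feature vector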
clean_features = set(features)
new_features_vector = featurize(vector,clean_features)
return new_features_vector | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def transform(self, X):\n featurizers = [self.featurizer1, self.featurizer2, self.featurizer3, self.featurizer4, self.featurizer5,\n self.featurizer6, self.featurizer7, self.featurizer8, self.featurizer9, self.featurizer10]\n fvs = []\n for datum in X:\n [fv] = [f(datum) for f in featurizers if f is not None]\n fvs.append(fv)\n return np.array(fvs).astype(float)",
"def transform(self, X):\n featurizers = [self.featurizer1, self.featurizer2, self.featurizer3, self.featurizer4, self.featurizer5,\n self.featurizer6, self.featurizer7, self.featurizer8, self.featurizer9, self.featurizer10]\n fvs = []\n for datum in X:\n fv = [f(datum) for f in featurizers if f is not None]\n fvs.append(fv)\n return np.array(fvs).astype(float)",
"def _update_feature_vec(fvec, word, tag_ngram):",
"def my_featurize(apartment):\n return x, y",
"def vectorize(features, vocab):\n vec = lil_matrix((1, len(vocab)))\n for feat in features:\n try:\n fidx = vocab[feat]\n vec[0, fidx] += 1.0\n except KeyError:\n pass\n return vec",
"def _featurize(self, img):\n self._classifier.predict(img)\n return self._classifier.get_features()",
"def featurize(tokens, feature_fns):\n ###TODO\n \n # step 1 -> feats creation\n feats = defaultdict(lambda: 0)\n \n # step 2 -> call particular feature function for each feature\n for feature in feature_fns : \n feature(tokens,feats)\n\n # step 3 -> sort before return\n return(sorted(feats.items(), key=lambda x: x[0]))",
"def feat():\n pass",
"def featurize(tokens, feature_fns):\n answer = []\n for func in feature_fns:\n feats = defaultdict(lambda: 0)\n func(tokens,feats)\n answer.extend(feats.items())\n return sorted(answer, key= lambda x: x[0])",
"def fvector(data, method ):\n\n fv = 0\n if method['type'] == 'lbp':\n \n\n lbpkern = lbpsimple.generateKernel2()\n \n imlbp = lbpsimple.lbp2oneslice(data, lbpkern)\n\n fv,bins = lbpsimple.features(imlbp)\n\n #pdb.set_trace();\n elif method['type'] == 'hist':\n \n fv, bins = numpy.histogram( data,range(-200,2000,20))\n fv = fv[10:15]\n #fv, bins = numpy.histogram( data)\n pass\n\n else:\n raise Exception('Unknow method for feature vector: %s' %(method))\n\n return fv",
"def featurize(self, data):\n \n features = []\n\n # tokens = data.split()\n\n #Modification 1: Normalization: All lowercase\n #Removing this did not seem to have any performance boost\n #but it did nothing negative either\n data = data.lower()\n\n #Modification 2: Normalization: Tokenizing using NLTK\n #Keep this\n # tokens = word_tokenize(data)\n tokens = data.split()\n\n #Modification 3: Word List: Removing stop words using NLTK\n #Keep this\n stop_words = set(stopwords.words('english'))\n tokens_filtered = []\n\n for t in tokens:\n if t not in stop_words:\n tokens_filtered.append(t)\n\n tokens = tokens_filtered\n\n #Modification 4: Pre-Processing Lemmization using NLTK\n #Surprisingly does not appear to impact performance\n # for t in tokens:\n # t = self.wordnet_lemmatizer.lemmatize(t)\n\n capital = 0\n average_word_length = 5 #It's 4.7, but we'll use 5\n short_words = 0\n long_words = 0\n\n for t in tokens:\n\n #Feature 1: Bag of words\n features.append((t, True))\n\n if(t.isupper()):\n capital += 1\n\n #Feature 3: Long or short word counter, intentionally ignoring length 4\n #and 5 as those are close to average\n #Very important that stop words were removed\n if(len(t) > average_word_length):\n long_words += 1\n elif(len(t) < average_word_length - 1):\n short_words += 1\n \n #Feature 2: Lots of capital\n #Remove this. It only appears to be a rough count of sentence number vs.\n #Capturing any sentiment. Does not impact F1 score in given train/dev sets\n # if(capital > 2):\n # features.append((\"LOTS_OF_CAPITAL\", True))\n\n #Feature 3: Long or short words\n # if(long_words > short_words):\n # features.append((\"LOTS_OF_LONG_WORDS\", True))\n\n\n\n return features",
"def augment_feature_vector(X):\n column_of_ones = np.zeros([len(X), 1]) + 1\n\n return np.hstack((column_of_ones, X))",
"def normalizeFeatureVector(self):\n # Normalize features\n total = 0.0\n for v in self.features.values(): total += abs(v)\n if total == 0.0: \n total = 1.0\n for k,v in self.features.iteritems():\n self.features[k] = float(v) / total",
"def apply(self, vec):\n raise NotImplementedError",
"def _convert_to_features(self, img: np.ndarray) -> np.ndarray:",
"def forward(self, x):\n x = self.features(x)\n x = x.view(x.size(0), 256 * 6 * 6)\n x = self.classifier(x)\n return x",
"def _create_feature_vec():\n\tnum_tags = NGRAM_TUPLE[0]\n\tfvec = []\n\tfor _, size in FEATURE_TUPLE:\n\t\tfvec.append(np.zeros((num_tags, size)))\n\n\t# Append tag ngram weights to end\n\tfvec.append(np.zeros((num_tags, num_tags)))\n\treturn fvec",
"def feature_forward(self, x):\n raise NotImplementedError",
"def processFeature(prevWord, word, vector):\n \n # We add feature whether it exists or not\n unigram, exists = vector.getUnigram(prevWord)\n if not exists:\n vector.addUnigram(prevWord)\n \n \n bigram, exists = vector.getBigram(prevWord, word)\n if not exists:\n vector.addBigram(prevWord, word)",
"def _vectorize(self, vectorizer = None):\n\n\t\tvectorizer = vectorizer if vectorizer else self.vectorizer;\n\n\t\tself.training_set_vector = vectorizer.fit_transform(self.training_set)\n\n\t\tself.testing_set_vector = vectorizer.transform(self.testing_set)",
"def forward(self, x):\n x = self.features(x)\n return x",
"def featurize(vector,features):\n dictionary = collections.defaultdict(lambda:0)\n for feature in iter(set(features)):\n dictionary[feature] = [vector[key][feature] if feature in vector[key] else 0 for key in vector] #populates vectors with zeroes where there's no value in an industry for an n-gram.\n return dictionary",
"def _featurize(self, predictions: SequenceSample) -> List[np.ndarray]:\n feature_vectors: List[np.ndarray] = []\n source = predictions.origin_words\n\n char_nn_scores = self.char_nn_lm_score(predictions.paths)\n word_nn_scores = self.word_nn_lm_score(predictions.paths)\n\n for i, (score, hypothesis) in enumerate(zip(predictions.scores, predictions.paths)):\n obss = list(zip(hypothesis, source))\n length = len(source)\n feature_vector = np.array([\n 1.,\n length,\n self.language_model.score(hypothesis) / length,\n char_nn_scores[i],\n word_nn_scores[i],\n score / length,\n sum(w in self.language_model for w in hypothesis) / length,\n sum(h[:self.prefix_size] == s[:self.prefix_size] for h, s in obss) / length,\n sum(h[-self.suffix_size:] == s[-self.prefix_size:] for h, s in obss) / length,\n self.language_model.score(hypothesis) * score / length,\n np.mean([editdistance.eval(h, s) for h, s in obss]),\n np.mean([float(obs in self.train_set_uniq) for obs in obss]),\n np.mean([self.train_counter.get(obs, self.discount) for obs in obss]),\n ])\n feature_vectors.append(feature_vector)\n return feature_vectors",
"def build_feature_vector(self, basic_vector):\n basic_vector = basic_vector - np.mean(basic_vector)\n self._basic_vectors.append(basic_vector)\n if len(self._basic_vectors) > 2:\n #if there are at least 3 basic vectors we can calculate the central derivative for the vector before this one\n first_derivative = (basic_vector - self._basic_vectors[-3])/(2*self.seconds_to_next_vector)\n second_derivative = (basic_vector - 2*self._basic_vectors[-2] + self._basic_vectors[-3])/(self.seconds_to_next_vector**2)\n feature_vector = np.concatenate((basic_vector, first_derivative, second_derivative))\n self._feature_vectors.append(feature_vector)",
"def makeFeatureVec(words, model, num_features):\n featureVec = np.zeros((num_features,),dtype=\"float32\")\n num_words = 0.\n index2word_set = set(model.wv.index2word)\n for word in words:\n if word in index2word_set:\n num_words += 1\n featureVec = np.add(featureVec,model[word]) \n featureVec = np.divide(featureVec,num_words)\n return featureVec",
"def create_feature_vector(features, length):\n START_IDX = 0\n END_IDX = 1\n\n output_vector = np.zeros(length)\n\n # negative strand\n for loc in features[-1]:\n output_vector[loc[START_IDX]:loc[END_IDX]] = 1 \n\n # positive strand\n for loc in features[1]:\n output_vector[loc[START_IDX]:loc[END_IDX]] = 2\n\n return output_vector",
"def compute_features(self, X):\n F = self.feature_extractor(X)\n if self.with_dropout:\n F = self.dropout(F)\n F = F[:, None].expand(-1, self.n_primitives, -1)\n F = torch.cat([\n F,\n self.primitive_embedding[None].expand_as(F)\n ], dim=-1)\n\n B = F.shape[0]\n M = self.n_primitives\n D = 2*self.feature_extractor.feature_size\n\n assert F.shape == (B, M, D)\n return F",
"def vectorize(self, terms):\n features = {}\n\n if self.parameters[LexiconFeature.PARAM_ENABLED] == 'false':\n return features\n\n tones = []\n if (self.terms_used == 'all'):\n tones = [self.get_tone(term) for term in terms]\n elif (self.used_terms == 'hashtags_only'):\n tones = [self.get_tone(term) for term in terms\n if len(term) > 0 and term[0] == '#']\n\n if (len(tones) == 0):\n tones.append(0)\n\n for function_name in self.functions:\n if (function_name == 'sum'):\n value = (sum(tones))\n elif (function_name == 'max'):\n value = max(tones)\n elif (function_name == 'min'):\n value = min(tones)\n else:\n raise ValueError(\n \"unexpected function: '{}'\".format(function_name))\n\n feature_name = \"{}_{}\".format(self.get_name(), function_name)\n features[feature_name] = utils.normalize(value)\n\n #\n # Calculate sum of cluster scores\n #\n # for cluster in self.bag_of_clusters_features:\n # cluster_tones = [self.get_cluster_tone(\n # cluster, cluster.get_cluster_id(word))\n # for word in terms if cluster.contains_word(word)]\n # if len(cluster_tones) == 0:\n # cluster_tones.append(0)\n\n # feature_name = \"{}_score_sum\".format(cluster.get_name())\n # value = sum(cluster_tones)\n # features[feature_name] = utils.normalize(value)\n\n return features",
"def forward(self, x):\n out = self.features(x)\n out = out.view(out.size(0), -1)\n out = self.classifier(out)\n return out",
"def preprocess_feature(df):"
] | [
"0.6279238",
"0.62478745",
"0.6121798",
"0.6079553",
"0.60581803",
"0.60362357",
"0.59740824",
"0.5891672",
"0.5890009",
"0.58688086",
"0.5858637",
"0.5843295",
"0.58297974",
"0.57748574",
"0.5719389",
"0.5689847",
"0.5676522",
"0.5671145",
"0.56549513",
"0.5650641",
"0.55704135",
"0.55663586",
"0.55551845",
"0.5553854",
"0.5550291",
"0.5548269",
"0.5544055",
"0.55440384",
"0.5537503",
"0.5537261"
] | 0.75786316 | 0 |
Removes stopwords and some non-alphanumeric characters that are deemed irrelevant for our purposes. | def clean(word):
word = word.lower()
stopwords = ['of', 'and','to', 'at', 'in', '@']
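    # replace &, /, -, parentheses, |, @, commas and square brackets with a space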
word = re.sub(r'[\&/\-\(\)\|\@,\]\[]+', ' ', word)
for stopword in stopwords:
pattern = r'\b' + stopword + r'\b'
pattern = re.compile(pattern)
word = re.sub(pattern, '', word)
word = re.sub(r'\s\s+', ' ', word)
return word | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _remove_stopwords(self, text: str) -> str:\n pattern = r\"\"\"\n (?x) # Set flag to allow verbose regexps\n \\w+(?:-\\w+)* # Words with optional internal hyphens \n | \\s* # Any space\n | [][!\"#$%&'*+,-./:;<=>?@\\\\^():_`{|}~] # Any symbol \n \"\"\"\n symbol = \" \"\n return \"\".join(\n t if t not in self.stop_words else symbol for t in re.findall(pattern, text)\n )",
"def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text",
"def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text",
"def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text",
"def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text",
"def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text",
"def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text",
"def remove_stopwords_fun(self):\n tokens = str(self.doc).split()\n cleaned_tokens = [token for token in tokens\n if token.lower() not in self.stopword_list]\n self.doc = ' '.join(cleaned_tokens)",
"def processText(text):\n\n no_punc = [word for word in text.split() if word.isalpha()] # and word not in stopwords.words('english')]\n #removes non-letter characters and only includes words not included in stopwords\n no_punc = \" \".join(no_punc) \n clean_words = nltk.word_tokenize(no_punc) #splits the punctuation marks from the real words\n return clean_words",
"def remove_stopwords(self, text):\n stopwords_list = stopwords.words('english')\n whitelist = [\"n't\", \"not\", \"no\"]\n words = text.split()\n clean_words = [word for word in words if (word not in stopwords_list or word in whitelist) and len(word) > 1]\n return \" \".join(clean_words)",
"def remove_stopwords(text):\n text = \" \"+text\n text = text.upper()\n for word in STOP_WORDS:\n text = text.replace(word.upper(),\" \")\n return text",
"def _clean(text, remove_stopwords=False):\n text = _remove_between_square_brackets(text)\n text = _replace_contractions(text)\n \n words = nltk.word_tokenize(text)\n words = _remove_non_ascii(words)\n words = _to_lowercase(words)\n words = _remove_punctuation(words)\n words = _replace_numbers(words)\n\n if remove_stopwords:\n words = _remove_stopwords(words)\n\n return ' '.join(words)",
"def remove_stopwords(self,text):\n return \" \".join([word for word in str(text).split() if word not in self.STOPWORDS])",
"def clean_text(text):\n global cleaned_text\n # remove numbers\n text_nonum = re.sub(r'\\d+', '', text)\n # remove punctuations and convert characters to lower case\n text_nopunct = \"\".join([char.lower() for char in text_nonum if char not in string.punctuation]) \n # substitute multiple whitespace with single whitespace\n # Also, removes leading and trailing whitespaces\n text_no_doublespace = re.sub('\\s+', ' ', text_nopunct).strip()\n #tokenise text\n tokenised_text = text_no_doublespace.split()\n for word in tokenised_text:\n if len(word) == 1:\n tokenised_text.remove(word)\n #if word is a stop word, remove it from the list\n elif word in stopwords.words('english'):\n tokenised_text.remove(word)\n #de-tokenise text\n cleaned_text = ' '.join(tokenised_text)\n return cleaned_text",
"def remove_stopwords(data):\n stop_words = stopwords.words('english')\n words = word_tokenize(str(data))\n new = \"\"\n for word in words:\n if word not in stop_words and len(word) > 1:\n new = new + \" \" + word\n return new",
"def clean_stopwords(text):\n tokens = tokenize(text)\n tokens = stopwordsRem(tokens)\n return tokens",
"def remove_stopwords(text):\n stopwords = [\"i\", \"me\", \"my\", \"myself\", \"we\", \"our\", \"ours\", \"ourselves\", \"you\", \"your\", \"yours\", \"yourself\", \"yourselves\", \"he\", \"him\", \"his\", \"himself\", \"she\", \"her\", \"hers\", \"herself\", \"it\", \"its\", \"itself\", \"they\", \"them\", \"their\", \"theirs\", \"themselves\", \"what\", \"which\", \"who\", \"whom\", \"this\", \"that\", \"these\", \"those\", \"am\", \"is\", \"are\", \"was\", \"were\", \"be\", \"been\", \"being\", \"have\", \"has\", \"had\", \"having\", \"do\", \"does\", \"did\", \"doing\", \"a\", \"an\", \"the\", \"and\", \"but\", \"if\", \"or\", \"because\", \"as\", \"until\", \"while\", \"of\", \"at\", \"by\", \"for\", \"with\", \"about\", \"against\", \"between\", \"into\", \"through\", \"during\", \"before\", \"after\", \"above\", \"below\", \"to\", \"from\", \"up\", \"down\", \"in\", \"out\", \"on\", \"off\", \"over\", \"under\", \"again\", \"further\", \"then\", \"once\", \"here\", \"there\", \"when\", \"where\", \"why\", \"how\", \"all\", \"any\", \"both\", \"each\", \"few\", \"more\", \"most\", \"other\", \"some\", \"such\", \"no\", \"nor\", \"not\", \"only\", \"own\", \"same\", \"so\", \"than\", \"too\", \"very\", \"s\", \"t\", \"can\", \"will\", \"just\", \"don\", \"should\", \"now\"]\n return \" \".join([word for word in text.split() if word not in stopwords])",
"def remove_stop_words(self):\n self.word_list = [word for word in self.word_list if len(word) > 1 and word not in STOP_WORDS] #The len(word) check is here because there's still one piece of white space I haven't pinned down in each file. I haven't taken the time to figure out a quick way to look at all the whitespace characters yet, but none of the ones I included takes care of that one lonely space. Will keep on it.\n self.word_list.sort()",
"def _clean_words(self, title, filter_stopwords=False):\n chars = '\"[]():;?!,\\'-'\n translation = dict((ord(c), u' ') for c in chars)\n def translate(text):\n if isinstance(text, unicode):\n translated = text.translate(translation)\n else:\n translated = text.translate(None, chars)\n return translated\n strips = '.'\n words = [\n x.strip(strips)\n for x in translate(title).split()\n ]\n for word in words:\n if len(word) >= self.min_word_length:\n if filter_stopwords and word.lower() not in STOPWORDS:\n continue\n # if the word contains non-ascii characters, try to convert\n # it to a ascii equivalent so that it's possible to type\n # \"naive\" when you don't even know how to type \"naïve\"\n try:\n word.encode('ascii')\n except UnicodeEncodeError:\n # it contains non-ascii characters\n ascii_word = unidecode(word)\n yield unicode(ascii_word).lower()\n yield word.lower()\n # yield ''.join(c for c in word if c.isalnum())",
"def clean_text(text):\n text = text.lower() # Convert the text to lower case\n text = re.sub(\",\", \" \", text) # Replace commas with an extra space\n\n text = re.sub(\"<.*?>\", \"\", text) # Clean out any HTML tags\n text = re.sub(\"\\s+\", \" \", text) # Replace multiple spaces with\n\n text = text.split()\n\n text = [\n re.sub(\"[^\\w]\", \"\", i.rstrip()) for i in text if i not in all_stopwords\n ] # Clean out stopwords\n\n # text = engStem.stemWords(text)# English Stemming\n\n text = \" \".join(text)\n return text",
"def clean_the_text(text):\n \n #Replace non-word characters with empty space\n text = re.sub('[^A-Za-z0-9\\s]', ' ', text)\n \n #Remove punctuation\n text = ''.join([word for word in text if word not in string.punctuation])\n \n #Bring text to lower case\n text = text.lower()\n \n #Tokenize the text\n tokens = re.split('\\W+', text)\n \n #Remove stopwords\n text = [word for word in tokens if word not in stopword]\n \n #Lemmatize the words\n text = [wn.lemmatize(word) for word in text]\n \n #Return text\n return text",
"def remove_stopwords(text):\n return \" \".join([word for word in str(text).split() if word not in STOPWORDS])",
"def _clean(self, text):\n if len(self.alph) == 26:\n text = sub('[\\n\\t ' + string.punctuation + ']+?', '', text)\n else:\n text = sub('[\\n\\t]+?', '', text)\n\n text = text.lower()\n text = text.encode('ascii', 'ignore').decode()\n return text",
"def remove_punctations_fun(self): \n self.doc = re.sub('[^a-zA-Z0-9]', ' ', self.doc)",
"def cleanData(s):\n\n # extract only word tokens of at least 2 chars\n re.compile(r\"\\b\\w\\w + \\b\", re.U).findall(s)\n\n # xml_dict = {';': '', '<': '<', '&': '&', '>': '>', '"': '\"',\n # '&apos': '\\''}\n # for key, value in xml_dict.iteritems():\n # s = s.replace(key, value)\n s.translate(maketrans('?!,.', ' '))\n\n with open('stopwords.txt') as stop_words:\n stop_words = {line.strip().lower() for line in stop_words if line!='\\n'}\n\n return s",
"def remove_stop_words(text):\n return ' '.join(\n [word for word in text.split(' ') if word not in final_stop_words])",
"def remove_stopwords(text):\n tokens = word_tokenize(text)\n filtered = [word for word in tokens if word not in stop_words]\n filtered = ' '.join(filtered)\n return filtered",
"def remove_bad_chars(self, corpus: List[str]) -> List[str]:\n corpus_clean: List[str] = list()\n for doc in corpus:\n doc_tmp = \"\"\n doc_tmp = re.sub(self.bad_chars, \"\", doc)\n corpus_clean.append(doc_tmp)\n return corpus_clean",
"def remove_stopwords(text):\r\n text_split=text.split()\r\n text_split=[word for word in text_split if word not in stopwords.words('spanish')]\r\n return text_split",
"def sanitize(text, stop_word_list):\n\n # convert the text into Unicode\n text = unicode(text)\n\n #print(type(text))\n\n # replace dot with space\n text = text.translate({ord(\".\"): ord(\" \")})\n # replace dash with space\n text = text.translate({ord(\"-\"): ord(\" \")})\n\n # split the text on white-space\n words = text.split()\n sanitized_words = []\n for w in words:\n\n # ignore numbers\n if w.isnumeric():\n continue\n\n # print(\"Word (Before Punctuation): \" + w)\n\n # remove punctuation\n # Ref: https://stackoverflow.com/questions/265960/best-way-to-strip-punctuation-from-a-string-in-python\n # w = w.translate(None, string.punctuation)\n\n # The above method does not work for Unicode strings\n # Ref: https://stackoverflow.com/questions/23175809/typeerror-translate-takes-one-argument-2-given-python#23306505\n # print(type(w))\n\n # replace punctuations with None\n w = w.translate({ord(c): None for c in string.punctuation})\n w = w.lower()\n # print(\"Word (After Punctuation): \"+w)\n\n # Note: Remove stop-words before Stemming, or else the stop-word\n # matching will not work.\n # If the word is in Stop Word List\n try:\n i = stop_word_list.index(w.lower())\n # skip further processing of word loop\n # print(\"Stop Word Removed: \"+w)\n continue\n except ValueError:\n pass\n\n w = stemm_word(w)\n\n # hack, hack, hack\n if w == '':\n continue\n\n # add the sanitized word into return list\n sanitized_words.append(w)\n\n return sanitized_words"
] | [
"0.7668897",
"0.75107396",
"0.75107396",
"0.75107396",
"0.75107396",
"0.75107396",
"0.75107396",
"0.75103426",
"0.745749",
"0.7410949",
"0.7322454",
"0.72971654",
"0.7257038",
"0.7251849",
"0.7238443",
"0.7230823",
"0.7226988",
"0.71822757",
"0.71772724",
"0.7159221",
"0.7149845",
"0.71328956",
"0.70762175",
"0.70579284",
"0.7053649",
"0.7031565",
"0.7023236",
"0.7013334",
"0.6999056",
"0.6985621"
] | 0.7719584 | 0 |
Sets the weight of each term depending on occurrence. The 0.2 factor is decided completely heuristically. Features are deemed irrelevant if they appear many times in each industry and aren't associated with one in particular. | def set_weight(term, irrelevant):
if term in irrelevant:
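        # irrelevant terms are down-weighted to 0.2 times their largest per-industry share of occurrences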
return (0.2 * max([x/sum(indtf_features[term]) for x in indtf_features[term]]))
elif isinstance(term, tuple):
return len(term)
else:
return 1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_weights_rocchio(isRelevant, result):\n\tglobal global_weights\n\tmax_contributing_weight = max(result.sim_measures, key=result.sim_measures.get)\n\tif isRelevant:\n\t\tglobal_weights[max_contributing_weight] += alpha\n\t\tfor measure in result.sim_measures:\n\t\t\tglobal_weights[measure] -= beta \n\n\telse:\n\t\tglobal_weights[max_contributing_weight] -= alpha\n\t\tfor measure in result.sim_measures:\n\t\t\tglobal_weights[measure] += beta",
"def test_ontology_term_graph_based_information_content_as_weights(ontology):\n\tassert ontology.ic(\"TO:0000001\", as_weight=True) == 0.000\n\tassert ontology.ic(\"TO:0000002\", as_weight=True) == (((0.3690702464285426 - 0.000) * 1.000) / 3.000) + 0.000\n\tassert ontology.ic(\"TO:0000003\", as_weight=True) == (((0.3690702464285426 - 0.000) * 1.000) / 3.000) + 0.000\n\tassert ontology.ic(\"TO:0000004\", as_weight=True) == (((1.000 - 0.000) * 1.000) / 3.000) + 0.000\n\tassert ontology.ic(\"TO:0000005\", as_weight=True) == 1.000\n\tassert ontology.ic(\"TO:0000006\", as_weight=True) == 1.000\n\tassert ontology.ic(\"TO:0000007\", as_weight=True) == (((0.5 - 0.000) * 1.000) / 3.000) + 0.000\n\tassert ontology.ic(\"TO:0000008\", as_weight=True) == (((1.3690702464285427 - 0.000) * 1.000) / 3.000) + 0.000\n\tassert ontology.ic(\"TO:0000009\", as_weight=True) == 1.000",
"def _recalculate_opinions(self, idea):\r\n \r\n global INFLUENCE_FACTOR\r\n \r\n last_idea = self.opinions[idea.category]\r\n last_idea.weight = last_idea.weight+(idea.weight*INFLUENCE_FACTOR)\r\n if last_idea.weight >1:\r\n last_idea.weight = 1\r\n elif last_idea.weight <-1:\r\n last_idea.weight = -1",
"def weight(self):",
"def findHighWeightFeatures(self, label):\n featuresWeights = []\n\n \"*** YOUR CODE HERE ***\"\n\n return featuresWeights",
"def initialize_weights(self):\n # compute lmda.\n self.lmda = float(self.c)/float(self.N)\n # bias term. This should not get a regularization penalty.\n self.bias = {}\n # weight vector\n self.w = {}\n # lastW[j] = k, indicates that feature j was last updated at time k.\n self.lastW = {}\n for lbl in self.labels:\n self.bias[lbl] = 0\n self.w[lbl] = {}\n self.lastW[lbl] = {}\n pass",
"def findHighWeightFeatures(self, label):\n featuresWeights = []\n\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()\n\n return featuresWeights",
"def apply_boosts(searcher):\n return searcher.boost(\n question_title=4.0,\n question_content=3.0,\n question_answer_content=3.0,\n post_title=2.0,\n post_content=1.0,\n document_title=6.0,\n document_content=1.0,\n document_keywords=8.0,\n document_summary=2.0,\n\n # Text phrases in document titles and content get an extra boost.\n document_title__match_phrase=10.0,\n document_content__match_phrase=8.0)",
"def weight(self) -> None:\n assert hasattr(self, \"characterized_inventory\"), \"Must do lcia first\"\n if not hasattr(self, \"weighting_value\"):\n self.load_weighting_data()\n self.weighting_calculation()",
"def weights(self):\r\n\t\treturn None",
"def update_weights(self):\n\t\tpass",
"def weight_distribution(self):\n all_scores = []\n for zettel in self.lemma_tokens:\n scores = []\n for word in zettel:\n cur_tf_idf = self.tf_idf_scores[word[0]] / 3 #range: 0-3+\n if word[1] == 'NG':\n word_list = re.split(\" \", word[0])\n cur_word_score = 0\n i = 0\n for new_word in word_list:\n cur_word_score += self.word_scores[new_word]\n i += 1\n cur_word_score = cur_word_score / i / 2 #range: 0-2+\n else:\n cur_word_score = self.word_scores[word[0]] / 2 #range: 0-2+\n cur_keyword_score = self.keyword_scores[word[0]] / 4 #0-4+\n cur_text_rank = self.text_ranks[word[0]] / 10 #range: 0-12+\n cur_pos_score = self.pos_scores[word[0]]\n cur_area_score = self.z_area_scores[word[0]]\n cur_total_score = ((cur_tf_idf * self.score_weights[0]) + (cur_word_score * self.score_weights[1]) +\n (cur_keyword_score * self.score_weights[2]) + (cur_text_rank * self.score_weights[3]) +\n (cur_pos_score * self.score_weights[4]) + (cur_area_score * self.score_weights[5])) / 6\n scores.append(cur_total_score)\n all_scores.append(scores)\n return all_scores",
"def weight_term_frequencies_one(a_in,b_in):\n plus_value = 2\n a_out = {}\n for(term, val) in a_in.items():\n try:\n b_val = b_in[term]\n a_out[term] = float(val)/float(val+b_val+plus_value)\n except KeyError:\n a_out[term] = float(val)/(float(val + plus_value)) \n return a_out",
"def get_weights(self):",
"def get_weights(self):\n return self.forcing_term.weights_.ravel()",
"def calculate_weighted_results():\n pass",
"def weights(self) -> List[float]:",
"def update(self, state, action, nextState, reward):\n \"*** YOUR CODE HERE ***\"\n feature_dictionary = self.featExtractor.getFeatures(state, action)\n difference = (reward + self.discount * self.computeValueFromQValues(nextState)) - self.getQValue(state, action)\n\n for feature in feature_dictionary:\n self.weights[feature] += self.alpha * difference * feature_dictionary[feature]",
"def weight_term_frequencies(a_in,b_in):\n plus_value = 2\n a_out = {}\n b_out = {} \n #print \"in weight term frequencies\" \n for(term, val) in a_in.items():\n try:\n b_val = b_in[term]\n a_out[term] = float(val)/float(val+b_val+plus_value)\n except KeyError:\n a_out[term] = float(val)/(float(val + plus_value)) \n \n for(term, val) in b_in.items():\n try:\n a_val = a_in[term]\n b_out[term] = float(val)/float(val+a_val+plus_value)\n except KeyError:\n b_out[term] = float(val)/(float(val + plus_value)) \n \n return (a_out,b_out)",
"def trainWeakClassifier(trainingSamples, weights, feature):\n #compute feature values\n featureValues = []\n positiveOrNegative = []\n for sample in trainingSamples:\n featureValues.append(feature.computeScore(sample[0], 0, 0))\n positiveOrNegative.append(sample[1])\n \n #zip with weights and sort by feature value\n featureValues = zip(featureValues, weights, positiveOrNegative)\n featureValues = sorted(featureValues, key=lambda tup: tup[0])\n \n #sum all weights of the positive and negative samples\n negativeWeightsTotal = 0\n positiveWeightsTotal = 0\n for value in featureValues:\n if value[2] == 1:\n positiveWeightsTotal += value[1]\n else:\n negativeWeightsTotal += value[1]\n \n #find the feature with the smallest error\n bestFeatureIndex = 0\n bestFeatureError = 1e10\n negativeWeightsSoFar = 0\n positiveWeightsSoFar = 0\n positiveOnTheLeft = 0\n positivesTotal = 0\n for i in range(0, len(featureValues)):\n error1 = positiveWeightsSoFar-negativeWeightsSoFar+negativeWeightsTotal\n error2 = negativeWeightsSoFar-positiveWeightsSoFar+positiveWeightsTotal\n error = min([error1, error2])\n \n if bestFeatureError > error:\n bestFeatureError = error\n bestFeatureIndex = i\n positiveOnTheLeft = positivesTotal\n \n if featureValues[i][2] == 1:\n positiveWeightsSoFar += featureValues[i][1]\n positivesTotal += 1\n else:\n negativeWeightsSoFar += featureValues[i][1]\n \n #count how much samples are there on the right\n positiveOnTheRight = positivesTotal - positiveOnTheLeft\n \n #determine the polarity and threshold\n polarity = -1\n threshold = featureValues[bestFeatureIndex][0]\n if positiveOnTheLeft > positiveOnTheRight:\n polarity = 1\n else:\n polarity = -1\n \n #build and return a weak classifier\n return WeakClassifier(feature, threshold, polarity)",
"def weight_wrtg(self, wrtg):\n # Clear caches because weights are going to change.\n # TODO: it might be possible to not clear the caches\n # if the weight doesn't change, and re-use previous decoding.\n wrtg.ClearCaches()\n for p in wrtg.P:\n rule = p.rhs.rule\n assert isinstance(rule.features, list)\n rule.weight = self.weight_rule(rule)",
"def update_weights(self):\n\n\n self.w += self.learn_rate * (self.X.T.dot(self.T - self.Y)\n - self.reg_L1 * np.sign(self.w)\n - self.reg_L2 * 2*self.w)",
"def __deriveElementalWeightsByNaturalNuclideAbundances():\n for element in elements.byName.values():\n numer = 0.0\n denom = 0.0\n for nb in element.getNaturalIsotopics():\n numer += nb.weight * nb.abundance\n denom += nb.abundance\n\n if denom:\n element.standardWeight = numer / denom",
"def compute_overall_opinions(self):\r\n opinions_list = []\r\n global CATEGORIES\r\n i=0\r\n for cat in CATEGORIES:\r\n opinions_list.append([])\r\n for citizen in self.citizens:\r\n opinions_list[i].append(citizen.opinions[cat].weight)\r\n i+=1\r\n \r\n i=0;\r\n for cat in CATEGORIES:\r\n mean = stats.describe(opinions_list[i])[2]\r\n std = math.sqrt(stats.describe(opinions_list[i])[3])\r\n print \"Category: %d - Mean: %f - STD: %f\" % (cat, mean, std)\r\n i+=1",
"def _weight_boosting_n_estimators(name: str):\n return scope.int(hp.qloguniform(name, np.log(10.5), np.log(1000.5), 1))",
"def update(self, state, action, nextState, reward):\n \"\"\"Description:\n Use second equation in slide 71 of MDP\n Adjest weight of active features depend on tranistion \n \"\"\"\n \"\"\" YOUR CODE HERE \"\"\"\n feat = self.featExtractor.getFeatures(state, action)\n\n # if weight is empty, then weight will need to initial to 1 for all features\n # According to which Extractor user choose, weight counter will have equal number of keys.\n if len(self.weight) == 0:\n feat = self.featExtractor.getFeatures(state, action)\n self.weight.incrementAll(feat.keys(), 1)\n \n maxQns = self.getValue(nextState)\n if maxQns == None:\n maxQns = 0\n Qsa = self.getQValue(state, action)\n difference = ( reward + self.discountRate * maxQns ) - Qsa\n \n for key in self.weight.keys():\n self.weight[key] += (self.alpha * difference * feat[key])\n \n \n \"\"\" END CODE \"\"\"",
"def my_assign_weights(context, data):\n pass",
"def _scale_weights(self, max_weight):\n scale_factor = np.divide(1, max_weight)\n for exp in self.experts:\n exp.weight = exp.weight * scale_factor",
"def getWeight(self) -> float:\n ...",
"def calc_weights(terms, ent, fp_len):\n term_dict = {}\n length = len(terms)\n for i, term in enumerate(terms):\n # get weights\n arguments = [terms[(i-1)%length], term, terms[(i+1)%length]]\n if term not in term_dict:\n fp_hash = md5Hash(arguments).zfill(fp_len)\n fp_hash_list = binconv(fp_hash, fp_len)\n token = None\n #if term in ent:\n # token = Token(fp_hash_list, 2)\n #else:\n token = Token(fp_hash_list, 0)\n term_dict[term] = token\n #if term in ent:\n # term_dict[term].weight += 1\n #else:\n term_dict[term].weight += 1\n return term_dict"
] | [
"0.6369961",
"0.6271947",
"0.60454494",
"0.585983",
"0.58412117",
"0.5800738",
"0.57777506",
"0.57360715",
"0.5691468",
"0.56898457",
"0.5493943",
"0.54804784",
"0.54799795",
"0.5478447",
"0.54776084",
"0.54596615",
"0.54485625",
"0.54483396",
"0.5446516",
"0.54402906",
"0.5433857",
"0.5423515",
"0.54087013",
"0.5398232",
"0.5390943",
"0.53841716",
"0.53773224",
"0.53719026",
"0.5367955",
"0.5352691"
] | 0.67181236 | 0 |
Transforms a short 'document' according to a trained model and weights to infer the topic. Each topic is an index. Query can be a string, a tuple of strings, or a list of tuples of strings. verbose=False will return only the numeric index; otherwise the topics can be interpreted by a legend in the form of a dictionary. | def guess_topic(lda, query, features_vec, irrelevant, verbose=True):
query_doc = []
doc_topic = []
topic_most_pr = None
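    # collect weighted feature vectors for each query term; plain strings are cleaned and n-grammized first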
if isinstance(query,str):
query = clean(query)
query = n_grammize(query)
for term in query:
weight = set_weight(term, irrelevant)
if term in features_vec:
query_doc.append(weight * array(features_vec[term]))
elif isinstance(query,tuple):
if query in features_vec:
weight = set_weight(query, irrelevant)
query_doc.append(weight * array(features_vec[query]))
elif isinstance(query,list):
for term in query:
weight = set_weight(term, irrelevant)
if term in features_vec:
query_doc.append(weight * array(features_vec[term]))
X = array(query_doc)
if len(X)==1:
X = X.reshape(1,-1)
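    # if no query term had a known feature vector, topic_most_pr is still None and is returned as-is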
if len(X)==0:
return topic_most_pr
doc_topic = lda.transform(X)
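    # sum the per-term topic distributions and take the overall most probable topic index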
sum_topics = numpy.zeros(len(doc_topic[0]))
for i in range(len(doc_topic)):
sum_topics = sum_topics + doc_topic[i]
topic_most_pr = sum_topics.argmax()
if verbose == True:
if topic_most_pr in legend:
return legend[topic_most_pr]
else:
return topic_most_pr
else:
return topic_most_pr | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def semanticSearch(model, topics, index, idx_to_docid, k=1000):\r\n run = {}\r\n topic_nums = [topic for topic in topics]\r\n queries = [topics[topic]['title'] for topic in topics]\r\n encoded_queries = model.encode(queries)\r\n labels, distances = index.knn_query(encoded_queries, k=k)\r\n for i,topic in enumerate(topic_nums):\r\n run[topic] = []\r\n # considers highest passage match only for a document\r\n added_docids = []\r\n sim = [1-x for x in distances[i]]\r\n scored_run = zip(labels[i], sim)\r\n for i, (passageidx, dist) in enumerate(scored_run):\r\n docid = idx_to_docid[passageidx]\r\n \r\n if docid not in added_docids:\r\n run[topic].append((docid, dist))\r\n added_docids.append(docid)\r\n run[topic] = run[topic][:1000]\r\n return run",
"def __getitem__(self, bow, iterations=100):\n is_corpus, corpus = utils.is_corpus(bow)\n if not is_corpus:\n # query is a single document => make a corpus out of it\n bow = [bow]\n\n self.convert_input(bow, infer=True)\n cmd = \\\n self.mallet_path + ' infer-topics --input %s --inferencer %s ' \\\n '--output-doc-topics %s --num-iterations %s --doc-topics-threshold %s --random-seed %s'\n cmd = cmd % (\n self.fcorpusmallet() + '.infer', self.finferencer(),\n self.fdoctopics() + '.infer', iterations, self.topic_threshold, str(self.random_seed)\n )\n logger.info(\"inferring topics with MALLET LDA '%s'\", cmd)\n check_output(args=cmd, shell=True)\n result = list(self.read_doctopics(self.fdoctopics() + '.infer'))\n return result if is_corpus else result[0]",
"def show_topic_model_textually(seed_gensim_topic_model, seed_gensim_corpus,\n texts_to_analyze, num_topics):\n print(\"alpha =\", seed_gensim_topic_model.alpha)\n print(seed_gensim_topic_model)\n print(seed_gensim_topic_model.print_topics(num_topics))\n print()",
"def model(self, doc_list=None):\r\n\r\n # eta => prior for the per-topic word distribution\r\n eta = torch.ones(self.V)\r\n\r\n with pyro.plate(\"topics\", self.K):\r\n\r\n # Beta => per topic word distribution\r\n Beta = pyro.sample(f\"beta\", dist.Dirichlet(eta))\r\n\r\n # alpha => prior for the per-doc topic vector\r\n alpha = torch.ones(self.K) / self.K\r\n\r\n X_List, Theta = [], []\r\n for d in pyro.plate(\"documents\", self.D, subsample_size=self.S):\r\n\r\n # theta => per-doc topic vector\r\n theta = pyro.sample(f\"theta_{d}\", dist.Dirichlet(alpha))\r\n\r\n doc = None if doc_list is None else doc_list[d]\r\n\r\n with pyro.plate(f\"words_{d}\", self.N[d]):\r\n\r\n # assign a topic\r\n z_assignment = pyro.sample(\r\n f\"z_assignment_{d}\",\r\n dist.Categorical(theta)\r\n )\r\n\r\n # from that topic vec, select a word\r\n X = pyro.sample(\r\n f\"w_{d}\",\r\n dist.Categorical(Beta[z_assignment]),\r\n obs=doc\r\n )\r\n\r\n X_List.append(X)\r\n Theta.append(theta)\r\n\r\n Theta = torch.stack(Theta)\r\n\r\n return X_List, Beta, Theta",
"def visualize(model, num_topics=num_topics, num_words=num_words,\r\n vocab=idx2word, show_emb=True,\r\n tokenizer=tokenizer, bert_model=bert):\r\n model.eval() # set the net in evaluation mode\r\n # set a few words to query\r\n queries = ['insurance', 'weather', 'particles', 'religion', 'man', 'love',\r\n 'intelligence', 'money', 'politics', 'health', 'people', 'family']\r\n\r\n ## visualize topics using monte carlo (sampling from the posterior I guess)\r\n with torch.no_grad(): # no gradients computation - makes forward pass lighter\r\n print('-' * 20)\r\n print('Visualize topics...')\r\n topics_words = []\r\n gammas = model.get_beta() # topics distributions\r\n for k in range(num_topics):\r\n gamma = gammas[k]\r\n top_words = list(gamma.cpu().numpy().argsort()[-num_words + 1:][::-1])\r\n topic_words = [vocab[a] for a in top_words]\r\n topics_words.append(' '.join(topic_words))\r\n print('Topic {}: {}'.format(k, topic_words))\r\n\r\n if show_emb:\r\n ## visualize word embeddings by using V to get nearest neighbors\r\n print('-' * 20)\r\n print('Visualize word embeddings by using output embedding matrix')\r\n\r\n # extract the embeddings from the model!\r\n try:\r\n embeddings = model.rho.weight # Vocab_size x E\r\n except:\r\n embeddings = model.rho # Vocab_size x E\r\n\r\n for word in queries:\r\n # extracting Bert representation of the word\r\n inputs = tokenizer(word, return_tensors=\"pt\")\r\n outputs = bert_model(**inputs).last_hidden_state[0]\r\n outputs.requires_grad = False\r\n if outputs.size()[0] > 1: # aggregate\r\n outputs = torch.sum(outputs, dim=0)\r\n nns = utils.nearest_neighbors(q=outputs,\r\n embeddings=embeddings, vocab=list(vocab.values()))\r\n print('word: {} .. neighbors: {}'.format(word, nns)) # utility function\r",
"def rocchio_doc_list(query_vector, corpus, topic):\n #create dict of vectors for each docid that contains\n #at least one non-zero term in query_vector\n inv_index = vsm_retrieval.get_inverted_index(corpus)\n doc_shortlist = dict()\n vector_len = len(query_vector)\n word_list = list(inv_index.keys())\n if corpus == cg.REUTERS:\n topic_docs = list(map(int, text_categorization.get_topic_dict()[topic]))\n else:\n topic_docs = list(range(0, 663))\n for index, weight in enumerate(query_vector):\n word = word_list[index]\n for doc_id in set(inv_index[word]).intersection(set(topic_docs)):\n if doc_id in doc_shortlist:\n #doc already added, just update weight entry for this word\n doc_shortlist[doc_id][index] = inv_index[word][doc_id]['weight']\n else:\n #doc not added yet add doc_id to shortlist,\n #initialize list to 0s for all words in query\n #update weight entry for current word\n entry = np.zeros(vector_len)\n entry[index] = inv_index[word][doc_id]['weight']\n doc_shortlist[doc_id] = entry\n\n return doc_shortlist",
"def topic_index():\n topic = db.topic(request.args(0)) or redirect(URL('default', 'index'))\n return dict(topic=topic)",
"def explore_topic(topic_number, topn=25, model=10):\n #\n if model==25:\n lda = LdaMulticore.load(joinp(pilot_path, 'lda_model_25'))\n topicname=topic_names_25[topic_number]\n gensimSTR=''\n elif model==15:\n lda = LdaMulticore.load(joinp(pilot_path, 'lda_model_15'))\n topicname=topic_names_15[topic_number]\n gensimSTR=''\n elif model==10:\n lda = LdaMulticore.load(joinp(pilot_path, 'lda_model_10'))\n topicname=topic_names_10[topic_number]\n gensimdic={0:9,1:8,2:6,3:7,4:3,5:10,6:5,7:1,8:2,9:4}\n gensimSTR=str(gensimdic[topic_number])\n \n \n # \n print(u'{:20} {}'.format(u'term', u'frequency') + u'\\n')\n \n dic={}\n j=0\n \n print('top 5 terms')\n for term, frequency in lda.show_topic(topic_number, topn):\n j=j+1\n if j<6:\n print (u'{:20} {:.3f}'.format(term, round(frequency, 3)))\n dic[term]=frequency\n dff=pd.DataFrame.from_dict(dic,orient='index')\n dff.columns=[''.join(['topic:',topicname,' (gensim topic:',gensimSTR,')'])] \n return(dff)\n ##",
"def transform(self, query):\n query = preprocessing(query)\n return lookup(query, self.model.wv)",
"def compute_topic_model(year_from=1900, year_to=2020, venues_filter=None, n_topics=100, use_lemmer=True,\n min_df=2, max_df=0.8):\n start = time.time()\n out_fileprefix = get_output_fileprefix(year_from, year_to, venues_filter, n_topics)\n\n corpus, tf_features_names = get_corpus_gensim_for_learning(year_from, year_to, venues_filter, use_lemmer, min_df, max_df)\n execute_lda_gensim(corpus, tf_features_names, n_topics, out_fileprefix)\n\n end = time.time()\n return year_from, year_to, n_topics, (end - start)",
"def __getitem__(self, doc):\n lda_model = ldamodel.LdaModel(\n num_topics=self.num_topics, alpha=self.alphas, id2word=self.id2word, dtype=np.float64)\n lda_model.topics = np.zeros((self.vocab_len, self.num_topics))\n ldapost = LdaPost(num_topics=self.num_topics, max_doc_len=len(doc), lda=lda_model, doc=doc)\n\n time_lhoods = []\n for time in range(self.num_time_slices):\n lda_model = self.make_lda_seq_slice(lda_model, time) # create lda_seq slice\n lhood = LdaPost.fit_lda_post(ldapost, 0, time, self)\n time_lhoods.append(lhood)\n\n doc_topic = ldapost.gamma / ldapost.gamma.sum()\n # should even the likelihoods be returned?\n return doc_topic",
"def visualize():\n model.eval()\n with torch.no_grad():\n alpha = model.mu_q_alpha\n beta = model.get_beta(alpha) \n \n print('\\n')\n print('#'*100)\n print('Visualize topics...') \n \n topics_words = []\n for k in range(args.num_topics):\n gamma = beta[k, :]\n top_words = list(gamma.cpu().numpy().argsort()[-args.num_words+1:][::-1]) \n topic_words = [vocab[a] for a in top_words]\n topics_words.append(' '.join(topic_words))\n print('Topic {} .. ===> {}'.format(k, topic_words)) \n\n print('\\n')\n print('Visualize word embeddings ...')\n # queries = ['economic', 'assembly', 'security', 'management', 'debt', 'rights', 'africa']\n # queries = ['economic', 'assembly', 'security', 'management', 'rights', 'africa']\n queries = ['border', 'vaccines', 'coronaviruses', 'masks']\n queries = set(queries).intersection(vocab)\n try:\n embeddings = model.rho.weight # Vocab_size x E\n except:\n embeddings = model.rho # Vocab_size x E\n # neighbors = []\n for word in queries:\n print('word: {} .. neighbors: {}'.format(\n word, nearest_neighbors(word, embeddings, vocab, args.num_words)))\n print('#'*100)",
"def post_process_result_of_lda_topic_model(lda_model, gensim_corpus,\n document_collection,\n document_collection_filtered,\n n_closest=25):\n # Prepare containers to store results\n # Container to keep the document topic matrix\n n_closest = - n_closest\n document_topic_matrix = []\n # Container to keep topics and the closest texts to each topic\n topic_closest_doc_with_topics_words = []\n # Container to keep topics\n all_topics = lda_model.show_topics(50)\n\n # Create an LDA corpus from the original gensim corpus\n lda_corpus = lda_model[gensim_corpus]\n\n # Iterate through the lda corpus and create the document topic matrix\n for i, documents in enumerate(lda_corpus):\n # Data returned is not proper numpy matrix\n document_topic_matrix.append(\n np.array([elements[1]for elements in documents]))\n\n # Create the proper numpy matrix\n document_topic_matrix = np.vstack(document_topic_matrix)\n\n # Find the closest texts to a given topic\n # Iterate through the transpose of the document topic matrix\n for i, element in enumerate(document_topic_matrix.T):\n # Identify the id of 15 closest texts of each topic\n closest = element.argsort(axis=0)[n_closest:][::-1]\n # Create a container to keep each text with the id above\n texts = []\n for element in closest:\n texts.append({'matched_text':\n document_collection_filtered[element],\n 'matched_text_words':\n document_collection[element]['match_word'],\n 'testimony_id': document_collection[element]\n ['testimony_id']})\n\n # Append them to container\n topic_closest_doc_with_topics_words.append({'texts': texts,\n 'topic_words':\n all_topics[i]})\n\n return {'topic_documents': topic_closest_doc_with_topics_words,\n 'document_topic_matrix': document_topic_matrix}",
"def show_topic_model_visually(seed_gensim_topic_model, seed_gensim_corpus,\n seed_gensim_dictionary):\n vis = pyLDAvis.gensim.prepare(seed_gensim_topic_model, seed_gensim_corpus,\n seed_gensim_dictionary)\n pyLDAvis.show(vis)",
"def get_summary_model(processed_text, model_type, number_topics):\n\n if model_type == 'LDA':\n count_model = CountVectorizer(ngram_range=(1, 1)).fit(processed_text)\n return count_model, LDA(n_components=number_topics, learning_method='batch').fit(count_model.fit_transform(processed_text))\n if model_type == 'LSA':\n tf_idf_model = TfidfVectorizer(ngram_range=(1, 1)).fit(processed_text)\n return tf_idf_model, TruncatedSVD(n_components=number_topics, algorithm='randomized', n_iter=100, random_state=122).fit(tf_idf_model.transform(processed_text))\n else:\n tf_idf_model = TfidfVectorizer(ngram_range=(1, 1)).fit(processed_text)\n return tf_idf_model, NMF(n_components=number_topics, init='random', random_state=0).fit(tf_idf_model.transform(processed_text))",
"def main_topic_doc(ldamodel, corpus=corpus): \n \n doc_topics = pd.DataFrame()\n\n for i, row in enumerate(ldamodel[corpus]):\n row = sorted(row, key=lambda x: (x[1]), reverse=True)\n\n for j, (topic_num, prop_topic) in enumerate(row):\n if j == 0:\n wp = ldamodel.show_topic(topic_num)\n topic_keywords = \"' \".join([word for word, prop in wp])\n doc_topics = doc_topics.append(pd.Series([int(topic_num), round(prop_topic,4), topic_keywords]), ignore_index=True)\n else:\n break\n doc_topics.columns = ['Dominant_Topic', 'Percent_Contrib', 'Topic_keywords']\n return doc_topics",
"def search_in_help(topic: str):\n key_lower = topic.lower()\n if key_lower == 'docs':\n print_help_text(docs_text)\n elif key_lower == 'args':\n print_help_text(docs_args_text)\n elif key_lower == 'sort':\n print_help_text(docs_sort_text)\n elif key_lower == 'groups':\n print_help_text(docs_groups_text)\n elif key_lower == 'list':\n print_help_text(docs_list_text)\n elif key_lower not in set(chain.from_iterable(indexes.keys())):\n print(fill(f'Not found: {topic}', width=START_TEXT_WIDTH, initial_indent=' ' * 2, subsequent_indent=' ' * 2))\n print_help_text(docs_general_text)\n else:\n for k, v in indexes.items():\n if key_lower in k:\n print(fill(str(v[0]), width=START_TEXT_WIDTH, initial_indent=' ' * 2, subsequent_indent=' ' * 2))\n # show long description\n if topic == key_lower:\n print(fill(f'Long: {v[2]}', width=START_TEXT_WIDTH, initial_indent=' ' * 6, subsequent_indent=' ' * 6))\n # show short description\n else:\n print(fill(f'Short: {v[1]}', width=START_TEXT_WIDTH, initial_indent=' ' * 6, subsequent_indent=' ' * 6))",
"def convert_single_example(example, tokenizer, is_training, args):\n tok_to_orig_index = []\n orig_to_tok_index = []\n all_doc_tokens = [] # all subtokens of original doc after tokenizing\n features = []\n for (i, token) in enumerate(example['paragraph_tokens']):\n orig_to_tok_index.append(len(all_doc_tokens))\n sub_tokens = albert_tokenize(tokenizer, token)\n tok_to_orig_index.extend([i] * len(sub_tokens))\n all_doc_tokens.extend(sub_tokens)\n\n # 特别注意!由于在paragraph_tokens中我们的token已经映射过一次了\n # 这里wordpiece等于又映射了一遍,所以这里的操作是二次映射\n if example['position_map']:\n tok_to_orig_index = [example['position_map'][index] for index in tok_to_orig_index]\n\n # QUERY\n query_tokens = []\n query_tokens.append(\"[Q]\")\n query_tokens.extend(albert_tokenize(tokenizer, example['question_text']))\n if len(query_tokens) > args.max_query_length:\n query_tokens = query_tokens[-args.max_query_length:]\n\n # ANSWER 预处理的时候先长短分开\n tok_start_position = -1\n tok_end_position = -1\n # 这里终点是必然在para_tokens内的\n if is_training:\n # 现阶段,有短答案预测短答案,否则预测长答案\n if example['answer_type'] != AnswerType['UNKNOWN']:\n tok_long_start_position = orig_to_tok_index[example['long_start']]\n if example['long_end'] == len(orig_to_tok_index):\n tok_long_end_position = orig_to_tok_index[-1]\n else:\n tok_long_end_position = orig_to_tok_index[example['long_end']] - 1\n tok_start_position = tok_long_start_position\n tok_end_position = tok_long_end_position\n if example['answer_type'] == AnswerType['SHORT']:\n tok_short_start_position = orig_to_tok_index[example['short_start']]\n if example['short_end'] == len(orig_to_tok_index):\n tok_short_end_position = orig_to_tok_index[-1]\n else:\n tok_short_end_position = orig_to_tok_index[example['short_end']] - 1\n tok_start_position = tok_short_start_position\n tok_end_position = tok_short_end_position\n\n # Get max tokens number for original doc,\n # should minus query tokens number and 3 special tokens\n # The -3 accounts for [CLS], [SEP] and [SEP]\n max_tokens_for_doc = args.max_seq_length - len(query_tokens) - 3\n\n # We can have documents that are longer than the maximum sequence length.\n # To deal with this we do a sliding window approach, where we take chunks\n # of up to our max length with a stride of `doc_stride`.\n _DocSpan = collections.namedtuple(\"DocSpan\", [\"start\", \"length\"])\n doc_spans = []\n start_offset = 0\n while start_offset < len(all_doc_tokens):\n length = len(all_doc_tokens) - start_offset # compute number of tokens remaining unsliding\n length = min(length, max_tokens_for_doc) # determine current sliding window size\n doc_spans.append(_DocSpan(start=start_offset, length=length))\n\n # Consider case for reaching end of original doc\n if start_offset + length == len(all_doc_tokens):\n break\n start_offset += min(length, args.doc_stride)\n\n # Convert window + query + special tokens to feature\n for (doc_span_index, doc_span) in enumerate(doc_spans):\n tokens = []\n token_to_orig_map = {}\n token_is_max_context = {}\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n tokens.extend(query_tokens)\n segment_ids.extend([0] * len(query_tokens))\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n for i in range(doc_span.length):\n split_token_index = doc_span.start + i\n token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]\n\n is_max_context = check_is_max_context(doc_spans, doc_span_index, split_token_index)\n token_is_max_context[len(tokens)] = is_max_context\n tokens.append(all_doc_tokens[split_token_index])\n segment_ids.append(1)\n 
tokens.append(\"[SEP]\")\n segment_ids.append(1)\n assert len(tokens) == len(segment_ids)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n padding = [0] * (args.max_seq_length - len(input_ids))\n input_ids.extend(padding)\n input_mask.extend(padding)\n segment_ids.extend(padding)\n\n assert len(input_ids) == args.max_seq_length\n assert len(input_mask) == args.max_seq_length\n assert len(segment_ids) == args.max_seq_length\n\n start_position = None\n end_position = None\n answer_type = None\n answer_text = \"\"\n if is_training:\n doc_start = doc_span.start\n doc_end = doc_span.start + doc_span.length - 1\n # For training, if our document chunk does not contain an annotation\n # we throw it out, since there is nothing to predict.\n contains_an_annotation = (tok_start_position >= doc_start and tok_end_position <= doc_end)\n # 负样本需要经过采样,且目标为[CLS]\n if (not contains_an_annotation) or example['answer_type'] == AnswerType['UNKNOWN']:\n if args.include_unknowns < 0 or random.random() > args.include_unknowns:\n continue\n start_position = 0\n end_position = 0\n answer_type = AnswerType['UNKNOWN']\n else:\n doc_offset = len(query_tokens) + 2\n start_position = tok_start_position - doc_start + doc_offset\n end_position = tok_end_position - doc_start + doc_offset\n answer_type = example['answer_type']\n\n # 如果是短答案,对一下答案是否正确\n if example['answer_type'] == AnswerType['SHORT']:\n answer_text = \" \".join(tokens[start_position:(end_position + 1)])\n answer_text = answer_text.replace(' ', '').replace(u\"▁\", ' ').strip()\n gt_answer = example['short_answer_text'].lower()\n answer_text_chars = [c for c in answer_text if c not in \" \\t\\r\\n\" and ord(c) != 0x202F]\n gt_answer_chars = [c for c in gt_answer if c not in \" \\t\\r\\n\" and ord(c) != 0x202F]\n if \"\".join(answer_text_chars) != \"\".join(gt_answer_chars):\n print(answer_text, 'V.S.', gt_answer)\n\n feature = InputFeatures(\n unique_id=None,\n example_index=None,\n doc_span_index=doc_span_index,\n tokens=tokens,\n token_to_orig_map=token_to_orig_map,\n token_is_max_context=token_is_max_context,\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n start_position=start_position,\n end_position=end_position,\n answer_text=answer_text,\n answer_type=answer_type)\n\n features.append(feature)\n\n return features",
"def display_topics_svd(model_fit, terms, num_top_words, topics = None):",
"def guide(self, doc_list=None):\r\n\r\n with pyro.plate(\"topics\", self.K) as k_vec:\r\n\r\n # Lambda => latent variable for the per-topic word q distribution\r\n Lamda = torch.stack([\r\n pyro.param(\r\n f\"lamda_q_{k}\",\r\n (1 + 0.01*(2*torch.rand(self.V)-1)),\r\n constraint=constraints.positive)\r\n for k in k_vec\r\n ])\r\n\r\n # Beta_q => per-topic word q distribtion\r\n Beta_q = pyro.sample(f\"beta\", dist.Dirichlet(Lamda))\r\n\r\n Theta_q = []\r\n for d in pyro.plate(\"documents\", self.D, subsample_size=self.S):\r\n\r\n # gamma => q for the per-doc topic vector\r\n gamma = pyro.param(f\"gamma_q_{d}\",\r\n (1+0.01*(2*torch.rand(self.K)-1))/self.K,\r\n constraint=constraints.positive)\r\n\r\n # theta_q => posterior per-doc topic vector\r\n theta_q = pyro.sample(f\"theta_{d}\", dist.Dirichlet(gamma))\r\n\r\n phi = pyro.param(\r\n f\"phi_q_{d}\",\r\n (1+0.01*(2*torch.rand(self.K)-1))/self.K,\r\n constraint=constraints.positive\r\n )\r\n\r\n with pyro.plate(f\"words_{d}\", self.N[d]) as w_vec:\r\n\r\n phi = torch.stack([\r\n pyro.param(\r\n f\"phi_q_{d}_{w}\",\r\n (1+0.01*(2*torch.rand(self.K)-1))/self.K,\r\n constraint=constraints.positive)\r\n for w in w_vec\r\n ])\r\n\r\n # assign a topic\r\n pyro.sample(f\"z_assignment_{d}\", dist.Categorical(phi))\r\n\r\n Theta_q.append(theta_q)\r\n\r\n Theta_q = torch.stack(Theta_q)\r\n\r\n return Beta_q, Theta_q",
"def doc_topics(self, doc_number):\n doc_topic = self.gammas / self.gammas.sum(axis=1)[:, np.newaxis]\n return doc_topic[doc_number]",
"def generate_bar_example(\n num_topics=10, num_documents=500, num_words_per_doc=100, alpha=1, beta=1, seed=None\n):\n\n width = 5\n\n vocab_size = width * width\n rng = random.Random()\n if seed is not None:\n rng.seed(seed)\n\n zeros = [[0 for i in range(width)] for j in range(width)]\n topic_squares = [zeros for i in range(num_topics)]\n for i in range(width):\n for j in range(width):\n topic_squares[i][i][j] = 1.0 / width\n for i in range(width):\n for j in range(width):\n topic_squares[width + i][j][i] = 1.0 / width\n topics = []\n for k in range(num_topics):\n topics.append(list(_itertools.chain(*topic_squares[k])))\n\n def weighted_choice(probs):\n total = sum(probs)\n r = rng.uniform(0, total)\n upto = 0\n for i, w in enumerate(probs):\n if upto + w > r:\n return i\n upto += w\n assert False, \"Shouldn't get here\"\n\n documents = []\n thetas = []\n for d in range(num_documents):\n doc = [0 for i in range(width * width)]\n topic_dist = [rng.gammavariate(1, 1) for k in range(num_topics)]\n topic_dist = [z / sum(topic_dist) for z in topic_dist]\n for i in range(num_words_per_doc):\n k = weighted_choice(topic_dist)\n w = weighted_choice(topics[k])\n doc[w] += 1\n thetas.append(topic_dist)\n documents.append(doc)\n\n sparse_documents = []\n for d in documents:\n sd = {}\n for i in range(width):\n for j in range(width):\n k = str(i) + \",\" + str(j)\n sd[k] = d[i * width + j]\n sparse_documents.append(sd)\n bow_documents = turicreate.SArray(sparse_documents)\n return bow_documents",
"def explore_topic_nouns(topic_number, topn=25, model=10):\n #\n if model==10:\n lda = LdaMulticore.load(joinp(pilot_path, 'lda_model_10'))\n topicname=topic_names_10[topic_number]\n gensimdic={0:9,1:8,2:6,3:7,4:3,5:10,6:5,7:1,8:2,9:4}\n gensimSTR=str(gensimdic[topic_number])\n \n # \n print(u'{:20} {}'.format(u'term', u'frequency') + u'\\n')\n \n dic={}\n j=0\n \n print('top 5 terms')\n for term, frequency in lda.show_topic(topic_number, topn):\n if dfff[dfff['nouns']==term].empty: ## dfff is loaded from pilot_path/bow_nouns.csv\n pass\n else:\n j=j+1\n if j<6:\n print (u'{:20} {:.3f}'.format(term, round(frequency, 3)))\n dic[term]=frequency\n dff=pd.DataFrame.from_dict(dic,orient='index')\n dff.columns=[''.join(['topic:',topicname,' (gensim topic:',gensimSTR,')'])] \n return(dff)",
"def RM3(query, docs, external_docs, freqstats, weights, fb_original_weight, fb_terms,\n dump_term_weights=False):\n coll_len = freqstats(None)[0]\n lambda_c, lambda_d = weights\n\n query_tf = Counter(query)\n query_len = len(query)\n query_ctf = {stem: freqstats(stem)[0] for stem in query}\n\n prob_w = defaultdict(float)\n for doc in external_docs:\n doc_tf = Counter(doc)\n doc_len = len(doc)\n\n score = 0\n for stem, qtf in query_tf.items():\n stem_score = (lambda_c * query_ctf[stem] / float(coll_len) +\n lambda_d * doc_tf[stem] / float(doc_len))\n if stem_score == 0:\n score = float('-inf')\n break\n score += qtf * math.log(stem_score)\n if score == float('-inf'):\n continue\n\n for stem, dtf in doc_tf.items():\n prob_w[stem] += float(dtf) / doc_len * math.exp(score)\n\n total_mass = sum(prob_w.values())\n for k, v in prob_w.items():\n prob_w[k] = float(v) / total_mass\n\n prob_q = {stem: float(qtf) / query_len for stem, qtf in query_tf.items()}\n for stem, p in sorted(prob_w.items(), key=lambda x: x[1], reverse=True)[:fb_terms]:\n prob_q[stem] = fb_original_weight * prob_q.get(stem, 0) + (1 - fb_original_weight) * p\n\n query_ctf = {stem: freqstats(stem)[0] for stem in prob_q}\n\n scores = []\n for doc in docs:\n doc_tf = Counter(doc)\n doc_len = len(doc)\n\n score = 0\n for stem, w in prob_q.items():\n stem_score = lambda_c * query_ctf.get(stem, 1) / float(coll_len)\n stem_score += lambda_d * doc_tf.get(stem, 0) / float(doc_len) if doc_len > 0 else 0\n if stem_score == 0:\n score = float('-inf')\n break\n score += w * math.log(stem_score)\n if score == float('-inf'):\n score = -1000000.0\n scores.append(score)\n\n if dump_term_weights:\n return scores, prob_q\n else:\n return scores\n\n # msp_score = float('-inf')\n # for offset in xrange(0, max(0, doc_len - passage_size) + 1, increment):\n # passage = doc[offset:offset+passage_size]\n # passage_tf = Counter(passage)\n # passage_len = len(passage)\n\n # score = 0\n # for stem, qtf in query_tf.items():\n # term_score = (lambda_c * query_ctf[stem] / float(coll_len) +\n # lambda_d * doc_tf[stem] / float(doc_len) +\n # lambda_p * passage_tf[stem] / float(passage_len))\n # if term_score == 0:\n # score = float('-inf')\n # break\n # score += qtf * math.log(term_score)\n # if score > msp_score:\n # msp_score = score\n # yield msp_score",
"def convert(lang_tokenizer, tensor):\n for t in tensor:\n if t != 0:\n print('%d -----> %s' % (t, lang_tokenizer.index_word[t]))",
"def topic(request, topic_id):\n\ttopic = Topic.objects.get(id=topic_id)\n\tvocabs = topic.vocab_set.all()\n\tcontext = {'topic': topic, 'vocabs':vocabs}\n\treturn render(request, 'Toeic/topic.html', context)",
"def inference(id2word=None, bigrams=None, lda_model=None, num_topics=30):\n \n if not id2word:\n id2word = corpora.Dictionary.load(UNIGRAM_FILE)\n \n if not bigrams:\n bigrams = Phrases.load(BIGRAM_FILE)\n \n if not lda_model:\n path = pathlib.Path(f\"{SAVING_DIR}/lda_topic_40\") # there are also other models\n path = path / \"lda.model\"\n lda_model = LdaModel.load(str(path))\n\n\n data = utils.read_text_file(\"test.txt\")\n list_of_tokens, _ = preprocess([data], bigrams)\n text2bow = [id2word.doc2bow(text) for text in list_of_tokens]\n\n utils.plot_document_dist(lda_model, text2bow, num_topics)",
"def dtm_vis(self, time, corpus):\n doc_topic = self.gammas / self.gammas.sum(axis=1)[:, np.newaxis]\n\n def normalize(x):\n return x / x.sum()\n\n topic_term = [\n normalize(np.exp(chain.e_log_prob.T[time]))\n for k, chain in enumerate(self.topic_chains)\n ]\n\n doc_lengths = []\n term_frequency = np.zeros(self.vocab_len)\n for doc_no, doc in enumerate(corpus):\n doc_lengths.append(len(doc))\n\n for term, freq in doc:\n term_frequency[term] += freq\n\n vocab = [self.id2word[i] for i in range(len(self.id2word))]\n\n return doc_topic, np.array(topic_term), doc_lengths, term_frequency, vocab",
"def show_doc_attention(self, x):\n att_layer = self.model.get_layer('doc_attention')\n prev_tensor = att_layer.input\n\n dummy_layer = Lambda(\n lambda x: att_layer._get_attention_weights(x)\n )(prev_tensor)\n\n return Model(self.model.input, dummy_layer).predict(x)",
"def show(self,\r\n index,\r\n shortform=False,\r\n length=None,\r\n yestags=True,\r\n highlight=None,\r\n show_date=True,\r\n most_recent=False,\r\n curtail=0,\r\n deepest=None):\r\n\r\n\r\n if not self.notebook_contains(index):\r\n display.noteprint((alerts.ATTENTION,'INDEX NOT FOUND'))\r\n return [set(),EMPTYCHAR]\r\n if not deepest:\r\n deepest = self.deepest(is_string=True,abridged=True)\r\n deepest += 3\r\n if not length:\r\n length = self.defaults.get('texttrim')\r\n d_index = str(index)\r\n if len(d_index) > 10:\r\n d_index = index_reduce(d_index) # to display long indexes in compact form\r\n if highlight is None:\r\n highlight = set()\r\n l_temp = []\r\n if show_date:\r\n date_insert = VERTLINE + \\\r\n self.get_note(index).date(short=True,\r\n most_recent=most_recent,\r\n convert=False)\\\r\n + BLANK\r\n else:\r\n date_insert = EMPTYCHAR\r\n\r\n\r\n if str(index) not in self.indexes():\r\n return [EMPTYCHAR, EMPTYCHAR]\r\n\r\n keyset_temp = self.get_keys_from_note(index) #fetches keyset\r\n\r\n keyset_temp = self.keypurger.purge(keyset_temp,projects=set(self.default_dict['projects']\r\n .get_all_projects()))\r\n seq_keys = set()\r\n if self.defaults.get('sequences_in_text') and not shortform:\r\n oldkeys = set(keyset_temp)\r\n seq_keys = set()\r\n keyset_temp = set()\r\n seq_keys = {x_temp for x_temp in oldkeys if ATSIGN in x_temp}\r\n keyset_temp = oldkeys - seq_keys\r\n\r\n kl = self.abridged_str_from_list(remove_tags(\r\n self.return_least_keys(transpose_keys(keyset_temp,\r\n notebook=notebook),\r\n override=not self.defaults.get('orderkeys'),\r\n add_number=True,no_allcaps=False), override=yestags),\r\n override=not shortform)\r\n seq_text = EMPTYCHAR\r\n\r\n if seq_keys:\r\n proj_seq = []\r\n main_seq = []\r\n other_seq = []\r\n\r\n for kx_temp in seq_keys:\r\n ident_temp= kx_temp.split(ATSIGN)[0]\r\n value_temp = kx_temp.split(ATSIGN)[1]\r\n if ident_temp in self.default_dict['projects'].get_all_projects():\r\n proj_seq.append(kx_temp)\r\n elif ident_temp in self.default_dict['main_sequences']:\r\n main_seq.append(kx_temp)\r\n else:\r\n other_seq.append(kx_temp)\r\n proj_seq.sort()\r\n main_seq.sort()\r\n other_seq.sort()\r\n\r\n if proj_seq:\r\n seq_text = 'PROJECTS: ' + ', '.join(proj_seq) \\\r\n + self.defaults.get('seqform1')\r\n if main_seq:\r\n for kx_temp in main_seq:\r\n ident_temp= kx_temp.split(ATSIGN)[0]\r\n value_temp = kx_temp.split(ATSIGN)[1]\r\n seq_text += ident_temp + ':' + value_temp \\\r\n + self.defaults.get('seqform1')\r\n if other_seq:\r\n seq_text += EOL\r\n for kx_temp in other_seq:\r\n ident_temp= kx_temp.split(ATSIGN)[0]\r\n value_temp = kx_temp.split(ATSIGN)[1]\r\n seq_text += ident_temp + ':' + value_temp \\\r\n + self.defaults.get('seqform1')\r\n if seq_text:\r\n seq_text += EOL + self.defaults.get('seqform2')\r\n\r\n seq_text = seq_text.replace(BLANK+EOL,EOL)\r\n\r\n if COMMA + EOL in seq_text or COLON +EOL \\\r\n in seq_text or SEMICOLON + EOL in seq_text:\r\n seq_text = seq_text\\\r\n .replace(COMMA+EOL,EOL)\\\r\n .replace(COLON+EOL,EOL)\\\r\n .replace(SEMICOLON+EOL,EOL)\r\n\r\n\r\n\r\n for char in string.whitespace[1:]:\r\n kl = kl.replace(char, EMPTYCHAR)\r\n\r\n kl = kl.replace(UNDERLINE, BLANK)\r\n\r\n\r\n if not shortform:\r\n\r\n tex_temp = self.get_text_from_note(index).replace(TAB,BLANK*4).replace('/T',BLANK*4)\r\n\r\n for rep_temp in range(0,tex_temp.count('}}')):\r\n if '{{' in tex_temp and '}}' in tex_temp:\r\n n_temp = tex_temp.split('{{')[1].split('}}')[0]\r\n\r\n\r\n if n_temp and n_temp[0] in [ATSIGN, STAR]:\r\n pass\r\n if 
self.show_text:\r\n folder_temp = {ATSIGN:'/textfiles',\r\n STAR:'/attachments'}[n_temp[0]]\r\n n_temp = n_temp[1:]\r\n try:\r\n textfile = file_access.get_text_file(n_temp,\r\n folder=folder_temp)\r\n tex_temp = tex_temp.replace('{{'+ATSIGN+n_temp+'}}',\r\n textfile)\r\n except:\r\n display.noteprint((alerts.ATTENTION,\r\n labels.FILE_ERROR))\r\n elif n_temp and n_temp[0] in ['^']:\r\n if self.show_images:\r\n folder_temp = '/pictures'\r\n directoryname = os.getcwd()+folder_temp\r\n picture = Image.open(directoryname\r\n +'/'+n_temp[1:]\r\n +'.jpg')\r\n picture.show()\r\n\r\n\r\n\r\n suffix = EMPTYCHAR\r\n if self.no_flash: #To disable flash card mode\r\n tex_temp = tex_temp.replace('/FC/','\\n /BREAK/ \\n')\r\n if '/FC/' in tex_temp: #For a flash card\r\n sides_temp = tex_temp.split('/FC/')\r\n if self.flexflip:\r\n self.sides = len(sides_temp)\r\n if self.last_sides != self.sides:\r\n self.side=0\r\n self.last_sides = self.sides\r\n tex_temp = sides_temp[self.side%len(sides_temp)]\r\n suffix = '[' + str(self.side%len(sides_temp)+1) + ']'\r\n\r\n\r\n\r\n\r\n if curtail != 0 and len(tex_temp) > curtail:\r\n tex_temp = tex_temp[0:curtail]\r\n # Adds the first and second element on the list\r\n l_temp.append(d_index+self.mark(index)+suffix\r\n +BLANK+VERTLINE+BLANK\r\n +self.field(index)\r\n +date_insert\r\n +BLANK+VERTLINE+BLANK+kl\r\n +BLANK+VERTLINE)\r\n l_temp.append(seq_text + nformat.encase(tex_temp,\r\n highlight))\r\n\r\n if len(l_temp) > 1:\r\n if self.defaults.get('curtail'):\r\n l_temp[1] = l_temp[1].strip(EOL)\r\n l_temp[1] = EOL * self.defaults.get('header') \\\r\n + l_temp[1] + EOL \\\r\n * self.defaults.get('footer')\r\n\r\n else:\r\n\r\n t_temp = self.get_text_from_note(index)\r\n t_temp = t_temp[0 : min([len(t_temp), length])]\r\n t_temp = nformat\\\r\n .purgeformatting(t_temp)\\\r\n .replace(EOL,EMPTYCHAR)\\\r\n .replace(TAB,EMPTYCHAR)\\\r\n .replace(VERTLINE,EMPTYCHAR)\\\r\n .replace(UNDERLINE,EMPTYCHAR)\r\n\r\n t_temp = nformat.encase(t_temp,highlight)\r\n\r\n\r\n\r\n l_temp.append(d_index+self.mark(index)\r\n +max([deepest-(len(d_index+self.mark(index))),0])\r\n *BLANK+BLANK+VERTLINE+BLANK\r\n +self.field(index)\r\n +max([self.field_length()\r\n -(len(self.field(index))), 0])*BLANK+BLANK\r\n +date_insert\r\n +BLANK\r\n +VERTLINE+BLANK+kl\r\n +(self.defaults.get('keytrim')-len(kl))*BLANK\\\r\n +BLANK+VERTLINE\r\n +BLANK+t_temp)\r\n\r\n return l_temp"
] | [
"0.57476157",
"0.56244314",
"0.55522525",
"0.55447453",
"0.5503854",
"0.54328257",
"0.540742",
"0.53439325",
"0.52937776",
"0.52260923",
"0.5206664",
"0.5205154",
"0.51819384",
"0.5175856",
"0.51390594",
"0.5078848",
"0.50570416",
"0.5042481",
"0.50256",
"0.50121063",
"0.50083905",
"0.49535087",
"0.4909564",
"0.4908357",
"0.49028558",
"0.48952967",
"0.4887125",
"0.48819768",
"0.4880564",
"0.48684397"
] | 0.5973902 | 0 |
Get the n most common role names from a CSV file (used mostly for testing purposes). | def get_mostcommon(path, n, i=3):
allroles = []
with open(path, 'rt', encoding='mac_roman') as csvfile:
csvreader = csv.reader(csvfile, delimiter=' ', quotechar='"')
for row in csvreader:
try:
role = clean(row[i])
allroles.append(''.join(role))
except IndexError:
pass
mostc = collections.Counter(allroles)
roles = mostc.most_common(n)
mostcroles = [x[0] for x in roles]
return mostcroles | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def best_team(file):\n with open(file, \"r\") as csv_file:\n reader = csv.reader(csv_file)\n next(reader)\n most_wins = 0\n best_team = \"\"\n for row in reader:\n if int(row[3]) > most_wins:\n most_wins = int(row[3])\n best_team = row[0]\n return best_team",
"def calculting_name():\n\n list_of_files = glob.glob('./muestras/*') # * means all if need specific format then *.csv\n latest_file = max(list_of_files, key=os.path.getctime)\n _, name_file = os.path.split(latest_file)\n name, _ = os.path.splitext(name_file)\n name_number = str(name)\n\n return name_number",
"def most_common(filename,n):\n\tfreq_dict = dictionary_creation(filename)\n\tt = []\n\tfor key, value in freq_dict.items():\n\t\tt.append((value,key))\n\t\tt.sort(reverse=True)\n\twordlist = []\n\tfreqlist = []\n\tprint n, 'most common words:'\n\tfor freq,word in t[0:n]:\n\t\tprint word,'\\t', freq\n\t\twordlist.append(word)\n\t\tfreqlist.append(freq)\n\treturn wordlist,freqlist",
"def get_most_common(self, lst):\n data = Counter(lst)\n mc = data.most_common(2) \n #if len(mc) == 1 or (mc[0][1] != (mc[1][1])):\n # return mc[0][0]\n #return \"AMB\"\n return data.most_common(1)[0][0]",
"def topCommonwords(self,value=5):\n out=self.df.withColumn('word', explode(split(col('name'), ' '))) \\\n .withColumn('norm_word',trim(regexp_replace('word','[^a-zA-Z0-9 ]', ''))) \\\n .filter(col('norm_word') !='')\\\n .groupBy('norm_word')\\\n .count()\\\n .sort('count', ascending=False)\\\n .select('norm_word').limit(value)\n out.withColumnRenamed('norm_word','Top english name in pubname').write \\\n .mode(\"overwrite\").csv('{}pubname/'.format(self.target))\n\n return out.rdd.map(lambda l:l.norm_word).collect()",
"def top_50():\r\n file_read = read_file()\r\n vacabulary_list = []\r\n for key in file_read:\r\n vacabulary_list.extend(file_read[key])\r\n top_50 = Counter(vacabulary_list).most_common(50)\r\n return (top_50)",
"def get_top_n_words(column, n):\r\n frequencies = Counter()\r\n column.str.lower().str.split().apply(frequencies.update)\r\n return frequencies.most_common(n)",
"def top_three_letters(string):\n print(Counter(string))\n print(Counter(string).most_common(3))",
"def longest_word(file_name):\n longest = 0\n linenum = 0\n finalnum = 0\n result = ''\n with open(file_name) as file:\n lines = file.readlines()\n for line in lines:\n linenum += 1\n words = line.split()\n for word in words:\n if len(word) > longest:\n longest = len(word)\n result = word\n finalnum = linenum\n if longest == 0:\n return None\n return str(finalnum) + ': ' + result",
"def get_tops(name_file,number,gender):\r\n tops = []\r\n num = 0\r\n f = open(name_file)\r\n reader = csv.reader(f)\r\n for row in reader:\r\n if row.__contains__(gender.upper()):\r\n row[2] = int(row[2])\r\n tops.append(row)\r\n num = num + 1\r\n if num == number:\r\n break\r\n return tops",
"def most_common(self, number=10):\n\n words_full_list = []\n\n for string in self.__corpora:\n words_full_list += string.split()\n\n print(Counter(words_full_list).most_common(number))",
"def getnames(f):\n # Assumes file is sorted with girl names first, boy names second, and the\n # most popular name at the top of each list.\n\n lineoftext = f.readline()\n girlname,sex,count = processline(lineoftext)\n\n while sex != \"M\":\n name,sex,count = processline(f.readline())\n boyname=name\n\n return girlname,boyname",
"def find_top_salespeople(name_list, sales_list, n):\n top_names = []\n temp_name = [] + name_list\n temp_sales = [] + sales_list\n\n while len(top_names) < n and len(temp_sales) != 0: \n max_sales = temp_sales.index(max(temp_sales))\n num = temp_sales[max_sales]\n top_names.append(temp_name[max_sales])\n temp_name.remove(temp_name[max_sales])\n temp_sales.remove(temp_sales[max_sales])\n\n if num in temp_sales:\n max_sales2 = temp_sales.index(max(temp_sales))\n top_names.append(temp_name[max_sales2])\n temp_name.remove(temp_name[max_sales2])\n temp_sales.remove(temp_sales[max_sales2])\n\n return top_names",
"def most_common_words(n):\n with open(os.path.join('visualization', 'vocab.tsv')) as fd:\n words = fd.readlines()[:n]\n words = [word for word in words]\n save_path = os.path.join('visualization', 'vocab_' + str(n) + '.tsv')\n with open(save_path, 'w') as fd:\n for word in words:\n fd.write(word)",
"def longest_name():\n def foolen(p): # nothing wrong with having a function inside a function\n return len(p['name'])\n return sorted(PEOPLE_LIST, key=foolen, reverse=True)",
"def longest_name():\n def foolen(p): # nothing wrong with having a function inside a function\n return len(p['name'])\n\n return sorted(PEOPLE_LIST, key=foolen, reverse=True)",
"def get_movie_most_nominations(movies: list) -> str:\n pass",
"def most_common_words(counts, n=-1):\n\n result = sorted(list(counts.items()), key=lambda x: x[1], reverse=True)\n\n if n == -1:\n return result\n else:\n return result[:n]",
"def pname(name):\n ranks = list(reversed(name.split(';')))\n for i, rank in enumerate(ranks):\n if rank in ['Others', 'Unassigned']:\n return rank\n if rank == '__':\n continue\n if rank.split('__')[1] is '':\n return ranks[i+1] + ';' + rank\n return rank",
"def _name_champion(self):\n # TODO BREAK TIES\n return max(self.teams, key=lambda team: len(team.wins))",
"def most_similar(user):\n reader = open(\"similarities.txt\", \"r\")\n lines = reader.readlines()\n sim_dict = dict()\n for line in lines:\n a, b, sim = line.split(\"\\t\")\n if user == a:\n sim_dict[b] = float(sim)\n\n if len(sim_dict.keys()) == 0:\n return None\n\n ranked = sorted(sim_dict.keys(), key=lambda x: sim_dict[x],\n reverse=True)\n\n return ranked[1:13]",
"def filter_top_cv(self, k=10, csv_file=\"enhanced.csv\"):\n # get label\n all_cv = []\n label_file = pd.read_csv(csv_file)\n labels = label_file['label']\n for label in labels:\n all_cv += list(label.split(\";\"))\n\n # count label num to get top-frequent label\n count = Counter(all_cv)\n top_cv = [x[0] for x in count.most_common(k)]\n\n # make dict mapping dir to label\n d = {}\n genes = label_file['Gene']\n labels = label_file['label']\n length = len(genes)\n for i in range(length):\n d[genes[i]] = list(labels[i].split(\";\"))\n\n filter_d = {}\n all_sids = sorted(d.keys())\n for sid in all_sids:\n for label in d[sid]:\n if label not in top_cv:\n continue\n if sid not in filter_d:\n filter_d[sid] = []\n filter_d[sid].append(label)\n\n if len(top_cv) < k:\n print(\"Error: top cv less than k\", count)\n return filter_d, top_cv",
"def find_most_common(df):\n return list(df['hotel_cluster'].value_counts().head().index)",
"def get_top_import_repo(self,n=None, _filter=None):\n\t\tentries = self.get_all(_filter)\n\t\tric = RepoImportCounter(self.package)\n\t\ti = 0\n\t\tfor entry in entries:\n\t\t\tif (i%1000 == 0):\n\t\t\t\tprint(i)\n\t\t\tric.parse(entry)\n\t\t\ti += 1\n\t\treturn ric.get_most_common(n)",
"def test_top_n_dognames(self):\n dognames = student_submission.read_csv('./dognames.csv')\n\n top_1 = student_submission.top_n_dognames(dognames, 1)\n self.assertTrue(isinstance(top_1, list), 'The return type of top_n_dognames seems wrong')\n\n self.assertEqual(top_1[0][0], 'Luna', 'The top used dog name seems wrong')\n\n top_100 = student_submission.top_n_dognames(dognames, 100)\n self.assertEqual(len(top_100), 100, 'The length of the list seems wrong')\n\n is_sorted = all(top_100[i][1] >= top_100[i+1][1] for i in range(len(top_100) - 1))\n self.assertTrue(is_sorted, 'Your list does not seem to be sorted correctly')",
"def most_reducible(wordlist):\n\n\t# We create a memo for reducible words since is_reducible is \n\t# recursive. The keys are the words and the values are the \n\t# number of characters\n\tglobal reducible_words\n\treducible_words = dict()\n\treducible_words['a'], reducible_words['i'] = 1, 1\n\t\n\tword_dict = to_dictionary(wordlist)\n\tfor line in word_dict:\n\t\tis_reducible(line, word_dict)\n\n\t# Varible that will search the memo for the longest word\n\tcurrent_greatest = ''\n\tfor word in reducible_words:\n\t\tif reducible_words[word] > len(current_greatest):\n\t\t\tcurrent_greatest = word\n\tprint(current_greatest)",
"def best(score, names):\n top_score_name = \"\"\n for name in names:\n if score(top_score_name) < score(name):\n top_score_name = name\n\n return top_score_name + \" has the longest name.\"",
"def get_country_ranking(all_contrystats_data, country):\n\n index = next((i for i, item in enumerate(all_contrystats_data) if item['country'] == country), -1)\n return index + 1 # we shift from 0",
"def most_common_passwords(creds: list, num: int):\n\treturn collections.Counter(creds).most_common(num)",
"def find_greatest_rotator(word_dict,n):\n rotate_best = None\n rotations = 0\n for word in word_dict:\n if rotate_pairs(word,word_dict,n) > rotations:\n rotate_best = word\n rotations = rotate_pairs(word,word_dict,n)\n return rotate_best, rotations"
] | [
"0.5811817",
"0.5779755",
"0.57320595",
"0.54729784",
"0.54105455",
"0.5389896",
"0.5379834",
"0.52935493",
"0.51974595",
"0.51733625",
"0.5172672",
"0.5158862",
"0.5145489",
"0.5135635",
"0.51277316",
"0.51220286",
"0.51147276",
"0.50854826",
"0.5080411",
"0.5066969",
"0.50609",
"0.4971194",
"0.49711874",
"0.49412695",
"0.4913245",
"0.49054456",
"0.48898917",
"0.48747316",
"0.48735875",
"0.48435166"
] | 0.7325346 | 0 |
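A minimal usage sketch for the `get_mostcommon` record above; the file name 'roles.csv', its whitespace-delimited layout, and the availability of the external `clean` helper are assumptions for illustration, not part of the record:

# Hypothetical call: column 3 of each row is expected to hold the role name.
top_roles = get_mostcommon('roles.csv', n=10, i=3)
print(top_roles)  # the ten most frequent cleaned role names, most common first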
generator function to get randomly ordered samples of financial data | def get_examples():
symbols = get_symbols()
symbol_start_indices, total_examples = _index_symbols(symbols)
selection_order = np.arange(total_examples)
np.random.shuffle(selection_order)
for sample_index in selection_order:
# use a binary search to determine which symbol to use given the sample_index
start_index_index = bisect.bisect_left(symbol_start_indices, sample_index + 1) - 1
start_index = symbol_start_indices[start_index_index]
offset = sample_index - start_index
symbol = symbols[start_index_index]
time_series_data = get_time_series_data(symbol)
next_day = time_series_data[offset]
previous_days = time_series_data[offset + 1: offset + 1 + EXAMPLE_SIZE]
previous_days.reverse()
metadata = get_metadata_for_symbol(symbol)
yield previous_days, next_day, metadata | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def data_feeder_2():\n return random.sample(range(100), 10)",
"def data_source():\n dataset = [0.1, 0.2, 0.3, 0.4, 0.5]\n while True:\n time.sleep(2)\n yield random.choice(dataset)",
"def generate_samples(self, n_samples):",
"def generate_samples(self, n_samples):",
"def test_random_generator(self):\n gen = random_data()\n data = [next(gen) for _ in range(100)]\n self.assertEqual(len(data), 100)",
"def gen_data(self, amount):\n\n return random.choices(self.indices, weights=self.weights, k=amount)",
"def get_next_sample(self):",
"def generate_samples(self):\n self.analytic_probability()",
"def get_data(self):\n if self.random_seeds: \n self._validate_random_seeds()\n seed_iter = list(map(iter,self.random_seeds))\n nsamples = len(self.random_seeds[0])\n else:\n seed_iter = None\n nsamples = self.numsamples\n self._set_meta_features()\n for _ in tqdm(range(nsamples)):\n self._update_meta_features(seed_iter)\n self._sample()\n yield self._extract_features()",
"def in_random_order(theta):\r\n indexes = [i for i,_ in enumerate(data)] # creates a list of indices\r\n random.shuffle(indexes) # shuffles them\r\n for i in indexes:\r\n yield data[i] # return data in that order\r",
"def random_values():\n while True:\n yield random()",
"def sample(data, k):\n\n # create random number generator\n r = Random()\n r.seed()\n\n # load all n items into dictionary\n n = len(data)\n data_dict = {i: data[i] for i in range(n)}\n samples = []\n\n for i in range(k):\n # select random item\n rand_i = r.randrange(0, n - 1) if n > 1 else 0 # randrange fails if start==stop\n samples.append(data_dict[rand_i])\n\n # replace selected item with last item and decrement number of items\n # to prevent duplicates\n data_dict[rand_i] = data_dict[n - 1]\n n -= 1\n\n return samples",
"def sample(n, seed= 0):\n data = list(range(n))\n while True:\n np.random.seed(seed)\n np.random.shuffle(data)\n yield from data",
"def data(i):\n m = i*100\n return [random.randint(0, m) for j in range(i)]",
"def some_simple_data(length=1000000):\n data = list(range(length))\n random.shuffle(data)\n return data",
"def simple_generator_function(data):\n if data:\n random.shuffle(data)\n\n for data_row in data:\n print \"\\ngenerating data:\"\n guess_data = [data_row[0], data_row[1], data_row[2], data_row[3]]\n iris = copy.deepcopy(DATA_SKELETON)\n iris['data_vector'] = guess_data\n iris['class'] = data_row[4]\n iris['_id'] = str(uuid.uuid4())\n # iris[0]['created'] = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')\n yield iris",
"def ticker_generator():\n return (v for v in load_equities().values)",
"def population_gen(population):\n pop_sort = [item for item in population]\n random.shuffle(pop_sort)\n\n for item in pop_sort:\n yield item",
"def in_random_order(data):\n indexes = [i for i, _ in enumerate(data)]\n random.shuffle(indexes)\n\n for i in indexes:\n yield data[i]",
"def get_data(generator, random, bench_id):\n x_train, y_train, x_test, y_test = generator(random, bench_id)\n x_train = np.c_[np.ones(len(x_train)), x_train]\n x_test = np.c_[np.ones(len(x_test)), x_test]\n return x_train, y_train, x_test, y_test",
"def in_random_order(data):\n indexes = [i for i, _ in enumerate(data)] # create a list of indexes\n random.shuffle(indexes) # shuffle them\n for i in indexes:\n yield data[i]",
"def generate_synth_data(n):",
"def in_random_order(data):\n idx = [i for i, _ in enumerate(data)]\n random.shuffle(idx)\n for i in idx:\n yield data[i]",
"def generate_numbers():\n\n return random.sample(range(100), 10)",
"def generate_dataset():\n num_list = 10\n return [generate_list() for _ in range(num_list)]",
"def bootstrap_sample(data):\n return [random.choice(data) for _ in data]",
"def generate_test_set(data, pts): \n test_set = np.asarray(random.sample(data, pts))\n \n return test_set",
"def _choose_sample(self):\n\n \t #periodically generate a new reconstruction for the purposes of sampling",
"def _random_epoch_gen(data):\n while True:\n for i in np.random.permutation(len(data)):\n yield data[i]",
"def random(self=None, sample=100, min=0, max=100):\r\n\t\treturn DataStatistics([randint(min, max) for i in range(sample)])"
] | [
"0.7042209",
"0.6748927",
"0.65114784",
"0.65114784",
"0.6460016",
"0.64581263",
"0.63565946",
"0.62900376",
"0.6261254",
"0.6239399",
"0.62391174",
"0.62097275",
"0.6187463",
"0.6132192",
"0.6110315",
"0.6107518",
"0.6062251",
"0.6058275",
"0.6057489",
"0.6057019",
"0.60528314",
"0.6032036",
"0.6025897",
"0.598224",
"0.597506",
"0.5963318",
"0.59555584",
"0.59533435",
"0.5938644",
"0.59332126"
] | 0.67601985 | 1 |
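A minimal sketch of consuming the `get_examples` generator above, assuming the module-level helpers it references (`get_symbols`, `count_entries`, `get_time_series_data`, `get_metadata_for_symbol`) and the `EXAMPLE_SIZE` constant are available in the same module:

# Each yielded example is (window of EXAMPLE_SIZE past entries, target entry, symbol metadata).
for previous_days, next_day, metadata in get_examples():
    print(metadata, len(previous_days))
    break  # inspect a single randomly ordered example and stop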
analyse the symbols to determine where examples can be extracted | def _index_symbols(symbols):
symbol_start_indices = []
next_start_index = 0
for symbol in symbols:
entry_count = count_entries(symbol)
if entry_count > EXAMPLE_SIZE:
symbol_start_indices.append(next_start_index)
next_start_index += entry_count - EXAMPLE_SIZE
total_examples = next_start_index
return symbol_start_indices, total_examples | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def symbols_details(self):\n pass",
"def symbols(self):\n pass",
"def explain(symbol):\n if isinstance(symbol, Symbolic):\n print(symbol.source)\n else: \n print(symbol)",
"def test_ex_2_1(self):\n\n wam = WAM()\n wam.execute(self.fig_2_3_instrs)\n #s = wam.get_term_repr(wam.deref_reg(0))\n s = wam.get_term_repr(7)\n self.assertEqual(s, 'p(_G2, h(_G2, _G3), f(_G3))')",
"def test_is_an_element_symbol():\n for el in roentgen.elements['symbol']:\n assert(is_an_element(el))",
"def test_all_extra_tokens(self):\n self.helper_test_evaluate_raises(\n '1 or 0',\n expected_exc_type=ExtraSymbolError,\n A=1,\n B=1,\n C=1)",
"def test_extract():\n print(\"Executing test_extract:\")\n\n theory_1=[\n (14,),\n (15,),\n (14,),\n (16,)\n ]\n theory_2=[\n (14,),\n (15,),\n (14,),\n (17,)\n ]\n theory_3=[\n (15,),\n (14,)\n ]\n\n mind=minds.new_mind(theories=[theory_1,theory_2,theory_3])\n\n print(\"Mind initial state:\")\n print(minds.mind_string(mind, show_claims=False, show_problems=False))\n\n minds.extract_new_routines(mind,1)\n print(\"Mind after 1 step of extraction:\")\n print(minds.mind_string(mind, show_claims=False, show_problems=False))\n\n minds.extract_new_routines(mind,1)\n print(\"Mind after 2 steps of extraction:\")\n print(minds.mind_string(mind, show_claims=False, show_problems=False))",
"def _symbols_of_input(label: str) -> List[str]:\n if label == common.EPSILON:\n return [label]\n\n # We add a state transition arc for each digit of a multi-digit number.\n if \"[\" not in label:\n return list(label)\n\n # We add a state transition arc for each inflectional or derivational\n # morpheme, inflectional group boundary, and proper noun analysis tag.\n return _SYMBOLS_REGEX.findall(label)",
"def _find_processing_instructions(self):\n pass",
"def find_symbols(self, **kw):\n return list(self.ifind_symbols(**kw))",
"def compilation_test(interp, source):\r\n print '*** Compiling symbols from file: %s ***' % util.within_VCode(source)\r\n interp.cleanup_dictionary()\r\n interp.parse_symbols_from_file(source)\r\n print '\\n\\nParsed symbols are: '\r\n interp.print_symbols()\r\n print 'Unresolved abbreviations are:'\r\n unresolved = interp.peek_at_unresolved()\r\n sorted_unresolved = unresolved.keys()\r\n sorted_unresolved.sort()\r\n for an_abbreviation in sorted_unresolved:\r\n symbol_list = unresolved[an_abbreviation].keys()\r\n symbol_list.sort()\r\n print '\\'%s\\': appears in %s' % (an_abbreviation, str(symbol_list))\r\n \r\n print '\\n*** End of compilation test ***\\n'",
"def run_examples():\n\n for example in examples:\n\n print(str(example) + \" : \", end=\" \")\n try:\n t, smush = analyse(example, my_env)\n print(lookup(t, smush))\n # print(\"Smush\")\n # for k,v in smush.items():\n # print(f\"\\t{k} : {v}\")\n except (ParseError, InferenceError) as e:\n print(e)",
"def substantiate():",
"def test_symbol_lookup(self):\r\n\r\n def check_lookup(symbol, expected):\r\n op = BaseWhereOperator.get_operator(symbol)\r\n self.assertEqual(op, expected)\r\n\r\n check_lookup('EQ', EqualsOperator)\r\n check_lookup('IN', InOperator)\r\n check_lookup('GT', GreaterThanOperator)\r\n check_lookup('GTE', GreaterThanOrEqualOperator)\r\n check_lookup('LT', LessThanOperator)\r\n check_lookup('LTE', LessThanOrEqualOperator)",
"def ifind_symbols(self, name=\"any\", **kw):\n for sym in self.itersymbols():\n if (name==\"any\" or name==sym.sym.name) and \\\n sym.sym.k==kw:\n yield sym.sym",
"def analyze_examples(pos_examples: List[str], neg_examples: List[str]) -> Tuple[List[str], List[str], List[str], Tuple[int, int]]:\n unique_alphabets = []\n unique_nums = []\n unique_symbols = []\n\n for string in pos_examples + neg_examples:\n for c in string:\n if c.isalpha():\n unique_alphabets.append(c)\n elif c.isdigit():\n unique_nums.append(c)\n elif c.isascii():\n unique_symbols.append(c)\n else:\n pass\n\n repeat_min = min(len(s) for s in pos_examples + neg_examples)\n repeat_max = max(len(s) for s in pos_examples + neg_examples)\n\n return list(set(unique_alphabets)), list(set(unique_nums)), list(set(unique_symbols)), (repeat_min, repeat_max)",
"def test_symbol_lookup(self):\n\n def check_lookup(symbol, expected):\n op = BaseWhereOperator.get_operator(symbol)\n self.assertEqual(op, expected)\n\n check_lookup('EQ', EqualsOperator)\n check_lookup('IN', InOperator)\n check_lookup('GT', GreaterThanOperator)\n check_lookup('GTE', GreaterThanOrEqualOperator)\n check_lookup('LT', LessThanOperator)\n check_lookup('LTE', LessThanOrEqualOperator)",
"def filter_symbols(nm_output):\n lines = nm_output.split('\\n')\n subset = [e for e in lines if (' T ' in e or ' D ' in e)]\n\n for s in subset:\n _, _, symbol = s.split()\n yield symbol",
"def test_validate_self_input_symbol_subset(self):\n with nose.assert_raises(exceptions.MissingSymbolError):\n self.dtm1.input_symbols.add('2')\n self.dtm1.validate_self()",
"def get_statistic(source_text):\n symbols = {s: 0 for s in bytes(range(0, 256))}\n for symbol in source_text:\n symbols[symbol] += 1\n return symbols",
"def clean_symbols(self):\n self.add_labels()\n variable_counter = 16\n for i in range(len(self.commands)):\n command = self.commands[i]\n if command.startswith('@'): # symbols always reside in A instructions\n value = command.split('@')[1]\n if not value.isdigit(): # is a symbol\n if value not in self.symbol_table: # is a variable\n self.symbol_table[value] = str(variable_counter)\n variable_counter += 1\n numeric_value = self.symbol_table.get(value)\n command = '@' + numeric_value\n self.commands[i] = command",
"def test_no_symbols(self):\n tweet = self.load_tweet('symbols')\n # Save a copy:\n symbols = tweet['entities']['symbols']\n del tweet['entities']['symbols']\n tweet_text = self.api.html_for_tweet(tweet)\n self.assertTrue('symbols: $AAPL and' in tweet_text)\n self.assertTrue('and $ANOTHER and $A.' in tweet_text)",
"def test_functionallity(self):\n\n pp = Lexpp(external_dict=pkg_resources.resource_filename(\"lexpp\", \"tests/test.dict\"))\n\n test_word = \"キャプテン\"\n entries = list(pp.lookup(test_word))\n\n self.assertEqual(len(entries), 4)\n\n for e in entries:\n self.assertEqual(type(e), Entry)\n rep = pp.get_representative_form(e)\n self.assertEqual(rep, test_word)",
"def _missing_symbol_to_skipped_tests(self):\n return {\n \"MathMLElement\": [\"mathml\"],\n \"GraphicsLayer\": [\"compositing\"],\n \"WebCoreHas3DRendering\": [\"animations/3d\", \"transforms/3d\"],\n \"WebGLShader\": [\"fast/canvas/webgl\", \"compositing/webgl\", \"http/tests/canvas/webgl\"],\n \"MHTMLArchive\": [\"mhtml\"],\n }",
"def __init__(self, symbols):\r\n self.symbols = set(symbols)",
"def test_ex_2_3(self):\n\n wam = WAM()\n wam.execute(self.fig_2_3_instrs)\n aW = wam.deref_reg(5)\n aZ = wam.deref_reg(2)\n wam.execute(self.fig_2_4_instrs)\n aX = wam.deref_reg(5)\n aY = wam.deref_reg(4)\n self.assertEqual(wam.get_term_repr(aW), 'f(a)')\n self.assertEqual(wam.get_term_repr(aX), 'f(a)')\n self.assertEqual(wam.get_term_repr(aY), 'f(f(a))')\n self.assertEqual(wam.get_term_repr(aZ), 'f(f(a))')",
"def test_symbol_repr(self):\n a = pybamm.Symbol(\"a\")\n b = pybamm.Symbol(\"b\")\n c = pybamm.Symbol(\"c\", domain=[\"test\"])\n d = pybamm.Symbol(\"d\", domain=[\"test\"])\n hex_regex = r\"\\-?0x[0-9,a-f]+\"\n self.assertRegex(\n a.__repr__(),\n r\"Symbol\\(\" + hex_regex + r\", a, children\\=\\[\\], domain\\=\\[\\]\\)\",\n )\n self.assertRegex(\n b.__repr__(),\n r\"Symbol\\(\" + hex_regex + r\", b, children\\=\\[\\], domain\\=\\[\\]\\)\",\n )\n self.assertRegex(\n c.__repr__(),\n r\"Symbol\\(\" + hex_regex + r\", c, children\\=\\[\\], domain\\=\\['test'\\]\\)\",\n )\n self.assertRegex(\n d.__repr__(),\n r\"Symbol\\(\" + hex_regex + r\", d, children\\=\\[\\], domain\\=\\['test'\\]\\)\",\n )\n self.assertRegex(\n (a + b).__repr__(),\n r\"Addition\\(\" + hex_regex + r\", \\+, children\\=\\['a', 'b'\\], domain=\\[\\]\\)\",\n )\n self.assertRegex(\n (c * d).__repr__(),\n r\"Multiplication\\(\"\n + hex_regex\n + r\", \\*, children\\=\\['c', 'd'\\], domain=\\['test'\\]\\)\",\n )\n self.assertRegex(\n pybamm.grad(a).__repr__(),\n r\"Gradient\\(\" + hex_regex + \", grad, children\\=\\['a'\\], domain=\\[\\]\\)\",\n )\n self.assertRegex(\n pybamm.grad(c).__repr__(),\n r\"Gradient\\(\"\n + hex_regex\n + \", grad, children\\=\\['c'\\], domain=\\['test'\\]\\)\",\n )",
"def test_missing_all_tokens(self):\n self.helper_test_evaluate_raises(\n '(A nand B) and not D',\n expected_exc_type=MissingSymbolError)",
"def process_special_sign(self):\r\n # 首先把全部是英文的句子找出来,没有特殊符号,没有其他东西,只有字母和数字。\r\n # 思路大概是用正则表达式确定结尾,用函数判断中间全部都是英文的句子,不允许特殊符号。\r\n # 用上面的check_sents函数,解决这个问题。\r\n all_sents = list()\r\n for i in self.set_of_result[0][\"question_text\"]:\r\n if DataCleanCheckTool.check_sents(i):\r\n all_sents.append(i)\r\n\r\n # 有些特殊情况的数据,直接抛弃掉,数量不大\r\n # 然后有一些描述词性特殊的单词的其实没有意义,直接抛掉\r\n # 还有一些带括号的,那些需要把括号中的内容抛掉\r\n # 但是因为用的是pop,每次pop之后index都变化,所以会跳着pop,因此在数据量大的情况下需要重复执行\r\n for k, v in enumerate(all_sents):\r\n if \". . .\" in v:\r\n all_sents.pop(k)\r\n elif \"...\" in v:\r\n all_sents.pop(k)\r\n elif \"adj.\" in v:\r\n all_sents.pop(k)\r\n elif \"adv.\" in v:\r\n all_sents.pop(k)\r\n elif \"n.\" in v:\r\n all_sents.pop(k)\r\n elif \"v.\" in v:\r\n all_sents.pop(k)\r\n elif \"prep.\" in v:\r\n all_sents.pop(k)\r\n elif \"sth.\" in v:\r\n all_sents.pop(k)\r\n elif \"sb.\" in v:\r\n all_sents.pop(k)\r\n\r\n # 小写开头的都可以全部抛弃掉了,不是完整的真正的句子,只是一段不完整的话。\r\n pattern = re.compile(\"^[a-z].+\")\r\n for k, v in enumerate(all_sents):\r\n try:\r\n pattern.search(v).group()\r\n all_sents.pop(k)\r\n except Exception as e:\r\n logging.exception(e)\r\n\r\n return all_sents",
"def get_symbol(self):\n return []"
] | [
"0.61620724",
"0.5961964",
"0.58629996",
"0.5696681",
"0.55566597",
"0.55555195",
"0.5509136",
"0.5487217",
"0.54459673",
"0.5443426",
"0.53950363",
"0.5371426",
"0.53046227",
"0.52617",
"0.5248638",
"0.5231612",
"0.5227878",
"0.5224617",
"0.5190185",
"0.5189669",
"0.5187694",
"0.5155561",
"0.5154444",
"0.5152942",
"0.51508546",
"0.5148626",
"0.511322",
"0.5090196",
"0.5087789",
"0.5081851"
] | 0.61644316 | 0 |
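A small worked sketch of the sample-index lookup that `_index_symbols` above enables; the start indices are made-up values for three symbols contributing 5, 3 and 4 usable examples:

import bisect
symbol_start_indices = [0, 5, 8]  # cumulative starts; total_examples == 12
sample_index = 6
symbol_pos = bisect.bisect_left(symbol_start_indices, sample_index + 1) - 1
offset = sample_index - symbol_start_indices[symbol_pos]
print(symbol_pos, offset)  # -> 1 1: the second symbol, its second usable example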
iterates over padded tiles of an ND image while keeping track of the slice positions | def tile_iterator(im,
blocksize = (64, 64),
padsize = (64,64),
mode = "constant",
verbose = False):
if not(im.ndim == len(blocksize) ==len(padsize)):
raise ValueError("im.ndim (%s) != len(blocksize) (%s) != len(padsize) (%s)"
%(im.ndim , len(blocksize) , len(padsize)))
subgrids = tuple([int(np.ceil(1.*n/b)) for n,b in zip(im.shape, blocksize)])
# if the image dimensions are not divisible by the blocksize, pad it accordingly
pad_mismatch = tuple([(s*b-n) for n,s, b in zip(im.shape,subgrids,blocksize)])
if verbose:
print("tile padding... ")
im_pad = np.pad(im,[(p,p+pm) for pm,p in zip(pad_mismatch,padsize)], mode = mode)
# iterates over cartesian product of subgrids
for i,index in enumerate(product(*[range(sg) for sg in subgrids])):
# the slices
# if verbose:
# print("tile %s/%s"%(i+1,np.prod(subgrids)))
# dest[s_output] is where we will write to
s_input = tuple([slice(i*b,(i+1)*b) for i,b in zip(index, blocksize)])
s_output = tuple([slice(p,b+p-pm*(i==s-1)) for b,pm,p,i,s in zip(blocksize,pad_mismatch,padsize, index, subgrids)])
s_padinput = tuple([slice(i*b,(i+1)*b+2*p) for i,b,p in zip(index, blocksize, padsize)])
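# s_padinput: the block plus a padsize margin on every side, read from the padded image im_pad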
padded_block = im_pad[s_padinput]
yield padded_block, s_input, s_output | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def roll(self, image):\n\n\t\toutput = np.zeros(image.shape)\n\n\t\theight, width = image.shape\n\n\t\tfor y in range(height):\n\n\t\t\t# Getting available neighbour indexes in y direction.\n\t\t\tdelta_y_0 = abs(min(y - self.padding, 0))\n\t\t\tdelta_y_1 = min( height - 1 - y, self.padding) + self.padding + 1\n\n\t\t\tfor x in range(width):\n\n\t\t\t\t# Getting available neighbour indexes in x direction.\n\t\t\t\tdelta_x_0 = abs( min(x - self.padding, 0)) \n\t\t\t\tdelta_x_1 = min( width - 1 - x, self.padding) + self.padding + 1\n\n\t\t\t\t# Taking a grid of pixels from the image.\n\t\t\t\tgrid = image[ \n\t\t\t\t\ty - (self.padding - delta_y_0) : y + (delta_y_1 - self.padding),\n\t\t\t\t\tx - (self.padding - delta_x_0) : x + (delta_x_1 - self.padding)\n\t\t\t\t]\n\n\t\t\t\tpixel = self.apply(grid, slice(delta_x_0, delta_x_1 ), slice(delta_y_0,delta_y_1))\n\t\t\t\t\n\t\t\t\toutput[y, x] = pixel\n\n\t\treturn output",
"def image_slices(image, axis=0):\n LOOKING_FOR_START, LOOKING_FOR_END = range(2)\n h = image.shape[axis]\n state = LOOKING_FOR_START\n for i in range(h):\n line = image[i,:] if axis == 0 else image[:,i]\n if state == LOOKING_FOR_START:\n if any(line != 255):\n istart = i\n state = LOOKING_FOR_END\n else:\n if all(line == 255):\n state = LOOKING_FOR_START\n if axis == 0:\n yield image[istart:i,:]\n else:\n yield image[:,istart:i]",
"def dense_patch_slices(image_size, patch_size, scan_interval):\n num_spatial_dims = len(image_size)\n if num_spatial_dims not in (2, 3):\n raise ValueError(\"image_size should has 2 or 3 elements\")\n patch_size = get_valid_patch_size(image_size, patch_size)\n scan_interval = ensure_tuple_size(scan_interval, num_spatial_dims)\n\n scan_num = [\n int(math.ceil(float(image_size[i]) / scan_interval[i])) if scan_interval[i] != 0 else 1\n for i in range(num_spatial_dims)\n ]\n slices = []\n if num_spatial_dims == 3:\n for i in range(scan_num[0]):\n start_i = i * scan_interval[0]\n start_i -= max(start_i + patch_size[0] - image_size[0], 0)\n slice_i = slice(start_i, start_i + patch_size[0])\n\n for j in range(scan_num[1]):\n start_j = j * scan_interval[1]\n start_j -= max(start_j + patch_size[1] - image_size[1], 0)\n slice_j = slice(start_j, start_j + patch_size[1])\n\n for k in range(0, scan_num[2]):\n start_k = k * scan_interval[2]\n start_k -= max(start_k + patch_size[2] - image_size[2], 0)\n slice_k = slice(start_k, start_k + patch_size[2])\n slices.append((slice_i, slice_j, slice_k))\n else:\n for i in range(scan_num[0]):\n start_i = i * scan_interval[0]\n start_i -= max(start_i + patch_size[0] - image_size[0], 0)\n slice_i = slice(start_i, start_i + patch_size[0])\n\n for j in range(scan_num[1]):\n start_j = j * scan_interval[1]\n start_j -= max(start_j + patch_size[1] - image_size[1], 0)\n slice_j = slice(start_j, start_j + patch_size[1])\n slices.append((slice_i, slice_j))\n return slices",
"def forward_tiled(self, image: numpy.ndarray, tile_size: int) -> numpy.ndarray:\n # Constant that only really gets repeated a ton here.\n context = 7\n context2 = context + context\n\n # Notably, numpy is used here because it makes this fine manipulation a lot simpler.\n # Scaling first - repeat on axis 2 and axis 3 (Y & X)\n image = image.repeat(2, 2).repeat(2, 3)\n\n # Resulting image buffer. This is made before the input is padded,\n # since the input has the padded shape right now.\n image_out = numpy.zeros(image.shape)\n\n # Padding next. Note that this padding is done on the whole image.\n # Padding the tiles would lose critical context, cause seams, etc.\n image = numpy.pad(image, [[0, 0], [0, 0], [context, context], [context, context]], mode = \"edge\")\n\n # Now for tiling.\n # The output tile size is the usable output from an input tile (tile_size).\n # As such, the tiles overlap.\n out_tile_size = tile_size - context2\n for out_y in range(0, image_out.shape[2], out_tile_size):\n for out_x in range(0, image_out.shape[3], out_tile_size):\n # Input is sourced from the same coordinates, but some stuff ought to be\n # noted here for future reference:\n # + out_x/y's equivalent position w/ the padding is out_x + context.\n # + The output, however, is without context. Input needs context.\n # + Therefore, the input rectangle is expanded on all sides by context.\n # + Therefore, the input position has the context subtracted again.\n # + Therefore:\n in_y = out_y\n in_x = out_x\n # not shown: in_w/in_h = tile_size (as opposed to out_tile_size)\n # Extract tile.\n # Note that numpy will auto-crop this at the bottom-right.\n # This will never be a problem, as tiles are specifically chosen within the padded section.\n tile = image[:, :, in_y:in_y + tile_size, in_x:in_x + tile_size]\n # Extracted tile dimensions -> output dimensions\n # This is important because of said cropping, otherwise it'd be interior tile size.\n out_h = tile.shape[2] - context2\n out_w = tile.shape[3] - context2\n # Process tile.\n tile_t = Tensor(tile)\n tile_fwd_t = self.forward(tile_t)\n # Replace tile.\n image_out[:, :, out_y:out_y + out_h, out_x:out_x + out_w] = tile_fwd_t.numpy()\n\n return image_out",
"def tile(X, rows, cols):\n tiling = np.zeros((rows * X.shape[1], cols * X.shape[2], X.shape[3]), dtype = X.dtype)\n for i in range(rows):\n for j in range(cols):\n idx = i * cols + j\n if idx < X.shape[0]:\n img = X[idx,...]\n tiling[\n i*X.shape[1]:(i+1)*X.shape[1],\n j*X.shape[2]:(j+1)*X.shape[2],\n :] = img\n return tiling",
"def tile(X, rows, cols):\n tiling = np.zeros((rows * X.shape[1], cols * X.shape[2], X.shape[3]), dtype = X.dtype)\n for i in range(rows):\n for j in range(cols):\n idx = i * cols + j\n if idx < X.shape[0]:\n img = X[idx,...]\n tiling[\n i*X.shape[1]:(i+1)*X.shape[1],\n j*X.shape[2]:(j+1)*X.shape[2],\n :] = img\n return tiling",
"def iter_patch(\n arr: NdarrayOrTensor,\n patch_size: Sequence[int] | int = 0,\n start_pos: Sequence[int] = (),\n overlap: Sequence[float] | float = 0.0,\n copy_back: bool = True,\n mode: str | None = NumpyPadMode.WRAP,\n **pad_opts: dict,\n) -> Generator[tuple[NdarrayOrTensor, np.ndarray], None, None]:\n\n from monai.transforms.croppad.functional import pad_nd # needs to be here to avoid circular import\n\n # ensure patchSize and startPos are the right length\n patch_size_ = get_valid_patch_size(arr.shape, patch_size)\n start_pos = ensure_tuple_size(start_pos, arr.ndim)\n\n # set padded flag to false if pad mode is None\n padded = bool(mode)\n is_v = [bool(p) for p in ensure_tuple_size(patch_size, arr.ndim)] # whether a valid patch size provided\n _pad_size = tuple(p if v and padded else 0 for p, v in zip(patch_size_, is_v)) # pad p if v else 0\n _overlap = [op if v else 0.0 for op, v in zip(ensure_tuple_rep(overlap, arr.ndim), is_v)] # overlap if v else 0.0\n # pad image by maximum values needed to ensure patches are taken from inside an image\n if padded:\n arrpad = pad_nd(arr, to_pad=[(p, p) for p in _pad_size], mode=mode, **pad_opts) # type: ignore\n # choose a start position in the padded image\n start_pos_padded = tuple(s + p for s, p in zip(start_pos, _pad_size))\n\n # choose a size to iterate over which is smaller than the actual padded image to prevent producing\n # patches which are only in the padded regions\n iter_size = tuple(s + p for s, p in zip(arr.shape, _pad_size))\n else:\n arrpad = arr\n start_pos_padded = start_pos\n iter_size = arr.shape\n\n for slices in iter_patch_slices(iter_size, patch_size_, start_pos_padded, _overlap, padded=padded):\n # compensate original image padding\n if padded:\n coords_no_pad = tuple((coord.start - p, coord.stop - p) for coord, p in zip(slices, _pad_size))\n else:\n coords_no_pad = tuple((coord.start, coord.stop) for coord in slices)\n yield arrpad[slices], np.asarray(coords_no_pad) # data and coords (in numpy; works with torch loader)\n\n # copy back data from the padded image if required\n if copy_back:\n slices = tuple(slice(p, p + s) for p, s in zip(_pad_size, arr.shape))\n arr[...] = arrpad[slices] # type: ignore",
"def slice_image(image, tile_size):\n height = image.shape[0]\n width = image.shape[1]\n assert height > tile_size and width > tile_size\n\n num_tiles_x, num_tiles_y = number_of_patches(width, height, tile_size)\n width, height = output_image_size(num_tiles_x, num_tiles_y, tile_size)\n\n # Crop image to new size\n image = image[:height, :width]\n\n tiles = np.zeros((num_tiles_y, num_tiles_x, tile_size, tile_size, 3))\n for i, ty in enumerate(range(0, height, tile_size)):\n for j, tx in enumerate(range(0, width, tile_size)):\n tiles[i, j] = image[ty : ty + tile_size, tx : tx + tile_size]\n\n return tiles",
"def _slice(self, position, dimension):\n\n assert dimension[0]+ position[0] in range(-1, self.width+2)\n assert dimension[1]+ position[1] in range(-1, self.height+2)\n\n result = []\n for x in range(dimension[0]):\n temp = []\n for y in range(dimension[1]):\n try:\n temp.append(self.board[(position[0]+x,position[1]+y)])\n except KeyError:\n if position[0]+x in range(self.width) and \\\n position[1]+y in range(self.height):\n temp.append(0)\n else:\n temp.append('@')\n\n result.append(temp)\n return result",
"def slice_image(im, target_size = [1024, 1024], padcolor = [126, 148, 137]):\n ncells = [0, 0]\n input_size = im.shape[:2]\n if len(im.shape) < 3:\n im = np.expand_dims(im, 2) # add a dummy dim for more streamlined code\n nchannels = im.shape[2]\n for dim in range(2):\n ncells[dim] = input_size[dim]//target_size[dim] + 1 #one extra\n imcanvas = np.zeros([a*b for a,b in zip(ncells,target_size)] + [nchannels], dtype = im.dtype)\n for channel in range(nchannels):\n imcanvas[:, :, channel] = padcolor[channel]\n \n imcanvas[:input_size[0], :input_size[1], :] = im\n imlist = []\n for cell_row in range(ncells[0]):\n for cell_col in range(ncells[1]):\n if nchannels > 1:\n imlist.append(imcanvas[cell_row*target_size[0]:(cell_row+1)*target_size[0], cell_col*target_size[1] : (cell_col+1)*target_size[1], :])\n else:\n imlist.append(imcanvas[cell_row*target_size[0]:(cell_row+1)*target_size[0], cell_col*target_size[1] : (cell_col+1)*target_size[1], 0])\n return (imlist, ncells)",
"def iter_patch_slices(\n image_size: Sequence[int],\n patch_size: Sequence[int] | int,\n start_pos: Sequence[int] = (),\n overlap: Sequence[float] | float = 0.0,\n padded: bool = True,\n) -> Generator[tuple[slice, ...], None, None]:\n\n # ensure patch_size has the right length\n patch_size_ = get_valid_patch_size(image_size, patch_size)\n\n # create slices based on start position of each patch\n for position in iter_patch_position(\n image_size=image_size, patch_size=patch_size_, start_pos=start_pos, overlap=overlap, padded=padded\n ):\n yield tuple(slice(s, s + p) for s, p in zip(position, patch_size_))",
"def dense_patch_slices(\n image_size: Sequence[int], patch_size: Sequence[int], scan_interval: Sequence[int], return_slice: bool = True\n) -> list[tuple[slice, ...]]:\n num_spatial_dims = len(image_size)\n patch_size = get_valid_patch_size(image_size, patch_size)\n scan_interval = ensure_tuple_size(scan_interval, num_spatial_dims)\n\n scan_num = []\n for i in range(num_spatial_dims):\n if scan_interval[i] == 0:\n scan_num.append(1)\n else:\n num = int(math.ceil(float(image_size[i]) / scan_interval[i]))\n scan_dim = first(d for d in range(num) if d * scan_interval[i] + patch_size[i] >= image_size[i])\n scan_num.append(scan_dim + 1 if scan_dim is not None else 1)\n\n starts = []\n for dim in range(num_spatial_dims):\n dim_starts = []\n for idx in range(scan_num[dim]):\n start_idx = idx * scan_interval[dim]\n start_idx -= max(start_idx + patch_size[dim] - image_size[dim], 0)\n dim_starts.append(start_idx)\n starts.append(dim_starts)\n out = np.asarray([x.flatten() for x in np.meshgrid(*starts, indexing=\"ij\")]).T\n if return_slice:\n return [tuple(slice(s, s + patch_size[d]) for d, s in enumerate(x)) for x in out]\n return [tuple((s, s + patch_size[d]) for d, s in enumerate(x)) for x in out] # type: ignore",
"def slice_im_plus_boxes(image_path, out_name, out_dir_images, \n boxes=[], yolo_classes=[], out_dir_labels=None, \n mask_path=None, out_dir_masks=None,\n sliceHeight=416, sliceWidth=416,\n overlap=0.1, slice_sep='|', pad=0,\n skip_highly_overlapped_tiles=False,\n overwrite=False,\n out_ext='.png', verbose=False):\n\n if len(out_ext) == 0:\n im_ext = '.' + image_path.split('.')[-1]\n else:\n im_ext = out_ext\n\n t0 = time.time()\n image = skimage.io.imread(image_path) #, as_grey=False).astype(np.uint8) # [::-1]\n print(\"image.shape:\", image.shape)\n if mask_path:\n mask = skimage.io.imread(mask_path)\n win_h, win_w = image.shape[:2]\n win_size = sliceHeight*sliceWidth\n dx = int((1. - overlap) * sliceWidth)\n dy = int((1. - overlap) * sliceHeight)\n \n n_ims = 0\n for y0 in range(0, image.shape[0], dy):\n for x0 in range(0, image.shape[1], dx):\n out_boxes_yolo = []\n out_classes_yolo = []\n n_ims += 1\n\n if (n_ims % 100) == 0:\n print(n_ims)\n\n # make sure we don't have a tiny image on the edge\n if y0+sliceHeight > image.shape[0]:\n # skip if too much overlap (> 0.6)\n if skip_highly_overlapped_tiles:\n if (y0+sliceHeight - image.shape[0]) > (0.6*sliceHeight):\n continue\n else:\n y = image.shape[0] - sliceHeight\n else:\n y = image.shape[0] - sliceHeight\n else:\n y = y0\n if x0+sliceWidth > image.shape[1]:\n # skip if too much overlap (> 0.6)\n if skip_highly_overlapped_tiles:\n if (x0+sliceWidth - image.shape[1]) > (0.6*sliceWidth):\n continue\n else:\n x = image.shape[1] - sliceWidth\n else:\n x = image.shape[1] - sliceWidth\n else:\n x = x0\n\n xmin, xmax, ymin, ymax = x, x+sliceWidth, y, y+sliceHeight\n\n # find boxes that lie entirely within the window\n if len(boxes) > 0:\n out_path_label = os.path.join(\n out_dir_labels,\n out_name + slice_sep + str(y) + '_' + str(x) + '_'\n + str(sliceHeight) + '_' + str(sliceWidth)\n + '_' + str(pad) + '_' + str(win_w) + '_' + str(win_h)\n + '.txt')\n for j,b in enumerate(boxes):\n yolo_class = yolo_classes[j]\n xb0, yb0, xb1, yb1 = b\n if (xb0 >= xmin) and (yb0 >= ymin) \\\n and (xb1 <= xmax) and (yb1 <= ymax):\n # get box coordinates within window\n out_box_tmp = [xb0 - xmin, xb1 - xmin,\n yb0 - ymin, yb1 - ymin]\n print(\" out_box_tmp:\", out_box_tmp)\n # out_boxes.append(out_box_tmp)\n # convert to yolo coords (x,y,w,h)\n yolo_coords = prep_train.convert((sliceWidth, sliceHeight),\n out_box_tmp)\n print(\" yolo_coords:\", yolo_coords)\n out_boxes_yolo.append(yolo_coords)\n out_classes_yolo.append(yolo_class)\n \n # skip if no labels?\n if len(out_boxes_yolo) == 0:\n continue\n\n # save yolo labels\n txt_outfile = open(out_path_label, \"w\") \n for yolo_class, yolo_coord in zip(out_classes_yolo, out_boxes_yolo): \n outstring = str(yolo_class) + \" \" + \" \".join([str(a) for a in yolo_coord]) + '\\n'\n if verbose: \n print(\" outstring:\", outstring.strip())\n txt_outfile.write(outstring)\n txt_outfile.close() \n\n # save mask, if desired\n if mask_path:\n mask_c = mask[y:y + sliceHeight, x:x + sliceWidth]\n outpath_mask = os.path.join(\n out_dir_masks,\n out_name + slice_sep + str(y) + '_' + str(x) + '_'\n + str(sliceHeight) + '_' + str(sliceWidth)\n + '_' + str(pad) + '_' + str(win_w) + '_' + str(win_h)\n + im_ext)\n skimage.io.imsave(outpath_mask, mask_c, check_contrast=False)\n\n # extract image\n window_c = image[y:y + sliceHeight, x:x + sliceWidth]\n outpath = os.path.join(\n out_dir_images,\n out_name + slice_sep + str(y) + '_' + str(x) + '_'\n + str(sliceHeight) + '_' + str(sliceWidth)\n + '_' + str(pad) + '_' + str(win_w) + '_' 
+ str(win_h)\n + im_ext)\n if not os.path.exists(outpath):\n skimage.io.imsave(outpath, window_c, check_contrast=False)\n elif overwrite:\n skimage.io.imsave(outpath, window_c, check_contrast=False)\n else:\n print(\"outpath {} exists, skipping\".format(outpath))\n \n print(\"Num slices:\", n_ims,\n \"sliceHeight\", sliceHeight, \"sliceWidth\", sliceWidth)\n print(\"Time to slice\", image_path, time.time()-t0, \"seconds\")\n return",
"def image_to_tiles(img, tile_size):\n padding_argument = [(0,0),(0,0),(0,0)]\n for input_dim in [0,1]:\n padding_argument[input_dim] = (0, (tile_size - img.shape[input_dim]) % tile_size)\n img = np.pad(img, padding_argument, mode='constant')\n tiles = img.reshape((img.shape[0]//tile_size, \n tile_size,\n img.shape[1]//tile_size,\n tile_size,\n img.shape[2]\n )).swapaxes(1,2)\n return tiles",
"def split_image_into_tiles_of_size(arr: Image, tile_w: int, tile_h: int, overlap: int):\n x_axis = -1\n y_axis = -2\n arr_width, arr_height = arr.shape[x_axis], arr.shape[y_axis]\n\n x_ntiles = (\n arr_width // tile_w if arr_width % tile_w == 0 else (arr_width // tile_w) + 1\n )\n y_ntiles = (\n arr_height // tile_h if arr_height % tile_h == 0 else (arr_height // tile_h) + 1\n )\n\n tiles = []\n\n # row\n for i in range(0, y_ntiles):\n # height of this tile\n ver_f = tile_h * i\n ver_t = ver_f + tile_h\n\n # col\n for j in range(0, x_ntiles):\n # width of this tile\n hor_f = tile_w * j\n hor_t = hor_f + tile_w\n\n tile = get_tile(arr, hor_f, hor_t, ver_f, ver_t, overlap)\n\n tiles.append(tile)\n tile_shape = [tile_h, tile_w]\n ntiles = dict(x=x_ntiles, y=y_ntiles)\n padding = dict(left=0, right=0, top=0, bottom=0)\n if arr_width % tile_w == 0:\n padding[\"right\"] = 0\n else:\n padding[\"right\"] = tile_w - (arr_width % tile_w)\n if arr_height % tile_h == 0:\n padding[\"bottom\"] = 0\n else:\n padding[\"bottom\"] = tile_h - (arr_height % tile_h)\n info = dict(tile_shape=tile_shape, ntiles=ntiles, overlap=overlap, padding=padding)\n return tiles, info",
"def slice_to_tiles(self, tile_raw_size=None, show_info=\"\"):\n if not tile_raw_size: tile_raw_size = self.tile_raw_size\n tile_raw_w,tile_raw_h = tile_raw_size\n tile_w,tile_h = round(tile_raw_w),round(tile_raw_h)\n\n if show_info:\n print(f\" ==Slicing {show_info} Tiles==\")\n print(f' Tile raw size: {tile_raw_size[0]} x {tile_raw_size[1]} px\\n')\n\n #process into list of image objects\n tiles = []\n true_x, true_y = (0,0)\n with Image.open(self.path) as img_obj:\n w,h = img_obj.size\n for row in range(0,h-tile_h,tile_h):\n tiles_row = []\n y = round(true_y)\n for col in range(0,w-tile_w,tile_w):\n x = round(true_x)\n im_crop = img_obj.crop((x,y,x+tile_w,y+tile_h))\n tiles_row.append(im_crop)\n true_x += tile_raw_w\n tiles.append(tiles_row)\n true_y += tile_raw_h\n true_x = 0\n\n return tiles",
"def iter_patch(\n arr: np.ndarray, patch_size, start_pos=(), copy_back: bool = True, mode: str = \"wrap\", **pad_opts,\n):\n # ensure patchSize and startPos are the right length\n patch_size = get_valid_patch_size(arr.shape, patch_size)\n start_pos = ensure_tuple_size(start_pos, arr.ndim)\n\n # pad image by maximum values needed to ensure patches are taken from inside an image\n arrpad = np.pad(arr, tuple((p, p) for p in patch_size), mode, **pad_opts)\n\n # choose a start position in the padded image\n start_pos_padded = tuple(s + p for s, p in zip(start_pos, patch_size))\n\n # choose a size to iterate over which is smaller than the actual padded image to prevent producing\n # patches which are only in the padded regions\n iter_size = tuple(s + p for s, p in zip(arr.shape, patch_size))\n\n for slices in iter_patch_slices(iter_size, patch_size, start_pos_padded):\n yield arrpad[slices]\n\n # copy back data from the padded image if required\n if copy_back:\n slices = tuple(slice(p, p + s) for p, s in zip(patch_size, arr.shape))\n arr[...] = arrpad[slices]",
"def split_image_into_number_of_tiles(\n arr: Image, x_ntiles: int, y_ntiles: int, overlap: int\n):\n img_width, img_height = arr.shape[-1], arr.shape[-2]\n tile_w = img_width // x_ntiles\n tile_h = img_height // y_ntiles\n return split_image_into_tiles_of_size(arr, tile_w, tile_h, overlap)",
"def footprint_corner_indices():",
"def _make_slices(self, img_stacks, mask_stacks, patient_id, out_pth):\n img_file_name = \"{patient}_{id}_stack\"\n msk_file_name = \"{patient}_{id}_stack_mask\"\n for s in range(1, img_stacks.shape[0] + 1):\n if s < self.stack_size or img_stacks.shape[0] - s <= self.stack_size:\n continue\n slice_idx = np.arange(-1, self.stack_size-1) + s\n im_block = img_stacks[slice_idx,:, :, 1]\n msk_block = mask_stacks[s, :, :, 1] # Output is the mask for the center channel\n np.save(os.path.join(out_pth, img_file_name.format(patient=patient_id, id=s)), im_block)\n np.save(os.path.join(out_pth, msk_file_name.format(patient=patient_id, id=s)), msk_block)",
"def print_image_slice_3d(input, num=0,direction=\"z\"):\n\t#print \"print slice at 3 directions\"\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tif(direction==\"x\"):\n\t\t#print \"xxxxx\"\n\t\tix=num\n\t\tprint \"(x = %d slice)\" % (ix)\n\t\tline = []\n\t\tfor iz in xrange(nz-1,-1,-1):\n\t\t\tline.append(\"Z \")\n\t\t\tline.append(\"%4i \" % iz)\n\t\t\tfor iy in xrange(ny):\n\t\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\t\tif ((iy + 1) % 5 == 0): \n\t\t\t\t\tline.append(\"\\n \")\n\t\t\t\t\tline.append(\" \")\n\t \t\tline.append(\"\\n\")\n\t \t\tif(ny%5 != 0): line.append(\"\\n\")\n\t\tprint \"\".join(line)\n\telif(direction==\"y\"):\n\t\t#print \"yyy\"\n\t\tiy=num\n\t\tprint \"(y = %d slice)\" % (iy)\n\t\tline = []\n\t\tfor iz in xrange(nz-1,-1,-1):\n\t\t\tline.append(\"Z \")\n\t\t\tline.append(\"%4i \" % iz)\n\t\t\tfor ix in xrange(nx):\n\t\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\t\tline.append(\"\\n \")\n\t\t\t\t\tline.append(\" \")\n\t \t\tline.append(\"\\n\")\n\t \t\tif(nx%5 != 0): line.append(\"\\n\")\n\t\tprint \"\".join(line)\n\telse:\n\t\t#print \"zzzz\"\n\t\tiz=num\n\t\tprint \"(z = %d slice)\" % (iz)\n\t\tline = []\n\t\tfor iy in xrange(ny-1,-1,-1):\n\t\t\tline.append(\"Row \")\n\t\t\tline.append(\"%4i \" % iy)\n\t\t\tfor ix in xrange(nx):\n\t\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\t\tline.append(\"\\n \")\n\t\t\t\t\tline.append(\" \")\n\t \t\tline.append(\"\\n\")\n\t \t\tif(nx%5 != 0): line.append(\"\\n\")\n\t\tprint \"\".join(line)",
"def __padding(self, image, boxes, height, width):\n temp = boxes[:, :4].astype(np.int)\n y1 = np.where(temp[:, 0] < 0)[0]\n if len(y1) > 0:\n temp[y1, 0] = 0\n x1 = np.where(temp[:, 1] < 0)[0]\n if len(x1) > 0:\n temp[x1, 0] = 0\n y2 = np.where(temp[:, 2] > image.shape[0] - 1)[0]\n if len(y2) > 0:\n temp[y2, 0] = image.shape[0] - 1\n x2 = np.where(temp[:, 3] > image.shape[1] - 1)[0]\n if len(x2) > 0:\n temp[x2, 0] = image.shape[1] - 1\n pad_top = np.abs(temp[:, 0] - boxes[:, 0]).astype(np.int)\n pad_left = np.abs(temp[:, 1] - boxes[:, 1]).astype(np.int)\n pad_bottom = np.abs(temp[:, 2] - boxes[:, 2]).astype(np.int)\n pad_right = np.abs(temp[:, 3] - boxes[:, 3]).astype(np.int)\n input_data = np.empty([boxes.shape[0], 3, height, width], dtype=np.float32)\n for i in range(boxes.shape[0]):\n crop_img = image[temp[i, 0]:temp[i, 2] + 1, temp[i, 1]:temp[i, 3] + 1, :]\n crop_img = cv2.copyMakeBorder(crop_img, pad_top[i], pad_bottom[i], \\\n pad_left[i], pad_right[i], cv2.BORDER_CONSTANT, value=0)\n if crop_img is None:\n continue\n crop_img = cv2.resize(crop_img, (width, height)).astype(np.float32)\n crop_img[:, :, 0] -= self.mean[0]\n crop_img[:, :, 1] -= self.mean[1]\n crop_img[:, :, 2] -= self.mean[2]\n crop_img *= self.scale_factor\n crop_img = np.transpose(crop_img, (2, 0, 1))\n input_data[i] = crop_img.copy()\n return input_data",
"def iter_patch_slices(dims, patch_size, start_pos=()):\n\n # ensure patchSize and startPos are the right length\n ndim = len(dims)\n patch_size = get_valid_patch_size(dims, patch_size)\n start_pos = ensure_tuple_size(start_pos, ndim)\n\n # collect the ranges to step over each dimension\n ranges = tuple(starmap(range, zip(start_pos, dims, patch_size)))\n\n # choose patches by applying product to the ranges\n for position in product(*ranges[::-1]): # reverse ranges order to iterate in index order\n yield tuple(slice(s, s + p) for s, p in zip(position[::-1], patch_size))",
"def pyramid_slice(x1,y1,x2,y2,z,delta,deltaz,taper_x,taper_y,taper_straight,layers):\r\n\tcutlist = []\r\n\ty_max = abs(y1-y2)\r\n\tfor a in range(layers):\r\n\t\ti = 0\r\n\t\tnew_x1, new_y1, new_x2, new_y2 = x1 - a*taper_x, y1-a*taper_straight, x2+a*taper_x, y2+a*taper_y\r\n\t\twhile abs(new_y1 - (y1 - a*taper_straight)) < y_max and x1 > 0:\r\n\t\t\tif i % 2 == 0:\r\n\t\t\t\tcutlist.append([\"jump\", f\"{new_x1:.6f}\", f\"{new_y1:.6f}\"])\r\n\t\t\t\tcutlist.append([\"mark\", f\"{new_x2:.6f}\", f\"{new_y1:.6f}\"])\r\n\t\t\telse:\r\n\t\t\t\tcutlist.append([\"jump\", f\"{new_x2:.6f}\", f\"{new_y1:.6f}\"])\r\n\t\t\t\tcutlist.append([\"mark\", f\"{new_x1:.6f}\", f\"{new_y1:.6f}\"])\r\n\t\t\tnew_y1 = new_y1-delta\r\n\t\t\ti = i + 1\r\n\t\tif a < layers - 1:\r\n\t\t\tcutlist.append([\"z_step\", str(-deltaz)])\r\n\t\ty_max = y_max - taper_straight - taper_y\r\n\r\n\treturn cutlist",
"def _get_slices(\n self,\n stride: int,\n patch_size: Tuple[int, int],\n img_size: Tuple[int, int],\n pad: int = None,\n ) -> Tuple[Dict[str, slice], int, int]:\n y_end, x_end = patch_size\n nrows, pady = self._get_margins(y_end, img_size[0], stride, pad=pad)\n ncols, padx = self._get_margins(x_end, img_size[1], stride, pad=pad)\n\n xyslices = {}\n for row in range(nrows):\n for col in range(ncols):\n y_start = row * stride\n y_end = y_start + patch_size[0]\n x_start = col * stride\n x_end = x_start + patch_size[1]\n xyslices[f\"y-{y_start}_x-{x_start}\"] = (\n slice(y_start, y_end),\n slice(x_start, x_end),\n )\n\n return xyslices, pady, padx",
"def create_patches_from_mask(image, mask, patchSize=32, pad=32, depth=1, searchSlices=None):\n rois = []\n images = []\n labels = []\n searchSlices = range(len(mask)) if searchSlices is None else searchSlices\n for i in searchSlices:\n # For each voxel, generate a ROI centered there\n if not np.any(mask[i]):\n continue\n xS, yS = np.nonzero(mask[i, :, :])\n xS -= xS % patchSize\n yS -= yS % patchSize\n allPatches = set(zip(xS, yS))\n for x, y in allPatches:\n patch = np.copy(\n # agafem el patch que ens interessa i agafem un contorn per si de cas (padding)\n # potser seria interessant reduir el padding (la quantitat de marge que deixem)\n # ara mateix tenim patches de 96, quan ens interessa el centre de 32 d'aquests\n image[i - depth: i + 1 + depth, x - pad:x + patchSize + pad, y - pad:y + patchSize + pad]\n )\n label = np.copy(\n # quan fem rotacio al fer data augmentation, ens volem assegurar d'estar treballant amb\n # el mateix\n mask[i: i + 1, x - pad: x + patchSize + pad, y - pad:y + patchSize + pad]\n )\n\n rois.append(np.array([x, y, i]))\n images.append(patch)\n labels.append(label)\n return rois, images, labels",
"def _segments2slices(array_size, grid_segments, patch_segments):\n patch_slices = [slice(start, stop) for start, stop in patch_segments]\n array_slices = []\n\n for start, stop in grid_segments:\n segment_size = max(abs(start), abs(stop))\n k = int(ceil(float(segment_size) / array_size) + (-1 if start >= 0 else 0))\n cell_mirrored = k % 2\n \n step = 1\n if start < 0:\n start = k * array_size + start\n stop = k * array_size + stop\n else:\n start = start - k * array_size\n stop = stop - k * array_size\n\n if cell_mirrored:\n start = array_size - start - 1\n stop = array_size - stop - 1\n step = -1\n\n if stop < 0:\n stop = None\n\n array_slices.append(slice(start, stop, step))\n \n return array_slices, patch_slices",
"def positions(self, tileID, numSamples):",
"def _split_image_into_tiles(\n self, image: np.ndarray\n ) -> t.Sequence[t.Tuple[t.Tuple[t.Any, ...], np.ndarray]]:\n h, w, c = image.shape\n tile_height = (\n math.ceil(h / (self._n_tiles // 2 - 1))\n if self._n_tiles > 4\n else math.ceil(h / (self._n_tiles // 2))\n )\n tile_width = math.ceil(w / (self._n_tiles // 2))\n tiles = [] # type: ignore\n for i in range(0, h, tile_height):\n for j in range(0, w, tile_width):\n tiles.append(\n (\n (i, i + tile_height, j, j + tile_width),\n image[i : i + tile_height, j : j + tile_width, :],\n )\n )\n return tiles",
"def test_2D_padding(self):\n field_names = [\"text\"]\n mask_field_names = ['text_mask']\n data_path = \"mldp/tests/data/news.csv\"\n pad_symbol = \"<PAD>\"\n mask_field_name_suffix = \"mask\"\n padding_modes = ['left', 'right', 'both']\n symbols_to_mask = [\"The\", \"a\", \"to\", \"as\"]\n axis = 1\n\n data_chunk = read_data_from_csv_file(data_path, sep=\"\\t\")\n\n # tokenize field values\n for fn in field_names:\n data_chunk[fn] = np.array([seq.split() for seq in data_chunk[fn]])\n\n for padding_mode, symbol_to_mask in product(padding_modes,\n symbols_to_mask):\n padder = Padder(field_names, pad_symbol=pad_symbol,\n new_mask_fname=mask_field_names,\n padding_mode=padding_mode, axis=axis,\n symbol_to_mask=symbol_to_mask)\n padded_data_chunk = padder(copy.deepcopy(data_chunk))\n\n for fn, mask_fn in zip(field_names, mask_field_names):\n padded_fv = padded_data_chunk[fn]\n mask = padded_data_chunk[mask_fn]\n original_fv = data_chunk[fn]\n\n self.assertTrue(len(padded_fv.shape) == 2)\n self._test_padded_values(original_field_values=original_fv,\n padded_field_values=padded_fv,\n mask=mask, pad_symbol=pad_symbol,\n symbol_to_mask=symbol_to_mask)"
] | [
"0.6320081",
"0.6216814",
"0.6195147",
"0.60652786",
"0.5944742",
"0.5944742",
"0.59005415",
"0.58932716",
"0.5860595",
"0.5857728",
"0.58525264",
"0.58328336",
"0.5808904",
"0.58026594",
"0.5777165",
"0.57505",
"0.5707814",
"0.5691599",
"0.5649974",
"0.5610359",
"0.5599954",
"0.5599462",
"0.5587051",
"0.5575704",
"0.55676395",
"0.55554867",
"0.55240136",
"0.5517933",
"0.5503913",
"0.55029774"
] | 0.6521748 | 0 |
Compute energy of a protein. | def compute_energy(self, protein):
return utils.score_pose(protein.pose, self.scorefxn) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def energy_tot(P,F,H,molecule):\n return energy_el(P,F,H) + energy_n(molecule)",
"def energy(self):\n energy = -0.5*np.sum(self.phi)+0.5*np.sum(self.mass*np.sqrt(self.particles.momentum[:,0]**2+self.particles.momentum[:,1]**2)**2)\n return energy",
"def energy(p,m):\n return math.sqrt(p*p + m*m)",
"def energy(data):\n return sum(pow(data, 2))",
"def energy_func(self):\n i = self.inl[0].to_flow()\n o = self.outl[0].to_flow()\n\n T_m = (T_mix_ph(i, T0=self.inl[0].T.val_SI) +\n T_mix_ph(o, T0=self.outl[0].T.val_SI)) / 2\n\n iam = (\n 1 - self.iam_1.val * abs(self.aoi.val) -\n self.iam_2.val * self.aoi.val ** 2)\n\n return (i[0] * (o[2] - i[2]) -\n self.A.val * (\n self.E.val * self.eta_opt.val * self.doc.val ** 1.5 * iam -\n (T_m - self.Tamb.val_SI) * self.c_1.val -\n self.c_2.val * (T_m - self.Tamb.val_SI) ** 2))",
"def energy(self):\n sum_energy = 0.0\n for i in range(0,self.natoms-1):\n for j in range(i+1,self.natoms):\n rij = (self.atoms[i].xyz - self.atoms[j].xyz)\n rij = rij - self.pbc_correction(rij)\n mag_rij = la.norm(rij)\n sum_energy = sum_energy + self.pair_energy(self.epsilon, self.sigma, mag_rij) \n return sum_energy",
"def energy(self):\n self.E = - np.sum(self.phi) + 0.5 * self.mass * np.sqrt((self.v_x ** 2 + self.v_y **2))",
"def energy(self):\n return self.mc.energy(self.chain)",
"def _calc_energy( self, V_a, eos_d ):\n pass",
"def get_energy(self):\n return self.momentum*self.momentum/(2*self.mass)",
"def energy_func(self):\n i = self.inl[0].to_flow()\n o = self.outl[0].to_flow()\n\n T_m = (T_mix_ph(i, T0=self.inl[0].T.val_SI) +\n T_mix_ph(o, T0=self.outl[0].T.val_SI)) / 2\n\n return (i[0] * (o[2] - i[2]) -\n self.A.val * (\n self.E.val * self.eta_opt.val -\n (T_m - self.Tamb.val_SI) * self.lkf_lin.val -\n self.lkf_quad.val * (T_m - self.Tamb.val_SI) ** 2))",
"def energy(n):\n return (n * pi * hbar / (2 * a)) ** 2 / (2 * m)",
"def energy(ps):\n return kinetic_energy(ps) + potential_energy(ps)",
"def energy(e: float) -> float:\n\n return (1/np.sqrt(2))*(gamma(-e/2+1/2)/(gamma(-e/2+3/4)))",
"def calcEnergy(self):\n speed_light = constants.physical_constants[\"speed of light in vacuum\"][0]#m/sec by default\n if self.mass is None:\n raise CoordinateVector(\"The particle mass needs to be specified to calculate the energy.\")\n return speed_light*math.sqrt(self.p*self.p + (self.mass*speed_light)**2)",
"def energy(self):\n return self._energy",
"def compute_energy(x, input_HP_sequence):\n # add code here, feel free to change the argument list\n # Given a input HP sequence, we already which points are H's.\n return U",
"def make_energy(self):\n @nb.njit\n def energy_func(m):\n heff = self.field(m)\n return -energy.zeeman(m, self.Ms, heff) \\\n + energy.shape_anisotropy(m, self.Ms, self.Nd[0], self.Nd[1], self.Nd[2]) \\\n + energy.uniaxial_anisotropy(m, self.u, self.Ku1, self.Ku2) \\\n + energy.cubic_anisotropy(m, self.c1, self.c2, self.c3,\n self.Kc1, self.Kc2, self.Kc3)\n self.energy = energy_func",
"def potentialEnergy(self):\n return 0.5*(pdist(self.positions)**2).sum()",
"def computeEnergy(self):\n\t\tGmo = self.Gmo\n\t\te = self.e\n\t\tself.Ec = 0.0\n\n\t\tfor i in range( self.nocc ):\n\t\t\tfor j in range( self.nocc ):\n\t\t\t\tfor a in range( self.nocc,self.norb ):\n\t\t\t\t\tfor b in range( self.nocc,self.norb ):\n\t\t\t\t\t\tself.Ec += 0.25*(Gmo[i,j,a,b]*Gmo[a,b,i,j])/(e[i]+e[j]-e[a]-e[b])\n\n\t\treturn self.E0 + self.Ec",
"def estimated_energy(self):\n energy = 0j\n for pauli_string, coef in self._pauli_coef_terms:\n a = self._zeros[pauli_string]\n b = self._ones[pauli_string]\n if a + b:\n energy += coef * (a - b) / (a + b)\n energy = complex(energy)\n if energy.imag == 0:\n energy = energy.real\n energy += self._identity_offset\n return energy",
"def energy(energy_name: str) -> float:\n pass",
"def getEnergy(self):\n energy = 0.0\n\n for i in range(0, self.nPoints):\n energy += self.tDomain[i] ** 2\n\n energy /= self.nPoints\n return energy",
"def energy(nx,ny):\n return 1+nx+ny",
"def compute_energy(self):\n energy = 0.5 * self.masses * np.sum(self.velocities * self.velocities, axis=1)\n avg_energy = np.mean(energy) # average kinetic energy of all particles\n return avg_energy",
"def make_energy(self):\n def energy_func(m):\n heff = self.field(m)\n return -energy.zeeman(m, self.Ms, heff) \\\n + energy.shape_anisotropy(m, self.Ms,\n self.Nd[0], self.Nd[1], self.Nd[2])\n self.energy = energy_func",
"def get_energy(self):\r\n return self._energy",
"def energy(self):\n return self._accelerator.energy",
"def energy(self) -> Union[int, float]:\n return self.proto.energy",
"def energy(self) -> Union[int, float]:\n return self.proto.energy"
] | [
"0.72012615",
"0.715897",
"0.71059984",
"0.70766",
"0.70726967",
"0.70589584",
"0.7055935",
"0.70316356",
"0.69816923",
"0.6975418",
"0.6911739",
"0.67674965",
"0.67373985",
"0.67346823",
"0.6701098",
"0.6694987",
"0.66668403",
"0.66624516",
"0.6649281",
"0.664188",
"0.66417664",
"0.6607923",
"0.6555147",
"0.6529706",
"0.6516192",
"0.65069836",
"0.6506693",
"0.6482177",
"0.64805716",
"0.64805716"
] | 0.82101876 | 0 |
Sample from possible fragments for a position, and replace torsion angles of that fragment in the protein. | def perturb_fragment(self, protein, position): # you may want to add more arguments
#check which fragments have already been sampled from the current position during the current step
chosen_indices = self.sampled_fragments[position]
fragments_to_sample_from = set(range(self.nfrags)) - chosen_indices
#choose a candidate fragment at this position, then add that candidate to the list of previously chosen fragments (during this step)
chosen_candidate = random.choice(list(fragments_to_sample_from))
self.sampled_fragments[position].add(chosen_candidate)
#after candidate fragment is chosen, make perturbed fragment and return
new_positions = self.candidate_frag_list[position][chosen_candidate]
perturbed_fragment = Protein(pose = protein.pose)
for i in range(len(new_positions)):
perturbed_fragment.set_torsion((position + i), new_positions[i][0], new_positions[i][1])
return perturbed_fragment | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _sample_angle(self):\n active_names = (\"N\", \"CA\", \"C\", \"O\", \"CB\", \"H\", \"HA\")\n selection = self.covalent_residue.select(\"name\", active_names)\n self.covalent_residue._active = False\n self.covalent_residue._active[selection] = True\n self.covalent_residue.update_clash_mask()\n active = self.covalent_residue.active\n angles = np.arange(\n -self.options.sample_angle_range,\n self.options.sample_angle_range + 0.001,\n self.options.sample_angle_step,\n )\n new_coor_set = []\n new_bs = []\n for coor in self._coor_set:\n self.covalent_residue.coor = coor\n rotator = CBAngleRotator(self.covalent_residue)\n for angle in angles:\n rotator(angle)\n coor = self.covalent_residue.coor\n if self.options.remove_conformers_below_cutoff:\n values = self.xmap.interpolate(coor[active])\n mask = self.covalent_residue.e[active] != \"H\"\n if np.min(values[mask]) < self.options.density_cutoff:\n continue\n if self.options.external_clash:\n if self._cd() or self.covalent_residue.clashes():\n continue\n elif self.covalent_residue.clashes():\n continue\n new_coor_set.append(self.covalent_residue.coor)\n new_bs.append(self.conformer.b)\n\n self._coor_set = new_coor_set\n self._bs = new_bs\n # logger.debug(f\"Bond angle sampling generated {len(self._coor_set)} conformers.\")",
"def switch_points(mutated_genome,index):\n point_index1 = random.randint(0,max(0,len(mutated_genome[index][2])-1))\n point_index2 = random.randint(0,max(0,len(mutated_genome[index][2])-1))\n temp = mutated_genome[index][2][point_index1]\n mutated_genome[index][2][point_index1] = mutated_genome[index][2][point_index2]\n mutated_genome[index][2][point_index2] = temp",
"def mutate_residue(pose, mutant_position, mutant_aa,\n pack_radius = 0.0, pack_scorefxn = '' ):\n #### a MutateResidue Mover exists similar to this except it does not pack\n #### the area around the mutant residue (no pack_radius feature)\n #mutator = MutateResidue(mutant_position, mutant_aa)\n #mutator.apply(test_pose)\n\n if pose.is_fullatom() == False:\n IOError( 'mutate_residue only works with fullatom poses' )\n\n\n # create a standard scorefxn by default\n if not pack_scorefxn:\n pack_scorefxn = rosetta.core.scoring.get_score_function()\n\n task = pyrosetta.standard_packer_task(pose)\n\n # the Vector1 of booleans (a specific object) is needed for specifying the\n # mutation, this demonstrates another more direct method of setting\n # PackerTask options for design\n aa_bool = rosetta.utility.vector1_bool()\n # PyRosetta uses several ways of tracking amino acids (ResidueTypes)\n # the numbers 1-20 correspond individually to the 20 proteogenic amino acids\n # aa_from_oneletter returns the integer representation of an amino acid\n # from its one letter code\n # convert mutant_aa to its integer representation\n mutant_aa = rosetta.core.chemical.aa_from_oneletter_code(mutant_aa)\n\n # mutation is performed by using a PackerTask with only the mutant\n # amino acid available during design\n # to do this, construct a Vector1 of booleans indicating which amino acid\n # (by its numerical designation, see above) to allow\n for i in range(1, 21):\n # in Python, logical expression are evaluated with priority, thus the\n # line below appends to aa_bool the truth (True or False) of the\n # statement i == mutant_aa\n aa_bool.append( i == int(mutant_aa) )\n\n # modify the mutating residue's assignment in the PackerTask using the\n # Vector1 of booleans across the proteogenic amino acids\n task.nonconst_residue_task(mutant_position\n ).restrict_absent_canonical_aas(aa_bool)\n\n # prevent residues from packing by setting the per-residue \"options\" of\n # the PackerTask\n restrict_non_nbrs_from_repacking(pose, mutant_position, task, pack_radius)\n\n # apply the mutation and pack nearby residues\n #print task\n packer = rosetta.protocols.simple_moves.PackRotamersMover(pack_scorefxn, task)\n packer.apply(pose)",
"def shuffle_points(mutated_genome,index):\n random.shuffle(mutated_genome[index][2])",
"def randomize(self, amplitude):\n from random import gauss\n # update alignment from projections\n self.getMarkersFromTiltSeries(self.TiltSeries_)\n self.getTranslationsFromTiltSeries(self.TiltSeries_)\n self.getRotationsFromTiltSeries(self.TiltSeries_)\n self.getMagnificationsFromTiltSeries(self.TiltSeries_)\n\n # add random amplitude\n # markers\n irefmark = self.TiltSeries_._TiltAlignmentParas.irefmark\n for (imark, mark) in enumerate(self._Markers):\n if (imark + 1) != irefmark:\n r = mark.get_r()\n for ii in range(0, 3):\n r[ii] = gauss(0, amplitude) + r[ii]\n self._Markers[imark].set_r(r)\n\n ireftilt = self.TiltSeries_._TiltAlignmentParas.ireftilt\n # translations, rotations, magnifications\n for itilt in range(0, self._ntilt):\n self._alignmentTransX[ii] = self._alignmentTransX[ii] + gauss(0, amplitude)\n self._alignmentTransY[ii] = self._alignmentTransY[ii] + gauss(0, amplitude)\n self._alignmentRotations[ii] = self._alignmentRotations[ii] + gauss(0, amplitude)\n if (self._projIndices[itilt] != ireftilt):\n self._alignmentMagnifications[ii] = self._alignmentMagnifications[ii] + gauss(0, amplitude)\n\n # update alignment from projections\n self.setMarkersInTiltSeries(self.TiltSeries_)\n self.getTranslationsFromTiltSeries(self.TiltSeries_)\n self.getRotationsFromTiltSeries(self.TiltSeries_)\n self.getMagnificationsFromTiltSeries(self.TiltSeries_)",
"def benchmarkRandomFragment( fasta, size ):\n\n contig, strand, start, end = fasta.getRandomCoordinates( size )\n s = fasta.getSequence( contig, strand, start, end )\n return s",
"def mutate_seq(genome):\n for var in genome.get_variants():\n if var.type == \"snp\":\n mutate_snp(genome, var)\n elif var.type == \"indel\":\n mutate_indel(genome, var)\n elif var.type == \"deletion\":\n mutate_deletion(genome, var)\n elif var.type == \"translocation origin\":\n mutate_trans_orig(genome, var)\n elif var.type == \"translocation insert\":\n mutate_trans_ins(genome, var)",
"def mutate_point_poly3(mutated_genome):\n seed = random.randint(0,7)\n index = random.randint(0,max(0,len(mutated_genome)-1))\n if len(mutated_genome[index][2]) < 1: seed = 0\n if seed == 0:\n insert_point(mutated_genome,index)\n elif seed == 1:\n remove_point(mutated_genome,index)\n elif seed == 2:\n switch_points(mutated_genome,index)\n elif seed == 3:\n shuffle_points(mutated_genome,index)\n elif seed == 4:\n move_point(mutated_genome,index)\n elif seed == 5:\n shift_point(mutated_genome,index)\n elif seed == 6:\n increment_point(mutated_genome,index)\n else: #seed == 7:\n decrement_point(mutated_genome,index)",
"def mutation(self):\n\n for r in range(self.pop_num*3, 5): # Mutation.\n for w in range(0,self.length): \n if random.random()<0.2: \n self.par_and_sons[r].A[w] = self.par_and_sons[r].A[w] + np.random.randint(-20, 20) # Offset + -20 pixels.",
"def translate_sequence(sequence, genetic_code = {'GUC': 'V', 'ACC': 'T', 'GUA': 'V', 'GUG': 'V', 'ACU': 'T', 'AAC': 'N', 'CCU': 'P', 'UGG': 'W', 'AGC': 'S', 'AUC': 'I', 'CAU': 'H', 'AAU': 'N', 'AGU': 'S', 'GUU': 'V', 'CAC': 'H', 'ACG': 'T', 'CCG': 'P', 'CCA': 'P', 'ACA': 'T', 'CCC': 'P', 'UGU': 'C', 'GGU': 'G', 'UCU': 'S', 'GCG': 'A', 'UGC': 'C', 'CAG': 'Q', 'GAU': 'D', 'UAU': 'Y', 'CGG': 'R', 'UCG': 'S', 'AGG': 'R', 'GGG': 'G', 'UCC': 'S', 'UCA': 'S', 'UAA': '*', 'GGA': 'G', 'UAC': 'Y', 'GAC': 'D', 'UAG': '*', 'AUA': 'I', 'GCA': 'A', 'CUU': 'L', 'GGC': 'G', 'AUG': 'M', 'CUG': 'L', 'GAG': 'E', 'CUC': 'L', 'AGA': 'R', 'CUA': 'L', 'GCC': 'A', 'AAA': 'K', 'AAG': 'K', 'CAA': 'Q', 'UUU': 'F', 'CGU': 'R', 'CGC': 'R', 'CGA': 'R', 'GCU': 'A', 'GAA': 'E', 'AUU': 'I', 'UUG': 'L', 'UUA': 'L', 'UGA': '*', 'UUC': 'F'}, start_pos = 0):\n #find first orf\n #first_orf_seq = find_first_orf(sequence)\n\n # ensure sequence is uppercase\n seq = sequence.upper()\n\n #translate the sequence\n protein = \"\"\n for i in range(0, len(seq) - (len(seq) % 3), 3):\n codon = seq[i:i + 3]\n if genetic_code[codon] == \"*\":\n break\n protein += genetic_code[codon]\n return protein",
"def mutate_seq(seq, block0, RNs):\n sequence = seq\n block = block0\n # get the number of changes in the FWR part and key part\n # for framework part, include the rate of silent mutations (75%), this\n # is not necessary for the explicitly modeled residues as changes there\n # can lead to replacement with the same AA still\n FWR_changes = np.random.binomial(cf.lAb, cf.p_err_FWR*0.75)\n CDR_changes = np.random.binomial(cf.nkey, cf.p_err_CDR)\n if FWR_changes > 0:\n # determine number of deadly muts and blockmuts in the non-death\n # branch (p_death + (1-p_death)*p_block + (1-p_death)*(1-p_block)=1)\n # 0 signifies deathly mutation, 1 signifies blocking mutation\n mutIDs = list(np.random.choice([0, 1, 2],\n p=[cf.p_death_FWR,\n (1-cf.p_death_FWR) * cf.p_block_FWR,\n (1-cf.p_death_FWR) *\n (1-cf.p_block_FWR)],\n size=FWR_changes))\n\n if 0 in mutIDs: # if deadly mutations happen, return no sequence\n return None, 0, 0\n elif 1 in mutIDs: # if block mutation happens, set block to true\n block = True\n # if the cell has not died yet, analyse mutations in the CDR region\n if CDR_changes > 0:\n # get non-repetitive positions where mutation will be attempted\n changepos = random.sample(range(cf.nkey), CDR_changes)\n for pos in changepos:\n # get transition probabilities for the current amino acid\n cumprob = np.cumsum(cf.tp20[sequence[pos] - 1])\n randi = RNs.getR()\n # find replacement codon\n for i in range(21): # 20 aa plus stop\n if randi < cumprob[i]:\n sequence[pos] = i + 1\n break\n # if stop codon was integrated into the sequence, return 0 as well\n if 21 in sequence:\n return None, 0, 0\n # only mutations of cells that survived are returnd for the counting\n return sequence, FWR_changes, block",
"def mutate_point_poly(mutated_genome):\n seed = random.randint(0,7)\n index = random.randint(0,max(0,len(mutated_genome)-1))\n if len(mutated_genome[index][2]) < 3: seed = 0\n if seed == 0:\n insert_point(mutated_genome,index)\n elif seed == 1:\n remove_point(mutated_genome,index)\n elif seed == 2:\n switch_points(mutated_genome,index)\n elif seed == 3:\n shuffle_points(mutated_genome,index)\n elif seed == 4:\n move_point(mutated_genome,index)\n elif seed == 5:\n shift_point(mutated_genome,index)\n elif seed == 6:\n increment_point(mutated_genome,index)\n else: #seed == 7:\n decrement_point(mutated_genome,index)",
"def anglor_psi(infile, sequence):\n return anglor(infile, sequence)",
"def mutate_random(DNA,AminoAcid,distance,pdic,rev,header,Random,outputpath):\r\n ##debug vals \r\n start = [] # list of start positions of mutations ( start means first mutation in balanced case)\r\n both = [] # start and end position\r\n fobj2= open(outputpath+header+\"_CompleteLog.txt\",\"a\")\r\n fobj2.write(\"BalancedMutation\"+\"\\t\"+\"NewAA\" + \"\\t\" + \"OldAA\"+\"\\t\"+\"NewAAPos\"+\"\\t\"+\"OldAAPos\" +\"\\t\"+ \"NewDNA\"+\"\\t\"+ \"OldDNA\"+ \"\\t\"+\"NewDNAPos\"+\"\\t\"+\"OldDNAPos\"+\"\\n\")\r\n fobj2.close()\r\n \r\n \r\n # generate start positions for mutation (the samplespace)\r\n samplespace = []\r\n for i in range (2,len(AminoAcid),distance/3):\r\n samplespace.append(i)\r\n \r\n \r\n ##random_modification\r\n if (Random ==1):\r\n r.shuffle(samplespace)\r\n else:\r\n pass\r\n \r\n dna_list = list(DNA)\r\n AminoAcid_list = list(AminoAcid)\r\n \r\n '''the lookup dictionary for the aa triplets '''\r\n lookup_dic = INI.createdic(AminoAcid)\r\n\r\n #gotit indicator if a possibility was found to revert the initial changes (start of mutation)\r\n gotit=False\r\n # stat variables\r\n succ_counter = 0\r\n fail_counter = 0 \r\n skip = 0\r\n \r\n ''' Main loop over the AminoAcid'''\r\n for i in samplespace:\r\n ''' no triplet left --> break '''\r\n if(i+2 >len(AminoAcid)):\r\n print(\"\\t(finished...exceeded length of AA)\")\r\n continue\r\n \r\n ''' AA which is going to be mutated'''\r\n AA = AminoAcid_list[i]\r\n \r\n '''index for dna : i*3 --> AminoAcid --> DNA\r\n #not i*3+3 because i starts at AA 2 since we need a right and left neighbor'''\r\n iprime = i*3\r\n \r\n '''AA and corresponding DNA triplet for the middle AA '''\r\n AA_triplet= AminoAcid_list[i-1]+AminoAcid_list[i]+AminoAcid_list[i+1]\r\n DNA_triplet = DNA[iprime:iprime+3]\r\n\r\n # get temporary list of all mutations. Iterate over it to find best possible substitution\r\n mutationsliste,aaliste = getMutation(AA,DNA_triplet)\r\n \r\n \r\n # isvalidposition returns 1 if the position isforbidden, else 0\r\n val = isvalidposition(pdic, iprime, distance)\r\n if (val ==1):\r\n skip+=1\r\n fobj2= open(outputpath+header+\"_CompleteLog.txt\",\"a\")\r\n fobj2.write(str(0)+\"\\t\"+new_AA_triplet + \"\\t\" + \"' '\"+\"\\t\"+str(i)+\"\\t\"+\"' '\" +\"\\t\"+ new_triplet+\"\\t\"+ \"' '\"+ \"\\t\"+str(iprime+position)+\"\\t\"+\"'(skipped)'\"+\"\\n\")\r\n fobj2.close()\r\n continue\r\n \r\n else:\r\n pass\r\n \r\n\r\n for q,item in enumerate(mutationsliste):\r\n \r\n if gotit==True:\r\n break\r\n else:\r\n pass\r\n \r\n ''' old and new variables for before/after the mutation '''\r\n new_triplet = mutationsliste[q]\r\n new_AA = aaliste[q]\r\n new_N,old_N,position = getdifference(DNA_triplet,new_triplet)\r\n new_AA_triplet = AA_triplet[0]+new_AA+AA_triplet[2]\r\n tempdic = pdic\r\n tempdic[iprime+position]=\"M\"\r\n \r\n if (new_AA_triplet in lookup_dic):\r\n '''templist--> contains all starting positions of the \"new_AA_triplet\" which we want to substitute back '''\r\n templist = lookup_dic[new_AA_triplet]\r\n \r\n \r\n # add potential mutation to dictionary\r\n tempposition = [iprime+position,\"M\"]\r\n for l in range(0,len(templist)):\r\n posi = templist[l]\r\n # i*3 --> protein nach DNA, +3 betrachten IMMER mittlere AA\r\n ''' suitable dna position found? 
'''\r\n if (new_triplet == dna_list[posi*3+3]+dna_list[posi*3+3+1]+dna_list[posi*3+3+2]):\r\n val = isvalidposition(tempdic, posi*3+3+position, distance)\r\n \r\n if (val ==1):\r\n skip+=1\r\n continue\r\n else:\r\n pass\r\n \r\n '''back substitution & do subs on 1st position'''\r\n pdic[posi*3+3+position]=\"R\"\r\n dna_list[posi*3+3+position]= old_N\r\n \r\n pdic[iprime+position]=\"M\"\r\n dna_list[iprime+position]= new_N\r\n \r\n AminoAcid_list[i]= new_AA\r\n AminoAcid_list[posi+1]= AA\r\n \r\n gotit = True\r\n succ_counter+=1\r\n #lookup_dic[new_AA_triplet] = [i for i in lookup_dic[new_AA_triplet] if i!=posi]\r\n lookup_dic[new_AA_triplet].remove(posi)\r\n \r\n '''writing the log file '''\r\n fobj= open(outputpath+header+\"_CompleteLog.txt\",\"a\")\r\n fobj.write(str(1)+\"\\t\"+AA_triplet + \"\\t\" + new_AA_triplet+\"\\t\"+str(i)+\"\\t\"+str(posi) +\"\\t\"+ DNA_triplet+\"\\t\"+ str(new_triplet)+ \"\\t\"+str(iprime+position)+\"\\t\"+str(posi*3+3+position)+\"\\n\")\r\n fobj.close()\r\n \r\n ## statistics\r\n start.append(iprime+position)\r\n both.extend([iprime+position,posi*3+3+position])\r\n break\r\n \r\n # no possible triplet positions for back substitution in lookup_dic \r\n else:\r\n continue\r\n \r\n # after loop \r\n if (gotit==False):\r\n fobj2= open(outputpath+header+\"_CompleteLog.txt\",\"a\")\r\n fobj2.write(str(0)+\"\\t\"+new_AA_triplet + \"\\t\" + \"' '\"+\"\\t\"+str(i)+\"\\t\"+\"' '\" +\"\\t\"+ new_triplet+\"\\t\"+ \"' '\"+ \"\\t\"+str(iprime+position)+\"\\t\"+\"'(tried)'\"+\"\\n\")\r\n fobj2.close()\r\n fail_counter+=1\r\n # reverse substitutions on? (=1) off (=0). If one dont change first mutation in the first place. Else: just change it.. \r\n if (rev==0):\r\n pdic[iprime+position]=\"M\"\r\n dna_list[iprime+position]= new_N\r\n AminoAcid_list[i]= new_AA\r\n start.append(iprime+position)\r\n both.extend([iprime+position]) \r\n elif (gotit==True):\r\n gotit = False\r\n \r\n # stats (INI.savepickle(pdic,header+\"_pdic_e\"))\r\n print(\"\\r\\n########Some stats:########\")\r\n print(\"DNA length:\\t\" + str(len(DNA)))\r\n print(\"max substitutions:\\t\" + str(len(DNA)/distance))\r\n print(\"#Balanced Mutations:\\t\" + str(succ_counter))\r\n \r\n \r\n return (\"\".join(dna_list))",
"def alignment_org(angle=0.1):\n proposal_id('2023_2', '311564_test')\n yield from alignement_gisaxs_multisample(angle=angle)\n RE.md['ai_0'] = piezo.th.user_setpoint.get()\n proposal_id('2023_2', '311564_Pettersson')",
"def make_fragments(path, target, path_to_rosetta):\n\n path_to_target = path + target + '.fasta'\n path_to_rosetta_script = path_to_rosetta + 'main/tools/fragment_tools/make_fragments.pl'\n target = parse_multifasta_file(path_to_target, 1)\n target_name, target_seq = next(target)\n subprocess.run([path_to_rosetta_script, '-verbose', path_to_target])\n os.rename('aat000_03_05.200_v1_3', target_name + '_3.frags')\n os.rename('aat000_09_05.200_v1_3', target_name + '_9.frags')\n shutil.move(target_name + '_3.frags', path + 'Modeling/')\n shutil.move(target_name + '_9.frags', path + 'Modeling/')",
"def mutate_point_circ(mutated_genome):\n seed = random.randint(0,3)\n index = random.randint(0,max(0,len(mutated_genome)-1))\n if seed == 0:\n move_point_circ(mutated_genome,index)\n elif seed == 1:\n shift_point_circ(mutated_genome,index)\n elif seed == 2:\n move_radius_circ(mutated_genome,index)\n else: #seed == 3:\n shift_radius_circ(mutated_genome,index)",
"def mutate(self, candidates, mutation_prob=0.2, mutation_type='cull'):\n mutants = []\n for cand in candidates:\n n = cand.enc_path.shape[0]\n mutant = Path()\n if mutation_type == 'cull':\n cull_mask = np.random.random(n) > mutation_prob\n cull_mask[0] = cull_mask[-1] = True # do not cull start or end points\n mutant.enc_path = cand.enc_path[cull_mask]\n mutants.append(mutant)\n elif mutation_type == 'monotone':\n # if np.random.random() < mutation_prob or n < 4:\n ix1 = np.random.randint((n // 2) - 1) + 1\n ix2 = np.random.randint((n // 2) + 1, n - 2)\n x0, y0 = cand.enc_path[ix1]\n x1, y1 = cand.enc_path[ix2]\n l = line(x0, y0, x1, y1)\n coords = np.vstack((l[0], l[1])).T.astype(int)\n coords_mask = np.random.random(coords.shape[0]) > mutation_prob\n coords = coords[coords_mask]\n mutant.enc_path = np.vstack((cand.enc_path[0:ix1], coords, cand.enc_path[ix2:]))\n mutants.append(mutant)\n # else:\n # mutants.append(cand)\n return mutants",
"def getplatepos(self, phi=0, chi=0, omega=0):\n\n #Save the specified angles in the structure\n angles = np.array([phi, chi, omega]);\n\n #We divvy up the phi rotation between the plate and the sample motor.\n #We round to the nearest multiple of the sample motor step size.\n self.sample_motor_phi = round(phi / self.sample_motor_step_size) * self.sample_motor_step_size\n #And the remainder is handled by the sample plate position.\n sample_plate_phi = phi - self.sample_motor_phi\n\n #This calculates the rotation matrix for the sample PLATE only.\n rot_M_plate = rotation_matrix(sample_plate_phi, chi, omega)\n\n #And this is the rotation matrix for the sample motor only\n rot_M_motor = rotation_matrix(self.sample_motor_phi, 0, 0)\n\n\n #X,Y,Z translation vector (in mm) to perform BEFORE moving the sample plate.\n #To calculate these, we use the relative_sample_position vector.\n translate_v = -self.relative_sample_position\n #But we have to correct for the sample motor phi rotation by rotating the translation\n #vector as well.\n translate_v = np.dot(rot_M_motor, translate_v)\n \n\n #------------------ SAMPLE PLATE ----------------------\n #3 vectors representing the position of the mounting points on the plate,\n #when it is horizontal and with the sample at 0\n #Remember, the plate is in the X-Z plane.\n\n #distance between center of plate and each mounting point.\n d = self.mounting_side_length / (2 * np.cos(pi / 6))\n #Distance to the edge on the other side\n d2 = np.sin(pi / 6) * d\n\n #Vectors representing the sample plate at the \"zero\" position.\n sample_plate_zero = np.column_stack(([self.mounting_side_length / 2, self.sample_plate_height, d2],\n [-self.mounting_side_length / 2, self.sample_plate_height, d2],\n [0, self.sample_plate_height, -d]))\n\n #------------------ OTHER USEFUL POINTS ----------------------\n #Vector representing the position of the middle of the sample plate.\n sample_middle = column([0, self.sample_plate_height, 0])\n\n #Make a vector representing the position of the sample at the end of the\n #pin.\n pin = self.relative_sample_position\n\n #Make vector to represent the sample motor orientation (at zero)\n self.motor_vector_length = 20\n motor = column([0, self.sample_plate_height, self.motor_vector_length])\n\n\n #------------------ APPLY TRANSFORMS ----------------------\n #For the sample plate: we do not apply the motor_phi rotation.\n \n #Do a translation of the position - we are moving the entire sample plate\n # This places the sample in the 0,0,0 position.\n sample_plate = get_translated_vectors(sample_plate_zero, translate_v)\n\n #Now do a rotation (phi,chi,omega)\n sample_plate = dot(rot_M_plate, sample_plate)\n\n #The pin rotates with the motor, then translates, then then rotates with the\n #sample plate.\n pin = dot(rot_M_motor, pin)\n pin = get_translated_vectors(pin, translate_v)\n pin = dot(rot_M_plate, pin)\n\n #Motor vector = same as pin.\n motor = dot(rot_M_motor, motor)\n motor = get_translated_vectors(motor, translate_v)\n motor = dot(rot_M_plate, motor)\n\n #Same for the sample_middle vector\n sample_middle = dot(rot_M_motor, sample_middle)\n sample_middle = get_translated_vectors(sample_middle, translate_v)\n sample_middle = dot(rot_M_plate, sample_middle)\n\n #Sample plate coordinates are:\n #i.e. x_A2, y_A2, x_B2, etc. 
(as written in Janik's notebook)\n\n #We want to find the positions of the other ends of the legs on the fixed\n #plate, x_A1, etc.\n fixed_plate = np.copy(sample_plate)\n\n #Legs A and B are fixed in their orientation along Z, and C along X, so we\n #know the Z_A1, Z_B1 and X_C1 positions on the FIXED plate are the same as\n #on the SAMPLE plate.\n\n #We also know the height of all these points, y = fixed_plate_height.\n fixed_plate[COORD_Y, :] = self.fixed_plate_height\n \n #This leaves x_A1, x_B1, and z_C1 to find.\n\n #Angle between the x direction and the (A1 to A2) vector formed by leg A\n theta_A = np.arcsin((sample_plate[COORD_Y, MOUNT_A] - self.fixed_plate_height) / self.leg_length)\n if theta_A > -pi / 2:\n #Force theta_A to be ~-120 degrees\n theta_A = -pi - theta_A\n \n\n #Angle between the x direction and the B1 to B2) vector formed by leg B\n theta_B = np.arcsin((sample_plate[COORD_Y, MOUNT_B] - self.fixed_plate_height) / self.leg_length)\n\n #We can easily calculate the x position from these\n x_A1 = sample_plate[COORD_X, MOUNT_A] - self.leg_length * cos(theta_A)\n x_B1 = sample_plate[COORD_X, MOUNT_B] - self.leg_length * cos(theta_B)\n\n fixed_plate[COORD_X, MOUNT_A] = x_A1\n fixed_plate[COORD_X, MOUNT_B] = x_B1\n\n\n #Finally we find the position of Leg C\n phi_C = np.arcsin((sample_plate[COORD_Y, MOUNT_C] - self.fixed_plate_height) / self.leg_length)\n if phi_C < -pi / 2:\n #Force phi_C to be ~-60 degrees\n phi_C = 2*pi + phi_C\n\n #Now we calc. the Z position of leg C on the fixed plate.\n z_C1 = sample_plate[COORD_Z, MOUNT_C] - self.leg_length * cos(phi_C)\n fixed_plate[COORD_Z, MOUNT_C] = z_C1\n\n\n #Assign these plate position in the goniometer object, which is returned\n self.sample_plate = sample_plate\n self.fixed_plate = fixed_plate\n self.sample_plate_zero = sample_plate_zero\n\n #Also return the pin and motor vectors\n self.pin = pin\n self.motor = motor\n self.sample_middle = sample_middle",
"def mutate(self, number_of_mutations):\n self.mutated.clear()\n mutations = []\n for i in range(number_of_mutations+1):\n old_gene = random.choice(self.genes)\n while old_gene in mutations:\n old_gene = random.choice(self.genes)\n # print(self.max_time)\n old_gene.start_time = random.choice(range(self.max_time - old_gene.finish))\n self.mutated.append(self.genes.index(old_gene))",
"def paired_interval_extend(uniq_fragment,fragment_cov,gtf_dic):\n out_dic = {}\n total_reads = 0\n for key in uniq_fragment.keys():\n chr_no = key[0]\n #print (frag_start,frag_end)\n frag_strand = key[3]\n interval_comp = uniq_fragment[key][0]\n complete_info = uniq_fragment[key][1]\n frag_cov = fragment_cov[key]\n total_reads += frag_cov\n geneNA = 'NA'\n geneType = 'NA'\n geneRegion = 'NA'\n flag = 0\n for trans in gtf_dic[(chr_no,frag_strand)]:\n frag_start,frag_end = key[1:3]\n # for trans in gtf_dic[('chr1','-')]:\n # if chr_no == 'chr1' and frag_strand == '-':\n if frag_start > trans[0] and frag_end < trans[1]:\n #print 'Hello!'\n # print (trans)\n geneNA = trans[4]\n geneType = trans[5]\n if geneType == 'protein_coding':\n CDS_start,CDS_end = trans[2:4]\n if frag_start >= CDS_start and frag_end <= CDS_end:\n geneRegion = 'CDS'\n elif frag_strand == '+':\n if frag_end <= CDS_start:\n geneRegion = '5UTR'\n elif frag_start < CDS_start and frag_end > CDS_start:\n geneRegion = '5UTR-CDS'\n elif frag_start < CDS_end and frag_end > CDS_end:\n geneRegion = 'CDS-3UTR'\n elif frag_start >= CDS_end:\n geneRegion = '3UTR'\n elif frag_strand == '-':\n if frag_end <= CDS_start:\n geneRegion = '3UTR'\n elif frag_start < CDS_start and frag_end > CDS_start:\n geneRegion = 'CDS-3UTR'\n elif frag_start < CDS_end and frag_end > CDS_end:\n geneRegion = '5UTR-CDS'\n elif frag_start >= CDS_end:\n geneRegion = '5UTR'\n else:\n geneRegion = 'Null'\n # print (frag_start,frag_end,CDS_start,CDS_end,geneNA,geneRegion)\n#------------------------------------------------------------------------------ intersect of fragments interval and exons interval\n frag_intersect = interval_comp & trans[-1]\n interval_comp_length = sum([interval_comp[a].upper- interval_comp[a].lower for a in range(0,len(interval_comp))])\n # print (interval_comp)\n # print (frag_intersect)\n#------------------------------------------------------------------------------ fragments located in introns\n if frag_intersect == P.empty(): \n flag = 1\n start_out = []\n length_out = []\n for interval_region in list(interval_comp):\n start_out.append(str(int(interval_region.lower - frag_start)))\n length_out.append(str(int(interval_region.upper - interval_region.lower)))\n out_dic.setdefault((chr_no,frag_start,frag_end,frag_strand),[]).append((chr_no,str(frag_start),str(frag_end),\\\n geneNA,geneType,frag_strand,\\\n str(frag_start),str(frag_end),'intron',str(len(start_out)),\\\n ','.join(length_out),','.join(start_out),str(frag_cov),flag,complete_info))\n else:\n if complete_info == 'complete':\n flag = 3\n #print interval_comp\n#------------------------------------------------------------------------------ reduce alignment noise\n frag_intersect_length = sum([frag_intersect[a].upper-frag_intersect[a].lower for a in range(0,len(frag_intersect))])\n absolute_diff = abs(frag_intersect_length-interval_comp_length)\n if absolute_diff == 0:\n#------------------------------------------------------------------------------ \n start_region = []\n length_region = []\n for region in frag_intersect:\n start_region.append(str(int(region.lower - frag_start)))\n length_region.append(str(int(region.upper - region.lower)))\n out_dic.setdefault((chr_no,frag_start,frag_end,frag_strand),[]).append((chr_no,str(frag_start),str(frag_end),\\\n geneNA,geneType,frag_strand,\\\n str(frag_start),str(frag_end),geneRegion,str(len(start_region)),\\\n ','.join(length_region),','.join(start_region),str(frag_cov),flag,complete_info))\n else:\n start_region = []\n length_region = []\n 
for region in interval_comp:\n start_region.append(str(int(region.lower - frag_start)))\n length_region.append(str(int(region.upper - region.lower)))\n out_dic.setdefault((chr_no,frag_start,frag_end,frag_strand),[]).append((chr_no,str(frag_start),str(frag_end),geneNA,geneType,\\\n frag_strand,str(frag_start),str(frag_end),'intron-containing',str(len(start_region)),\\\n ','.join(length_region),','.join(start_region),str(frag_cov),flag,complete_info))\n else:\n #print interval_comp\n #print frag_intersect\n#------------------------------------------------------------------------------ fragments boundaries located in exons\n #print frag_intersect[0][0],frag_start,frag_intersect[-1][1],frag_end\n #print abs_position\n # print (P.closedopen(frag_start,frag_end),trans[-1])\n interval_update = P.closedopen(frag_start,frag_end) & trans[-1]\n # print (interval_update)\n frag_trans_length = sum([interval_update[a].upper-interval_update[a].lower for a in range(0,len(interval_update))])\n absolute_diff = abs(frag_trans_length-interval_comp_length)\n #print absolute_diff\n #print geneRegion\n #print interval_comp\n #print abs_position\n if absolute_diff <= 300: #insert sequence length <=200nt\n #print frag_trans_length,interval_comp_length\n #print geneRegion\n flag = 2\n start_out = []\n length_out = []\n for interval_region in list(interval_update):\n start_out.append(str(int(interval_region.lower - frag_start)))\n length_out.append(str(int(interval_region.upper - interval_region.lower)))\n out_dic.setdefault((chr_no,frag_start,frag_end,frag_strand),[]).append((chr_no,str(frag_start),str(frag_end),\\\n geneNA,geneType,frag_strand,\\\n str(frag_start),str(frag_end),geneRegion,str(len(start_out)),\\\n ','.join(length_out),','.join(start_out),str(frag_cov),flag,complete_info))\n else:\n # print (trans)\n flag = 1\n start_out = []\n length_out = []\n for interval_region in list(interval_comp):\n start_out.append(str(int(interval_region.lower - frag_start)))\n length_out.append(str(int(interval_region.upper - interval_region.lower)))\n out_dic.setdefault((chr_no,frag_start,frag_end,frag_strand),[]).append((chr_no,str(frag_start),str(frag_end),\\\n geneNA,geneType,frag_strand,\\\n str(frag_start),str(frag_end),'intron-containing',str(len(start_out)),\\\n ','.join(length_out),','.join(start_out),str(frag_cov),flag,complete_info))\n if flag == 0:\n start_out = []\n length_out = []\n for interval_region in list(interval_comp):\n start_out.append(str(int(interval_region.lower - frag_start)))\n length_out.append(str(int(interval_region.upper - interval_region.lower)))\n out_dic[(chr_no,frag_start,frag_end,frag_strand)] = [(chr_no,str(frag_start),str(frag_end),'intergenic','intergenic',frag_strand,\\\n str(frag_start),str(frag_end),geneRegion,str(len(start_out)),\\\n ','.join(length_out),','.join(start_out),str(frag_cov),flag,complete_info)]\n print ('Total treated fragments: ' + str(total_reads))\n return out_dic",
"def mutation(population):\r\n global decryption_key\r\n\r\n bases = ['A', 'C', 'G', 'T']\r\n alter_dna_table = alter_dna_bases(bases)\r\n\r\n decryption_key += mutation_table_del + str(alter_dna_table) + mutation_table_del\r\n\r\n new_population = []\r\n for chromosome in population:\r\n decryption_key += chromosome_del\r\n\r\n # apply the complement\r\n b_chromosome = dna_to_bits(chromosome, utils.dna_base_to_two_bits_table)\r\n decryption_key += complement_mutation_del\r\n point1 = random.randint(0, len(b_chromosome) - 1)\r\n point2 = random.randint(point1, len(b_chromosome) - 1)\r\n decryption_key += \"(%s, %s)\" % (point1, point2)\r\n decryption_key += complement_mutation_del\r\n b_chromosome = complement(b_chromosome, point1, point2)\r\n\r\n # convert each 4 bits in chromosome to two dna bases using four_bits_to_two_dna_base_table\r\n four_bits_vector = group_bits(b_chromosome, 4)\r\n\r\n last_dna_base = None\r\n # if the last element is of length 2, don't convert it\r\n if len(four_bits_vector[len(four_bits_vector) - 1]) == 2:\r\n last_dna_base = utils.two_bits_to_dna_base_table[four_bits_vector[len(four_bits_vector) - 1]]\r\n\r\n # convert only the 4 bits elements\r\n four_bits_vector = four_bits_vector[:-1]\r\n\r\n dna_seq = bits_to_dna(four_bits_vector, utils.four_bits_to_two_dna_base_table)\r\n if last_dna_base is not None:\r\n dna_seq += last_dna_base\r\n\r\n # and then alter the dna bases between point1 and point2\r\n decryption_key += alter_mutation_del\r\n point1 = random.randint(0, len(dna_seq) - 1)\r\n point2 = random.randint(point1, len(dna_seq) - 1)\r\n decryption_key += \"(%s, %s)\" % (point1, point2)\r\n decryption_key += alter_mutation_del\r\n new_chromosome = \"\"\r\n for i in range(len(dna_seq)):\r\n if i >= point1 and i <= point2:\r\n new_chromosome += alter_dna_table[dna_seq[i]]\r\n else:\r\n new_chromosome += dna_seq[i]\r\n\r\n new_population.append(new_chromosome)\r\n\r\n decryption_key += chromosome_del\r\n\r\n return new_population",
"def __init__(self, protein, fragment_set, nmers, start_temp, end_temp, nfrags, anneal_rate):\n \n #store variables used to initiate the object\n self.scorefxn = create_score_function('score3')\n self.current_protein = protein\n self.best_protein = protein\n self.my_fragment_set = fragment_set\n self.nmers = nmers\n self.nfrags = nfrags\n self.end_temp = end_temp\n self.anneal_rate = anneal_rate\n \n #initialize some starter values (for tracking later...)\n self.current_energy = self.compute_energy(self.current_protein)\n self.current_iteration = 0\n self.current_T = start_temp\n\n \n #initialize a data structure to keep track of which fragments (at a given position) have been sampled already during each sampling step\n self.sampled_fragments = {}\n \n #create a dictionary of nfrag candidate fragments for each position in the sequence\n self.candidate_frag_list = {}\n for position in range(1, protein.length-self.nmers+1):\n self.candidate_frag_list[position] = self.my_fragment_set.get_lowRMS_fragments(position, nfrags)\n \n #for reporting information to the log file later\n self.temperature = [self.current_T]\n self.iteration = [self.current_iteration]\n self.energy = [self.current_energy]",
"def randomize_trajectory(self):\n self.angle = randint(-360, 360)\n self.speed = randint(1, 5)/2.5",
"def add_mutations(fragments,\n max_mutations=10, seednr=42,\n verbose=True,\n out=sys.stdout, err=sys.stderr):\n frgs = []\n\n # add point mutated sequences to the fragment list\n seed(seednr) # make sure repeated runs produce the same mutated sequences\n frgs = []\n divisor = int(len(fragments)/min(10, len(fragments)))\n for i, fragment in enumerate(fragments):\n frgs.append(fragment)\n for num_mutations in range(1, max_mutations+1):\n mut_fragment = dict()\n for field in fragment.keys():\n if field == 'sequence':\n mut_fragment[field] = mutate_sequence(fragment[field],\n num_mutations)\n elif field == 'num_pointmutations':\n mut_fragment[field] = num_mutations\n else:\n mut_fragment[field] = fragment[field]\n frgs.append(mut_fragment)\n if verbose:\n if i % divisor == 0:\n err.write('.')\n if verbose:\n err.write(' done.\\n')\n if verbose:\n out.write(('% 8i fragments generated with 0 to %i '\n 'point mutations.\\n') % (len(frgs), max_mutations))\n\n return frgs",
"def metropolis_step(self, positions):\n \"\"\"with brute-force sampling of new positions.\"\"\"\n\n # r = random.random()*random.choice((-1, 1))\n # r is a random number drawn from the uniform prob. dist. in [0,1]\n r = np.zeros(self.num_d)\n for i in range(self.num_d):\n r[i] = np.random.uniform(-1, 1)\n # Pick a random particle\n random_index = np.random.randint(0, high=len(positions))\n new_positions = np.array(positions)\n new_random_position = new_positions[random_index, :]\n # Suggest a new move\n new_positions[random_index, :] = new_random_position + r*self.delta_R\n # Old system and wavefunction\n wavefunction = self.w.wavefunction(positions)\n old_wavefunction_squared = wavefunction**2\n\n # Test the new position with a new system and wavefunction\n # sys_test = System(self.num_p, self.num_d)\n # sys_test.positions_distances(new_positions)\n # alpha = self.w.alpha\n # beta = self.w.beta\n # a = self.w.a\n # wave_test = Wavefunction(self.num_p, self.num_d, alpha, beta, a, sys_test)\n # test_wavefunction = wave_test.wavefunction(new_positions)\n test_wavefunction = self.w.wavefunction(new_positions)\n\n new_wavefunction_squared = test_wavefunction**2\n # print ('Old = ', positions)\n\n if new_wavefunction_squared <= 1e-14:\n pass\n else:\n # acceptance_ratio = self.w.wavefunction_ratio(positions,\n # new_positions)\n acceptance_ratio = new_wavefunction_squared/old_wavefunction_squared\n epsilon = np.random.sample()\n\n if acceptance_ratio > epsilon:\n positions = new_positions\n # print ('New = ', positions)\n # self.s.distances_update(positions, random_index)\n # self.s.positions_distances(new_positions)\n self.c += 1.0\n\n else:\n pass\n\n return positions",
"def mutate(self):\n #mutation_size = max(1,int(round(random.gauss(15,4))))/100\n\n\n\n mutation_size = max(1,int(round(random.gauss(15,4))))/100\n \"\"\"\n Changed the mutation by using random.randint rather than the gaussian one \n after observing that the gaussian random never really gave an output of more than 0.25\n \"\"\"\n\n #Decide what will be mutated, just randomly picking onr of the three params\n mutation_type = random.choice(self.params)\n\n #Mutate the thing\n if mutation_type == \"diameter\":\n \"\"\"\n Over here, what we are providing a range between self.diameter*x where x=1-mutation size and self.diameter*y where =1+mutation size\n Basically we add or subtract from 1 because the mutation has to be small\n \"\"\"\n self.diameter = max(1,random.randint(int(self.diameter*(1-mutation_size)),int(self.diameter*(1+mutation_size))))\n return self.diameter\n #same thing here\n elif mutation_type == \"pos\":\n x = max(0,random.randint(int(self.pos.x*(1-mutation_size)),int(self.pos.x*(1+mutation_size))))\n y = max(0,random.randint(int(self.pos.y*(1-mutation_size)),int(self.pos.y*(1+mutation_size))))\n self.pos = Point(min(x,self.size[0]),min(y,self.size[1]))\n return self.pos\n elif mutation_type == \"color\":\n r = min(max(0,random.randint(int(self.color.r*(1-mutation_size)),int(self.color.r*(1+mutation_size)))),255)\n g = min(max(0,random.randint(int(self.color.g*(1-mutation_size)),int(self.color.g*(1+mutation_size)))),255)\n b = min(max(0,random.randint(int(self.color.b*(1-mutation_size)),int(self.color.b*(1+mutation_size)))),255)\n self.color = Color(r,g,b)\n return self.color",
"def define_translocations(genome, num, nc):\n start = []\n end = []\n for n in range(num):\n start_pos = random.randint(100,len(genome.seq)-5100) # positions 100bp from start or end will not be variable\n end_pos = start_pos + random.randint(500,5000)\n start.append(start_pos)\n end.append(end_pos)\n\n if nc: # if non-conservative translocations specified\n del_start = []\n del_end = []\n nc_pos = [p for p in range(0, len(start))]\n for n in range(0,len(start),2): # 50:50 chance that half will be non-conserved\n if not del_start or random.randint(0,1) == 0: # ensures at least 1 will be non-conserved\n length = len(nc_pos)\n pop_pos = random.randint(0,length-1)\n idx = nc_pos.pop(pop_pos)\n nc_size = random.randint(100, ((end[idx]-start[idx])//2)-1) # size between 100 and half the translocation size\n start_pos = end[idx]-nc_size\n end_pos = end[idx]\n del_start.append(start_pos)\n del_end.append(end_pos)\n end[idx] = start_pos\n # add new deletion Variants to genome list\n var = Variant(\"deletion\", start_pos, end_pos, start_pos-end_pos)\n genome.add_variant(var)\n # add new deletions to unavail list\n for j in range(start_pos, end_pos):\n genome.unavail_pos.append(j)\n\n # add translocation Variants to genome list\n for v in range(len(start)):\n pos = get_trans_pos(genome) # get new position\n # add either side of insertion point to unavail list\n genome.unavail_pos.append(pos-1)\n genome.unavail_pos.append(pos)\n genome.unavail_pos.append(pos+1)\n # add translocated region to unavail list\n for j in range(start[v], end[v]):\n genome.unavail_pos.append(j)\n # add Variant to genome's variant list\n var = Variant(\"translocation origin\", start[v], pos, end[v]-start[v])\n genome.add_variant(var)\n var = Variant(\"translocation insert\", pos, start[v], end[v]-start[v])\n genome.add_variant(var)",
"def fragSeq(seq, ion):\n iontype = ion[0]\n ionnr = int(ion[1:])\n frag = seq[0:ionnr] if iontype == 'b' else seq[len(seq) - ionnr:]\n assert len(frag) == ionnr\n return frag",
"def _propose_atoms_in_order(self, atom_group):\n atom_torsions= []\n logp = []\n assert len(atom_group) == len(set(atom_group)), \"There are duplicate atom indices in the list of atom proposal indices\"\n while len(atom_group) > 0:\n #initialise an eligible_torsions_list\n eligible_torsions_list = list()\n\n for atom_index in atom_group:\n\n # Find the shortest path up to length four from the atom in question:\n shortest_paths = nx.algorithms.single_source_shortest_path(self._residue_graph, atom_index, cutoff=4)\n\n # Loop through the destination and path of each path and append to eligible_torsions_list\n # if destination has a position and path[1:3] is a subset of atoms with positions\n for destination, path in shortest_paths.items():\n\n # Check if the path is length 4 (a torsion) and that the destination has a position. Continue if not.\n if len(path) != 4 or destination not in self._atoms_with_positions_set:\n continue\n\n # If the last atom is in atoms with positions, check to see if the others are also.\n # If they are, append the torsion to the list of possible torsions to propose\n if set(path[1:3]).issubset(self._atoms_with_positions_set):\n eligible_torsions_list.append(path)\n\n assert len(eligible_torsions_list) != 0, \"There is a connectivity issue; there are no torsions from which to choose\"\n #now we have to randomly choose a single torsion\n ntorsions = len(eligible_torsions_list)\n random_torsion_index = np.random.choice(range(ntorsions))\n random_torsion = eligible_torsions_list[random_torsion_index]\n\n #append random torsion to the atom_torsions and remove source atom from the atom_group\n chosen_atom_index = random_torsion[0]\n first_old_atom_index = random_torsion[1]\n atom_torsions.append(random_torsion)\n atom_group.remove(chosen_atom_index)\n\n #add atom to atoms with positions and corresponding set\n self._atoms_with_positions_set.add(chosen_atom_index)\n\n #add a bond from the new to the previous torsion atom in the _reference_connectivity_graph\n self._reference_connectivity_graph.add_edge(chosen_atom_index, first_old_atom_index)\n\n #add the log probability of the choice to logp\n logp.append(np.log(1./ntorsions))\n\n # Ensure that logp is not ill-defined\n assert len(logp) == len(atom_torsions), \"There is a mismatch in the size of the atom torsion proposals and the associated logps\"\n\n return atom_torsions, logp"
] | [
"0.5305515",
"0.5180681",
"0.51104534",
"0.5014608",
"0.48726195",
"0.48163992",
"0.4806955",
"0.47817487",
"0.4777187",
"0.47461727",
"0.47372264",
"0.47323832",
"0.46817002",
"0.46801928",
"0.46705428",
"0.46659943",
"0.4642765",
"0.46316916",
"0.4625718",
"0.46244594",
"0.46223778",
"0.46216044",
"0.4620269",
"0.46081144",
"0.46056053",
"0.46048373",
"0.45892525",
"0.45891705",
"0.45792097",
"0.45776406"
] | 0.6773049 | 0 |
Anneal temperature using exponential annealing schedule. Consider kT to be a single variable (i.e. ignore Boltzmann constant) | def anneal_temp(self, T):
new_T = self.anneal_rate * T
return(new_T) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _k(self, T):\n RT = Rgas * T\n return (self.parameters.A1 / np.exp(self.parameters.E1 / RT),\n self.parameters.A2 / np.exp(self.parameters.E2 / RT))",
"def exponential(t, eta_init, last_eta, d = 0.01):\n return eta_init*np.exp(-d*t)",
"def exp(t,tau):\n return np.exp(-t/tau)",
"def state(k, x, t):\n return exp(1j*(k*x - omega*t))",
"def Fermi(En,T):\n ev = 1.60218e-19\n kb = 1.380e-23\n return 1/(1+np.exp(En/(kb*T/ev)))",
"def band_energy(k,t=1.0,e0=0.2,a=1.0):\n return e0-t*np.exp(1j*k*a)-t*np.exp(-1j*k*a)",
"def prescribed_surface_temperature(x, t, K_medium, rho_medium, c_medium, T_medium_initial, T_external_applied):\n k = get_kappa(K_medium, rho_medium, c_medium)\n return (T_external_applied - T_medium_initial)*erfc(x/(2*np.sqrt(k*t))) + T_medium_initial",
"def Boltzmann(En,T):\n ev = 1.60218e-19\n kb = 1.380e-23\n return np.exp(-En/(kb*T/ev))",
"def T_e(self, value: u.K):\n try:\n value = value.to(u.K, equivalencies=u.temperature_energy())\n except (AttributeError, u.UnitsError, u.UnitConversionError):\n raise ParticleError(\"Invalid temperature.\") from None\n else:\n if value < 0 * u.K:\n raise ParticleError(\"T_e cannot be negative.\")\n self._T_e = value",
"def temperature(k, kmax):\n return 1.0 / 500 * (1.0 / k - 1.0 / kmax)",
"def es_from_t(t):\n return 0.6108 * np.exp((17.27 * t) / (t + 237.3))",
"def Eg_fct_T(Eg0,alpha,beta,T) :\n return Eg0-((T*T*alpha*1e-3)/(beta+T))",
"def N_TT_EE(self, L):\n if L>2.*min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n # integrand\n def integrand(x):\n theta = x[1]\n l1 = np.exp(x[0])\n l2 = self.l2(L, l1, theta)\n if l2<self.CMB.lMin or l2>min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n phi = self.phi(L, l1, theta)\n result = self.F_EE(l1, l2, phi)*self.CMB.ftotalTE(l1)*self.CMB.ftotalTE(l2)\n result += self.F_EE(l2, l1, -phi)*self.CMB.ftotalTE(l1)*self.CMB.ftotalTE(l2)\n result *= self.F_TT(l1, l2, phi)\n result *= l1**2\n result /= (2.*np.pi)**2\n result *= 2.\n return result\n \n # if first time, initialize integrator\n if not hasattr(self.N_TT_TE.__func__, \"integ\"):\n self.N_TT_TE.__func__.integ = vegas.Integrator([[np.log(self.CMB.lMin), np.log(min(self.CMB.lMaxT, self.CMB.lMaxP))], [0., np.pi]])\n self.N_TT_TE.integ(integrand, nitn=8, neval=1000)\n result = self.N_TT_TE.integ(integrand, nitn=1, neval=5000)\n return result.mean",
"def evaluate(self, _t):\n\n temp = self.init_temp*np.exp(-1.0*self.exp_const*_t)\n\n if temp < self.min_temp:\n temp = self.min_temp\n\n return temp",
"def calculate_axial_transport(my_cell, t):\n\n phi_si, phi_se, phi_di, phi_de, phi_sm, phi_dm = my_cell.membrane_potentials()\n\n j_Na_diff_i = my_cell.j_k_diff(my_cell.D_Na, my_cell.lamda_i, my_cell.Na_si, my_cell.Na_di)*my_cell.A_i*N_A\n Na_akkum_diff_i = scipy.integrate.cumtrapz(j_Na_diff_i, t, initial=0)\n\n j_Na_drift_i = my_cell.j_k_drift(my_cell.D_Na, my_cell.Z_Na, my_cell.lamda_i, my_cell.Na_si, my_cell.Na_di, phi_si, phi_di)*my_cell.A_i*N_A\n Na_akkum_drift_i = scipy.integrate.cumtrapz(j_Na_drift_i, t, initial=0)\n\n j_K_diff_i = my_cell.j_k_diff(my_cell.D_K, my_cell.lamda_i, my_cell.K_si, my_cell.K_di)*my_cell.A_i*N_A\n K_akkum_diff_i = scipy.integrate.cumtrapz(j_K_diff_i, t, initial=0)\n\n j_K_drift_i = my_cell.j_k_drift(my_cell.D_K, my_cell.Z_K, my_cell.lamda_i, my_cell.K_si, my_cell.K_di, phi_si, phi_di)*my_cell.A_i*N_A\n K_akkum_drift_i = scipy.integrate.cumtrapz(j_K_drift_i, t, initial=0)\n\n j_Cl_diff_i = my_cell.j_k_diff(my_cell.D_Cl, my_cell.lamda_i, my_cell.Cl_si, my_cell.Cl_di)*my_cell.A_i*N_A\n Cl_akkum_diff_i = scipy.integrate.cumtrapz(j_Cl_diff_i, t, initial=0)\n\n j_Cl_drift_i = my_cell.j_k_drift(my_cell.D_Cl, my_cell.Z_Cl, my_cell.lamda_i, my_cell.Cl_si, my_cell.Cl_di, phi_si, phi_di)*my_cell.A_i*N_A\n Cl_akkum_drift_i = scipy.integrate.cumtrapz(j_Cl_drift_i, t, initial=0)\n\n j_Ca_diff_i = my_cell.j_k_diff(my_cell.D_Ca, my_cell.lamda_i, my_cell.free_Ca_si, my_cell.free_Ca_di)*my_cell.A_i*N_A\n Ca_akkum_diff_i = scipy.integrate.cumtrapz(j_Ca_diff_i, t, initial=0)\n\n j_Ca_drift_i = my_cell.j_k_drift(my_cell.D_Ca, my_cell.Z_Ca, my_cell.lamda_i, my_cell.free_Ca_si, my_cell.free_Ca_di, phi_si, phi_di)*my_cell.A_i*N_A\n Ca_akkum_drift_i = scipy.integrate.cumtrapz(j_Ca_drift_i, t, initial=0)\n\n j_e_diff_i = (j_Na_diff_i + j_K_diff_i + 2*j_Ca_diff_i - j_Cl_diff_i)\n j_e_drift_i = (j_Na_drift_i + j_K_drift_i + 2*j_Ca_drift_i - j_Cl_drift_i)\n e_akkum_diff_i = (Na_akkum_diff_i*my_cell.Z_Na + K_akkum_diff_i*my_cell.Z_K + Cl_akkum_diff_i*my_cell.Z_Cl + Ca_akkum_diff_i*my_cell.Z_Ca)\n e_akkum_drift_i = (Na_akkum_drift_i*my_cell.Z_Na + K_akkum_drift_i*my_cell.Z_K + Cl_akkum_drift_i*my_cell.Z_Cl + Ca_akkum_drift_i*my_cell.Z_Ca)\n\n j_Na_diff_e = my_cell.j_k_diff(my_cell.D_Na, my_cell.lamda_e, my_cell.Na_se, my_cell.Na_de)*my_cell.A_e*N_A\n Na_akkum_diff_e = scipy.integrate.cumtrapz(j_Na_diff_e, t, initial=0)\n\n j_Na_drift_e = my_cell.j_k_drift(my_cell.D_Na, my_cell.Z_Na, my_cell.lamda_e, my_cell.Na_se, my_cell.Na_de, phi_se, phi_de)*my_cell.A_e*N_A\n Na_akkum_drift_e = scipy.integrate.cumtrapz(j_Na_drift_e, t, initial=0)\n\n j_K_diff_e = my_cell.j_k_diff(my_cell.D_K, my_cell.lamda_e, my_cell.K_se, my_cell.K_de)*my_cell.A_e*N_A\n K_akkum_diff_e = scipy.integrate.cumtrapz(j_K_diff_e, t, initial=0)\n\n j_K_drift_e = my_cell.j_k_drift(my_cell.D_K, my_cell.Z_K, my_cell.lamda_e, my_cell.K_se, my_cell.K_de, phi_se, phi_de)*my_cell.A_e*N_A\n K_akkum_drift_e = scipy.integrate.cumtrapz(j_K_drift_e, t, initial=0)\n\n j_Cl_diff_e = my_cell.j_k_diff(my_cell.D_Cl, my_cell.lamda_e, my_cell.Cl_se, my_cell.Cl_de)*my_cell.A_e*N_A\n Cl_akkum_diff_e = scipy.integrate.cumtrapz(j_Cl_diff_e, t, initial=0)\n\n j_Cl_drift_e = my_cell.j_k_drift(my_cell.D_Cl, my_cell.Z_Cl, my_cell.lamda_e, my_cell.Cl_se, my_cell.Cl_de, phi_se, phi_de)*my_cell.A_e*N_A\n Cl_akkum_drift_e = scipy.integrate.cumtrapz(j_Cl_drift_e, t, initial=0)\n\n j_Ca_diff_e = my_cell.j_k_diff(my_cell.D_Ca, my_cell.lamda_e, my_cell.Ca_se, my_cell.Ca_de)*my_cell.A_e*N_A\n Ca_akkum_diff_e = scipy.integrate.cumtrapz(j_Ca_diff_e, t, 
initial=0)\n\n j_Ca_drift_e = my_cell.j_k_drift(my_cell.D_Ca, my_cell.Z_Ca, my_cell.lamda_e, my_cell.Ca_se, my_cell.Ca_de, phi_se, phi_de)*my_cell.A_e*N_A\n Ca_akkum_drift_e = scipy.integrate.cumtrapz(j_Ca_drift_e, t, initial=0)\n\n j_e_diff_e = (j_Na_diff_e + j_K_diff_e + 2*j_Ca_diff_e - j_Cl_diff_e)\n j_e_drift_e = (j_Na_drift_e + j_K_drift_e + 2*j_Ca_drift_e - j_Cl_drift_e)\n e_akkum_diff_e = (Na_akkum_diff_e*my_cell.Z_Na + K_akkum_diff_e*my_cell.Z_K + Cl_akkum_diff_e*my_cell.Z_Cl + Ca_akkum_diff_e*my_cell.Z_Ca)\n e_akkum_drift_e = (Na_akkum_drift_e*my_cell.Z_Na + K_akkum_drift_e*my_cell.Z_K + Cl_akkum_drift_e*my_cell.Z_Cl + Ca_akkum_drift_e*my_cell.Z_Ca)\n\n return j_e_drift_i, j_e_diff_i, e_akkum_drift_i, e_akkum_diff_i, Na_akkum_drift_i, Na_akkum_diff_i, K_akkum_drift_i, K_akkum_diff_i, Cl_akkum_drift_i, Cl_akkum_diff_i, Ca_akkum_drift_i, Ca_akkum_diff_i, \\\n j_e_drift_e, j_e_diff_e, e_akkum_drift_e, e_akkum_diff_e, Na_akkum_drift_e, Na_akkum_diff_e, K_akkum_drift_e, K_akkum_diff_e, Cl_akkum_drift_e, Cl_akkum_diff_e, Ca_akkum_drift_e, Ca_akkum_diff_e",
"def get_kt(temps, delta_gibbs_ts):\n # rate coefficient from Eyring equation\n return KB / H * temps * np.exp(-delta_gibbs_ts / RG / temps) # [1/s] if unimolecular",
"def fit_ar1_t(t, y):\n lntau0 = np.log(np.mean(np.diff(t)))\n sigma = np.std(y)\n yr = y - np.mean(y)\n nlnp = lambda lntau, sigma: -1.0 * ar1_t_like(t, yr, np.exp(lntau), sigma)\n res = minimize(nlnp, lntau0, args=(sigma,), method='Nelder-Mead')\n tau = np.exp(res.x.squeeze())\n return tau, sigma",
"def update_temperature(self):\n self.iteration += 1 \n self.T = self.T0 * 0.9935**self.iteration",
"def Ernst_T1(TR, alpha_e):\n return -TR / np.log(np.cos(alpha_e))",
"def temperature_energy():\n e = _si.e.value\n k_B = _si.k_B.value\n return Equivalency(\n [(si.K, si.eV, lambda x: x / (e / k_B), lambda x: x * (e / k_B))],\n \"temperature_energy\",\n )",
"def kA_func(self):\n\n i1 = self.inl[0].to_flow()\n i2 = self.inl[1].to_flow()\n o1 = self.outl[0].to_flow()\n o2 = self.outl[1].to_flow()\n\n T_i1 = T_bp_p(i1)\n T_i2 = T_mix_ph(i2, T0=self.inl[1].T.val_SI)\n T_o1 = T_mix_ph(o1, T0=self.outl[0].T.val_SI)\n T_o2 = T_mix_ph(o2, T0=self.outl[1].T.val_SI)\n\n if T_i1 <= T_o2 and not self.inl[0].T.val_set:\n T_i1 = T_o2 + 0.5\n if T_i1 <= T_o2 and not self.outl[1].T.val_set:\n T_o2 = T_i1 - 0.5\n\n if T_o1 <= T_i2 and not self.outl[0].T.val_set:\n T_o1 = T_i2 + 1\n if T_o1 <= T_i2 and not self.inl[1].T.val_set:\n T_i2 = T_o1 - 1\n\n td_log = ((T_o1 - T_i2 - T_i1 + T_o2) /\n np.log((T_o1 - T_i2) / (T_i1 - T_o2)))\n\n return i1[0] * (o1[2] - i1[2]) + self.kA.val * td_log",
"def schedule(t: int, max_temperature: float = 1.0, decay_constant: float = 0.005) -> float:\n return max_temperature * np.exp(-decay_constant * t)",
"def _etaE(self,x):\n return self._etaE_cool(x) + self._etaE_hot(x)",
"def price_heston_mc(kappa_,theta_,sigma_,rho_,r_,T_,L_,V0_,S0_,K0_,N_):\r\n esp_ = monte_carlo(kappa_,theta_,sigma_,rho_,r_,T_,L_,V0_,S0_,K0_,N_)\r\n return exp(-r_*T_)*esp_",
"def add_energy(self, T=0, beta=0, Mkep=0, mu=1, gamma=5/3., Tmin=0., AU=1):\n \n if self.npart == 0:\n self.u = array([])\n return\n\n print(' Adding temperature...')\n\n radii = norm(self.pos - self.center, axis=1)\n \n if T < 0:\n print(' Setting Temperature according to c_s(r)**2, with c_s(r) = h * v_kep(r),')\n if not hasattr(self, 'v_kep'): self.v_kep = sqrt(Mkep * G / radii)\n c_s = self.h * self.v_kep\n if gamma <= 0: gamma_cs = 1\n else: gamma_cs = gamma\n if mu <= 0:\n if gamma == 1.4: mu_cs = 2.01 # Adiabatic molecular\n else: mu_cs = 1.005 # Adiabatic/Isothermal atomic\n\n print(' using gamma = {:.3f}, and mu = {:.3f}...'.format(gamma_cs, mu_cs)) \n \n T = c_s**2 * mu_cs * mP / (gamma_cs * KB)\n\n else:\n T = full(self.npart, T * (radii / AU)**beta)\n\n\n T[T < Tmin] = Tmin \n \n # This following settings are still being tested if correct\n \n if mu <= 0:\n print(' Calculating mu used in code...')\n mu = full(self.npart, 0.5) # Ionized\n mu[T < 1.e4] = 1.005 # Atomic\n mu[T < 2.e3] = 2.01 # Molecular (Optional)\n\n if gamma <= 0:\n print(' Calculating gamma used in code...')\n gamma = full(self.npart, 5/3.) # Adiabatic monoatomic\n gamma[T < 2.e3] = 1.4\t # Adiabatic diatomic\n\n factor = KB / (mP * mu * (gamma - 1))\n \n if max(T) == min(T): print(\" Temperature: {} [K]\".format(T[0]))\n else:\n print(' Maximum temperature: {:.2f} [K]'.format(max(T)))\n print(' Minimum temperature: {:.2f} [K]'.format(min(T)))\n \n\n self.u = T * factor",
"def calculate_boltzmann_average(energy, temperature, kb=0.0019872041):\n beta = 1 / (kb * temperature)\n F = np.array(energy)\n Ptot = np.exp(-F * beta)\n P = Ptot / Ptot.sum()\n F_avg = (P * F).sum()\n return F_avg",
"def kA_func(self):\n i1 = self.inl[0].to_flow()\n i2 = self.inl[1].to_flow()\n o1 = self.outl[0].to_flow()\n o2 = self.outl[1].to_flow()\n\n T_i1 = T_mix_ph(i1, T0=self.inl[0].T.val_SI)\n T_i2 = T_mix_ph(i2, T0=self.inl[1].T.val_SI)\n T_o1 = T_mix_ph(o1, T0=self.outl[0].T.val_SI)\n T_o2 = T_mix_ph(o2, T0=self.outl[1].T.val_SI)\n\n if T_i1 <= T_o2:\n T_i1 = T_o2 + 0.01\n if T_i1 <= T_o2:\n T_o2 = T_i1 - 0.01\n if T_i1 <= T_o2:\n T_o1 = T_i2 + 0.02\n if T_o1 <= T_i2:\n T_i2 = T_o1 - 0.02\n\n td_log = ((T_o1 - T_i2 - T_i1 + T_o2) /\n np.log((T_o1 - T_i2) / (T_i1 - T_o2)))\n\n return i1[0] * (o1[2] - i1[2]) + self.kA.val * td_log",
"def kA_func(self):\n i, o = self.inl[0].to_flow(), self.outl[0].to_flow()\n\n ttd_1 = T_mix_ph(i, T0=self.inl[0].T.val_SI) - self.Tamb.val_SI\n ttd_2 = T_mix_ph(o, T0=self.outl[0].T.val_SI) - self.Tamb.val_SI\n\n if ttd_1 > ttd_2:\n td_log = (ttd_1 - ttd_2) / np.log(ttd_1 / ttd_2)\n elif ttd_1 < ttd_2:\n td_log = (ttd_2 - ttd_1) / np.log(ttd_2 / ttd_1)\n else:\n td_log = 0\n\n return i[0] * (o[2] - i[2]) + self.kA.val * td_log",
"def MZ(t, M0, T1):\n return M0 * (1.0 - 2 * np.exp(-t / T1))",
"def linear_heat_transfer(x, t, K_medium, rho_medium, c_medium, T_medium_initial, H_heat_transfer, T_external_applied):\n k = get_kappa(K_medium, rho_medium, c_medium)\n\n h = H_heat_transfer/K_medium\n erfc_factor_1 = erfc(x/(2*np.sqrt(k*t)))\n\n #combine factors in logdomain, since the exp-factors quickly approach\n #infinity while erfc-factor goes to zero\n log_exp_factor_1 = h*x\n log_exp_factor_2 = k*t*h**2\n log_erfc_factor_2 = np.log(erfc(x/(2*np.sqrt(k*t)) + h*np.sqrt(k*t)))\n exp_erfc_factor = np.exp(log_exp_factor_1 + log_exp_factor_2 + log_erfc_factor_2)\n\n return (erfc_factor_1 - exp_erfc_factor)*(T_external_applied - T_medium_initial) + T_medium_initial"
] | [
"0.70223296",
"0.6454653",
"0.6285607",
"0.6272474",
"0.62450045",
"0.6214381",
"0.6204918",
"0.6186438",
"0.6113547",
"0.60832334",
"0.6067471",
"0.60490257",
"0.6045893",
"0.6033196",
"0.599369",
"0.5920794",
"0.5913871",
"0.5907448",
"0.58919245",
"0.5879924",
"0.5873256",
"0.587249",
"0.58637595",
"0.5860357",
"0.58442307",
"0.5840971",
"0.579638",
"0.5789026",
"0.57867795",
"0.5781587"
] | 0.6998116 | 1 |
Run ONE full MCMC simulation from start_temp to end_temp. Be sure to save the best (lowest-energy) structure, so you can access it after. It is also a good idea to track certain variables during the simulation (temp, energy, and more). | def simulate(self):
#loop to perform additional steps until the current temperature is no longer greater than the ending_temperature
while self.current_T >= self.end_temp:
self.step(self.current_T)
#log various parameters that changed in the MCMCSampler object after a single step
self.temperature.append(self.current_T)
self.iteration.append(self.current_iteration)
self.energy.append(self.current_energy)
#return a pandas dataframe that will hold all of the information requested above
log_table = pd.DataFrame(list(zip(self.iteration, self.energy, self.temperature)), columns =['iteration', 'energy', 'temperature'])
return(log_table) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def main():\n\n print(\"\\nBeginning simulation: current date and time {}\\n\".format(datetime.datetime.now()))\n\n # Initialise the particles, potential and energy array\n particles = np.random.rand(n_particles, 3) * L\n lj_pot = np.zeros((n_particles, n_particles))\n energy = np.zeros(n_steps + 1)\n\n # Calculate the initial energies and then do the MCMC iterations and *hopefully* converge\n particles, lj_pot, energy = initial_energy(particles, lj_pot, energy)\n particles, lj_pot, energy = mcmc(particles, lj_pot, energy)\n pressure = compute_pressure(particles)\n\n return particles, lj_pot, energy, pressure",
"def _step(self, temperature, mc_args):\n self._log(\"Current temperature {}K. Current chemical_potential:\"\n \" {} eV/atom\".format(\n int(temperature), mc_args[\"chem_potential\"]))\n\n thermo = []\n for i, sgc in enumerate(self._sgc_obj):\n self._log(\"Running MC for system {}\".format(i))\n sgc.T = temperature\n sgc.runMC(**mc_args)\n thermo.append(sgc.get_thermodynamic())\n return thermo",
"def run(self, temp_params={}):\r\n # continuous testing of inputs\r\n if self.testing_unit.testing_level > 1 and not self.testing_unit.c_test_step_inp(temp_params, self.__locals):\r\n raise ValueError(\"run won't run, input's aren't valid.\")\r\n\r\n # continuous testing of functional inputs\r\n if self.testing_unit.testing_level > 0:\r\n for key, val in temp_params.items():\r\n if key in [\"population_function\", \"mutate_function\", \"cross_function\", \"weighting_bias\"]:\r\n if not [self.testing_unit.c_test_weighting_bias][[\"weighting_bias\"].index(key)](val):\r\n raise ValueError(\"Bad \" + key + \" input. See log or raise testing verbosity.\")\r\n\r\n # set the single run locals\r\n run_locals = copy.deepcopy(self.__locals)\r\n for param, value in temp_params.items():\r\n run_locals[param] = value\r\n\r\n # sets random seed\r\n if run_locals[\"seed\"] is None:\r\n np.random.seed()\r\n else:\r\n np.random.seed(run_locals[\"seed\"])\r\n\r\n # initialize all arrays\r\n # current state of particles\r\n particles = np.full((run_locals[\"population_size\"], 2, run_locals[\"point_count\"], len(run_locals[\"axes\"])), -1, dtype=float)\r\n # best fitness value achieved\r\n best_fitness = np.zeros((run_locals[\"population_size\"]))\r\n # state that each particle is when it found its best fitness value (old)\r\n # best_state = np.full((run_locals[\"population_size\"], run_locals[\"point_count\"], len(run_locals[\"axes\"])), -1, dtype=float)\r\n # initialize particles numbers\r\n for j in range(particles.shape[3]):\r\n particles[:, 0, :, j] = np.random.uniform(low=run_locals[\"axes\"][j][0], high=run_locals[\"axes\"][j][1],\r\n size=(particles.shape[0], particles.shape[2]))\r\n if not run_locals[\"starting_velocity_ranges\"]:\r\n particles[:, 1, :, j] = 0 # zero starting velocity\r\n else:\r\n raise NotImplementedError(\"Functional starting velocities aren't implemented yet\")\r\n\r\n # state that each particle is when it found its best fitness value\r\n best_state = np.copy(particles[:, 0])\r\n\r\n # this generally shouldn't be a program, can be removed after some use where this value error doesn't appear\r\n if self.testing_unit.testing_level > 1: # the rare in implementation testing\r\n for i in particles.flatten():\r\n if i == -1:\r\n raise ValueError(\"This program didn't properly initialize its particles array\")\r\n\r\n if run_locals[\"end_condition\"] == \"time_constraint\":\r\n start_time = time.time() # initial starting time\r\n last_update = time.time() # last time an update was printed (for verbosity > 1 only)\r\n\r\n while time.time() < start_time + run_locals[\"time_constraint\"]:\r\n if self.verbosity > 3:\r\n print(\"loop high verb: \")\r\n print(particles)\r\n print(\"----\")\r\n print(best_state)\r\n print(\"----\")\r\n print(best_fitness)\r\n elif self.verbosity > 1:\r\n if time.time() < last_update + GLOBAL_TIME_UPDATE_FREQUENCY:\r\n print(\"update\")\r\n last_update = time.time()\r\n\r\n # run the step function\r\n particles, best_state, best_fitness = self.step(particles, best_state, best_fitness, run_locals)\r\n\r\n # once done return the best state\r\n return best_state[np.argmax(best_fitness, axis=0)]\r\n elif run_locals[\"end_condition\"] == \"generations\":\r\n for _ in range(run_locals[\"generations\"]):\r\n if self.verbosity > 3:\r\n print(particles)\r\n print(best_state)\r\n print(best_fitness)\r\n elif self.verbosity > 1:\r\n print(\"update\")\r\n\r\n # run the step function\r\n particles, best_state, best_fitness = self.step(particles, best_state, best_fitness, 
run_locals)\r\n\r\n # once done return the best state\r\n return best_state[np.argmax(best_fitness, axis=0)]\r\n else:\r\n raise ValueError(\"End condition incorrectly set\")",
"def run(self):\n\n # If this was a tanh model or some such thing, we're already done.\n if self.is_phenom:\n return\n if self.is_complete:\n print(\"Already ran simulation!\")\n return\n\n # Need to generate radiation backgrounds first.\n if self.pf['radiative_transfer']:\n self.medium.field.run()\n self._f_Jc = self.medium.field._f_Jc\n self._f_Ji = self.medium.field._f_Ji\n self._f_Jlw = self.medium.field._f_Jlw\n else:\n self._f_Jc = lambda z: 0.0\n self._f_Ji = lambda z: 0.0\n self._f_Jlw = lambda z: 0.0\n\n # Start timer\n t1 = time.time()\n\n tf = self.medium.tf\n self.medium._insert_inits()\n\n pb = self.pb = ProgressBar(tf, use=self.pf['progress_bar'],\n name='gs-21cm')\n\n # Lists for data in general\n self.all_t, self.all_z, self.all_data_igm, self.all_data_cgm, \\\n self.all_RC_igm, self.all_RC_cgm = \\\n self.medium.all_t, self.medium.all_z, self.medium.all_data_igm, \\\n self.medium.all_data_cgm, self.medium.all_RCs_igm, self.medium.all_RCs_cgm\n\n # Add zeros for Ja\n for element in self.all_data_igm:\n element['Ja'] = np.zeros(self.grid.dims)\n element['Jc'] = np.zeros(self.grid.dims)\n element['Ji'] = np.zeros(self.grid.dims)\n element['Jlw'] = np.zeros(self.grid.dims)\n\n # List for extrema-finding\n self.all_dTb = self._init_dTb()\n for t, z, data_igm, data_cgm, rc_igm, rc_cgm in self.step():\n\n # Occasionally the progress bar breaks if we're not careful\n if z < self.pf['final_redshift']:\n break\n if z < self.pf['kill_redshift']:\n break\n\n # Delaying the initialization prevents progressbar from being\n # interrupted by, e.g., PrintInfo calls\n if not pb.has_pb:\n pb.start()\n\n pb.update(t)\n\n # Save data\n self.all_z.append(z)\n self.all_t.append(t)\n self.all_dTb.append(data_igm['dTb'][0])\n self.all_data_igm.append(data_igm.copy())\n self.all_RC_igm.append(rc_igm.copy())\n\n if self.pf['include_cgm']:\n self.all_data_cgm.append(data_cgm.copy())\n self.all_RC_cgm.append(rc_cgm.copy())\n\n # Automatically find turning points\n if self.pf['track_extrema']:\n if self.track.is_stopping_point(self.all_z, self.all_dTb):\n break\n\n pb.finish()\n\n self.history_igm = _sort_history(self.all_data_igm, prefix='igm_',\n squeeze=True)\n\n if self.pf['include_cgm']:\n self.history_cgm = _sort_history(self.all_data_cgm, prefix='cgm_',\n squeeze=True)\n else:\n self.history_cgm = {}\n\n self.history = self.history_igm.copy()\n self.history.update(self.history_cgm)\n\n ##\n # In the future, could do this better by only calculating Ja at\n # the end, since it a passive quantity (unless we included its\n # very small heating).\n ##\n #if self.pf['secondary_lya']:\n # xe = lambda zz: np.interp(zz, self.history['z'][-1::-1],\n # self.history['igm_e'][-1::-1])\n # self.medium.field.run(xe=xe)\n # self._f_Ja = self.medium.field._f_Ja\n # #self._f_Jlw = self.medium.field._f_Jlw\n #\n # # Fix Ja in history\n\n self.history['dTb'] = self.history['igm_dTb']\n #self.history['dTb_bulk'] = self.history['igm_dTb_bulk']\n\n self.history['Ts'] = self.history['igm_Ts']\n self.history['Jc'] = self.history['igm_Jc']\n self.history['Ji'] = self.history['igm_Ji']\n self.history['Ja'] = self.history['igm_Jc'] + self.history['igm_Ji']\n self.history['Jlw'] = self.history['igm_Jlw']\n\n # Save rate coefficients [optional]\n if self.pf['save_rate_coefficients']:\n self.rates_igm = \\\n _sort_history(self.all_RC_igm, prefix='igm_', squeeze=True)\n self.rates_cgm = \\\n _sort_history(self.all_RC_cgm, prefix='cgm_', squeeze=True)\n\n self.history.update(self.rates_igm)\n 
self.history.update(self.rates_cgm)\n\n self.history['t'] = np.array(self.all_t)\n self.history['z'] = np.array(self.all_z)\n\n ##\n # Optional extra radio background\n ##\n Tr = np.zeros_like(self.history['z'])\n for popid, pop in enumerate(self.pops):\n if not pop.is_src_radio:\n continue\n\n z, E, flux = self.field.get_history(popid, flatten=True)\n\n E21cm = h_p * nu_0_mhz * 1e6 / erg_per_ev\n f21 = interp1d(E, flux, axis=1, bounds_error=False,\n fill_value=0.0, force_scipy=True)\n flux_21cm = f21(E21cm)\n\n Tr += np.interp(self.history['z'], z, flux_21cm) \\\n * E21cm * erg_per_ev * c**2 / k_B / 2. / (nu_0_mhz * 1e6)**2\n\n if not np.all(Tr == 0):\n assert self.medium.parcel_igm.grid.hydr.Tbg is None\n elif self.medium.parcel_igm.grid.hydr.Tbg is not None:\n Tr = self.medium.parcel_igm.grid.hydr.Tbg(self.history['z'])\n\n self.history['Tr'] = Tr\n\n # Correct the brightness temperature if there are non-CMB backgrounds\n if not np.all(Tr == 0):\n zall = self.history['z']\n n_H = self.medium.parcel_igm.grid.cosm.nH(zall)\n Ts = self.medium.parcel_igm.grid.hydr.Ts(zall,\n self.history['igm_Tk'], self.history['Ja'],\n self.history['igm_h_2'], self.history['igm_e'] * n_H, Tr)\n\n if self.pf['floor_Ts']:\n Ts = max(Ts, self.medium.parcel_igm.grid.hydr.Ts_floor(z=zall))\n\n # Compute volume-averaged ionized fraction\n xavg = self.history['cgm_h_2'] \\\n + (1. - self.history['cgm_h_2']) * self.history['igm_h_2']\n\n # Derive brightness temperature\n dTb = self.medium.parcel_igm.grid.hydr.get_21cm_dTb(zall, Ts,\n xavg=xavg, Tr=Tr)\n\n self.history['dTb_no_radio'] = self.history['dTb'].copy()\n self.history['dTb'] = dTb\n\n #self.history['dTb_bulk'] = \\\n # self.medium.parcel_igm.grid.hydr.dTb(zall, 0.0, Ts, Tr)\n\n t2 = time.time()\n\n self.timer = t2 - t1\n\n self.is_complete = True",
"def step(self, particles, best_state, best_fitness, run_locals):\r\n # continuous testing of inputs\r\n if self.testing_unit.testing_level > 1 and not self.testing_unit.c_test_step_inp(particles,\r\n best_state,\r\n best_fitness,\r\n run_locals):\r\n raise ValueError(\"step won't run, input's aren't valid.\")\r\n # apply the fitness function to get this generations fitness values\r\n fitness = np.empty((particles.shape[0]))\r\n #fitness = np.apply_along_axis(run_locals[\"fitness_function\"], 0, particles[:, 0, :, :]) # hopefully works\r\n for i in range(particles.shape[0]):\r\n fitness[i] = run_locals[\"fitness_function\"](particles[i, 0])\r\n\r\n # find any personal improvements\r\n better = best_fitness < fitness\r\n # set them\r\n best_fitness[better] = fitness[better]\r\n # set their states\r\n best_state[better] = particles[better, 0]\r\n\r\n # find highest of group\r\n best_of_group = np.argmax(best_fitness, axis=0)\r\n\r\n if self.verbosity > 6: # some random high verbosity outputs that were once used for debugging, might give ideas\r\n print(\"step high verb: \")\r\n print(particles[0])\r\n print(particles[:, 1].shape)\r\n print(best_state.shape)\r\n print(np.repeat(best_state[best_of_group][np.newaxis, :], particles[:, 1].shape[0], axis=0).shape)\r\n\r\n # run calculation for the velocity calculation\r\n # Maurice Clerc. Standard Particle Swarm Optimisation. 2012. hal-00764996\r\n particles[:, 1] = (run_locals[\"PSO_VELOCITY_WEIGHT\"] * particles[:, 1] +\r\n run_locals[\"PSO_INDIVIDUAL_WEIGHT\"] * np.random.rand(particles[:, 0].shape[0],\r\n particles[:, 0].shape[1],\r\n particles[:, 0].shape[2]) *\r\n (best_state - particles[:, 0]) +\r\n run_locals[\"PSO_GROUP_WEIGHT\"] * np.random.rand(particles[:, 0].shape[0],\r\n particles[:, 0].shape[1],\r\n particles[:, 0].shape[2]) *\r\n (best_state[best_of_group] - particles[:, 0]))\r\n\r\n # run calculation for point calculation\r\n particles[:, 0] = particles[:, 0] + particles[:, 1]\r\n #if True and ((particles[:, 0] < np.array(run_locals[\"axes\"])[:, 0]).any() or \\\r\n # (particles[:, 0] > np.array(run_locals[\"axes\"])[:, 1]).any()):\r\n #print(particles[:, 0].shape)\r\n #mask = np.logical_or(particles[:, 0] < np.array(run_locals[\"axes\"])[:, 0],\r\n # particles[:, 0] > np.array(run_locals[\"axes\"])[:, 1])\r\n #print(particles.shape)\r\n #print(np.arange(particles.shape[0]).shape)\r\n #print(np.arange(particles.shape[0])[mask])\r\n #print(particles[np.argmax(mask), 1])\r\n # clip the particles to be within the axes\r\n particles[:, 0] = np.clip(particles[:, 0],\r\n np.array(run_locals[\"axes\"])[:, 0],\r\n np.array(run_locals[\"axes\"])[:, 1])\r\n #if self.globi < 10:\r\n # self.glob[self.globi] = particles[0, 0, 0, 0]\r\n # self.guub[self.globi] = particles[0, 1, 0, 0]\r\n # self.glub[self.globi] = best_state[best_of_group][0, 0]\r\n # self.globi += 1\r\n #else:\r\n #print(self.glob[:10])\r\n #print(self.guub[:10])\r\n #print(self.glub[:10])\r\n #raise ValueError(self.glob)\r\n\r\n return particles, best_state, best_fitness",
"def simulateOneTimeStep(self):\n\n self.susceptibleToInfected()\n self.infectedToRecovered()\n\n # add the new values of healthy/infected/recovered to the arrays keeping track\n SIR_t = np.array([self.getSusceptible(), self.getInfected(), self.getRecovered()])\n #update SIR time series\n self.SIR = np.concatenate([self.SIR, SIR_t[:,np.newaxis]], axis=1)\n\n # add the new snapshot of the simulation\n self.snapshots.append(self.getSpace().copy())",
"def run_metropolis(self):\n\n # Initialize the posistions for each new Monte Carlo run\n positions = np.random.rand(self.num_p, self.num_d)\n # Initialize the distance matrix\n self.s.positions_distances(positions)\n # check if the wave function is zero\n while True:\n test_wavefunction = self.w.wavefunction(positions)\n if test_wavefunction**2 <= 1e-14:\n # Initialize the posistions for each new Monte Carlo run\n positions = np.random.rand(self.num_p, self.num_d)\n # Initialize the distance matrix\n self.s.positions_distances(positions)\n else:\n break\n\n # Initialize sampler method for each new Monte Carlo run\n self.sam.initialize()\n\n for i in range(self.mc_cycles):\n new_positions = self.metropolis_step(positions)\n positions = new_positions\n self.sam.sample_values(positions)\n\n self.sam.average_values(self.mc_cycles)\n energy = self.sam.local_energy\n d_El = self.sam.derivative_energy\n var = self.sam.variance\n self.print_averages()\n return d_El, energy, var",
"def simulate(self):\r\n\r\n for index in tqdm(range(self.steps)):\r\n\r\n S = 0.1 - 0.1 / self.steps * (index + 1)\r\n T = 0.5 / (np.log(2 + 0.2 * index))\r\n\r\n self.move(T, S)\r\n self.t_change.append(T)\r\n self.s_change.append(S)\r\n tot = calculate_total_energy(self.current_config)\r\n self.energies.append(tot)",
"def simulation(self):\n\n t_max = 3\n if self.meas_selected_series == 1:\n particle_density_number = self.particle_density_number\n else: # series 2:\n factors = 4/np.array([4, 6, 8, 10, 12, 14, 16, 18])\n factor = factors[(self.meas_selected_number-1)]\n particle_density_number = self.particle_density_number * factor\n\n p_i, p_f = toolbox_2.get_pressure_change(self.measurement)\n size, time2 = toolbox_2.simulate_extinction(self.particle_size_number * 1e-9,\n p_i, p_f,\n particle_density_number * 1e10,\n t_max, self.saturation_percentage / 100)\n smallest_growing_particle = toolbox_2.minimum_particle_diameter(p_i, p_f, self.saturation_percentage / 100)\n # short print:\n # print(\"M:\", self.meas_selected_number, \", \", round((p_i - p_f) / 1000, 3), \"kPa\", \", \", self.saturation_percentage, \"%\", \", \", round(smallest_growing_particle * 1e9, 2), \"nm\", \", \", sep=\"\")\n\n if smallest_growing_particle > 0:\n print(\"M:\", self.meas_selected_number, \" S:\", self.meas_selected_series, \" D:\", self.selected_data,\n \", smallest growing particle for pressure change (\", round(p_i / 1000, 2), \"-\",\n round(p_f / 1000, 2), \" = \", round((p_i - p_f) / 1000, 2), \"kPa) in \", self.saturation_percentage,\n \"% humidity is \", round(smallest_growing_particle * 1e9, 2), \"nm\", sep=\"\")\n else:\n print(\"M:\", self.meas_selected_number, \" S:\", self.meas_selected_series, \" D:\", self.selected_data,\n \", no particle will grow in \", \"(\", round(p_i / 1000, 2), \"-\", round(p_f / 1000, 2), \" = \",\n round((p_i - p_f) / 1000, 2), \"kPa)\", \" pressure change and \", self.saturation_percentage,\n \"% humidity \", sep=\"\")\n\n self.curve_simulate.setData(time2+0.05, size)\n self.simulate_bool = False",
"def run_experiment(self):\n\n start_time = time.time()\n\n strategy_instance = None\n if (self.strategy == 'ccegp'):\n strategy_instance = CCEGPStrategy(self)\n else:\n print('strategy unknown:', self.strategy)\n sys.exit(1)\n\n # For each run...\n for curr_run in range(1, self.num_runs_per_experiment + 1):\n\n # Update log\n self.curr_run = curr_run\n print('\\nRun', curr_run)\n self.log_file.write('\\nRun ' + str(curr_run) + '\\n')\n\n # Execute one run and get best values.\n attacker_run_high_fitness, attacker_run_best_world_data, attacker_run_best_solution, \\\n defender_run_high_fitness, defender_run_best_solution, attacker_dot, defender_dot \\\n = strategy_instance.execute_one_run()\n\n print('\\nBest attacker tree of run:\\n' + attacker_run_best_solution)\n if (self.print_dots):\n print('\\nBest attacker dot of run:\\n' + str(attacker_dot))\n print('\\nBest defender tree of run:\\n' + defender_run_best_solution)\n if (self.print_dots):\n print('\\nBest defender dot of run:\\n' + str(defender_dot))\n\n # If best of run is best overall, update appropriate values\n if (self.strategy != 'ccegp'):\n if (attacker_run_high_fitness > self.attacker_exp_high_fitness):\n self.attacker_exp_high_fitness = attacker_run_high_fitness\n print('New exp Attacker high fitness: ', self.attacker_exp_high_fitness)\n self.attacker_exp_best_world_data = attacker_run_best_world_data\n self.attacker_exp_best_solution = attacker_run_best_solution\n self.attacker_exp_best_dot = attacker_dot\n # If Competitive Co-evolution, add fitnesses (use Attacker to store most data)\n else:\n if ((attacker_run_high_fitness + defender_run_high_fitness) > self.attacker_exp_high_fitness):\n self.attacker_exp_high_fitness = (attacker_run_high_fitness + defender_run_high_fitness)\n print('New exp Attacker+Defender high fitness: ', self.attacker_exp_high_fitness)\n self.attacker_exp_best_world_data = attacker_run_best_world_data\n self.attacker_exp_best_solution = attacker_run_best_solution\n self.defender_exp_best_solution = defender_run_best_solution\n self.attacker_exp_best_dot = attacker_dot\n self.defender_exp_best_dot = defender_dot\n\n # Dump best world to file\n the_file = open(self.high_score_world_file_path, 'w')\n for line in self.attacker_exp_best_world_data:\n the_file.write(line)\n the_file.close()\n\n # Dump best Attacker solution (text) to file\n the_file = open(self.attacker_solution_file_path, 'w')\n the_file.write(self.attacker_exp_best_solution)\n the_file.close()\n\n # Dump best Defender solution (text) to file\n if (self.strategy == 'ccegp'):\n the_file = open(self.defender_solution_file_path, 'w')\n the_file.write(self.defender_exp_best_solution)\n the_file.close()\n\n # Dump best Attacker solution (dot) to file\n the_file = open(self.attacker_solution_dot_path, 'w')\n the_file.write(str(self.attacker_exp_best_dot))\n the_file.close()\n\n # Dump best Defender solution (dot) to file\n if (self.strategy == 'ccegp'):\n the_file = open(self.defender_solution_dot_path, 'w')\n the_file.write(str(self.defender_exp_best_dot))\n the_file.close()\n\n # Dump and display best Attacker solution\n if (self.render_solutions):\n self.attacker_exp_best_dot.render(filename=self.attacker_solution_png_path,\n view=self.attacker_open_png,\n format='png')\n\n # Dump and display best Defender solution\n if (self.render_solutions and self.strategy == 'ccegp'):\n self.defender_exp_best_dot.render(filename=self.defender_solution_png_path,\n view=self.defender_open_png,\n format='png')\n\n # Close out the log file\n if 
(not(self.log_file is None)):\n self.log_file.close()\n\n print(time.time() - start_time, 'seconds')",
"def run_one_body_sampling(self):\n\n # Initialize the posistions for each new Monte Carlo run\n positions = np.random.rand(self.num_p, self.num_d)\n # Initialize the distance matrix\n self.s.positions_distances(positions)\n # check if the wave function is zero\n while True:\n test_wavefunction = self.w.wavefunction(positions)\n if test_wavefunction**2 <= 1e-14:\n # Initialize the posistions for each new Monte Carlo run\n positions = np.random.rand(self.num_p, self.num_d)\n # Initialize the distance matrix\n self.s.positions_distances(positions)\n else:\n break\n # Initialize sampler method for each new Monte Carlo run\n self.sam.initialize()\n density_adding = np.zeros(41)\n\n # Run Metropolis while finding one body density\n for i in range(self.mc_cycles):\n new_positions = self.metropolis_step(positions)\n positions = new_positions\n density = self.one_body_density(positions)\n density_adding += density\n # self.sam.sample_values(positions)\n\n # self.sam.average_values(self.mc_cycles)\n # self.print_averages()\n\n return density_adding",
"def step(self):\n self.world.slosh_oceans()\n self.world.transfer_energy_vertically()\n self.world.transfer_energy_horizontally()\n self.world.absorb_energy_from_core()\n self.world.absorb_energy_from_sun(self.sun)",
"def MC_step(particles,chosen_one,dr,R_cut,v):\n \n #%%Calculate the difference in energy\n other_particles = np.delete(particles,chosen_one,0)\n old_particle = particles[chosen_one]\n new_particle = copy.copy(old_particle) # otherwise I move it anyway\n new_particle[:MC_par['dim']] = (old_particle[:MC_par['dim']] + dr) % MC_par['L_box']\n #%% Apply periodic Boundary conditions and exclude particles outside R_cut\n # Particles at a distance > R_cut don't contribute to the energy\n dE = 0\n #%%\n if(MC_par['charge']):\n for charge_prod in ['same','opp']:\n #%%\n sel_other_particles = other_particles[other_particles[:,-1] * old_particle[-1] == word2sign[charge_prod]]\n old_distances = calc_distances(sel_other_particles[:,:MC_par['dim']],old_particle[:MC_par['dim']],R_cut) \n new_distances = calc_distances(sel_other_particles[:,:MC_par['dim']],new_particle[:MC_par['dim']],R_cut)\n old_histo,bins = np.histogram(old_distances, bins = v_bin)\n new_histo,bins = np.histogram(new_distances, bins = v_bin)\n dE += np.sum( (new_histo-old_histo) * v[charge_prod] )\n #%%\n else:\n old_distances = calc_distances(other_particles,old_particle,R_cut) \n new_distances = calc_distances(other_particles,new_particle,R_cut)\n old_histo,bins = np.histogram(old_distances, bins = v_bin)\n new_histo,bins = np.histogram(new_distances, bins = v_bin)\n dE += np.sum((new_histo-old_histo)*v['unsigned'])\n #%%dE = np.sum(potential(new_distances)) - np.sum(potential(old_distances))\n #Accept or decline the movement\n acc_prob = np.min([1,np.exp(-dE)])\n if np.random.rand() < acc_prob:\n # perform the movement\n particles[chosen_one] = new_particle\n move = 1\n else:\n # Decline\n move = 0\n dE = 0\n\n return dE,move",
"def _exe_(self):\n print(\"\\n Start simulation (using Pharlap) ...\")\n dic = \"data/sim/{dn}/{rad}/\".format(dn=self.event.strftime(\"%Y.%m.%d.%H.%M\"), rad=self.rad)\n self._copy_ne_()\n [self._compute_(case) for case in [\"bgc\", \"flare\"]]\n plotlib.plot_exp_rays(dic, self.event, self.bmnum, \"bgc\")\n plotlib.plot_exp_rays(dic, self.event, self.bmnum, \"flare\")\n self._compute_doppler_()\n rec = self._compute_velocity_()\n return rec",
"def _start_eplus_simulation(self):\n if not self.model:\n self.exit('No model specified.')\n if not self.weather:\n self.exit('No weather specified.')\n model_path = self.model\n if model_path[0] == '~':\n model_path = os.path.expanduser(model_path)\n if model_path[0] != '/':\n model_path = os.path.join(self.cwd, model_path)\n weather_path = self.weather\n if weather_path[0] == '~':\n weather_path = os.path.expanduser(weather_path)\n if weather_path[0] != '/':\n weather_path = os.path.join(self.cwd, weather_path)\n model_dir = os.path.dirname(model_path)\n bcvtb_dir = self.bcvtb_home\n if bcvtb_dir[0] == '~':\n bcvtb_dir = os.path.expanduser(bcvtb_dir)\n if bcvtb_dir[0] != '/':\n bcvtb_dir = os.path.join(self.cwd, bcvtb_dir)\n _log.debug('Working in %r', model_dir)\n\n self._write_port_file(os.path.join(model_dir, 'socket.cfg'))\n self._write_variable_file(os.path.join(model_dir, 'variables.cfg'))\n\n if self.version >= 8.4:\n cmd_str = \"cd %s; export BCVTB_HOME=%s; energyplus -w %s -r %s\" % (\n model_dir, bcvtb_dir, weather_path, model_path)\n else:\n cmd_str = \"export BCVTB_HOME=%s; runenergyplus %s %s\" % (bcvtb_dir, model_path, weather_path)\n _log.debug('Running: %s', cmd_str)\n f = open(model_path, 'r')\n lines = f.readlines()\n f.close()\n endmonth = 0\n if self.currentday + self.length > self.maxday:\n endday = self.currentday + self.length - self.maxday\n endmonth = self.currentmonth + 1\n else:\n endday = self.currentday + self.length\n endmonth = self.currentmonth\n for i in range(len(lines)):\n if lines[i].lower().find('runperiod,') != -1:\n if not self.real_time_flag:\n lines[i + 2] = ' ' + str(self.startmonth) + ', !- Begin Month' + '\\n'\n lines[i + 3] = ' ' + str(self.startday) + ', !- Begin Day of Month' + '\\n'\n lines[i + 4] = ' ' + str(self.endmonth) + ', !- End Month' + '\\n'\n lines[i + 5] = ' ' + str(self.endday) + ', !- End Day of Month' + '\\n'\n else:\n lines[i + 2] = ' ' + str(self.currentmonth) + ', !- Begin Month' + '\\n'\n lines[i + 3] = ' ' + str(\n self.currentday) + ', !- Begin Day of Month' + '\\n'\n lines[i + 4] = ' ' + str(endmonth) + ', !- End Month' + '\\n'\n lines[i + 5] = ' ' + str(endday) + ', !- End Day of Month' + '\\n'\n for i in range(len(lines)):\n if lines[i].lower().find('timestep,') != -1 and lines[i].lower().find('update frequency') == -1:\n if lines[i].lower().find(';') != -1:\n lines[i] = ' Timestep,' + str(self.timestep) + ';' + '\\n'\n else:\n lines[i + 1] = ' ' + str(self.timestep) + ';' + '\\n'\n if self.customizedOutT > 0:\n lines.append('ExternalInterface:Actuator,') + '\\n'\n lines.append(' outT, !- Name') + '\\n'\n lines.append(' Environment, !- Actuated Component Unique Name') + '\\n'\n lines.append(' Weather Data, !- Actuated Component Type') + '\\n'\n lines.append(' Outdoor Dry Bulb; !- Actuated Component Control Type') + '\\n'\n f = open(model_path, 'w')\n\n for i in range(len(lines)):\n f.writelines(lines[i])\n f.close()\n self.simulation = subprocess.Popen(cmd_str, shell=True)",
"def run_simulation(self):\n\n # Create agents for simulation\n self.spawn_agents(self.num_agents)\n\n if self.force_personalities != None:\n self.force_personalities(self)\n\n if self.visualizer == True:\n V.Visualizer.createVisualizer(types=self.visualizerOptions, showAtEnd=True)\n\n TM.TimeManager.createManager()\n for x in range (self.time_to_run):\n for agent in self.agents:\n agent.take_turn()\n while self.agents_to_settle:\n self.agents_to_settle.pop().settle_reposts()\n if self.data_collector != None:\n self.data_collector.collector_turn(x, agent)\n if self.visualizer == True:\n self.generate_visualizations(x)\n TM.TimeManager.sharedManager.increaseTime()\n if self.data_collector != None:\n self.data_collector.collector_round(x)\n self.generate_statistics(x)\n\n if self.visualizer == True:\n V.Visualizer.sharedVisualizer.updateEverything()\n\n if self.data_collector != None:\n self.data_collector.finalize()",
"def start_solving(self):\n self.mesh.output_vtk_mesh(self.model_name + \"0\", \n [self.current_pressure, \n self.mesh.get_cell_domain_all()], \n [\"pressure\", \"domain\"])\n\n self.time_step_output(0., 0)\n\n for time_step in range(1,self.number_of_time_steps+1):\n current_time = time_step*self.delta_t\n print(time_step)\n\n self.update_pressure()\n self.find_upwinding_direction()\n self.update_concentration()\n \n if time_step%self.output_frequency == 0:\n self.mesh.output_vtk_mesh(self.model_name+str(time_step), \n [self.current_pressure,\n self.current_concentration, \n self.mesh.get_cell_domain_all()],\n [\"pressure\", \"concentration\" , \"domain\"])\n\n self.time_step_output(current_time, time_step)",
"def run(self, obs_data, eps_init, eps_last, eps_decay, n_particles, ess_min=0.5, logger=sys.stdout, info=False, rng=np.random):\n\n all_ps = []\n all_log_weights = []\n all_eps = []\n all_log_ess = []\n all_n_sims = []\n\n logger = open(os.devnull, 'w') if logger is None else logger\n\n # save some log values for reuse\n log_ess_min = np.log(ess_min)\n log_n_particles = np.log(n_particles)\n\n # sample initial population\n iter = 0\n eps = eps_init\n ps, n_sims = self.sample_initial_population(obs_data, n_particles, eps, logger, rng)\n log_weights = np.full(n_particles, -log_n_particles)\n\n if info:\n all_ps.append(ps)\n all_log_weights.append(log_weights)\n all_eps.append(eps)\n all_log_ess.append(0.0)\n all_n_sims.append(n_sims)\n\n logger.write('iter = {0}, eps = {1}, ess (%) = {2}, sims = {3}\\n'.format(iter, eps, 1.0, n_sims))\n\n while eps > eps_last:\n\n # sample next population\n iter += 1\n eps *= eps_decay\n ps, log_weights, n_new_sims = self.sample_next_population(ps, log_weights, obs_data, eps, logger, rng)\n n_sims += n_new_sims\n\n # calculate effective sample size\n log_ess = -scipy.misc.logsumexp(2.0 * log_weights) - log_n_particles\n\n # if population is degenerate, resample particles\n if log_ess < log_ess_min:\n ps = self.resample_population(ps, log_weights, rng)\n log_weights = np.full(n_particles, -log_n_particles)\n\n if info:\n all_ps.append(ps)\n all_log_weights.append(log_weights)\n all_eps.append(eps)\n all_log_ess.append(log_ess)\n all_n_sims.append(n_sims)\n\n logger.write('iter = {0}, eps = {1}, ess (%) = {2}, sims = {3}\\n'.format(iter, eps, np.exp(log_ess), n_sims))\n\n if info:\n return all_ps, all_log_weights, all_eps, all_log_ess, all_n_sims\n else:\n return ps, log_weights",
"def run():\n return estimate(0,1,0)",
"def run_sim(mass, start, stop, sampling_rate):\n axion = Axion(mass=mass)\n return axion.do_fast_axion_sim(start,\n stop,\n sampling_rate)",
"def step(self, step=None):\n\n self.qtime = -time.time()\n info(\"\\n Instanton optimization STEP %d\" % step, verbosity.low)\n\n if step == 0:\n info(\" @GEOP: Initializing INSTANTON\", verbosity.low)\n\n if self.beads.nbeads == 1:\n info(\" @GEOP: Classical TS search\", verbosity.low)\n if self.hessian_init == 'true':\n get_hessian(self.hessian, self.gm, self.beads.q)\n else:\n if ((self.beads.q - self.beads.q[0]) == 0).all(): # If the coordinates in all the imaginary time slices are the same\n info(\" @GEOP: We stretch the initial geometry with an 'amplitud' of %4.2f\" % self.delta, verbosity.low)\n imvector = get_imvector(self.initial_hessian, self.beads.m3[0].flatten())\n for i in range(self.beads.nbeads):\n self.beads.q[i, :] += self.delta * np.cos(i * np.pi / float(self.beads.nbeads - 1)) * imvector[:]\n if self.hessian_init != 'true':\n info(\" @GEOP: Hessian_init isn't true but we have stretched the polymer so we are going to compute the initial hessian anyway.\", verbosity.low)\n self.hessian_init = 'true'\n else:\n info(\" @GEOP: Starting from the provided geometry in the extended phase space\", verbosity.low)\n if not (self.initial_hessian is None):\n raise ValueError(\" You have to provided a hessian with size (3xnatoms)^2 but also geometry in the extended phase space (nbeads>1). Please check the inputs\\n\")\n\n if self.hessian_init == 'true':\n info(\" @GEOP: We are computing the initial hessian\", verbosity.low)\n get_hessian(self.hessian, self.gm, self.beads.q)\n\n # Update positions and forces\n self.old_x[:] = self.beads.q\n self.old_u[:] = self.forces.pots\n self.old_f[:] = self.forces.f\n\n if type(self.im.f) == type(None):\n self.im(self.beads.q, ret=False) # Init instanton mapper\n\n if (self.old_x == np.zeros((self.beads.nbeads, 3 * self.beads.natoms), float)).all():\n self.old_x[:] = self.beads.q\n if self.exit:\n softexit.trigger(\"Geometry optimization converged. Exiting simulation\")\n\n if len(self.fixatoms) > 0:\n for dqb in self.old_f:\n dqb[self.fixatoms * 3] = 0.0\n dqb[self.fixatoms * 3 + 1] = 0.0\n dqb[self.fixatoms * 3 + 2] = 0.0\n\n # Do one step. Update hessian for the new position. Update the position and force inside the mapper.\n Instanton(self.old_x, self.old_f, self.im.f, self.hessian, self.hessian_update, self.hessian_asr, self.im, self.gm, self.big_step, self.opt, self.mode)\n\n # Update positions and forces\n self.beads.q = self.gm.dbeads.q\n self.forces.transfer_forces(self.gm.dforces) # This forces the update of the forces\n\n # Print current instanton geometry and hessian\n if (self.save > 0 and np.mod(step, self.save) == 0) or self.exit:\n print_instanton_geo(self.prefix, step, self.im.dbeads.nbeads, self.im.dbeads.natoms, self.im.dbeads.names,\n self.im.dbeads.q, self.old_u, self.cell, self.energy_shift)\n print_instanton_hess(self.prefix, step, self.hessian)\n\n # Exit simulation step\n d_x_max = np.amax(np.absolute(np.subtract(self.beads.q, self.old_x)))\n self.exit = self.exitstep(self.forces.pot, self.old_u.sum(), d_x_max, self.exit, step)\n\n # Update positions and forces\n self.old_x[:] = self.beads.q\n self.old_u[:] = self.forces.pots\n self.old_f[:] = self.forces.f",
"def draw_samples(self):\n if self._integrator == 'HMC': \n self.momentum = torch.distributions.Normal(torch.zeros_like(self.parameters), torch.ones_like(self.parameters)).sample()\n start = time.time()\n if (self._integrator == 'RMHMC'): #torch has trouble differentiating through repeated eigenvalues\n self.jitters = self.jitter * torch.rand(self.parameters.shape[0])\n self.jitters[0] = 0.\n self.jitters[1] = 0.\n self.potential_ = self.get_potential()\n self.hamiltonian_ = self.get_hamiltonian()\n self.momentum = self.resample_momenta(init=True)\n self.momenta.append(self.momentum)\n if self.shadow:\n self.shadow_ = self.get_shadow()\n finished = 0\n counter = 0\n if self.verbose:\n for sample in range(self.n_samples):\n self.step()\n if self.degenerate:\n break\n finished += 1\n else:\n# for _ in tqdm(range(self.n_samples)):\n for sample in range(self.n_samples):\n self.step()\n if self.degenerate:\n break\n finished += 1\n counter += 1\n if counter > self.n_samples * 0.05:\n counter = 0\n print('('+str(int((sample+1)/self.n_samples*100))+'% complete)', int(self.accepted),'of', int(self.accepted + self.rejected), 'accepted', '('+str(int((self.accepted)/(self.accepted+self.rejected)*100))+'%)')\n total = float(self.accepted + self.rejected)\n end = time.time()\n if total >= self.n_samples:\n self.completed = True\n self.elapsed += end-start\n print('\\n', int(self.accepted), ' of ', int(self.accepted + self.rejected), ' samples accepted in', self.elapsed, ' seconds (', 100 * self.accepted/total,'%).')\n return None\n else:\n self.degenerates +=1\n self.find_mode()\n self.parameters = params_init + torch.randn(self.parameters.shape[0])/100\n self.reinitiate_samples()\n self.resample_momenta(init = True)\n return None",
"def simul_anneal(self, X, y, temperature, steps, cooldown='exponential', mean=0.0, stddev=1.0, quit=1e-4):\n cooldown = self.cooldown_method[cooldown]\n for i in range(steps):\n temperature = cooldown(temperature, i)\n before_energy = self._energy(X, y)\n move = self._to_neighbor(mean, stddev)\n after_energy = self._energy(X, y)\n dE = after_energy - before_energy\n if 0 < dE < quit:\n return\n if dE < 0.0 or np.exp(-dE/temperature) > np.random.rand():\n # accept the new state\n pass\n else:\n self.weight -= move",
"def one_cycle(self):\n self.nstep += 1\n new_global_min = False\n\n accept, minres = self._monte_carlo_step()\n\n if accept:\n self.energy = minres.fun\n self.x = np.copy(minres.x)\n new_global_min = self.storage.update(minres)\n\n # print some information\n if self.disp:\n self.print_report(minres.fun, accept)\n if new_global_min:\n print(\"found new global minimum on step %d with function\"\n \" value %g\" % (self.nstep, self.energy))\n\n # save some variables as BasinHoppingRunner attributes\n self.xtrial = minres.x\n self.energy_trial = minres.fun\n self.accept = accept\n\n return new_global_min",
"def run(self):\n \n # shortcut for self\n s = self\n \n # shortcut to existing heating fuel\n fuel = s.exist_fuel\n\n # holds summary measures for the heat pump project (e.g. seasonal COP,\n # internal rate of return). Fill out first item: secondary fuel info.\n s.summary = {'fuel_unit': fuel.unit, 'fuel_desc': fuel.desc}\n \n # Create the home energy simulation object\n sim = HomeHeatModel(\n city_id=s.city_id,\n hp_model_id=s.hp_model_id,\n exist_heat_fuel_id=s.exist_heat_fuel_id,\n exist_heat_effic=s.exist_heat_effic,\n exist_kwh_per_mmbtu=s.exist_kwh_per_mmbtu, \n co2_lbs_per_kwh=s.co2_lbs_per_kwh,\n low_temp_cutoff=s.low_temp_cutoff,\n off_months=s.off_months_chks,\n garage_stall_count=s.garage_stall_count,\n garage_heated_by_hp=s.garage_heated_by_hp,\n bldg_floor_area=s.bldg_floor_area,\n indoor_heat_setpoint=s.indoor_heat_setpoint,\n insul_level=s.insul_level,\n pct_exposed_to_hp=s.pct_exposed_to_hp,\n doors_open_to_adjacent=s.doors_open_to_adjacent,\n bedroom_temp_tolerance=s.bedroom_temp_tolerance, \n )\n\n # If other end uses use the heating fuel, make an estimate of their annual\n # consumption of that fuel. This figure is expressed in the physical unit\n # for the fuel type, e.g. gallons of oil. Save this as an object attribute\n # so it is accessible in other routines. See Evernote notes on values (AkWarm\n # for DHW and Michael Bluejay for Drying and Cooking).\n is_electric = (s.exist_heat_fuel_id == constants.ELECTRIC_ID) # True if Electric\n s.fuel_other_uses = s.includes_dhw * 4.23e6 / fuel.dhw_effic\n s.fuel_other_uses += s.includes_dryer * (0.86e6 if is_electric else 2.15e6)\n s.fuel_other_uses += s.includes_cooking * (0.64e6 if is_electric else 0.8e6)\n s.fuel_other_uses *= s.occupant_count / fuel.btus\n\n # For elecric heat we also need to account for lights and other applicances not\n # itemized above.\n if is_electric:\n # Use the AkWarm Medium Lights/Appliances formula but take 25% off\n # due to efficiency improvements since then.\n s.lights_other_elec = 2086. + 1.20 * s.bldg_floor_area # kWh in the year\n else:\n s.lights_other_elec = 0.0\n \n # Match the existing space heating use if it is provided. 
Do so by using\n # the UA true up factor.\n if not is_null(s.exist_fuel_use):\n \n # Remove the energy use from the other end uses that use the fuel, unless\n # this is electric heat and the user indicated that the entered value is\n # just space heating.\n if is_electric and s.elec_uses=='space':\n # user explicitly indicated that the entered annual usage value is\n # just space heating.\n space_fuel_use = s.exist_fuel_use\n else:\n space_fuel_use = s.exist_fuel_use - s.fuel_other_uses - s.lights_other_elec\n\n sim.no_heat_pump_use = True\n sim.calculate()\n if is_electric:\n # For electric heat, electric use for space heat is in secondary_kwh\n fuel_use1 = sim.annual_results().secondary_kwh\n else:\n fuel_use1 = sim.annual_results().secondary_fuel_units\n \n # scale the UA linearly to attempt to match the target fuel use\n ua_true_up = space_fuel_use / fuel_use1\n sim.ua_true_up = ua_true_up\n sim.calculate()\n\n if is_electric:\n # For electric heat, electric use for space heat is in secondary_kwh\n fuel_use2 = sim.annual_results().secondary_kwh\n else:\n fuel_use2 = sim.annual_results().secondary_fuel_units\n \n # In case it wasn't linear, inter/extrapolate to the final ua_true_up\n slope = (fuel_use2 - fuel_use1)/(ua_true_up - 1.0)\n # print(space_fuel_use, fuel_use1, fuel_use2, ua_true_up)\n ua_true_up = 1.0 + (space_fuel_use - fuel_use1) / slope\n # print(ua_true_up)\n\n else:\n ua_true_up = 1.0\n \n # Set the UA true up value into the model and also save it as\n # an attribute of this object so it can be observed.\n sim.ua_true_up = ua_true_up\n s.ua_true_up = ua_true_up\n \n # Run the base case with no heat pump and record energy results.\n # This model only models the space heating end use.\n sim.no_heat_pump_use = True\n sim.calculate()\n s.df_mo_en_base = sim.monthly_results()\n s.ann_en_base = sim.annual_results()\n # print(s.ann_en_base.secondary_kwh)\n \n # Run the model with the heat pump and record energy results\n sim.no_heat_pump_use = False\n sim.calculate()\n s.df_mo_en_hp = sim.monthly_results()\n s.ann_en_hp = sim.annual_results()\n s.df_hourly = sim.df_hourly\n\n # record design heat load\n s.summary['design_heat_load'], s.summary['design_heat_temp'] = sim.design_heat_load()\n \n # Calculate some summary measures\n s.summary['cop'] = s.ann_en_hp.cop\n s.summary['hp_max_capacity_5F'] = sim.hp_max_capacity_5F()\n s.summary['max_hp_reached'] = sim.max_hp_reached\n \n # CO2 savings\n s.summary['co2_lbs_saved'] = s.ann_en_base.co2_lbs - s.ann_en_hp.co2_lbs\n s.summary['co2_driving_miles_saved'] = convert_co2_to_miles_driven(s.summary['co2_lbs_saved'])\n s.summary['hp_load_frac'] = s.ann_en_hp.hp_load_mmbtu / (s.ann_en_hp.hp_load_mmbtu + s.ann_en_hp.secondary_load_mmbtu)\n \n # Create DataFrames that hold monthly energy cost amounts\n # Results are stored as object attributes.\n self.calc_monthly_cash()\n \n # Create a multi-year Cash Flow DataFrame and summary economic measures.\n # Results are stored as object attributes.\n self.calc_cash_flow()\n\n # Save a gzipped pickle of this object using Unix time as the file name.\n # make a directory to hold the files\n save_dir = 'hpcalc_runs'\n Path(save_dir).mkdir(exist_ok=True)\n fname = f'{time.time():.2f}.pkl.gz'\n s.file_name = fname\n pickle.dump(self, gzip.open(f'{save_dir}/{fname}', 'wb'))",
"def _exe_(self):\n print(\"\\n Start simulation (using Pharlap) ...\")\n dic = \"data/sim/{dn}/{rad}/\".format(dn=self.event.strftime(\"%Y.%m.%d.%H.%M\"), rad=self.rad)\n self._estimate_edens_()\n self._compute_()\n plotlib.plot_exp_rays(dic, self.event, self.bmnum, \"bgc\")\n plotlib.plot_exp_rays(dic, self.event, self.bmnum, \"flare\")\n if self.verbose: print(\"\\n Processing Doppler.\")\n self._compute_doppler_()\n rec = self._compute_velocity_()\n return rec",
"def main():\n\tresults = []\n\n\tconfig = configparser.ConfigParser()\n\tconfig.read(\"simulation.ini\")\n\tsettings = config['sim']\n\n\tcompleted_obj_hw = int(settings[\"ClientsPerCampaign\"]) * float(settings[\"CompletedPctgHW\"])\n\texceeded_obj_hw = float(settings[\"ExceededPctgHW\"])\n\tsignificance_level = float(settings[\"SignificanceLevel\"])\n\tz_val_two_tails = scipy.stats.norm.ppf(1 - (significance_level / 2))\n\n\tprint(\"Completed Target HW: \" + str(completed_obj_hw))\n\tprint(\"Exceeded Target HW: \" + str(exceeded_obj_hw))\n\n\tcompleted_vals = []\n\texceeded_vals = []\n\tdone = False\n\n\tcompleted_avg = 0\n\texceeded_avg = 0\n\tcompleted_hw = 0\n\texceeded_hw = 0\n\n\ti = 0\n\twhile not done:\n\t\tprint(\"RUN: \" + str(i + 1))\n\t\tenv = simpy.Environment()\n\t\tsim = Simulation(env, settings, i == 0)\n\t\tsim.run()\n\t\tresults.append(sim.results)\n\t\ti += 1\n\n\t\tif settings['RunOnce'] == 'yes':\n\t\t\tprint(\"RUN ONCE\")\n\t\t\tsys.exit()\n\n\t\tcompleted_vals.append(sim.results['completed_count'])\n\t\texceeded_vals.append(sim.results['exceeded_proportion'])\n\n\t\tif i < 2:\n\t\t\tprint(\"---------------\")\n\t\t\tcontinue\n\n\t\tcompleted_avg = sum(completed_vals) / len(completed_vals)\n\t\tcompleted_S = sum([(v - completed_avg) ** 2 for v in completed_vals]) / (i - 1)\n\t\tcompleted_S = math.sqrt(completed_S)\n\t\tcompleted_hw = (z_val_two_tails * completed_S) / math.sqrt(i)\n\t\tprint(\"runs: \" + str(i) + \" completed HW: \" + str(completed_hw))\n\n\t\texceeded_avg = sum(exceeded_vals) / len(exceeded_vals)\n\t\texceeded_S = math.sqrt(exceeded_avg * (1 - exceeded_avg))\n\t\texceeded_hw = (z_val_two_tails * exceeded_S) / math.sqrt(i)\n\t\tprint(\"runs: \" + str(i) + \" exceeded HW: \" + str(exceeded_hw))\n\n\t\tif completed_hw < completed_obj_hw and exceeded_hw < exceeded_obj_hw:\n\t\t\tprint(\"END ITERATIONS\")\n\t\t\tdone = True\n\n\t\tprint(\"---------------\")\n\n\n\tfilename = 'results/Results_' + settings['FileSizeGB'] + '_' + settings['TorrentThreshold'] + '_' + settings['HTTPDownThreshold'] \\\n\t\t+ '_' + settings['HTTPUp'] + '_' + str(random.randint(0,10000)) + '.xlsx'\n\n\tprint(\"Saving XLSX to: \" + filename)\n\twb = xs.Workbook(filename)\n\n\tws = wb.add_worksheet()\n\n\tws.write(0, 1, 'Exceded')\n\tws.write(0, 2, 'Completed')\n\n\ti = 1\n\tfor result in results:\n\t\tws.write(i, 0, i)\n\t\tws.write(i, 1, result['exceeded_proportion'])\n\t\tws.write(i, 2, result['completed_count'])\n\t\ti += 1\n\n\tws.write(i, 0, 'average')\n\tws.write(i, 1, exceeded_avg)\n\tws.write(i, 2, completed_avg)\n\ti += 1\n\tws.write(i, 0, 'half width')\n\tws.write(i, 1, exceeded_hw)\n\tws.write(i, 2, completed_hw)\n\n\twb.close()",
"def part1():\n\tmoons = read_input()\n\tvelocities = [[0, 0, 0] for moon in moons]\n\ttime = 0\n\tend_time = 1000\n\twhile time < end_time:\n\t\tgravities = [[0, 0, 0] for moon in moons]\n\t\tfor i, moon in enumerate(moons):\n\t\t\tfor other in moons:\n\t\t\t\tfor axis in range(len(moon)):\n\t\t\t\t\tif moon[axis] < other[axis]:\n\t\t\t\t\t\tgravities[i][axis] += 1\n\t\t\t\t\telif moon[axis] > other[axis]:\n\t\t\t\t\t\tgravities[i][axis] -= 1\n\t\tfor i, moon in enumerate(moons):\n\t\t\tfor axis in range(3):\n\t\t\t\tvelocities[i][axis] += gravities[i][axis]\n\t\t\t\tmoon[axis] += velocities[i][axis]\n\t\ttime += 1\n\tenergy = 0\n\tfor i, moon in enumerate(moons):\n\t\tpotential = sum([abs(d) for d in moon])\n\t\tkinetic = sum(abs(d) for d in velocities[i])\n\t\tenergy += potential * kinetic\n\tprint(energy)",
"def run(self, seed='old'):\n if seed == 'old':\n founds, number_found = self.find_in_base()\n param = number_found - self.M_N\n\n if param < 0:\n print \"We have only {0} usable chromosomes in the database, per {1} required.\".format(number_found, self.M_N)\n l, __ = self.evolve_partials(abs(param))\n combined = founds+[l[i].x for i in range(len(l))]\n\n elif param > 0:\n combined = random.sample(founds, self.M_N)\n\n else:\n combined = founds\n\n if seed == 'fresh':\n print \"Evolving fresh chromosomes...\"\n l, __ = self.evolve_partials(self.M_N)\n combined = [l[i].x for i in range(len(l))]\n\n if len(combined) != self.M_N: raise ValueError\n print \"\\nLaunching Multi-Objective evolution...\"\n isl, prob = self.mlt_obj_evo(combined)\n self.writing_finals(isl, prob)",
"def test_fixture_single_run_warm(tmp_sample_project):\n config_dir = tmp_sample_project\n output = subprocess.run([\"smif\", \"run\", \"-v\", \"-w\", \"-d\", config_dir,\n \"energy_central\"],\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n print(output.stdout.decode(\"utf-8\"))\n print(output.stderr.decode(\"utf-8\"), file=sys.stderr)\n assert \"Running energy_central\" in str(output.stderr)\n assert \"Model run 'energy_central' complete\" in str(output.stdout)"
] | [
"0.6719989",
"0.6187217",
"0.61104983",
"0.6038098",
"0.5963481",
"0.5931287",
"0.590561",
"0.58786446",
"0.5835071",
"0.5818203",
"0.5817355",
"0.5707744",
"0.56497705",
"0.5628978",
"0.56262344",
"0.5613195",
"0.559241",
"0.5558531",
"0.5554702",
"0.5552925",
"0.5549502",
"0.55486166",
"0.5527644",
"0.55253243",
"0.5509791",
"0.54543346",
"0.54209256",
"0.5413877",
"0.54137135",
"0.54071075"
] | 0.6652485 | 1 |
Calls the patient update endpoint and returns a summary. If the update summary is None, the method returns the list of errors that occurred while trying to update this Patient's data. | def post_to_endpoint_with_patient_struct(
data_set_id: int,
json_data: json,
update_path: str,
summary_schema: marshmallow.Schema = None,
commit: bool = False,
return_patient_struct: Union[PatientStruct, PcorPatientStruct] = None
) -> Tuple[Optional[object], List[str], Union[PatientStruct, PcorPatientStruct]]:
#from django.conf import settings
logger = prefect.context.get("logger")
logger.info(f"Trying to post to https://app-9097.on-aptible.com")
err, content = requests_post("https://app-9097.on-aptible.com" + update_path, json_dict={
'json_data': json_data,
'commit': commit,
'data_set_id': data_set_id,
'api_key': "pBSBtzsb3OqTx57W"
})
if err:
return None, [err], return_patient_struct
response = json.loads(content)
success = response['success']
if not success:
return None, response['errors'], return_patient_struct
errors = None
summary = summary_schema.load(response['update_summary'])
if errors:
errors = ["%s: %s" % (key, err) for key, err in errors.items()]
return None, errors, return_patient_struct
return summary, [], return_patient_struct | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def post_to_endpoint(data_set_id: int,\n json_data: json,\n update_path: str,\n summary_schema: marshmallow.Schema = None,\n commit: bool = False) -> Tuple[Optional[object], List[str]]:\n result, errors, _ = post_to_endpoint_with_patient_struct(data_set_id,\n json_data,\n update_path,\n summary_schema,\n commit)\n return result, errors",
"def update(self):\n LOG.debug(\"Checking ADT Pulse cloud service for updates\")\n response = self.query(ADT_SUMMARY_URI, method='GET')\n if response.ok:\n self._update_sites(response.text)\n else:\n LOG.info(f\"Error returned from ADT Pulse service check: {response.status_code}\")",
"async def async_update_data():\n try:\n # Note: asyncio.TimeoutError and aiohttp.ClientError are already\n # handled by the data update coordinator.\n async with async_timeout.timeout(10):\n return await hub.update_status()\n except InvalidPasswordOrEmail as err:\n raise UpdateFailed(f\"The password or email address is invalid: {err}\")",
"def partial_update(self, request, pk=None):\n return Response({'http_method':'PATCH'})",
"def describe_dat_update(added_params, removed_params):\n # We only added parameters\n if added_params and not removed_params:\n # We added one\n if len(added_params) == 1:\n return \"Added DAT parameter to %s\" % added_params[0]\n # We added several, but all on the same port\n elif all(param == added_params[0]\n for param in added_params[1:]):\n return \"Added DAT parameters to %s\" % added_params[0]\n # We added several on different ports\n else:\n return \"Added DAT parameters\"\n # We only removed parameters\n elif removed_params and not added_params:\n # We removed one\n if len(removed_params) == 1:\n return \"Removed DAT parameter from %s\" % (\n removed_params[0])\n # We removed several, but all on the same port\n elif all(param == removed_params[0]\n for param in removed_params[1:]):\n return \"Removed DAT parameters from %s\" % (\n removed_params[0])\n # We removed several from different ports\n else:\n return \"Removed DAT parameters\"\n # Both additions and deletions\n else:\n # Replaced a parameter\n if ((len(added_params), len(removed_params)) == (1, 1) and\n added_params[0] == removed_params[0]):\n return \"Changed DAT parameter on %s\" % added_params[0]\n # Did all kind of stuff\n else:\n if added_params:\n port = added_params[0]\n else:\n port = removed_params[0]\n # ... to a single port\n if all(param == port\n for param in chain(added_params, removed_params)):\n return \"Changed DAT parameters on %s\" % port\n # ... to different ports\n else:\n return \"Changed DAT parameters\"",
"def post_patient_edit(form):\n # Extract the data from the form and add extra fields.\n\n contacted_fields = [\"Contacted\", \"Comment\"]\n disabled_fields = [\"NextURL\"] + [x for x in form.data.keys() if re.search(r\"Disabled\", x) is not None]\n data = {k: v for (k, v) in form.data.items() if k not in disabled_fields \n and k not in contacted_fields}\n data[\"UpdatedBy\"] = get_current_user()\n data[\"UpdatedDateTime\"] = get_current_datetime()\n\n for k in [\"SymptomType\" + str(n) for n in list(range(1, 11))]:\n if data[k] != []:\n # keep only values that are not \"Select...\"\n data[k] = [v for v in data[k] if v not in [\"Select...\", \" \", \"\",]]\n if data[k] != []:\n data[k] = \"; \".join(data[k])\n else:\n data[k] = None\n else:\n data[k] = None\n\n args = form.data\n cs = get_patient_contacted_status(args)\n try:\n current_cs = cs[\"current\"][\"Contacted\"]\n except:\n current_cs = \"\"\n try:\n current_comment = cs[\"current\"][\"Comment\"]\n except:\n current_comment = \"\"\n cs_user_entered = form.Contacted.data\n cs_comment_user_entered = form.Comment.data\n if current_cs != cs_user_entered or current_comment != cs_comment_user_entered:\n post_patient_contacted_status(args)\n \n # Look up key data that should not be user-changeable. XXX ideally these\n # columns would simply be excluded from PersonEntered\n q = (\"select PatientLastName, PatientSSN \" +\n \"from {schema}.PersonCrosswalk \".format(schema=app_schema) +\n \"where PersonID = ?\")\n print(\"person id\", form.PersonID.data, file=sys.stderr)\n tb = pd.read_sql(q, engine, params=[form.PersonID.data])\n assert tb.shape[0] == 1, \"Expected 1 row, but got {} rows\".format(tb.shape[0])\n d = tb.iloc[0,:].to_dict()\n data[\"LastName\"] = d[\"PatientLastName\"]\n data[\"SSN\"] = d[\"PatientSSN\"]\n\n # Get cursor.\n conn = engine.raw_connection()\n cursor = conn.cursor()\n\n # Define and execute query to insert a new row.\n keys_str = \", \".join(data.keys())\n vals_str = \", \".join(\"?\" for _ in data.keys())\n params = list(data.values())\n q = (\"insert into {schema}.PersonEntered \".format(schema=app_schema) +\n \"(\" + keys_str + \") \" +\n \"values (\" + vals_str + \")\")\n cursor.execute(q, params)\n\n conn.commit()\n \n return None",
"def partial_update(self, request, pk=None):\n\n return Response({'http_method':'PATCH'})",
"def update(self, request, pk=None):\n data = request.data\n instance = self._get_object(pk)\n serializer = self.get_serializer(instance, data=data)\n if not serializer.is_valid():\n return Response({'errors': serializer.errors}, status=status.HTTP_400_BAD_REQUEST)\n\n updated_obj = serializer.save()\n chart_review = PatientChartReviewRetrieveSerializer(updated_obj)\n return Response(chart_review.data, status=status.HTTP_200_OK)",
"def edit_patient(request):\n \n email = request.GET.get(\"email\")\n phone_number = request.GET.get(\"phone\")\n gender = request.GET.get(\"gender\")\n given_id = request.GET.get(\"id\")\n doctor = request.GET.get(\"doctor\")\n\n #construct boolean for is_doctor\n if doctor == \"false\":\n \n try:\n patient = Patient.objects.get(pk=given_id)\n except Patient.DoesNotExist:\n raise Http404(\"Patient not found!\")\n\n user_id = patient.user.id\n\n else:\n #when it's the current doctor\n user_id = request.user.id \n\n #call helper function to update patient fields\n updated_list = edit_user_information(user_id, email, phone_number, gender)\n\n return JsonResponse({\n \"phone\": updated_list[0],\n \"email\": updated_list[2],\n \"gender\": updated_list[1]\n })",
"def test_patient_detail_after_updating(self):\n url = reverse('doctor-list')\n response = self.client.get(reverse(\n 'patient:patient-detail', kwargs={'pk': Patient.objects.get(patient_name='testpatient1').id}))\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(Patient.objects.count(), 1)",
"def partial_update(self, request, pk=None):\n\n return Response({'http_method': 'PATCH'})",
"def partial_update(self, request, pk=None): #partial update a specific object\n return Response({'http_method': 'PATCH'})",
"def get_patient_data(self, client):\n for patient in self._monitored_patients.get_patient_list():\n # print(\"Requesting data for \" + patient.first_name+\" \"+patient.last_name+\"...\")\n patient.update_data(client.get_patient_data(patient.id))",
"def problem_update(self, identifier, updates):\n return self._patch(\"problems/%d\" % identifier, json=updates).json()",
"def partial_update(self,request,pk= None):\n return Response({'http_method':'PATCH'})",
"def partial_update(self,request,pk = None):\r\n\r\n return Response({'HTTP method':'PATCH'})",
"def test_updating_patient_account(self):\n \n form_data = {\"fname\": \"Jill\", \"lname\": \"Jones\", \n \"email\": \"[email protected]\", \"password\": \"password\", \n \"street-address\": \"33 Blue St\", \"city\": \"San Francisco\", \n \"state\": \"CA\", \"zipcode\": \"43223\", \"phone\": \"8884445555\",\n \"birthdate\":\"1984-05-05\"}\n\n update_patient_account(1, form_data)\n\n patient = Patient.query.get(1)\n self.assertEqual(\"Jill\", patient.fname)",
"def partial_update(self, request, pk=None):\n\n return Response({'http_method': 'PATCH'})",
"async def _async_update_data(self) -> dict[str, Any]:\n try:\n stats = await self._api.async_get_stats()\n self.server_status = STATUS_RUNNING\n return stats\n except FrigateApiClientError as exc:\n self.server_status = STATUS_ERROR\n raise UpdateFailed from exc",
"def partial_update(self, request, *args, **kwargs):\n instance = self.get_object()\n serializer = self.get_serializer(instance, data=request.data, partial=True)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n return Response(serializer.data)",
"def check_update_sanity(self):\n for update in crest.get_all_updates(self.model):\n assert update._name is not None, f\"There is an Update in {update._parent._name} ({update._parent.__class__.__name__}) whose name is 'None'\"\n assert update._name != \"\", f\"There is an Update in {update._parent._name} ({update._parent.__class__.__name__}) whose name is empty string\"\n\n assert isinstance(update.state, crest.State), f\"Update {update._name}'s state is not a crest.State. It is: {update.state} ({update.state.__class__})\"\n assert update.state in crest.get_states(update._parent), f\"Update's state {update.state._name} ({update.state}) is not in the states of entity {update._parent._name} ({update._parent})\"\n\n assert isinstance(update.target, crest.Port), f\"Update {update._name}'s target is not a crest.Port\"\n assert update.target in api.get_targets(update._parent), f\"Update's target {update.target._name} ({update.target}) is not in the targets of entity {update._parent._name} ({update._parent})\"\n\n assert isinstance(update.function, (crestml.LearnedFunction, types.FunctionType)), f\"Update {update._name}'s function needs to be of type types.FunctionType or crestdsl.ml.LearnedFunction\"\n assert 'dt' in inspect.signature(update.function).parameters, f\"Update {update._name}'s function has no dt parameter. entity: {update._parent._name} ({update._parent.__class__.__name__})\"\n assert 'self' in inspect.signature(update.function).parameters, f\"Update {update._name}'s function has no self parameter. entity: {update._parent._name} ({update._parent.__class__.__name__})\"\n assert len(inspect.signature(update.function).parameters) == 2, f\"An update should have one one argument 'dt' besides 'self'\"\n\n for port in SH.get_read_ports_from_update(update.function, update):\n assert port in api.get_sources(update._parent), f\"Update {update._name} seems to be reading a port {port._name} ({port}) which is not in the sources of its entity {update._parent._name} ({update._parent})\"",
"def update(self, view, show_errors):\n raise NotImplementedError(\"calling abstract method\")",
"def nut_report_update(message, args, sub_cmd, **kwargs):\n\n def resp_error(code, msg):\n # make sure we cancel whatever addition\n message.respond(u\"nut report-update error %(code)s|%(msg)s\" \\\n % {'code': code, 'msg': msg})\n return True\n\n def provider_entity(provider):\n \"\"\" Entity a Provider is attached to \"\"\"\n try:\n return NUTEntity.objects.get(id=provider.first_access().target.id)\n except:\n return None\n\n # check that all parts made it together\n if not args.strip().endswith('-eom-'):\n return resp_error('BAD_FORM', REPORT_ERRORS['BAD_FORM'])\n else:\n args = args.strip()[:-5].strip()\n\n # split up sections\n sections = {}\n try:\n parts = args.strip().lower().split('#')\n for index in range(0, len(parts)):\n if index == 0:\n infos = parts[index]\n else:\n sections[parts[index][0].upper()] = parts[index][1:]\n pec_sec = sections.get('P', '').strip()\n cons_sec = sections.get('C', '').strip()\n order_sec = sections.get('O', '').strip()\n other_sec = sections.get('T', '').strip()\n except:\n return resp_error('BAD_FORM', REPORT_ERRORS['BAD_FORM'])\n\n # split up infos\n try:\n username, pwhash, date_str = infos.strip().split()\n except:\n return resp_error('BAD_FORM_INFO', REPORT_ERRORS['BAD_FORM_INFO'])\n\n # get Provider based on username\n try:\n provider = Provider.objects.get(user__username=username)\n except Provider.DoesNotExist:\n return resp_error('NO_ACC', REPORT_ERRORS['NO_ACC'])\n\n # check that provider pwhash is good\n if not provider.check_hash(pwhash):\n return resp_error('BAD_PASS', REPORT_ERRORS['BAD_PASS'])\n\n # check that user is not disabled\n if not provider.is_active:\n return resp_error('ACC_DIS', REPORT_ERRORS['ACC_DIS'])\n\n # check that user has permission to submit report on entity\n entity = provider_entity(provider)\n\n if not entity:\n return resp_error('NOT_ENT', REPORT_ERRORS['NOT_ENT'])\n\n eentity = Entity.objects.get(id=entity.id)\n if not provider_can('can_submit_report', provider, eentity):\n return resp_error('NO_PERM', REPORT_ERRORS['NO_PERM'])\n\n # parse date and check if period is valid\n try:\n month = int(date_str[0:2])\n year = int('%s%s' % ('20', date_str[2:4]))\n period = MonthPeriod.find_create_from(year=year, month=month)\n except:\n return resp_error('BAD_FORM_PERIOD', REPORT_ERRORS['BAD_FORM_PERIOD'])\n\n # check period is the one we want\n if not period == current_reporting_period():\n return resp_error('BAD_PERIOD', REPORT_ERRORS['BAD_PERIOD'])\n\n # global infos\n infos = {'entity': entity,\n 'eentity': eentity,\n 'provider': provider,\n 'month': month,\n 'year': year,\n 'period': period,\n 'username': username,\n 'pwhash': pwhash}\n\n # Retrieve report\n try:\n nut_report = NutritionReport.objects.get(period=infos['period'],\n entity=infos['entity'],\n type=Report.TYPE_SOURCE)\n except:\n return resp_error('MISS', REPORT_ERRORS['MISS'])\n\n reports = []\n # common start of error message\n error_start = u\"Impossible d'enregistrer le rapport. 
\"\n\n logger.info(\"Processing PEC\")\n\n if pec_sec:\n subs = pec_sec.split('&')\n subs = subs[1:]\n for sub in subs:\n fields = sub.split()\n cap = fields[0].lower()\n sub_report = getattr(nut_report, 'pec_%s_report' % cap)\n for field in fields[1:]:\n cfield, value = field.split(':')\n rfield = uncompress_pec_field(cfield)\n setattr(sub_report, rfield, int(value))\n validator = PECReportValidator(sub_report)\n validator.errors.reset()\n try:\n validator.validate()\n except AttributeError as e:\n return resp_error('PEC_%s' % cap.upper(),\n error_start + e.__str__())\n except:\n pass\n errors = validator.errors\n # return first error to user\n if errors.count() > 0:\n return resp_error('PEC_%s' % cap.upper(),\n error_start + errors.all()[0])\n else:\n reports.append(sub_report)\n\n logger.info(\"Processing CONS\")\n\n if cons_sec:\n subs = cons_sec.split('&')\n subs = subs[1:]\n for sub in subs:\n fields = sub.split()\n cap = fields[0].lower()\n logger.info(cap.upper())\n for field in fields[1:]:\n cfield, value = field.split(':')\n rinpc, rfield = uncompress_cons_field(cfield)\n sub_report = getattr(getattr(nut_report, \n 'cons_%s_report' % cap),\n 'icr')(rinpc)\n setattr(sub_report, rfield, int(value))\n if sub_report.valid and not sub_report in reports:\n reports.append(sub_report)\n\n logger.info(\"Processing ORDER\")\n\n if order_sec:\n subs = order_sec.split('&')\n subs = subs[1:]\n for sub in subs:\n logger.info(\"\\t%s\" % sub)\n fields = sub.split()\n cap = fields[0].lower()\n for field in fields[1:]:\n cfield, value = field.split(':')\n rinpc, rfield = uncompress_cons_field(cfield)\n sub_report = getattr(getattr(nut_report,\n 'order_%s_report' % cap),\n 'icr')(rinpc)\n setattr(sub_report, rfield, int(value))\n if not sub_report in reports:\n reports.append(sub_report)\n\n logger.info(\"Processing OTHER\")\n\n if other_sec:\n fields = other_sec.split()\n for field in fields[1:]:\n cfield, value = field.split(':')\n rfield = uncompress_pec_field(cfield)\n sub_report = nut_report.pec_other_report\n setattr(sub_report, rfield, int(value))\n # check validity relative to PEC\n if not sub_report.total == sub_report.nut_report.sum_all_other:\n return resp_error('OTHER_INT', REPORT_ERRORS['OTHER_INT'])\n else:\n reports.append(sub_report)\n\n\n # check validity of changes\n # save to DB\n @reversion.create_revision()\n @transaction.commit_manually\n def save_reports(reports, nut_report, provider=None):\n reversion.set_user(provider.user)\n reversion.set_comment(\"SMS report update\")\n for report in reports:\n print(\"saving %s\" % report)\n try:\n sub_report.save()\n except:\n transaction.rollback()\n return False\n try:\n nut_report._status = nut_report.STATUS_MODIFIED_AUTHOR\n nut_report.modified_by = provider\n nut_report.save()\n except:\n transaction.rollback()\n return False\n\n transaction.commit()\n return True\n\n logger.info(\"Saving reports\")\n if not save_reports(reports, nut_report, provider):\n logger.warning(\"Unable to save reports\")\n return resp_error('SRV', REPORT_ERRORS['SRV'])\n logger.info(\"Reports saved\")\n\n ## CONFIRM RESPONSE\n \n confirm = \"nut report-update ok %s\" % nut_report.receipt\n\n message.respond(confirm)\n return True",
"def update(self):\n start = time.time()\n device_data = self._client.get_device_attributes(self._id,\n UPDATE_ATTRIBUTES)\n end = time.time()\n elapsed = round(end - start, 3)\n _LOGGER.debug(\"Updating %s (%s sec): %s\",\n self._name, elapsed, device_data)\n if \"error\" not in device_data:\n if \"errorCode\" not in device_data:\n self._brightness_pct = device_data[ATTR_INTENSITY] if \\\n device_data[ATTR_INTENSITY] is not None else 0.0\n self._operation_mode = device_data[ATTR_POWER_MODE] if \\\n device_data[ATTR_POWER_MODE] is not None else MODE_MANUAL\n self._rssi = device_data[ATTR_RSSI]\n self._wattage_override = device_data[ATTR_WATTAGE_OVERRIDE]\n self._occupancy = device_data[ATTR_OCCUPANCY]\n return\n else:\n if device_data[\"errorCode\"] == \"ReadTimeout\":\n _LOGGER.warning(\"Error in reading device %s: (%s), too slow to respond or busy.\", self._name, device_data)\n else:\n _LOGGER.warning(\"Unknown errorCode, device: %s, error: %s\", self._name, device_data)\n return\n else:\n if device_data[\"error\"][\"code\"] == \"DVCCOMMTO\": \n _LOGGER.warning(\"Cannot update %s: %s. Device is busy or does not respond quickly enough.\", self._name, device_data)\n elif device_data[\"error\"][\"code\"] == \"SVCINVREQ\":\n _LOGGER.warning(\"Invalid or malformed request to Neviweb, %s:\", device_data)\n elif device_data[\"error\"][\"code\"] == \"DVCACTNSPTD\":\n _LOGGER.warning(\"Device action not supported, %s:\", device_data)\n elif device_data[\"error\"][\"code\"] == \"DVCUNVLB\":\n _LOGGER.warning(\"Device %s unavailable, Neviweb maintnance update, %s:\", self._name, device_data)\n elif device_data[\"error\"][\"code\"] == \"SVCERR\":\n _LOGGER.warning(\"Device %s statistics unavailables, %s:\", self._name, device_data)\n else:\n _LOGGER.warning(\"Unknown error, device: %s, error: %s\", self._name, device_data)",
"def test_update(self, updateRecords=None):\n fake_dns_instance = FakeDnsInstance()\n t = template_format.parse(domain_only_template)\n instance = self._setup_test_cloud_dns_instance('dnsinstance_update', t)\n instance.resource_id = 4\n update_args = self.update_domain_only_args\n self._stubout_update(\n instance,\n fake_dns_instance,\n updateRecords,\n **update_args)\n\n uprops = dict(instance.properties)\n uprops.update({\n 'emailAddress': '[email protected]',\n 'ttl': 5555,\n 'comment': 'updated comment',\n })\n if updateRecords:\n uprops['records'] = updateRecords\n ut = rsrc_defn.ResourceDefinition(instance.name,\n instance.type(),\n uprops)\n instance.state_set(instance.CREATE, instance.COMPLETE)\n scheduler.TaskRunner(instance.update, ut)()\n self.assertEqual((instance.UPDATE, instance.COMPLETE), instance.state)\n self.m.VerifyAll()",
"def update(self)->None:\n database.cursor.execute(\"UPDATE rsvps SET response = %s WHERE meetup = %s AND user_id = %s\", (\n self.response,\n self.meetup,\n self.user\n ))\n database.connectio",
"def get_applicable_components(ip_address, headers, dup_payload):\n # Parse the single dup update report and print out versions needing\n # an update. In addition add them to the target_data as needed for\n # the job payload\n target_data = []\n dup_url = 'https://%s/api/UpdateService/Actions/UpdateService.GetSingleDupReport' % ip_address\n dup_resp = requests.post(dup_url, headers=headers,\n data=json.dumps(dup_payload), verify=False)\n if dup_resp.status_code == 200:\n dup_data = dup_resp.json()\n file_token = str(dup_payload['SingleUpdateReportFileToken'])\n for device in dup_data:\n device_name = str(device['DeviceReport']['DeviceServiceTag'])\n device_ip = str(device['DeviceReport']['DeviceIPAddress'])\n for component in device['DeviceReport']['Components']:\n curr_ver = str(component['ComponentCurrentVersion'])\n avail_ver = str(component['ComponentVersion'])\n upd_action = str(component['ComponentUpdateAction'])\n update_crit = str(component['ComponentCriticality'])\n reboot_req = str(component['ComponentRebootRequired'])\n comp_name = str(component['ComponentName'])\n print(\"\\n---------------------------------------------------\")\n print(\"Device =\", device_name)\n print(\"IPAddress =\", device_ip)\n print(\"Current Ver =\", curr_ver)\n print(\"Avail Ver =\", avail_ver)\n print(\"Action =\", upd_action)\n print(\"Criticality =\", update_crit)\n print(\"Reboot Req =\", reboot_req)\n print(\"Component Name =\", comp_name)\n\n if avail_ver > curr_ver:\n temp_map = {'Id': device['DeviceId'],\n 'Data': str(component['ComponentSourceName']) + \"=\" + file_token, 'TargetType': {}}\n temp_map['TargetType']['Id'] = int(device['DeviceReport']['DeviceTypeId'])\n temp_map['TargetType']['Name'] = str(device['DeviceReport']['DeviceTypeName'])\n target_data.append(temp_map)\n else:\n print(\"Unable to get components DUP applies to .. Exiting\")\n return target_data",
"def see_patient(self, request):\n appt = self.get_object() # Appointment object\n appt.status = IN_SESSION\n appt.seen_time = timezone.localtime(timezone.now()).time()\n response = patch_appointment_status(request, appt.id, IN_SESSION)\n if response:\n appt.save()\n return None\n return err_patch",
"def next_update(self):\r\n request = http.Request('GET', '/metadata/next_update.json')\r\n return request, parsers.parse_json",
"def handle_update(self):\n try:\n for controller in self.controllers:\n self._handle_single_update(controller)\n except urllib3.exceptions.HTTPError as http_error:\n raise HttpError('Error talking to Kubernetes', http_error) from http_error"
] | [
"0.5894665",
"0.5239594",
"0.49709976",
"0.49367508",
"0.492882",
"0.4927456",
"0.49062845",
"0.4904821",
"0.48947275",
"0.48865876",
"0.48839745",
"0.4856527",
"0.4836941",
"0.48353428",
"0.4834773",
"0.48274225",
"0.4799497",
"0.4790184",
"0.47784716",
"0.47743553",
"0.4772367",
"0.47708982",
"0.47521022",
"0.47372478",
"0.47173735",
"0.4716659",
"0.4712572",
"0.4696523",
"0.46909913",
"0.4682529"
] | 0.619505 | 0 |
Posts the roster member numbers and the medical group restriction for a specific plan to the SFE. The post includes the commit flag, which determines whether this is a validation-only operation or one that leads to data updates. | def _post_roster_to_endpoint(
plan_id: int,
roster_member_numbers: Set[str],
restrict_to_medical_group_id: Optional[int],
commit: bool
) -> Tuple[Optional[List[str]], int, List[str]]:
#from django.conf import settings
"""if restrict_to_medical_group_id:
url = settings.SFE_URL + reverse('api_plan_set_roster_for_mg', kwargs={
'medical_group_id': restrict_to_medical_group_id,
'plan_id': plan_id
})
else:
url = settings.SFE_URL + reverse('api_plan_set_roster', kwargs={
'plan_id': plan_id
})
err, content = requests_post(url, json_dict={
'all_member_numbers': list(roster_member_numbers),
'commit': commit,
'api_key': settings.STELLAR_PATIENT_UPDATE_API_KEY
})
if err:
return None, 0, [err]
response = json.loads(content)
success = response['success']
if not success:
return None, 0, response['errors']
orphaned_member_numbers = json.loads(response['orphaned_member_numbers'])
orphaned_count = int(response['orphaned_count'])
return orphaned_member_numbers, orphaned_count, []
"""
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def validateAndSendToPiggy(self):\n completion = False\n allowAction = False\n mode = self.getMode()\n selected_date = self.getActiveDate()\n dates_user_is_allowed_to_manipulate = [datetime.date.today(), self.last_working_date]\n\n if getpass.getuser() not in MOSES.getAdminList(self.user_id, self.password):\n if selected_date not in dates_user_is_allowed_to_manipulate:\n allowAction = False\n self.alertMessage(\"Not Allowed\", \"You cannot make changes to dates other than your last working date and today.\")\n else:\n allowAction = True\n else:\n allowAction = True\n\n if allowAction:\n fsnData = self.getFSNDataDict()\n if mode == \"Addition\":\n fsn = fsnData[\"FSN\"]\n fsn_type = fsnData[\"Description Type\"]\n if self.isValidType(fsn, fsn_type):\n written, allow, override_ticket, reason = self.checkDuplicacy(fsn, fsn_type, self.getActiveDate())\n if written and not allow:\n completion = False\n elif written and allow:\n completion = MOSES.addToPiggyBank(self.user_id, self.password, fsnData)\n else:\n completion = MOSES.addToPiggyBank(self.user_id, self.password, fsnData)\n if completion:\n self.alertMessage(\"Sucess\",\"This FSN was successfully entered. %s\"%(reason))\n else:\n self.alertMessage(\"Failed\",\"This FSN could not be entered. %s\"%reason)\n\n elif mode == \"Modification\":\n #print \"Trying to modify an entry.\"\n success = MOSES.updatePiggyBankEntry(fsnData, self.user_id, self.password)\n if success:\n self.alertMessage(\"Success\", \"Successfully modified an entry in the Piggy Bank.\")\n completion = True\n else:\n self.alertMessage(\"Failure\", \"Sorry, that entry cannot be modified. That could be because if you make that change, this entry will conflict with another in the piggybank. Ask your TL to make the change herself.\")\n completion = False\n #print \"Modified!\"\n if completion:\n #Upon completion, do the following things:\n #Reset the form\n #Update the efficiency.\n self.resetForm()\n self.piggybanker_thread.getPiggyBank()\n self.porker_thread.updateForDate(selected_date)",
"def post(self):\n user_id = get_jwt_identity()\n user = find_user(user_id)\n args = post_parser.parse_args()\n\n # check circles\n circles = []\n for circle_id in args['circle_ids']:\n found_circle = find_circle(user, circle_id)\n if not found_circle:\n return {'msg': f'Circle {circle_id} is not found'}, 404\n circles.append(found_circle)\n\n # check reshare\n reshared_from = args['reshared_from']\n reshared_from_post = None\n if reshared_from:\n reshared_from_post = dangerously_get_post(reshared_from)\n if not reshared_from_post:\n return {\"msg\": f\"Post {reshared_from} is not found\"}, 404\n\n # check media\n media_object_names = args['media_object_names']\n if reshared_from and media_object_names:\n return {'msg': \"Reshared post is not allowed to have media\"}, 400\n\n post = create_post(\n user,\n content=args['content'],\n is_public=args['is_public'],\n circles=circles,\n reshareable=args['reshareable'],\n reshared_from=reshared_from_post,\n media_list=check_media_object_names(media_object_names, MaxPostMediaCount),\n mentioned_users=check_mentioned_user_ids(args['mentioned_user_ids']),\n is_update_avatar=False\n )\n if not post:\n return {\"msg\": f\"Not allowed to reshare post {reshared_from}\"}, 403\n return post, 201",
"def post(self):\n return CreateSavingPlan(request, current_user.id)",
"def test_kyc_post_legal_board_member(self):\n pass",
"def testPostAccessAllowed(self):\n for user in (self.contributor, self.delegate, self.owner, self.root):\n response = self.runPost(user, data=self.post_data)\n self.response_201(response)\n data = json.loads(response.content.decode(\"utf-8\"))\n self.assertIn(\"sodar_uuid\", data)\n SequencingMachine.objects.filter(sodar_uuid=data[\"sodar_uuid\"]).delete()",
"def post_review(self, changenum, diff_content=None,\r\n parent_diff_content=None, submit_as=None,\r\n target_groups=None, target_people=None, summary=None,\r\n branch=None, bugs_closed=None, description=None,\r\n testing_done=None, rid=None, publish=True):\r\n try:\r\n save_draft = False\r\n\r\n if rid:\r\n review_request = self.get_review_request(rid)\r\n else:\r\n review_request = self.new_review_request(changenum, submit_as)\r\n\r\n if target_groups:\r\n self.set_review_request_field(review_request, 'target_groups',\r\n target_groups)\r\n save_draft = True\r\n\r\n if target_people:\r\n self.set_review_request_field(review_request, 'target_people',\r\n target_people)\r\n save_draft = True\r\n\r\n if summary:\r\n self.set_review_request_field(review_request, 'summary',\r\n summary)\r\n save_draft = True\r\n\r\n if branch:\r\n self.set_review_request_field(review_request, 'branch', branch)\r\n save_draft = True\r\n\r\n if bugs_closed:\r\n self.set_review_request_field(review_request, 'bugs_closed',\r\n bugs_closed)\r\n save_draft = True\r\n\r\n if description:\r\n self.set_review_request_field(review_request, 'description',\r\n description)\r\n save_draft = True\r\n\r\n if testing_done:\r\n self.set_review_request_field(review_request, 'testing_done',\r\n testing_done)\r\n save_draft = True\r\n\r\n if save_draft:\r\n self.save_draft(review_request)\r\n except APIError, e:\r\n rsp, = e.args\r\n if rid:\r\n return self.die('Error getting review request %s: %s (code %s)' %\r\n (rid, rsp['err']['msg'], rsp['err']['code']))\r\n else:\r\n error_message = 'Error creating review request: %s (code %s)\\n' % (rsp['err']['msg'],\r\n rsp['err']['code'])\r\n if rsp['err']['code'] == 105:\r\n bad_keys = rsp['fields']\r\n if bad_keys:\r\n error_message = 'Invalid key-value pairs:\\n'\r\n for key, issues in bad_keys.items():\r\n error_message += '%s: %s\\n' % (key, ', '.join(issues))\r\n\r\n return self.die(error_message)\r\n\r\n if not self.info.supports_changesets:\r\n try:\r\n self.upload_diff(review_request, diff_content, parent_diff_content)\r\n except APIError, e:\r\n rsp, = e.args\r\n print('Error uploading diff: %s (%s)' % (rsp['err']['msg'], rsp['err']['code']))\r\n self.debug(rsp)\r\n self.die('Your review request still exists, but the diff is not '\r\n 'attached.')\r\n\r\n if publish:\r\n self.publish(review_request)\r\n\r\n request_url = 'r/' + str(review_request['id'])\r\n review_url = urljoin(self.url, request_url)\r\n\r\n if not review_url.startswith('http'):\r\n review_url = 'http://%s' % review_url\r\n\r\n sys.stderr.write('Review request #%s posted.\\n' % review_request['id'])\r\n sys.stderr.write('\\n%s\\n' % review_url)\r\n\r\n return 1",
"def submit(self):\n data = self.getFSNDataDict()\n if data != []:\n MOSES.addToPiggyBank(data, self.user_id, self.password)",
"def post(self):\n\n action = self.request.get('action')\n if not action:\n raise ErrorMessage(404, 'missing action (requested_action) params')\n\n self.require_action_permitted('grant')\n\n account = model.Account.get(self.request.get('key'))\n if not account:\n raise ErrorMessage(404, 'bad key given')\n\n #TODO(eyalf): define account.display_name() or something\n name = account.email\n if not action in account.requested_actions:\n #i18n: Error message\n raise ErrorMessage(404, _('No pending request for '\n '%(account_action)s by %(user)s')\n % (action, name))\n account.requested_actions.remove(action)\n grant = self.request.get('grant', 'deny')\n if grant == 'approve':\n account.actions.append(action)\n account.put()\n logging.info('%s request for %s was %s' % (account.email,\n action,\n grant))\n\n if self.params.embed:\n if grant == 'approve':\n self.write(\n #i18n: Application for the given permission action approved\n _('Request for becoming %(action)s was approved.') % action)\n else:\n self.write(\n #i18n: Application for the given permission action denied\n _('Request for becoming %(action)s was denied.') % action)\n else:\n raise Redirect(self.get_url('/grant_access'))",
"def test_submit_bad_data_when_updating_membership(self):\n self.login_as(\"bob\")\n\n # let's try to change bob's membership to ben\n # user is a read-only field so it is simply ignored:\n payload = {\"user\": {\"id\": self.USERS[\"ben\"][\"id\"]}}\n with self.assertNumQueries(6):\n response = self.client.put(self.url, payload)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data[\"user\"][\"id\"], self.USER_ID)\n\n # now, let's try to move bob's membership to another community\n # community is a read-only field so it is also ignored:\n payload = {\"community\": self.COMMUNITIES[\"group2\"][\"id\"]}\n with self.assertNumQueries(6):\n response = self.client.put(self.url, payload)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data[\"community\"], self.GROUP_ID)\n\n # now, let's try to submit bad value:\n payload = {\"is_admin\": \"Of course!\"}\n with self.assertNumQueries(4):\n response = self.client.put(self.url, payload)\n self.assert_validation_failed(response, data={\n \"is_admin\": [\"Must be a valid boolean.\"]\n })\n self.assertTrue(Membership.objects.get(\n community_id=self.GROUP_ID, user_id=self.USER_ID).is_admin)",
"def validate(self,admin,bal_org,bal_dst):\n\n rv=admin.helper.setAmount(admin.userName,self.org,bal_org)\n if rv!= None:\n rv=admin.helper.setAmount(admin.userName,self.dst,bal_dst)\n if rv != None:\n return True\n else:\n return False",
"def test_accept_member_with_moderator(self):\n mod = Member.objects.get(id=4)\n mod.status = '1'\n mod.save()\n\n url = '/api/v1/communities/3/accept_member/'\n data = {\n 'id': 5\n }\n\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user2'), format='json')\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n\n data = response.data\n self.assertEqual(5, data['id'])\n self.assertEqual('1', data['status'])\n time.sleep(1)\n self.assertEqual(1, len(mail.outbox))\n self.assertEqual(mail.outbox[0].subject,\n '[Smartribe] Membership accepted')",
"def post(self, request):\n data = request.data\n try:\n career_planning = CareerPlanning(**data)\n career_planning.save()\n LOGGER.info(\"CareerPlanning created successfully\")\n except Exception, error:\n LOGGER.error(\"Error:%s\", str(error))\n return Response({\"status\": \"FAILED\", \"message\": str(error)})\n return Response({\"status\": \"SUCCESS\", \"message\": \"Record saved successfully\"})",
"def add_family_member_submission(request):\n full_name = request.POST.get('member-name', '')\n pin = request.POST.get('member-pin', '')\n if len(pin) != 4:\n messages.success(request, 'Please enter 4 characters as your PIN number')\n return HttpResponseRedirect('/add-family-member/')\n else:\n current_user = request.user\n if current_user.character_set.filter(character_name=full_name):\n messages.success(request, 'This member has already been added, try another name')\n return HttpResponseRedirect('/add-family-member/')\n else:\n current_user.character_set.create(character_name=full_name, character_pin=pin)\n return HttpResponseRedirect('/profile/')",
"def _post(self, which_port, msg):\n return _spacegrant_swig.general_burster_2_sptr__post(self, which_port, msg)",
"def test_kyc_post_document_board_member(self):\n pass",
"def repost(object=None, message=None, group_id=None, mark_as_ads=None):\n params = {\n 'object': object,\n 'message': message,\n 'group_id': group_id,\n 'mark_as_ads': mark_as_ads\n }\n result = call('wall.repost', **params)\n return parse_response(result)",
"def _submission_allowed(self):\n window = dt.timedelta(\n self.plugin_config.get(\"days\", 0),\n self.plugin_config.get(\"seconds\", 0),\n self.plugin_config.get(\"microseconds\", 0),\n self.plugin_config.get(\"milliseconds\", 0),\n self.plugin_config.get(\"minutes\", 0),\n self.plugin_config.get(\"hours\", 0),\n self.plugin_config.get(\"weeks\", 0),\n )\n\n subm_time = dt.datetime.strptime(self.submission_metadata[\"created_at\"], STRPTIME_FORMAT)\n\n prev_subms = 0\n for subm in self.submission_metadata[\"previous_submissions\"]:\n st = dt.datetime.strptime(subm[\"submission_time\"], STRPTIME_FORMAT)\n if subm_time - st <= window:\n prev_subms += 1\n\n if prev_subms >= self.plugin_config[\"allowed_submissions\"]:\n return False, \\\n f\"You have exceeded the rate limit for the autograder. Students are allowed {self.plugin_config['allowed_submissions']} \" + \\\n f\"submissions every {self._window_to_str()}.\"\n\n else:\n return True, \\\n f\"Students are allowed {self.plugin_config['allowed_submissions']} submissions every {self._window_to_str()}. \" + \\\n f\"You have {prev_subms} submissions in that period.\"",
"def post(self, request, *args, **kwargs):\n\n # alleen de RCL mag de planning uitbreiden\n if self.rol_nu != Rollen.ROL_RCL:\n raise PermissionDenied('Niet de beheerder')\n\n try:\n cluster_pk = int(kwargs['cluster_pk'][:6]) # afkappen voor de veiligheid\n cluster = (Cluster\n .objects\n .select_related('regio', 'regio__rayon')\n .get(pk=cluster_pk))\n except (ValueError, Cluster.DoesNotExist):\n raise Http404('Cluster niet gevonden')\n\n try:\n deelcomp_pk = int(kwargs['deelcomp_pk'][:6]) # afkappen voor de veiligheid\n deelcomp = (Regiocompetitie\n .objects\n .select_related('competitie')\n .get(pk=deelcomp_pk))\n except (ValueError, Regiocompetitie.DoesNotExist):\n raise Http404('Competitie niet gevonden')\n\n ronde = maak_regiocompetitie_ronde(deelcomp=deelcomp, cluster=cluster)\n\n if ronde:\n next_url = reverse('CompLaagRegio:regio-ronde-planning', kwargs={'ronde_pk': ronde.pk})\n else:\n # maximum aantal rondes bereikt - zou hier niet eens moeten zijn\n # next_url = reverse('CompLaagRegio:regio-cluster-planning', kwargs={'deelcomp_pk': deelcomp.pk,\n # 'cluster_pk': cluster.pk})\n raise Http404('Limiet bereikt')\n\n return HttpResponseRedirect(next_url)",
"def test_submit_for_corporate_approval(self):\n # set up the prescription to be ready for corporate approval\n p = self.make('Prescription')\n self.set_cbas_attributes(p)\n self.assertTrue(p.can_corporate_approve)\n self.assertTrue(p.planning_status == p.PLANNING_DRAFT)\n\n # submit for corporate approval\n url = reverse('admin:prescription_prescription_corporate_approve',\n args=(str(p.id),))\n response = self.client.post(url, {}, follow=True)\n self.assertEqual(response.status_code, 200)\n\n # refresh prescription object\n p = Prescription.objects.get(name='test')\n self.assertTrue(p.planning_status == p.PLANNING_SUBMITTED)\n self.assertTrue(p.planning_status_modified is not None)",
"def test_apply_corporate_approval(self):\n p = self.make('Prescription')\n self.set_cbas_attributes(p)\n p.planning_status = p.PLANNING_SUBMITTED\n p.save()\n\n url = reverse('admin:prescription_prescription_corporate_approve',\n args=(str(p.id),))\n self.client.login(username='fmsb', password='test')\n response = self.client.post(url, {}, follow=True)\n self.assertEqual(response.status_code, 200)\n\n p = Prescription.objects.get(name='test')\n self.assertTrue(p.planning_status == p.PLANNING_APPROVED)\n self.assertTrue(p.planning_status_modified is not None)",
"def post(\n self,\n email,\n company_name,\n location,\n job_profile,\n salary,\n username,\n password,\n security_question,\n security_answer,\n notes,\n date_applied,\n status,\n):",
"def increaseApproval(_spender: address, _addedValue: uint256) -> bool:\n\n self.allowed[msg.sender][_spender] += _addedValue\n log.Approval(msg.sender, _spender, self.allowed[msg.sender][_spender])\n return True",
"def post_object(self,object_data):\n try:\n self.update_title(title=object_data[\"title\"],owner=object_data[\"owner\"])\n print(\"make request\")\n make_request(f'{GRAPH_URL}/planner/plans/', 'POST', object_data)\n logging.info(f'Created plan with title {object_data.get(\"title\")}')\n self.append_response(\"Ok\")\n return True\n except Exception as e:\n self.append_response(\"Error\")\n print(\"error : \",e)\n return False",
"def is_post_valid(cls, community_id, comment_op: dict):\n\n assert community_id, 'no community_id'\n community = cls._get_name(community_id)\n account_id = Accounts.get_id(comment_op['author'])\n role = cls.get_user_role(community_id, account_id)\n type_id = int(community[5])\n\n # TODO: check `nsfw` tag requirement #267\n # TODO: (1.5) check that beneficiaries are valid\n\n if type_id == TYPE_JOURNAL:\n if not comment_op['parent_author']:\n return role >= Role.member\n elif type_id == TYPE_COUNCIL:\n return role >= Role.member\n return role >= Role.guest # or at least not muted",
"def _handleRequestPostChargeParameters(self, data):\r\n print(\"\\\"Request Post Charge Parameters\\\" received\")\r\n message = self.whitebeet.v2gParseRequestPostChargeParameters(data)\r\n if 'dc' in message:\r\n print(\"SOC: {}%\".format(message['dc']['soc']))\r\n try:\r\n self.whitebeet.v2gSetDcPostChargeParameters(0, 1, int(self.charger.getEvsePresentVoltage()))\r\n except Warning as e:\r\n print(\"Warning: {}\".format(e))\r\n except ConnectionError as e:\r\n print(\"ConnectionError: {}\".format(e))",
"def test_approve(self):\r\n request = RequestFactory()\r\n post = request.post(self.url, {'field': 'mod_queue',\r\n 'op': 'approve',\r\n 1: [self.problem_id.to_deprecated_string(), '2.0', '2']})\r\n view.approve(post, self.course_id, 'mod_queue')\r\n problem_hints = XModuleUserStateSummaryField.objects.get(field_name='mod_queue', usage_id=self.problem_id).value\r\n self.assertTrue('2.0' not in json.loads(problem_hints) or len(json.loads(problem_hints)['2.0']) == 0)\r\n problem_hints = XModuleUserStateSummaryField.objects.get(field_name='hints', usage_id=self.problem_id).value\r\n self.assertTrue(json.loads(problem_hints)['2.0']['2'] == ['Hint 2', 1])\r\n self.assertTrue(len(json.loads(problem_hints)['2.0']) == 2)",
"def _post(self, which_port, msg):\n return _spacegrant_swig.hdlc_deframer_sptr__post(self, which_port, msg)",
"def do_post(self, **kwargs) -> dict[str, str]:\n feature_id = kwargs['feature_id']\n gate_id = kwargs['gate_id']\n feature = self.get_specified_feature(feature_id=feature_id)\n new_state = self.get_int_param('state', validator=Vote.is_valid_state)\n\n user = self.get_current_user(required=True)\n gate = Gate.get_by_id(gate_id)\n if not gate:\n self.abort(404, msg='Gate not found')\n if gate.feature_id != feature_id:\n self.abort(400, msg='Mismatched feature and gate')\n\n old_state = gate.state\n self.require_permissions(user, feature, gate, new_state)\n\n # Note: We no longer write Approval entities.\n approval_defs.set_vote(feature_id, None, new_state,\n user.email(), gate_id)\n\n if new_state == Vote.REVIEW_REQUESTED:\n notifier_helpers.notify_approvers_of_reviews(feature, gate)\n else:\n notifier_helpers.notify_subscribers_of_vote_changes(\n feature, gate, user.email(), new_state, old_state)\n\n # Callers don't use the JSON response for this API call.\n return {'message': 'Done'}",
"def post_permissions(request, post):\n user = request.user\n is_editable = has_ownership = False\n\n if user.is_authenticated():\n\n if user == post.author :\n has_ownership = is_editable = True\n elif user.is_moderator or user.is_staff:\n is_editable = True\n\n post.is_editable = is_editable\n post.has_ownership = has_ownership\n\n return post",
"def test_01_check_to_state_draft_post(self):\r\n cr, uid = self.cr, self.uid\r\n filter_draft = self.create_filter_draft(cr, uid)\r\n self.create_rule(cr, uid, 'on_create')\r\n new_lead_id = self.create_lead_test_1(cr, uid)\r\n new_lead = self.model.browse(cr, uid, new_lead_id)\r\n self.assertEquals(new_lead.state, 'draft')\r\n self.assertEquals(new_lead.user_id.id, self.demo)\r\n self.delete_rules(cr, uid)"
] | [
"0.5406901",
"0.53190297",
"0.52782184",
"0.51523155",
"0.5132878",
"0.49806508",
"0.49678287",
"0.4966064",
"0.4957115",
"0.49533275",
"0.49210647",
"0.491864",
"0.49109998",
"0.48907435",
"0.4886257",
"0.48517922",
"0.4844767",
"0.4843075",
"0.48355353",
"0.48208615",
"0.48136264",
"0.48115227",
"0.4802264",
"0.4788641",
"0.47727525",
"0.47662514",
"0.47629517",
"0.47585994",
"0.47514963",
"0.47484997"
] | 0.6674818 | 0 |
Check Monte Carlo statistics in CheckMate output. | def checkStats(checkmateOutput):
if not os.path.isfile(checkmateOutput):
print("Files %s not found" %checkmateOutput)
return False
# Get CMS-SUS-16-032 data:
data = np.genfromtxt(checkmateOutput,names=True,
dtype=None,encoding=None)
data = np.delete(data,np.where(data['sr'] == 'Combined'))
ibest = np.argmax(data['rexp'])
pt = data[ibest]
if not pt['s']:
ratio = 100.0
else:
ratio = pt['signalsumofweights']/pt['s']
nEvts = pt['signalsumofweights']
return ratio,nEvts | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def RunCheckMate(parserDict):\n t0 = time.time()\n parser = ConfigParserExt()\n parser.read_dict(parserDict)\n\n pars = parser.toDict(raw=False)[\"options\"]\n\n outputFolder = os.path.abspath(parser.get(\"CheckMateParameters\",\"OutputDirectory\"))\n resultFolder = os.path.join(outputFolder,parser.get(\"CheckMateParameters\",\"Name\"))\n if os.path.isdir(resultFolder):\n logger.info(\"Results folder %s found.\" %resultFolder)\n if parser.get(\"CheckMateParameters\",\"OutputExists\") == 'overwrite':\n logger.info(\"Overwriting\")\n shutil.rmtree(resultFolder)\n else:\n logger.info(\"Skipping\" %resultFolder)\n return \"---- %s skipped\" %resultFolder\n cardFile = getCheckMateCard(parser)\n logger.debug('Steering card %s created' %cardFile)\n\n #Create output dirs, if do not exist:\n try:\n os.makedirs(outputFolder)\n except:\n pass\n\n #Run CheckMate\n checkmatePath = os.path.abspath(pars['checkmateFolder'])\n checkmateBin = os.path.join(checkmatePath,'bin')\n logger.info('Running checkmate with steering card: %s ' %cardFile)\n logger.debug('Running: python2 ./CheckMATE %s at %s' %(cardFile,checkmateBin))\n run = subprocess.Popen('python2 ./CheckMATE %s' %(cardFile)\n ,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE,cwd=checkmateBin)\n output,errorMsg= run.communicate()\n logger.debug('CheckMATE error:\\n %s \\n' %errorMsg)\n logger.debug('CheckMATE output:\\n %s \\n' %output)\n\n os.remove(cardFile)\n\n logger.info(\"Done in %3.2f min\" %((time.time()-t0)/60.))\n\n #Remove parton level events:\n if pars['cleanUp'] is True:\n mg5folder = os.path.join(resultFolder,'mg5amcatnlo')\n if os.path.isdir(mg5folder):\n logger.debug('Removing data from: %s \\n' %mg5folder)\n for f in os.listdir(mg5folder):\n file_path = os.path.join(mg5folder, f)\n if os.path.isdir(file_path):\n shutil.rmtree(file_path)\n analysisfolder = os.path.join(resultFolder,'analysis')\n if os.path.isfile(os.path.join(analysisfolder,'analysisstdout_atlas_1712_02118_ew.log')):\n os.remove(os.path.join(analysisfolder,'analysisstdout_atlas_1712_02118_ew.log'))\n\n now = datetime.datetime.now()\n\n return \"Finished running CheckMATE at %s\" %(now.strftime(\"%Y-%m-%d %H:%M\"))",
"def test_section3_3(self):\n\n output_file = 'run.msmarco-passage.txt'\n self.temp_files.append(output_file)\n run_cmd = f'python -m pyserini.search --topics msmarco-passage-dev-subset \\\n --index msmarco-passage --output {output_file} \\\n --bm25 --output-format msmarco'\n status = os.system(run_cmd)\n self.assertEqual(status, 0)\n\n eval_cmd = f'python -m pyserini.eval.msmarco_passage_eval \\\n msmarco-passage-dev-subset {output_file}'\n stdout, stderr = run_command(eval_cmd)\n score = parse_score_msmarco(stdout, \"MRR @10\")\n self.assertAlmostEqual(score, 0.1874, delta=0.0001)",
"def check_lammps_sim(out_file, verbose=True):\n FINISHED = False\n try:\n with open(out_file, 'r') as f:\n lines = f.readlines()\n if 'Total wall time' in lines[-1]:\n FINISHED = True\n except Exception as e:\n if verbose:\n print(e)\n # print(lines[-1])\n return FINISHED",
"def test_1_xc_run(self) :\n self.banner(\"Testing if extra credit simulation gives the right percentages.\")\n filename = self.find_file('project9_xc.py')\n doors = random.randrange(10, 100)\n switch_target = 100 * ((doors - 1) / doors)\n stay_target = 100 * (1 / doors)\n target_range = 2\n \n with open('logs/test_extra_credit.out', 'a') as log :\n test = pexpect.spawnu('python ' + filename.as_posix() + ' {}'.format(doors), logfile=log, encoding='utf-8')\n turns = random.randrange(200, 500)\n test.sendline(str(turns))\n try : \n got = test.expect([pexpect.EOF, '(\\d+\\.\\d+)\\s*%'], timeout=5)\n switch_percent = float(test.match.group(1))\n got = test.expect([pexpect.EOF, '(\\d+\\.\\d+)\\s*%'], timeout=5)\n stay_percent = float(test.match.group(1))\n except :\n self.fail(\"The grader program failed to parse the output of your project.\")\n\n if not (switch_target-target_range < switch_percent < switch_target+target_range) :\n self.fail('Your switch percentage ({}) is out of range. It should be between {} and {}'.format(switch_percent, switch_target-target_range, switch_target+target_range))\n if not (stay_target-target_range < stay_percent < stay_target+target_range) :\n self.fail('Your stay percentage ({}) is out of range. It should be between {} and {}'.format(stay_percent, stay_target-target_range, stay_target+target_range))\n test.close()",
"def dataStats(self):\n print (\"Performing statistical analysis of the data\")\n # stuff to do",
"def run_check(lst):\n print('Mean: ', get_mean(lst) == np.mean(lst))\n print('Median: ', get_median(lst) == np.median(lst))\n print('Mode: ', get_mode(lst) == scs.mode(lst).mode[0])",
"def computeStats(self, absList, statOut, errorOut):\n \n nMentions = 0\n pSum = 0\n rSum = 0\n for abstract in absList:\n # build hash of annotated clusters/chains keyed by ID\n errorOut.write('\\n---- '+abstract.id+' ----\\n')\n trueChainLengths = {}\n entityList = abstract.annotatedEntities.getList(self.entityTypes[0])\n errorOut.write('True chains:\\n')\n for entityTemplate in entityList:\n if len(entityTemplate.getAnnotatedId()) > 0:\n trueChain = entityTemplate.getMentionChain()\n trueChainLengths[entityTemplate.getAnnotatedId(checkEntireCluster=False)] = len(trueChain)\n for m in trueChain:\n# errorOut.write(m.name+':'+m.getAnnotatedId(checkEntireCluster=False) +'\\n')\n errorOut.write('%s %s:%s, matchedMention=%s \\n'%(m.name, m.mention, m.getAnnotatedId(checkEntireCluster=False), m.mention.matchedMention))\n\n errorOut.write('----\\n')\n else:\n print abstract.id, entityTemplate.name, 'is missing an ID'\n \n # compute Recall and precision for each detected chain/cluster\n entityList = abstract.entities.getList(self.entityTypes[0])\n errorOut.write('\\nHypothesis chains:\\n')\n for entityTemplate in entityList:\n detectedChain = entityTemplate.getMentionChain()\n \n rootMention = entityTemplate.rootMention()\n errorOut.write('[Canonical name: '+rootMention.getCanonicalName()+']\\n')\n \n for m in detectedChain:\n errorOut.write('%s %s:%s, matchedMention=%s \\n'%(m.name, m.mention, m.getAnnotatedId(checkEntireCluster=False), m.mention.matchedMention))\n# errorOut.write(m.name+':'+m.getAnnotatedId(checkEntireCluster=False) +'\\n')\n errorOut.write('----\\n')\n\n nMentionsInChain = len(detectedChain)\n for mTemplate in detectedChain:\n nMentions += 1\n if len(mTemplate.getAnnotatedId(checkEntireCluster=False)) == 0:\n # mention is a false positive, it does not belong to any chain\n pSum += 1.0/nMentionsInChain\n rSum += 1\n else:\n if mTemplate.getAnnotatedId(checkEntireCluster=False) not in trueChainLengths:\n print abstract.id, 'template with id =',mTemplate.getAnnotatedId(checkEntireCluster=False), 'not in a true chain'\n break\n nMentionsInTrueChain = trueChainLengths[mTemplate.getAnnotatedId(checkEntireCluster=False)]\n nCorrectInDetectedChain = 0\n annotatedMatches = set([])\n # count the number of mentions in the detected chain that\n # should be in the same chain as this mention\n for m in detectedChain:\n if mTemplate.getAnnotatedId(checkEntireCluster=False) == m.getAnnotatedId(checkEntireCluster=False) \\\n and m.mention.matchedMention not in annotatedMatches:\n nCorrectInDetectedChain += 1\n annotatedMatches.add(m.mention.matchedMention)\n# else:\n# print abstract.id, 'Two mentions do not belong in same chain',\n# print mTemplate, m.getAnnotatedId()\n \n if nCorrectInDetectedChain > nMentionsInTrueChain:\n print abstract.id, 'id=',mTemplate.getAnnotatedId(checkEntireCluster=False), \n print 'detected chain=', nCorrectInDetectedChain,\n print 'true chain=', nMentionsInTrueChain\n nCorrectInDetectedChain = nMentionsInTrueChain\n \n# if nCorrectInDetectedChain != nMentionsInChain:\n# print abstract.id, 'id=',mTemplate.getAnnotatedId(), \n# print 'detected chain=', nCorrectInDetectedChain,\n# print 'true chain=', nMentionsInTrueChain\n \n pSum += float(nCorrectInDetectedChain) / nMentionsInChain\n rSum += float(nCorrectInDetectedChain) / nMentionsInTrueChain\n \n if nMentions == 0:\n print 'No mentions???'\n return \n \n precision = pSum/nMentions\n recall = rSum/nMentions \n fscore = 2*(recall*precision)/(recall + precision)\n \n 
sys.stdout.write('Recall\\tPrecision\\tF-score\\n')\n sys.stdout.write('%.3f\\t ' % recall + '%.3f\\t ' % precision + '%.3f' % fscore+'\\n')\n# statOut.write(self.entityTypesString+'\\n')\n# statOut.write('Recall\\tPrecision\\tF-score\\n')\n# statOut.write('%.3f\\t ' % recall + '%.3f\\t ' % precision + '%.3f' % fscore+'\\n')\n statOut.addStats('MC - '+self.entityTypesString, [['R', recall], ['P', precision], ['F',fscore]])",
"def mark():\n test_ret = subprocess.run([\"make\", \"test\"], encoding='utf-8',\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n output = test_ret.stdout\n errors = test_ret.stderr\n lines = output.split('\\n')\n\n # find the line with the info we are looking for\n i = 0\n for idx, l in enumerate(lines):\n if \"% tests passed,\" in l:\n i = idx\n m = test_expr.search(lines[i])\n if m:\n perc, wrong, total = m.groups()\n perc = float(perc) / 100 # percent\n wrong = int(wrong)\n total = int(total)\n right = total - wrong\n else:\n print('\\n'.join(lines))\n right = int(input(\"Failed to parse score, input correct number manually: \"))\n total = int(input(\"Total tests: \"))\n comp = right / total\n output = '\\n'.join([lines[0]]+lines[2:])\n return (100 * comp, output, right, total)",
"def test_concentration_profile(self):\n # TODO: add an output for average particle concentration",
"def check_expectations(self):\n self.load_results()\n\n for (benchmark, producer), result in self.results.items():\n if not result.reports:\n print('No results found for ' + benchmark + ' ' + producer)\n result.test_passed = False\n else:\n for report in result.reports:\n if check_benchmark_result(report, result.expectation):\n print('Test passed: ' + result.directory)\n result.test_passed = True\n else:\n print('Test failed: ' + result.directory)\n result.test_passed = False",
"def report_totals(output):\n groups = (STATS_PATC.match(line) for line in output.splitlines())\n tuples = (g.groups() for g in groups if g)\n\n results = [0,0,0,0,0]\n for t in tuples:\n results[0] += int(t[0]) # total\n results[1] += int(t[1]) # failures\n results[2] += int(t[2]) # errors\n results[3] += int(t[3]) # skipped\n results[4] += float(t[4]) # elapsed time\n\n print 'Tests run: %d, Failures: %d, Errors: %d, Skipped: %d, '\\\n 'Time elapsed: %.2f' % tuple(results)",
"def check(self):\n self.init()\n self.calculate_output()\n self.compare_outputs_with_expects()",
"def compute_statistics(self):",
"def analyse():\n identity_list = create_identity_list()\n frequency = create_identity_list()\n\n for sim in range(SIMULATIONS):\n # Generate the hands and board\n cards_used = []\n hands_array = main.generate_hands(cards_used, CARDS, HANDS)\n ftr = main.generate_ftr(cards_used, CARDS)\n\n # Find strengths of each hand and determine winner list, and amount of winning hands\n strengths = main.get_strengths(hands_array, ftr, HANDS)\n winner = main.determine_winner(hands_array, ftr, strengths)\n winning_hands = len(main.get_winning_hands(winner))\n\n # Get the 'identities' of every hand\n hand_identities = get_identities(hands_array)\n\n # Add 1 to corresponding identity value if the hand won\n for hand in range(HANDS):\n row_col = get_row_col_index(hand_identities[hand])\n if winner[hand] == 1: # The corresponding hand won\n identity_list[row_col[0]][row_col[1]] += 1.0/winning_hands\n frequency[row_col[0]][row_col[1]] += 1\n\n # Turn the identity list into percentages using frequency list\n percentage_list = convert_to_percentages(identity_list, frequency)\n\n # Nicely print the results\n print_results(percentage_list)",
"def summarize(self):\n # NOTE: should be moved to abstract superclass\n failcount = len(self.mosaictrees) - len(self)\n msg = \"Parsed %i mosaics from the FluoView project.\\n\\n\" % len(self)\n if failcount > 0:\n msg += (\n \"\\n==== WARNING ====== WARNING ====\\n\\n\"\n \"Parsing failed on %i mosaic(s). Missing files?\\n \"\n \"\\n==== WARNING ====== WARNING ====\\n\\n\\n\" % failcount\n )\n for mos in self:\n msg += \"Mosaic %i: \" % mos.supplement[\"index\"]\n msg += \"%i x %i tiles, \" % (mos.dim[\"X\"], mos.dim[\"Y\"])\n msg += \"%.1f%% overlap.\\n\" % mos.get_overlap()\n return msg",
"def summarize(self):\n # NOTE: should be moved to abstract superclass\n failcount = len(self.mosaictrees) - len(self)\n msg = \"Parsed %i mosaics from the FluoView project.\\n\\n\" % len(self)\n if failcount > 0:\n msg += (\n \"\\n==== WARNING ====== WARNING ====\\n\\n\"\n \"Parsing failed on %i mosaic(s). Missing files?\\n \"\n \"\\n==== WARNING ====== WARNING ====\\n\\n\\n\" % failcount\n )\n for mos in self:\n msg += \"Mosaic %i: \" % mos.supplement[\"index\"]\n msg += \"%i x %i tiles, \" % (mos.dim[\"X\"], mos.dim[\"Y\"])\n msg += \"%.1f%% overlap.\\n\" % mos.get_overlap()\n return msg",
"def script_stats_analysis(self, data):\n anomal_chars = [r'^', r'{', r'}', r'\"', r',', r'<', r'>', ';']\n anomal_char_stats = {}\n char_stats = {\"upper\": 0, \"lower\": 0, \"numbers\": 0, \"symbols\": 0, \"spaces\": 0}\n anomalies = []\n c = Counter(data)\n anomaly_score = 0\n\n # Check the characters\n for char in c.most_common():\n if char[0] in anomal_chars:\n anomal_char_stats[char[0]] = char[1]\n if char[0].isupper():\n char_stats[\"upper\"] += char[1]\n elif char[0].islower():\n char_stats[\"lower\"] += char[1]\n elif char[0].isdigit():\n char_stats[\"numbers\"] += char[1]\n elif char[0].isspace():\n char_stats[\"spaces\"] += char[1]\n else:\n char_stats[\"symbols\"] += char[1]\n # Totals\n char_stats[\"total\"] = len(data)\n char_stats[\"alpha\"] = char_stats[\"upper\"] + char_stats[\"lower\"]\n\n # Detect Anomalies\n if char_stats[\"alpha\"] > 40 and char_stats[\"upper\"] > (char_stats[\"lower\"] * 0.9):\n anomalies.append(\"upper to lower ratio\")\n anomaly_score += 20\n if char_stats[\"symbols\"] > char_stats[\"alpha\"]:\n anomalies.append(\"more symbols than alphanum chars\")\n anomaly_score += 40\n for ac, count in anomal_char_stats.iteritems():\n if (count/char_stats[\"alpha\"]) > 0.05:\n anomalies.append(\"symbol count of '%s' very high\" % ac)\n anomaly_score += 40\n\n # Generate message\n message = \"Anomaly detected ANOMALIES: '{0}'\".format(\"', '\".join(anomalies))\n if anomaly_score > 40:\n return message, anomaly_score\n\n return \"\", 0",
"def _logging_smm(self, stats_obs, stats_sim):\n fname = 'monitoring.estimagic.smm.info'\n if self.num_evals == 1 and os.path.exists(fname):\n os.unlink(fname)\n\n with open(fname, 'a+') as outfile:\n\n fmt_ = '\\n\\n{:>8}{:>15}\\n\\n'\n outfile.write(fmt_.format('EVALUATION', self.num_evals))\n\n fmt_ = '{:>8}' + '{:>15}' * 4 + '\\n\\n'\n info = ['Moment', 'Observed', 'Simulated', 'Difference', 'Weight']\n outfile.write(fmt_.format(*info))\n\n for i, moment in enumerate(stats_obs):\n\n stat_obs, stat_sim = stats_obs[i], stats_sim[i]\n info = [i, stat_obs, stat_sim, abs(stat_obs - stat_sim), self.weighing_matrix[i, i]]\n\n fmt_ = '{:>8}' + '{:15.5f}' * 4 + '\\n'\n outfile.write(fmt_.format(*info))",
"def statistics_on_test(self, predicted_results, result):\n # Print confusion matrix and mean average precision score\n predicted_results_binary = self.predicted_results_to_binary(predicted_results)\n print(\"\\nConfusion matrix : \")\n print(confusion_matrix(result, predicted_results_binary))\n print(\"\\nAverage precision score : \", average_precision_score(result, predicted_results_binary))",
"def run_compare(self):\n misses = []\n for row in range(self.data.get_number_of_rows()):\n ref_sentence = self.data.get_row(row)[self.data.get_gold()]\n results = {}\n for team, team_sentence in self.data.get_row_for_teams(self.evaluator.get_teams(row), row).iteritems():\n results[team] = self.get_sentence_score(ref_sentence, team_sentence)\n misses.append(self.evaluator.compare_all(results, row))\n print np.median(misses), np.mean(misses)\n return np.median(misses)",
"def run_mcts(self):\n self.mcts.sigstop = False\n self.running_mcts = True\n\n self.mcts.search(max_time=self.max_time, c=self.exploration, verbose=True)\n\n self.running_mcts = False\n\n print(self.mcts.dump(self.mcts.root, 0, c=0))\n return self.get_top5()",
"def result_message_performance_metrics(file_path):\n if(os.path.isfile(os.path.join(file_path, \"layers_info_detailed.csv\"))) and\\\n (os.path.isfile(os.path.join(file_path, \"layers_output.csv\"))) or \\\n (os.path.isfile(os.path.join(file_path, \"model_inference.csv\"))):\n return 'Results have been generated successfully'\n else:\n return 'Some error while generating results'",
"def check(cls):\n vms = list(cls._vm_agents_for_host())\n\n large_overhead_vms = []\n swapping_vms = []\n total_guest_and_overhead = 0\n expected_guest_and_overhead = 0\n\n # individual VMs ok?\n for vm in vms:\n with vm:\n try:\n vm_mem = vm.qemu.proc().memory_full_info()\n except Exception:\n # It's likely that the process went away while we analyzed\n # it. Ignore.\n continue\n if vm_mem.swap > 1 * GiB:\n swapping_vms.append(vm)\n expected_size = (\n vm.cfg[\"memory\"] * MiB\n + 2 * vm.qemu.vm_expected_overhead * MiB\n )\n expected_guest_and_overhead += (\n vm.cfg[\"memory\"] * MiB + vm.qemu.vm_expected_overhead * MiB\n )\n total_guest_and_overhead += vm_mem.pss\n if vm_mem.pss > expected_size:\n large_overhead_vms.append(vm)\n\n output = []\n result = OK\n if large_overhead_vms:\n result = WARNING\n output.append(\n \"VMs with large overhead: \"\n + \",\".join(x.name for x in large_overhead_vms)\n )\n if swapping_vms:\n result = WARNING\n output.append(\n \"VMs swapping:\" + \",\".join(x.name for x in swapping_vms)\n )\n if total_guest_and_overhead > expected_guest_and_overhead:\n result = CRITICAL\n output.append(\"High total overhead\")\n\n if result is OK:\n output.insert(0, \"OK\")\n elif result is WARNING:\n output.insert(0, \"WARNING\")\n elif result is CRITICAL:\n output.insert(0, \"CRITICAL\")\n else:\n output.insert(0, \"UNKNOWN\")\n\n output.insert(1, \"{} VMs\".format(len(vms)))\n output.insert(\n 2, \"{:,.0f} MiB used\".format(total_guest_and_overhead / MiB)\n )\n output.insert(\n 3, \"{:,.0f} MiB expected\".format(expected_guest_and_overhead / MiB)\n )\n\n print(\" - \".join(output))\n\n return result",
"def compute_statistics(self):\n for i in range(len(self.wine_matrix[0, :])):\n feature = self.wine_matrix[:, i]\n self.wine_stats['feature ' + str(i)] = {}\n if i == 11: # results column\n self.wine_stats['feature ' + str(i)]['positive_class_ratio'] = (feature == 1).sum() / len(feature)\n null, self.wine_stats['feature ' + str(i)]['pvalue'] = stats.normaltest(feature)\n\n # plot\n # pyplot.hist(feature, bins=50)\n # pyplot.show()\n\n for i in range(len(self.cancer_matrix[0, :])):\n feature = self.cancer_matrix[:, i]\n self.cancer_stats['feature ' + str(i)] = {}\n if i == 10: # results column\n self.cancer_stats['feature ' + str(i)]['positive_class_ratio'] = (feature == 1).sum() / len(feature)\n null, self.cancer_stats['feature ' + str(i)]['pvalue'] = stats.normaltest(feature)\n\n # plot\n # pyplot.hist(feature, bins=50)\n # pyplot.show()",
"def main() -> int:\n parser = argparse.ArgumentParser()\n parser.add_argument('--dir-metadata',\n type=pathlib.Path, required=True)\n\n args = parser.parse_args()\n\n with LockedMetadata(args.dir_metadata, __file__) as md:\n summary_dict = {}\n passing_tests = []\n failing_tests = []\n for f in md.tests_pickle_files:\n try:\n trr = TestRunResult.construct_from_pickle(f)\n summary_dict[f\"{trr.testname}.{trr.seed}\"] = \\\n ('PASS' if trr.passed else\n 'FAILED' + (\" {T}\" if (trr.failure_mode == Failure_Modes.TIMEOUT) else \"\"))\n if trr.passed:\n passing_tests.append(trr)\n else:\n failing_tests.append(trr)\n except RuntimeError as e:\n failing_tests.append(\n TestRunResult(\n name='broken_test',\n failure_message=str(e)\n ))\n\n md.regr_log = md.dir_run/'regr.log'\n md.regr_log_junit = md.dir_run/'regr_junit.xml'\n md.regr_log_junit_merged = md.dir_run/'regr_junit_merged.xml'\n\n # Write results as junit_xml\n with open(md.regr_log_junit,\n 'w',\n encoding='UTF-8') as junit_xml,\\\n open(md.regr_log_junit_merged,\n 'w',\n encoding='UTF-8') as junit_merged_xml:\n output_run_results_junit_xml(passing_tests, failing_tests,\n junit_xml,\n junit_merged_xml)\n\n with open(md.regr_log, 'w', encoding='UTF-8') as outfile:\n # Write results as regr.log (custom logfile format)\n output_results_text(passing_tests, failing_tests, summary_dict,\n outfile)\n\n test_summary_dict = create_test_summary_dict(passing_tests +\n failing_tests)\n\n cov_summary_dict = {}\n if md.simulator == \"xlm\":\n cov_summary_dict = create_cov_summary_dict(md)\n else:\n print(\"Warning: Not generating coverage summary, unsupported \" \\\n f\"simulator {md.simulator}\")\n\n html_report_filename = md.dir_run/'report.html'\n with open(html_report_filename, 'w') as outfile:\n output_results_html(md, passing_tests + failing_tests,\n test_summary_dict, cov_summary_dict, outfile)\n\n json_report_filename = md.dir_run/'report.json'\n with open(json_report_filename, 'w') as json_report_file:\n output_results_dvsim_json(md, test_summary_dict, cov_summary_dict,\n json_report_file)\n\n svg_summary_filename = md.dir_run/'summary.svg'\n with open(svg_summary_filename, 'w') as svg_summary_file:\n output_results_svg(test_summary_dict, cov_summary_dict,\n svg_summary_file)\n\n # Print a summary line to the terminal\n print(gen_summary_line(passing_tests, failing_tests))\n\n # Succeed if no tests failed\n return 1 if failing_tests else 0",
"def find_active_mets(cons, sign=\"both\", colors=True):\n global pd\n if not pd:\n import pandas as pd\n\n def keysort(elem):\n \"\"\"Key function to sort the output\"\"\"\n if elem[1] == 0:\n return abs(float(elem[3][10:-10]))\n else:\n return abs(float(elem[3][10:-10])) / elem[1]\n\n def keysortbw(elem):\n if elem[1] == 0:\n return abs(float(elem[3]))\n else:\n return abs(float(elem[3])) / elem[1]\n\n # Call always after cons.runn()!\n if not hasattr(cons, \"output\"):\n print(\"Consortium must run before analyzing the output of the run!\")\n return\n\n if colors:\n colors = [\"\\033[1;32;40m\", \"\\033[1;31;40m\", \"\\033[0m\"]\n else:\n colors = [\"\", \"\", \"\"]\n\n sim_tsv = pd.read_csv(cons.output, sep=\"\\t\")\n nrow = sim_tsv.shape[0] - 1\n table_print = []\n for col in sim_tsv.columns:\n if col == \"time\":\n continue\n val0 = sim_tsv[col][0]\n valn = sim_tsv[col][nrow]\n increment = valn - val0\n if increment > 0 and sign != \"-\":\n increment = colors[0] + str(val0 - valn) + colors[2]\n elif increment < 0 and sign != \"+\":\n increment = colors[1] + str(val0 - valn) + colors[2]\n else:\n continue\n table_print.append((col, val0, valn, increment))\n if colors[0]:\n table_print = sorted(table_print, key=keysort, reverse=True)\n else:\n table_print = sorted(table_print, key=keysortbw, reverse=True)\n\n table_print = pd.DataFrame(\n table_print,\n columns=(\"Metabolite\", \"Initial Value\", \"Final Value\", \"Increment\"),\n )\n return table_print",
"def checkAccuracy():\n\tcmd = \"{}/bin/augustus --species={} {}/output/testSet.gb\"\\\n\t.format(path_to_aug, species, testfile)\n\te = subprocess.check_call(cmd, shell=True)",
"def show_current_scattering_statistics(self, out=sys.stdout):\n print(\"\", file=out)\n print(\"Model and map statistics:\", file=out)\n print(\" mean mFo map height @ carbon: %s\" % format_value(\"%.2f\",\n flex.max(self.carbon_fo_values)), file=out)\n if (self.calpha_mean_two_fofc > 0):\n print(\" mean 2mFo-DFc map height @ C-alpha: %s\" % format_value(\n \"%.2f\", self.calpha_mean_two_fofc), file=out)\n print(\" mean B-factor: %s\" % format_value(\"%.2f\", self.b_mean_all), file=out)\n if (self.b_mean_calpha > 0):\n print(\" mean C-alpha B-factor: %s\" % format_value(\"%.2f\",\n self.b_mean_calpha), file=out)\n print(\" mean water B-factor: %s\" % format_value(\"%.2f\",\n self.b_mean_hoh), file=out)\n n_water_fofc_peaks = 0\n n_water_anom_peaks = 0\n water_sel = self.water_selection()\n print(\" %d water molecules\" % len(water_sel), file=out)\n for i_seq in water_sel :\n map_stats = self.map_stats(i_seq)\n if (map_stats.fofc >= 3.0):\n n_water_fofc_peaks += 1\n if (map_stats.anom is not None) and (map_stats.anom >= 3.0):\n n_water_anom_peaks += 1\n print(\" %d waters have mFo-DFc map >= 3.0 sigma\" % \\\n n_water_fofc_peaks, file=out)\n if (self.anomalous_flag):\n print(\" %d waters have anomalous map >= 3.0 sigma\" % \\\n n_water_anom_peaks, file=out)\n print(\"\", file=out)",
"def log_summary(self, no_run_list):\n self.log_message('Entries not run' ,step='summary',status='start',name='config_file_reader')\n for name in no_run_list.keys():\n self.log_message('Did not run: '+name+', '+no_run_list[name],status='running')\n \n ret_total = 0\n for x in xrange(2):\n for ent in self.entries[x]:\n ret_total = ret_total + 0 if ent.return_val == None else ent.return_val\n self.log_message('Summary Complete, Run Time = ('+str(self.total_time)+')',status='complete')\n return ret_total",
"def stats(self):"
] | [
"0.56708354",
"0.5640992",
"0.56023943",
"0.55943125",
"0.5556258",
"0.5549421",
"0.5529195",
"0.5498868",
"0.54936326",
"0.5493021",
"0.5483865",
"0.5482603",
"0.5474185",
"0.5457",
"0.5450003",
"0.5450003",
"0.54043716",
"0.53940886",
"0.5369571",
"0.53473234",
"0.5343286",
"0.534284",
"0.5330812",
"0.5328513",
"0.532386",
"0.5322905",
"0.5316846",
"0.53024495",
"0.5300122",
"0.52967465"
] | 0.6516957 | 0 |
Adding an existing user to a project when they already have the role. Should 'complete' anyway but do nothing. | def test_add_user_existing_with_role(self):
project = fake_clients.FakeProject(name="test_project")
user = fake_clients.FakeUser(
name="[email protected]", password="123", email="[email protected]"
)
assignment = fake_clients.FakeRoleAssignment(
scope={"project": {"id": project.id}},
role_name="member",
user={"id": user.id},
)
setup_identity_cache(
projects=[project], users=[user], role_assignments=[assignment]
)
url = "/v1/actions/InviteUser"
headers = {
"project_name": "test_project",
"project_id": project.id,
"roles": "project_admin,member,project_mod",
"username": "[email protected]",
"user_id": "test_user_id",
"authenticated": True,
}
data = {
"email": "[email protected]",
"roles": ["member"],
"project_id": project.id,
}
response = self.client.post(url, data, format="json", headers=headers)
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
self.assertEqual(response.json(), {"notes": ["task created"]})
tasks = Task.objects.all()
self.assertEqual(1, len(tasks))
self.assertTrue(tasks[0].completed) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_user(username, password, email, role_type, curr_username, user_role, request_ip):\n if user_connector.check_username_availability(username):\n user_connector.add_user(username, password, email, role_type)\n log_connector.add_log('ADD USER', \"Added new user: {}\".format(username), curr_username, user_role, request_ip)\n return True\n log_connector.add_log('ADD USER FAIL', \"Failed to add user: {}\".format(username), curr_username, user_role, request_ip)\n return False",
"def add_user(self, REQUEST):\n\n role_id = REQUEST.form['role_id']\n country_code = role_id.rsplit('-', 1)[-1]\n user_id = REQUEST.form['user_id']\n agent = self._get_ldap_agent()\n\n if not self._allowed(agent, REQUEST, country_code):\n return None\n if not nfp_can_change_user(self, user_id, no_org=False):\n # This means somebody is manipulating the DOM in order to\n # add a user that belongs to an organisation from another\n # country (the button doesn't normally appear)\n return None\n\n with agent.new_action():\n role_id_list = agent.add_to_role(role_id, 'user', user_id)\n\n role_msg = get_role_name(agent, role_id)\n msg = \"User %r added to role %s. \\n\" % (user_id, role_msg)\n\n # for Eionet Groups roles only, test if the added user is member of a\n # national organisation\n\n if self.is_eionet_group(role_id):\n if not get_national_org(agent, user_id, role_id):\n msg += (\n \"The user you want to add to an Eionet Group does not\"\n \" have a mandatory reference to an organisation for \"\n \"your country. Please corect!\")\n\n IStatusMessage(REQUEST).add(msg, type='info')\n\n log.info(\"%s ADDED USER %r TO ROLE %r\",\n logged_in_user(REQUEST), user_id, role_id_list)\n\n if '-awp-' in role_id:\n return REQUEST.RESPONSE.redirect(self.absolute_url() +\n '/awps?nfp=%s#role_%s' %\n (country_code, role_id))\n\n return REQUEST.RESPONSE.redirect(self.absolute_url() +\n '/nrcs?nfp=%s#role_%s' %\n (country_code, role_id))",
"def create(self, request, *args, **kwargs):\n user = request.user\n if user.is_authenticated and not user.has_perm(\"users.add_user\"):\n self.permission_denied(request, message=_(\"You cannot create users.\"))\n return super().create(request, *args, **kwargs)",
"def add_user_with_status_granted(caller, user):\r\n if _add_user(user, CourseCreator.GRANTED):\r\n update_course_creator_group(caller, user, True)",
"def test_append_existing_user(self):\n print('(' + self.test_append_existing_user.__name__+')',\n self.test_append_existing_user.__doc__)\n self.assertIsNone(self.connection.append_user(\n PATIENT_USERNAME, NEW_PATIENT))",
"def add(\n new_user: schemas.UserCreate,\n db_session: Session = Depends(get_db),\n current_user: models.User = Depends(get_current_admin_user)\n):\n db_user = crud.get_by_email(db_session, new_user.email)\n\n if db_user:\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST,\n detail='The user with this email already exists in the system.'\n )\n\n return crud.create(db_session, new_user)",
"def view_add_user(self, user, username, password):\r\n user.realm._checker.addUser(username, password)",
"def add_new_user(self, user):\n # print(\"Saving new user\")\n self.execute(TABELLE['id_users']['insert']['complete_user'],\n (user['id'], False, False, True, False, False))\n\n self.execute(TABELLE['users']['insert'],\n (user['id'], user['username']))",
"def test_020_add_user_to_group(self):\n testflow.step(\"Adding user %s to group %s\", TEST_USER1, TEST_GROUP1)\n assert MANAGE_CLI.run(\n 'useradd',\n TEST_GROUP1,\n user=TEST_USER1\n )[0], \"Failed to add user to group '%s'\" % TEST_GROUP1\n\n testflow.step(\"Adding nonexisting user to group %s\", TEST_GROUP1)\n assert not MANAGE_CLI.run(\n 'useradd',\n TEST_GROUP1,\n user='nonsense'\n )[0], \"Possible to add nonexisting user to group\"\n\n testflow.step(\"Adding user %s to nonexisting group\", TEST_USER2)\n assert not MANAGE_CLI.run(\n 'useradd',\n 'nonsense',\n user=TEST_USER2\n )[0], \"Possible to add user to nonexisting group\"",
"def addUser(User):\n # check if user already exists #\n user_exists = run('id -u %s >/dev/null 2>&1 || echo \"no\"' % (User))\n if user_exists == \"no\":\n sudo('useradd -m -c \"%s\" -s /bin/bash %s' % (agUsers[User], User))\n else:\n print \"[Info] User '%s' already exists on host '%s'\" % (User, env.host_string)",
"def add_user(self, firstname, lastname, email, username, password, role):\n\n new_user = {\n \"id\": len(self.db) + 1,\n \"firstname\": firstname,\n \"lastname\": lastname,\n \"email\": email,\n \"username\": username,\n \"password\": password,\n \"role\": role\n }\n\n ALL_USERS.append(new_user)",
"def add_user(self, username, email, password):\n\n new_user = User(username, email, password)\n new_user_details = new_user.get_details()\n for user in self.users:\n if new_user_details['email'] == user['email']:\n return 'User already exists'\n else:\n new_user_details['id'] = len(self.users)\n self.users.append(new_user_details)\n return 'Account created. You can now log in'",
"def test_add_user_existing(self):\n project = fake_clients.FakeProject(name=\"parent_project\")\n\n user = fake_clients.FakeUser(\n name=\"[email protected]\", password=\"123\", email=\"[email protected]\"\n )\n\n setup_identity_cache(projects=[project], users=[user])\n\n url = \"/v1/actions/InviteUser\"\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": project.id,\n \"roles\": \"project_admin,member,project_mod\",\n \"username\": \"[email protected]\",\n \"user_id\": \"test_user_id\",\n \"authenticated\": True,\n }\n data = {\n \"email\": \"[email protected]\",\n \"roles\": [\"member\"],\n \"project_id\": project.id,\n }\n response = self.client.post(url, data, format=\"json\", headers=headers)\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.json(), {\"notes\": [\"task created\"]})\n\n new_token = Token.objects.all()[0]\n url = \"/v1/tokens/\" + new_token.token\n data = {\"confirm\": True}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def add_user(self, role, emp_name, username, status, password):\n Log.info(\"Start to add user.\")\n self.click(self.user_add_btn)\n self.wait_unit_el_present(self.add_user_form)\n self.set_combox_value(role, self.user_role_select)\n self.input_text(emp_name, self.emp_name_input)\n self.input_text(username, self.user_name_input)\n self.set_combox_value(status, self.user_status_select)\n self.input_text(password, self.user_password_input)\n self.input_text(password, self.user_confirm_password)\n self.click(self.user_save_btn)\n self.wait_unit_el_present(self.user_table)\n Log.info(\"New user is added.\")",
"def add_user(self, u, p):\r\n\t\tlogger.debug(\"Entering\")\r\n\r\n\t\ttry:\r\n\t\t\tlogin.add_user(u, p)\r\n\t\texcept ValueError as e:\r\n\t\t\tlogger.debug(\"Exiting - failure\")\r\n\t\t\treturn False, e.message\r\n\t\t\t\r\n\t\tlogger.debug(\"Exiting - success\")\r\n\t\treturn True, \"%s has been added.\" % u",
"def add_user(self, user, role=OrganizationUserRole.MEMBER):\n users_count = self.users.all().count()\n if users_count == 0:\n role = OrganizationUserRole.OWNER\n org_user = self._org_user_model.objects.create(\n user=user, organization=self, role=role\n )\n if users_count == 0:\n self._org_owner_model.objects.create(\n organization=self, organization_user=org_user\n )\n\n # User added signal\n user_added.send(sender=self, user=user)\n return org_user",
"async def add_user(self, user):\n user_exists = await maybe_future(self.system_user_exists(user))\n\n if not user_exists:\n if self.create_system_users:\n await maybe_future(self.add_system_user(user))\n dir = Path('/home/' + user.name + '/examples')\n print('add user',user.name)\n if not dir.exists():\n print('adding user',user.name)\n subprocess.check_call(['cp', '-r', '/srv/ipython/examples', '/home/' + user.name + '/examples'])\n subprocess.check_call(['chown', '-R', user.name, '/home/' + user.name + '/examples'])\n else:\n raise KeyError(\"User %s does not exist.\" % user.name)\n\n await maybe_future(super().add_user(user))",
"def add_user_to_role(request, username_or_email, role, group_title, event_name):\r\n username_or_email = strip_if_string(username_or_email)\r\n try:\r\n user = _user_from_name_or_email(username_or_email)\r\n except User.DoesNotExist:\r\n return u'<font color=\"red\">Error: unknown username or email \"{0}\"</font>'.format(username_or_email)\r\n\r\n role.add_users(user)\r\n\r\n # Deal with historical event names\r\n if event_name in ('staff', 'beta-tester'):\r\n track.views.server_track(\r\n request,\r\n \"add-or-remove-user-group\",\r\n {\r\n \"event_name\": event_name,\r\n \"user\": unicode(user),\r\n \"event\": \"add\"\r\n },\r\n page=\"idashboard\"\r\n )\r\n else:\r\n track.views.server_track(request, \"add-instructor\", {\"instructor\": unicode(user)}, page=\"idashboard\")\r\n\r\n return '<font color=\"green\">Added {0} to {1}</font>'.format(user, group_title)",
"def add_role_to_user(self, user, role):\n user, role = self._prepare_role_modify_args(user, role)\n if role not in user.roles:\n user.roles.append(role)\n # noinspection PyUnresolvedReferences\n self.save(user)\n return True\n\n return False",
"def add_user(self):\n user = models.User(email=self.test_user,\n password=generate_password_hash(self.test_user_password))\n user.add()",
"def add_user(self, user: User):\n raise NotImplementedError",
"def add_role():\n role = roles.find_or_create_role(request.values.get('role_name', ''))\n user = users.get_or_404(int(request.values.get('user_id', '')))\n if not users.add_role_to_user(user, role):\n return {}, 500\n return {}",
"def _add_user(user, state):\r\n if not user.is_staff and CourseCreator.objects.filter(user=user).count() == 0:\r\n entry = CourseCreator(user=user, state=state)\r\n entry.save()\r\n return True\r\n\r\n return False",
"def add_user_to_project(tas_project, user_ref):\n keycloak_client = KeycloakClient()\n user = get_user(user_ref)\n keycloak_client.update_membership(tas_project.chargeCode, user.username, \"add\")\n\n # Check if this is the first time joining an allocation\n kc_user = keycloak_client.get_user_by_username(user.username)\n if not kc_user[\"attributes\"].get(UserAttributes.LIFECYCLE_ALLOCATION_JOINED):\n keycloak_client.update_user(\n user.username, lifecycle_allocation_joined=datetime.now()\n )\n\n return True",
"async def add_user(self, user_id) -> None:\n # await self.conn.execute(\n # \"INSERT INTO tg_users(userid) VALUES $1 ON CONFLICT DO NOTHING\",\n # user_id,\n # )\n return",
"def test_000_add_user(self):\n # This case is always passed because it's tested in setup_module,\n # If setup module fails, this case will never run\n pass",
"def test_add_user_to_thread(self):\n thread = self.create_thread()\n user = self.create_user()\n thread.add_user_to_thread(user)\n self.assertTrue(\n UserThread.objects.filter(thread=thread, user=user).exists())",
"def add_user(self, first_name: str = None, last_name: str = None,\n role: int = UsersRole.CANDIDATE) -> Optional[Users]:\n if isinstance(role, UsersRole):\n role = role.value\n\n try:\n new_candidate = Users(first_name=first_name, last_name=last_name, role=role)\n self.session.add(new_candidate)\n self.session.commit()\n\n return new_candidate\n except Exception as excpt:\n self.session.rollback()\n print(f'Couldn\\'t add new candidate: {excpt}')\n\n return None",
"def add_user(first_name,last_name,email,password,typeOfUser):\n user=User.objects.create(first_name=first_name,last_name=last_name,email=email,password=password,role=typeOfUser)\n return user",
"def testAddExists(self):\n self.assertEquals(models.SUCCESS, self.users.add(\"userB\", \"password\"))\n self.assertEquals(models.ERR_USER_EXISTS, self.users.add(\"userB\", \"password\"))"
] | [
"0.71821344",
"0.7031617",
"0.7004944",
"0.6919425",
"0.68790615",
"0.6856563",
"0.6812726",
"0.6791513",
"0.67497045",
"0.6730467",
"0.6708298",
"0.6707417",
"0.66984934",
"0.66916555",
"0.6689092",
"0.667523",
"0.6650772",
"0.6639229",
"0.6618744",
"0.6616759",
"0.6614431",
"0.6592097",
"0.6554203",
"0.6551104",
"0.6544456",
"0.6531416",
"0.6513977",
"0.6469192",
"0.6442235",
"0.64403534"
] | 0.71185714 | 1 |
Ensures that when a project becomes invalid at the submit stage, a 400 is received and no final emails are sent. | def test_new_project_invalid_on_submit(self):
setup_identity_cache()
url = "/v1/actions/CreateProjectAndUser"
data = {"project_name": "test_project", "email": "[email protected]"}
response = self.client.post(url, data, format="json")
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
headers = {
"project_name": "test_project",
"project_id": "test_project_id",
"roles": "admin,member",
"username": "[email protected]",
"user_id": "test_user_id",
"authenticated": True,
}
new_task = Task.objects.all()[0]
url = "/v1/tasks/" + new_task.uuid
response = self.client.post(
url, {"approved": True}, format="json", headers=headers
)
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
self.assertEqual(response.data, {"notes": ["created token"]})
self.assertEqual(len(mail.outbox), 3)
fake_clients.identity_cache["projects"] = {}
new_token = Token.objects.all()[0]
url = "/v1/tokens/" + new_token.token
data = {"password": "testpassword"}
response = self.client.post(url, data, format="json")
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(len(mail.outbox), 3) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def invalid_project_tye_msg(proj_type):\n return {\"error\": f\"Project type {proj_type} is not valid, please use one of the following: \"\n f\"{', '.join(project_types)}\"}, 400",
"def test_pypi_not_200(self):\n form = self._get_form()\n self.assertFalse(self._validate_form(form, mock_status_code=303))\n self.assertTrue('name' in form.errors)",
"def test_create_invalid_email(self):\n self.assertEqual(\n ProjectInvite.objects.filter(project=self.project).count(), 0\n )\n\n url = reverse(\n 'projectroles:api_invite_create',\n kwargs={'project': self.project.sodar_uuid},\n )\n post_data = {\n 'email': 'NOT_AN_EMAIL!',\n 'role': PROJECT_ROLE_CONTRIBUTOR,\n 'message': INVITE_MESSAGE,\n }\n response = self.request_knox(url, method='POST', data=post_data)\n\n self.assertEqual(response.status_code, 400, msg=response.content)\n self.assertEqual(\n ProjectInvite.objects.filter(project=self.project).count(), 0\n )\n self.assertEqual(len(mail.outbox), 0)",
"def run_project_checks(sub: Submission, logger):\n\n project = sub.project\n codes = []\n found_submitter = False\n found_submitter_details = False\n\n # Contacts\n if not project.contacts:\n logger.error(\"No contacts found. At least one contact must be included.\")\n codes.append(\"PROJ-E01\")\n else:\n # Roles\n role_term = ontology_term(\"role\")\n allowed_roles = get_term_descendants(role_term[\"ontology\"], role_term[\"uri\"], logger)\n for i, c in enumerate(project.contacts):\n if c.roles:\n for r in c.roles:\n role_value = r.lower().rstrip()\n if role_value not in allowed_roles:\n logger.warning(\"Contact role \\\"{}\\\" is not an allowed term.\".format(role_value))\n codes.append(\"PROJ-E05\")\n elif role_value == \"submitter\":\n found_submitter = True\n if c.email and c.affiliation:\n found_submitter_details = True\n if not c.lastName:\n logger.error(\"A contact must have last name specified: {}.\".format(c))\n codes.append(\"PROJ-E02\")\n # At least one contact must have role \"submitter\"\n if not found_submitter:\n logger.error(\"At least one contact must have role \\\"submitter\\\".\")\n codes.append(\"PROJ-E03\")\n # At least one submitter contact needs email and affiliation\n if not found_submitter_details:\n logger.error(\"At least one contact with role \\\"submitter\\\" must have email and affiliation specified.\")\n codes.append(\"PROJ-E04\")\n\n # Format of PubMed ID and DOI\n if project.publications:\n for pub in project.publications:\n if pub.pubmedId:\n try:\n int(pub.pubmedId)\n except ValueError:\n logger.error(\"PubMed ID must be numerical. Got \\\"{}\\\".\".format(pub.pubmedId))\n codes.append(\"PROJ-E06\")\n if pub.doi:\n if not REGEX_DOI_FORMAT.match(pub.doi.rstrip()):\n logger.error(\"Publication DOI \\\"{}\\\" does not match expected pattern.\".format(pub.doi))\n codes.append(\"PROJ-E07\")\n\n # Release date\n if project.releaseDate:\n if not REGEX_DATE_FORMAT.match(project.releaseDate):\n logger.error(\"Release date \\\"{}\\\" is not in YYYY-MM-DD format.\".format(project.releaseDate))\n codes.append(\"PROJ-E09\")\n else:\n logger.error(\"No release date found. Project must have release date specified.\")\n codes.append(\"PROJ-E08\")\n\n return codes",
"def _validate(self, backend: str, active_account: str, project_id: str):\n\n if not re.match(r'[a-z][a-z0-9\\-]{5,29}', project_id):\n raise ValueError(\n ('Invalid Google Cloud Platform Project ID \"{}\": '\n 'must be between 6 and 30 characters and contain '\n 'lowercase letters, digits or hyphens').format(project_id))\n\n if not self.project_client.project_exists(project_id):\n raise ValueError('Project {} does not exist'.format(project_id))\n\n if not self._has_correct_permissions(backend, project_id,\n active_account):\n msg = 'User has incorrect permissions to deploy.'\n if backend == 'gae':\n msg = 'User must be a Project Owner to deploy on GAE'\n elif backend == 'gke':\n msg = ('User does not have correct permissions'\n 'to deploy on GKE')\n raise ValueError(msg)",
"def test_form_when_project_is_awaiting_approval(self):\n form = ProjectUserMembershipCreationForm(\n initial={\n 'user': self.project_applicant,\n },\n data={\n 'project_code': self.project_code,\n },\n )\n self.assertFalse(form.is_valid())\n self.assertEqual(\n form.errors['project_code'],\n ['The project is currently awaiting approval.'],\n )",
"def test_pypi_500(self):\n form = self._get_form()\n self.assertFalse(self._validate_form(form, mock_status_code=500))\n self.assertTrue('name' in form.errors)",
"def test_invalid_request(client, auth_token, sample_project):\n # Given\n project_id = sample_project[\"uid\"];\n\n # When\n response = client.post(\"/projects/%s\" % project_id,\n data={\"invalid_field\": \"value\"},\n headers={'token': auth_token},\n follow_redirects=True)\n\n # Then\n assert 400 == response.status_code",
"def test_request_membership_form_with_an_invalid_project_id(self):\n pass",
"def test_form_with_an_invalid_project_code(self):\n self.approve_project(self.project)\n\n form = ProjectUserMembershipCreationForm(\n initial={\n 'user': self.project_applicant,\n },\n data={\n 'project_code': 'invalid-project-code',\n },\n )\n self.assertFalse(form.is_valid())\n self.assertEqual(\n form.errors['project_code'],\n ['Invalid Project Code.'],\n )",
"def test_bad_email_is_rejected(self):\n self.updated_data['email'] = ''\n self.update_user()\n\n # email should be left as original email address\n self.assertEqual(self.user.email, self.updated_data['email'])\n # And we should get a HTTP 400 with error code.\n self.assertEqual(self.response.status_code, 400)\n # Eventually this needs to return a custom error instead of blank\n # For now the blank error will do\n self.assertEqual(\n self.response.data['email'],\n [ErrorDetail(string='This field may not be blank.', code='blank')]\n )",
"def test_email_sent_on_failure(self):\n self._authorize()\n data = {\n 'Subject_Number': '000-1111',\n 'Pin_Code': '1234',\n 'Date_Enrolled': datetime.datetime.now().strftime('%b %d %Y '),\n 'Mobile_Number': '2223334444',\n }\n patient = self.create_xml_patient(data)\n payload = self.create_xml_payload([patient])\n response = self._post(payload)\n self.assertEqual(response.status_code, 500)\n self.assertEqual(len(mail.outbox), 1)",
"def submission_mark_as_failed(request, submission_pk):\n if request.method == \"POST\":\n try:\n submission = models.CompetitionSubmission.objects.get(pk=submission_pk)\n competition = submission.phase.competition\n if request.user.id != competition.creator_id and request.user not in competition.admins.all():\n raise Http404()\n submission.status = models.CompetitionSubmissionStatus.objects.get(codename=\"failed\")\n submission.save()\n return HttpResponse()\n except models.CompetitionSubmission.DoesNotExist:\n raise Http404()\n raise Http404()",
"def test_activate_form_bad(self):\r\n res = self.testapp.post(\r\n '/api/v1/suspend',\r\n content_type='application/json',\r\n status=406)\r\n success = json.loads(res.body)['error']\r\n self.assertTrue(\r\n success is not None,\r\n \"Should not be successful with no email address: \" + str(res))\r\n\r\n res = self.testapp.post('/api/v1/suspend',\r\n params={'email': '[email protected]'},\r\n status=404)\r\n success = json.loads(res.body)\r\n self.assertTrue(\r\n 'error' in success,\r\n \"Should not be successful with invalid email address: \" + str(res))",
"def test_bad_data(self):\n project = fake_clients.FakeProject(name=\"test_project\")\n\n setup_identity_cache(projects=[project])\n\n url = \"/v1/actions/InviteUser\"\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": project.id,\n \"roles\": \"project_admin,member,project_mod\",\n \"username\": \"[email protected]\",\n \"user_id\": \"test_user_id\",\n \"authenticated\": True,\n }\n data = {\n \"wrong_email_field\": \"[email protected]\",\n \"roles\": [\"member\"],\n \"project_id\": project.id,\n }\n response = self.client.post(url, data, format=\"json\", headers=headers)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(\n response.json(), {\"errors\": {\"email\": [\"This field is required.\"]}}\n )\n\n data = {\n \"email\": \"not_a_valid_email\",\n \"roles\": [\"not_a_valid_role\"],\n \"project_id\": project.id,\n }\n response = self.client.post(url, data, format=\"json\", headers=headers)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(\n response.json(),\n {\n \"errors\": {\n \"email\": [\"Enter a valid email address.\"],\n \"roles\": ['\"not_a_valid_role\" is not a valid choice.'],\n }\n },\n )",
"def test_create_invalid_submission(self):\n with self.client:\n # invalid submission registration\n sub_response = register_illegal_submission(self, self.token)\n response_data = json.loads(sub_response.data.decode())\n self.assertTrue(response_data['errors']!=None)",
"def test_missing_project(self):\n task = Task({\n 'name': 'test',\n 'id': 1,\n 'stage_id' : [1, 'name'],\n 'date_deadline': False,\n 'date_start': False,\n 'date_end': False,\n 'partial_messages': [{'date':'2018-10-21 12:00:00'}],\n 'kanban_state': 'blocked',\n 'planned_hours': 100,\n 'priority': '1'\n })\n self.assertIsNotNone(task)\n self.assertEqual(task.project, 'Not assigned to project')",
"def patch(self):\n try:\n MessageService.resend_email_validation(token_auth.current_user())\n return {\"Success\": \"Verification email resent\"}, 200\n except ValueError as e:\n return {\"Error\": str(e).split(\"-\")[1], \"SubCode\": str(e).split(\"-\")[0]}, 400",
"def test_not_authed_nonpublic_project(self):\n # Clear out existing project with ID=1 if necessary.\n Project.objects.filter(id=2).delete()\n locale = LocaleFactory.create(code='fakelocale')\n project = ProjectFactory.create(id=2, slug='valid-project', locales=[locale])\n ResourceFactory.create(project=project)\n\n response = self.client.get('/fakelocale/valid-project/')\n assert_redirects(response, reverse('pontoon.home'))\n assert_equal(self.client.session['translate_error'], {'redirect': '/fakelocale/valid-project/'})",
"def test_returns_projects_with_tasks_to_validate_if_action_set_to_validate(self):\n # Arrange\n self.test_project_2.private = False\n # Since test_author is BEGINNER, they can only validate projects with validation permission ANY.\n self.test_project_1.validation_permission = ValidationPermission.ANY.value\n self.test_project_1.save()\n self.test_project_2.validation_permission = ValidationPermission.ANY.value\n self.test_project_2.save()\n # Reset all tasks of test_project_2 so that there are no tasks ready to validate.\n MappingService.map_all_tasks(self.test_project_2.id, self.test_author.id)\n ValidatorService.validate_all_tasks(self.test_project_2.id, self.test_author.id)\n # Act\n response = self.client.get(\n self.url,\n headers={\"Authorization\": self.author_session_token},\n query_string={\"action\": \"validate\"},\n )\n # Assert\n self.assertEqual(response.status_code, 200)\n # Test_project_2 has no tasks to validate, it should not be returned even when user has permsiion to validate.\n self.assertEqual(len(response.json[\"results\"]), 1)\n self.assertEqual(\n response.json[\"results\"][0][\"projectId\"], self.test_project_1.id\n )",
"def test_bad_requests_give_400(self):\n self.assertEqual(self._request({}), 400)",
"def test_bad_email(self):\n user = self.make_user()\n data = {\"email\": \"bogus\"}\n\n with self.login(user):\n response = self.post(\"referrals:create\", data=data)\n\n assert response.status_code == 302\n assert response[\"Location\"] == self.reverse(\"settings:dashboard\")\n message = list(get_messages(response.wsgi_request))[0]\n assert str(message) == \"'bogus' is an invalid email address.\"",
"def test_invalid_project(self):\n LocaleFactory.create(code='fakelocale')\n\n response = self.client.get('/fakelocale/invalid-project/')\n assert_redirects(response, reverse('pontoon.home'))\n assert_equal(self.client.session['translate_error'], {'none': None})",
"def test_failed_email(self):\n self.assertEqual(send_email(\"testtestcom\", \"test\", \"test\"), 'There was an error sending')",
"def test_invalid_project_create(self):\n\n responses.add(\n responses.POST,\n self.host + \"/manager\",\n json={\n 'message': \"Project names must start with a letter, followed by any number of letters, digits, '-' or '_'.\",\n 'status': \"error\"\n },\n status=200\n )\n\n with self.assertRaises(CreateError):\n self.azk.create('123', 'description')",
"def form_invalid(self, form):\n response = super().form_invalid(form)\n if self.request.is_turbo:\n response.status_code = 422 # Unprocessable Entity\n return response",
"def send_mail_when_failed(self, body):\r\n pass",
"def test_project_create_cant_edit_users(self):\n setup_identity_cache()\n\n url = \"/v1/actions/CreateProjectAndUser\"\n data = {\"project_name\": \"test_project\", \"email\": \"[email protected]\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.json(), {\"notes\": [\"task created\"]})\n task = Task.objects.all()[0]\n action_models = task.actions\n actions = [act.get_action() for act in action_models]\n self.assertFalse(all([act.valid for act in actions]))",
"def test_empty_request(client, auth_token, sample_project):\n # Given\n project_id = sample_project[\"uid\"];\n\n # When\n response = client.post(\"/projects/%s\" % project_id,\n data={},\n headers={'token': auth_token},\n follow_redirects=True)\n\n # Then\n assert 400 == response.status_code",
"def __init__(self, from_email):\n self.code = 400\n self.from_email = from_email\n Error.__init__(self)"
] | [
"0.6262488",
"0.622149",
"0.6171945",
"0.6151178",
"0.6124826",
"0.60645145",
"0.60137534",
"0.59481496",
"0.5908061",
"0.5797568",
"0.5786521",
"0.573891",
"0.57307863",
"0.57101095",
"0.5647287",
"0.5620445",
"0.5609429",
"0.559986",
"0.559503",
"0.5592268",
"0.5590574",
"0.55566585",
"0.5551501",
"0.55297345",
"0.5520604",
"0.55146617",
"0.54967195",
"0.54940546",
"0.5484289",
"0.54717946"
] | 0.6476846 | 0 |
Project created if not present, existing user attached. No token should be needed. | def test_new_project_existing_user(self):
user = fake_clients.FakeUser(
name="[email protected]", password="123", email="[email protected]"
)
setup_identity_cache(users=[user])
# unauthenticated sign up as existing user
url = "/v1/actions/CreateProjectAndUser"
data = {"project_name": "test_project", "email": user.email}
response = self.client.post(url, data, format="json")
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
# approve the sign-up as admin
headers = {
"project_name": "admin_project",
"project_id": "admin_project_id",
"roles": "admin,member",
"username": "admin",
"user_id": "admin_id",
"authenticated": True,
}
new_task = Task.objects.all()[0]
url = "/v1/tasks/" + new_task.uuid
response = self.client.post(
url, {"approved": True}, format="json", headers=headers
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.json(), {"notes": ["Task completed successfully."]}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_new_project_existing(self):\n\n project = fake_clients.FakeProject(name=\"test_project\")\n\n setup_identity_cache(projects=[project])\n\n url = \"/v1/actions/CreateProjectAndUser\"\n data = {\"project_name\": \"test_project\", \"email\": \"[email protected]\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": \"test_project_id\",\n \"roles\": \"admin,member\",\n \"username\": \"[email protected]\",\n \"user_id\": \"test_user_id\",\n \"authenticated\": True,\n }\n new_task = Task.objects.all()[0]\n url = \"/v1/tasks/\" + new_task.uuid\n response = self.client.post(\n url, {\"approved\": True}, format=\"json\", headers=headers\n )\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.json(), {\"errors\": [\"actions invalid\"]})",
"def _create_project(self):\n request = {\n \"project\": {\n \"description\": \"description\",\n \"enabled\": True,\n \"name\": uuid.uuid4().hex,\n \"domain_id\": \"default\",\n }\n }\n response = self.client.post(PROJECT_PATH, data=json.dumps(request),\n headers=HEADERS)\n\n if response.status_code == 201:\n return response.json()\n else:\n raise SystemExit(\"Failed to create project.\")",
"def test_new_project(self):\n\n setup_identity_cache()\n\n url = \"/v1/actions/CreateProjectAndUser\"\n data = {\"project_name\": \"test_project\", \"email\": \"[email protected]\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": \"test_project_id\",\n \"roles\": \"admin,member\",\n \"username\": \"[email protected]\",\n \"user_id\": \"test_user_id\",\n \"authenticated\": True,\n }\n new_task = Task.objects.all()[0]\n url = \"/v1/tasks/\" + new_task.uuid\n response = self.client.post(\n url, {\"approved\": True}, format=\"json\", headers=headers\n )\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.json(), {\"notes\": [\"created token\"]})\n\n new_project = fake_clients.identity_cache[\"new_projects\"][0]\n self.assertEqual(new_project.name, \"test_project\")\n\n new_token = Token.objects.all()[0]\n url = \"/v1/tokens/\" + new_token.token\n data = {\"password\": \"testpassword\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def test_user_can_create_a_project(self):\n self.assertEqual(project_model.Project.objects.get(user=self.test_user).pk, self.test_project.pk)",
"def test_create_project_unknown_user(self):\n self.assertEqual(Project.objects.count(), 2)\n url = reverse('projectroles:api_project_create')\n post_data = {\n 'title': NEW_PROJECT_TITLE,\n 'type': PROJECT_TYPE_PROJECT,\n 'parent': str(self.category.sodar_uuid),\n 'description': 'description',\n 'readme': 'readme',\n 'public_guest_access': False,\n 'owner': INVALID_UUID,\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 400)\n self.assertEqual(Project.objects.count(), 2)",
"def _create_dummy_project(self,projectname=\"testproject\"):\n # Create three types of users that exist: Root, can do anything, \n # projectadmin, cam do things to a project he or she owns. And logged in\n # user \n \n #created in _create_main_project_and_root.\n root = self.root\n # non-root users are created as if they signed up through the project, \n # to maximize test coverage. \n \n # A user who has created a project\n projectadmin = self._create_random_user(\"projectadmin_\")\n \n testproject = self._create_comicsite_in_admin(projectadmin,projectname)\n create_page_in_admin(testproject,\"testpage1\")\n create_page_in_admin(testproject,\"testpage2\")\n \n # a user who explicitly signed up to testproject\n participant = self._create_random_user(\"participant_\")\n self._register(participant,testproject)\n \n # a user who only signed up but did not register to any project\n registered_user = self._create_random_user(\"comicregistered_\")\n \n #TODO: How to do this gracefully? \n return [testproject,root,projectadmin,participant,registered_user]",
"def test_new_project_existing_project_new_user(self):\n setup_identity_cache()\n\n # create signup#1 - project1 with user 1\n url = \"/v1/actions/CreateProjectAndUser\"\n data = {\"project_name\": \"test_project\", \"email\": \"[email protected]\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n\n # Create signup#2 - project1 with user 2\n data = {\"project_name\": \"test_project\", \"email\": \"[email protected]\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n\n headers = {\n \"project_name\": \"admin_project\",\n \"project_id\": \"admin_project_id\",\n \"roles\": \"admin,member\",\n \"username\": \"admin\",\n \"user_id\": \"admin_id\",\n \"authenticated\": True,\n }\n # approve signup #1\n new_task1 = Task.objects.all()[0]\n url = \"/v1/tasks/\" + new_task1.uuid\n response = self.client.post(\n url, {\"approved\": True}, format=\"json\", headers=headers\n )\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.json(), {\"notes\": [\"created token\"]})\n\n # Attempt to approve signup #2\n new_task2 = Task.objects.all()[1]\n url = \"/v1/tasks/\" + new_task2.uuid\n response = self.client.post(\n url, {\"approved\": True}, format=\"json\", headers=headers\n )\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.json(), {\"errors\": [\"actions invalid\"]})",
"def create_project():\n client = RequestManager()\n project_name = \"\".join(choices(string.ascii_letters + string.digits, k=10))\n client.set_method(\"POST\")\n client.set_endpoint(\"/projects\")\n body = {\"name\": project_name}\n client.set_body(json.dumps(body))\n response = client.execute_request()\n STORED_ID['project_id'] = response.json()['id']",
"def test_new_project_invalid_on_submit(self):\n\n setup_identity_cache()\n\n url = \"/v1/actions/CreateProjectAndUser\"\n data = {\"project_name\": \"test_project\", \"email\": \"[email protected]\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": \"test_project_id\",\n \"roles\": \"admin,member\",\n \"username\": \"[email protected]\",\n \"user_id\": \"test_user_id\",\n \"authenticated\": True,\n }\n new_task = Task.objects.all()[0]\n url = \"/v1/tasks/\" + new_task.uuid\n response = self.client.post(\n url, {\"approved\": True}, format=\"json\", headers=headers\n )\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.data, {\"notes\": [\"created token\"]})\n self.assertEqual(len(mail.outbox), 3)\n\n fake_clients.identity_cache[\"projects\"] = {}\n\n new_token = Token.objects.all()[0]\n url = \"/v1/tokens/\" + new_token.token\n data = {\"password\": \"testpassword\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(len(mail.outbox), 3)",
"def _create_main_project_and_root(self): \n if len(ComicSite.objects.filter(short_name=settings.MAIN_PROJECT_NAME)) == 0:\n main = ComicSite.objects.create(short_name=settings.MAIN_PROJECT_NAME,\n description=\"main project, autocreated by comicframeworkTestCase._create_inital_project()\",\n skin=\"fakeskin.css\"\n )\n \n main.save()\n \n try:\n self.root = User.objects.get(username='root')\n except ObjectDoesNotExist:\n # A user who has created a project\n root = User.objects.create_user('root',\n '[email protected]',\n 'testpassword') \n root.is_staff = True\n root.is_superuser = True\n root.save()\n \n self.root = root\n\n call_command('check_permissions')",
"def project_create(project):\n client.project.create(project)",
"def test_project_create_cant_edit_users_existing_user(self):\n user = fake_clients.FakeUser(name=\"[email protected]\")\n\n setup_identity_cache(users=[user])\n\n url = \"/v1/actions/CreateProjectAndUser\"\n data = {\"project_name\": \"test_project\", \"email\": \"[email protected]\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.json(), {\"notes\": [\"task created\"]})\n task = Task.objects.all()[0]\n action_models = task.actions\n actions = [act.get_action() for act in action_models]\n self.assertTrue(all([act.valid for act in actions]))",
"def create_project_if_necessary(ctx, org_name, project_name, ):\n org = cmd.get_one_organization_by_name(\n client=ctx.obj, organization_name=org_name)\n pprint(cmd.ensure_project(\n client=ctx.obj, project_name=project_name, organization_id=org.id))",
"def create_projects(self):\n if self.gl is None or self.config is None:\n print(\"No config/Gitlab found, please run connect first.\")\n exit(1)\n else:\n print(\"Starting Project creation.\")\n gl = self.gl\n config = self.config\n for project in config[\"projects\"]:\n # get the import url\n imp_url = config[\"projects\"][project][\"import_url\"]\n\n # Set rights/members/protected master\n if config[\"projects\"][project][\"owner_conf\"][\"owner\"] == \"all_users\":\n for user in self.users:\n print(\"Importing \\'\" + imp_url + \"\\' for user \\'\" + user.username + \"\\'\")\n pj = user.projects.create({'name': project,\n 'user_id': user.id,\n 'access_level': gitlab.OWNER_ACCESS,\n 'import_url': imp_url})\n elif config[\"projects\"][project][\"owner_conf\"][\"owner\"] == \"user\":\n for user in self.users:\n if user.username == config[\"projects\"][project][\"owner_conf\"][\"name\"]:\n print(\"Importing \\'\" + imp_url + \"\\' for user \\'\" + user.username + \"\\'\")\n pj = user.projects.create({'name': project,\n 'user_id': user.id,\n 'Access_level': gitlab.OWNER_ACCESS,\n 'import_url': imp_url})\n elif config[\"projects\"][project][\"owner_conf\"][\"owner\"] == \"group\":\n for group in self.groups:\n if group.name == config[\"projects\"][project][\"owner_conf\"][\"name\"]:\n print(\"Importing \\'\" + imp_url + \"\\' for group \\'\" + group.name + \"\\'\")\n pj = group.projects.create({'name': project,\n 'namespace_id': group.id,\n 'import_url': imp_url})\n else:\n print(\"Project owner Config is wrong, aborting\")\n exit(1)\n # Delete protected Master Branch\n if config[\"projects\"][project][\"protect_master_branch\"] == \"False\":\n print(\"Removing Project master Branch protection\")\n pj.protectedbranches.delete('master')",
"def post(self, data):\n conn = pecan.request.db_conn\n try:\n project = db_models.Project(**data.as_dict())\n return conn.create_project(request.context, project)\n except Exception:\n LOG.exception('Fail to create project: %s' % data.as_dict())\n raise exception.ProjectCreateFailed(project_id=data.project_id,\n user_id=data.user_id)",
"def add_project(self, project):\n c = self.conn.cursor()\n cursor = c.execute(\"INSERT INTO projects VALUES (null, ?, ?, ?, ?)\", (project['owner'],\n project['title'],\n datetime.now(), datetime.now(),))\n\n self.conn.commit()\n project_id = cursor.lastrowid\n\n self.conn.cursor().execute(\"INSERT INTO users_projects VALUES (?,?)\", (project['owner'], project_id),)\n self.conn.commit()\n return self.get_project(project_id)",
"def test_create_project(self):\n pass",
"def test_create_project(self):\n pass",
"def test_create_project(self):\n pass",
"def newproject_view(request):\n\n # Use to tell to the template that the user want to creat a new project\n is_new = True\n\n # Get all the user. Everyone may be member of the project\n users = User.objects.all()\n\n # If the view received data, try to creat a project\n if request.method == \"POST\":\n form = ProjectForm(request.user, request.POST)\n if form.is_valid():\n # Save the new project in the database\n form.save(commit=True)\n\n # redirect to the project list display page\n return redirect(\"projects\")\n else:\n # creat an empty form for the template\n form = ProjectForm(request.user)\n\n return render(request, 'newProject.html', locals())",
"def create_keystone_v3_project(self, **kwargs):\n LOG_OBJ.debug(\"Creating the project.\")\n print self.project_info\n\n _url = \"http://\" + self.host_ip + \":35357/v3/projects\"\n _headers = {'x-auth-token': self.cloud_admin_info[\"token_domain\"],\n 'content-type': 'application/json'}\n\n _project_info = {\"project\": {}}\n for argument in [\"name\", \"description\", \"domain_id\",\n \"enabled\", \"disabled\"]:\n try:\n _project_info['project'].update(\n {argument: kwargs[argument]})\n except KeyError:\n pass\n _body = json.dumps(_project_info)\n response = self.request(\"POST\", _url, _headers, _body)\n\n if response is None:\n LOG_OBJ.error(\"No response from Server while creating project\")\n print (\"No response from Server while creating project\")\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\" Creating project Failed with status %s \"\n \"and error : %s\" % (response.status, response.data))\n print (\" Creating project Failed with status %s and error : %s\" %\n (response.status, response.data))\n return response.status\n\n output = json.loads(response.data)\n LOG_OBJ.info(\"Project details : %s \" % output)\n print (\"Project details : %s \" % output)\n return output['project']['id']",
"def create_project(projectname):\n auth_id = request.get_json().get(\"auth_id\")\n storage_accesses = request.get_json().get(\"storage_accesses\", [])\n response = jsonify(\n admin.create_project(\n current_app.scoped_session(), projectname, auth_id, storage_accesses\n )\n )\n return response",
"def test_create_project_request(self):\n pass",
"def add_project():\n \n if 'username' in session: \n form=ProjectForm()\n \n if request.method == 'POST':\n if form.validate_on_submit():\n user = mongo.db.user.find_one({'username': session['username']})\n mongo.db.projects.insert_one({'username': user['username'],\n 'date': datetime.utcnow(),\n 'title': form.title.data,\n 'deadline': datetime.strptime(form.deadline.data, \"%d/%m/%Y\"),\n 'brief': form.brief.data,\n 'status': \"open\",\n 'note': form.note.data,\n 'user_id': user['_id']\n })\n \n flash('Your project has been created.', 'success')\n return redirect(url_for('projects'))\n \n return render_template('pages/addproject.html', title='New Project', form=form, legend=\"Add a project\")\n \n flash('You need to be logged in to post any content.', 'info')\n return redirect(url_for('login'))",
"def test_create_project(client, session, tokens):\n response = client.post(\n \"/projects\",\n json={\n \"name\": \"New Project\",\n \"organizations\": [],\n \"teams\": [],\n \"users\": [],\n },\n headers={\"Authorization\": f\"Bearer {tokens['write']}\"},\n )\n assert response.status_code == 201\n project_id = response.json[\"id\"]\n assert Project.query.filter(Project.id == project_id).count() == 1",
"def create_project(self, **kwargs):\n save = kwargs.get('save', True) \n if kwargs.has_key('save'):\n del(kwargs['save'])\n\n index = self.object_index()\n defaults = dict(slug = \"test-project-%s\" % index,\n basecamp_url = \"https://foo.basecamphq.com/projects/%s/log\" % index)\n defaults.update(kwargs)\n p = Project(**defaults)\n\n if save:\n p.save()\n self.assert_(p.id)\n return p",
"def testSessionCreate(self):\n success = False\n project = None\n\n try:\n project = self.session.create_project()\n\n success = True\n except Exception:\n pass\n\n self.failUnless(success)\n self.failIf(project is None)",
"def create_keystone_v3_project_user(self, domain_name, domain_role,\n project_details, set_context=True):\n domain_id = self.get_keystone_v3_domain_id(domain_name)\n if not isinstance(domain_id, unicode):\n err_msg = (\"Get domain id is failed with reason %s\" % domain_id)\n LOG_OBJ.error(err_msg)\n return err_msg\n\n # Creation of project\n kwargs = {\"name\": project_details['project_name'],\n \"domain_id\": domain_id}\n project_id = self.create_keystone_v3_project(**kwargs)\n if not isinstance(project_id, unicode):\n err_msg = (\"Project creation failed with reason %s\" % project_id)\n LOG_OBJ.error(err_msg)\n return err_msg\n\n # creation of user with adding roles.\n user_id = self.create_keystone_v3_user_and_add_roles(\n project_details, domain_id, domain_role, project_id)\n if not isinstance(user_id, unicode):\n err_msg = (\"Problem while creating user and assigning role.\"\n \"Reason %s\" % user_id)\n LOG_OBJ.error(err_msg)\n return err_msg\n\n # Set the context to that of this new user of the tenant.\n if set_context:\n tokens = []\n for token_scope in [\"domain\", \"project\"]:\n token = self.get_keystone_v3_token(\n project_details['project_name'], domain_name,\n project_details['user_name'], project_details['password'],\n scope=token_scope)\n # NOTE: The token id is of type str not unicode, in v3 case.\n if not isinstance(token, str):\n err_msg = (\"Get v3 user token is failed with \"\n \"reason %s\" % token)\n LOG_OBJ.error(err_msg)\n return err_msg\n tokens.append(token)\n # Set the token\n self.set_tenant_info(project_details['project_name'], tokens[0],\n tokens[1], project_id)\n return project_id",
"def add_project(request):\n\n profile = get_object_or_404(Profile, user=request.user)\n\n if not profile.is_creator:\n messages.error(request, 'Sorry, only creators can do that.')\n return redirect(reverse('home'))\n\n if request.method == 'POST':\n project_form = ProjectForm(request.POST, request.FILES)\n if project_form.is_valid():\n project = project_form.save(commit=False)\n project.owner = profile\n project.save()\n messages.success(request, 'Successfully created project!')\n return redirect(reverse('project_detail', args=[project.id]))\n else:\n messages.error(\n request,\n 'Failed to create project. Please ensure the form is valid'\n )\n\n project_form = ProjectForm()\n\n template = 'gameproject/add_project.html'\n context = {\n 'project_form': project_form,\n }\n\n return render(request, template, context)",
"def create_user(ctx, db_username, db_password, project_name):\n project = ctx.obj.groups.byName[project_name].get().data\n user = cmd.ensure_admin_user(\n client=ctx.obj, project_id=project.id, username=db_username,\n password=db_password)\n pprint(user)"
] | [
"0.7299471",
"0.7234782",
"0.7076508",
"0.7017567",
"0.6992709",
"0.69262415",
"0.68911916",
"0.6865844",
"0.6858165",
"0.68420464",
"0.6831582",
"0.6794033",
"0.6789412",
"0.6783985",
"0.67498165",
"0.6747258",
"0.6734577",
"0.6734577",
"0.6734577",
"0.6723486",
"0.6712834",
"0.6691413",
"0.6671751",
"0.6661085",
"0.66395086",
"0.66341025",
"0.6615456",
"0.661066",
"0.6545803",
"0.65374726"
] | 0.75843513 | 0 |
CreateProjectAndUser should create a notification. We should be able to grab it. | def test_notification_CreateProjectAndUser(self):
setup_identity_cache()
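        # unauthenticated sign-up should create a task with an associated notification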
url = "/v1/actions/CreateProjectAndUser"
data = {"project_name": "test_project", "email": "[email protected]"}
response = self.client.post(url, data, format="json")
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
new_task = Task.objects.all()[0]
headers = {
"project_name": "test_project",
"project_id": "test_project_id",
"roles": "admin,member",
"username": "[email protected]",
"user_id": "test_user_id",
"authenticated": True,
}
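        # the notification list should reference the task created by the sign-up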
url = "/v1/notifications"
response = self.client.get(url, headers=headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.json()["notifications"][0]["task"], new_task.uuid) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_successfully_create_notifications(self):\n pre_function_notifications = Notification.objects.all()\n self.assertEqual(len(pre_function_notifications), 0)\n\n create_notification(\n user=self.user_with_targets,\n title=\"Hi.\",\n body=\"Hello there, friend.\")\n\n post_function_notifications = Notification.objects.all()\n self.assertEqual(len(post_function_notifications), 1)",
"def test_new_project_existing_project_new_user(self):\n setup_identity_cache()\n\n # create signup#1 - project1 with user 1\n url = \"/v1/actions/CreateProjectAndUser\"\n data = {\"project_name\": \"test_project\", \"email\": \"[email protected]\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n\n # Create signup#2 - project1 with user 2\n data = {\"project_name\": \"test_project\", \"email\": \"[email protected]\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n\n headers = {\n \"project_name\": \"admin_project\",\n \"project_id\": \"admin_project_id\",\n \"roles\": \"admin,member\",\n \"username\": \"admin\",\n \"user_id\": \"admin_id\",\n \"authenticated\": True,\n }\n # approve signup #1\n new_task1 = Task.objects.all()[0]\n url = \"/v1/tasks/\" + new_task1.uuid\n response = self.client.post(\n url, {\"approved\": True}, format=\"json\", headers=headers\n )\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.json(), {\"notes\": [\"created token\"]})\n\n # Attempt to approve signup #2\n new_task2 = Task.objects.all()[1]\n url = \"/v1/tasks/\" + new_task2.uuid\n response = self.client.post(\n url, {\"approved\": True}, format=\"json\", headers=headers\n )\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.json(), {\"errors\": [\"actions invalid\"]})",
"def test_create_and_retrieve_notification(client):\n create_user_response = create_user(client, TEST_USER_NAME, TEST_USER_PASS)\n assert create_user_response.status_code == HttpStatus.created_201.value\n\n new_notification_message = 'Welcome to the eSports Competition'\n new_notification_category = 'Information'\n post_response = create_notification(\n client,\n new_notification_message,\n 15,\n new_notification_category)\n assert post_response.status_code == HttpStatus.created_201.value\n assert Notification.query.count() == 1\n\n # The notification should have created a new notification category as well\n assert NotificationCategory.query.count() == 1\n\n post_response_data = json.loads(post_response.get_data(as_text=True))\n assert post_response_data['message'] == new_notification_message\n\n new_notification_url = post_response_data['url']\n get_response = client.get(\n new_notification_url,\n headers=get_authentication_headers(TEST_USER_NAME, TEST_USER_PASS))\n assert get_response.status_code == HttpStatus.ok_200.value\n\n get_response_data = json.loads(get_response.get_data(as_text=True))\n assert get_response_data['message'] == new_notification_message\n assert get_response_data['notification_category']['name'] == \\\n new_notification_category",
"def create_notification(self, user_id, message, notification_type):\n dao = NotificationsDAO()\n try:\n notif_id = dao.create_notification(user_id, message, notification_type)\n return jsonify(NotificationID=notif_id), 200\n except:\n return jsonify(Error=\"Error processing, query.\"), 400",
"def test_new_project_existing(self):\n\n project = fake_clients.FakeProject(name=\"test_project\")\n\n setup_identity_cache(projects=[project])\n\n url = \"/v1/actions/CreateProjectAndUser\"\n data = {\"project_name\": \"test_project\", \"email\": \"[email protected]\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": \"test_project_id\",\n \"roles\": \"admin,member\",\n \"username\": \"[email protected]\",\n \"user_id\": \"test_user_id\",\n \"authenticated\": True,\n }\n new_task = Task.objects.all()[0]\n url = \"/v1/tasks/\" + new_task.uuid\n response = self.client.post(\n url, {\"approved\": True}, format=\"json\", headers=headers\n )\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.json(), {\"errors\": [\"actions invalid\"]})",
"def test_new_project_existing_user(self):\n\n user = fake_clients.FakeUser(\n name=\"[email protected]\", password=\"123\", email=\"[email protected]\"\n )\n\n setup_identity_cache(users=[user])\n\n # unauthenticated sign up as existing user\n url = \"/v1/actions/CreateProjectAndUser\"\n data = {\"project_name\": \"test_project\", \"email\": user.email}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n\n # approve the sign-up as admin\n headers = {\n \"project_name\": \"admin_project\",\n \"project_id\": \"admin_project_id\",\n \"roles\": \"admin,member\",\n \"username\": \"admin\",\n \"user_id\": \"admin_id\",\n \"authenticated\": True,\n }\n new_task = Task.objects.all()[0]\n url = \"/v1/tasks/\" + new_task.uuid\n response = self.client.post(\n url, {\"approved\": True}, format=\"json\", headers=headers\n )\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.json(), {\"notes\": [\"Task completed successfully.\"]})",
"def test_project_creation(self):\n title = 'Project title'\n code = 'SCW-12345'\n project = self.create_project(\n title=title,\n code=code,\n institution=self.institution,\n tech_lead=self.project_owner,\n category=self.category,\n funding_source=self.funding_source,\n )\n self.assertTrue(isinstance(project, Project))\n self.assertEqual(project.__str__(), code + ' - ' + title)\n self.assertEqual(project.status, Project.AWAITING_APPROVAL)\n self.assertEqual(project.title, title)\n self.assertEqual(project.code, code)\n self.assertTrue(project.awaiting_approval())",
"def create_notification(self, text: str, user_id: str, target_id: str, target_type: NotificationTargetType, *args, **kwargs):\n\n notification_data = api.create_notification(\n text, \n user_id, \n target_id,\n target_type,\n *args,\n api_key=self.__creds.api_key_v2, \n **kwargs)\n return en.Notification(notification_data)",
"def test_user_can_create_a_project(self):\n self.assertEqual(project_model.Project.objects.get(user=self.test_user).pk, self.test_project.pk)",
"def test_create_project_request(self):\n pass",
"def create_pubsub_notification(context, depends_on, status_string):\n\n return [{\n 'name': 'pubsub-notification-{}'.format(status_string),\n 'action': 'gcp-types/pubsub-v1:pubsub.projects.topics.publish',\n 'properties': {\n 'topic':\n context.properties['pubsubTopic'],\n 'messages': [{\n 'attributes': {\n 'projectId': context.properties['projectId'],\n 'status': status_string,\n }\n }]\n },\n 'metadata': {\n # The notification should only run after *all* project-related\n # resources have been deployed.\n 'dependsOn': depends_on,\n # Only trigger the pubsub message when the deployment is created (not on\n # update or delete).\n 'runtimePolicy': ['UPDATE_ALWAYS'],\n },\n }]",
"def test_create_project(self):\n pass",
"def test_create_project(self):\n pass",
"def test_create_project(self):\n pass",
"def create_notification(self, new_notification: dict):\n bucket_name = new_notification['bucket']\n\n if self.gcp_env.project != new_notification['topic_project']:\n _logger.error(f'Notification project mismatch.')\n sys.exit(1)\n\n # create notification\n client = storage.Client()\n bucket = client.get_bucket(bucket_name)\n notification = bucket.notification(\n topic_name=new_notification['topic_name'],\n topic_project=new_notification['topic_project'],\n custom_attributes=None,\n event_types=new_notification['event_types'],\n blob_name_prefix=new_notification['object_name_prefix'],\n payload_format=new_notification['payload_format'],\n notification_id=None,\n )\n\n notification.create(client=client)\n return notification",
"def test_create_project_unknown_user(self):\n self.assertEqual(Project.objects.count(), 2)\n url = reverse('projectroles:api_project_create')\n post_data = {\n 'title': NEW_PROJECT_TITLE,\n 'type': PROJECT_TYPE_PROJECT,\n 'parent': str(self.category.sodar_uuid),\n 'description': 'description',\n 'readme': 'readme',\n 'public_guest_access': False,\n 'owner': INVALID_UUID,\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 400)\n self.assertEqual(Project.objects.count(), 2)",
"def create_notification(self, notifying_href, notifying_action, notified_href, owner):\n if self.id == owner.id:\n return\n new_notification = Notification()\n new_notification.eid = make_uuid()\n new_notification.notifier = self\n new_notification.notifying_href = notifying_href\n new_notification.notifying_action = notifying_action\n new_notification.notified_href = notified_href\n new_notification.owner = owner\n new_notification.save()",
"def test_registered_with_notification(self):\n now = datetime.datetime.now()\n notification = reminders.Notification.objects.create(num_days=1,\n time_of_day=now)\n reminders.SentNotification.objects.create(notification=notification,\n recipient=self.contact,\n status='sent',\n message='abc',\n appt_date=now,\n date_to_send=now)\n msg = self._send(self.reg_conn, '1')\n self.assertEqual(len(msg.responses), 1)\n self.assertEqual(msg.responses[0].text,\n self.app.thank_you)\n sent_notif = reminders.SentNotification.objects.all()\n self.assertEqual(sent_notif.count(), 1)\n self.assertEqual(sent_notif[0].status, 'confirmed')",
"def test_new_project(self):\n\n setup_identity_cache()\n\n url = \"/v1/actions/CreateProjectAndUser\"\n data = {\"project_name\": \"test_project\", \"email\": \"[email protected]\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": \"test_project_id\",\n \"roles\": \"admin,member\",\n \"username\": \"[email protected]\",\n \"user_id\": \"test_user_id\",\n \"authenticated\": True,\n }\n new_task = Task.objects.all()[0]\n url = \"/v1/tasks/\" + new_task.uuid\n response = self.client.post(\n url, {\"approved\": True}, format=\"json\", headers=headers\n )\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.json(), {\"notes\": [\"created token\"]})\n\n new_project = fake_clients.identity_cache[\"new_projects\"][0]\n self.assertEqual(new_project.name, \"test_project\")\n\n new_token = Token.objects.all()[0]\n url = \"/v1/tokens/\" + new_token.token\n data = {\"password\": \"testpassword\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def test_topic_notification_create_has_access(self):\n TopicNotification.objects.all().delete()\n private = utils.create_private_topic(user=self.user)\n utils.create_comment(topic=private.topic)\n\n utils.login(self)\n form_data = {'is_active': True, }\n response = self.client.post(\n reverse(\n 'spirit:topic:notification:create',\n kwargs={'topic_id': private.topic.pk, }),\n form_data)\n self.assertRedirects(\n response, private.topic.get_absolute_url(), status_code=302)\n self.assertEqual(len(TopicNotification.objects.all()), 1)",
"def test_create_owner(self):\n self.assertEqual(\n ProjectInvite.objects.filter(project=self.project).count(), 0\n )\n\n url = reverse(\n 'projectroles:api_invite_create',\n kwargs={'project': self.project.sodar_uuid},\n )\n post_data = {\n 'email': INVITE_USER_EMAIL,\n 'role': PROJECT_ROLE_OWNER,\n 'message': INVITE_MESSAGE,\n }\n response = self.request_knox(url, method='POST', data=post_data)\n\n self.assertEqual(response.status_code, 400, msg=response.content)\n self.assertEqual(\n ProjectInvite.objects.filter(project=self.project).count(), 0\n )\n self.assertEqual(len(mail.outbox), 0)",
"def test_registered_with_notification(self):\n now = datetime.datetime.now()\n notification = reminders.Notification.objects.create(num_days=1,\n time_of_day=now)\n reminders.SentNotification.objects.create(notification=notification,\n recipient=self.contact,\n status='sent',\n message='abc',\n appt_date=now,\n date_to_send=now,\n date_queued=now)\n msg = self._send(self.reg_conn, '1')\n self.assertEqual(len(msg.responses), 1)\n self.assertEqual(msg.responses[0].text,\n self.app.thank_you)\n sent_notif = reminders.SentNotification.objects.all()\n self.assertEqual(sent_notif.count(), 1)\n self.assertEqual(sent_notif[0].status, 'confirmed')",
"def create_notification(request):\n input_json = request\n try:\n create_notification_params = dict(zip(['type_id', 'distribution_type_id',\n 'notification_text', 'redirection_url', 'notification_status',\n 'notifier_profile_id', 'notified_profile_id', 'algorithm_id', 'comments',\n 'added_by', 'last_modified_by'],\n [input_json['type_id'], input_json['distribution_type_id'],\n input_json['notification_text'], input_json['redirection_url'],\n 1, input_json['notifier_profile_id'], input_json['notified_profile_id'],\n input_json['algorithm_id'], input_json['comments'],\n input_json['profile_id'], input_json['profile_id']]))\n serializer_var = serializer_save(SuperNotificationsSerializer, create_notification_params)\n output_json = dict(zip(['Status', 'Message', 'Payload'],\n ['Success', 'Notification was created successfully', serializer_var.data]))\n return output_json\n except Exception as ex:\n output_json = dict(\n zip(['Status', 'Message', 'Payload'], ['Failure', f'Unable to create Notification.{ex}', None]))\n return output_json",
"def test_create_project(self):\n yield self.nodes[0].overlay.create_project(\"test\", \"specpointer\", \"01-02-03\", 300, \"EUR\", 5)\n yield self.deliver_messages()\n\n # Node 2 should know about this app request now\n projects = self.nodes[1].overlay.persistence.get_projects()\n self.assertTrue(projects)\n self.assertEqual(projects[0]['id'], 1)",
"async def create(self, ctx, name: str,\n owner: discord.Member = None) -> discord.Message:\n if ctx.projects.find_project(name):\n project = ctx.projects.find_project(name)\n if ctx.guild.get_Channel(int(project.get(\"channel\"))):\n return await ctx.send(\"A project with that name exists.\")\n else:\n await ctx.send(\"A project with this name exists but, a related\"\n \" project channel was not found. \"\n \"I will be overwriting the previous project.\")\n ctx.projects.delete_project(name)\n\n owner = owner if owner else ctx.author\n if not ctx.bot.db(\"guilds\").find(str(ctx.guild.id)):\n ctx.bot.db(\"guilds\").insert(str(ctx.guild.id), ctx.bot.empty_guild)\n\n # await ctx.send(\"Creating project channel...\")\n if not ctx.bot.db(\"guilds\").find(\n str(ctx.guild.id)).get(\"project_category\"):\n overwrites = {\n ctx.guild.default_role: discord.PermissionOverwrite(\n read_messages=False\n ),\n ctx.me: discord.PermissionOverwrite(\n read_messages=True,\n send_messages=True,\n manage_channels=True\n )\n }\n category = await ctx.guild.create_category(\"Flux Projects\",\n overwrites=overwrites)\n ctx.bot.db(\"guilds\").update(str(ctx.guild.id), {\n \"project_category\": str(category.id)})\n\n else:\n category = ctx.guild.get_channel(\n int(ctx.bot.db(\"guilds\").find(\n str(ctx.guild.id)).get(\"project_category\")))\n\n overwrites = {owner: discord.PermissionOverwrite(read_messages=True,\n send_messages=False,\n add_reactions=True),\n ctx.me: discord.PermissionOverwrite(read_messages=True,\n send_messages=True),\n ctx.guild.default_role: discord.PermissionOverwrite(\n read_messages=False)}\n\n channel = await ctx.guild.create_text_channel(f\"{name}-project\",\n category=category,\n overwrites=overwrites)\n await channel.send(f\"Project Owner: {owner}\")\n message = await channel.send(self.empty_progress_bar)\n await message.pin()\n res = ctx.projects.create_project(\n owner.id, owner.id, name, channel.id, message.id)\n if not res:\n return await ctx.send(\"An error has occurred. Use `.contact`\"\n \" with error: `ERR_PROJECT_STILL_EXISTS`\")\n return await ctx.send(\"Project created!\")",
"def test_topic_notification_create_maybe(self):\n user = utils.create_user()\n topic = utils.create_topic(self.category)\n comment = utils.create_comment(topic=topic)\n TopicNotification.create_maybe(user=user, comment=comment)\n notification = TopicNotification.objects.get(user=user, topic=topic)\n self.assertTrue(notification.is_active)\n self.assertTrue(notification.is_read)\n self.assertEqual(notification.action, COMMENT)\n\n # Creating it again should do nothing\n (TopicNotification.objects\n .filter(user=user, topic=topic)\n .update(is_active=False))\n TopicNotification.create_maybe(user=user, comment=comment)\n self.assertFalse(\n TopicNotification.objects.get(user=user, topic=topic).is_active)",
"def _create_dummy_project(self,projectname=\"testproject\"):\n # Create three types of users that exist: Root, can do anything, \n # projectadmin, cam do things to a project he or she owns. And logged in\n # user \n \n #created in _create_main_project_and_root.\n root = self.root\n # non-root users are created as if they signed up through the project, \n # to maximize test coverage. \n \n # A user who has created a project\n projectadmin = self._create_random_user(\"projectadmin_\")\n \n testproject = self._create_comicsite_in_admin(projectadmin,projectname)\n create_page_in_admin(testproject,\"testpage1\")\n create_page_in_admin(testproject,\"testpage2\")\n \n # a user who explicitly signed up to testproject\n participant = self._create_random_user(\"participant_\")\n self._register(participant,testproject)\n \n # a user who only signed up but did not register to any project\n registered_user = self._create_random_user(\"comicregistered_\")\n \n #TODO: How to do this gracefully? \n return [testproject,root,projectadmin,participant,registered_user]",
"def test_notify_user(self):\n foo = Foo.objects.create(name='foo', description='foo object')\n notify_users([self.user_a], foo, notification_type='foo')\n self.assertEqual(len(mail.outbox), 1)",
"def test_new_project_invalid_on_submit(self):\n\n setup_identity_cache()\n\n url = \"/v1/actions/CreateProjectAndUser\"\n data = {\"project_name\": \"test_project\", \"email\": \"[email protected]\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": \"test_project_id\",\n \"roles\": \"admin,member\",\n \"username\": \"[email protected]\",\n \"user_id\": \"test_user_id\",\n \"authenticated\": True,\n }\n new_task = Task.objects.all()[0]\n url = \"/v1/tasks/\" + new_task.uuid\n response = self.client.post(\n url, {\"approved\": True}, format=\"json\", headers=headers\n )\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.data, {\"notes\": [\"created token\"]})\n self.assertEqual(len(mail.outbox), 3)\n\n fake_clients.identity_cache[\"projects\"] = {}\n\n new_token = Token.objects.all()[0]\n url = \"/v1/tokens/\" + new_token.token\n data = {\"password\": \"testpassword\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(len(mail.outbox), 3)",
"def test_duplicate_tasks_new_project(self):\n\n setup_identity_cache()\n\n url = \"/v1/actions/CreateProjectAndUser\"\n data = {\"project_name\": \"test_project\", \"email\": \"[email protected]\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)\n\n data = {\"project_name\": \"test_project_2\", \"email\": \"[email protected]\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)"
] | [
"0.6984443",
"0.6352937",
"0.62960654",
"0.62762576",
"0.6200471",
"0.6186733",
"0.6161037",
"0.6132949",
"0.613171",
"0.6121216",
"0.61175996",
"0.6106454",
"0.6106454",
"0.6106454",
"0.6099714",
"0.6092147",
"0.60814804",
"0.6060918",
"0.60601914",
"0.6047661",
"0.6045657",
"0.6041352",
"0.60290015",
"0.6021321",
"0.5992566",
"0.59889764",
"0.5982218",
"0.5977717",
"0.5967081",
"0.5957213"
] | 0.8151886 | 0 |
Ensure the update email workflow goes as expected. Create task, create token, submit token. | def test_update_email_task(self):
user = fake_clients.FakeUser(
name="[email protected]", password="123", email="[email protected]"
)
setup_identity_cache(users=[user])
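        # submit the UpdateEmail task as the authenticated user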
url = "/v1/actions/UpdateEmail"
headers = {
"project_name": "test_project",
"project_id": "test_project_id",
"roles": "project_admin,member,project_mod",
"username": "[email protected]",
"user_id": user.id,
"authenticated": True,
}
data = {"new_email": "[email protected]"}
response = self.client.post(url, data, format="json", headers=headers)
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
self.assertEqual(response.json(), {"notes": ["task created"]})
new_token = Token.objects.all()[0]
url = "/v1/tokens/" + new_token.token
data = {"confirm": True}
response = self.client.post(url, data, format="json")
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(user.name, "[email protected]") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_update_email_task_send_email_current_name_not_email(self):\n\n user = fake_clients.FakeUser(\n name=\"nkdfslnkls\", password=\"123\", email=\"[email protected]\"\n )\n\n setup_identity_cache(users=[user])\n\n url = \"/v1/actions/UpdateEmail\"\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": \"test_project_id\",\n \"roles\": \"project_admin,member,project_mod\",\n \"username\": \"nkdfslnkls\",\n \"user_id\": user.id,\n \"authenticated\": True,\n \"email\": \"[email protected]\",\n }\n\n data = {\"new_email\": \"[email protected]\"}\n response = self.client.post(url, data, format=\"json\", headers=headers)\n\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.data, {\"notes\": [\"task created\"]})\n\n self.assertEqual(len(mail.outbox), 2)\n self.assertEqual(mail.outbox[0].to, [\"[email protected]\"])\n self.assertEqual(mail.outbox[0].subject, \"update_user_email_additional\")\n\n self.assertEqual(mail.outbox[1].to, [\"[email protected]\"])\n self.assertEqual(mail.outbox[1].subject, \"update_user_email_token\")\n\n new_token = Token.objects.all()[0]\n url = \"/v1/tokens/\" + new_token.token\n\n data = {\"confirm\": True}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n self.assertEqual(len(mail.outbox), 3)",
"def test_update_email_task_send_email_to_current_user(self):\n\n user = fake_clients.FakeUser(\n name=\"[email protected]\", password=\"123\", email=\"[email protected]\"\n )\n\n setup_identity_cache(users=[user])\n\n url = \"/v1/actions/UpdateEmail\"\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": \"test_project_id\",\n \"roles\": \"project_admin,member,project_mod\",\n \"username\": \"[email protected]\",\n \"user_id\": user.id,\n \"authenticated\": True,\n }\n\n data = {\"new_email\": \"[email protected]\"}\n response = self.client.post(url, data, format=\"json\", headers=headers)\n\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.data, {\"notes\": [\"task created\"]})\n\n self.assertEqual(len(mail.outbox), 2)\n\n self.assertEqual(mail.outbox[0].to, [\"[email protected]\"])\n self.assertEqual(mail.outbox[0].subject, \"update_user_email_additional\")\n\n self.assertEqual(mail.outbox[1].to, [\"[email protected]\"])\n self.assertEqual(mail.outbox[1].subject, \"update_user_email_token\")\n\n new_token = Token.objects.all()[0]\n url = \"/v1/tokens/\" + new_token.token\n\n data = {\"confirm\": True}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(user.name, \"[email protected]\")\n\n self.assertEqual(len(mail.outbox), 3)",
"def test_update_user_endpoint_new_email(self):\n print(\"Generate a new email and check if email is not allocated\")\n email_id = Workflows.generate_new_email(suffix=self.global_config[\"email_id_suffix\"])\n kwargs = {'email_id': email_id, 'return_response_obj': True,\n 'url': self.test_args[\"relative_url_check_email\"]}\n response = self.test_check_email_endpoint(**kwargs)\n assert json.loads(response.text)[\"data\"][\"available\"] is True, \"Unable to generate a new email id\"\n\n print(\"Update email id\")\n response = self.test_update_user_endpoint(**kwargs)\n\n print(\"Verify Response body\")\n assert json.loads(response.text)[\"message\"] == self.test_args[\"expected_result\"], \"Test Failed\"",
"def test_update_email_task_not_authenticated(self):\n\n user = fake_clients.FakeUser(\n name=\"[email protected]\", password=\"123\", email=\"[email protected]\"\n )\n\n setup_identity_cache(users=[user])\n\n url = \"/v1/actions/UpdateEmail\"\n headers = {}\n\n data = {\"new_email\": \"new_test@examplecom\"}\n response = self.client.post(url, data, format=\"json\", headers=headers)\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)",
"def commit(self):\n data = self._to_json()\n resp = self._connection._put(get_url('task update', uuid=self._uuid), json=data)\n self._auto_update = self._last_auto_update_state\n if resp.status_code == 404:\n raise MissingTaskException(resp.json()['message'])\n\n raise_on_error(resp)",
"def test_admin_approval_complete_email(self):\n new_user = UserModel().objects.create_user(**self.user_info)\n profile = self.registration_profile.objects.create_profile(new_user)\n profile.send_admin_approve_complete_email(Site.objects.get_current())\n self.assertEqual(len(mail.outbox), 1)\n self.assertEqual(mail.outbox[0].to, [self.user_info['email']])",
"def test_make_email_tasks__update(self, mock_f_e_b):\n mock_f_e_b.return_value = 'mock body html'\n actual_tasks = notifier.make_email_tasks(\n self.feature_1, True, self.changes)\n self.assertEqual(2, len(actual_tasks))\n owner_task, watcher_task = actual_tasks\n self.assertEqual('updated feature: feature one', owner_task['subject'])\n self.assertIn('mock body html', owner_task['html'])\n self.assertEqual('[email protected]', owner_task['to'])\n self.assertEqual('updated feature: feature one', watcher_task['subject'])\n self.assertIn('mock body html', watcher_task['html'])\n self.assertEqual('[email protected]', watcher_task['to'])\n mock_f_e_b.assert_called_once_with(\n True, self.feature_1, self.changes)",
"def test_reactivate_process(self, mock_sendmail):\r\n res = self.testapp.post('/api/v1/suspend',\r\n params={'email': u'[email protected]'},\r\n status=200)\r\n self.assertTrue(mock_sendmail.called)\r\n\r\n success = json.loads(res.body)\r\n self.assertTrue(\r\n 'message' in success,\r\n \"Should be successful with admin email address\")\r\n\r\n # now let's try to login\r\n # the migrations add a default admin account\r\n user_data = {'login': 'admin',\r\n 'password': 'admin',\r\n 'form.submitted': 'true'}\r\n\r\n res = self.testapp.post('/login',\r\n params=user_data,\r\n status=200)\r\n\r\n self.assertTrue(\r\n 'account deactivated' in str(res),\r\n \"Login should have failed since we're not active: \" + str(res))\r\n\r\n act = Activation.query.first()\r\n self.testapp.delete(\r\n \"/api/v1/suspend?username={0}&code={1}&password={2}\".format(\r\n user_data['login'],\r\n act.code,\r\n 'admin'),\r\n status=200)\r\n\r\n self.assertTrue(\r\n 'activated' in str(res),\r\n \"Should be prompted to login now: \" + str(res))\r\n\r\n user_data = {'login': 'admin',\r\n 'password': 'admin',\r\n 'form.submitted': 'true'}\r\n\r\n res = self.testapp.post('/login',\r\n params=user_data,\r\n status=302)",
"def test_incremental_tasks(self):\r\n self.create_2(sched='incremental')\r\n\r\n # Del previous TaskRuns\r\n self.del_task_runs()\r\n\r\n # Register\r\n self.register(fullname=self.user.fullname, name=self.user.username,\r\n password=self.user.password)\r\n self.register(fullname=\"Marie Doe\", name=\"mariedoe\", password=\"dr0wss4p\")\r\n self.signin()\r\n\r\n # Get the only task with no runs!\r\n res = self.app.get('api/app/1/newtask')\r\n data = json.loads(res.data)\r\n # Check that we received a clean Task\r\n assert data.get('info'), data\r\n assert not data.get('info').get('last_answer')\r\n\r\n # Submit an Answer for the assigned task\r\n tr = dict(app_id=data['app_id'], task_id=data['id'], info={'answer': 'No'})\r\n tr = json.dumps(tr)\r\n\r\n self.app.post('/api/taskrun', data=tr)\r\n # No more tasks available for this user!\r\n res = self.app.get('api/app/1/newtask')\r\n data = json.loads(res.data)\r\n assert not data\r\n\r\n #### Get the only task now with an answer as Anonimous!\r\n self.signout()\r\n res = self.app.get('api/app/1/newtask')\r\n data = json.loads(res.data)\r\n\r\n # Check that we received a Task with answer\r\n assert data.get('info'), data\r\n assert data.get('info').get('last_answer').get('answer') == 'No'\r\n\r\n # Submit a second Answer as Anonimous\r\n tr = dict(app_id=data['app_id'], task_id=data['id'],\r\n info={'answer': 'No No'})\r\n tr = json.dumps(tr)\r\n\r\n self.app.post('/api/taskrun', data=tr)\r\n\r\n #### Get the only task now with an answer as User2!\r\n self.signin(email=\"[email protected]\", password=\"dr0wss4p\")\r\n res = self.app.get('api/app/1/newtask')\r\n data = json.loads(res.data)\r\n\r\n # Check that we received a Task with answer\r\n assert data.get('info'), data\r\n assert data.get('info').get('last_answer').get('answer') == 'No No'",
"def test_make_email_tasks__new(self, mock_f_e_b):\n mock_f_e_b.return_value = 'mock body html'\n actual_tasks = notifier.make_email_tasks(\n self.feature_1, is_update=False, changes=[])\n self.assertEqual(2, len(actual_tasks))\n owner_task, watcher_task = actual_tasks\n self.assertEqual('new feature: feature one', owner_task['subject'])\n self.assertIn('mock body html', owner_task['html'])\n self.assertEqual('[email protected]', owner_task['to'])\n self.assertEqual('new feature: feature one', watcher_task['subject'])\n self.assertIn('mock body html', watcher_task['html'])\n self.assertEqual('[email protected]', watcher_task['to'])\n mock_f_e_b.assert_called_once_with(\n False, self.feature_1, [])",
"def test_additional_emails_role_no_email(self):\n\n project = fake_clients.FakeProject(name=\"test_project\")\n\n user = fake_clients.FakeUser(\n name=\"[email protected]\", password=\"123\", email=\"[email protected]\"\n )\n\n assignment = fake_clients.FakeRoleAssignment(\n scope={\"project\": {\"id\": project.id}},\n role_name=\"member\",\n user={\"id\": user.id},\n )\n\n setup_identity_cache(\n projects=[project], users=[user], role_assignments=[assignment]\n )\n\n url = \"/v1/actions/InviteUser\"\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": project.id,\n \"roles\": \"project_admin,member,project_mod\",\n \"username\": \"[email protected]\",\n \"user_id\": \"test_user_id\",\n \"authenticated\": True,\n }\n\n data = {\"email\": \"[email protected]\", \"roles\": [\"member\"]}\n response = self.client.post(url, data, format=\"json\", headers=headers)\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.data, {\"notes\": [\"task created\"]})\n\n self.assertEqual(len(mail.outbox), 1)\n\n # Test that the token email gets sent to the other addresses\n self.assertEqual(mail.outbox[0].to[0], \"[email protected]\")\n\n new_token = Token.objects.all()[0]\n url = \"/v1/tokens/\" + new_token.token\n\n data = {\"confirm\": True, \"password\": \"1234\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"async def verify(token: TextData, background_tasks: BackgroundTasks):\n token_data = token.data\n mail, subject, body = await AccountProcessor.confirm_email(token_data)\n background_tasks.add_task(Utility.validate_and_send_mail, email=mail, subject=subject, body=body)\n return {\"message\": \"Account Verified!\"}",
"def test_update_submission_service(self):\n pass",
"def task_verify_site(self, req, resp, json_data):\n action = json_data.get('action', None)\n\n if action != 'verify_site':\n self.error(\n req.context,\n \"Task body ended up in wrong handler: action %s in task_verify_site\"\n % action)\n self.return_error(resp,\n falcon.HTTP_500,\n message=\"Error\",\n retry=False)\n\n try:\n task = self.create_task(json_data, req.context)\n resp.text = json.dumps(task.to_dict())\n resp.append_header('Location',\n \"/api/v1.0/tasks/%s\" % str(task.task_id))\n resp.status = falcon.HTTP_201\n except errors.InvalidFormat as ex:\n self.error(req.context, ex.msg)\n self.return_error(resp,\n falcon.HTTP_400,\n message=ex.msg,\n retry=False)",
"def send_updating_email(request: Request, data: dict, action: str, email: str = None, updated_data: str = None) -> None:\n\n logger.info(msg=f'creating model subscriber for user id: {data[\"id\"]}')\n try:\n UserSubscriber.objects.create(owner=get_user_model().objects.get(id=data['id']))\n logger.info(msg=f'Subscriber for user {data[\"id\"]} has been created')\n send_telegram_message.delay(message=f'Subscriber for user {data[\"id\"]} has been created',\n group_type='success')\n except Exception as e:\n logger.warning(msg=f'Error on creating subscriber for user {data[\"id\"]}; {str(e)}')\n send_telegram_message.delay(message=f'Error on creating subscriber for user {data[\"id\"]}; {str(e)}',\n group_type='errors')\n logger.info(msg=f'creating web url for {action}')\n uid_data = _create_unique_uid(user_id=data['id'],\n updated_data=updated_data)\n url = _current_ip_port(is_secure=request.is_secure(),\n host=request.get_host(),\n url=f'/api/account/{action}/{uid_data[\"uid\"]}/{uid_data[\"user_id\"]}')\n logger.info(f'Move send email with action: {action} to celery task')\n request.data.pop('image', None)\n send_email_task.delay(data=data, action=action, url=url, request_data=request.data, email=email)\n if request.data.get('telegram_chat_id') is not None:\n send_telegram_message.delay(chat_id=request.data['telegram_chat_id'],\n message='This is your personal chat, you will receive '\n 'messages in this chat with new posts(if you have subscriptions).')",
"def test_update_submission(self):\n sub_response_register = register_ok_submission(self, self.token)\n response_data = json.loads(sub_response_register.data.decode())\n self.assertTrue(response_data['status']=='success')\n\n sub = [sub for sub in Submission.query(hash_key=self.new_user.username, range_key_condition=Submission.sort.startswith('SUBMISSION_'))][0]\n sub_response_update = self.client.put(\n '/submission/{}'.format(str(sub.public_id)),\n headers=dict(\n Authorization=\"Token {}\".format(self.token)\n ),\n data=json.dumps(dict(\n submitted_texts=['updated_text1']\n )),\n content_type='application/json'\n )\n update_data = json.loads(sub_response_update.data.decode())\n upd_sub = Submission.get(hash_key=sub.username, range_key=sub.sort)\n self.assertTrue(update_data['status']=='success')\n self.assertTrue(upd_sub.text_count == 1)",
"def test_cron_workflow_service_update_cron_workflow(self):\n pass",
"def test_api_user_resend_confirmation_post(self):\n pass",
"def task_send_reminder_email():\n send_reminder_email()\n logger.info(\"Sent reminder email\")",
"def test_activate_form_dual(self, mock_sendmail):\r\n res = self.testapp.post('/api/v1/suspend',\r\n params={'email': u'[email protected]'},\r\n status=200)\r\n self.assertTrue(mock_sendmail.called)\r\n\r\n success = json.loads(res.body)\r\n self.assertTrue(\r\n 'message' in success,\r\n \"Should be successful with admin email address\")\r\n\r\n res = self.testapp.post('/api/v1/suspend',\r\n params={'email': u'[email protected]'},\r\n status=406)\r\n\r\n success = json.loads(res.body)\r\n self.assertTrue(\r\n 'error' in success,\r\n \"Should not be successful on second try: \" + str(res))\r\n\r\n self.assertTrue(\r\n 'already' in str(res),\r\n \"Should find 'already' in the response: \" + str(res))",
"def put(self, guid):\n key = db.Key.from_path('Task', int(guid))\n task = db.get(key)\n if task != None:\n # cache current values before updates\n taskName = task.name\n taskType = task.type\n taskPriority = task.priority\n taskStatus = task.developmentStatus\n taskValidation = task.validation\n taskSubmitterId = task.submitterId\n taskAssigneeId = task.assigneeId\n taskEffort = task.effort\n taskProjectId = task.projectId\n taskDescription = task.description\n # collect the json from the request\n task_json = simplejson.loads(self.request.body)\n # if the user is a guest the project must be unallocated\n wantsNotifications = {\"true\": True, \"false\": False}.get(self.request.params['notify'].lower())\n currentUserId = self.request.params['UUID']\n cukey = db.Key.from_path('User', int(currentUserId))\n user = db.get(cukey)\n if str(user.role) != '_Guest' or (task_json.has_key('projectId') == False or task_json['projectId'] == None):\n # update the project record\n task = helpers.apply_json_to_model_instance(task, task_json)\n # save the updated data\n task.put()\n # Push notification email on the queue if we need to notify\n if notification.should_notify(currentUserId,task,\"updateTask\",wantsNotifications):\n taskqueue.add(url='/mailer', params={'taskId': int(guid), 'currentUUID': self.request.params['UUID'], 'action': \"updateTask\", 'name': taskName, 'type': taskType, 'priority': taskPriority, 'status': taskStatus, 'validation': taskValidation, 'submitterId': taskSubmitterId, 'assigneeId': taskAssigneeId, 'effort': taskEffort, 'projectId': taskProjectId, 'description': taskDescription})\n # return the same record...\n self.response.headers['Content-Type'] = 'application/json'\n self.response.out.write(simplejson.dumps(task_json))\n else:\n self.response.set_status(401, \"Not Authorized\")\n else:\n self.response.set_status(404, \"Task not found\")",
"def test_admin_approval_email(self):\n new_user = UserModel().objects.create_user(**self.user_info)\n profile = self.registration_profile.objects.create_profile(new_user)\n profile.activated = True\n self.registration_profile.objects.send_admin_approve_email(\n new_user, Site.objects.get_current())\n self.assertEqual(len(mail.outbox), 1)\n admins_emails = [value[1] for value in settings.REGISTRATION_ADMINS]\n for email in mail.outbox[0].to:\n self.assertIn(email, admins_emails)",
"def prepare(self):\n\n self.confirm_state(approved=False, completed=False, cancelled=False)\n\n for action in self.actions:\n try:\n action.prepare()\n except Exception as e:\n handle_task_error(e, self.task, error_text=\"while setting up task\")\n\n # send initial confirmation email:\n email_conf = self.config.emails.initial\n send_stage_email(self.task, email_conf)\n\n approve_list = [act.auto_approve for act in self.actions]\n\n # TODO(amelia): It would be nice to explicitly test this, however\n # currently we don't have the right combinations of\n # actions to allow for it.\n if False in approve_list:\n can_auto_approve = False\n elif True in approve_list:\n can_auto_approve = True\n else:\n can_auto_approve = False\n\n if self.config.allow_auto_approve is not None:\n allow_auto_approve = self.config.allow_auto_approve\n else:\n allow_auto_approve = self.allow_auto_approve\n\n if can_auto_approve and not allow_auto_approve:\n self.add_note(\"Actions allow auto aproval, but task does not.\")\n elif can_auto_approve:\n self.add_note(\"Action allow auto approval. Auto approving.\")\n self.approve()\n return\n\n if self.send_approval_notification:\n notes = {\"notes\": [\"'%s' task needs approval.\" % self.task_type]}\n create_notification(self.task, notes)",
"def send_ext_customer_task(email,name,password,phone,shop,address,lead_mail,mem_mail,website):\n print(\"member email\",mem_mail)\n logger.info(\"in sending existing customer mail task\")\n return send_ext_customer_mail(email,name,password,phone,shop,address,lead_mail,mem_mail,website)",
"def test_new_project_invalid_on_submit(self):\n\n setup_identity_cache()\n\n url = \"/v1/actions/CreateProjectAndUser\"\n data = {\"project_name\": \"test_project\", \"email\": \"[email protected]\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": \"test_project_id\",\n \"roles\": \"admin,member\",\n \"username\": \"[email protected]\",\n \"user_id\": \"test_user_id\",\n \"authenticated\": True,\n }\n new_task = Task.objects.all()[0]\n url = \"/v1/tasks/\" + new_task.uuid\n response = self.client.post(\n url, {\"approved\": True}, format=\"json\", headers=headers\n )\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.data, {\"notes\": [\"created token\"]})\n self.assertEqual(len(mail.outbox), 3)\n\n fake_clients.identity_cache[\"projects\"] = {}\n\n new_token = Token.objects.all()[0]\n url = \"/v1/tokens/\" + new_token.token\n data = {\"password\": \"testpassword\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(len(mail.outbox), 3)",
"def test_user_01_newtask(self):\r\n # Del previous TaskRuns\r\n self.create()\r\n self.del_task_runs()\r\n\r\n # Register\r\n self.register()\r\n self.signin()\r\n res = self.app.get('api/app/1/newtask')\r\n data = json.loads(res.data)\r\n assert data['info'], data\r\n self.signout()",
"def test_weekly_update_email(self):\n district_list = self._district_summary_data()\n totals = self._district_summary_totals(district_list)\n c = {'district_list': district_list, 'totals': totals,\n 'url': settings.MSPRAY_WEEKLY_DASHBOARD_UPDATE_URL}\n subject = render_to_string(\n 'alerts/emails/weekly_update_subject.txt', c).replace('\\n', '')\n text_content = render_to_string(\n 'alerts/emails/weekly_update_body.txt', c)\n html_content = render_to_string(\n 'alerts/emails/weekly_update_body.html', c).replace('\\n', '')\n memory_backend = 'django.core.mail.backends.locmem.EmailBackend'\n with self.settings(EMAIL_BACKEND=memory_backend):\n weekly_update_email(['Mosh <[email protected]>'], district_list,\n totals)\n self.assertEqual(len(mail.outbox), 1)\n email = mail.outbox[0]\n self.assertEqual(email.subject, subject)\n self.assertEqual(email.body, text_content)\n self.assertEqual(email.alternatives[0][0], html_content)",
"def test_update_user_endpoint_existing_email(self, **kwargs):\n print(\"Create a new user and capture the email\")\n kwargs['return_response_obj'] = True\n response = self.test_create_user_endpoint(**kwargs)\n email_id = json.loads(response.text)[\"data\"][\"user\"][\"email\"]\n kwargs = {'email_id': email_id, 'return_response_obj': True, \"return_failure_response\": True,\n 'url': self.test_args[\"relative_url_check_email\"]}\n\n print(\"Update email id\")\n response = self.test_update_user_endpoint(**kwargs)\n\n print(\"Verify Response body\")\n assert json.loads(response.text)[\"message\"] == self.test_args[\"expected_result\"], \"Test Failed\"",
"def test_workflows_post(self):\n pass",
"def test_with_unpermitted_token(self):\n email_text = self.email_template % self.token.uuid\n assert not save_from_email_reply(email_text)"
] | [
"0.6641487",
"0.65933645",
"0.63793033",
"0.5988052",
"0.58915395",
"0.5823073",
"0.5801181",
"0.56437814",
"0.5622126",
"0.556626",
"0.5563945",
"0.5509865",
"0.55096465",
"0.5467318",
"0.54488516",
"0.54419976",
"0.54326904",
"0.5379935",
"0.53690886",
"0.5351489",
"0.5348408",
"0.53481853",
"0.5345667",
"0.5336823",
"0.52997506",
"0.5287378",
"0.5273909",
"0.5271913",
"0.52648836",
"0.52579874"
] | 0.72772837 | 0 |
Tests the sending of additional emails to a set of roles in a project | def test_additional_emails_roles(self):
# NOTE(amelia): sending this email here is probably not the intended
# case. It would be more useful in utils such as a quota update or a
        # child project being created, where all the project admins should be
        # notified.
project = fake_clients.FakeProject(name="test_project")
user = fake_clients.FakeUser(
name="[email protected]", password="123", email="[email protected]"
)
user2 = fake_clients.FakeUser(
name="[email protected]", password="123", email="[email protected]"
)
user3 = fake_clients.FakeUser(
name="[email protected]", password="123", email="[email protected]"
)
assignments = [
fake_clients.FakeRoleAssignment(
scope={"project": {"id": project.id}},
role_name="member",
user={"id": user.id},
),
fake_clients.FakeRoleAssignment(
scope={"project": {"id": project.id}},
role_name="project_admin",
user={"id": user.id},
),
fake_clients.FakeRoleAssignment(
scope={"project": {"id": project.id}},
role_name="member",
user={"id": user2.id},
),
fake_clients.FakeRoleAssignment(
scope={"project": {"id": project.id}},
role_name="project_admin",
user={"id": user2.id},
),
fake_clients.FakeRoleAssignment(
scope={"project": {"id": project.id}},
role_name="member",
user={"id": user3.id},
),
fake_clients.FakeRoleAssignment(
scope={"project": {"id": project.id}},
role_name="project_mod",
user={"id": user3.id},
),
]
setup_identity_cache(
projects=[project], users=[user, user2, user3], role_assignments=assignments
)
url = "/v1/actions/InviteUser"
headers = {
"project_name": "test_project",
"project_id": project.id,
"roles": "project_admin,member,project_mod",
"username": "[email protected]",
"user_id": "test_user_id",
"authenticated": True,
}
data = {
"email": "[email protected]",
"roles": ["member"],
"project_id": project.id,
}
response = self.client.post(url, data, format="json", headers=headers)
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
self.assertEqual(response.json(), {"notes": ["task created"]})
self.assertEqual(len(mail.outbox), 2)
self.assertEqual(len(mail.outbox[0].to), 2)
self.assertEqual(set(mail.outbox[0].to), set([user.email, user2.email]))
self.assertEqual(mail.outbox[0].subject, "invite_user_to_project_additional")
# Test that the token email gets sent to the other addresses
self.assertEqual(mail.outbox[1].to[0], "[email protected]")
new_token = Token.objects.all()[0]
url = "/v1/tokens/" + new_token.token
data = {"confirm": True, "password": "1234"}
response = self.client.post(url, data, format="json")
self.assertEqual(response.status_code, status.HTTP_200_OK) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_additional_emails_role_no_email(self):\n\n project = fake_clients.FakeProject(name=\"test_project\")\n\n user = fake_clients.FakeUser(\n name=\"[email protected]\", password=\"123\", email=\"[email protected]\"\n )\n\n assignment = fake_clients.FakeRoleAssignment(\n scope={\"project\": {\"id\": project.id}},\n role_name=\"member\",\n user={\"id\": user.id},\n )\n\n setup_identity_cache(\n projects=[project], users=[user], role_assignments=[assignment]\n )\n\n url = \"/v1/actions/InviteUser\"\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": project.id,\n \"roles\": \"project_admin,member,project_mod\",\n \"username\": \"[email protected]\",\n \"user_id\": \"test_user_id\",\n \"authenticated\": True,\n }\n\n data = {\"email\": \"[email protected]\", \"roles\": [\"member\"]}\n response = self.client.post(url, data, format=\"json\", headers=headers)\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.data, {\"notes\": [\"task created\"]})\n\n self.assertEqual(len(mail.outbox), 1)\n\n # Test that the token email gets sent to the other addresses\n self.assertEqual(mail.outbox[0].to[0], \"[email protected]\")\n\n new_token = Token.objects.all()[0]\n url = \"/v1/tokens/\" + new_token.token\n\n data = {\"confirm\": True, \"password\": \"1234\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def test_email_additional_addresses(self):\n project = fake_clients.FakeProject(name=\"test_project\")\n\n user = fake_clients.FakeUser(\n name=\"[email protected]\", password=\"123\", email=\"[email protected]\"\n )\n\n assignments = [\n fake_clients.FakeRoleAssignment(\n scope={\"project\": {\"id\": project.id}},\n role_name=\"member\",\n user={\"id\": user.id},\n ),\n fake_clients.FakeRoleAssignment(\n scope={\"project\": {\"id\": project.id}},\n role_name=\"project_admin\",\n user={\"id\": user.id},\n ),\n ]\n\n setup_identity_cache(\n projects=[project], users=[user], role_assignments=assignments\n )\n\n url = \"/v1/actions/InviteUser\"\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": project.id,\n \"roles\": \"project_admin,member,project_mod\",\n \"username\": \"[email protected]\",\n \"user_id\": \"test_user_id\",\n \"authenticated\": True,\n }\n\n data = {\"email\": \"[email protected]\", \"roles\": [\"member\"]}\n response = self.client.post(url, data, format=\"json\", headers=headers)\n\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.json(), {\"notes\": [\"task created\"]})\n\n self.assertEqual(len(mail.outbox), 2)\n\n self.assertEqual(set(mail.outbox[0].to), set([\"[email protected]\"]))\n self.assertEqual(mail.outbox[0].subject, \"invite_user_to_project_additional\")\n\n # Test that the token email gets sent to the other addresses\n self.assertEqual(mail.outbox[1].to[0], \"[email protected]\")\n\n new_token = Token.objects.all()[0]\n url = \"/v1/tokens/\" + new_token.token\n data = {\"password\": \"testpassword\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def test_admin_approval_email(self):\n new_user = UserModel().objects.create_user(**self.user_info)\n profile = self.registration_profile.objects.create_profile(new_user)\n profile.activated = True\n self.registration_profile.objects.send_admin_approve_email(\n new_user, Site.objects.get_current())\n self.assertEqual(len(mail.outbox), 1)\n admins_emails = [value[1] for value in settings.REGISTRATION_ADMINS]\n for email in mail.outbox[0].to:\n self.assertIn(email, admins_emails)",
"def test_send_email_on_invite(self):\n\n league = self.create_league()\n\n season = self.create_season(league)\n team = self.create_team(season)\n\n player = self.create_player()\n\n send_user_email_on_join(player, team.id)\n\n self.assertEqual(len(mail.outbox), 1)\n\n # if testing manually:\n # import pathlib\n # pathlib.Path(\"test_email.html\").write_text(last_sent.body)",
"def test_send_to_all(self):\r\n # Now we know we have pulled up the instructor dash's email view\r\n # (in the setUp method), we can test sending an email.\r\n\r\n test_email = {\r\n 'action': 'Send email',\r\n 'send_to': 'all',\r\n 'subject': 'test subject for all',\r\n 'message': 'test message for all'\r\n }\r\n # Post the email to the instructor dashboard API\r\n response = self.client.post(self.send_mail_url, test_email)\r\n self.assertEquals(json.loads(response.content), self.success_content)\r\n\r\n self.assertEquals(len(mail.outbox), 1 + len(self.staff) + len(self.students))\r\n self.assertItemsEqual(\r\n [e.to[0] for e in mail.outbox],\r\n [self.instructor.email] + [s.email for s in self.staff] + [s.email for s in self.students]\r\n )",
"def test_mail_admin_on_pending(self):\r\n\r\n def check_admin_message_state(state, expect_sent_to_admin, expect_sent_to_user):\r\n \"\"\" Changes user state and verifies e-mail sent to admin address only when pending. \"\"\"\r\n mail.outbox = []\r\n self._change_state(state)\r\n\r\n # If a message is sent to the user about course creator status change, it will be the first\r\n # message sent. Admin message will follow.\r\n base_num_emails = 1 if expect_sent_to_user else 0\r\n if expect_sent_to_admin:\r\n context = {'user_name': \"test_user\", 'user_email': '[email protected]'}\r\n self.assertEquals(base_num_emails + 1, len(mail.outbox), 'Expected admin message to be sent')\r\n sent_mail = mail.outbox[base_num_emails]\r\n self.assertEquals(\r\n mock_render_to_string('emails/course_creator_admin_subject.txt', context),\r\n sent_mail.subject\r\n )\r\n self.assertEquals(\r\n mock_render_to_string('emails/course_creator_admin_user_pending.txt', context),\r\n sent_mail.body\r\n )\r\n self.assertEquals(self.studio_request_email, sent_mail.from_email)\r\n self.assertEqual([self.studio_request_email], sent_mail.to)\r\n else:\r\n self.assertEquals(base_num_emails, len(mail.outbox))\r\n\r\n with mock.patch.dict('django.conf.settings.FEATURES', self.enable_creator_group_patch):\r\n # E-mail message should be sent to admin only when new state is PENDING, regardless of what\r\n # previous state was (unless previous state was already PENDING).\r\n # E-mail message sent to user only on transition into and out of GRANTED state.\r\n check_admin_message_state(CourseCreator.UNREQUESTED, expect_sent_to_admin=False, expect_sent_to_user=False)\r\n check_admin_message_state(CourseCreator.PENDING, expect_sent_to_admin=True, expect_sent_to_user=False)\r\n check_admin_message_state(CourseCreator.GRANTED, expect_sent_to_admin=False, expect_sent_to_user=True)\r\n check_admin_message_state(CourseCreator.DENIED, expect_sent_to_admin=False, expect_sent_to_user=True)\r\n check_admin_message_state(CourseCreator.GRANTED, expect_sent_to_admin=False, expect_sent_to_user=True)\r\n check_admin_message_state(CourseCreator.PENDING, expect_sent_to_admin=True, expect_sent_to_user=True)\r\n check_admin_message_state(CourseCreator.PENDING, expect_sent_to_admin=False, expect_sent_to_user=False)\r\n check_admin_message_state(CourseCreator.DENIED, expect_sent_to_admin=False, expect_sent_to_user=True)",
"def test_optin_course(self):\r\n url = reverse('change_email_settings')\r\n response = self.client.post(url, {'course_id': self.course.id.to_deprecated_string(), 'receive_emails': 'on'})\r\n self.assertEquals(json.loads(response.content), {'success': True})\r\n\r\n self.client.logout()\r\n\r\n self.assertTrue(CourseEnrollment.is_enrolled(self.student, self.course.id))\r\n\r\n self.client.login(username=self.instructor.username, password=\"test\")\r\n self.navigate_to_email_view()\r\n\r\n test_email = {\r\n 'action': 'Send email',\r\n 'send_to': 'all',\r\n 'subject': 'test subject for all',\r\n 'message': 'test message for all'\r\n }\r\n response = self.client.post(self.send_mail_url, test_email)\r\n self.assertEquals(json.loads(response.content), self.success_content)\r\n\r\n # Assert that self.student.email in mail.to\r\n self.assertEqual(len(mail.outbox), 1)\r\n self.assertEqual(len(mail.outbox[0].to), 1)\r\n self.assertEquals(mail.outbox[0].to[0], self.student.email)",
"def send_role_notification_email(\n inviter_id, recipient_id, recipient_role, exploration_id,\n exploration_title):\n\n # Editor role email body and email subject templates.\n email_subject_template = (\n '%s - invitation to collaborate')\n\n email_body_template = (\n 'Hi %s,<br>'\n '<br>'\n '<b>%s</b> has granted you %s to their exploration, '\n '\"<a href=\"https://www.oppia.org/create/%s\">%s</a>\", on Oppia.org.<br>'\n '<br>'\n 'This allows you to:<br>'\n '<ul>%s</ul>'\n 'You can find the exploration '\n '<a href=\"https://www.oppia.org/create/%s\">here</a>.<br>'\n '<br>'\n 'Thanks, and happy collaborating!<br>'\n '<br>'\n 'Best wishes,<br>'\n 'The Oppia Team<br>'\n '<br>%s')\n\n # Return from here if sending email is turned off.\n if not feconf.CAN_SEND_EMAILS:\n log_new_error('This app cannot send emails to users.')\n return\n\n # Return from here is sending editor role email is disabled.\n if not feconf.CAN_SEND_EDITOR_ROLE_EMAILS:\n log_new_error('This app cannot send editor role emails to users.')\n return\n\n recipient_user_settings = user_services.get_user_settings(recipient_id)\n inviter_user_settings = user_services.get_user_settings(inviter_id)\n recipient_preferences = user_services.get_email_preferences(recipient_id)\n\n if not recipient_preferences.can_receive_editor_role_email:\n # Do not send email if recipient has declined.\n return\n\n if recipient_role not in EDITOR_ROLE_EMAIL_HTML_ROLES:\n raise Exception(\n 'Invalid role: %s' % recipient_role)\n\n role_description = EDITOR_ROLE_EMAIL_HTML_ROLES[recipient_role]\n rights_html = EDITOR_ROLE_EMAIL_RIGHTS_FOR_ROLE[role_description]\n\n email_subject = email_subject_template % exploration_title\n email_body = email_body_template % (\n recipient_user_settings.username, inviter_user_settings.username,\n role_description, exploration_id, exploration_title, rights_html,\n exploration_id, EMAIL_FOOTER.value)\n\n _send_email(\n recipient_id, feconf.SYSTEM_COMMITTER_ID,\n feconf.EMAIL_INTENT_EDITOR_ROLE_NOTIFICATION, email_subject, email_body,\n feconf.NOREPLY_EMAIL_ADDRESS,\n sender_name=inviter_user_settings.username)",
"def test_invitation_email(self):\n queryset = models.Invitation.objects.filter(id=self.invitation.id)\n self.admin_instance.send_new_activation_email(self.some_request, queryset)\n # check whether there is a mail in the outbox\n self.assertEqual(len(mail.outbox), 1)\n # check subject\n self.assertEqual(\n mail.outbox[0].subject,\n \"Er is een account voor u aangemaakt op sso.lizard.net\",\n )\n self.assertEqual(mail.outbox[0].to, [\"[email protected]\"])\n # check mail starts with 'Hallo Reinout,'\n self.assertTrue(mail.outbox[0].body.startswith(\"Hallo Reinout,\"))",
"def test_send_to_staff(self):\r\n # Now we know we have pulled up the instructor dash's email view\r\n # (in the setUp method), we can test sending an email.\r\n test_email = {\r\n 'action': 'Send email',\r\n 'send_to': 'staff',\r\n 'subject': 'test subject for staff',\r\n 'message': 'test message for subject'\r\n }\r\n # Post the email to the instructor dashboard API\r\n response = self.client.post(self.send_mail_url, test_email)\r\n self.assertEquals(json.loads(response.content), self.success_content)\r\n\r\n # the 1 is for the instructor in this test and others\r\n self.assertEquals(len(mail.outbox), 1 + len(self.staff))\r\n self.assertItemsEqual(\r\n [e.to[0] for e in mail.outbox],\r\n [self.instructor.email] + [s.email for s in self.staff]\r\n )",
"def test_send_mail_authorized(self):\r\n\r\n course_authorization = CourseAuthorization(course_id=self.course.id, email_enabled=True)\r\n course_authorization.save()\r\n\r\n session = self.client.session\r\n session[u'idash_mode:{0}'.format(self.course.location.course_key.to_deprecated_string())] = 'Email'\r\n session.save()\r\n\r\n response = self.client.post(\r\n self.url, {\r\n 'action': 'Send email',\r\n 'to_option': 'all',\r\n 'subject': 'Welcome to the course!',\r\n 'message': 'Lets start with an introduction!',\r\n }\r\n )\r\n self.assertContains(response, \"Your email was successfully queued for sending.\")",
"def test_admin_approval_complete_email(self):\n new_user = UserModel().objects.create_user(**self.user_info)\n profile = self.registration_profile.objects.create_profile(new_user)\n profile.send_admin_approve_complete_email(Site.objects.get_current())\n self.assertEqual(len(mail.outbox), 1)\n self.assertEqual(mail.outbox[0].to, [self.user_info['email']])",
"def test_00_mail_access_rights(self):\n cr, uid = self.cr, self.uid\n mail_compose = self.registry('mail.compose.message')\n\n # Prepare group: Pigs and PigsPortal\n pigs_msg_id = self.mail_group.message_post(cr, uid, self.group_pigs_id, body='Message')\n port_msg_id = self.mail_group.message_post(cr, uid, self.group_port_id, body='Message')\n\n # Do: Chell browses Pigs -> ko, employee group\n chell_pigs = self.mail_group.browse(cr, self.user_chell_id, self.group_pigs_id)\n with self.assertRaises(except_orm):\n trigger_read = chell_pigs.name\n\n # Do: Chell posts a message on Pigs, crash because can not write on group or is not in the followers\n with self.assertRaises(except_orm):\n self.mail_group.message_post(cr, self.user_chell_id, self.group_pigs_id, body='Message')\n\n # Do: Chell is added into Pigs followers and browse it -> ok for messages, ko for partners (no read permission)\n self.mail_group.message_subscribe_users(cr, uid, [self.group_pigs_id], [self.user_chell_id])\n chell_pigs = self.mail_group.browse(cr, self.user_chell_id, self.group_pigs_id)\n trigger_read = chell_pigs.name\n for message in chell_pigs.message_ids:\n trigger_read = message.subject\n for partner in chell_pigs.message_follower_ids:\n with self.assertRaises(except_orm):\n trigger_read = partner.name\n\n # Do: Chell comments Pigs, ok because he is now in the followers\n self.mail_group.message_post(cr, self.user_chell_id, self.group_pigs_id, body='I love Pigs')\n # Do: Chell creates a mail.compose.message record on Pigs, because he uses the wizard\n compose_id = mail_compose.create(cr, self.user_chell_id,\n {'subject': 'Subject', 'body': 'Body text', 'partner_ids': []},\n {'default_composition_mode': 'comment', 'default_model': 'mail.group', 'default_res_id': self.group_pigs_id})\n mail_compose.send_mail(cr, self.user_chell_id, [compose_id])\n # Do: Chell replies to a Pigs message using the composer\n compose_id = mail_compose.create(cr, self.user_chell_id,\n {'subject': 'Subject', 'body': 'Body text'},\n {'default_composition_mode': 'reply', 'default_parent_id': pigs_msg_id})\n mail_compose.send_mail(cr, self.user_chell_id, [compose_id])\n\n # Do: Chell browses PigsPortal -> ok because groups security, ko for partners (no read permission)\n chell_port = self.mail_group.browse(cr, self.user_chell_id, self.group_port_id)\n trigger_read = chell_port.name\n for message in chell_port.message_ids:\n trigger_read = message.subject\n for partner in chell_port.message_follower_ids:\n with self.assertRaises(except_orm):\n trigger_read = partner.name",
"def test_email_reminders(self, mock_tz):\n mock_tz.now.return_value = datetime(\n 2015, 2, 10, 19, 0, tzinfo=dt_timezone.utc\n )\n\n # cancellation period starts 2015/2/11 18:00\n event = baker.make_recipe(\n 'booking.future_EV',\n date=datetime(2015, 2, 12, 18, 0, tzinfo=dt_timezone.utc),\n payment_open=True,\n cost=10,\n cancellation_period=24)\n # cancellation period starts 2015/2/12 18:00\n event1 = baker.make_recipe(\n 'booking.future_EV',\n date=datetime(2015, 2, 13, 18, 0, tzinfo=dt_timezone.utc),\n payment_open=True,\n cost=10,\n cancellation_period=24)\n baker.make_recipe(\n 'booking.booking', event=event, _quantity=5,\n )\n baker.make_recipe(\n 'booking.booking', event=event1, _quantity=5,\n )\n # add user emails\n _add_user_email_addresses(Booking)\n\n management.call_command('email_reminders')\n # emails are only sent for event1\n self.assertEqual(len(mail.outbox), 5)",
"def test_correct_roles(self):\r\n users_with_invalid_role = []\r\n\r\n agents = self.selenium.get_agents_data()\r\n for email, expected_role in self.new_agents.items():\r\n for agent in agents:\r\n if agent[\"email\"] == email and agent[\"role\"] != expected_role:\r\n users_with_invalid_role.append({email: f\"should be {expected_role}, but is {agent['role']}\"})\r\n self.assertFalse(users_with_invalid_role, msg=users_with_invalid_role)",
"def test_send_subscribe_email(self):\n #Verifica se foi enviado 1 e-mail, o este não envia e-mail\n self.assertEqual(1, len(mail.outbox))",
"def test_using_invite_use_host_in_from_email(self, send_mass_html_mail__mock: Mock):\n events = Event.objects.filter(pk=self.event.pk)\n\n admin.EventAdmin.send_mail(Mock(), None, events)\n\n to_send = list(send_mass_html_mail__mock.call_args[0][0])\n from_email = to_send[0][3]\n self.assertEqual(from_email, \"Marie <[email protected]>\")",
"def test_update_email_task_send_email_current_name_not_email(self):\n\n user = fake_clients.FakeUser(\n name=\"nkdfslnkls\", password=\"123\", email=\"[email protected]\"\n )\n\n setup_identity_cache(users=[user])\n\n url = \"/v1/actions/UpdateEmail\"\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": \"test_project_id\",\n \"roles\": \"project_admin,member,project_mod\",\n \"username\": \"nkdfslnkls\",\n \"user_id\": user.id,\n \"authenticated\": True,\n \"email\": \"[email protected]\",\n }\n\n data = {\"new_email\": \"[email protected]\"}\n response = self.client.post(url, data, format=\"json\", headers=headers)\n\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.data, {\"notes\": [\"task created\"]})\n\n self.assertEqual(len(mail.outbox), 2)\n self.assertEqual(mail.outbox[0].to, [\"[email protected]\"])\n self.assertEqual(mail.outbox[0].subject, \"update_user_email_additional\")\n\n self.assertEqual(mail.outbox[1].to, [\"[email protected]\"])\n self.assertEqual(mail.outbox[1].subject, \"update_user_email_token\")\n\n new_token = Token.objects.all()[0]\n url = \"/v1/tokens/\" + new_token.token\n\n data = {\"confirm\": True}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n self.assertEqual(len(mail.outbox), 3)",
"def test_send_mail_unauthorized(self):\r\n\r\n response = self.client.post(\r\n self.url, {\r\n 'action': 'Send email',\r\n 'to_option': 'all',\r\n 'subject': \"Welcome to the course!\",\r\n 'message': \"Lets start with an introduction!\"\r\n }\r\n )\r\n self.assertContains(response, \"Email is not enabled for this course.\")",
"def test_send_email(self):\n\t\trecipient = \"\"\n\t\tself.email.send_email(self.subject, recipient, self.content)",
"def test_update_email_task_send_email_to_current_user(self):\n\n user = fake_clients.FakeUser(\n name=\"[email protected]\", password=\"123\", email=\"[email protected]\"\n )\n\n setup_identity_cache(users=[user])\n\n url = \"/v1/actions/UpdateEmail\"\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": \"test_project_id\",\n \"roles\": \"project_admin,member,project_mod\",\n \"username\": \"[email protected]\",\n \"user_id\": user.id,\n \"authenticated\": True,\n }\n\n data = {\"new_email\": \"[email protected]\"}\n response = self.client.post(url, data, format=\"json\", headers=headers)\n\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.data, {\"notes\": [\"task created\"]})\n\n self.assertEqual(len(mail.outbox), 2)\n\n self.assertEqual(mail.outbox[0].to, [\"[email protected]\"])\n self.assertEqual(mail.outbox[0].subject, \"update_user_email_additional\")\n\n self.assertEqual(mail.outbox[1].to, [\"[email protected]\"])\n self.assertEqual(mail.outbox[1].subject, \"update_user_email_token\")\n\n new_token = Token.objects.all()[0]\n url = \"/v1/tokens/\" + new_token.token\n\n data = {\"confirm\": True}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(user.name, \"[email protected]\")\n\n self.assertEqual(len(mail.outbox), 3)",
"def test_unicode_students_send_to_all(self):\r\n # Now we know we have pulled up the instructor dash's email view\r\n # (in the setUp method), we can test sending an email.\r\n\r\n # Create a student with Unicode in their first & last names\r\n unicode_user = UserFactory(first_name=u'Ⓡⓞⓑⓞⓣ', last_name=u'ՇﻉรՇ')\r\n CourseEnrollmentFactory.create(user=unicode_user, course_id=self.course.id)\r\n self.students.append(unicode_user)\r\n\r\n test_email = {\r\n 'action': 'Send email',\r\n 'send_to': 'all',\r\n 'subject': 'test subject for all',\r\n 'message': 'test message for all'\r\n }\r\n # Post the email to the instructor dashboard API\r\n response = self.client.post(self.send_mail_url, test_email)\r\n self.assertEquals(json.loads(response.content), self.success_content)\r\n\r\n self.assertEquals(len(mail.outbox), 1 + len(self.staff) + len(self.students))\r\n\r\n self.assertItemsEqual(\r\n [e.to[0] for e in mail.outbox],\r\n [self.instructor.email] + [s.email for s in self.staff] + [s.email for s in self.students]\r\n )",
"def test_email():\n recipients = configs[\"email_to\"].split(\", \")\n email_body = test_email_content()\n if configs[\"smtp_ssl\"] == 1:\n server = smtplib.SMTP_SSL(configs[\"smtp_server\"])\n elif configs[\"smtp_tls\"] == 1:\n server = smtplib.SMTP(configs[\"smtp_server\"])\n server.starttls()\n else:\n server = smtplib.SMTP(configs[\"smtp_server\"])\n\n if configs[\"smtp_authentication\"] == 1:\n server.login(configs[\"username\"], configs[\"password\"])\n\n server.sendmail(configs[\"email_from\"], recipients, email_body)\n server.quit()",
"def test_staff_emails(self):\n self.assertEqual(\n JenkinsUser.objects.get_staff_emails(),\n {'[email protected]'})",
"def test_sendEmailVerification(self, testUser):\n with mail.record_messages() as outbox:\n testUser.send_email_verification()\n assert len(outbox) == 1\n msg = outbox[0]\n assert \"[email protected]\" in msg.recipients\n assert msg.subject == 'Ask Your Peeps: Email Verification'\n assert 'To verify your email' in msg.body\n assert 'Dear John' in msg.body",
"def test_only_send_one_email_to_studio(self, mock_tz):\n mock_tz.now.return_value = datetime(2015, 2, 10, 10, tzinfo=dt_timezone.utc)\n for i in range(5):\n baker.make_recipe(\n 'booking.booking', event=self.event,\n status='OPEN', paid=False,\n payment_confirmed=False,\n user__email=\"unpaid_user{}@test.com\".format(i),\n date_booked= datetime(2015, 2, 9, tzinfo=dt_timezone.utc),\n warning_sent=True,\n date_warning_sent= datetime(2015, 2, 9, 2, tzinfo=dt_timezone.utc),\n )\n\n management.call_command('cancel_unpaid_bookings')\n # emails are sent to user per cancelled booking (6) and studio once\n # for all cancelled bookings\n unpaid_booking = Booking.objects.get(id=self.unpaid.id)\n self.assertEqual(len(mail.outbox), 7)\n self.assertEqual(\n unpaid_booking.status, 'CANCELLED', unpaid_booking.status\n )\n self.assertEqual(\n Booking.objects.filter(status='CANCELLED').count(), 6\n )\n cancelled_booking_emails = [\n [booking.user.email] for booking\n in Booking.objects.filter(status='CANCELLED')\n ]\n all_emails = cancelled_booking_emails + [[settings.DEFAULT_STUDIO_EMAIL]]\n self.assertEqual(\n sorted(all_emails),\n sorted([email.to for email in mail.outbox])\n )",
"def send_membership_email(to_emails, title, body, receiver_names=None):\n send_email(\n to_emails,\n title,\n body,\n receiver_names=receiver_names,\n from_email=settings.MEMBERSHIP_EMAIL_HOST_USER,\n from_name='UWCC Membership',\n smtp_password=settings.MEMBERSHIP_EMAIL_HOST_PASSWORD,\n )",
"def test_50_mail_flow_access_rights(self):\n cr, uid = self.cr, self.uid\n mail_compose = self.registry('mail.compose.message')\n partner_bert_id, partner_raoul_id = self.partner_bert_id, self.partner_raoul_id\n user_bert_id, user_raoul_id = self.user_bert_id, self.user_raoul_id\n\n # Prepare groups: Pigs (employee), Jobs (public)\n pigs_msg_id = self.mail_group.message_post(cr, uid, self.group_pigs_id, body='Message', partner_ids=[(4, self.partner_admin_id)])\n jobs_msg_id = self.mail_group.message_post(cr, uid, self.group_jobs_id, body='Message', partner_ids=[(4, self.partner_admin_id)])\n\n # ----------------------------------------\n # CASE1: Bert, without groups\n # ----------------------------------------\n\n # Do: Bert reads Jobs basic fields, ok because public = read access on the group\n self.mail_group.read(cr, user_bert_id, self.group_jobs_id, ['name', 'description'])\n # Do: Bert reads Jobs messages, ok because read access on the group => read access on its messages\n jobs_message_ids = self.mail_group.read(cr, user_bert_id, self.group_jobs_id, ['message_ids'])['message_ids']\n self.mail_message.read(cr, user_bert_id, jobs_message_ids)\n # Do: Bert browses Jobs, ok (no direct browse of partners), ok for messages, ko for followers (accessible to employees or partner manager)\n bert_jobs = self.mail_group.browse(cr, user_bert_id, self.group_jobs_id)\n trigger_read = bert_jobs.name\n for message in bert_jobs.message_ids:\n trigger_read = message.subject\n for partner in bert_jobs.message_follower_ids:\n with self.assertRaises(except_orm):\n trigger_read = partner.name\n # Do: Bert comments Jobs, ko because no creation right\n self.assertRaises(except_orm,\n self.mail_group.message_post,\n cr, user_bert_id, self.group_jobs_id, body='I love Pigs')\n\n # Do: Bert writes on its own profile, ko because no message create access\n with self.assertRaises(except_orm):\n self.res_users.message_post(cr, user_bert_id, user_bert_id, body='I love Bert')\n self.res_partner.message_post(cr, user_bert_id, partner_bert_id, body='I love Bert')\n\n # ----------------------------------------\n # CASE2: Raoul, employee\n # ----------------------------------------\n\n # Do: Raoul browses Jobs -> ok, ok for message_ids, of for message_follower_ids\n raoul_jobs = self.mail_group.browse(cr, user_raoul_id, self.group_jobs_id)\n trigger_read = raoul_jobs.name\n for message in raoul_jobs.message_ids:\n trigger_read = message.subject\n for partner in raoul_jobs.message_follower_ids:\n trigger_read = partner.name\n\n # Do: Raoul comments Jobs, ok\n self.mail_group.message_post(cr, user_raoul_id, self.group_jobs_id, body='I love Pigs')\n # Do: Raoul create a mail.compose.message record on Jobs, because he uses the wizard\n compose_id = mail_compose.create(cr, user_raoul_id,\n {'subject': 'Subject', 'body': 'Body text', 'partner_ids': []},\n {'default_composition_mode': 'comment', 'default_model': 'mail.group', 'default_res_id': self.group_jobs_id})\n mail_compose.send_mail(cr, user_raoul_id, [compose_id])\n # Do: Raoul replies to a Jobs message using the composer\n compose_id = mail_compose.create(cr, user_raoul_id,\n {'subject': 'Subject', 'body': 'Body text'},\n {'default_composition_mode': 'reply', 'default_parent_id': pigs_msg_id})\n mail_compose.send_mail(cr, user_raoul_id, [compose_id])",
"def test_email_additional_action_invalid(self):\n\n setup_identity_cache()\n\n url = \"/v1/actions/InviteUser\"\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": \"test_project_id\",\n \"roles\": \"project_admin,member,project_mod\",\n \"username\": \"[email protected]\",\n \"user_id\": \"test_user_id\",\n \"authenticated\": True,\n }\n data = {\n \"email\": \"[email protected]\",\n \"roles\": [\"member\"],\n \"project_id\": \"test_project_id\",\n }\n response = self.client.post(url, data, format=\"json\", headers=headers)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.json(), {\"errors\": [\"actions invalid\"]})\n self.assertEqual(len(mail.outbox), 0)",
"def test_email(self):\n # No email should be send\n self.assertEqual(len(mail.outbox), 0)\n\n # enable plugin and set mail setting to true\n plugin = registry.plugins.get('inventreecorenotificationsplugin')\n plugin.set_setting('ENABLE_NOTIFICATION_EMAILS', True)\n NotificationUserSetting.set_setting(\n key='NOTIFICATION_METHOD_MAIL',\n value=True,\n change_user=self.user,\n user=self.user,\n method=InvenTreeCoreNotificationsPlugin.EmailNotification.METHOD_NAME\n )\n\n # run through\n self._notification_run(InvenTreeCoreNotificationsPlugin.EmailNotification)\n\n # Now one mail should be send\n self.assertEqual(len(mail.outbox), 1)"
] | [
"0.77477735",
"0.7315475",
"0.6827417",
"0.66901034",
"0.66202974",
"0.6550147",
"0.6497265",
"0.64453137",
"0.6418991",
"0.6414361",
"0.64034605",
"0.6370533",
"0.62843436",
"0.6227989",
"0.62265706",
"0.6219871",
"0.6216618",
"0.61955845",
"0.61282665",
"0.6121579",
"0.61214656",
"0.61187124",
"0.6084827",
"0.60799384",
"0.60652864",
"0.60407305",
"0.6031134",
"0.6024476",
"0.60213864",
"0.60085803"
] | 0.8471276 | 0 |
The additional email actions should not send an email if the action is invalid. | def test_email_additional_action_invalid(self):
setup_identity_cache()
url = "/v1/actions/InviteUser"
headers = {
"project_name": "test_project",
"project_id": "test_project_id",
"roles": "project_admin,member,project_mod",
"username": "[email protected]",
"user_id": "test_user_id",
"authenticated": True,
}
data = {
"email": "[email protected]",
"roles": ["member"],
"project_id": "test_project_id",
}
response = self.client.post(url, data, format="json", headers=headers)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.json(), {"errors": ["actions invalid"]})
self.assertEqual(len(mail.outbox), 0) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_email_disabled(self):\r\n test_email = {\r\n 'action': 'Send email',\r\n 'send_to': 'myself',\r\n 'subject': 'test subject for myself',\r\n 'message': 'test message for myself'\r\n }\r\n response = self.client.post(self.send_mail_url, test_email)\r\n # We should get back a HttpResponseForbidden (status code 403)\r\n self.assertContains(response, \"Email is not enabled for this course.\", status_code=403)",
"def test_send_mail_unauthorized(self):\r\n\r\n response = self.client.post(\r\n self.url, {\r\n 'action': 'Send email',\r\n 'to_option': 'all',\r\n 'subject': \"Welcome to the course!\",\r\n 'message': \"Lets start with an introduction!\"\r\n }\r\n )\r\n self.assertContains(response, \"Email is not enabled for this course.\")",
"def test_skip_blank_emails(self):\n appt_date = datetime.date.today() + datetime.timedelta(days=7) # Default for email\n confirmed = self.create_confirmed_notification(self.test_patient, appt_date)\n\n blank_contact = self.create_contact(data={'email': ''})\n self.group.contacts.add(blank_contact)\n\n # run email job\n from aremind.apps.reminders.app import daily_email_callback\n daily_email_callback(self.router)\n\n self.assertEqual(len(mail.outbox), 1)\n message = mail.outbox[0]\n self.assertEqual(len(message.to), 1)",
"def test_skip_blank_emails(self):\n appt_date = datetime.date.today() + datetime.timedelta(days=7) # Default for email\n reminders.Patient.objects.filter(\n pk__in=[self.test_patient.pk, self.other_patient.pk]\n ).update(next_visit=appt_date)\n confirmed = self.create_confirmed_notification(self.test_patient, appt_date)\n blank_contact = self.create_contact(data={'email': ''})\n null_contact = self.create_contact(data={'email': None})\n self.group.contacts.add(blank_contact)\n self.group.contacts.add(null_contact)\n\n self.startRouter()\n self.router.logger.setLevel(logging.DEBUG)\n # run email job\n from afrims.apps.reminders.app import daily_email_callback\n daily_email_callback(self.router)\n\n self.assertEqual(len(mail.outbox), 1)\n message = mail.outbox[0]\n self.assertEqual(len(message.to), 1)\n self.stopRouter()",
"def test_email_sent_on_failure(self):\n self._authorize()\n data = {\n 'Subject_Number': '000-1111',\n 'Pin_Code': '1234',\n 'Date_Enrolled': datetime.datetime.now().strftime('%b %d %Y '),\n 'Mobile_Number': '2223334444',\n }\n patient = self.create_xml_patient(data)\n payload = self.create_xml_payload([patient])\n response = self._post(payload)\n self.assertEqual(response.status_code, 500)\n self.assertEqual(len(mail.outbox), 1)",
"def test_nonexistent_to_option(self):\r\n email = CourseEmail(course_id=self.course.id, to_option=\"IDONTEXIST\")\r\n email.save()\r\n entry = InstructorTask.create(self.course.id, \"task_type\", \"task_key\", \"task_input\", self.instructor)\r\n task_input = {\"email_id\": email.id} # pylint: disable=E1101\r\n with self.assertRaisesRegexp(Exception, 'Unexpected bulk email TO_OPTION found: IDONTEXIST'):\r\n perform_delegate_email_batches(entry.id, self.course.id, task_input, \"action_name\") # pylint: disable=E1101\r",
"def test_bad_action(self):\r\n action = 'robot-not-an-action'\r\n url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})\r\n response = self.client.get(url, {'identifiers': self.beta_tester.email, 'action': action})\r\n self.assertEqual(response.status_code, 400)",
"def confirm_email(self):\n # The base class' implementation does nothing\n pass",
"def test_failed_email(self):\n self.assertEqual(send_email(\"testtestcom\", \"test\", \"test\"), 'There was an error sending')",
"def test_resend_activation_email_nonexistent_user(self):\n self.assertFalse(self.registration_profile.objects.resend_activation_mail(\n email=self.user_info['email'],\n site=Site.objects.get_current(),\n ))\n self.assertEqual(len(mail.outbox), 0)",
"def test_bad_action(self):\r\n action = 'robot-not-an-action'\r\n url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})\r\n response = self.client.get(url, {'identifiers': self.enrolled_student.email, 'action': action})\r\n self.assertEqual(response.status_code, 400)",
"def _confirm_action(self, action):\n\t\treturn True",
"def require_moderator_email_prereqs_are_satisfied():\n\n if not feconf.REQUIRE_EMAIL_ON_MODERATOR_ACTION:\n raise Exception(\n 'For moderator emails to be sent, please ensure that '\n 'REQUIRE_EMAIL_ON_MODERATOR_ACTION is set to True.')\n if not feconf.CAN_SEND_EMAILS:\n raise Exception(\n 'For moderator emails to be sent, please ensure that '\n 'CAN_SEND_EMAILS is set to True.')",
"def test_send_mass_html_mail_to_send_no_email(self, send_mass_html_mail__mock: Mock):\n self.family.guests.add(\n Guest(name=\"Pierre\", email=None, phone=\"0123456789\", female=False, family=self.family),\n bulk=False\n )\n events = Event.objects.filter(pk=self.event.pk)\n\n admin.EventAdmin.send_mail(Mock(), None, events)\n\n recipient = list(send_mass_html_mail__mock.call_args[0][0])[0][4]\n self.assertListEqual(list(recipient),\n [\"Françoise <[email protected]>\", \"Jean <[email protected]>\"])",
"def test_skip_if_no_patients(self):\n\n appt_date = datetime.date.today() + datetime.timedelta(days=5)\n confirmed = self.create_confirmed_notification(self.test_patient, appt_date)\n\n # run email job\n from aremind.apps.reminders.app import daily_email_callback\n daily_email_callback(self.router)\n\n self.assertEqual(len(mail.outbox), 0)",
"def send_mail_when_failed(self, body):\r\n pass",
"def test_invalid_action(self):\n subject = self.subject(1, name='test_marker')\n with self.assertRaises(NotImplementedError) as context:\n subject.action\n with self.assertRaises(NotImplementedError) as context:\n subject.get_action_url()",
"def cant(user, action):\n\n return not can(user, action)",
"def test_no_email(self):\n user = self.make_user()\n data: dict = {}\n\n with self.login(user):\n response = self.post(\"referrals:create\", data=data)\n\n message = list(get_messages(response.wsgi_request))[0]\n assert str(message) == \"'missing email' is an invalid email address.\"",
"def test_with_unpermitted_token(self):\n email_text = self.email_template % self.token.uuid\n assert not save_from_email_reply(email_text)",
"def test_bad_email(self):\n user = self.make_user()\n data = {\"email\": \"bogus\"}\n\n with self.login(user):\n response = self.post(\"referrals:create\", data=data)\n\n assert response.status_code == 302\n assert response[\"Location\"] == self.reverse(\"settings:dashboard\")\n message = list(get_messages(response.wsgi_request))[0]\n assert str(message) == \"'bogus' is an invalid email address.\"",
"def _generate_action_mail(self, entry):\n actions = []\n\n if 'collection' in entry:\n if entry['collection'] in self.collections['mail']:\n collection = self.collections['mail'][entry['collection']]\n else:\n collection = self.collections['mail']['default']\n else:\n collection = self.collections['mail']['default']\n\n for _ in range(0, entry['amount']):\n if 'subject' in entry:\n subject = entry['subject']\n else:\n if len(collection['subjects']) > 0:\n subject = random.choice(collection['subjects'])\n else:\n subject = random.choice(self.collections['mail']['default']['recipients'])\n\n if 'recipient' in entry:\n recipient = entry['recipient']\n else:\n if len(collection['recipients']) > 0:\n recipient = random.choice(collection['recipients'])\n else:\n recipient = random.choice(self.collections['mail']['default']['recipients'])\n\n if 'message' in entry:\n message = entry['message']\n else:\n if len(collection['messages']) > 0:\n message = random.choice(collection['messages'])\n else:\n message = random.choice(self.collections['mail']['default']['messages'])\n\n if 'attachments' in entry:\n attachments = entry['attachments']\n else:\n if len(collection['attachments']) > 0:\n attachments = [random.choice(collection['attachments'])]\n else:\n attachments = [random.choice(self.collections['mail']['default']['attachments'])]\n\n actions.append(\n {'type': 'mail',\n 'application': entry['application'],\n 'recipient': recipient,\n 'subject': subject,\n 'message': message,\n 'attachments': attachments})\n\n return actions",
"def test_activation_email_missing_template(self):\n new_user = UserModel().objects.create_user(**self.user_info)\n profile = self.registration_profile.objects.create_profile(new_user)\n profile.send_activation_email(Site.objects.get_current())\n self.assertEqual(len(mail.outbox), 1)\n self.assertEqual(mail.outbox[0].to, [self.user_info['email']])",
"def __call__(self):\n if any(assignee.actual_time is None for assignee in self.order.assignees.all()):\n raise ValidationError({\n api_settings.NON_FIELD_ERRORS_KEY: self.message,\n })",
"def test_send_mail_without_mail(self):\n event_without_mail = self.create_event(self.family, name=None)\n fadm = admin.EventAdmin(Event, self.site)\n with patch.object(fadm, \"message_user\") as message_user_mock:\n fadm.send_mail(\"Request\", [self.event, event_without_mail])\n message_user_mock.assert_called_once_with(\n \"Request\", \"The event of the 2018-12-31 has no email template set\",\n admin.messages.ERROR)",
"def test_resend_activation_email_nonunique_email(self):\n user1 = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), send_email=False, **self.user_info)\n user2_info = copy(self.user_info)\n user2_info['username'] = 'bob'\n user2 = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), send_email=False, **user2_info)\n self.assertEqual(user1.email, user2.email)\n self.assertFalse(self.registration_profile.objects.resend_activation_mail(\n email=self.user_info['email'],\n site=Site.objects.get_current(),\n ))\n self.assertEqual(len(mail.outbox), 0)",
"def test_skip_if_no_patients(self):\n\n appt_date = datetime.date.today() + datetime.timedelta(days=5)\n reminders.Patient.objects.filter(\n pk__in=[self.test_patient.pk, self.other_patient.pk]\n ).update(next_visit=appt_date)\n confirmed = self.create_confirmed_notification(self.test_patient, appt_date)\n\n self.startRouter()\n self.router.logger.setLevel(logging.DEBUG)\n # run email job\n from afrims.apps.reminders.app import daily_email_callback\n daily_email_callback(self.router)\n\n self.assertEqual(len(mail.outbox), 0)\n self.stopRouter()",
"def handle_failure(self, instance, sender=None, reason=None):\n send_email.delay(\n to_email=sender,\n context={\"reason\": reason},\n subject=\"Kunne ikke sende ut begrenset epost\",\n plain_template=\"restricted/email/process_failure.txt\",\n html_template=\"restricted/email/process_failure.html\",\n )",
"def test_no_email(self):\n\n a = Agency(name=\"Broadcasting Board of Governors\", slug=\"brodcasting\")\n a.save()\n\n response = self.client.get(\n reverse('contact_landing', args=['broadcasting']))\n self.assertTrue(200, response.status_code)\n content = response.content.decode('utf-8')\n self.assertTrue('Request online' not in content)",
"def test_create_invalid_email(self):\n self.assertEqual(\n ProjectInvite.objects.filter(project=self.project).count(), 0\n )\n\n url = reverse(\n 'projectroles:api_invite_create',\n kwargs={'project': self.project.sodar_uuid},\n )\n post_data = {\n 'email': 'NOT_AN_EMAIL!',\n 'role': PROJECT_ROLE_CONTRIBUTOR,\n 'message': INVITE_MESSAGE,\n }\n response = self.request_knox(url, method='POST', data=post_data)\n\n self.assertEqual(response.status_code, 400, msg=response.content)\n self.assertEqual(\n ProjectInvite.objects.filter(project=self.project).count(), 0\n )\n self.assertEqual(len(mail.outbox), 0)"
] | [
"0.65730184",
"0.6376346",
"0.62104857",
"0.6187945",
"0.60576475",
"0.59244066",
"0.59205645",
"0.5912073",
"0.58782667",
"0.58319366",
"0.58272535",
"0.5803269",
"0.5791973",
"0.57378376",
"0.57144916",
"0.571216",
"0.57079375",
"0.5701372",
"0.56703043",
"0.5663843",
"0.5649258",
"0.5632727",
"0.5632221",
"0.55882657",
"0.5585367",
"0.55847",
"0.55785495",
"0.5563236",
"0.55614954",
"0.5560066"
] | 0.7289466 | 0 |
When can_edit_users is false, and a new user is invited, the task should be marked as invalid if the user doesn't already exist. | def test_user_invite_cant_edit_users(self):
project = fake_clients.FakeProject(name="test_project")
setup_identity_cache(projects=[project])
url = "/v1/actions/InviteUser"
headers = {
"project_name": "test_project",
"project_id": project.id,
"roles": "project_admin,member,project_mod",
"username": "user",
"user_id": "test_user_id",
"authenticated": True,
}
data = {
"username": "new_user",
"email": "[email protected]",
"roles": ["member"],
"project_id": project.id,
}
response = self.client.post(url, data, format="json", headers=headers)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.json(), {"errors": ["actions invalid"]}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_user_invite_cant_edit_users_existing_user(self):\n project = fake_clients.FakeProject(name=\"test_project\")\n\n user = fake_clients.FakeUser(name=\"[email protected]\")\n\n setup_identity_cache(projects=[project], users=[user])\n\n url = \"/v1/actions/InviteUser\"\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": project.id,\n \"roles\": \"project_admin,member,project_mod\",\n \"username\": \"user\",\n \"user_id\": \"test_user_id\",\n \"authenticated\": True,\n }\n data = {\n \"username\": \"new_user\",\n \"email\": \"[email protected]\",\n \"roles\": [\"member\"],\n \"project_id\": project.id,\n }\n response = self.client.post(url, data, format=\"json\", headers=headers)\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.json(), {\"notes\": [\"task created\"]})",
"def test_anonymous_user_update_user_taskrun(self):\r\n with self.flask_app.test_request_context('/'):\r\n user_taskrun = TaskRunFactory.create()\r\n\r\n assert_raises(Unauthorized,\r\n getattr(require, 'taskrun').update,\r\n user_taskrun)",
"def test_admin_update_user_taskrun(self):\r\n\r\n with self.flask_app.test_request_context('/'):\r\n user_taskrun = TaskRunFactory.create()\r\n\r\n assert self.mock_admin.id != user_taskrun.user.id\r\n assert_raises(Forbidden,\r\n getattr(require, 'taskrun').update,\r\n user_taskrun)",
"def raise_not_editable(self, viewer):\n if not self.id or viewer.has_perm(\"bookwyrm.create_invites\"):\n return\n raise PermissionDenied()",
"def test_project_create_cant_edit_users_existing_user(self):\n user = fake_clients.FakeUser(name=\"[email protected]\")\n\n setup_identity_cache(users=[user])\n\n url = \"/v1/actions/CreateProjectAndUser\"\n data = {\"project_name\": \"test_project\", \"email\": \"[email protected]\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.json(), {\"notes\": [\"task created\"]})\n task = Task.objects.all()[0]\n action_models = task.actions\n actions = [act.get_action() for act in action_models]\n self.assertTrue(all([act.valid for act in actions]))",
"def test_anonymous_user_update_anoymous_taskrun(self):\r\n\r\n with self.flask_app.test_request_context('/'):\r\n anonymous_taskrun = AnonymousTaskRunFactory.create()\r\n\r\n assert_raises(Unauthorized,\r\n getattr(require, 'taskrun').update,\r\n anonymous_taskrun)",
"def test_duplicate_tasks_new_user(self):\n project = fake_clients.FakeProject(name=\"test_project\")\n\n setup_identity_cache(projects=[project])\n\n url = \"/v1/actions/InviteUser\"\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": project.id,\n \"roles\": \"project_admin,member,project_mod\",\n \"username\": \"[email protected]\",\n \"user_id\": \"test_user_id\",\n \"authenticated\": True,\n }\n data = {\n \"email\": \"[email protected]\",\n \"roles\": [\"member\"],\n \"project_id\": project.id,\n }\n response = self.client.post(url, data, format=\"json\", headers=headers)\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.json(), {\"notes\": [\"task created\"]})\n response = self.client.post(url, data, format=\"json\", headers=headers)\n self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)\n\n data = {\n \"email\": \"[email protected]\",\n \"roles\": [\"member\"],\n \"project_id\": project.id,\n }\n response = self.client.post(url, data, format=\"json\", headers=headers)\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.json(), {\"notes\": [\"task created\"]})\n response = self.client.post(url, data, format=\"json\", headers=headers)\n self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)",
"def form_valid(self, form):\n form.instance.user = self.request.user\n return super(TaskCreate, self).form_valid(form)",
"def test_authenticated_user_update_anonymous_taskrun(self):\r\n\r\n with self.flask_app.test_request_context('/'):\r\n anonymous_taskrun = AnonymousTaskRunFactory.create()\r\n\r\n assert_raises(Forbidden,\r\n getattr(require, 'taskrun').update,\r\n anonymous_taskrun)",
"def test_admin_approval_nonexistent_id(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n profile = self.registration_profile.objects.get(user=new_user)\n\n user = self.registration_profile.objects.admin_approve_user(\n profile.id, Site.objects.get_current())\n self.assertIs(user, False)",
"def reinvite_user(self, user, email):\n if self.is_moderator and self.has_perm('accounts.invite_user'):\n # Reset email, set a new token and update decision datetime\n user.email = email\n user.auth_token = generate_unique_id()\n user.decision_datetime = timezone.now()\n user.save()\n\n return user\n\n else:\n raise PermissionDenied",
"def test_project_create_cant_edit_users(self):\n setup_identity_cache()\n\n url = \"/v1/actions/CreateProjectAndUser\"\n data = {\"project_name\": \"test_project\", \"email\": \"[email protected]\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.json(), {\"notes\": [\"task created\"]})\n task = Task.objects.all()[0]\n action_models = task.actions\n actions = [act.get_action() for act in action_models]\n self.assertFalse(all([act.valid for act in actions]))",
"def test_admin_update_anonymous_taskrun(self):\r\n\r\n with self.flask_app.test_request_context('/'):\r\n anonymous_taskrun = AnonymousTaskRunFactory.create()\r\n\r\n assert_raises(Forbidden,\r\n getattr(require, 'taskrun').update,\r\n anonymous_taskrun)",
"def testAnonymousCannotEdit(self):\n response = self.client.get(reverse(\"task_detail\", args=[1]))\n self.failUnlessEqual(response.status_code, 200)\n self.failUnless(response.content.find(\"<h2>Edit</h2>\") == -1,\n \"Anonymous user is able to edit tasks.\")",
"def test_add_user_existing_with_role(self):\n\n project = fake_clients.FakeProject(name=\"test_project\")\n\n user = fake_clients.FakeUser(\n name=\"[email protected]\", password=\"123\", email=\"[email protected]\"\n )\n\n assignment = fake_clients.FakeRoleAssignment(\n scope={\"project\": {\"id\": project.id}},\n role_name=\"member\",\n user={\"id\": user.id},\n )\n\n setup_identity_cache(\n projects=[project], users=[user], role_assignments=[assignment]\n )\n\n url = \"/v1/actions/InviteUser\"\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": project.id,\n \"roles\": \"project_admin,member,project_mod\",\n \"username\": \"[email protected]\",\n \"user_id\": \"test_user_id\",\n \"authenticated\": True,\n }\n data = {\n \"email\": \"[email protected]\",\n \"roles\": [\"member\"],\n \"project_id\": project.id,\n }\n response = self.client.post(url, data, format=\"json\", headers=headers)\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.json(), {\"notes\": [\"task created\"]})\n\n tasks = Task.objects.all()\n self.assertEqual(1, len(tasks))\n self.assertTrue(tasks[0].completed)",
"def test_anonymous_user_create_repeated_taskrun(self):\r\n\r\n with self.flask_app.test_request_context('/'):\r\n task = TaskFactory.create()\r\n taskrun1 = AnonymousTaskRunFactory.create(task=task)\r\n taskrun2 = AnonymousTaskRunFactory.build(task=task)\r\n assert_raises(Forbidden,\r\n getattr(require, 'taskrun').create,\r\n taskrun2)\r\n\r\n # But the user can still create taskruns for different tasks\r\n task2 = TaskFactory.create(app=task.app)\r\n taskrun3 = AnonymousTaskRunFactory.build(task=task2)\r\n assert_not_raises(Exception,\r\n getattr(require, 'taskrun').create,\r\n taskrun3)",
"def test_admin_approval_not_activated(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n profile = self.registration_profile.objects.get(user=new_user)\n\n user = self.registration_profile.objects.admin_approve_user(\n profile.id, Site.objects.get_current())\n self.assertIs(user, False)\n self.assertIs(profile.user.is_active, False)",
"def test_authenticated_user_update_other_users_taskrun(self):\r\n\r\n with self.flask_app.test_request_context('/'):\r\n own_taskrun = TaskRunFactory.create()\r\n other_users_taskrun = TaskRunFactory.create()\r\n\r\n assert self.mock_authenticated.id == own_taskrun.user.id\r\n assert self.mock_authenticated.id != other_users_taskrun.user.id\r\n assert_raises(Forbidden,\r\n getattr(require, 'taskrun').update,\r\n own_taskrun)\r\n assert_raises(Forbidden,\r\n getattr(require, 'taskrun').update,\r\n other_users_taskrun)",
"def test_new_user_not_my_project(self):\n setup_identity_cache()\n\n url = \"/v1/actions/InviteUser\"\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": \"test_project_id\",\n \"roles\": \"member\",\n \"username\": \"[email protected]\",\n \"user_id\": \"test_user_id\",\n \"authenticated\": True,\n }\n data = {\n \"email\": \"[email protected]\",\n \"roles\": [\"member\"],\n \"project_id\": \"test_project_id\",\n }\n response = self.client.post(url, data, format=\"json\", headers=headers)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)",
"def testMemberCanEdit(self):\n self.client.login(username=\"admin\", password=\"test\")\n response = self.client.get(reverse(\"task_detail\", args=[1]))\n self.failUnlessEqual(response.status_code, 200)\n self.failUnless(response.content.find(\"<h2>Edit</h2>\") != -1,\n \"Authenticated users cannot edit tasks.\")\n self.client.logout()",
"def clean(self):\n cleaned_data = super().clean()\n\n if not self.instance.is_active:\n raise forms.ValidationError('Invalid invitation.')\n\n if self.instance.email not in self.user.get_emails():\n raise forms.ValidationError(\n 'You are not invited.')\n\n return cleaned_data",
"def save(self, *args, **kwargs):\n if not self.require_confirm_email:\n User.objects.filter(is_active=False, deactivation_reason=\"pending\").update(\n is_active=True, deactivation_reason=None\n )\n if not self.invite_question_text:\n self.invite_question_text = \"What is your favourite book?\"\n super().save(*args, **kwargs)",
"def test_not_creator_cannot_update(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n self.group.users.add(logged_user)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url,\n expected_url=expected_url,\n data=self.data)",
"def allow_to_edit(user):\n return allow_to_edit_well(user)",
"def _check_is_editable(self, raise_error: bool = True) -> bool:",
"def test_add_user_existing(self):\n project = fake_clients.FakeProject(name=\"parent_project\")\n\n user = fake_clients.FakeUser(\n name=\"[email protected]\", password=\"123\", email=\"[email protected]\"\n )\n\n setup_identity_cache(projects=[project], users=[user])\n\n url = \"/v1/actions/InviteUser\"\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": project.id,\n \"roles\": \"project_admin,member,project_mod\",\n \"username\": \"[email protected]\",\n \"user_id\": \"test_user_id\",\n \"authenticated\": True,\n }\n data = {\n \"email\": \"[email protected]\",\n \"roles\": [\"member\"],\n \"project_id\": project.id,\n }\n response = self.client.post(url, data, format=\"json\", headers=headers)\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.json(), {\"notes\": [\"task created\"]})\n\n new_token = Token.objects.all()[0]\n url = \"/v1/tokens/\" + new_token.token\n data = {\"confirm\": True}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def test_request_membership_form_with_an_invalid_user_id(self):\n pass",
"def add_user_with_status_unrequested(user):\r\n _add_user(user, CourseCreator.UNREQUESTED)",
"def put(self, id):\n payload = marshal(api.payload, invite_user)\n taskroom_service.invite_user(id, payload['email'])\n return {'Message': \"User Added to the Task Room\"}",
"def save(self, user, commit=True):\n task = super(NonProjectTaskForm, self).save(commit=False)\n task.editor = user\n task.owner = user\n if not task.id:\n task.author = user\n task.created_at = datetime.now()\n if commit:\n task.save()\n return task"
] | [
"0.70514643",
"0.64830637",
"0.6148935",
"0.6148552",
"0.61462927",
"0.6118526",
"0.60407144",
"0.59387314",
"0.5936648",
"0.5901571",
"0.589283",
"0.58872414",
"0.57998174",
"0.57644355",
"0.5719481",
"0.5709963",
"0.5695988",
"0.5656522",
"0.5620688",
"0.5614186",
"0.5606365",
"0.5584095",
"0.5575544",
"0.5569179",
"0.5536957",
"0.5531845",
"0.5530771",
"0.55300546",
"0.55245245",
"0.55009985"
] | 0.661029 | 1 |
When can_edit_users is false, and a new user is invited, the task should be marked as valid if the user exists. | def test_user_invite_cant_edit_users_existing_user(self):
project = fake_clients.FakeProject(name="test_project")
user = fake_clients.FakeUser(name="[email protected]")
setup_identity_cache(projects=[project], users=[user])
url = "/v1/actions/InviteUser"
headers = {
"project_name": "test_project",
"project_id": project.id,
"roles": "project_admin,member,project_mod",
"username": "user",
"user_id": "test_user_id",
"authenticated": True,
}
data = {
"username": "new_user",
"email": "[email protected]",
"roles": ["member"],
"project_id": project.id,
}
response = self.client.post(url, data, format="json", headers=headers)
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
self.assertEqual(response.json(), {"notes": ["task created"]}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_anonymous_user_update_user_taskrun(self):\r\n with self.flask_app.test_request_context('/'):\r\n user_taskrun = TaskRunFactory.create()\r\n\r\n assert_raises(Unauthorized,\r\n getattr(require, 'taskrun').update,\r\n user_taskrun)",
"def test_user_invite_cant_edit_users(self):\n project = fake_clients.FakeProject(name=\"test_project\")\n\n setup_identity_cache(projects=[project])\n\n url = \"/v1/actions/InviteUser\"\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": project.id,\n \"roles\": \"project_admin,member,project_mod\",\n \"username\": \"user\",\n \"user_id\": \"test_user_id\",\n \"authenticated\": True,\n }\n data = {\n \"username\": \"new_user\",\n \"email\": \"[email protected]\",\n \"roles\": [\"member\"],\n \"project_id\": project.id,\n }\n response = self.client.post(url, data, format=\"json\", headers=headers)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.json(), {\"errors\": [\"actions invalid\"]})",
"def form_valid(self, form):\n form.instance.user = self.request.user\n return super(TaskCreate, self).form_valid(form)",
"def test_admin_update_user_taskrun(self):\r\n\r\n with self.flask_app.test_request_context('/'):\r\n user_taskrun = TaskRunFactory.create()\r\n\r\n assert self.mock_admin.id != user_taskrun.user.id\r\n assert_raises(Forbidden,\r\n getattr(require, 'taskrun').update,\r\n user_taskrun)",
"def test_anonymous_user_update_anoymous_taskrun(self):\r\n\r\n with self.flask_app.test_request_context('/'):\r\n anonymous_taskrun = AnonymousTaskRunFactory.create()\r\n\r\n assert_raises(Unauthorized,\r\n getattr(require, 'taskrun').update,\r\n anonymous_taskrun)",
"def raise_not_editable(self, viewer):\n if not self.id or viewer.has_perm(\"bookwyrm.create_invites\"):\n return\n raise PermissionDenied()",
"def test_project_create_cant_edit_users_existing_user(self):\n user = fake_clients.FakeUser(name=\"[email protected]\")\n\n setup_identity_cache(users=[user])\n\n url = \"/v1/actions/CreateProjectAndUser\"\n data = {\"project_name\": \"test_project\", \"email\": \"[email protected]\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.json(), {\"notes\": [\"task created\"]})\n task = Task.objects.all()[0]\n action_models = task.actions\n actions = [act.get_action() for act in action_models]\n self.assertTrue(all([act.valid for act in actions]))",
"def test_duplicate_tasks_new_user(self):\n project = fake_clients.FakeProject(name=\"test_project\")\n\n setup_identity_cache(projects=[project])\n\n url = \"/v1/actions/InviteUser\"\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": project.id,\n \"roles\": \"project_admin,member,project_mod\",\n \"username\": \"[email protected]\",\n \"user_id\": \"test_user_id\",\n \"authenticated\": True,\n }\n data = {\n \"email\": \"[email protected]\",\n \"roles\": [\"member\"],\n \"project_id\": project.id,\n }\n response = self.client.post(url, data, format=\"json\", headers=headers)\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.json(), {\"notes\": [\"task created\"]})\n response = self.client.post(url, data, format=\"json\", headers=headers)\n self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)\n\n data = {\n \"email\": \"[email protected]\",\n \"roles\": [\"member\"],\n \"project_id\": project.id,\n }\n response = self.client.post(url, data, format=\"json\", headers=headers)\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.json(), {\"notes\": [\"task created\"]})\n response = self.client.post(url, data, format=\"json\", headers=headers)\n self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)",
"def testMemberCanEdit(self):\n self.client.login(username=\"admin\", password=\"test\")\n response = self.client.get(reverse(\"task_detail\", args=[1]))\n self.failUnlessEqual(response.status_code, 200)\n self.failUnless(response.content.find(\"<h2>Edit</h2>\") != -1,\n \"Authenticated users cannot edit tasks.\")\n self.client.logout()",
"def test_admin_approval_nonexistent_id(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n profile = self.registration_profile.objects.get(user=new_user)\n\n user = self.registration_profile.objects.admin_approve_user(\n profile.id, Site.objects.get_current())\n self.assertIs(user, False)",
"def test_authenticated_user_update_anonymous_taskrun(self):\r\n\r\n with self.flask_app.test_request_context('/'):\r\n anonymous_taskrun = AnonymousTaskRunFactory.create()\r\n\r\n assert_raises(Forbidden,\r\n getattr(require, 'taskrun').update,\r\n anonymous_taskrun)",
"def test_valid_admin_approval(self):\n\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n profile = self.registration_profile.objects.get(user=new_user)\n user, activated = self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n\n self.assertIsInstance(user, UserModel())\n\n user = self.registration_profile.objects.admin_approve_user(\n profile.id, Site.objects.get_current())\n self.assertIsInstance(user, UserModel())\n self.assertIs(user.is_active, True)",
"def test_admin_approval_already_approved(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n profile = self.registration_profile.objects.get(user=new_user)\n user, activated = self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n\n self.assertIsInstance(user, UserModel())\n self.assertTrue(activated)\n\n user = self.registration_profile.objects.admin_approve_user(\n profile.id, Site.objects.get_current())\n self.assertIsInstance(user, UserModel())\n self.assertIs(user.is_active, True)",
"def test_admin_approval_not_activated(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n profile = self.registration_profile.objects.get(user=new_user)\n\n user = self.registration_profile.objects.admin_approve_user(\n profile.id, Site.objects.get_current())\n self.assertIs(user, False)\n self.assertIs(profile.user.is_active, False)",
"def test_add_user_existing_with_role(self):\n\n project = fake_clients.FakeProject(name=\"test_project\")\n\n user = fake_clients.FakeUser(\n name=\"[email protected]\", password=\"123\", email=\"[email protected]\"\n )\n\n assignment = fake_clients.FakeRoleAssignment(\n scope={\"project\": {\"id\": project.id}},\n role_name=\"member\",\n user={\"id\": user.id},\n )\n\n setup_identity_cache(\n projects=[project], users=[user], role_assignments=[assignment]\n )\n\n url = \"/v1/actions/InviteUser\"\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": project.id,\n \"roles\": \"project_admin,member,project_mod\",\n \"username\": \"[email protected]\",\n \"user_id\": \"test_user_id\",\n \"authenticated\": True,\n }\n data = {\n \"email\": \"[email protected]\",\n \"roles\": [\"member\"],\n \"project_id\": project.id,\n }\n response = self.client.post(url, data, format=\"json\", headers=headers)\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.json(), {\"notes\": [\"task created\"]})\n\n tasks = Task.objects.all()\n self.assertEqual(1, len(tasks))\n self.assertTrue(tasks[0].completed)",
"def form_valid(self, form):\n new_task = form.save(commit=False)\n new_task.user = self.request.user\n new_task.date = self.get_date()\n new_task.save()\n return super(LoginRequiredMixin, self).form_valid(form)",
"async def check_can_edit_user(\n authorization_client: AuthorizationClient, req_user_id: str, user_id: str\n):\n admin_tuple, req_admin_tuple = await asyncio.gather(\n authorization_client.get_administrator(user_id),\n authorization_client.get_administrator(req_user_id),\n )\n\n if admin_tuple[1] is None:\n return True\n\n if req_admin_tuple[1] == AdministratorRole.FULL:\n return True\n\n return False",
"def allow_to_edit(user):\n return allow_to_edit_well(user)",
"def save(self, commit=True):\n user = super(InvitationCompleteForm, self).save(commit)\n\n def save_invited_user():\n invited_user = self.invited_user\n invited_user.created_user = user\n invited_user.status = InvitedUser.STATUS_REGISTERED\n invited_user.save()\n if commit:\n save_invited_user()\n else:\n self.save_invited_user = save_invited_user\n return user",
"def test_authenticated_user_update_other_users_taskrun(self):\r\n\r\n with self.flask_app.test_request_context('/'):\r\n own_taskrun = TaskRunFactory.create()\r\n other_users_taskrun = TaskRunFactory.create()\r\n\r\n assert self.mock_authenticated.id == own_taskrun.user.id\r\n assert self.mock_authenticated.id != other_users_taskrun.user.id\r\n assert_raises(Forbidden,\r\n getattr(require, 'taskrun').update,\r\n own_taskrun)\r\n assert_raises(Forbidden,\r\n getattr(require, 'taskrun').update,\r\n other_users_taskrun)",
"def test_project_create_cant_edit_users(self):\n setup_identity_cache()\n\n url = \"/v1/actions/CreateProjectAndUser\"\n data = {\"project_name\": \"test_project\", \"email\": \"[email protected]\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.json(), {\"notes\": [\"task created\"]})\n task = Task.objects.all()[0]\n action_models = task.actions\n actions = [act.get_action() for act in action_models]\n self.assertFalse(all([act.valid for act in actions]))",
"def test_admin_update_anonymous_taskrun(self):\r\n\r\n with self.flask_app.test_request_context('/'):\r\n anonymous_taskrun = AnonymousTaskRunFactory.create()\r\n\r\n assert_raises(Forbidden,\r\n getattr(require, 'taskrun').update,\r\n anonymous_taskrun)",
"def user_can_edit(self, user):\n return user == self.owner",
"def user_requested_access(user):\r\n user = CourseCreator.objects.get(user=user)\r\n if user.state != CourseCreator.GRANTED:\r\n user.state = CourseCreator.PENDING\r\n user.save()",
"def test_success_edit(event_member):\n _, member, event_id = event_member\n current = date.today() + timedelta(days=1)\n start = datetime.combine(current, time(19, 30))\n end = start + timedelta(hours=2, minutes=30)\n edit(member.username, event_id, True, start, end)\n\n # Check that the user's availability was updated\n schedule = data.events[event_id].availabilities[member.username].times\n days_from_creation = 1\n start_index = 2 * start.hour + start.minute // 30\n end_index = 2 * end.hour + end.minute // 30\n\n for d in range(MAX_DAYS):\n if any(schedule[d]):\n print(d, schedule[d])\n for t in range(INTERVALS):\n if d == days_from_creation and start_index <= t < end_index:\n assert schedule[d][t]\n else:\n assert not schedule[d][t]",
"def _check_is_editable(self, raise_error: bool = True) -> bool:",
"def reinvite_user(self, user, email):\n if self.is_moderator and self.has_perm('accounts.invite_user'):\n # Reset email, set a new token and update decision datetime\n user.email = email\n user.auth_token = generate_unique_id()\n user.decision_datetime = timezone.now()\n user.save()\n\n return user\n\n else:\n raise PermissionDenied",
"def save(self, *args, **kwargs):\n if not self.require_confirm_email:\n User.objects.filter(is_active=False, deactivation_reason=\"pending\").update(\n is_active=True, deactivation_reason=None\n )\n if not self.invite_question_text:\n self.invite_question_text = \"What is your favourite book?\"\n super().save(*args, **kwargs)",
"def testAnonymousCannotEdit(self):\n response = self.client.get(reverse(\"task_detail\", args=[1]))\n self.failUnlessEqual(response.status_code, 200)\n self.failUnless(response.content.find(\"<h2>Edit</h2>\") == -1,\n \"Anonymous user is able to edit tasks.\")",
"def can_be_edited(self, user):\n return (self.is_public or user == self.owner or\n user in list(self.auth_users.all()))"
] | [
"0.66174513",
"0.6601052",
"0.6384289",
"0.6334532",
"0.621152",
"0.61806226",
"0.61534464",
"0.6063311",
"0.6060537",
"0.6060164",
"0.6044851",
"0.60244834",
"0.59576625",
"0.594278",
"0.59351856",
"0.5911798",
"0.5905776",
"0.5897276",
"0.5896448",
"0.5874475",
"0.5834748",
"0.5832225",
"0.5829974",
"0.58152944",
"0.5813906",
"0.5794659",
"0.57868403",
"0.5786624",
"0.57835263",
"0.57703084"
] | 0.70513034 | 0 |
When can_edit_users is false, and a new signup comes in, the task should be marked as invalid if it needs to create a new user. The request will return OK (as the task doesn't auto_approve), but the task will actually be invalid. | def test_project_create_cant_edit_users(self):
setup_identity_cache()
url = "/v1/actions/CreateProjectAndUser"
data = {"project_name": "test_project", "email": "[email protected]"}
response = self.client.post(url, data, format="json")
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
self.assertEqual(response.json(), {"notes": ["task created"]})
task = Task.objects.all()[0]
action_models = task.actions
actions = [act.get_action() for act in action_models]
self.assertFalse(all([act.valid for act in actions])) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_project_create_cant_edit_users_existing_user(self):\n user = fake_clients.FakeUser(name=\"[email protected]\")\n\n setup_identity_cache(users=[user])\n\n url = \"/v1/actions/CreateProjectAndUser\"\n data = {\"project_name\": \"test_project\", \"email\": \"[email protected]\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.json(), {\"notes\": [\"task created\"]})\n task = Task.objects.all()[0]\n action_models = task.actions\n actions = [act.get_action() for act in action_models]\n self.assertTrue(all([act.valid for act in actions]))",
"def test_user_invite_cant_edit_users_existing_user(self):\n project = fake_clients.FakeProject(name=\"test_project\")\n\n user = fake_clients.FakeUser(name=\"[email protected]\")\n\n setup_identity_cache(projects=[project], users=[user])\n\n url = \"/v1/actions/InviteUser\"\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": project.id,\n \"roles\": \"project_admin,member,project_mod\",\n \"username\": \"user\",\n \"user_id\": \"test_user_id\",\n \"authenticated\": True,\n }\n data = {\n \"username\": \"new_user\",\n \"email\": \"[email protected]\",\n \"roles\": [\"member\"],\n \"project_id\": project.id,\n }\n response = self.client.post(url, data, format=\"json\", headers=headers)\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.json(), {\"notes\": [\"task created\"]})",
"def test_anonymous_user_update_user_taskrun(self):\r\n with self.flask_app.test_request_context('/'):\r\n user_taskrun = TaskRunFactory.create()\r\n\r\n assert_raises(Unauthorized,\r\n getattr(require, 'taskrun').update,\r\n user_taskrun)",
"def test_admin_approval_nonexistent_id(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n profile = self.registration_profile.objects.get(user=new_user)\n\n user = self.registration_profile.objects.admin_approve_user(\n profile.id, Site.objects.get_current())\n self.assertIs(user, False)",
"def test_anonymous_user_update_anoymous_taskrun(self):\r\n\r\n with self.flask_app.test_request_context('/'):\r\n anonymous_taskrun = AnonymousTaskRunFactory.create()\r\n\r\n assert_raises(Unauthorized,\r\n getattr(require, 'taskrun').update,\r\n anonymous_taskrun)",
"def test_duplicate_tasks_new_user(self):\n project = fake_clients.FakeProject(name=\"test_project\")\n\n setup_identity_cache(projects=[project])\n\n url = \"/v1/actions/InviteUser\"\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": project.id,\n \"roles\": \"project_admin,member,project_mod\",\n \"username\": \"[email protected]\",\n \"user_id\": \"test_user_id\",\n \"authenticated\": True,\n }\n data = {\n \"email\": \"[email protected]\",\n \"roles\": [\"member\"],\n \"project_id\": project.id,\n }\n response = self.client.post(url, data, format=\"json\", headers=headers)\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.json(), {\"notes\": [\"task created\"]})\n response = self.client.post(url, data, format=\"json\", headers=headers)\n self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)\n\n data = {\n \"email\": \"[email protected]\",\n \"roles\": [\"member\"],\n \"project_id\": project.id,\n }\n response = self.client.post(url, data, format=\"json\", headers=headers)\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.json(), {\"notes\": [\"task created\"]})\n response = self.client.post(url, data, format=\"json\", headers=headers)\n self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)",
"def test_admin_approval_not_activated(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n profile = self.registration_profile.objects.get(user=new_user)\n\n user = self.registration_profile.objects.admin_approve_user(\n profile.id, Site.objects.get_current())\n self.assertIs(user, False)\n self.assertIs(profile.user.is_active, False)",
"def test_valid_admin_approval(self):\n\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n profile = self.registration_profile.objects.get(user=new_user)\n user, activated = self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n\n self.assertIsInstance(user, UserModel())\n\n user = self.registration_profile.objects.admin_approve_user(\n profile.id, Site.objects.get_current())\n self.assertIsInstance(user, UserModel())\n self.assertIs(user.is_active, True)",
"def test_admin_update_user_taskrun(self):\r\n\r\n with self.flask_app.test_request_context('/'):\r\n user_taskrun = TaskRunFactory.create()\r\n\r\n assert self.mock_admin.id != user_taskrun.user.id\r\n assert_raises(Forbidden,\r\n getattr(require, 'taskrun').update,\r\n user_taskrun)",
"async def user_signup(\n form: SignUp,\n task: BackgroundTasks,\n db: Session = Depends(db_session)):\n user = User()\n user.name = form.name\n user.email = form.login\n user.hashed_password = PWD_CONTEXT.hash(form.password)\n user.disabled = False\n db.add(user)\n try:\n db.commit()\n except exc.IntegrityError:\n db.rollback\n return {\"success\": False, \"msg\": \"Пользователь уже зарегистрирован\"}\n\n task.add_task(send_welcome_email, user.email)\n return {\"success\": True}",
"def test_user_invite_cant_edit_users(self):\n project = fake_clients.FakeProject(name=\"test_project\")\n\n setup_identity_cache(projects=[project])\n\n url = \"/v1/actions/InviteUser\"\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": project.id,\n \"roles\": \"project_admin,member,project_mod\",\n \"username\": \"user\",\n \"user_id\": \"test_user_id\",\n \"authenticated\": True,\n }\n data = {\n \"username\": \"new_user\",\n \"email\": \"[email protected]\",\n \"roles\": [\"member\"],\n \"project_id\": project.id,\n }\n response = self.client.post(url, data, format=\"json\", headers=headers)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.json(), {\"errors\": [\"actions invalid\"]})",
"def form_valid(self, form):\n # Switching between temporary registration and main registration is easy with the is_active attribute.\n # The withdrawal process will also improve if you only set is_active to False.\n user = form.save(commit=False)\n user.is_active = False\n user.save()\n\n # Send activation URL\n current_site = get_current_site(self.request)\n domain = current_site.domain\n context = {\n 'protocol': 'https' if self.request.is_secure() else 'http',\n 'domain': domain,\n 'token': dumps(user.pk),\n 'user': user,\n }\n\n subject = render_to_string('register/mail_template/create/subject.txt', context)\n message = render_to_string('register/mail_template/create/message.txt', context)\n\n user.email_user(subject, message)\n return redirect('register:user_create_done')",
"def test_anonymous_user_create_repeated_taskrun(self):\r\n\r\n with self.flask_app.test_request_context('/'):\r\n task = TaskFactory.create()\r\n taskrun1 = AnonymousTaskRunFactory.create(task=task)\r\n taskrun2 = AnonymousTaskRunFactory.build(task=task)\r\n assert_raises(Forbidden,\r\n getattr(require, 'taskrun').create,\r\n taskrun2)\r\n\r\n # But the user can still create taskruns for different tasks\r\n task2 = TaskFactory.create(app=task.app)\r\n taskrun3 = AnonymousTaskRunFactory.build(task=task2)\r\n assert_not_raises(Exception,\r\n getattr(require, 'taskrun').create,\r\n taskrun3)",
"def test_new_project_existing_user(self):\n\n user = fake_clients.FakeUser(\n name=\"[email protected]\", password=\"123\", email=\"[email protected]\"\n )\n\n setup_identity_cache(users=[user])\n\n # unauthenticated sign up as existing user\n url = \"/v1/actions/CreateProjectAndUser\"\n data = {\"project_name\": \"test_project\", \"email\": user.email}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n\n # approve the sign-up as admin\n headers = {\n \"project_name\": \"admin_project\",\n \"project_id\": \"admin_project_id\",\n \"roles\": \"admin,member\",\n \"username\": \"admin\",\n \"user_id\": \"admin_id\",\n \"authenticated\": True,\n }\n new_task = Task.objects.all()[0]\n url = \"/v1/tasks/\" + new_task.uuid\n response = self.client.post(\n url, {\"approved\": True}, format=\"json\", headers=headers\n )\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.json(), {\"notes\": [\"Task completed successfully.\"]})",
"def form_valid(self, form):\n form.instance.user = self.request.user\n return super(TaskCreate, self).form_valid(form)",
"def test_admin_approval_already_approved(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n profile = self.registration_profile.objects.get(user=new_user)\n user, activated = self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n\n self.assertIsInstance(user, UserModel())\n self.assertTrue(activated)\n\n user = self.registration_profile.objects.admin_approve_user(\n profile.id, Site.objects.get_current())\n self.assertIsInstance(user, UserModel())\n self.assertIs(user.is_active, True)",
"def test_authenticated_user_update_anonymous_taskrun(self):\r\n\r\n with self.flask_app.test_request_context('/'):\r\n anonymous_taskrun = AnonymousTaskRunFactory.create()\r\n\r\n assert_raises(Forbidden,\r\n getattr(require, 'taskrun').update,\r\n anonymous_taskrun)",
"def test_user_has_valid_or_pending(self):\r\n user = UserFactory.create()\r\n attempt = SoftwareSecurePhotoVerification(user=user)\r\n\r\n # If it's any of these statuses, they don't have anything outstanding\r\n for status in [\"created\", \"ready\", \"denied\"]:\r\n attempt.status = status\r\n attempt.save()\r\n assert_false(SoftwareSecurePhotoVerification.user_has_valid_or_pending(user), status)\r\n\r\n # Any of these, and we are. Note the benefit of the doubt we're giving\r\n # -- must_retry, and submitted both count until we hear otherwise\r\n for status in [\"submitted\", \"must_retry\", \"approved\"]:\r\n attempt.status = status\r\n attempt.save()\r\n assert_true(SoftwareSecurePhotoVerification.user_has_valid_or_pending(user), status)",
"def test_no_admins_registered(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n\n with self.assertRaises(ImproperlyConfigured):\n self.registration_profile.objects.send_admin_approve_email(\n new_user, Site.objects.get_current())",
"def test_not_creator_cannot_update(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n self.group.users.add(logged_user)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url,\n expected_url=expected_url,\n data=self.data)",
"def test_valid_account_create_is_a_developer(self):\n ident_choice = UserIdentificationLabel.objects.get(slug=\"ident1\")\n form_data = {\n 'invitation_code': '1234',\n 'email': '[email protected]',\n 'organization_name': 'transhealth',\n 'password1': 'BEDrocks@123',\n 'password2': 'BEDrocks@123',\n 'first_name': 'Hank',\n 'last_name': 'Flinstone',\n 'identification_choice': str(ident_choice.pk),\n }\n self.client.post(self.url, form_data, follow=True)\n up = UserProfile.objects.get(user__email='[email protected]')\n self.assertEqual(up.user_type, 'DEV')",
"def test_activate_form_bad(self):\r\n res = self.testapp.post(\r\n '/api/v1/suspend',\r\n content_type='application/json',\r\n status=406)\r\n success = json.loads(res.body)['error']\r\n self.assertTrue(\r\n success is not None,\r\n \"Should not be successful with no email address: \" + str(res))\r\n\r\n res = self.testapp.post('/api/v1/suspend',\r\n params={'email': '[email protected]'},\r\n status=404)\r\n success = json.loads(res.body)\r\n self.assertTrue(\r\n 'error' in success,\r\n \"Should not be successful with invalid email address: \" + str(res))",
"def test_admin_update_anonymous_taskrun(self):\r\n\r\n with self.flask_app.test_request_context('/'):\r\n anonymous_taskrun = AnonymousTaskRunFactory.create()\r\n\r\n assert_raises(Forbidden,\r\n getattr(require, 'taskrun').update,\r\n anonymous_taskrun)",
"def test_add_user_existing(self):\n project = fake_clients.FakeProject(name=\"parent_project\")\n\n user = fake_clients.FakeUser(\n name=\"[email protected]\", password=\"123\", email=\"[email protected]\"\n )\n\n setup_identity_cache(projects=[project], users=[user])\n\n url = \"/v1/actions/InviteUser\"\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": project.id,\n \"roles\": \"project_admin,member,project_mod\",\n \"username\": \"[email protected]\",\n \"user_id\": \"test_user_id\",\n \"authenticated\": True,\n }\n data = {\n \"email\": \"[email protected]\",\n \"roles\": [\"member\"],\n \"project_id\": project.id,\n }\n response = self.client.post(url, data, format=\"json\", headers=headers)\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.json(), {\"notes\": [\"task created\"]})\n\n new_token = Token.objects.all()[0]\n url = \"/v1/tokens/\" + new_token.token\n data = {\"confirm\": True}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def test_create__normal_valid(self):\n testing_config.sign_in('[email protected]', 123567890)\n\n json_data = {\n 'email': '[email protected]',\n 'isAdmin': False, 'isSiteEditor': False}\n with test_app.test_request_context(self.request_path, json=json_data):\n actual_json = self.handler.do_post()\n self.assertEqual('[email protected]', actual_json['email'])\n self.assertFalse(actual_json['is_site_editor'])\n self.assertFalse(actual_json['is_admin'])\n\n new_appuser = (user_models.AppUser.query(\n user_models.AppUser.email == '[email protected]').get())\n result_email = new_appuser.email\n result_is_admin = new_appuser.is_admin\n new_appuser.key.delete()\n self.assertEqual('[email protected]', result_email)\n self.assertFalse(result_is_admin)",
"def test_anonymous_user_create_first_taskrun(self):\r\n\r\n with self.flask_app.test_request_context('/'):\r\n taskrun = AnonymousTaskRunFactory.build()\r\n\r\n assert_not_raises(Exception,\r\n getattr(require, 'taskrun').create,\r\n taskrun)",
"def test_add_user_existing_with_role(self):\n\n project = fake_clients.FakeProject(name=\"test_project\")\n\n user = fake_clients.FakeUser(\n name=\"[email protected]\", password=\"123\", email=\"[email protected]\"\n )\n\n assignment = fake_clients.FakeRoleAssignment(\n scope={\"project\": {\"id\": project.id}},\n role_name=\"member\",\n user={\"id\": user.id},\n )\n\n setup_identity_cache(\n projects=[project], users=[user], role_assignments=[assignment]\n )\n\n url = \"/v1/actions/InviteUser\"\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": project.id,\n \"roles\": \"project_admin,member,project_mod\",\n \"username\": \"[email protected]\",\n \"user_id\": \"test_user_id\",\n \"authenticated\": True,\n }\n data = {\n \"email\": \"[email protected]\",\n \"roles\": [\"member\"],\n \"project_id\": project.id,\n }\n response = self.client.post(url, data, format=\"json\", headers=headers)\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.json(), {\"notes\": [\"task created\"]})\n\n tasks = Task.objects.all()\n self.assertEqual(1, len(tasks))\n self.assertTrue(tasks[0].completed)",
"def test_authenticated_user_create_repeated_taskrun(self):\r\n\r\n with self.flask_app.test_request_context('/'):\r\n task = TaskFactory.create()\r\n taskrun1 = TaskRunFactory.create(task=task)\r\n taskrun2 = TaskRunFactory.build(task=task, user=taskrun1.user)\r\n\r\n assert self.mock_authenticated.id == taskrun1.user.id\r\n assert_raises(Forbidden, getattr(require, 'taskrun').create, taskrun2)\r\n\r\n # But the user can still create taskruns for different tasks\r\n task2 = TaskFactory.create(app=task.app)\r\n taskrun3 = TaskRunFactory.build(task=task2, user=taskrun1.user)\r\n\r\n assert self.mock_authenticated.id == taskrun3.user.id\r\n assert_not_raises(Exception,\r\n getattr(require, 'taskrun').create,\r\n taskrun3)",
"def test_edit_user_success(self):\n\t\texpected_messages = [None]\n\t\texpected_response_code = 204\n\t\tdisplayName, email, password = (\"John Doe\", \"[email protected]\", \"12345678\")\n\t\tnew_displayName, new_email = (\"Edited John\", \"[email protected]\")\n\n\t\tregister_response = ApiFacade.register_user(displayName, email, password)\n\t\tauthentication_response = ApiFacade.authenticate_user(email, password)\n\t\ttoken = authentication_response.get_token()\n\t\tedituser_response = ApiFacade.edit_user(token, new_displayName, new_email)\n\t\tviewdetails_response = ApiFacade.view_details(token)\n\t\tdeletion_response = ApiFacade.delete_user(authentication_response.get_token())\n\n\t\tresponse_status_match = edituser_response.get_http_status() == expected_response_code\n\n\t\tself.assertEqual(response_status_match, True, \n\t\t\tmsg = \"Expected HTTP{0}; got HTTP{1}; on data [{2}][{3}][{4}]\"\n\t\t\t.format(expected_response_code, edituser_response.get_http_status(),\n\t\t\t\tdisplayName, email, password))\n\n\t\tself.assertEqual(viewdetails_response.get_email(), new_email,\n\t\t\tmsg = \"Email not updated; expected {{0}}; got {{1}}\"\n\t\t\t.format(new_email, viewdetails_response.get_email()))\n\n\t\tself.assertEqual(viewdetails_response.get_displayName(), new_displayName,\n\t\t\tmsg = \"Email not updated; expected {{0}}; got {{1}}\"\n\t\t\t.format(new_displayName, viewdetails_response.get_displayName()))",
"def testSetIsAllowed(self):\n UserAPI().create([(u'user', u'secret', u'User', u'[email protected]')])\n user = getUser(u'user')\n self.users.set([(u'user', u'secret', u'User', u'[email protected]',\n None)])\n self.assertEqual(u'[email protected]', user.email)"
] | [
"0.66085404",
"0.64380985",
"0.640038",
"0.62846017",
"0.6143267",
"0.61050075",
"0.6069316",
"0.60530555",
"0.5980463",
"0.5970805",
"0.59477574",
"0.59021604",
"0.5873239",
"0.58261275",
"0.5819795",
"0.57675356",
"0.5758916",
"0.5753213",
"0.5694632",
"0.5676942",
"0.5632199",
"0.5621372",
"0.5612243",
"0.55931926",
"0.55795395",
"0.5577466",
"0.55601984",
"0.5549602",
"0.55380076",
"0.55355376"
] | 0.65307754 | 1 |
When can_edit_users is false, and a new signup comes in, the task should be marked as valid if the user already exists. The request will return OK (as the task doesn't auto_approve), but the task will actually be valid. | def test_project_create_cant_edit_users_existing_user(self):
user = fake_clients.FakeUser(name="[email protected]")
setup_identity_cache(users=[user])
url = "/v1/actions/CreateProjectAndUser"
data = {"project_name": "test_project", "email": "[email protected]"}
response = self.client.post(url, data, format="json")
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
self.assertEqual(response.json(), {"notes": ["task created"]})
task = Task.objects.all()[0]
action_models = task.actions
actions = [act.get_action() for act in action_models]
self.assertTrue(all([act.valid for act in actions])) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_valid_admin_approval(self):\n\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n profile = self.registration_profile.objects.get(user=new_user)\n user, activated = self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n\n self.assertIsInstance(user, UserModel())\n\n user = self.registration_profile.objects.admin_approve_user(\n profile.id, Site.objects.get_current())\n self.assertIsInstance(user, UserModel())\n self.assertIs(user.is_active, True)",
"def test_user_invite_cant_edit_users_existing_user(self):\n project = fake_clients.FakeProject(name=\"test_project\")\n\n user = fake_clients.FakeUser(name=\"[email protected]\")\n\n setup_identity_cache(projects=[project], users=[user])\n\n url = \"/v1/actions/InviteUser\"\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": project.id,\n \"roles\": \"project_admin,member,project_mod\",\n \"username\": \"user\",\n \"user_id\": \"test_user_id\",\n \"authenticated\": True,\n }\n data = {\n \"username\": \"new_user\",\n \"email\": \"[email protected]\",\n \"roles\": [\"member\"],\n \"project_id\": project.id,\n }\n response = self.client.post(url, data, format=\"json\", headers=headers)\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.json(), {\"notes\": [\"task created\"]})",
"def test_admin_approval_nonexistent_id(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n profile = self.registration_profile.objects.get(user=new_user)\n\n user = self.registration_profile.objects.admin_approve_user(\n profile.id, Site.objects.get_current())\n self.assertIs(user, False)",
"def test_admin_approval_already_approved(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n profile = self.registration_profile.objects.get(user=new_user)\n user, activated = self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n\n self.assertIsInstance(user, UserModel())\n self.assertTrue(activated)\n\n user = self.registration_profile.objects.admin_approve_user(\n profile.id, Site.objects.get_current())\n self.assertIsInstance(user, UserModel())\n self.assertIs(user.is_active, True)",
"def test_anonymous_user_update_user_taskrun(self):\r\n with self.flask_app.test_request_context('/'):\r\n user_taskrun = TaskRunFactory.create()\r\n\r\n assert_raises(Unauthorized,\r\n getattr(require, 'taskrun').update,\r\n user_taskrun)",
"def test_user_has_valid_or_pending(self):\r\n user = UserFactory.create()\r\n attempt = SoftwareSecurePhotoVerification(user=user)\r\n\r\n # If it's any of these statuses, they don't have anything outstanding\r\n for status in [\"created\", \"ready\", \"denied\"]:\r\n attempt.status = status\r\n attempt.save()\r\n assert_false(SoftwareSecurePhotoVerification.user_has_valid_or_pending(user), status)\r\n\r\n # Any of these, and we are. Note the benefit of the doubt we're giving\r\n # -- must_retry, and submitted both count until we hear otherwise\r\n for status in [\"submitted\", \"must_retry\", \"approved\"]:\r\n attempt.status = status\r\n attempt.save()\r\n assert_true(SoftwareSecurePhotoVerification.user_has_valid_or_pending(user), status)",
"def test_project_create_cant_edit_users(self):\n setup_identity_cache()\n\n url = \"/v1/actions/CreateProjectAndUser\"\n data = {\"project_name\": \"test_project\", \"email\": \"[email protected]\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.json(), {\"notes\": [\"task created\"]})\n task = Task.objects.all()[0]\n action_models = task.actions\n actions = [act.get_action() for act in action_models]\n self.assertFalse(all([act.valid for act in actions]))",
"def form_valid(self, form):\n # Switching between temporary registration and main registration is easy with the is_active attribute.\n # The withdrawal process will also improve if you only set is_active to False.\n user = form.save(commit=False)\n user.is_active = False\n user.save()\n\n # Send activation URL\n current_site = get_current_site(self.request)\n domain = current_site.domain\n context = {\n 'protocol': 'https' if self.request.is_secure() else 'http',\n 'domain': domain,\n 'token': dumps(user.pk),\n 'user': user,\n }\n\n subject = render_to_string('register/mail_template/create/subject.txt', context)\n message = render_to_string('register/mail_template/create/message.txt', context)\n\n user.email_user(subject, message)\n return redirect('register:user_create_done')",
"def form_valid(self, form):\n form.instance.user = self.request.user\n return super(TaskCreate, self).form_valid(form)",
"def test_duplicate_tasks_new_user(self):\n project = fake_clients.FakeProject(name=\"test_project\")\n\n setup_identity_cache(projects=[project])\n\n url = \"/v1/actions/InviteUser\"\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": project.id,\n \"roles\": \"project_admin,member,project_mod\",\n \"username\": \"[email protected]\",\n \"user_id\": \"test_user_id\",\n \"authenticated\": True,\n }\n data = {\n \"email\": \"[email protected]\",\n \"roles\": [\"member\"],\n \"project_id\": project.id,\n }\n response = self.client.post(url, data, format=\"json\", headers=headers)\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.json(), {\"notes\": [\"task created\"]})\n response = self.client.post(url, data, format=\"json\", headers=headers)\n self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)\n\n data = {\n \"email\": \"[email protected]\",\n \"roles\": [\"member\"],\n \"project_id\": project.id,\n }\n response = self.client.post(url, data, format=\"json\", headers=headers)\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.json(), {\"notes\": [\"task created\"]})\n response = self.client.post(url, data, format=\"json\", headers=headers)\n self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)",
"async def user_signup(\n form: SignUp,\n task: BackgroundTasks,\n db: Session = Depends(db_session)):\n user = User()\n user.name = form.name\n user.email = form.login\n user.hashed_password = PWD_CONTEXT.hash(form.password)\n user.disabled = False\n db.add(user)\n try:\n db.commit()\n except exc.IntegrityError:\n db.rollback\n return {\"success\": False, \"msg\": \"Пользователь уже зарегистрирован\"}\n\n task.add_task(send_welcome_email, user.email)\n return {\"success\": True}",
"def test_admin_approval_not_activated(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n profile = self.registration_profile.objects.get(user=new_user)\n\n user = self.registration_profile.objects.admin_approve_user(\n profile.id, Site.objects.get_current())\n self.assertIs(user, False)\n self.assertIs(profile.user.is_active, False)",
"def test_anonymous_user_update_anoymous_taskrun(self):\r\n\r\n with self.flask_app.test_request_context('/'):\r\n anonymous_taskrun = AnonymousTaskRunFactory.create()\r\n\r\n assert_raises(Unauthorized,\r\n getattr(require, 'taskrun').update,\r\n anonymous_taskrun)",
"def test_new_project_existing_user(self):\n\n user = fake_clients.FakeUser(\n name=\"[email protected]\", password=\"123\", email=\"[email protected]\"\n )\n\n setup_identity_cache(users=[user])\n\n # unauthenticated sign up as existing user\n url = \"/v1/actions/CreateProjectAndUser\"\n data = {\"project_name\": \"test_project\", \"email\": user.email}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n\n # approve the sign-up as admin\n headers = {\n \"project_name\": \"admin_project\",\n \"project_id\": \"admin_project_id\",\n \"roles\": \"admin,member\",\n \"username\": \"admin\",\n \"user_id\": \"admin_id\",\n \"authenticated\": True,\n }\n new_task = Task.objects.all()[0]\n url = \"/v1/tasks/\" + new_task.uuid\n response = self.client.post(\n url, {\"approved\": True}, format=\"json\", headers=headers\n )\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.json(), {\"notes\": [\"Task completed successfully.\"]})",
"def test_valid_activation(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n profile = self.registration_profile.objects.get(user=new_user)\n user, activated = self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n\n self.assertIsInstance(user, UserModel())\n self.assertEqual(user.id, new_user.id)\n self.assertFalse(user.is_active)\n self.assertTrue(activated)\n\n profile = self.registration_profile.objects.get(user=new_user)\n self.assertTrue(profile.activated)",
"def test_admin_update_user_taskrun(self):\r\n\r\n with self.flask_app.test_request_context('/'):\r\n user_taskrun = TaskRunFactory.create()\r\n\r\n assert self.mock_admin.id != user_taskrun.user.id\r\n assert_raises(Forbidden,\r\n getattr(require, 'taskrun').update,\r\n user_taskrun)",
"def test_valid_activation(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n profile = self.registration_profile.objects.get(user=new_user)\n user, activated = self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n\n self.assertIsInstance(user, UserModel())\n self.assertEqual(user.id, new_user.id)\n self.assertTrue(user.is_active)\n self.assertTrue(activated)\n\n profile = self.registration_profile.objects.get(user=new_user)\n self.assertTrue(profile.activated)",
"def test_edit_user_success(self):\n\t\texpected_messages = [None]\n\t\texpected_response_code = 204\n\t\tdisplayName, email, password = (\"John Doe\", \"[email protected]\", \"12345678\")\n\t\tnew_displayName, new_email = (\"Edited John\", \"[email protected]\")\n\n\t\tregister_response = ApiFacade.register_user(displayName, email, password)\n\t\tauthentication_response = ApiFacade.authenticate_user(email, password)\n\t\ttoken = authentication_response.get_token()\n\t\tedituser_response = ApiFacade.edit_user(token, new_displayName, new_email)\n\t\tviewdetails_response = ApiFacade.view_details(token)\n\t\tdeletion_response = ApiFacade.delete_user(authentication_response.get_token())\n\n\t\tresponse_status_match = edituser_response.get_http_status() == expected_response_code\n\n\t\tself.assertEqual(response_status_match, True, \n\t\t\tmsg = \"Expected HTTP{0}; got HTTP{1}; on data [{2}][{3}][{4}]\"\n\t\t\t.format(expected_response_code, edituser_response.get_http_status(),\n\t\t\t\tdisplayName, email, password))\n\n\t\tself.assertEqual(viewdetails_response.get_email(), new_email,\n\t\t\tmsg = \"Email not updated; expected {{0}}; got {{1}}\"\n\t\t\t.format(new_email, viewdetails_response.get_email()))\n\n\t\tself.assertEqual(viewdetails_response.get_displayName(), new_displayName,\n\t\t\tmsg = \"Email not updated; expected {{0}}; got {{1}}\"\n\t\t\t.format(new_displayName, viewdetails_response.get_displayName()))",
"def new_user_form_valid(self, form):\n new_user = form.save()\n new_user.set_password(form.cleaned_data[\"password\"])\n\n h = hashlib.sha1()\n h.update(str(random.random()).encode('utf-8'))\n salt = h.hexdigest()[:5]\n\n h = hashlib.sha1()\n text = salt+new_user.name\n h.update(text.encode('utf-8'))\n\n new_user.activation_key = h.hexdigest()\n new_user.save()\n\n subject = \"Your Work Schedule: Confirm registration\"\n text = (\n \"\"\"Hi {}, \\n please confirm Your registration by clicking or\n copy-past this link \\n {}/user_account/activate/{}/ \\n\n Please confirm with in 48 houers. Thank You for using our app.\n \\n Your Sandbox Team\n \"\"\".format(new_user.name, HOST_NAME, new_user.activation_key))\n\n send_mail(\n subject,\n text,\n EMAIL_HOST_USER,\n [new_user.email],\n fail_silently=False\n )\n return HttpResponseRedirect(self.get_success_url())",
"def test_user_invite_cant_edit_users(self):\n project = fake_clients.FakeProject(name=\"test_project\")\n\n setup_identity_cache(projects=[project])\n\n url = \"/v1/actions/InviteUser\"\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": project.id,\n \"roles\": \"project_admin,member,project_mod\",\n \"username\": \"user\",\n \"user_id\": \"test_user_id\",\n \"authenticated\": True,\n }\n data = {\n \"username\": \"new_user\",\n \"email\": \"[email protected]\",\n \"roles\": [\"member\"],\n \"project_id\": project.id,\n }\n response = self.client.post(url, data, format=\"json\", headers=headers)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.json(), {\"errors\": [\"actions invalid\"]})",
"def test_valid_account_create_is_a_developer(self):\n ident_choice = UserIdentificationLabel.objects.get(slug=\"ident1\")\n form_data = {\n 'invitation_code': '1234',\n 'email': '[email protected]',\n 'organization_name': 'transhealth',\n 'password1': 'BEDrocks@123',\n 'password2': 'BEDrocks@123',\n 'first_name': 'Hank',\n 'last_name': 'Flinstone',\n 'identification_choice': str(ident_choice.pk),\n }\n self.client.post(self.url, form_data, follow=True)\n up = UserProfile.objects.get(user__email='[email protected]')\n self.assertEqual(up.user_type, 'DEV')",
"async def check_can_edit_user(\n authorization_client: AuthorizationClient, req_user_id: str, user_id: str\n):\n admin_tuple, req_admin_tuple = await asyncio.gather(\n authorization_client.get_administrator(user_id),\n authorization_client.get_administrator(req_user_id),\n )\n\n if admin_tuple[1] is None:\n return True\n\n if req_admin_tuple[1] == AdministratorRole.FULL:\n return True\n\n return False",
"def test_anonymous_user_create_repeated_taskrun(self):\r\n\r\n with self.flask_app.test_request_context('/'):\r\n task = TaskFactory.create()\r\n taskrun1 = AnonymousTaskRunFactory.create(task=task)\r\n taskrun2 = AnonymousTaskRunFactory.build(task=task)\r\n assert_raises(Forbidden,\r\n getattr(require, 'taskrun').create,\r\n taskrun2)\r\n\r\n # But the user can still create taskruns for different tasks\r\n task2 = TaskFactory.create(app=task.app)\r\n taskrun3 = AnonymousTaskRunFactory.build(task=task2)\r\n assert_not_raises(Exception,\r\n getattr(require, 'taskrun').create,\r\n taskrun3)",
"def test_duplicate_signup_attempt(self):\n\n self.client.post(\n \"api/v2/auth/signup\", data=json.dumps(self.generic_user), content_type=\"application/json\")\n res = self.client.post(\n \"api/v2/auth/signup\", data=json.dumps(self.generic_user), content_type=\"application/json\")\n result = json.loads(res.data)\n self.assertEqual(result[\"Error\"], \"User already exists\")\n self.assertEqual(res.status_code, 409)",
"def is_user_change_required(self):\n return self.__running_user != self.__desired_user",
"def test_register_existing_email(self):\n response = self.client.post('/api/v2/auth/signup',\n data=json.dumps(users[0]),\n headers=self.admin_headers,\n content_type='application/json')\n self.assertEqual(response.status_code, 409)\n self.assertIn('user with email already registred', str(response.data))",
"def test_add_user_existing_with_role(self):\n\n project = fake_clients.FakeProject(name=\"test_project\")\n\n user = fake_clients.FakeUser(\n name=\"[email protected]\", password=\"123\", email=\"[email protected]\"\n )\n\n assignment = fake_clients.FakeRoleAssignment(\n scope={\"project\": {\"id\": project.id}},\n role_name=\"member\",\n user={\"id\": user.id},\n )\n\n setup_identity_cache(\n projects=[project], users=[user], role_assignments=[assignment]\n )\n\n url = \"/v1/actions/InviteUser\"\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": project.id,\n \"roles\": \"project_admin,member,project_mod\",\n \"username\": \"[email protected]\",\n \"user_id\": \"test_user_id\",\n \"authenticated\": True,\n }\n data = {\n \"email\": \"[email protected]\",\n \"roles\": [\"member\"],\n \"project_id\": project.id,\n }\n response = self.client.post(url, data, format=\"json\", headers=headers)\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.json(), {\"notes\": [\"task created\"]})\n\n tasks = Task.objects.all()\n self.assertEqual(1, len(tasks))\n self.assertTrue(tasks[0].completed)",
"def test_activation_already_activated(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n profile = self.registration_profile.objects.get(user=new_user)\n self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n\n profile = self.registration_profile.objects.get(user=new_user)\n user, activated = self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n self.assertEqual(user, new_user)\n self.assertFalse(activated)",
"def test_add_user_existing(self):\n project = fake_clients.FakeProject(name=\"parent_project\")\n\n user = fake_clients.FakeUser(\n name=\"[email protected]\", password=\"123\", email=\"[email protected]\"\n )\n\n setup_identity_cache(projects=[project], users=[user])\n\n url = \"/v1/actions/InviteUser\"\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": project.id,\n \"roles\": \"project_admin,member,project_mod\",\n \"username\": \"[email protected]\",\n \"user_id\": \"test_user_id\",\n \"authenticated\": True,\n }\n data = {\n \"email\": \"[email protected]\",\n \"roles\": [\"member\"],\n \"project_id\": project.id,\n }\n response = self.client.post(url, data, format=\"json\", headers=headers)\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.json(), {\"notes\": [\"task created\"]})\n\n new_token = Token.objects.all()[0]\n url = \"/v1/tokens/\" + new_token.token\n data = {\"confirm\": True}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def signup():\n req = request.get_json()\n user = req['user']\n is_created = views.UserManagement().create(user)\n if not is_created:\n return jsonify(msg.ALREADY_USE), 400\n\n return jsonify(msg.SUCCESS), 200"
] | [
"0.6319128",
"0.62552834",
"0.61908275",
"0.615733",
"0.61488724",
"0.60932577",
"0.60647994",
"0.6049347",
"0.59604836",
"0.5944117",
"0.59132886",
"0.59021646",
"0.5847654",
"0.5742341",
"0.5733471",
"0.5727593",
"0.56754845",
"0.5663465",
"0.56541914",
"0.5634608",
"0.56052333",
"0.56027997",
"0.55771047",
"0.5551926",
"0.55470055",
"0.55459267",
"0.5544949",
"0.554209",
"0.55405736",
"0.55328184"
] | 0.6379207 | 0 |
Set up form data. | def setUpFormData(self):
self.formData = {'labGroup': '5', 'abbrev': 'etoh', 'name': 'ethanol', 'CAS_ID': '64-17-5', 'CSID': '682',
'chemicalClasses': [ChemicalClass.objects.get(label='Solv').pk]} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setUp(self):\n self.user = User.objects.get(username='Aslan')\n self.user.save()\n self.setUpFormData()\n self.form = CompoundForm(self.user, self.formData)",
"def initial_form_data(self, request, step, form):\n return None",
"def set_form(self, form):\n self.parameters = form",
"def setup(self):\r\n self.text_input_values = {}\r\n if self.tag == 'radiotextgroup':\r\n self.html_input_type = \"radio\"\r\n elif self.tag == 'checkboxtextgroup':\r\n self.html_input_type = \"checkbox\"\r\n else:\r\n raise Exception(\"ChoiceGroup: unexpected tag {0}\".format(self.tag))\r\n\r\n if self.value == '':\r\n # Make `value` an empty dictionary, if it currently has an empty\r\n # value. This is necessary because the template expects a\r\n # dictionary.\r\n self.value = {}\r\n self.choices = self.extract_choices(self.xml)",
"def __init__(self):\n self.model = self.load_model()\n self.form_html = self.create_form_html()",
"def make_form(self):",
"def __init__(self):\n self.constant_fields = {}\n self.post_score_renames = {}\n self.form = None\n self.form_field_regex = None\n self.field_count = None\n\n self.set_generic_fields()\n self.set_specific_fields()\n self.set_post_score_renames()",
"def test_get_configuration_start_form_data(self):\n pass",
"def test_get_start_form_data(self):\n pass",
"def prepare(self, form):\n \n return form",
"def from_form_data(self, data: dict = {}):\n with logging.LogCall(__file__, \"from_form_data\", self.__class__):\n pass",
"def form_tweaks(self):\n pass",
"def form(self, form):\n\n self._form = form",
"def form(self, form):\n\n self._form = form",
"def populate_form(self, **kwargs):\n for name, value in kwargs.items():\n self.populate_field(name, value)",
"def minimal_form_data():\n\n form_data = { \n 'status': '0',\n 'title': 'Recurso de teste',\n 'description': 'Recurso para testes',\n 'abstract': 'Resumo',\n \n 'main-descriptor-content_type-object_id-TOTAL_FORMS': '0', \n 'main-descriptor-content_type-object_id-INITIAL_FORMS': '0',\n\n 'main-keyword-content_type-object_id-TOTAL_FORMS': '0', \n 'main-keyword-content_type-object_id-INITIAL_FORMS': '0',\n\n 'main-resourcethematic-content_type-object_id-TOTAL_FORMS': '0',\n 'main-resourcethematic-content_type-object_id-INITIAL_FORMS': '0',\n }\n\n return form_data",
"def fill_defaults(self):\n\n self.forms = {} # form_name: form object\n self.models = {} # model_name: model object\n self.calc = None\n\n self.extra_data_strings = [\n # (USE_STRING, FORM_PREFIX, FormClass),\n # (\"MAIN\", \"main\", CalcExtraDataS001),\n ]\n self.bolt_array_strings = [\n # (USE_STRING, FORM_PREFIX, BOLT_FORM_PREFIX),\n # (\"MAIN\", \"main\", \"main\"),\n ]\n self.bolt_strings = [\n # (USE_STRING, FORM_PREFIX),\n # (\"MAIN\", \"main\")\n ]\n self.force_strings = [\n # (USE_STRING, FORM_PREFIX, FORM_TYPE),\n # (\"ULS\", \"uls\", \"FULL\"),\n # (\"SLS\", \"sls\", \"FULL\"),\n ]\n self.profile_strings = [\n # TYPE_LIST=None is all types\n # (USE_STRING, FORM_PREFIX, TYPE_LIST, CAN_BE_ANY),\n # (\"LANDING\", \"landing\", None, False),\n # (\"ARRIVING\", \"beam\", [\"L\", \"U\"], True),\n ]\n self.plate_strings = [\n # FORM_TYPE's -> BOLTED, BOLTED_STIFFENER, WELDED, DUMMY\n # (USE_STRING, FORM_PREFIX, FORM_TYPE),\n # (\"END_PLATE\", \"endplate\", \"BOLTED\"),\n ]\n self.cover_plates_selection_strings = [\n # (USE_STRING, FORM_PREFIX),\n # (\"MAIN\", \"main\"),\n ]\n self.position_strings = [\n # (USE_STRING, FORM_PREFIX, PROFILE_PREFIX),\n # (\"MAIN\", \"main\", \"landing\"),\n ]\n self.extra_bracing_strings = [\n # (USE_STRING, FORM_PREFIX),\n # (\"MAIN\", \"bracings\"),\n ]\n self.sheetnumber = \"X001\"\n self.code = \"EC3\"\n self.solver = None",
"def __init__(self, form_name, request):\n self.form_name = form_name\n self.request = request\n\n # get request data\n if self.request.POST:\n self.request_data = self.request.POST\n else:\n self.request_data = self.request.GET\n\n self.files = self.request.FILES\n\n # initialize values\n self.valid = None\n self.form_class = None\n self.form = None\n self.page = None\n self.record = None\n self.key = None\n self.template_file = None\n self.error = None",
"def initial_formset_data(self, request, step, formset):\n return None",
"def form_data(self, clear=[], **kwargs):\n form_data = {\n 'payer_name': 'William Williams',\n 'billing_address': '1 Main Street',\n 'billing_city': 'Anytown',\n 'country': 'USA',\n 'billing_state': 'MD',\n 'billing_zip': '20852',\n 'payment_type': 'CreditCard',\n 'project_code': '15-4FF',\n 'payment_amount': '3000',\n 'information_consent': True,\n }\n for key in clear:\n del form_data[key]\n for key, value in kwargs.items():\n form_data[key] = value\n return form_data",
"def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n placeholders = {\n \"first_name\": \"First Name\",\n \"last_name\": \"Last Name\",\n \"default_phone_num\": \"Phone Number\",\n \"default_passport_num\": \"Passport Number\",\n }\n\n self.fields[\"default_phone_num\"].widget.attrs[\"autofocus\"] = True\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs[\"placeholder\"] = placeholder\n self.fields[field].widget.attrs[\n \"class\"\n ] = \"border-black rounded-0 \\\n all-form-input\"\n self.fields[field].label = False\n self.helper = FormHelper()\n self.helper.form_tag = True\n self.helper.layout = Layout(\n Div(\n Field(\n \"first_name\",\n ),\n Field(\n \"last_name\",\n ),\n Field(\n \"default_phone_num\",\n ),\n Field(\n \"default_passport_num\",\n ),\n ),\n ButtonHolder(\n Submit(\"submit\", \"Save\", css_class=\"m-0 btn btn-outline\"),\n ),\n )",
"def __init__(self, handler=None, formdata=None, obj=None, prefix='', **kwargs):\n if handler:\n self._handler = handler\n super(Form, self).__init__(formdata=TornadoInputWrapper(self._handler), obj=obj, prefix=prefix, **kwargs)",
"def controls_setup(self):\n\n self.email = element.TextBox(self, dom_id='mailing-list-email', alias='E-mail Textbox')\n self.close = element.Button(self, button_type='button', css_selector='.mailing-list-confirm .btn-close',\n alias='Close Button')\n self.signup = element.Button(self, css_selector='form.slide-left button[type=submit]', alias='Subscribe Button')",
"def __init__(self, *args, **kwargs):\n user = None\n if 'user' in kwargs:\n user = kwargs.pop('user')\n super(PersonForm, self).__init__(*args, **kwargs)\n if user:\n self.fields['username'].initial = user.username\n self.fields['first_name'].initial = user.first_name\n self.fields['last_name'].initial = user.last_name\n self.fields['email_address'].initial = user.email\n self.fields.keyOrder = [\n 'id', 'username', 'first_name', 'middle_name', 'last_name',\n 'email_address', 'gender',\n 'new_password', 'confirm_new_password', 'signature',\n 'signature_html', 'time_zone', 'language', 'show_signatures',\n 'avatar', 'autosubscribe', 'comment'\n ]",
"def fillData(self):\n self.textname.SetValue(c.getUserName())\n self.textphon.SetValue(str(c.getPhone()))\n self.textcarr.SetValue(c.getCarrier())\n self.smsfin.SetValue(c.getSmsFinished())\n self.smserr.SetValue(c.getSmsError())",
"def _form_data(self, response):\n SQFI_audit_type = response.xpath(self.filters[6]).extract_first()\n SQFI_audit_type_val = response.xpath(self.filters[7]).extract_first()\n food_sector_categories = response.xpath(self.filters[8]).extract_first()\n food_sector_categories_val = response.xpath(self.filters[9]).extract()\n audit_rating = response.xpath(self.filters[10]).extract_first()\n audit_rating_val = response.xpath(self.filters[11]).extract()\n country = response.xpath(self.filters[12]).extract_first()\n country_val = response.xpath(self.filters[13]).extract()\n form_data = {\n SQFI_audit_type: SQFI_audit_type_val,\n food_sector_categories: food_sector_categories_val,\n audit_rating: audit_rating_val,\n country: country_val,\n }\n return form_data",
"def setUpFormData(self):\n super(NoCAS, self).setUpFormData()\n self.formData['CAS_ID'] = ''",
"def get_form_kwargs(self):\n kwargs = {'initial': self.get_initial()}\n if self.request.method in ('POST', 'PUT'):\n kwargs.update({\n 'data': self.request.POST,\n 'files': self.request.FILES,\n })\n kwargs.update(self.get_additional_form_args())\n return kwargs",
"def controls_setup(self):\n\n self.supervisor_name = element.Link(self, css_selector='th:nth-child(1) > a',\n alias=\"Admin Supervisor Name Link\")\n self.username = element.Caption(self, css_selector='td:nth-child(2)', alias=\"Username\")\n self.email = element.Caption(self, css_selector='td:nth-child(3)', alias=\"Email\")\n self.active_start_date = element.Link(self, css_selector='td:nth-child(4)', alias=\"Active Start Date Text\")\n self.active_end_date = element.Link(self, css_selector='td:nth-child(5)', alias=\"Active End Date Text\")",
"def controls_setup(self):\n\n self.date_received = element.Link(self, alias=\"Date Received\",\n css_selector='td:nth-child(1) > a', angular=True)\n self.job_type = element.Caption(self, alias=\"Job Type\", css_selector='td:nth-child(2)', angular=True)\n self.description = element.Caption(self, alias=\"Client Name\", css_selector='td:nth-child(3)', angular=True)\n self.address = element.Caption(self, alias=\"Address\", css_selector='td:nth-child(4)', angular=True)\n self.suburb = element.Caption(self, alias=\"Suburb\", css_selector='td:nth-child(5)', angular=True)\n self.client = element.Caption(self, alias=\"Client\", css_selector='td:nth-child(6)', angular=True)"
] | [
"0.68016547",
"0.6669204",
"0.6392889",
"0.62460214",
"0.62324643",
"0.6223161",
"0.61881846",
"0.61854374",
"0.61827356",
"0.6179994",
"0.61214954",
"0.60462725",
"0.60309213",
"0.60309213",
"0.6017674",
"0.6009395",
"0.5995892",
"0.59803677",
"0.5944125",
"0.59229565",
"0.58933836",
"0.5889531",
"0.5802188",
"0.57903075",
"0.5769061",
"0.57664645",
"0.5759687",
"0.5739451",
"0.57287264",
"0.57263345"
] | 0.6727658 | 1 |
Set up lab group for tests. | def setUp(self):
self.labGroup = LabGroup.objects.makeLabGroup(
title="LegacyPassTest1", address='1, war drobe, Narnia', email='[email protected]', access_code='old_magic')
self.labGroup.save()
super(NoLabForUser, self).setUp() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setUp(self):\n self.labGroup = LabGroup.objects.makeLabGroup(\n 'test', 'War Drobe', '[email protected]', 'ancient_magic')\n self.labGroup.save()\n self.labGroup.users.add(User.objects.get(username='Aslan'))\n self.labGroup.save()\n self.response = self.s.get(self.url, params=self.params)",
"def test_000_add_group(self):\n # This case is always passed because it's tested in setup_module,\n # If setup module fails, this case will never run\n pass",
"def setUpContainer(self):\n self.dev1 = Device(name='dev1')\n eg = ElectrodeGroup(name='elec1',\n description='a test ElectrodeGroup',\n location='a nonexistent place',\n device=self.dev1)\n return eg",
"def setUp(self):\r\n # Ensure that the superclass sets up\r\n super(ContainerBase, self).setUp()\r\n\r\n self.auth_page = AutoAuthPage(self.browser, staff=True)\r\n self.outline = CourseOutlinePage(\r\n self.browser,\r\n self.course_info['org'],\r\n self.course_info['number'],\r\n self.course_info['run']\r\n )\r\n\r\n self.container_title = \"\"\r\n self.group_a = \"Expand or Collapse\\nGroup A\"\r\n self.group_b = \"Expand or Collapse\\nGroup B\"\r\n self.group_empty = \"Expand or Collapse\\nGroup Empty\"\r\n self.group_a_item_1 = \"Group A Item 1\"\r\n self.group_a_item_2 = \"Group A Item 2\"\r\n self.group_b_item_1 = \"Group B Item 1\"\r\n self.group_b_item_2 = \"Group B Item 2\"\r\n\r\n self.group_a_handle = 0\r\n self.group_a_item_1_handle = 1\r\n self.group_a_item_2_handle = 2\r\n self.group_empty_handle = 3\r\n self.group_b_handle = 4\r\n self.group_b_item_1_handle = 5\r\n self.group_b_item_2_handle = 6\r\n\r\n self.group_a_item_1_action_index = 0\r\n self.group_a_item_2_action_index = 1\r\n\r\n self.duplicate_label = \"Duplicate of '{0}'\"\r\n self.discussion_label = \"Discussion\"\r\n\r\n self.setup_fixtures()\r\n\r\n self.auth_page.visit()",
"def setUp(self):\r\n super(TestAnswerDistributions, self).setUp()\r\n\r\n self.homework = self.add_graded_section_to_course('homework')\r\n self.add_dropdown_to_section(self.homework.location, 'p1', 1)\r\n self.add_dropdown_to_section(self.homework.location, 'p2', 1)\r\n self.add_dropdown_to_section(self.homework.location, 'p3', 1)\r\n self.refresh_course()",
"def __init__(self, name):\n self.name = name\n\n # test groups are added with add_test_groups()\n self.test_groups = dict()",
"def test_add_group(self):\n pass",
"def setUp(self) -> None:\n super().setUp()\n\n self.learner_group_model = learner_group_models.LearnerGroupModel(\n id='learner_group_32',\n title='title',\n description='description',\n facilitator_user_ids=['facilitator_1', 'facilitator_2'],\n learner_user_ids=['learner_1', 'learner_2', 'learner_3'],\n invited_learner_user_ids=['invited_user_1', 'invited_user_2'],\n subtopic_page_ids=['subtopic_1', 'subtopic_2'],\n story_ids=['story_1', 'story_2'])\n self.learner_group_model.update_timestamps()\n self.learner_group_model.put()",
"def setUp(self):\n lang = self._sim_lang\n self._simulator = self._find_resource(\n f\"drake/examples/hardware_sim/hardware_sim_{lang}\")\n self._example_scenarios = self._find_resource(\n \"drake/examples/hardware_sim/example_scenarios.yaml\")\n self._test_scenarios = self._find_resource(\n \"drake/examples/hardware_sim/test/test_scenarios.yaml\")\n self._default_extra = {\n # For our smoke test, exit fairly quickly.\n \"simulation_duration\": 0.0625,\n }",
"def test_trivial(self):\n group = Group()",
"def test_create_group(self):\n pass",
"def test_create_group(self):\n pass",
"def setUpClass(cls):\n super().setUpClass()\n cls.group = Group.objects.create(\n title=TEST_GROUP_NAME,\n slug=TEST_GROUP_SLUG\n )",
"def setUp(self):\n\t\tself.testCases = [\n\t\t\t{\n\t\t\t\t'show': \"House\",\n\t\t\t\t'episode': 11,\n\t\t\t\t'season': 3,\n\t\t\t\t'title': \"Words and Deeds\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t'show': \"Lost\",\n\t\t\t\t'episode': 21,\n\t\t\t\t'season': 2,\n\t\t\t\t'title': \"?\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t'show': \"Heroes\",\n\t\t\t\t'episode': 15,\n\t\t\t\t'season': 1,\n\t\t\t\t'title': \"Run!\"\n\t\t\t}\n\t\t]",
"def setUp(self):\n module = __import__(self.moduleName)\n components = self.moduleName.split('.')\n for component in components[1:]:\n module = getattr(module, component)\n \n self.suite.addTest(self._getTestSuite(module = module))",
"def setUp(self) -> None:\n create_test_categories()",
"def setUp(cls):\n cls.directory = os.path.join(os.path.dirname(os.path.dirname(rmgpy.__file__)), 'examples', 'arkane')\n cls.level_of_theory = LevelOfTheory(\"cbs-qb3\")\n cls.frequencyScaleFactor = 0.99\n cls.useHinderedRotors = False\n cls.useBondCorrections = True",
"def __init__(self, test_group, resolution, coord_type):\n name = 'restart_test'\n self.resolution = resolution\n self.coord_type = coord_type\n subdir = '{}/{}/{}'.format(resolution, coord_type, name)\n super().__init__(test_group=test_group, name=name,\n subdir=subdir)\n\n self.add_step(\n InitialState(test_case=self, resolution=resolution))\n self.add_step(\n SshAdjustment(test_case=self, ntasks=4, openmp_threads=1))\n\n for part in ['full', 'restart']:\n name = '{}_run'.format(part)\n step = Forward(test_case=self, name=name, subdir=name, ntasks=4,\n openmp_threads=1, resolution=resolution,\n with_frazil=True)\n\n step.add_namelist_file(\n 'compass.ocean.tests.ice_shelf_2d.restart_test',\n 'namelist.{}'.format(part))\n step.add_streams_file(\n 'compass.ocean.tests.ice_shelf_2d.restart_test',\n 'streams.{}'.format(part))\n self.add_step(step)\n\n self.add_step(Viz(test_case=self), run_by_default=False)",
"def __init__(self, test_group):\n super().__init__(test_group=test_group, name='restart_test')\n\n self.add_step(\n SetupMesh(test_case=self, initial_condition='zero'))\n\n name = 'full_run'\n step = RunModel(test_case=self, name=name, subdir=name, ntasks=4,\n openmp_threads=1)\n # modify the namelist options and streams file\n step.add_namelist_file(\n 'compass.landice.tests.hydro_radial.restart_test',\n 'namelist.full', out_name='namelist.landice')\n step.add_streams_file(\n 'compass.landice.tests.hydro_radial.restart_test',\n 'streams.full', out_name='streams.landice')\n self.add_step(step)\n\n input_dir = name\n name = 'visualize_{}'.format(name)\n step = Visualize(test_case=self, name=name, subdir=name,\n input_dir=input_dir)\n self.add_step(step, run_by_default=False)\n\n name = 'restart_run'\n step = RunModel(test_case=self, name=name, subdir=name, ntasks=4,\n openmp_threads=1,\n suffixes=['landice', 'landice.rst'])\n\n # modify the namelist options and streams file\n step.add_namelist_file(\n 'compass.landice.tests.hydro_radial.restart_test',\n 'namelist.restart', out_name='namelist.landice')\n step.add_streams_file(\n 'compass.landice.tests.hydro_radial.restart_test',\n 'streams.restart', out_name='streams.landice')\n\n step.add_namelist_file(\n 'compass.landice.tests.hydro_radial.restart_test',\n 'namelist.restart.rst', out_name='namelist.landice.rst')\n step.add_streams_file(\n 'compass.landice.tests.hydro_radial.restart_test',\n 'streams.restart.rst', out_name='streams.landice.rst')\n self.add_step(step)\n\n input_dir = name\n name = 'visualize_{}'.format(name)\n step = Visualize(test_case=self, name=name, subdir=name,\n input_dir=input_dir)\n self.add_step(step, run_by_default=False)",
"def setUp(self):\n self.log = mock_log().bind(base_log=True)\n self.state = GroupState('tid', 'gid', 'g', {}, {}, None, {}, True,\n ScalingGroupStatus.ACTIVE)\n self.group = mock_group(self.state)",
"def setup(self, stage: Optional[str] = None):\n if stage in (None, 'fit'):\n # Get a 20% of the train data for validation in a stratified way.\n _x = [i[1] for i in self.splits['train']]\n _y = [i[0] for i in self.splits['train']]\n\n _train_x, _val_x, _train_y, _val_y = train_test_split(_x, _y, test_size=0.2,\n stratify=_y)\n #print(np.unique(_train_y, return_counts=True))\n #print(np.unique(_val_y, return_counts=True))\n\n self.splits['train'] = [[i, j] for i,j in zip(_train_y, _train_x)]\n self.splits['valid'] = [[i, j] for i,j in zip(_val_y, _val_x)]\n\n self.datasets['train'] = FewShotDataset(self.splits['train'], self.ops)\n self.datasets['valid'] = FewShotDataset(self.splits['valid'], self.ops)\n\n if stage in (None, 'test'):\n self.datasets['test'] = FewShotDataset(self.splits['test'], self.ops)",
"def _set_up():\n repl._setUp = self.setUp",
"def setup(self, stage=None):\n self.data_train, self.data_val, self.data_test = [None] * 3",
"def tearDown(self):\n self.labGroup.delete()\n super(NoLabForUser, self).tearDown()",
"def __init__(self, test_group, name, subdir=None):\n self.name = name\n self.mpas_core = test_group.mpas_core\n self.test_group = test_group\n if subdir is not None:\n self.subdir = subdir\n else:\n self.subdir = name\n\n self.path = os.path.join(self.mpas_core.name, test_group.name,\n self.subdir)\n\n # steps will be added by calling add_step()\n self.steps = dict()\n self.steps_to_run = list()\n\n # these will be set during setup\n self.config = None\n self.config_filename = None\n self.work_dir = None\n self.base_work_dir = None\n # may be set during setup if there is a baseline for comparison\n self.baseline_dir = None\n\n # these will be set when running the test case\n self.new_step_log_file = True\n self.logger = None\n self.log_filename = None\n self.validation = None",
"def setUp(self):\n # Disable log messages to silence expected warnings\n cfdm.log_level(\"DISABLE\")\n # Note: to enable all messages for given methods, lines or\n # calls (those without a 'verbose' option to do the same)\n # e.g. to debug them, wrap them (for methods, start-to-end\n # internally) as follows:\n #\n # cfdm.log_level('DEBUG')\n # < ... test code ... >\n # cfdm.log_level('DISABLE')\n\n nc_group_structure_names = [\n None,\n \"/\",\n \"group/...\",\n \"group/\",\n \"group/.../\",\n \"/group/.../\",\n ]\n self.nc_grouped_dimension_names = [\n obj.replace(\"...\", \"ncdim\")\n for obj in nc_group_structure_names\n if obj is not None\n ]\n self.nc_grouped_variable_names = [\n obj.replace(\"...\", \"ncvar\")\n for obj in nc_group_structure_names\n if obj is not None\n ]",
"def setUp(self):\n self.name = \"branch-rickey-papers\"\n self.project = \"test-project\"\n self.item_id = \"mss37820001\"",
"def setUp(self):\n self.supvisors = DummySupvisors()",
"def setUp(self):\n self.testdatapath = os.path.join(mkdtemp())\n self.testfilenames = [\n os.path.join(self.testdatapath, \"0035.nc\"),\n os.path.join(self.testdatapath, \"0107.nc\")\n ]\n\n self.gpis = [1, 10, 11, 12]\n reg_grid = grids.genreg_grid().to_cell_grid()\n self.grid = reg_grid.subgrid_from_gpis(self.gpis)",
"def setUp(self):\n self.setup_beets()"
] | [
"0.6833717",
"0.6591932",
"0.6449999",
"0.63306993",
"0.630889",
"0.62895",
"0.62602586",
"0.6256517",
"0.62400544",
"0.6221543",
"0.6106464",
"0.6106464",
"0.61008394",
"0.60976285",
"0.6095497",
"0.60188913",
"0.60178053",
"0.60139465",
"0.6004077",
"0.59957445",
"0.59734666",
"0.5968385",
"0.5953602",
"0.5950467",
"0.59449524",
"0.59429187",
"0.59408396",
"0.59401584",
"0.5907364",
"0.589605"
] | 0.68219155 | 1 |
Make sure that saving a valid form works. | def test_saving(self):
if self.form.is_valid():
self.compound = self.form.save()
self.assertIsNotNone(self.compound.id) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def form_valid(self, form):\n form.save()\n return super().form_valid(form)",
"def form_valid(self, form, factura_form, remito_form, ot_linea_form):\n form.save()\n factura_form.save()\n remito_form.save()\n ot_linea_form.save()\n return HttpResponseRedirect(self.get_success_url())",
"def form_valid(self, form):\n if self.fields is None:\n self.object.__dict__.update({\n field.name:form.cleaned_data[field.name] for field in form.visible_fields()\n })\n else:\n self.object.__dict__.update({\n field:form.cleaned_data[field] for field in self.fields\n })\n self.object.save()\n if self.request.is_ajax():\n return self.ajax_form_valid()\n else:\n return HttpResponseRedirect(self.get_success_url())",
"def form_valid(self, form, factura_form, remito_form, ot_linea_form):\n self.object = form.save()\n factura_form.instance = self.object\n factura_form.save()\n remito_form.instance = self.object\n remito_form.save()\n ot_linea_form.instance = self.object\n ot_linea_form.save()\n return HttpResponseRedirect(self.get_success_url())",
"def form_valid(self, form, factura_form, ot_linea_form):\n form.save()\n factura_form.save()\n ot_linea_form.save()\n return HttpResponseRedirect(self.get_success_url())",
"def form_valid(self, form):\n self.object = form.save(commit=False) # don't save M2M fields\n\n # Need to save that object because of commit=False previously.\n # This doesn't save our troublesome M2M field.\n self.object.save()\n\n # saving intermediary M2M model: Qualification\n for lesson in form.cleaned_data['lessons']:\n Qualification.objects.create(lesson=lesson, person=self.object)\n\n # Important: we need to use ModelFormMixin.form_valid() here!\n # But by doing so we omit SuccessMessageMixin completely, so we need to\n # simulate it. The code below is almost identical to\n # SuccessMessageMixin.form_valid().\n response = super(ModelFormMixin, self).form_valid(form)\n success_message = self.get_success_message(form.cleaned_data)\n if success_message:\n messages.success(self.request, success_message)\n return response",
"def form_valid(self, form, factura_form, ot_linea_form):\n self.object = form.save()\n factura_form.instance = self.object\n factura_form.save()\n ot_linea_form.instance = self.object\n ot_linea_form.save()\n return HttpResponseRedirect(self.get_success_url())",
"def test_that_view_saves_data_if_form_valid(self):\n\n self.client.login(username='admin', password='admin')\n url = reverse(\"to_form\", args=str(self.my_instance.id))\n response = self.client.post(url, data={'name': 'Oleg', 'surname': 'Senyshyn', 'date': date(1995, 05, 03),\n 'email': '[email protected]', 'skype': 'sen9a1990'}, format='json')\n self.assertEqual('Data has been edit', json.loads(response.content)['ok'])\n my_instance = Contact.objects.first()\n self.assertEqual('Oleg', my_instance.name)\n self.assertEqual('Senyshyn', my_instance.surname)\n self.assertEqual(date(1995, 05, 03), my_instance.date)\n self.assertEqual('[email protected]', my_instance.email)\n self.assertEqual('sen9a1990', my_instance.skype)",
"def form_valid(self, form, ot_linea_form):\n form.save()\n ot_linea_form.save()\n return HttpResponseRedirect(self.get_success_url())",
"def form_valid(self, form, ot_linea_form):\n form.save()\n ot_linea_form.save()\n return HttpResponseRedirect(self.get_success_url())",
"def test_cleaned_data_worked(self):\n pass",
"def form_valid(self, form, cruiseday_form, participant_form, document_form, equipment_form, invoice_form):\r\n\r\n\t\tis_submitting = False\r\n\r\n\t\tCruise = form.save(commit=False)\r\n\t\t\r\n\t\tCruise.leader = self.request.user\r\n\t\ttry:\r\n\t\t\tCruise.organization = Cruise.leader.userdata.organization\r\n\t\texcept:\r\n\t\t\tpass\r\n\t\tform.cleaned_data[\"leader\"] = self.request.user\r\n\r\n\t\tif hasattr(self, \"request\"):\r\n\t\t\t# check whether we're saving or submitting the form\r\n\t\t\tif self.request.POST.get(\"save_cruise\"):\r\n\t\t\t\tCruise.is_submitted = False\r\n\t\t\t\tmessages.add_message(self.request, messages.SUCCESS, mark_safe('Cruise successfully saved. You may edit and submit it on the \"<a href=\"/user/cruises/unsubmitted/\">Unsubmitted Cruises</a>\" page.'))\r\n\t\t\telif self.request.POST.get(\"submit_cruise\"):\r\n\t\t\t\tis_submitting = True\r\n\r\n\t\t\t\tcruiseday_form = CruiseDayFormSet(self.request.POST)\r\n\t\t\t\tparticipant_form = ParticipantFormSet(self.request.POST)\r\n\t\t\t\tcruise_days = cruiseday_form.cleaned_data\r\n\t\t\t\tcruise_participants = participant_form.cleaned_data\r\n\t\t\t\tcruise_invoice = invoice_form.cleaned_data\r\n\t\t\t\tif (Cruise.is_submittable(user=self.request.user, cleaned_data=form.cleaned_data, cruise_invoice=cruise_invoice, cruise_days=cruise_days, cruise_participants=cruise_participants)):\r\n\t\t\t\t\tCruise.is_submitted = True\r\n\t\t\t\t\tCruise.submit_date = timezone.now()\r\n\t\t\t\t\tmessages.add_message(self.request, messages.SUCCESS, mark_safe('Cruise successfully submitted. You may track its approval status on the \"<a href=\"/user/cruises/submitted/\">Submitted Cruises</a>\" page.'))\r\n\t\t\t\telse:\r\n\t\t\t\t\tCruise.is_submitted = False\r\n\t\t\t\t\tmessages.add_message(self.request, messages.ERROR, mark_safe('Cruise could not be submitted:' + str(Cruise.get_missing_information_string(cleaned_data=form.cleaned_data, cruise_invoice=cruise_invoice, cruise_days=cruise_days, cruise_participants=cruise_participants)) + '<br>If you decide to do this later, you can get back to this cruise to review and add any missing or invalid information on the \"<a href=\"/user/cruises/unsubmitted/\">Unsubmitted Cruises</a>\" page.'))\r\n\t\t\telse:\r\n\t\t\t\tCruise.is_submitted = False\r\n\t\t\t\tmessages.add_message(self.request, messages.ERROR, mark_safe('Cruise could not be submitted: We were unable to determine the action you wished to take on submit. Please try to submit again below.'))\r\n\r\n\t\tCruise.save()\r\n\t\tself.object = form.save()\r\n\t\tcruiseday_form.instance = self.object\r\n\t\tcruiseday_form.save()\r\n\t\tparticipant_form.instance = self.object\r\n\t\tparticipant_form.save()\r\n\t\tdocument_form.instance = self.object\r\n\t\tdocument_form.save()\r\n\t\tequipment_form.instance = self.object\r\n\t\tequipment_form.save()\r\n\t\tinvoice_form.instance = self.object\r\n\t\tinvoice_form.save()\r\n\r\n\t\treturn HttpResponseRedirect(self.get_success_url(is_submitting, Cruise))",
"def pre_save(self):\r\n self.validate()",
"def test_form_create(self):\n create = {\n 'title': 'Last Post (Final)',\n 'content': '### Goodbye!',\n 'is_published': False,\n }\n\n form = self.form_cls(create)\n print(form.errors)\n\n form.save()\n\n actual = models.Entry.objects.get(slug='last-post-final')\n self.assertEquals(actual.title, create['title'])\n self.assertEquals(actual.content.raw, create['content'])\n self.assertIsNone(actual.published_timestamp)",
"def form_valid(self, form, receita_ingrediente_form, receita_instrucao_form):\n self.object = form.save()\n receita_ingrediente_form.instance = self.object\n receita_ingrediente_form.save()\n receita_instrucao_form.instance = self.object\n receita_instrucao_form.save()\n return HttpResponseRedirect(self.get_success_url())",
"def test_submit_form_using_valid_data():",
"def form_valid(self, form, revision_form):\n self.object = form.save()\n\n revision_form.instance = self.object\n revision_form.save()\n\n return HttpResponseRedirect(self.get_success_url())",
"def form_valid(self, form, instrumento_linea_form):\n form.save()\n instrumento_linea_form.save()\n return HttpResponseRedirect(self.get_success_url())",
"def test_clean(self):\n doc = Document.objects.create()\n\n form = DocumentMergeForm()\n form.cleaned_data = {\n \"primary_document\": doc.id,\n \"rationale\": \"other\",\n \"rationale_notes\": \"\",\n }\n form.clean()\n assert len(form.errors) == 1\n\n # should not produce an error if rationale notes provided\n form = DocumentSearchForm()\n form.cleaned_data = {\n \"primary_document\": doc.id,\n \"rationale\": \"other\",\n \"rationale_notes\": \"test\",\n }\n form.clean()\n assert len(form.errors) == 0\n\n # should not produce an error if rational is \"duplicate\" or \"join\"\n form = DocumentSearchForm()\n form.cleaned_data = {\n \"primary_document\": doc.id,\n \"rationale\": \"duplicate\",\n \"rationale_notes\": \"\",\n }\n form.clean()\n assert len(form.errors) == 0",
"def form_valid(self, form, ot_linea_form):\n self.object = form.save()\n ot_linea_form.instance = self.object\n ot_linea_form.save()\n return HttpResponseRedirect(self.get_success_url())",
"def form_valid(self, form, ot_linea_form):\n self.object = form.save()\n ot_linea_form.instance = self.object\n ot_linea_form.save()\n return HttpResponseRedirect(self.get_success_url())",
"def form_valid(self, form, instrumento_linea_form):\n self.object = form.save()\n instrumento_linea_form.instance = self.object\n instrumento_linea_form.save()\n return HttpResponseRedirect(self.get_success_url())",
"def form_valid(self, forms):\n try:\n taxpayer = self.save_taxpayer(forms)\n self.save_contact(forms, taxpayer, self.save_address(forms, taxpayer))\n self.save_bankaccount(forms, taxpayer)\n messages.success(\n self.request,\n TAXPAYER_CREATION_SUCCESS_MESSAGE,\n )\n messages.success(\n self.request,\n THANKS,\n )\n except ObjectDoesNotExist:\n messages.error(\n self.request,\n TAXPAYER_CREATION_ERROR_MESSAGE\n )\n finally:\n return HttpResponseRedirect(self.get_success_url())",
"def test_saves_user_on_save(self):\n person = Person.objects.get(user__username='admin')\n personform = PersonForm(instance=person, data={'user': person.user.pk, 'name': 'has_changed'})\n \n if personform.is_valid():\n person = personform.save()\n self.assertEquals(User.objects.get(pk=person.user.pk).first_name, \\\n \"has_changed\")\n else:\n self.fail(personform.errors)\n # self.fail(\"personform not valid\")",
"def test_valid_form_true(self):\n form = UserRegisterForm(data=self.data)\n self.assertTrue(form.is_valid())",
"def test_clean(self):\n form = DocumentSearchForm()\n form.cleaned_data = {\"q\": \"\", \"sort\": \"relevance\"}\n form.clean()\n assert len(form.errors) == 1\n\n # Otherwise should not raise an error\n form = DocumentSearchForm()\n form.cleaned_data = {\"q\": \"test\", \"sort\": \"relevance\"}\n form.clean()\n assert len(form.errors) == 0\n form = DocumentSearchForm()\n form.cleaned_data = {\"q\": \"\", \"sort\": \"scholarship_desc\"}\n form.clean()\n assert len(form.errors) == 0",
"def form_valid(self, form, tarea_linea_form):\n form.save()\n tarea_linea_form.save()\n return HttpResponseRedirect(self.get_success_url())",
"def testFormValidates(self):\n sdq1 = getattr(self.s1, 'sdq1')\n app = self.app\n dummy_controller_state = ControllerState(\n id='base_edit',\n context=sdq1,\n button='submit',\n status='success',\n errors={},\n next_action=None,)\n controller = self.portal.portal_form_controller\n controller_state = controller.validate(dummy_controller_state, app.REQUEST, ['validate_base',])\n errors = controller_state.getErrors()\n errors = sdq1.post_validate(self.app.REQUEST, errors)\n assert errors == {}, \"Validation error raised: %s\" % controller_state.getErrors()",
"def form_valid(self, form):\n with create_revision():\n form_data = form.save(commit=False)\n bom_name = form.cleaned_data['bom_name']\n warehouse_name = form.cleaned_data['warehouse_name']\n initial_parts = form.initial.get('parts')\n updated_parts = form.cleaned_data.get('parts')\n # Using sets to find newly added parts by using set diff\n\n # Forming sets only with PartNumbers instead of PartsDict\n # to make comparisons easy\n initial_partNumbers = {\n part['PartNumber'] for part in initial_parts\n }\n updated_partNumbers = {\n part['PartNumber'] for part in updated_parts\n }\n newly_added_partNumbers = updated_partNumbers - initial_partNumbers\n removed_partNumbers = initial_partNumbers - updated_partNumbers\n\n # Reforming part dicts to access part attributes easily.\n newly_added_parts = [\n part for part in updated_parts\n if part['PartNumber'] in newly_added_partNumbers\n ]\n form_data.total_price = cal_total_price(bom_name)\n # Create outwards for parts used in asset creation.\n for part in newly_added_parts:\n part_obj = Parts.objects.get(part_number=part['PartNumber'])\n authorised_by = \"asset_auto_creation\"\n verified_by = \"asset_auto_creation\"\n mode_of_transport = \"asset_auto_creation\"\n remarks = \"This outward was created due to asset creation.\"\n\n Outward.objects.create(\n part_type=part_obj.part_type,\n part_number=part_obj,\n part_name=part_obj.part_name,\n quantity=part['Qty'],\n from_warehouse_name=warehouse_name,\n to_warehouse_name=warehouse_name,\n authorised_by=authorised_by,\n verified_by=verified_by,\n mode_of_transport=mode_of_transport,\n unit_of_measure=part_obj.unit_of_measure,\n remarks=remarks,\n )\n\n form_data.save()\n revisions.set_user(self.request.user)\n revisions.set_comment(\"Updated from web form.\")\n return_value = super().form_valid(form_data)\n\n return return_value",
"def check_is_saved(self):\n raise NotImplementedError()"
] | [
"0.68990594",
"0.68580776",
"0.6716657",
"0.6689772",
"0.66768503",
"0.65493596",
"0.6530574",
"0.64837015",
"0.64713764",
"0.64713764",
"0.6424546",
"0.64115185",
"0.6370163",
"0.63618284",
"0.6353641",
"0.63391066",
"0.6338812",
"0.6329545",
"0.6305311",
"0.62634265",
"0.62634265",
"0.62610596",
"0.6248327",
"0.62341994",
"0.6202981",
"0.6198477",
"0.6186506",
"0.6175546",
"0.61610705",
"0.61356485"
] | 0.7100611 | 0 |
Get data for the form and remove the CAS number. | def setUpFormData(self):
super(NoCAS, self).setUpFormData()
self.formData['CAS_ID'] = '' | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete_field(self, url, data):\r\n while data != []:\r\n for field in data:\r\n if 'id' in field:\r\n requests.delete(url + str(field['id']), headers=self.headers)\r\n data.clear()\r\n data = requests.get(url, headers=self.headers).json()['results']",
"def setUpFormData(self):\n super(InconsistentCSIDCAS, self).setUpFormData()\n self.formData['CAS_ID'] = '290-37-9'",
"def remove_field():\n _id = request.form['_id']\n data, code, message = FIELD_SERVICE.remove_field(_id)\n return __result(data, code, message)",
"def clearData(self):\r\n self.title.setVal(\"\")\r\n self.authorBox.clearData()\r\n self.addPrimeAuthorFn()",
"def setUpFormData(self):\n super(InconsistentCASName, self).setUpFormData()\n self.formData['CAS_ID'] = '290-37-9'\n self.formData['CSID'] = '8904' # consistent values for pyrazine.",
"def remove_form(self, form):\n idx = self.get_phase_form_index(form)\n if idx is None:\n raise ScheduleError(\"That form doesn't exist in the phase\")\n\n self.forms.remove(self.forms[idx])",
"def _process(self):\n self.output[\"data\"] = get_form_data(self.kwargs[\"collect\"].ona_scan_form_pk)",
"def clearData(self):\r\n self.title.setVal(\"\"),\r\n self.first.setVal(\"\"),\r\n self.middle.setVal(\"\"),\r\n self.last.setVal(\"\"),\r\n self.suffix.setVal(\"\"),\r\n self.phone.setVal(\"\"),\r\n self.ext.setVal(\"\"),\r\n self.email.setVal(\"\"),\r\n self.affiliation.setVal(\"\")\r\n self.fullName.setVal(\"\")",
"def _revert(self):\n self.kwargs[\"collect\"].reset_scan_form_data()",
"def delete(self):\n self.data = None",
"def remove_ei_form(self, form):\n if form in self.ei_forms:\n self.ei_forms.remove(form)",
"def display_record_removal_form_and_return_data():\n removal_date = st.date_input(\"Removal Date\", value=dt.date.today())\n credit_value: int = st.number_input(\n \"Credits\", value=0, min_value=0, max_value=1, step=1, format=\"%d\",\n )\n return removal_date, credit_value",
"def delete():\n global num_carte, code_secret, montant\n length_card = len(card_num_entry.get())\n if length_card in [5, 10, 15]:\n num_carte = num_carte[:-2]\n card_num.set(num_carte)\n else:\n num_carte = num_carte[:-1]\n card_num.set(num_carte)\n\n code_secret = code_secret[:-1]\n code.set(code_secret)\n\n montant = str(montant)[:-1]\n amount.set(montant)",
"def render_deletion_form(request: Request):\n provider_data = open_for_reading()\n return templates.TemplateResponse(\"deletion_form.html\", {\n \"request\": request,\n \"provider_data\": provider_data\n })",
"def run(self, data, config=None, pipeline=None):\n del data[self.field]\n return data",
"def initial_form_data(self, request, step, form):\n return None",
"def get_processed_form_data(form, form_element_entries):\n keys_to_remove = get_ignorable_form_fields(form_element_entries)\n values_to_remove = get_ignorable_form_values()\n\n field_name_to_label_map = \\\n get_field_name_to_label_map(form, keys_to_remove, values_to_remove)\n\n keys_to_remove = list(field_name_to_label_map.keys())\n\n return (\n field_name_to_label_map,\n get_cleaned_data(form, keys_to_remove, values_to_remove)\n )",
"def clear_fields(self):\n\n if not self.is_root:\n return\n self.field_data.clear()",
"def clear_form(form_id):\n db.execute('update form_field set parent_form_field = null'\n ' where form_id = %s', form_id) # remove foreign keys\n db.execute('delete from form_field where form_id = %s', form_id)",
"def clean(self):\n cleaned_data = super(EnterpriseCustomerAdminForm, self).clean()\n if 'catalog' in cleaned_data and not cleaned_data['catalog']:\n cleaned_data['catalog'] = None\n return cleaned_data",
"def clear(self):\n self._check_private_key(\"clear data\")\n headers = {'Phant-Private-Key': self.privateKey}\n self._delete(self.inputUrl(''), headers=headers)",
"def removeData(self, data: ghidra.program.model.listing.Data) -> None:\n ...",
"def clearValue(self):\n self.data = []",
"def clear():\r\n CURRENT_REQUEST_CONFIGURATION.data = {}",
"def _do_clear_page(self):\n self.txtCode.handler_block(self._lst_handler_id[0])\n self.txtCode.set_text('')\n self.txtCode.handler_unblock(self._lst_handler_id[0])\n\n self.txtName.handler_block(self._lst_handler_id[1])\n self.txtName.set_text('')\n self.txtName.handler_unblock(self._lst_handler_id[1])\n\n _buffer = self.txtRemarks.do_get_buffer()\n _buffer.handler_block(self._lst_handler_id[2])\n _buffer.set_text('')\n _buffer.handler_block(self._lst_handler_id[2])\n\n self.chkSafetyCritical.handler_block(self._lst_handler_id[3])\n self.chkSafetyCritical.set_active(False)\n self.chkSafetyCritical.handler_unblock(self._lst_handler_id[3])\n\n return None",
"def delete(self):\n return self.get_data()",
"def remove_rec(self):\n print(\"Write phone number:\")\n remove_phone_number_input = input()\n return self.storage.remove(remove_phone_number_input)",
"def remove_data(data=None): #clear\n data = get_data(data)\n shutil.rmtree(data)",
"def clean(self, data):\n required = {'admin_token', 'token'}\n api.validate(data, required)\n admin_token = data['admin_token']\n force = True\n self.credentials_module.authorize_admin(admin_token)\n token = data['token']\n containers = self.credentials_module.list_containers(token)\n if containers:\n self.docker_module.clean_containers(containers, force)\n exceptions.make_log(\"info\", \"Delete containers\")\n\n token_info = self.credentials_module.get_token(token)\n self.batch_module.clean_environment(token_info, admin_token)\n exceptions.make_log(\"info\", \"Batch system cleaned\")\n self.credentials_module.remove_token_from_cache(token)\n exceptions.make_log(\"info\", \"Delete token: %s\" % token)\n return token",
"def clearData():\n Co8PersistentData.__dataDict.clear()"
] | [
"0.56692374",
"0.56033903",
"0.55418986",
"0.5272646",
"0.5236702",
"0.52326304",
"0.5222763",
"0.5213981",
"0.5182077",
"0.5145639",
"0.5117664",
"0.5114514",
"0.51122105",
"0.5094855",
"0.5049387",
"0.50321674",
"0.5010105",
"0.49786502",
"0.49755615",
"0.49728656",
"0.4969505",
"0.49677208",
"0.49339727",
"0.49187404",
"0.49097496",
"0.48945776",
"0.48891535",
"0.48811027",
"0.48775247",
"0.48728475"
] | 0.6313918 | 0 |
Create inconsistency between CAS number and name. | def setUpFormData(self):
super(InconsistentCASName, self).setUpFormData()
self.formData['CAS_ID'] = '290-37-9'
self.formData['CSID'] = '8904' # consistent values for pyrazine. | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create(self, cr, user, vals, context=None):\n if ('name' not in vals) or (vals.get('name')=='/'): \n vals['name'] = self.pool.get('ir.sequence').get(cr, user, 'services.contracts.archive')\n return super(env_and_safety_allowances_archive, self).create(cr, user, vals, context)",
"def _mk_coref_id():\n num, alpha = int(_mk_coref_id.id[:-1]), _mk_coref_id.id[-1]\n if alpha == 'Z':\n alpha = 'A'\n num += 1\n else:\n alpha = chr(ord(alpha) + 1)\n\n _mk_coref_id.id = '%s%s' % (num, alpha)\n return _mk_coref_id.id",
"def make_name(self, offset, name, flags=0):\n self.ret = idc.set_name(offset, str(name), idc.SN_NOCHECK | idc.SN_NOWARN | 0x800)\n return self.ret",
"def _create_comparison(systems, info_name):\n info_comparison = COMPARISON_DIFFERENT\n\n system_id_values = [{'id': x['id'], 'value': x.get(info_name, \"FACT_NOT_SET\")} for x in systems]\n\n system_values = {system['value'] for system in system_id_values}\n\n if \"FACT_NOT_SET\" in system_values:\n info_comparison = COMPARISON_INCOMPLETE_DATA\n elif len(system_values) == 1:\n info_comparison = COMPARISON_SAME\n\n return {'name': info_name, 'state': info_comparison, 'systems': system_id_values}",
"def test_phonebook_with_numbers_that_prefix_one_another_is_inconsistent(self):\n self.phonebook.add(\"Bob\", \"12345\")\n self.phonebook.add(\"Mary\", \"123\")\n self.assertFalse(self.phonebook.is_consistent())",
"def make_id(self, name: str) -> str:\n # id_cache is intentionally mutable\n id = self.id_cache.get(name)\n if not id:\n id = 'epub-%d' % self.env.new_serialno('epub')\n self.id_cache[name] = id\n return id",
"def test_create_with_only_name(self):\n with OrionState() as cfg:\n name = \"bm00001\"\n with pytest.raises(NoConfigurationError) as exc:\n get_or_create_benchmark(cfg.storage, name).close()\n\n assert f\"Benchmark {name} does not exist in DB\" in str(exc.value)",
"def test_cannot_create_with_same_category_and_name(self):\n # Create an initial service\n self.project.services.create(name = \"service1\", category = self.category)\n # Then try to create the same service using the serializer\n serializer = ServiceSerializer(\n data = dict(name = \"service1\", category = self.category.pk),\n context = dict(project = self.project)\n )\n self.assertFalse(serializer.is_valid())\n self.assertEqual(serializer.errors['name'][0].code, 'unique')",
"def ensure_deterministic_id(name, storage, version=1, update=None):\n experiment = storage.fetch_experiments({\"name\": name, \"version\": version})[0]\n algo_lock_info = storage.get_algorithm_lock_info(uid=experiment[\"_id\"])\n\n storage.delete_experiment(uid=experiment[\"_id\"])\n storage.delete_algorithm_lock(uid=experiment[\"_id\"])\n\n _id = zlib.adler32(str((name, version)).encode())\n experiment[\"_id\"] = _id\n\n if experiment[\"refers\"][\"parent_id\"] is None:\n experiment[\"refers\"][\"root_id\"] = _id\n\n if update is not None:\n experiment.update(update)\n\n storage.create_experiment(\n experiment,\n algo_locked=algo_lock_info.locked,\n algo_state=algo_lock_info.state,\n algo_heartbeat=algo_lock_info.heartbeat,\n )",
"def _make_benchcontract_address(name):\n\n print(\"name: {}\".format(name))\n print(\"namespace: {}\".format(namespace))\n return namespace + hashlib.sha512(name.encode(\"utf-8\")).hexdigest()[0:64]",
"def __assign_name_id(self):\n if not self.name_id:\n self.name_id = str(BaseTicketing.objects.create())",
"def create(self, atomic_desc, atomic_numbers=[]):\n return self.acronym, []",
"def get_cas_enzyme(name, cas_file=CAS_PATH):\n for line in open(cas_file):\n if not line.startswith(\"#\"):\n cas = line.rstrip().split(\"\\t\")\n if cas[0] == name:\n return Cas(*cas)\n raise ValueError(f\"Cas not found in {cas_file}: {name}\")",
"def test_create_course_duplicate_number(self):\r\n self.client.ajax_post('/course/', self.course_data)\r\n self.course_data['display_name'] = 'Robot Super Course Two'\r\n self.course_data['run'] = '2013_Summer'\r\n\r\n self.assert_course_creation_failed('There is already a course defined with the same organization, course number, and course run. Please change either organization or course number to be unique.')",
"def test_inconsistent_name(self):\n entries = {'uid=test,ou=people,dc=esmgquadrivium,dc=nl': {\n 'uid': ['test'],\n 'givenName': ['Maarten'],\n 'sn': ['Visscher'],\n 'cn': ['Wessel']}\n }\n with self.assertRaises(CloneError):\n clone(entries)",
"def test_duplicate_names_fail(self):\n name = 'some_name'\n instance_types.create(name, 256, 1, 120, 200, 'flavor1')\n self.assertRaises(exception.InstanceTypeExists,\n instance_types.create,\n name, 256, 1, 120, 200, 'flavor2')",
"def _create_finding_id(control_id, resource_name, length=20):\n input = control_id + resource_name\n hex = hashlib.sha256(input.encode('UTF-8')).hexdigest()\n result = int(hex, 16) % (10 ** length)\n return str(result)",
"def sid_name_error(sid_name):\n if sid_name in cilKeyReference.sid_soi_reference:\n pass\n else:\n raise Exception(\"No reference for \\\"{0}\\\" in cilKeyReference.py\".format(sid_name))",
"def test_create_invalid_name(self):\r\n print(\"Create survey with invalid name\")\r\n s_name = \"\"\r\n c_id = 1\r\n questions = [1, 2]\r\n\r\n prev_noSurveys = len(Survey.query.all())\r\n self.assertEqual(self.system.create_survey(s_name, c_id, questions), 0)\r\n curr_noSurveys = len(Survey.query.all())\r\n self.assertEqual(prev_noSurveys, curr_noSurveys)",
"def _create_counter(self, name):\n otel_safe_name = _get_otel_safe_name(name)\n\n if _is_up_down_counter(name):\n counter = self.meter.create_up_down_counter(name=otel_safe_name)\n else:\n counter = self.meter.create_counter(name=otel_safe_name)\n\n logging.debug(\"Created %s as type: %s\", otel_safe_name, _type_as_str(counter))\n return counter",
"def test_name_already_exists(self) -> None:\n with pytest.raises(IntegrityError):\n ObservationType.add({'name': 'clear', 'units': 'mag',\n 'description': 'Un-filtered apparent magnitude.'})",
"def test_check_project_name(self):\n with self.assertRaises(InputError):\n ARC(project='ar c')\n with self.assertRaises(InputError):\n ARC(project='ar:c')\n with self.assertRaises(InputError):\n ARC(project='ar<c')\n with self.assertRaises(InputError):\n ARC(project='ar%c')",
"def test_unique_naming(self):\r\n locator = BlockUsageLocator(\r\n CourseLocator(org='testx', offering='GreekHero', branch='draft'),\r\n 'problem', block_id='problem1'\r\n )\r\n original = modulestore().get_item(locator)\r\n\r\n locator = BlockUsageLocator(\r\n CourseLocator(org='guestx', offering='contender', branch='draft'), 'course', 'head345679'\r\n )\r\n category = 'problem'\r\n new_payload = \"<problem>empty</problem>\"\r\n new_module = modulestore().create_item(\r\n locator, category, 'anotheruser',\r\n fields={'display_name': 'problem 1', 'data': new_payload},\r\n )\r\n another_payload = \"<problem>not empty</problem>\"\r\n another_module = modulestore().create_item(\r\n locator, category, 'anotheruser',\r\n fields={'display_name': 'problem 2', 'data': another_payload},\r\n definition_locator=original.definition_locator,\r\n )\r\n # check that course version changed and course's previous is the other one\r\n parent = modulestore().get_item(locator)\r\n self.assertNotEqual(new_module.location.block_id, another_module.location.block_id)\r\n self.assertIn(new_module.location.version_agnostic(), version_agnostic(parent.children))\r\n self.assertIn(another_module.location.version_agnostic(), version_agnostic(parent.children))\r\n self.assertEqual(new_module.data, new_payload)\r\n self.assertEqual(another_module.data, another_payload)\r\n # check definition histories\r\n new_history = modulestore().get_definition_history_info(new_module.definition_locator)\r\n self.assertIsNone(new_history['previous_version'])\r\n self.assertEqual(new_history['original_version'], new_module.definition_locator.definition_id)\r\n self.assertEqual(new_history['edited_by'], \"anotheruser\")\r\n another_history = modulestore().get_definition_history_info(another_module.definition_locator)\r\n self.assertEqual(another_history['previous_version'], original.definition_locator.definition_id)",
"def test_create_id_dupe(self):\r\n user = random.getrandbits(32)\r\n courses = modulestore().get_courses()\r\n with self.assertRaises(DuplicateCourseError):\r\n dupe_course_key = courses[0].location.course_key\r\n modulestore().create_course(dupe_course_key.org, dupe_course_key.offering, user)",
"def _solid_name_check(self, solid_name, chk_dict=None):\n return self._name_check(solid_name, 'solids', chk_dict=chk_dict)",
"def __init__(self, value: str) -> None:\n if value in NEURO_SYS_IDENTIFIERS:\n value = NEURO_SYS_IDENTIFIERS[value]\n if identifier.STANDARD.match(value.__str__()): # pylint: disable=no-member\n self.is_old_style = False\n elif identifier.OLD_STYLE.match(value.__str__()): # pylint: disable=no-member\n self.is_old_style = True\n else:\n raise InvalidIdentifier(f'Not a valid arXiv ID: {value}')",
"def try_create_uniqe_name(self,name=None,plan_id=None):\n if self.valid_name(name):\n for i in range (1,20):\n new_name=name+\"_\"+str(i)\n if self.unique_name(name=new_name,plan_id=plan_id):\n return new_name\n return False\n else:\n return False",
"def create(self, vals):\n number_id = self.env['educa.establishment'].search([('id', '=', vals['establishment_id'])]).number_id\n code = self.search([('establishment_id', '=', vals['establishment_id'])], order=\"name desc\", limit=1).name\n if code:\n num = str(code).split('/')\n start_num =(int(num[0]))\n end_num = int(num[1])\n final_name=str(start_num) +'/' +str(end_num+1)\n vals['name'] = final_name\n else:\n vals['name'] = str(number_id) +'/' +'1'\n return super(EducaClassroom, self).create(vals)",
"def _make_identical(self, name):\n if not name in self.all_variables:\n return name\n i = 2\n while '%s%s' % (name, i) in self.all_variables:\n i += 1\n return '%s%s' % (name, i)",
"def test_invalid_type_cr_name(self):\n self.assertRaises(QISKitError, ClassicalRegister, size=3, name=1)"
] | [
"0.55365926",
"0.5442971",
"0.5290361",
"0.52699053",
"0.52344525",
"0.5213541",
"0.51775724",
"0.5171424",
"0.5146217",
"0.5143811",
"0.5137238",
"0.5084902",
"0.505352",
"0.5047763",
"0.5042051",
"0.49853882",
"0.49819195",
"0.49509123",
"0.49481195",
"0.4940359",
"0.4940359",
"0.49401116",
"0.4936884",
"0.49327603",
"0.4925717",
"0.48952135",
"0.48894328",
"0.48887223",
"0.48678425",
"0.486216"
] | 0.5477288 | 1 |
feat = feature input_zone_polygon = shpF input_value_raster = trainF band = 1 coords=[commonBox[0], commonBox[2], commonBox[3], commonBox[1]] | def get_zone_pixels(feat, input_zone_polygon, input_value_raster, band, coords=[]): #, raster_band
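    # Assumes the usual module-level imports for this code: from osgeo import gdal, ogr, osr; import numpy as np; import sys.
    # The mutable default coords=[] is only read, never modified, so it is safe here.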
# Open data
raster = gdal.Open(input_value_raster)
shp = ogr.Open(input_zone_polygon)
lyr = shp.GetLayer()
# Get raster georeference info
transform = raster.GetGeoTransform()
xOrigin = transform[0]
yOrigin = transform[3]
pixelWidth = transform[1]
pixelHeight = transform[5]
sizeX = raster.RasterXSize
sizeY = raster.RasterYSize
lrx = xOrigin + (sizeX * pixelWidth)
lry = yOrigin + (sizeY * pixelHeight)
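    # lrx/lry are the lower-right corner of the raster in map coordinates (pixelHeight is negative),
    # used below to clamp the zone extent to the raster footprint.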
# Reproject vector geometry to same projection as raster
#sourceSR = lyr.GetSpatialRef()
#targetSR = osr.SpatialReference()
#targetSR.ImportFromWkt(raster.GetProjectionRef())
#coordTrans = osr.CoordinateTransformation(sourceSR,targetSR)
#feat = lyr.GetNextFeature()
#geom = feat.GetGeometryRef()
#geom.Transform(coordTrans)
# Get extent of feat
geom = feat.GetGeometryRef()
if (geom.GetGeometryName() == 'MULTIPOLYGON'):
count = 0
pointsX = []; pointsY = []
for polygon in geom:
geomInner = geom.GetGeometryRef(count)
ring = geomInner.GetGeometryRef(0)
numpoints = ring.GetPointCount()
for p in range(numpoints):
lon, lat, z = ring.GetPoint(p)
pointsX.append(lon)
pointsY.append(lat)
count += 1
elif (geom.GetGeometryName() == 'POLYGON'):
ring = geom.GetGeometryRef(0)
numpoints = ring.GetPointCount()
pointsX = []; pointsY = []
for p in range(numpoints):
lon, lat, z = ring.GetPoint(p)
pointsX.append(lon)
pointsY.append(lat)
else:
sys.exit("ERROR: Geometry needs to be either Polygon or Multipolygon")
#xmin = min(pointsX)
#xmax = max(pointsX)
#ymin = min(pointsY)
#ymax = max(pointsY)
if len(coords) == 0:
xmin = xOrigin if (min(pointsX) < xOrigin) else min(pointsX)
xmax = lrx if (max(pointsX) > lrx) else max(pointsX)
ymin = lry if (min(pointsY) < lry) else min(pointsY)
ymax = yOrigin if (max(pointsY) > yOrigin) else max(pointsY)
else:
xmin = coords[0] if (min(pointsX) < coords[0]) else min(pointsX)
xmax = coords[1] if (max(pointsX) > coords[1]) else max(pointsX)
ymin = coords[2] if (min(pointsY) < coords[2]) else min(pointsY)
ymax = coords[3] if (max(pointsY) > coords[3]) else max(pointsY)
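    # At this point (xmin, xmax, ymin, ymax) is the zone extent clipped to the raster (or to the supplied common box).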
# Specify offset and rows and columns to read
    xoff = int((xmin - xOrigin)/pixelWidth)
    yoff = int((yOrigin - ymax)/pixelWidth)  # assumes square pixels, i.e. |pixelHeight| == pixelWidth
    xcount = int((xmax - xmin)/pixelWidth)  # adding +1 here would include one extra pixel on the right side
    ycount = int((ymax - ymin)/pixelWidth)  # adding +1 here would include one extra pixel on the bottom side
#print(xoff, yoff, xcount, ycount)
# Create memory target raster
target_ds = gdal.GetDriverByName('MEM').Create('', xcount, ycount, 1, gdal.GDT_Byte)
target_ds.SetGeoTransform((
xmin, pixelWidth, 0,
ymax, 0, pixelHeight,
))
# Create for target raster the same projection as for the value raster
raster_srs = osr.SpatialReference()
raster_srs.ImportFromWkt(raster.GetProjectionRef())
target_ds.SetProjection(raster_srs.ExportToWkt())
# Rasterize zone polygon to raster
gdal.RasterizeLayer(target_ds, [1], lyr, burn_values=[1])
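    # Pixels covered by the zone polygon are burned with value 1; everything else stays 0 and is masked out below.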
# Read raster as arrays
dataBandRaster = raster.GetRasterBand(band)
    data = dataBandRaster.ReadAsArray(xoff, yoff, xcount, ycount).astype(float)  # np.float is deprecated/removed in modern NumPy
    bandmask = target_ds.GetRasterBand(1)
    datamask = bandmask.ReadAsArray(0, 0, xcount, ycount).astype(float)
# data zone of raster
dataZone = np.ma.masked_array(data, np.logical_not(datamask))
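    # Release GDAL/OGR handles explicitly before returning.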
raster_srs = None
raster = None
shp = None
lyr = None
return [dataZone, [xmin,xmax,ymin,ymax]] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def spatial(self):",
"def mainFunction(f):\n\n #############################################################################\n \n \n # biomass hexagon\n predF = '/vol/v3/lt_stem_v3.1/models/biomassfiaald_20180708_0859/2000/biomassfiaald_20180708_0859_2000_mean.tif'\n trainF = '/vol/v2/datasets/biomass/nbcd/fia_ald/nbcd_fia_ald_biomass_clipped_to_conus.tif'\n shpF = '/vol/v1/general_files/datasets/spatial_data/hexagons/hexagons_conus_albers_30km_with_id.shp'\n trainND = -32768\n predND = -9999\n trgField = 'id'\n descrField = 'id'\n outDir = '/vol/v3/lt_stem_v3.1/evaluation/biomassfiaald_20180708_0859/hexagon_correlation'\n xyLim = (500, 500)\n xLab = 'Reference (tons/ha)'\n yLab = 'Prediction (tons/ha)'\n annoXY = (15,420)\n \n \n \"\"\"\n # cc\n predF = '/vol/v3/lt_stem_v3.1/models/canopy_20180915_1631/2001/canopy_20180915_1631_2001_mean.tif'\n trainF = '/vol/v2/stem/conus/reference_rasters/nlcd_2001_canopy_clipped_to_conus_train.tif'\n #shpF = '/vol/v2/datasets/Eco_Level_III_US/us_eco_l3_no_states_multipart.shp'\n shpF = '/vol/v1/general_files/datasets/spatial_data/hexagons/hexagons_conus_albers_30km_with_id.shp'\n trainND = 255\n predND = 255\n trgField = 'id'\n descrField = 'id'\n #trgField = 'US_L3CODE'\n #descrField = 'US_L3NAME'\n #outDir = '/vol/v3/lt_stem_v3.1/evaluation/canopy_20180915_1631/ecoregion_correlation'\n outDir = '/vol/v3/lt_stem_v3.1/evaluation/canopy_20180915_1631/hexagon_correlation'\n xyLim = (100, 100)\n xLab = 'Reference (%)'\n yLab = 'Prediction (%)'\n annoXY = (5,82)\n \"\"\"\n #############################################################################\n\n\n # get color setup\n norm = colors.Normalize(vmin=0, vmax=1)\n f2rgb = cm.ScalarMappable(norm=norm, cmap=cm.get_cmap('YlGnBu_r'))\n \n # open the shapefile\t\n vDriver = ogr.GetDriverByName(\"ESRI Shapefile\")\n vSrc = vDriver.Open(shpF, 0)\n vLayer = vSrc.GetLayer()\n \n commonBox = get_intersec([predF, trainF])\n\n#for f in range(vLayer.GetFeatureCount()):\n feature = vLayer[f]\n name = feature.GetField(trgField)\n print('f: '+str(f))\n outFig = os.path.join(outDir, (trgField.replace(' ','_').lower()+'_'+str(name)+'.png'))\n if os.path.exists(outFig):\n #break\n return\n \n descr = feature.GetField(descrField)\n \n predP, coords = get_zone_pixels(feature, shpF, predF, 1, [commonBox[0], commonBox[2], commonBox[3], commonBox[1]])#.compressed() [commonBox[0], commonBox[2], commonBox[3], commonBox[1]]\n trainP, coords = get_zone_pixels(feature, shpF, trainF, 1, [coords[0], coords[1], coords[2], coords[3]])#.compressed()\n \n predP = ma.masked_equal(predP, predND)\n trainP = ma.masked_equal(trainP, trainND)\n trainP = ma.masked_equal(trainP, 0)\n\n combMask = np.logical_not(np.logical_not(predP.mask) * np.logical_not(trainP.mask))\n predP[combMask] = ma.masked\n trainP[combMask] = ma.masked\n predP = predP.compressed()\n trainP = trainP.compressed()\n if (predP.shape[0] == 0) | (trainP.shape[0] == 0) | (predP==0).all() | (trainP==0).all():\n predP = np.array([0,0,1,1], dtype='float64')\n trainP = np.array([0,0,1,1], dtype='float64')\n mae = round(np.mean(np.absolute(np.subtract(predP, trainP))),1)\n rmse = round(np.sqrt(np.mean((predP-trainP)**2)),1)\n \n\n totPixs = trainP.shape[0]\n sampSize = round(totPixs*1)\n pickFrom = range(sampSize)\n #sampIndex = np.random.choice(pickFrom, size=sampSize)\n sampIndex = pickFrom\n\n r = round(np.corrcoef(trainP[sampIndex], predP[sampIndex])[0][1], 2)\n if (mae == 0) & (r == 1):\n r = 0.0\n rColor = f2hex(f2rgb, r)\n p = sns.jointplot(trainP[sampIndex], predP[sampIndex], kind=\"hex\", 
color='blue', xlim=(0,xyLim[0]), ylim=(0,xyLim[1]), size=5)\n p.ax_joint.set_xlabel(xLab)\n p.ax_joint.set_ylabel(yLab)\n p.ax_joint.annotate('r: '+str(r)+'\\nrmse: '+str(rmse)+'\\nmae: '+str(mae), annoXY)\n plt.tight_layout()\n outFig = os.path.join(outDir, (trgField.replace(' ','_').lower()+'_'+str(name)+'.png'))\n p.savefig(outFig)\n \n df = pd.DataFrame({'id':name, 'descr':descr, 'r':r, 'rmse':rmse, 'mae':mae, 'color':rColor, 'img':os.path.basename(outFig)}, index=[0])\n outCSV = outFig.replace('.png','.csv')\n df.to_csv(outCSV, ',', index=False)",
"def spatialFilter(input_shp,aoi,output_shp):\n inDataSource = driver.Open(input_shp, 0)\n inlayer = inDataSource.GetLayer()\n\n # create the data source\n outdata_source = driver.CreateDataSource(output_shp)\n # create the spatial reference, WGS84\n srs = osr.SpatialReference()\n srs.ImportFromEPSG(4326)\n # create the layer\n outlayer = outdata_source.CreateLayer(\"outlayer\", srs, ogr.wkbPolygon)\n\n # Add input Layer Fields to the output Layer if it is the one we want\n inLayerDefn = inlayer.GetLayerDefn()\n for i in range(0, inLayerDefn.GetFieldCount()):\n fieldDefn = inLayerDefn.GetFieldDefn(i)\n fieldName = fieldDefn.GetName()\n outlayer.CreateField(fieldDefn)\n\n #load spatialfilter\n inspatialfilter = driver.Open(aoi, 0)\n inspatialfilterlayer = inspatialfilter.GetLayer()\n #get geometry for spatialfilter\n for inFeature in inspatialfilterlayer:\n spatialfiltergeom = inFeature.GetGeometryRef()\n\n inlayer.SetSpatialFilter(spatialfiltergeom)\n # Get the output Layer's Feature Definition\n outLayerDefn = outlayer.GetLayerDefn()\n for inFeature in inlayer:\n # Create output Feature\n outFeature = ogr.Feature(outLayerDefn)\n try:\n # Add field values from input Layer\n for i in range(0, outLayerDefn.GetFieldCount()):\n fieldDefn = outLayerDefn.GetFieldDefn(i)\n fieldName = fieldDefn.GetName()\n outFeature.SetField(outLayerDefn.GetFieldDefn(i).GetNameRef(),inFeature.GetField(i))\n # Set geometry\n geom = inFeature.GetGeometryRef()\n outFeature.SetGeometry(geom.Clone())\n # Add new feature to output Layer\n outlayer.CreateFeature(outFeature)\n except Exception:\n sys.exc_clear()\n inlayer.SetSpatialFilter(None)",
"def main():\n # IMPORTANT: Specify a path to the new shapefile!\n data_dir = os.path.join(\"C:\\\\\",\"Users\",\"janni\",\"OneDrive\",\"Desktop\",\"data\")\n\n #Store route identification codes in to a list\n L_tracks=['\"tag_ident\"=72413','\"tag_ident\"=72417','\"tag_ident\"=73053','\"tag_ident\"=72364',\\\n '\"tag_ident\"=73054','\"tag_ident\"=79694','\"tag_ident\"=79698']\n\n if(os.path.isdir(data_dir)):\n print(\"Very good! You have chosen a valid directory!\")\n # load the point shapefile of the white-fronted goose manually!\n # access the active layer\n point_layer = iface.activeLayer()\n if not point_layer:\n print(\"Shape file failed to load!\")\n else:\n # 1\n addTimeAndDateObs(point_layer)\n print(\"-----------Created Date and Time objects-------------\")\n # 2\n addDistance(point_layer, L_tracks)\n print(\"-----------Distances calculation finished-------------\")\n # 3\n extractPoints(point_layer,Statistics(point_layer),data_dir)\n print(\"-----------Low distance points extracted and save to a new shapefile-------------\")\n print('Done')\n\n raster_fn = os.path.join(data_dir,\"Eurasia_Landcover.tif\")\n landuse_legend_fn = os.path.join(data_dir,'Eurasia_Landcover_Legend.csv')\n in_shape_fn = os.path.join(data_dir,\"lowDistance.shp\")\n out_shape_fn = os.path.join(data_dir,\"lowDistanceLanduseID.shp\")\n\n\n if(QgsProject.instance().mapLayersByName('lowDistanceLanduseID')==[]):\n processing.run(\"qgis:rastersampling\",\n {'COLUMN_PREFIX' : 'LanduseNr_',\n 'INPUT' : in_shape_fn,\n 'OUTPUT' : out_shape_fn,\n 'RASTERCOPY' : raster_fn})\n updated_shapefile = iface.addVectorLayer(out_shape_fn, '', 'ogr')\n else:\n updated_shapefile = QgsProject.instance().mapLayersByName('lowDistanceLanduseID')[0]\n #2\n convertIdFloatToInt(updated_shapefile)\n #3\n legend = preProcessLegend(landuse_legend_fn)\n #4\n convertIdToName(legend,updated_shapefile)\n #5\n plotLandUse(updated_shapefile,\"Pie\")\n print(\"-----------finished!-------------\")\n print(\"DONE! :)\")\n else:\n iface.messageBar().pushMessage(\"Error\", \"The directory does not exist. Please change data_dir in the code\",level = 1)\n print(\"Please specify a valid directory in the main function of Code_Distance.py!\")",
"def shp(self, outname=None):\n # the first line is the vertical band line and is thus excluded\n profiles = self.ax2.get_lines()[1:]\n if len(profiles) == 0:\n return\n \n if outname is None:\n root = Tk()\n # Hide the main window\n root.withdraw()\n outname = filedialog.asksaveasfilename(initialdir=os.path.expanduser('~'),\n defaultextension='.shp',\n filetypes=(('shp', '*.shp'),\n ('all files', '*.*')))\n if outname is None:\n return\n \n layername = os.path.splitext(os.path.basename(outname))[0]\n \n with Vector(driver='Memory') as points:\n points.addlayer(layername, self.crs, 1)\n fieldnames = ['b{}'.format(i) for i in range(0, self.bands)]\n for field in fieldnames:\n points.addfield(field, ogr.OFTReal)\n \n for i, line in enumerate(profiles):\n # get the data values from the profile\n ydata = line.get_ydata().tolist()\n \n # get the row and column indices of the profile\n legend_text = self.ax2.get_legend().texts[i].get_text()\n legend_items = re.sub('[xy: ]', '', legend_text).split(';')\n col, row = [int(x) for x in legend_items]\n \n # convert the pixel indices to map coordinates\n x, y = self.__img2map(col, row)\n \n # create a new point geometry\n point = ogr.Geometry(ogr.wkbPoint)\n point.AddPoint(x, y)\n fields = {}\n # create a field lookup dictionary\n for j, value in enumerate(ydata):\n if np.isnan(value):\n value = -9999\n fields[fieldnames[j]] = value\n \n # add the new feature to the layer\n points.addfeature(point, fields=fields)\n point = None\n points.write(outname, 'ESRI Shapefile')\n lookup = os.path.splitext(outname)[0] + '_lookup.csv'\n with open(lookup, 'w') as csv:\n content = [';'.join(x) for x in zip(fieldnames, self.bandnames)]\n csv.write('id;bandname\\n')\n csv.write('\\n'.join(content))",
"def run(path, f3_param=[[1, 0.01]], minArea=20, saveNumber=0):\n\tprint('=== path:', path)\n\t\n\t# load x/y/z voxel size (assumes .tif was saved with Fiji\n\txVoxel, yVoxel, zVoxel = readVoxelSize(path)\n\tprint(' xVoxel:', xVoxel, 'yVoxel:', yVoxel, 'zVoxel:', zVoxel)\n\t\n\t# load the data\n\treader = AICSImage(path) \n\tIMG = reader.data.astype(np.float32)\n\tprint(' IMG.shape:', IMG.shape)\n\n\tstructure_channel = 0\n\tstruct_img0 = IMG[0,structure_channel,:,:,:].copy()\n\n\t# give us a guess for our intensity_scaling_param parameters\n\t#from aicssegmentation.core.pre_processing_utils import suggest_normalization_param\n\t#suggest_normalization_param(struct_img0)\n\tlow_ratio, high_ratio = my_suggest_normalization_param(struct_img0)\n\n\t#intensity_scaling_param = [0.0, 22.5]\n\tintensity_scaling_param = [low_ratio, high_ratio]\n\tprint('*** intensity_normalization() intensity_scaling_param:', intensity_scaling_param)\n\t\n\t# intensity normalization\n\tprint('=== calling intensity_normalization()')\n\tstruct_img = intensity_normalization(struct_img0, scaling_param=intensity_scaling_param)\n\n\t# smoothing with edge preserving smoothing \n\tprint('=== calling edge_preserving_smoothing_3d()')\n\tstructure_img_smooth = edge_preserving_smoothing_3d(struct_img)\n\n\t#\n\t\"\"\"\n\tsee: notebooks/playground_filament3d.ipynb\n\n\tscale_x is set based on the estimated thickness of your target filaments.\n\t\tFor example, if visually the thickness of the filaments is usually 3~4 pixels,\n\t\tthen you may want to set scale_x as 1 or something near 1 (like 1.25).\n\t\tMultiple scales can be used, if you have filaments of very different thickness.\n\tcutoff_x is a threshold applied on the actual filter reponse to get the binary result.\n\t\tSmaller cutoff_x may yielf more filaments, especially detecting more dim ones and thicker segmentation,\n\t\twhile larger cutoff_x could be less permisive and yield less filaments and slimmer segmentation.\n\t\"\"\"\n\t#f3_param = [[1, 0.01]] # [scale_1, cutoff_1]\n\tprint('=== calling filament_3d_wrapper() f3_param:', f3_param)\n\tbw = filament_3d_wrapper(structure_img_smooth, f3_param)\n\t\t\n\t#\n\t#minArea = 20 # from recipe\n\tprint('=== calling remove_small_objects() minArea:', minArea)\n\tseg = remove_small_objects(bw>0, min_size=minArea, connectivity=1, in_place=False)\n\n\t#\n\t# save original file again (with saveNumber\n\tsaveNumberStr = ''\n\tif saveNumber>1:\n\t\tsaveNumberStr = '_' + str(saveNumber)\n\t\t\n\t#\n\t# save mask\n\tseg = seg >0\n\tout=seg.astype(np.uint8)\n\tout[out>0]=255\n\t\n\t# save _dvMask\n\tmaskPath = os.path.splitext(path)[0] + '_dvMask' + saveNumberStr + '.tif'\n\tprint('=== saving 3D mask [WILL FAIL IF FILE EXISTS] as maskPath:', maskPath)\n\ttry:\n\t\twriter = omeTifWriter.OmeTifWriter(maskPath)\n\t\twriter.save(out)\n\texcept(OSError) as e:\n\t\tprint(' error: file already exists, di dnot resave, maskPath:', maskPath)\n\t\t\n\t#\n\t# analyze skeleton, take a 3d mask and analyze as a 1-pixel skeleton\n\tretDict0, mySkeleton = myAnalyzeSkeleton(out=out, imagePath=path)\n\tretDict = OrderedDict()\n\tretDict['tifPath'] = path\n\tretDict['maskPath'] = maskPath\n\tretDict['tifFile'] = os.path.basename(path)\n\tretDict['xVoxel'] = xVoxel\n\tretDict['yVoxel'] = yVoxel\n\tretDict['zVoxel'] = zVoxel\n\t#\n\tretDict['params'] = OrderedDict()\n\tretDict['params']['saveNumber'] = saveNumber\n\tretDict['params']['intensity_scaling_param'] = intensity_scaling_param # calculated in 
my_suggest_normalization_param\n\tretDict['params']['f3_param'] = f3_param[0] # cludge, not sure where to put this. f3_param is a list of list but screws up my .csv output !!!\n\tretDict['params']['minArea'] = minArea\n\n\tretDict.update( retDict0 )\n\n\t# save 1-pixel skeleton: mySkeleton\n\t# save _dvSkel\n\tskelPath = os.path.splitext(path)[0] + '_dvSkel' + saveNumberStr + '.tif'\n\tprint('=== saving 3D skel [WILL FAIL IF FILE EXISTS] as maskPath:', skelPath)\n\ttry:\n\t\twriter = omeTifWriter.OmeTifWriter(skelPath)\n\t\twriter.save(mySkeleton)\n\texcept(OSError) as e:\n\t\tprint(' error: file already exists, di dnot resave, skelPath:', skelPath)\n\t\t\t\n\treturn retDict",
"def feature(input_dir, input_img, mask, output_dir, window_size):\n\t# Get the pixel array of the ROI\n\tws = int(float(window_size))\n\tfilename = os.path.join(input_dir, input_img)\n\tim = nib.load(filename)\n\taffine = im.affine\n\timg = im.get_data() \n\t# img= (img - img.min()) / (np.sort(img,axis=None)[-10] - img.min()) * 255\n\t# img[img<0]=0\n\t# img[img>255]=255\n\t# fea_img = nib.Nifti1Image(img, affine)\n\t# nib.save(fea_img, output_dir + 'Img_255.nii.gz')\t\n\tMask=nib.load(os.path.join(input_dir, mask)).get_data()\n\t# Slices=np.unique(np.where(Mask==1)[2])\n\t# img=img[:,:,tuple(Slices)]\n\t# Mask=Mask[:,:,tuple(Slices)]\n\t\"\"\" Local feature calculation \"\"\"\n\tif len(img.shape)>2:\n\t\tlength, width, height = img.shape\n\telse:\n\t\timg=img.reshape(img.shape[0],img.shape[1],1)\n\t\tMask=Mask.reshape(img.shape[0],img.shape[1],1)\n\t\tlength, width, height = img.shape\n\t# number of the features\n\tn_fea = 99\n\tfea = np.zeros((length, width, height, n_fea))\n\tfor h in range(height):\n\t\tfor l in range(length):\n\t\t\tfor w in range(width):\n\t\t\t\tif Mask[l,w,h]==0:\n\t\t\t\t\tcontinue\n\t\t\t\tpatch0 = img[max((l - ws),0):min(length,(l + ws)), max((w - ws),0):min(width,(w + ws)), h]\n\t\t\t\tl0, w0 = patch0.shape\n\t\t\t\tpatch = np.array(np.reshape(patch0, (1, l0 * w0)))\n\n\t\t\t\t\"\"\"\n\t\t\t\tfirst order statistics based feature\n\t\t\t\tlist {max, min, median, 25percentile, 75percentile, std, skew, kurtosis, entropy}\n\t\t\t\t\"\"\"\n\t\t\t\tfea[l, w, h, 0] = np.max(patch[0])\n\t\t\t\tfea[l, w, h, 1] = np.min(patch[0])\n\t\t\t\tfea[l, w, h, 2] = np.median(patch[0])\n\t\t\t\tfea[l, w, h, 3] = np.percentile(patch[0], 25)\n\t\t\t\tfea[l, w, h, 4] = np.percentile(patch[0], 75)\n\t\t\t\tfea[l, w, h, 5] = np.percentile(patch[0], 75)-np.percentile(patch[0], 25)\n\t\t\t\tfea[l, w, h, 6] = np.std(patch[0])\n\t\t\t\tfea[l, w, h, 7] = stats.skew(patch[0])\n\t\t\t\tfea[l, w, h, 8] = stats.kurtosis(patch[0])\n\t\t\t\thist = stats.histogram(patch[0], numbins=5)\n\t\t\t\tfea[l, w, h, 9] = stats.entropy(hist.count / np.sum(hist.count))\n\n\t\t\t\t\"\"\"\n\t\t\t\tGLCM based feature\n\t\t\t\tlist {angular second moment, contrast, correlation, variance, inverse difference moment,\n\t\t\t\tsum average, sum variance, sum entropy, entropy, difference variance, difference entropy,\n\t\t\t\tinfo. measure. of corr. 1, info. measure. of corr. 2, max. 
correlation coefficient}\n\t\t\t\t\"\"\"\n\t\t\t\tpatch2 = np.array((patch0 - img.min()) / (img.max() - img.min()) * 256, dtype=np.uint8)\n\t\t\t\tg_matrix = features.haralick(patch2)\n\t\t\t\tfea[l, w, h, 10:23] = np.mean(g_matrix, axis=0)\n\n\t\t\t\t\"\"\"\n\t\t\t\tLocal Binary Patterns based shape descriptors {7 first order statistics on histogram of LBP}\n\t\t\t\t\"\"\"\n\t\t\t\tlbp = local_binary_pattern(patch0, 8, ceil(ws/2), 'default')\n\t\t\t\tfea[l, w, h, 23] = np.max(lbp)\n\t\t\t\tfea[l, w, h, 24] = np.min(lbp)\n\t\t\t\tfea[l, w, h, 25] = np.median(lbp)\n\t\t\t\tfea[l, w, h, 26] = np.percentile(lbp, 25)\n\t\t\t\tfea[l, w, h, 27] = np.percentile(lbp, 75)\n\t\t\t\tfea[l, w, h, 28] = np.percentile(lbp, 75)-np.percentile(lbp, 25)\n\t\t\t\tfea[l, w, h, 29] = np.std(lbp)\n\t\t\t\t\n\t\t\t\t\"\"\"\n\t\t\t\tHu Moment based shape descriptor {7 moments}\n\t\t\t\t\"\"\"\n\t\t\t\tm = moments(np.array(patch0, dtype=np.float))\n\t\t\t\tcr = m[0, 1] / m[0, 0]\n\t\t\t\tcc = m[1, 0] / m[0, 0]\n\t\t\t\tcm = moments_central(np.array(patch0, dtype=np.float), cr, cc)\n\t\t\t\tncm = moments_normalized(cm)\n\t\t\t\thum = moments_hu(ncm)\n\t\t\t\tfea[l, w, h, 30:37] = hum\n\n\t\t\t\t\"\"\"\n\t\t\t\tZernike moment based shape descriptors {first 8 moments}\n\t\t\t\t\"\"\"\n\t\t\t\tzm = features.zernike_moments(patch0, ws)\n\t\t\t\tfea[l, w, h, 37:45] = zm[1:9]\t\t\n\t\t\t\t\n\t\t\t\t\"\"\"\n\t\t\t\tThreshold Adjacency Statistics based shape descriptors {9 statistics} * 6 = 54\n\t\t\t\t\"\"\"\n\t\t\t\ttas = features.tas(patch0)\n\t\t\t\t# lentas = len(tas)\n\t\t\t\tfea[l, w, h, 45:100] = tas[:54]\n\t# Save all the local feature maps in NIFTI format\t\n\tfeaturedir=os.path.join(output_dir,'Size'+str(window_size))\n\tif not os.path.exists(featuredir):\n\t\tos.makedirs(featuredir)\t\n\tfor ii in range(n_fea):\n\t\toutput_filename = featuredir+'/fea_'+str(ii+1)+'.nii.gz'\n\t\tdata = np.reshape(fea[:, :, :, ii], (length, width, height))\n\t\tfea_img = nib.Nifti1Image(data, affine)\n\t\tnib.save(fea_img, output_filename)",
"def main():\n state = sys.argv[1]\n metaPath = root.joinpath(\"outputs\",\"groundwater\",\"csv\",state+\"_metadata.log\")\n outputsPath = root.joinpath(\"outputs\",\"groundwater\")\n \n logging.basicConfig(level=logging.INFO,\n format='%(asctime)s %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S %p',\n handlers=[logging.FileHandler(str(metaPath))],\n )\n \n logging.info(\"preProcessing '%s' dataset\",state)\n path,metacols = gwcfg.get_params(state)\n \n # Initialize Well Data Object (which has self.df and self.gdf (geodataframe))\n gwObj = gwmod.WellDataObj(path,metacols)\n logging.info(\"original df and gdf initialized, shape: %s\",str(gwObj.df.shape))\n \n # Subset gdf to a single state\n gwObj.subset_gdf(state)\n logging.info(\"single state subsetted : %s , no of records: %d\",state,len(gwObj.gdf))\n \n # Remove Duplicates (entire row) ,Remove Null Data Rows, Drop Duplicate geometries\n num_dups,num_nulls,num_geom_dups = gwObj.pre_process()\n logging.info(\"number of duplicates found & dropped: %d \\\n number of nulls found & dropped: %d \\\n number of duplicate geometries found & dropped: %d\",num_dups,num_nulls,num_geom_dups)\n \n # Save processed dataframe to CSV , SHP(without data) and SHP(with data) \n dfPath = outputsPath.joinpath(\"csv\", (state + '_processed' + path.suffix))\n gdfPath = outputsPath.joinpath(\"shapefiles\", (state + '_processed' + \".shp\"))\n gdfPathwData = outputsPath.joinpath(\"shapefiles\", (state + '_processed_wData' + \".shp\"))\n \n gwObj.df.to_csv(dfPath,index=False)\n logging.info(\"saved df to CSV\")\n gwObj.gdf.geometry.to_file(gdfPath,index=False)\n logging.info(\"saved gdf (only geometries) to SHP\")\n gwObj.gdf.to_file(gdfPathwData,index=False)\n logging.info(\"saved gdf (with data) to SHP\")",
"def __init__(self, geometries, surfaceRefinement, case):\n\n self.geometries = geometries\n self.surfaceRefinement = surfaceRefinement\n self.case = case\n #we set up the default settings for snappy\n self.castellate = True\n self.snap = True\n self.addLayers = True\n self.maxLocalCells = 1000000\n self.maxGlobalCells = 200000\n self.minRefinementCells = 200\n self.maxLoadUnbalance = 0.1\n self.nCellsBetweenLevels = 3\n self.edgeRefinementLevel = 6\n self.refinementSurfaceMin = 5\n self.refinementSurfaceMax = 6\n self.resolveFeatureAngle = 5\n self.distanceRefinements = [0.1, 0.2]\n self.distanceLevels = [4, 3]\n self.locationToKeep = [0.001, 0.001, 0.0015]\n self.allowFreeStandingFaces = True\n self.nSmoothPatch = 3\n self.snapTolerance = 2\n self.nSolveIter = 30\n self.nRelaxIter = 5\n self.nFeatureSnapIter = 10\n self.implicitFeatureSnap = False\n self.explicitFeatureSnap = True\n self.multiRegionFeatureSnap = False\n self.relativeSizes = True\n self.nSurfaceLayers = 1\n self.expansionRatio = 1\n self.finalLayerThickness = 0.1\n self.minThickness = 0.03\n self.nGrow = 0\n self.featureAngle = 60\n self.slipFeatureAngle = 30\n self.nRelaxIter = 3\n self.nSmoothSurfaceNormals = 1\n self.nSmoothNormals = 1\n self.nSmoothThickness = 10\n self.maxFaceThicknessratio = 0.5\n self.maxThicknessToMedialRatio = 0.3\n self.minMedianAxisAngle = 90\n self.nBufferCellsNoExtrude = 0\n self.nLayerIter = 50\n self.mergeTolerance = 1e-6",
"def Gridding(vis,uvw,image_params,obs_params,pswf):\t\n\tref_freq = obs_params['ref_freq']/1e6\n\t#print 'ref freq =', ref_freq\n\tlat \t = obs_params['lat']\n\tch_width = obs_params['ch_width']\n\tDEC \t = obs_params['DEC']\n\tStokes = image_params['Stokes']\n\t\n\tprint '--------------Gridding X stokes--------------------'\n\txgrid_wt, xgrid_uv, N = gridder(vis[0],uvw,image_params,obs_params,pswf)\n\tprint '--------------Gridding Y stokes--------------------'\n\tygrid_wt, ygrid_uv, N = gridder(vis[1],uvw,image_params,obs_params,pswf)\n\n\tN = np.shape(xgrid_wt)[0]\n\tgrid_uv = np.zeros([N, N], dtype=complex)\n\tgrid_wt = np.zeros([N, N], dtype=complex)\n\t\n\tif Stokes == 'I':\n\t\t#combine X and Y gridded vis to create the I pol gridded vis\n\t\t# I = (XX+YY)/2\n\t\tgrid_uv.real = (ygrid_uv.real + xgrid_uv.real)/2\n\t\tgrid_uv.imag = (ygrid_uv.imag + xgrid_uv.imag)/2\n\n\t\t#combine X and Y gridded wt to create the I pol gridded wt\n\t\tgrid_wt.real = (ygrid_wt.real + xgrid_wt.real)/2\n\t\tgrid_wt.imag = (ygrid_wt.imag + xgrid_wt.imag)/2\n\n\telif Stokes == 'Q':\n\t\t#combine X and Y gridded vis to create the I pol gridded vis\n\t\t# Q = (XX-YY)/2\n\t\tgrid_uv.real = (ygrid_uv.real - xgrid_uv.real)/2\n\t\tgrid_uv.imag = (ygrid_uv.imag - xgrid_uv.imag)/2\n\n\t\t#combine X and Y gridded wt to create the I pol gridded wt\n\t\tgrid_wt.real = (ygrid_wt.real - xgrid_wt.real)/2\n\t\tgrid_wt.imag = (ygrid_wt.imag - xgrid_wt.imag)/2\n\n\tdty_image=np.fft.fftshift(np.fft.ifft2(np.fft.ifftshift(IF.pad_fft(grid_uv))))\n\tpsf_image=np.fft.fftshift(np.fft.ifft2(np.fft.ifftshift(IF.pad_fft(grid_wt))))\n\n\treturn dty_image, psf_image",
"def test_image_task_spatial_features_early_fusion(self):\n args = BASE_ARGS.copy()\n args.update(SPATIAL_IMAGE_ARGS)\n args.update(EARLY_FUSION_ARGS)\n\n valid, test = testing_utils.train_model(args)\n self.assertLessEqual(\n valid['ppl'],\n 7.5,\n 'failed to train image_seq2seq on image task with spatial features',\n )",
"def test_stage_0():\n\tra_1 = readImage(TRAIN_RAW_IMAGE_1)\n\tre_1 = readImage(TRAIN_RESULT_IMAGE_1)\n\n\tra_2 = readImage(TRAIN_RAW_IMAGE_2)\n\tre_2 = readImage(TRAIN_RESULT_IMAGE_2)\n\n\t# Uncomment below if more examples are required.\n\t# ra_3 = readImage(TRAIN_RAW_IMAGE_3)\n\t# re_3 = readImage(TRAIN_RESULT_IMAGE_3)\n\n\t# Uncomment below if the additional features are needed.\n\t# ra_1 += (\n\t# \tlaplace_operator(TRAIN_RAW_IMAGE_1),\\\n\t# \t# k_means(TRAIN_RAW_IMAGE_1)[0],\\\n\t# \t)\n\n\t# Uncomment below if the additional features are needed.\n\t# ra_2 += (\n\t# \tlaplace_operator(TRAIN_RAW_IMAGE_2),\\\n\t# \t# k_means(TRAIN_RAW_IMAGE_2)[0],\\\n\t# \t)\n\n\t# The prediction model is obtained and trained.\n\tengine = get_model((ra_1, ra_2,), (re_1, re_2,), model_type=SVM, percentage=0.1)\n\n\ttest_percentage = float(1) # how many tests\n\n\tra_1 = readImage(TEST_RAW_IMAGE_1)\n\n\t# Uncomment below if the additional features are needed.\n\t# ra_1 += (\n\t# \tlaplace_operator(TEST_RAW_IMAGE_1),\\\n\t# \t# k_means(TEST_RAW_IMAGE_1)[0],\\\n\t# \t)\n\n\tre_1 = readImage(TEST_RESULT_IMAGE_1)\n\n\t# ra_2 = readImage(TEST_RAW_IMAGE_2)\n\t# re_2 = readImage(TEST_RESULT_IMAGE_2)\n\n\tinput_vec = []\n\t# The features are extracted.\n\tinput_vec += buildFeatureArray_2(ra_1[0], ra_1[1], ra_1[2],\\\n\t\tRADIUS_ARRAY,\\\n\t\tadditional_feats=([] if len(ra_1) == 3 else ra_1[3:]))\n\n\tex_no = int(test_percentage * len(input_vec)) # actual number of the test sample\n\n\toutput_vec = []\n\toutput_vec += matrixToArray(re_1[0], lambda el: 1 if el == 255 else 0)\n\n\tprint('Will start predicting...')\n\n\tpredicted_vec = engine.predict(input_vec[:ex_no])\n\n\tcounter = float(0)\n\tfor y, p in zip(output_vec[:ex_no], predicted_vec[:ex_no]):\n\t\tif y == p: counter += 1\n\n\tprint('Accuracy: ' + str(counter/ex_no))\n\n\tpredicted_mat = arrayToMatrix( predicted_vec, len(re_1[0]), len(re_1[0][0]),\\\n\t\tlambda el: 255 if el == 1 else 0)\n\n\t# The predicted segmentation is saved.\n\tsave_rgb_img(\\\n\t np.array(predicted_mat).transpose(),\\\n\t np.array(predicted_mat).transpose(),\\\n\t np.array(predicted_mat).transpose(),\\\n\t 'pred.bmp',\\\n\t)",
"def test_image_task_spatial_features(self):\n args = BASE_ARGS.copy()\n args.update(SPATIAL_IMAGE_ARGS)\n\n valid, test = testing_utils.train_model(args)\n self.assertLessEqual(\n valid['ppl'],\n 7.5,\n 'failed to train image_seq2seq on image task with spatial features',\n )",
"def data_assemble(self, x,y, r_cut, add_mask=5, pick_choice=False):\n #segmentation components\n obj_masks,center_mask_info, segments_deblend_list = self._seg_image(x, y, r_cut=r_cut)\n data_masks_center, _, xcenter, ycenter, c_index = center_mask_info\n image = self.cut_image(x,y,r_cut)\n self.raw_image = image\n src_mask = np.zeros_like(image)\n lens_mask = np.zeros_like(image)\n plu_mask = np.zeros_like(image)\n lenslight_mask_index = []\n if self.segmap is not None and self.interaction:\n segmap=self.segmap[0].data\n segdata = segmap[x - r_cut:x + r_cut + 1, y - r_cut:y + r_cut + 1]\n plt.imshow(segdata, origin='lower')\n nlabel = np.unique(segdata)\n for i in range(nlabel.shape[0] - 1):\n ax = (int((np.where(segdata == nlabel[i + 1])[0].max() - np.where(segdata == nlabel[i + 1])[0].min()) / 2 +\n np.where(segdata == nlabel[i + 1])[0].min()))\n ay = (int((np.where(segdata == nlabel[i + 1])[1].max() - np.where(segdata == nlabel[i + 1])[1].min()) / 3 +\n np.where(segdata == nlabel[i + 1])[1].min()))\n plt.text(ay, ax, repr(nlabel[i + 1]), color='r', fontsize=15)\n plt.title('Input segmentation map')\n plt.show()\n source_mask_index = [int(sidex) for sidex in input('Selection of data via (inputed) segmentation index separated by space, e.g., 0 1 :').split()]\n for i in source_mask_index:\n src_mask = src_mask + segdata*(segdata==i*1)\n # lens light\n lenslightyn = input('Hint: is there lens light? (y/n): ')\n if lenslightyn == 'y':\n lenslight_mask_index = [int(lidex) for lidex in input('Selection of lens-plane light via (inputed) segmentation index separated by space, e.g., 0 1 :').split()]\n for i in lenslight_mask_index:\n lens_mask = (lens_mask + segdata*(segdata==i*1))\n elif lenslightyn == 'n':\n lenslight_mask_index = []\n else:\n raise ValueError(\"Please input 'y' or 'n' !\")\n # contamination\n pluyn = input('Hint: is there contamination? (y/n): ')\n if pluyn == 'y':\n plution_mask_index = [int(pidex) for pidex in input('Selection of contamination via (inputed) segmentation index separated by space, e.g., 0 1 :').split()]\n for i in plution_mask_index:\n plu_mask = (plu_mask + segdata*(segdata==i*1))\n elif pluyn == 'n':\n plu_mask = np.zeros_like(image)\n else:\n raise ValueError(\"Please input 'y' or 'n' !\")\n\n\n\n if self.segmap is None and self.interaction:\n self.plot_segmentation(image, segments_deblend_list, xcenter, ycenter, c_index)\n #source light\n if pick_choice:\n source_mask_index = [int(sidex) for sidex in input('Selection of data via segmentation index separated by space, e.g., 0 1 :').split()]\n for i in source_mask_index:\n src_mask = src_mask + obj_masks[i]\n #lens light\n lenslightyn = input('Hint: is there lens light? (y/n): ')\n if lenslightyn == 'y':\n lenslight_mask_index = [int(lidex) for lidex in input('Selection of lens-plane light via segmentation index separated by space, e.g., 0 1 :').split()]\n for i in lenslight_mask_index:\n lens_mask = (lens_mask + obj_masks[i])\n elif lenslightyn == 'n':\n lenslight_mask_index = []\n else:\n raise ValueError(\"Please input 'y' or 'n' !\")\n # contamination\n pluyn = input('Hint: is there contamination? 
(y/n): ')\n if pluyn == 'y':\n plution_mask_index = [int(pidex) for pidex in input('Selection of contamination via segmentation index separated by space, e.g., 0 1 :').split()]\n for i in plution_mask_index:\n plu_mask = (plu_mask + obj_masks[i])\n elif pluyn == 'n':\n plu_mask = np.zeros_like(image)\n else:\n raise ValueError(\"Please input 'y' or 'n' !\")\n else:\n src_mask = data_masks_center\n\n\n #adding pixels around the selected masks\n selem = np.ones((add_mask, add_mask))\n src_mask = ndimage.binary_dilation(src_mask.astype(np.bool), selem)\n plu_mask_out = ndimage.binary_dilation(plu_mask.astype(np.bool), selem)\n plu_mask_out = (plu_mask_out - 1)*-1\n\n #select source region to fit, or to use whole observation to fit\n ##1.select source region to fit\n snr = self.snr\n source_mask = image * src_mask\n #create background image for picked\n if self.background_rms is None:\n _, _, std = sigma_clipped_stats(image, sigma=snr, mask=source_mask)\n tshape = image.shape\n img_bkg = make_noise_image(tshape, distribution='gaussian', mean=0., stddev=std, seed=12)\n else:\n tshape = image.shape\n std=np.mean(self.background_rms)\n img_bkg = make_noise_image(tshape, distribution='gaussian', mean=0., stddev=std, seed=12)\n\n no_source_mask = (src_mask * -1 + 1) * img_bkg\n picked_data = source_mask + no_source_mask\n\n ##2.use whole observation to fit while mask out the contamination\n maskedimg = image * plu_mask_out\n\n ##orginize the output 'kwargs_data'\n kwargs_data = {}\n if pick_choice:\n kwargs_data['image_data'] = picked_data#select source region to fit\n else:\n kwargs_data['image_data'] = maskedimg#use whole observation to fit while mask out the contamination\n\n if self.background_rms is None:\n kwargs_data['background_rms'] = std\n self.background_rms = std\n else:\n kwargs_data['background_rms'] = np.mean(self.background_rms)\n kwargs_data['exposure_time'] = self.exp_time\n kwargs_data['transform_pix2angle'] = np.array([[1, 0], [0, 1]]) * self.deltaPix\n ra_at_xy_0 = (y - r_cut) * self.deltaPix # (ra,dec) is (y_img,x_img)\n dec_at_xy_0 = (x - r_cut) * self.deltaPix\n kwargs_data['ra_at_xy_0'] = ra_at_xy_0\n kwargs_data['dec_at_xy_0'] = dec_at_xy_0\n\n #coordinate of the lens light\n xlenlight, ylenlight = [], []\n if lenslight_mask_index !=[]:\n for i in lenslight_mask_index:\n xlenlight.append(ra_at_xy_0 + int(xcenter[i]) * self.deltaPix )\n ylenlight.append(dec_at_xy_0 + int(ycenter[i])* self.deltaPix )\n\n #for output\n self.data = kwargs_data['image_data']\n self.kwargs_data = kwargs_data\n self.data_mask = src_mask\n self.lens_mask = lens_mask\n self.plu_mask = plu_mask_out\n self.obj_masks = obj_masks\n imageData = ImageData(**kwargs_data)\n self.imageData = imageData\n kwargs_seg = [segments_deblend_list, xcenter, ycenter, c_index]\n\n return kwargs_data, kwargs_seg, [xlenlight, ylenlight]",
"def add_feature(layer, branchID, segs, lines, lon, lat, Ttime, density, Initial_loc, solubility, flows, concentration, water_level, dist): \r\n ctr=0\r\n for i in range(len(lines)):\r\n ctr+=1\r\n point = osgeo.ogr.Geometry(osgeo.ogr.wkbPoint)\r\n # Add points individually to the line\r\n #xy = lines[i]\r\n \r\n #line.AddPoint_2D(xy[0][0],xy[0][1])\r\n #line.AddPoint_2D(xy[1][0],xy[1][1])\r\n point.AddPoint(lon[i], lat[i])\r\n # Update the feature with the line data\r\n featureIndex = ctr\r\n feature = osgeo.ogr.Feature(layerDefinition)\r\n #feature.SetStyleString(\"PEN(c:r,w:5px)\") \r\n feature.SetGeometry(point)\r\n feature.SetFID(featureIndex)\r\n feature.SetGeometryDirectly(point)\r\n \r\n # Set the attribute table\r\n feature.SetField('BranchID', int(branchID[i])) \r\n feature.SetField('SegID', int(segs[i])) # convert to int() is necessary, osgeo cannot recognize numpy int32 type\r\n feature.SetField('Lon', \"{:.3f}\".format(lon[i]))\r\n feature.SetField('Lat', \"{:.3f}\".format(lat[i]))\r\n #feature.SetField('Lon_east', \"{:.3f}\".format(eastlon[i]))\r\n #feature.SetField('Lat_east', \"{:.3f}\".format(eastlat[i]))\r\n feature.SetField('T (day)', int(Ttime[i]))\r\n feature.SetField('Density', density[i])\r\n feature.SetField('Initial', Initial_loc[i])\r\n feature.SetField('Solubility', solubility[i])\r\n feature.SetField('Flow', flows[i])\r\n feature.SetField('C (mg/L)', concentration[i])\r\n feature.SetField('WSE (ft)', water_level[i])\r\n feature.SetField('D (ft)', dist[i])\r\n \r\n layer.CreateFeature(feature)",
"def set_model_std_grid(self, feat=0):\n print(self.shapefile)\n with fiona.open(self.shapefile, 'r') as shapefile:\n # todo - set up an error if user has shapefile with more than one feature. GELP n STEFFI\n # shape = shapefile[0]['geometry']\n shapes = [feature[\"geometry\"] for feature in shapefile]\n\n for feature in shapefile:\n # matching the FID of the given shapefile from a typical geoJSON (Not Ordered Dict nonsense)\n if feat == feature['id']:\n shapes = [feature['geometry']]\n\n\n print(f'geoproperties file {self.geoproperties_file}')\n print('This is the shape var:', shapes)\n with rasterio.open(self.geoproperties_file, 'r') as src:\n out_image, out_transform = rasterio.mask.mask(src, shapes, crop=True)\n out_meta = src.meta\n # once the image is cropped, the image metadata dictionary is updated with the cropped transform and bounds.\n out_meta.update({\"driver\": \"GTiff\",\n \"height\": out_image.shape[1],\n \"width\": out_image.shape[2],\n \"transform\": out_transform})\n\n self.crs = out_meta['crs']\n # TODO - Set Blocksize for sample raster and other useful optimization thingys\n self.transform = out_meta['transform']\n self.left = self.transform[2]\n self.top = self .transform[5]\n self.cols = out_meta['width']\n self.rows = out_meta['height']\n self.xres = self.transform[0]\n self.yres = self.transform[4]\n # return out_meta",
"def __init__(self, instrument, scene, psf_grid, wave, background):\n # This will be the image in electrons... convert to DN at the end.\n img = np.zeros(instrument.array_size, dtype=float)\n\n # Add the background and dark current in electrons\n itime_tot = instrument.itime * instrument.coadds\n img += (background + instrument.dark_current) * itime_tot\n\n # Total readnoise in electrons\n readnoise = instrument.readnoise / math.sqrt(instrument.fowler)\n\n # i and j are the coordinates into the PSF array. Make it 0 at the center.\n psf_i = np.arange(psf_grid.psf.shape[3]) - (psf_grid.psf.shape[3] / 2)\n psf_j = np.arange(psf_grid.psf.shape[4]) - (psf_grid.psf.shape[4] / 2)\n\n psf_i_scaled = psf_i * (psf_grid.psf_scale[wave] / instrument.scale)\n psf_j_scaled = psf_j * (psf_grid.psf_scale[wave] / instrument.scale)\n\n # Add the point sources\n print 'Observation: Adding stars one by one.'\n for ii in range(len(scene.xpos)):\n # Fetch the appropriate interpolated PSF and scale by flux.\n # This is only good to a single pixel.\n psf = psf_grid.get_local_psf(scene.xpos[ii], scene.ypos[ii], wave)\n psf *= scene.flux[ii]\n\n # Project this PSF onto the detector at this position.\n # This includes sub-pixel shifts and scale changes.\n\n # Coordinates of the PSF's pixels at this star's position\n psf_i_old = psf_i_scaled + scene.xpos[ii]\n psf_j_old = psf_j_scaled + scene.ypos[ii]\n\n # Make the interpolation object.\n # Can't keep this because we have a spatially variable PSF.\n psf_interp = RectBivariateSpline(psf_i_old, psf_j_old, psf, kx=1, ky=1)\n\n # New grid of points to evaluate at for this star.\n xlo = int(psf_i_old[0])\n xhi = int(psf_i_old[-1])\n ylo = int(psf_j_old[0]) + 1\n yhi = int(psf_j_old[-1]) + 1\n\n # Remove sections that will be off the edge of the image\n if xlo < 0:\n xlo = 0\n if xhi > img.shape[0]:\n xhi = img.shape[0]\n if ylo < 0:\n ylo = 0\n if yhi > img.shape[1]:\n yhi = img.shape[1]\n \n # Interpolate the PSF onto the new grid.\n psf_i_new = np.arange(xlo, xhi)\n psf_j_new = np.arange(ylo, yhi)\n psf_star = psf_interp(psf_i_new, psf_j_new, grid=True)\n\n # Add the PSF to the image.\n img[xlo:xhi, ylo:yhi] += psf_star\n \n print 'Observation: Finished adding stars.'\n\n #####\n # ADD NOISE: Up to this point, the image is complete; but noise free.\n #####\n # Add Poisson noise from dark, sky, background, stars.\n img_noise = np.random.poisson(img, img.shape)\n\n # Add readnoise\n img_noise += np.random.normal(loc=0, scale=readnoise, size=img.shape)\n \n \n self.img = img_noise",
"def la(x) :\r\n return Feature(x, \"leaf_area\")",
"def __init__(self, folder):\n print \"folder passed is \", folder\n self.folder = folder\n self.geometry = gf.geometry(self.folder)\n self.elements = gf.dictionary_set()\n self.area = np.zeros(shape = (8))\n self.Vol = (self.geometry.properties['span_number']*(self.geometry.properties['span_width']*\n self.geometry.properties['span_height'] + self.geometry.properties['cover_height']\n *self.geometry.properties['span_width']/2))\n self.F = np.zeros(shape = (8, 8))\n of.view_factor(self.geometry, self.F, self.area, self.Vol)\n tran = [self.geometry.properties['tra_cover_out'],0.0,0.0,\n self.geometry.properties['tra_sidewall_out'],\n self.geometry.properties['tra_cover_in'],\n self.geometry.properties['tra_sidewall_in'],0.0,0.0]\n emi = [self.geometry.properties['emi_cover_out'],1.0,1.0,\n self.geometry.properties['emi_sidewall_out'],\n self.geometry.properties['emi_cover_in'],\n self.geometry.properties['emi_sidewall_in'],1.0,1.0] \n self.tr, self.em, self.re = of.optictal_prop(tran,emi)\n if ((self.tr + self.em).any() > 1.0):\n print \"error in optical properties\"\n self.T = np.zeros(shape = (2,10))\n self.RH = np.zeros(shape = (2,10))\n # 8 inside,9 outside \n self.qcond = np.zeros(shape = (2,8))\n self.qconv = np.zeros(shape = (2,8))\n self.qrad = np.zeros(shape = (2,8))\n self.j = np.zeros(shape = (2,8))\n self.g = np.zeros(shape = (2,8))\n self.alpha = np.zeros(shape = (2,8))\n deltaT = 300\n RH_in = 0.6\n fg.set_initial_conditions(self.geometry.properties['t_air_inside'],\n 278,\n RH_in,self.T,self.RH , self.geometry.properties['t_air'],self.g,\n self.geometry.properties['sky_temp'])\n self.T, self.j, self.g, self.alpha, self.qrad, self.qconv = fg.solver_T(self.T,self.qrad,self.qconv,self.alpha,self.j,self.g,self.em,self.tr,\n self.geometry.properties['wind_speed'],\n self.F,self.geometry.properties['heat_flux'],1,1.0,self.area,\n self.geometry.properties['rho'],self.geometry.properties['cp'],\n self.Vol,self.geometry.properties['degree_window'],deltaT)",
"def writethreshold2shp(self):\n assert self.dataset!=None,'Null dataset'\n assert self.RasterSet,'Please Set Bool map in ndarray with SetRasterData() \\n, Current output polygon src band is '+str(self.imageoutput)\n shp_name = self.out_middle_tif_name + '_polygonized.shp'\n srcband = self.dataset.GetRasterBand(1)\n maskband = None\n format = 'ESRI Shapefile'\n drv = ogr.GetDriverByName(format)\n dst_ds = drv.CreateDataSource(shp_name)\n srs = osr.SpatialReference()\n srs.ImportFromWkt(self.outdataset.GetProjectionRef())\n\n dst_layer = dst_ds.CreateLayer(shp_name, geom_type=ogr.wkbPolygon, srs=srs)\n if (dst_layer == None):\n return 0, 0\n dst_field = dst_layer.GetLayerDefn().GetFieldIndex(shp_name)\n prog_func = gdal.TermProgress\n options = []\n result = gdal.Polygonize(srcband, maskband, dst_layer, dst_field, options,\n callback=prog_func)\n dst_ds = None\n print('Shapefile has write in ',shp_name)\n return shp_name",
"def write_shapefile_branch1(self, shpname):\r\n inarrays = self.read_traveltime()\r\n \r\n Narrays = len(inarrays) \r\n \r\n \r\n westlats = []\r\n westlons = []\r\n eastlats = []\r\n eastlons = [] \r\n lines1 = []\r\n for i in range(len(self.westPnts1)):\r\n westlat, westlon = utm.to_latlon(self.westPnts1[i,0], self.westPnts1[i,1], 14, 'U')\r\n eastlat, eastlon = utm.to_latlon(self.eastPnts1[i,0], self.eastPnts1[i,1], 14, 'U')\r\n lines1.append([[westlon, westlat], [eastlon, eastlat]])\r\n westlats.append(westlat)\r\n westlons.append(westlon)\r\n eastlats.append(eastlat)\r\n eastlons.append(eastlon)\r\n \r\n # Create the projection\r\n spatialReference = osgeo.osr.SpatialReference()\r\n spatialReference.ImportFromProj4('+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs')\r\n \r\n # Create the shape file\r\n outfile = r'ArcGIS_online\\%s'%shpname\r\n driver = osgeo.ogr.GetDriverByName('ESRI Shapefile')\r\n shapeData = driver.CreateDataSource(outfile)\r\n \r\n # Create the layer\r\n layer = shapeData.CreateLayer('Contour', spatialReference, osgeo.ogr.wkbLineString)\r\n layerDefinition = layer.GetLayerDefn()\r\n \r\n # Create fields containing segment infos\r\n field_def = osgeo.ogr.FieldDefn('BranchID', osgeo.ogr.OFTInteger)\r\n layer.CreateField(field_def)\r\n \r\n field_def = osgeo.ogr.FieldDefn('Density', osgeo.ogr.OFTInteger)\r\n layer.CreateField(field_def)\r\n \r\n field_def = osgeo.ogr.FieldDefn('SegID', osgeo.ogr.OFTInteger)\r\n layer.CreateField(field_def)\r\n \r\n field_def = osgeo.ogr.FieldDefn('Lon_west', osgeo.ogr.OFTReal)\r\n layer.CreateField(field_def)\r\n \r\n field_def = osgeo.ogr.FieldDefn('Lat_west', osgeo.ogr.OFTReal)\r\n layer.CreateField(field_def)\r\n \r\n field_def = osgeo.ogr.FieldDefn('Lon_east', osgeo.ogr.OFTReal)\r\n layer.CreateField(field_def)\r\n \r\n field_def = osgeo.ogr.FieldDefn('Lat_east', osgeo.ogr.OFTReal)\r\n layer.CreateField(field_def)\r\n \r\n field_def = osgeo.ogr.FieldDefn('Travel_T', osgeo.ogr.OFTReal)\r\n layer.CreateField(field_def)\r\n \r\n \r\n def add_feature(layer, branchID, density, lines, segs, westlon, westlat, eastlon, eastlat, Ttime):\r\n \"\"\"\r\n function that adds feature to layer\r\n \"\"\" \r\n ctr=0\r\n for i in range(len(lines)):\r\n ctr+=1\r\n line = osgeo.ogr.Geometry(osgeo.ogr.wkbLineString)\r\n # Add points individually to the line\r\n xy = lines[i]\r\n \r\n line.AddPoint_2D(xy[0][0],xy[0][1])\r\n line.AddPoint_2D(xy[1][0],xy[1][1])\r\n # Update the feature with the line data\r\n featureIndex = ctr\r\n feature = osgeo.ogr.Feature(layerDefinition)\r\n #feature.SetStyleString(\"PEN(c:r,w:5px)\") \r\n feature.SetGeometry(line)\r\n feature.SetFID(featureIndex)\r\n feature.SetGeometryDirectly(line)\r\n \r\n # Set the attribute table\r\n feature.SetField('BranchID', int(branchID)) \r\n feature.SetField('Density', int(density[i]))\r\n feature.SetField('SegID', int(segs[i])) # convert to int() is necessary, osgeo cannot recognize numpy int32 type\r\n feature.SetField('Travel_T', \"{:.1f}\".format(Ttime[i]))\r\n feature.SetField('Lon_west', \"{:.3f}\".format(westlon[i]))\r\n feature.SetField('Lat_west', \"{:.3f}\".format(westlat[i]))\r\n feature.SetField('Lon_east', \"{:.3f}\".format(eastlon[i]))\r\n feature.SetField('Lat_east', \"{:.3f}\".format(eastlat[i]))\r\n \r\n layer.CreateFeature(feature)\r\n \r\n \r\n Ttime = inarrays[0][:,2]\r\n ind0 = np.nonzero(Ttime)[0][0]\r\n ind = np.arange(ind0, Ttime.shape[0])\r\n \r\n lines1 = [lines1[i] for i in ind]*Narrays\r\n westlats = [westlats[i] for i in ind]*Narrays\r\n westlons = 
[westlons[i] for i in ind]*Narrays\r\n eastlats = [eastlats[i] for i in ind]*Narrays\r\n eastlons = [eastlons[i] for i in ind]*Narrays\r\n \r\n inarrays_new = [inarrays[i][ind,:] for i in range(Narrays)]\r\n inarrays_stack = np.vstack(inarrays_new)\r\n \r\n add_feature(layer, 1, inarrays_stack[:,3], np.asarray(lines1), inarrays_stack[:,1], \r\n np.asarray(westlons), np.asarray(westlats), \r\n np.asarray(eastlats), np.asarray(eastlons), inarrays_stack[:,2])",
"def preprocess():\n #get a list of all sentinel-image filenames\n s2files = [f for f in listdir(s2path) if endswith(join(s2path, f),\".tif\")==True]\n #read in a csv-file with information about the cluster\n csvpath = os.path.abspath(os.path.join(os.path.abspath(__file__),\"../../dataResearch/Data_with_Pooled.csv\"))\n df = pd.read_csv(csvpath)\n #get the min and max values per band \n minmaxlist = minmax()\n timelist = []\n print(\"STEP 2/2\")\n print(\"CREATING TFRECORDS\")\n for i in s2files:\n start = time.time()\n s2file = s2path + \"/\" + i\n #Get Features out of the Dataframe\n #get the name of the label (equals the SurveyID in the data)\n labelname = i.replace(\".tif\",\"\")\n #get the index of the entry to get the information out of the dataframe\n index = df.ID[df.ID == labelname].index\n wealthpooled = float(df['wealthpooled'].loc[index].max().replace(\",\",\".\"))\n wealthpooled5country = float(df['wealthpooled5country'].loc[index].max().replace(\",\",\".\"))\n country = bytes(df['country'].loc[index].max(), 'utf-8')\n urbanrural = bytes(df['URBAN_RURA'].loc[index].max(), 'utf-8')\n csvlat = float(df['LATNUM'].loc[index].max().replace(\",\",\".\"))\n csvlon = float(df['LONGNUM'].loc[index].max().replace(\",\",\".\"))\n year = int(df['year'].loc[index].max())\n wealth = float(df['wealth'].loc[index].max().replace(\",\",\".\"))\n #Get all Bands out of the GEOTIFF File\n s2raster = gdal.Open(s2file)\n bandlist = []\n for n in range(s2raster.RasterCount):\n f = n+1\n if n not in [13,14,15]:\n s2band = s2raster.GetRasterBand(f)\n s2band = s2band.ReadAsArray()\n s2band = np.resize(s2band,(1050,1050)).flatten()\n min = minmaxlist[n][0]\n max = minmaxlist[n][1]\n s2band = (s2band-min)/(max-min)\n bandlist.append(s2band.flatten())\n #get the Nightlight Band out of the GEOTIFF File\n nlfile = nlpath + \"/\" + i\n nlraster = gdal.Open(nlfile)\n nlband = nlraster.GetRasterBand(1)\n nlband = nlband.ReadAsArray()\n nlband = np.resize(nlband,(1050,1050)).flatten()\n min = minmaxlist[13][0]\n max = minmaxlist[13][1]\n nlband = (nlband-min)/(max-min)\n bandlist.append(nlband)\n #create a TFRecords-File with the TFRecordWriter\n with tf.io.TFRecordWriter(exportpath + '/' + labelname + '.tfrec') as writer:\n example = serialize_example(B1=bandlist[0],\n B2=bandlist[1],\n B3=bandlist[2],\n B4=bandlist[3],\n B5=bandlist[4],\n B6=bandlist[5],\n B7=bandlist[6],\n B8=bandlist[7],\n B8A=bandlist[8],\n B9=bandlist[9],\n B10=bandlist[10],\n B11=bandlist[11],\n B12=bandlist[12],\n NL=bandlist[13],\n wealth=wealth,\n wealthpooled=wealthpooled,\n wealthpooled5country=wealthpooled5country,\n country=country,\n urbanrural=urbanrural,\n lon_coord=csvlon,\n lat_coord=csvlat,\n year=year)\n writer.write(example)\n end = time.time()\n timelist.append(end-start)\n print(\"Done!\",str(s2files.index(i)+1) + \"/\" + str(len(s2files)),\"Est. time left:\",time.strftime('%d:%H:%M:%S',time.gmtime(int(sum(timelist)/len(timelist)*(len(s2files)-s2files.index(i))))))",
"def __init__(self, average_disparity, frame_down_factor, mem_down_factor,\n fovea_shape, frame_shape, values,\n verbose=False, memory_length=1, max_n_foveas=1, **bp_args):\n self.verbose = verbose\n self.use_uncertainty = False\n self.n_past_fovea = 0\n\n# self.frame_down_factor = frame_down_factor\n self.mem_down_factor = mem_down_factor\n self.frame_step = 2**frame_down_factor\n self.mem_step = 2**mem_down_factor #step size for uncertainty and importance calculations (pixels)\n\n self.average_disparity = downsample(\n average_disparity, down_factor=mem_down_factor)\n self.frame_shape = frame_shape\n self.fovea_shape = fovea_shape\n self.memory_shape = self.average_disparity.shape\n\n self.values = values\n self.max_n_foveas = max_n_foveas\n\n # self.params = {\n # 'data_weight': 0.16145115747533928, 'disc_max': 294.1504935618425,\n # 'data_max': 32.024780646200725, 'laplacian_ksize': 3} # original hyperopt\n # self.params = {\n # 'data_weight': 0.15109941436798274, 'disc_max': 44.43671813879002,\n # 'data_max': 68.407170602610137, 'laplacian_ksize': 5} # hyperopt on 100 images\n # self.params = {\n # 'data_weight': 0.2715404479972163, 'disc_max': 2.603682635476145,\n # 'data_max': 156312.43116792402, 'laplacian_ksize': 3} # Bryan's hyperopt on 250 images\n # self.params = {\n # 'data_weight': 1.2, 'disc_max': 924.0,\n # 'data_max': 189.0, 'laplacian_ksize': 5} # random\n # self.params = {\n # 'data_weight': 0.16145115747533928, 'disc_max': 294.1504935618425,\n # 'data_max': 32.024780646200725, 'laplacian_ksize': 3} # coarse\n self.params = {\n 'data_exp': 1.09821084614, 'data_max': 112.191597317,\n 'data_weight': 0.0139569211273, 'disc_max': 12.1301410452,\n 'laplacian_ksize': 3, 'smooth': 1.84510833504e-07}\n # self.params = {\n # 'data_exp': 14.2348581842, 'data_max': 79101007093.4,\n # 'data_weight': 0.000102496570364, 'disc_max': 4.93508276126,\n # 'laplacian_ksize': 5, 'laplacian_scale': 0.38937704644,\n # 'smooth': 0.00146126755993} # optimized for frame_down: 1, mem_down: 2, fovea_levels: 1\n\n self.params.update(bp_args)\n\n self.disparity_memory = DisparityMemory(self.memory_shape, n=memory_length)\n self.uncertainty_memory = DisparityMemory(self.memory_shape, n=memory_length)\n self.fovea_memory = DisparityMemory(frame_shape, fovea_shape=fovea_shape, n=self.n_past_fovea)\n\n self._uc = UnusuallyClose(self.average_disparity)",
"def hexapodZernikeLinearModel_hexapodcoordinate():\n Tfile='/home/jghao/research/decamFocus/psf_withseeing/finerGrid_coeff_matrix/zernike_coeff_finerGrid_training.cp'\n b=p.load(open(Tfile))\n nobs = len(b)\n x = b[:,0]\n y = b[:,1]\n z = b[:,2]\n theta = b[:,3]\n phi = b[:,4]\n fwhm = b[:,5]\n e1 = b[:,6]\n e2 = b[:,7]\n thetax = theta*np.cos(np.deg2rad(phi))\n thetay = theta*np.sin(np.deg2rad(phi))\n xh = x*1000 # convert to hexapod coordinate\n yh = -y*1000\n zh = -z*1000\n xtilth = - thetay\n ytilth = - thetax\n\n M22realTrefoil2 = b[:,37] # for x decenter\n M22imagTrefoil1 = b[:,54] \n M22TrefoilXshift = 0.5*(M22realTrefoil2+M22imagTrefoil1)\n\n M22realTrefoil1 = b[:,34] # for y decenter\n M22imagTrefoil2 = b[:,57] \n M22TrefoilYshift = 0.5*(M22realTrefoil1 - M22imagTrefoil2)\n\n M20defocus = b[:,12] # for defocus\n\n M22realComa2 = b[:,36] # for x-tilt\n M22imagComa1 = b[:,55]\n M22ComaXtilt = 0.5*(M22realComa2+M22imagComa1)\n\n M22realComa1 = b[:,35] # for y-tilt\n M22imagComa2 = b[:,56]\n M22ComaYtilt = 0.5*(M22realComa1 - M22imagComa2)\n \n pl.figure(figsize=(21,12))\n pl.subplot(2,3,1)\n t=bp.bin_scatter(M22TrefoilXshift,xh,nbins=20,fmt='bo',scatter=True)\n res = linefit(M22TrefoilXshift,xh)\n pl.plot(M22TrefoilXshift,M22TrefoilXshift*res[1]+res[0],'r,')\n pl.ylabel('x-decenter [micron]')\n pl.xlabel('(M22realTrefoil2+M22imagTrefoil1)/2')\n pl.title('slope: '+str(round(res[1],4))+' Intercept: '+str(round(res[0],4)))\n pl.grid()\n pl.subplot(2,3,2)\n t=bp.bin_scatter(M22TrefoilYshift,yh,nbins=20,fmt='bo',scatter=True)\n res = linefit(M22TrefoilYshift,yh)\n pl.plot(M22TrefoilYshift,M22TrefoilYshift*res[1]+res[0],'r,')\n pl.ylabel('y-decenter [micron]')\n pl.xlabel('(M22realTrefoil1 - M22imagTrefoil2)/2')\n pl.title('slope: '+str(round(res[1],4))+' Intercept: '+str(round(res[0],4)))\n pl.grid()\n pl.subplot(2,3,3)\n t=bp.bin_scatter(M20defocus,zh,nbins=20,fmt='bo',scatter=True)\n res = linefit(M20defocus,zh)\n pl.plot(M20defocus,M20defocus*res[1]+res[0],'r,')\n pl.ylabel('z-defocus [micron]')\n pl.xlabel('M20defocus')\n pl.title('slope: '+str(round(res[1],4))+' Intercept: '+str(round(res[0],4)))\n pl.grid()\n pl.subplot(2,3,4)\n t=bp.bin_scatter(M22ComaXtilt,ytilth,nbins=20,fmt='bo',scatter=True)\n res = linefit(M22ComaXtilt,ytilth)\n pl.plot(M22ComaXtilt,M22ComaXtilt*res[1]+res[0],'r,')\n pl.ylabel('y-tilt [arcsec]') # in hexapod coordiate, xtilt and y tilt is switched from the CRAY coordiante\n pl.xlabel('(M22realComa2+M22imagComa1)/2')\n pl.title('slope: '+str(round(res[1],4))+' Intercept: '+str(round(res[0],4)))\n pl.grid()\n pl.subplot(2,3,5)\n t=bp.bin_scatter(M22ComaYtilt,xtilth,nbins=20,fmt='bo',scatter=True)\n res = linefit(M22ComaYtilt,xtilth)\n pl.plot(M22ComaYtilt,M22ComaYtilt*res[1]+res[0],'r,')\n pl.ylabel('x-tilt [arcsec]')\n pl.xlabel('(M22realComa1 - M22imagComa2)/2')\n pl.title('slope: '+str(round(res[1],4))+' Intercept: '+str(round(res[0],4)))\n pl.grid()\n pl.savefig('linearModel_hexapod_coordinate.png')\n pl.close()",
"def get_regions_mask(self, input):",
"def read_and_select(fles, var, area):\n \n ds = xr.open_mfdataset(fles)\n \n # For 20CRv2c geopotential height \n if(var=='hgt'): \n ds = ds.sel(level=150.0)\n \n try:\n ds = ds.rename({'longitude': 'lon', 'latitude': 'lat'}) \n except: \n pass\n \n \n if(ds.lon.values.max() > 350):\n ds = ds.assign_coords(lon=(((ds.lon + 180) % 360) - 180))\n rolls = np.sum(ds.lon.values < 0); ds = ds.roll(lon=rolls*(-1))\n\n if(ds.lat.values[0] > ds.lat.values[-1]):\n ds['lat'] = np.flipud(ds['lat'])\n ds[var].values = np.flip(ds[var], axis=1)\n\n # For 20CRv2c snow cover\n if(var=='snowc'): \n ds[var] = ds[var]/100.\n ds[var] = ds[var].where(ds[var]>=0.5, other=0.0)\n ds[var] = ds[var].where(ds[var] <0.5, other=1.0)\n \n # For HadISST1\n if((var=='sst')|(var=='sic')): \n mask = ds[var].values == -1000.\n ds[var].values[mask] = np.nan\n \n if( area=='europe'): ds = ds.squeeze().sel(lat=slice( 33,73), lon=slice(-12,40)) \n elif(area=='westeu'): ds = ds.squeeze().sel(lat=slice(42,59), lon=slice(-10,17))\n elif(area=='easeur'): ds = ds.squeeze().sel(lat=slice(38,56), lon=slice(17,43))\n elif(area=='meditr'): ds = ds.squeeze().sel(lat=slice(30,45), lon=slice(0,25))\n elif(area=='scandi'): ds = ds.squeeze().sel(lat=slice( 55,71), lon=slice( 4,34)) \n elif(area=='norhem'): ds = ds.squeeze().sel(lat=slice(-10,87)) \n elif(area=='norpol'): ds = ds.squeeze().sel(lat=slice( 50,87))\n else: ds = ds.squeeze()\n \n return ds",
"def og_features(scan,filt=None,base_noise=None,thresh=-1.4781e-10,diff=1,verbose=False,scale=10):\n #get gradients of data\n der = np.array(np.gradient(scan,diff))\n \n #calculate gardient magnitudes and directions\n der_mag = np.linalg.norm(der,axis=0) \n der_uvecs = der/der_mag\n \n z_cur = np.copy(scan).ravel()\n\n #estimate noise level and set derivative filter threshold\n if filt is None:\n filt = np.mean(signaltonoise(der_mag)[-1])\n \n \n if base_noise is not None:\n filt = np.maximum(filt,base_noise)\n \n\n\n #filter directions and magnitudes\n x, y, z = der_uvecs[0].ravel(), der_uvecs[1].ravel(), der_mag.ravel()\n \n #filter using threshold and filt\n x_filt, y_filt, z_filt = x[z_cur>thresh], y[z_cur>thresh], z[z_cur>thresh]\n #x_filt, y_filt, z_filt = x, y, z\n\n \n #print(len(z_filt))\n x_filt, y_filt, z_filt = x_filt[z_filt>filt], y_filt[z_filt>filt], z_filt[z_filt>filt]\n\n \n #calculate angles\n angles_filt = np.sign(y_filt)*np.arccos(x_filt/1)\n\n \n #print(len(angles_filt))\n \n if len(angles_filt) < 2:\n return 0,0,0\n \n #fit single line\n sol1 = least_squares(ress_1line,[-np.pi/2],args=(angles_filt,),bounds=[-np.pi,0],method='dogbox',jac='2-point',max_nfev=2000)\n\n #fit two lines by grid search\n #sol_grid = grid_search(ress_2line,angles_filt,[[-np.pi,0],[-np.pi,0]])\n \n \n singleline = sol1.x[0]\n \n mx = np.minimum(np.abs(singleline-(-np.pi)),np.abs(singleline))\n \n sol_grid = grid_search(ress_2line_pm,angles_filt,[[0,mx]],umid = singleline)\n spread_lines = sol_grid[1]\n sol_grid[1] = [singleline+spread_lines,singleline-spread_lines]\n \n \n #compute average of squared residuals for both cases\n resid1 = ress_1line(sol1.x,angles_filt)\n\n grid_c11 = np.average(np.power(resid1,2))\n \n grid_c11 = np.average(np.abs(resid1))\n \n grid_c21 = sol_grid[-1]\n \n \n multip = cotunnel_score2(scan,scan>thresh,diff,scale)\n \n final_grid2 = multip*(grid_c11-grid_c21)\n \n \n \"\"\"\n plt.scatter(angles_filt,z_filt,marker='x',c='k',s=15,linewidth=0.4)\n plt.axvline(sol1.x,color='b')\n plt.axvline(sol1.x+(np.pi),color='b')\n plt.axvline(sol_grid[1][0],0,color='r', linestyle='--')\n plt.axvline(sol_grid[1][1],0,color='r', linestyle='--')\n \n plt.axvline(sol_grid[1][0]+(np.pi),0,color='r', linestyle='--')\n plt.axvline(sol_grid[1][1]+(np.pi),0,color='r', linestyle='--')\n \n plt.xlabel(\"$\\\\theta_g$ / rad\")\n \n plt.xlim([-np.pi,np.pi])\n plt.ylim([0,z.max()])\n \n \n plt.ylabel(\"$|g|$\")\n \n plt.xticks([-np.pi,0,np.pi])\n \n plt.locator_params(axis='y', nbins=2)\n \n plt.savefig(\"og_fig.svg\")\n \n plt.show()\n \"\"\"\n return final_grid2,multip,(grid_c11-grid_c21)",
"def __init__(self,hdu_list,i_bin,fsk,mask_binary,masked_fraction,contaminants=None) :\n #Read numbers map\n self.fsk,nmap=read_flat_map(None,hdu=hdu_list[2*i_bin])\n compare_infos(fsk,self.fsk)\n\n #Read N(z)\n self.nz_data=hdu_list[2*i_bin+1].data.copy()\n\n #Make sure other maps are compatible\n if not self.fsk.is_map_compatible(mask_binary) :\n raise ValueError(\"Mask size is incompatible\")\n if not self.fsk.is_map_compatible(masked_fraction) :\n raise ValueError(\"Mask size is incompatible\")\n if contaminants is not None :\n for ic,c in enumerate(contaminants) :\n if not self.fsk.is_map_compatible(c) :\n raise ValueError(\"%d-th contaminant template is incompatible\"%ic)\n \n #Translate into delta map\n self.masked_fraction=masked_fraction\n self.weight=masked_fraction*mask_binary\n goodpix=np.where(mask_binary>0.1)[0]\n self.goodpix=goodpix\n self.mask_binary=mask_binary\n self.Ngal = np.sum(nmap*mask_binary)\n ndens=np.sum(nmap*mask_binary)/np.sum(self.weight)\n self.ndens_perad=ndens/(np.radians(self.fsk.dx)*np.radians(self.fsk.dy))\n self.delta=np.zeros_like(self.weight)\n self.delta[goodpix]=nmap[goodpix]/(ndens*masked_fraction[goodpix])-1\n\n #Reshape contaminants\n conts=None\n if contaminants is not None :\n conts=[[c.reshape([self.fsk.ny,self.fsk.nx])] for c in contaminants]\n\n #Form NaMaster field\n self.field=nmt.NmtFieldFlat(np.radians(self.fsk.lx),np.radians(self.fsk.ly),\n self.weight.reshape([self.fsk.ny,self.fsk.nx]),\n [self.delta.reshape([self.fsk.ny,self.fsk.nx])],\n templates=conts)",
"def make_map(data,LatLonBox):\n\n proj = ccrs.LambertConformal(central_longitude=data.StationLongitude,\n central_latitude=data.StationLatitude)\n\n fig = plt.figure(figsize=(17,11))\n ax = plt.subplot(111,projection=proj)\n \n ax.coastlines('50m', 'black', linewidth=2, zorder=2)\n\n reader = shpreader.Reader('/Users/chowdahead/Documents/shapefiles/countyl010g_shp_nt00964/countyl010g.shp')\n counties = list(reader.geometries())\n COUNTIES = cfeature.ShapelyFeature(counties,ccrs.PlateCarree())\n ax.add_feature(COUNTIES, facecolor='none',edgecolor='w')\n # Grab state borders\n state_borders = cfeature.NaturalEarthFeature(\n category='cultural', name='admin_1_states_provinces_lines',\n scale='50m', facecolor='none')\n ax.add_feature(state_borders, edgecolor='w', linewidth=1, zorder=3)\n \n ocean = cfeature.NaturalEarthFeature('physical', 'ocean', scale='50m',\n edgecolor='face',\n facecolor=cfeature.COLORS['water'])\n land = cfeature.NaturalEarthFeature('physical', 'land', scale='50m',\n edgecolor='face',\n facecolor=\"k\")\n\n ax.add_feature(ocean, zorder=-1)\n ax.add_feature(land, zorder=-1)\n ax.set_facecolor('black')\n \n ax.set_extent(LatLonBox,ccrs.PlateCarree())\n \n return fig,ax,proj",
"def save_tile_mask(label_poly_series, tile_poly, xyz, tile_size, dataset, region, zone, save_path, channels = 3, display=False):\n \n \n\n prefix = f'{region}{zone}{dataset}_'\n x,y,z = xyz\n tfm = from_bounds(*tile_poly.bounds, tile_size, tile_size) \n \n cropped_polys = [poly for poly in label_poly_series if poly.intersects(tile_poly)]\n cropped_polys_gdf = gpd.GeoDataFrame(geometry=cropped_polys, crs={'init': 'epsg:4326'})\n \n fbc_mask = burn_mask(cropped_polys_gdf, tfm, tile_size, channels)\n # fbc_mask = sol.vector.mask.df_to_px_mask(df=cropped_polys_gdf,\n # channels=['footprint', 'boundary', 'contact'],\n # affine_obj=tfm, shape=(tile_size,tile_size),\n # boundary_width=5, boundary_type='inner', contact_spacing=5, meters=True)\n \n if display: \n plt.imshow(fbc_mask); plt.show()\n \n skimage.io.imsave(f'{save_path}/{prefix}{z}_{x}_{y}_mask.png',fbc_mask, check_contrast=False)"
] | [
"0.61735",
"0.6161118",
"0.60565156",
"0.5746852",
"0.57418066",
"0.5684454",
"0.55834466",
"0.55502355",
"0.54894286",
"0.54626876",
"0.5461146",
"0.54364747",
"0.5411424",
"0.540842",
"0.53969634",
"0.5377036",
"0.5342902",
"0.5341299",
"0.5308774",
"0.5307411",
"0.52977175",
"0.5294228",
"0.526561",
"0.52611285",
"0.52544034",
"0.52495223",
"0.52265245",
"0.52233124",
"0.52222764",
"0.5210078"
] | 0.6667705 | 0 |
biomass predF = '/vol/v3/lt_stem_v3.1/models/biomassfiaald_20180708_0859/2000/biomassfiaald_20180708_0859_2000_mean.tif' trainF = '/vol/v2/datasets/biomass/nbcd/fia_ald/nbcd_fia_ald_biomass_clipped_to_conus.tif' shpF = '/vol/v2/datasets/Eco_Level_III_US/us_eco_l3_no_states_multipart.shp' trainND = 32768 predND = 9999 trgField = 'US_L3CODE' descrField = 'US_L3NAME' outDir = '/vol/v3/lt_stem_v3.1/evaluation/biomassfiaald_20180708_0859/ecoregion_correlation' xyLim = (500, 500) xLab = 'Reference (tons/ha)' yLab = 'Prediction (tons/ha)' annoXY = (15,420) | def mainFunction(f):
#############################################################################
# biomass hexagon
predF = '/vol/v3/lt_stem_v3.1/models/biomassfiaald_20180708_0859/2000/biomassfiaald_20180708_0859_2000_mean.tif'
trainF = '/vol/v2/datasets/biomass/nbcd/fia_ald/nbcd_fia_ald_biomass_clipped_to_conus.tif'
shpF = '/vol/v1/general_files/datasets/spatial_data/hexagons/hexagons_conus_albers_30km_with_id.shp'
trainND = -32768
predND = -9999
trgField = 'id'
descrField = 'id'
outDir = '/vol/v3/lt_stem_v3.1/evaluation/biomassfiaald_20180708_0859/hexagon_correlation'
xyLim = (500, 500)
xLab = 'Reference (tons/ha)'
yLab = 'Prediction (tons/ha)'
annoXY = (15,420)
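    # NOTE: the triple-quoted block below is an alternative configuration for the
    # NLCD canopy-cover model run; it is deliberately quoted out and kept only for
    # reference, so the biomass/hexagon settings above are the ones in effect.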
"""
# cc
predF = '/vol/v3/lt_stem_v3.1/models/canopy_20180915_1631/2001/canopy_20180915_1631_2001_mean.tif'
trainF = '/vol/v2/stem/conus/reference_rasters/nlcd_2001_canopy_clipped_to_conus_train.tif'
#shpF = '/vol/v2/datasets/Eco_Level_III_US/us_eco_l3_no_states_multipart.shp'
shpF = '/vol/v1/general_files/datasets/spatial_data/hexagons/hexagons_conus_albers_30km_with_id.shp'
trainND = 255
predND = 255
trgField = 'id'
descrField = 'id'
#trgField = 'US_L3CODE'
#descrField = 'US_L3NAME'
#outDir = '/vol/v3/lt_stem_v3.1/evaluation/canopy_20180915_1631/ecoregion_correlation'
outDir = '/vol/v3/lt_stem_v3.1/evaluation/canopy_20180915_1631/hexagon_correlation'
xyLim = (100, 100)
xLab = 'Reference (%)'
yLab = 'Prediction (%)'
annoXY = (5,82)
"""
#############################################################################
# get color setup
norm = colors.Normalize(vmin=0, vmax=1)
f2rgb = cm.ScalarMappable(norm=norm, cmap=cm.get_cmap('YlGnBu_r'))
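    # f2rgb maps a correlation value in [0, 1] onto the reversed YlGnBu colormap;
    # f2hex() (a helper presumably defined elsewhere in this module) turns that
    # color into the hex string written to the per-feature CSV below.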
# open the shapefile
vDriver = ogr.GetDriverByName("ESRI Shapefile")
vSrc = vDriver.Open(shpF, 0)
vLayer = vSrc.GetLayer()
commonBox = get_intersec([predF, trainF])
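    # get_intersec() (helper defined elsewhere) presumably returns the common
    # bounding box of the prediction and reference rasters, so both are sampled
    # over the same extent by get_zone_pixels() further down.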
#for f in range(vLayer.GetFeatureCount()):
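    # The original loop over features is commented out; the feature index f is
    # now a function argument, so this function handles one zone at a time and
    # can be driven externally (e.g. mapped over range(vLayer.GetFeatureCount())).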
feature = vLayer[f]
name = feature.GetField(trgField)
print('f: '+str(f))
outFig = os.path.join(outDir, (trgField.replace(' ','_').lower()+'_'+str(name)+'.png'))
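    # If the figure for this zone already exists, bail out early so an
    # interrupted run can be resumed without redoing finished features.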
if os.path.exists(outFig):
#break
return
descr = feature.GetField(descrField)
    predP, coords = get_zone_pixels(feature, shpF, predF, 1, [commonBox[0], commonBox[2], commonBox[3], commonBox[1]])  #.compressed()
trainP, coords = get_zone_pixels(feature, shpF, trainF, 1, [coords[0], coords[1], coords[2], coords[3]])#.compressed()
predP = ma.masked_equal(predP, predND)
trainP = ma.masked_equal(trainP, trainND)
trainP = ma.masked_equal(trainP, 0)
combMask = np.logical_not(np.logical_not(predP.mask) * np.logical_not(trainP.mask))
predP[combMask] = ma.masked
trainP[combMask] = ma.masked
predP = predP.compressed()
trainP = trainP.compressed()
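    # predP and trainP are now pixel-aligned 1-D arrays: nodata values (and zero
    # reference pixels) were masked in both rasters, the union of the two masks
    # was applied to each, and the masked entries were dropped with compressed().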
if (predP.shape[0] == 0) | (trainP.shape[0] == 0) | (predP==0).all() | (trainP==0).all():
predP = np.array([0,0,1,1], dtype='float64')
trainP = np.array([0,0,1,1], dtype='float64')
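    # If the zone has no usable pixels, substitute a tiny dummy sample so the
    # statistics and plot below still run; the (mae == 0) & (r == 1) check
    # further down resets r to 0 for this degenerate case.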
mae = round(np.mean(np.absolute(np.subtract(predP, trainP))),1)
rmse = round(np.sqrt(np.mean((predP-trainP)**2)),1)
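    # mae = mean(|pred - ref|) and rmse = sqrt(mean((pred - ref)**2)), both
    # rounded to one decimal place in map units (tons/ha for biomass).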
totPixs = trainP.shape[0]
sampSize = round(totPixs*1)
pickFrom = range(sampSize)
#sampIndex = np.random.choice(pickFrom, size=sampSize)
sampIndex = pickFrom
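    # Subsampling is effectively disabled: sampSize is the full pixel count
    # (totPixs*1) and sampIndex covers every pixel; the commented-out
    # np.random.choice line would draw a random subsample instead.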
r = round(np.corrcoef(trainP[sampIndex], predP[sampIndex])[0][1], 2)
if (mae == 0) & (r == 1):
r = 0.0
rColor = f2hex(f2rgb, r)
p = sns.jointplot(trainP[sampIndex], predP[sampIndex], kind="hex", color='blue', xlim=(0,xyLim[0]), ylim=(0,xyLim[1]), size=5)
p.ax_joint.set_xlabel(xLab)
p.ax_joint.set_ylabel(yLab)
p.ax_joint.annotate('r: '+str(r)+'\nrmse: '+str(rmse)+'\nmae: '+str(mae), annoXY)
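    # Hexbin joint plot of reference vs. prediction, annotated with r, rmse and
    # mae at annoXY (given in data coordinates).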
plt.tight_layout()
outFig = os.path.join(outDir, (trgField.replace(' ','_').lower()+'_'+str(name)+'.png'))
p.savefig(outFig)
df = pd.DataFrame({'id':name, 'descr':descr, 'r':r, 'rmse':rmse, 'mae':mae, 'color':rColor, 'img':os.path.basename(outFig)}, index=[0])
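    # One-row summary for this feature (id, description, stats, hex color and
    # figure filename), written next to the PNG; the per-feature CSVs are
    # presumably collated downstream.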
outCSV = outFig.replace('.png','.csv')
df.to_csv(outCSV, ',', index=False) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def config1() :\n data_name = \"titanic\" ### in data/input/\n model_class = 'AutoML' ### ACTUAL Class name for model_sklearn.py\n n_sample = 1000\n\n def post_process_fun(y): ### After prediction is done\n return int(y)\n\n def pre_process_fun(y): ### Before the prediction is done\n return int(y)\n\n\n model_dict = {'model_pars': {\n ### LightGBM API model #######################################\n 'model_class': model_class\n ,'model_pars' : {\n 'total_time_limit' : 20,\n 'algorithms' : 'auto',\n 'results_path' : root_repo + f'/data/output/{data_name}/{os_get_function_name()}/automl_1',\n 'eval_metric' : 'auto'\n\n # mode='Explain',\n # ml_task='auto', model_time_limit=None, algorithms='auto', train_ensemble=True,\n # stack_models='auto', eval_metric='auto', validation_strategy='auto', explain_level='auto',\n # golden_features='auto', features_selection='auto', start_random_models='auto',\n # hill_climbing_steps='auto', top_models_to_improve='auto', verbose=1, random_state=1234)\n }\n\n , 'post_process_fun' : post_process_fun ### After prediction ##########################################\n , 'pre_process_pars' : {'y_norm_fun' : pre_process_fun , ### Before training ##########################\n\n\n ### Pipeline for data processing ##############################\n 'pipe_list': [\n #### coly target prorcessing\n {'uri': 'source/prepro.py::pd_coly', 'pars': {}, 'cols_family': 'coly', 'cols_out': 'coly', 'type': 'coly' },\n\n\n {'uri': 'source/prepro.py::pd_colnum_bin', 'pars': {}, 'cols_family': 'colnum', 'cols_out': 'colnum_bin', 'type': '' },\n {'uri': 'source/prepro.py::pd_colnum_binto_onehot', 'pars': {}, 'cols_family': 'colnum_bin', 'cols_out': 'colnum_onehot', 'type': '' },\n\n #### catcol INTO integer, colcat into OneHot\n {'uri': 'source/prepro.py::pd_colcat_bin', 'pars': {}, 'cols_family': 'colcat', 'cols_out': 'colcat_bin', 'type': '' },\n # {'uri': 'source/prepro.py::pd_colcat_to_onehot', 'pars': {}, 'cols_family': 'colcat_bin', 'cols_out': 'colcat_onehot', 'type': '' },\n\n\n ### Cross_feat = feat1 X feat2\n # {'uri': 'source/prepro.py::pd_colcross', 'pars': {}, 'cols_family': 'colcross', 'cols_out': 'colcross_pair', 'type': 'cross'},\n\n\n #### Example of Custom processor\n #{'uri': THIS_FILEPATH + '::pd_col_myfun', 'pars': {}, 'cols_family': 'colnum', 'cols_out': 'col_myfun', 'type': '' }, \n\n\n ],\n }\n },\n\n 'compute_pars': { 'metric_list': ['accuracy_score','average_precision_score']\n\n ,'mlflow_pars' : None # {} ### Not empty --> use mlflow\n },\n\n 'data_pars': { 'n_sample' : n_sample,\n\n 'download_pars' : None,\n\n\n 'cols_input_type' : cols_input_type_1,\n ### family of columns for MODEL #########################################################\n # \"colnum\", \"colnum_bin\", \"colnum_onehot\", \"colnum_binmap\", #### Colnum columns\n # \"colcat\", \"colcat_bin\", \"colcat_onehot\", \"colcat_bin_map\", #### colcat columns\n # 'colcross_single_onehot_select', \"colcross_pair_onehot\", 'colcross_pair', #### colcross columns 'coldate', 'coltext',\n 'cols_model_group': [ 'colnum_bin',\n 'colcat_bin',\n # 'coltext',\n # 'coldate',\n #'colcross_pair',\n \n ### example of custom\n # 'col_myfun'\n ]\n\n ### Filter data rows ##################################################################\n ,'filter_pars': { 'ymax' : 2 ,'ymin' : -1 }\n\n }\n }\n\n ##### Filling Global parameters ############################################################\n model_dict = global_pars_update(model_dict, data_name, config_name=os_get_function_name() )\n return model_dict",
"def eval_pos():\n annotations_dir = \"/home/sdb/wangshentao/myspace/thesis/data/VisDrone2019-MOT-test-dev/annotations\"\n all_iou = []\n seqs_sample = '''\n uav0000249_00001_v\n uav0000249_02688_v\n '''\n seqs_str = seqs_sample\n seqs = [seq.strip() for seq in seqs_str.split()]\n for seq in seqs:\n print(seq)\n bbox, frame_id = get_frame_bbox(annotations_dir, seq + '.txt')\n predict_bbox = []\n for idx in range(len(bbox)):\n kalman_filter = KalmanFilter()\n trace_bbox = bbox[idx]\n trace_predict_bbox = []\n mean, covariance = kalman_filter.initiate(tlwh_to_xyah(trace_bbox[0]))\n for i in range(1, trace_bbox.shape[0]):\n mean, covariance = kalman_filter.predict(mean, covariance)\n trace_predict_bbox.append(tlwh(mean))\n mean, covariance = kalman_filter.update(mean, covariance, tlwh_to_xyah(trace_bbox[i]))\n\n trace_predict_bbox = np.array(trace_predict_bbox)\n for i in range(trace_predict_bbox.shape[0]):\n trace_predict_bbox[i] = tlwh_to_tlbr(trace_predict_bbox[i])\n for i in range(trace_bbox.shape[0]):\n trace_bbox[i] = tlwh_to_tlbr(trace_bbox[i])\n\n predict_bbox.append(trace_predict_bbox)\n bbox[idx] = bbox[idx][1:]\n frame_id[idx] = frame_id[idx][1:]\n assert bbox[idx].shape[0] == predict_bbox[idx].shape[0]\n iou = []\n for i in range(len(bbox)):\n trace_iou = []\n trace_bbox = bbox[i]\n trace_predict_bbx = predict_bbox[i]\n for j in range(trace_bbox.shape[0]):\n iou_val = bbox_ious(np.ascontiguousarray(trace_bbox[j][np.newaxis, :], dtype=np.float),\n np.ascontiguousarray(trace_predict_bbx[j][np.newaxis, :], dtype=np.float))\n trace_iou.append(iou_val)\n iou.append(np.array(trace_iou))\n iou = [int(np.mean(i)*100) for i in iou]\n all_iou += iou\n bins = np.zeros(101)\n for i in all_iou:\n bins[i] += 1\n plt.bar(np.arange(101), bins)\n plt.ylabel('num')\n plt.xlabel('IoU*100')\n plt.show()",
"def main():\n ##############################\n # Options\n ##############################\n\n bev_generator = 'slices'\n slices_config = \\\n \"\"\"\n slices {\n height_lo: -0.2\n height_hi: 2.3\n num_slices: 5\n }\n \"\"\"\n # Use None for a random image\n #img_idx = None\n img_idx = 6\n\n show_ground_truth = True # Whether to overlay ground_truth boxes\n show_height_maps = False # Whether to show the five height maps\n show_images = False # Whether to show the images\n\n point_cloud_source = 'lidar'\n pre_label_dir = '/home/cecilia/leo_projects/bishe2019/3D-Detection/avod/data/outputs/pyramid_cars_with_aug_rep_loss/predictions/kitti_native_eval/0.1/112000/data/'\n ##############################\n # End of Options\n ##############################\n\n dataset_config = DatasetBuilder.copy_config(DatasetBuilder.KITTI_VAL)\n dataset_config = DatasetBuilder.merge_defaults(dataset_config)\n\n # Overwrite bev_generator\n if bev_generator == 'slices':\n text_format.Merge(slices_config,\n dataset_config.kitti_utils_config.bev_generator)\n else:\n raise ValueError('Invalid bev_generator')\n\n dataset = DatasetBuilder.build_kitti_dataset(dataset_config,\n use_defaults=False)\n\n if img_idx is None:\n img_idx = int(random.random() * dataset.num_samples)\n\n sample_name = \"{:06}\".format(img_idx)\n print('=== Showing BEV maps for image: {}.png ==='.format(sample_name))\n\n # Load image\n image = cv2.imread(dataset.get_rgb_image_path(sample_name))\n image_shape = image.shape[0:2]\n\n kitti_utils = dataset.kitti_utils\n point_cloud = kitti_utils.get_point_cloud(\n point_cloud_source, img_idx, image_shape)\n ground_plane = kitti_utils.get_ground_plane(sample_name)\n bev_images = kitti_utils.create_bev_maps(point_cloud, ground_plane)\n\n height_maps = np.array(bev_images.get(\"height_maps\"))\n density_map = np.array(bev_images.get(\"density_map\"))\n\n # Get groundtruth bev-info\n gt_box_points, gt_box_points_norm = [None, None]\n if show_ground_truth:\n gt_obj_labels = obj_utils.read_labels(dataset.label_dir, img_idx)\n gt_filtered_objs = gt_obj_labels\n\n gt_label_boxes = []\n for gt_label in gt_filtered_objs:\n gt_box = box_3d_encoder.object_label_to_box_3d(gt_label)\n gt_label_boxes.append(gt_box)\n\n gt_label_boxes = np.array(gt_label_boxes)\n gt_box_points, gt_box_points_norm = box_3d_projector.project_to_bev(\n gt_label_boxes, [[-40, 40], [0, 70]])\n\n # Get prediction bev-info\n pre_box_points, pre_box_points_norm = [None, None]\n pre_obj_labels = obj_utils.read_labels(pre_label_dir, img_idx)\n\n pre_filtered_objs = pre_obj_labels\n\n pre_label_boxes = []\n for pre_label in pre_filtered_objs:\n pre_box = box_3d_encoder.object_label_to_box_3d(pre_label)\n pre_label_boxes.append(pre_box)\n\n pre_label_boxes = np.array(pre_label_boxes)\n pre_box_points, pre_box_points_norm = box_3d_projector.project_to_bev(\n pre_label_boxes, [[-40, 40], [0, 70]])\n\n \n rgb_img_size = (np.array((1242, 375)) * 0.75).astype(np.int16)\n img_x_start = 60\n img_y_start = 330\n\n img_x = img_x_start\n img_y = img_y_start\n img_w = 400\n img_h = 350\n img_titlebar_h = 20\n\n # Show images if show_images = True\n if show_images:\n vis_utils.cv2_show_image(\"Image\", image,\n size_wh=rgb_img_size, location_xy=(img_x, 0))\n\n # Height maps if show_height_maps = True\n if show_height_maps:\n\n for map_idx in range(len(height_maps)):\n height_map = height_maps[map_idx]\n\n height_map = draw_boxes(height_map, gt_box_points_norm, pre_box_points_norm)\n vis_utils.cv2_show_image(\n \"Height Map {}\".format(map_idx), height_map, 
size_wh=(\n img_w, img_h), location_xy=(\n img_x, img_y))\n\n img_x += img_w\n # Wrap around\n if (img_x + img_w) > 1920:\n img_x = img_x_start\n img_y += img_h + img_titlebar_h\n\n # Density map (Normal BEV)\n density_map = draw_boxes(density_map, gt_box_points_norm, pre_box_points_norm)\n vis_utils.cv2_show_image(\n \"Density Map\", density_map, size_wh=(\n img_w, img_h), location_xy=(\n img_x, img_y))\n\n cv2.waitKey()",
"def __init__(self):\n\n print '-'*60\n #self.train_folder = '../data/preprocess_nonstopword_nonstemming/train_clean/' # folder\n #self.test_folder = '../data/preprocess_nonstopword_nonstemming/test_clean/' # folder\n self.train_folder = '../data/preprocess_6/train_clean/' # folder\n self.test_folder = '../data/preprocess_6/test_clean/' # folder\n self.label_file = '../data/train_labels.csv' # path\n #pred_file = './submission_NB.csv' # predicitons\n self.pred_file = './submission_pre_6_t0.6.csv'\n\n\n self.train_ans = []\n self.test_index = []",
"def execute(cf):\n\n ##Ports and parameters\n train_set = cf.get_input(\"train_set\") #training set. Typically even_file\n test_set = cf.get_input(\"test_set\") #test set. Typically odd_file\n WM1 = cf.get_input(\"WM1\")\n WM2 = cf.get_input(\"WM2\")\n WM3 = cf.get_input(\"WM3\")\n WM4 = cf.get_input(\"WM4\")\n WM5 = cf.get_input(\"WM5\")\n WM6 = cf.get_input(\"WM6\")\n WM7 = cf.get_input(\"WM7\")\n WM8 = cf.get_input(\"WM8\")\n WM9 = cf.get_input(\"WM9\")\n WM10 = cf.get_input(\"WM10\")\n WM11 = cf.get_input(\"WM11\")\n WM12 = cf.get_input(\"WM12\")\n WM13 = cf.get_input(\"WM13\")\n WM14 = cf.get_input(\"WM14\")\n WM15 = cf.get_input(\"WM15\")\n WM16 = cf.get_input(\"WM16\")\n WM17 = cf.get_input(\"WM17\")\n WM18 = cf.get_input(\"WM18\")\n WM19 = cf.get_input(\"WM19\")\n WM20 = cf.get_input(\"WM20\")\n WMdir = cf.get_input(\"WMdir\")\n WMdir2 = cf.get_input(\"WMdir2\")\n basefreqs = cf.get_input(\"BaseFrequencies\")\n ufemodel_path = cf.get_input(\"UFEmodel\")\n\n bestWM = cf.get_output(\"BestWM\")\n log_file = cf.get_output(\"log_file\")\n interm = cf.get_output(\"intermediate\")\n\n genome = cf.get_parameter('genome', 'string')\n motevo_path = cf.get_parameter('motevo_path', 'string')\n aligned = cf.get_parameter(\"aligned\", \"boolean\")\n\n os.mkdir(interm)\n\n\n\n # Read stuff in\n WMs = [i for i in[WM1, WM2, WM3, WM4, WM5, WM6, WM7, WM8, WM9, WM10, WM11, WM12, WM13, WM14, WM15, WM16, WM17, WM18, WM19, WM20] if i]\n\n if WMdir:\n WMs += [os.path.join(WMdir, wm) for wm in os.listdir(WMdir)]\n\n if WMdir2:\n WMs += [os.path.join(WMdir2, wm) for wm in os.listdir(WMdir2)]\n\n f = open(basefreqs)\n ATfreq = float(f.readline().strip().split()[1])\n GCfreq = float(f.readline().strip().split()[1])\n f.close()\n\n\n # Compute stuff: optimal priors and then likelihood of test set\n optpriors = []\n logliks = []\n\n for i, WM in enumerate(WMs):\n\n wmlen = len(open(WM).readlines())-4\n\n # 1. Fit prior on training set with EM\n tag = 'fitP_%i' %(i+1)\n params, sites, priors, loglikfile = giveMotevoParamFile(genome, wmlen, interm, tag, aligned, ufemodel_path, ATfreq, GCfreq, emprior=1, bgorder=0, bgprior=0.99)\n r = runMotevo(motevo_path, train_set, params, WM, interm, tag)\n if r != 0:\n print 'motevo failed ', tag\n sys.exit(1)\n\n # prior file:\n # WM_name final_prior nr_of_sites density\n # /import/bc2/home/nimwegen/GROUP/hseq_pipeline/severin/Anduril/Pipeline/PipeLineSource/TESTRUN/NRF1_Z2/OUTPUT/NRF1_FgBg-runmotevoPG2_1/Logo 0.016554 635.008 0.251863\n # background 0.983446 37724.8 0.748137\n # UFEwm 0 0 0\n\n optprior = float(open(priors).readlines()[1].split()[1])\n bgprior=(1-optprior)\n print bgprior\n\n # 2. 
Compute log-likelihood on test set with optimal prior from training set and without EM\n tag = 'compLL_%i' %(i+1)\n params, sites, priors, loglikfile = giveMotevoParamFile(genome, wmlen, interm, tag, aligned, ufemodel_path, ATfreq, GCfreq, emprior=0, bgorder=0, bgprior=bgprior)\n runMotevo(motevo_path, train_set, params, WM, interm, tag)\n\n a = loadtxt(loglikfile, usecols=[1])\n ll = sum(a)\n\n logliks.append(ll)\n optpriors.append(optprior)\n\n print logliks\n\n\n\n #replace name in WM file with bestWM\n lines = open(WMs[argmax(logliks)]).readlines()\n lines[1] = 'NA BestWM\\n'\n bwm = open(bestWM, 'w')\n bwm.write(''.join(lines))\n\n\n l = open(log_file, 'w')\n\n l.write('WM_name\\tWM_path\\tlog_likelihood\\topt_prior\\n')\n\n names = ['WM_%i\\t%s\\t%.4f\\t%s' %(i+1, WMs[i], logliks[i], optpriors[i]) for i in arange(len(WMs))]\n\n l.write('\\n'.join(names))\n l.close()\n\n\n return 0",
"def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"dAfb[0.,-0.75,0.75]\");\n self.modelBuilder.doVar(\"dA0[0.0, -1.0, 1.0]\");\n #self.modelBuilder.doSet(\"POI\",\"dAfb,dA0\")\n self.modelBuilder.doSet(\"POI\",\"dAfb\")\n self.modelBuilder.factory_('expr::mAfb(\"@0+@1\",eAfb,dAfb)')\n self.modelBuilder.factory_('expr::mA0(\"(@0+@1)\",eA0,dA0)')\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')",
"def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"mAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"mA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doSet(\"POI\",\"eAfb,mAfb\")\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')",
"def __init__(\n self,\n cfg: CfgNode,\n root: Path,\n relative_path_to_list: Path = \"split1.train\",\n relative_path_to_mapping: Path = \"mapping.txt\",\n feat_dim: int = -1,\n relative_path_to_train_list: Path = None,\n ):\n super().__init__(cfg)\n self.root = root\n self.file_list = root / relative_path_to_list\n if relative_path_to_train_list is not None:\n train_file_list = root / relative_path_to_train_list\n else:\n train_file_list = None\n self.mapping_file = root / relative_path_to_mapping\n self.end_class_id = 0\n self.mof_eval_ignore_classes = []\n self.background_class_ids = [0]\n\n # following are defaults, should be set\n self.feat_dim = feat_dim\n self.convenient_name = None\n self.split = -1\n self.max_transcript_length = 100\n\n with open(self.file_list) as f:\n self.file_names = [x.strip() for x in f if len(x.strip()) > 0]\n\n self.action_id_to_name = {}\n self.action_name_to_id = {}\n if self.mapping_file is not None:\n with open(self.mapping_file) as f:\n the_mapping = [tuple(x.strip().split()) for x in f]\n\n for (i, l) in the_mapping:\n self.action_id_to_name[int(i)] = l\n self.action_name_to_id[l] = int(i)\n\n self.num_actions = len(self.action_id_to_name)\n\n self.feat_file_paths = [\n self.root / \"features\" / f\"{x}.npy\" for x in self.file_names\n ]\n self.gt_file_paths = [\n self.root / \"labels\" / f\"{x}.npy\" for x in self.file_names\n ]\n self.tr_file_paths = [\n self.root / \"transcripts\" / f\"{x}.npy\" for x in self.file_names\n ]\n\n self.eos_token = \"_EOS_\" # end of sentence\n self.sos_token = \"_SOS_\" # start of sentence\n self.eos_token_id = self.num_actions # = M, 48 for breakfast\n self.sos_token_id = self.num_actions + 1 # = M + 1, 49 for breakfast\n self.action_id_to_name[self.eos_token_id] = self.eos_token\n self.action_name_to_id[self.eos_token] = self.eos_token_id\n self.action_id_to_name[self.sos_token_id] = self.sos_token\n self.action_name_to_id[self.sos_token] = self.sos_token_id\n\n # loading the training transcripts\n if train_file_list is not None:\n with open(train_file_list) as f:\n train_file_names = [x.strip() for x in f if len(x.strip()) > 0]\n tr_train_file_paths = [\n self.root / \"transcripts\" / f\"{x}.npy\" for x in train_file_names\n ]\n training_transcripts = set()\n for tr_file_path in tr_train_file_paths:\n transcript = tuple(np.load(str(tr_file_path)))\n training_transcripts.add(transcript)\n\n self.training_transcripts_list = []\n for t in training_transcripts:\n self.training_transcripts_list.append(list(t))\n\n self.training_path_grammar = ModifiedPathGrammar(\n transcripts=self.training_transcripts_list,\n num_classes=self.num_actions\n )",
"def main():\n feature_extraction_model = \"HOG\"\n # feature_extraction_models = [\"CM\", \"HOG\"]\n feature_extraction_model_1 = \"CM\"\n dimension_reduction_model = \"PCA\"\n k_value = 10\n dim_k_value = 40\n # K_value = 20\n # lab_folder = \"Dataset3/Labelled/Set1\"\n # unlab_folder = \"Dataset3/Unlabelled/Set 2\"\n lab_folder = get_input_folder(\"Labelled Folder\")\n unlab_folder = get_input_folder(\"Classify\")\n start = time.time()\n # ================================================================================================================\n # labelled Images\n dim_red = DimensionReduction(feature_extraction_model, dimension_reduction_model, dim_k_value,\n folder_metadata=lab_folder,\n metadata_collection=\"labelled\")\n obj_feat_lab = dim_red.get_object_feature_matrix()\n features_list_lab = np.array(obj_feat_lab['featureVector'].tolist())\n images_list_lab = np.array(obj_feat_lab['imageId'])\n # filtering the labelled set\n dorsal_list, palmar_list = filter_images_by_label(images_list_lab)\n\n # unlabelled images\n dim_red = DimensionReduction(feature_extraction_model, dimension_reduction_model, dim_k_value,\n folder_metadata=unlab_folder,\n metadata_collection=\"unlabelled\")\n obj_feat_unlab = dim_red.get_object_feature_matrix()\n features_list_unlab = np.array(obj_feat_unlab['featureVector'].tolist())\n images_list_unlab = np.array(obj_feat_unlab['imageId'])\n\n # ================================================================================================================\n # labelled Images\n dim_red = DimensionReduction(feature_extraction_model_1, dimension_reduction_model, dim_k_value,\n folder_metadata=lab_folder,\n metadata_collection=\"labelled\")\n obj_feat_lab_1 = dim_red.get_object_feature_matrix()\n features_list_lab_1 = np.array(obj_feat_lab_1['featureVector'].tolist())\n # images_list_lab = np.array(obj_feat_lab_1['imageId'])\n # filtering the labelled set\n\n\n # unlabelled images\n dim_red = DimensionReduction(feature_extraction_model_1, dimension_reduction_model, dim_k_value,\n folder_metadata=unlab_folder,\n metadata_collection=\"unlabelled\")\n obj_feat_unlab_1 = dim_red.get_object_feature_matrix()\n features_list_unlab_1 = np.array(obj_feat_unlab_1['featureVector'].tolist())\n # images_list_unlab = np.array(obj_feat_unlab['imageId'])\n features_list_lab = np.concatenate((features_list_lab, features_list_lab_1), axis=1)\n features_list_unlab = np.concatenate((features_list_unlab, features_list_unlab_1), axis=1)\n\n # ================================================================================================================\n\n dorsal_list, palmar_list = filter_images_by_label(images_list_lab)\n features_list = np.concatenate((features_list_lab, features_list_unlab))\n images_list = np.concatenate((images_list_lab, images_list_unlab))\n images_list = list(images_list)\n # Finding Similarity Matrix\n cos_sim = cosine_similarity(features_list)\n sim_graph = np.empty((0, len(cos_sim)))\n for row in cos_sim:\n k_largest = np.argsort(-np.array(row))[1:k_value + 1]\n sim_graph_row = [d if i in k_largest else 0 for i, d in enumerate(row)]\n sim_graph = np.append(sim_graph, np.array([sim_graph_row]), axis=0)\n\n row_sums = sim_graph.sum(axis=1)\n sim_graph = sim_graph / row_sums[:, np.newaxis]\n idx = 0\n results_dorsal = ppr(sim_graph, images_list, dorsal_list)\n results_palmar = ppr(sim_graph, images_list, palmar_list)\n final_results = {}\n\n for img in images_list_unlab:\n if results_dorsal[img] < results_palmar[img]:\n final_results[img] = 
\"dorsal\"\n else:\n final_results[img] = \"palmar\"\n\n actual_labels = fetch_actual_labels(images_list_unlab)\n print(\"Classification\")\n no_correct = 0\n correctly_classified = []\n incorrectly_classified = []\n print(\"| ImageId | Prediction | Actual |\")\n for r in final_results:\n print(\"| {} | {} | {} |\".format(r, final_results[r], actual_labels[r]))\n if final_results[r] == actual_labels[r]:\n correctly_classified.append(r)\n no_correct += 1\n else:\n incorrectly_classified.append(r)\n\n print(\"Correctly classified: {}\\n\".format(correctly_classified))\n print(\"InCorrectly classified: {}\\n\".format(incorrectly_classified))\n\n print(\"Classification Accuracy: {}%\".format(no_correct / len(images_list_unlab) * 100))\n print(\"Execution time: {} seconds\".format(time.time() - start))",
"def preprocess():\n #get a list of all sentinel-image filenames\n s2files = [f for f in listdir(s2path) if endswith(join(s2path, f),\".tif\")==True]\n #read in a csv-file with information about the cluster\n csvpath = os.path.abspath(os.path.join(os.path.abspath(__file__),\"../../dataResearch/Data_with_Pooled.csv\"))\n df = pd.read_csv(csvpath)\n #get the min and max values per band \n minmaxlist = minmax()\n timelist = []\n print(\"STEP 2/2\")\n print(\"CREATING TFRECORDS\")\n for i in s2files:\n start = time.time()\n s2file = s2path + \"/\" + i\n #Get Features out of the Dataframe\n #get the name of the label (equals the SurveyID in the data)\n labelname = i.replace(\".tif\",\"\")\n #get the index of the entry to get the information out of the dataframe\n index = df.ID[df.ID == labelname].index\n wealthpooled = float(df['wealthpooled'].loc[index].max().replace(\",\",\".\"))\n wealthpooled5country = float(df['wealthpooled5country'].loc[index].max().replace(\",\",\".\"))\n country = bytes(df['country'].loc[index].max(), 'utf-8')\n urbanrural = bytes(df['URBAN_RURA'].loc[index].max(), 'utf-8')\n csvlat = float(df['LATNUM'].loc[index].max().replace(\",\",\".\"))\n csvlon = float(df['LONGNUM'].loc[index].max().replace(\",\",\".\"))\n year = int(df['year'].loc[index].max())\n wealth = float(df['wealth'].loc[index].max().replace(\",\",\".\"))\n #Get all Bands out of the GEOTIFF File\n s2raster = gdal.Open(s2file)\n bandlist = []\n for n in range(s2raster.RasterCount):\n f = n+1\n if n not in [13,14,15]:\n s2band = s2raster.GetRasterBand(f)\n s2band = s2band.ReadAsArray()\n s2band = np.resize(s2band,(1050,1050)).flatten()\n min = minmaxlist[n][0]\n max = minmaxlist[n][1]\n s2band = (s2band-min)/(max-min)\n bandlist.append(s2band.flatten())\n #get the Nightlight Band out of the GEOTIFF File\n nlfile = nlpath + \"/\" + i\n nlraster = gdal.Open(nlfile)\n nlband = nlraster.GetRasterBand(1)\n nlband = nlband.ReadAsArray()\n nlband = np.resize(nlband,(1050,1050)).flatten()\n min = minmaxlist[13][0]\n max = minmaxlist[13][1]\n nlband = (nlband-min)/(max-min)\n bandlist.append(nlband)\n #create a TFRecords-File with the TFRecordWriter\n with tf.io.TFRecordWriter(exportpath + '/' + labelname + '.tfrec') as writer:\n example = serialize_example(B1=bandlist[0],\n B2=bandlist[1],\n B3=bandlist[2],\n B4=bandlist[3],\n B5=bandlist[4],\n B6=bandlist[5],\n B7=bandlist[6],\n B8=bandlist[7],\n B8A=bandlist[8],\n B9=bandlist[9],\n B10=bandlist[10],\n B11=bandlist[11],\n B12=bandlist[12],\n NL=bandlist[13],\n wealth=wealth,\n wealthpooled=wealthpooled,\n wealthpooled5country=wealthpooled5country,\n country=country,\n urbanrural=urbanrural,\n lon_coord=csvlon,\n lat_coord=csvlat,\n year=year)\n writer.write(example)\n end = time.time()\n timelist.append(end-start)\n print(\"Done!\",str(s2files.index(i)+1) + \"/\" + str(len(s2files)),\"Est. time left:\",time.strftime('%d:%H:%M:%S',time.gmtime(int(sum(timelist)/len(timelist)*(len(s2files)-s2files.index(i))))))",
"def doParametersOfInterest(self):\r\n if self.fg4fixed:\r\n self.modelBuilder.doVar(\"CMS_zz4l_fg4[0]\")\r\n self.modelBuilder.doVar(\"r[1,0,4]\")\r\n print \"Fixing CMS_zz4l_fg4\"\r\n poi = \"r\"\r\n else:\r\n if self.modelBuilder.out.var(\"CMS_zz4l_fg4\"):\r\n print \"have fg4 inside\"\r\n else:\r\n self.modelBuilder.doVar(\"CMS_zz4l_fg4[0.,-1,1]\")\r\n poi = \"CMS_zz4l_fg4\"\r\n if self.cPOI:\r\n if self.modelBuilder.out.var(\"cww_zz\"):\r\n print \"have czz_ww inside\"\r\n else:\r\n self.modelBuilder.doVar(\"cww_zz[0.5,-10,10]\")\r\n poi += \",cww_zz\"\r\n\r\n if self.fg2POI:\r\n if self.modelBuilder.out.var(\"CMS_zz4l_fg2\"):\r\n print \"have fg2 inside\"\r\n else:\r\n self.modelBuilder.doVar(\"CMS_zz4l_fg2[0.,0,1]\")\r\n poi += \",CMS_zz4l_fg2\"\r\n if self.muFloating:\r\n self.modelBuilder.doVar(\"r[1,0,2000]\")\r\n if self.muAsPOI:\r\n print \"Treating r as a POI\"\r\n poi += \",r\"\r\n else:\r\n self.modelBuilder.out.var(\"r\").setAttribute(\"flatParam\")\r\n if self.phiFloating:\r\n if self.modelBuilder.out.var(\"CMS_zz4l_fg4phi\"):\r\n print \"have fg4phi inside\"\r\n else: \r\n self.modelBuilder.doVar(\"CMS_zz4l_fg4phi[0.,-3.1415926,3.1415926]\")\r\n if self.phiPOI:\r\n poi += \",CMS_zz4l_fg4phi\"\r\n else:\r\n self.modelBuilder.out.var(\"CMS_zz4l_fg4phi\").setAttribute(\"flatParam\")\r\n if self.phi2Floating:\r\n #self.modelBuilder.doVar(\"CMS_zz4l_fg4phi[0.,-math.pi,math.pi]\")\r\n if self.modelBuilder.out.var(\"CMS_zz4l_fg2phi\"):\r\n print \"have fg2phi inside\"\r\n else: \r\n self.modelBuilder.doVar(\"CMS_zz4l_fg2phi[0.,-3.1415926,3.1415926]\")\r\n self.modelBuilder.out.var(\"CMS_zz4l_fg2phi\").setAttribute(\"flatParam\")\r\n \r\n self.modelBuilder.doSet(\"POI\",poi)",
"def NAME():\n\n # Location of data\n base_dir = \"(Location)\" #Location of align tif --> Should be the location of the experiment's align tiff folder, ex: \"C/desktop/work/image_processing/YYYYMMDD/align_tiffs\"\n resolution = {'res_xy_nm': 100, 'res_z_nm': 70} #Resolution of a pixel (do not alter)\n thresh = 0.9 #What qualifies for final probability map (do not alter)\n number_of_datasets = 20 #Number of wells in the experiemnts, \"20\" is an example where there are 16 samples and 4 controls\n\n #Rb Antibody\n conjugate_fn_str = 'GAD2' #String segment to search in a filename\n #conjugate_fn_str should be the term used in the name of the control align tiff for a well (usually \"PSD\", \"GAD2\", or \"SYNAPSIN\")\n target_fn_str = 'L106'\n #Ms Antibody project name, no parent or subclone number needed\n #target_fn_str should be the project number, for instance if this was testing L109 samples, this would be \"L109\"\n #Takes base directory string and gives you an array of all the files within\n filenames = aa.getListOfFolders(base_dir) #Do not change\n conjugate_filenames = [] #Do not change\n target_filenames = [] #Do not change\n query_list = [] #Do not change\n folder_names = [] #Do not change\n\n for n in range(1, 17):\n #Use if dataset missing\n #This is where you put in the rangee of wells used as your test samples\n #Since we have 16 samples that are test samples for L106, the range is equal to 1 through n+1, or 1 through 17\n #If your test samples do not begin at well 1, then adjust the beginning of the range accordingly (3 through 17 if the first test sample is in well 3) \n #continue\n\n print('Well: ', str(n)) #Do not change\n folder_names.append('Test-' + str(n)) # Collate 'dataset' names for excel sheet #Do not change\n conjugate_str = str(n) + '-' + conjugate_fn_str #creates filename to search for #Creates n-conjugatename #Do not change\n target_str = str(n) + '-' + target_fn_str #Do not change\n\n # Search for file associated with the specific dataset number\n indices = [i for i, s in enumerate(filenames) if conjugate_str == s[0:len(conjugate_str)]] #Do not change\n conjugate_name = filenames[indices[0]] #Do not change\n print(conjugate_name) #Do not change\n indices = [i for i, s in enumerate(filenames) if target_str == s[0:len(target_str)]] #Do not change\n target_name = filenames[indices[0]] #Do not change\n print(target_name) #Do not change\n \n conjugate_filenames.append(conjugate_name) #Do not change\n target_filenames.append(target_name) #Do not change\n\n # Create query\n #\n query = {'preIF': [conjugate_name], 'preIF_z': [2],\n 'postIF': [target_name], 'postIF_z': [1],\n 'punctumSize': 2}\n #preIF = items that are presynaptic targets go here, because GAD2, our conjugate, is presynaptic I put the conjugate_name in this box\n #preIF_z = how many tiffs a puncta must be in to be registered, conjugate sample number is 2 so 2 goes in this box\n #postIF = items that are postsynaptic targets go here, L106 is postsynaptic so I put target_name here\n #postIF_z = how many tiffs a puncta must be in to be registered, target sample number is 1 (for now unless changed later) \n #punctumSize = size of punctum the algorithm is looking for, do not change unless directed to\n\n \"\"\"Example of a presynaptic target and presynaptic conjugate\n query = {'preIF': [target_name,conjugate_name], 'preIF_z': [1,2],\n 'postIF': [], 'postIF_z': [],\n 'punctumSize': 2}\"\"\"\n\n \"\"\"Example of a postsynaptic target and presynaptic conjugate\n query = {'preIF': [conjugate_name], 'preIF_z': 
[2],\n 'postIF': [target_name], 'postIF_z': [1],\n 'punctumSize': 2}\"\"\"\n\n \"\"\"Example of a postsynaptic target and postsynaptic conjugate\n query = {'preIF': [], 'preIF_z': [],\n 'postIF': [target_name,conjugate_name], 'postIF_z': [1,2],\n 'punctumSize': 2}\"\"\"\n\n \"\"\"Example of a presynaptic target and postsynaptic conjugate\n query = {'preIF': [target_name], 'preIF_z': [1],\n 'postIF': [conjugate_name], 'postIF_z': [2],\n 'punctumSize': 2}\"\"\"\n\n\n query_list.append(query)\n\n\n #The following n samples are controls - you can add as many of these as you want by copying the block of code and pasting it after the last one\n #The notes in the following block of code apply to all of the controls\n n = 17 #well number of control sample\n folder_names.append('Control' + str(n)) # Collate 'dataset' names for excel sheet #Do not change\n reference_fn_str = 'GAD2' #String segment to search in a filename #refernce_fn_str is the project number/name of RB control\n target_fn_str = 'L106' #target_fn_str is the project number of the Ms control you are using\n conjugate_name, target_name = aa.findFilenames(reference_fn_str, target_fn_str, filenames, n) #Do not alter\n conjugate_filenames.append(conjugate_name) #Do not alter\n target_filenames.append(target_name) #Do not alter\n query = {'preIF': [conjugate_name], 'preIF_z': [2], 'postIF': [target_name], 'postIF_z': [1], 'punctumSize': 2} #Se the examples and explanations above about \"query\"\n query_list.append(query) #Do not change\n\n n = 18\n folder_names.append('Control' + str(n)) # Collate 'dataset' names for excel sheet\n reference_fn_str = 'GAD2' #String segment to search in a filename\n target_fn_str = 'SP2'\n conjugate_name, target_name = aa.findFilenames(reference_fn_str, target_fn_str, filenames, n)\n conjugate_filenames.append(conjugate_name)\n target_filenames.append(target_name)\n query = {'preIF': [target_name,conjugate_name], 'preIF_z': [1,2], 'postIF': [], 'postIF_z': [], 'punctumSize': 2}\n query_list.append(query)\n\n n = 19\n folder_names.append('Control' + str(n)) # Collate 'dataset' names for excel sheet\n reference_fn_str = 'NP-RB' #String segment to search in a filename\n target_fn_str = 'NP-MS'\n conjugate_name, target_name = aa.findFilenames(reference_fn_str, target_fn_str, filenames, n)\n conjugate_filenames.append(conjugate_name)\n target_filenames.append(target_name)\n query = {'preIF': [], 'preIF_z': [], 'postIF': [target_name,conjugate_name], 'postIF_z': [1,2], 'punctumSize': 2}\n query_list.append(query)\n\n n = 20\n folder_names.append('Control' + str(n)) # Collate 'dataset' names for excel sheet\n reference_fn_str = 'NPNS-RB' #String segment to search in a filename\n target_fn_str = 'NPNS-MS'\n conjugate_name, target_name = aa.findFilenames(reference_fn_str, target_fn_str, filenames, n)\n conjugate_filenames.append(conjugate_name)\n target_filenames.append(target_name)\n query = {'preIF': [], 'preIF_z': [], 'postIF': [target_name,conjugate_name], 'postIF_z': [1,2], 'punctumSize': 2}\n query_list.append(query)\n\n\n \n measure_list = aa.calculate_measure_lists(query_list, None, base_dir,\n thresh, resolution, target_filenames) # Run all the queries\n\n df = aa.create_df(measure_list, folder_names, target_filenames, conjugate_filenames) #Do not change\n print(df) #Do not change\n\n return df #Do not change",
"def __init__(self, path_image, path_imagefile, path_bndboxfile, transform):\r\n # -------------------- DATA ARGUMENT\r\n self.shape = 446\r\n self.hue = 0.1\r\n self.saturation = 1.5\r\n self.exposure = 1.5\r\n self.imagelist = []\r\n self.labellist = []\r\n self.transform = transform\r\n label_dir = os.listdir(path_bndboxfile)\r\n image_dir = os.listdir(path_imagefile)\r\n\r\n # read imagepath\r\n for file in image_dir:\r\n file_name = os.path.join(path_imagefile, file)\r\n with open(file_name) as f:\r\n lines = f.readlines()\r\n for line in lines:\r\n image_name = line.split()[0] + '.JPEG'\r\n image = os.path.join(path_image, image_name)\r\n self.imagelist.append(image)\r\n\r\n # read imagelabel, i.e, (name, xmin, xmax, ymin, ymax)\r\n for file in label_dir:\r\n if file.split('.')[1] == 'xml':\r\n file_name = os.path.join(path_bndboxfile, file)\r\n with open(file_name) as f:\r\n xml_tree = parse(f).documentElement\r\n objects = xml_tree.getElementsByTagName('object')\r\n for object in objects:\r\n label = []\r\n name = object.getElementsByTagName('name')[0]\r\n label.append(name.childNodes[0].data)\r\n bndbox = object.getElementsByTagName('bndbox')[0]\r\n for node in bndbox.childNodes:\r\n if node.nodeType == node.ELEMENT_NODE:\r\n label.append(node.childNodes[0].data)\r\n self.labellist.append(label)\r\n else:\r\n print('Expect files in xml format. but get {}'.format(file.split('.')[1]))",
"def main():\n print(\"Program version: 1.5\")\n StartTime = datetime.now()\n args = parseArguments()\n\n verbose = args.verbose\n images = args.images\n ignore_warnings = args.ignore_warnings\n if(args.silent):\n verbose = False\n images = False\n ignore_warnings = True\n\n if(args.images):\n plt.ioff()\n\n if(args.ignore_warnings):\n warnings.simplefilter('ignore', UserWarning)\n\n #sample header keywords\n # OBJECT = 'P016+03_P1_JKdeep' / Original target\n # RA = ' 01:06:37.759' / 01:06:37.7 RA (J2000) pointing\n # DEC = ' 03:32:36.096' / 03:32:36.0 DEC (J2000) pointing\n # EQUINOX = 2000. / Standard FK5 (years)\n # RADECSYS= 'FK5 ' / Coordinate reference frame\n # CRVAL1 = 16.65733 / 01:06:37.7, RA at ref pixel\n # CRVAL2 = 3.54336 / 03:32:36.0, DEC at ref pixel\n # CRPIX1 = 447. /Ref pixel in X\n # CRPIX2 = 452. / Ref pixel in Y\n # CDELT1 = -8.0000000000000E-5 / SS arcsec per pixel in RA\n # CDELT2 = 8.00000000000003E-5 / SS arcsec per pixel in DEC\n # CTYPE1 = 'RA---TAN' / pixel coordinate system\n # CTYPE2 = 'DEC--TAN' / pixel coordinate system\n # PC1_1 = 0.000000 / Translation matrix element\n # PC1_2 = 1.000000 / Translation matrix element\n # PC2_1 = -1.000000 / Translation matrix element\n # PC2_2 = 0.000000 / Translation matrix element\n\n fits_image_filenames = args.input\n\n #if directory given search for appropriate fits files\n\n if(os.path.isdir(fits_image_filenames[0])):\n print(\"detected a directory. Will search for fits files in it\")\n path = fits_image_filenames[0]\n fits_image_filenames = []\n for file in os.listdir(path):\n if file.endswith(\".fits\") and \"_astro\" not in file:\n fits_image_filenames.append(path+\"/\"+file)\n print(fits_image_filenames)\n\n multiple = False\n if(len(fits_image_filenames)>1):\n multiple = True\n not_converged = []\n converged_counter = 0\n for fits_image_filename in fits_image_filenames:\n\n result,_ = astrometry_script(fits_image_filename, catalog=args.catalog, rotation_scaling=0, xy_transformation=args.xy_transformation, fine_transformation=args.fine_transformation,\n images=images, vignette=args.vignette,vignette_rectangular=args.vignette_rectangular, cutouts=args.cutout, ra=args.ra, dec=args.dec, projection_ra=args.projection_ra, projection_dec=args.projection_dec, verbose=verbose, save_images=args.save_images, ignore_header_rot=args.ignore_header_rot, radius = args.radius, save_bad_result=args.save_bad_result, silent =args.silent, sigma_threshold_for_source_detection= args.sigma_threshold_for_source_detection, high_res=args.high_resolution, hdul_idx=args.hdul_idx, filename_for_sources=args.filename_for_sources, FWHM=args.seeing)\n\n if((not result) and args.rotation_scaling):\n print(\"Did not converge. 
Will try again with full rotation and scaling\")\n result, _ = astrometry_script(fits_image_filename, catalog=args.catalog, rotation_scaling=args.rotation_scaling, xy_transformation=args.xy_transformation, fine_transformation=args.fine_transformation,\n images=images, vignette=args.vignette,vignette_rectangular=args.vignette_rectangular, cutouts=args.cutout, ra=args.ra, dec=args.dec, projection_ra=args.projection_ra, projection_dec=args.projection_dec, verbose=verbose, save_images=args.save_images, ignore_header_rot=args.ignore_header_rot, radius = args.radius, save_bad_result=args.save_bad_result, silent=args.silent, sigma_threshold_for_source_detection=args.sigma_threshold_for_source_detection, high_res=args.high_resolution, hdul_idx=args.hdul_idx, filename_for_sources=args.filename_for_sources, FWHM=args.seeing)\n\n if(result):\n print(\"Astrometry was determined to be good.\")\n converged_counter = converged_counter+1\n else:\n print(\"Astrometry was determined to be bad.\")\n not_converged.append(fits_image_filename)\n if(args.save_bad_result):\n print(\"Result was saved anyway\")\n else:\n print(\"Result was not saved.\")\n # print(\"\")\n # print(\">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\")\n # print(\"> Astrometry for {} \".format(fits_image_filename))\n #\n # with fits.open(fits_image_filename) as hdul:\n # #print(hdul.info())\n # if(args.verbose):\n # print(\"if image is not at first position in the fits file the program will break later on\")\n # #print(hdul[0].header)\n #\n # hdu = hdul[0]\n # #hdu.verify('fix')\n # hdr = hdu.header\n #\n #\n # image_or = hdul[0].data.astype(float)\n # median = np.nanmedian(image_or)\n # image_or[np.isnan(image_or)]=median\n # image = image_or - median\n #\n # observation = find_sources(image, args.vignette)\n # #print(observation)\n #\n # positions = (observation['xcenter'], observation['ycenter'])\n # apertures = CircularAperture(positions, r=4.)\n #\n #\n # #world coordinates\n # print(\">Info found in the file -- (CRVAl: position of central pixel (CRPIX) on the sky)\")\n # print(WCS(hdr))\n #\n # hdr[\"NAXIS1\"] = image.shape[0]\n # hdr[\"NAXIS2\"] = image.shape[1]\n #\n # #wcsprm = Wcsprm(hdr.tostring().encode('utf-8')) #everything else gave me errors with python 3, seemed to make problems with pc conversios, so i wwitched to the form below\n # wcsprm = WCS(hdr).wcs\n # wcsprm_original = WCS(hdr).wcs\n # if(args.verbose):\n # print(WCS(wcsprm.to_header()))\n # wcsprm, fov_radius, INCREASE_FOV_FLAG, PIXSCALE_UNCLEAR = read_additional_info_from_header(wcsprm, hdr, args.ra, args.dec, args.projection_ra, args.projection_dec)\n # if(args.verbose):\n # print(WCS(wcsprm.to_header()))\n #\n # #print(wcsprm)\n # #wcsprm.pc = [[2, 0],[0,1]]\n #\n #\n # #Possibly usefull examples of how to use wcsprm:\n # #print(wcsprm.set())\n # #print(wcsprm.get_pc())\n # #pc = wcsprm.get_pc()\n # #print(np.linalg.det(pc))\n # #print(wcsprm.get_cdelt())\n # #wcs.fix()\n # #print(wcsprm.print_contents())\n # #print(repr(hdr.update(wcsprm.to_header().encode('utf-8')))) #not working\n #\n # #hdu.verify(\"fix\")\n # #print(repr(hdr))\n # #wcs.wcs_pix2world(pixcrd, 1)\n # #wcs.wcs_world2pix(world, 1)\n # #wcs.wcs.crpix = [-234.75, 8.3393]\n # # wcs.wcs.cdelt = np.array([-0.066667, 0.066667])\n # # wcs.wcs.crval = [0, -90]\n # # wcs.wcs.ctype = [\"RA---AIR\", \"DEC--AIR\"]\n # # wcs.wcs.set_pv([(2, 1, 45.0)])\n # # For historical compatibility, three alternate specifications of the linear transformations\n # # are available in wcslib. 
The canonical PCi_ja with CDELTia, CDi_ja, and the deprecated CROTAia\n # # keywords. Although the latter may not formally co-exist with PCi_ja,\n # # the approach here is simply to ignore them if given in conjunction with PCi_ja.\n # # has_pc, has_cd and has_crota can be used to determine which of these alternatives are present in the header.\n # # These alternate specifications of the linear transformation matrix are translated immediately to PCi_ja by set\n # # and are nowhere visible to the lower-level routines. In particular, set resets cdelt to unity if CDi_ja is present\n # # (and no PCi_ja). If no CROTAia is associated with the latitude axis, set reverts to a unity PCi_ja matrix.\n #\n #\n #\n #\n #\n # #get rough coordinates\n # #print(hdr[\"RA\"])\n # #coord = SkyCoord(hdr[\"RA\"], hdr[\"DEC\"], unit=(u.hourangle, u.deg), frame=\"icrs\")\n # coord = SkyCoord(wcsprm.crval[0], wcsprm.crval[1], unit=(u.deg, u.deg), frame=\"icrs\")\n # if(not PIXSCALE_UNCLEAR):\n # if(wcsprm.crpix[0] < 0 or wcsprm.crpix[1] < 0 or wcsprm.crpix[0] > image.shape[0] or wcsprm.crpix[1] > image.shape[1] ):\n # print(\"central value outside of the image, moving it to the center\")\n # coord_radec = wcsprm.p2s([[image.shape[0]/2, image.shape[1]/2]], 0)[\"world\"][0]\n # coord = SkyCoord(coord_radec[0], coord_radec[1], unit=(u.deg, u.deg), frame=\"icrs\")\n # #print(wcsprm)\n #\n #\n #\n # #better: put in nice wrapper! with repeated tries and maybe try synchron!\n # print(\">Dowloading catalog data\")\n # radius = u.Quantity(fov_radius, u.arcmin)#will prob need more\n # catalog_data = query.get_data(coord, radius, args.catalog)\n # #reference = reference.query(\"mag <20\")\n # max_sources = 500\n # if(INCREASE_FOV_FLAG):\n # max_sources= max_sources*2.25 #1.5 times the radius, so 2.25 the area\n # if(catalog_data.shape[0]>max_sources):\n # catalog_data = catalog_data.nsmallest(400, \"mag\")\n #\n # if(args.catalog == \"GAIA\" and catalog_data.shape[0] < 5):\n # print(\"GAIA seems to not have enough objects, will enhance with PS1\")\n # catalog_data2 = query.get_data(coord, radius, \"PS\")\n # catalog_data = pd.concat([catalog_data, catalog_data2])\n # #apertures_catalog = CircularAperture(wcs.wcs_world2pix(catalog_data[[\"ra\", \"dec\"]], 1), r=5.)\n # print(\"Now we have a total of {} sources. Keep in mind that there might be duplicates now since we combined 2 catalogs\".format(catalog_data.shape[0]))\n # elif(args.catalog == \"PS\" and (catalog_data is None or catalog_data.shape[0] < 5)):\n # print(\"We seem to be outside the PS footprint, enhance with GAIA data\")\n # catalog_data2 = query.get_data(coord, radius, \"GAIA\")\n # catalog_data = pd.concat([catalog_data, catalog_data2])\n # #apertures_catalog = CircularAperture(wcs.wcs_world2pix(catalog_data[[\"ra\", \"dec\"]], 1), r=5.)\n # print(\"Now we have a total of {} sources. 
Keep in mind that there might be duplicates now since we combined 2 catalogs\".format(catalog_data.shape[0]))\n #\n # #remove duplicates in catalog?\n #\n # apertures_catalog = CircularAperture(wcsprm.s2p(catalog_data[[\"ra\", \"dec\"]], 1)['pixcrd'], r=5.)\n #\n #\n # #plotting what we have, I keep it in the detector field, world coordinates are more painfull to plot\n # if(args.images):\n # fig = plt.figure()\n # fig.canvas.set_window_title('Input for {}'.format(fits_image_filename))\n # plt.xlabel(\"pixel x direction\")\n # plt.ylabel(\"pixel y direction\")\n # plt.title(\"Input - red: catalog sources, blue: detected sources in img\")\n # plt.imshow(image,cmap='Greys', origin='lower', norm=LogNorm())\n # apertures.plot(color='blue', lw=1.5, alpha=0.5)\n # apertures_catalog.plot(color='red', lw=1.5, alpha=0.5)\n #\n # plt.xlim(-200,image.shape[0]+200)\n # plt.ylim(-200,image.shape[1]+200)\n # if(args.save_images):\n # name_parts = fits_image_filename.rsplit('.', 1)\n # plt.savefig(name_parts[0]+\"_image_before.pdf\")\n #\n # ###tranforming to match the sources\n # print(\"---------------------------------\")\n # print(\">Finding the transformation\")\n # if(args.rotation_scaling):\n # print(\"Finding scaling and rotation\")\n # wcsprm = register.get_scaling_and_rotation(observation, catalog_data, wcsprm, scale_guessed=PIXSCALE_UNCLEAR, verbose=args.verbose)\n # if(args.xy_transformation):\n # print(\"Finding offset\")\n # wcsprm,_,_ = register.offset_with_orientation(observation, catalog_data, wcsprm, fast=False , INCREASE_FOV_FLAG=INCREASE_FOV_FLAG, verbose= args.verbose)\n #\n # #correct subpixel error\n # obs_x, obs_y, cat_x, cat_y, distances = register.find_matches(observation, catalog_data, wcsprm, threshold=3)\n # rms = np.sqrt(np.mean(np.square(distances)))\n # best_score = len(obs_x)/(rms+10) #start with current best score\n # fine_transformation = False\n # if(args.fine_transformation):\n # for i in [2,3,5,8,10,6,4, 20,2,1,0.5]:\n # wcsprm_new, score = register.fine_transformation(observation, catalog_data, wcsprm, threshold=i)\n # if(score> best_score):\n # wcsprm = wcsprm_new\n # best_score = score\n # fine_transformation = True\n # if not fine_transformation:\n # print(\"Fine transformation did not improve result so will be discarded.\")\n # else:\n # print(\"Fine transformation applied to improve result\")\n # #register.calculate_rms(observation, catalog_data,wcs)\n #\n # #make wcsprim more physical by moving scaling to cdelt, out of the pc matrix\n # wcs =WCS(wcsprm.to_header())\n # if(args.verbose):\n # print(wcs)\n #\n # from astropy.wcs import utils\n # scales = utils.proj_plane_pixel_scales(wcs)\n # print(scales)\n # cdelt = wcsprm.get_cdelt()\n # print(cdelt)\n # scale_ratio = scales/cdelt\n # #print(scale_ratio)\n # pc = np.array(wcsprm.get_pc())\n # pc[0,0] = pc[0,0]/scale_ratio[0]\n # pc[1,0] = pc[1,0]/scale_ratio[1]\n # pc[0,1] = pc[0,1]/scale_ratio[0]\n # pc[1,1] = pc[1,1]/scale_ratio[1]\n # wcsprm.pc = pc\n # wcsprm.cdelt = scales\n # if(args.verbose):\n # print(\"moved scaling info to CDelt\")\n # print(WCS(wcsprm.to_header()))\n #\n # #WCS difference before and after\n # print(\"> Compared to the input the Wcs was changed by: \")\n # scales_original = utils.proj_plane_pixel_scales(WCS(hdr))\n # print(\"WCS got scaled by {} in x direction and {} in y direction\".format(scales[0]/scales_original[0], scales[1]/scales_original[1]))\n # #sources:\n # #https://math.stackexchange.com/questions/2113634/comparing-two-rotation-matrices\n # 
#https://stackoverflow.com/questions/2827393/angles-between-two-n-dimensional-vectors-in-python/13849249#13849249\n # def unit_vector(vector):\n # \"\"\" Returns the unit vector of the vector. \"\"\"\n # return vector / max(np.linalg.norm(vector), 1e-10)\n # def matrix_angle( B, A ):\n # \"\"\" comment cos between vectors or matrices \"\"\"\n # Aflat = A.reshape(-1)\n # Aflat = unit_vector(Aflat)\n # Bflat = B.reshape(-1)\n # Bflat = unit_vector(Bflat)\n # #return np.arccos((np.dot( Aflat, Bflat ) / max( np.linalg.norm(Aflat) * np.linalg.norm(Bflat), 1e-10 )))\n # return np.arccos(np.clip(np.dot(Aflat, Bflat), -1.0, 1.0))\n # #print(matrix_angle(wcsprm.get_pc(), wcsprm_original.get_pc()) /2/np.pi*360)\n # rotation_angle = matrix_angle(wcsprm.get_pc(), wcsprm_original.get_pc()) /2/np.pi*360\n # if((wcsprm.get_pc() @ wcsprm_original.get_pc() )[0,1] > 0):\n # text = \"counterclockwise\"\n # else:\n # text = \"clockwise\"\n # print(\"Rotation of WCS by an angle of {} deg \".format(rotation_angle)+text)\n # old_central_pixel = wcsprm_original.s2p([wcsprm.crval], 0)[\"pixcrd\"][0]\n # print(\"x offset: {} px, y offset: {} px \".format(wcsprm.crpix[0]- old_central_pixel[0], wcsprm.crpix[1]- old_central_pixel[1]))\n #\n #\n # #check final figure\n # if(args.images):\n # fig = plt.figure()\n # fig.canvas.set_window_title('Result for {}'.format(fits_image_filename))\n # plt.xlabel(\"pixel x direction\")\n # plt.ylabel(\"pixel y direction\")\n # plt.title(\"Result - red: catalog sources, blue: detected sources in img\")\n # plt.imshow(image,cmap='Greys', origin='lower', norm=LogNorm())\n # apertures.plot(color='blue', lw=1.5, alpha=0.5)\n # #apertures_catalog = CircularAperture(wcs.wcs_world2pix(catalog_data[[\"ra\", \"dec\"]], 1), r=5.)\n # apertures_catalog = CircularAperture(wcsprm.s2p(catalog_data[[\"ra\", \"dec\"]], 1)['pixcrd'], r=5.)\n #\n # apertures_catalog.plot(color='red', lw=1.5, alpha=0.5)\n # if(args.save_images):\n # name_parts = fits_image_filename.rsplit('.', 1)\n # plt.savefig(name_parts[0]+\"_image_after.pdf\")\n #\n # print(\"--- Evaluate how good the transformation is ----\")\n # register.calculate_rms(observation, catalog_data,wcsprm)\n #\n #\n # #updating file\n # write_wcs_to_hdr(fits_image_filename, wcsprm)\n #\n #\n # print(\"overall time taken\")\n # print(datetime.now()-StartTime)\n # if(args.images):\n # plt.show()\n if(multiple):\n print(\">> Final report:\")\n print(\"Processed {} files, {} of them did converge. The following files failed:\".format(len(fits_image_filenames), converged_counter))\n print(not_converged)\n print(\"-- finished --\")",
"def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"rAfb[1.0,-5.0, 5.0]\");\n self.modelBuilder.doVar(\"rA0[1.0, -5.0, 5.0]\");\n self.modelBuilder.doSet(\"POI\",\"rAfb,rA0\")\n self.modelBuilder.factory_('expr::mAfb(\"@0*@1\",eAfb,rAfb)')\n self.modelBuilder.factory_('expr::mA0(\"(@0*@1)\",eA0,rA0)')\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')",
"def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Afb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"A0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doSet(\"POI\",\"Afb,A0\")\n\n \n self.modelBuilder.factory_('expr::Alph(\"2.0*@0/(2.0-@0)\",A0)')\n self.modelBuilder.factory_('expr::Norm(\"3.0/4.0/(2.0+@0)\",Alph)')\n self.modelBuilder.factory_('expr::RAlph(\"@0*@1\",Alph,Norm)')\n self.modelBuilder.factory_('expr::Rpl(\"(@0+@1)\",Norm,Afb)')\n self.modelBuilder.factory_('expr::Rmn(\"(@0-@1)\",Norm,Afb)')",
"def main(gt_dir='./data/Oxford_Robot_ICCV19/anno', devkit_dir = './dataset/robot_list/'):\n with open(join(devkit_dir, 'info.json'), 'r') as fp:\n info = json.load(fp)\n image_path_list = join(devkit_dir, 'val.txt')\n label_path_list = join(devkit_dir, 'label.txt')\n mapping = np.array(info['label2train'], dtype=np.int)\n gt_imgs = open(label_path_list, 'r').read().splitlines()\n gt_imgs = [join(gt_dir, x) for x in gt_imgs]\n\n for ind in range(len(gt_imgs)):\n label = np.array(Image.open(gt_imgs[ind]))\n label = label_mapping(label, mapping)\n label = label[:,:,0].astype(np.uint8)\n name_tmp = gt_imgs[ind].replace('anno','anno_color')\n save([label, name_tmp])\n \n return",
"def main():\n\n trainData = os.getcwd() + '/data/traindata.txt'\n trainLabels = os.getcwd() + '/data/trainlabels.txt'\n\n #testData = os.getcwd() + '/data/traindata.txt'\n #testLabels = os.getcwd() + '/data/trainlabels.txt'\n\n testData = os.getcwd() + '/data/testdata.txt'\n testLabels = os.getcwd() + '/data/testlabels.txt'\n\n #trainData = os.getcwd() + '/data/toyData.txt'\n #trainLabels = os.getcwd() + '/data/toyLabel.txt'\n #testData = os.getcwd() +'/data/toyTestData.txt'\n #testLabels = os.getcwd() + '/data/toyTestLabel.txt'\n\n #print(trainData, trainLabels)\n myClassifier = NBClassifier.new(NBClassifier.MODE_BERNOULI)\n myClassifier.setTrainData(trainData, trainLabels)\n #print(myClassifier)\n\n #singleTestData = ['Chinese', 'Chinese', 'Chinese', 'Tokyo', 'Japan']\n #prediction = myClassifier.predict(singleTestData)\n #print(f'{singleTestData} >>> {prediction}')\n predictions = myClassifier.predictSet(testData)\n accuracy = myClassifier.reportAccuracy(testLabels)\n\n #print(predictions)\n print(accuracy)",
"def predict(self, datafile):",
"def construct_model():\n import lbann\n\n # Layer graph\n input = lbann.Input(target_mode='N/A', name='inp_data')\n # data is 64*64*4 images + 15 scalar + 5 param\n #inp_slice = lbann.Slice(input, axis=0, slice_points=\"0 16399 16404\",name='inp_slice')\n inp_slice = lbann.Slice(input, axis=0, slice_points=str_list([0,args.ydim,args.ydim+5]),name='inp_slice')\n gt_y = lbann.Identity(inp_slice,name='gt_y')\n gt_x = lbann.Identity(inp_slice, name='gt_x') #param not used\n\n zero = lbann.Constant(value=0.0,num_neurons='1',name='zero')\n one = lbann.Constant(value=1.0,num_neurons='1',name='one')\n\n z_dim = 20 #Latent space dim\n\n z = lbann.Gaussian(mean=0.0,stdev=1.0, neuron_dims=\"20\")\n model = macc_models.MACCWAE(args.zdim,args.ydim,cf=args.mcf,use_CNN=args.useCNN)\n d1_real, d1_fake, d_adv, pred_y = model(z,gt_y)\n\n d1_real_bce = lbann.SigmoidBinaryCrossEntropy([d1_real,one],name='d1_real_bce')\n d1_fake_bce = lbann.SigmoidBinaryCrossEntropy([d1_fake,zero],name='d1_fake_bce')\n d_adv_bce = lbann.SigmoidBinaryCrossEntropy([d_adv,one],name='d_adv_bce')\n img_loss = lbann.MeanSquaredError([pred_y,gt_y])\n rec_error = lbann.L2Norm2(lbann.WeightedSum([pred_y,gt_y], scaling_factors=\"1 -1\"))\n\n layers = list(lbann.traverse_layer_graph(input))\n # Setup objective function\n weights = set()\n src_layers = []\n dst_layers = []\n for l in layers:\n if(l.weights and \"disc0\" in l.name and \"instance1\" in l.name):\n src_layers.append(l.name)\n #freeze weights in disc2\n if(l.weights and \"disc1\" in l.name):\n dst_layers.append(l.name)\n for idx in range(len(l.weights)):\n l.weights[idx].optimizer = lbann.NoOptimizer()\n weights.update(l.weights)\n l2_reg = lbann.L2WeightRegularization(weights=weights, scale=1e-4)\n d_adv_bce = lbann.LayerTerm(d_adv_bce,scale=0.01)\n obj = lbann.ObjectiveFunction([d1_real_bce,d1_fake_bce,d_adv_bce,img_loss,rec_error,l2_reg])\n # Initialize check metric callback\n metrics = [lbann.Metric(img_loss, name='recon_error')]\n #pred_y = macc_models.MACCWAE.pred_y_name\n callbacks = [lbann.CallbackPrint(),\n lbann.CallbackTimer(),\n lbann.CallbackSaveModel(dir=args.dump_models),\n lbann.CallbackReplaceWeights(source_layers=list2str(src_layers),\n destination_layers=list2str(dst_layers),\n batch_interval=2)]\n\n if(args.ltfb_batch_interval > 0) :\n callbacks.append(lbann.CallbackLTFB(batch_interval=args.ltfb_batch_interval,metric='recon_error',\n low_score_wins=True,\n exchange_hyperparameters=True))\n\n # Construct model\n return lbann.Model(args.num_epochs,\n serialize_io=True,\n weights=weights,\n layers=layers,\n metrics=metrics,\n objective_function=obj,\n callbacks=callbacks)",
"def __init__(self, data_cfg, pipeline_cfg, root_path, sel_index=0):\n\n super(DetRetailOneDataset, self).__init__(\n data_cfg, pipeline_cfg, root_path, sel_index\n )\n\n self.cat2label = {cat: i for i, cat in enumerate(self.class_names)}\n self.ORI_CLASSES = (\n \"asamu\",\n \"baishikele\",\n \"baokuangli\",\n \"aoliao\",\n \"bingqilinniunai\",\n \"chapai\",\n \"fenda\",\n \"guolicheng\",\n \"haoliyou\",\n \"heweidao\",\n \"hongniu\",\n \"hongniu2\",\n \"hongshaoniurou\",\n \"kafei\",\n \"kaomo_gali\",\n \"kaomo_jiaoyan\",\n \"kaomo_shaokao\",\n \"kaomo_xiangcon\",\n \"kele\",\n \"laotansuancai\",\n \"liaomian\",\n \"lingdukele\",\n \"maidong\",\n \"mangguoxiaolao\",\n \"moliqingcha\",\n \"niunai\",\n \"qinningshui\",\n \"quchenshixiangcao\",\n \"rousongbing\",\n \"suanlafen\",\n \"tangdaren\",\n \"wangzainiunai\",\n \"weic\",\n \"weitanai\",\n \"weitaningmeng\",\n \"wulongcha\",\n \"xuebi\",\n \"xuebi2\",\n \"yingyangkuaixian\",\n \"yuanqishui\",\n \"xuebi-b\",\n \"kebike\",\n \"tangdaren3\",\n \"chacui\",\n \"heweidao2\",\n \"youyanggudong\",\n \"baishikele-2\",\n \"heweidao3\",\n \"yibao\",\n \"kele-b\",\n \"AD\",\n \"jianjiao\",\n \"yezhi\",\n \"libaojian\",\n \"nongfushanquan\",\n \"weitanaiditang\",\n \"ufo\",\n \"zihaiguo\",\n \"nfc\",\n \"yitengyuan\",\n \"xianglaniurou\",\n \"gudasao\",\n \"buding\",\n \"ufo2\",\n \"damaicha\",\n \"chapai2\",\n \"tangdaren2\",\n \"suanlaniurou\",\n \"bingtangxueli\",\n \"weitaningmeng-bottle\",\n \"liziyuan\",\n \"yousuanru\",\n \"rancha-1\",\n \"rancha-2\",\n \"wanglaoji\",\n \"weitanai2\",\n \"qingdaowangzi-1\",\n \"qingdaowangzi-2\",\n \"binghongcha\",\n \"aerbeisi\",\n \"lujikafei\",\n \"kele-b-2\",\n \"anmuxi\",\n \"xianguolao\",\n \"haitai\",\n \"youlemei\",\n \"weiweidounai\",\n \"jindian\",\n \"3jia2\",\n \"meiniye\",\n \"rusuanjunqishui\",\n \"taipingshuda\",\n \"yida\",\n \"haochidian\",\n \"wuhounaicha\",\n \"baicha\",\n \"lingdukele-b\",\n \"jianlibao\",\n \"lujiaoxiang\",\n \"3+2-2\",\n \"luxiangniurou\",\n \"dongpeng\",\n \"dongpeng-b\",\n \"xianxiayuban\",\n \"niudufen\",\n \"zaocanmofang\",\n \"wanglaoji-c\",\n \"mengniu\",\n \"mengniuzaocan\",\n \"guolicheng2\",\n \"daofandian1\",\n \"daofandian2\",\n \"daofandian3\",\n \"daofandian4\",\n \"yingyingquqi\",\n \"lefuqiu\",\n )",
"def __init__(self):\r\n self.label = \"Step 2: FEMA BFE\"\r\n self.description = \"This tool takes the FEMA area of interest clip output from Step 1 and converts the \" \\\r\n \"feature class into a base flood elevation raster dataset based on the attribute \" \\\r\n \"'STATIC_BFE'. This raster dataset is then reclassified to remove areas that do not \" \\\r\n \"undergo flooding. NOTE: FEMA BFE raster dataset that is created has a pixel size of 30.\"\r\n self.canRunInBackground = False",
"def predictions(files, species, files2, species2, nbC=1000, nbC2=None, gating='line', showgat=False,\r\n predAn='prediction',\r\n predtype='neur', ratio=1 / 7.0, repeat=1, average=True,\r\n doubt=0, random_state=None, save='save', fc='Accuri',\r\n param=None,channels = [], dicChannels = {},cwd='Results/' ):\r\n # todo default channel name and dicChannels\r\n if param is None:\r\n param = ['FL3-A', 'FL1-A', 'FSC-A']\r\n # Create result directory\r\n if not os.path.exists(cwd):\r\n os. mkdir('Results/')\r\n cwd = 'Results/'\r\n now = datetime.now()\r\n dirName = now.strftime(\"%Y%m%d-%H_%M_%S/\")\r\n os.mkdir(cwd + dirName)\r\n cwd = cwd + dirName\r\n # Create file option with all parameter given in the input\r\n f.fileOption(cwd, files, species, files2, species2, nbC, nbC2, gating, predAn, predtype, ratio, repeat, average,\r\n doubt,channels=channels,dicChannels=dicChannels,fc=fc)\r\n\r\n if showgat and save is not None:\r\n showgat = save\r\n else:\r\n showgat = None\r\n # Gate Data: according to method\r\n Data = []\r\n target2 = []\r\n refArrays = f.importFile(files, gating=gating, save=showgat, fc=fc, cwd=cwd,channels=channels,dicChannels=dicChannels)\r\n if refArrays == [] :\r\n return 'None'\r\n predArrays = f.importFile(files2, gating=gating, save=showgat, fc=fc, cwd=cwd,channels=channels,dicChannels=dicChannels)\r\n if predArrays == []:\r\n return 'None'\r\n\r\n blankArrays = []\r\n blk =['BLANK', 'Blank', 'blank']\r\n for i in range(len(species)):\r\n if species[i] in blk:\r\n blankArrays.append(refArrays[i])\r\n if gating == 'machine':\r\n refArrays,species = f.machineGating(refArrays, species, refArrays, species,cwd=cwd,show=showgat,name='ref', predType=predtype, param=param)\r\n if predAn == 'analysis':\r\n predArrays, species2 = f.machineGating(refArrays+blankArrays, species+['blank']*len(blankArrays), predArrays, species2,cwd = cwd,show = showgat, name='pred',param=param,predType=predtype)\r\n # todo check if 10 and 5000 is not too much!! 
here we add the former blank from the new gating output\r\n else :\r\n refArrays = refArrays + blankArrays\r\n species = species+['blank']*len(blankArrays)\r\n # Selection of data for prediction and analysis\r\n if predAn == 'prediction':\r\n for anArray in predArrays:\r\n data2, atarget2, species2 = f.treat([anArray], species2, None, mode=predAn)\r\n Data.append(data2)\r\n target2.append(atarget2)\r\n else:\r\n data2, target2, species2 = f.treat(predArrays, species2, nbC2, mode=predAn)\r\n Data.append(data2)#TODO replace by data2 after testing feature impotance\r\n target2 = [target2]\r\n\r\n # treat data\r\n statisticsLearn = []\r\n statisticsPred = []\r\n predict_lbls = []\r\n confusionM = []\r\n confusionM2 = []\r\n acc1 = []\r\n F1 = []\r\n oldSpecies = species.copy()\r\n for i in range(repeat):\r\n stat, stat2, predict_lbl, target, conf, conf2, species, \\\r\n data2 = predictionMultiple(files, refArrays, oldSpecies, files2, Data, target2, nbC, repeat=i,\r\n param=param, predAn=predAn, predType=predtype, ratio=ratio,\r\n random_state=random_state, save=save, cwd=cwd, average=average)\r\n statisticsLearn.append(stat)\r\n statisticsPred.append(stat2)\r\n #f.save_prediction(predict_lbl,i,cwd=cwd)\r\n predict_lbls.append(predict_lbl)\r\n confusionM.append(conf)\r\n confusionM2.append(conf2)\r\n acc1.append(stat.loc['MEAN', 'ACC'])\r\n F1.append(stat.loc['MEAN', 'F1'])\r\n\r\n f.assessmentValue(statisticsLearn, species, cwd, [], 'align')\r\n confM = f.averageConfM(confusionM)\r\n confM2 = f.averageConfM(confusionM2)\r\n f.cmFile(confM, species, cwd, 'Reference CM')\r\n if save is not None:\r\n f.plotConfusionMatrix(confM, species, save, cwd, normalize=True, name=' average CM for reference',\r\n predAn='training')\r\n target = np.array(target)\r\n predict_lbls = np.array(predict_lbls)\r\n conf2 = []\r\n acc2 = []\r\n f12 = []\r\n for i in range(len(statisticsPred[0])):\r\n PredStat = []\r\n for j in range(repeat):\r\n PredStat.append(statisticsPred[j][i])\r\n if average:\r\n predict_lbl = f.bestPred(list(predict_lbls[:, i]), doubt=doubt) # average of 10\r\n if len(predict_lbl) > 0:\r\n conf2.append(confusion_matrix(target[i], predict_lbl, labels=species + ['unknown']))\r\n statistics2 = f.statAnalysis(list(predict_lbl), list(target[i]), species)\r\n acc2.append(statistics2.loc['MEAN', 'ACC'])\r\n f12.append(statistics2.loc['MEAN', 'F1'])\r\n\r\n if predAn == 'prediction':\r\n if save is not None and save != 'None':\r\n f.graph3d(data2[i], predict_lbl, list(target[i]), species + ['unknown'], param, statistics2, save,\r\n cwd, i + 1, name=files2[i].split('/')[-1][:-4], predtype=predAn)\r\n f.exportPrediction(list(predict_lbl), [files2[i].split('/')[-1][:-4]], cwd, 'predict',\r\n i + 1, 'AVERAGE', repeat)\r\n else:\r\n f.exportStatistics(statistics2, [n.split('/')[-1][:-4] for n in files2], cwd,\r\n 'predict')\r\n f.assessmentValue([statistics2], species, cwd, [files2[i].split('/')[-1][:-4]], 'predict')\r\n f.exportPrediction(list(predict_lbl), [files2[i].split('/')[-1][:-4]], cwd, 'predict',\r\n i + 1, 'AVERAGE', repeat)\r\n\r\n f.cmFile(conf2[i], species+['unknown'], cwd, 'Prediction CM')\r\n if save is not None:\r\n f.graph3d(data2[i], predict_lbl, target[i], species + ['unknown'], param, statistics2, save,\r\n cwd, i + 1, name=str(len(species)) + ' species', predtype=predAn)\r\n f.plotConfusionMatrix(conf2[i], species + ['unknown'], save, cwd, normalize=True,\r\n name='CM with ' + str(len(species)) + ' species', predAn='analysis')\r\n\r\n else:\r\n conf2 = []\r\n else:\r\n for j in 
range(repeat):\r\n if predAn == 'prediction':\r\n f.exportPrediction(list(predict_lbls[j][i]), [files2[i].split('/')[-1][:-4]], cwd,\r\n 'predict', i, 'SINGLE')\r\n else:\r\n f.exportStatistics(PredStat[j], [files2[i].split('/')[-1][:-4]], cwd, 'predict')\r\n f.exportPrediction(list(predict_lbls[j][i]), [files2[i].split('/')[-1][:-4]], cwd,\r\n 'predict', i, 'SINGLE')\r\n if predAn == 'analysis':\r\n f.assessmentValue(PredStat, species, cwd, [files2[i].split('/')[-1][:-4]], 'predict')\r\n f.cmFile(confM2, species+['unknown'], cwd, 'Prediction CM')\r\n if save is not None:\r\n f.plotConfusionMatrix(confM2, species + ['unknown'], save, cwd, normalize=True,\r\n name='average confusion matrix for Tool analysis', predAn='analysis')\r\n\r\n return os.getcwd().replace('\\\\', '/') + '/' + cwd",
"def predictor(path):\n # get keypoints from the image in a DF\n TEST_keypoints = []\n path = cv2.cvtColor(path, cv2.COLOR_BGR2RGB)\n img = movenet_inference_flat_v10(hub_model, path)\n TEST_keypoints.append(img)\n TEST_keypoints_df = pd.DataFrame(TEST_keypoints)\n\n # Rename columns in the DataFrames according to the values\n columns = []\n for point in kp_descriptions:\n for value in ('y', 'x', 'score'):\n columns.append(f'{point}_{value}')\n\n TEST_keypoints_df.columns = columns\n \n # add additional positional features\n TEST_keypoints_df = add_pos_features(TEST_keypoints_df, drop_scores=True)\n # predict the asana\n prediction_existing = model_fl.predict(TEST_keypoints_df)\n # initialize the predicted_asana to 107 (no asan found)\n predicted_asana = 107\n\n # assign the precited asana if accuracy more than threshold (12.5%)\n for i in range(1):\n mx = 0\n mx_label = -1\n for j in range(107):\n if(prediction_existing[i, j] > mx):\n mx_label = j\n mx = prediction_existing[i, j]\n predicted_asana = mx_label\n predicted_accuracy = prediction_existing[0, mx_label]\n if(predicted_accuracy < 0.125):\n predicted_asana = 107\n\n # print(predicted_asana)\n \n # find label from the json\n a = inv_map[str(predicted_asana)]\n # b = \"null\"\n\n print(\"predicted pose --> \", a)\n print(\"confidence = \", predicted_accuracy)\n # print(\"actual pose -->\", b)\n return a, img",
"def full_modeling(target, pre_clust_df, model_path, id_column):\n targets = [x for x in pre_clust_df.columns if x[:8] == 'default_']\n # folders for result saving\n folder_auc = model_path + '/pictures/roc_auc'\n folder_column_pics = model_path + '/pictures'\n folder_model_output = model_path + '/model_output'\n create_folder(folder_auc)\n create_folder(folder_model_output)\n \n #take only matured loans\n pre_clust_df = pre_clust_df[pre_clust_df[target]>-.5] \n pre_clust_df = pre_clust_df.set_index(id_column)\n\n #drop all target columns except current tarhet column\n drop_targets = [col for col in targets if col != target] \n drop_targets = list(set(drop_targets) & set(pre_clust_df))\n pre_clust_df = pre_clust_df.drop(drop_targets, 1)\n\n #transform continous variables to bucket columns\n dfPreWoe, clustVarsInfo = sf.continuousVariables(pre_clust_df, columnLimit=10) \n #trassform to woe columns\n dfPostWoe, woeVarsInfo = sf.woeVariables(dfPreWoe,target)\n\n #look at information value of variables\n gg = sf.giniGrowth(dfPostWoe,woeVarsInfo,target)\n #chose best columns\n goodColumns, badColumns = sf.chooseColumnsFromIT(gg, badFlag=target, min_limit=0.01)\n\n #create log regression model\n model = sf.logReg(preLR=dfPostWoe[goodColumns], badFlag=target)\n #save roc_auc picture \n model.print_roc_curve(to_file=True, folder=folder_auc)\n\n #generate doc information about model and variables\n intercept, woeOut = sf.modelOutput(folder_model_output, woeVarsInfo, goodColumns, model, gg, rewrite=True)\n\n #generate and save pictures of feature distribution\n bad_columns = woe.save_pictures(woeVarsInfo, folder = folder_column_pics, badRateLimit=100)",
"def DontuseThis():\n BCM_outputs = ['phi','rho','theta',\n 'r_probabilityMaps','l_probabilityMaps',\n 'models']\n BCM_Models = pe.Node(interface=nio.DataGrabber(input_names=['structures'],\n outfields=BCM_outputs),\n name='10_BCM_Models')\n BCM_Models.inputs.base_directory = atlas_fname_wpath\n BCM_Models.inputs.template_args['phi'] = [['spatialImages','phi','nii.gz']]\n BCM_Models.inputs.template_args['rho'] = [['spatialImages','rho','nii.gz']]\n BCM_Models.inputs.template_args['theta'] = [['spatialImages','theta','nii.gz']]\n BCM_Models.inputs.template_args['r_probabilityMaps'] = [['structures']]\n BCM_Models.inputs.template_args['l_probabilityMaps'] = [['structures']]\n BCM_Models.inputs.template_args['models'] = [['structures']]\n\n BRAINSCut_structures = ['caudate','thalamus','putamen','hippocampus']\n #BRAINSCut_structures = ['caudate','thalamus']\n BCM_Models.iterables = ( 'structures', BRAINSCut_structures )\n BCM_Models.inputs.template = '%s/%s.%s'\n BCM_Models.inputs.field_template = dict(\n r_probabilityMaps='probabilityMaps/r_%s_ProbabilityMap.nii.gz',\n l_probabilityMaps='probabilityMaps/l_%s_ProbabilityMap.nii.gz',\n models='modelFiles/%sModel*',\n )\n\n \"\"\"\n The xml creation and BRAINSCut need to be their own mini-pipeline that gets\n executed once for each of the structures in BRAINSCut_structures. This can be\n accomplished with a map node and a new pipeline.\n \"\"\"\n \"\"\"\n Create xml file for BRAINSCut\n \"\"\"\n\n\n BFitAtlasToSubject = pe.Node(interface=BRAINSFit(),name=\"BFitAtlasToSubject\")\n BFitAtlasToSubject.inputs.costMetric=\"MMI\"\n BFitAtlasToSubject.inputs.maskProcessingMode=\"ROI\"\n BFitAtlasToSubject.inputs.numberOfSamples=100000\n BFitAtlasToSubject.inputs.numberOfIterations=[1500,1500]\n BFitAtlasToSubject.inputs.numberOfHistogramBins=50\n BFitAtlasToSubject.inputs.maximumStepLength=0.2\n BFitAtlasToSubject.inputs.minimumStepLength=[0.005,0.005]\n BFitAtlasToSubject.inputs.transformType= [\"Affine\",\"BSpline\"]\n BFitAtlasToSubject.inputs.maxBSplineDisplacement= 7\n BFitAtlasToSubject.inputs.maskInferiorCutOffFromCenter=65\n BFitAtlasToSubject.inputs.splineGridSize=[28,20,24]\n BFitAtlasToSubject.inputs.outputVolume=\"Trial_Initializer_Output.nii.gz\"\n BFitAtlasToSubject.inputs.outputTransform=\"Trial_Initializer_Output.mat\"\n cutWF.connect(SplitAvgBABC,'avgBABCT1',BFitAtlasToSubject,'fixedVolume')\n cutWF.connect(BABC,'outputLabels',BFitAtlasToSubject,'fixedBinaryVolume')\n cutWF.connect(BAtlas,'template_t1',BFitAtlasToSubject,'movingVolume')\n cutWF.connect(BAtlas,'template_brain',BFitAtlasToSubject,'movingBinaryVolume')\n cutWF.connect(BLI,'outputTransformFilename',BFitAtlasToSubject,'initialTransform')\n\n CreateBRAINSCutXML = pe.Node(Function(input_names=['rho','phi','theta',\n 'model',\n 'r_probabilityMap',\n 'l_probabilityMap',\n 'atlasT1','atlasBrain',\n 'subjT1','subjT2',\n 'subjT1GAD','subjT2GAD',\n 'subjSGGAD','subjBrain',\n 'atlasToSubj','output_dir'],\n output_names=['xml_filename','rl_structure_filename_list'],\n function = create_BRAINSCut_XML),\n overwrite = True,\n name=\"CreateBRAINSCutXML\")\n\n ## HACK Makde better directory\n CreateBRAINSCutXML.inputs.output_dir = \".\" #os.path.join(cutWF.base_dir, \"BRAINSCut_output\")\n cutWF.connect(BCM_Models,'models',CreateBRAINSCutXML,'model')\n cutWF.connect(BCM_Models,'rho',CreateBRAINSCutXML,'rho')\n cutWF.connect(BCM_Models,'phi',CreateBRAINSCutXML,'phi')\n cutWF.connect(BCM_Models,'theta',CreateBRAINSCutXML,'theta')\n 
cutWF.connect(BCM_Models,'r_probabilityMaps',CreateBRAINSCutXML,'r_probabilityMap')\n cutWF.connect(BCM_Models,'l_probabilityMaps',CreateBRAINSCutXML,'l_probabilityMap')\n cutWF.connect(BAtlas,'template_t1',CreateBRAINSCutXML,'atlasT1')\n cutWF.connect(BAtlas,'template_brain',CreateBRAINSCutXML,'atlasBrain')\n cutWF.connect(SplitAvgBABC,'avgBABCT1',CreateBRAINSCutXML,'subjT1')\n cutWF.connect(SplitAvgBABC,'avgBABCT2',CreateBRAINSCutXML,'subjT2')\n cutWF.connect(GADT1,'outputVolume',CreateBRAINSCutXML,'subjT1GAD')\n cutWF.connect(GADT2,'outputVolume',CreateBRAINSCutXML,'subjT2GAD')\n cutWF.connect(SGI,'outputFileName',CreateBRAINSCutXML,'subjSGGAD')\n cutWF.connect(BABC,'outputLabels',CreateBRAINSCutXML,'subjBrain')\n cutWF.connect(BFitAtlasToSubject,'outputTransform',CreateBRAINSCutXML,'atlasToSubj')\n #CreateBRAINSCutXML.inputs.atlasToSubj = \"INTERNAL_REGISTER.mat\"\n #cutWF.connect(BABC,'atlasToSubjectTransform',CreateBRAINSCutXML,'atlasToSubj')\n\n \"\"\"\n ResampleNACLabels\n \"\"\"\n ResampleAtlasNACLabels=pe.Node(interface=BRAINSResample(),name=\"ResampleAtlasNACLabels\")\n ResampleAtlasNACLabels.inputs.interpolationMode = \"NearestNeighbor\"\n ResampleAtlasNACLabels.inputs.outputVolume = \"atlasToSubjectNACLabels.nii.gz\"\n\n cutWF.connect(cutWF,'OutputSpec.atlasToSubjectTransform',ResampleAtlasNACLabels,'warpTransform')\n cutWF.connect(cutWF,'OutputSpec.t1_corrected',ResampleAtlasNACLabels,'referenceVolume')\n cutWF.connect(BAtlas,'template_nac_lables',ResampleAtlasNACLabels,'inputVolume')\n\n \"\"\"\n BRAINSMush\n \"\"\"\n BMUSH=pe.Node(interface=BRAINSMush(),name=\"BMUSH\")\n BMUSH.inputs.outputVolume = \"MushImage.nii.gz\"\n BMUSH.inputs.outputMask = \"MushMask.nii.gz\"\n BMUSH.inputs.lowerThresholdFactor = 1.2\n BMUSH.inputs.upperThresholdFactor = 0.55\n\n cutWF.connect(myLocalTCWF,'OutputSpec.t1_corrected',BMUSH,'inputFirstVolume')\n cutWF.connect(myLocalTCWF,'OutputSpec.t2_corrected',BMUSH,'inputSecondVolume')\n cutWF.connect(myLocalTCWF,'OutputSpec.outputLabels',BMUSH,'inputMaskVolume')\n\n \"\"\"\n BRAINSROIAuto\n \"\"\"\n BROI = pe.Node(interface=BRAINSROIAuto(), name=\"BRAINSROIAuto\")\n BROI.inputs.closingSize=12\n BROI.inputs.otsuPercentileThreshold=0.01\n BROI.inputs.thresholdCorrectionFactor=1.0\n BROI.inputs.outputROIMaskVolume = \"temproiAuto_t1_ACPC_corrected_BRAINSABC.nii.gz\"\n cutWF.connect(myLocalTCWF,'OutputSpec.t1_corrected',BROI,'inputVolume')\n\n \"\"\"\n Split the implicit outputs of BABCext\n \"\"\"\n SplitAvgBABC = pe.Node(Function(input_names=['in_files','T1_count'], output_names=['avgBABCT1','avgBABCT2'],\n function = get_first_T1_and_T2), run_without_submitting=True, name=\"99_SplitAvgBABC\")\n SplitAvgBABC.inputs.T1_count = 1 ## There is only 1 average T1 image.\n\n cutWF.connect(myLocalTCWF,'OutputSpec.outputAverageImages',SplitAvgBABC,'in_files')\n\n\n\n def printFullPath(outFileFullPath):\n print(\"=\"*80)\n print(\"=\"*80)\n print(\"=\"*80)\n print(\"=\"*80)\n print(\"{0}\".format(outFileFullPath))\n return outFileFullPath\n printOutImage = pe.Node( Function(function=printFullPath, input_names = ['outFileFullPath'], output_names = ['genoutFileFullPath']), run_without_submitting=True, name=\"99_printOutImage\")\n cutWF.connect( GADT2, 'outputVolume', printOutImage, 'outFileFullPath' )",
"def run_script(sc):\n \n \n \n \n \n \n #the size of the image\n imageN = opts.imageN\n #the size of the dictionary\n dictsize = opts.dictsize\n #the number of bands in high resolution\n bands_h_N = opts.bands_h\n \n #the number of bands in low resolution\n bands_l_N = opts.bands_l\n \n \n #parameters for training. \n c1 = opts.c1 # Default value: 0.4\n \n c2 = opts.c2 # Default value: 0.4\n \n c3 = opts.c3 # Default value: 0.8\n \n maxbeta = opts.maxbeta #Default value: 1e+6\n \n delta = opts.delta #Default value: 1e-4\n beta = opts.beta #Default value: 0.01\n \n #The learning rate thresholding value\n lamda = opts.lamda\n \n #number of iterations for training\n train_iter =opts.n_iter\n \n #the number of partitions for the data parallelization into RDD blocks\n partitions_N = opts.partitions\n \n \n \n #input data are in the from (# of samples) x (# of Bands)\n #high resolution samples\n data_h = sc.parallelize(genfromtxt(opts.inputhigh, delimiter=','), partitions_N).cache()\n \n #low resolution samples\n data_l = sc.parallelize(genfromtxt(opts.inputlow, delimiter=','), partitions_N).cache()\n\n \n #initializing the dictionary @ high resolution\n dict_h_t = data_h.take(dictsize)\n \n #initializing the dictionary @ low resolution\n dict_l_t = data_l.take(dictsize)\n \n \n dict_h = np.transpose(dict_h_t)\n dict_l = np.transpose(dict_l_t)\n \n print('>>>>>>>>>>>>>>>>')\n print(data_h.count())\n print(data_l.count())\n \n #bundling the input samples together\n datain = data_h.zip(data_l).cache()\n print('>>>>>>>>>>>>>>>>')\n \n print(dict_h.shape)\n print(dict_l.shape)\n \n #optional: uncomment to save the initial values of the dictionaries\n #mat2save = './tttmpinit' + '_' + str(imageN) + 'x' + str(dictsize) + '.mat' \n #sio.savemat(mat2save, {'dicth_init':dict_h, 'dictl_init': dict_l})\n \n \n #operational parameters to be broadcasted to the cluster\n lamda_bc = sc.broadcast(lamda)\n dictsize_bc = sc.broadcast(dictsize)\n \n \n #broadcast the dictionaries....\n dict_h_bc = sc.broadcast(dict_h)\n dict_l_bc = sc.broadcast(dict_l)\n \n \n #define and initialize the CDL object for optimization.\n tmp = datain.map(lambda x: startCDL(x, dictsize_bc.value)).cache() \n \n \n \n wind = opts.window; \n \n #######################################\n ##initialize the variables for error convergence checking...\n err_h = 0;\n err_l = 0;\n \n buff_size = 10 #to do - put this in arg in...\n err_thrs = 0.01 #likewise\n \n m = 0\n mm = 0\n time_all = []\t\n ##keep the error buffer size as an even number.\n if buff_size % 2 == 1:\n buff_size += 1\n \n \n err_h_all = []\n \n err_l_all = []\n \n dictall_high = []\n dictall_low = []\n #####################################3\n \n #entering optimization....\n for k in range(train_iter):\n \n ########################################################\n #cluster calculation start! 
\n ttime3 = time.time()\n \n\n ttime2 = time.time()\n \n ###dictionaries calculations and broadcasting to the cluster.\n dict_ht_bc = sc.broadcast(np.squeeze(np.transpose(dict_h)))\n dict_lt_bc = sc.broadcast(np.squeeze(np.transpose(dict_l)))\n \t\n \n dtdh = np.dot(np.squeeze(np.transpose(dict_h)), np.squeeze(dict_h)) + (c1 + c3)*np.eye(np.squeeze(np.transpose(dict_h).shape[0]))\n dtdhinv_bc = sc.broadcast(inv(dtdh))\n \n dtdl = np.dot(np.squeeze(np.transpose(dict_l)), np.squeeze(dict_l)) + (c2 + c3)*np.eye(np.squeeze(np.transpose(dict_l).shape[0]))\n dtdlinv_bc = sc.broadcast(inv(dtdl))\n \n time_upd = time.time()\n \n #update the CDL object \n tmp = tmp.map(lambda x: updateCD(x,dict_ht_bc.value, dict_lt_bc.value, dtdhinv_bc.value, dtdlinv_bc.value,c1,c2,c3,k)).cache()\n \n \n print('time elapsed for updating...')\n print(time.time() - time_upd)\n \n \n time_calc = time.time()\n \n \n #extract the SxW and phi matrices for low and high resolution.\n updvals = tmp.map(lambda x: getVals(x)).reduce(lambda x,y: calcSum3(x, y))\n \n print('time elapsed for calculating...')\n print(time.time() - time_calc)\n \n print('****************************************')\n \n \n print('time elapsed:')\n print time.time()- ttime2\n #cluster calculation done!\n ##########################################################\n \n sw_h =np.array(updvals[0][0])\n sw_l =np.array(updvals[0][1]) \n print(sw_h.shape)\n phi_h = np.array(updvals[1][0])\n phi_l = np.array(updvals[1][1])\n print(phi_h.shape)\n \n phi_h = np.reshape(phi_h, ((1, len(phi_h)))) + delta\n \n #calculate and normalize the new dictionaries\n #a. high resolution\n dict_h_upd = dict_h + sw_h/(phi_h)\n dict_h = normD(dict_h_upd)\n #b. low resolution\n phi_l = np.reshape(phi_l, ((1, len(phi_l)))) + delta\n \n dict_l_upd = dict_l + sw_l/(phi_l)\n \n\n \n dict_l = normD(dict_l_upd)\n print('dict_l upd')\n print(np.array(dict_l))\n #clean up your garbage!\n sc._jvm.System.gc()\n\n #######################################################\n \n #error calculation over the cluster\n if (k + 1) % wind == 0:\n \n err_all = tmp.map(lambda x: calcErr(dict_h, dict_l,x)).reduce(lambda x,y: calcSum2(x,y))\n print('err_l')\n print(np.array(err_all[1]))\n err_h = math.sqrt(np.sum(np.array(err_all[0])) / (bands_h_N * imageN))\n err_l = math.sqrt(np.sum(np.array(err_all[1])) / (bands_l_N * imageN))\n \n \n print('ERROR HIGH:')\n print(err_h)\n \n print('ERROR LOW:')\n print(err_l)\n sc._jvm.System.gc()\n \n ##############################\n \n #append errors & dictionaries (for checking for convergence over a sliding window)\n err_h_all.append(err_h)\n err_l_all.append(err_l)\n \n dictall_high.append(dict_h)\n \n dictall_low.append(dict_l)\n \n \n ##check error convergence >>>start!\n if m >= buff_size - 1:\n print('checking for error convergence...')\n tmp_h = err_h_all[m-buff_size + 1:m+1]\n tmp_l = err_l_all[m-buff_size + 1:m+1]\n \n err_con_h = np.mean(tmp_h[:buff_size/2], axis=0) - np.mean(tmp_h[buff_size/2:], axis=0)\n err_con_l = np.mean(tmp_l[:buff_size/2], axis=0) - np.mean(tmp_l[buff_size/2:], axis=0)\n \n if (abs(err_con_h) > err_thrs or abs(err_con_l) > err_thrs) and k >= train_iter/2 + 1 :\n \n minindex_l = np.array(tmp_h).argmin()\n \n minindex_h = np.array(tmp_l).argmin()\n \n \n dict_h = np.array(dictall_high[minindex_h])\n \n dict_l = np.array(dictall_low[minindex_l])\n print('break!')\n \n break\n \n \n if mm >= buff_size:\n dictall_high = []\n dictall_low = []\n mm = 0\n print('...done!') \n \n m = m + 1\n mm = mm + 1\n 
########################################\n \n #when done with this iteration remove the dictionaries from the cluster (to improve memory efficiency)\n \n dict_ht_bc.unpersist(blocking=True)\n \n \n dict_lt_bc.unpersist(blocking=True)\n \n\n \n dtdhinv_bc.unpersist(blocking=True)\n \n dtdlinv_bc.unpersist(blocking=True)\n \n ########################################\n \n print('Time elapsed for this iteration: ')\n ttime3 = time.time()-ttime3\n print(ttime3)\n time_all.append(ttime3)\n #print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n \n ##optional: save the results of each iteration in a mat file.\n #mat2save = './results' + str(imageN) + 'x' + str(k)+ '_' + str(dictsize) + '_' +str(partitions_N) + '.mat' \n \n #sio.savemat(mat2save, {'timeelapsed': ttime3, 'dicth':dict_h, 'dictl': dict_l, 'phi_h': phi_h, 'phi_l': phi_l, 'sw_h': sw_h, 'sw_l': sw_l, 'err_l':err_l, 'err_h': err_h})#, 'wh': wh, 'wl': wl})#'phih': phi_h, 'sw': sw})\n \n \n #save error values and final dictionaries in a mat file.\n \n mat2savefin = './results_fin' + str(imageN) + 'x' + str(dictsize) + '_' +str(partitions_N) + '.mat' \n \n sio.savemat(mat2savefin, {'dicth':dict_h, 'dictl': dict_l, 'err_l': err_l_all, 'err_h': err_h_all, 'time_all': time_all})\n \n return 1",
"def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Afb[0.6,-0.7,0.7]\");\n self.modelBuilder.doVar(\"A0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doSet(\"POI\",\"Afb,A0\")\n\n # ss templates\n self.modelBuilder.doVar(\"R_ee_os_fakes[0.6,0.0,1.0]\");\n self.modelBuilder.doVar(\"ee16_fakes_norm[1.0, 0.01, 10.]\");\n self.modelBuilder.doVar(\"ee17_fakes_norm[1.0, 0.01, 10.]\");\n self.modelBuilder.doVar(\"ee18_fakes_norm[1.0, 0.01, 10.]\");\n #Remember, cant use spaces in these formulas!\n #self.modelBuilder.options.verbose = 10\n self.modelBuilder.factory_('expr::R_ee16_qcd_os(\"@0*@1\",ee16_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee17_qcd_os(\"@0*@1\",ee17_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee18_qcd_os(\"@0*@1\",ee18_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee16_qcd_ss(\"@0*(1.0-@1)\",ee16_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee17_qcd_ss(\"@0*(1.0-@1)\",ee17_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee18_qcd_ss(\"@0*(1.0-@1)\",ee18_fakes_norm,R_ee_os_fakes)')\n \n self.modelBuilder.factory_('expr::Alph(\"2.0*@0/(2.0-@0)\",A0)')\n self.modelBuilder.factory_('expr::Norm(\"3.0/4.0/(2.0+@0)\",Alph)')\n self.modelBuilder.factory_('expr::RAlph(\"@0*@1\",Alph,Norm)')\n self.modelBuilder.factory_('expr::Rpl(\"(@0+@1)\",Norm,Afb)')\n self.modelBuilder.factory_('expr::Rmn(\"(@0-@1)\",Norm,Afb)')",
"def sources_extraction(image,sextractor_pars):\n\n cat_name, detect_minarea, detect_thresh, analysis_thresh, phot_aperture, satur_level, ZP, gain, pixelScale,seeing,back_type,back_value,back_size,backphoto_type,backphoto_thick,back_filterthresh,checkimage_type,checkimage_name= sextractor_pars\n sp.run('sex %s.fits -c gft.sex -CATALOG_NAME %s.cat -CATALOG_TYPE ASCII_HEAD -PARAMETERS_NAME gft.param -DETECT_TYPE CCD -DETECT_MINAREA %d -DETECT_THRESH %d -ANALYSIS_THRESH %d -PHOT_APERTURES %d -SATUR_LEVEL %d -MAG_ZEROPOINT %f -GAIN %f -PIXEL_SCALE %f -SEEING_FWHM %f -BACK_TYPE %s -BACK_VALUE %f -BACK_SIZE %d -BACKPHOTO_TYPE %s -BACKPHOTO_THICK %d -BACK_FILTTHRESH %f -CHECKIMAGE_TYPE %s -CHECKIMAGE_NAME %s.fits ' % (image,cat_name, detect_minarea, detect_thresh, analysis_thresh, phot_aperture, satur_level, ZP, gain, pixelScale,seeing,back_type,back_value,back_size,backphoto_type,backphoto_thick,back_filterthresh,checkimage_type,checkimage_name),shell=True)",
"def learn(filePath):\n filename = filePath.stem\n processedJAFFE = load(str(filePath))\n processedDF = pd.DataFrame(processedJAFFE)\n processedDF.columns = ['name', 'data', 'emotion']\n processedDF = processedDF.sort_values(by=['name', 'emotion'])\n grouped = processedDF.groupby(['name', 'emotion'])\n train = grouped.nth([0, 1])\n test = grouped.nth([2, 3, 4])\n\n yTrain = train.index.get_level_values(1).tolist()\n xTrain = train.values.ravel().tolist()\n yTest = test.index.get_level_values(1).tolist()\n xTest = test.values.ravel().tolist()\n\n parameters = {\n 'C': [\n 1.00E-12, 1.00E-11, 1.00E-10, 1.00E-09, 1.00E-08, 1.00E-07, 1.00E-06,\n 1.00E-05, 1.00E-04, 1.00E-03, 2.00E-03, 1.00E-02, 1.00E-01, 1.00,\n 1.00E+01, 1.00E+02, 1.00E+03, 1.00E+04, 1.00E+05\n ],\n 'gamma': [\n 1.00E00,\n 1.00E-01,\n 1.00E-02,\n 1.00E-03,\n 5.00E-04, 2.00E-04, 1.50E-04, 1.10E-04, 1.05E-04, 1.00E-04,\n 9.50E-05, 9.00E-05, 7.00E-05, 5.00E-05, 1.90E-05, 1.00E-05,\n 1.00E-06,\n 1.00E-07,\n ],\n }\n\n models = []\n models.append(['gamma \\\\ C', 1.00E-12, 1.00E-11, 1.00E-10, 1.00E-09, 1.00E-08,\n 1.00E-07, 1.00E-06, 1.00E-05, 1.00E-04, 1.00E-03, 2.00E-03, 1.00E-02,\n 1.00E-01, 1.00, 1.00E+01, 1.00E+02, 1.00E+03, 1.00E+04, 1.00E+05 ])\n gridTimeStart = time()\n numIteration = len(parameters['gamma']) * len(parameters['C'])\n iteration = 0\n meanTime = 0\n for gamma in parameters['gamma']:\n row = [gamma]\n for C in parameters['C']:\n print('C = %s \\t gamma = %s'%(C, gamma))\n timeStart = time()\n svc = OneVsRestClassifier(SVC(random_state=0, decision_function_shape='ovr',\n C=C, kernel='rbf', gamma=gamma), n_jobs=4)\n svc.fit(xTrain, yTrain)\n yTrue, yPred = yTest, svc.predict(xTest)\n yTrue = np.array(yTrue, dtype=np.unicode_)\n yPred = np.array(yPred, dtype=np.unicode_)\n correct = np.sum(yTrue == yPred)\n \n print(\"accuracy: %d/%d = \"%(correct, len(yTrue)),\n D('%.2f'%(correct/len(yTrue)*100)))\n row.append(D('%.2f'%(correct/len(yTrue)*100)))\n \n iterTime = time()-timeStart\n iteration = iteration + 1\n meanTime = meanTime * (iteration-1)/iteration + iterTime/iteration\n remainingTime = (numIteration-iteration)*meanTime\n print('--------------------------(%d sec)--remaining: %s'%\n (iterTime, str(timedelta(seconds=int(remainingTime)))))\n models.append(row)\n gridTime = time() - gridTimeStart\n gridTime = timedelta(seconds=int(gridTime))\n print('time: %s'%str(gridTime))\n print('saving file: %s.csv'%filename)\n with open('../csv/%s.csv'%filename, 'w') as csvFile:\n writer = csv.writer(csvFile)\n writer.writerows(models)"
] | [
"0.5751322",
"0.5728104",
"0.57214856",
"0.57079554",
"0.56615245",
"0.56354",
"0.5628345",
"0.5622833",
"0.5618916",
"0.55971026",
"0.5592311",
"0.5569166",
"0.5569091",
"0.55580115",
"0.5518776",
"0.5511118",
"0.55083716",
"0.55061144",
"0.5489924",
"0.5489006",
"0.54695773",
"0.54636663",
"0.5458125",
"0.5454384",
"0.5450759",
"0.5443957",
"0.5433807",
"0.5430802",
"0.5428431",
"0.54230446"
] | 0.6835519 | 0 |
Determines if previous minimum skew and/or kurtosis should be replaced. | def checkMin(oldskew,oldkurt,newskew,newkurt,oldtransform,newtransform):
    # Accept the new transform only when it improves both statistics, or when
    # kurtosis improves and any increase in skew is less than twice the kurtosis reduction.
    if (newskew < oldskew and newkurt < oldkurt) or (newkurt < oldkurt and (newskew-oldskew) < 2.0*(oldkurt-newkurt)):
        return (newskew,newkurt,newtransform)
    # An improvement in only one of the two statistics is not enough:
    # keep the previous minimum and its transform.
    elif newskew < oldskew:
        return (oldskew,oldkurt,oldtransform)
    elif newkurt < oldkurt:
        return (oldskew,oldkurt,oldtransform)
    else:
        return (oldskew,oldkurt,oldtransform) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def support_skewed(self):\n\t\tif all([item in self.__class__.__dict__ for item in ['init_agg','last_agg','finalize']]):\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False",
"def accept_move(misfit_current, likelihood_current, misfit_proposednext):\n if misfit_proposednext <= misfit_current:\n return True\n\n # gaussian likelihood\n P = np.exp(-misfit_proposednext) / likelihood_current\n return True if sample_uniform() < P else False",
"def stable(self):\n return(self.zeta > 0)",
"def is_lmtp_over(self):\n\t\tif self._c >= self._C:\n\t\t\treturn True\n\t\treturn False",
"def stop(self):\n return self.random.uniform(0, 1) < 1/self.k",
"def rate(skew):\r\n prob = random.random()\r\n if prob >= skew:\r\n return True\r\n if prob < skew:\r\n return False",
"def stationary(self, k):\n s1 = self.active_ssms(k)\n\n if k > 0:\n s2 = self.active_ssms(k-1)\n if s2 != s1:\n return False\n for j in s1:\n if self.scales[j] is not None:\n return False\n if not self.ssms[j].stationary(k-self.ssm_starts[j]):\n return False\n return True",
"def is_skew_symmetric(self):\n return self.all_equal(-self.transpose())",
"def is_tr(self, y, t):\n return t != 0 and y != 0",
"def test_minvar_rotation(self):\n vrot, v, w = minvar(self.rdata)\n # Determinant of rotation matrix should be = 1\n self.assertTrue((np.linalg.det(v) - 1) < self.tol)",
"def has_increased_significantly(old, new, sig_fig=10**(-4)):\n return(new > old and np.log10(1.-old/new) > -sig_fig)",
"def is_skew_component_fusion(self) -> bool:\n fcell = self.first_cell\n scell = self.second_cell\n if self._fuse_row:\n skew_ob = GriddedPerm((0, 1), (fcell, scell))\n else:\n skew_ob = GriddedPerm((0, 1), (fcell, scell))\n return skew_ob in self._tiling.obstructions",
"def add_log_if_improves_skew(feature, df) :\r\n featureData = df[feature] \r\n logged = np.log(featureData)\r\n if abs(logged.skew()) >= abs(featureData.skew()) :\r\n return False\r\n df[feature+\"_log\"] = logged\r\n return True",
"def is_pos_unate(self, vs=None):\n vs = self._expect_vars(vs)\n basis = self.support - set(vs)\n mincov = [PC_ZERO] * (1 << len(basis))\n # Test whether table entries are monotonically increasing\n for cf in self.iter_cofactors(vs):\n for i, item in enumerate(cf.pcdata):\n if mincov[i] == PC_ONE and item == PC_ZERO:\n return False\n mincov[i] = item\n return True",
"def _check_normalization(self):\n lastDistance = None\n distance = None\n for idx in xrange(len(self) - 1):\n distance = self[idx+1][0] - self[idx][0]\n\n # first run\n if lastDistance is None:\n lastDistance = distance\n continue\n\n if lastDistance != distance:\n return False\n\n lastDistance = distance\n\n return True",
"def checkDegenerated(self):\n\n degenerated = False\n\n if np.min(isfinite(self.C)) == 0:\n degenerated = True\n\n elif not ((10**(-16)) < self.sigma_mean < (10**16)):\n degenerated = True\n\n else:\n self.D, self.B = eig(self.C)\n self.D = sqrt(self.D)\n self.D.shape = (self.n,1) # Force D to be a column vector\n if not isreal(self.D).all():\n degenerated = True\n\n if degenerated:\n self.restart()",
"def should_reset(self, current_time_step: ts.TimeStep) -> bool:\n handle_auto_reset = getattr(self, '_handle_auto_reset', False)\n return handle_auto_reset and np.all(current_time_step.is_last())",
"def isStochastic(vector):\n\n # if not equal to 1 within machine precision\n if abs(np.sum(vector) - 1) < sys.float_info.epsilon:\n return True",
"def is_versor(self) -> bool:\n return np.isclose(np.linalg.norm(self.A), 1.0)",
"def is_valid(self):\n posit1 = (self.mean_v > 0) & (self.kappa_y > 0) & (self.eta_y > 0)\n posit2 = (self.kappa_s > 0) & (self.eta_s > 0)\n return posit1 & posit2 & self.feller()",
"def detect_paramchange(self,t_final):\n id1 = np.searchsorted(self.shift_times,t_final)-1\n if id1 != self.current_region:\n return True\n else:\n return False",
"def check_early_stop(self) -> bool:\n if self.args.early_stopping_steps == -1:\n return False\n return self._steps_since_new_prefix >= self.args.early_stopping_steps",
"def compare_sightings(now, before, news={}):\n logging.debug('now {0} before {1}'.format(now, before))\n if not now['size'] == before['size']:\n logging.error('sizes do not match')\n return False\n # using `now.keys()` actually checks the size again,\n # only one new hash key/value comes in at a time right now,\n # but I guess we could compute multiple checksums at once\n for check in list(now.keys()):\n if check in before:\n if now[check] != before[check]:\n logging.error('{2} differ before:{1} now:{0}'.format(\n now[check], before[check], check)\n )\n return False\n else:\n logging.info('{0} not seen before for this'.format(check))\n news[check] = now[check]\n return True",
"def metropolis ( delta ):\n\n import numpy as np\n \n exponent_guard = 75.0\n\n if delta > exponent_guard: # Too high, reject without evaluating\n return False\n elif delta < 0.0: # Downhill, accept without evaluating\n return True\n else:\n zeta = np.random.rand() # Uniform random number in range (0,1)\n return np.exp(-delta) > zeta # Metropolis test",
"def _set_ks_static(self, ks):\n self.ks = ks\n if np.max(self.ks) > self._kret:\n self._kret = np.max(self.ks)",
"def steadyYet(newg, oldg, newe, olde, newh, oldh, newf, oldf, tolerance):\n steady_yet = True\n if oldg == 0 or (abs(newg-oldg)/oldg > tolerance or\n abs(newe-olde)/olde > tolerance or\n abs(newh-oldh)/oldh > tolerance or\n abs(newf-oldf)/oldf > tolerance):\n steady_yet = False\n return steady_yet",
"def metropolis_hastings_accept(energy_prev, energy_next, s_rng):\r\n ediff = energy_prev - energy_next\r\n return (TT.exp(ediff) - s_rng.uniform(size=energy_prev.shape)) >= 0",
"def skew_kurtosis_value(df, feature):\r\n skewness = df[feature].skew()\r\n kurtosis = df[feature].kurt()\r\n\r\n print(\"Skewness: {}\".format(round(skewness, 2)))\r\n if skewness > 0:\r\n print(\"Positive/right skewness: mean and median > mode.\")\r\n else:\r\n print(\"Negative/left skewness: mean and median < mode\")\r\n\r\n print(\"\\nKurtosis: {}\".format(round(kurtosis, 2)))\r\n if kurtosis > 3:\r\n print(\"Leptokurtic: more outliers\")\r\n else:\r\n print(\"Platykurtic: less outliers\")",
"def excesskurtosis(self):\n self._finalize()\n return self.vkurtosis-3",
"def isScalene(self):\n\t\treturn self.a != self.b != self.c"
] | [
"0.56515265",
"0.55337745",
"0.5429516",
"0.5419026",
"0.53508544",
"0.53044164",
"0.52709705",
"0.52291274",
"0.51787686",
"0.5175725",
"0.5167837",
"0.5161958",
"0.5158973",
"0.51454186",
"0.51088625",
"0.5101634",
"0.5093021",
"0.50878626",
"0.50746673",
"0.5050008",
"0.5038825",
"0.50380313",
"0.5032167",
"0.50077087",
"0.50002867",
"0.49963838",
"0.49916163",
"0.4988892",
"0.4973322",
"0.4962647"
] | 0.60877913 | 0 |
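The checkMin helper in the record above only compares one candidate against the current best pair of statistics. A minimal sketch of how it might drive a transform search follows, assuming checkMin is in scope; the best_transform name, the candidate transforms, and the use of scipy.stats skew/kurtosis with absolute values are illustrative assumptions, not part of the record.

import numpy as np
from scipy.stats import skew, kurtosis  # assumed available for this sketch

def best_transform(x):
    # x: 1-D numpy array of sample values (hypothetical input).
    candidates = {
        "identity": x,
        "log": np.log(x - x.min() + 1.0),   # shift so the log argument stays >= 1
        "sqrt": np.sqrt(x - x.min()),       # shift so the sqrt argument stays >= 0
    }
    best = (np.inf, np.inf, "identity")     # (skew, kurtosis, transform name)
    for name, values in candidates.items():
        # checkMin keeps whichever (|skew|, |excess kurtosis|, transform) triple it prefers.
        best = checkMin(best[0], best[1], abs(skew(values)), abs(kurtosis(values)), best[2], name)
    return best

Seeding best with infinities makes the first candidate always accepted, so the loop needs no special case for the initial comparison.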
List storage accounts within a subscription or resource group. | def list_storage_accounts(resource_group_name=None):
    scf = storage_client_factory()
    if resource_group_name:
        # Scope the listing to the given resource group.
        accounts = scf.storage_accounts.list_by_resource_group(resource_group_name)
    else:
        # No resource group supplied: list every account in the subscription.
        accounts = scf.storage_accounts.list()
    return list(accounts) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_storage_account_list(credentials: Credentials, subscription_id: str) -> List[Dict]:\n try:\n client = get_client(credentials, subscription_id)\n storage_account_list = list(map(lambda x: x.as_dict(), client.storage_accounts.list()))\n\n # ClientAuthenticationError and ResourceNotFoundError are subclasses under HttpResponseError\n except ClientAuthenticationError as e:\n logger.warning(f\"Client Authentication Error while retrieving storage accounts - {e}\")\n return []\n except ResourceNotFoundError as e:\n logger.warning(f\"Storage Account not found error - {e}\")\n return []\n except HttpResponseError as e:\n logger.warning(f\"Error while retrieving storage accounts - {e}\")\n return []\n\n for storage_account in storage_account_list:\n x = storage_account['id'].split('/')\n storage_account['resourceGroup'] = x[x.index('resourceGroups') + 1]\n\n return storage_account_list",
"def show_storage_account_usage():\n scf = storage_client_factory()\n return next((x for x in scf.usage.list() if x.name.value == 'StorageAccounts'), None) # pylint: disable=no-member",
"def get_storage_account_details(\n credentials: Credentials, subscription_id: str, storage_account_list: List[Dict],\n) -> Generator[Any, Any, Any]:\n for storage_account in storage_account_list:\n queue_services = get_queue_services(credentials, subscription_id, storage_account)\n table_services = get_table_services(credentials, subscription_id, storage_account)\n file_services = get_file_services(credentials, subscription_id, storage_account)\n blob_services = get_blob_services(credentials, subscription_id, storage_account)\n yield storage_account['id'], storage_account['name'], storage_account[\n 'resourceGroup'\n ], queue_services, table_services, file_services, blob_services",
"def list_accounts(self):\n pass",
"def files_storage_list(self, prefix='pipeline/', print_paths=False):\n\n return self.backend.files_storage_list(prefix=prefix, print_paths=print_paths)",
"def list(ctx):\n if ctx.obj.get('NAMESPACE') != 'accounts':\n click.echo(\n click.style('Only account data is available for listing.', fg='red')\n )\n return\n\n swag = create_swag_from_ctx(ctx)\n accounts = swag.get_all()\n _table = [[result['name'], result.get('id')] for result in accounts]\n click.echo(\n tabulate(_table, headers=[\"Account Name\", \"Account Number\"])\n )",
"def load_storage_account_data(\n neo4j_session: neo4j.Session, subscription_id: str, storage_account_list: List[Dict],\n azure_update_tag: int,\n) -> None:\n ingest_storage_account = \"\"\"\n UNWIND $storage_accounts_list as account\n MERGE (s:AzureStorageAccount{id: account.id})\n ON CREATE SET s.firstseen = timestamp(),\n s.type = account.type, s.resourcegroup = account.resourceGroup,\n s.location = account.location\n SET s.lastupdated = $azure_update_tag,\n s.kind = account.kind,\n s.name = account.name,\n s.creationtime = account.creation_time,\n s.hnsenabled = account.is_hns_enabled,\n s.primarylocation = account.primary_location,\n s.secondarylocation = account.secondary_location,\n s.provisioningstate = account.provisioning_state,\n s.statusofprimary = account.status_of_primary,\n s.statusofsecondary = account.status_of_secondary,\n s.supportshttpstrafficonly = account.enable_https_traffic_only\n WITH s\n MATCH (owner:AzureSubscription{id: $AZURE_SUBSCRIPTION_ID})\n MERGE (owner)-[r:RESOURCE]->(s)\n ON CREATE SET r.firstseen = timestamp()\n SET r.lastupdated = $azure_update_tag\n \"\"\"\n\n neo4j_session.run(\n ingest_storage_account,\n storage_accounts_list=storage_account_list,\n AZURE_SUBSCRIPTION_ID=subscription_id,\n azure_update_tag=azure_update_tag,\n )",
"def list_filesystem(self, headers=None, **kwargs):\n logger.debug('Listing filesystem ...')\n resource = 'account'\n params = get_params(parameters=locals(), exclusions=['self', 'filesystem_identifier', 'headers'])\n response = self._get(params=params, headers=headers)\n return response.json() if response.content else {}",
"def get_file_services(credentials: Credentials, subscription_id: str, storage_account: Dict) -> List[Dict]:\n try:\n client = get_client(credentials, subscription_id)\n file_service_list = client.file_services.list(\n storage_account['resourceGroup'], storage_account['name'],\n ).as_dict()['value']\n\n except ClientAuthenticationError as e:\n logger.warning(f\"Client Authentication Error while retrieving file services - {e}\")\n return []\n except ResourceNotFoundError as e:\n logger.warning(f\"File services resource not found error - {e}\")\n return []\n except HttpResponseError as e:\n logger.warning(f\"Error while retrieving file services list - {e}\")\n return []\n\n return file_service_list",
"def get_list_of_buckets():\r\n\r\n # initialize client\r\n storage_client = storage.Client()\r\n\r\n # get list of buckets\r\n buckets = storage_client.list_buckets()\r\n\r\n list_of_buckets = []\r\n for bucket in buckets:\r\n list_of_buckets.append(bucket.name)\r\n\r\n return list_of_buckets",
"def ls(region_name=DEFAULT_REGION):\n s3conn = s3.connect_to_region(region_name)\n buckets = s3conn.get_all_buckets()\n for bucket in buckets:\n print(bucket.name)",
"def accounts(self):\r\n return resources.Accounts(self)",
"def storage_account(self) -> str:\n return pulumi.get(self, \"storage_account\")",
"def GetAccountList(self):\n\t\treturn self.accounts.keys()",
"def get_blob_services(credentials: Credentials, subscription_id: str, storage_account: Dict) -> List[Dict]:\n try:\n client = get_client(credentials, subscription_id)\n blob_service_list = list(\n map(\n lambda x: x.as_dict(), client.blob_services.list(\n storage_account['resourceGroup'],\n storage_account['name'],\n ),\n ),\n )\n\n except ClientAuthenticationError as e:\n logger.warning(f\"Client Authentication Error while retrieving blob services - {e}\")\n return []\n except ResourceNotFoundError as e:\n logger.warning(f\"Blob services resource not found error - {e}\")\n return []\n except HttpResponseError as e:\n logger.warning(f\"Error while retrieving blob services list - {e}\")\n return []\n\n return blob_service_list",
"def list_objects(bucket=None):\n hook = GoogleCloudStorageHook()\n storage_objects = hook.list(bucket)\n\n return storage_objects",
"def list_bucket(project: str, bucket: str, prefix: str = None) -> typing.List[typing.Any]:\n client = google.cloud.storage.Client(project)\n return list(client.list_blobs(bucket_or_name=bucket, prefix=prefix))",
"def display_accounts(cls):\n return cls.account_list",
"def Accounts(self):\n\n if not self.connected:\n return []\n\n try:\n accounts_listing = _ReadNoProxy(\n GOOGLE_GCE_METADATA_ACCOUNTS_URI + '/')\n accounts_lines = accounts_listing.split()\n accounts = []\n for account_line in accounts_lines:\n account = account_line.strip('/')\n if account == 'default':\n continue\n accounts.append(account)\n return accounts\n except urllib2.HTTPError as e:\n raise MetadataServerException(e)\n except urllib2.URLError as e:\n raise CannotConnectToMetadataServerException(e)",
"def List(self, prefix=''):\n\n bucket, bucket_path = self._ParseBucketAndPath(prefix)\n names = []\n request = self._service.objects().list(bucket=bucket, prefix=bucket_path)\n response = self._RunWithRetries(request.execute, self._CommonErrorMatcher)\n\n while response:\n if 'items' in response:\n names += [item['name'] for item in response['items']]\n\n if 'nextPageToken' in response:\n request = self._service.objects().list(\n bucket=bucket, prefix=bucket_path,\n pageToken=response['nextPageToken'])\n response = self._RunWithRetries(request.execute,\n self._CommonErrorMatcher)\n else:\n response = None\n\n return names",
"def listaccounts(self, minconf=1, as_dict=False):\n if as_dict:\n return dict(self.proxy.listaccounts(minconf))\n else:\n return list(self.proxy.listaccounts(minconf).keys())",
"def accounts():\n if not session.get('authed', False):\n flash(\"Please log in.\")\n return redirect(my_url('index'))\n account_ids = redis_client.smembers('%s-accounts' % session['phone'])\n accounts = [kloudless.Account.retrieve(i) for i in account_ids]\n callback_url = quote_plus(my_url('auth_callback'))\n return render_template('accounts.html', accounts=accounts, app_number=APP_NUMBER,\n callback_url=callback_url, app_id=KLOUDLESS_APP_ID)",
"def get_storage_connectivity_groups(self):\n url = '%s/storage-connectivity-groups' % self.catalog['compute']\n res = self.get(url)\n if res['status'] == 200:\n return json.loads(res['body'])['storage-connectivity-groups']\n else:\n LOG.error('Get storage connectivity groups failed: %s %s %s' %\n (res['status'], res['reason'], res['body']))\n raise InvalidResponse(res)",
"def list_accounts(self):\r\n\r\n account = self.client['Account']\r\n mask = 'cdnAccounts[%s]' % ', '.join(['id',\r\n 'createDate',\r\n 'cdnAccountName',\r\n 'cdnSolutionName',\r\n 'cdnAccountNote',\r\n 'status'])\r\n return account.getObject(mask=mask).get('cdnAccounts', [])",
"def list_key_vaults_request(self, subscription_id: str = None,\n limit: int = DEFAULT_LIMIT, offset: int = DEFAULT_OFFSET) -> list[dict]:\n ful_url = urljoin(self.azure_cloud.endpoints.resource_manager,\n f'subscriptions/{subscription_id}/providers/Microsoft.KeyVault/'\n f'vaults?$top={limit}')\n response = self.http_request(\n 'GET', full_url=ful_url, ok_codes=[200])\n return self.get_entities_independent_of_pages(response, limit, offset)",
"def list_blobs(self, prefix=''):\n return [b.name for b in self.bucket.list_blobs(prefix=prefix)]",
"def get_table_services(credentials: Credentials, subscription_id: str, storage_account: Dict) -> List[Dict]:\n try:\n client = get_client(credentials, subscription_id)\n table_service_list = client.table_services.list(\n storage_account['resourceGroup'], storage_account['name'],\n ).as_dict()['value']\n\n except ClientAuthenticationError as e:\n logger.warning(f\"Client Authentication Error while retrieving table services - {e}\")\n return []\n except ResourceNotFoundError as e:\n logger.warning(f\"Table services resource not found error - {e}\")\n return []\n except HttpResponseError as e:\n logger.warning(f\"Error while retrieving table services list - {e}\")\n return []\n\n return table_service_list",
"def list(self, filter, *args, timeout=None):\n req = AccountListRequest()\n req.meta.CopyFrom(ListRequestMetadata())\n page_size_option = self.parent._test_options.get('PageSize')\n if isinstance(page_size_option, int):\n req.meta.limit = page_size_option\n\n req.filter = plumbing.quote_filter_args(filter, *args)\n\n def generator(svc, req):\n tries = 0\n while True:\n try:\n plumbing_response = svc.stub.List(\n req,\n metadata=svc.parent.get_metadata('Accounts.List', req),\n timeout=timeout)\n except Exception as e:\n if self.parent.shouldRetry(tries, e):\n tries += 1\n self.parent.jitterSleep(tries)\n continue\n raise plumbing.convert_error_to_porcelain(e) from e\n tries = 0\n for plumbing_item in plumbing_response.accounts:\n yield plumbing.convert_account_to_porcelain(plumbing_item)\n if plumbing_response.meta.next_cursor == '':\n break\n req.meta.cursor = plumbing_response.meta.next_cursor\n\n return generator(self, req)",
"def storage_account_name(self) -> str:\n return pulumi.get(self, \"storage_account_name\")",
"def export_accounts(self):\n print('=== Exporting all account data...')\n\n for account in self.client.tenant.accounts:\n print('- Exporting account:', account.email)\n\n json = {\n 'id': self.get_id(account),\n 'href': account.href,\n 'username': account.username,\n 'email': account.email,\n 'fullName': account.full_name,\n 'givenName': account.given_name,\n 'middleName': account.middle_name,\n 'surname': account.surname,\n 'status': account.status,\n 'createdAt': account.created_at.isoformat(),\n 'modifiedAt': account.modified_at.isoformat(),\n 'customData': self.get_custom_data(account),\n 'groups': [],\n 'apiKeys': [],\n 'directory': {\n 'id': self.get_id(account.directory),\n 'href': account.directory.href,\n 'name': account.directory.name,\n 'description': account.directory.description,\n 'status': account.directory.status,\n 'createdAt': account.directory.created_at.isoformat(),\n 'modifiedAt': account.directory.modified_at.isoformat(),\n },\n }\n\n for api_key in account.api_keys:\n json['apiKeys'].append({\n 'href': api_key.href,\n 'id': api_key.id,\n 'secret': api_key.secret,\n #'createdAt': api_key.created_at.isoformat(),\n #'modifiedAt': api_key.modified_at.isoformat(),\n })\n\n for group in account.groups:\n json['groups'].append({\n 'id': self.get_id(group),\n 'href': group.href,\n 'name': group.name,\n 'description': group.description,\n 'status': group.status,\n 'createdAt': group.created_at.isoformat(),\n 'modifiedAt': group.modified_at.isoformat(),\n })\n\n tenant = self.get_id(self.client.tenant)\n self.write('%s/%s/accounts/%s' % (self.location, tenant, json['id']), json)\n\n print('=== Done!\\n')"
] | [
"0.7320318",
"0.6651165",
"0.62439924",
"0.6184162",
"0.6145911",
"0.59039",
"0.58671045",
"0.5840274",
"0.57441765",
"0.5728142",
"0.5645398",
"0.5636501",
"0.5627045",
"0.56266147",
"0.56164116",
"0.5569306",
"0.5560969",
"0.5537345",
"0.55020845",
"0.54901856",
"0.5486767",
"0.5441203",
"0.5435251",
"0.5428888",
"0.54205203",
"0.54184896",
"0.54171205",
"0.53960645",
"0.5385869",
"0.5383552"
] | 0.82980007 | 0 |
Show the current count and limit of the storage accounts under the subscription. | def show_storage_account_usage():
scf = storage_client_factory()
return next((x for x in scf.usage.list() if x.name.value == 'StorageAccounts'), None) # pylint: disable=no-member | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def account_space(access_token):\n client = dropbox.client.DropboxClient(access_token)\n account_info = client.account_info()\n quota_info = account_info['quota_info']\n total = quota_info['quota']\n used = quota_info['normal'] + quota_info['shared']\n return total - used",
"def getAccountSize(self, authenticationToken):\r\n pass",
"def subscriptions_limit(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"subscriptions_limit\")",
"def show(self, req, tenant_id, id):\n LOG.info(\"Indexing quota info for tenant '%(id)s'\\n\"\n \"req : '%(req)s'\\n\\n\", {\"id\": id, \"req\": req})\n\n context = req.environ[wsgi.CONTEXT_KEY]\n if id != tenant_id and not context.is_admin:\n raise exception.TroveOperationAuthError(\n tenant_id=tenant_id\n )\n\n usages = quota_engine.get_all_quota_usages_by_tenant(id)\n limits = quota_engine.get_all_quotas_by_tenant(id)\n for key in usages.keys():\n setattr(usages[key], \"limit\", limits[key].hard_limit)\n return wsgi.Result(views.QuotaUsageView(usages).data(), 200)",
"def show_quota(self, tenant_id, **_params):\r\n return self.get(self.quota_path % (tenant_id), params=_params)",
"def quota(self) -> int:\n return pulumi.get(self, \"quota\")",
"def get_quota(self):\n raise NotImplementedError",
"def get_storage_quota():\n return _drive_service.about().get(fields=\"storageQuota\").execute()[\"storageQuota\"]",
"def count(cls, client) :\n\t\ttry :\n\t\t\tobj = lbprofile()\n\t\t\toption_ = options()\n\t\t\toption_.count = True\n\t\t\tresponse = obj.get_resources(client, option_)\n\t\t\tif response :\n\t\t\t\treturn response[0].__dict__['___count']\n\t\t\treturn 0\n\t\texcept Exception as e :\n\t\t\traise e",
"def storage_account(self) -> str:\n return pulumi.get(self, \"storage_account\")",
"def cmd_account_album_count(client, args):\n account_album_count = client.get_account_album_count(args.username)\n generate_output({'account_album_count': account_album_count})",
"def accounts():",
"def get_summary(self):\n mask = \"\"\"mask[\n nextInvoiceTotalAmount,\n pendingInvoice[invoiceTotalAmount],\n blockDeviceTemplateGroupCount,\n dedicatedHostCount,\n domainCount,\n hardwareCount,\n networkStorageCount,\n openTicketCount,\n networkVlanCount,\n subnetCount,\n userCount,\n virtualGuestCount\n ]\n \"\"\"\n return self.client.call('Account', 'getObject', mask=mask)",
"def get_limit(self, request, tenant_id):\n request.setResponseCode(200)\n return json.dumps(get_limit())",
"def cmd_account_image_count(client, args):\n account_image_count = client.get_account_images_count(args.username)\n generate_output({'account_image_count': account_image_count})",
"def get_statistics(self):\n url = \"https://api.imgur.com/3/account/{0}/stats\".format(self.name)\n return self._imgur._send_request(url, needs_auth=True)",
"def _get_next_limit(self):\n return self.__quota",
"def number_transfers(self, quota):\n if self.elected:\n return len(self.first_votes) - quota\n else:\n return 0",
"def total_storage(self):\n return self._total_storage",
"def display_accounts_details():\n return Credentials.display_credentials()",
"def display_accounts(cls):\n return cls.account_list",
"def accounts_info(self):\r\n param = {}\r\n param['appid'] = self.apiKey\r\n param['nonce'] = int(time.time() * 1000)\r\n param['timestamp'] = int(time.time())\r\n return self.__signed_GET('/api/v1/account/all', param, self.timeout)",
"def count(cls, client) :\n\t\ttry :\n\t\t\tobj = lsntransportprofile()\n\t\t\toption_ = options()\n\t\t\toption_.count = True\n\t\t\tresponse = obj.get_resources(client, option_)\n\t\t\tif response :\n\t\t\t\treturn response[0].__dict__['___count']\n\t\t\treturn 0\n\t\texcept Exception as e :\n\t\t\traise e",
"def list_accounts(self):\n pass",
"def anon_user_api_value( self, trans ):\n usage = trans.app.quota_agent.get_usage( trans )\n percent = trans.app.quota_agent.get_percent( trans=trans, usage=usage )\n return {'total_disk_usage': int( usage ),\n 'nice_total_disk_usage': util.nice_size( usage ),\n 'quota_percent': percent}",
"def storage_account_subscription_id(self) -> Optional[str]:\n return pulumi.get(self, \"storage_account_subscription_id\")",
"def quota():\n try:\n fname = os.path.join(os.path.expanduser(\"~\"), \".planet.json\")\n contents = {}\n if os.path.exists(fname):\n with open(fname, \"r\") as fp:\n contents = json.loads(fp.read())\n else:\n raise IOError(\"Escape to End and Initialize\")\n if not len(contents) != 0:\n raise IOError(\"Escape to End and Initialize\")\n else:\n k = contents[\"key\"]\n main = requests.get(\n \"https://api.planet.com/auth/v1/\" + \"experimental/public/my/subscriptions\",\n auth=HTTPBasicAuth(k, \"\"),\n )\n if main.status_code == 200:\n content = main.json()\n for item_id in content:\n print(\" \")\n print(\"Allocation Name: %s\" % item_id[\"organization\"][\"name\"])\n print(\n \"Allocation active from: %s\" % item_id[\"active_from\"].split(\"T\")[0]\n )\n print(\"Quota Enabled: %s\" % item_id[\"quota_enabled\"])\n print(\"Total Quota in SqKm: %s\" % item_id[\"quota_sqkm\"])\n print(\"Total Quota used: %s\" % item_id[\"quota_used\"])\n if (item_id[\"quota_sqkm\"]) is not None:\n leftquota = float(\n item_id[\"quota_sqkm\"] - float(item_id[\"quota_used\"])\n )\n print(\"Remaining Quota in SqKm: %s\" % leftquota)\n else:\n print(\"No Quota Allocated\")\n print(\"\")\n else:\n print(\"Failed with exception code: \" + str(main.status_code))\n\n except IOError:\n print(\"Initialize client or provide API Key\")",
"def get_storage_account_details(\n credentials: Credentials, subscription_id: str, storage_account_list: List[Dict],\n) -> Generator[Any, Any, Any]:\n for storage_account in storage_account_list:\n queue_services = get_queue_services(credentials, subscription_id, storage_account)\n table_services = get_table_services(credentials, subscription_id, storage_account)\n file_services = get_file_services(credentials, subscription_id, storage_account)\n blob_services = get_blob_services(credentials, subscription_id, storage_account)\n yield storage_account['id'], storage_account['name'], storage_account[\n 'resourceGroup'\n ], queue_services, table_services, file_services, blob_services",
"def account_summary(self):\n pass",
"def count(cls, client) :\n try :\n obj = nshttpprofile()\n option_ = options()\n option_.count = True\n response = obj.get_resources(client, option_)\n if response :\n return response[0].__dict__['___count']\n return 0\n except Exception as e :\n raise e"
] | [
"0.626172",
"0.5928219",
"0.5769423",
"0.56741863",
"0.5654431",
"0.5513493",
"0.53687006",
"0.536599",
"0.53284514",
"0.53220856",
"0.5245537",
"0.52425355",
"0.5240312",
"0.52386",
"0.5236853",
"0.5233157",
"0.5226607",
"0.52219427",
"0.52179664",
"0.5215547",
"0.5200623",
"0.5183503",
"0.518024",
"0.5164749",
"0.5152335",
"0.51419926",
"0.51328677",
"0.5125607",
"0.5124358",
"0.51234883"
] | 0.71497154 | 0 |
Create a stored access policy on the containing object | def create_acl_policy(client, container_name, policy_name, start=None, expiry=None,
permission=None, **kwargs):
acl = _get_acl(client, container_name, **kwargs)
acl[policy_name] = AccessPolicy(permission, expiry, start)
if hasattr(acl, 'public_access'):
kwargs['public_access'] = getattr(acl, 'public_access')
return _set_acl(client, container_name, acl, **kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _set_restricted_policy(environ, bag):\n username = environ['tiddlyweb.usersign']['name']\n if username == 'GUEST':\n return\n bag.policy.owner = username\n # accept does not matter here\n for constraint in ['read', 'write', 'create', 'delete', 'manage']:\n setattr(bag.policy, constraint, [username])\n return",
"def test_create_namespaced_policy(self):\n pass",
"def get_policy(self, *args, **kwargs):\r\n pass",
"def add_to_resource_policy(self, permission: aws_cdk.aws_iam.PolicyStatement) -> None:\n ...",
"def update_policy(self):\n pass",
"def create_policy(self, policy_name, policy_document, delete=True, **kwargs):\n try:\n Oprint.info('Creating IAM policy {}'.format(policy_name), 'iam')\n \n policy = self.get_policy(policy_name=policy_name)\n if policy and policy.get('Policy'):\n if not delete:\n Oprint.info('Found existing IAM policy {}'.format(policy_name), 'iam')\n return policy\n else:\n # Can not delete a policy if it has been attached\n if policy.get('Policy').get('AttachmentCount') > 0:\n Oprint.warn('Policy {} already exists and has been attached to a role. Cannot delete'.format(policy.get('Policy').get('PolicyName')), 'iam')\n return policy\n\n self._client.delete_policy(PolicyArn=self.get_policy_arn(policy_name))\n \n policy = self._client.create_policy(PolicyName=policy_name, PolicyDocument=policy_document, **kwargs)\n\n Oprint.info('IAM policy {} has been created'.format(policy_name), 'iam')\n except Exception as e:\n Oprint.err(e, 'iam')\n\n return policy",
"def _add_policy(self, policy):\n self.by_name[policy.name.upper()] = policy\n self.by_index[int(policy)] = policy",
"def add(self, policy_name, data):\n path = self.vault.normalize(\"/sys/policies/acl/\" + policy_name)\n address = self.vault.vault_adress + \"/v1\" + path\n logging.info(\"Adding the policy: %s\", address)\n payload = json.dumps({\"policy\": data})\n response = self.vault.requests_request(\n \"POST\", address, headers=self.vault.token_header, data=payload\n )",
"def policy_create(request, **kwargs):\n body = {'policy': kwargs}\n policy = neutronclient(request).create_qos_policy(body=body).get('policy')\n return QoSPolicy(policy)",
"def acquire(self, access_mode=None):",
"def pre_access_control_list_create(self, resource_dict):\n pass",
"def copy_access_level(apps, schema_editor):\n # We get the model from the versioned app registry;\n # if we directly import it, it will be the wrong version.\n State = apps.get_model(\"motions\", \"State\")\n for state in State.objects.all():\n if state.access_level == 3:\n state.restriction = [\"managers_only\"]\n elif state.access_level == 2:\n state.restriction = [\n \"motions.can_see_internal\",\n \"motions.can_manage_metadata\",\n ]\n elif state.access_level == 1:\n state.restriction = [\n \"motions.can_see_internal\",\n \"motions.can_manage_metadata\",\n \"is_submitter\",\n ]\n state.save(skip_autoupdate=True)",
"def update_policy(self, *args, **kwargs):\r\n pass",
"def __init__(self, policy):\n super().__init__(policy=policy, sess=policy.sess)",
"def get_policy(usage_id):\r\n return policy.get(policy_key(usage_id), {})",
"def build(self):\n if ((self.allowMethods is None or len(self.allowMethods) == 0) and\n (self.denyMethods is None or len(self.denyMethods) == 0)):\n raise NameError(\"No statements defined for the policy\")\n\n policy = {\n 'principalId': self.principalId,\n 'policyDocument': {\n 'Version': self.version,\n 'Statement': []\n }\n }\n\n policy['policyDocument']['Statement'].extend(\n self._getStatementForEffect(\"Allow\", self.allowMethods))\n policy['policyDocument']['Statement'].extend(\n self._getStatementForEffect(\"Deny\", self.denyMethods))\n\n return policy",
"def set_policy(self, name, policy):\n client = self.connect(VAULT_TOKEN)\n client.set_policy(name, policy)",
"def _wrap_policy(policy_doc):\n return {\"IAMPolicy\": policy_doc}",
"def permits(identity, obj, permission):\n return False",
"def policy(agent):",
"def _get_fake_policy_document():\n return {\n \"Version\": \"2000-01-01\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\"fakeservice:*\"],\n \"Resource\": [\n \"arn:aws:fakeservice:us-east-1:861229788715:foo:bar*\",\n \"arn:aws:fakeservice:us-east-1:861229788715:foo:bar/baz*\",\n ],\n }\n ],\n }",
"def create_policy(env, policy_type, policy_weights_file=None):\n input_size = env.observation_space.shape[0]\n output_size = env.action_space.shape[0]\n action_low = env.action_space.low\n action_high = env.action_space.high\n policy = policy_type(input_size=input_size,\n output_size=output_size,\n action_high=action_high,\n action_low=action_low)\n if policy_weights_file:\n policy.load_model(policy_weights_file)\n return policy",
"def test_create_hyperflex_ext_fc_storage_policy(self):\n pass",
"def test_create_dispatch_policy(self):\n pass",
"def allow_access(self, context, share, access, share_server=None):\r\n LOG.debug(\"Allow access.\")\r\n self.helper._allow_access(share['name'], access, share['share_proto'])",
"def at_object_creation(self):\n self.locks.add(\"view:perm(Immortals)\")",
"def _allow_access(self, context, share, access, share_server=None):\n if access['access_type'] != 'ip':\n raise exception.InvalidShareAccess(\n _('Quobyte driver only supports ip access control'))\n\n volume_uuid = self._resolve_volume_name(share['name'],\n share['project_id'])\n ro = access['access_level'] == (constants.ACCESS_LEVEL_RO)\n call_params = {\n \"volume_uuid\": volume_uuid,\n \"read_only\": ro,\n \"add_allow_ip\": access['access_to']}\n self.rpc.call('exportVolume', call_params)",
"def policy(cls):\n return relationship.many_to_one(cls, 'policy')",
"def rbac_policy_create(request, **kwargs):\n body = {'rbac_policy': kwargs}\n rbac_policy = neutronclient(request).create_rbac_policy(\n body=body).get('rbac_policy')\n return RBACPolicy(rbac_policy)",
"def put_container_policy(ContainerName=None, Policy=None):\n pass"
] | [
"0.6120522",
"0.5991837",
"0.5977101",
"0.58240104",
"0.5799434",
"0.5770939",
"0.5763552",
"0.5759775",
"0.5733948",
"0.56609756",
"0.5627171",
"0.56185126",
"0.56121063",
"0.56093913",
"0.55592847",
"0.5555371",
"0.55546874",
"0.55378443",
"0.5524767",
"0.5518799",
"0.55019885",
"0.55018663",
"0.5494756",
"0.54890585",
"0.5470815",
"0.5449747",
"0.5435639",
"0.54268414",
"0.5422399",
"0.54163563"
] | 0.6184479 | 0 |
Adding the semicircles so that I can use the tangent thing | def addSemicircles(self):
#axes setup
self.setup_axes(animate=False)
self.axes.move_to(ORIGIN)
self.axes.shift(LEFT*5)
#equations of circle
global equation_upper, equation_lower
equation_upper = lambda x : math.sqrt((self.x_max)**2 - x**2)
equation_lower = lambda x : -1*math.sqrt((self.x_max)**2 - x**2)
#get_graph for upper and lower semicircle
global graph_upper, graph_lower
graph_upper = self.get_graph(equation_upper, color=BLUE)
graph_lower = self.get_graph(equation_lower, color=BLUE)
#write graphs
self.add(graph_upper,graph_lower) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def drawTangents(self):\n #bigger tangent\n global big_tangent\n big_tangent = always_redraw(\n lambda : self.get_secant_slope_group(self.x_max * np.cos(theta.get_value()*DEGREES), graph_upper,\n dx=0.001, secant_line_color=RED, secant_line_length=6)\n )\n\n self.play(Write(big_tangent))\n self.wait(0.5)\n self.play(theta.animate.set_value(30))\n self.wait(0.5)\n self.play(theta.animate.set_value(45))\n self.wait(0.5)\n \n #smaller tangent\n global small_tangent\n small_tangent = always_redraw(\n lambda : Line(radius_ang_end_dot.get_center(), self.coords_to_point(\n math.sqrt((self.x_max)**2 + (self.x_max*np.tan(theta.get_value()*DEGREES))**2),\n 0),\n )\n )\n global small_tangent_end_dot\n small_tangent_end_dot = always_redraw(\n lambda : Dot(self.coords_to_point(math.sqrt((self.x_max)**2 + (self.x_max*np.tan(theta.get_value()*DEGREES))**2),\n 0))\n )\n\n global angled_rad\n angled_rad = always_redraw(\n lambda : Line(radius_ang.points[-1], radius_ang.points[0])\n )\n \n #right angle\n global right_angle\n right_angle = RightAngle(angled_rad, small_tangent, length=0.4, stroke_color=YELLOW) \n \n self.play(Write(right_angle))\n self.wait(0.5)\n self.play(FadeOut(right_angle))\n self.play(Write(small_tangent), Write(small_tangent_end_dot), FadeOut(big_tangent))\n self.play(theta.animate.set_value(60))\n\n #extending horiz dot\n global dot_circ_copy\n dot_circ_copy = dot_circ.copy()\n\n #dropping perpendicular\n global dropped_dot\n dropped_dot = always_redraw(\n lambda : Dot(self.coords_to_point(self.x_max * np.cos(theta.get_value()*DEGREES), 0))\n )\n global dropped_perp\n dropped_perp = always_redraw(\n lambda : Line(radius_ang_end_dot.get_center(), dropped_dot.get_center())\n )\n\n #extended horizontal radius\n global radius_horiz_ext\n radius_horiz_ext = always_redraw(\n lambda : Line(dot_center.get_center(), self.coords_to_point(\n math.sqrt((self.x_max)**2 + (self.x_max*np.tan(theta.get_value()*DEGREES))**2), 0\n ))\n )\n\n self.play(Write(radius_horiz_ext), Write(dot_circ_copy), Write(dropped_dot), Write(dropped_perp))",
"def segsFromTangents(self,svgCommandsList, refNode):\n sourcepoints, svgCommandsList = toArray(svgCommandsList)\n\n d = D(sourcepoints[0],sourcepoints[-1])\n x,y,wTot,hTot = computeBox(sourcepoints)\n aR = min(wTot/hTot, hTot/wTot)\n maxDim = max(wTot, hTot)\n isClosing = aR*0.2 > d/maxDim\n debug('isClosing ', isClosing, maxDim, d)\n if d==0:\n # then we remove the last point to avoid null distance\n # in other calculations\n sourcepoints = sourcepoints[:-1]\n svgCommandsList = svgCommandsList[:-1]\n\n if len(sourcepoints) < 4:\n return PathGroup.toSegments(sourcepoints, svgCommandsList, refNode, isClosing=isClosing)\n \n tangents = buildTangents(sourcepoints, isClosing=isClosing)\n\n # global quantities :\n\n # Check if circle -----------------------\n if isClosing:\n if len(sourcepoints)<9:\n return PathGroup.toSegments(sourcepoints, svgCommandsList, refNode, isClosing=True)\n isCircle, res = self.checkForCircle( sourcepoints, tangents) \n debug(\"Is Circle = \", isCircle )\n if isCircle:\n x,y,rmin, rmax,angle = res\n debug(\"Circle -> \", rmin, rmax,angle )\n if rmin/rmax>0.7:\n circ = Circle((x,y),0.5*(rmin+rmax), refNode )\n else:\n circ = Circle((x,y),rmin, refNode, rmax=rmax, angle=angle)\n circ.points = sourcepoints\n return circ\n # -----------------------\n \n\n\n # cluster points by angle of their tangents -------------\n tgSegs = [ Segment.fromCenterAndDir( p, t ) for (p,t) in zip(sourcepoints,tangents) ]\n clustersInd = clusterAngles( [s.angle for s in tgSegs] )\n clustersInd.sort( )\n debug(\"build envelop cluster: \", clustersInd)\n\n # build Segments from clusters \n newSegs = []\n for imin, imax in clustersInd:\n if imin+1< imax: # consider clusters with more than 3 points\n seg = fitSingleSegment(sourcepoints[imin:imax+1])\n elif imin+1==imax: # 2 point path : we build a segment\n seg = Segment.from2Points(sourcepoints[imin], sourcepoints[imax] , sourcepoints[imin:imax+1])\n else:\n seg = Path( sourcepoints[imin:imax+1] )\n seg.sourcepoints = sourcepoints\n newSegs.append( seg )\n resetPrevNextSegment( newSegs )\n debug(newSegs)\n # -----------------------\n\n\n # -----------------------\n # Merge consecutive Path objects \n updatedSegs=[]\n def toMerge(p):\n l=[p]\n setattr(p, 'merged', True)\n if p.next and not p.next.isSegment():\n l += toMerge(p.next)\n return l\n \n for i,seg in enumerate(newSegs[:-1]):\n if seg.isSegment():\n updatedSegs.append( seg) \n continue\n if hasattr(seg,'merged'): continue\n mergeList = toMerge(seg)\n debug('merging ', mergeList)\n p = Path(numpy.concatenate([ p.points for p in mergeList]) )\n debug('merged == ', p.points)\n updatedSegs.append(p)\n\n if not hasattr(newSegs[-1],'merged'): updatedSegs.append( newSegs[-1]) \n debug(\"merged path\", updatedSegs)\n newSegs = resetPrevNextSegment( updatedSegs )\n\n\n # Extend segments -----------------------------------\n if self.options.segExtensionEnable:\n newSegs = SegmentExtender.extendSegments( newSegs, self.options.segExtensionDtoSeg, self.options.segExtensionQual )\n debug(\"extended segs\", newSegs)\n newSegs = resetPrevNextSegment( newSegs )\n debug(\"extended segs\", newSegs)\n\n # ----------------------------------------\n \n\n # ---------------------------------------\n # merge consecutive segments with close angle\n updatedSegs=[]\n\n if self.options.segAngleMergeEnable:\n newSegs = mergeConsecutiveCloseAngles( newSegs , mangle=0.2 )\n newSegs=resetPrevNextSegment(newSegs)\n debug(' __ 2nd angle merge')\n newSegs = mergeConsecutiveCloseAngles( newSegs, mangle=0.35 ) # 2nd pass\n 
newSegs=resetPrevNextSegment(newSegs)\n debug('after merge ', len(newSegs), newSegs)\n # Check if first and last also have close angles.\n if isClosing and len(newSegs)>2 :\n first ,last = newSegs[0], newSegs[-1]\n if first.isSegment() and last.isSegment():\n if closeAngleAbs( first.angle, last.angle) < 0.1:\n # force merge\n points= numpy.concatenate( [ last.points, first.points] )\n newseg = fitSingleSegment(points)\n newseg.next = first.next\n last.prev.next = None\n newSegs[0]=newseg\n newSegs.pop()\n\n # -----------------------------------------------------\n # remove negligible Path/Segments between 2 large Segments\n if self.options.segRemoveSmallEdge:\n self.removeSmallEdge(newSegs , wTot, hTot)\n newSegs=resetPrevNextSegment(newSegs)\n\n debug('after remove small ', len(newSegs),newSegs)\n # -----------------------------------------------------\n\n # -----------------------------------------------------\n # Extend segments to their intersections\n for p in newSegs:\n if p.isSegment() and p.next:\n p.setIntersectWithNext()\n # -----------------------------------------------------\n \n return PathGroup(newSegs, svgCommandsList, refNode, isClosing)",
"def draw_circle(c):\n turtle.circle(c.radius)",
"def draw_circle(c):\n turtle.circle(c.radius)",
"def oncircle(size=None):\n if size is None:\n size = ()\n else:\n try:\n size = tuple(size)\n except TypeError:\n size = (size,)\n # This beats normalizing incircle for all sizes, even though that\n # should be the superior algorithm for compiled code.\n theta = 2.*pi * random(size + (1,))\n return concatenate((cos(theta), sin(theta)), axis=-1)",
"def tangent_circle(dist, radius):\n if dist < 3 * radius:\n if dist >= radius:\n return math.asin(radius/float(dist))\n return math.radians(100)\n return None",
"def make_circle(self):\n A = 2*np.random.rand(self.m, self.n)-1\n b = np.sign(np.sum(A**2, 1) - self.radius)\n return A, b",
"def draw_circle(t, circle):\n t.pu()\n t.goto(circle.center.x, circle.center.y)\n t.pd()\n polygon.circle(t, circle.radius)",
"def get_circle(a, b, c):\n vec = [a[0]**2 + a[1]**2, b[0]**2 + b[1]**2, c[0]**2 + c[1]**2]\n x_mat = [vec, [a[1], b[1], c[1]], [1]*3]\n y_mat = [vec, [a[0], b[0], c[0]], [1]*3]\n d_mat = [[a[0], b[0], c[0]], [a[1], b[1], c[1]], [1] * 3]\n d = 2 * det(d_mat)\n x = 1 / d * det(x_mat)\n y = -1 / d * det(y_mat)\n center = [x, y]\n #r = norm(center - a)\n r = norm([center[0]-a[0], center[1]-a[1]])\n return center, r",
"def _circleCircleTangentsXY(c1,c2):\n\n a = c1[1][0]\n b = c2[1][0]\n if a>b:\n bigIsOne=True\n bigC = c1\n smallC = c2\n else:\n bigIsOne=False\n bigC = c2\n smallC = c1\n ## Consdier the triangle created by the center of the small\n ## circle, the center of the large circle, and the point at the 90\n ## degree intersection of the line from the center of the small\n ## circle to the radian of the tangent point on the large circle.\n ## This is a right triangle with one leg of length d (distance of\n ## centers), one leg of length bigR-smallR, and one leg of unknown\n ## length, beta. theta is the angle formed by d and beta, which is\n ## also the angle of one of the the tangent lines, the other being\n ## -theta.\n ## \n ## we will calulate theta as follows:\n ## beta^2 - (r2-r1)^2 = d^2\n ## beta = sqrt( d^2 - (r2-r1)^2 )\n ## theta = atan ((r2-r1)/beta)\n \n r1 = smallC[1][0]\n r2 = bigC[1][0]\n\n d = dist(c1[0],c2[0])\n mpd = mpm.mpf(d)\n dr = r2-r1\n mpdr = mpm.mpf(dr)\n\n if d <= dr: #centers too close\n raise ValueError('circleCircleTangentsXY: centers of circles too close')\n \n beta = mpm.sqrt( mpd*mpd - mpdr*mpdr)\n theta = float(mpm.atan2(dr,beta))\n\n ## now, figure out the angle created by the center of the large\n ## circle with respect to the small circle\n dd = sub(bigC[0],smallC[0])\n phi = atan2(dd[1],dd[0])\n\n ## the two lines have angle phi+theta, and phi-theta. The\n ## intersection point of these lines is at the point on the circle\n ## phi+theta+90', and phi-theta-90'\n gamma1 = phi+theta+pi/2\n gamma2 = phi-theta-pi/2\n n1 = point(cos(gamma1),sin(gamma1))\n n2 = point(cos(gamma2),sin(gamma2))\n p1 = add(scale3(n1,r1),smallC[0])\n p2 = add(scale3(n1,r2),bigC[0])\n p3 = add(scale3(n2,r1),smallC[0])\n p4 = add(scale3(n2,r2),bigC[0])\n\n l1 = l2 = []\n if bigIsOne:\n l1=line(p2,p1)\n l2=line(p4,p3)\n else:\n l1 = line(p1,p2)\n l2 = line(p3,p4)\n\n return [l1,l2]",
"def circle_from_points(a, b, c):\n ab = subtract_vectors(b, a)\n cb = subtract_vectors(b, c)\n ba = subtract_vectors(a, b)\n ca = subtract_vectors(a, c)\n ac = subtract_vectors(c, a)\n bc = subtract_vectors(c, b)\n normal = normalize_vector(cross_vectors(ab, ac))\n d = 2 * length_vector_sqrd(cross_vectors(ba, cb))\n A = length_vector_sqrd(cb) * dot_vectors(ba, ca) / d\n B = length_vector_sqrd(ca) * dot_vectors(ab, cb) / d\n C = length_vector_sqrd(ba) * dot_vectors(ac, bc) / d\n Aa = scale_vector(a, A)\n Bb = scale_vector(b, B)\n Cc = scale_vector(c, C)\n center = add_vectorlist([Aa, Bb, Cc])\n radius = distance_point_point(center, a)\n return center, radius, normal",
"def semicircle_intersection(coeffs):\n a, b = coeffs\n disc = 1 - 4 * b * (a + b)\n if disc < 0:\n raise ValueError(\n \"Discriminant < 0, there is no intersection with the semicircle.\"\n )\n else:\n disc = np.sqrt(disc)\n x = (1 - 2 * a * b + np.array([1, -1]) * disc) / (2 * (a ** 2 + 1))\n return x + 1j * np.sqrt(x - x ** 2)",
"def pos_on_semicircle(x, r, cxy):\n pos = np.sqrt(r ** 2 - (x - cxy[0]) ** 2) + cxy[1]\n\n return pos",
"def circle(t, r):\n circumference = math.pi * 2 * r\n n = 60\n length = circumference / n\n polygon(t, length, n)",
"def checkForCircle(self, points, tangents):\n if len(points)<10:\n return False, 0\n\n if all(points[0]==points[-1]): # last exactly equals the first.\n # Ignore last point for this check\n points = points[:-1]\n tangents = tangents[:-1]\n #print 'Removed last ', points\n xmin,ymin, w, h = computeBox( points)\n diag2=(w*w+h*h)\n \n diag = sqrt(diag2)*0.5\n norms = numpy.sqrt(numpy.sum( tangents**2, 1 ))\n\n angles = numpy.arctan2( tangents[:,1], tangents[:,0] ) \n #debug( 'angle = ', repr(angles))\n N = len(angles)\n \n deltas = points[1:] - points[:-1] \n deltasD = numpy.concatenate([ [D(points[0],points[-1])/diag], numpy.sqrt(numpy.sum( deltas**2, 1 )) / diag] )\n\n # locate and avoid the point when swicthing\n # from -pi to +pi. The point is around the minimum\n imin = numpy.argmin(angles)\n debug(' imin ',imin)\n angles = numpy.roll(angles, -imin)\n deltasD = numpy.roll(deltasD, -imin)\n n=int(N*0.1)\n # avoid fluctuations by removing points around the min\n angles=angles[n:-n]\n deltasD=deltasD[n:-n]\n deltasD = deltasD.cumsum()\n N = len(angles)\n\n # smooth angles to avoid artificial bumps\n angles = smoothArray(angles, n=max(int(N*0.03),2) )\n\n deltaA = angles[1:] - angles[:-1]\n deltasDD = (deltasD[1:] -deltasD[:-1])\n deltasDD[numpy.where(deltasDD==0.)] = 1e-5*deltasD[0]\n dAdD = abs(deltaA/deltasDD)\n belowT, count = True,0\n for v in dAdD:\n if v>6 and belowT:\n count+=1\n belowT = False\n belowT= (v<6)\n\n self.temp = (deltasD,angles, tangents, dAdD )\n fracStraight = numpy.sum(deltasDD[numpy.where(dAdD<0.3)])/(deltasD[-1]-deltasD[0])\n curveLength = deltasD[-1]/3.14\n #print \"SSS \",count , fracStraight\n if curveLength> 1.4 or fracStraight>0.4 or count > 6:\n isCircle =False\n else: \n isCircle= (count < 4 and fracStraight<=0.3) or \\\n (fracStraight<=0.1 and count<5)\n\n if not isCircle:\n return False, 0\n \n # It's a circle !\n radius = points - numpy.array([xmin+w*0.5,ymin+h*0.5])\n radius_n = numpy.sqrt(numpy.sum( radius**2, 1 )) # normalize\n\n mini = numpy.argmin(radius_n) \n rmin = radius_n[mini]\n maxi = numpy.argmax(radius_n) \n rmax = radius_n[maxi]\n # void points around maxi and mini to make sure the 2nd max is found\n # on the \"other\" side\n n = len(radius_n)\n radius_n[maxi]=0 \n radius_n[mini]=0 \n for i in range(1,n/8+1):\n radius_n[(maxi+i)%n]=0\n radius_n[(maxi-i)%n]=0\n radius_n[(mini+i)%n]=0\n radius_n[(mini-i)%n]=0\n radius_n_2 = [ r for r in radius_n if r>0]\n rmax_2 = max(radius_n_2)\n rmin_2 = min(radius_n_2) # not good !!\n anglemax = numpy.arccos( radius[maxi][0]/rmax)*numpy.sign(radius[maxi][1])\n return True, (xmin+w*0.5,ymin+h*0.5, 0.5*(rmin+rmin_2), 0.5*(rmax+rmax_2), anglemax)",
"def draw_two_circles(t):\n # large circle\n circle(t, 100)\n move(t, 100, 0)\n\n # another large circle\n circle(t, 100)",
"def circle(radius, extent=360):\n turtleTmp.circle(radius, extent)",
"def incircle(self, a, b, c):\n m11, m12 = a.x - self.x, a.y - self.y\n m13 = m11 * m11 + m12 * m12\n m21, m22 = b.x - self.x, b.y - self.y\n m23 = m21 * m21 + m22 * m22\n m31, m32 = c.x - self.x, c.y - self.y\n m33 = m31 * m31 + m32 * m32\n det1 = m11 * (m22 * m33 - m23 * m32)\n det2 = m12 * (m21 * m33 - m23 * m31)\n det3 = m13 * (m21 * m32 - m22 * m31)\n return near(det1 - det2 + det3, 0)",
"def relationPointsEllipse(points, tFociStr):\r\n x1, y1, x2, y2, s = tFociStr\r\n f1 = np.array([x1, y1])\r\n f2 = np.array([x2, y2])\r\n resid = ( np.sum((points-f1)**2, axis=1)**.5 +\r\n np.sum((points-f2)**2, axis=1)**.5 - s )\r\n return np.sign(resid)",
"def generate_circle(R,center,N=100,t0=0.0,t1=2.0*np.pi):\r\n theta = np.linspace(t0,t0+t1,N)\r\n y = R*np.sin(theta) + center[1]\r\n x = R*np.cos(theta) + center[0]\r\n return x,y",
"def HollowCircle(self,center=(0,0),inner_radius=1.0,outer_radius=2.,element_type='tri',isotropic=True,nrad=5,ncirc=10):\n\n # FOR SAFETY, RESET THE CLASS\n self.__reset__()\n\n if np.allclose(inner_radius,0):\n raise ValueError('inner_radius cannot be zero')\n\n t = np.linspace(0,2*np.pi,ncirc+1)\n if isotropic is True:\n radii = np.linspace(inner_radius,outer_radius,nrad+1)\n else:\n base = 3\n radii = np.zeros(nrad+1,dtype=np.float64)\n mm = np.linspace(np.power(inner_radius,1./base),np.power(outer_radius,1./base),nrad+1)\n for i in range(0,nrad+1):\n radii[i] = mm[i]**base\n\n\n # base = 3\n # mm = np.linspace(np.power(inner_radius,1./base),np.power(2.,1./base),nrad+1)\n # mm = np.append(mm,np.linspace(2,outer_radius,nrad+1))\n # radii = np.zeros(mm.shape[0],dtype=np.float64)\n # for i in range(0,mm.shape[0]):\n # radii[i] = mm[i]**base\n\n\n # dd = np.logspace(inner_radius,outer_radius,nrad+1,base=2)/2**np.linspace(inner_radius,outer_radius,nrad+1)\n # print dd*np.linspace(inner_radius,outer_radius,nrad+1)\n # print np.logspace(0,1.5,nrad+1,base=2)\n\n\n xy = np.zeros((radii.shape[0]*t.shape[0],2),dtype=np.float64)\n for i in range(0,radii.shape[0]):\n xy[i*t.shape[0]:(i+1)*t.shape[0],0] = radii[i]*np.cos(t)\n xy[i*t.shape[0]:(i+1)*t.shape[0],1] = radii[i]*np.sin(t)\n\n\n # REMOVE DUPLICATES GENERATED BY SIN/COS OF LINSPACE\n xy = xy[np.setdiff1d( np.arange(xy.shape[0]) , np.linspace(t.shape[0]-1,xy.shape[0]-1,radii.shape[0]).astype(int) ),:]\n\n connec = np.zeros((1,4),dtype=np.int64)\n\n for j in range(1,radii.shape[0]):\n for i in range((j-1)*(t.shape[0]-1),j*(t.shape[0]-1)):\n if i<j*(t.shape[0]-1)-1:\n connec = np.concatenate((connec,np.array([[i,t.shape[0]-1+i,t.shape[0]+i,i+1 ]])),axis=0)\n # connec = connec + ((i,t.shape[0]-1+i,t.shape[0]+i,i+1),)\n else:\n connec = np.concatenate((connec,np.array([[i,t.shape[0]-1+i,j*(t.shape[0]-1),(j-1)*(t.shape[0]-1) ]])),axis=0)\n # connec = connec + ((i,t.shape[0]-1+i,j*(t.shape[0]-1),(j-1)*(t.shape[0]-1)),)\n\n connec = connec[1:,:]\n # connec = np.asarray(connec[1:])\n\n\n if element_type == 'tri':\n connec_tri = np.zeros((2*connec.shape[0],3),dtype=np.int64)\n for i in range(connec.shape[0]):\n connec_tri[2*i,:] = np.array([connec[i,0],connec[i,1],connec[i,3]])\n connec_tri[2*i+1,:] = np.array([connec[i,2],connec[i,3],connec[i,1]])\n\n self.elements = connec_tri\n self.nelem = self.elements.shape[0]\n self.element_type = element_type\n # OBTAIN MESH EDGES\n self.GetBoundaryEdgesTri()\n\n elif element_type == 'quad':\n self.elements = connec\n self.nelem = self.elements.shape[0]\n self.element_type = element_type\n self.GetBoundaryEdgesQuad()\n\n # ASSIGN NODAL COORDINATES\n self.points = xy\n # IF CENTER IS DIFFERENT FROM (0,0)\n self.points[:,0] += center[0]\n self.points[:,1] += center[1]\n # ASSIGN PROPERTIES\n self.nnode = self.points.shape[0]",
"def draw_circles_hough(image, circles):\n try:\n # Convert the circle parameters a, b and r to integers. \n detected_circles = np.uint16(np.around(circles)) \n \n for pt in detected_circles[0, :]: \n a, b, r = pt[0], pt[1], pt[2] \n \n # Draw the circumference of the circle. \n image = cv2.circle(image, (a, b), r, (0, 255, 0), 2) \n \n # Draw a small circle (of radius 1) to show the center. \n image_final = cv2.circle(image, (a, b), 1, (0, 0, 255), 3) \n \n return image_final\n except:\n print('[ERROR]: could not draw image')\n return None",
"def circle():\n xmin=0\n xmax=6.5\n ymin=0.\n ymax=6.5\n\n x = arange(xmin, xmax, 0.005)\n y = x*1.\n [xx, yy] = meshgrid(x, y)\n\n zz=sqrt((xx-3.2475)**2.+(yy-3.2475)**2.)\n zz2=zz*1.\n zz2[(zz <= 3.25)]=1.\n zz2[(zz <= 3.25*0.2)]=0.\n zz2[(zz > 3.25)]=0.\n zz3=zeros(numpy.array(numpy.shape(zz2))/10)\n for i in arange(len(xx)/10):\n for j in arange(len(yy)/10):\n zz3[i,j]=numpy.sum(zz2[(i*10):(i*10+10),(j*10):(j*10+10)])/100.\n\n return zz3",
"def plotAtoms(x,y,radius=0.0):\n if radius:\n theta = np.linspace(0,2*np.pi,36,endpoint=True)\n xCircle = radius*np.cos(theta)\n yCircle = radius*np.sin(theta)\n for i in range(len(x)):\n xCircle += x[i]\n yCircle += y[i]\n plt.plot(xCircle,yCircle)\n xCircle -= x[i]\n yCircle -= y[i]\n else:\n plt.plot(x,y,'o')",
"def great_circle_distance(theta1,phi1,theta2,phi2):\n alt1 = np.pi/2.-theta1\n alt2 = np.pi/2.-theta2\n return np.arccos(np.sin(alt1)*np.sin(alt2)+np.cos(alt1)*np.cos(alt2)*np.cos(phi1-phi2))",
"def cylinder_circles(node_a, node_b, radius, element_number=10):\n\n vector = (np.array(node_a) - np.array(node_b)).tolist()\n pts_a = circle(node_a, vector, radius, element_number)\n pts_b = circle(node_b, vector, radius, element_number)\n\n return pts_a, pts_b",
"def test_calc_circle(self):\n t = AioBaseTurtle()\n steps, step_len, rot_step = t._calc_circle(100, extent=180)\n self.assertEqual(steps, 14)\n self.assertAlmostEqual(rot_step, 180.0 / 14.0)\n self.assertAlmostEqual(step_len, 22.3928952207)",
"def circular(m, n, radius=1.0, center = (0.0, 0.0)):\n\n\n\n from math import pi, cos, sin\n\n radius = float(radius) #Ensure floating point format\n\n #Dictionary of vertex objects and list of points\n vertices = {}\n points = [[0.0, 0.0]] #Center point\n vertices[0, 0] = 0\n\n for i in range(n):\n theta = 2*i*pi/n\n x = cos(theta)\n y = sin(theta)\n for j in range(1,m+1):\n delta = j*radius/m\n vertices[i,j] = len(points)\n points.append([delta*x, delta*y])\n\n #Construct 2 triangles per element\n elements = []\n for i in range(n):\n for j in range(1,m):\n\n i1 = (i + 1) % n #Wrap around\n\n v1 = vertices[i,j+1]\n v2 = vertices[i,j]\n v3 = vertices[i1,j+1]\n v4 = vertices[i1,j]\n\n elements.append([v4,v2,v3]) #Lower\n elements.append([v1,v3,v2]) #Upper\n\n\n #Do the center\n v1 = vertices[0,0]\n for i in range(n):\n i1 = (i + 1) % n #Wrap around\n v2 = vertices[i,1]\n v3 = vertices[i1,1]\n\n elements.append([v1,v2,v3]) #center\n\n return points, elements",
"def parametrized_circle(point_a, point_b, point_c, theta):\n radius, center = shortest_line_to_point(point_a, point_b, point_c)\n # print'center, radius \\n', center, radius\n center_axis = np.subtract(point_a, point_b)\n # print 'center axis %s , radius %s, center %s' % (center_axis, radius, center)\n # center_axis dot <1,1,z> = 0 returns perp vector\n in_plane = norm_vect(np.subtract(point_c, center))\n perp_1 = np.cross(center_axis, in_plane)\n perp_2 = np.cross(center_axis, perp_1)\n # print 'perp dick', perp_1, perp_2\n # norm perpendicular vectors\n perp_1 = norm_vect(perp_1)\n perp_2 = norm_vect(perp_2)\n if -1e-6 > np.dot(perp_1, perp_2) > 1e-6 or -1e-6 > (np.dot(perp_1, center_axis)) > 1e-6 or \\\n -1e-6 > np.dot(perp_2, center_axis) > 1e-6:\n print 'not perpendicular'\n # print np.dot(perp_1, perp_2), np.dot(perp_1, center_axis), np.dot(perp_2, center_axis)\n x = center[0] + (radius * math.cos(theta) * perp_2[0]) + (radius * math.sin(theta) * perp_1[0])\n y = center[1] + (radius * math.cos(theta) * perp_2[1]) + (radius * math.sin(theta) * perp_1[1])\n z = center[2] + (radius * math.cos(theta) * perp_2[2]) + (radius * math.sin(theta) * perp_1[2])\n return [x, y, z]",
"def generate_circle(point_1, point_2, point_3):\r\n # Need to check if points are collinear, if so a circle can't exist\r\n line_1 = perpendicular_bisector(point_1, point_2)\r\n line_2 = perpendicular_bisector(point_2, point_3)\r\n # Find intersection of the two lines\r\n return find_intersection(line_1, line_2)"
] | [
"0.6511479",
"0.63582504",
"0.62292373",
"0.62292373",
"0.60392404",
"0.6029207",
"0.59928757",
"0.5986355",
"0.5939926",
"0.5861232",
"0.5858033",
"0.58485687",
"0.5816202",
"0.58137244",
"0.57763934",
"0.57530665",
"0.5727165",
"0.57132226",
"0.57070947",
"0.56994355",
"0.56611073",
"0.56394374",
"0.56313705",
"0.5622263",
"0.5605388",
"0.559594",
"0.5589227",
"0.5581898",
"0.55544865",
"0.5553504"
] | 0.778817 | 0 |
Drawing the tangent to the circle at the point defined by theta from above | def drawTangents(self):
#bigger tangent
global big_tangent
big_tangent = always_redraw(
lambda : self.get_secant_slope_group(self.x_max * np.cos(theta.get_value()*DEGREES), graph_upper,
dx=0.001, secant_line_color=RED, secant_line_length=6)
)
self.play(Write(big_tangent))
self.wait(0.5)
self.play(theta.animate.set_value(30))
self.wait(0.5)
self.play(theta.animate.set_value(45))
self.wait(0.5)
#smaller tangent
global small_tangent
small_tangent = always_redraw(
lambda : Line(radius_ang_end_dot.get_center(), self.coords_to_point(
math.sqrt((self.x_max)**2 + (self.x_max*np.tan(theta.get_value()*DEGREES))**2),
0),
)
)
global small_tangent_end_dot
small_tangent_end_dot = always_redraw(
lambda : Dot(self.coords_to_point(math.sqrt((self.x_max)**2 + (self.x_max*np.tan(theta.get_value()*DEGREES))**2),
0))
)
global angled_rad
angled_rad = always_redraw(
lambda : Line(radius_ang.points[-1], radius_ang.points[0])
)
#right angle
global right_angle
right_angle = RightAngle(angled_rad, small_tangent, length=0.4, stroke_color=YELLOW)
self.play(Write(right_angle))
self.wait(0.5)
self.play(FadeOut(right_angle))
self.play(Write(small_tangent), Write(small_tangent_end_dot), FadeOut(big_tangent))
self.play(theta.animate.set_value(60))
#extending horiz dot
global dot_circ_copy
dot_circ_copy = dot_circ.copy()
#dropping perpendicular
global dropped_dot
dropped_dot = always_redraw(
lambda : Dot(self.coords_to_point(self.x_max * np.cos(theta.get_value()*DEGREES), 0))
)
global dropped_perp
dropped_perp = always_redraw(
lambda : Line(radius_ang_end_dot.get_center(), dropped_dot.get_center())
)
#extended horizontal radius
global radius_horiz_ext
radius_horiz_ext = always_redraw(
lambda : Line(dot_center.get_center(), self.coords_to_point(
math.sqrt((self.x_max)**2 + (self.x_max*np.tan(theta.get_value()*DEGREES))**2), 0
))
)
self.play(Write(radius_horiz_ext), Write(dot_circ_copy), Write(dropped_dot), Write(dropped_perp)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def tangent_circle(dist, radius):\n if dist < 3 * radius:\n if dist >= radius:\n return math.asin(radius/float(dist))\n return math.radians(100)\n return None",
"def tangent(self, p):\n p = array(p, float)\n v = (p - self.o)\n v /= norm(v)\n b = self.o + ((cross(v, self.N) - v) / 3)*self.r\n mb = _mirror_(self.o, p, b) \n mbb = mb - b\n return mbb/norm(mbb)",
"def animate_tangent_on_curve(self):\n\n mirror_line_work = copy.deepcopy(self.mirror_line)\n\n # create a curved mirror by an arc\n arc_1 = Arc(\n start_angle = PI,\n angle = -PI,\n radius = self.mirror_width / 2,\n color = WHITE,\n arc_center = self.mirror_origin\n )\n\n # create source tangent (on the mirror)\n tangent_color = GREEN\n tangent_stroke_width = 10\n tangent_length_coeff = 0.9\n\n # Add one tangent vector with text \"Tangent\"\n text_tangent_0 = TextMobject(r\"Tangent\").scale(1.4).move_to(-2.0 * RIGHT + -1.0 * UP)\n arrow_one_tan = Arrow(self.mirror_origin,\n self.mirror_origin + 2.0 * RIGHT,\n stroke_width = tangent_stroke_width * 8, color = tangent_color, buff=0)\n self.play(FadeIn(text_tangent_0),\n ShowCreation(arrow_one_tan))\n self.wait(self.wait_time)\n self.play(FadeOut(text_tangent_0),\n FadeOut(arrow_one_tan))\n self.wait(self.wait_time)\n\n # Many tangent vectors\n tangent_arrow_src = []\n for x in range(-3, 4, 1):\n arrow_s = Arrow(self.mirror_origin + x * RIGHT,\n self.mirror_origin + x * RIGHT + tangent_length_coeff * RIGHT, # length adjustment to a bit shorter\n stroke_width = tangent_stroke_width, color = tangent_color, buff=0)\n tangent_arrow_src.append(arrow_s)\n\n tangent_arrow_src_work = copy.deepcopy(tangent_arrow_src)\n tangent_arrow_src_org = copy.deepcopy(tangent_arrow_src)\n\n # destination tangents (on the arc)\n radius = self.mirror_width / 2\n tangent_arrow_dst = []\n for dst_x in range(-3, 4, 1):\n dst_y = math.sqrt(radius**2 - dst_x**2)\n dst_o = dst_x * RIGHT + dst_y * UP + self.mirror_origin # destination normal vector origin (start)\n dst_normal = (dst_o - self.mirror_origin)\n norm = np.linalg.norm(dst_normal) # just radius, though\n dst_normal /= norm\n tan_arrow_dst_v = np.array([dst_normal[1], -dst_normal[0], dst_normal[2]]) # rotate pi/2\n tan_arrow_dst_v = tangent_length_coeff * tan_arrow_dst_v # length adjustment to a bit shorter\n arrow_d = Arrow(dst_o, dst_o + tan_arrow_dst_v, stroke_width = tangent_stroke_width, color=tangent_color, buff=0)\n tangent_arrow_dst.append(arrow_d)\n\n tangent_src_creation = [ShowCreation(mobj) for mobj in tangent_arrow_src_work]\n self.add(mirror_line_work)\n self.play(FadeOut(self.mirror_line), *tangent_src_creation)\n\n text_tangent_1 = TextMobject(r\"Tangent is also different everywhere on a curve.\").move_to(0.0 * RIGHT + 3.0 * UP)\n text_tangent_2 = TextMobject(r\"But the same on a plane.\").move_to(0.0 * RIGHT + 3.0 * UP)\n self.play(FadeIn(text_tangent_1))\n\n tangent_transform = [ReplacementTransform(m1, m2) for (m1, m2) in zip(tangent_arrow_src_work, tangent_arrow_dst)]\n self.play(ReplacementTransform(mirror_line_work, arc_1), *tangent_transform)\n self.wait(self.wait_time)\n\n mirror_line_dst = copy.deepcopy(self.mirror_line)\n tangent_transform_reverse = [ReplacementTransform(m1, m2) for (m1, m2) in zip(tangent_arrow_dst, tangent_arrow_src_org)]\n self.play(FadeOut(text_tangent_1), FadeIn(text_tangent_2))\n self.play(ReplacementTransform(arc_1, mirror_line_dst), *tangent_transform_reverse)\n self.wait(self.wait_time)\n\n fadeout_normals = [FadeOut(mobj) for mobj in tangent_arrow_src_org]\n self.add(self.mirror_line)\n self.play(FadeOut(mirror_line_dst), *fadeout_normals)\n self.play(FadeOut(text_tangent_2))\n self.wait(self.wait_time)",
"def theta(self):\n return atan2(self.y, self.x)",
"def theta(point_a, point_b):\r\n dx = point_b[0] - point_a[0]\r\n dy = point_b[1] - point_a[1]\r\n\r\n if abs(dx) < 1.e-6 and abs(dy) < 1.e-6:\r\n return 360\r\n else:\r\n t = dy/(abs(dx) + abs(dy))\r\n\r\n if dx < 0:\r\n t = 2 - t\r\n elif dy < 0:\r\n t += 4\r\n\r\n if t == 0:\r\n return 360\r\n\r\n return t*90",
"def theta(self):\n return float(np.arctan2(self.y, self.x))",
"def position(t):\n return c + tangent_vec * 7 * t ** 2",
"def tangent(self, param, diff=0, xyz=False):\n return self.diff(param, diff=diff+1, xyz=xyz)",
"def draw_circle(c):\n turtle.circle(c.radius)",
"def draw_circle(c):\n turtle.circle(c.radius)",
"def theta(a, b):\n \n \n def norm_vec(x):\n norm_out = sqrt(dot(x, x))\n return norm_out\n \n theta = acos(dot(a, b) / (norm_vec(a) * norm_vec(b))) * 180 / pi\n \n print theta",
"def draw_circle(t, circle):\n t.pu()\n t.goto(circle.center.x, circle.center.y)\n t.pd()\n polygon.circle(t, circle.radius)",
"def create_left_right_tangent(self):\n self.arc_incident_tan = Arc(\n start_angle = PI/2 + self.incident_angle,\n angle = PI/2 - self.incident_angle,\n radius = self.arc_incident_tan_radius,\n color = self.arc_incident_color,\n arc_center = self.mirror_origin\n )\n\n theta_in_tan_pos_offset = -2.0 * RIGHT + 0.8 * UP\n self.tex_theta_in_tan = TexMobject(r\"90^{\\circ}\",\n r\"-\",\n r\"\\theta_{i}\",\n color=self.tex_theta_in_color).\\\n move_to(self.mirror_origin + theta_in_tan_pos_offset)\n\n self.arc_reflected_tan = Arc(\n start_angle = 0,\n angle = PI/2 - self.reflected_angle,\n radius = self.arc_reflected_tan_radius,\n color = self.arc_reflected_color,\n arc_center = self.mirror_origin\n )\n\n theta_out_tan_pos_offset = 2.0 * RIGHT + 0.8 * UP\n self.tex_theta_ref_tan = TexMobject(r\"90^{\\circ}\",\n r\"-\",\n r\"\\theta_{r}\",\n color=self.tex_theta_ref_color).\\\n move_to(self.mirror_origin + theta_out_tan_pos_offset)",
"def circular_movement(radius = 150, theta=None):\n y = radius * np.sin(theta)\n if theta == 0:\n x = radius\n elif np.pi*0.99 < theta < np.pi*1.01:\n x = -radius\n else:\n x = y/np.tan(theta)\n return x, y",
"def tangent_points_to_circle_xy(circle, point):\n m, r = circle[0], circle[1]\n cx, cy = m[0], m[1]\n px = point[0] - cx\n py = point[1] - cy\n\n a1 = r*(px*r - py*math.sqrt(px**2 + py**2 - r**2))/(px**2 + py**2)\n a2 = r*(px*r + py*math.sqrt(px**2 + py**2 - r**2))/(px**2 + py**2)\n\n b1 = (r**2 - px*a1)/py\n b2 = (r**2 - px*a2)/py\n\n p1 = (a1 + cx, b1 + cy, 0)\n p2 = (a2 + cx, b2 + cy, 0)\n return p1, p2",
"def animate_tangent_angles_equal(self):\n\n # \\theta_i and \\theta_r\n self.play(FadeIn(self.tex_derive_ti_tr[0]), FadeIn(self.tex_derive_ti_tr[2]))\n self.wait(self.wait_time)\n\n # mirror reflection\n self.play(FadeIn(self.text_mirror), FadeIn(self.tex_derive_ti_tr[1]))\n self.wait(self.wait_time)\n\n self.play(FadeIn(self.tex_derive_ti_tr[3]), FadeIn(self.tex_derive_ti_tr[4]))\n self.wait(self.wait_time)\n\n # tangent\n tex_derive_tan_tin_tan_tr_work = copy.deepcopy(self.tex_derive_tan_tin_tan_tr)\n theta_work_1 = copy.deepcopy(self.theta_0)\n theta_work_1.move_to(self.tex_derive_tan_tin_tan_tr[2].get_center())\n theta_work_2 = copy.deepcopy(self.theta_0)\n theta_work_2.move_to(self.tex_derive_tan_tin_tan_tr[6].get_center())\n theta_i_org = copy.deepcopy(tex_derive_tan_tin_tan_tr_work[2])\n theta_r_org = copy.deepcopy(tex_derive_tan_tin_tan_tr_work[6])\n\n # Show 90 - theta_i , 90 - theta_r\n self.play(FadeIn(tex_derive_tan_tin_tan_tr_work[0:2]),\n FadeIn(tex_derive_tan_tin_tan_tr_work[4:6]),\n # Make final memory destination of ReplacementTransform\n # tex_derive_tan_tin_tan_tr_work, thus here we start with\n # the copies (theta_i_org and theta_r_org).\n FadeIn(theta_i_org),\n FadeIn(theta_r_org))\n self.wait(self.wait_time)\n\n # transform to theta_i, theta_r = theta\n self.play(ReplacementTransform(theta_i_org, theta_work_1),\n ReplacementTransform(theta_r_org, theta_work_2))\n self.wait(self.wait_time)\n\n # transform back to theta_i, theta_r\n self.play(ReplacementTransform(theta_work_1, tex_derive_tan_tin_tan_tr_work[2]),\n ReplacementTransform(theta_work_2, tex_derive_tan_tin_tan_tr_work[6]))\n self.wait(self.wait_time)\n\n # show = thera'\n self.play(FadeIn(tex_derive_tan_tin_tan_tr_work[3]),\n FadeIn(tex_derive_tan_tin_tan_tr_work[7:9]))\n self.wait(self.wait_time)\n\n # Show equal anges: theta_0\n theta_i_equal = copy.deepcopy(self.tex_derive_ti_tr[4])\n theta_r_equal = copy.deepcopy(self.tex_derive_ti_tr[4])\n self.add(theta_i_equal, theta_r_equal)\n self.play(ApplyMethod(theta_i_equal.move_to, self.tex_theta_in.get_center()),\n FadeOut(self.tex_theta_in),\n ApplyMethod(theta_r_equal.move_to, self.tex_theta_ref.get_center()),\n FadeOut(self.tex_theta_ref))\n self.wait(self.wait_time)\n\n # Show equal anges: theta_0'\n theta_i_tan_equal = copy.deepcopy(self.tex_derive_tan_tin_tan_tr[8])\n theta_r_tan_equal = copy.deepcopy(self.tex_derive_tan_tin_tan_tr[8])\n self.add(theta_i_tan_equal, theta_r_tan_equal)\n self.play(ApplyMethod(theta_i_tan_equal.move_to, self.tex_theta_in_tan. get_center()),\n FadeOut(self.tex_theta_in_tan),\n ApplyMethod(theta_r_tan_equal.move_to, self.tex_theta_ref_tan.get_center()),\n FadeOut(self.tex_theta_ref_tan))\n self.wait(self.wait_time)\n\n self.play(FadeOut(self.text_mirror),\n FadeOut(self.tex_derive_ti_tr),\n FadeOut(tex_derive_tan_tin_tan_tr_work))\n self.wait(self.wait_time)",
"def theta():\n pass",
"def computeTangent(self):\n # return np.matmul(self.examples.T, self.gradAmbient)\n return self.gradAmbient + self.centroid * minkowskiDot(self.centroid, self.gradAmbient)",
"def restrict_theta(theta):\n tnew = theta + np.pi\n tnew += -2.0*np.pi*np.floor(tnew/(2.0*np.pi))\n tnew -= np.pi\n return tnew",
"def slope_of_tangent(self, x: float, graph: ParametricFunction, **kwargs) -> float:\n\n return np.tan(self.angle_of_tangent(x, graph, **kwargs))",
"def drawCircle(t, x, y, radius):\r\n t.up()\r\n t.goto(x + radius, y)\r\n t.setheading(90)\r\n t.down()\r\n for count in range(120):\r\n t.left(3)\r\n t.forward(2.0 * math.pi * radius / 120.0)",
"def theta_finder(theta, point_a, point_b, point_c, point_c_new):\n x, y, z = parametrized_circle(point_a, point_b, point_c, theta)\n residual = (x - point_c_new[0])**2 + (y - point_c_new[1])**2 + (z - point_c_new[2])**2\n return residual",
"def _draw_arc(c, a, theta, **kwargs):\n s = np.arange(0, abs(theta), 0.01)\n s = np.sign(theta) * s\n d = a - c\n r = np.linalg.norm(d)\n alpha = DubinsUAV2D._angle(d)\n w = np.empty((len(s), 2))\n for i, t in enumerate(s):\n w[i] = c + r * np.array([[np.cos(alpha), -np.sin(alpha)], [np.sin(alpha), np.cos(alpha)]]) @ np.array(\n [np.cos(t), np.sin(t)])\n plt.plot(w[:, 0], w[:, 1], **kwargs)",
"def _circleCircleTangentsXY(c1,c2):\n\n a = c1[1][0]\n b = c2[1][0]\n if a>b:\n bigIsOne=True\n bigC = c1\n smallC = c2\n else:\n bigIsOne=False\n bigC = c2\n smallC = c1\n ## Consdier the triangle created by the center of the small\n ## circle, the center of the large circle, and the point at the 90\n ## degree intersection of the line from the center of the small\n ## circle to the radian of the tangent point on the large circle.\n ## This is a right triangle with one leg of length d (distance of\n ## centers), one leg of length bigR-smallR, and one leg of unknown\n ## length, beta. theta is the angle formed by d and beta, which is\n ## also the angle of one of the the tangent lines, the other being\n ## -theta.\n ## \n ## we will calulate theta as follows:\n ## beta^2 - (r2-r1)^2 = d^2\n ## beta = sqrt( d^2 - (r2-r1)^2 )\n ## theta = atan ((r2-r1)/beta)\n \n r1 = smallC[1][0]\n r2 = bigC[1][0]\n\n d = dist(c1[0],c2[0])\n mpd = mpm.mpf(d)\n dr = r2-r1\n mpdr = mpm.mpf(dr)\n\n if d <= dr: #centers too close\n raise ValueError('circleCircleTangentsXY: centers of circles too close')\n \n beta = mpm.sqrt( mpd*mpd - mpdr*mpdr)\n theta = float(mpm.atan2(dr,beta))\n\n ## now, figure out the angle created by the center of the large\n ## circle with respect to the small circle\n dd = sub(bigC[0],smallC[0])\n phi = atan2(dd[1],dd[0])\n\n ## the two lines have angle phi+theta, and phi-theta. The\n ## intersection point of these lines is at the point on the circle\n ## phi+theta+90', and phi-theta-90'\n gamma1 = phi+theta+pi/2\n gamma2 = phi-theta-pi/2\n n1 = point(cos(gamma1),sin(gamma1))\n n2 = point(cos(gamma2),sin(gamma2))\n p1 = add(scale3(n1,r1),smallC[0])\n p2 = add(scale3(n1,r2),bigC[0])\n p3 = add(scale3(n2,r1),smallC[0])\n p4 = add(scale3(n2,r2),bigC[0])\n\n l1 = l2 = []\n if bigIsOne:\n l1=line(p2,p1)\n l2=line(p4,p3)\n else:\n l1 = line(p1,p2)\n l2 = line(p3,p4)\n\n return [l1,l2]",
"def eDouble(P): #adding P + P by using a tangent line\r\n R = point(0, 0, P.c)\r\n i = ( (3 * P.x ** 2) + P.c.a) #the slope equation (i/j)\r\n j = (2 * P.y)\r\n s = (i * modInv(j, P.c.p) ) % P.c.p\r\n R.x = ( (s ** 2) - 2 * P.x) % P.c.p\r\n R.y = (-P.y + s * (P.x - R.x) ) % P.c.p\r\n return R",
"def circle(radius, extent=360):\n turtleTmp.circle(radius, extent)",
"def draw_housing_2():\r\n tom.pensize(3)\r\n tom.color(\"black\", \"darkgrey\")\r\n tom.begin_fill()\r\n tom.forward(80)\r\n tom.left(90)\r\n tom.forward(200)\r\n tom.circle(40, 180)\r\n tom.forward(200)\r\n tom.left(90)\r\n tom.end_fill()\r\n tom.hideturtle()",
"def world_to_tanp(self, ra, dec):\n x, y = ra, dec\n return x, y",
"def __call__( self , theta ):\r\n offset = np.dot( z_rot( theta ) , [ self.radius , 0 , 0 ] )\r\n # print \"Offset:\" , offset\r\n return np.add( self.center , offset )",
"def scattering_direction(v, theta):\r\n # Sample cos_phi and sin_phi, phi is the azimuthal angle of the scattering event\r\n continue_loop = True\r\n while continue_loop:\r\n eta1 = 1-2*random.random()\r\n eta2 = 1-2*random.random()\r\n alpha = eta1**2 + eta2**2\r\n if alpha <= 1:\r\n continue_loop = False\r\n cos_phi = eta1/np.sqrt(alpha)\r\n sin_phi = eta2/np.sqrt(alpha)\r\n \r\n new_x = v[0]*np.cos(theta) - np.sin(theta)/np.sqrt(1-v[2]**2) * (v[0]*v[2]*cos_phi + v[1]*sin_phi)\r\n new_y = v[1]*np.cos(theta) - np.sin(theta)/np.sqrt(1-v[2]**2) * (v[1]*v[2]*cos_phi - v[0]*sin_phi)\r\n new_z = v[2]*np.cos(theta) + np.sqrt(1-v[2]**2)*np.sin(theta)*cos_phi\r\n \r\n return [new_x, new_y, new_z]"
] | [
"0.68210936",
"0.68082553",
"0.6468239",
"0.6416218",
"0.63939244",
"0.6283899",
"0.6283792",
"0.62334484",
"0.6202951",
"0.6202951",
"0.61995333",
"0.6194441",
"0.61483556",
"0.6142514",
"0.6130193",
"0.6118884",
"0.60151887",
"0.6007657",
"0.5992",
"0.5975721",
"0.59291214",
"0.5884757",
"0.5881937",
"0.58349496",
"0.5788965",
"0.5778163",
"0.5769849",
"0.57589215",
"0.5733153",
"0.57324445"
] | 0.6885991 | 0 |
text for Consider the following areas | def considerAreasText(self):
global consider_area_text
consider_area_text = MathTex("\\text{Consider the following areas:}").scale(0.8).shift(RIGHT*3.55)
self.play(Write(consider_area_text))
self.play(consider_area_text.animate.to_edge(UP)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_formatted_text(self, n_cols):",
"def text(self, str: str, x: int, y: int, colour: int, /) -> None:",
"def plot_area_text(data_obj, test_num, yi, xpos, ha, va, fs, dfs):\n\n plt.text(xpos[0], yi, 'Test ' + str(test_num) + ': Image area', ha=ha, va=va, fontsize=fs)\n str1 = str(data_obj.image_area[0]) + ' by ' + str(data_obj.image_area[1]) + ' pixels'\n plt.text(xpos[1], yi, str1, ha=ha, va=va, fontsize=fs - dfs)\n plt.text(xpos[3], yi, '$\\geq$ 1000 by 1000 pixels', ha=ha, va=va, fontsize=fs - dfs)",
"def text_draw(self, x, y, text, style={}):",
"def draw_text(self, drawer):\n for s_details in self._strings_detail:\n drawer.text((s_details[2][0], s_details[2][1]), s_details[0],\n font=self._image_font, fill=self._color, align='center')",
"def draw_text(self, text, i, j, **params):",
"def tagview(tab,label,x,y):\r\n font = cv2.FONT_HERSHEY_SIMPLEX\r\n col=classifc[label]\r\n labnow=classif[label]\r\n# print (labnow, text)\r\n if label == 'back_ground':\r\n deltay=30\r\n else:\r\n# deltay=25*((labnow-1)%5)\r\n deltay=40+10*(labnow-1)\r\n\r\n viseg=cv2.putText(tab,label,(x, y+deltay), font,0.3,col,1)\r\n return viseg",
"def tagviews(tab,text,x,y):\r\n font = cv2.FONT_HERSHEY_SIMPLEX\r\n viseg=cv2.putText(tab,text,(x, y), font,0.3,white,1)\r\n return viseg",
"def area(self):",
"def current_area(self, value=None):\n my_area = self.my_text.index(INSERT)\n str(my_area)\n for x in range(0, len(my_area)):\n if my_area[x] == \".\":\n my_y = my_area[0:x]\n my_x = my_area[x + 1:]\n my_new_area = \"Ln: \" + my_y + \" | Col: \" + my_x\n self.my_location.config(text=my_new_area)",
"def add_annotations(self):\n for i in range(8):\n self.text.append(self.canvas.create_text(-self.width / 2,\n (self.width / 2) + (i * self.width),\n font=(\"Purisa\", 12), anchor=\"nw\"))\n self.canvas.itemconfig(self.text[i], text=str((i - 8) * -1))\n for i in range(8):\n self.text.append(self.canvas.create_text((self.width / 2) + (i * self.width),\n self.width * 8 + 10, font=(\"Purisa\", 12), anchor=\"nw\"))\n self.canvas.itemconfig(self.text[i + 8], text=string.ascii_lowercase[i])",
"def autolabel(rects):",
"def autolabel(rects, r, p):\n for j in range(len(rects)):\n rect = rects[j]\n height = rect.get_width()\n # print(\"height: \", height)\n ax.annotate( \"F1: \" + '{}'.format(height) + \" (P: \" + str(p[j]) + \"; R: \" + str(r[j]) + \")\",\n xy=(height, rect.get_y() + rect.get_height() / 2),\n xytext=(90, -9), # 3 points vertical offset\n textcoords=\"offset points\",\n ha='center', va='bottom', fontsize=15)",
"def drawLabels(self):\r\n if self.sensors == None or self.sensors == []:\r\n return\r\n col = self.app.getSensorCol(self.sensors[self.sensor_ids[0]])\r\n self.c.create_text(30,20,text=self.sensors[self.sensor_ids[0]],fill=col,anchor=tk.NW)\r\n if len(self.sensor_ids) == 2:\r\n col = self.app.getSensorCol(self.sensors[self.sensor_ids[1]])\r\n self.c.create_text(30,40,text=self.sensors[self.sensor_ids[1]],fill=col,anchor=tk.NW)",
"def explainAreaSmall(self):\n \n #EXPLANATION NO. 1\n #fadeout the non-required areas\n self.play(FadeOut(area_ABC_copy), FadeOut(area_ABD_copy),\n FadeOut(geq_2), FadeOut(geq_1),\n FadeOut(area_ABC), FadeOut(area_ABD))\n \n #expand the required area\n self.play(area_ABE_copy.animate.scale(2).move_to(RIGHT*2))\n\n #surrounding text\n abe_text_1 = always_redraw(\n lambda : MathTex(\"=\", \"\\\\text{Area of } \\\\triangle ABE\").scale(0.8).next_to(area_ABE_copy, RIGHT)\n )\n\n #half base height\n abe_text_2 = always_redraw(\n lambda : MathTex(\"=\", \"\\\\dfrac{1}{2}\", \"\\\\times\", \"\\\\text{base}\", \"\\\\times\", \"\\\\text{height}\").scale(0.8).next_to(area_ABE_copy, RIGHT)\n )\n\n #write texts\n self.play(Write(abe_text_1))\n self.wait()\n self.play(ReplacementTransform(abe_text_1[0], abe_text_2[0]),\n ReplacementTransform(abe_text_1[1:], abe_text_2[1:]))\n self.wait()\n\n #defining braces\n abe_base_brace = always_redraw(\n lambda : Brace(radius_ang, DOWN)\n )\n abe_base_brace_label = always_redraw(\n lambda : MathTex(\"R\\\\cos\\\\theta\").scale(0.6).next_to(abe_base_brace, DOWN)\n )\n abe_height_brace = always_redraw(\n lambda : Brace(radius_ang, LEFT)\n )\n abe_height_brace_label = always_redraw(\n lambda : MathTex(\"R\\\\sin\\\\theta\").scale(0.6).next_to(abe_height_brace, LEFT)\n )\n\n self.play(Write(abe_base_brace), Write(abe_height_brace))\n self.play(Write(abe_base_brace_label), Write(abe_height_brace_label))\n self.wait()\n\n \n #back to editing the equation\n abe_text_3 = always_redraw(\n lambda : MathTex(\"=\", \"\\\\dfrac{1}{2}\", \"\\\\times\", \"R\\\\cos\\\\theta\", \"\\\\times\", \"R\\\\sin\\\\theta\").scale(0.8).next_to(area_ABE_copy, RIGHT)\n )\n\n self.play(ReplacementTransform(abe_text_2[0:], abe_text_3[0:]))\n self.wait(0.5)\n self.play(FadeOut(abe_base_brace), FadeOut(abe_height_brace),\n FadeOut(abe_base_brace_label), FadeOut(abe_height_brace_label))\n \n abe_text_4 = always_redraw(\n lambda : MathTex(\"=\", \"\\\\dfrac{1}{2}\", \"\\\\times\", \"\\\\cos x\", \"\\\\times\", \"\\\\sin x\").scale(0.8).next_to(area_ABE_copy, RIGHT)\n )\n self.play(ReplacementTransform(abe_text_3[0:], abe_text_4[0:]))\n\n abe_text_5 = always_redraw(\n lambda : MathTex(\"=\", \"\\\\dfrac{1}{2}\", \"\\\\sin x\", \"\\\\cos x\").scale(0.8).next_to(area_ABE_copy, RIGHT)\n )\n self.play(ReplacementTransform(abe_text_4[0:2], abe_text_5[0:2]),\n ReplacementTransform(abe_text_4[2:], abe_text_5[2:]))\n\n #vgroup for drawing box\n abe_group = VGroup(abe_text_5, area_ABE_copy)\n abe_formula_box = SurroundingRectangle(abe_group, color=PINK)\n\n self.play(Write(abe_formula_box))\n self.wait()\n\n #remove all elements\n self.play(FadeOut(abe_formula_box), FadeOut(abe_text_5), FadeOut(area_ABE_copy), FadeOut(area_ABE))",
"def text(self, text: str, xo: int, yo: int, color: int):\n for offset, letter in enumerate(text):\n template = font.get(letter)\n for x, line in enumerate(template):\n line_str = '{:08b}'.format(line).replace('0b', '')\n if self.portrait:\n line_str = reversed(line_str)\n for y, pix in enumerate(line_str):\n if pix == '1':\n self.pixel(xo + x + (offset * 8), yo + y, color)",
"def get_text(self):",
"def highlight_word(self,coordinates):\r\n for coordinate in coordinates:\r\n letter = Label(root,text = self.wordsearch[coordinate[0]][coordinate[1]],font = (\"Helvetica\", 10),fg = 'white',\r\n bg = 'red').grid(row = coordinate[0]+1, column = coordinate[1]+1,\r\n padx = 10, \r\n pady = 10)",
"def text(self, x, y, text):\n for i, char in enumerate(text):\n self.point(x + i, y, char)",
"def _hotspot_fields_plot_figtexts(self, scenario, frame):\n top, bottom, left, right = frame\n for p_ind, project in enumerate(self.projects):\n n_models = self.cfg[\"N\"][f\"{project}_{scenario}\"]\n plt.figtext(\n left + 0.18 + p_ind * (right - left) / 2,\n 0.85,\n (f\"{self.formatter(project.upper())} \"\n f\"{self.formatter(f'{project}-{scenario}')} \"\n f\"(N={n_models})\"),\n fontsize=\"large\",\n )\n for row, period in enumerate(self.cfg[\"future_periods\"]):\n ypos = top - (top - bottom) / 2 * (1 + row * 1.1) + 0.05\n plt.figtext(\n 0.005,\n ypos,\n period,\n rotation=\"vertical\",\n fontsize=\"11\",\n )",
"def display_string(text_area_no: int) -> str:\n if text_area_no == 1:\n text = ''\n for v in utterances['P1']:\n text += v + '\\n'\n return text\n elif text_area_no == 2:\n text = ''\n for v in utterances['S1']:\n text += v + '\\n'\n return text\n elif text_area_no == 3:\n text = ''\n for v in utterances['S2']:\n text += v + '\\n'\n return text\n elif text_area_no == 4:\n text = ''\n for v in utterances['S3']:\n text += v + '\\n'\n return text\n elif text_area_no == 5:\n text = ''\n for v in utterances['S4']:\n text += v + '\\n'\n return text\n elif text_area_no == 6:\n text = ''\n for v in utterances['S4']:\n text += v + '\\n'\n return text\n elif text_area_no == 7:\n text = ''\n for v in utterances['C1']:\n text += v + '\\n'\n return text\n elif text_area_no == 8:\n text = ''\n for v in utterances['C2']:\n text += v + '\\n'\n return text",
"def addText(img, left_curverad, right_curverad, offset):\n # Average the curvature radius of two lanes\n avg_curverad = (left_curverad + right_curverad)/2\n annotation = 'Radius of curvature: {:5.1f} m Vehicle offset from the center: {:5.1f} m'.format(avg_curverad, offset)\n result = cv2.putText(img, annotation, (30,40), 0, 1, (0,0,0), 2, cv2.LINE_AA)\n\n return result",
"def helptext(self):\n return \"\"\"<b> Edit Regions </b> <br/>\\n\n <b>R</b> to start a new region or set the edge if editing<br/>\\n\n <b>E</b> to edit nearest region<br/>\\n\n <b>D</b> to delete/cancel the current/nearest region\n \"\"\"",
"def getText():",
"def getText():",
"def getText():",
"def getText():",
"def getText():",
"def annotate_grid(self, g):\n shape = self.convert_track_to_shape(g)\n partial_track=vector(0,self.track_width/6.0)\n self.cell.add_rect(layer=\"text\",\n offset=shape[0],\n width=shape[1].x-shape[0].x,\n height=shape[1].y-shape[0].y)\n t=self.rg.map[g].get_type()\n \n # midpoint offset\n off=vector((shape[1].x+shape[0].x)/2,\n (shape[1].y+shape[0].y)/2)\n if t!=None:\n if g[2]==1:\n # Upper layer is upper right label\n type_off=off+partial_track\n else:\n # Lower layer is lower left label\n type_off=off-partial_track\n self.cell.add_label(text=str(t),\n layer=\"text\",\n offset=type_off)\n\n t=self.rg.map[g].get_cost()\n partial_track=vector(self.track_width/6.0,0) \n if t!=None:\n if g[2]==1:\n # Upper layer is right label\n type_off=off+partial_track\n else:\n # Lower layer is left label\n type_off=off-partial_track\n self.cell.add_label(text=str(t),\n layer=\"text\",\n offset=type_off)\n \n self.cell.add_label(text=\"{0},{1}\".format(g[0],g[1]),\n layer=\"text\",\n offset=shape[0],\n zoom=0.05)",
"async def outline_text(draw_surface, coords, draw_text, font):\n draw = partial(draw_surface.text, text=draw_text, font=font,\n fill=\"black\")\n for offset_pair in product(range(-1, 2), repeat=2):\n draw((coords[0]+offset_pair[0], coords[1]+offset_pair[1]))\n draw(coords, fill=\"white\")"
] | [
"0.64041543",
"0.6377654",
"0.632749",
"0.62660927",
"0.6217415",
"0.6127728",
"0.6065138",
"0.6050231",
"0.60372806",
"0.60256594",
"0.60008544",
"0.5952757",
"0.58686084",
"0.58307946",
"0.5816903",
"0.5807217",
"0.579291",
"0.57903117",
"0.5766726",
"0.57592934",
"0.5727761",
"0.5724373",
"0.5724335",
"0.57240486",
"0.57240486",
"0.57240486",
"0.57240486",
"0.57240486",
"0.5722461",
"0.5703853"
] | 0.73524785 | 0 |
outline and move shaded shapes | def areasShaded(self):
global area_ABE
area_ABE = always_redraw(
lambda : Polygon(dot_center.get_center(), radius_ang_end_dot.get_center(), dropped_dot.get_center(),
color=PINK, fill_color=PINK, fill_opacity=0.5)
)
global area_ABE_copy
area_ABE_copy = area_ABE.copy()
self.play(Write(area_ABE))
self.wait(0.5)
self.play(area_ABE_copy.animate.move_to(consider_area_text.get_center()+DOWN*1.5+LEFT*1.5))
global area_ABD
area_ABD = always_redraw(
lambda : Sector(outer_radius=self.x_max, start_angle=0, angle=theta.get_value()*DEGREES,
stroke_width=DEFAULT_STROKE_WIDTH , stroke_color=BLUE, color=BLUE, fill_opacity=0.5).shift(LEFT*5)
)
global area_ABD_copy
area_ABD_copy = area_ABD.copy()
self.play(Write(area_ABD))
self.wait(0.5)
self.play(area_ABD_copy.animate.move_to(consider_area_text.get_center()+DOWN*1.5+RIGHT*1.5))
global area_ABC
area_ABC = always_redraw(
lambda : Polygon(dot_center.get_center(), radius_ang_end_dot.get_center(), small_tangent_end_dot.get_center(),
color=GREEN, fill_color=GREEN, fill_opacity=0.5)
)
global area_ABC_copy
area_ABC_copy = area_ABC.copy()
self.play(Write(area_ABC))
self.wait(0.5)
self.play(area_ABC_copy.animate.move_to(consider_area_text.get_center()+DOWN*3.5))
self.play(FadeOut(consider_area_text)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def shapes():\r\n turtle.up()\r\n turtle.forward(500)\r\n turtle.down()\r\n draw_hexagon()\r\n draw_square()\r\n draw_triangle()",
"def draw(self):\n arcade.draw_rectangle_outline(self.position_x, self.position_y, self.radius, self.color)",
"def draw_housing():\n tess.pensize(3)\n tess.color(\"black\", \"darkgrey\")\n tess.begin_fill()\n tess.forward(80)\n tess.left(90)\n tess.forward(200)\n tess.circle(40, 180)\n tess.forward(200)\n tess.left(90)\n tess.end_fill()\n tess.hideturtle()",
"def draw_housing():\n tess.pensize(3)\n tess.color(\"black\", \"darkgrey\")\n tess.begin_fill()\n tess.forward(80)\n tess.left(90)\n tess.forward(200)\n tess.circle(40, 180)\n tess.forward(200)\n tess.left(90)\n tess.end_fill()",
"def make_flower(shape, x, y, c1, c2, l, s):\n shape.penup()\n shape.speed(20)\n shape.setpos(x, y)\n shape.color(c2, c1)\n shape.begin_fill()\n shape.pendown()\n for side in range(6):\n shape.left(60)\n shape.forward(s) # s stands for short side\n shape.right(60)\n shape.forward(l) # l stands for long side\n shape.right(60)\n shape.forward(s)\n shape.right(60)\n shape.forward(s)\n shape.right(60)\n shape.forward(l)\n shape.right(60)\n shape.forward(s)\n shape.right(60)\n shape.end_fill()\n shape.pendown()\n\n shape.color(\"green\")\n shape.right(90)\n shape.penup()\n shape.forward(10)\n shape.pendown()\n shape.forward(110)\n shape.left(90)\n\n\n\n # ...",
"def _change_shape(self,x,y,w,h):\n top = y \n left = x\n right = x + w\n bottom = y + h\n return top,right,bottom,left",
"def draw_long_shape():\n turtle.fillcolor('blue')\n draw_block()\n turtle.forward(50)\n draw_block()\n turtle.forward(50)\n draw_block()\n turtle.forward(50)\n draw_block()\n turtle.back(150)",
"def draw_housing():\r\n green.pensize(3)\r\n green.color(\"black\", \"darkgrey\")\r\n green.begin_fill()\r\n green.forward(80)\r\n green.left(90)\r\n green.forward(200)\r\n green.circle(40, 180)\r\n green.forward(200)\r\n green.left(90)\r\n green.end_fill()",
"def shapes2():\r\n turtle.up()\r\n turtle.backward(100)\r\n turtle.left(270)\r\n turtle.forward(100)\r\n turtle.left(90)\r\n turtle.backward(700)\r\n shapes()",
"def spirala(t):\n t.penup()\n t.setx(random.randrange(-200,200))\n t.sety(random.randrange(-200,200))\n t.pencolor(random.randrange(0,255),random.randrange(0,255),200)\n t.width(random.randrange(2,13))\n t.pendown()\n\n for i in range(120):\n \tt.forward(20+i)\n \tt.left(30 - i/1.5)",
"def draw_housing_2():\r\n tom.pensize(3)\r\n tom.color(\"black\", \"darkgrey\")\r\n tom.begin_fill()\r\n tom.forward(80)\r\n tom.left(90)\r\n tom.forward(200)\r\n tom.circle(40, 180)\r\n tom.forward(200)\r\n tom.left(90)\r\n tom.end_fill()\r\n tom.hideturtle()",
"def move_to_object(self, obj_img_pos, img_shape, obj_col, des_img_pos, img_thres):\n def show_binary(img_bin, des_img_pos, new_img_pos, img_thres):\n \"\"\"\n Show intermediate binary image while refining position.\n \"\"\"\n img_bgr = cv2.cvtColor(img_bin, cv2.COLOR_GRAY2RGB)\n #draw tolerance lines\n #left/right vertical lines\n xl = des_img_pos[0] - img_thres\n xr = des_img_pos[0] + img_thres\n y1 = 0\n y2 = img_shape[1]\n cv2.line(img_bgr,(xl,y1),(xl,y2),(0,255,0),1)\n cv2.line(img_bgr,(xr,y1),(xr,y2),(0,255,0),1)\n #top/bottom horizontal lines\n yt = des_img_pos[1] - img_thres\n yb = des_img_pos[1] + img_thres\n x1 = 0\n x2 = img_shape[0]\n cv2.line(img_bgr,(x1,yt),(x2,yt),(0,255,0),1)\n cv2.line(img_bgr,(x1,yb),(x2,yb),(0,255,0),1)\n #draw circle at detected object\n cv2.circle(img_bgr,tuple(new_img_pos),6,(255,0,0),2)\n #show image\n cv2.imshow(window_name, img_bgr)\n cv2.waitKey(1000) & 0xFF\n \n cur_arm_pos = [self.x, self.y]\n move_inc = self.move_inc\n window_name = 'Refine position'\n col_thresh = self.close_col_thresh\n init_arm_pos = [self.init_x, self.init_y]\n scale = self.scale\n \n print(' Current obj img pos: '+str(obj_img_pos))\n \n #compute desired arm position\n des_arm_pos = self.world_pos_from_img_pos(obj_img_pos, \n img_shape, init_arm_pos, scale)\n print(' Desired arm position: '+str(des_arm_pos))\n \n #move arm to approximate position\n cur_arm_pos = self.move_to(des_arm_pos[0], des_arm_pos[1], \n self.move_to_height)\n new_img = self.update_img() #wait to update image\n \n #select new colour\n peg_col_close = self.choose_colours(new_img)\n \n #refine position\n new_img_pos, img_bin = self.find_colours(new_img, peg_col_close, \n num_objects=1, ab_dist_thresh=col_thresh)\n show_binary(img_bin, des_img_pos, new_img_pos, img_thres)\n while ( abs(new_img_pos[0] - des_img_pos[0]) > img_thres or \n abs(new_img_pos[1] - des_img_pos[1]) > img_thres ):\n #refine position\n cur_arm_pos = self.move_to_refine(des_img_pos, new_img_pos, \n cur_arm_pos, move_inc, img_thres)\n \n #update image\n new_img = self.update_img()\n \n #find new image position of peg\n new_img_pos, img_bin = self.find_colours(new_img, peg_col_close, \n num_objects=1, ab_dist_thresh=col_thresh)\n \n #show binary image\n show_binary(img_bin, des_img_pos, new_img_pos, img_thres)\n \n return cur_arm_pos",
"def drawmaze(self):\n win=GraphWin(\"Perfect Maze\",600,600) \n win.setBackground(\"White\")\n scale=600/self.N #Used to generalize the size difference for the input of larger numbers. The background resolution/ grid size, N\n\n x1=scale\n y1=0\n x2=scale\n y2=scale\n\n ##VERTICAL LINES ####\n for i in range(self.N,0,-1):\n for j in range(1,self.N):\n if self.East[j][i]: #If East is true, draw a line.\n \n line=Line(Point(x1,y1),Point(x2,y2)) #lines | |\n line.setFill(\"red\")\n line.draw(win)\n x1+=scale #Increment causes |->|\n x2+=scale #Increment causes |->|\n y1+=scale #Used to draw two more\n y2+=scale #of the same spaced lines further down.\n x1=scale #Reset\n x2=scale #Reset\n\n\n ##HORIZONTAL LINES##\n x1=0\n y1=scale\n x2=scale\n y2=scale\n\n\n for i in range(self.N,1,-1):\n for j in range(1,self.N+1):\n if self.South[j][i]: #If South is true, draw a line.\n \n line=Line(Point(x1,y1),Point(x2,y2))\n line.setFill(\"red\")\n line.draw(win)\n x1+=scale\n x2+=scale\n y1+=scale\n y2+=scale\n x1=0\n x2=scale\n\n const=scale//5 #Very useful const which helps in placing circles on grid.\n x=scale//2\n y=600-scale//2\n #radius=(scale-(4*scale//self.N))/2\n radius=scale//2-(const)\n start=Point(x,y) #START POINT HERE \n circ=Circle(start,radius)\n circ.setFill(\"Red\")\n label=Text(start,\"Start\")\n label.setFill(\"Black\")\n circ.draw(win)\n label.draw(win)\n #print(self.CurrentCell)\n #Using the current cell from the finished algorithm(last place visited), a circle can be placed at that point.\n endpointx=(self.CurrentCell[0]-1)*scale +scale//2 ####MAKING END POINT X\n endpointy=600-(self.CurrentCell[1]-1)*scale-scale//2 ####MAKING END POINT Y\n endpoint=Point(endpointx,endpointy)\n circ2=Circle(endpoint,radius)\n circ2.setFill(\"White\")\n label2=Text(endpoint,\"End\")\n circ2.draw(win)\n label2.draw(win)\n \n ###############CREATE KEY########################\n \n \n keypointx=(self.MazeKey[0]-1)*scale +scale//2 ####MAKING END POINT X\n keypointy=600-(self.MazeKey[1]-1)*scale-scale//2 ####MAKING END POINT Y\n keypoint=Point(keypointx,keypointy)\n circ3=Circle(keypoint,radius)\n circ3.setFill(\"Blue\")\n label3=Text(keypoint,\"Key\")\n circ3.draw(win)\n label3.draw(win)\n pathcol=\"Yellow\"\n##\n\n \n for i in range(1,len(self.EntirePath)): \n pathpointx=(self.EntirePath[i][0]-1)*scale +scale//2 ####MAKING END POINT X\n pathpointy=600-(self.EntirePath[i][1]-1)*scale-scale//2 ####MAKING END POINT Y\n pathpoint=Point(pathpointx,pathpointy)\n drawpath=Circle(pathpoint,radius)\n drawpath.setFill(pathcol)\n if self.EntirePath[i]==self.KeyPath[-1]:\n pathcol=\"Violet\"\n label4=Text(keypoint,\"Key\")\n label4.draw(win) \n drawpath.draw(win)\n drawpath.setWidth(1)\n sleep(0.1)\n \n #drawpath.draw(win)\n \n label5=Text(endpoint,\"Maze Solved \")\n label5.draw(win)\n circ4=Circle(start,radius)\n circ4.setFill(\"Red\")\n circ4.draw(win) \n label6=Text(start,\"Start \")\n label6.draw(win)",
"def makeCircleOutline(self):\n #circle defined\n global circ_main\n circ_main = Circle(stroke_color=BLUE).scale(2).shift(LEFT*5)\n\n #dot at circle and dot at center\n global dot_circ\n dot_circ = always_redraw(\n lambda : Dot(circ_main.get_end())\n )\n global dot_center\n dot_center = Dot(LEFT*5)\n \n #line from origin to circle\n global line_circ\n line_circ = always_redraw(\n lambda : Line(start=dot_center.get_center(), end=dot_circ.get_center())\n )\n \n #write stuff\n self.play(Write(dot_circ), Write(line_circ), Write(dot_center))\n self.play(Write(circ_main), run_time=3, rate_func=double_smooth)",
"def draw_shape(self, r=0, g=0, b=0): # black is the default color\r\n turtles= turtle.Turtle()\r\n turtles.speed(0) # Makes the turtle speed up\r\n turtles.color(r, g, b)\r\n turtles.showturtle()\r\n turtles.penup()\r\n turtles.pendown()\r\n\r\n # draws the Shape to the screen\r\n\r\n for i in range(self.num_sides):\r\n turtles.forward(self.side_length)\r\n turtles.left(360/(self.num_sides))\r\n turtles.hideturtle()",
"def drawEyes(win, winW, winH):\n# leftEye = Oval(Point(300-120-40, 300-80-20), Point(300-120+40, 300-80+20))\n leftEye = Oval(Point(winW/2-winW/5-winW/15, winH/2-winH/7.5-winH/30),\n Point(winW/2-winW/5+winW/15, winH/2-winH/7.5+winH/30))\n leftEye.setFill(\"white\")\n leftEye.setOutline(\"black\")\n leftEye.draw(win)\n leftIris = Circle(Point(winW/2-winW/5, winH/2-winH/7.5), winH/40)\n leftIris.setOutline(\"black\")\n leftIris.setFill(\"darkcyan\")\n leftIris.draw(win)\n leftPupil = Circle(Point(winW/2-winW/5, winH/2-winH/7.5), winH/120)\n leftPupil.setOutline(\"black\")\n leftPupil.setFill(\"black\")\n leftPupil.draw(win)\n rightEye = leftEye.clone()\n rightEye.move(winW/2-winW/10,0)\n rightEye.draw(win)\n rightIris = leftIris.clone()\n rightIris.move(winW/2-winW/10,0)\n rightIris.draw(win)\n rightPupil = leftPupil.clone()\n rightPupil.move(winW/2-winW/10,0)\n rightPupil.draw(win)",
"def draw_arc_outline(center_x, center_y, width, height, color, start_angle,\n end_angle, border_width=1, tilt_angle=0):\n num_segments = 128\n GL.glEnable(GL.GL_BLEND)\n GL.glBlendFunc(GL.GL_SRC_ALPHA, GL.GL_ONE_MINUS_SRC_ALPHA)\n GL.glEnable(GL.GL_LINE_SMOOTH)\n GL.glHint(GL.GL_LINE_SMOOTH_HINT, GL.GL_NICEST)\n GL.glHint(GL.GL_POLYGON_SMOOTH_HINT, GL.GL_NICEST)\n\n GL.glLoadIdentity()\n GL.glTranslatef(center_x, center_y, 0)\n GL.glRotatef(tilt_angle, 0, 0, 1)\n GL.glLineWidth(border_width)\n\n # Set color\n if len(color) == 4:\n GL.glColor4ub(color[0], color[1], color[2], color[3])\n elif len(color) == 3:\n GL.glColor4ub(color[0], color[1], color[2], 255)\n\n GL.glBegin(GL.GL_LINE_STRIP)\n\n start_segment = int(start_angle / 360 * num_segments)\n end_segment = int(end_angle / 360 * num_segments)\n\n for segment in range(start_segment, end_segment + 1):\n theta = 2.0 * 3.1415926 * segment / num_segments\n\n x = width * math.cos(theta)\n y = height * math.sin(theta)\n\n GL.glVertex3f(x, y, 0.5)\n\n GL.glEnd()\n GL.glLoadIdentity()",
"def decorate_scene():\n make_polygon( (100,100),(120,140),(270,70) )\n make_polygon( (300,10), (300,550), (340,452),(380,300), (330,50))\n make_polygon( (200,450), (100,450), (100,500), (200,500) )\n make_polygon( (130,320), (150,300), (140,280) )\n return",
"def draw_flower_advanced():\n draw_flower()\n turtle.left(90)\n turtle.up() #Raise pen for movement\n turtle.forward(150)\n turtle.left(90)\n turtle.forward(150)\n turtle.right(90)\n turtle.down() #lower pen for drawing",
"def paint(event):\n global coordinates, lastPlayedCoordinates\n\n x1, y1 = (event.x - size), (event.y - size)\n x2, y2 = (event.x + size), (event.y + size)\n c.create_oval(x1, y1, x2, y2, fill = active_colour, outline=\"\")\n \n coordinates.append([x1, x2, y1, y2, active_colour])\n lastPlayedCoordinates = []",
"def _draw_square(self, left_x, top_y, side, color, fill):\n self.pen.up()\n self.pen.color(color)\n self.pen.goto(left_x, top_y)\n self.pen.down()\n self.pen.begin_fill()\n for _ in range(4):\n self.pen.forward(side)\n self.pen.right(90)\n self.pen.end_fill()",
"def draw_o(self):\r\n pen.down()\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.up()\r\n pen.forward(50)",
"def draw_aim(self):\n polygon(screen, self.color, [(self.x, self.y), (self.x + self.r * 1.71 / 2, self.y - self.r / 2),\n (self.x + self.r * 1.71, self.y), (self.x + self.r * 1.71, self.y + self.r),\n (self.x + self.r * 1.71 / 2, self.y + 3 * self.r / 2), (self.x, self.y + self.r)])",
"def form(x, y, s):\n rnd = int(random(3))\n shuffle(colors) # this is my own implementation of shuffle (rn_utils)\n noStroke()\n fill(colors[0])\n pushMatrix()\n translate(x, y)\n rotate(int(random(4)) * PI * 0.5)\n if random(1) < 0.5:\n rect(0, 0, s + 0.9, s, s, 0, s, s)\n # myShape(s * 0.75, -s * 0.25, s * 0.5, 0);\n else:\n rect(0, 0, s + 0.9, s, s, s, 0, s)\n # myShape(s * 0.75, s * 0.25, s * 0.5, TAU * 0.75);\n\n fill(colors[3])\n ellipse(0, 0, s * 0.8, s * 0.8)\n\n fill(colors[1])\n ellipse(0, 0, s * 0.5, s * 0.5)\n\n # if (rnd == 0) drawVortex(0, 0, s * 0.5);\n # if (rnd == 1) ellipse(0, 0, s * 0.5, s * 0.5);\n # if (rnd == 2) {\n # \tfill(colors[1]);\n # \tellipse(0, 0, s * 0.5, s * 0.5);\n # \tdrawHeart(0, s * 0.05, s * 0.35);\n # }\n\n if random(1) < 0.1:\n fill(colors[0])\n arc(0, 0, s, s, PI, TAU)\n\n popMatrix()",
"def draw(self):\n arcade.draw_xywh_rectangle_filled(\n self.x, self.y, self.width, self.height, self.fill.color\n )\n arcade.draw_xywh_rectangle_outline(\n self.x, self.y, self.width, self.height, self.pen.color, 3\n )",
"def draw(self):\n radius = self.width / 2\n center_x = self.x + radius\n center_y = self.y + radius\n arcade.draw_circle_filled(center_x, center_y, radius, self.fill.color)\n arcade.draw_circle_outline(\n center_x, center_y, radius, self.pen.color, 3)",
"def draw(self):\n i = 0\n self.window.fill((60,50,20))\n for i in range(len(self.board)):\n for j in range(len(self.board[i])):\n pygame.draw.rect(self.window, ((i+j)%2*255, (i+j)%2*255, (i+j)%2*255), (20+j*100, 20+i*100, 100, 100))\n if self.board[i][j] != 0:\n if self.board[i][j].player == 0:\n color = (200, 0, 0)\n else:\n color = (0, 0, 200)\n if self.board[i][j].direction == 0:\n pygame.draw.ellipse(self.window, color, (30+j*100, 40+i*100, 80, 60))\n elif self.board[i][j].direction == 1:\n pygame.draw.ellipse(self.window, color, (40+j*100, 30+i*100, 60, 80))\n if self.board[i][j].master:\n if self.board[i][j].direction == 0:\n pygame.draw.ellipse(self.window, (255,255,0), (40+j*100, 50+i*100, 60, 40))\n pygame.draw.ellipse(self.window, color, (45+j*100, 55+i*100, 50, 30))\n elif self.board[i][j].direction == 1:\n pygame.draw.ellipse(self.window, (255,255,0), (50+j*100, 40+i*100, 40, 60))\n pygame.draw.ellipse(self.window, color, (55+j*100, 45+i*100, 30, 50))\n \n if self.selected != None:\n pygame.draw.rect(self.window, (200, 200, 0), (20+self.selected[1]*100, 20+self.selected[0]*100, 100, 100), 5)\n pygame.display.flip()",
"def draw_s(self):\r\n pen.down()\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.forward(20)\r\n pen.left(90)\r\n pen.forward(40)\r\n pen.right(90)\r\n pen.forward(20)\r\n pen.right(90)\r\n pen.forward(40)\r\n pen.up()\r\n pen.back(40)\r\n pen.right(90)\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.forward(50)",
"def _draw_outline(self) -> None:\n stroke = self.border_thickness\n\n # draw outline rectangle\n for _w in range(self.widget_width):\n for line in range(stroke):\n self._bitmap[_w, line] = 1\n self._bitmap[_w, self.widget_height - 1 - line] = 1\n for _h in range(self.widget_height):\n for line in range(stroke):\n self._bitmap[line, _h] = 1\n self._bitmap[self.widget_width - 1 - line, _h] = 1",
"def __init__(self):\r\n Frame.__init__(self)\r\n self.master.title(\"GUIs drawing geometric shapes\")\r\n self.grid()\r\n\r\n #create a canvas and place in this frame\r\n self.canvas = Canvas(self, width = 300, height = 400)\r\n self.canvas.grid(row = 0, column = 0)\r\n\r\n self.canvas.create_rectangle(100, 50, 200, 350)\r\n self.canvas.create_oval(100, 50, 200, 150,\r\n fill = \"white\", tags = \"RED\")\r\n self.canvas.create_oval(100, 150, 200, 250,\r\n fill = \"white\", tags = \"YELLOW\")\r\n self.canvas.create_oval(100, 250, 200, 350,\r\n fill = \"green\", tags = \"GREEN\")\r\n\r\n \r\n dx = 1\r\n while True:\r\n self.canvas.after(2000) # Sleep for 15 milliseconds\r\n self.canvas.update() # Update canvas\r\n if dx == 1:\r\n self.canvas.itemconfigure(\"YELLOW\", fill = \"yellow\")\r\n self.canvas.itemconfigure(\"GREEN\", fill = \"white\")\r\n dx += 1\r\n elif dx == 2:\r\n self.canvas.itemconfigure(\"RED\", fill = \"red\")\r\n self.canvas.itemconfigure(\"YELLOW\", fill = \"white\")\r\n dx += 1 \r\n else:\r\n self.canvas.itemconfigure(\"RED\", fill = \"white\")\r\n self.canvas.itemconfigure(\"GREEN\", fill = \"green\")\r\n dx = 1"
] | [
"0.62152916",
"0.605251",
"0.5987781",
"0.59800446",
"0.59738743",
"0.59658647",
"0.5955795",
"0.5951562",
"0.5947045",
"0.59127516",
"0.5882004",
"0.57984614",
"0.57946116",
"0.57875484",
"0.5787153",
"0.5785315",
"0.5783228",
"0.57493037",
"0.5746614",
"0.57459676",
"0.574258",
"0.57377857",
"0.5737295",
"0.57304716",
"0.57247895",
"0.57215583",
"0.5688993",
"0.56795055",
"0.56613225",
"0.56545895"
] | 0.61928725 | 1 |
Pass through to provider supports_catalog_lookup | def supports_catalog_lookup(self):
# Implemented from kitosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_catalog_lookup() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def supports_catalog_lookup(self):\n return False",
"def can_lookup_catalogs(self):\n # Implemented from kitosid template for -\n # osid.resource.BinLookupSession.can_lookup_bins_template\n return self._get_provider_session('catalog_lookup_session').can_lookup_catalogs()",
"def supports_catalog(self):\n return False",
"def supports_catalog_search(self):\n return False",
"def supports_catalog_query(self):\n # Implemented from kitosid template for -\n # osid.resource.ResourceProfile.supports_resource_lookup\n return self._provider_manager.supports_catalog_query()",
"def supports_catalog_query(self):\n return False",
"def supports_catalog_admin(self):\n # Implemented from kitosid template for -\n # osid.resource.ResourceProfile.supports_resource_lookup\n return self._provider_manager.supports_catalog_admin()",
"def get_catalog_lookup_session(self):\n raise Unimplemented()",
"def can_lookup_books(self):\n # Implemented from template for\n # osid.resource.BinLookupSession.can_lookup_bins\n # NOTE: It is expected that real authentication hints will be\n # handled in a service adapter above the pay grade of this impl.\n if self._catalog_session is not None:\n return self._catalog_session.can_lookup_catalogs()\n return True",
"def supports_catalog_hierarchy(self):\n # Implemented from kitosid template for -\n # osid.resource.ResourceProfile.supports_resource_lookup\n return self._provider_manager.supports_catalog_hierarchy()",
"def get_catalog_lookup_session(self, *args, **kwargs):\n raise Unimplemented('Unimplemented in dlkit.services - args=' + str(args) + ', kwargs=' + str(kwargs))",
"def lookup():",
"def can_lookup_families(self):\n # Implemented from template for\n # osid.resource.BinLookupSession.can_lookup_bins\n # NOTE: It is expected that real authentication hints will be\n # handled in a service adapter above the pay grade of this impl.\n if self._catalog_session is not None:\n return self._catalog_session.can_lookup_catalogs()\n return True",
"def getCatalogs():",
"def can_search_catalogs(self):\n # Implemented from kitosid template for -\n # osid.resource.BinQuerySession.can_search_bins_template\n return self._get_provider_session('catalog_query_session').can_search_catalogs()",
"def get_catalog(self) -> Dict[str, str]:\n return self.catalog",
"def getCatalog(unique_name):",
"def lookup(self, **kwargs):\n raise NotImplementedError()",
"def supports_catalog_admin(self):\n return False",
"def supports_book_lookup(self):\n return False",
"def get_catalog_lookup_session(self, *args, **kwargs):\n # Implemented from kitosid template for -\n # osid.resource.ResourceManager.get_resource_lookup_session_manager_template\n return self._provider_manager.get_catalog_lookup_session(*args, **kwargs)",
"def get_catalog(self, *args, **kwargs):\n # Implemented from kitosid template for -\n # osid.resource.BinLookupSession.get_bin\n return Catalog(\n self._provider_manager,\n self._get_provider_session('catalog_lookup_session').get_catalog(*args, **kwargs),\n self._runtime,\n self._proxy)",
"def test_get_hyperflex_app_catalog_list(self):\n pass",
"def catalog_exists(self, args):\n catalog = self.server.connect_ermrest(self.id)\n pp(catalog.exists())",
"def init_catalog():\n return controller.init_catalog()",
"def lookup(self, *args, **kwargs): # real signature unknown\n pass",
"def lookup(self, *args, **kwargs): # real signature unknown\n pass",
"def test_get_hyperflex_app_catalog_by_moid(self):\n pass",
"def get_catalogs_by_provider(self, *args, **kwargs):\n # Implemented from kitosid template for -\n # osid.resource.BinLookupSession.get_bins_by_provider\n catalogs = self._get_provider_session('catalog_lookup_session').get_catalogs_by_provider(*args, **kwargs)\n cat_list = []\n for cat in catalogs:\n cat_list.append(Catalog(self._provider_manager, cat, self._runtime, self._proxy))\n return CatalogList(cat_list)",
"def catalogResolve(pubID, sysID):\n ret = libxml2mod.xmlCatalogResolve(pubID, sysID)\n return ret"
] | [
"0.8455877",
"0.74303055",
"0.73249394",
"0.7083447",
"0.7081553",
"0.66690326",
"0.6332474",
"0.6311813",
"0.62990963",
"0.6292647",
"0.6251142",
"0.6241391",
"0.6225812",
"0.62206495",
"0.61305606",
"0.61151016",
"0.60950756",
"0.6028557",
"0.6013458",
"0.5988211",
"0.594307",
"0.59135306",
"0.58917266",
"0.5869275",
"0.58095455",
"0.5796279",
"0.5796279",
"0.57260484",
"0.56818384",
"0.5653846"
] | 0.81521475 | 1 |
Pass through to provider supports_catalog_query | def supports_catalog_query(self):
# Implemented from kitosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_catalog_query() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def supports_catalog_query(self):\n return False",
"def supports_catalog_search(self):\n return False",
"def supports_catalog_lookup(self):\n return False",
"def can_search_catalogs(self):\n # Implemented from kitosid template for -\n # osid.resource.BinQuerySession.can_search_bins_template\n return self._get_provider_session('catalog_query_session').can_search_catalogs()",
"def supports_catalog(self):\n return False",
"def supports_catalog_lookup(self):\n # Implemented from kitosid template for -\n # osid.resource.ResourceProfile.supports_resource_lookup\n return self._provider_manager.supports_catalog_lookup()",
"def supports_book_query(self):\n return False",
"def get_catalog_query(self):\n # Implemented from kitosid template for -\n # osid.resource.BinQuerySession.get_bin_query_template\n return self._get_provider_session('catalog_query_session').get_catalog_query()",
"def query(self, query):",
"def query(self):\r\n raise NotImplementedError",
"def get_catalog_query_session(self, *args, **kwargs):\n raise Unimplemented('Unimplemented in dlkit.services - args=' + str(args) + ', kwargs=' + str(kwargs))",
"def get_catalog_query_session(self):\n raise Unimplemented()",
"def is_query_supported(request):\n return (\n request.cfg.cvsdb.enabled\n and request.pathtype == vclib.DIR\n and request.roottype in [\"cvs\", \"svn\"]\n )",
"def _get_catalog_results(self, featured=False, **kw):\n if 'context' in kw.keys():\n kw['path'] = {'query': '/'.join(kw['context'].getPhysicalPath())}\n\n types = ('Article', 'Blog Entry', )\n states = ('published', )\n sort = 'Date'\n \n results = self.qrymethod(portal_type=types,\n review_state=states,\n is_featured=featured,\n sort_on=sort, \n sort_order='descending',\n **kw)\n\n return results",
"def can_lookup_catalogs(self):\n # Implemented from kitosid template for -\n # osid.resource.BinLookupSession.can_lookup_bins_template\n return self._get_provider_session('catalog_lookup_session').can_lookup_catalogs()",
"def supports_catalog_admin(self):\n return False",
"def make_query(self):",
"def _custom_filter(self, query):\r\n return query",
"def query(self):",
"def supports_catalog_admin(self):\n # Implemented from kitosid template for -\n # osid.resource.ResourceProfile.supports_resource_lookup\n return self._provider_manager.supports_catalog_admin()",
"def get_resource_query(self):\n pass",
"def get_resource_query(self):\n pass",
"def test_get_hyperflex_app_catalog_list(self):\n pass",
"def get_catalog_query_session(self, *args, **kwargs):\n # Implemented from kitosid template for -\n # osid.resource.ResourceManager.get_resource_lookup_session_manager_template\n return self._provider_manager.get_catalog_query_session(*args, **kwargs)",
"def _run_query(self):",
"def get_catalogs_by_query(self, *args, **kwargs):\n # Implemented from kitosid template for -\n # osid.resource.BinQuerySession.get_bins_by_query_template\n return self._get_provider_session('catalog_query_session').get_catalogs_by_query(*args, **kwargs)",
"def query(self, **kwargs):",
"def query(self):\n pass",
"def _make_query(self):\r\n raise NotImplementedError()",
"def filter_query(self, query, request, resource):\n raise NotImplementedError()"
] | [
"0.8465524",
"0.7281689",
"0.676633",
"0.65632665",
"0.65460175",
"0.64406174",
"0.6404225",
"0.6212126",
"0.60509926",
"0.6010541",
"0.5995206",
"0.59924567",
"0.5951385",
"0.5943247",
"0.59163135",
"0.58023816",
"0.5785838",
"0.57586443",
"0.5726331",
"0.5713541",
"0.5700342",
"0.5700342",
"0.5655762",
"0.5653914",
"0.5645014",
"0.559166",
"0.5585973",
"0.5559739",
"0.5533427",
"0.5511538"
] | 0.8171043 | 1 |
Pass through to provider supports_catalog_admin | def supports_catalog_admin(self):
# Implemented from kitosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_catalog_admin() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def supports_catalog_admin(self):\n return False",
"def supports_catalog(self):\n return False",
"def can_manage_catalog_aliases(self):\n # Implemented from kitosid template for -\n # osid.resource.ResourceAdminSession.can_manage_resource_aliases_template\n return self._get_provider_session('catalog_admin_session').can_manage_catalog_aliases()",
"def can_create_catalogs(self):\n # Implemented from kitosid template for -\n # osid.resource.BinAdminSession.can_create_bins\n return self._get_provider_session('catalog_admin_session').can_create_catalogs()",
"def supports_catalog_lookup(self):\n return False",
"def supports_catalog_search(self):\n return False",
"def supports_catalog_lookup(self):\n # Implemented from kitosid template for -\n # osid.resource.ResourceProfile.supports_resource_lookup\n return self._provider_manager.supports_catalog_lookup()",
"def can_update_catalogs(self):\n # Implemented from kitosid template for -\n # osid.resource.BinAdminSession.can_update_bins\n return self._get_provider_session('catalog_admin_session').can_update_catalogs()",
"def supports_type_admin(self):\n return 'supports_type_admin' in profile.SUPPORTS",
"def supports_catalog_query(self):\n return False",
"def supports_book_admin(self):\n return False",
"def supports_catalog_query(self):\n # Implemented from kitosid template for -\n # osid.resource.ResourceProfile.supports_resource_lookup\n return self._provider_manager.supports_catalog_query()",
"def getAdmin():",
"def can_lookup_catalogs(self):\n # Implemented from kitosid template for -\n # osid.resource.BinLookupSession.can_lookup_bins_template\n return self._get_provider_session('catalog_lookup_session').can_lookup_catalogs()",
"def supports_catalog_assignment(self):\n return False",
"def can_search_catalogs(self):\n # Implemented from kitosid template for -\n # osid.resource.BinQuerySession.can_search_bins_template\n return self._get_provider_session('catalog_query_session').can_search_catalogs()",
"def get_catalog_admin_session(self):\n raise Unimplemented()",
"def supports_catalog_hierarchy(self):\n # Implemented from kitosid template for -\n # osid.resource.ResourceProfile.supports_resource_lookup\n return self._provider_manager.supports_catalog_hierarchy()",
"def test_listing_supplies_admin(self):\n request = self.factory.get(\n '/api/supplies')\n force_authenticate(request, user=self.testadmin)\n response = SupplyListView.as_view()(request)\n # admin can browse the data\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def get_catalog_admin_session(self, *args, **kwargs):\n raise Unimplemented('Unimplemented in dlkit.services - args=' + str(args) + ', kwargs=' + str(kwargs))",
"def admin_only():\n return 'Super-seekrit admin page.'",
"def can_access_catalog_hierarchy(self):\n # Implemented from kitosid template for -\n # osid.resource.BinHierarchySession.can_access_bin_hierarchy\n return self._get_provider_session('catalog_hierarchy_session').can_access_catalog_hierarchy()",
"def can_delete_catalogs(self):\n # Implemented from kitosid template for -\n # osid.resource.BinAdminSession.can_delete_bins\n return self._get_provider_session('catalog_admin_session').can_delete_catalogs()",
"def __init__(self, *args, **kwargs):\n super(EnterpriseCustomerAdminForm, self).__init__(*args, **kwargs)\n\n self.fields['catalog'] = forms.ChoiceField(\n choices=self.get_catalog_options(),\n required=False,\n help_text=\"<a id='catalog-details-link' href='#' target='_blank'\"\n \"data-url-template='{catalog_admin_url}'> View catalog details.</a>\".format(\n catalog_admin_url=utils.get_catalog_admin_url_template(),\n )\n )",
"def is_admin(self):\r\n return self.admin",
"def is_admin(context):\n request = context[\"request\"]\n url = resolve(request.path)\n context['is_admin'] = False\n return url.app_name == 'admin'",
"def admin():\n pass # pragma: no cover",
"def can_modify_catalog_hierarchy(self):\n # Implemented from kitosid template for -\n # osid.resource.BinHierarchyDesignSession.can_modify_bin_hierarchy\n return self._get_provider_session('catalog_hierarchy_design_session').can_modify_catalog_hierarchy()",
"def getCatalogs():",
"def permission(self):\n return \"core.manage_products\""
] | [
"0.8499521",
"0.69093174",
"0.64641404",
"0.6397634",
"0.637139",
"0.63698316",
"0.6339752",
"0.6287508",
"0.6243115",
"0.61334854",
"0.613139",
"0.61088973",
"0.5999095",
"0.58882666",
"0.5790415",
"0.5744554",
"0.5735928",
"0.5668989",
"0.5656294",
"0.5648281",
"0.5647352",
"0.56323576",
"0.56221944",
"0.55831265",
"0.55773586",
"0.5569074",
"0.55604583",
"0.5552206",
"0.5548497",
"0.5533368"
] | 0.8258288 | 1 |
Pass through to provider supports_catalog_hierarchy | def supports_catalog_hierarchy(self):
# Implemented from kitosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_catalog_hierarchy() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def supports_catalog_hierarchy(self):\n return False",
"def supports_catalog_hierarchy_design(self):\n # Implemented from kitosid template for -\n # osid.resource.ResourceProfile.supports_resource_lookup\n return self._provider_manager.supports_catalog_hierarchy_design()",
"def supports_catalog_hierarchy_design(self):\n return False",
"def can_access_catalog_hierarchy(self):\n # Implemented from kitosid template for -\n # osid.resource.BinHierarchySession.can_access_bin_hierarchy\n return self._get_provider_session('catalog_hierarchy_session').can_access_catalog_hierarchy()",
"def can_modify_catalog_hierarchy(self):\n # Implemented from kitosid template for -\n # osid.resource.BinHierarchyDesignSession.can_modify_bin_hierarchy\n return self._get_provider_session('catalog_hierarchy_design_session').can_modify_catalog_hierarchy()",
"def create_catalog_hierarchy(self, *args, **kwargs):\n # Patched in by [email protected], Jul 23, 2014, added by birdland to template on Aug 8, 2014\n # Is not part of specs for catalog hierarchy design sessions, but may want to be in hierarchy service instead\n # Will not return an actual object, just JSON\n # since a BankHierarchy does not seem to be an OSID thing.\n return self._get_provider_session('catalog_hierarchy_design_session').create_catalog_hierarchy(*args, **kwargs)",
"def get_catalog_hierarchy(self):\n # Implemented from kitosid template for -\n # osid.resource.BinHierarchySession.get_bin_hierarchy\n return self._get_provider_session('catalog_hierarchy_session').get_catalog_hierarchy()",
"def supports_book_hierarchy(self):\n return False",
"def get_catalog_hierarchy_session(self, *args, **kwargs):\n raise Unimplemented('Unimplemented in dlkit.services - args=' + str(args) + ', kwargs=' + str(kwargs))",
"def get_catalog_hierarchy_session(self):\n raise Unimplemented()",
"def can_access_book_hierarchy(self):\n # Implemented from template for\n # osid.resource.BinHierarchySession.can_access_bin_hierarchy\n # NOTE: It is expected that real authentication hints will be\n # handled in a service adapter above the pay grade of this impl.\n if self._catalog_session is not None:\n return self._catalog_session.can_access_catalog_hierarchy()\n return True",
"def can_access_family_hierarchy(self):\n # Implemented from template for\n # osid.resource.BinHierarchySession.can_access_bin_hierarchy\n # NOTE: It is expected that real authentication hints will be\n # handled in a service adapter above the pay grade of this impl.\n if self._catalog_session is not None:\n return self._catalog_session.can_access_catalog_hierarchy()\n return True",
"def supports_book_hierarchy_design(self):\n return False",
"def supports_catalog(self):\n return False",
"def supports_catalog_lookup(self):\n return False",
"def get_catalog_hierarchy_session(self, *args, **kwargs):\n # Implemented from kitosid template for -\n # osid.resource.ResourceManager.get_resource_lookup_session_manager_template\n return self._provider_manager.get_catalog_hierarchy_session(*args, **kwargs)",
"def create_hierarchy(self):\n\t\tpass",
"def supports_catalog_lookup(self):\n # Implemented from kitosid template for -\n # osid.resource.ResourceProfile.supports_resource_lookup\n return self._provider_manager.supports_catalog_lookup()",
"def get_catalog_hierarchy_design_session(self, *args, **kwargs):\n raise Unimplemented('Unimplemented in dlkit.services - args=' + str(args) + ', kwargs=' + str(kwargs))",
"def getHierarchies():",
"def getHierarchies():",
"def get_catalog_hierarchy_design_session(self):\n raise Unimplemented()",
"def can_modify_book_hierarchy(self):\n # Implemented from template for\n # osid.resource.BinHierarchyDesignSession.can_modify_bin_hierarchy_template\n # NOTE: It is expected that real authentication hints will be\n # handled in a service adapter above the pay grade of this impl.\n if self._catalog_session is not None:\n return self._catalog_session.can_modify_catalog_hierarchy()\n return True",
"def can_lookup_catalogs(self):\n # Implemented from kitosid template for -\n # osid.resource.BinLookupSession.can_lookup_bins_template\n return self._get_provider_session('catalog_lookup_session').can_lookup_catalogs()",
"def can_modify_family_hierarchy(self):\n # Implemented from template for\n # osid.resource.BinHierarchyDesignSession.can_modify_bin_hierarchy_template\n # NOTE: It is expected that real authentication hints will be\n # handled in a service adapter above the pay grade of this impl.\n if self._catalog_session is not None:\n return self._catalog_session.can_modify_catalog_hierarchy()\n return True",
"def supports_catalog_admin(self):\n # Implemented from kitosid template for -\n # osid.resource.ResourceProfile.supports_resource_lookup\n return self._provider_manager.supports_catalog_admin()",
"def get_catalog_nodes(self, *args, **kwargs):\n # Implemented from kitosid template for -\n # osid.resource.BinHierarchySession.get_bin_nodes\n return self._get_provider_session('catalog_hierarchy_session').get_catalog_nodes(*args, **kwargs)",
"def supports_catalog_search(self):\n return False",
"def supports_catalog_admin(self):\n return False",
"def get_catalog_hierarchy_design_session(self, *args, **kwargs):\n # Implemented from kitosid template for -\n # osid.resource.ResourceManager.get_resource_lookup_session_manager_template\n return self._provider_manager.get_catalog_hierarchy_design_session(*args, **kwargs)"
] | [
"0.8542173",
"0.7900961",
"0.78329295",
"0.77603096",
"0.69218767",
"0.6636653",
"0.660969",
"0.65671676",
"0.64564717",
"0.6446212",
"0.64374375",
"0.64101064",
"0.63792163",
"0.6251547",
"0.61963934",
"0.619614",
"0.60602677",
"0.60574764",
"0.60287905",
"0.5977223",
"0.5977223",
"0.59138507",
"0.59118116",
"0.5910559",
"0.5847496",
"0.5810995",
"0.580837",
"0.5754102",
"0.5734906",
"0.56906694"
] | 0.8349393 | 1 |