| query (stringlengths 9–9.05k) | document (stringlengths 10–222k) | metadata (dict) | negatives (sequencelengths 30–30) | negative_scores (sequencelengths 30–30) | document_score (stringlengths 4–10) | document_rank (stringclasses, 2 values) |
---|---|---|---|---|---|---|
Calculates embeddings from a given dataframe; assumes the dataframe has title and abstract columns | def calculate_embeddings(df, option="lsa", n_papers=MAX_BATCH_SIZE, n_components=30):
assert option in ["lsa", "sent_embed"]
if len(df) < n_components:
print(
"Length of dataframe is less than number of projected components, \
set option to sent_embed instead"
)
option = "sent_embed"
if option == "sent_embed":
print("Download SPECTER model for creating embedding\n")
tokenizer = AutoTokenizer.from_pretrained("allenai/specter")
model = AutoModel.from_pretrained("allenai/specter")
papers = list(df["title"] + "[SEP]" + df["abstract"])
# group papers
embeddings = []
for g in tqdm(chunks(papers, chunk_size=n_papers)):
inputs = tokenizer(
g, padding=True, truncation=True, return_tensors="pt", max_length=512
)
result = model(**inputs)
embeddings.extend(result.last_hidden_state[:, 0, :])
embeddings = [emb.tolist() for emb in embeddings]
paper_embeddings = [
{"submission_id": str(pid), "embedding": embedding}
for pid, embedding in zip(df.submission_id, embeddings)
]
elif option == "lsa":
tfidf_model = TfidfVectorizer(
min_df=3,
max_df=0.85,
lowercase=True,
norm="l2",
ngram_range=(1, 2),
use_idf=True,
smooth_idf=True,
sublinear_tf=True,
stop_words="english",
)
topic_model = TruncatedSVD(n_components=n_components, algorithm="arpack")
papers = (df["title"] + " " + df["abstract"]).map(preprocess)
X_tfidf = tfidf_model.fit_transform(papers)
X_topic = topic_model.fit_transform(X_tfidf)
paper_embeddings = [
{"submission_id": str(pid), "embedding": list(embedding)}
for pid, embedding in zip(df.submission_id, X_topic)
]
else:
print("Please specify option as ``lsa`` or ``sent_embed``")
paper_embeddings = None
return paper_embeddings | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def merge_embeddings(xdf, cat, emb, w, verbose):\n if verbose:\n print(\"embeddings category: '%s' embeddings shape %s\" % (cat, np.shape(w)))\n if cat not in list(xdf.columns):\n print(\"categorical variable '%s' not found in data-frame\" % cat)\n print(type(xdf), np.shape(xdf), list(xdf.columns))\n sys.exit(1)\n\n # perform look-up in our weights matrix for given value of categorical variables\n emd_values = list(xdf[cat].apply(lookup, args=(w,)))\n\n # create vector of names for embeddings columns\n emd_names = ['%s_%s' % (cat, i) for i in range(len(w[0]))]\n\n if verbose:\n print(\"new embeddings columns\", len(emd_names), \"with shape\", np.shape(emd_values))\n emd_df = pd.DataFrame(emd_values, columns=emd_names)\n if verbose:\n print(\"xdf dim\", np.shape(xdf), \"emb dim\", np.shape(emd_df))\n\n # merge emdedded df with our data-frame column-wise, to stick two dataframes\n # together we reset their indexes\n names = list(xdf.columns) + emd_names\n xdf.reset_index(drop=True, inplace=True)\n emd_df.reset_index(drop=True, inplace=True)\n xdf = pd.concat([xdf, emd_df], axis=1, ignore_index=True)\n xdf.columns = names\n\n # drop categorical variable from our data-frame since we replace it with\n # emdedded values\n if cat in list(xdf.columns):\n xdf = xdf.drop(cat, axis=1)\n if verbose:\n print(\"new dimension of dataframe:\", np.shape(xdf), type(xdf))\n return xdf",
"def extract_keywords(df):\n df[\"key_words\"] = \"\"\n\n for index, row in df.iterrows():\n plot = row[\"Plot\"]\n\n rake = Rake()\n\n rake.extract_keywords_from_text(plot)\n\n key_words_dict_scores = rake.get_word_degrees()\n\n row[\"key_words\"] = list(key_words_dict_scores.keys())\n\n df.drop(columns=[\"Plot\"], inplace=True)\n df.set_index(\"Title\", inplace=True)\n\n return df",
"def embed(documents, ctx_encoder, ctx_tokenizer, device):\n input_ids = ctx_tokenizer(\n documents[\"title\"],\n documents[\"text\"],\n truncation=True,\n padding=\"longest\",\n return_tensors=\"pt\",\n )[\"input_ids\"]\n embeddings = ctx_encoder(\n input_ids.to(device=device), return_dict=True\n ).pooler_output\n return {\"embeddings\": embeddings.detach().cpu().numpy()}",
"def text_feature_extract(df):\n return df",
"def merge_embedding_with_zeng_labels(emb_df, zeng_df):\n # get df with gene_symbols and entrez_ids from fetal data (more updated than adult probes data)\n all_genes = pd.read_csv(\n './data/raw/allen_human_fetal_brain/lmd_matrix_12566/rows_metadata.csv')\n all_genes = all_genes[~((all_genes.gene_symbol.str.startswith('A_')) | (\n all_genes.gene_symbol.str.startswith('CUST_')))].gene_symbol.drop_duplicates()\n all_genes_w_entrez = utils.genesymbols_2_entrezids(all_genes)\n\n emb_df = emb_df.add_prefix('emb_')\n df = emb_df.merge(all_genes_w_entrez, left_index=True,\n right_on='gene_symbol')\n df = df.merge(zeng_df, left_on='entrez_id', right_index=True)\n\n return df.set_index(['entrez_id', 'gene_symbol'])",
"def get_embeddings(vectors, text, generate_missing=False, k=300):\r\n embeddings = text.apply(lambda x: get_average_vec(x, vectors, generate_missing=generate_missing, k=k))\r\n return list(embeddings)",
"def generate_initial_embs(emb_type):\n def _get_emb_avg(g, lang):\n \"\"\"Compute the embedding of g as the average of its word embeddings\n :param g: the input genre\n :param lang: language\n :return: the embedding and if all words of this genre are known\n \"\"\"\n emb = np.zeros(emb_dims[lang])\n known_words_count = 0\n words = g.split()\n for w in words:\n if w in models[lang]:\n emb += models[lang][w]\n known_words_count += 1\n emb /= len(words)\n return emb, known_words_count > 0\n\n def _get_emb_wavg(g, lang, a=0.001):\n \"\"\"Compute the embeddings of g with a sentence embedding algorithm (average weighted by the word estimated frequencies)\n :param g: the input genre\n :param lang: language\n :param a: a model hyper-parameter (see Arora et al. in the paper)\n :return: the embedding and if all words of this genre are known\n \"\"\"\n emb = np.zeros(emb_dims[lang])\n known_words_count = 0\n words = g.split()\n for w in words:\n if w in models[lang]:\n emb += a / (a + word_freqs[lang][w]) * models[lang][w]\n known_words_count += 1\n emb /= len(words)\n return emb, known_words_count > 0\n\n def _remove_pc(df_embs, npc=1):\n \"\"\"Remove the pc (see Arora at el. in the paper)\n :param df_embs: the input embeddings\n :return: the normalized embeddings\n \"\"\"\n pc = _compute_pc(df_embs, npc)\n if npc == 1:\n df_embs_out = df_embs - df_embs.dot(pc.transpose()) * pc\n else:\n df_embs_out = df_embs - df_embs.dot(pc.transpose()).dot(pc)\n return df_embs_out\n\n def _compute_pc(df_embs, npc=1):\n \"\"\"Compute the pc (see Arora at el. in the paper)\n :param df_embs: the input embeddings\n :return: the principal component\n \"\"\"\n svd = TruncatedSVD(n_components=npc, n_iter=7, random_state=0)\n svd.fit(df_embs)\n return svd.components_\n\n embs = {}\n known = {}\n for g in G.nodes:\n lang = g[:2]\n norm_g = TagManager.normalize_tag_wtokenization(g, tries[lang], prefixed=True)\n if emb_type == 'avg':\n embs[g], known[g] = _get_emb_avg(norm_g, lang)\n else:\n embs[g], known[g] = _get_emb_wavg(norm_g, lang)\n\n embs = pd.DataFrame(embs).T # the embeddings are columns\n if emb_type == 'sif': # the algorithm imposes a normalization\n norm_embs = _remove_pc(embs.to_numpy())\n embs = pd.DataFrame(norm_embs, columns=embs.columns, index=embs.index)\n return embs, known",
"def generate_embeddings(vae, dataset):\n data = ((torch.unsqueeze(img,0), label) for img, label in dataset)\n data = ((vae.encoder(tens), label) for tens, label in data)\n data = ((vae.codebook(emb),label) for emb, label in data)\n data = ((torch.flatten(img),label) for img, label in data)\n data = (torch.cat([inds,Tensor([label]).int()]) for inds, label in data)\n return data",
"def search(self, column: str, query: Any, top_k: int) -> gp.DataFrame:\n assert self._dataframe._db is not None\n embdedding_info = self._dataframe._db._execute(\n f\"\"\"\n WITH indexed_col_info AS (\n SELECT attrelid, attnum\n FROM pg_attribute\n WHERE \n attrelid = '{self._dataframe._qualified_table_name}'::regclass::oid AND\n attname = '{column}'\n ), reloptions AS (\n SELECT unnest(reloptions) AS option\n FROM pg_class, indexed_col_info\n WHERE pg_class.oid = attrelid\n ), embedding_info_json AS (\n SELECT split_part(option, '=', 2)::json AS val\n FROM reloptions, indexed_col_info\n WHERE option LIKE format('_pygp_emb_%s=%%', attnum)\n ), embedding_info AS (\n SELECT * \n FROM embedding_info_json, json_to_record(val) AS (attnum int4, embedding_relid oid, model text)\n )\n SELECT nspname, relname, attname, model\n FROM embedding_info, pg_class, pg_namespace, pg_attribute\n WHERE \n pg_class.oid = embedding_relid AND\n relnamespace = pg_namespace.oid AND\n embedding_relid = attrelid AND\n pg_attribute.attnum = 2;\n \"\"\"\n )\n row: Row = embdedding_info[0]\n schema: str = row[\"nspname\"]\n embedding_table_name: str = row[\"relname\"]\n model = row[\"model\"]\n embedding_col_name = row[\"attname\"]\n embedding_df = self._dataframe._db.create_dataframe(\n table_name=embedding_table_name, schema=schema\n )\n assert self._dataframe.unique_key is not None\n distance = gp.operator(\"<->\") # L2 distance is the default operator class in pgvector\n return self._dataframe.join(\n embedding_df.assign(\n distance=lambda t: distance(\n embedding_df[embedding_col_name], _generate_embedding(query, model)\n )\n ).order_by(\"distance\")[:top_k],\n how=\"inner\",\n on=self._dataframe.unique_key,\n self_columns={\"*\"},\n other_columns={},\n )",
"def embedd_data(training_data_text, e_arr, e_dict):\n num_samples = len(training_data_text)\n embedded = np.zeros([num_samples, MAX_WORDS_IN_REVIEW, EMBEDDING_SIZE])\n for i in range(num_samples):\n review_mat = np.zeros([MAX_WORDS_IN_REVIEW, EMBEDDING_SIZE])\n # Iterate to either the end of the sentence of the max num of words, whichever is less\n for w in range(min(len(training_data_text[i]), MAX_WORDS_IN_REVIEW)):\n # assign embedding of that word or to the UNK token if that word isn't in the dict\n review_mat[w] = e_arr[e_dict.get(training_data_text[i][w], 0)]\n embedded[i] = review_mat\n return embedded",
"def text_features_df(spark):\n # Replaces formatted text that has already been processed\n FILLER = ''\n # Parser helper column\n COLNAME = 'processed_text'\n COL = col(COLNAME)\n \n # Data loading\n post_history_df = spark.read.parquet(\"/user/***REMOVED***/StackOverflow/PostHistory.parquet\") \\\n .select(['_PostId', '_Text', '_PostHistoryTypeId']) \\\n .filter(col('_PostHistoryTypeId') == 2) \\\n .drop('_PostHistoryTypeId')\n post_df = spark.read.parquet('/user/***REMOVED***/StackOverflow/Posts.parquet') \\\n .select(['_Id', '_PostTypeId']) \\\n .filter(col('_PostTypeId') == 1) \\\n .drop(\"_PostTypeId\")\n df = post_history_df.join(post_df, post_df['_Id'] == post_history_df['_PostId'])\n\n # Remove code snippets from the Markdown formatted text\n df = df.withColumn(COLNAME, regexp_replace(col('_Text'), regex.CODE_BLOCK_RE, FILLER)) \\\n .withColumn(COLNAME, regexp_replace(COL, regex.HTML_BLOCK_RE, FILLER)) \\\n .withColumn(COLNAME, regexp_replace(COL, regex.FENCED_CODE_RE, FILLER)) \\\n .withColumn(COLNAME, regexp_replace(COL, regex.ESCAPE_RE, FILLER)) \\\n .withColumn(COLNAME, regexp_replace(COL, regex.HTML_RE, FILLER))\n\n # Calculate features\n df = df.withColumn('#characters', length(COL)) \\\n .withColumn('#punctuation_characters', size(split(COL, r'[-\\[\\]{}()*+?.,\\\\^$|#]')) - 1) \\\n .withColumn('punctuation_ratio', col('#punctuation_characters') / col('#characters')) \\\n .withColumn('#lines', size(split(COL, r'\\n'))) \\\n .withColumn('average_line_length', col('#characters') / col('#lines')) \\\n .withColumn('#words', size(split(COL, r'\\s+'))) \\\n .withColumn('average_word_length', col('#characters') / col('#words'))\n\n # Remove unnecessary columns, including parser helper column\n df = df.drop('_Text', '_PostHistoryTypeId', '_PostId', COLNAME)\n return df",
"def tokenize_and_explode(args, df):\n df['token'] = df.word.apply(args.tokenizer.tokenize)\n df = df.explode('token', ignore_index=True)\n df['token2word'] = df['token'].apply(\n args.tokenizer.convert_tokens_to_string).str.strip().str.lower()\n df = convert_token_to_idx(df, args.tokenizer)\n df = check_token_is_root(args, df)\n df = add_glove_embeddings(df, dim=50)\n\n return df",
"def get_embeddings(self, data):\n raise NotImplementedError()",
"def process_glove_data(filename):\r\n\r\n word_list = []\r\n embed_list = []\r\n with open(filename,encoding=\"utf8\") as file:\r\n lines = file.readlines()\r\n for line in lines:\r\n toks = line.split(' ')\r\n word_list.append(toks[0])\r\n vec = [float(tok) for tok in toks[1:]]\r\n embed_list.append(vec)\r\n \r\n embed = np.array(embed_list,dtype=float)\r\n embed_df = pd.DataFrame(embed,index=word_list)\r\n embed_df.index = embed_df.index.str.lower()\r\n \r\n return embed_df",
"def preprocess(self, df, maxlen = 169):\n \n vocabs = self.tk.word_index.keys()\n \n df1 = self.treat_na(df)\n df2 = self.remove_punc_sw(df1)\n df3 = self.remove_numbers(df2)\n df4 = self.lemma_pos(df3)\n df5 = self.bigram(df4)\n df6 = self.combine_bigrams(df5)\n \n new_docs = []\n \n for word_list in df6:\n \n if len(word_list) == 2 and word_list[0].lower() == 'noinfo' and word_list[1].lower() == 'noinfo':\n new_docs.append(list(np.zeros(maxlen)))\n \n else:\n new_word_list = []\n for word in word_list:\n if word not in vocabs:\n word = 'UNKNOWN_TOKEN'\n new_word_list.append(word)\n \n sequence = \" \".join(new_word_list)\n vectors = self.tk.texts_to_sequences([sequence])\n padded_vectors = pad_sequences(vectors, maxlen=maxlen, padding='post', truncating='post')\n \n new_docs.append(list(padded_vectors[0]))\n \n return new_docs",
"def generate_features(\n df: pd.DataFrame, spacy_model: str, language: str\n) -> pd.DataFrame:\n logging.info(\"Loading Spacy model...\")\n nlp = spacy.load(spacy_model)\n\n # Makes all tokens lowercase\n logging.info(\"Lowercase\")\n df[\"token_lower\"] = df[\"token\"].str.lower()\n\n logging.info(\"Lemma, pos\")\n spacy_pipe = nlp.pipe(df[\"token_lower\"].values, disable=[\"ner\", \"parser\"])\n features_gen = ((doc[0].lemma_, doc[0].pos_) for doc in spacy_pipe)\n df[\"lemma\"], df[\"pos\"] = zip(*features_gen)\n\n # Prepare stemmers\n logging.info(\"Loading Snowball Stemmer...\")\n snow = SnowballStemmer(language=language)\n\n logging.info(\"Snowball stemmer\")\n df[\"snowballStemmer\"] = df.apply(lambda row: snow.stem(row[\"token_lower\"]), axis=1)\n\n logging.info(\"Loading Porter Stemmer...\")\n port = PorterStemmer()\n\n logging.info(\"Porter stemmer\")\n df[\"porterStemmer\"] = df.apply(lambda row: port.stem(row[\"token_lower\"]), axis=1)\n\n # Adds columns with a binary if the word contains a possible negation prefix or suffix\n logging.info(\"Prefix\")\n df[\"possible_prefix\"] = df.apply(\n lambda row: possible_negation_prefix(row[\"token_lower\"]), axis=1\n )\n\n logging.info(\"Suffix\")\n df[\"possible_suffix\"] = df.apply(\n lambda row: possible_negation_suffix(row[\"token_lower\"]), axis=1\n )\n\n # Adds new columns for the previous and next lemma and pos-tag\n logging.info(\"Add prev/next shifts\")\n df[\"prev_Lemma\"] = df[\"lemma\"].shift(periods=1)\n df[\"next_Lemma\"] = df[\"lemma\"].shift(periods=-1)\n df[\"prev_pos\"] = df[\"pos\"].shift(periods=1)\n df[\"next_pos\"] = df[\"pos\"].shift(periods=-1)\n return df",
"def embed(self, sequence):\n words = sequence.split(' ')\n vecs = [self._E[self._w2i[i]] if i in self._w2i else self._E[self._w2i[\"UNK\"]]\n for i in words]\n return vecs",
"def vectorize_text(df: pd.DataFrame):\n # Creating a stop_words list set that are common to many questions.\n common_phrases = [\n 'read the sentence from the passage',\n 'which of the following best describes',\n 'which is the best one sentence * for the section',\n 'which sentence from the passage provides the most evidence'\n 'select the sentence that does not support the central idea of the article',\n 'supports the main idea',\n 'select the paragraph from the section that explains how that shows the ',\n 'that is most relevant to be included in the summary of the article',\n 'according to the article',\n 'which of these is not one',\n ]\n stop_words = stopwords.words('english')\n [stop_words.extend(x.split()) for x in common_phrases]\n\n ct_vectorizer = CountVectorizer(token_pattern='\\\\w{3,}',\n max_df=.3,\n min_df=.001,\n stop_words=list(set(stop_words)),\n strip_accents='ascii', # Faster than unicode.\n ngram_range=(1, 3), # Enable uni, bi, trigrams.\n lowercase=True,\n dtype='uint8')\n\n tfidf_vectorizer = TfidfVectorizer(token_pattern='\\\\w{3,}',\n max_df=.3,\n min_df=.001,\n stop_words=list(set(stop_words)),\n strip_accents='ascii', # Faster than unicode.\n ngram_range=(1, 3), # Enable uni, bi, trigrams.\n lowercase=True,\n sublinear_tf=True, # Replace tf with 1 + log(tf).\n smooth_idf=True, # Default 1 doc for each term.\n dtype=np.float32)\n\n # Count & tf-idf vectorization learns vocab and transforms data into matrices.\n ct_vec = ct_vectorizer.fit_transform(np.array(df.text))\n tfidf = tfidf_vectorizer.fit_transform(np.array(df.text))\n # print(\"Shape of ct_vec:\", ct_vec.shape)\n # print('Size of ct_vec:', sys.getsizeof(ct_vec))\n # print(\"Shape of tfidf:\", tfidf.shape)\n # print('Size of tfidf:', sys.getsizeof(tfidf), '\\n')\n\n ct_names = ct_vectorizer.get_feature_names()\n tf_names = tfidf_vectorizer.get_feature_names()\n\n df_cv = pd.concat(\n [df, pd.DataFrame(ct_vec.toarray(), columns=ct_names)],\n axis=1)\n df_tfidf = pd.concat(\n [df, pd.DataFrame(tfidf.toarray(), columns=tf_names)],\n axis=1)\n\n return (\n df_cv,\n ct_vec,\n ct_names,\n df_tfidf,\n tfidf,\n tf_names\n )",
"def get_embeddings() -> tuple:\n # Initialize the model loading Universal Sentense Encoder\n # into a KerasLayer from Kaggle dataset file\n model = tf.keras.Sequential(\n [KerasLayer(encoder_path, input_shape=[], dtype=tf.string,\n output_shape=[512], trainable=False),\n # tf.keras.layers.Layer(512, dtype=tf.float16) # To reduce memory footprint\n ]\n )\n\n train_emb = model.predict(data_train['text'])\n print('Train texts converted into embeddings. Shape:', train_emb.shape)\n\n test_emb = model.predict(data_test['text'])\n print('Test texts converted into embeddings. Shape:', test_emb.shape)\n\n return train_emb, test_emb",
"def get_pretrained_embeddings(source_vocab,embed_df):\r\n \r\n num_tokens = len(source_vocab)\r\n embedding_dim = embed_df.shape[1]\r\n weights = np.zeros((num_tokens,embedding_dim),dtype=np.float32)\r\n \r\n for idx in range(num_tokens):\r\n token = source_vocab.lookup_index(idx)\r\n if token in embed_df.index:\r\n weights[idx,:] = embed_df.loc[token]\r\n else:\r\n weights[idx,:] = np.random.randn(1,embedding_dim)\r\n \r\n embed_tensor = torch.FloatTensor(weights)\r\n return embed_tensor",
"def create_embedding_matrix(self):\n self.id2word = dict([(self.vocab[word]['id'], word) for word in self.vocab])\n vocab_size = len(self.vocab)\n result = np.zeros((vocab_size, self.embed_dim))\n unknown_token_set = set()\n\n found_words = 0\n avg = np.zeros(self.embed_dim)\n for _ in range(1, vocab_size): # skip PAD embedding (initialize as zero embedding)\n try:\n result[_] = self.pretrained_embedding[self.id2word[_]]\n avg += result[_]\n found_words += 1\n except:\n unknown_token_set.add(_)\n\n avg /= found_words\n for _ in unknown_token_set:\n result[_] = avg\n self.embedding = result\n return found_words, len(self.id2word)",
"def get_embeddings_from_raw_image(recognizer, frame):\n # detect faces, original frame and face boxes\n a, boxes = detector.find_face(frame, recognizer.ort_session, recognizer.input_name)\n\n\n # align facesq\n aligned_faces = []\n boxes[boxes < 0] = 0\n for i in range(boxes.shape[0]):\n box = boxes[i, :]\n f = np.copy(frame)\n y = align_face(recognizer, f, box)\n aligned_faces.append(y)\n\n embs = []\n for i in range(boxes.shape[0]):\n aligned_face = aligned_faces[i]\n embs.append(face_embedding(recognizer, aligned_face))\n\n return embs",
"def ETL(df, col_name= 'headline', class_col_name='category', tok_col_name='tok'):\n # Primer paso: Tokenizacion, stopwords y stemming\n df_tok_clean = tokenization_stopwords_stemming(df, col_name=col_name, tok_col_name=tok_col_name)\n \n # Segundo paso: Crear texto liquido del dataframe limpio\n df_liquid_text = to_liquid_text(df_tok_clean, class_col_name=class_col_name, tok_col_name=tok_col_name)\n \n return df_tok_clean,df_liquid_text",
"def _calculate_similarities(self) -> pd.DataFrame:\n\n df_encoded_articles = self._db_connection.get_dataframe(\n table_name='tfidf_representation',\n schema='encoded_articles'\n ).set_index('id')\n\n # Pandas loads the array column 'encoded' as a string e.g. \"[0.0, 0.6, 0.8]\" which needs translating to an array\n encoded_representations = np.array(df_encoded_articles['encoded'].tolist())\n\n return pd.DataFrame(\n index=df_encoded_articles.index,\n columns=df_encoded_articles.index,\n data=pairwise.cosine_similarity(encoded_representations)\n )",
"def _embedding_func(self, text: str, *, engine: str) -> List[float]:\n # handle large input text\n if self.embedding_ctx_length > 0:\n return self._get_len_safe_embeddings([text], engine=engine)[0]\n else:\n # replace newlines, which can negatively affect performance.\n text = text.replace(\"\\n\", \" \")\n return embed_with_retry(self, input=[text], engine=engine)[\"data\"][0][\n \"embedding\"\n ]",
"def predict(self, df):\n results = [] \n _ds = pdfds.DataFrameDataset(df, self.fields) \n _iter = BucketIterator(_ds, batch_size=16, sort_key=lambda x: len(x.text),\n train=False, sort=True, sort_within_batch=True)\n self.odel.eval()\n with torch.no_grad():\n for (labels, text), _ in _iter:\n labels = labels.type(torch.LongTensor)\n text = text.type(torch.LongTensor)\n _, output = self.model(text, labels)\n sm = torch.nn.Softmax(dim=1)\n results.extend( sm(output).tolist()[1] )\n return results",
"def embedding(self, seqs):\n batch_size, seqlen = seqs.shape\n seqs = np.reshape(seqs, (-1)) # convert to 1-d indexes [(batch_sz*seqlen)]\n embs = self.word2vec[seqs] # lookup [(batch_sz*seqlen) x emb_sz]\n embs = np.reshape(embs, (batch_size, seqlen, -1)) # recover the shape [batch_sz x seqlen x emb_sz]\n return embs",
"def apply(self, df):\n encoded = []\n for feature_name, encoder in zip(self.feature_names, self.encoders):\n column = df[feature_name].to_numpy().reshape(-1, 1)\n encoded.append(pd.DataFrame(\n encoder.transform(column).todense(),\n index=df.index,\n columns=encoder.categories_[0]\n ))\n df = df.drop(columns=self.feature_names)\n df = pd.concat((df, *encoded), axis=1)\n return df",
"def merge_embedding_with_GO_labels(emb_df, GO_df):\n # get df with gene_symbols and entrez_ids from fetal data (more updated than adult probes data)\n all_genes = pd.read_csv(\n './data/raw/allen_human_fetal_brain/lmd_matrix_12566/rows_metadata.csv')\n all_genes = all_genes[~((all_genes.gene_symbol.str.startswith('A_')) | (\n all_genes.gene_symbol.str.startswith('CUST_')))].gene_symbol.drop_duplicates()\n all_genes_w_entrez = utils.genesymbols_2_entrezids(all_genes)\n\n emb_df = emb_df.add_prefix('emb_')\n df = emb_df.merge(all_genes_w_entrez, left_index=True,\n right_on='gene_symbol')\n df = df.merge(GO_df, left_on='entrez_id', right_index=True)\n\n return df.set_index(['entrez_id', 'gene_symbol'])",
"def get_data(path: str) -> pd.DataFrame:\n df = pd.read_csv(path)\n\n # Cleaned text (lowercase, punctuation removed)\n df['text'] = df['excerpt'].apply(preprocess_text)\n\n # Features based on text excerpts\n df['n_chars'] = df['text'].apply(len) # Total number of characters without punctuation\n df['n_words'] = df['text'].apply(lambda s: len(s.split(' '))) # Total number of words\n # Number of sentences, punctuation signs, quotes and vowels\n tmp = df['excerpt'].apply(process_characters)\n df[['n_sent', 'punct_signs', 'quotes', 'n_vowels', 'n_digits']] = tmp.apply(pd.Series)\n df['quotes_per_sent'] = df['quotes'] / df['n_sent'] # Average number of quotes per sentence\n df['signs_per_sent'] = df['punct_signs'] / df['n_sent'] # Average number of signs per sentence\n df['words_per_sent'] = df['n_words'] / df['n_sent'] # Average number of words per sentence\n df['chars_per_sent'] = df['n_chars'] / df['n_sent'] # Average number of characters per sentence\n df['chars_per_word'] = df['n_chars'] / df['n_words']\n df['vowels_per_word'] = df['n_vowels'] / df['n_words'] # Average number of vowels per word\n df['vowels_ratio'] = df['n_vowels'] / df['n_chars'] # Number of vowels to the total number of characters\n df['n_unique'] = df['text'].apply(lambda x: set(x).__len__()) # Number of unique words\n df['unique_ratio'] = df['n_unique'] / df['n_words'] # Number of unique words to total n_words\n # Sounds and combinations difficult to pronounce\n tmp = df['text'].apply(difficult_sounds)\n df[['vowels_comb', 'consonants_comb', 'ths']] = tmp.apply(pd.Series)\n df['difficult'] = df[['vowels_comb', 'consonants_comb', 'ths']].sum(axis=1)\n\n return df"
] | [
"0.60098046",
"0.58753",
"0.5867118",
"0.5841953",
"0.5804004",
"0.57337356",
"0.5729218",
"0.57054985",
"0.5680929",
"0.563684",
"0.5633486",
"0.56218636",
"0.560861",
"0.55656105",
"0.5543683",
"0.55378395",
"0.55248564",
"0.54824895",
"0.5471733",
"0.5426145",
"0.54256785",
"0.540588",
"0.53964484",
"0.5394374",
"0.53939235",
"0.5387856",
"0.5375031",
"0.5363384",
"0.5350194",
"0.53394663"
] | 0.6808147 | 0 |
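A minimal usage sketch for the `calculate_embeddings` document above. It assumes the surrounding module already imports `AutoTokenizer`/`AutoModel` (transformers), `TfidfVectorizer`/`TruncatedSVD` (scikit-learn), and `tqdm`; the `MAX_BATCH_SIZE`, `chunks`, and `preprocess` names below are stand-ins for helpers the snippet expects but does not define.

```python
# Hypothetical globals/helpers assumed by calculate_embeddings (names are
# placeholders; the original module presumably defines its own versions).
import pandas as pd

MAX_BATCH_SIZE = 16

def chunks(items, chunk_size=MAX_BATCH_SIZE):
    # Yield successive chunk_size-sized slices of a list.
    for i in range(0, len(items), chunk_size):
        yield items[i:i + chunk_size]

def preprocess(text):
    # Placeholder text cleanup used by the LSA path.
    return text.lower()

# ... define calculate_embeddings as shown in the document above ...

df = pd.DataFrame({
    "submission_id": [1, 2, 3],
    "title": ["Paper A", "Paper B", "Paper C"],
    "abstract": ["First abstract.", "Second abstract.", "Third abstract."],
})

# With only 3 rows (fewer than n_components) the function falls back from
# LSA to SPECTER sentence embeddings.
paper_embeddings = calculate_embeddings(df, option="lsa", n_components=30)
print(len(paper_embeddings))  # 3 dicts of {"submission_id", "embedding"}
```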
Uses the unicode of an input kanji to find the corresponding stroke order gif in mistval's collection | def get_gif_uri(kanji):
fileName = kanji.encode("unicode-escape").decode("utf-8").replace("\\u", '') + '.gif'
animationUri = f'https://raw.githubusercontent.com/mistval/kanji_images/master/gifs/{fileName}'
return animationUri | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def singleglyph(x):\n return [glyph(x)]",
"def lcd_string_kana(self, message, line):\n codes = u'線線線線線線線線線線線線線線線線 '\\\n u' !\"#$%&()*+,-./0123456789:;<=>?@ABCDEFG'\\\n u'HIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{'\\\n u'|}→← '\\\n u' 。「」、・ヲァィゥェォャュョッーア'\\\n u'イウエオカキクケコサシスセソタチツテトナニヌネノハヒ'\\\n u'フヘホマミムメモヤユヨラリルレロワン゛゜αäβεμσρq√陰ι'\\\n u'×¢£nöpqθ∞ΩüΣπxν千万円÷ 塗'\n dic ={u'ガ':u'カ゛',u'ギ':u'キ゛',u'グ':u'ク゛',\\\n u'ゲ':u'ケ゛',u'ゴ':u'コ゛',u'ザ':u'サ゛',\\\n u'ジ':u'シ゛',u'ズ':u'ス゛',u'ゼ':u'セ゛',\\\n u'ゾ':u'ソ゛',u'ダ':u'タ゛',u'ヂ':u'チ゛',\\\n u'ヅ':u'ツ゛',u'デ':u'テ゛',u'ド':u'ト゛',\\\n u'バ':u'ハ゛',u'ビ':u'ヒ゛',u'ブ':u'フ゛',\\\n u'ベ':u'ヘ゛',u'ボ':u'ホ゛',u'パ':u'ハ゜',\\\n u'ピ':u'ヒ゜',u'プ':u'フ゜',u'ペ':u'ヘ゜',\\\n u'ポ':u'ホ゜',u'℃':u'゜C'}\n\n self.lcd_byte(line, LCD_CMD)\n message2 = ''\n for i in range(len(message)):\n if (message[i] in dic.keys()):\n message2 += dic[message[i]]\n else:\n message2 += message[i]\n\n for i in range(len(message2)):\n if message2[i] == ' ':\n self.lcd_byte(ord(message2[i]), LCD_CHR)\n elif (codes.find(message2[i]) >= 0):\n self.lcd_byte(codes.find(message2[i]) + 1, LCD_CHR)\n elif (codes_han.find(message2[i]) >= 0):\n self.lcd_byte(codes.find(message2[i]) + 1, LCD_CHR)\n elif (message2[i] != u' '):\n self.lcd_byte('?', LCD_CHR)",
"def gk_g_checker(self, seq):\n seq = re.sub(r'гк', r'хк', seq)\n return seq",
"def db_kanji(selection=None, level=None, kanji=None):\n\n if request.method == 'POST':\n selection = request.form.split(' ')[0]\n level = request.form.split(' ')[1]\n kanji = request.form.split(' ')[2]\n\n kanji_query = \"\"\"SELECT kanj, von, vkun, transl, roma FROM info WHERE kanj = '%s'\"\"\" % kanji\n res_string = get_results(kanji_query)\n\n list_query = 'SELECT kanj FROM info WHERE %s = %s' % (selection, level)\n kanji_string = get_results(list_query)\n\n kanji_list = [l for l in kanji_string]\n current_pos = kanji_list.index(res_string[0])\n\n try:\n next_kanji = kanji_list[current_pos + 1]\n except IndexError:\n next_kanji = kanji_list[0]\n\n try:\n prev_kanji = kanji_list[current_pos - 1]\n except IndexError:\n prev_kanji = kanji_list[-1]\n\n return render_template('flashcard.html',\n res_string=res_string,\n k_list=kanji_list,\n u_level=level,\n u_selection=selection,\n c_kanji=kanji,\n p_kanji=prev_kanji,\n n_kanji=next_kanji\n )",
"def ascii2glyph(ascii_glyph):\n\n result = []\n for line in ascii_glyph:\n b = 0x00\n for char in line:\n b = b << 1\n if char in INK_CHARS:\n b |= 0x01\n\n result.append(b)\n return result",
"def get_cruftmoji(percentage):\n level = [\n # Master\n (\"\\xf0\\x9f\\x99\\x8f \\xf0\\x9f\\x8d\\xbb \\xf0\\x9f\\x8d\\x95 \\xf0\\x9f\\x91\\xbe \"\n \"\\xf0\\x9f\\x8d\\x95 \\xf0\\x9f\\x8d\\xbb \\xf0\\x9f\\x99\\x8f\"),\n # Snakes on a Plane\n \"\\xf0\\x9f\\x90\\x8d \\xf0\\x9f\\x90\\x8d \\xe2\\x9c\\x88\\xef\\xb8\\x8f\",\n # Furry Hat Pizza Party\n \"\\xf0\\x9f\\x8d\\x95 \\xf0\\x9f\\x92\\x82 \\xf0\\x9f\\x8d\\x95\",\n \"\\xf0\\x9f\\x91\\xbb\", # Ghost\n \"\\xf0\\x9f\\x92\\xa3\", # The Bomb\n \"\\xf0\\x9f\\x90\\xa9 \\xf0\\x9f\\x92\\xa8\", # Poodle Fart\n \"\\xf0\\x9f\\x92\\x80\", # Skull\n \"\\xf0\\x9f\\x93\\xbc\", # VHS Cassette\n \"\\xf0\\x9f\\x8c\\xb5\", # Cactus\n \"\\xf0\\x9f\\x92\\xa9\", # Smiling Poo\n \"\\xf0\\x9f\\x92\\xa9 \" * 3] # Smiling Poo (For 100%)\n return level[int(percentage * 10)].decode(\"utf-8\")",
"def get_kanji_data(self, kanji):\n \n print \"Fetching data for Kanji \" + str(kanji)\n \n base_url = \"http://kanji.koohii.com/study/kanji/\"\n response = self.br.open(base_url + str(kanji))\n soup = BeautifulSoup(response.get_data(), \"html.parser\")\n\n # Extract the Kanji\n kanji = soup.find(\"span\", {\"class\":\"cj-k\"}).contents[0]\n\n # Extract Keyword\n keyword = soup.find(\"span\", {\"class\":\"JSEditKeyword\"}).contents[0]\n\n # Extract On Reading\n on = soup.findAll(\"span\",{\"class\":\"cj-k\"})\n try:\n on = on[1].contents[0]\n except IndexError:\n on = \"None\"\n\n # Extract Story\n story = soup.find(\"div\",{\"id\":\"sv-textarea\"})\n\n return [kanji, keyword, on, story]",
"def search_kanjis_in_words(input) :\n j = _jpdb()\n _input = _process_search_input(input)\n if not _input or not japanese_helpers.is_word_japanese(_input) : return None\n kanjis_in_word = japanese_helpers.list_kanjis(input)\n kanjis_in_db = [kanji for kanji in kanjis_in_word \n if j.check_kanjis_existence(kanji)]\n diff = set(kanjis_in_word) - set(kanjis_in_db)\n if diff : \n log.error('following kanjis not found in DB : ')\n for kanji in diff : \n log.error(kanji)\n return kanjis_in_db",
"def kata2hira(kata):\n hira = [ hiragana_value(x) for x in kata.decode(\"utf-8\") ]\n return \"\".join(hira).encode(\"utf-8\")",
"def hira2kata(hira):\n kata = [ katakana_value(x) for x in hira.decode(\"utf-8\") ]\n return \"\".join(kata).encode(\"utf-8\")",
"def uCSIsKatakana(code):\n ret = libxml2mod.xmlUCSIsKatakana(code)\n return ret",
"def bb_flag(hit):\n flagcode = hit.group(1)\n flag = flagcode.lower().encode('ascii', 'ignore')\n \n if os.path.isfile(os.path.join(settings.DOCUMENT_ROOT, \"flags\", \"%s.png\" % flag)):\n return \"<img src='%sflags/%s.png' class='countryflag' alt='flag' title='%s' />\" % (settings.MEDIA_URL, flag, flag)\n \n # No flag image found, so default to Necta flag\n return \"<img src='%sflags/nectaflag.png' class='countryflag' title='flag' />\" % (settings.MEDIA_URL)",
"def findPronunciation(data: str, word: str) -> str:\n # 2 cases\n # if kotoba, then pronunciation will start with a ?\n # if kanji, then multiple pronunciation starting from 〗\n # for kotoba\n # showMessage(len(word))\n\n if len(word) != 1:\n string = '?'\n\n else:\n # locate 〗\n start = find(data, '】')\n # get the first index\n start = min(start)\n\n # now check to see if string after 〗is (\n if data[start+1] == \"(\":\n string = \")\"\n else:\n string = \"】\"\n\n\n # find the indices for start and end\n indStart = find(data, string)\n indEnd = find(data, '\\n')\n # we can assume the first index is the correct index for indStart\n indStart = indStart[0]\n # now find the closest index that is larger than indStart\n possibleIndEnd = [ind for ind in indEnd if ind > indStart]\n absolute_difference_function = lambda list_value: abs(list_value - indStart)\n indEnd = min(possibleIndEnd, key=absolute_difference_function)\n\n # get pronunciation\n pronunciation = data[indStart+1:indEnd]\n\n # lastly, get rid of spaces\n pronunciation = pronunciation.replace(\" \", \"\")\n\n # if kanji, we need to separate into 2 sections\n if len(word) == 1:\n # for onyomi\n # find all strings that are katakana\n regex = {\"from\": ord(u\"\\u30a0\"), \"to\": ord(u\"\\u30ff\")}\n kata = [regex[\"from\"] <= ord(pronunciation[i]) <= regex[\"to\"] for i in range(len(pronunciation))]\n # find all the places that are listed as TRUE\n indOn = find(kata, True)\n # now find min and max of the indices\n indStart = min(indOn)\n indEnd = max(indOn)\n onyomi = pronunciation[indStart:indEnd+1]\n # lastly, replace any ・ with ,\n onyomi.replace('・', '、')\n\n # for kunyomi\n # find all strings that are hiragana\n regex = {'from': ord(u'\\u3040'), 'to': ord(u'\\u309f')}\n hira = [regex[\"from\"] <= ord(pronunciation[i]) <= regex[\"to\"] for i in range(len(pronunciation))]\n # find all the places that are listed as TRUE\n indHi = find(hira, True)\n # now find min and max of the indices\n indStart = min(indHi)\n indEnd = max(indHi)\n kunyomi = pronunciation[indStart:indEnd+1]\n # lastly, replace any ・ with ,\n kunyomi.replace('・', '、')\n\n # lastly combine the 2 strings\n pronunciation = f\"[音] {onyomi}\\n[訓] {kunyomi}\"\n\n\n return pronunciation",
"def weather_icon(weather_category):\n if 200 <= weather_category <= 299:\n weather_category = 200\n elif 300 <= weather_category <= 399:\n weather_category = 300\n elif 500 <= weather_category <= 599:\n weather_category = 500\n elif 600 <= weather_category <= 699:\n weather_category = 600\n elif 700 <= weather_category <= 799:\n weather_category = 700\n\n icons = {\n 200: b'\\xe2\\x9b\\x88', # Thunderstorm\n 300: b'\\xf0\\x9f\\x8c\\xa6', # Drizzle\n 500: b'\\xf0\\x9f\\x8c\\xa7', # Rain\n 600: b'\\xf0\\x9f\\x8c\\xa8', # Snow\n 700: b'\\xf0\\x9f\\x92\\xa8', # Atmosphere (Smoke, Haze, Fog, etc.)\n 800: b'\\xe2\\x98\\x80\\xef\\xb8\\x8f', # Clear\n 801: b'\\xe2\\x9b\\x85', # Few Clouds\n 802: b'\\xe2\\x9b\\x85\\xef\\xb8\\x8f', # Scattered Clouds\n 803: b'\\xe2\\x98\\x81\\xef\\xb8\\x8f', # Broken Clouds\n 804: b'\\xe2\\x98\\x81\\xef\\xb8\\x8f', # Overcast Clouds\n }\n\n value = icons.get(weather_category).decode('utf-8')\n\n return value",
"def label(mi_, ma_):\n\treturn \"caractères Unicode des points de code {} à {}\".format(mi_, ma_)",
"def jpg_jump(file_map):\r\n match_list = []\r\n jpg_header = re.compile(b'(?s)(\\xff\\xd8\\xff\\xe0|\\xff\\xd8\\xff\\xe1)')\r\n sof = re.compile(b'(?s)(\\xff\\xc0|\\xff\\xc2)')\r\n sos = re.compile(b'(?s)\\xff\\xda')\r\n jpg_footer = re.compile(b'(?s)\\xff\\xd9')\r\n for match in jpg_header.finditer(file_map):\r\n end_header = match.end()\r\n end_footer = jpg_footer.search(file_map, end_header).end()\r\n start_sof = sof.search(file_map, end_header, end_footer).start()\r\n end_sos_pointer = sos.search(file_map, start_sof, end_footer).end()\r\n number_colors_components = int.from_bytes((file_map[end_sos_pointer+2:end_sos_pointer+3]), byteorder='little')\r\n start_sos_data = end_sos_pointer + 3 + (number_colors_components * 2)\r\n pattern_start_spot = start_sos_data + 5\r\n data = file_map[pattern_start_spot:end_footer]\r\n jump_size = pattern_id(data)\r\n prefix = file_map[start_sof:pattern_start_spot].hex()\r\n unique_bytes = file_map[pattern_start_spot + jump_size: pattern_start_spot + jump_size + 84].hex()\r\n if jump_size == 0:\r\n match_list.append(prefix + unique_bytes)\r\n else:\r\n jump = \" [ {} ] \".format(jump_size)\r\n match_list.append(prefix + jump + unique_bytes)\r\n return match_list",
"def testUnicodeValue(glyph):\n font = wrapFont(glyph.font)\n layer = font.getLayer(glyph.layer.name)\n glyph = layer[glyph.name]\n report = []\n uni = glyph.unicode\n name = glyph.name\n # test for uniXXXX name\n m = uniNamePattern.match(name)\n if m is not None:\n uniFromName = m.group(1)\n uniFromName = int(uniFromName, 16)\n if uni != uniFromName:\n report.append(\"The Unicode value for this glyph does not match its name.\")\n # test against AGLFN\n else:\n expectedUni = AGL2UV.get(name)\n if expectedUni != uni:\n report.append(\"The Unicode value for this glyph may not be correct.\")\n # look for duplicates\n if uni is not None:\n duplicates = []\n for name in sorted(font.keys()):\n if name == glyph.name:\n continue\n other = font[name]\n if other.unicode == uni:\n duplicates.append(name)\n if duplicates:\n report.append(\"The Unicode for this glyph is also used by: %s.\" % \" \".join(duplicates))\n return report",
"def _translate(self):\r\n\r\n for place, pseudo_binary in self.letters.items():\r\n for letter in self.alphabet:\r\n\r\n with open(os.path.join(self.training_data_folder, letter + '.json'), 'r', encoding = 'utf-8') as js:\r\n data = json.loads(js.read())\r\n\r\n if pseudo_binary in data:\r\n self.result[place] = letter\r\n break\r\n\r\n else:\r\n self.result[place] = '-'\r\n\r\n if not self.devmode:\r\n return 'Not solved'\r\n\r\n return ''.join(self.result.values())",
"def convert_gif(ctx):\n ctx.run(\n 'ffmpeg '\n '-i resources/demo.mkv -filter_complex \"[0:v] palettegen\" '\n 'resources/palette.png',\n pty=True\n )\n ctx.run(\n 'ffmpeg -i resources/demo.mkv '\n '-i resources/palette.png '\n '-filter_complex \"[0:v][1:v] paletteuse\" '\n 'resources/demo.gif',\n pty=True\n )",
"def KingWen(font, hexid):\n\n ret = []\n ch = 0x4DC0 + hexid - 1\n exec 'char = u\\'\\u%4x\\'' % ch\n lines = GetFontPixels(font, char, 'R', ' ')\n ret.append(' ' * 8)\n for line in lines[1::2]:\n hex_chars = ''.join(line[1:-1])\n ret.append(hex_chars[1:-1])\n ret.append(' ' * 8)\n return ret",
"def uCSIsKatakanaPhoneticExtensions(code):\n ret = libxml2mod.xmlUCSIsKatakanaPhoneticExtensions(code)\n return ret",
"def flag(value):\n flag = value.lower().encode('ascii', 'ignore')\n if os.path.isfile(os.path.join(settings.DOCUMENT_ROOT, \"flags\", \"%s.png\" % flag)):\n return \"<img src='%sflags/%s.png' class='countryflag' alt='flag' title='%s' />\" % (settings.MEDIA_URL, flag, flag)\n \n # No flag image found, return the Necta flag hehe\n return \"<img src='%sflags/nectaflag.png' class='countryflag' alt='flag' />\" % (settings.MEDIA_URL)",
"def handle_emoji_extraction(\n emoji: dict, first_alias: str, path: str, force: bool, real_names: bool\n):\n\n # Extract emoji Unicode value, and format it as an hexadecimal string.\n code = \"\".join(format(ord(char), \"x\") for char in emoji[\"emoji\"])\n\n # Some emojis contain a \"variation selector\" at the end of their Unicode value.\n # VS-15 : U+FE0E || VS-16 : U+FE0F\n code = re.sub(r\"fe0[ef]$\", \"\", code, re.IGNORECASE)\n\n # For \"shrugging\" emojis only (`1f937-*`), we have to replace `200d` by a real hyphen.\n code = re.sub(r\"^(1f937)(?:200d)(.*)$\", r\"\\1-\\2\", code, re.IGNORECASE)\n\n # For \"flags\" emojis only (`1f1??1f1??`), we have to add an extra hyphen...\n code = re.sub(r\"^(1f1)(..)(1f1)(..)$\", r\"\\1\\2-\\3\\4\", code, re.IGNORECASE)\n\n logging.info(\"Inferred %s Unicode value for %s\", code, first_alias)\n\n return download_file(\n url=GITHUB_ASSETS_BASE_URL.format(\"unicode/\" + code),\n path=os.path.join(path, \"unicode\"),\n force=force,\n real_name=(first_alias if real_names else None),\n )",
"def json2mask(txt, mattr, filepath):\n img = np.zeros((2048, 2448, 3),\n dtype=np.uint8)\n info = json.loads(txt)['codes']\n for code in info:\n barcode_area = (slice(code['y0'], code['y1']),\n slice(code['x0'], code['x1']), slice(0, 3))\n leny = barcode_area[0].stop - barcode_area[0].start\n lenx = barcode_area[1].stop - barcode_area[1].start\n img[barcode_area] = 1\n if leny * lenx > (2048 * 2448) / 16: # if barcodearea larger than a\n # 16th of the original image\n return None\n return img",
"def get_icons(delta, delta_delta):\n\n if delta > 0:\n if delta_delta > 0:\n return ('🔴', '⬆️')\n \n if delta_delta == 0:\n return ('🔴', '➡️')\n \n if delta_delta < 0:\n return ('🔴', '↗️')\n\n\n if delta <= 0:\n if delta_delta > 0:\n return ('🟢', '↘️')\n\n if delta_delta == 0:\n return ('🟢', '➡️')\n \n if delta_delta < 0:\n return ('🟢', '⬇️')",
"def __init__(self, height=70, helper=None, limit_per_char=3000, verbose=2,\n allowed_chars=None, is_binary=False, augmentor=None,\n deterministic=True):\n\n\n super(FFGKatakanaGenerator, self).__init__(\n height=height, helper=helper, allowed_chars=allowed_chars,\n is_binary=is_binary, verbose=verbose)\n\n self.fonts = []\n\n # image configuration\n self.limit_per_char = limit_per_char\n self.char_2_imgs = {}\n self.char_choosen = {} # array of index was choosen with this char\n\n self.background_value = 1 if is_binary else 255\n\n self.interpolation = (\n cv2.INTER_NEAREST if self.is_binary else\n cv2.INTER_CUBIC)\n self.configKata = configAugment.getKanakataConfig()\n\n # utility\n # self.augment = (\n # augmentor(is_binary=is_binary) if augmentor is not None\n # else HandwritingMildAugment(is_binary=is_binary))\n self.helper = FFG_Helper() if helper is None else helper\n self.deterministic = deterministic",
"def errors_icons(self):\n msg_errors_lifes = ''\n for i in range(0,5):\n if self.letters_wrong <= i:\n msg_errors_lifes += ' ♥ '\n else:\n msg_errors_lifes += ' ☠ ' \n return msg_errors_lifes",
"def glyph(x):\n assert isinstance(x, str)\n return [x]",
"def provisional(self, positive=False, kanji=False):\n if positive:\n verb = self.kanji if kanji else self.kana\n for before, after in Verb.ENDING_U_TO_E.items():\n if verb.endswith(before):\n base = re.sub(before + '$', after, verb)\n return base + 'ば'\n else:\n verb = self.present_indicative(polite=False, positive=False, kanji=kanji)\n return re.sub('い$', 'ければ', verb)",
"def labels_to_cityscapes_palette(image):\n classes=ZHANG_classes \n result =np.zeros((img.shape[0], img.shape[1], 3),dtype=np.uint8)\n for key, value in classes.items():\n result[np.where(img == key)] = value\n return result"
] | [
"0.5736218",
"0.55576307",
"0.5534901",
"0.54400593",
"0.53849167",
"0.53792924",
"0.53267854",
"0.53198653",
"0.529424",
"0.526523",
"0.5261577",
"0.52349937",
"0.5214417",
"0.5189843",
"0.5167919",
"0.5137147",
"0.5071565",
"0.5069392",
"0.49859008",
"0.4958913",
"0.49549603",
"0.4925367",
"0.4923396",
"0.49097154",
"0.490488",
"0.4902222",
"0.48894143",
"0.4886764",
"0.4885843",
"0.4881593"
] | 0.6089763 | 0 |
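A quick check of the `get_gif_uri` document above: the kanji's code point is hex-encoded via `unicode-escape` and becomes the GIF filename.

```python
# 水 is U+6C34, so the expected filename is "6c34.gif".
uri = get_gif_uri("水")
assert uri.endswith("/6c34.gif")
print(uri)
# https://raw.githubusercontent.com/mistval/kanji_images/master/gifs/6c34.gif
```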
Takes a word and returns true if there are hiragana or katakana present within the word | def contains_kana(word):
for k in word:
if k in hiragana or k in katakana or k in small_characters:
return True
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_japanese(x):\n pattern = re.compile(r'[\\p{IsHira}\\p{IsKatakana}]', re.UNICODE)\n ret = bool(pattern.search(x))\n return ret",
"def isValid(text):\n\n\n return any(word in text for word in [u\"我好看么\", u\"称赞\"])",
"def is_british_english_term(word: str) -> bool:\n word = process_word(word)\n return word in BRITISH_ENGLISH_ONLY_TERMS",
"def is_isogram(word):\n word = [char.lower() for char in word if char.isalpha()]\n for char in word:\n if word.count(char) > 1:\n return False\n return True",
"def is_word(trie, string: str) -> bool:\n return any(w == string for w in trie)",
"def is_hindi(word):\r\n\twordlist = []\r\n\twith open(\"HINDI_DICT.txt\", \"r\", encoding = \"utf-8\") as f:\r\n\t\tfor line in f:\r\n\t\t\tline = re.sub(r'[^A-Za-z.;]','',line)\r\n\t\t\tline = line.lower()\r\n\t\t\tlist1 = line.split(\";\")\r\n\t\t\tfor element in list1:\r\n\t\t\t\tif element != '':\r\n\t\t\t\t\twordlist.append(element)\r\n\tif word in list(wordlist):\r\n\t\treturn 1\r\n\treturn 0",
"def has_word(self, word)->bool:\n if len(word) == 1:\n chars = word + GNode.CHAR_EOW\n else:\n chars = word[0] + GNode.CHAR_REV + word[1:] + GNode.CHAR_EOW\n cursor = self.root\n for c in chars.lower():\n if c not in cursor.children:\n return False\n else:\n cursor = cursor.children[c]\n return True",
"def search(self, word: str) -> bool:\n curr_chars = self.chars\n for c in list(word):\n if c not in curr_chars:\n return False\n curr_chars = curr_chars[c]\n return self.end_of_word in curr_chars",
"def word_check(word):\n word1 = word[1:]\n if word1 not in word_dict: return False\n if not homophones (word, word1): return False\n \n \n word2 = word[0] + word[2:]\n if word2 not in word_dict: return False\n if not homophones(word, word2): return False\n\n return True",
"def is_american_english_term(word: str) -> bool:\n word = process_word(word)\n return word in AMERICAN_ENGLISH_ONLY_TERMS",
"def is_word_character(ch):\n if (ch >= 'a' and ch <= 'z'): return True\n if (ch >= 'A' and ch <= 'Z'): return True\n if (ch >= '0' and ch <= '9'): return True\n if (ch >= 'À' and ch < 'ˀ'): return True\n if (ch == '-' or ch == '0xAD'): return True # hyphen or soft hyphen\n if (ch >= 'Ά' and ch <= 'ԓ'): return True\n return False",
"def search(self, word):\n pointer = self.tries\n for i in range(len(word)):\n ascii = ord(word[i]) - ord('a')\n if pointer[ascii] == None:\n return False\n pointer = pointer[ascii]\n if word in pointer[26:]:\n return True\n else:\n return False",
"def is_a_word(self, word):\n word = word.lower()\n if word in self.data:\n return True\n else:\n # for char in word:\n # if char.isnumeric():\n # return True\n word = list(word)\n numbers = len([x for x in word if x.isnumeric()])\n # # letters = len([x for x in word if x.isalpha()])\n if numbers >= 2 or numbers/len(word) > 0.4:\n return True\n return False",
"def isUnicodeEmoji(c : str) -> bool:\n return c in UNICODE_EMOJI",
"def is_abecedarian(word):\n pass",
"def _check_pauli_word(pauli_word):\n return all(pauli in PauliRot._ALLOWED_CHARACTERS for pauli in set(pauli_word))",
"def contains (self,phrase,chars):\r\n\r\n for x in chars:\r\n\r\n if x in phrase:\r\n return True\r\n return False",
"def is_word(self, word):\r\n\r\n return self.data(word) is not None",
"def check_word(self, word):\n first_letter, rest = word[0], word[1:]\n\n for possible_start in self._find_letter(first_letter):\n if self._check_word(possible_start, rest):\n return True\n\n return False",
"def search(self, word):\n length = len(word)\n if length == 1:\n for letter in string.ascii_lowercase:\n key = \"{}/{}\".format(1, letter)\n if key in self.origin and letter != word:\n return True\n return False\n\n key = \"{}/{}\".format(len(word), word[0])\n ls = self.origin.get(key, [])\n if len(ls) == 0:\n return False\n\n for origin in ls:\n if self.only_modify_one_char(word, origin):\n return True\n return False",
"def check_word(word):\n\n return bool(re.match(r'^[a-z]+$', word))",
"def _words_in_text(word, text):\n\n regexword = \"\\\\b\" + word + \"\\\\b\"\n\n return True if re.search(regexword, text, re.IGNORECASE) else False",
"def word_finder(word, text):\r\n word = word.lower()\r\n text = str(text).lower()\r\n match = re.search(word, text)\r\n if match:\r\n return True\r\n return False",
"def avoids (word, frbdn_letters):\n for letter in frbdn_letters:\n if letter in word: \n return False\n return True",
"def is_word(string):\n\n # Use a regular expression to find the correct pattern. \n matches = re.match(r'^[a-zA-Z0-9]+$', string)\n\n # Is there a match?\n if matches:\n return True\n else:\n return False",
"def isHangulSyllable(ch):\n #Java's Regex had an easy way to do this. No Python equivalent.\n #Had to invent this\n\n if len(ch) != 1: #if it isn't 1 character long\n return None #it's not true nor false\n\n #convert to an ordinal number\n chOrd = ord(ch)\n\n if (chOrd < HAN_SYL_START) or (chOrd > HAN_SYL_END):\n #if the character is outside the range for the\n #Hangul Syllables Block as defined here:\n # http://www.unicode.org/charts/PDF/UAC00.pdf\n return False\n else:\n #It must be inside the block\n return True",
"def basic_check(word):\n if word[-1] == \"b\" or word[-1] == \"g\":\n return False\n consonant_counter = 0\n for char in word:\n if char in VOWELS:\n consonant_counter = 0\n else:\n consonant_counter += 1\n if consonant_counter >= 3:\n return False\n return True",
"def isWord(word, dictionary):\n return word in dictionary",
"def has_letter(word):\r\n for char in word:\r\n if char.isalpha():\r\n return True\r\n return False",
"def check_word(self, word):\n word = word.lower().strip()\n return not word or word in self.dictionary"
] | [
"0.6930642",
"0.68723994",
"0.66538167",
"0.6580591",
"0.6552416",
"0.65398663",
"0.64157027",
"0.6405542",
"0.6381708",
"0.6371139",
"0.6359613",
"0.63581526",
"0.6336159",
"0.6310224",
"0.63045526",
"0.6291132",
"0.628089",
"0.6279967",
"0.6273224",
"0.6266891",
"0.625361",
"0.6253354",
"0.62530434",
"0.6250418",
"0.6227829",
"0.6217459",
"0.6210325",
"0.61963725",
"0.6179664",
"0.61778885"
] | 0.87406576 | 0 |
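A small sketch exercising the `contains_kana` document above. The `hiragana`, `katakana`, and `small_characters` collections it closes over are not shown in the row, so the definitions below are assumptions built from the standard Unicode kana blocks.

```python
# Assumed character sets; the original module presumably defines its own.
hiragana = {chr(cp) for cp in range(0x3041, 0x3097)}   # ぁ..ゖ
katakana = {chr(cp) for cp in range(0x30A1, 0x30FB)}   # ァ..ヺ
small_characters = {"っ", "ゃ", "ゅ", "ょ", "ッ", "ャ", "ュ", "ョ", "ー"}

print(contains_kana("日本語"))    # False: kanji only
print(contains_kana("食べる"))    # True: contains hiragana
print(contains_kana("カタカナ"))  # True: all katakana
```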
Directly use Jisho's official API to get info on a phrase (can be multiple characters) | def search_for_phrase(self, phrase):
uri = uriForPhraseSearch(phrase)
return json.loads(requests.get(uri).content) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def jisho(self, ctx, word: str):\r\n search_args = await self.dict_search_args_parse(ctx, word.lower())\r\n if not search_args:\r\n return\r\n limit, query = search_args\r\n message = urllib.parse.quote(query, encoding='utf-8')\r\n url = \"http://jisho.org/api/v1/search/words?keyword=\" + message\r\n async with self.session.get(url) as response:\r\n data = await response.json()\r\n try:\r\n messages = [self.parse_data(result) for result in data[\"data\"][:limit]]\r\n except KeyError:\r\n return await ctx.send(\"I was unable to retrieve any data\")\r\n try:\r\n await ctx.send('\\n'.join(messages))\r\n except discord.HTTPException:\r\n await ctx.send(\"No data for that word.\")",
"def newPhraseInfo(phrase):\n return {\"count\":0,\n \"ids\":set(),\n \"phrase\":phrase\n }",
"async def jisho(self, ctx, *, text):\n await ctx.message.delete()\n await hf.safe_send(ctx,\n f\"Try finding the meaning to the word you're looking for here: https://jisho.org/search/{text}\")",
"async def pony(self, *text):\n if len(text) > 0:\n if len(text[0]) > 1 and len(text[0]) < 20:\n try:\n msg = \"+\".join(text)\n search = \"https://derpiboo.ru/search.json?q=\" + msg\n async with aiohttp.get(search) as r:\n result = await r.json()\n if result[\"search\"] != []:\n url = \"http:\" + result[\"search\"][0][\"image\"]\n await self.bot.say(url)\n else:\n await self.bot.say(\"Your search terms gave no results.\") \n except: \n await self.bot.say(\"Error.\")\n else: \n await self.bot.say(\"Invalid search.\")\n else: \n await self.bot.say(\"pony [text]\")",
"def translate_leet(phrase):",
"def getExamples(query):\n HREF_ITER = 'href'\n UN_URL_PART = '/kanji/details/'\n EX_FOUND = 'Found'\n JISHO_URL = 'http://jisho.org/sentences?jap='\n \n url = JISHO_URL + query #may be kanji or word\n \n try:\n soup = BeautifulSoup(urllib2.urlopen(url))\n \n check = soup.findAll('h2')\n if len(check) > 0 and len(check[0].contents) > 0:\n if EX_FOUND in check[0].contents[0]:\n \n sentences = soup.findAll('a', href=re.compile('/kanji/details'))\n translations = soup.findAll('td', attrs={'class':'english'})\n \n r_sent = []\n r_trans = []\n \n if len(sentences) == len(translations):\n for sentence in sentences:\n r_sent.append(sentence[HREF_ITER].replace(UN_URL_PART, ''))\n for translation in translations:\n r_trans.append(translation.contents[0].replace('\\t',''))\n \n return dict(zip(r_sent, r_trans))\n except Exception:\n return {}",
"def obtain_text():\n pass",
"def loop_through_text(phrase_length):\n\n # get text\n tanach = get_all_text()\n tanach = tanach.split()\n\n results = {}\n\n for index in range(len(tanach)):\n query = ' '.join(tanach[index:index+phrase_length])\n\n if query in results:\n results[query] += 1\n\n else:\n results[query] = 1\n\n return results",
"def get_nouns(txt):\n query = 'https://api.textgain.com/1/tag?q='\n query += urllib.parse.quote(txt, safe='')\n query += '&lang=fr&key=***'\n resp = requests.get(query)\n\n body = json.loads(resp.text)['text'][0]\n\n nouns = {}\n for iterable_elem in body:\n for elem in iterable_elem:\n if elem['tag'] == 'NOUN':\n word = elem['word']\n if word in nouns.keys():\n nouns[word] += 1\n else:\n nouns[word] = 1\n print(nouns)\n return nouns",
"def get_sentence(self):",
"def get_person_text(self, uid):\n words = \"\"\n\n query = \"\"\"\nSELECT ?overview ?researchO ?label\nWHERE\n{\n <%s> <http://vivoweb.org/ontology/core#overview> ?overview .\n <%s> <http://vivoweb.org/ontology/core#researchOverview> ?researchO .\n <%s> <http://www.w3.org/2000/01/rdf-schema#label> ?label .\n}\n \"\"\" % (uid, uid, uid)\n self.setQuery(query)\n try:\n rval = self.query()\n try:\n g = rval.convert()\n except:\n pass\n words = \"%s %s %s\" % (g['results']['bindings'][0]['overview']['value'], g['results']['bindings'][0]['researchO']['value'], g['results']['bindings'][0]['label']['value'])\n except:\n print \"Select failed: %s\" % query\n\n self.setQuery(\"\"\"\nPREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>\nPREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\nPREFIX vivo: <http://vivoweb.org/ontology/core#>\nPREFIX xsd: <http://www.w3.org/2001/XMLSchema#>\nSELECT ?name\nWHERE\n{\n ?auth vivo:relates <%s> .\n ?auth rdf:type vivo:Authorship .\n ?auth vivo:relates ?art .\n filter (?art!=<%s>) .\n ?art <http://vivoweb.org/ontology/core#dateTimeValue> ?date .\n ?date <http://vivoweb.org/ontology/core#dateTime> ?year .\n filter (?year>\"2009-01-01T00:00:00Z\"^^xsd:dateTime) .\n ?art rdfs:label ?name .\n}\nLIMIT 20\n\"\"\" % (uid, uid))\n try:\n rval = self.query()\n try:\n g = rval.convert()\n except:\n pass\n for t in g['results']['bindings']:\n words = words + \" \" + t['name']['value']\n\n except:\n print \"Select failed\"\n traceback.print_exc(file=sys.stdout)\n\n self.setQuery(\"\"\"\nPREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>\nPREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\nPREFIX vivo: <http://vivoweb.org/ontology/core#>\nPREFIX xsd: <http://www.w3.org/2001/XMLSchema#>\n\nSELECT ?name\nWHERE\n{\n ?grant vivo:relates <%s> .\n ?grant rdf:type vivo:Grant .\n ?grant <http://vivoweb.org/ontology/core#dateTimeInterval> ?date .\n ?date <http://vivoweb.org/ontology/core#end> ?end .\n ?end <http://vivoweb.org/ontology/core#dateTime> ?year .\n filter (?year>\"2009-01-01T00:00:00Z\"^^xsd:dateTime) .\n ?grant rdfs:label ?name .\n}\n\n \"\"\" % (uid))\n try:\n rval = self.query()\n try:\n g = rval.convert()\n except:\n pass\n\n for t in g['results']['bindings']:\n words = words + \" \" + t['name']['value']\n\n except:\n print \"Select failed\"\n traceback.print_exc(file=sys.stdout)\n\n\n\n\n return words",
"def loop_through_text(phrase_length):\n\n # get text\n tanach = get_all_text()\n tanach = tanach.split()\n\n results = {}\n\n for index in xrange(len(tanach)):\n query = ' '.join(tanach[index:index+phrase_length])\n\n if query in results:\n results[query] += 1\n\n else:\n results[query] = 1\n\n return results",
"def key(text):\n url = \"https://bio.torre.co/api/people?q={}&limit=10\".format(text)\n request = get(url)\n print(request.json())\n if request.status_code != 200 or len(request.json()) < 1:\n abort(404, 'Not Found')\n return json.dumps(request.json())",
"def text_to_speech(entry):\n text = entry.get_text()\n if text:\n subprocess.call([\"milena_say\", text])",
"def wikidata_search(request, str):\n url_head = 'https://www.wikidata.org/w/api.php?action=wbsearchentities&search='\n url_tail = '&language=en&format=json'\n if request.method == 'GET':\n r = requests.get(url_head+str+url_tail);\n return Response(r.json()['search'])\n #print r",
"async def get_information():\n return {\n \"message\": f\"You are the Genome Researcher. \"\n f\"You are meddling with Coronavirus Sars-Cov-2 RNA... \"\n f\"Try to change the RNA at your disposal to uncover as many medical breakthroughs as possible. \"\n f\"use GET /sample to see the original RNA strand \"\n f\"use COPY /sample to create exact duplicate of original to perform experiments. \"\n f\"Try to change the RNA at your disposal to uncover as many medical breakthroughs as possible. \"\n f\"Good luck researcher. \"\n f\"Our souls fates' depend on you! \"\n }",
"async def phrase(self, ctx):\n await self.heleus.send_command_help(ctx)",
"def getSentenceInfo(sentence):\n\tpass",
"def getTextFromSpeak(self):\n raise NotImplementedError",
"def getWords(speech):\r\n return speech.split()",
"def text_json(request):\n query = str()\n\n if request.method == 'GET':\n query = request.GET.get('q')\n\n results = list()\n\n for c in search.tokenSearch(query):\n tmp = {'category':'課程代號','title':c.token}\n results.append(tmp)\n \n for c in search.zhNameSearch(query):\n tmp = {'category':'課程名稱','title':c.name_zh}\n results.append(tmp)\n\n \n for c in search.engNameSearch(query):\n tmp = {'category':'Course Name','title':c.name_eng}\n results.append(tmp)\n \n for t in Teacher.objects.filter(name_zh__icontains=query):\n tmp = {'category':'老師','title':t.name_zh}\n results.append(tmp)\n \n for d in Department.objects.filter(name_zh__icontains=query):\n tmp = {'category':'開課單位','title':d.name_zh}\n results.append(tmp)\n\n tmp = {'results':results}\n\n return HttpResponse(json.dumps(tmp))",
"def get_introduction(length=128, words=None):",
"def get_phrase(word_count):\n phrase = ''\n\n iterations = int(math.ceil(word_count / 2))\n is_odd = False\n\n if word_count % 2 == 1:\n is_odd = True\n\n for i in range(iterations):\n driver = webdriver.Chrome()\n driver.get('http://watchout4snakes.com/wo4snakes/Random/RandomPhrase')\n two_words = driver.find_element_by_id('result').text\n\n if is_odd and i == (word_count - 1):\n one_word = two_words.split(' ')\n phrase = phrase + one_word[0]\n else:\n phrase = phrase + two_words\n\n driver.close()\n\n return phrase.replace(\" \", \"\")",
"def get_noun_phrases(blob):\n return blob.noun_phrases",
"def phrase(self):\n return self._phrase",
"def phrase(self):\n return self._phrase",
"def search_dish_name(text):\n\n # timing information, can delete later.\n start = dt.datetime.now()\n\n results = {}\n if type(text) != unicode:\n text = text.decode('utf-8')\n if len(text) > 10:\n # Most dish names are 3-5 characters. \n # If Tesseract returned more than 10 characters, something probably went wrong.\n print \"Input text is too long.\"\n return None\n else:\n # Find a matching dish, if it exists.\n match = Dish.find_match(text)\n if match:\n # If result is found, return JSON representation of dish.\n results = match.get_json()\n start = time_elapsed(\"Dish lookup\", start)\n else:\n # If no dish is found, return translation data and similar dishes, if they exist.\n translation = translate(text)\n start = time_elapsed(\"Translation\", start)\n results['translation'] = translation\n\n # Find similar dishes and add to results.\n if len(text) > 1:\n similar_dishes = Dish.find_similar(text)\n start = time_elapsed(\"Similar dish lookup\", start)\n similar_json = [] \n for similar_dish in similar_dishes:\n dish_data = similar_dish.get_json_min()\n similar_json.append(dish_data)\n\n if similar_json != []:\n results['similar'] = similar_json\n\n return results",
"def getNoteSearchText(self, authenticationToken, guid, noteOnly, tokenizeForIndexing):\r\n pass",
"def word():\n\n word = Word(random_word())\n # word = Word(\"arroyo\")\n\n word.speak()\n word.messup()\n l.debug(\"Displaying %s\", word.word)\n\n prons = sorted([word.word, word.word + \"-a\", word.word + \"-b\", word.word + \"-c\"], key=lambda x: random.random())\n\n return jsonify({\"word\": word.word, \"pron\": prons, \"correct\": prons.index(word.word)})",
"def main():\n\n args = get_args()\n words = args.phrase\n\n words = codify_phrase(words)\n display = ' '.join(words)\n\n print(display)"
] | [
"0.6383143",
"0.62670934",
"0.60265064",
"0.5882855",
"0.5850151",
"0.58406603",
"0.5805289",
"0.5768935",
"0.5695436",
"0.5689113",
"0.568847",
"0.5662847",
"0.5660174",
"0.56477916",
"0.5562004",
"0.5547414",
"0.55439466",
"0.55421555",
"0.5507159",
"0.54866064",
"0.5482057",
"0.54745877",
"0.5467093",
"0.545086",
"0.54481673",
"0.54481673",
"0.54398584",
"0.5435502",
"0.5424334",
"0.54198027"
] | 0.67336345 | 0 |
With the response, extract the HTML and store it into the object. | def _extract_html(self, url):
self.response = requests.get(url, timeout=5)
self.html = BeautifulSoup(self.response.content, "lxml") if self.response.ok else None
# return self.html | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse(self, response: BeautifulSoup):\n raise NotImplementedError",
"def parse(self, response):\n yield{\n 'url': response.url,\n 'title': response.css(\"h1.article-main-title::text\").get(),\n 'sub_title': response.css(\"h2.article-sub-title::text\").get(),\n 'article_image': (response.css(\"div.article-image img::attr(src)\").get()),\n 'body': '\\n\\n'.join(response.css(\"div.article-body-container p::text\").getall()),\n 'published_date': (response.css(\"div.article-credit::text\").get().replace('|','').replace('\\r',''))[1:],\n 'source': 'One'\n }",
"def _target(self, response: Response):\n soup = BeautifulSoup(response.text, self._parser_library)\n urls = self._url_parser(response.url, soup)\n self._file_parser(response.url, soup, urls, self._logger)",
"def parse(self, response):",
"def parse(self, response):\n\n # open('temp.html', 'w').write(response.body)\n # open('temp.txt', 'w').write(\"Request: %s\\n\\nResponse: %s\\n\"\n # % (response.request.headers\n # , response.headers))\n # Link Extraction\n self.parse_links(response)\n\n yt_item_loader = YoutubeItemLoader(YouTubeDataModel())\n yt_item_loader.add_value('url', response.url)\n yt_item_loader.add_value('title', self.get_video_title(response))\n yt_item_loader.add_value('views', self.get_video_views(response))\n yt_item_loader.add_value('likes', self.get_video_likes(response))\n yt_item_loader.add_value('dislikes', self.get_video_dislikes(response))\n yt_item_loader.add_value('channel_name'\n , self.get_video_channel_name(response))\n yt_item_loader.add_value('channel_subscriber_count'\n , self.get_subscriber_count(response))\n yt_item_loader.add_value('publish_date'\n , self.get_video_publishing_date(response))\n\n return yt_item_loader.load_item()",
"def process_response(self, response):\n return response",
"def parse_response(self):\n pass",
"def convert_content(self, html):\n\n try:\n dom = BeautifulSoup(html, 'html.parser')\n return self.parse_content(dom)\n except:\n return html",
"def parse_post_content(self, response):\n post = Post()\n post['title'] = response.xpath('//h2/a/text()')[0].extract()\n post['image_url'] = response.xpath(\"//div[@class='cont group']//img/@src\")[0].extract()\n yield post",
"def from_html(self, content):\r\n pass",
"def parse(self, response):\n\t\ttc = TCArticleItem()\n\t\ttc['name'] = response.xpath(\"//meta[@name='title']/@content\").extract()\n\t\ttc['url'] = response.url\n\t\ttc['date'] = self.date(response)\n\t\ttc['description'] = response.xpath(\"//meta[@name='description']/@content\").extract()\n\t\ttc['body'] = self.body(response)\t\t\n\t\ttc['tags'] = response.xpath(\"//meta[@name='keywords'][2]/@content\").re('(\\w+)')\n\t\treturn tc",
"def parse(response):\n # print(response.text.encode('utf-8'))\n soup = BeautifulSoup(response.text, 'lxml')\n title = soup.find('title')\n answer = title.string\n return answer",
"def parse(self, response):\n return super().parse(response)",
"def parse(self, response):\n return super().parse(response)",
"def extract(self, doc, raw_html):\n super(KenyaTodayCrawler, self).extract(doc, raw_html)\n\n soup = BeautifulSoup(raw_html)\n\n # gather title\n doc.title = soup.find(attrs={\"property\":\"og:title\"})['content']\n\n #gather publish date\n date = self.extract_plaintext(soup.select(\"main.content .entry-meta .entry-time\"))\n doc.published_at = self.parse_timestamp(date)\n\n nodes = soup.select(\".content .entry-content p\")\n self.log.info(nodes)\n if len(nodes) > 1:\n doc.summary = self.extract_plaintext(nodes[0:1])\n doc.text = \"\\n\\n\".join(p.text.strip() for p in nodes[2:])\n\n doc.author = Author.unknown()",
"def parse_response(self, response):\n\n return json.loads(response.text)",
"def parse_webpage(self, response):\n item = response.meta['item']\n print(\"Request url {}, actual requested url {}\".format(item['url'], response.request.url))\n # website url\n item['website_url'] = response.request.url\n\n item['name'] = self.guess_company_name(response)\n item['domain'] = self.get_domain(response)\n\n # get website title\n item['website_title'] = self.get_webpage_title(response)\n # get description from website\n item['website_desc'] = self.get_webpage_description(response)\n\n # get keywords from website\n item['keywords'] = self.get_webpage_keywords(response)\n\n # try to get email and phones\n item['email'] = self.extract_email(response)\n item['phone'] = self.extract_phone(response)\n\n if not item['email']:\n # try to get contact info\n # check if there is kontakt link on the page\n item = self.check_webpage_for_contact_details(item, response, \"impressum\")\n\n if not item['email']:\n try:\n # try Contact\n item = self.check_webpage_for_contact_details(item, response, \"kontakt\")\n\n except Exception as e:\n print(\"Exception\", e)\n\n if item['email']:\n item['email'] = item['email'].replace(\"(at)\", \"@\")\n yield item",
"def scrape(self):\n\n self.url = self.headline.url\n\n # Should raise exception...\n if not self.parsing_template:\n return None, None, None, None, None\n\n try:\n response = self.download()\n self.source = response.text\n except:\n return None, None, None, None, None\n\n soup = BeautifulSoup(response.content, \"html.parser\")\n\n if soup:\n return self.parse(soup)\n else:\n return None, None, None, None, None",
"def _parse(self):\n soup = BS(self._current_html, 'lxml')\n for item in soup.select('div.c'):\n temp = {}\n # main content\n ctt = item.select('span.ctt')\n if not ctt:\n continue\n weibo_body = item.select('div')\n if len(weibo_body) > 1:\n temp['content'] = weibo_body[0].text\n btn_group = weibo_body[1].text\n else:\n temp['content'] = weibo_body[0].select('span.ctt')[0].text\n btn_group = weibo_body[0].text\n temp['is_repost'] = True if REPO_TEST_PATTERN.match(\n temp['content']) else False\n try:\n temp['like_num'] = LIKE_NUM_PATTERN.findall(btn_group)[0]\n temp['cmt_num'] = COMMENT_NUM_PATTERN.findall(btn_group)[0]\n temp['repo_num'] = REPO_NUM_PATTERN.findall(btn_group)[0]\n except Exception:\n pass\n cmt = item.select('.cmt')\n # visibility\n if cmt:\n try:\n temp['visibility'] = VISIBILITY_PATTERN.findall(\n cmt[0].text)[0]\n except Exception:\n pass\n\n # img in main content\n img = item.select('div a img')\n img_src = img[0].attrs['src'] if img else None\n temp['img_src'] = img_src\n LOGGER.debug('img_src: {}'.format(img_src))\n # time & source device\n ct = item.select('span.ct')\n if ct:\n ct = ct[0]\n text = ct.text\n reg_result = TIME_PATTERN.findall(text)[0]\n\n temp['time'] = ar(\n '{}年{}'.format(self._current_year, reg_result[0]),\n DATE_FMTS[0]\n ).naive if reg_result[0] else ar(\n reg_result[1], DATE_FMTS[1]\n ).naive\n temp['source'] = SOURCE_DEVICE_PATTERN.findall(text)[0]\n self._post_item = Post(**temp)\n self._attachment_item = Attachment(\n uri=img_src, post=self._post_item)\n self._store()",
"def _parse_response(self, response):\n if response is not None:\n return response.string\n return response",
"def get_content(self):\n response = requests.get(self.url)\n soup = BeautifulSoup(response.text, \"html.parser\")\n return soup",
"def process_response(self, request, response):\n\n if settings.DEBUG:\n return response\n\n if 'text/html' in response['Content-Type'] and settings.COMPRESS_HTML:\n response.content = strip_spaces_between_tags(\n response.content.strip())\n response.content = RE_NEWLINE.sub(\" \", response.content)\n response.content = RE_MULTISPACE.sub(\" \", response.content)\n response.content = RE_SPACETAG1.sub(\">\", response.content)\n response.content = RE_SPACETAG2.sub(\"<\", response.content)\n return response",
"def parse_full_job_page(self, response, job_dict):\n job_dict['description']=BeautifulSoup(response.xpath('//div[contains(@class, \"post-content\")]').get()).get_text()\n return Job(job_dict)",
"def getHTML(self):\n html = requests.get(self.URL).text\n soup = BeautifulSoup(html, \"lxml\")\n return soup",
"def obj_from_response(self, response):\n\n obj = self.model()\n serializer = self.get_serializer()\n field_data = serializer.deserialize(to_unicode(response.content))\n obj.update_fields(field_data)\n obj._full_url = response.url\n\n return obj",
"def body(self, response):\t\n\t\tx = response.xpath(\"//div[@class='story-content row-fluid']/p/text()\").extract()\n\n\t\tfor i in range(0,len(x)):\n\t\t\tx[i] = x[i].strip(\"\\r\\n\\t\")\n\t\treturn x",
"async def respondHTML(self, html):\n self.HTMLResponse = html",
"def get_html(self):\r\n pass",
"def parse(self, response):\n item = CompanyItem()\n web_addr = ''\n try:\n web_addr = response.css('img.img-thumbnail::attr(src)').extract()[0].split('/')[-1][:-4]\n status = 'image done'\n except Exception as e:\n print(\"Image get Exception\", e)\n status = ''\n item['website'] = str(response.url)\n item['web_addr'] = str(web_addr).strip()\n item['status'] = status\n yield item",
"def parse_response(self, resp):\n p, u = self.getparser()\n\n if hasattr(resp, 'text'):\n # modern requests will do this for us\n text = resp.text # this is unicode(py2)/str(py3)\n else:\n\n encoding = requests.utils.get_encoding_from_headers(resp.headers)\n if encoding is None:\n encoding = 'utf-8' # FIXME: what to do here?\n\n if sys.version_info[0] == 2:\n text = unicode( # noqa: F821\n resp.content, encoding, errors='replace')\n else:\n assert sys.version_info[0] == 3\n text = str(resp.content, encoding, errors='replace')\n p.feed(text)\n p.close()\n return u.close()"
] | [
"0.7155325",
"0.6815259",
"0.6661986",
"0.66527045",
"0.65676737",
"0.65462565",
"0.652491",
"0.6482208",
"0.6459295",
"0.64440024",
"0.64043",
"0.6329705",
"0.63162",
"0.63162",
"0.62707376",
"0.6270195",
"0.62326896",
"0.62191874",
"0.62183607",
"0.6180285",
"0.6169138",
"0.6163078",
"0.6155292",
"0.6138581",
"0.61373705",
"0.61294174",
"0.612703",
"0.61222696",
"0.60994107",
"0.6095063"
] | 0.732518 | 0 |
Take the meanings list from the DOM and clean out noninformative meanings. | def _isolate_meanings(self, meanings_list):
index = self._get_meaning_cutoff_index(meanings_list)
if index:
return [m for i, m in enumerate(meanings_list) if i < index]
else:
return meanings_list | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clean_descriptions(descriptions):\n for key, desc_list in descriptions.items():\n for i in range(len(desc_list)):\n desc = desc_list[i]\n # Tokenize.\n desc = desc.split()\n # Convert to lower case.\n desc = [word.lower() for word in desc]\n # Remove punctuation from each token.\n desc = [w.translate(None, string.punctuation) for w in desc]\n # Remove hanging 's' and 'a'.\n desc = [word for word in desc if len(word) > 1]\n # Remove tokens with numbers in them.\n desc = [word for word in desc if word.isalpha()]\n # Store as string.\n desc_list[i] = ' '.join(desc)",
"def _clean_seq_titles(self, element):\r\n return self.REMOVE_SPAN_TAG_RE.sub('', element.get_attribute('innerHTML')).strip().split('\\n')[0]",
"def normalize_html(html):\n # Replace many whitespace characters with a single space in some elements\n # kind of like a browser does.\n soup = BeautifulSoup(html, 'lxml')\n for e in soup.select(':not(script,pre,code,style)'):\n for part in e:\n if isinstance(part, NavigableString):\n crunched = NavigableString(re.sub(r'\\s+', ' ', part))\n if crunched != part:\n part.replace_with(crunched)\n # Asciidoctor adds a \"content\" wrapper. It doesn't really change the layout\n # so we're ok with it.\n for e in soup.select('#content'):\n e.unwrap()\n # Docbook adds <span class=\"emphasis\"> around <em> tags. We don't need them\n # and it isn't worth making Asciidoctor make them.\n for e in soup.select('.emphasis'):\n e.unwrap()\n # Asciidoctor adds a \"ulist\" class to all unordered lists which doesn't\n # hurt anything so we can ignore it.\n for e in soup.select('.itemizedlist.ulist'):\n e['class'].remove('ulist')\n # Docbook adds type=\"disc\" to ul which is the default and isn't needed.\n for e in soup.select('ul'):\n if 'type' in e.attrs and e['type'] == 'disc':\n del e['type']\n # Asciidoctor adds a \"olist\" class to all ordered lists which doesn't\n # hurt anything so we can ignore it.\n for e in soup.select('.orderedlist.olist'):\n e['class'].remove('olist')\n # Docbook adds type=\"1\" to ol which is the default and isn't needed.\n for e in soup.select('ol'):\n if 'type' in e.attrs and e['type'] == '1':\n del e['type']\n # Docbook emits images with the 'inlinemediaobject' class and Asciidoctor\n # has the 'image' class. We've updated our styles to make both work.\n for e in soup.select('.inlinemediaobject'):\n e['class'].remove('inlinemediaobject')\n e['class'].append('image')\n # Docbook links with `<a class=\"link\"` when linking from one page of a book\n # to another. Asciidoctor emits `<a class=\"link\"`. Both look fine.\n for e in soup.select('a.xref'):\n if '.html#' in e['href']:\n e['class'].remove('xref')\n e['class'].append('link')\n # Format the html with indentation so we can *see* things\n html = soup.prettify()\n # docbook spits out the long-form charset and asciidoctor spits out the\n # short form but they are equivalent\n html = html.replace(\n '<meta content=\"text/html; charset=utf-8\" http-equiv=\"Content-Type\"/>',\n '<meta charset=\"utf-8\"/>')\n return html",
"def clean(self):\n # Calls handle_starttag, handle_endtag, and handle_data\n self.feed()\n\n # Clean up any parent tags left open\n if self.current_parent_element['tag'] != '':\n self.cleaned_html += '</{}>'.format(self.current_parent_element['tag'])\n\n # Remove empty <p> added after lists\n self.cleaned_html = re.sub(r'(</[u|o]l>)<p></p>', r'\\g<1>', self.cleaned_html)\n\n self._remove_pre_formatting()\n\n return self.cleaned_html",
"def clean_article(self):\n # split into tokens by white space\n tokens = self.text.split(\" \")\n # remove punctuation from each token\n table = str.maketrans('', '', punctuation)\n tokens = [w.translate(table) for w in tokens] # type: List[Any]\n # remove remaining tokens that are not alphabetic\n tokens = [word for word in tokens if word.isalpha()]\n # filter out stop words\n stop_words = set(stopwords.words('english'))\n tokens = [w for w in tokens if not w in stop_words]\n # lemmatization and lowercase\n lmtzr = WordNetLemmatizer()\n tokens = [lmtzr.lemmatize(w.lower()) for w in tokens]\n # filter out short tokens\n tokens = [word for word in tokens if len(word) > 1]\n return tokens",
"def _clean(self, texts, no_punc=False):\n result = ''\n sw = self._sw_no_punc_dict if no_punc else self._sw_dict\n for t in texts:\n if t not in sw:\n result += t\n return result",
"def data_cleaner(doc):\n \n sw = stopwords.words('english')\n regex_token = RegexpTokenizer(r\"([a-zA-Z]+(?:’[a-z]+)?)\")\n doc = regex_token.tokenize(doc)\n doc = [word.lower() for word in doc]\n doc = [word for word in doc if word not in sw]\n #print(doc)\n doc = pos_tag(doc)\n doc = [(word[0], get_wordnet_pos(word[1])) for word in doc]\n #print(doc)\n lemmatizer = WordNetLemmatizer() \n doc = [lemmatizer.lemmatize(word[0], word[1]) for word in doc]\n #print(' '.join(doc))\n return ' '.join(doc)",
"def clean(self):\n pass\n #TODO check whether short name is really clean and short!",
"def _preprocess(self):\n _words = []\n \n # Preprocess Token List.\n wasCaps = False\n nwords = len(self._words)\n for index in range(nwords):\n word = self._words[index]\n length = len(word['word'])\n\n # (1) Remove periods from abbreviations\n if word['word'] == '.':\n # Preceded by a single letter\n if len(_words) > 0 and len(_words[-1]['word']) == 1 and _words[-1]['word'].isalpha():\n # Set previous word as Abbreviation\n if _words[-1]['tag'] not in [Vocabulary.NAME, Vocabulary.TITLE]:\n _words[-1]['tag'] = Vocabulary.ABBR\n # Drop the punct!\n # Proceeded by an abbreviated name title\n elif self._punct == False and len(_words) > 0 and (_words[-1]['tag'] in [Vocabulary.NAME, Vocabulary.TITLE] or _words[-1]['tag'] == Vocabulary.DATE):\n # Drop the punct!\n pass\n else:\n _words.append(word)\n \n # Single character\n elif length == 1:\n # Lowercase the single letter\n if word['word'].isupper():\n word['word'] = word['word'].lower()\n \n if word['word'].isalpha():\n # Continuation of a Name\n if len(_words) > 0 and _words[-1]['tag'] == Vocabulary.NAME:\n word['tag'] = Vocabulary.NAME\n \n # Keep single letter word\n _words.append(word)\n \n wasCaps = False\n \n # Multiple Character \n else:\n # All Uppercased (can't start with digit)\n if word['word'].isupper() and not word['word'][0].isdigit() and not word['word'][0] == '°':\n # (2) Identify Acronyms\n # If the next word is uppercased, it is a title line, not an acronym\n # If last word is uppercased, it is a title line, not an acronym\n word['word'] = word['word'].lower()\n if not (index+1 < nwords and self._words[index+1]['word'].isupper()) and (index+1 != nwords or wasCaps == False):\n try:\n v = vocab[word['word']]\n if Vocabulary.NAME in v['tag']:\n word['tag'] = Vocabulary.NAME\n # Word is a title (e.g., CEO)\n elif Vocabulary.TITLE in v['tag']:\n word['tag'] = Vocabulary.TITLE\n itag = v['tag'].index(Vocabulary.TITLE)\n word['word'] = v['lemma'][itag]\n else:\n word['tag'] = Vocabulary.ACRONYM\n except:\n word['tag'] = Vocabulary.ACRONYM\n \n wasCaps = True\n \n # First Letter is Capitalized\n elif word['word'][0].isupper():\n # First Word \n if len(_words) == 0:\n pass\n # Follows abbreviated title\n elif len(_words) > 1 and _words[-1]['word'] == '.' and _words[-2]['tag'] == Vocabulary.TITLE:\n word['tag'] = Vocabulary.NAME\n # Start of Sentence\n elif _words[-1]['tag'] == Vocabulary.PUNCT and _words[-1]['word'] not in [',', ':']:\n pass\n elif word['word'] in ['Jan', 'January', 'Feb', 'February', 'Mar', 'March', 'Apr', 'April', 'May', 'Jun', 'June', 'Jul', 'July', 'Aug', 'August', 'Sep', 'Sept', 'September', 'Oct', 'October', 'Nov', 'November', 'Dec', 'December']:\n word['tag'] = Vocabulary.DATE\n # (3) Identify Proper Names\n # Word is capitalized and not proceeded by period (.), question (?) 
or exclamation (!)\n # or single/double quote\n else:\n word['tag'] = Vocabulary.NAME\n # Proceeding Acronym is a really part of a name\n if len(_words) > 0 and _words[-1]['tag'] == Vocabulary.ACRONYM:\n _words[-1]['tag'] = Vocabulary.NAME\n # Proceeding Word is a Title of a name (e.g., Mr)\n else:\n try:\n v = vocab[_words[-1]['word']]\n if Vocabulary.TITLE in v['tag']:\n _words[-1]['tag'] = Vocabulary.TITLE\n itag = v['tag'].index(Vocabulary.TITLE)\n _words[-1]['word'] = v['lemma'][itag]\n \n except:\n # Word is an ending title in a name\n try:\n v = vocab[word['word'].lower()]\n if Vocabulary.TITLE in v['tag'] and Vocabulary.STREET_TYPE not in v['tag'] and Vocabulary.STATE not in v['tag']:\n word['tag'] = Vocabulary.TITLE\n itag = v['tag'].index(Vocabulary.TITLE)\n word['word'] = v['lemma'][itag]\n except: pass\n wasCaps = False\n \n # First Letter is a Digit\n elif word['word'][0].isdigit():\n cont = False\n # Check if this is a number combined with a unit\n for i in range(1, len(word['word'])):\n # Separate the number from the proceeding text\n if word['word'][i].isalpha():\n token = word['word'][i:].lower()\n # Check if the proceeding text is a Unit of Measurement\n try:\n v = vocab[token]\n if Vocabulary.UNIT in v['tag']:\n itag = v['tag'].index(Vocabulary.UNIT)\n _words.append( { 'word': word['word'][0:i], 'tag': Vocabulary.NUMBER } )\n _words.append( { 'word': v['lemma'][itag], 'tag': Vocabulary.UNIT } )\n cont = True\n except: pass\n break\n elif not word['word'][i].isdigit() and word['word'][i] != Words.DECIMAL:\n break\n if cont == True:\n continue\n \n # lowercase\n word['word'] = word['word'].lower()\n # romanize\n if self._roman:\n word['word'] = unidecode(word['word'])\n _words.append(word)\n \n self._words = _words",
"def cleanup(self):\n for element in self.root.iter():\n element.tag = element.tag.partition('}')[-1]",
"def clean_spam(doc):\n for tag in doc.find_all([\"div\",\"ol\", \"dl\", \"ul\", \"table\", \"section\"]):\n if no_block_children(tag) and is_ad_block(tag):\n tag.extract()",
"def clean_names_list(names):\n pure_names = []\n nan = re.compile('nan', re.IGNORECASE)\n title = re.compile('surname', re.IGNORECASE)\n for name in names:\n if nan.search(name):\n continue\n elif title.search(name):\n continue\n else:\n pure_names.append(name)\n return pure_names",
"def clean_mentions(self, tweet):\n self.mentions = [tag.strip('@') for tag in tweet.split() if tag.startswith('@')]\n\n for mention in self.mentions:\n tweet = tweet.replace('@'+mention, '')\n\n tweet = self.clean_unnecessary_whitespaces(tweet)\n\n return tweet",
"def cleaning (data):",
"def clean_up_details(self, w_details):\n for i, val in enumerate(w_details):\n for k in ('when', 'where', 'why'):\n clean_text = [x.strip() for x in val[k]]\n clean_text = '\\n'.join(clean_text)\n w_details[i][k] = clean_text.strip()\n yield w_details",
"def strip_illustrations(parser):\n\tfrom BeautifulSoup import BeautifulSoup\n\tsoup = BeautifulSoup(parser)\n\tillustrations = soup.findAll(\"p\", {\"class\": \"illustration\"})\n\t[illustration.extract() for illustration in illustrations]\n\treturn str(soup)",
"def outlierCleaner(predictions, ages, net_worths):\n assert len(predictions) == len(ages) == len(net_worths)\n features_with_error = [(ages[index], net_worths[index], abs(predictions[index] - net_worths[index])) for index in range(0, len(predictions))]\n features_with_error.sort(key = lambda (age, net_worth, error): error)\n cleaned_data = features_with_error[:81]\n return cleaned_data",
"def filter_some_usages(EN):\n bad_markers = [\n # 'ecclesiastical', actually not a good idea:\n # reachtaire\n # - rector (ecclesiastical)\n # - master of ceremonies\n ]\n ret = '\\n'.join([line for line in EN.split('\\n') if\n (not line.endswith(')')\n or\n line.rsplit('(', 1)[1].rstrip(')')\n not in bad_markers)])\n if ret:\n return ret\n return EN",
"def clean_lexicon_list_words(l: list) -> list:\n\n clean_l = map(str.strip, l) #Remove trailing '\\n' chars\n clean_l = [s for s in clean_l if not s.startswith(';')] #Remove initial explanation text\n\n return clean_l",
"def dropMarks(markup, category=\"exclusion\"):\n markupNew = markup.copy()\n dnodes = [n for n in markupNew.nodes() if n.isA( category )]\n markupNew.remove_nodes_from(dnodes)\n return markupNew",
"def sanitize_sample_descriptions(sample_description_list, sanitize_fn=sanitize_text):\n filtered_sample_desc_list = []\n for text in sample_description_list:\n filtered_sample_desc_list.append(sanitize_fn(text))\n\n return filtered_sample_desc_list",
"def strip_proper_pos(text: Union[List[str], str]) -> List[str]:\n\n text = __join_if_list(text)\n try:\n tagged = pos_tag(text.split())\n except LookupError:\n nltk.download('averaged_perceptron_tagger')\n tagged = pos_tag(text.split())\n\n without_propernouns = [word for word, pos in tagged if pos is not 'NPP' and pos is not 'NNPS']\n return without_propernouns",
"def outlierCleaner(predictions, ages, net_worths):\n\n cleaned_data = []\n\n ### your code goes here\n\n predictionsList = getList(predictions)\n agesList = getList(ages)\n netWorthsList = getList(net_worths)\n\n for x in range(0, len(predictionsList)):\n cleaned_data.append((agesList[x],\n netWorthsList[x],\n getResidualError(predictions[x],\n netWorthsList[x])))\n\n cleaned_data = sorted(cleaned_data, key=lambda error: error[2])\n\n cleanElement = (len(cleaned_data) * 10) / 100\n\n cleaned_data = cleaned_data[:len(cleaned_data) - cleanElement]\n\n print \"Tamanio: \", len(cleaned_data)\n\n return cleaned_data",
"def outlierCleaner(predictions, ages, net_worths):\n\n ### your code goes here\n errors = [abs(p-n) for p,n in zip(predictions, net_worths)]\n cleaned_data = sorted([(a,n,e) for a,n,e in zip(ages, net_worths, errors)], key=lambda i: i[2])\n cleaned_data = cleaned_data[:len(cleaned_data)/10*9]\n \n return cleaned_data",
"def _clean_up(hadith_text: str) -> str:\n punctuations = ''.join([\n # Collected from https://en.wikipedia.org/wiki/Arabic_script_in_Unicode#Punctuation_and_ornaments\n chr(int('060C', 16)), # ARABIC COMMA\n chr(int('060D', 16)), # ARABIC DATE SEPARATOR\n chr(int('060E', 16)), # ARABIC POETIC VERSE SIGN\n chr(int('060F', 16)), # ARABIC SIGN MISRA\n chr(int('061B', 16)), # ARABIC SEMICOLON\n chr(int('061E', 16)), # ARABIC TRIPLE DOT PUNCTUATION MARK\n chr(int('061F', 16)), # ARABIC QUESTION MARK\n chr(int('066D', 16)), # ARABIC FIVE POINTED STAR\n chr(int('06D4', 16)), # ARABIC FULL STOP\n chr(int('06DD', 16)), # ARABIC END OF AYAH\n chr(int('06DE', 16)), # ARABIC START OF RUB EL HIZB\n chr(int('06E9', 16)), # ARABIC PLACE OF SAJDAH\n chr(int('06FD', 16)), # ARABIC SIGN SINDHI AMPERSAND\n chr(int('FD3E', 16)), # Arabic ornate left parenthesis\n chr(int('FD3F', 16)), # Arabic ornate right parenthesis\n ])\n\n # Removing punctuations\n cleaned_text = re.sub('[' + punctuations + ']', ' ', hadith_text)\n\n # Removing any html markup\n cleaned_text = BeautifulSoup(cleaned_text, 'lxml').text\n\n # Removing multiple consecutive whitespaces, including newlines\n cleaned_text = ' '.join(cleaned_text.split())\n\n return cleaned_text",
"def normalize_whitespace(doc):\n doc.xml_normalize() # Merge text nodes where possible\n for text in list(doc.xml_select('descendant::text()')):\n # If text node is all whitespace or empty, remove it.\n if not text.xml_value.strip():\n text.xml_parent.xml_remove(text)",
"def clean_text(s, remove_stop_words=True, correct_spelling_mistakes=True):\n if type(s) is float: # some elements in Visite_ZNS are \"nan\"\n return \"\"\n \n s = s.lower() #s lowercase\n\n s = s.replace('4/4', '44') # 4/4 [Extremitäten] würde sonst zu 2 separaten tokens werden.\n s = s.replace('/', '/ ') # extra leerzeichen, sodass Worte die\n # vorher durch '/' getrennt waren nicht\n # zu einem gemeinsamen Token werden\n\n # filter invalid characters from tect:\n filtered_str = ''.join(filter(lambda ch: ch in allowed_chars, s))\n \n # remove common ambiguities through substitutions:\n replacements = [\n ('v a', 'va'),\n ]\n for to, fro in replacements:\n filtered_str = filtered_str.replace(f' {to} ', f' {fro} ') # vor allem.\n tokens = filtered_str.split()\n\n # remove '-' from all tokens, except tokens such as '-n'\n filter_hyphens_inside_words = lambda t: t.replace('-', '') if not (len(t) > 1 and t.find('-') == 0 and t[1].isdigit()) else t\n tokens = [filter_hyphens_inside_words(t) for t in tokens]\n \n # remove tokens with only 1 character:\n tokens = [t for t in tokens if len(t) > 1]\n\n # finally, correct spelling mistakes for tokens longer than 3 chars (ie. no abbreviations):\n # takes reaally long\n if correct_spelling_mistakes:\n for tested_token in filter(lambda token: len(token)>3, tokens):\n if not tested_token.isalpha(): # consider only tokens with only letters!\n continue\n\n cor = correction(tested_token)\n if tested_token == cor:\n continue\n \n # spelling mistake found! replace all occurences in the text.\n tokens = [cor if t == tested_token else t for t in tokens]\n # print(f\"'{token}' > {colored(cor, 'red')}\")\n\n if not remove_stop_words:\n return \" \".join(tokens) # remove multiple whitespaces in a row\n\n tokens = [word.replace('=', '') for word in tokens if not word in stop_words] #removes stop words from tokens and '=' from individual tokens\n return \" \".join(tokens)",
"def outlierCleaner(predictions, ages, net_worths):\n errors = [abs(predictions[i]-net_worths[i]) for i in range(len(net_worths))]\n\n # top_errors = list(sorted(errors, reverse=True))[:len(errors)//10]\n\n data = [(ages[i], net_worths[i], errors[i]) for i in range(len(net_worths))]\n\n cleaned_data = list(sorted(data, key=takeError))[:len(errors)*9//10]\n\n\n return cleaned_data",
"def recipe12_6():\n from xml import dom\n def remove_whitespace_nodes(node):\n \"\"\" Removes all of the whitespace-only text descendants of a DOM node. \"\"\"\n # prepare the list of text nodes to remove (and recurse when needed)\n remove_list=[]\n for child in node.childNodes:\n if child.nodeType==dom.Node.TEXT_NODE and not child.data.strip():\n # add this text node to the to-be-removed list\n remove_list.append(chid)\n elif child.hasChildNodes():\n # recurse, it's the simplest way to deal with the subtree\n remove_whitespace_nodes(child)\n # perform the removals\n for node in remove_list:\n node.parentNode.removeChild(node)\n node.unlink()",
"def test_unwanted_words(self) -> None:\n pad_open: bool = False\n for word in self.report.get_words():\n if word.text in self.rules.citation_delimiters:\n pad_open = not pad_open\n continue\n if pad_open:\n continue\n for u_word in self.rules.unwanted_words:\n if word.text == u_word[\"word\"]:\n self.add_error(\n f\"Ordet {word.text} är inte tillåtet, \"\n f\"använd {u_word['alternative']} istället.\",\n word=word,\n )\n break"
] | [
"0.54715055",
"0.5397452",
"0.5360758",
"0.53188324",
"0.5145506",
"0.51151055",
"0.51028174",
"0.5073956",
"0.5072407",
"0.5066381",
"0.5064213",
"0.5051223",
"0.50442636",
"0.5030571",
"0.5029992",
"0.50089633",
"0.5005092",
"0.49978328",
"0.4987968",
"0.4981853",
"0.49782136",
"0.49649915",
"0.49537915",
"0.4953607",
"0.49454063",
"0.4938298",
"0.4932586",
"0.4913626",
"0.49004385",
"0.48949355"
] | 0.5666595 | 0 |
Return the full furigana of a word from the html. | def _get_full_vocabulary_string(self, html):
# The kana represntation of the Jisho entry is contained in this div
text_markup = html.select_one(".concept_light-representation")
upper_furigana = text_markup.select_one(".furigana").find_all('span')
# inset_furigana needs more formatting due to potential bits of kanji sticking together
inset_furigana_list = []
# For some reason, creating the iterator "inset_furigana" and then accessing it here
# causes it to change, like observing it causes it to change. I feel like Schrodinger
for f in text_markup.select_one(".text").children:
cleaned_text = f.string.replace("\n", "").replace(" ", "")
if cleaned_text == "":
continue
elif len(cleaned_text) > 1:
for s in cleaned_text:
inset_furigana_list.append(s)
else:
inset_furigana_list.append(cleaned_text)
children = zip_longest(upper_furigana, inset_furigana_list)
full_word = []
for c in children:
if c[0].text != '':
full_word.append(c[0].text)
elif c[0].text == '' and contains_kana(c[1]):
full_word.append(c[1])
else:
continue
# print(''.join(full_word))
# print("====")
return ''.join(full_word) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def word_of_the_day():\n r = requests.get(\"http://www.urbandictionary.com\") # link is always homepage\n soup = BeautifulSoup(r.content, features=\"html.parser\") # sets up soup\n def_header = \"**\" + soup.find(\"div\", attrs={\"class\": \"def-header\"}).text.replace(\"unknown\",\n \"\") + \"**\" # header is the word we are defining\n # def_header = def_header[0:len(def_header) - 10] # header always ends in \"unknown\" this removes it\n meaning = soup.find(\"div\", attrs={\"class\": \"meaning\"}).text # gets the definition\n # formatting TODO move to controller\n for x in [1, 2, 3, 4, 5, 6, 7, 8, 9]:\n meaning = meaning.replace(str(x) + \". \", \"\\n\" + str(x) + \". \")\n for x in [\"v.\", \"n.\"]:\n meaning = meaning.replace(x, x.upper()[:-1])\n example = soup.find(\"div\", attrs={\"class\": \"example\"}).text # gets the example\n output = def_header + \": \" + \"```\" + meaning + \"\\nEx: \" + example + \"```\" # output string\n output = output.replace(\"&apos\", \"'\") # replaces weird formatting of ' from original\n return output # returns the word, defintion, and example",
"def _extract_dictionary_information(self, entry):\n # Clean up the furigana for the result\n furigana = \"\".join([f.text for f in entry.select(\".kanji\")])\n\n # Cleans the vocabulary word for the result\n vocabulary = self._get_full_vocabulary_string(entry) if not entry.select(\".concept_light-representation .furigana rt\") else entry.select_one(\".concept_light-representation .furigana rt\").text\n\n # The fact that this needs to exist is really annoying.\n # If you go to a page like this: https://jisho.org/word/%E5%8D%B0%E5%BA%A6\n # you'll see that this is a word whose furigana is actually in katakana\n # I didn't realize this happens (it makes sense now), and the huge issue\n # is that there's different HTML in this case, so the previous parsing method\n # doesn't work, so we need a new method...\n\n # Now there could be *really* weird cases where there's a word with both\n # katakana furigana and hiragana furigana (which would be cool), but tbh this\n # I'm satisfied with assuming the whole word corresponds with the whole furigana.\n\n # Grab the difficulty tags for the result\n diff_tags = [m.text for m in entry.select(\".concept_light-tag.label\")]\n\n # Grab each of the meanings associated with the result\n cleaned_meanings = self._isolate_meanings(entry.select_one(\".meanings-wrapper\"))\n meanings = [m.select_one(\".meaning-meaning\") for m in cleaned_meanings]\n meanings_texts = [m.text for m in meanings if m != None]\n\n # Romanize the furigana\n halpern = kana_to_halpern(furigana)\n\n information = {\n \"furigana\": furigana,\n \"vocabulary\": vocabulary,\n \"difficulty_tags\": diff_tags,\n \"meanings\": dict(zip(range(1, len(meanings_texts) + 1), meanings_texts)),\n \"n_meanings\": len(meanings_texts),\n \"halpern\": halpern\n }\n\n return information",
"def profanity_word_handler(word):\n return word[0] + ''.join([settings.CENSOR_PROFANITY_REPLACEMENT_CHARACTER for I in range(len(word)-2)]) + word [-1]",
"def get_spell_tag(page):\n soup = BeautifulSoup(page.text, 'html.parser')\n spell_tag = soup.find('a', {'class': 'spell'})\n\n return spell_tag",
"def fry(word):\n\n # looks for a Y or y which will be (captured) followed and ended by an 'ou'\n match_you = re.match('([Yy])ou$', word)\n\n # First group will be the (captured) group so either 'Y' or 'y'\n if match_you:\n return match_you.group(1) + \"'all\"\n\n # looks for anyword ending in 'ing'\n match_ing = re.search('(.+)ing$', word)\n\n # checks if vowel exists before the 'ing'\n if match_ing:\n vowel_check = re.search('[aeiouy]', match_ing.group(1))\n # First group will be the (captured) group so everything before the 'ing'\n if vowel_check:\n return match_ing.group(1) + \"in'\"\n\n return word",
"def get_result_ft(wd):\n try:\n result = wd.find_element_by_id(\"js-partial\").text\n ft = clean_goals(result)\n ft = ft.split(\",\")\n return ft[1]\n except:\n return \"N/A FT Result\"",
"def filter(self, word):\n \n word = word.lower()\n try:\n self.engine.fetch(word)\n except socket.error:\n raise LemmaAPIError\n part_of_speeches = self.engine.part_of_speeches\n\n \n self.basic_form = word\n for part in part_of_speeches:\n if part == 'verb':\n if self.engine.is_verb_conjugated():\n if not self.conEngine.is_verb_regular(word, self.engine.get_basic_verb()):\n self.basic_form = self.engine.get_basic_verb()\n return word\n else:\n self.basic_form = self.engine.get_basic_verb()\n\n elif part == 'noun':\n if self.engine.is_noun_plural():\n if not self.conEngine.is_noun_regular(word, self.engine.get_singular_noun()):\n self.basic_form = self.engine.get_singular_noun() \n return word\n else:\n self.basic_form = self.engine.get_singular_noun()\n\n return self.basic_form",
"def define(word: str):\n try:\n r = requests.get(\"http://www.urbandictionary.com/define.php?term={}\".format(word)) # goes to link for word\n soup = BeautifulSoup(r.content, features=\"html.parser\") # sets up soup\n def_header = \"**\" + soup.find(\"div\", attrs={\"class\": \"def-header\"}).text.replace(\"unknown\",\n \"\") + \"**\"\n # header is the word we are defining\n meaning = soup.find(\"div\", attrs={\"class\": \"meaning\"}).text # gets the definition\n for x in [1, 2, 3, 4, 5, 6, 7, 8, 9]:\n meaning = meaning.replace(str(x) + \". \", \"\\n\" + str(x) + \". \")\n meaning = \"```\" + meaning + \"```\"\n example = soup.find(\"div\", attrs={\"class\": \"example\"}).text # gets the example\n for x in [1, 2, 3, 4, 5, 6, 7, 8, 9]:\n example = example.replace(str(x) + \". \", \"\\n\" + str(x) + \". \")\n output = def_header + \": \" + meaning + \" \" + \"\\nExample: \" + \"```\" + example + \"```\" # output string\n output = output.replace(\"&apos\", \"'\") # replaces weird formatting of ' from original\n return output # returns the word, defintion, and example\n except AttributeError:\n return \"No results\"",
"def PROPER(text):\n return text.title()",
"def get_from_html_text(resultset, target):\n index = resultset.find(target)+len(target)+2\n return resultset[index:index+140].split(\"'\")[0].lower()",
"def fuzz(text):\r\n\r\n return ' '.join([fuzz_word(word) for word in text.split()])",
"def parser(self, value):\n value = self.lowercase(value)\n value = self.punctuation(value)\n value = self.tokenization(value)\n value = self.remove_stopwords(value)\n value.append(\"wiki\")\n searched_words = \" \"\n return searched_words.join(value)",
"def med_in_hindi(word):\r\n\treturn int(med(correction(word),word))",
"def find_abecedarian_words():\n pass",
"def first(word):\n\treturn word[0]",
"def review_to_word(review):\n # Get text only\n review_text = BeautifulSoup(review).get_text()\n # Remove non-letters \n letters_only = re.sub(\"[^a-zA-Z]\", \" \", review_text)\n # Convert to lower case, split into individual words\n words = letters_only.lower().split()\n # searching in a set rather than a list is faster in python\n stops = set(stopwords.words(\"english\"))\n # Remove stop words\n meaningful_words = [w for w in words if not w in stops]\n # Join the words back into one string\n return( \" \".join( meaningful_words ))",
"def _get_faction(text):\n for faction in _FACTIONS:\n if faction in text:\n return faction\n return None",
"def med_in_english(word):\r\n\treturn int(med(TextBlob(word).correct(), word))",
"async def get_demon(self, ctx, game: str, name: str):\n\n name = await self.nearest_spelling(ctx, name.lower(), self.names[game])\n if name is not None:\n name = \" \".join([i.capitalize() for i in name.split()])\n return name",
"def first(word):\n return word[0]",
"def return_wikipedia_term(res):\n rst = []\n if res['spotted']:\n for s in [s['spot'] for s in res['value']['spots']]:\n r = TagMeService.retrieve_taggings(s.encode('utf-8'), method='POST')\n if len(r['annotations']) != 0:\n for n in r['annotations']:\n if 'title' in n.keys():\n title = n['title'].replace(' ', '_') # strip whitespaces from dbpedia tag\n rst.append(title)\n else:\n print \"Cannot find title in annotations: \" + str(n)\n return rst",
"def get_result_office(soup):\n return soup.find('h2').text",
"def correct_word(word, cutoff):\n if WORDS is not None:\n result = difflib.get_close_matches(word, WORDS, n=1, cutoff=cutoff)\n if len(result) > 0:\n return result[0]\n\n return word",
"def get_word(self) -> str: \n #return str(choice(word_list).upper())\n return \"ANONYMOUS\"",
"def get_abbr(self, word):\n assert (self.collection is not None)\n\n for conv in self.collection:\n ln = conv.split(\"*\")\n if word == ln[0]:\n return ln[1]\n\n return None",
"def stem(self, word):\n word = word.lower()\n\n step1_success = False\n\n # All acute accents are replaced by grave accents.\n word = (word.replace(u(\"\\xE1\"), u(\"\\xE0\"))\n .replace(u(\"\\xE9\"), u(\"\\xE8\"))\n .replace(u(\"\\xED\"), u(\"\\xEC\"))\n .replace(u(\"\\xF3\"), u(\"\\xF2\"))\n .replace(u(\"\\xFA\"), u(\"\\xF9\")))\n\n # Every occurrence of 'u' after 'q'\n # is put into upper case.\n for i in range(1, len(word)):\n if word[i - 1] == \"q\" and word[i] == \"u\":\n word = \"\".join((word[:i], \"U\", word[i + 1:]))\n\n # Every occurrence of 'u' and 'i'\n # between vowels is put into upper case.\n for i in range(1, len(word) - 1):\n if word[i - 1] in self.__vowels and word[i + 1] in self.__vowels:\n if word[i] == \"u\":\n word = \"\".join((word[:i], \"U\", word[i + 1:]))\n elif word[i] == \"i\":\n word = \"\".join((word[:i], \"I\", word[i + 1:]))\n\n r1, r2 = self._r1r2_standard(word, self.__vowels)\n rv = self._rv_standard(word, self.__vowels)\n\n # STEP 0: Attached pronoun\n for suffix in self.__step0_suffixes:\n if rv.endswith(suffix):\n if rv[-len(suffix) - 4:-len(suffix)] in (\"ando\", \"endo\"):\n word = word[:-len(suffix)]\n r1 = r1[:-len(suffix)]\n r2 = r2[:-len(suffix)]\n rv = rv[:-len(suffix)]\n\n elif (rv[-len(suffix) - 2:-len(suffix)] in\n (\"ar\", \"er\", \"ir\")):\n word = \"\".join((word[:-len(suffix)], \"e\"))\n r1 = \"\".join((r1[:-len(suffix)], \"e\"))\n r2 = \"\".join((r2[:-len(suffix)], \"e\"))\n rv = \"\".join((rv[:-len(suffix)], \"e\"))\n break\n\n # STEP 1: Standard suffix removal\n for suffix in self.__step1_suffixes:\n if word.endswith(suffix):\n if suffix == \"amente\" and r1.endswith(suffix):\n step1_success = True\n word = word[:-6]\n r2 = r2[:-6]\n rv = rv[:-6]\n\n if r2.endswith(\"iv\"):\n word = word[:-2]\n r2 = r2[:-2]\n rv = rv[:-2]\n\n if r2.endswith(\"at\"):\n word = word[:-2]\n rv = rv[:-2]\n\n elif r2.endswith((\"os\", \"ic\")):\n word = word[:-2]\n rv = rv[:-2]\n\n elif r2 .endswith(\"abil\"):\n word = word[:-4]\n rv = rv[:-4]\n\n elif (suffix in (\"amento\", \"amenti\",\n \"imento\", \"imenti\") and\n rv.endswith(suffix)):\n step1_success = True\n word = word[:-6]\n rv = rv[:-6]\n\n elif r2.endswith(suffix):\n step1_success = True\n if suffix in (\"azione\", \"azioni\", \"atore\", \"atori\"):\n word = word[:-len(suffix)]\n r2 = r2[:-len(suffix)]\n rv = rv[:-len(suffix)]\n\n if r2.endswith(\"ic\"):\n word = word[:-2]\n rv = rv[:-2]\n\n elif suffix in (\"logia\", \"logie\"):\n word = word[:-2]\n rv = word[:-2]\n\n elif suffix in (\"uzione\", \"uzioni\",\n \"usione\", \"usioni\"):\n word = word[:-5]\n rv = rv[:-5]\n\n elif suffix in (\"enza\", \"enze\"):\n word = \"\".join((word[:-2], \"te\"))\n rv = \"\".join((rv[:-2], \"te\"))\n\n elif suffix == u(\"it\\xE0\"):\n word = word[:-3]\n r2 = r2[:-3]\n rv = rv[:-3]\n\n if r2.endswith((\"ic\", \"iv\")):\n word = word[:-2]\n rv = rv[:-2]\n\n elif r2.endswith(\"abil\"):\n word = word[:-4]\n rv = rv[:-4]\n\n elif suffix in (\"ivo\", \"ivi\", \"iva\", \"ive\"):\n word = word[:-3]\n r2 = r2[:-3]\n rv = rv[:-3]\n\n if r2.endswith(\"at\"):\n word = word[:-2]\n r2 = r2[:-2]\n rv = rv[:-2]\n\n if r2.endswith(\"ic\"):\n word = word[:-2]\n rv = rv[:-2]\n else:\n word = word[:-len(suffix)]\n rv = rv[:-len(suffix)]\n break\n\n # STEP 2: Verb suffixes\n if not step1_success:\n for suffix in self.__step2_suffixes:\n if rv.endswith(suffix):\n word = word[:-len(suffix)]\n rv = rv[:-len(suffix)]\n break\n\n # STEP 3a\n if rv.endswith((\"a\", \"e\", \"i\", \"o\", u(\"\\xE0\"), u(\"\\xE8\"),\n u(\"\\xEC\"), 
u(\"\\xF2\"))):\n word = word[:-1]\n rv = rv[:-1]\n\n if rv.endswith(\"i\"):\n word = word[:-1]\n rv = rv[:-1]\n\n # STEP 3b\n if rv.endswith((\"ch\", \"gh\")):\n word = word[:-1]\n\n word = word.replace(\"I\", \"i\").replace(\"U\", \"u\")\n return word",
"def urban_dict(word):\n\n url = \"https://mashape-community-urban-dictionary.p.rapidapi.com/define\"\n\n querystring = {}\n\n querystring[\"term\"] = word\n\n headers = config.headers\n\n response = requests.request(\"GET\", url, headers=headers, params=querystring)\n\n print(response.text)",
"def parse_bestfu_alias(alias, gfyear):\n\n alias = alias.upper()\n prefix_pattern = r\"(?P<pre>(?:[KGBOT][KGBOT0-9]*)?)\"\n postfix_pattern = r\"(?P<post>(?:[0-9]{2}|[0-9]{4})?)\"\n letter = '[A-Z]|Æ|Ø|Å|AE|OE|AA'\n letter_map = dict(AE='Æ', OE='Ø', AA='Å')\n title_patterns = [\n ('BEST', 'CERM|FORM|INKA|KASS|NF|PR|SEKR|VC'),\n ('FU', '(?P<a>E?FU)(?P<b>%s)(?P<c>%s)' % (letter, letter)),\n ]\n for kind, p in title_patterns:\n pattern = '^%s(?P<root>%s)%s$' % (prefix_pattern, p, postfix_pattern)\n mo = re.match(pattern, alias)\n if mo is not None:\n period = get_period(mo.group(\"pre\"), mo.group(\"post\"), gfyear)\n root = mo.group('root')\n if kind == 'FU':\n fu_kind = mo.group('a')\n letter1 = mo.group('b')\n letter2 = mo.group('c')\n assert root == fu_kind + letter1 + letter2\n # Translate AE OE AA\n letter1_int = letter_map.get(letter1, letter1)\n letter2_int = letter_map.get(letter2, letter2)\n root = fu_kind + letter1_int + letter2_int\n return kind, root, period\n raise ValueError(alias)",
"def do(text):\n return freeling_stemming(text)",
"def urbandic(self, irc, msg, args, req):\n dict = {' ':'+'}\n req = self.replace_all(req, dict)\n if req:\n url = 'http://www.urbandictionary.com/define.php?term=' + req\n else:\n url = 'http://www.urbandictionary.com/random.php'\n try:\n website = urllib2.urlopen(url)\n except urllib2.HTTPError, e:\n irc.reply('A problem occured. Please try again.')\n return\n soup = BeautifulSoup(website,\n convertEntities=BeautifulSoup.HTML_ENTITIES)\n td_word = soup.findAll(name='td',\n attrs={'class':'word'},\n limit=1)\n div_def = soup.findAll(name='div',\n attrs={'class':'definition'},\n limit=1)\n for t in td_word:\n if t.string:\n word = string.replace(t.string, '\\n', '')\n irc.reply('Word: ' + word, prefixNick=False)\n else:\n irc.reply('No word found.')\n return\n defn = ''\n for d in div_def:\n for c in d.contents:\n if c.string:\n defn += c.string\n irc.reply('Def.: ' + defn, prefixNick=False)"
] | [
"0.6115667",
"0.5743542",
"0.55236655",
"0.53933805",
"0.5346994",
"0.5319101",
"0.5280804",
"0.5258494",
"0.5191958",
"0.5186769",
"0.5165909",
"0.51648223",
"0.5127701",
"0.5116987",
"0.51016253",
"0.50996876",
"0.50930995",
"0.5036981",
"0.502899",
"0.5027194",
"0.5025558",
"0.50213337",
"0.49934414",
"0.496753",
"0.49599412",
"0.4952158",
"0.49286336",
"0.4920616",
"0.49068043",
"0.48811972"
] | 0.6967511 | 0 |
Prefetch the approvals, so that we don't do a query per-prescription on the regional summary page. | def queryset(self, request):
qs = super(PrescriptionAdmin, self).queryset(request)
        qs = qs.prefetch_related('approval_set')  # prefetch_related returns a new queryset, so reassign
return qs | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_prefetched_queryset(self, *args, **kwargs):\n return (\n super()\n .get_prefetched_queryset(*args, **kwargs)\n .select_related(\"user\", \"poll\")\n .prefetch_related(\"votes\")\n )",
"def get_prefetched_queryset(self, *args, **kwargs):\n\n return (\n super()\n .get_prefetched_queryset(*args, **kwargs)\n .prefetch_related(\n \"assignment_related_users\",\n \"agenda_items\",\n \"lists_of_speakers\",\n \"tags\",\n \"attachments\",\n \"polls\",\n \"polls__options\",\n )\n )",
"def get_prefetched_queryset(self, *args, **kwargs):\n return (\n super()\n .get_prefetched_queryset(*args, **kwargs)\n .select_related(\"assignment\")\n .prefetch_related(\n \"options\", \"options__user\", \"options__votes\", \"voted\", \"groups\"\n )\n )",
"def prefetch(self, oids):\n self.timeline.reset()\n self.timeline.start(\"prefetch\")\n fetch(oids)\n self.timeline.end(\"prefetch\")",
"def select_proposals(self):\r\n print \"Selecting proposals... \"\r\n global MAX_NUMBER_PROJECTS\r\n proposals_sorted = sorted(self.proposals, key=lambda project:project.likes, reverse=True)\r\n for i in range(MAX_NUMBER_PROJECTS):\r\n self.projects_for_vote.append(proposals_sorted[i])",
"def get_prefetched_queryset(self, *args, **kwargs):\n return (\n super()\n .get_prefetched_queryset(*args, **kwargs)\n .select_related(\"user\", \"option\", \"option__poll\")\n )",
"def reap_recent_assignments():\n from anubis.config import config\n\n recent_assignments = Assignment.query.filter(\n Assignment.release_date > datetime.now(),\n Assignment.due_date > datetime.now() - config.STATS_REAP_DURATION,\n ).all()\n\n print(json.dumps({\n 'reaping assignments:': [assignment.data for assignment in recent_assignments]\n }, indent=2))\n\n for assignment in recent_assignments:\n for submission in Submission.query.filter(\n Submission.assignment_id == assignment.id,\n Submission.build == None,\n ).all():\n if submission.build is None:\n init_submission(submission)\n enqueue_autograde_pipeline(submission.id)\n\n for assignment in recent_assignments:\n bulk_autograde(assignment.id)",
"def test_get_proposal_demand(self):\n pass",
"def testPrefetchProvenance(self):\n try:\n provU = ProvenanceProvider(self.__cfgOb, self.__cachePath)\n pD = provU.fetch()\n logger.debug(\"pD keys %r\", list(pD.keys()))\n self.assertGreaterEqual(len(pD.keys()), 1)\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()",
"def select_approved_projects(self):\r\n print \"Selecting approved projects... \"\r\n global ANNUAL_BUDGET\r\n \r\n projects_citizens_sorted = sorted(self.projects_for_vote, key=lambda project:project.units, reverse=True)\r\n projects_reps_sorted = sorted(self.projects_for_vote, key=lambda project:project.p_units, reverse=True)\r\n budget_sum = 0\r\n \r\n for p in projects_citizens_sorted:\r\n budget_sum += p.budget\r\n if budget_sum <= ANNUAL_BUDGET/2:\r\n self.projects_approved.append(p)\r\n\r\n budget_sum = 0\r\n for p in projects_reps_sorted:\r\n if p not in self.projects_approved:\r\n budget_sum += p.budget\r\n if budget_sum <= ANNUAL_BUDGET/2:\r\n self.projects_approved.append(p)\r\n\r\n\r\n \r\n# raw_input(\"select_approved_projects - antes\")\r\n for p in projects_citizens_sorted:\r\n print p\r\n print \"\\nReps\\n\"\r\n for p in projects_reps_sorted:\r\n print p\r\n print \"\\nApproved\\n\"\r\n for p in self.projects_approved:\r\n print p\r\n\r\n raw_input(\"select_approved_projects - depois\")",
"def get_queryset(self):\n return Initiative.objects.filter(objective__perspective__description='Financial').order_by('objective')",
"def revise_agreements_expirations_planned(self):\n for agreement in self.search([('prolong', '=', 'unlimited')]):\n if agreement.next_expiration_date <= fields.Date.today():\n # force recalculate next_expiration_date\n agreement.write({'prolong': 'unlimited'})\n return True",
"def paginated(self) -> global___Snippet.Paginated:",
"def get_queryset(self):\n return Initiative.objects.order_by('objective')",
"def queryset(self, request):\n qs = super(AdRepConsumerAdmin, self).queryset(request)\n qs = AdRepConsumer.objects.select_related().filter(id__in=qs\n ).defer('consumer__site__envelope',\n 'consumer__site__geom',\n 'consumer__site__point')\n return qs",
"def get_queryset(self, request):\n qs = super().get_queryset(request)\n if not request.user.is_superuser and request.user.approved_organisations.exists():\n qs = qs.filter(organisation__in=request.user.approved_organisations.all()).distinct()\n return qs",
"def assign_priorities_and_dp(self):\r\n taskset_copy = copy(self.taskset)\r\n tasks = taskset_copy.sorted_by_crit()",
"def get_queryset(self):\n return Initiative.objects.filter(objective__perspective__description='Customer').order_by('objective')",
"def get_queryset(self):\n # Retrieve post request payload\n json_request = self.request.query_params\n\n # Retrieve fiscal_year & agency_identifier from request\n fiscal_year = json_request.get(\"fiscal_year\")\n funding_agency_id = json_request.get(\"funding_agency_id\")\n\n # Raise exception if required query parameter not provided\n if not funding_agency_id or not fiscal_year:\n raise InvalidParameterException(\"Missing required query parameters: fiscal_year & funding_agency_id\")\n\n queryset = (\n AppropriationAccountBalances.objects.filter(\n treasury_account_identifier__funding_toptier_agency__agency__id=funding_agency_id,\n submission__reporting_fiscal_year=fiscal_year,\n submission__is_final_balances_for_fy=True,\n )\n .annotate(\n account_title=F(\"treasury_account_identifier__federal_account__account_title\"),\n id=F(\"treasury_account_identifier__federal_account\"),\n )\n .values(\"id\", \"account_title\")\n .annotate(\n account_number=F(\"treasury_account_identifier__federal_account__federal_account_code\"),\n obligated_amount=Sum(\"obligations_incurred_total_by_tas_cpe\"),\n )\n .order_by(\"-obligated_amount\", \"treasury_account_identifier__federal_account__federal_account_code\")\n )\n\n return queryset",
"def queryset(self, request):\n qs = super(AdRepAdmin, self).queryset(request)\n qs = AdRep.objects.select_related().filter(id__in=qs\n ).defer('site__envelope',\n 'site__geom',\n 'site__point')\n return qs",
"def _fetch_items(self):\n url = self._api.router.publication['search'].format(\n project_id=self.project_id\n )\n res_data = self._api.post(url, data=self.search_param)\n self.total = res_data['total']\n self._items = (\n Publication(item, self.project_id)\n for item in res_data['hits']\n )\n div = self.total // self.search_param['limit']\n reste = self.total % self.search_param['limit']\n self.total_page = div\n if reste != 0: self.total_page += 1\n self.search_param = self.search_param.next_page()",
"def get_queryset(self):\n reviews = Review.objects \\\n .filter(reviewer=self.request.user) \\\n .filter(closed_on=None) \\\n .order_by('due_date') \\\n .select_related()\n\n reviews = self.step_filter(reviews)\n\n self.search_form = self.get_search_form(reviews)\n reviews = self.search_form.filter_reviews()\n\n return reviews",
"def scrape_trustpilot_reviews(company, PATH, n_pages):\n\n # Review properties\n names = []\n ratings = []\n headers = []\n reviews = []\n dates = []\n locations = []\n\n # Setup monitoring variables\n start_time = time()\n requests = 0\n request_limit = 50\n\n # For each page specified, get reviews\n for p in range(1, n_pages+1):\n\n url = f'{PATH}{p}'\n response = get(url)\n print(f'URL: {url}')\n\n # Pause the loop to limit access to the server\n sleep(randint(8, 15))\n\n # Monitor the request\n requests += 1\n elapsed_time = time() - start_time\n print('Request:{}; Frequency: {} requests/s'.format(requests, requests / elapsed_time))\n os.system('clear')\n\n if response.status_code != 200:\n warn(f'Request: {requests}l; Status Code: {response}')\n\n if requests > request_limit:\n warn('Number of requests have exceeded expectation')\n break\n\n # Identify page areas of interest\n page_html = BeautifulSoup(response.text, 'html.parser')\n review_containers = page_html.find_all('div', class_='review-content__body')\n user_containers = page_html.find_all('div', class_='consumer-information__details')\n rating_container = page_html.find_all('div', class_='review-content-header')\n dates_container = page_html.find_all(\"section\", {\"class\": \"review__content\"})\n profile_container = page_html.find_all('aside', class_='review__consumer-information')\n\n print(f'Containers for request: {len(rating_container)}')\n for x in range(len(rating_container)):\n review_c = review_containers[x]\n headers.append(review_c.h2.a.text)\n r = review_c.p\n if r:\n reviews.append(review_c.p.text)\n else:\n reviews.append('')\n\n reviewer = user_containers[x]\n names.append(reviewer.div.text)\n\n rating = rating_container[x]\n ratings.append(rating.img.get('alt'))\n\n date = dates_container[x]\n date_json = json.loads(date.find('script').string)\n date_j = date_json['publishedDate']\n dates.append(date_j)\n\n prof = profile_container[x]\n link = 'https://www.trustpilot.com' + prof.a['href']\n c_profile = get(f'{link}')\n if c_profile:\n profile_html = BeautifulSoup(c_profile.text, 'html.parser')\n cust_container = profile_html.find('div', class_='user-summary-location')\n locations.append(cust_container.text)\n\n reviews_df = pd.DataFrame(\n {\n 'Company': company,\n 'Header': headers,\n 'Review': reviews,\n 'Rating': ratings,\n 'Name': names,\n 'Location': locations,\n 'Date': dates\n }\n )\n\n reviews_df.Header = clean_string(reviews_df.Header)\n reviews_df.Review = clean_string(reviews_df.Review)\n reviews_df.Name = clean_string(reviews_df.Name)\n reviews_df.Location = clean_string(reviews_df.Location)\n reviews_df.Location = reviews_df.Location.apply(lambda x: x.split(',', 1)[-1])\n reviews_df.Date = pd.to_datetime(reviews_df.Date)\n\n return reviews_df",
"def get_queryset(self):\n return Initiative.objects.filter(objective__perspective__description='Capacity').order_by('objective')",
"def queryset(self, request):\n qs = super(AdRepAdvertiserAdmin, self).queryset(request)\n qs = AdRepAdvertiser.objects.select_related().filter(id__in=qs\n ).defer('advertiser__site__envelope',\n 'advertiser__site__geom',\n 'advertiser__site__point')\n return qs",
"def get_approved(self):\n return self.filter(verified=True, blacklisted=False,\n flags__lte=ExamFlag.LIMIT)",
"def autocomplete(request):\n courses = (\n Course.objects.filter(course_filters_pcr_allow_xlist)\n .order_by(\"semester\")\n .values(\"full_code\", \"title\")\n .distinct()\n )\n course_set = sorted(\n [\n {\n \"title\": course[\"full_code\"],\n \"desc\": [course[\"title\"]],\n \"url\": f\"/course/{course['full_code']}\",\n }\n for course in courses\n ],\n key=lambda x: x[\"title\"],\n )\n departments = Department.objects.all().values(\"code\", \"name\")\n department_set = sorted(\n [\n {\n \"title\": dept[\"code\"],\n \"desc\": dept[\"name\"],\n \"url\": f\"/department/{dept['code']}\",\n }\n for dept in departments\n ],\n key=lambda d: d[\"title\"],\n )\n\n instructors = (\n Instructor.objects.filter(\n id__in=Subquery(Section.objects.filter(section_filters_pcr).values(\"instructors__id\"))\n )\n .distinct()\n .values(\"name\", \"id\", \"section__course__department__code\")\n )\n instructor_set = {}\n for inst in instructors:\n if inst[\"id\"] not in instructor_set:\n instructor_set[inst[\"id\"]] = {\n \"title\": inst[\"name\"],\n \"desc\": set([inst[\"section__course__department__code\"]]),\n \"url\": f\"/instructor/{inst['id']}\",\n }\n instructor_set[inst[\"id\"]][\"desc\"].add(inst[\"section__course__department__code\"])\n\n def join_depts(depts):\n try:\n return \",\".join(sorted(list(depts)))\n except TypeError:\n return \"\"\n\n instructor_set = sorted(\n [\n {\n \"title\": v[\"title\"],\n \"desc\": join_depts(v[\"desc\"]),\n \"url\": v[\"url\"],\n }\n for v in instructor_set.values()\n ],\n key=lambda x: x[\"title\"],\n )\n\n return Response(\n {\"courses\": course_set, \"departments\": department_set, \"instructors\": instructor_set}\n )",
"def refresh(self):\n self._policies = self._get_policies()",
"def get_queryset(self):\n queryset = []\n if 'owner_id' in self.request.GET:\n queryset = (AdoptionProposal.objects\n .filter(owner__id=self.request.GET['owner_id'],\n was_deleted=False))\n elif 'all_adoptions' in self.request.GET:\n queryset = (AdoptionProposal.objects\n .filter(date__gt=datetime.now()-timedelta(days=15),\n was_deleted=False, status=2))\n else:\n queryset = AdoptionProposal.objects.filter(was_deleted=False)\n return queryset",
"def setup_eager_loading(queryset):\n queryset = queryset.select_related('user')\n return queryset"
] | [
"0.54367656",
"0.5391246",
"0.5300379",
"0.52060467",
"0.5086948",
"0.50608206",
"0.5014186",
"0.50046194",
"0.4998559",
"0.49390295",
"0.49279544",
"0.49236992",
"0.48870006",
"0.48863047",
"0.48823196",
"0.48582858",
"0.48275545",
"0.4819519",
"0.48050737",
"0.4800241",
"0.47424582",
"0.47132236",
"0.4700224",
"0.46751013",
"0.46668842",
"0.4650516",
"0.4640751",
"0.4637339",
"0.46212274",
"0.46164793"
] | 0.6178668 | 0 |
Override the redirect url after successful save of an existing burn plan. | def response_post_save_change(self, request, obj):
url = reverse('admin:prescription_prescription_detail',
args=[str(obj.id)])
return HttpResponseRedirect(url) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_success_url(self):\n return reverse('warehouse-list')",
"def edit_redirect_url(self):\n return url_for(self.edit_redirect_to_view)",
"def get_success_url(self):\n if self.success_url:\n url = self.success_url % self.object.__dict__\n elif hasattr(self.object,'url'):\n return self.object.url\n else:\n try:\n url = self.object.get_absolute_url()\n except AttributeError:\n raise ImproperlyConfigured(\n \"No URL to redirect to. Either provide a url or define\"\n \" a get_absolute_url method on the Model.\")\n return url",
"def get_success_url(self):\n if self.success_url:\n url = self.success_url % self.object.__dict__\n elif hasattr(self.object,'url'):\n return self.object.url\n else:\n try:\n url = self.object.get_absolute_url()\n except AttributeError:\n raise ImproperlyConfigured(\n \"No URL to redirect to. Either provide a url or define\"\n \" a get_absolute_url method on the Model.\")\n return url",
"def __get_redirect_url(self):\n if self.get_submit_save_and_continue_edititing_button_name() not in self.request.POST:\n return self.request.cradmin_app.reverse_appindexurl()\n return self.request.cradmin_app.reverse_appurl(\n 'groupcomment-edit',\n args=self.args,\n kwargs=self.kwargs)",
"def response_post_save_change(self, request, obj):\n opts = self.model._meta\n\n if \"next\" in request.GET:\n return HttpResponseRedirect(request.GET['next'])\n\n if self.has_change_permission(request, None):\n post_url = reverse('admin:%s_%s_changelist' %\n (opts.app_label, opts.module_name),\n args=(quote(self.prescription.pk),),\n current_app=self.admin_site.name)\n else:\n post_url = reverse('admin:index',\n current_app=self.admin_site.name)\n\n return HttpResponseRedirect(post_url)",
"def create_redirect_url(self):\n return url_for(self.create_redirect_to_view)",
"def get_redirect_url(self, *args, **kwargs):\n redirect = kwargs['route']\n self.permanent = redirect.permanent\n return redirect.target.url",
"def response_post_save_add(self, request, obj):\n opts = self.model._meta\n\n if \"next\" in request.GET:\n return HttpResponseRedirect(request.GET['next'])\n\n if self.has_change_permission(request, None):\n post_url = reverse('admin:%s_%s_changelist' %\n (opts.app_label, opts.module_name),\n args=(quote(self.prescription.pk),),\n current_app=self.admin_site.name)\n else:\n post_url = reverse('admin:index',\n current_app=self.admin_site.name)\n\n return HttpResponseRedirect(post_url)",
"def get_success_url(self):\n return reverse('logs-jobs')",
"def get_success_url(self):\n return reverse('logs-jobs')",
"def get_success_url(self):\n return reverse('outward-list')",
"def get_success_url(self):\n return reverse('outward-list')",
"def action(self):\r\n return braintree.TransparentRedirect.url()",
"def get_success_url(self):\n return reverse('blog-detail', kwargs={'pk': self.kwargs['pk'], })",
"def get_success_url(self):\n return reverse('rol_list')",
"def get_success_url(self):\n kwargs = {\n key: value for key, value in self.kwargs.iteritems() if value\n }\n return reverse('boards:{}'.format(self.url_name), kwargs=kwargs)",
"def get_success_url(self):\n url = reverse('meals')\n return url",
"def get_success_url(self):\n return \"/\"",
"def get_success_url(self):\n\n # get the pk for this quote\n profile_pk = self.kwargs['profile_pk']\n status_pk = self.kwargs['status_pk']\n\n # # reverse to show the person page.\n return reverse('show_profile_page', kwargs={'pk':profile_pk})",
"def get_success_url(self):\n return reverse('overseasinvoice-list')",
"def get_success_url(self):\n return reverse('overseasinvoice-list')",
"def get_success_url(self):\n url_slug = source_to_url_slug(self.source)\n return reverse('activity-management', kwargs={'source': url_slug})",
"def get_success_url(self):\n return reverse('post-detail', kwargs={'pk': self.kwargs['pk'], })",
"def get_redirect_url(self):\n return reverse('accounts:home')",
"def get_success_url(self):\n return reverse('account:details',\n kwargs={'username': self.request.user.username})",
"def get_success_url(self):\n return reverse('post-detail', kwargs={'pk': self.kwargs['pk'],})",
"def get_success_url(self):\n url = reverse(\n \"qa:question-create\"\n ) + \"?success=true\"\n return url",
"def get_success_url(self):\n return reverse_lazy('grades:list') + '?ok'",
"def get_success_url(self):\n return reverse('parts-list')"
] | [
"0.66047686",
"0.6257987",
"0.6155057",
"0.6155057",
"0.61462444",
"0.61178875",
"0.61144376",
"0.6113364",
"0.60519975",
"0.6047323",
"0.6047323",
"0.6019146",
"0.6019146",
"0.59772176",
"0.5965734",
"0.5944104",
"0.59375286",
"0.591943",
"0.59057283",
"0.58903694",
"0.5881203",
"0.5881203",
"0.5880297",
"0.58740294",
"0.5868768",
"0.58515465",
"0.5844424",
"0.5842238",
"0.58033335",
"0.5788313"
] | 0.6583407 | 1 |
View to manage corporate approval of an ePFP. | def corporate_approve(self, request, object_id, extra_context=None):
obj = self.get_object(request, unquote(object_id))
if request.method == 'POST':
url = reverse('admin:prescription_prescription_detail',
args=[str(obj.id)])
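            # Cancel: go back to the prescription detail page without making changes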
if request.POST.get('_cancel'):
return HttpResponseRedirect(url)
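            # Save: submit the plan for corporate approval, or apply the approval if already submitted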
if request.POST.get('_save'):
if (obj.planning_status == obj.PLANNING_DRAFT and obj.can_corporate_approve):
obj.planning_status = obj.PLANNING_SUBMITTED
obj.planning_status_modified = timezone.now()
obj.save()
self.message_user(
request, "Successfully submitted for corporate approval.")
return HttpResponseRedirect(url)
if obj.planning_status == obj.PLANNING_SUBMITTED:
# Only ePFP Application Administrator can apply corporate approval
                    if not request.user.has_perm(
                            'prescription.can_corporate_approve'):
                        raise PermissionDenied
obj.planning_status = obj.PLANNING_APPROVED
obj.planning_status_modified = timezone.now()
obj.save()
self.message_user(
request, "Corporate approval successful.")
return HttpResponseRedirect(url)
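            # Delete: withdraw corporate approval and return the plan to draft (requires can_admin)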
elif request.POST.get('_delete'):
if (obj.planning_status == obj.PLANNING_APPROVED and request.user.has_perm('prescription.can_admin')):
obj.planning_status = obj.PLANNING_DRAFT
obj.planning_status_modified = timezone.now()
obj.save()
self.message_user(
request, "Successfully deleted for corporate approval.")
return HttpResponseRedirect(url)
context = {
'current': obj,
}
return TemplateResponse(request, self.corporate_approval_template,
context, current_app=self.admin_site.name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def office_edit_process_view(request):\n status = ''\n success = True\n # admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer\n authority_required = {'verified_volunteer'}\n if not voter_has_authority(request, authority_required):\n return redirect_to_sign_in_page(request, authority_required)\n\n ballotpedia_office_id = request.POST.get('ballotpedia_office_id', False) # Related to office_held\n ballotpedia_race_id = request.POST.get('ballotpedia_race_id', False) # Related to contest_office\n ballotpedia_race_office_level = request.POST.get('ballotpedia_race_office_level', False)\n ballotpedia_office_name = request.POST.get('ballotpedia_office_name', False)\n ballotpedia_is_marquee = request.POST.get('ballotpedia_is_marquee', False)\n ctcl_uuid = request.POST.get('ctcl_uuid', False)\n district_id = request.POST.get('district_id', False)\n google_civic_office_name = request.POST.get('google_civic_office_name', False)\n google_civic_office_name2 = request.POST.get('google_civic_office_name2', False)\n google_civic_office_name3 = request.POST.get('google_civic_office_name3', False)\n google_civic_office_name4 = request.POST.get('google_civic_office_name4', False)\n google_civic_office_name5 = request.POST.get('google_civic_office_name5', False)\n google_civic_election_id = request.POST.get('google_civic_election_id', 0)\n ocd_division_id = request.POST.get('ocd_division_id', False)\n office_held_we_vote_id = request.POST.get('office_held_we_vote_id', False)\n office_id = convert_to_int(request.POST.get('office_id', 0))\n office_name = request.POST.get('office_name', False)\n primary_party = request.POST.get('primary_party', False)\n state_code = request.POST.get('state_code', False)\n vote_usa_office_id = request.POST.get('vote_usa_office_id', False)\n is_battleground_race = request.POST.get('is_battleground_race', False)\n remove_duplicate_process = request.POST.get('remove_duplicate_process', False)\n redirect_to_contest_office_list = convert_to_int(request.POST.get('redirect_to_contest_office_list', 0))\n\n election_state = ''\n if state_code is not False:\n election_state = state_code\n elif google_civic_election_id:\n election_manager = ElectionManager()\n results = election_manager.retrieve_election(google_civic_election_id)\n if results['election_found']:\n election = results['election']\n election_state = election.get_election_state()\n\n # Check to see if this office is already in the database\n office_on_stage_found = False\n office_on_stage = None\n try:\n office_query = ContestOffice.objects.filter(id=office_id)\n if len(office_query):\n office_on_stage = office_query[0]\n office_on_stage_found = True\n except Exception as e:\n handle_record_not_found_exception(e, logger=logger)\n success = False\n\n if success:\n try:\n if office_on_stage_found:\n office_on_stage_id = office_on_stage.id\n google_civic_election_id = office_on_stage.google_civic_election_id\n else:\n # Create new\n office_on_stage = ContestOffice(\n office_name=office_name,\n google_civic_election_id=google_civic_election_id,\n state_code=election_state,\n )\n office_on_stage_id = office_on_stage.id\n google_civic_election_id = office_on_stage.google_civic_election_id\n office_on_stage_found = True\n if office_on_stage_found:\n # Update\n # Removing this limitation: convert_to_int(office_on_stage.google_civic_election_id) >= 1000000 and\n office_on_stage.ballotpedia_is_marquee = positive_value_exists(ballotpedia_is_marquee)\n if ballotpedia_office_id is 
not False:\n office_on_stage.ballotpedia_office_id = convert_to_int(ballotpedia_office_id)\n if ballotpedia_office_name is not False:\n office_on_stage.ballotpedia_office_name = ballotpedia_office_name\n if ballotpedia_race_id is not False:\n office_on_stage.ballotpedia_race_id = convert_to_int(ballotpedia_race_id)\n if ballotpedia_race_office_level is not False:\n office_on_stage.ballotpedia_race_office_level = ballotpedia_race_office_level\n if ctcl_uuid is not False:\n office_on_stage.ctcl_uuid = ctcl_uuid\n if district_id is not False:\n office_on_stage.district_id = district_id\n if positive_value_exists(election_state):\n office_on_stage.state_code = election_state\n if google_civic_office_name is not False:\n office_on_stage.google_civic_office_name = google_civic_office_name\n if google_civic_office_name2 is not False:\n office_on_stage.google_civic_office_name2 = google_civic_office_name2\n if google_civic_office_name3 is not False:\n office_on_stage.google_civic_office_name3 = google_civic_office_name3\n if google_civic_office_name4 is not False:\n office_on_stage.google_civic_office_name4 = google_civic_office_name4\n if google_civic_office_name5 is not False:\n office_on_stage.google_civic_office_name5 = google_civic_office_name5\n # Save office is_battleground_race for this year, and then prepare to update all related objects\n office_on_stage.is_battleground_race = positive_value_exists(is_battleground_race)\n election_day_text = office_on_stage.get_election_day_text()\n year = 0\n years_false_list = []\n years_true_list = []\n if positive_value_exists(election_day_text):\n date_as_integer = convert_we_vote_date_string_to_date_as_integer(election_day_text)\n year = date_as_integer // 10000\n if positive_value_exists(year):\n if positive_value_exists(is_battleground_race):\n years_false_list = []\n years_true_list = [year]\n else:\n years_false_list = [year]\n years_true_list = []\n years_list = list(set(years_false_list + years_true_list))\n if ocd_division_id is not False:\n office_on_stage.ocd_division_id = ocd_division_id\n if office_held_we_vote_id is not False:\n office_on_stage.office_held_we_vote_id = office_held_we_vote_id\n from office_held.models import OfficeHeldManager\n office_held_manager = OfficeHeldManager()\n office_held_results = office_held_manager.retrieve_office_held(\n office_held_we_vote_id=office_held_we_vote_id,\n read_only=True)\n if office_held_results['office_held_found']:\n office_held = office_held_results['office_held']\n office_on_stage.office_held_name = office_held.office_held_name\n if office_name is not False:\n office_on_stage.office_name = office_name\n if primary_party is not False:\n office_on_stage.primary_party = primary_party\n if vote_usa_office_id is not False:\n office_on_stage.vote_usa_office_id = vote_usa_office_id\n\n office_on_stage.save()\n office_on_stage_id = office_on_stage.id\n office_on_stage_we_vote_id = office_on_stage.we_vote_id\n messages.add_message(request, messages.INFO, 'Office updated.')\n # ##################################\n # Update \"is_battleground_race\" for candidates under this office through the link CandidateToOfficeLink\n # We can't automatically update all of these candidates with the office's setting,\n # because we may be saving a primary election office which isn't a battleground race,\n # and the candidate may have made it through to the general election which\n # *is* a battleground.\n # from candidate.controllers import update_candidates_with_is_battleground_race\n # results = 
update_candidates_with_is_battleground_race(office_we_vote_id=office_on_stage.we_vote_id)\n if positive_value_exists(office_on_stage_we_vote_id) and len(years_list) > 0:\n from politician.controllers import update_parallel_fields_with_years_in_related_objects\n results = update_parallel_fields_with_years_in_related_objects(\n field_key_root='is_battleground_race_',\n master_we_vote_id_updated=office_on_stage_we_vote_id,\n years_false_list=years_false_list,\n years_true_list=years_true_list,\n )\n if not results['success']:\n status += results['status']\n status += \"FAILED_TO_UPDATE_PARALLEL_FIELDS_FROM_OFFICE \"\n messages.add_message(request, messages.ERROR, status)\n\n return HttpResponseRedirect(reverse('office:office_summary', args=(office_on_stage_id,)) +\n \"?google_civic_election_id=\" + str(google_civic_election_id) +\n \"&state_code=\" + str(state_code))\n except Exception as e:\n handle_record_not_saved_exception(e, logger=logger)\n messages.add_message(request, messages.ERROR, 'Could not save office (create new): ' + str(e))\n else:\n messages.add_message(request, messages.ERROR, 'Could not save office, success = False from above: ' + status)\n\n if redirect_to_contest_office_list:\n return HttpResponseRedirect(reverse('office:office_list', args=()) +\n '?google_civic_election_id=' + str(google_civic_election_id) +\n '&state_code=' + str(state_code))\n\n if remove_duplicate_process:\n return HttpResponseRedirect(reverse('office:find_and_merge_duplicate_offices', args=()) +\n \"?google_civic_election_id=\" + str(google_civic_election_id) +\n \"&state_code=\" + str(state_code))\n else:\n return HttpResponseRedirect(reverse('office:office_edit', args=(office_id,)))",
"def referee_synopsis_approval(request):\n \n if not validate_request(request): return redirect(reverse(URL_FORBIDDEN))\n \n user = auth.get_user(request)\n referee = Referee.objects.get(user = user)\n \n if request.method == \"POST\":\n id = int(request.POST['id'])\n isApproved = (request.POST['isApproved'] == \"True\")\n feedback = request.POST['feedback']\n\n thesis = Thesis.objects.get(id = id)\n panelMember = PanelMember.objects.get(thesis = thesis, referee = referee)\n\n dict = {'status' : 'OK', 'message' : 'Your response has been submitted successfully' }\n \n if isApproved:\n panelMember.status = 'A'\n panelMember.save()\n else:\n panelMember.status = 'R'\n panelMember.save()\n if referee.type == 'I':\n invite_indian_referees(thesis)\n else:\n invite_foreign_referees(thesis)\n\n return HttpResponse(json.dumps(dict), content_type = 'application/json')\n else:\n return redirect(reverse(URL_BAD_REQUEST))",
"def test_apply_corporate_approval(self):\n p = self.make('Prescription')\n self.set_cbas_attributes(p)\n p.planning_status = p.PLANNING_SUBMITTED\n p.save()\n\n url = reverse('admin:prescription_prescription_corporate_approve',\n args=(str(p.id),))\n self.client.login(username='fmsb', password='test')\n response = self.client.post(url, {}, follow=True)\n self.assertEqual(response.status_code, 200)\n\n p = Prescription.objects.get(name='test')\n self.assertTrue(p.planning_status == p.PLANNING_APPROVED)\n self.assertTrue(p.planning_status_modified is not None)",
"def approve(self, request, object_id, extra_context=None):\n obj = self.get_object(request, unquote(object_id))\n title = self._approve_title(obj)\n\n AdminAddApprovalForm = self._approve_approval_form(request)\n\n form = AdminAddApprovalForm(initial={'prescription': obj})\n if request.method == 'POST':\n url = reverse('admin:prescription_prescription_detail',\n args=[str(obj.id)])\n if obj.approval_status == obj.APPROVAL_DRAFT and obj.can_approve:\n # create an approval\n obj.approval_status = obj.APPROVAL_SUBMITTED\n obj.approval_status_modified = timezone.now()\n obj.save()\n self.message_user(\n request, \"Successfully submitted for approval.\")\n return HttpResponseRedirect(url)\n elif obj.approval_status == obj.APPROVAL_SUBMITTED:\n if request.POST.get('_cancel'):\n obj.clear_approvals()\n msg = 'Delete: Clearing Approvals/Endorsements', 'Burn ID: {}, Deleted by: {}'. format(obj.burn_id, request.user.get_full_name())\n logger.warning(msg)\n support_email('Delete: Clearing Approvals/Endorsements', msg)\n\n self.message_user(\n request, \"Approval rejected. ePFP is now draft.\")\n return HttpResponseRedirect(url)\n\n form = AdminAddApprovalForm(request.POST,\n initial={'prescription': obj})\n if form.is_valid():\n approval = form.save(commit=False)\n approval.prescription = obj\n approval.creator = request.user\n approval.modifier = request.user\n approval.save()\n obj.approval_status = obj.APPROVAL_APPROVED\n obj.approval_status_modified = timezone.now()\n obj.save()\n self.message_user(\n request, \"Successfully approved.\")\n return HttpResponseRedirect(url)\n elif obj.is_approved:\n if obj.is_closed:\n self.message_user(\n request, \"You can't extend an approval after the \"\n \"prescribed fire plan has been closed.\")\n return HttpResponseRedirect(url)\n if request.POST.get('_cancel'):\n self.message_user(\n request, \"Didn't extend approval.\")\n return HttpResponseRedirect(url)\n else:\n approval = obj.current_approval\n if approval and approval.extension_count < 3:\n approval.extension_count = approval.extension_count + 1\n approval.valid_to = approval.next_valid_to\n approval.save()\n self.message_user(\n request, \"Successfully extended approval.\")\n else:\n self.message_user(request, \"You can't extend an \"\n \"approval more than 3 times.\")\n return HttpResponseRedirect(url)\n\n admin_form, media = self._approve_form(request, obj, form)\n\n context = {\n 'title': title,\n 'current': obj,\n 'form': admin_form,\n 'media': media,\n 'errors': None,\n }\n return TemplateResponse(request, \"admin/prescription/prescription/\"\n \"approval.html\", context,\n current_app=self.admin_site.name)",
"def approve(self):\n self._check_if_open()\n data = {\"approved\": True}\n return self.post(\"approve\", data)",
"def test_submit_for_corporate_approval(self):\n # set up the prescription to be ready for corporate approval\n p = self.make('Prescription')\n self.set_cbas_attributes(p)\n self.assertTrue(p.can_corporate_approve)\n self.assertTrue(p.planning_status == p.PLANNING_DRAFT)\n\n # submit for corporate approval\n url = reverse('admin:prescription_prescription_corporate_approve',\n args=(str(p.id),))\n response = self.client.post(url, {}, follow=True)\n self.assertEqual(response.status_code, 200)\n\n # refresh prescription object\n p = Prescription.objects.get(name='test')\n self.assertTrue(p.planning_status == p.PLANNING_SUBMITTED)\n self.assertTrue(p.planning_status_modified is not None)",
"def moderation_view():\n\n # Ensure that the current user is an admin.\n assert users.get_current_user() and users.is_current_user_admin()\n\n config = ModerationConfig.get()\n\n # Approve something, if we were asked to.\n if request.form.get(\"approve\"):\n key = ndb.Key(urlsafe=request.form.get(\"approve\"))\n if key.kind() not in [\"UnapprovedListing\", \"UnapprovedInquiry\"]:\n raise ValueError\n\n entity = key.get()\n entity.approve(\"Approved by {!r} on {!r}\".format(\n users.get_current_user().email(),\n str(datetime.datetime.now())\n ))\n entity.put()\n return \"\"\n\n elif request.form.get(\"deny\"):\n key = ndb.Key(urlsafe=request.form.get(\"deny\"))\n if key.kind() not in [\"UnapprovedListing\", \"UnapprovedInquiry\"]:\n raise ValueError\n\n key.delete()\n return \"\"\n\n if request.form.get(\"automod\"):\n if config.enabled:\n config.blacklist = [x.strip() for x in\n request.form.get(\"blacklist\", \"\").split(\"\\n\")\n if x.strip()]\n config.min_delay = int(request.form.get(\"min_delay\", \"0\"))\n config.enabled = (request.form.get(\"automod\") == \"true\")\n config.put()\n\n inquiries = model.UnapprovedInquiry().query().fetch(100)\n listings = model.UnapprovedListing().query().fetch(100)\n\n inquiries.sort(key=email_order)\n listings.sort(key=email_order)\n\n return render_template(\"moderation/view.html\",\n inquiries=inquiries,\n listings=listings,\n config=config)",
"def view_approved():\n global approved\n global appr_ind\n appr = approved.get_all_values()\n headings = appr[0]\n first_appl = appr[appr_ind]\n for head, app in zip(headings, first_appl):\n head = head.ljust(15, ' ')\n print(f'{head} {app}')\n keep_viewing = True\n while keep_viewing:\n view_next = input('\\nPress V to view next, Q to quit, M for main '\n 'menu.\\n')\n if view_next.lower() == 'q':\n logout()\n elif view_next.lower() == 'v':\n appr_ind += 1\n if appr_ind < len(appr):\n print('Next approved application: \\n')\n view_approved()\n else:\n print('\\nNo more approved applications to view \\n')\n keep_viewing = False\n next_action()\n elif view_next.lower() == 'm':\n keep_viewing = False\n hr_main()\n break\n else:\n is_invalid()",
"def approve (self, response) :\n if 'event' in response and 'moderator' in response :\n eventId = response ['event']\n userId = response ['moderator']\n else :\n raise ModerationError (response)\n\n mod_status = 'OK'\n if 'status' in response :\n mod_status = response ['status']\n \n event = Event.object.get (id = eventId)\n approval = Approval (approved = event, moderatorId = userId, status = mod_status)\n approval.save ()\n self.editValues (event.answer, response)",
"def proposal(request):\n context={\n\n\n }\n\n return render(request, 'valor_airquality/proposal.html', context)",
"def office_edit_process_view(request):\n authority_required = {'verified_volunteer'} # admin, verified_volunteer\n if not voter_has_authority(request, authority_required):\n return redirect_to_sign_in_page(request, authority_required)\n\n office_id = convert_to_int(request.POST.get('office_id', 0))\n office_name = request.POST.get('office_name', False)\n google_civic_office_name = request.POST.get('google_civic_office_name', False)\n google_civic_election_id = request.POST.get('google_civic_election_id', 0)\n primary_party = request.POST.get('primary_party', False)\n state_code = request.POST.get('state_code', False)\n\n election_state = ''\n if state_code is not False:\n election_state = state_code\n elif google_civic_election_id:\n election_manager = ElectionManager()\n results = election_manager.retrieve_election(google_civic_election_id)\n if results['election_found']:\n election = results['election']\n election_state = election.get_election_state()\n\n # Check to see if this office is already in the database\n office_on_stage_found = False\n try:\n office_query = ContestOffice.objects.filter(id=office_id)\n if len(office_query):\n office_on_stage = office_query[0]\n office_on_stage_found = True\n except Exception as e:\n handle_record_not_found_exception(e, logger=logger)\n\n try:\n if office_on_stage_found:\n # Update\n # Removed for now: convert_to_int(office_on_stage.google_civic_election_id) >= 1000000 and\n if office_name is not False:\n office_on_stage.office_name = office_name\n if google_civic_office_name is not False:\n office_on_stage.google_civic_office_name = google_civic_office_name\n if primary_party is not False:\n office_on_stage.primary_party = primary_party\n if positive_value_exists(election_state):\n office_on_stage.state_code = election_state\n office_on_stage.save()\n office_on_stage_id = office_on_stage.id\n messages.add_message(request, messages.INFO, 'Office updated.')\n google_civic_election_id = office_on_stage.google_civic_election_id\n\n return HttpResponseRedirect(reverse('office:office_summary', args=(office_on_stage_id,)) +\n \"?google_civic_election_id=\" + str(google_civic_election_id))\n else:\n # Create new\n office_on_stage = ContestOffice(\n office_name=office_name,\n google_civic_election_id=google_civic_election_id,\n state_code=election_state,\n )\n # Removing this limitation: convert_to_int(office_on_stage.google_civic_election_id) >= 1000000 and\n if primary_party is not False:\n office_on_stage.primary_party = primary_party\n office_on_stage.save()\n messages.add_message(request, messages.INFO, 'New office saved.')\n\n # Come back to the \"Create New Office\" page\n return HttpResponseRedirect(reverse('office:office_new', args=()) +\n \"?google_civic_election_id=\" + str(google_civic_election_id))\n except Exception as e:\n handle_record_not_saved_exception(e, logger=logger)\n messages.add_message(request, messages.ERROR, 'Could not save office.')\n\n return HttpResponseRedirect(reverse('office:office_list', args=()) +\n \"?google_civic_election_id=\" + google_civic_election_id)",
"def approval_model(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"approval_model\")",
"def manage_access_approval(\n request: AuthenticatedHttpRequest,\n *,\n access_request_pk: int,\n entity: Literal[\"importer\", \"exporter\"],\n) -> HttpResponse:\n\n with transaction.atomic():\n if entity == \"importer\":\n model_cls = ImporterAccessRequest\n form_cls = ImporterApprovalRequestForm\n else:\n model_cls = ExporterAccessRequest\n form_cls = ExporterApprovalRequestForm\n\n access_request = get_object_or_404(\n model_cls.objects.select_for_update(), pk=access_request_pk\n )\n\n case_progress.access_request_in_processing(access_request)\n\n if request.method == \"POST\":\n form = form_cls(request.POST, access_request=access_request)\n\n if form.is_valid():\n approval_request = form.save(commit=False)\n approval_request.status = ApprovalRequest.Statuses.OPEN\n approval_request.access_request = access_request\n approval_request.requested_by = request.user\n approval_request.save()\n send_approval_request_opened_email(approval_request)\n return redirect(\n reverse(\n \"access:case-management-access-approval\",\n kwargs={\"access_request_pk\": access_request.pk, \"entity\": entity},\n )\n )\n else:\n approval_request = access_request.approval_requests.filter(is_active=True).first()\n form = form_cls(instance=approval_request, access_request=access_request)\n\n context = {\n \"case_type\": \"access\",\n \"process\": access_request,\n \"form\": form,\n \"approval_request\": approval_request,\n \"entity\": entity,\n }\n\n return render(\n request=request,\n template_name=\"web/domains/case/access/management-access-approval.html\",\n context=context,\n )",
"def test_corporate_approval_allowed(self):\n p = self.make('Prescription')\n self.assertFalse(p.can_corporate_approve)\n self.assertFalse(p.has_corporate_approval)\n\n fields = ['priority', 'location', 'perimeter', 'area', 'last_season',\n 'last_year', 'treatment_percentage', 'allocation']\n for field in fields:\n self.set_cbas_attributes(p, exclude_fields=[field])\n self.assertFalse(p.can_corporate_approve)\n self.set_cbas_attributes(p)\n self.assertTrue(p.can_corporate_approve)",
"def urls(self):\n urls = super(Approval, self).urls()\n urls.append(url(r'^(?P<process_pk>\\d+)/{}/(?P<task_pk>\\d+)/assign/$'.format(self.name),\n self.assign_view, {'flow_task': self}, name=\"{}__assign\".format(self.name)))\n urls.append(url(r'^(?P<process_pk>\\d+)/{}/(?P<task_pk>\\d+)/unassign/$'.format(self.name),\n self.unassign_view, {'flow_task': self}, name=\"{}__unassign\".format(self.name)))\n return urls",
"def purchase_indent_approve(request, request_id):\n purchase_indent_request = get_object_or_404(PurchaseIndentRequest, pk=request_id)\n current_employee = request.user.employee_set.all()[0]\n\n if purchase_indent_request.state == 'Submitted':\n if purchase_indent_request.indenter.department.hod_id != current_employee.id:\n raise PermissionDenied\n return render(request, 'purchase/purchase_indent/show_hod.html',\n {'purchase_indent_request': purchase_indent_request})\n\n elif purchase_indent_request.state == 'Approved by Head of Department':\n if not request.user.groups.filter(name='JrAO_AccountsDepartment').exists():\n raise PermissionDenied\n form = PurchaseIndentBudgetDetailsForm()\n\n return render(request, 'purchase/purchase_indent/show_jao.html',\n {'purchase_indent_request': purchase_indent_request, 'form': form})\n\n elif purchase_indent_request.state == 'Approved by Junior Accounts Officer':\n if not request.user.groups.filter(name='DR_AccountsDepartment').exists():\n raise PermissionDenied\n return render(request, 'purchase/purchase_indent/show_dr.html',\n {'purchase_indent_request': purchase_indent_request})\n\n else:\n return PermissionDenied",
"def dr_approve(self):\n print \"DR approved this form. Current state:\", self.state",
"def endorse(self, request, object_id, extra_context=None):\n obj = self.get_object(request, unquote(object_id))\n\n title = \"Endorse this ePFP\"\n if obj.endorsement_status == obj.ENDORSEMENT_DRAFT:\n title = \"Submit for endorsement\"\n\n form = AddEndorsementForm(request.POST or None, request=request)\n\n if request.method == 'POST':\n url = reverse('admin:prescription_prescription_detail',\n args=[str(obj.id)])\n if request.POST.get('_cancel'):\n return HttpResponseRedirect(url)\n if obj.endorsement_status == obj.ENDORSEMENT_DRAFT and obj.can_endorse:\n obj.endorsement_status = obj.ENDORSEMENT_SUBMITTED\n obj.endorsement_status_modified = timezone.now()\n obj.save()\n\n # Remove all object permissions for sections that can't be\n # modified after submitting for endorsement\n update_permissions(obj, self.admin_site, 'endorsement')\n\n # a simple hack to set the default prescribing officer\n if obj is not None and obj.prescribing_officer is None:\n obj.prescribing_officer = request.user\n obj.save()\n\n self.message_user(\n request, \"Successfully submitted for endorsement.\")\n return HttpResponseRedirect(url)\n if obj.endorsement_status == obj.ENDORSEMENT_SUBMITTED or obj.endorsement_status == obj.ENDORSEMENT_APPROVED:\n # create an endorsement\n if form.is_valid():\n endorsement = form.save(commit=False)\n endorsement.prescription = obj\n endorsement.endorsed = (\n request.POST.get('_dont_endorse') is None)\n endorsement.creator = request.user\n endorsement.modifier = request.user\n endorsement.save()\n group = Group.objects.get(name='ePFP Application Administrator')\n assign_perm('delete_endorsement', group, endorsement)\n\n # check that all needed endorsements are there\n if obj.all_reviewed:\n if obj.all_endorsed:\n obj.endorsement_status = obj.ENDORSEMENT_APPROVED\n msg = (\" All endorsements have been completed on \"\n \"this ePFP.\")\n group = Group.objects.get(name='ePFP Application Administrator')\n else:\n logger.warning('Prescription {} - all endorsements input but \"delete all\" business logic triggered'.format(obj))\n for i in obj.endorsement_set.all():\n logger.info('Staff: {}, role: {}, endorsement: {}'.format(i.creator.get_full_name(), i.role, i.endorsed))\n msg = (\" All endorsements have been reviewed but some \"\n \"of them have not been endorsed on this ePFP.\")\n obj.save()\n self.message_user(\n request, \"Successfully added endorsement.\" +\n msg)\n else:\n self.message_user(\n request, \"Successfully added endorsement.\")\n return HttpResponseRedirect(url)\n\n form.fields['role'].queryset = obj.not_endorsed_endorsing_roles\n\n group = Group.objects.get(name='ePFP Application Administrator')\n for endorsement in obj.endorsement_set.all():\n assign_perm('delete_endorsement', group, endorsement)\n\n context = {\n 'title': title,\n 'current': obj,\n 'errors': None,\n 'form': form,\n 'endorsed_endorsing_roles': obj.endorsement_set.all(),\n 'not_endorsed_endorsing_roles': obj.not_endorsed_endorsing_roles,\n }\n return TemplateResponse(request, \"admin/prescription/prescription/\"\n \"endorsement.html\", context,\n current_app=self.admin_site.name)",
"def generate_approval_route(self):\n for order in self:\n if not order.team_id:\n continue\n if order.approver_ids:\n # reset approval route\n order.approver_ids.unlink()\n for team_approver in order.team_id.approver_ids:\n\n custom_condition = order.compute_custom_condition(team_approver)\n if not custom_condition:\n # Skip approver, if custom condition for the approver is set and the condition result is not True\n continue\n\n min_amount = team_approver.company_currency_id._convert(\n team_approver.min_amount,\n order.currency_id,\n order.company_id,\n order.date_order or fields.Date.today())\n if min_amount > order.amount_total:\n # Skip approver if Minimum Amount is greater than Total Amount\n continue\n max_amount = team_approver.company_currency_id._convert(\n team_approver.max_amount,\n order.currency_id,\n order.company_id,\n order.date_order or fields.Date.today())\n if max_amount and max_amount < order.amount_total:\n # Skip approver if Maximum Amount is set and less than Total Amount\n continue\n\n # Add approver to the PO\n self.env['purchase.order.approver'].create({\n 'sequence': team_approver.sequence,\n 'team_id': team_approver.team_id.id,\n 'user_id': team_approver.user_id.id,\n 'role': team_approver.role,\n 'min_amount': team_approver.min_amount,\n 'max_amount': team_approver.max_amount,\n 'lock_amount_total': team_approver.lock_amount_total,\n 'order_id': order.id,\n 'team_approver_id': team_approver.id,\n })",
"def apply(request):\n user = request.user\n if not(user.is_authenticated()):\n return redirect('/allotter/login/')\n \n context = get_details(user) \n \n return render(request, 'allotter/apply.html', context)",
"def get_info_from_db(self):\n return axdb_client.get_approval_info(root_id=self.root_id, leaf_id=self.leaf_id)",
"def submit(request):\n if not request.user.is_authenticated():\n return proceed(request)\n # If dev has already agreed, continue to next step.\n user = UserProfile.objects.get(pk=request.user.id)\n if not user.read_dev_agreement:\n return redirect('submit.app.terms')\n return manifest(request)",
"def hod_approve(self):\n print \"HOD approved this form. Current state:\", self.state",
"def jao_approve(self):\n print \"JAO approved this form. Current state:\", self.state",
"def apply_method(self, r, **attr):\n\n if r.name == \"organisation\" and \\\n r.id and \\\n not r.component and \\\n r.representation in (\"html\", \"aadata\"):\n\n organisation_id = r.id\n\n delegation_id = r.get_vars.get(\"delegation_id\")\n if delegation_id is None:\n current.session.error = \"Can't Approve Application if Delegation not supplied\"\n redirect(URL(args = [organisation_id]))\n\n T = current.T\n db = current.db\n s3db = current.s3db\n auth = current.auth\n has_role = auth.s3_has_role\n\n # Check that this User is permitted to review Applications for this Org\n if has_role(\"ADMIN\"):\n # OK\n pass\n elif has_role(\"ORG_ADMIN\") and (auth.user.organisation_id == organisation_id): # @ToDo: Ideally (post-CCC) make this realm to allow OG_ADMIN role to be assigned to 1 User for multiple Orgs\n # OK\n pass\n else:\n current.session.error = \"You are not permitted to Approve Applications for this Organisation\"\n redirect(URL(args = [organisation_id]))\n\n # Read the delegation\n dtable = s3db.hrm_delegation\n delegation = db(dtable.id == delegation_id).select(dtable.id, # For update_record\n dtable.organisation_id,\n dtable.person_id,\n dtable.status,\n dtable.comments,\n limitby = (0, 1)\n ).first()\n if not delegation:\n current.session.error = \"Application not found!\"\n redirect(URL(args = [organisation_id]))\n\n if delegation.organisation_id != organisation_id:\n current.session.error = \"Application not for this Organisation!\"\n redirect(URL(args = [organisation_id]))\n\n if delegation.status != \"APPL\":\n current.session.error = \"Application has incorrect status!\"\n redirect(URL(args = [organisation_id]))\n\n # Lookup Person\n person_id = delegation.person_id\n ptable = s3db.pr_person\n person = db(ptable.id == person_id).select(ptable.first_name,\n ptable.middle_name,\n ptable.last_name,\n ptable.pe_id,\n limitby = (0, 1)\n ).first()\n pe_id = person.pe_id\n\n # Lookup User Account\n ltable = s3db.pr_person_user\n utable = db.auth_user\n query = (ltable.pe_id == pe_id) & \\\n (ltable.user_id == utable.id)\n user = db(query).select(utable.id,\n utable.organisation_id,\n limitby = (0, 1),\n ).first()\n if user.organisation_id:\n current.session.error = \"Volunteer has already been accepted to join an Organisation\"\n redirect(URL(args = [organisation_id]))\n\n user_id = user.id\n\n record = r.record\n org_name = record.name\n realm_entity = record.pe_id\n\n requires = IS_IN_SET({\"0\": T(\"No\"),\n \"1\": T(\"Yes\"),\n })\n form = FORM(DIV(DIV(LABEL(\"Should this volunteer become affiliated to this Organisation?\",\n SPAN(\" *\",\n _class = \"req\",\n ),\n _for = \"pr_person_sub_apply_value\",\n ),\n DIV(DIV(DIV(DIV(INPUT(requires = requires,\n _type = \"radio\",\n _name = \"approve\",\n _id = \"approve_0\",\n _value = \"0\",\n #value = None,\n ),\n LABEL(T(\"No\"),\n _for = \"approve_0\",\n ),\n ),\n ),\n DIV(DIV(INPUT(requires = requires,\n _type = \"radio\",\n _name = \"approve\",\n _id = \"approve_1\",\n _value = \"1\",\n #value = None,\n ),\n LABEL(T(\"Yes\"),\n _for = \"approve_1\",\n ),\n ),\n ),\n _class = \"generic-widget web2py_radiowidget\",\n _id = \"pr_person_sub_apply_value\",\n ),\n _class = \"controls\",\n ),\n _class = \"small-12 columns\",\n ),\n _class = \"form-row row\",\n ),\n DIV(DIV(DIV(INPUT(_class = \"button primary small btn\",\n _type = \"submit\",\n _value = \"Save\",\n ),\n _class = \"controls\",\n ),\n _class = \"small-12 columns\",\n ),\n _class = \"form-row row\",\n ),\n )\n\n if form.accepts(r.post_vars):\n\n # Message Applicant\n\n # 
Lookup Email\n ctable = s3db.pr_contact\n query = (ctable.pe_id == pe_id) & \\\n (ctable.contact_method == \"EMAIL\") & \\\n (ctable.deleted == False)\n emails = db(query).select(ctable.value,\n distinct = True)\n\n # Create Email\n system_name = current.deployment_settings.get_system_name_short()\n if form.vars.approve == \"0\":\n subject = \"%s: Application to %s has been Rejected\" % (system_name, org_name)\n\n # Update Delegation\n delegation.update_record(status = \"RJCT\")\n else:\n subject = \"%s: Application to %s has been Approved\" % (system_name, org_name)\n\n # Update Delegation\n delegation.update_record(status = \"ACPT\")\n\n # Update User Account\n user.update_record(organisation_id = organisation_id)\n\n # Add Human Resource Record\n htable = s3db.hrm_human_resource\n hr = {\"organisation_id\": organisation_id,\n \"person_id\": person_id,\n \"realm_entity\": realm_entity,\n }\n human_resource_id = htable.insert(**hr)\n hr[\"id\"] = human_resource_id\n s3db.update_super(htable, hr)\n onaccept = s3db.get_config(\"hrm_human_resource\", \"create_onaccept\") or \\\n s3db.get_config(\"hrm_human_resource\", \"onaccept\")\n if callable(onaccept):\n hform = Storage(vars = hr)\n onaccept(hform)\n\n # Add VOLUNTEER role\n auth.s3_assign_role(user_id, \"VOLUNTEER\", for_pe=realm_entity)\n\n # Remove RESERVE role\n auth.s3_withdraw_role(user_id, \"RESERVE\", for_pe=[])\n\n # Set Realm Entity\n ttable = s3db.hrm_human_resource_tag\n reserve = delegation.comments\n if reserve == \"1\":\n ftable = s3db.pr_forum\n forum = db(ftable.organisation_id == organisation_id).select(ftable.pe_id,\n limitby = (0, 1)\n ).first()\n\n auth.set_realm_entity(\"pr_person\", person_id, entity=forum.pe_id, force_update=True)\n else:\n auth.set_realm_entity(\"pr_person\", person_id, entity=realm_entity, force_update=True)\n\n # Set Reserves Tag\n ttable = s3db.hrm_human_resource_tag\n ttable.insert(human_resource_id = human_resource_id,\n tag = \"reserve\",\n value = reserve,\n )\n\n message = subject\n\n # Send Email to each of the Person's emails\n send_email = current.msg.send_email\n for email in emails:\n send_email(to = email.value,\n subject = subject,\n message = message,\n )\n # Redirect\n current.session.confirmation = T(\"Application has been processed\")\n redirect(URL(args = [organisation_id]))\n\n # Show Form\n ctable = s3db.hrm_competency\n stable = s3db.hrm_skill\n query = (ctable.person_id == person_id) & \\\n (ctable.deleted == False) & \\\n (ctable.skill_id == stable.id)\n offers = db(query).select(stable.name)\n offers = \", \".join([o.name for o in offers])\n\n ttable = s3db.pr_person_tag\n query = (ttable.person_id == person_id) & \\\n (ttable.tag == \"skill_details\")\n tag = db(query).select(ttable.id,\n ttable.value,\n limitby = (0, 1)\n ).first()\n if tag and tag.value is not None:\n offer_details = TR(TD(\"Offer Details:\"),\n TD(tag.value),\n )\n else:\n offer_details = TR(TD(_colspan = 2))\n\n ctable = s3db.pr_contact\n query = (ctable.pe_id == pe_id) & \\\n (ctable.contact_method == \"EMAIL\") & \\\n (ctable.deleted == False)\n emails = db(query).select(ctable.value)\n email = \", \".join([e.value for e in emails])\n\n query = (ctable.pe_id == pe_id) & \\\n (ctable.contact_method.belongs((\"SMS\", \"HOME_PHONE\"))) & \\\n (ctable.deleted == False)\n phones = db(query).select(ctable.value)\n phone = \", \".join([p.value for p in phones])\n\n atable = s3db.pr_address\n gtable = s3db.gis_location\n query = (atable.pe_id == pe_id) & \\\n (atable.deleted == False) & \\\n 
(atable.location_id == gtable.id)\n location = db(query).select(gtable.L3,\n gtable.L4,\n gtable.addr_street,\n limitby = (0, 1)\n ).first()\n if location:\n address = location.addr_street or \"\"\n if location.L4:\n if address:\n address = \"%s, %s\" % (address, location.L4)\n else:\n address = location.L4\n if location.L3:\n if address:\n address = \"%s, %s\" % (address, location.L3)\n else:\n address = location.L3\n address = TR(TD(\"Address:\"),\n TD(address)\n )\n else:\n address = TR(TD(_colspan = 2))\n\n header = DIV(P(\"This volunteer has applied to join this Organisation:\"),\n TABLE(TR(TD(\"Name:\"),\n TD(s3_fullname(person))\n ),\n TR(TD(\"Volunteer Offer:\"),\n TD(offers),\n ),\n offer_details,\n TR(TD(\"Telephone:\"),\n TD(phone),\n ),\n TR(TD(\"Email:\"),\n TD(email),\n ),\n address,\n ),\n )\n\n output = {\"form\": form,\n \"header\": header,\n \"title\": \"Application to join Organisation: %s\" % org_name,\n }\n current.response.title = T(\"Application\")\n S3CustomController._view(THEME, \"apply.html\")\n return output\n\n else:\n r.error(405, current.ERROR.BAD_METHOD)",
"def update_application(request):\n\n record = RegApplication.query.filter_by(email=request.form['application-email']).first()\n\n record.application_processed = True\n record.application_granted = False if request.form['application-action'] == 'reject' else True\n record.processed_date = datetime.datetime.now()\n db.session.commit()\n\n if not record.application_granted:\n\n send_message(subject='OpenAPS Access Refused',\n email=request.form['application-email'],\n content=f\"\"\"Your application for access to the OpenAPS data portal was rejected for the following reason:\n <br><br>\n '{request.form['reject-reason']}'\"\"\")\n\n return record.project_requests",
"def take_ownership_access_approval(\n request: AuthenticatedHttpRequest,\n *,\n approval_request_pk: int,\n entity: Literal[\"importer\", \"exporter\"],\n) -> HttpResponse:\n\n with transaction.atomic():\n if entity == \"importer\":\n approval_request = get_object_or_404(\n ImporterApprovalRequest.objects.select_for_update(), pk=approval_request_pk\n )\n else:\n approval_request = get_object_or_404(\n ExporterApprovalRequest.objects.select_for_update(), pk=approval_request_pk\n )\n\n case_progress.approval_request_in_processing(approval_request)\n\n # Already assigned\n if approval_request.requested_from:\n raise PermissionDenied\n\n org = approval_request.access_request.get_specific_model().link\n if not can_user_manage_org_contacts(request.user, org):\n raise PermissionDenied\n\n approval_request.requested_from = request.user\n approval_request.save()\n\n return redirect(\n reverse(\n \"access:case-approval-respond\",\n kwargs={\n \"access_request_pk\": approval_request.access_request.id,\n \"entity\": entity,\n \"approval_request_pk\": approval_request.pk,\n },\n )\n )",
"def GetApprovalRequest(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")",
"def ApproveApprovalRequest(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")",
"def approval_model(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"approval_model\")"
] | [
"0.60776347",
"0.59112906",
"0.58727604",
"0.57528615",
"0.57168037",
"0.56779003",
"0.5671841",
"0.5618632",
"0.55696684",
"0.55305827",
"0.551526",
"0.55147535",
"0.54409975",
"0.54267627",
"0.5425578",
"0.5413403",
"0.5285814",
"0.5261875",
"0.5244001",
"0.5226627",
"0.52104264",
"0.520973",
"0.52046096",
"0.51606107",
"0.5153411",
"0.5143488",
"0.5142799",
"0.5098136",
"0.50842845",
"0.50826293"
] | 0.66895914 | 0 |
View to manage endorsement of an ePFP. | def endorse(self, request, object_id, extra_context=None):
obj = self.get_object(request, unquote(object_id))
title = "Endorse this ePFP"
if obj.endorsement_status == obj.ENDORSEMENT_DRAFT:
title = "Submit for endorsement"
form = AddEndorsementForm(request.POST or None, request=request)
if request.method == 'POST':
url = reverse('admin:prescription_prescription_detail',
args=[str(obj.id)])
if request.POST.get('_cancel'):
return HttpResponseRedirect(url)
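            # Draft ePFP: submit it for endorsement and lock sections that can no longer be edited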
if obj.endorsement_status == obj.ENDORSEMENT_DRAFT and obj.can_endorse:
obj.endorsement_status = obj.ENDORSEMENT_SUBMITTED
obj.endorsement_status_modified = timezone.now()
obj.save()
# Remove all object permissions for sections that can't be
# modified after submitting for endorsement
update_permissions(obj, self.admin_site, 'endorsement')
# a simple hack to set the default prescribing officer
if obj is not None and obj.prescribing_officer is None:
obj.prescribing_officer = request.user
obj.save()
self.message_user(
request, "Successfully submitted for endorsement.")
return HttpResponseRedirect(url)
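            # Submitted (or already approved) ePFP: record an individual endorsement from the form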
            if obj.endorsement_status in (obj.ENDORSEMENT_SUBMITTED, obj.ENDORSEMENT_APPROVED):
# create an endorsement
if form.is_valid():
endorsement = form.save(commit=False)
endorsement.prescription = obj
endorsement.endorsed = (
request.POST.get('_dont_endorse') is None)
endorsement.creator = request.user
endorsement.modifier = request.user
endorsement.save()
group = Group.objects.get(name='ePFP Application Administrator')
assign_perm('delete_endorsement', group, endorsement)
# check that all needed endorsements are there
if obj.all_reviewed:
if obj.all_endorsed:
obj.endorsement_status = obj.ENDORSEMENT_APPROVED
msg = (" All endorsements have been completed on "
"this ePFP.")
group = Group.objects.get(name='ePFP Application Administrator')
else:
logger.warning('Prescription {} - all endorsements input but "delete all" business logic triggered'.format(obj))
for i in obj.endorsement_set.all():
logger.info('Staff: {}, role: {}, endorsement: {}'.format(i.creator.get_full_name(), i.role, i.endorsed))
msg = (" All endorsements have been reviewed but some "
"of them have not been endorsed on this ePFP.")
obj.save()
self.message_user(
request, "Successfully added endorsement." +
msg)
else:
self.message_user(
request, "Successfully added endorsement.")
return HttpResponseRedirect(url)
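        # Prepare the remaining-roles form, permissions and page context for rendering the endorsement page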
form.fields['role'].queryset = obj.not_endorsed_endorsing_roles
group = Group.objects.get(name='ePFP Application Administrator')
for endorsement in obj.endorsement_set.all():
assign_perm('delete_endorsement', group, endorsement)
context = {
'title': title,
'current': obj,
'errors': None,
'form': form,
'endorsed_endorsing_roles': obj.endorsement_set.all(),
'not_endorsed_endorsing_roles': obj.not_endorsed_endorsing_roles,
}
return TemplateResponse(request, "admin/prescription/prescription/"
"endorsement.html", context,
current_app=self.admin_site.name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def enterprise_edit(request):\r\n action = tool.get_param_by_request(request.GET, 'action', \"add\", str)\r\n career_id = tool.get_param_by_request(request.GET, 'careerId', 0, int)\r\n\r\n enterprise = APIResult()\r\n c = None\r\n if action == \"add\":\r\n c = {\"career_id\": career_id, \"action\": action}\r\n return render_to_response(\"mz_course/careerIntroduce/careerIntroduce_enterprise_edit.html\", c,\r\n context_instance=RequestContext(request))\r\n\r\n if action == \"edit\" and (not career_id):\r\n _id = tool.get_param_by_request(request.GET, 'enterpriseId', 0, int)\r\n enterprise = api_enterprise.get_career_page_enterprise_by_id(_id)\r\n c = {\"enterprises\": enterprise.result()[0], \"action\": action}\r\n if enterprise.is_error():\r\n return render_to_response(\"404.html\", {}, context_instance=RequestContext(request))\r\n else:\r\n return render_to_response(\"mz_course/careerIntroduce/careerIntroduce_enterprise_edit.html\", c,\r\n context_instance=RequestContext(request))\r\n\r\n if action == \"edit\" and career_id:\r\n enterprise = api_enterprise.list_career_page_enterprise_by_career_id(career_id)\r\n c = {\"enterprises\": enterprise.result(), \"action\": action}\r\n\r\n if enterprise.is_error():\r\n return render_to_response(\"404.html\", {}, context_instance=RequestContext(request))\r\n else:\r\n return render_to_response(\"mz_course/careerIntroduce/careerIntroduce_enterprise_list.html\", c,\r\n context_instance=RequestContext(request))",
"def office_edit_process_view(request):\n status = ''\n success = True\n # admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer\n authority_required = {'verified_volunteer'}\n if not voter_has_authority(request, authority_required):\n return redirect_to_sign_in_page(request, authority_required)\n\n ballotpedia_office_id = request.POST.get('ballotpedia_office_id', False) # Related to office_held\n ballotpedia_race_id = request.POST.get('ballotpedia_race_id', False) # Related to contest_office\n ballotpedia_race_office_level = request.POST.get('ballotpedia_race_office_level', False)\n ballotpedia_office_name = request.POST.get('ballotpedia_office_name', False)\n ballotpedia_is_marquee = request.POST.get('ballotpedia_is_marquee', False)\n ctcl_uuid = request.POST.get('ctcl_uuid', False)\n district_id = request.POST.get('district_id', False)\n google_civic_office_name = request.POST.get('google_civic_office_name', False)\n google_civic_office_name2 = request.POST.get('google_civic_office_name2', False)\n google_civic_office_name3 = request.POST.get('google_civic_office_name3', False)\n google_civic_office_name4 = request.POST.get('google_civic_office_name4', False)\n google_civic_office_name5 = request.POST.get('google_civic_office_name5', False)\n google_civic_election_id = request.POST.get('google_civic_election_id', 0)\n ocd_division_id = request.POST.get('ocd_division_id', False)\n office_held_we_vote_id = request.POST.get('office_held_we_vote_id', False)\n office_id = convert_to_int(request.POST.get('office_id', 0))\n office_name = request.POST.get('office_name', False)\n primary_party = request.POST.get('primary_party', False)\n state_code = request.POST.get('state_code', False)\n vote_usa_office_id = request.POST.get('vote_usa_office_id', False)\n is_battleground_race = request.POST.get('is_battleground_race', False)\n remove_duplicate_process = request.POST.get('remove_duplicate_process', False)\n redirect_to_contest_office_list = convert_to_int(request.POST.get('redirect_to_contest_office_list', 0))\n\n election_state = ''\n if state_code is not False:\n election_state = state_code\n elif google_civic_election_id:\n election_manager = ElectionManager()\n results = election_manager.retrieve_election(google_civic_election_id)\n if results['election_found']:\n election = results['election']\n election_state = election.get_election_state()\n\n # Check to see if this office is already in the database\n office_on_stage_found = False\n office_on_stage = None\n try:\n office_query = ContestOffice.objects.filter(id=office_id)\n if len(office_query):\n office_on_stage = office_query[0]\n office_on_stage_found = True\n except Exception as e:\n handle_record_not_found_exception(e, logger=logger)\n success = False\n\n if success:\n try:\n if office_on_stage_found:\n office_on_stage_id = office_on_stage.id\n google_civic_election_id = office_on_stage.google_civic_election_id\n else:\n # Create new\n office_on_stage = ContestOffice(\n office_name=office_name,\n google_civic_election_id=google_civic_election_id,\n state_code=election_state,\n )\n office_on_stage_id = office_on_stage.id\n google_civic_election_id = office_on_stage.google_civic_election_id\n office_on_stage_found = True\n if office_on_stage_found:\n # Update\n # Removing this limitation: convert_to_int(office_on_stage.google_civic_election_id) >= 1000000 and\n office_on_stage.ballotpedia_is_marquee = positive_value_exists(ballotpedia_is_marquee)\n if ballotpedia_office_id is 
not False:\n office_on_stage.ballotpedia_office_id = convert_to_int(ballotpedia_office_id)\n if ballotpedia_office_name is not False:\n office_on_stage.ballotpedia_office_name = ballotpedia_office_name\n if ballotpedia_race_id is not False:\n office_on_stage.ballotpedia_race_id = convert_to_int(ballotpedia_race_id)\n if ballotpedia_race_office_level is not False:\n office_on_stage.ballotpedia_race_office_level = ballotpedia_race_office_level\n if ctcl_uuid is not False:\n office_on_stage.ctcl_uuid = ctcl_uuid\n if district_id is not False:\n office_on_stage.district_id = district_id\n if positive_value_exists(election_state):\n office_on_stage.state_code = election_state\n if google_civic_office_name is not False:\n office_on_stage.google_civic_office_name = google_civic_office_name\n if google_civic_office_name2 is not False:\n office_on_stage.google_civic_office_name2 = google_civic_office_name2\n if google_civic_office_name3 is not False:\n office_on_stage.google_civic_office_name3 = google_civic_office_name3\n if google_civic_office_name4 is not False:\n office_on_stage.google_civic_office_name4 = google_civic_office_name4\n if google_civic_office_name5 is not False:\n office_on_stage.google_civic_office_name5 = google_civic_office_name5\n # Save office is_battleground_race for this year, and then prepare to update all related objects\n office_on_stage.is_battleground_race = positive_value_exists(is_battleground_race)\n election_day_text = office_on_stage.get_election_day_text()\n year = 0\n years_false_list = []\n years_true_list = []\n if positive_value_exists(election_day_text):\n date_as_integer = convert_we_vote_date_string_to_date_as_integer(election_day_text)\n year = date_as_integer // 10000\n if positive_value_exists(year):\n if positive_value_exists(is_battleground_race):\n years_false_list = []\n years_true_list = [year]\n else:\n years_false_list = [year]\n years_true_list = []\n years_list = list(set(years_false_list + years_true_list))\n if ocd_division_id is not False:\n office_on_stage.ocd_division_id = ocd_division_id\n if office_held_we_vote_id is not False:\n office_on_stage.office_held_we_vote_id = office_held_we_vote_id\n from office_held.models import OfficeHeldManager\n office_held_manager = OfficeHeldManager()\n office_held_results = office_held_manager.retrieve_office_held(\n office_held_we_vote_id=office_held_we_vote_id,\n read_only=True)\n if office_held_results['office_held_found']:\n office_held = office_held_results['office_held']\n office_on_stage.office_held_name = office_held.office_held_name\n if office_name is not False:\n office_on_stage.office_name = office_name\n if primary_party is not False:\n office_on_stage.primary_party = primary_party\n if vote_usa_office_id is not False:\n office_on_stage.vote_usa_office_id = vote_usa_office_id\n\n office_on_stage.save()\n office_on_stage_id = office_on_stage.id\n office_on_stage_we_vote_id = office_on_stage.we_vote_id\n messages.add_message(request, messages.INFO, 'Office updated.')\n # ##################################\n # Update \"is_battleground_race\" for candidates under this office through the link CandidateToOfficeLink\n # We can't automatically update all of these candidates with the office's setting,\n # because we may be saving a primary election office which isn't a battleground race,\n # and the candidate may have made it through to the general election which\n # *is* a battleground.\n # from candidate.controllers import update_candidates_with_is_battleground_race\n # results = 
update_candidates_with_is_battleground_race(office_we_vote_id=office_on_stage.we_vote_id)\n if positive_value_exists(office_on_stage_we_vote_id) and len(years_list) > 0:\n from politician.controllers import update_parallel_fields_with_years_in_related_objects\n results = update_parallel_fields_with_years_in_related_objects(\n field_key_root='is_battleground_race_',\n master_we_vote_id_updated=office_on_stage_we_vote_id,\n years_false_list=years_false_list,\n years_true_list=years_true_list,\n )\n if not results['success']:\n status += results['status']\n status += \"FAILED_TO_UPDATE_PARALLEL_FIELDS_FROM_OFFICE \"\n messages.add_message(request, messages.ERROR, status)\n\n return HttpResponseRedirect(reverse('office:office_summary', args=(office_on_stage_id,)) +\n \"?google_civic_election_id=\" + str(google_civic_election_id) +\n \"&state_code=\" + str(state_code))\n except Exception as e:\n handle_record_not_saved_exception(e, logger=logger)\n messages.add_message(request, messages.ERROR, 'Could not save office (create new): ' + str(e))\n else:\n messages.add_message(request, messages.ERROR, 'Could not save office, success = False from above: ' + status)\n\n if redirect_to_contest_office_list:\n return HttpResponseRedirect(reverse('office:office_list', args=()) +\n '?google_civic_election_id=' + str(google_civic_election_id) +\n '&state_code=' + str(state_code))\n\n if remove_duplicate_process:\n return HttpResponseRedirect(reverse('office:find_and_merge_duplicate_offices', args=()) +\n \"?google_civic_election_id=\" + str(google_civic_election_id) +\n \"&state_code=\" + str(state_code))\n else:\n return HttpResponseRedirect(reverse('office:office_edit', args=(office_id,)))",
"def moderation_view():\n\n # Ensure that the current user is an admin.\n assert users.get_current_user() and users.is_current_user_admin()\n\n config = ModerationConfig.get()\n\n # Approve something, if we were asked to.\n if request.form.get(\"approve\"):\n key = ndb.Key(urlsafe=request.form.get(\"approve\"))\n if key.kind() not in [\"UnapprovedListing\", \"UnapprovedInquiry\"]:\n raise ValueError\n\n entity = key.get()\n entity.approve(\"Approved by {!r} on {!r}\".format(\n users.get_current_user().email(),\n str(datetime.datetime.now())\n ))\n entity.put()\n return \"\"\n\n elif request.form.get(\"deny\"):\n key = ndb.Key(urlsafe=request.form.get(\"deny\"))\n if key.kind() not in [\"UnapprovedListing\", \"UnapprovedInquiry\"]:\n raise ValueError\n\n key.delete()\n return \"\"\n\n if request.form.get(\"automod\"):\n if config.enabled:\n config.blacklist = [x.strip() for x in\n request.form.get(\"blacklist\", \"\").split(\"\\n\")\n if x.strip()]\n config.min_delay = int(request.form.get(\"min_delay\", \"0\"))\n config.enabled = (request.form.get(\"automod\") == \"true\")\n config.put()\n\n inquiries = model.UnapprovedInquiry().query().fetch(100)\n listings = model.UnapprovedListing().query().fetch(100)\n\n inquiries.sort(key=email_order)\n listings.sort(key=email_order)\n\n return render_template(\"moderation/view.html\",\n inquiries=inquiries,\n listings=listings,\n config=config)",
"def proposal(request):\n context={\n\n\n }\n\n return render(request, 'valor_airquality/proposal.html', context)",
"def elder(request):\n assert isinstance(request, HttpRequest)\n\n return render(\n request,\n 'AscensionESports_Baseline/league_layout.html',\n {\n 'background': getElderBackground(),\n 'color': getElderColor(),\n 'title':'Elder League Rosters',\n 'query_results': Elder_League_Request(request),\n 'year': datetime.now().year,\n }\n )",
"def management(request):\n\n # query on all order records\n orders = ODOrder.objects.all()\n\n # structured order into simple dict, \n # later on, in template, we can render it easily, ex: {{ orders }}\n data = {'orders': orders}\n\n return render(request, 'order/management.html', data)",
"async def government(self, ctx):\n await ctx.message.channel.send(\n 'List of useable commands for the parent command: **eboard**\\n\\n**eboard seats** - shows a list of all '\n 'government positions and their corresponding officers.\\n\\n**eboard position \\\"<position>\\\"** - shows the '\n 'current officer that fills this position and a description of the position.')",
"def listerp(request):\n if 'member_id' not in request.session:\n return redirect(\"/login/\")\n try:\n Programme.objects.order_by(\"id\")\n Prog_lister = Programme.objects.all()\n return render(request, 'esihapp/listp.html', locals())\n except KeyError:\n return render(request, 'esihapp/listp.html', locals())",
"def office_edit_process_view(request):\n authority_required = {'verified_volunteer'} # admin, verified_volunteer\n if not voter_has_authority(request, authority_required):\n return redirect_to_sign_in_page(request, authority_required)\n\n office_id = convert_to_int(request.POST.get('office_id', 0))\n office_name = request.POST.get('office_name', False)\n google_civic_office_name = request.POST.get('google_civic_office_name', False)\n google_civic_election_id = request.POST.get('google_civic_election_id', 0)\n primary_party = request.POST.get('primary_party', False)\n state_code = request.POST.get('state_code', False)\n\n election_state = ''\n if state_code is not False:\n election_state = state_code\n elif google_civic_election_id:\n election_manager = ElectionManager()\n results = election_manager.retrieve_election(google_civic_election_id)\n if results['election_found']:\n election = results['election']\n election_state = election.get_election_state()\n\n # Check to see if this office is already in the database\n office_on_stage_found = False\n try:\n office_query = ContestOffice.objects.filter(id=office_id)\n if len(office_query):\n office_on_stage = office_query[0]\n office_on_stage_found = True\n except Exception as e:\n handle_record_not_found_exception(e, logger=logger)\n\n try:\n if office_on_stage_found:\n # Update\n # Removed for now: convert_to_int(office_on_stage.google_civic_election_id) >= 1000000 and\n if office_name is not False:\n office_on_stage.office_name = office_name\n if google_civic_office_name is not False:\n office_on_stage.google_civic_office_name = google_civic_office_name\n if primary_party is not False:\n office_on_stage.primary_party = primary_party\n if positive_value_exists(election_state):\n office_on_stage.state_code = election_state\n office_on_stage.save()\n office_on_stage_id = office_on_stage.id\n messages.add_message(request, messages.INFO, 'Office updated.')\n google_civic_election_id = office_on_stage.google_civic_election_id\n\n return HttpResponseRedirect(reverse('office:office_summary', args=(office_on_stage_id,)) +\n \"?google_civic_election_id=\" + str(google_civic_election_id))\n else:\n # Create new\n office_on_stage = ContestOffice(\n office_name=office_name,\n google_civic_election_id=google_civic_election_id,\n state_code=election_state,\n )\n # Removing this limitation: convert_to_int(office_on_stage.google_civic_election_id) >= 1000000 and\n if primary_party is not False:\n office_on_stage.primary_party = primary_party\n office_on_stage.save()\n messages.add_message(request, messages.INFO, 'New office saved.')\n\n # Come back to the \"Create New Office\" page\n return HttpResponseRedirect(reverse('office:office_new', args=()) +\n \"?google_civic_election_id=\" + str(google_civic_election_id))\n except Exception as e:\n handle_record_not_saved_exception(e, logger=logger)\n messages.add_message(request, messages.ERROR, 'Could not save office.')\n\n return HttpResponseRedirect(reverse('office:office_list', args=()) +\n \"?google_civic_election_id=\" + google_civic_election_id)",
"def polution(request):\r\n return render(request, 'polution.html')",
"def show_equipments(self): \n database = Database('data/database.db')\n equipments = database.read_equipments()\n view = Template(filename=\"view/template.html\", lookup=lookup)\n \n \n return view.render(\n rows = [[item.number, item.name, item.installation_number] for item in equipments],\n pageTitle = \"Équipements\",\n tableTitle = \"Liste de tous les équipements\",\n ths = [\"Numéro\", \"Nom\", \"Numéro d'installation\"]\n )",
"def accommodation(request):\n # Test Comment\n assert isinstance(request, HttpRequest)\n return render(\n request,\n 'accommodation.html',\n context_instance=RequestContext(request, {})\n )",
"def browse_mentors(request):\n all_mentors = Mentor.objects.all()\n mentors = [mentor for mentor in all_mentors if mentor.mentor.has_capacity()]\n return render(request, 'match/browse_mentors.html', {'mentors': mentors})",
"def occupation(request, pk):\r\n occupation = get_object_or_404(Occupation, pk=pk)\r\n return HttpResponse('Occupation: %s' % occupation)",
"def printing_view(request):\n committees = Committee.objects.all().order_by(\"name\")\n\n context = {\"committees\": committees}\n template = \"jurycore/printing_view.html\"\n return render(request, template, context)",
"def office_view(request, slug, id):\n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n office_reference = get_object_or_404(Office, id=id,company=company)\n\n return render_to_response('office_form.html', \n {'details': office_reference,'info':office_reference},\n context_instance=RequestContext(request))",
"def about(request):\n realtors = Realtor.objects.order_by('-hire_date')\n mvp_realtors = Realtor.objects.all().filter(is_mvp=True)\n context = {\n 'realtors': realtors,\n 'mvp_realtors': mvp_realtors\n }\n return render(request, 'pages/about.html', context)",
"def show_applicants_and_mentors():\n data_list = queries2.applicants_and_mentors()[0]\n table_titles = queries2.applicants_and_mentors()[1]\n title = \"Applicants and mentors\"\n return render_template('pages.html', data_list=data_list, title=title, table_titles=table_titles)",
"def voters_eligibility(request, election):\n user = get_user(request)\n\n if request.method == \"GET\":\n # this shouldn't happen, only POSTs\n return HttpResponseRedirect(\"/\")\n\n # for now, private elections cannot change eligibility\n if election.private_p:\n return HttpResponseRedirect(settings.SECURE_URL_HOST + reverse(voters_list_pretty, args=[election.uuid]))\n\n # eligibility\n eligibility = request.POST['eligibility']\n\n if eligibility in ['openreg', 'limitedreg']:\n election.openreg= True\n\n if eligibility == 'closedreg':\n election.openreg= False\n\n if eligibility == 'limitedreg':\n # now process the constraint\n category_id = request.POST['category_id']\n\n constraint = AUTH_SYSTEMS[user.user_type].generate_constraint(category_id, user)\n election.eligibility = [{'auth_system': user.user_type, 'constraint': [constraint]}]\n else:\n election.eligibility = None\n\n election.save()\n return HttpResponseRedirect(settings.SECURE_URL_HOST + reverse(voters_list_pretty, args=[election.uuid]))",
"def get(self, request, **kwargs):\n elementos_list= Elementos.objects.all()\n return render(request, 'alchemy/index.html', {})",
"def update(self, request, *args, **kwargs):\n response = super(ProviderViewSet, self).update(request, *args, **kwargs)\n response.data['message'] = \"Proveedor ha sido editado\"\n return response",
"def view(self):",
"def acceptedPapersIndustrial(request):\n # Test Comment\n assert isinstance(request, HttpRequest)\n return render(\n request,\n 'acceptedIndustrial.html',\n context_instance=RequestContext(request, {})\n )",
"def brother_excuse(request, excuse_id):\n excuse = Excuse.objects.get(pk=excuse_id)\n if not request.user == excuse.brother.user: # brother auth check\n messages.error(request, \"Please log into the brother that submitted that excuse\")\n return HttpResponseRedirect(reverse('dashboard:home'))\n\n context = {\n 'excuse': excuse,\n 'type': 'review',\n }\n return render(request, \"excuse.html\", context)",
"def partners(request):\n return render(request, 'ecosystem/partners.html', {'page': 'partners'})",
"def view_article():\n\n return render_template('article.html', api_endpoint = Parameters()[\"api.endpoint\"])",
"def inv(request):\n assert isinstance(request, HttpRequest)\n departments={}\n booklist=[]\n for league in models.Dept.objects.all(): \n departments[league.pk]=league\n for k,v in get_valid_Books().items():\n booklist.append(v)\n modes=['manage','add','order']\n postobj = request.POST.copy()\n modetype=\"\"\n displaymode=\"\"\n try:\n modetype=postobj['inventoryMode']\n if 'manage' in modetype.lower():\n \n return render(\n request,\n 'app/manageInv.html',\n {\n 'title':'Manage Inventory',\n 'invmodes':modes,\n 'dispmode':'manage',\n 'message':'Manage Inventory details page.',\n 'librarian':get_librarians(),\n 'le':list(range(1,2)),\n 'DepartmentList':departments.keys(),\n 'books':booklist,\n 'year':datetime.now().year,\n }\n )\n elif 'add' in modetype.lower():\n return render(\n request,\n 'app/addInv.html',\n {\n 'title':'Add Inventory',\n 'invmodes':modes,\n 'dispmode':'add',\n 'message':'Inventory page.',\n 'librarian':get_librarians(),\n 'le':list(range(1,11)),\n 'DepartmentList':departments.keys(),\n 'year':datetime.now().year,\n }\n )\n else:\n return render(\n request,\n 'app/orderInv.html',\n {\n 'title':'Order for new books',\n 'invmodes':modes,\n 'dispmode':'add',\n 'message':'Order Books page (procurement).',\n 'librarian':get_librarians(),\n 'le':list(range(1,2)),\n 'DepartmentList':departments.keys(),\n 'year':datetime.now().year,\n }\n )\n except Exception as e:\n return render(\n request,\n 'app/manageInv.html',\n {\n 'title':'Manage Inventory',\n 'invmodes':modes,\n 'dispmode':'manage',\n 'message':'Manage Inventory details page.',\n 'librarian':get_librarians(),\n 'le':list(range(1,2)),\n 'DepartmentList':departments.keys(),\n 'books':booklist,\n 'year':datetime.now().year,\n }\n )\n \n return render(\n request,\n 'app/inventory.html',\n {\n 'title':'Inventory',\n 'invmodes':modes,\n 'dispmode':displaymode,\n 'message':'Inventory details page.',\n 'librarian':get_librarians(),\n 'le':list(range(1,2)),\n 'DepartmentList':departments.keys(),\n 'year':datetime.now().year,\n }\n )",
"def get(self):\n return self.__expedition",
"def edit_announcement():\n # Implement me!\n\n announcement = get_announcement(request.vars.announcement_id, auth.user.email)\n\n announcement.description = request.vars.description\n announcement.name = request.vars.name\n announcement.updated_on = datetime.datetime.utcnow()\n announcement.update_record()\n return response.json(announcement)",
"def award_view(request, slug, id):\n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n award_reference = get_object_or_404(Award, id=id,company=company)\n\n return render_to_response('award_form.html', \n {'details': award_reference,'info':award_reference},\n context_instance=RequestContext(request))"
] | [
"0.60825455",
"0.56893605",
"0.5541821",
"0.55269986",
"0.5434012",
"0.54181087",
"0.5341557",
"0.5296141",
"0.52757007",
"0.5231355",
"0.52184767",
"0.5189548",
"0.5168149",
"0.51590496",
"0.5138254",
"0.51100785",
"0.5106903",
"0.5091372",
"0.5052076",
"0.5046529",
"0.5016765",
"0.5001322",
"0.4996015",
"0.4981423",
"0.49792174",
"0.49615693",
"0.49604514",
"0.49479854",
"0.49316815",
"0.49308777"
] | 0.72415787 | 0 |
Find the substring between the first and last chars/strings | def __find_between(self, s, first, last):
try:
start = s.index(first) + len(first)
end = s.index(last, start)
return s[start:end]
except ValueError:
return "" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def extract_string(begin, end, string):\n b = string.find(begin) + len(begin)\n e = string.find(end, b)\n\n return string[b:e]",
"def find_between(s, first, last):\n try:\n start = s.index(first) + len(first)\n end = s.index(last, start)\n return s[start:end]\n except ValueError:\n return \"\"",
"def extract(string, start_marker, end_marker):\n start_loc = string.find(start_marker)\n end_loc = string.find(end_marker)\n if start_loc == -1 or end_loc == -1:\n return \"\"\n return string[start_loc+len(start_marker):end_loc]",
"def find_between(s, first='<title>', last='</title>'):\n try:\n start = s.index(first) + len(first)\n end = s.index(last, start)\n return s[start:end]\n except ValueError:\n return s",
"def get_str_between_s_and_e(start_str, end_str, line):\n\tstart = line.find(start_str)\n\tif start >= 0:\n\t\tstart = start + len(start_str)\n\t\tend = line.find(end_str, start)\n\t\tif end >= 0:\n\t\t\treturn line[start:end].strip()\n\telse:\n\t\treturn None",
"def extract_substring(string, left, right, right_to_left=False):\n if right_to_left:\n l_index = string.rfind(left) + len(left)\n r_index = string.rfind(right)\n else:\n l_index = string.find(left) + len(left)\n r_index = string.find(right)\n\n return string[l_index:r_index]",
"def get_substring(self, start, end):\n return self.input[start:end]",
"def find_text_between(start,end,haystack):\n found = re.search(start+'(.*)'+end,haystack,re.IGNORECASE | re.DOTALL)\n \n if found:\n return found.group(1).strip()\n else:\n raise Exception(\"There is no substring starting with '{}', ending\"\n \" with '{}' in content '{}' \".format(start,end,haystack))",
"def between_markers(text: str, begin: str, end: str) -> str:\n # your code here\n return text[text.find(begin) + 1:text.find(end)]",
"def get_substring_using_marker(self, whole_string, first_marker, end_marker=None):\r\n start = whole_string.find(first_marker) + len(first_marker)\r\n end = whole_string.find(end_marker, start)\r\n return whole_string[start:end].strip()",
"def get_first_and_last(input_string):\n return input_string[0: 10], input_string[-10:]",
"def find_text_in_string(string, start_text, end_text):\r\n index_start = string.rfind(start_text)\r\n if not index_start==-1:\r\n index_end = string.rfind(end_text)\r\n str = string[index_start+len(start_text):index_end]\r\n return str",
"def getsubString(w, c):\n count = 0\n for x in w:\n if x == c:\n break\n count=count+1\n return w[:count]",
"def first_inside_quotes(s):\n\n assert type(s) == str, repr(s)+' is not a string.'\n assert introcs.count_str(s,'\"') >= 2, repr(s)+' needs two \"\" characters.'\n\n #find first double quotation\n\n first = introcs.find_str(s,'\"')\n #print(first)\n #find second double quotation\n\n second = introcs.find_str(s,'\"',first+1)\n #print(second)\n #get string between first and second quotation\n\n result = s[first+1:second]\n #print(result)\n #print(type(result))\n #return the result\n return result",
"def find_start_end(text, start_text, end_text, start=0):\n # return (s, e) or None\n s = text.find(start_text, start)\n if s < 0:\n return None\n e = text.find(end_text, s+1)\n if e < 0:\n return None\n e += len(end_text)\n return s, e",
"def get_chrom_start_end_from_string(s):\n try:\n chrom, s_e = s.split('__substr__')\n start, end = s_e.split('_')\n return chrom, int(start), int(end)\n except Exception:\n raise ValueError(\"String %s must be of format '{chrom}__substr__{start}_{end}'\" % s)",
"def between_markers(text: str, begin: str, end: str): # -> str\n begin_pos = text.find(begin)\n end_pos = text.find(end)\n \n if (begin_pos != -1 and end_pos != -1 and begin_pos < end_pos):\n return text[begin_pos + len(begin) : end_pos]\n elif (begin_pos == -1 and end_pos != -1):\n return text[0: end_pos]\n elif (begin_pos != -1 and end_pos == -1):\n return text[begin_pos + len(begin) :]\n elif(begin_pos == -1 and end_pos == -1):\n return text\n elif (begin_pos != -1 and end_pos != -1 and begin_pos > end_pos):\n return ''",
"def substr(string, beg, end):\n require_type(isa(string, str), 'the first parameter of substring must be a string')\n if beg < 0 or end >= len(string) or beg > end:\n raise IndexError('the index of substring is invalid')\n return string[beg:end]",
"def extract_sub(s: str):\n subject = re.search(r'sub-\\d+', s)[0]\n return subject",
"def subsentence(sentence, start, end):\r\n \r\n sentence_list=re.split(' ', sentence)\r\n return sentence_list[start:end]",
"def getSubstring(str):\n\tresultStr=[]\n\ttempStr=\"\"\n\tfor i in range(len(str)-1):\n\t\t#result[a]=a in result and result[a]+1 or 1\n\t\ttempStr=str[i]\n\t\tfor b in str[i+1:]:\n\t\t\tif b in tempStr:\n\t\t\t\tbreak\n\t\t\telse: tempStr+=b\n\n\t\tresultStr.append(tempStr)\n\treturn sorted(resultStr,key=lambda x:len(x),reverse=True)[0]",
"def remove(somestring, sub):\n location = somestring.find(sub)\n length = len(sub)\n part_before = somestring[:length+location]\n part_after = somestring[location+length:]\n return part_before + part_after",
"def getBetween(identifiers, haystackStr):\n startLoc = haystackStr.find(identifiers[0]) + len(identifiers[0])\n endLoc = haystackStr[startLoc:].find(identifiers[1]) + startLoc\n return haystackStr[startLoc:endLoc]",
"def extract_string(line, idx, result):\n\n begin = line.find(resource_string_prefix, idx)\n if begin == -1:\n return -1\n \n begin = begin + len(resource_string_prefix)\n end = -1\n for i in range(begin, len(line)):\n if not is_valid_char(line[i]):\n end = i\n break\n\n result.add(line[begin:end])\n return end",
"def _match_start_get_remaining(self, start, text):\n if not text.startswith(start):\n return\n return text[len(start):]",
"def without_end(s):\n string = s[1:-1]\n return string",
"def pluck(text, start, end, default=None):\n # type (str, str, str, Any) -> Any\n if default is None:\n default = \"\"\n idx = text.find(start) + len(start)\n try:\n return text[idx:text.find(end, idx)]\n except IndexError:\n return default",
"def textbetween(variable,\n firstnum=None,\n secondnum=None,\n locationoftext='regular'):\n if locationoftext == 'regular':\n return variable[firstnum:secondnum]\n elif locationoftext == 'toend':\n return variable[firstnum:]\n elif locationoftext == 'tostart':\n return variable[:secondnum]",
"def read_str(\n s: str,\n i1: int,\n i2: int,\n ) -> str: \n\n s_last = s[i1:i2]\n\n return s_last",
"def _strip_slice_of_string(base_string, start_i, end_i) -> Tuple[int, int]:\n while start_i < len(base_string) and base_string[start_i] == \" \":\n start_i += 1\n while end_i > start_i and base_string[end_i - 1] == \" \":\n end_i -= 1\n return start_i, end_i"
] | [
"0.75524485",
"0.7417657",
"0.7238031",
"0.7159151",
"0.7050701",
"0.6868164",
"0.67760736",
"0.66350746",
"0.6626501",
"0.661806",
"0.6558184",
"0.65289724",
"0.6459214",
"0.64507204",
"0.6400073",
"0.63390535",
"0.6337741",
"0.62925154",
"0.6251687",
"0.62035304",
"0.618484",
"0.6144094",
"0.61418736",
"0.61385316",
"0.6040194",
"0.60202426",
"0.6000919",
"0.5965836",
"0.5882364",
"0.5852945"
] | 0.7477597 | 1 |
Figure out where to redirect after the 'Save' button has been pressed when adding a new object. | def response_post_save_add(self, request, obj):
opts = self.model._meta
if "next" in request.GET:
return HttpResponseRedirect(request.GET['next'])
if self.has_change_permission(request, None):
post_url = reverse('admin:%s_%s_changelist' %
(opts.app_label, opts.module_name),
args=(quote(self.prescription.pk),),
current_app=self.admin_site.name)
else:
post_url = reverse('admin:index',
current_app=self.admin_site.name)
return HttpResponseRedirect(post_url) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def response_add(self, request, obj, post_url_continue='../%s/'):\n opts = obj._meta\n pk_value = obj._get_pk_val()\n\n msg = '\"%s\" was successfully added to the \"%s\" menu.' % (\n force_unicode(obj),\n obj.menu_item.menu\n )\n\n if \"_continue\" in request.POST:\n self.message_user(request, msg + ' ' + \"You may edit it again below.\")\n return HttpResponseRedirect(post_url_continue % pk_value)\n\n elif \"_addanother\" in request.POST:\n self.message_user(request, msg + ' ' + (\"You may add another %s below.\" % force_unicode(opts.verbose_name)))\n return HttpResponseRedirect('%s?menu=%s' % (\n request.path,\n obj.menu_item.menu.pk,\n ))\n\n else:\n self.message_user(request, msg)\n return HttpResponseRedirect(obj.menu_item.menu.get_edit_url())",
"def response_post_save_change(self, request, obj):\n opts = self.model._meta\n\n if \"next\" in request.GET:\n return HttpResponseRedirect(request.GET['next'])\n\n if self.has_change_permission(request, None):\n post_url = reverse('admin:%s_%s_changelist' %\n (opts.app_label, opts.module_name),\n args=(quote(self.prescription.pk),),\n current_app=self.admin_site.name)\n else:\n post_url = reverse('admin:index',\n current_app=self.admin_site.name)\n\n return HttpResponseRedirect(post_url)",
"def response_post_save_change(self, request, obj):\n url = reverse('admin:prescription_prescription_detail',\n args=[str(obj.id)])\n return HttpResponseRedirect(url)",
"def __get_redirect_url(self):\n if self.get_submit_save_and_continue_edititing_button_name() not in self.request.POST:\n return self.request.cradmin_app.reverse_appindexurl()\n return self.request.cradmin_app.reverse_appurl(\n 'groupcomment-edit',\n args=self.args,\n kwargs=self.kwargs)",
"def response_add(self, request, obj):\r\n\r\n # in these cases, the redirect is good\r\n if list(set(request.POST.keys()) & set([\"_addanother\", \"_continue\"])):\r\n return super(ServeeModelAdmin, self).response_change(request, obj)\r\n\r\n # we want to override the default save case in the frontend\r\n ref = request.META.get(\"HTTP_REFERER\")\r\n if ref and (ref.find(\"/servee/\") == -1):\r\n if request.is_ajax():\r\n return HttpResponse(\"<script type='text/javascript'>window.location.reload(true);</script>\")\r\n else:\r\n return HttpResponseRedirect(ref)\r\n\r\n # fallback to normal functionality\r\n return super(ServeeModelAdmin, self).response_add(request, obj)",
"def after_successful_edit(self):\n pass",
"def add_object(self, object):\n object.save()",
"def response_post_save_add(self, request, obj):\n\n # a simple hack to set the default prescribing officer\n if obj is not None and obj.prescribing_officer is None:\n obj.prescribing_officer = request.user\n obj.save()\n\n if obj is not None and obj.creator_id == 1:\n obj.creator = request.user\n obj.save()\n\n url = reverse('admin:prescription_prescription_detail',\n args=[str(obj.id)])\n return HttpResponseRedirect(url)",
"def response_add(self, request, obj, post_url_continue=None):\n # We should allow further modification of the user just added i.e. the\n # 'Save' button should behave like the 'Save and continue editing'\n # button except in two scenarios:\n # * The user has pressed the 'Save and add another' button\n # * We are adding a user in a popup\n if '_addanother' not in request.POST and IS_POPUP_VAR not in request.POST:\n request.POST['_continue'] = 1\n return super(UserAdmin, self).response_add(request, obj,\n post_url_continue)",
"def response_change(self, request, obj):\r\n \r\n # in these cases, the redirect is good\r\n if list(set(request.POST.keys()) & set([\"_addanother\", \"_saveasnew\", \"_continue\"])):\r\n return super(ServeeModelAdmin, self).response_change(request, obj)\r\n \r\n # we want to override the default save case in the frontend\r\n ref = request.META.get(\"HTTP_REFERER\")\r\n if ref and (ref.find(\"/servee/\") == -1):\r\n if request.is_ajax():\r\n return HttpResponse(\"<script type='text/javascript'>window.location.reload(true);</script>\")\r\n else:\r\n return HttpResponseRedirect(ref)\r\n \r\n # fallback to normal functionality\r\n return super(ServeeModelAdmin, self).response_change(request, obj)",
"def object_edit_save(request, simulation, object_name):\n # Retrieve the formset from the POST data.\n formset = gen_formset(object_name, simulation, request=request)\n if formset.is_valid():\n # Save the formset (updated values and newly created objects).\n formset.save()\n # Update the foreign keys (we cannot select the newly added forms so we\n # do it for all forms not deleted).\n changed_forms = list(\n set(formset.forms) - set(formset.deleted_forms)\n )\n if object_name in ['centroid', 'crossing', 'link']:\n for form in changed_forms:\n # Link the object to the correct network.\n form.instance.network.add(\n simulation.scenario.supply.network\n )\n elif object_name == 'function':\n for form in changed_forms:\n # Link the function to the correct functionset.\n form.vdf_id = form.instance.id\n form.save()\n form.instance.functionset.add(\n simulation.scenario.supply.functionset\n )\n simulation.has_changed = True\n simulation.save()\n return HttpResponseRedirect(reverse(\n 'metro:object_edit', args=(simulation.id, object_name,)\n ))\n else:\n # Redirect to a page with the errors.\n context = {\n 'simulation': simulation,\n 'formset': formset,\n }\n return render(request, 'metro_app/errors_formset.html', context)",
"def save(self):\n pass",
"def save(self):\n pass",
"def save(self):\n pass",
"def save(self):\n pass",
"def save(self):\n pass",
"def on_save_button_clicked(self, widget):\n active_page = self.get_active_tab()\n active_page.save()",
"def save (self):\n pass",
"def save(self):\n return None",
"def do_saving(self, request, new_object, form, formsets, add):\n try:\n with transaction.atomic(savepoint=False):\n self.save_model(request, new_object, form, not add)\n self.save_related(request, form, formsets, not add)\n change_message = self.construct_change_message(request, form, formsets, add)\n if add:\n self.log_addition(request, new_object, change_message)\n return self.response_add(request, new_object)\n else:\n self.log_change(request, new_object, change_message)\n return self.response_change(request, new_object)\n except ValidationError as ex:\n for message in ex.messages:\n self.message_user(request, message, messages.ERROR)\n return False",
"def on_post(self):\n return \"Ok, the stuff is being saved\"",
"def save(self, *args, **kwargs):\n return",
"def save(self):\n # TODO (Pierre): code",
"def saveButtonMethod(self):\n return AbstractAction(\"Save\")",
"def submit_and_back(self):\n self.submit(skip_confirm=True)\n self.parent().do_detail()",
"def submit_and_back(self):\n self.submit(skip_confirm=True)\n self.parent().do_detail()",
"def save(self, *args, **kwargs):\n step_numeral, step_name = kwargs.pop('step', (None, None))\n\n if step_numeral == 1:\n \"\"\"\n Basic Form: Application & File Uploader\n \"\"\"\n return self.cleaned_data\n if step_numeral == 2:\n \"\"\"\n Basic Form + Mapping Fields\n \"\"\"\n return self.cleaned_data\n\n if step_numeral == 3:\n pass # end-user is previewing",
"def save(self):\n\n pass",
"def onSaveNotesButton(self, button):\n pass",
"def add_location():\n\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n \n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n \n form = Location_Form()\n\n if form.validate_on_submit():\n try:\n location = Location(\n site_name = form.site_name.data,\n city = form.city.data,\n state = form.state.data\n )\n db.session.add(location)\n db.session.commit()\n except IntegrityError:\n flash(\"This location already exists\", \"danger\")\n return render_template(\"/admin/add_location.html\", form = form)\n \n flash(\"Location Added!\", \"success\")\n return redirect(\"/administrator\")\n else:\n return render_template(\"/admin/add_location.html\", form = form)"
] | [
"0.64924026",
"0.6153812",
"0.61309814",
"0.59334826",
"0.591273",
"0.5899871",
"0.56958824",
"0.5689271",
"0.5634287",
"0.55540913",
"0.5533874",
"0.55007654",
"0.55007654",
"0.55007654",
"0.55007654",
"0.55007654",
"0.5483759",
"0.5469536",
"0.5456317",
"0.54537416",
"0.54476535",
"0.5426135",
"0.5411427",
"0.54058385",
"0.5402421",
"0.5402421",
"0.53976613",
"0.53862286",
"0.53834504",
"0.53709304"
] | 0.68166924 | 0 |
Save the model and assign delete permissions to particular objects. Also save the user to the object if it is an audit object. | def save_model(self, request, obj, form, change):
try:
obj.prescription = self.prescription
except AttributeError:
pass
if not obj.pk:
obj.creator = request.user
obj.modifier = request.user
obj.save()
# If can_delete is set, allow the user to delete this object.
if self.can_delete:
opts = self.opts
group = Group.objects.get(name='Users')
perm = get_permission_codename('delete', opts)
assign_perm("%s.%s" % (opts.app_label, perm), group, obj) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save_model( self, request, obj, form, change ):\n obj.save()",
"def on_model_delete(self, model):\n if not current_user.is_active or not current_user.is_authenticated:\n abort(403)\n if not user_has_permission(current_user, 'can_delete','updates'):\n abort(403)",
"def save_model(self, request, obj, form, change):\n if not change:\n obj.creator = request.user\n obj.save()",
"def save_model(self, request, obj, form, change):\n if not change:\n obj.author = request.user\n obj.save()",
"def on_model_delete(self, model):\n if not current_user.is_active or not current_user.is_authenticated:\n abort(403)\n if not user_has_permission(current_user, 'can_delete', 'advisors'):\n abort(403)",
"def on_model_delete(self, model):\n if not current_user.is_active or not current_user.is_authenticated:\n abort(403)\n if not user_has_permission(current_user, 'can_delete','admins'):\n abort(403)",
"def save_model(self, request, obj, form, change):\r\n if change:\r\n obj.updated_by = request.user\r\n obj.save()",
"def on_model_delete(self, model):\n if not current_user.is_active or not current_user.is_authenticated:\n abort(403)\n if not user_has_permission(current_user, 'can_delete', 'advisorapplicants'):\n abort(403)",
"def save_model(self, request, obj, form, change):\n obj.created_by = request.user\n\n super(\n IngredientAdmin,\n self\n ).save_model(\n request,\n obj,\n form,\n change,\n )",
"def save_model(self, request, obj, form, change):\n obj.propietario = request.user\n obj.save()",
"def on_model_change(self, form, model, is_created):\n if not current_user.is_active or not current_user.is_authenticated:\n abort(403)\n elif is_created:\n if not user_has_permission(current_user, 'can_create', 'advisors'):\n abort(403)\n else:\n if not user_has_permission(current_user, 'can_edit', 'advisors'):\n abort(403)",
"def on_model_change(self, form, model, is_created):\n if not current_user.is_active or not current_user.is_authenticated:\n abort(403)\n elif is_created:\n if not user_has_permission(current_user, 'can_create','updates'):\n abort(403)\n else:\n if not user_has_permission(current_user, 'can_edit','updates'):\n abort(403)",
"def on_model_delete(self, model):\n if not current_user.is_active or not current_user.is_authenticated:\n abort(403)\n if not user_has_permission(current_user, 'can_delete','mn_activity'):\n abort(403)",
"def write_authorize(cls, user, obj):\n if not cls._meta.model.published_where_is_examiner(user).filter(id=obj.id):\n raise PermissionDenied()\n if obj.id == None:\n raise PermissionDenied() # We only allow update",
"def save_model(self, request, instance, form, change):\r\n instance.updated_by = request.user\r\n instance.save()",
"def save_model(self, request, instance, form, change):\r\n instance.updated_by = request.user\r\n instance.save()",
"def save_object(self, object, **kwargs):\n object.save()",
"def save_model(self, request, obj, form, change):\n if not change:\n obj.author = request.user\n super(EntryAdmin, self).save_model(request, obj, form, change)",
"def save_model(self, *args, **kwargs):\n raise NotImplementedError",
"def bulkSave(self, objList: List[PermissionContext], tokenData: TokenData):",
"def on_model_change(self, form, model, is_created):\n if not current_user.is_active or not current_user.is_authenticated:\n abort(403)\n elif is_created:\n if not user_has_permission(current_user, 'can_create','admins'):\n abort(403)\n else:\n if not user_has_permission(current_user, 'can_edit','admins'):\n abort(403)",
"def save(self, *args, **kwargs):\n user = kwargs.pop('user', None)\n if not user:\n raise ValueError(\"User not present in the model\")\n if not hasattr(self, 'owner'):\n self.owner = user\n elif not self.can_be_edited(user):\n raise ValueError(\"User can't edit the model\")\n self.last_updated_by = user\n self.last_updated_datetime = datetime.datetime.now()\n super(BaseModel, self).save(*args, **kwargs)",
"def save_model(self):\n pass",
"def on_model_delete(self, model):\n if not current_user.is_active or not current_user.is_authenticated:\n abort(403)\n if not user_has_permission(current_user, 'can_delete','roles'):\n abort(403)",
"def save_model(self, request, obj, form, change):\n obj.revise()",
"def on_model_delete(self, model):\n if not current_user.is_active or not current_user.is_authenticated:\n abort(403)\n if not user_has_permission(current_user, 'can_delete','specialties'):\n abort(403)",
"def on_model_change(self, form, model, is_created):\n if not current_user.is_active or not current_user.is_authenticated:\n abort(403)\n elif is_created:\n if not user_has_permission(current_user, 'can_create', 'advisorapplicants'):\n abort(403)\n else:\n if not user_has_permission(current_user, 'can_edit', 'advisorapplicants'):\n abort(403)",
"def on_model_change(self, form, model, is_created):\n if not current_user.is_active or not current_user.is_authenticated:\n abort(403)\n elif is_created:\n if not user_has_permission(current_user, 'can_create','mn_activity'):\n abort(403)\n else:\n if not user_has_permission(current_user, 'can_edit','mn_activity'):\n abort(403)",
"def on_model_delete(self, model):\n if not current_user.is_active or not current_user.is_authenticated:\n abort(403)\n if not user_has_permission(current_user, 'can_delete','subspecialties'):\n abort(403)",
"def save_model(self, request, instance, form, change):\n pass"
] | [
"0.64432746",
"0.6320202",
"0.63064605",
"0.6246145",
"0.62002057",
"0.60748917",
"0.60741484",
"0.6067784",
"0.60301447",
"0.5991352",
"0.5975772",
"0.59583265",
"0.5951502",
"0.5950179",
"0.59379965",
"0.59379965",
"0.59276307",
"0.5916935",
"0.5892021",
"0.58640754",
"0.5857622",
"0.58468264",
"0.5840486",
"0.58292997",
"0.5826913",
"0.5815212",
"0.57926416",
"0.5778155",
"0.5770653",
"0.57702863"
] | 0.727388 | 0 |
Fix up the display of the criteria so that it looks a bit nicer. | def criteria_display(self, obj):
return markdownify(obj.criteria) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _print_active_criteria(conv_status, conv_requirements):\n conv_str = f\"\\n\\t {'===> Final Convergence Report <===': ^76}\\n\"\n conv_str += \"\\n\\t\" + \"-\" * 76\n conv_str += f\"\\n\\t|{'Required Criteria': ^24}|{'One or More Criteria': ^24}|{'Alternate Criteria': ^24}|\"\n conv_str += \"\\n\\t\" + \"-\" * 76\n\n print_len = max(\n len(conv_status.get(\"required\")), max(len(conv_status.get(\"one of\")), len(conv_status.get(\"alternate\")))\n )\n\n for i in range(print_len):\n\n conv_str += \"\\n\\t|\"\n\n for key in conv_status:\n\n # conv_requirments[key] is an empty list if no criteria match (not guaranteed for conv_status)\n if conv_requirements.get(key)[0] is None or i >= len(conv_status.get(key)):\n conv_str += f\"{'': ^24}|\"\n else:\n conv_str += f\"{' [x]' if conv_status.get(key)[i] else ' [ ]'}{conv_requirements.get(key)[i]: ^20}|\"\n\n conv_str += \"\\n\\t\" + \"-\" * 76 + \"\\n\\n\"\n return conv_str",
"def __str__(self):\n return f\"criterion {self._name}\"",
"def _display_operators(operators):\n def sort_name(o): return o.name\n def filter_op(o): return any([o.email, o.address, o.website, o.twitter])\n\n return sorted(filter(filter_op, operators), key=sort_name)",
"def __str__(self):\n return \"(%s)\" % ' '.join(map(str, self.__subrules))",
"def criteria(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___Criterion]:",
"def customize_search_results(self):\n adjust_date_range(self.driver, self.date_range)\n adjust_salary_range(self.driver, self.salary_range)\n # adjust_search_radius(self.driver, self.search_radius) # deprecated\n # scroll to top of page so the sorting menu is in view\n self.driver.execute_script(\"window.scrollTo(0, 0);\")\n sort_results_by(self.driver, self.sort_by)",
"def __str__(self):\n return \"(%s)\" % ' | '.join(map(str, self.__subrules))",
"def latex_criteria(value):\n value = value.replace(' ', '\\hspace*{0.5cm}').replace('\\n', '\\\\newline')\n return value",
"def _get_criteria(self):\n for molecule in self.values():\n molecule.get_criterion()",
"def criteria_prettified(self, instance):\n\n # Convert the data to sorted, indented JSON\n response = json.dumps(instance.criteria, sort_keys=True, indent=2)\n\n # Truncate the data. Alter as needed\n response = response\n\n # Get the Pygments formatter\n formatter = HtmlFormatter(style='colorful')\n\n # Highlight the data\n response = highlight(response, JsonLexer(), formatter)\n\n # Get the stylesheet\n style = \"<style>\" + formatter.get_style_defs() + \"</style><br>\"\n\n # Safe the output\n return mark_safe(style + response)",
"def to_criteria(self):\r\n c = []\r\n if self.minmax_criteria is not None:\r\n c.extend(self.minmax_criteria.values())\r\n\r\n return c",
"def sorter(row):\n criteria = []\n for value in row[1]: # Ignore enumeration\n criteria.append(\n (\n value is not None,\n \"\" if isinstance(value, Number) else type(value).__name__,\n value,\n )\n )\n return criteria",
"def __str__(self):\n return \"Query(constraints={},model={})\".format(\n [str(c) for c in self.constraints], self.model\n )",
"def get_conditions(self, ):\n args = []\n conds = []\n # datetime conditions\n solve_lower_upper(args, conds, \"datetime\",\n self.dialog.datetime_range.get_lower_datetime(),\n self.dialog.datetime_range.get_upper_datetime())\n\n # instruments selected\n instrs = self.dialog.instruments.get_checked_rows()\n if is_null_or_empty(instrs):\n return None\n else:\n solve_field_in(args, conds, \"paper_id\",\n map(lambda a: a[0], instrs))\n \n # numerical fields\n for (field_name, control) in [(\"count\", self.dialog.count),\n (\"points\", self.dialog.price),\n (\"commission\", self.dialog.comm),\n (\"volume\", self.dialog.volume)]:\n solve_lower_upper(args, conds, field_name,\n control.get_lower_value(),\n control.get_upper_value())\n\n # position\n inpos = self.dialog.position.get_value()\n if inpos <> None:\n if inpos:\n conds.append(\"position_id is not null\")\n else:\n conds.append(\"position_id is null\")\n\n # direction\n direct = self.dialog.direction.get_value()\n if direct <> None:\n conds.append(\"direction = ?\")\n args.append(direct)\n\n # accounts\n cac = self._parent.model.get_current_account()\n ss = self.dialog.account_current.get_value()\n if ss == \"current\":\n if cac == None:\n return None # we want to show deals from current account but current account is None\n conds.append(\"account_id = ?\")\n args.append(cac[\"id\"])\n elif ss == \"all\":\n pass\n elif ss == \"select\":\n selected = self.dialog.accounts.get_checked_rows()\n if is_null_or_empty(selected):\n return None\n else:\n solve_field_in(args, conds, 'account_id',\n map(lambda a: a[0], selected))\n return (reduce_by_string(\" and \", conds), args)",
"def __str_healthrule_critical_conditions(self,healthrule):\n def str_custom_condition_expression(condition,expression):\n # In custom conditions the expression is given, only need to replace shortNames by metric name\n if 'metricExpression' in condition:\n return expression.replace( condition['shortName'],\n condition['metricExpression']['metricDefinition']['logicalMetricName'].lower() + \" \" + \\\n condition['operator'].lower() + \" \" + \\\n str(condition['value']) )\n else:\n return str_custom_condition_expression(condition['condition1'],\n str_custom_condition_expression(condition['condition2'],expression) )\n def str_condition_expression(condition,operator):\n # In the rest of conditions, no expression is given, need to create it from scratch\n if 'metricExpression' in condition and 'metricDefinition' in condition['metricExpression']:\n metricExp = condition['metricExpression']['metricDefinition']['logicalMetricName'].lower() + \" \" + \\\n condition['operator'].lower() + \" \" + str(condition['value'])\n return metricExp\n elif 'metricExpression' in condition and condition['conditionExpression'] is not None:\n return condition['conditionExpression']\n else:\n return str_condition_expression(condition['condition1'],operator) + \" \" + operator + \" \" + \\\n str_condition_expression(condition['condition2'],operator)\n\n if 'evalCriterias' not in healthrule:\n if 'DEBUG' in locals(): sys.stderr.write(\"Unrecognized evaluation criteria for healthrule \"+healthrule['name'])\n elif healthrule['evalCriterias']['criticalCriteria'] is not None: ## Legacy XML format\n if healthrule['evalCriterias']['criticalCriteria']['conditions'][0]['evalDetail']['evalDetailType'] == \"METRIC_EXPRESSION\":\n return healthrule['evalCriterias']['criticalCriteria']['conditions'][0]['evalDetail']['metricExpression']\n elif healthrule['evalCriterias']['criticalCriteria']['conditions'][0]['evalDetail']['evalDetailType'] == \"SINGLE_METRIC\":\n evalDetail = healthrule['evalCriterias']['criticalCriteria']['conditions'][0]['evalDetail']\n if evalDetail['metricEvalDetail']['metricEvalDetailType']==\"BASELINE_TYPE\":\n return evalDetail['metricPath']+\" is \"+ \\\n evalDetail['metricEvalDetail']['baselineCondition']+\" \"+ \\\n evalDetail['metricEvalDetail']['baselineName']+\" by \"+ \\\n str(evalDetail['metricEvalDetail']['compareValue'])+\" \"+ \\\n evalDetail['metricEvalDetail']['baselineUnit']\n elif evalDetail['metricEvalDetail']['metricEvalDetailType']==\"SPECIFIC_TYPE\":\n return evalDetail['metricPath']+\" is \"+ \\\n evalDetail['metricEvalDetail']['baselineCondition']+\" \"+ \\\n str(evalDetail['metricEvalDetail']['compareValue'])\n return \"\"",
"def _transform_criteria(criteria, params_dict):\n\n conv_met = {key: fabs(val) < params_dict.get(f\"conv_{key}\") for key, val in criteria.items()}\n conv_active = {key: params_dict.get(f\"i_{key}\") for key in criteria}\n\n return conv_met, conv_active",
"def __str_healthrule_critical_conditions(self,healthrule):\n if 'critical' in healthrule and healthrule['critical'] is not None:\n condition = healthrule['critical']['condition']\n if healthrule['critical']['conditionAggregationType'] == \"CUSTOM\":\n conditionExpression = self.__format_condition_expression(healthrule['critical']['conditionExpression'])\n return self.__str_condition_expression(condition=condition,expression=conditionExpression)\n else: # conditionAggregationType is \"ANY\", \"ALL\" or null\n operator = \"OR\" if healthrule['critical']['conditionAggregationType'] == \"ANY\" else \"AND\"\n return self.__str_condition_expression(condition=condition,aggregationType=operator)\n elif 'critical' in healthrule: # and healthrule['critical'] is None:\n return \"\"\n elif 'evalCriterias' in healthrule:\n sys.stderr.write(\"Format not supported.\")\n return \"\"\n else:\n sys.stderr.write(\"Unrecognized evaluation criteria for healthrule \"+healthrule['name'])\n return \"\"",
"def test_for_criteria(self):\n ignore = ['interpreter_method', 'average_by_sample_or_site', 'include_nrm']\n values = ([dic['value'] for dic in self.acceptance_criteria.values() if (dic['criterion_name'] not in ignore and dic['value'] != -999)])\n return values",
"def load_criterias():\r\n l = [ (p.id, p.name) for p in StockProperty.objects.all() ]\r\n l.insert(0, ('', 'Select to add criteria ...'))\r\n return l",
"def __str__(self):\n return \"{} != {} ({})\".format(self.var1.name,\n self.var2.name,\n self.satisfied())",
"def absolute_criteria(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"absolute_criteria\")",
"def filter_criteria(self):\n return self.filter_nodes('//Validation/Criteria')",
"def get_criteria(self):\n\n\t\treturn self.__criteria",
"def __str__(self) -> str:\n header = [(\"Computation\", \"Time\"), (\"Error Term\", \"Draws\")]\n values = [format_seconds(self.computation_time), self.draws]\n if self.fp_iterations.sum() > 0 or self.contraction_evaluations.sum() > 0:\n header.extend([(\"Fixed Point\", \"Iterations\"), (\"Contraction\", \"Evaluations\")])\n values.extend([self.fp_iterations.sum(), self.contraction_evaluations.sum()])\n return format_table(header, values, title=\"Optimal Instrument Results Summary\")",
"def __str_healthrule_warning_conditions(self,healthrule):\n if 'warning' in healthrule and healthrule['warning'] is not None:\n condition = healthrule['warning']['condition']\n if healthrule['warning']['conditionAggregationType'] == \"CUSTOM\":\n conditionExpression = self.__format_condition_expression(healthrule['warning']['conditionExpression'])\n return self.__str_condition_expression(condition=condition,expression=conditionExpression)\n else: # conditionAggregationType is \"ANY\", \"ALL\" or null\n operator = \"OR\" if healthrule['warning']['conditionAggregationType'] == \"ANY\" else \"AND\"\n return self.__str_condition_expression(condition=condition,aggregationType=operator)\n elif 'warning' in healthrule: # and healthrule['warning'] is None:\n return \"\"\n elif 'evalCriterias' in healthrule:\n sys.stderr.write(\"Format not supported.\")\n return \"\"\n else:\n sys.stderr.write(\"Unrecognized evaluation criteria for healthrule \"+healthrule['name'])\n return \"\"",
"def _UpdateCriteria(self):\n grad = self.traj.grad[-1]\n disp = self.traj.coords[-1] - self.traj.coords[-2]\n self.delta_e = self.traj.energy[-1] - self.traj.energy[-2]\n self.grad_max = numpy.amax(grad)\n self.disp_max = numpy.amax(disp)\n self.grad_rms = math.sqrt(numpy.mean(grad**2))\n self.disp_rms = math.sqrt(numpy.mean(disp**2))",
"def __str__(self):\n\n return 'IF {0} THEN {1}'.format(', '.join([str(fv) for fv in self.fvals]),\n str(self.label))",
"def logic_program_form(self):\n s = ''\n\n if self.return_sort == \"booleans\" :\n s = s + self.whenReturnSortBooleans()\n else :\n s = s + self.whenReturnSortNotBooleans()\n\n for attr_name in self.attr_names:\n s = s + '% CWA for dom_' + attr_name + '\\n\\n'\n s = s + '-dom_' + attr_name + '('\n s = s + concatenateParams(self.param_sorts, ') :-\\n\\t') \n s = s + 'not dom_' + attr_name + '('\n s = s + concatenateParams(self.param_sorts, '),\\n\\t') \n s = s + constructInstance(self.param_sorts, '.\\n\\n')\n\n return s",
"def __str__(self):\n return '%s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s ' % (self.Month,\n self.Production,self.Hygienist_Production,\n self.Collections_Percentage,self.Overthe_Counter,\n self.AR_31_60,self.AR_61_90,self.AR_Over_90,\n self.AR_Ins_31_60,self.AR_Ins_61_90,self.AR_Ins_Over_90,\n self.New_Patients,self.Total_Patients_Seen,\n self.Broken_Apointments,self.Broken_Appt_Pct,\n self.Hygiene_Pct)",
"def get_where_clause(self, params: Dict) -> str:\n return ''"
] | [
"0.6004934",
"0.5642903",
"0.5641137",
"0.56178117",
"0.5541909",
"0.5533003",
"0.55291677",
"0.5502816",
"0.5482058",
"0.54708695",
"0.5310582",
"0.5297918",
"0.52971756",
"0.52587277",
"0.5255082",
"0.5253097",
"0.5229677",
"0.52207726",
"0.5216699",
"0.5201825",
"0.51989347",
"0.5178008",
"0.51566607",
"0.5148845",
"0.5135451",
"0.5126749",
"0.5120552",
"0.5116641",
"0.5106011",
"0.5105378"
] | 0.730556 | 0 |
At the beginning the function reads the layer thicknesses of a composite material and converts its properties into those of an equivalent isotropic material. The properties of the isotropic material are then assigned to the tables if necessary. Additionally, the function checks the input parameters of the isotropic material for consistency. Finally, the function performs all computations on the table data and stores their results in the Graph instance containers | def updateData( Tables, Graph, LayersInfo, WarningMessage ):
# clean the warning message
LayersInfo.clean()
WarningMessage.clean()
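    # Keep a copy of the current layer-thickness entry so it can be restored if parsing fails below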
LayerThicknessBuffer = Tables[ "GeometryProperties" ].getValue( 0, 2 )
try:
Layers = getLayersFromString( Tables[ "GeometryProperties" ].getValue( 0, 2 ) )
LayersInfo.printMessage( str( len( Layers ) ) )
# Homogenize the input data
if len(Layers) != 1:
makeMultiLayerMask( Tables )
HomogenizedData = homogenize( Tables[ "ElasticModulus" ].getData( )[ 0 ],
Tables[ "ShearModulus" ].getData( )[ 0 ],
Tables[ "PoissonRatios" ].getData( ),
Layers )
#cangeMode( Tables, WarningMessage, Graph.getMode( ) )
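            # Replace the per-layer table entries with the effective (homogenized) isotropic constants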
Tables[ "ElasticModulus" ].assignValuesSet( HomogenizedData[ "ElasticModulus" ] )
Tables[ "ShearModulus" ].assignValuesSet( HomogenizedData[ "ShearModulus" ] )
Tables[ "PoissonRatios" ].assignValuesSet( HomogenizedData[ "PoissonRatios" ] )
Tables[ "GeometryProperties" ].assignValue( 0, 2, HomogenizedData[ "TotalThickness" ] )
        # Part of error handling. The function "isInputNegative" throws an error
        # if any element has a negative value.
isInputNegative( Tables [ "ElasticModulus" ].getData( ) )
isInputNegative( Tables [ "ShearModulus" ].getData( ) )
isInputNegative( Tables [ "PoissonRatios" ].getData( ) )
isInputNegative( Tables [ "MaterialProperties" ].getData( ) )
isInputNegative( Tables [ "GeometryProperties" ].getData( ) )
# update the tables buffers
makeMask( Tables, Graph.getMode() )
        # before calling user-defined functions, check the current mode
cangeMode( Tables, WarningMessage, Graph.getMode() )
precomputePoissonRatios( Tables )
# get data from the corresponding tables
ElasticModulusData = Tables [ "ElasticModulus" ].getData( )
ShearModulusData = Tables [ "ShearModulus" ].getData( )
PoissonRatiosData = Tables [ "PoissonRatios" ].getData( )
MaterialPropertiesData = Tables [ "MaterialProperties" ].getData( )
GeometryPropertiesData = Tables [ "GeometryProperties" ].getData( )
#################### CALL USER-SPECIFIC FUNCTION ##########################
testInputData( Graph.getMode(), PoissonRatiosData )
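# populate the Graph containers: wave speeds, modes in band, modal density, modal overlap factor, maximum element size and plate eigenfrequencies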
Graph.Containers[ "WaveVelocity" ] = wave_speeds(
ElasticModulusData,
ShearModulusData,
PoissonRatiosData,
MaterialPropertiesData,
GeometryPropertiesData,
bool( Graph.getMode() ),
Graph.getRange() )
Graph.Containers[ "ModesInBand" ] = ModesInBand(
ElasticModulusData,
ShearModulusData,
PoissonRatiosData,
MaterialPropertiesData,
GeometryPropertiesData,
bool( Graph.getMode( ) ),
Graph.getRange( ) )
Graph.Containers[ "ModalDensity" ] = ModaleDichte(
Graph.Containers[ "WaveVelocity" ][ "c_L" ],
Graph.Containers[ "WaveVelocity" ][ "c_S" ],
Graph.Containers[ "WaveVelocity" ][ "c_B_eff" ],
Graph.Containers[ "WaveVelocity" ][ "c_g_eff" ],
GeometryPropertiesData,
bool( Graph.getMode( ) ),
Graph.getRange( ) )
Graph.Containers[ "ModalOverlapFactor" ] = ModalOverlapFactor(
MaterialPropertiesData,
Graph.Containers[ "ModalDensity" ],
Graph.getRange( ) )
Graph.Containers[ "MaxElementSize" ] = MaximumElementSize(
Graph.Containers[ "WaveVelocity" ][ "c_B" ],
Graph.Containers[ "WaveVelocity" ][ "c_B_eff" ],
Graph.getRange( ) )
Graph.Containers[ "EigenFrequency" ] = EigenfrequenciesPlate(
ElasticModulusData,
ShearModulusData,
PoissonRatiosData,
MaterialPropertiesData,
GeometryPropertiesData,
bool( Graph.getMode() ),
Graph.getRange() )
# Update the current graph with new data
updateGraph( Graph, Graph.getCurrentGraphNumber( ) )
WarningMessage.clean()
except VibroP_DataCorrupted as Error:
WarningMessage.printMessage( str(Error) )
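# restore the layer-thickness entry that was buffered at the top of the function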
Tables[ "GeometryProperties" ].setValue( 0, 2, LayerThicknessBuffer, "" )
except VibroP_WrongLayersThikness as Error:
WarningMessage.printMessage( str(Error) )
except VibroP_TableCorrupted as Error:
WarningMessage.printMessage( str(Error) )
#'''
except:
Message = "Error: Unexpected error. Please refer to the code"
WarningMessage.printMessage( Message )
#''' | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_material_info(TABLE_info):\n \"\"\"\n 1 Get info from TABLE_info.\n \"\"\"\n width = TABLE_info[0]\n height = TABLE_info[1]\n t_m = TABLE_info[2]\n\n \"\"\"\n 2 Get material info.\n \"\"\"\n z_m = 3 * t_m\n\n m_width = rs.GetInteger(\"Put the width of material\", z_m, None, None)\n\n t_sen = rs.GetReal(\"Put Int(mm): Thickness of material to cut SEN.\", t_m / 2, None, None)\n\n x_m1 = m_width\n x_m2 = height - x_m1\n x_m3 = x_m2\n x_m4 = x_m1\n\n y_m2 = m_width\n y_m3 = y_m2\n y_m1 = width - (y_m2 + y_m3)\n y_m4 = y_m1\n\n\n # material1\n m1_p0 = (x_m3, y_m3)\n m1_p1 = (x_m3, y_m3 + y_m1)\n m1_p2 = (x_m3 + x_m1, y_m3 + y_m1)\n m1_p3 = (x_m3 + x_m1, y_m3)\n m1_points = [m1_p0, m1_p1, m1_p2, m1_p3]\n\n m1_info = [x_m1, y_m1, z_m, m1_points, t_sen]\n\n # material2\n m2_p0 = (0, width - y_m2)\n m2_p1 = (0, width)\n m2_p2 = (height - x_m1, width)\n m2_p3 = (height - x_m1, width - y_m2)\n m2_points = [m2_p0, m2_p1, m2_p2, m2_p3]\n\n m2_info = [x_m2, y_m2, z_m, m2_points, t_sen]\n\n # material3\n m3_p0 = (0, 0)\n m3_p1 = (0, y_m3)\n m3_p2 = (x_m3, y_m3)\n m3_p3 = (x_m3, 0)\n m3_points = [m3_p0, m3_p1, m3_p2, m3_p3]\n\n m3_info = [x_m3, y_m3, z_m, m3_points, t_sen]\n\n # material4\n m4_p0 = (0, y_m3)\n m4_p1 = (0, y_m3 + y_m4)\n m4_p2 = (-x_m4, y_m3 + y_m4)\n m4_p3 = (-x_m4, y_m3)\n m4_points = [m4_p0, m4_p1, m4_p2, m4_p3]\n\n m4_info = [x_m4, y_m4, z_m, m4_points, t_sen]\n\n return m1_info, m2_info, m3_info, m4_info",
"def __init__(self, folder):\n print \"folder passed is \", folder\n self.folder = folder\n self.geometry = gf.geometry(self.folder)\n self.elements = gf.dictionary_set()\n self.area = np.zeros(shape = (8))\n self.Vol = (self.geometry.properties['span_number']*(self.geometry.properties['span_width']*\n self.geometry.properties['span_height'] + self.geometry.properties['cover_height']\n *self.geometry.properties['span_width']/2))\n self.F = np.zeros(shape = (8, 8))\n of.view_factor(self.geometry, self.F, self.area, self.Vol)\n tran = [self.geometry.properties['tra_cover_out'],0.0,0.0,\n self.geometry.properties['tra_sidewall_out'],\n self.geometry.properties['tra_cover_in'],\n self.geometry.properties['tra_sidewall_in'],0.0,0.0]\n emi = [self.geometry.properties['emi_cover_out'],1.0,1.0,\n self.geometry.properties['emi_sidewall_out'],\n self.geometry.properties['emi_cover_in'],\n self.geometry.properties['emi_sidewall_in'],1.0,1.0] \n self.tr, self.em, self.re = of.optictal_prop(tran,emi)\n if ((self.tr + self.em).any() > 1.0):\n print \"error in optical properties\"\n self.T = np.zeros(shape = (2,10))\n self.RH = np.zeros(shape = (2,10))\n # 8 inside,9 outside \n self.qcond = np.zeros(shape = (2,8))\n self.qconv = np.zeros(shape = (2,8))\n self.qrad = np.zeros(shape = (2,8))\n self.j = np.zeros(shape = (2,8))\n self.g = np.zeros(shape = (2,8))\n self.alpha = np.zeros(shape = (2,8))\n deltaT = 300\n RH_in = 0.6\n fg.set_initial_conditions(self.geometry.properties['t_air_inside'],\n 278,\n RH_in,self.T,self.RH , self.geometry.properties['t_air'],self.g,\n self.geometry.properties['sky_temp'])\n self.T, self.j, self.g, self.alpha, self.qrad, self.qconv = fg.solver_T(self.T,self.qrad,self.qconv,self.alpha,self.j,self.g,self.em,self.tr,\n self.geometry.properties['wind_speed'],\n self.F,self.geometry.properties['heat_flux'],1,1.0,self.area,\n self.geometry.properties['rho'],self.geometry.properties['cp'],\n self.Vol,self.geometry.properties['degree_window'],deltaT)",
"def _calculate_parameters(self, thickness: int = 10):\n\n self.thickness = thickness\n\n # set orientation dependent parameters: (different for x, y, z-PML)\n # NOTE: these methods are implemented by the subclasses of PML.\n self._set_locations()\n self._set_shape()\n self._set_sigmaE()\n self._set_sigmaH()\n\n # set the other parameters\n Nx, Ny, Nz = self.shape # is defined by _set_shape()\n self.phi_E = bd.zeros((Nx, Ny, Nz, 3))\n self.phi_H = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Ex = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Ey = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Ez = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Hx = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Hy = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Hz = bd.zeros((Nx, Ny, Nz, 3))\n\n self.bE = bd.exp(-(self.sigmaE / self.k + self.a) * self.grid.courant_number)\n self.cE = (\n (self.bE - 1.0)\n * self.sigmaE # is defined by _set_sigmaE()\n / (self.sigmaE * self.k + self.a * self.k ** 2)\n )\n\n self.bH = bd.exp(-(self.sigmaH / self.k + self.a) * self.grid.courant_number)\n self.cH = (\n (self.bH - 1.0)\n * self.sigmaH # is defined by _set_sigmaH()\n / (self.sigmaH * self.k + self.a * self.k ** 2)\n )",
"def make_male_3D_model\\\n (TABLE_info, m1_male_crvs, m2_male_left_crvs, m2_male_right_crvs,\\\n m3_male_left_crvs, m3_male_right_crvs, m4_male_crvs):\n \"\"\"\n 1 Get t_m from TABLE_info\n \"\"\"\n width = TABLE_info[0]\n t_m = TABLE_info[2]\n\n \"\"\"\n 2 Get crvs from list.\n \"\"\"\n # m1\n m1_male_upper_crv = m1_male_crvs[0]\n m1_male_middle_crv = m1_male_crvs[1]\n m1_male_lower_crv = m1_male_crvs[2]\n\n # m2\n m2_male_left_upper_crv = m2_male_left_crvs[0]\n m2_male_left_middle_crv = m2_male_left_crvs[1]\n m2_male_left_lower_crv = m2_male_left_crvs[2]\n\n m2_male_right_upper_crv = m2_male_right_crvs[0]\n m2_male_right_middle_crv = m2_male_right_crvs[1]\n m2_male_right_lower_crv = m2_male_right_crvs[2]\n\n # m3\n m3_male_left_upper_crv = m3_male_left_crvs[0]\n m3_male_left_middle_crv = m3_male_left_crvs[1]\n m3_male_left_lower_crv = m3_male_left_crvs[2]\n\n m3_male_right_upper_crv = m3_male_right_crvs[0]\n m3_male_right_middle_crv = m3_male_right_crvs[1]\n m3_male_right_lower_crv = m3_male_right_crvs[2]\n\n # m4\n m4_male_upper_crv = m4_male_crvs[0]\n m4_male_middle_crv = m4_male_crvs[1]\n m4_male_lower_crv = m4_male_crvs[2]\n\n \"\"\"\n 3 Make 3D.\n \"\"\"\n # path\n start = (0, 0, 0)\n end = (0, 0, t_m)\n path = rs.AddLine(start, end)\n\n # m1\n m1_male_upper_model = rs.ExtrudeCurve(m1_male_upper_crv, path)\n m1_male_middle_model = rs.ExtrudeCurve(m1_male_middle_crv, path)\n m1_male_lower_model = rs.ExtrudeCurve(m1_male_lower_crv, path)\n\n rs.CapPlanarHoles(m1_male_upper_model)\n rs.CapPlanarHoles(m1_male_middle_model)\n rs.CapPlanarHoles(m1_male_lower_model)\n\n # m2 left\n m2_male_left_upper_model = rs.ExtrudeCurve(m2_male_left_upper_crv, path)\n m2_male_left_middle_model = rs.ExtrudeCurve(m2_male_left_middle_crv, path)\n m2_male_left_lower_model = rs.ExtrudeCurve(m2_male_left_lower_crv, path)\n\n rs.CapPlanarHoles(m2_male_left_upper_model)\n rs.CapPlanarHoles(m2_male_left_middle_model)\n rs.CapPlanarHoles(m2_male_left_lower_model)\n\n # m2 right\n m2_male_right_upper_model = rs.ExtrudeCurve(m2_male_right_upper_crv, path)\n m2_male_right_middle_model = rs.ExtrudeCurve(m2_male_right_middle_crv, path)\n m2_male_right_lower_model = rs.ExtrudeCurve(m2_male_right_lower_crv, path)\n\n rs.CapPlanarHoles(m2_male_right_upper_model)\n rs.CapPlanarHoles(m2_male_right_middle_model)\n rs.CapPlanarHoles(m2_male_right_lower_model)\n\n # m3 left\n m3_male_left_upper_model = rs.ExtrudeCurve(m3_male_left_upper_crv, path)\n m3_male_left_middle_model = rs.ExtrudeCurve(m3_male_left_middle_crv, path)\n m3_male_left_lower_model = rs.ExtrudeCurve(m3_male_left_lower_crv, path)\n\n rs.CapPlanarHoles(m3_male_left_upper_model)\n rs.CapPlanarHoles(m3_male_left_middle_model)\n rs.CapPlanarHoles(m3_male_left_lower_model)\n\n # m3 right\n m3_male_right_upper_model = rs.ExtrudeCurve(m3_male_right_upper_crv, path)\n m3_male_right_middle_model = rs.ExtrudeCurve(m3_male_right_middle_crv, path)\n m3_male_right_lower_model = rs.ExtrudeCurve(m3_male_right_lower_crv, path)\n\n rs.CapPlanarHoles(m3_male_right_upper_model)\n rs.CapPlanarHoles(m3_male_right_middle_model)\n rs.CapPlanarHoles(m3_male_right_lower_model)\n\n # m4\n m4_male_upper_model = rs.ExtrudeCurve(m4_male_upper_crv, path)\n m4_male_middle_model = rs.ExtrudeCurve(m4_male_middle_crv, path)\n m4_male_lower_model = rs.ExtrudeCurve(m4_male_lower_crv, path)\n\n rs.CapPlanarHoles(m4_male_upper_model)\n rs.CapPlanarHoles(m4_male_middle_model)\n rs.CapPlanarHoles(m4_male_lower_model)\n\n male_upper_models =\\\n [m1_male_upper_model, m2_male_left_upper_model, 
m2_male_right_upper_model,\\\n m3_male_left_upper_model, m3_male_right_upper_model, m4_male_upper_model]\n\n male_middle_models =\\\n [m1_male_middle_model, m2_male_left_middle_model, m2_male_right_middle_model,\\\n m3_male_left_middle_model, m3_male_right_middle_model, m4_male_middle_model]\n\n male_lower_models =\\\n [m1_male_lower_model, m2_male_left_lower_model, m2_male_right_lower_model,\\\n m3_male_left_lower_model, m3_male_right_lower_model, m4_male_lower_model]\n\n # move objects\n trans_upper = (0, 0, 2 * t_m)\n trans_middle = (0, 0, t_m)\n rs.MoveObjects(male_upper_models, trans_upper)\n rs.MoveObjects(male_middle_models, trans_middle)\n\n\n # deploy models\n O = (0, 0, 0)\n angle = 90\n rs.RotateObjects(male_upper_models, O, angle, None, False)\n rs.RotateObjects(male_middle_models, O, angle, None, False)\n rs.RotateObjects(male_lower_models, O, angle, None, False)\n\n axis = (1, 0, 0)\n rs.RotateObjects(male_upper_models, O, angle, axis, False)\n rs.RotateObjects(male_middle_models, O, angle, axis, False)\n rs.RotateObjects(male_lower_models, O, angle, axis, False)\n\n trans = (-1.5 * width, 0, 0)\n rs.MoveObjects(male_upper_models, trans)\n rs.MoveObjects(male_middle_models, trans)\n rs.MoveObjects(male_lower_models, trans)\n\n rs.DeleteObject(path)\n\n male_models = [male_upper_models, male_middle_models, male_lower_models]",
"def make_female_3D_model\\\n (TABLE_info, m1_female_crvs, m2_female_left_crvs, m2_female_right_crvs,\\\n m3_female_left_crvs, m3_female_right_crvs, m4_female_crvs):\n \"\"\"\n 1 Get t_m from TABLE_info\n \"\"\"\n width = TABLE_info[0]\n height = TABLE_info[1]\n t_m = TABLE_info[2]\n\n \"\"\"\n 2 Get crvs from list.\n \"\"\"\n # m1\n m1_female_upper_crv = m1_female_crvs[0]\n m1_female_middle_crv = m1_female_crvs[1]\n m1_female_lower_crv = m1_female_crvs[2]\n\n # m2\n m2_female_left_upper_crv = m2_female_left_crvs[0]\n m2_female_left_middle_crv = m2_female_left_crvs[1]\n m2_female_left_lower_crv = m2_female_left_crvs[2]\n\n m2_female_right_upper_crv = m2_female_right_crvs[0]\n m2_female_right_middle_crv = m2_female_right_crvs[1]\n m2_female_right_lower_crv = m2_female_right_crvs[2]\n\n # m3\n m3_female_left_upper_crv = m3_female_left_crvs[0]\n m3_female_left_middle_crv = m3_female_left_crvs[1]\n m3_female_left_lower_crv = m3_female_left_crvs[2]\n\n m3_female_right_upper_crv = m3_female_right_crvs[0]\n m3_female_right_middle_crv = m3_female_right_crvs[1]\n m3_female_right_lower_crv = m3_female_right_crvs[2]\n\n # m4\n m4_female_upper_crv = m4_female_crvs[0]\n m4_female_middle_crv = m4_female_crvs[1]\n m4_female_lower_crv = m4_female_crvs[2]\n\n \"\"\"\n 3 Make 3D.\n \"\"\"\n # path\n start = (0, 0, 0)\n end = (0, 0, t_m)\n path = rs.AddLine(start, end)\n\n # m1\n m1_female_upper_model = rs.ExtrudeCurve(m1_female_upper_crv, path)\n m1_female_middle_model = rs.ExtrudeCurve(m1_female_middle_crv, path)\n m1_female_lower_model = rs.ExtrudeCurve(m1_female_lower_crv, path)\n\n rs.CapPlanarHoles(m1_female_upper_model)\n rs.CapPlanarHoles(m1_female_middle_model)\n rs.CapPlanarHoles(m1_female_lower_model)\n\n # m2 left\n m2_female_left_upper_model = rs.ExtrudeCurve(m2_female_left_upper_crv, path)\n m2_female_left_middle_model = rs.ExtrudeCurve(m2_female_left_middle_crv, path)\n m2_female_left_lower_model = rs.ExtrudeCurve(m2_female_left_lower_crv, path)\n\n rs.CapPlanarHoles(m2_female_left_upper_model)\n rs.CapPlanarHoles(m2_female_left_middle_model)\n rs.CapPlanarHoles(m2_female_left_lower_model)\n\n # m2 right\n m2_female_right_upper_model = rs.ExtrudeCurve(m2_female_right_upper_crv, path)\n m2_female_right_middle_model = rs.ExtrudeCurve(m2_female_right_middle_crv, path)\n m2_female_right_lower_model = rs.ExtrudeCurve(m2_female_right_lower_crv, path)\n\n rs.CapPlanarHoles(m2_female_right_upper_model)\n rs.CapPlanarHoles(m2_female_right_middle_model)\n rs.CapPlanarHoles(m2_female_right_lower_model)\n\n # m3 left\n m3_female_left_upper_model = rs.ExtrudeCurve(m3_female_left_upper_crv, path)\n m3_female_left_middle_model = rs.ExtrudeCurve(m3_female_left_middle_crv, path)\n m3_female_left_lower_model = rs.ExtrudeCurve(m3_female_left_lower_crv, path)\n\n rs.CapPlanarHoles(m3_female_left_upper_model)\n rs.CapPlanarHoles(m3_female_left_middle_model)\n rs.CapPlanarHoles(m3_female_left_lower_model)\n\n # m3 right\n m3_female_right_upper_model = rs.ExtrudeCurve(m3_female_right_upper_crv, path)\n m3_female_right_middle_model = rs.ExtrudeCurve(m3_female_right_middle_crv, path)\n m3_female_right_lower_model = rs.ExtrudeCurve(m3_female_right_lower_crv, path)\n\n rs.CapPlanarHoles(m3_female_right_upper_model)\n rs.CapPlanarHoles(m3_female_right_middle_model)\n rs.CapPlanarHoles(m3_female_right_lower_model)\n\n # m4\n m4_female_upper_model = rs.ExtrudeCurve(m4_female_upper_crv, path)\n m4_female_middle_model = rs.ExtrudeCurve(m4_female_middle_crv, path)\n m4_female_lower_model = rs.ExtrudeCurve(m4_female_lower_crv, 
path)\n\n rs.CapPlanarHoles(m4_female_upper_model)\n rs.CapPlanarHoles(m4_female_middle_model)\n rs.CapPlanarHoles(m4_female_lower_model)\n\n female_upper_models =\\\n [m1_female_upper_model, m2_female_left_upper_model, m2_female_right_upper_model,\\\n m3_female_left_upper_model, m3_female_right_upper_model, m4_female_upper_model]\n\n female_middle_models =\\\n [m1_female_middle_model, m2_female_left_middle_model, m2_female_right_middle_model,\\\n m3_female_left_middle_model, m3_female_right_middle_model, m4_female_middle_model]\n\n female_lower_models =\\\n [m1_female_lower_model, m2_female_left_lower_model, m2_female_right_lower_model,\\\n m3_female_left_lower_model, m3_female_right_lower_model, m4_female_lower_model]\n\n # move objects\n trans_upper = (0, 0, 2 * t_m)\n trans_middle = (0, 0, t_m)\n rs.MoveObjects(female_upper_models, trans_upper)\n rs.MoveObjects(female_middle_models, trans_middle)\n\n\n # deploy models\n O = (0, 0, 0)\n angle = 90\n rs.RotateObjects(female_upper_models, O, angle, None, False)\n rs.RotateObjects(female_middle_models, O, angle, None, False)\n rs.RotateObjects(female_lower_models, O, angle, None, False)\n\n axis = (1, 0, 0)\n rs.RotateObjects(female_upper_models, O, angle, axis, False)\n rs.RotateObjects(female_middle_models, O, angle, axis, False)\n rs.RotateObjects(female_lower_models, O, angle, axis, False)\n\n rs.RotateObjects(female_upper_models, O, angle, None, False)\n rs.RotateObjects(female_middle_models, O, angle, None, False)\n rs.RotateObjects(female_lower_models, O, angle, None, False)\n\n trans = (-2 * width - 3 * t_m / 2, width / 2 - 3 * t_m / 2, 0)\n rs.MoveObjects(female_upper_models, trans)\n rs.MoveObjects(female_middle_models, trans)\n rs.MoveObjects(female_lower_models, trans)\n\n rs.DeleteObject(path)\n\n female_models = [female_upper_models, female_middle_models, female_lower_models]",
"def RUN():\n TABLE_info = get_TABLE_info()\n m1_info, m2_info, m3_info, m4_info = get_material_info(TABLE_info)\n\n TSUGITE_name, SHIGUCHI_name, offset = ask_KUMIKI()\n\n # TSUGITE_list = [m2_left_list, m2_right_list, m3_left_list, m3_right_list]\n TSUGITE_list, m2_SEN_info, m3_SEN_info = make_TSUGITE_list(TSUGITE_name, m2_info, m3_info, m4_info, offset)\n\n # SHIGUCHI_list = [m2_KUMIKI_points1, m2_KUMIKI_points2, m3_KUMIKI_points1, m3_KUMIKI_points2]\n SHIGUCHI_list = make_SHIGUCHI_list(SHIGUCHI_name, m1_info, m2_info, m3_info, m4_info, offset)\n\n # m1\n m1_male_points_list, m1_male_SEN_info = make_male_m1_list(SHIGUCHI_name, m1_info, m2_info, m3_info, m4_info, offset)\n m1_female_points_list, m1_female_SEN_info = make_female_m1_list(SHIGUCHI_name, m1_info, m2_info, m3_info, m4_info, offset)\n\n m1_male_crvs = make_m1_male_crv(m1_male_points_list)\n m1_female_crvs = make_m1_female_crv(m1_female_points_list)\n\n # m2, m3 crvs\n m2_male_left_crvs, m2_male_right_crvs = make_m2_crv(TSUGITE_list, SHIGUCHI_list)\n m3_male_left_crvs, m3_male_right_crvs = make_m3_crv(TSUGITE_list, SHIGUCHI_list)\n\n m2_female_left_crvs = rs.CopyObjects(m2_male_left_crvs)\n m2_female_right_crvs = rs.CopyObjects(m2_male_right_crvs)\n m3_female_left_crvs = rs.CopyObjects(m3_male_left_crvs)\n m3_female_right_crvs = rs.CopyObjects(m3_male_right_crvs)\n\n # m4\n m4_male_points_list, m4_male_SEN_info = make_male_m4_list(SHIGUCHI_name, m1_info, m2_info, m3_info, m4_info, offset)\n m4_female_points_list, m4_female_SEN_info = make_female_m4_list(SHIGUCHI_name, m1_info, m2_info, m3_info, m4_info, offset)\n\n m4_male_crvs = make_m4_male_crv(m4_male_points_list)\n m4_female_crvs = make_m4_female_crv(m4_female_points_list)\n\n make_SEN_crvs(m1_male_SEN_info, m1_female_SEN_info, m2_SEN_info, m3_SEN_info, m4_male_SEN_info, m4_female_SEN_info, offset)\n\n # Make 3D\n male_models = make_male_3D_model\\\n (TABLE_info, m1_male_crvs, m2_male_left_crvs, m2_male_right_crvs,\\\n m3_male_left_crvs, m3_male_right_crvs, m4_male_crvs)\n\n female_models = make_female_3D_model\\\n (TABLE_info, m1_female_crvs, m2_female_left_crvs, m2_female_right_crvs,\\\n m3_female_left_crvs, m3_female_right_crvs, m4_female_crvs)\n\n # Deploy crvs (processing data)\n deploy_male_crvs\\\n (TABLE_info, m1_male_crvs, m2_male_left_crvs, m2_male_right_crvs,\\\n m3_male_left_crvs, m3_male_right_crvs, m4_male_crvs)\n\n deploy_female_crvs\\\n (TABLE_info, m1_female_crvs, m2_female_left_crvs, m2_female_right_crvs,\\\n m3_female_left_crvs, m3_female_right_crvs, m4_female_crvs)\n\n make_board(TABLE_info)\n\n rs.ZoomExtents()\n pass",
"def AddDispersionMaterial(GeometryName,RGBData):\n\n r,g,b=RGBData\n onlyR = tuple([r,0,0,1])\n onlyG = tuple([0,g,0,1])\n onlyB = tuple([0,0,b,1])\n\n\n currentMaterial = bpy.data.materials.new(name='TypeA'+GeometryName)\n currentMaterial.use_nodes = True\n nodes = currentMaterial.node_tree.nodes\n\n math01 = nodes.new(\"ShaderNodeMath\")\n math01.operation = \"POWER\"\n\n glassBSDF01 = nodes.new(\"ShaderNodeBsdfGlass\")\n glassBSDF01.inputs[0].default_value = onlyR\n currentMaterial.node_tree.links.new(math01.outputs[0],glassBSDF01.inputs[1])\n\n glassBSDF02 = nodes.new(\"ShaderNodeBsdfGlass\")\n glassBSDF02.inputs[0].default_value = onlyG\n currentMaterial.node_tree.links.new(math01.outputs[0],glassBSDF02.inputs[1])\n\n glassBSDF03 = nodes.new(\"ShaderNodeBsdfGlass\")\n glassBSDF03.inputs[0].default_value = onlyB\n currentMaterial.node_tree.links.new(math01.outputs[0],glassBSDF03.inputs[1])\n\n math02 = nodes.new(\"ShaderNodeMath\")\n currentMaterial.node_tree.links.new(math02.outputs[0],glassBSDF02.inputs[2])\n\n math03 = nodes.new(\"ShaderNodeMath\")\n currentMaterial.node_tree.links.new(math02.outputs[0],math03.inputs[1])\n currentMaterial.node_tree.links.new(math03.outputs[0],glassBSDF01.inputs[2])\n\n addShader01 = nodes.new(\"ShaderNodeAddShader\")\n currentMaterial.node_tree.links.new(glassBSDF01.outputs[0],addShader01.inputs[0])\n currentMaterial.node_tree.links.new(glassBSDF02.outputs[0],addShader01.inputs[1])\n\n addShader02 = nodes.new(\"ShaderNodeAddShader\")\n currentMaterial.node_tree.links.new(addShader01.outputs[0],addShader02.inputs[0])\n currentMaterial.node_tree.links.new(glassBSDF03.outputs[0],addShader02.inputs[1])\n\n volumeAbs = nodes.new(\"ShaderNodeVolumeAbsorption\")\n\n materialOutput=nodes.get(\"Material Output\")\n currentMaterial.node_tree.links.new(addShader02.outputs[0],materialOutput.inputs[0])\n currentMaterial.node_tree.links.new(volumeAbs.outputs[0],materialOutput.inputs[1])\n\n bpy.data.objects[GeometryName].data.materials.append(currentMaterial)",
"def add_surface_ontology(metadata):\n metadata, tbl = metadata.copy(), {}\n for val in metadata['surface_material'].unique():\n if has_keyword(val, 'glass', 'metal', 'steel', 'copper'):\n tbl[val] = ('metal', 'impermeable')\n elif has_keyword(val, 'stone', 'marble', 'ceramic', 'concrete', 'cement', 'granite'):\n tbl[val] = ('stone', 'impermeable')\n elif has_keyword(val, 'plastic', 'rubber', 'vinyl', 'pvc', 'formica'):\n tbl[val] = ('plastic', 'impermeable')\n elif has_keyword(val, 'fabric', 'cloth', 'carpet'):\n tbl[val] = ('fabric', 'permeable')\n elif has_keyword(val, 'hand', 'flesh', 'wood', 'leather', 'fiber'):\n tbl[val] = ('biological', 'permeable')\n elif has_keyword(val, 'control'):\n tbl[val] = ('control', 'control')\n else:\n tbl[val] = (NAN, NAN)\n metadata['surface_ontology_fine'] = metadata['surface_material'].apply(lambda x: tbl[x][0])\n metadata['surface_ontology_coarse'] = metadata['surface_material'].apply(lambda x: tbl[x][1])\n return metadata",
"def __init__(self, para, ini_cond):\n\n # grid\n self.z = np.linspace(0, para['grid']['zmax'], para['grid']['Nlayers']) # grid [m] above ground\n self.dz = self.z[1] - self.z[0] # gridsize [m]\n self.ones = np.ones(len(self.z)) # dummy\n self.zref = para['zref'] # height of forcing data [m]\n \n # moss properties\n self.hc = para['hc'] # canopy height (m)\n self.lad = para['lad'] # shoot-area density (m2m-3)\n self.LAI = sum(self.lad*self.dz)\n \n self.canopy_nodes = np.where(self.lad > 0)[0]\n \n # hydraulic\n self.porosity = para['hydraulic']['porosity']\n self.pF = para['hydraulic']['pF']\n self.Ksat = para['hydraulic']['Ksat']\n self.freezing_curve = para['hydraulic']['freezing_curve']\n \n # radiation\n self.albedo = para['radiation'] # 'PAR', 'NIR'\n self.emissivity = para['radiation']['emissivity']\n self.clump = para['radiation']['clumping']\n self.leaf_angle = para['radiation']['leaf_angle']\n \n #self.radiation = para['radiation']\n \n # compute non-dimensional flow velocity Un = U/ust and momentum diffusivity\n Utop = ini_cond['Utop'] # U/ust at zref\n Ubot = 0.0 # no-slip\n self.Sc = para['Schmidt_nr']\n _, self.Un, self.Kmn, _ = closure_model_U_moss(self.z, self.lad, self.hc, Utop, Ubot) \n \n self.U = None\n self.Ks = None\n self.length_scale = para['length_scale']\n \n self.Switch_WMA = False\n \n # initial states\n self.T = ini_cond['T']\n self.Wtot = ini_cond['Wtot']\n self.Wliq, self.Wice, _ = frozen_water(self.T, self.Wot, fp=self.freezing_curve, To=0.0)\n self.h = water_retention(self.pF, theta=self.Wliq)",
"def test_property_cols():\n image_file = 'input/D00572501_z_c01_r3624p01_immasked.fits.fz'\n cat_file = 'input/D00572501_z_c01_r5473p01_piff.fits'\n psf_file = os.path.join('output','test_property_cols.piff')\n hsm_file = os.path.join('output','test_property_cols_hsm.fits')\n\n nstars = 25\n scale = 0.26\n size = 15\n order = 1\n stamp_size = 25\n\n config = {\n 'input' : {\n 'nstars': nstars,\n 'image_file_name' : image_file,\n 'image_hdu' : 1,\n 'weight_hdu' : 3,\n 'badpix_hdu' : 2,\n 'cat_file_name' : cat_file,\n 'x_col' : 'XWIN_IMAGE',\n 'y_col' : 'YWIN_IMAGE',\n 'sky_col' : 'BACKGROUND',\n 'stamp_size' : stamp_size,\n 'ra' : 'TELRA',\n 'dec' : 'TELDEC',\n 'gain' : 'GAINA',\n 'satur' : 'SATURATA',\n 'chipnum': 1,\n # Select ones with a variety of dtypes.\n 'property_cols' : ['SOURCE_ID', 'GI_COLOR', 'FLAGS', 'FLAG_COLOR', 'SPREAD_MODEL'],\n },\n 'select' : {\n 'type': 'Properties',\n 'where': 'np.abs(SPREAD_MODEL) < 3.e-4',\n\n 'reserve_frac' : 0.2,\n 'seed' : 1234,\n },\n 'psf' : {\n 'model' : {\n 'type' : 'PixelGrid',\n 'scale' : scale,\n 'size' : size,\n 'interp' : 'Lanczos(5)',\n },\n 'interp' : {\n 'type' : 'BasisPolynomial',\n 'order' : [1, 1, 1],\n 'keys': ['u', 'v', 'GI_COLOR'],\n },\n },\n 'output' : {\n 'file_name' : psf_file,\n 'stats': [\n {\n 'type': 'HSMCatalog',\n 'file_name': hsm_file,\n },\n ],\n },\n }\n\n piff.piffify(config)\n hsm = fitsio.read(hsm_file)\n cat = fitsio.read(cat_file)\n\n print('hsm dtype = ',hsm.dtype)\n print('cat dtype = ',cat.dtype)\n\n for key in hsm.dtype.names:\n print(key)\n if key in cat.dtype.names:\n assert hsm[key].dtype.type == cat[key].dtype.type\n elif key == 'reserve':\n assert hsm[key].dtype.type == np.dtype(bool).type\n elif key.startswith('flag'):\n assert hsm[key].dtype.type == np.dtype(int).type\n elif key == 'sky':\n # This one is read from the input catalog, but renamed\n assert hsm[key].dtype.type == np.float32\n else:\n assert hsm[key].dtype.type == np.dtype(float).type\n\n # Check that drawing the image works without specifying chipnum.\n # This is ok so long as the input is really only a single chip.\n # cf. Issue #140\n psf = piff.read(psf_file)\n im1 = psf.draw(35, 40, center=True, GI_COLOR=1)\n\n # If the input field didn't include chipnum, then it makes no difference for a single chip.\n del config['input']['chipnum']\n piff.piffify(config)\n psf = piff.read(psf_file)\n im2 = psf.draw(35, 40, center=True, GI_COLOR=1)\n assert im1 == im2",
"def BuildArchetypeDict(BuildingData):\n\n #Manually Set Lighting Control\n lighting_control_d = { \n \"MULTI_RES\": 250.,\n \"OFFICE\": 350.,\n \"SCHOOL\": 350.,\n }\n\n #Set Mean Occupancy as calculated from occupancy profiles. Maybe redundant\n mean_occupancy_d = { \n \"MULTI_RES\": 0.014355,\n \"OFFICE\": 0.009951,\n \"SCHOOL\": 0.010913,\n }\n\n #Recreated the above dictionary into a dataframe manually because im an idiot and short of time\n mean_occupancy_df = pd.DataFrame({\"Code\": [\"MULTI_RES\",\"OFFICE\",\"SCHOOL\"], \"people_sqm\": [0.014355,0.009951,0.010913]})\n\n volume = (BuildingData['room_width'] / 1000.) * (BuildingData['room_depth'] / 1000.) * (\n BuildingData['room_height'] / 1000.)\n area = (BuildingData['room_width'] / 1000.) * (BuildingData['room_depth'] / 1000.)\n\n # read thermal properties for RC model\n arch = pd.read_excel(paths['Archetypes_properties'], sheetname='THERMAL')\n r = re.compile(\"([a-zA-Z_]+)\") # generate regular expression of letters to strip numbers\n\n # Strip numbers off the building archetypes for matching later on\n arch[\"code1\"] = pd.DataFrame([r.match(string).groups() for string in arch.Code])\n arch.set_index(['code1'], inplace=True)\n print arch\n\n # Delete uneeded archetypes and\n arch.drop(['SERVERROOM', 'PARKING', 'SWIMMING', 'COOLROOM', \"SINGLE_RES\", \"HOTEL\", \"RETAIL\", \"FOODSTORE\", \"RESTAURANT\", \"INDUSTRIAL\", \"HOSPITAL\", \"GYM\"], inplace=True)\n arch.reset_index(drop=False, inplace=True)\n arch.drop('Es', axis=1, inplace=True) # Ratio of floor area that has electricity not needed\n arch.drop('Hs', axis=1, inplace=True) # ratio of gross floor area heated or cooled not needed\n arch.drop('U_roof', axis=1, inplace=True) # roof u value not needed, assume only facade loss\n arch.drop('U_base', axis=1, inplace=True) # heat transfer through basement not needed\n\n # read internal loads for RC model from CEA excel sheet and keep necessary loads\n int_loads = pd.read_excel(paths['Archetypes_properties'], sheetname='INTERNAL_LOADS')\n int_loads = int_loads[['Code', 'Qs_Wp', 'Ea_Wm2', 'El_Wm2']]\n\n # read thermal set points and ventilation rates\n thermal_setpoint_ventelation = pd.read_excel(paths['Archetypes_properties'], sheetname='INDOOR_COMFORT')\n\n thermal_setpoint_ventelation = thermal_setpoint_ventelation.merge(mean_occupancy_df)\n\n #Set a ventilation rate in air changes per hour. 
However this doesn't work with average occupancy\n #TODO: Set a dynamic ventilation strategy in the ASF Simulation Model\n #thermal_setpoint_ventelation['ACH_vent']=thermal_setpoint_ventelation['Ve_lps']*thermal_setpoint_ventelation['people_sqm'] * area * 3.6/volume\n\n # Combine everything into a single dataframe\n b_props = arch.merge(int_loads, how='left', left_on='code1', right_on='Code')\n b_props = b_props.merge(thermal_setpoint_ventelation, how='left', left_on='code1', right_on='Code')\n\n\n b_props = b_props.drop(['Code_y', 'Code'], axis=1)\n\n # Create set back temperature definition to match with the ASF_Simulation\n b_props['setBackTempC'] = b_props['Tcs_setb_C'] - b_props['Tcs_set_C']\n b_props['setBackTempH'] = b_props['Ths_set_C'] - b_props['Ths_setb_C']\n\n\n\n #Ventilation rate per person\n b_props['ACH_vent'] = b_props['Ve_lps'] * 3.6 / volume\n\n # Assign values for Cm from ISO13790:2008, Table 12, based on archetypes\n c_m = []\n for i in range(0, len(b_props['th_mass'])):\n # c_m.append(165.0*10**3) just testing default value\n if b_props['th_mass'][i] == \"T1\":\n c_m.append(110.0 * 10 ** 3) # Light\n elif b_props['th_mass'][i] == \"T2\":\n c_m.append(165.0 * 10 ** 3) # Medium\n elif b_props['th_mass'][i] == \"T3\":\n c_m.append(260.0 * 10 ** 3) # Heavy\n b_props['c_m_A_f'] = pd.DataFrame(c_m)\n\n # declare variables\n occupancy = []\n lighting_control = []\n mean_occupancy = []\n # declare constants\n glass_solar_transmittance = []\n glass_light_transmittance = []\n Lighting_Utilisation_Factor = []\n Lighting_Maintenance_Factor = []\n ACH_vent = []\n ACH_infl = []\n ventilation_efficiency = []\n phi_c_max_A_f = []\n phi_h_max_A_f = []\n heatingSupplySystem = []\n coolingSupplySystem = []\n heatingEmissionSystem = []\n coolingEmissionSystem = []\n heatingEfficiency = []\n coolingEfficiency = []\n ActuationEnergy = []\n COP_H = []\n COP_C = []\n\n print b_props['code1']\n #TODO: Change a lot of thees with df.assign(column_name=constant)\n for code in b_props['code1']:\n # variables\n occupancy.append('schedules_occ_%s.csv' % code)\n lighting_control.append(lighting_control_d.get(code))\n mean_occupancy.append(mean_occupancy_d.get(code))\n glass_solar_transmittance.append(0.6)\n glass_light_transmittance.append(0.6)\n Lighting_Utilisation_Factor.append(0.45)\n Lighting_Maintenance_Factor.append(0.9)\n ACH_vent.append(2.0) # TODO: Shoudlnt this be a variable\n ACH_infl.append(0.5)\n ventilation_efficiency.append(0.6)\n phi_c_max_A_f.append(-np.inf)\n phi_h_max_A_f.append(np.inf)\n heatingSupplySystem.append(COP42Heater) # DirectHeater, #ResistiveHeater #HeatPumpHeater\n coolingSupplySystem.append(COP81Cooler) # DirectCooler, #HeatPumpCooler\n heatingEmissionSystem.append(FloorHeating)\n coolingEmissionSystem.append(FloorHeating)\n heatingEfficiency.append(1.0)\n coolingEfficiency.append(1.0)\n ActuationEnergy.append(False)\n COP_H.append(1.0)\n COP_C.append(1.0)\n\n\n b_props['Qs_Wm2'] = mean_occupancy * b_props['Qs_Wp'] # occupancy: p/m2, qs_wp: W/p\n b_props['Occupancy'] = occupancy\n b_props['ActuationEnergy'] = ActuationEnergy\n\n\n #Build Building Properties dataframe with building inputs with the same variable definition as the ASF simulation engine\n BuildingPropertiesDF = pd.DataFrame({'Code': []})\n BuildingPropertiesDF['Code'] = b_props.loc[:, 'Code_x']\n BuildingPropertiesDF.loc[:, 'lighting_load'] = b_props.loc[:, 'El_Wm2']\n BuildingPropertiesDF.loc[:, 'lighting_control'] = lighting_control\n BuildingPropertiesDF.loc[:, 'U_em'] = b_props.loc[:, 'U_wall']\n 
BuildingPropertiesDF.loc[:, 'U_w', ] = b_props.loc[:, 'U_win']\n BuildingPropertiesDF.loc[:, 'theta_int_h_set'] =b_props.loc[:,'Ths_set_C'].apply(pd.to_numeric)\n BuildingPropertiesDF.loc[:, 'theta_int_c_set'] = b_props.loc[:, 'Tcs_set_C'].apply(pd.to_numeric)\n BuildingPropertiesDF.loc[:, 'c_m_A_f'] = b_props.loc[:, 'c_m_A_f']\n BuildingPropertiesDF.loc[:, 'Qs_Wp'] = b_props.loc[:, 'Qs_Wp']\n BuildingPropertiesDF.loc[:, 'Ea_Wm2'] = b_props.loc[:, 'Ea_Wm2']\n BuildingPropertiesDF.loc[:, 'glass_solar_transmittance'] = glass_solar_transmittance\n BuildingPropertiesDF.loc[:, 'glass_light_transmittance'] = glass_light_transmittance\n BuildingPropertiesDF.loc[:, 'Lighting_Utilisation_Factor'] = Lighting_Utilisation_Factor\n BuildingPropertiesDF.loc[:, 'Lighting_Maintenance_Factor'] = Lighting_Maintenance_Factor\n BuildingPropertiesDF.loc[:, 'ACH_vent'] = ACH_vent\n BuildingPropertiesDF.loc[:, 'ACH_infl'] = ACH_infl\n BuildingPropertiesDF.loc[:, 'ventilation_efficiency'] = ventilation_efficiency\n BuildingPropertiesDF.loc[:, 'phi_c_max_A_f'] = phi_c_max_A_f\n BuildingPropertiesDF.loc[:, 'phi_h_max_A_f'] = phi_h_max_A_f\n BuildingPropertiesDF.loc[:, 'heatingSupplySystem'] = heatingSupplySystem\n BuildingPropertiesDF.loc[:, 'coolingSupplySystem'] = coolingSupplySystem\n BuildingPropertiesDF.loc[:, 'heatingEmissionSystem'] = heatingEmissionSystem\n BuildingPropertiesDF.loc[:, 'coolingEmissionSystem'] = coolingEmissionSystem\n # BuildingPropertiesDF.loc[:, 'heatingEfficiency'] = heatingEfficiency\n # BuildingPropertiesDF.loc[:, 'coolingEfficiency'] = coolingEfficiency\n # BuildingPropertiesDF.loc[:, 'COP_H'] = COP_H\n # BuildingPropertiesDF.loc[:, 'COP_C'] = COP_C\n BuildingPropertiesDF.set_index(['Code'], inplace=True)\n\n #Build Simulation Options dataframe with the same variable definitions as the ASF Simulation tool\n SimulationOptionsDF = b_props[['Code_x', 'setBackTempC', 'setBackTempH', 'Occupancy', 'ActuationEnergy']]\n SimulationOptionsDF= SimulationOptionsDF.assign(human_heat_emission = 0.12)\n SimulationOptionsDF= SimulationOptionsDF.assign(Temp_start = 20.0)\n SimulationOptionsDF.set_index(['Code_x'], inplace=True)\n\n # Temp: only analyse the first two lines for testing purposes. Delete the next two lines:\n SimulationOptionsDF = SimulationOptionsDF[12:18]\n BuildingPropertiesDF=BuildingPropertiesDF[12:18]\n # Temp complete\n\n print BuildingPropertiesDF\n print SimulationOptionsDF\n\n #Convert dataframes to dictionaries\n SimulationOptions = SimulationOptionsDF.to_dict(orient='index')\n BuildingProperties = BuildingPropertiesDF.to_dict(orient='index')\n BuildingPropertiesDF.to_csv('Builtdictionaries2.csv')\n SimulationOptionsDF.to_csv('SOdictionaries.csv')\n\n return BuildingProperties, SimulationOptions",
"def get_model_with_properties():\n \n m = ConcreteModel()\n\n # ------------------------------------------------------------------\n # Data\n # ------------------------------------------------------------------\n\n m.np = 25 # Number of possible tays\n m.c = 4 # Number of components\n m.lc = 1 # Light component\n m.hc = 4 # Heavy component\n\n #### Constant parameters\n m.Rgas = 8.314 # Ideal gas constant in J/mol K\n m.Tref = 298.15 # Reference temperature in K\n\n #### Product specifications\n m.xspec_lc = 0.99 # Final liquid composition for methanol (1)\n m.xspec_hc = 0.99 # Fnal liquid composition for butanol (4)\n m.xspec_inter2 = 0.99 # Final liquid composition for ethanol (2)\n m.xspec_inter3 = 0.99 # Final liquid composition for propanol (3)\n m.Ddes = 50 # Final flowrate in distillate in mol/s\n m.Bdes = 50 # Final flowrate in bottoms in mol/s\n m.Sdes = 50 # Final flowrate in side product streams in mol/s\n\n # #### Known initial values\n m.Fi = m.Ddes + m.Bdes + 2 * m.Sdes # Side feed flowrate in mol/s\n m.Vi = 400 # Initial value for vapor flowrate in mol/s\n m.Li = 400 # Initial value for liquid flowrate in mol/s\n\n m.Tf = 358 # Side feed temperature in K\n\n m.Preb = 1.2 # Reboiler pressure in bar\n m.Pbot = 1.12 # Bottom-most tray pressure in bar\n m.Ptop = 1.08 # Top-most tray pressure in bar\n m.Pcon = 1.05 # Condenser pressure in bar\n m.Pf = 1.02\n\n m.rr0 = 0.893 # Internal reflux ratio initial value\n m.bu0 = 0.871 # Internal reflux ratio initial value\n\n\n #### Scaling factors\n m.Hscale = 1e3 \n m.Qscale = 1e-3 \n\n \n #### Constants for the calculation of liquid heat capacity\n m.cpc = {} # Constant 1 for liquid heat capacity \n m.cpc2 = {} # Constant 2 for liquid heat capacity \n m.cpc[1] = m.Rgas \n m.cpc[2] = 1\n m.cpc2['A', 1] = 1 / 100\n m.cpc2['B', 1] = 1 / 1e4\n m.cpc2['A', 2] = 1\n m.cpc2['B', 2] = 1\n\n\n # ------------------------------------------------------------------\n # Physical Properties\n #\n # Notation:\n # MW ........................ molecular weight in g/gmol\n # TB ........................ boiling point temperature in K\n # TC ........................ critical temperature in K\n # PC ........................ critical pressure in bar\n # w ........................ acentric factor\n # lden ...................... liquid density g/m3,\n # dHvap ..................... heat of vaporization in J/mol.\n # vpA, vpB, vpC, and vpD .... vapor pressure constants\n # cpA, cpB, cpC, and cpD .... heat capacity constants J/mol:\n # 1 for liq and 2 for vapor phase\n #\n # Reference A: R.C. Reid, J.M. Prausnitz and B.E. 
Poling,\n # \"The Properties of gases and liquids\", 1987 and 2004 Eds.\n #\n # ------------------------------------------------------------------\n\n m.prop = {} # Properties of components:\n cpL = {} # Ruczika-D method for liquid heat capacity calculation\n # (Reference A, page 6.20)\n sumA = {}\n sumB = {}\n sumC = {}\n cpL['a', 'C(H3)(C)'] = 4.19845\n cpL['b', 'C(H3)(C)'] = -0.312709\n cpL['c', 'C(H3)(C)'] = 0.178609\n cpL['a', 'C(H2)(C2)'] = 2.7345\n cpL['b', 'C(H2)(C2)'] = 0.122732\n cpL['c', 'C(H2)(C2)'] = -0.123482\n cpL['a', 'C(H2)(C)(O)'] = 0.517007\n cpL['b', 'C(H2)(C)(O)'] = 1.26631\n cpL['c', 'C(H2)(C)(O)'] = -0.0939713\n cpL['a', 'O(H)(C)'] = 16.1555\n cpL['b', 'O(H)(C)'] = -11.938\n cpL['c', 'O(H)(C)'] = 2.85117\n cpL['a', 'C(H3)(O)'] = 3.70344\n cpL['b', 'C(H3)(O)'] = -1.12884\n cpL['c', 'C(H3)(O)'] = 0.51239\n sumA[1] = (cpL['a', 'C(H3)(O)']\n + cpL['a', 'O(H)(C)']) \n sumB[1] = (cpL['b', 'C(H3)(O)']\n + cpL['b', 'O(H)(C)'])\n sumC[1] = (cpL['c', 'C(H3)(O)']\n + cpL['c', 'O(H)(C)'])\n sumA[2] = (cpL['a', 'C(H3)(C)']\n + cpL['a', 'C(H2)(C)(O)']\n + cpL['a', 'O(H)(C)'])\n sumB[2] = (cpL['b', 'C(H3)(C)']\n + cpL['b', 'C(H2)(C)(O)']\n + cpL['b', 'O(H)(C)'])\n sumC[2] = (cpL['c', 'C(H3)(C)']\n + cpL['c', 'C(H2)(C)(O)']\n + cpL['c', 'O(H)(C)'])\n sumA[3] = (cpL['a', 'C(H3)(C)']\n + cpL['a', 'C(H2)(C2)']\n + cpL['a', 'C(H2)(C)(O)']\n + cpL['a', 'O(H)(C)'])\n sumB[3] = (cpL['b', 'C(H3)(C)']\n + cpL['b', 'C(H2)(C2)']\n + cpL['b', 'C(H2)(C)(O)']\n + cpL['b', 'O(H)(C)'])\n sumC[3] = (cpL['c', 'C(H3)(C)']\n + cpL['c', 'C(H2)(C2)']\n + cpL['c', 'C(H2)(C)(O)']\n + cpL['c', 'O(H)(C)'])\n sumA[4] = (cpL['a', 'C(H3)(C)']\n + 2 * cpL['a', 'C(H2)(C2)']\n + cpL['a', 'C(H2)(C)(O)']\n + cpL['a', 'O(H)(C)'])\n sumB[4] = (cpL['b', 'C(H3)(C)']\n + 2 * cpL['b', 'C(H2)(C2)']\n + cpL['b', 'C(H2)(C)(O)']\n + cpL['b', 'O(H)(C)'])\n sumC[4] = (cpL['c', 'C(H3)(C)']\n + 2 * cpL['c', 'C(H2)(C2)']\n + cpL['c', 'C(H2)(C)(O)']\n + cpL['c', 'O(H)(C)'])\n\n ## Methanol: component 1\n m.prop[1, 'MW'] = 32.042\n m.prop[1, 'TB'] = 337.7\n m.prop[1, 'TC'] = 512.6\n m.prop[1, 'PC'] = 80.9\n m.prop[1, 'w'] = 0.556\n m.prop[1, 'lden'] = 792e3\n m.prop[1, 'dHvap'] = 38.376e3\n m.prop[1, 'vpA'] = -8.54796\n m.prop[1, 'vpB'] = 0.76982\n m.prop[1, 'vpC'] = -3.10850\n m.prop[1, 'vpD'] = 1.54481\n m.prop[1, 'cpA', 1] = sumA[1]\n m.prop[1, 'cpB', 1] = sumB[1]\n m.prop[1, 'cpC', 1] = sumC[1]\n m.prop[1, 'cpD', 1] = 0\n m.prop[1, 'cpA', 2] = 2.115e1\n m.prop[1, 'cpB', 2] = 7.092e-2\n m.prop[1, 'cpC', 2] = 2.587e-5\n m.prop[1, 'cpD', 2] = -2.852e-8\n\n\n ## Ethanol: component 2\n m.prop[2, 'MW'] = 46.069\n m.prop[2, 'TB'] = 351.4\n m.prop[2, 'TC'] = 513.9\n m.prop[2, 'PC'] = 61.4\n m.prop[2, 'w'] = 0.644\n m.prop[2, 'lden'] = 789.3e3\n m.prop[2, 'dHvap'] = 42.698e3\n m.prop[2, 'vpA'] = -8.51838\n m.prop[2, 'vpB'] = 0.34163\n m.prop[2, 'vpC'] = -5.73683\n m.prop[2, 'vpD'] = 8.32581\n m.prop[2, 'cpA', 1] = sumA[2]\n m.prop[2, 'cpB', 1] = sumB[2]\n m.prop[2, 'cpC', 1] = sumC[2]\n m.prop[2, 'cpD', 1] = 0\n m.prop[2, 'cpA', 2] = 9.014\n m.prop[2, 'cpB', 2] = 2.141e-1\n m.prop[2, 'cpC', 2] = -8.390e-5\n m.prop[2, 'cpD', 2] = 1.373e-9\n\n\n ## Propanol: component 3\n m.prop[3, 'MW'] = 60.096\n m.prop[3, 'TB'] = 370.3\n m.prop[3, 'TC'] = 536.8\n m.prop[3, 'PC'] = 51.7\n m.prop[3, 'w'] = 0.623\n m.prop[3, 'lden'] = 804e3\n m.prop[3, 'dHvap'] = 47.763e3\n m.prop[3, 'vpA'] = -8.05594\n m.prop[3, 'vpB'] = 4.25183e-2\n m.prop[3, 'vpC'] = -7.51296\n m.prop[3, 'vpD'] = 6.89004\n m.prop[3, 'cpA', 1] = sumA[3]\n m.prop[3, 'cpB', 1] = sumB[3]\n 
m.prop[3, 'cpC', 1] = sumC[3]\n m.prop[3, 'cpD', 1] = 0\n m.prop[3, 'cpA', 2] = 2.47\n m.prop[3, 'cpB', 2] = 3.325e-1\n m.prop[3, 'cpC', 2] = -1.855e-4\n m.prop[3, 'cpD', 2] = 4.296e-8\n\n\n ## Butanol: component 4\n m.prop[4, 'MW'] = 74.123\n m.prop[4, 'TB'] = 390.9\n m.prop[4, 'TC'] = 563.1\n m.prop[4, 'PC'] = 44.2\n m.prop[4, 'w'] = 0.593\n m.prop[4, 'lden'] = 810e3\n m.prop[4, 'dHvap'] = 52.607e3\n m.prop[4, 'vpA'] = -8.00756\n m.prop[4, 'vpB'] = 0.53783\n m.prop[4, 'vpC'] = -9.34240\n m.prop[4, 'vpD'] = 6.68692\n m.prop[4, 'cpA', 1] = sumA[4]\n m.prop[4, 'cpB', 1] = sumB[4]\n m.prop[4, 'cpC', 1] = sumC[4]\n m.prop[4, 'cpD', 1] = 0\n m.prop[4, 'cpA', 2] = 3.266\n m.prop[4, 'cpB', 2] = 4.18e-1\n m.prop[4, 'cpC', 2] = -2.242e-4\n m.prop[4, 'cpD', 2] = 4.685e-8\n\n\n return m",
"def generate_materials_dict(self):\n c = 299792458.0\n w_mat = 2 * np.pi * c / self.l_mat - self.w0\n l2_mat = (self.l_mat * 1e6) ** 2\n\n n_air = 1 + 0.05792105 * l2_mat / (238.0185 * l2_mat - 1) + 0.00167917 * l2_mat / (57.362 * l2_mat - 1)\n air_ip = interp1d(w_mat, n_air, bounds_error=False, fill_value=np.nan, kind=\"quadratic\")\n self.materials['air'] = air_ip\n\n n_fs = np.sqrt(1 + 0.6961663 * l2_mat / (l2_mat - 0.0684043 ** 2) +\n 0.4079426 * l2_mat / (l2_mat - 0.1162414 ** 2) +\n 0.8974794 * l2_mat / (l2_mat - 9.896161 ** 2))\n fs_ip = interp1d(w_mat, n_fs, bounds_error=False, fill_value=np.nan, kind=\"quadratic\")\n self.materials['fs'] = fs_ip\n\n n_mgf2 = np.sqrt(1 + 0.48755108 * l2_mat / (l2_mat - 0.04338408 ** 2) +\n 0.39875031 * l2_mat / (l2_mat - 0.09461442 ** 2) +\n 2.3120353 * l2_mat / (l2_mat - 23.793604 ** 2))\n mgf2_ip = interp1d(w_mat, n_mgf2, bounds_error=False, fill_value=np.nan, kind=\"quadratic\")\n self.materials['mgf2'] = mgf2_ip\n\n n_sapphire_o = np.sqrt(1 + 1.4313493 * l2_mat / (l2_mat - 0.0726631 ** 2) +\n 0.65054713 * l2_mat / (l2_mat - 0.1193242 ** 2) +\n 5.3414021 * l2_mat / (l2_mat - 18.028251 ** 2))\n sapphire_o_ip = interp1d(w_mat, n_sapphire_o, bounds_error=False, fill_value=np.nan, kind=\"quadratic\")\n self.materials['sapphire_o'] = sapphire_o_ip\n\n n_sapphire_e = np.sqrt(1 + 1.5039759 * l2_mat / (l2_mat - 0.0740288 ** 2) +\n 0.55069141 * l2_mat / (l2_mat - 0.1216529 ** 2) +\n 6.5927379 * l2_mat / (l2_mat - 20.072248 ** 2))\n sapphire_e_ip = interp1d(w_mat, n_sapphire_e, bounds_error=False, fill_value=np.nan, kind=\"quadratic\")\n self.materials['sapphire_e'] = sapphire_e_ip\n\n n_bbo_o = np.sqrt(2.7405 + 0.0184 / (l2_mat - 0.0179) - 0.0155 * l2_mat)\n bbo_o_ip = interp1d(w_mat, n_bbo_o, bounds_error=False, fill_value=np.nan, kind=\"quadratic\")\n self.materials['bbo_o'] = bbo_o_ip\n\n n_bbo_e = np.sqrt(2.3730 + 0.0128 / (l2_mat - 0.0156) - 0.0044 * l2_mat)\n bbo_e_ip = interp1d(w_mat, n_bbo_e, bounds_error=False, fill_value=np.nan, kind=\"quadratic\")\n self.materials['bbo_e'] = bbo_e_ip\n\n materials_files = os.listdir(self.materials_path)\n logger.info(\"Found {0:d}\".format(materials_files.__len__()))\n for mat_file in materials_files:\n logger.debug(mat_file)\n self.read_material(''.join((self.materials_path, '/', mat_file)))",
"def __init__(self, mech, nodes, traceable, intermediates, bottomup = True):\n # Internalize arguments for later use\n self.__mech = mech\n self.__traceable = set(traceable)\n self.__intermediates = set(intermediates)\n self.__bottomup = bottomup\n \n # make sure that nodes is a list of nodes\n if isinstance(nodes, Species):\n nodes = [nodes]\n\n # Initialize dictionaries for storing\n # source data\n self.origins = {}\n self.origin_loss = {}\n\n # Producers is a dictionary (keys = species; values = dictionaries of producers) \n # that contains integrated production arrays\n #\n # Example: {'CO': {'Transport': array([8,5,6,...]), ...}}\n self.producers = {}\n \n # Concentrations is a dictionary (keys = species; values dictionary \n # initial, average, and final concentration arrays. Similar to producers\n self.concentrations = {}\n \n # Production is a dictionary of gross production each species\n self.production = {}\n \n # Losses is a dictionary of gross consumption of each species\n self.losses = {}\n\n # Preset traced nodes as those provided by the user\n self.traced = set(nodes)\n \n \n # Storing data about input shapes\n tmp_init = mech('INIT')\n self.__old_shape = tmp_init[list(tmp_init.keys())[0]].shape\n self.__shape = [i for i in self.__old_shape]\n self.__ntimes = mech.mrg.variables['TFLAG'].shape[-3]\n for i, s in enumerate(self.__shape):\n if s == self.__ntimes:\n self.__shape[i] = self.__ntimes + 1\n self.__time_dim = i\n self.__shape = tuple(self.__shape)\n \n self.run()",
"def __init__(self, structure_a = None, structure_b = None):\n\n self.cell_1 = None\n self.cell_2 = None\n self.rep_1 = None\n self.rep_2 = None\n self.eps_11 = None\n self.eps_22 = None\n self.eps_12 = None\n self.eps_mas = None\n self.atoms = None\n self.ang = None\n\n \"\"\"(E_s1 + E_s2 - E_i)/A\"\"\"\n self.w_sep_c = None\n self.w_sep_d = None\n\n \"\"\"(E_s1 + E_s2(strained) - E_i)/A\"\"\"\n self.w_seps_c = None\n self.w_seps_d = None\n\n \"\"\"(E_i - E_b1 - E_b2)/A\"\"\"\n self.e_int_c = None\n self.e_int_d = None\n\n \"\"\"Holder for parameters usefull in various surface/interface calculations\"\"\"\n self.parameters = {\"sigma_c_11\": 0, \"sigma_c_12\": 0, \"sigma_c_21\": 0, \"sigma_c_22\": 0,\\\n \"sigma_d_11\": 0, \"sigma_d_12\": 0, \"sigma_d_21\": 0, \"sigma_d_22\": 0}\n\n if structure_a is None or structure_b is None:\n self.base_1 = None\n self.base_2 = None\n self.pos_1 = None\n self.pos_2 = None\n self.spec_1 = None\n self.spec_2 = None\n self.mass_1 = None\n self.mass_2 = None\n else:\n self.base_1 = structure_a.cell.copy()\n self.base_2 = structure_b.cell.copy()\n self.pos_1 = structure_a.pos.copy()\n self.pos_2 = structure_b.pos.copy()\n self.spec_1 = structure_a.type_n.copy()\n self.spec_2 = structure_b.type_n.copy()\n self.mass_1 = structure_a.mass.copy()\n self.mass_2 = structure_b.mass.copy()\n\n self.filename = None\n self.alt_base_1 = None\n self.alt_base_2 = None\n self.order = None",
"def init_solid_params(eos_d):\n # All units must be per atom (to make sense for arbitrary composition)\n\n models.Control.set_consts( [], [], eos_d )\n\n const_d = eos_d['const_d']\n\n Nat_cell = 20\n Nat_formula = 5\n\n T0 = 300 # K\n\n # EOS Parameter values initially set by Mosenfelder2009\n # Set model parameter values\n mass_avg = (24.31+28.09+3*16.0)/5.0 # g/(mol atom)\n S0 = 0.0 # must adjust\n param_key_a = ['T0','S0','mass_avg']\n param_val_a = np.array([T0,S0,mass_avg])\n models.Control.set_params( param_key_a, param_val_a, eos_d )\n\n # V0 = (38.575*1e-5)*mass_avg/eos_d['const_d']['Nmol']/1e3*1e30 # ang^3/atom\n V0 = 162.35/Nat_cell # ang^3/atom\n K0 = 254.7 # GPa\n KP0= 4.26\n E0 = 0.0\n param_key_a = ['V0','K0','KP0','E0']\n param_val_a = np.array([V0,K0,KP0,E0])\n models.Control.set_params( param_key_a, param_val_a, eos_d )\n\n VR = V0\n thetaR = 736 # K\n gammaR = 2.23\n qR = 1.83\n param_key_a = ['VR','thetaR','gammaR','qR']\n param_val_a = np.array([VR,thetaR,gammaR,qR])\n models.Control.set_params( param_key_a, param_val_a, eos_d )\n\n # NOTE: Mosenfelder(2009) has mislabeled units as J/K/g\n # -> units are actually J/K/kg ???\n # The measured 1000K heat capacity of MgSiO3 is ~125 J/K/mol\n # (equal to Dulong Petit value for 5 atom basis)\n # -> This value is thus ~65% of that nominal value,\n # balancing the 30 to 40% values of gamma that are higher than other\n # studies (static compression only constrains Gamma*Cv\n #\n # Max const-vol heat capacity:\n Cvmax = (806.0/1e3)*mass_avg/const_d['kJ_molpereV']/1e3 # J/mol atoms/K -> eV/K/atom\n\n param_key_a = ['Cvmax']\n param_val_a = np.array([Cvmax])\n models.Control.set_params( param_key_a, param_val_a, eos_d )\n\n # # Must convert energy units from kJ/g to eV/atom\n energy_conv_fac = mass_avg/eos_d['const_d']['kJ_molpereV']\n models.Control.set_consts( ['energy_conv_fac'], [energy_conv_fac], eos_d )\n\n\n compress_path_mod = models.BirchMurn3(path_const='S',level_const=T0,\n supress_energy=False,\n supress_press=False,\n expand_adj=False)\n models.Control.set_modtypes( ['CompressPathMod'], [compress_path_mod],\n eos_d )\n\n gamma_mod = models.GammaPowLaw(V0ref=False)\n models.Control.set_modtypes( ['GammaMod'], [gamma_mod], eos_d )\n\n thermal_mod = models.MieGrunDebye()\n models.Control.set_modtypes( ['ThermalMod'], [thermal_mod], eos_d )\n\n full_mod = models.ThermalPressMod()\n models.Control.set_modtypes( ['FullMod'], [full_mod], eos_d )\n\n\n return eos_d",
"def layer_properties(freq_vec, material):\n # name of the material\n material_name = material[0]\n # thickness of the material (reshape with freq shape, in a tuple, to\n # allow the sum with the tuple of material properties)\n thickness = (np.array( [material[1]]*len(freq_vec) ), )\n # check if we have to pass extra arguments for non homogenous material\n if material_name == 'meta':\n param = material[2:]\n else:\n param = ()\n # read/compute material properties\n prop = mat.properties(material_name, freq_vec, *param)\n\n return thickness + prop",
"def MeshMachine(main):\n\n # oDesign definition\n oDesign = main['ANSYS']['oDesign']\n\n # Data for the rotor mesh\n RotorName = main['ANSYS']['Rotor&Magnets']['Name'][0]\n RotorNumMaxElem = main['ANSYS']['Mesh']['Rotor']['NumMaxElem']\n RotorMaxLength = main['ANSYS']['Mesh']['Rotor']['MaxLength']\n\n # Data for the magnets mesh\n PMNames = main['ANSYS']['Rotor&Magnets']['PMNames']\n PMNumMaxElem = main['ANSYS']['Mesh']['Magnets']['NumMaxElem']\n PMMaxLength = main['ANSYS']['Mesh']['Magnets']['MaxLength']\n\n # Data for the Stator mesh\n StatorName = main['ANSYS']['Stator']['Name']\n StatorNormalDev = main['ANSYS']['Mesh']['Stator']['NormalDev']\n StatorAspectRatio = main['ANSYS']['Mesh']['Stator']['AspectRatio']\n\n # Data for the Stator mesh\n CoilNames = main['ANSYS']['Winding']['CoilNames']\n WindingNumMaxElem = main['ANSYS']['Mesh']['Winding']['NumMaxElem']\n WindingMaxLength = main['ANSYS']['Mesh']['Winding']['MaxLength']\n\n WindingName = []\n for phase in CoilNames:\n for direction in phase:\n WindingName += direction\n\n # Creating meshes\n oModule = oDesign.GetModule(\"MeshSetup\")\n\n # Rotor meshes\n oModule.AssignLengthOp(\n [\n \"NAME:Rotor\",\n \"RefineInside:=\", True,\n \"Enabled:=\", True,\n \"Objects:=\", [RotorName],\n \"RestrictElem:=\", False,\n \"NumMaxElem:=\", str(RotorNumMaxElem),\n \"RestrictLength:=\", True,\n \"MaxLength:=\", str(RotorMaxLength)+\"mm\"\n ]\n )\n # Magnet meshes\n oModule.AssignLengthOp(\n [\n \"NAME:Magnets\",\n \"RefineInside:=\", True,\n \"Enabled:=\", True,\n \"Objects:=\", PMNames,\n \"RestrictElem:=\", False,\n \"NumMaxElem:=\", str(PMNumMaxElem),\n \"RestrictLength:=\", True,\n \"MaxLength:=\", str(PMMaxLength)+\"mm\"\n ]\n )\n # Stator meshes\n oModule.AssignTrueSurfOp(\n [\n \"NAME:Stator\",\n \"Objects:=\", [StatorName],\n \"CurvedSurfaceApproxChoice:=\", \"ManualSettings\",\n \"SurfDevChoice:=\", 0,\n \"NormalDevChoice:=\", 2,\n \"NormalDev:=\", str(StatorNormalDev) + \"deg\",\n \"AspectRatioChoice:=\", 2,\n \"AspectRatio:=\", str(StatorAspectRatio)\n ]\n )\n\n # Coil meshes\n oModule.AssignLengthOp(\n [\n \"NAME:Coils\",\n \"RefineInside:=\"\t, True,\n \"Enabled:=\"\t\t, True,\n \"Objects:=\"\t\t, WindingName,\n \"RestrictElem:=\"\t, False,\n \"NumMaxElem:=\"\t\t, str(WindingNumMaxElem),\n \"RestrictLength:=\"\t, True,\n \"MaxLength:=\"\t\t, str(WindingMaxLength) +\"mm\"\n ]\n )\n\n return main",
"def run_multiobjective(inputs, parameters = None):\n def thickness(x, t, chord):\n y = af.Naca00XX(chord, t, [x], return_dict = 'y')\n thickness_at_x = y['u'] - y['l']\n return thickness_at_x \n\n if parameters != None:\n eng = parameters[0]\n import_matlab = False\n else:\n eng = None\n import_matlab = True\n \n sma = inputs['sma']\n linear = inputs['linear']\n sigma_o = 100e6\n R = inputs['R']\n \n airfoil = \"naca0012\"\n chord = 1.#0.6175\n t = 0.12*chord\n\n J = {'x':0.75, 'y':0.}\n \n #Adding the area key to the dictionaries\n sma['area'] = math.pi*(0.000381/2.)**2\n linear['area'] = 0.001\n \n # Design constants \n #arm length to center of gravity\n r_w = 0.10\n\n\n #Aicraft weight (mass times gravity)\n W = 0.0523*9.8 #0.06*9.8\n alpha = 0.\n V = 10 #m/s\n altitude = 10000. #feet\n \n # Temperature\n T_0 = 273.15 + 30\n T_final = inputs['T_f']\n \n #Initial martensitic volume fraction\n MVF_init = 1.\n \n # Number of steps and cycles\n n = 200\n n_cycles = 0\n #~~~~~~~~~~~~~~~~~~~~~bb~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n #Parameters to select how to output stuff\n all_outputs = True\n #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n if all_outputs:\n eps_s, eps_l, theta, sigma, MVF, T, eps_t, theta, F_l, k, L_s = flap_multiobjective(airfoil, \n chord, J, sma, linear, sigma_o, \n W, r_w, V, altitude, alpha, T_0, \n T_final, MVF_init, n, R, all_outputs = True,\n import_matlab = import_matlab, eng=eng,\n n_cycles = n_cycles)\n\n return theta, sigma, T, MVF, eps_s, L_s",
"def computeProp(self):\n self.chem = {}\n for key in self.config.C:\n if key in ['P', 'T', 'Z', 'DZ']:\n continue\n self.chem[key] = chemistry.ConstituentProperties(key)\n\n # nAtm = len(self.gas[self.config.C['P']])\n self.property = []\n for op in self.config.LP:\n self.property.append([])\n zOffset = 0.0\n iOffset = 0\n psep = 1.0E6\n for i, zv in enumerate(self.gas[self.config.C['Z']]): # find the nearest z value at p_ref\n P = self.gas[self.config.C['P']][i]\n if abs(P - self.config.p_ref) < psep:\n psep = abs(P - self.config.p_ref)\n iOffset = i\n zOffset = self.gas[self.config.C['Z']][iOffset]\n z_at_p_ref = self.config.Req\n\n for i, zv in enumerate(self.gas[self.config.C['Z']]):\n T = self.gas[self.config.C['T']][i]\n P = self.gas[self.config.C['P']][i]\n self.property[self.config.LP['P']].append(P)\n self.property[self.config.LP['Z']].append(zv)\n rr = z_at_p_ref + zv - zOffset\n # note that this is the \"actual\"z along equator referenced to planet center (aka radius)\n self.property[self.config.LP['R']].append(rr)\n # ##set mean amu\n amulyr = 0.0\n for key in self.chem:\n amulyr += self.chem[key].amu * self.gas[self.config.C[key]][i]\n self.property[self.config.LP['AMU']].append(amulyr)\n # ##set GM pre-calc (normalized further down) and get lapse rate\n if not i:\n self.property[self.config.LP['GM']].append(0.0)\n self.property[self.config.LP['LAPSE']].append(0.0)\n self.property[self.config.LP['LAPSEP']].append(0.0)\n else:\n rho = (amulyr * P) / (chemistry.R * T)\n dr = abs(zv - self.gas[self.config.C['Z']][i - 1])\n dV = 4.0 * np.pi * (rr**2) * dr\n dM = 1.0e11 * rho * dV\n GdM = self.property[self.config.LP['GM']][i - 1] + chemistry.GravConst * dM\n # in km3/s2\n # mass added as you make way into atmosphere by radius r (times G)\n self.property[self.config.LP['GM']].append(GdM)\n dT = abs(T - self.gas[self.config.C['T']][i - 1])\n dP = abs(P - self.gas[self.config.C['P']][i - 1])\n self.property[self.config.LP['LAPSE']].append(dT / dr)\n self.property[self.config.LP['LAPSEP']].append(dT / dP)\n # ##set refractivity and index of refraction\n refrlyr = 0.0\n for key in self.chem:\n refrlyr += self.chem[key].refractivity(T=T) * self.gas[self.config.C[key]][i]\n refrlyr = refrlyr * P * (293.0 / T)\n self.property[self.config.LP['REFR']].append(refrlyr)\n nlyr = refrlyr / 1.0E6 + 1.0\n self.property[self.config.LP['N']].append(nlyr)\n\n # ##Now need to normalize GM to planet and calculate scale height (H)\n GMnorm = self.property[self.config.LP['GM']][iOffset] # G*(Mass added by p_ref)\n for i, mv in enumerate(self.property[self.config.LP['GM']]):\n gm = self.config.GM_ref - (mv - GMnorm)\n self.property[self.config.LP['GM']][i] = gm\n little_g = gm / self.property[self.config.LP['R']][i]**2\n m_bar = self.property[self.config.LP['AMU']][i]\n T = self.gas[self.config.C['T']][i]\n self.property[self.config.LP['H']].append((chemistry.R * T) /\n (little_g * m_bar) / 1000.0)\n self.property[self.config.LP['g']].append(little_g)\n self.property = np.array(self.property)",
"def SC_generation(hourly_radiation, prop_observers, number_groups, weather_data, g, Sz, Az, ha, Tin_C, height,\n panel_properties, latitude):\n\n\n n0 = panel_properties['n0']\n c1 = panel_properties['c1']\n c2 = panel_properties['c2']\n mB0_r = panel_properties['mB0_r']\n mB_max_r = panel_properties['mB_max_r']\n mB_min_r = panel_properties['mB_min_r']\n C_eff = panel_properties['C_eff']\n t_max = panel_properties['t_max']\n IAM_d = panel_properties['IAM_d']\n Aratio = panel_properties['aperture_area_ratio']\n Apanel = panel_properties['module_area']\n dP1 = panel_properties['dP1']\n dP2 = panel_properties['dP2']\n dP3 = panel_properties['dP3']\n dP4 = panel_properties['dP4']\n Cp_fluid_JperkgK = panel_properties['Cp_fluid'] # J/kgK\n\n # create lists to store results\n list_results = [None] * number_groups\n list_areas_groups = [None] * number_groups\n Sum_mcp_kWperC = np.zeros(8760)\n Sum_qout_kWh = np.zeros(8760)\n Sum_Eaux_kWh = np.zeros(8760)\n Sum_qloss = np.zeros(8760)\n Sum_radiation_kWh = np.zeros(8760)\n\n Tin_array_C = np.zeros(8760) + Tin_C\n aperature_area_per_module = Aratio * Apanel\n total_area_module = prop_observers['total_area_module'].sum() # total area for panel installation\n\n # calculate equivalent length of pipes\n lv = panel_properties['module_length'] # module length\n number_modules = round(total_area_module/Apanel) # this is an estimation\n l_ext_mperm2 = (2 * lv * number_modules/ (total_area_module * Aratio)) # pipe length within the collectors\n l_int_mperm2 = 2 * height / (total_area_module * Aratio) # pipe length from building substation to roof top collectors\n Leq_mperm2 = l_int_mperm2 + l_ext_mperm2 # in m/m2 aperture\n\n if panel_properties['type'] == 'ET': # for evacuated tubes\n Nseg = 100 # default number of subsdivisions for the calculation\n else:\n Nseg = 10 # default number of subsdivisions for the calculation\n\n for group in range(number_groups):\n # load panel angles from group\n teta_z = prop_observers.loc[group, 'surface_azimuth'] # azimuth of panels of group\n area_per_group = prop_observers.loc[group, 'total_area_module']\n tilt_angle_deg = prop_observers.loc[group, 'tilt'] # tilt angle of panels\n\n # create dataframe with irradiation from group\n\n radiation_Wh = pd.DataFrame({'I_sol': hourly_radiation[group]})\n radiation_Wh['I_diffuse'] = weather_data.ratio_diffhout * radiation_Wh.I_sol # calculate diffuse radiation\n radiation_Wh['I_direct'] = radiation_Wh['I_sol'] - radiation_Wh['I_diffuse'] # calculate direct radiation\n radiation_Wh.fillna(0, inplace=True) # set nan to zero\n\n # calculate incidence angle modifier for beam radiation\n IAM_b = calc_IAM_beam_SC(Az, g, ha, teta_z, tilt_angle_deg, panel_properties['type'], Sz, latitude)\n\n # calculate heat production from a solar collector of each group\n list_results[group] = calc_SC_module(tilt_angle_deg, IAM_b, IAM_d, radiation_Wh.I_direct,\n radiation_Wh.I_diffuse, weather_data.drybulb_C, n0,\n c1, c2, mB0_r, mB_max_r, mB_min_r, C_eff, t_max,\n aperature_area_per_module, dP1, dP2, dP3, dP4,\n Cp_fluid_JperkgK, Tin_C, Leq_mperm2, l_ext_mperm2,\n l_int_mperm2, Nseg)\n\n\n # multiplying the results with the number of panels in each group and write to list\n number_modules_per_group = area_per_group / Apanel\n list_areas_groups[group] = area_per_group\n radiation_array = hourly_radiation[group] * list_areas_groups[group] / 1000 # kWh\n Sum_qout_kWh = Sum_qout_kWh + list_results[group][1] * number_modules_per_group\n Sum_Eaux_kWh = Sum_Eaux_kWh + list_results[group][2] * 
number_modules_per_group\n Sum_qloss = Sum_qloss + list_results[group][0] * number_modules_per_group\n Sum_mcp_kWperC = Sum_mcp_kWperC + list_results[group][5] * number_modules_per_group\n Sum_radiation_kWh = Sum_radiation_kWh + radiation_Wh['I_sol']*area_per_group/1000\n\n Tout_group_C = (Sum_qout_kWh / Sum_mcp_kWperC) + Tin_C # in C assuming all collectors are connected in parallel\n\n Final = pd.DataFrame(\n {'Q_SC_gen_kWh': Sum_qout_kWh, 'T_SC_sup_C': Tin_array_C, 'T_SC_re_C': Tout_group_C, 'mcp_SC_kWperC': Sum_mcp_kWperC, 'Eaux_SC_kWh': Sum_Eaux_kWh,\n 'Q_SC_l_kWh': Sum_qloss, 'Area_SC_m2': sum(list_areas_groups), 'radiation_kWh': Sum_radiation_kWh}, index=range(8760))\n\n return list_results, Final",
"def initParams(self):\n sizes = [self.inputDim]+self.layerSizes+[self.outputDim]\n scales = [np.sqrt(6)/np.sqrt(n+m) for n,m in zip(sizes[:-1],sizes[1:])]\n self.stack = [[np.random.rand(m,n)*2*s-s,np.zeros((m,1))] \\\n for n,m,s in zip(sizes[:-1],sizes[1:],scales)]\n self.hActs_M = [cm.empty((s,self.maxBatch)) for s in sizes]\n\n if self.train:\n # Now assuming that all layers are the same size\n self.grad = [[cm.empty(w.shape),cm.empty(b.shape)] for w,b in self.stack]\n self.deltasC_M = cm.empty((self.outputDim,self.maxBatch))\n self.deltasOut_M = cm.empty((sizes[1],self.maxBatch)) \n self.deltasIn_M = cm.empty((sizes[1],self.maxBatch)) \n self.tmpGrad_M = cm.empty((self.layerSize,self.maxBatch))\n \n # Allocate memory once here and reuse\n # Store probs\n self.probs_M = cm.empty((self.outputDim,self.maxBatch))\n # Store col max\n self.rowVec_M = cm.empty((1,self.maxBatch))\n \n self.stack = [[cm.CUDAMatrix(w),cm.CUDAMatrix(b)]\n for w,b in self.stack]\n\n if self.temporalLayer > 0:\n # dummy bias used for temporal layer\n dummy = cm.empty((1,1))\n dummy.assign(0.0)\n\n scale = np.sqrt(6)/np.sqrt(self.layerSize*2)\n wtf = cm.CUDAMatrix(2*scale*np.random.rand(self.layerSize,\n self.layerSize)-scale)\n wtb = cm.CUDAMatrix(2*scale*np.random.rand(self.layerSize,\n self.layerSize)-scale)\n self.stack.append([wtf,dummy])\n self.stack.append([wtb,dummy])\n\n # forward and backward activations for temporal layer\n self.hActsFor_M = cm.empty((self.layerSize,self.maxBatch))\n self.hActsBack_M = cm.empty((self.layerSize,self.maxBatch))\n\n if self.train:\n dwtf = cm.empty(wtf.shape)\n self.grad.append([dwtf,dummy])\n dwtb = cm.empty(wtb.shape)\n self.grad.append([dwtb,dummy])\n\n self.tmpGradBack_M = cm.empty((self.layerSize,self.maxBatch))\n self.deltasFor_M = cm.empty((self.layerSize,self.maxBatch))\n self.deltasBack_M = cm.empty((self.layerSize,self.maxBatch))",
"def run():\n\n def assignToon(context):\n def instanciate_group(nodes, group_name):\n group = nodes.new(type='ShaderNodeGroup')\n group.node_tree = bpy.data.node_groups[group_name]\n return group\n\n def assignToonShader(material):\n '''To do Handle if the material output doesnt exist'''\n toonShader = instanciate_group(material.node_tree.nodes, \"ToonShader_2\")\n node2 = material.node_tree.nodes['Material Output']\n material.node_tree.links.new(toonShader.outputs[0], node2.inputs[0])\n\n objects = bpy.context.selected_objects\n for obj in objects:\n if len(obj.material_slots) < 1:\n\n bpy.ops.object.material_slot_add()\n\n if obj.name not in bpy.data.materials:\n\n mat = bpy.data.materials.new(obj.name)\n else:\n mat = bpy.data.materials[obj.name]\n\n obj.data.materials[0] = mat\n mat.use_nodes = True\n\n for mat in obj.data.materials:\n if mat.name == '':\n mat.name = obj.name\n\n matNodes = mat.node_tree.nodes\n\n assignToonShader(mat)\n if 'Principled BSDF' in matNodes:\n matNodes.remove(matNodes['Principled BSDF'])\n # else:\n # for n in matNodes:\n # if n != material.node_tree.nodes['Material Output']:\n # matNodes.remove(n)\n\n\n shaderPath = r'D:/COMPANIES/loneCoconut/render/MILVIO_CGL/assets/lib/TOONSCEENSETUP/shd/publish/001.000/high/lib_TOONSCEENSETUP_shd.blend'\n collection_name = 'ToonSceneSetup'\n # dict_ = {'company': 'loneCoconut',\n # 'context': 'render',\n # 'project': 'MILVIO',\n # 'scope': 'assets',\n # 'seq': 'lib',\n # 'shot': 'TOONSCEENSETUP',\n # 'task': 'shd',\n # 'user': 'publish',\n # 'resolution': 'high'}\n # shaderPath = lm.LumberObject(dict_)\n # print(shaderPath.latest_version().path_root)\n #\n # collection_name = shaderPath.shot\n\n if collection_name not in bpy.data.collections:\n\n # link all collections starting with 'MyCollection'\n with bpy.data.libraries.load(shaderPath, link=False) as (data_from, data_to):\n data_to.collections = [c for c in data_from.collections if c.startswith(collection_name)]\n\n # link collection to scene collection\n for coll in data_to.collections:\n if coll is not None:\n bpy.data.scenes['Scene'].collection.children.link(coll)\n\n else:\n print(\"Toon Shader Exist\")\n\n\n assignToon(bpy.context)",
"def run(inputs, parameters = None):\n def thickness(x, t, chord):\n y = af.Naca00XX(chord, t, [x], return_dict = 'y')\n thickness_at_x = y['u'] - y['l']\n return thickness_at_x \n\n if parameters != None:\n eng = parameters[0]\n import_matlab = False\n else:\n eng = None\n import_matlab = True\n \n sma = inputs['sma']\n linear = inputs['linear']\n R = inputs['R']\n\n sigma_o = 100e6\n\n \n airfoil = \"naca0012\"\n chord = 1.#0.6175\n\n J = {'x':0.75, 'y':0.}\n \n #Adding the area key to the dictionaries\n sma['area'] = math.pi*(0.000381/2.)**2\n linear['area'] = 0.001\n \n # Design constants \n #arm length to center of gravity\n r_w = 0.10\n \n #Aicraft weight (mass times gravity)\n W = 0.0523*9.8 #0.06*9.8\n alpha = 0.\n V = 10 #m/s\n altitude = 10000. #feet\n \n # Temperature\n T_0 = 273.15 + 30.\n T_final = 273.15 + 140.\n \n #Initial martensitic volume fraction\n MVF_init = 1.\n \n # Number of steps and cycles\n n = 200\n n_cycles = 0\n #~~~~~~~~~~~~~~~~~~~~~bb~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n #Parameters to select how to output stuff\n all_outputs = True\n save_data = True\n #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n if all_outputs:\n eps_s, eps_l, theta, sigma, MVF, T, eps_t, theta, F_l, k, L_s = flap(airfoil, \n chord, J, sma, linear, sigma_o, \n W, r_w, V, altitude, alpha, T_0, \n T_final, MVF_init, n, R, all_outputs = True,\n import_matlab = import_matlab, eng=eng,\n n_cycles = n_cycles)\n\n import matplotlib.pyplot as plt\n plt.figure()\n plt.plot(np.rad2deg(theta), eps_s, lw=2., label = \"$\\epsilon_s$\")\n plt.plot(np.rad2deg(theta), eps_l, 'b--',lw=2, label = \"$\\epsilon_l$\")\n# plt.scatter(theta, eps_s, c = 'b')\n# plt.scatter(theta, eps_l, c = 'b')\n plt.ylabel('$\\epsilon$', fontsize=24)\n plt.xlabel(r'$\\theta ({}^{\\circ})$', fontsize=20)\n plt.legend(loc = 'best', fontsize = 'x-large')\n plt.grid()\n \n print len(T), len(eps_s), len(eps_l), len(theta), len(eps_t)\n plt.figure()\n plt.plot(np.rad2deg(theta), eps_t, lw=2.)\n# plt.scatter(theta, eps_t, c = 'b')\n plt.ylabel('$\\epsilon_t$', fontsize=24)\n plt.xlabel(r'$\\theta ({}^{\\circ})$', fontsize=20)\n plt.legend(loc = 'best', fontsize = 'x-large')\n plt.grid()\n \n plt.figure()\n plt.plot(np.rad2deg(theta), MVF, lw=2.)\n# plt.scatter(theta, MVF, c = 'b')\n plt.ylabel('$MVF$', fontsize=24)\n plt.xlabel(r'$\\theta ({}^{\\circ})$', fontsize=20)\n plt.legend(loc = 'best', fontsize = 'x-large')\n plt.grid()\n\n plt.figure()\n plt.plot(T, MVF, lw=2.)\n# plt.scatter(T, MVF, c = 'b')\n plt.ylabel('$MVF$', fontsize=24)\n plt.xlabel('$T (K)$', fontsize=20)\n plt.legend(loc = 'best', fontsize = 'x-large')\n plt.grid()\n\n plt.figure()\n plt.plot(T, sigma, lw=2.)\n# plt.scatter(T, sigma, c = 'b')\n plt.ylabel('$\\sigma$', fontsize=24)\n plt.xlabel('$T (K)$', fontsize=20)\n plt.legend(loc = 'best', fontsize = 'x-large')\n plt.grid()\n \n plt.figure()\n plt.plot(T, eps_s, 'b', lw=2., label = \"$\\epsilon_s$\")\n plt.plot(T, eps_l, 'b--',lw=2, label = \"$\\epsilon_l$\")\n# plt.scatter(T, eps_s, c = 'b')\n# plt.scatter(T, eps_l, c = 'b')\n plt.xlabel('$T (K)$', fontsize=20)\n plt.ylabel('$\\epsilon$', fontsize=24)\n plt.legend(loc = 'best', fontsize = 'x-large')\n plt.grid()\n \n plt.figure()\n plt.plot(T, np.rad2deg(theta), lw=2.)\n# plt.scatter(T, theta, c = 'b')\n plt.xlabel('$T (K)$', fontsize=20)\n plt.ylabel(r'$\\theta ({}^{\\circ})$', fontsize=20)\n plt.grid()\n \n F_s = []\n for i in range(len(sigma)):\n F_s.append(sigma[i]*sma['area'])\n# sigma_MPa = []\n# for sigma_i in 
sigma:\n# sigma_MPa.append(sigma_i/1e6)\n plt.figure()\n plt.plot(theta, F_s, 'b', lw=2., label = \"$F_s$\")\n plt.plot(theta, F_l, 'b--', lw=2., label = \"$F_l$\")\n# plt.scatter(theta, F_s, c = 'b')\n# plt.scatter(theta, F_l, c = 'b')\n plt.ylabel('$F (N)$', fontsize=20)\n plt.xlabel(r'$\\theta ({}^{\\circ})$', fontsize=20)\n plt.legend(loc = 'best', fontsize = 'x-large')\n plt.grid() \n else:\n theta, k= flap(airfoil, chord, J, sma, linear, sigma_o, \n W, r_w, V, altitude, alpha, T_0, \n T_final, MVF_init, n, R, all_outputs = False,\n import_matlab = import_matlab, eng=eng,\n n_cycles = n_cycles)\n \n if save_data == True:\n Data = {'theta': theta, 'eps_s': eps_s, 'eps_l': eps_l, \n 'sigma': sigma, 'xi': MVF, 'T': T, 'eps_t': eps_t,\n 'F_l': F_l, 'k': k, 'L_s':L_s}\n pickle.dump(Data, open( \"data.p\", \"wb\" ) )\n \n return {'theta': theta, 'k': k}",
"def others():\n\n # Fuel cells ('FC') were not calculated and assigned heat rates\n # These sum up to 63 MW of capacity in WECC\n # Cleanest option is to remove them from the current runs:\n query = \"CREATE TABLE switch.fuel_cell_generation_plant_backup (like generation_plant);\\\n INSERT INTO fuel_cell_generation_plants\\\n (SELECT * FROM generation_plant WHERE gen_tech = 'FC');\\\n DELETE FROM generation_plant_scenario_member gpsm USING generation_plant gp\\\n WHERE gp.generation_plant_id = gpsm.generation_plant_id\\\n AND gen_tech = 'FC';\\\n DELETE FROM generation_plant_cost gpc USING generation_plant gp\\\n WHERE gp.generation_plant_id = gpc.generation_plant_id\\\n AND gen_tech = 'FC';\\\n DELETE FROM generation_plant_existing_and_planned gpep USING generation_plant gp\\\n WHERE gp.generation_plant_id = gpep.generation_plant_id\\\n AND gen_tech = 'FC';\\\n DELETE FROM generation_plant WHERE gen_tech = 'FC';\"\n connect_to_db_and_run_query(query,\n database='switch_wecc', user=user, password=password, quiet=True)\n\n # Others ('OT') also do not have an assigned heat rate. Assign an average.\n query = \"UPDATE generation_plant set full_load_heat_rate = \\\n (select avg(full_load_heat_rate)\\\n from generation_plant\\\n join generation_plant_scenario_member using (generation_plant_id)\\\n where energy_source = 'Gas'\\\n and generation_plant_scenario_id = 2)\\\n where gen_tech = 'OT' and energy_source = 'Gas'\"\n connect_to_db_and_run_query(query,\n database='switch_wecc', user=user, password=password, quiet=True)\n\n # Replace 'NaN's with 'Null's\n # (NaNs result from the aggregation process)\n cols_to_replace_nans = ['connect_cost_per_mw','hydro_efficiency','min_build_capacity',\n 'unit_size','storage_efficiency','store_to_release_ratio',\n 'min_load_fraction','startup_fuel','startup_om',\n 'ccs_capture_efficiency', 'ccs_energy_load']\n for col in cols_to_replace_nans:\n query = \"UPDATE generation_plant SET {c} = Null WHERE {c} = 'NaN'\".format(c=col)\n connect_to_db_and_run_query(query,\n database='switch_wecc', user=user, password=password, quiet=True)\n print \"Replaced NaNs in column '{}'\".format(col)\n\n # Replace Nulls with zeros where Switch expects a number\n query = \"UPDATE generation_plant\\\n SET connect_cost_per_mw = 0.0\\\n WHERE connect_cost_per_mw is Null\"\n connect_to_db_and_run_query(query,\n database='switch_wecc', user=user, password=password, quiet=True)",
"def create_output_database():\n\n# Do not alter the hdf5 file if it already exists\n if os.path.exists(database_path):\n print(\"DATABASE STATUS:\")\n print(\"\\t\" + database_path + \" already exists and is ready to store the results of computations\")\n return None\n# Create hdf5 file. The flag \"-w\" means \"create file, fail if exists\" \n else:\n computations_database = h5py.File(database_path, \"w-\")\n\n# Create initial data datasets and write initial data into them \n for initial_condition in initial_conditions:\n for k in range (6,17):\n dataset_initial_path = initial_condition + \"/k = \" + str(k) + \" initial_data\"\n computations_database[dataset_initial_path] = initial_data(initial_condition, k)\n# Create data groups for storing the results of computations \n for flux in fluxes: \n group_path = initial_condition + \"/\" + flux\n computations_database.create_group(group_path)\n\n# Write the appropriate attributes that are needed for particular computations, \n# i.e. create the appropriate environment for each computational method \n computations_database[group_path].attrs[\"a\"] = 3.0\n computations_database[group_path].attrs[\"T\"] = 9.0\n if flux == \"Lax_Wendroff_Fourth_Order\": \n computations_database[group_path].attrs[\"CFL\"] = 0.2\n elif flux in [\"Fromm_CFL_0.5\", \"Fromm_van_Leer_CFL_0.5\"]:\n computations_database[group_path].attrs[\"CFL\"] = 0.5\n else:\n computations_database[group_path].attrs[\"CFL\"] = 0.9\n \n computations_database.close() \n print(\"DATABASE STATUS:\")\n print(\"\\t\" + database_path + \" has been created and is ready to store the results of computations\")",
"def calc_h2_working_cap(isotmt_dict): # pylint: disable=too-many-locals\n\n out_dict = {}\n out_dict['is_porous'] = isotmt_dict['is_porous']\n\n if out_dict['is_porous']:\n press2index = {}\n temp2index = {}\n for press in 1, 5, 100:\n press2index[press] = isotmt_dict['isotherm'][0]['pressure'].index(press)\n for temp in 77, 198, 298:\n temp2index[temp] = isotmt_dict['temperature'].index(temp)\n\n case2pt = {'a': [[100, 198], [5, 298]], 'b': [[100, 77], [5, 77]], 'c': [[100, 77], [1, 77]]}\n\n unitconv = {\n 'wt%': # convert mol/kg to wt%\n get_molec_uc_to_mg_g(isotmt_dict) / isotmt_dict['conversion_factor_molec_uc_to_mol_kg'] / 10,\n 'g/L': # convert mol/kg to g/L\n get_molec_uc_to_mg_g(isotmt_dict) / isotmt_dict['conversion_factor_molec_uc_to_mol_kg'] *\n isotmt_dict['Density']\n }\n\n for case, presstemp in case2pt.items():\n for unit, conv in unitconv.items():\n load_average = isotmt_dict['isotherm'][temp2index[presstemp[0][1]]]['loading_absolute_average'][\n press2index[presstemp[0][0]]]\n disc_average = isotmt_dict['isotherm'][temp2index[presstemp[1][1]]]['loading_absolute_average'][\n press2index[presstemp[1][0]]]\n load_dev = isotmt_dict['isotherm'][temp2index[presstemp[0][1]]]['loading_absolute_dev'][press2index[\n presstemp[0][0]]]\n disc_dev = isotmt_dict['isotherm'][temp2index[presstemp[1][1]]]['loading_absolute_dev'][press2index[\n presstemp[1][0]]]\n out_dict.update({\n 'case-{}_{}_unit'.format(case, unit): unit,\n 'case-{}_{}_average'.format(case, unit): (load_average - disc_average) * conv,\n 'case-{}_{}_dev'.format(case, unit): sqrt(load_dev**2 + disc_dev**2) * conv\n })\n\n return Dict(dict=out_dict)",
"def TNG_net(self): \n \n import h5py as h5\n filename = localpath+'input/yields/TNG/SNII.hdf5'\n # Read H5 file\n f = h5.File(filename, \"r\")\n \n # Define element indexing\t\t\t\n indexing = {}\n indexing['H'] = 'Hydrogen'\n indexing['He'] = 'Helium'\n indexing['C'] = 'Carbon'\n indexing['N']= 'Nitrogen'\n indexing['O'] = 'Oxygen'\n indexing['Ne'] = 'Neon'\n indexing['Mg'] = 'Magnesium'\n indexing['Si'] = 'Silicon'\n indexing['S'] = 'Sulphur' # Not used by TNG simulation\n indexing['Ca'] = 'Calcium' # Not used by TNG simulation\n indexing['Fe'] = 'Iron'\n \n self.elements = list(indexing.keys())\n \n self.table = {}\n \n # Define masses / metallicities\n self.metallicities = list(f['Metallicities'].value)\n self.masses = f['Masses'].value\n\n \n for z_index,z in enumerate(self.metallicities):\n \n yield_subtable = {}\n \n z_name = f['Yield_names'].value[z_index].decode('utf-8')\n z_data = f['Yields/'+z_name+'/Yield']\n \n ejecta_mass = f['Yields/'+z_name+'/Ejected_mass'].value\n \n yield_subtable['Mass'] = self.masses\n remnants = self.masses-ejecta_mass\n yield_subtable['mass_in_remnants'] = np.divide(remnants,self.masses)\n for el in list(indexing.keys()):\n yield_subtable[el] = np.zeros(len(self.masses))\n \n summed_yields = np.zeros(len(self.masses))\n \n for m_index,mass in enumerate(self.masses):\n for el_index,el in enumerate(self.elements):\n el_yield_fraction = z_data[el_index][m_index]/mass #(mass-remnants[m_index]) # Find fraction of ejecta per element\n yield_subtable[el][m_index] = el_yield_fraction\t\t\t\t\t\n summed_yields[m_index]+=el_yield_fraction # Compute total yield\n \n yield_subtable['unprocessed_mass_in_winds'] = 1.-summed_yields-yield_subtable['mass_in_remnants']\n \n # Restructure table\n all_keys = ['Mass','mass_in_remnants','unprocessed_mass_in_winds']+self.elements\n \n list_of_arrays = [yield_subtable[key] for key in all_keys]\n restructure_subtable = np.core.records.fromarrays(list_of_arrays,names=all_keys)\n \n self.table[z] = restructure_subtable",
"def getDictCWells(self,itype):\n #Method begins here\n #nx=self.__grid['nx'] #From the geometry in grid\n ny=self.__grid['ny']\n nz=self.__grid['nz']\n minx=self.__grid['ox']\n miny=self.__grid['oy']\n minz=self.__grid['oz']\n rx=self.__grid['dx']\n ry=self.__grid['dy']\n rz=self.__grid['dz']\n \n # well package\n # Remember to use zero-based layer, row, column indices!\n lcoordw=np.zeros((self.__ncwells,3),dtype=np.int32)\n for i in range (self.__ncwells):\n lcoordw[i,0]=floor((self.__dfclst.iloc[i,3]-minx)/rx)\n #In MODFLOW y ans z coordinates are inverted\n lcoordw[i,1]=floor((miny+ry*ny-self.__dfclst.iloc[i,4])/ry)\n lcoordw[i,2]=floor((minz+rz*nz-self.__dfclst.iloc[i,5])/rz)\n \n nper=self.__df.getForcPer()\n ssm_data = {}\n print('Number of conc periods='+str(nper)) \n for i in range(nper):\n lst=[]\n for j in range(self.__ncwells):\n conc_rate=self.__dfcwells.iloc[i+1,j+1]\n lst.append( [ lcoordw[j,2], lcoordw[j,1], lcoordw[j,0], conc_rate, itype['WEL'] ] )\n ssm_data[i]=lst\n print(ssm_data)\n \n print('*--- Succesfull reading of concentration wells ---*')\n \n return ssm_data",
"def importWells3D(BD_prlvm,grid,lst_domain,fac=1/365/86400,V_col=\"V Bancaris\",geol_col=\"NAPPE_CAPT\",\n geol_layer=[\"PLIOCENE\",\"QUATERNAIRE\"],layer_num=[1,0]):\n \n ix=GridIntersect(grid)\n stress_data_well=[]\n \n for ilayer in range(len(geol_layer)): # iterate through layers\n BD = BD_prlvm[BD_prlvm[geol_col] == geol_layer[ilayer]] # only keep layers with the right geol\n for o in BD.index: #iterate through each well\n Vw = BD.loc[o,V_col]\n if not (np.isnan(Vw)) | (Vw == 0): #keep productive well\n cellidx = ix.intersect(BD.geometry[o]).cellids[0][0]\n cellidy = ix.intersect(BD.geometry[o]).cellids[0][1]\n \n if type(layer_num[ilayer]) == int :\n cellid = (layer_num[ilayer],cellidx,cellidy) #cell on which the well is active\n if cellid in lst_domain: # check if the well is in the domain\n stress_data_well.append((cellid,-fac*Vw))\n elif len(layer_num[ilayer]) > 1:\n cpt=0\n for isublay in layer_num[ilayer]:\n cellid = (isublay,cellidx,cellidy)\n\n if cellid in lst_domain:\n cpt+=1\n for isublay in layer_num[ilayer]: \n cellid = (isublay,cellidx,cellidy)\n if cellid in lst_domain: # check if the well is in the domain\n stress_data_well.append((cellid,-fac*Vw/cpt))\n \n return stress_data_well"
] | [
"0.6475344",
"0.5682321",
"0.5672466",
"0.5636607",
"0.552437",
"0.5461393",
"0.53857917",
"0.527137",
"0.5246535",
"0.5223118",
"0.5219347",
"0.51697445",
"0.51493186",
"0.5122936",
"0.51042575",
"0.5100329",
"0.50907505",
"0.5060393",
"0.5043993",
"0.504055",
"0.5037686",
"0.5033965",
"0.5026525",
"0.5016267",
"0.50033",
"0.49788022",
"0.49706534",
"0.49701118",
"0.49628377",
"0.49594134"
] | 0.59535277 | 1 |
The function saves the current state of the tables and calls the "cangeMode" function | def updateMode( Tables,
WarningMessage,
Graph,
Properties ):
WarningMessage.clean( )
Graph.setMode( Properties )
#WarningMessage.printMessage( "Click on the Apply button to update grapths..." )
if Properties == 0:
Tables[ "ElasticModulus" ].fillTableWithBufferData( "GeneralOrthotropic" )
Tables[ "ShearModulus" ].fillTableWithBufferData( "GeneralOrthotropic" )
Tables[ "PoissonRatios" ].fillTableWithBufferData( "GeneralOrthotropic" )
elif Properties == 1:
Tables[ "ElasticModulus" ].fillTableWithBufferData( "GeneralIsotropic" )
Tables[ "ShearModulus" ].fillTableWithBufferData( "GeneralIsotropic" )
Tables[ "PoissonRatios" ].fillTableWithBufferData( "GeneralIsotropic" )
cangeMode( Tables, WarningMessage, Graph.getMode() ) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def alterTableMode(database: str, table: str, mode: str) -> int:\n\n try:\n\n bd = _database(database)\n\n if bd:\n\n tb = _table(database, table)\n\n if tb:\n\n if tb[\"modo\"] == mode or mode not in [\"avl\", \"b\", \"bplus\", \"dict\", \"hash\", \"isam\", \"json\"]:\n return 4\n\n registros = extractTable(database, table)\n\n _createDatabase(database, mode, bd[\"encoding\"])\n\n _createTable(database, table + \"_temp\", tb[\"columnas\"], mode)\n alterAddPK(database, table + \"_temp\", tb[\"pk\"])\n\n for registro in registros:\n insert(database, table + \"_temp\", registro)\n\n dropTable(database, table)\n alterTable(database, table + \"_temp\", table)\n\n for key in [\"foreign_keys\", \"unique_index\", \"index\"]: print(\">> \" + key + \" :\",\n _table(database, table)[key].alterTableMode(\n mode))\n\n return 0\n\n else:\n return 3\n\n else:\n return 2\n\n except Exception:\n print(\"=\" * 30)\n traceback.print_exc()\n return 1",
"def change_modes(self, change_list):\n\t\tprint \"CHG_MODE START\"\n\t\tfor mode_ix in range(0,len(change_list),2):\n\t\t\tsetid_and_index = self.__mode_modesetid(change_list[mode_ix])\n\t\t\tif setid_and_index is not None:\n\t\t\t\tif change_list[mode_ix+1] == True:\n\t\t\t\t\tprint \"Setting Active Set:{0} Index:{1}\".format(setid_and_index[0], setid_and_index[1])\n\t\t\t\t\tself.ms_all[setid_and_index[0]].activate(setid_and_index[1])\n\t\t\t\telif change_list[mode_ix+1] == False:\n\t\t\t\t\tprint \"Setting DEactive Set:{0} Index:{1}\".format(setid_and_index[0], setid_and_index[1])\n\t\t\t\t\tself.ms_all[setid_and_index[0]].deactivate(setid_and_index[1])\n\t\t\t\telse:\n\t\t\t\t\tprint \"Invalid State\"\n\t\tif 'volume' in self.ms_all:\n\t\t\tprint self.ms_all['volume'].active()\n\t\tif 'modecycle1' in self.ms_all:\n\t\t\tprint self.ms_all['modecycle1'].active()\n\t\tprint \"CHG_MODE STOP\"",
"def safeModeOn(database: str, table: str) -> int:\n nombreST = str(database) + '-' + str(table)\n if not _database(database):\n return 2\n\n if not _table(database, table):\n return 3\n\n if BC.EsUnaTablaSegura(nombreST, _main_path):\n return 4\n\n try:\n BC.CreateBlockChain(nombreST, _main_path)\n return 0\n except:\n return 1",
"def alterDatabaseMode(database: str, mode: str) -> int:\n\n try:\n\n bd = _database(database)\n\n if bd:\n\n if bd[\"modo\"] == mode or mode not in [\"avl\", \"b\", \"bplus\", \"dict\", \"hash\", \"isam\", \"json\"]:\n return 4\n\n data = []\n\n lista_tablas = showTables(database)\n\n if lista_tablas:\n\n for tabla in lista_tablas:\n # lista de [tabla, registros] \n\n registros = extractTable(database, tabla)\n data.append([tabla, registros])\n\n # creando la nueva base de datos\n createDatabase(database + \"_temp\", mode, bd[\"encoding\"])\n\n for tabla in data:\n\n tb = _table(database, tabla[0])\n\n createTable(database + \"_temp\", tb[\"nombre\"], tb[\"columnas\"])\n alterAddPK(database + \"_temp\", tb[\"nombre\"], tb[\"pk\"])\n\n for registro in tabla[1]:\n insert(database + \"_temp\", tb[\"nombre\"], registro)\n\n dropDatabase(database)\n alterDatabase(database + \"_temp\", database)\n\n return 0\n\n else:\n return 2\n\n except:\n return 1",
"def __cb_mode_change(self, list_of_modes):\t\n\t\t\n\t\tnew_active_modes = []\t\t# only the new active mode(s)\n\t\tmode_change_params = []\n\t\tfor mode in list_of_modes:\n\t\t\tmode_change_params.append(mode['mode'])\n\t\t\tmode_change_params.append(mode['state'])\n\t\t\tif mode['state']:\n\t\t\t\tnew_active_modes.append(mode['mode'])\n\n\t\tself.__printer(\"Mode change. {0}\".format(mode_change_params),level=LL_DEBUG)\n\t\tself.__exec_function_by_code('MODE-CHANGE',*mode_change_params)\n\t\t\n\t\tif callable(self.callback_mode_change):\n\t\t\tself.callback_mode_change(mode_change_params)\n\t\t\n\t\t# Check if we have an event for this..\n\t\tif self.event_mode_change:\n\t\t\n\t\t\tfor emc in self.event_mode_change:\n\t\t\t\tif any(x in new_active_modes for x in emc['modes']):\n\t\t\t\t\t\n\t\t\t\t\t# TODO! check if ['type'] == 'mode_change'\n\t\t\t\t\t\n\t\t\t\t\tfor active_mode in new_active_modes:\n\t\t\t\t\t\tif active_mode in emc['modes']:\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\trgb_dev = self.get_device_config(emc['device'])\n\t\t\t\t\t\t\tpin_r = rgb_dev['r']\n\t\t\t\t\t\t\tpin_g = rgb_dev['g']\n\t\t\t\t\t\t\tpin_b = rgb_dev['b']\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t# ignore pattern for now..\n\t\t\t\t\t\t\t#turn on rgb_1, using ff0000\n\t\t\t\t\t\t\tself.gpio.pwm_rgb(pin_r,pin_g,pin_b,emc['rgb'])",
"def cangeMode( Tables, WarningMessage, Mode ):\n\n if ( Mode == 1 ):\n\n UniformValue = Tables[ \"ElasticModulus\" ].getValue( 0, 0 )\n Tables[ \"ElasticModulus\" ].setValue( 0, 1, UniformValue )\n Tables[ \"ElasticModulus\" ].setValue( 0, 2, UniformValue )\n\n UniformValue = Tables[ \"ElasticModulus\" ].getFloatValue( 0, 0 ) \\\n / ( 2.0 * ( 1.0 + Tables[ \"PoissonRatios\" ].getFloatValue( 0, 0 )))\n Tables[ \"ShearModulus\" ].setValue( 0, 0, '{:.2e}'.format( UniformValue ) )\n Tables[ \"ShearModulus\" ].setValue( 0, 1, '{:.2e}'.format( UniformValue ) )\n Tables[ \"ShearModulus\" ].setValue( 0, 2, '{:.2e}'.format( UniformValue ) )\n\n UniformValue = Tables[ \"PoissonRatios\" ].getValue( 0, 0 )\n Tables[ \"PoissonRatios\" ].setValue( 0, 1, UniformValue )\n Tables[ \"PoissonRatios\" ].setValue( 0, 2, UniformValue )\n\n try:\n testInputData( Mode, Tables[ \"PoissonRatios\" ].getData() )\n except VibroP_DataCorrupted as Error:\n WarningMessage.printMessage( str( Error ) )\n\n\n\n if ( Mode == 0 ):\n\n Tables[ \"ElasticModulus\" ].restoreValue( 0, 1 )\n Tables[ \"ElasticModulus\" ].restoreValue( 0, 2 )\n\n Tables[ \"ShearModulus\" ].restoreValue( 0, 0 )\n Tables[ \"ShearModulus\" ].restoreValue( 0, 1 )\n Tables[ \"ShearModulus\" ].restoreValue( 0, 2 )\n\n Tables[ \"PoissonRatios\" ].restoreValue( 0, 1 )\n Tables[ \"PoissonRatios\" ].restoreValue( 0, 2 )\n\n precomputePoissonRatios( Tables )",
"def save_mode(self):\n def save_mode_file():\n name = dialog.entry.get()\n self.init_modes[name] = self.save_list\n with open(self.modes_file, 'wb') as fp:\n json.dump(self.init_modes, fp)\n self.world_alive = old_world_status[0]\n self.world_setable = old_world_status[1]\n self.modes_names.insert(1, name)\n self.modes_selector._list.setlist(self.modes_names)\n self.modes_selector.selectitem(name)\n dialog.destroy()\n\n old_world_status = [self.world_alive, self.world_setable]\n self.world_alive = False\n self.world_setable = False\n lu_row = 0\n lu_col = 0\n rb_row = self.cell_row - 1\n rb_col = self.cell_col - 1\n for row in range(self.cell_row):\n for col in range(self.cell_col):\n if (self.init_world[row, col]):\n lu_row = row\n break\n\n for col in range(self.cell_col):\n for row in range(self.cell_row):\n if (self.init_world[row, col]):\n lu_col = col\n break\n\n for row in range(self.cell_row - 1, -1, -1):\n for col in range(self.cell_col - 1, -1 , -1):\n if (self.init_world[row, col]):\n rb_row = row\n break\n\n for col in range(self.cell_col - 1, -1, -1):\n for row in range(self.cell_row -1, -1, -1):\n if (self.init_world[row, col]):\n rb_col = col\n break\n\n self.save_list = [[False for col in range(rb_col, lu_col + 1)]\n for row in range(rb_row, lu_row + 1)]\n\n for row in range(rb_row, lu_row + 1):\n for col in range(rb_col, lu_col + 1):\n self.save_list[row - rb_row][col - rb_col] = self.init_world[\n row, col]\n\n # show a dialog window to get name or new mode\n dialog = tk.Toplevel(self)\n dialog.label = tk.Label(dialog, text=\"Please enter the name of new mode\")\n dialog.label.grid(row = 0, column = 0, sticky = tk.N)\n dialog.entry = tk.Entry(dialog)\n dialog.entry.grid(row = 1, column = 0, sticky = tk.N)\n dialog.ok_button = tk.Button(dialog, text = \"OK\", command = save_mode_file)\n dialog.ok_button.grid(row = 2, column = 0, sticky = tk.N)\n self.wait_window(dialog)",
"def set_comms_mode(self):",
"def set_mode(self, new_mode):\n\n\t\tself._log.info('Mode changed to: %s' % new_mode.name)\n\t\tself._mode = new_mode\n\t\tself._dump_configuration()\n\t\tself._remove_all_flow_records()",
"def _change_state(self, state):\r\n self.table_entry.state = state\r\n self.creator_admin.save_model(self.request, self.table_entry, None, True)",
"def redo_settings(self):\r\n cF.redo_settings()",
"def activate(self, id):\n self.db.commit([\n 'UPDATE comments SET',\n ' mode=1',\n 'WHERE id=%s AND mode=2'], (id, ))",
"def _save(self):\n\n # Get option\n if self.button1.isChecked():\n option = 'Steunpunt'\n uncertainties = self.supportloc_unc_table\n elif self.button2.isChecked():\n # Check if the harboruncertainties are filled:\n if pd.isnull(self.harbor_unc_table.model._data).any().any():\n raise ValueError('Niet alle modelonzekerheden voor het havenmodel zijn ingevuld.')\n option = 'Havenmodel'\n uncertainties = self.harbor_unc_table\n elif self.button3.isChecked():\n if pd.isnull(self.combined_unc_table.model._data).any().any():\n raise ValueError('De gecombineerde modelonzekerheden zijn nog niet berekend.')\n option = 'Combinatie'\n uncertainties = self.combined_unc_table\n else:\n raise ValueError('Selecteer een optie voor de te gebruiken onzekerheid')\n\n self.parent.adjust_selection(uncertainties=uncertainties, option=option)\n\n # Toegevoegd Svasek 31/10/2018 - Sluit het onzekerheden input scherm als er op opslaan gedrukt wordt\n self.close()",
"def run():\n\n # establish connection\n with sqlite3.connect(DB_PATH) as conn:\n db = conn.cursor()\n\n # run reset queries\n db.execute(\"\"\"\n WITH toReset AS (\n SELECT DISTINCT table_id\n FROM cea\n WHERE mapped IS NULL\n )\n\n UPDATE tables\n SET returned=0\n WHERE table_id IN toReset\n \"\"\")",
"def reloadMode(self): \n\t\tpass",
"def set_companion_mode(self, data):\n mode_name = data.mode_to_set\n if self.cur_mode.name is not mode_name:\n if mode_name == \"Inactive\":\n result = self.to_Inactive()\n elif mode_name == \"RTD\":\n result = self.to_RTD()\n elif mode_name == \"Autospray\":\n result = self.to_Autospray()\n else:\n rospy.logerr(\"Service mode transition: Mode (%s) not found.\" % mode_name)\n result = False\n else:\n rospy.logerr(\"Service mode transition: Already in this mode, not transitioning.\")\n result = False\n\n return result",
"def save_state(self):\n pass",
"def on_ok(self, _event):\n dd = mg.DATADETS_OBJ\n if self.read_only:\n self.exiting = True\n self.Destroy()\n else:\n ## NB any changes defined in recode are already done\n new_tbl, tblname_changed, data_changed = self.get_change_status()\n if new_tbl or tblname_changed or data_changed:\n try:\n if not new_tbl:\n orig_tblname = self.tblname_lst[0]\n dd.set_tbl(tbl=orig_tblname)\n else:\n dd.set_tbl(tbl=None)\n self.make_changes()\n self.exiting = True\n self.Destroy()\n self.SetReturnCode(mg.RET_CHANGED_DESIGN)\n except FldMismatchException:\n wx.MessageBox(\n _('Unable to modify table. Some data does not match the'\n ' column type. Please edit and try again.'))\n return\n except Exception as e:\n wx.MessageBox(\n _(\"Unable to modify table.\\nCaused by error: %s\")\n % b.ue(e))\n return\n elif self.changes_made: ## not in tableconf. Must've been in recoding\n self.exiting = True\n self.Destroy()\n self.SetReturnCode(mg.RET_CHANGED_DESIGN)\n return\n else:\n wx.MessageBox(_('No changes to update.'))\n return",
"def save_conduits(self):\n # self.save_attrs()\n update_qry = \"\"\"\n UPDATE user_swmm_conduits\n SET\n conduit_name = ?,\n conduit_inlet = ?, \n conduit_outlet = ?,\n conduit_inlet_offset = ?,\n conduit_outlet_offset = ?,\n xsections_shape = ?,\n xsections_barrels = ?,\n xsections_max_depth = ?,\n xsections_geom2 = ?, \n xsections_geom3 = ?, \n xsections_geom4 = ?, \n conduit_length = ?,\n conduit_manning = ?,\n conduit_init_flow = ?,\n conduit_max_flow = ?,\n losses_inlet = ?,\n losses_outlet = ?,\n losses_average = ?,\n losses_flapgate = ? \n WHERE fid = ?;\"\"\"\n\n for row in range(0, self.conduits_tblw.rowCount()):\n item = QTableWidgetItem()\n # fid = row + 1\n fid = self.conduit_name_cbo.itemData(row)\n\n item = self.conduits_tblw.item(row, 0)\n if item is not None:\n conduit_name = str(item.text())\n\n item = self.conduits_tblw.item(row, 1)\n if item is not None:\n conduit_inlet = str(item.text())\n\n item = self.conduits_tblw.item(row, 2)\n if item is not None:\n conduit_outlet = str(item.text())\n\n item = self.conduits_tblw.item(row, 3)\n if item is not None:\n conduit_inlet_offset = str(item.text())\n\n item = self.conduits_tblw.item(row, 4)\n if item is not None:\n conduit_outlet_offset = str(item.text())\n\n item = self.conduits_tblw.item(row, 5)\n if item is not None:\n xsections_shape = str(item.text())\n\n item = self.conduits_tblw.item(row, 6)\n if item is not None:\n xsections_barrels = str(item.text())\n\n item = self.conduits_tblw.item(row, 7)\n if item is not None:\n xsections_max_depth = str(item.text())\n\n item = self.conduits_tblw.item(row, 8)\n if item is not None:\n xsections_geom2 = str(item.text())\n\n item = self.conduits_tblw.item(row, 9)\n if item is not None:\n xsections_geom3 = str(item.text())\n\n item = self.conduits_tblw.item(row, 10)\n if item is not None:\n xsections_geom4 = str(item.text())\n\n item = self.conduits_tblw.item(row, 11)\n if item is not None:\n conduit_length = str(item.text())\n\n item = self.conduits_tblw.item(row, 12)\n if item is not None:\n conduit_manning = str(item.text())\n\n item = self.conduits_tblw.item(row, 13)\n if item is not None:\n conduit_init_flow = str(item.text())\n\n item = self.conduits_tblw.item(row, 14)\n if item is not None:\n conduit_max_flow = str(item.text())\n\n item = self.conduits_tblw.item(row, 15)\n if item is not None:\n losses_inlet = str(item.text())\n\n item = self.conduits_tblw.item(row, 16)\n if item is not None:\n losses_outlet = str(item.text())\n\n item = self.conduits_tblw.item(row, 17)\n if item is not None:\n losses_average = str(item.text())\n\n item = self.conduits_tblw.item(row, 18)\n if item is not None:\n losses_flapgate = str(item.text())\n\n self.gutils.execute(\n update_qry,\n (\n conduit_name,\n conduit_inlet,\n conduit_outlet,\n conduit_inlet_offset,\n conduit_outlet_offset,\n xsections_shape,\n xsections_barrels,\n xsections_max_depth,\n xsections_geom2,\n xsections_geom3,\n xsections_geom4,\n conduit_length,\n conduit_manning,\n conduit_init_flow,\n conduit_max_flow,\n losses_inlet,\n losses_outlet,\n losses_average,\n losses_flapgate,\n fid,\n ),\n )",
"def ChangeMode(self, mode):\n if mode in MODE_DICT:\n self.ImportCover(MODE_DICT[mode], layer = MODE_LAYER)",
"def visit_table(self, sytable):\n self.current.update(sytable)",
"def visit_table(self, sytable):\n self.current.update(sytable)",
"def check_manual_mode_change(self, event):\n if self.vehicle.get_manual_mode_change(reset=True):\n data = lambda: None\n data.mode_to_set = \"Inactive\"\n self.set_companion_mode(data)",
"def saveActivate():\n save()\n activate(block=\"true\")",
"def set_old_states(modes):\n for mode in modes:\n if mode['name'] in old_modes_states:\n mode['state'] = old_modes_states[mode['name']]\n if mode.get('sub'):\n set_old_states(mode['sub'])",
"def change_mode(self):\n master.destroy()\n os.system(\"add_mode_run.py\")",
"def __check_mode(self):\n self.mode[\"auto_mode\"] = self.communications.get_mode()",
"def _mode_changed(self, index: int):\n if index == 0:\n self.table.setEnabled(False)\n self.table.clearSelection()\n self.name_widget.setEnabled(True)\n self.name_widget.validate_name()\n\n else:\n self.table.setEnabled(True)\n self.name_widget.setEnabled(False)\n\n self.valid_source_signal.emit(False)",
"def add_mode_index(self) -> None:",
"def change_mode(self, mode):\r\n self.update_enrollment(mode=mode)"
] | [
"0.60449374",
"0.5693288",
"0.5666721",
"0.563098",
"0.5616466",
"0.5605347",
"0.55648464",
"0.5559937",
"0.5443037",
"0.5381708",
"0.53650904",
"0.5321684",
"0.53107697",
"0.52835166",
"0.52474654",
"0.5224185",
"0.5222314",
"0.51933074",
"0.51613885",
"0.5130906",
"0.51263046",
"0.51263046",
"0.5121009",
"0.5116409",
"0.51021105",
"0.5099453",
"0.50960505",
"0.5095956",
"0.5074074",
"0.50667346"
] | 0.5883549 | 1 |
Returns the metrics from the registry in latest text format as a string. | def generate_latest(registry=Registry):
def sample_line(line, metric_type):
if line.labels:
labelstr = '{{{0}}}'.format(','.join(
['{0}="{1}"'.format(
k, v.replace('\\', r'\\').replace('\n', r'\n').replace('"', r'\"'))
for k, v in sorted(line.labels.items())]))
else:
labelstr = ''
timestamp = ''
if line.timestamp is not None:
# Convert to milliseconds.
timestamp = ' {0:d}'.format(int(float(line.timestamp) * 1000))
name = line.name
if metric_type == 'counter' and name.endswith('_total'):
name = name[:-6]
return '{0}{1} {2}{3}\n'.format(
name, labelstr, int(line.value), timestamp)
output = []
for metric in registry.collect():
try:
mname = metric.name
mtype = metric.type
# Munging from OpenMetrics into Prometheus format.
if mtype == 'counter':
mname = mname
elif mtype == 'info':
mname = mname + '_info'
mtype = 'gauge'
elif mtype == 'stateset':
mtype = 'gauge'
elif mtype == 'gaugehistogram':
# A gauge histogram is really a gauge,
# but this captures the structure better.
mtype = 'histogram'
elif mtype == 'unknown':
mtype = 'untyped'
help_str = '# HELP {0} {1}\n'.format(mname, metric.documentation.replace('\\', r'\\').replace('\n', r'\n'))
if 'Multiprocess' not in help_str:
continue
output.append('# HELP {0} {1}\n'.format(
mname, metric.documentation.replace('\\', r'\\').replace('\n', r'\n')))
output.append('# TYPE {0} {1}\n'.format(mname, mtype))
for s in metric.samples:
for suffix in ['_created', '_gsum', '_gcount']:
if s.name == metric.name + suffix:
break
else:
line = sample_line(s, mtype)
if not line:
continue
output.append(line)
except Exception as exception:
exception.args = (exception.args or ('',)) + (metric,)
raise
return ''.join(output).encode('utf-8') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def generate_latest_metrics(client):\n resp = await client.get(prometheus.API_ENDPOINT)\n assert resp.status == HTTPStatus.OK\n assert resp.headers[\"content-type\"] == CONTENT_TYPE_TEXT_PLAIN\n body = await resp.text()\n body = body.split(\"\\n\")\n\n assert len(body) > 3\n\n return body",
"def print_metrics(self):\n output = \"\"\n metrics = self.get_all_metrics()\n for k, v in metrics.items():\n # Print the help line\n output += \"\\n# HELP {name} {help}\\n\".format(name=v['name'],\n help=v['help'])\n # and the type line\n output += \"# TYPE {name} {type}\\n\".format(name=v['name'],\n type=v['type'])\n for sample in v['values']:\n labels = json.loads(sample, object_pairs_hook=OrderedDict)\n if v['type'] == 'histogram' and labels.get('le') == '_sum':\n labels.pop('le', None)\n mname = '{name}_sum'.format(name=v['name'])\n elif v['type'] == 'histogram' and labels.get('le') == '+Inf':\n labels.pop('le', None)\n mname = '{name}_count'.format(name=v['name'])\n elif v['type'] == 'histogram':\n mname = '{name}_bucket'.format(name=v['name'])\n else:\n mname = v['name']\n output += \"{name}{labels} {value}\\n\".format(name=mname,\n labels=self.format_labels(labels),\n value=self.format_value(v['values'][sample]))\n return output",
"def registry_to_text(registry):\n output = [CREDITS.format(dt=datetime.utcnow().isoformat())]\n for collector, samples in registry.get_samples():\n output.append(collector.text_export_header)\n for sample in samples:\n output.append(sample.export_str)\n output.append(\"\")\n return \"\\n\".join(output)",
"def metrics_text(self, x, extra=None):\n metrics = self.compute_metrics(x, extra=extra)\n if metrics is not None:\n s = \", \".join([\"%s=%0.5f\"%(k, v) for k, v in metrics.items()])\n return s\n return \"\"",
"def stats_get_str(self):\n return self.stats.get_all_str()",
"def stats_get_str(self):\n return self.stats.get_all_str()",
"def as_text(self) -> str:\n txt = ''\n with self._th_lock:\n # purge expired value (reach ttl_s) from values dict\n purge_l = []\n for key, (_value, _timestamp_ms, expire_at) in self._values_d.items():\n if expire_at and time.monotonic() > expire_at:\n purge_l.append(key)\n for rm_key in purge_l:\n self._values_d.pop(rm_key)\n # if any value exists, format an exposition message\n if self._values_d:\n # add a comment line if defined\n if self.comment:\n # apply escapes to comment\n esc_comment = str(self.comment)\n for rep_args in [('\\\\', '\\\\\\\\'), ('\\n', '\\\\n')]:\n esc_comment = esc_comment.replace(*rep_args)\n txt += f'# HELP {self.name} {esc_comment}\\n'\n # add a type line if defined\n if self.type is not MetricType.UNTYPED:\n txt += f'# TYPE {self.name} {self.type.value}\\n'\n # add every \"name{labels} value [timestamp]\" for the metric\n for lbl_id_str, (value, ts, _expire_at) in self._values_d.items():\n if self._type is MetricType.HISTOGRAM:\n txt += self._data2txt_histogram(lbl_id_str, value)\n elif self._type is MetricType.SUMMARY:\n txt += self._data2txt_summary(lbl_id_str, value)\n else:\n txt += self._data2txt_default(lbl_id_str, value, ts)\n return txt",
"def get_monitor_string(self):\n\n return self.reporter.get_overview_string(self.info)",
"def output_metrics(self):\n print('')\n for key in sorted(self.metrics):\n print('{}:'.format(key), end='')\n for k, v in self.metrics[key].items():\n if type(v[-1]) is list:\n print('\\t' + k + ': ' + ''.join('{:5.3f} '.format(vs) for vs in v[-1]), end='')\n else:\n print('\\t{}: {:5.3f}'.format(k, v[-1]), end='')\n print('\\n', end='')",
"def __str__(self):\n columns = list(self.metrics.keys())\n columns.sort()\n out = '%s\\n' % ','.join(columns)\n values = [str(self.metrics[c]) for c in columns]\n out += '%s\\n' % ','.join(values)\n return out",
"def metrics(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'metrics')\r\n\r\n return http.Request('GET', url), parsers.parse_json",
"def inspect_instance(self):\n url = \"http://%s:1337/metrics\" % self.host\n r = requests.get(url)\n try:\n s = r.json()\n except TypeError:\n s = r.text\n return s",
"def _send_textmetrics(metrics):\n\n data = [\" \".join(map(str, metric)) for metric in metrics] + [\"\"]\n\n return \"\\n\".join(data)",
"def metrics(self):\n\n data = requests.get(\n f\"http://{self.prometheus_host}:{self.prometheus_port}/metrics\"\n ).content.decode()\n lines = [line for line in data.split(\"\\n\") if not line.startswith(\"#\")]\n metrics = {}\n for line in lines:\n if not line:\n continue\n\n name, value = line.split(\" \")\n\n try:\n value = int(value) # type: ignore\n except ValueError:\n value = float(value) # type: ignore\n\n if \"{\" in name and \"}\" in name:\n base = name[: name.index(\"{\")]\n tags = name[name.index(\"{\") + 1 : -1]\n tags = [tag.split(\"=\") for tag in tags.split(\",\")]\n tags = [(key, val.replace('\"', \"\")) for key, val in tags]\n\n name = base + \"#\" + \",\".join(f\"{k}:{v}\" for k, v in sorted(tags))\n\n metrics[name] = value\n\n return metrics",
"def metrics(_):\r\n collector = BuildsCollector()\r\n build_metrics, headers = collector.get_metrics_table()\r\n print(tabulate(build_metrics, headers=headers))",
"def get_metrics(self):\n self.logger.debug(\"Fetching metrics.\")\n return self._api_query(\"metrics\")['metrics']",
"def get_metric(ms):\n\treturn '['+','.join(str(m) for m in ms)+']'",
"def get_str_metadata(self):\n return \"\\n\".join([\"Guessed by {}\".format(self.guessed_by), \"{} metaphors used\".format(self.metaphors_used)])",
"def create_metrics_text_from_dict(metrics_dict: dict):\n\n metrics_text_string = f\"\"\"Last {metrics_dict['num_days_back']} days: \\\n{metrics_dict['num_days_run']} runs, \\\n{metrics_dict['pct_days_run']}% of days, \\\n{metrics_dict['tot_miles_run']} miles, \\\n{metrics_dict['miles_per_day']} miles/day, \\\n{metrics_dict['miles_per_run']} miles/run\\\n\"\"\"\n return metrics_text_string",
"def get_metric_list(self) -> List[str]:\n ...",
"def get_performance_str(self, metrics=['avr_accuracy', 'avr_loss'], \n abbrev={'avr_accuracy':'acc','avr_loss':'loss'}):\n mystr = \"\"\n if self.mode == 'val':\n mystr += \"best_epoch={} best_acc={:.3f} \".format(\n self.current_best[self.mode]['epoch'], \n self.current_best[self.mode]['acc']\n )\n \n if self.epoch_cache[self.mode][\"num_performances\"] > 0:\n for m in metrics:\n mystr += \"{}={:.3f} \".format(abbrev[m], self.epoch_cache[self.mode][m])\n return mystr\n else:\n return \"Use ptracker.add_task_performance() to track performance! \"",
"def get_metrics(self) -> Dict[str, base.Number]:\n return self._metrics",
"def summary_string(self) -> str:",
"def metric(self) -> str:\r\n return self._metric",
"def mymetrics(): \n _update_metric_counters()\n logging.debug(prom_objects_seen.collect())\n return flask.Response(generate_latest(), mimetype='text/plain')",
"def list_metrics(self):\n results = []\n if self.r.exists(self.metrics_key):\n keys = self.r.smembers(self.metrics_key)\n for k in keys:\n # metric_key, metric_type, metric_name, metric_help = keys.split(\" \", 3)\n results.append(k.split(\" \", 3))\n return results",
"def get_printed_eval_results(self, general_metrics, report):\n printed_eval_results = \"----- Evaluation results -----\"\n for key in sorted(general_metrics.keys()):\n printed_eval_results += \"\\n %s = %s\" % (key, str(general_metrics[key]))\n printed_eval_results += f\"\\n{report}\"\n return printed_eval_results",
"def format_metrics(metrics, split):\n result = format_partial_metrics(metrics, split)\n result += '\\n'\n result += format_partial_metrics(metrics, split, extra='_r')\n return result",
"def gather_metric(self):\n result = self._shell.run(self.KERNEL_COMMAND).stdout\n response = {self.KERNEL_RELEASE: result}\n return response",
"def textual(self):\n return [self.version_label, self.timestamp_label,\n self.message_label, self.battery_label,\n self.battery_icon, self.record_sensors]"
] | [
"0.6561686",
"0.65185255",
"0.64778507",
"0.6407903",
"0.6371694",
"0.6371694",
"0.6159014",
"0.6076035",
"0.59855705",
"0.5963776",
"0.5963161",
"0.58073926",
"0.57971",
"0.57738096",
"0.57541287",
"0.57380533",
"0.57340425",
"0.5730773",
"0.56887656",
"0.5684547",
"0.56844574",
"0.56288505",
"0.5621304",
"0.5618147",
"0.56077665",
"0.5601288",
"0.559879",
"0.5596331",
"0.5593543",
"0.5592733"
] | 0.65264964 | 1 |
Generates a list of random colours in RGB given a random number generator and the size of this list | def generate_random_colours_list(rng: random.Random, size: int) -> List[TupleInt3]:
return [random_colour(rng) for _ in range(size)] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def random_color_gen():\n r = randint(0, 255)\n g = randint(0, 255)\n b = randint(0, 255)\n return [r, g, b]",
"def _random_color() -> List[float]:\n return [np.random.uniform(), np.random.uniform(), np.random.uniform()]",
"def get_color_list(cluster_count):\n color_list = []\n for i in xrange(cluster_count):\n color_list.append(random_color_gen())\n return color_list",
"def makeRGB(ncol = 16, minc = 32, maxc = 216):\n subd = int((maxc - minc)/ncol)\n numpy.random.seed(1)\n RGB = [[]]\n for r in range(minc, maxc, subd):\n for g in range(minc, maxc, subd):\n for b in range(minc, maxc, subd):\n RGB.append(numpy.array([r,g,b]))\n #print \"# of colors: \", len(self.RGB)\n rgb_order = numpy.random.permutation(len(RGB)) # randomize the order\n RGB = [RGB[x] for x in rgb_order]\n return RGB",
"def randcolour():\n colour = [0,0,0]\n while sum(colour)<450:\n for i in range(3):\n colour[i] = int(random.random()*255)\n return(tuple(colour))",
"def colors(k): \n ret = []\n for i in range(k):\n ret.append((random.uniform(0, 1), random.uniform(0, 1), random.uniform(0, 1)))\n return ret",
"def pretty_colours(how_many):\r\n golden_ratio_conjugate = (1 + math.sqrt(5)) / 2\r\n hue = random.random() # use random start value\r\n final_colours = []\r\n for tmp in range(how_many):\r\n hue += golden_ratio_conjugate * (tmp / (5 * random.random()))\r\n hue = hue % 1\r\n temp_c = [x for x in hsv_to_rgb(hue, 0.5, 0.95)]\r\n final_colours.append(temp_c)\r\n # originally returned ['rgb(123,123,123)', 'rgb(123,123,123)']\r\n # now [[0.123,0.123,0.123],[0.123,0.123,0.123]]\r\n return final_colours",
"def get_random_rgb(seed):\n random.seed(seed)\n r = random.randint(0, 255)\n g = random.randint(0, 255)\n b = random.randint(0, 255)\n return [r, g, b]",
"def create_random_color(self):\n # Create a list of n colors.\n n = 4\n dc = 1.0 / (n-1)\n color_list = [i*dc for i in range(n)]\n\n if self.is_scaffold:\n rgb = [1.0, 1.0, 1.0]\n else:\n rgb = [random.choice(color_list) for i in range(3)]\n # Don't generate blue (that's for a scaffold in cadnano) or black.\n if (rgb[0] == 0.0) and (rgb[1] == 0.0):\n rgb[0] = random.choice(color_list[1:])\n if rgb[2] == 0.0: \n rgb[2] = random.choice(color_list[1:]) \n #__if (rgb[0] == 0) and (rgb[1] == 0)\n #__if self.is_scaffold\n return rgb",
"def createColors():\n\n colors = \"Blue\", \"Green\", \"Yellow\", \"Red\"\n color_list = []\n color_colum = []\n\n for i in range(15): #Create 2D list of 15*25 with colors\n color_colum = []\n for k in range(25):\n color_colum.append(random.choice(colors))\n color_list.append(color_colum)\n \n return color_list",
"def random_rgb() -> List[int, int, int]:\n hsl_color = (random.random(), 0.3, 0.8)\n rgb_color = colorsys.hls_to_rgb(*hsl_color)\n return [round(c * 255) for c in rgb_color]",
"def random_color(num):\n # 为每个类别的边界框随机匹配相应颜色\n np.random.seed(80)\n COLORS = np.random.randint(0, 256, size=(num, 3), dtype='uint8') #\n return COLORS",
"def _get_goal_colours() -> List[Tuple[int, int, int]]:\n colour_lst = COLOUR_LIST[:]\n random.shuffle(colour_lst)\n return colour_lst",
"def generate_colour():\n red = random.randrange(0, 256)\n green = random.randrange(0, 256)\n blue = random.randrange(0, 256)\n alpha = random.randrange(0, 256)\n return (red, green, blue, alpha)",
"def random_color():\n\n rgbl=[255,0,0]\n random.shuffle(rgbl)\n return tuple(rgbl)",
"def random_colors(N,bright=True):\n brightness = 1.0 if bright else 0.7\n hsv = [(i/N,1,brightness)for i in range(N)]\n colors = list(map(lambda c: clolorsys.hsv_to_rgb(*c),hsv))\n random.shuffle(colors)\n return colors",
"def _genRandomColor():\n b = random.randint(0, 255)\n g = random.randint(0, 255)\n r = random.randint(0, 255)\n return (b, g, r)",
"def random_colors(N, bright=True):\n brightness = 1.0 if bright else 0.7\n hsv = [(i / N, 1, brightness) for i in range(N)]\n colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))\n random.shuffle(colors)\n return colors",
"def random_colors(N, bright=True):\n brightness = 1.0 if bright else 0.7\n hsv = [(i / N, 1, brightness) for i in range(N)]\n colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))\n random.shuffle(colors)\n return colors",
"def random_colors(N, bright=True):\n brightness = 1.0 if bright else 0.7\n hsv = [(i / N, 1, brightness) for i in range(N)]\n colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))\n random.shuffle(colors)\n return colors",
"def random_colors(self, N, bright=True):\r\n brightness = 1.0 if bright else 0.7\r\n hsv = [(i / N, 1, brightness) for i in range(N)]\r\n colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))\r\n random.shuffle(colors)\r\n return colors",
"def randColor():\r\n return np.array([random.random(), random.random(), random.random()]).reshape((1, 1, 3))",
"def random_colors(N, bright=True):\n import random\n import colorsys\n\n brightness = 1.0 if bright else 0.7\n hsv = [(i / N, 1, brightness) for i in range(N)]\n colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))\n random.shuffle(colors)\n return colors",
"def random_colors(N, bright=True):\n import random\n import colorsys\n brightness = 1.0 if bright else 0.7\n hsv = [(i / N, 1, brightness) for i in range(N)]\n colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))\n random.shuffle(colors)\n return colors",
"def generate_rgb(exist_colors: List[List[int, int, int]]) -> List[int, int, int]:\n largest_min_distance = 0\n best_color = random_rgb()\n if len(exist_colors) > 0:\n for _ in range(100):\n color = random_rgb()\n current_min_distance = min(_color_distance(color, c) for c in exist_colors)\n if current_min_distance > largest_min_distance:\n largest_min_distance = current_min_distance\n best_color = color\n _validate_color(best_color)\n return best_color",
"def random_colour(rng: random.Random) -> TupleInt3:\n r = rng.randint(0, 255)\n g = rng.randint(0, 255)\n b = rng.randint(0, 255)\n return r, g, b",
"def random_colors(n, bright=True):\n brightness = 1.0 if bright else 0.7\n hsv = [(i / n, 1, brightness) for i in range(n)]\n colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))\n random.shuffle(colors)\n return colors",
"def randcolor():\n return (randint(0,255), randint(0,255), randint(0,255))",
"def generate_list(size):\n items = [randint(0, MAX_NUM) for i in range(size)]\n return items",
"def get_colors(num_colors):\n import colorsys\n colors = []\n for i in np.arange(0., 360., 360. / num_colors):\n hue = i/360.\n lightness = (50 + np.random.rand() * 10)/100.\n saturation = (90 + np.random.rand() * 10)/100.\n colors.append(colorsys.hls_to_rgb(hue, lightness, saturation))\n return colors"
] | [
"0.7871541",
"0.748539",
"0.7485229",
"0.74038225",
"0.72706544",
"0.71166867",
"0.70931363",
"0.7082866",
"0.7052505",
"0.696605",
"0.6933918",
"0.69295365",
"0.6914378",
"0.6914372",
"0.6912016",
"0.6897092",
"0.6876162",
"0.68433",
"0.68433",
"0.68433",
"0.6812373",
"0.6789727",
"0.6764938",
"0.67035794",
"0.66703063",
"0.6662116",
"0.66046154",
"0.658365",
"0.6582132",
"0.6577821"
] | 0.8515019 | 0 |
learn novel phrases by looking at cooccurrence of candidate term pairings; docs should be input in tokenized (`tdocs`) and untokenized (`docs`) form | def extract_phrases(tdocs, docs, idf):
# Gather existing keyphrases
keyphrases = set()
for doc in tdocs:
for t in doc:
if len(t.split(' ')) > 1:
keyphrases.add(t)
# Count document co-occurrences
t_counts = defaultdict(int)
pair_docs = defaultdict(list)
for i, terms in enumerate(tdocs):
# We dont convert the doc to a set b/c we want to preserve order
# Iterate over terms as pairs
for pair in zip(terms, terms[1:]):
t_counts[pair] += 1
pair_docs[pair].append(i)
# There are a lot of co-occurrences, filter down to those which could
# potentially be phrases.
t_counts = {kw: count for kw, count in t_counts.items() if count >= 2}
# Identify novel phrases by looking at
# keywords which co-occur some percentage of the time.
# This could probably be more efficient/cleaned up
for (kw, kw_), count in t_counts.items():
# Only consider terms above a certain avg global IDF (to reduce noise)
if (idf[kw]+idf[kw_])/2 <= 0.4:
continue
# Look for phrases that are space-delimited or joined by 'and' or '-'
ph_reg = re.compile('({0}|{1})( |-)(and )?({0}|{1})'.format(kw, kw_))
# Extract candidate phrases and keep track of their counts
phrases = defaultdict(int)
phrase_docs = defaultdict(set)
for i in pair_docs[(kw, kw_)]:
for m in ph_reg.findall(docs[i].lower()):
phrases[''.join(m)] += 1
phrase_docs[''.join(m)].add(i)
if not phrases:
continue
# Get the phrase encountered the most
top_phrase = max(phrases.keys(), key=lambda k: phrases[k])
top_count = phrases[top_phrase]
# Only count phrases that appear in _every_ document
if top_count/count == 1:
# Check if this new phrase is contained by an existing keyphrase.
if any(top_phrase in ph for ph in keyphrases):
continue
keyphrases.add(top_phrase)
# Add the new phrase to each doc it's found in
for i in phrase_docs[top_phrase]:
tdocs[i].append(top_phrase)
return tdocs, keyphrases | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def train(self, documents):\n ###DONE\n\n #entire vocab in document set D\n vocab_sod = set()\n vocab_pop = set()\n \n #Calcuates prior probabilities\n priorSOD = 0 #how many docs are spam\n priorPOP = 0 #how many docs are ham\n \n #Cacluates Tct\n term_freq_sod = {} #{term:occur, term:occur}\n term_freq_pop = {}\n \n #Tct'\n Tct_sod = 0 #Tct' = sum of (every term occurence in class c + 1)\n Tct_pop = 0\n \n for doc in documents: \n if 'sod' in doc.label:\n priorSOD += 1\n for token in doc.tokens:\n Tct_sod += 1\n if token in term_freq_sod.keys():\n term_freq_sod[token] = term_freq_sod[token] + 1\n else:\n term_freq_sod[token] = 1\n vocab_sod.add(token) \n else:\n priorPOP += 1\n for token in doc.tokens:\n Tct_pop += 1\n if token in term_freq_pop.keys():\n term_freq_pop[token] = term_freq_pop[token] + 1\n else:\n term_freq_pop[token] = 1\n vocab_pop.add(token)\n \n \n #endfor\n # | is for set join\n self.vocab = vocab_sod | vocab_pop #gets rid of duplicate words (those in both 'ham' and 'spam') \n \n #Tct Primes\n #tct' = term freq of all terms in class c + 1*(total terms)\n Tct_sod = Tct_sod + len(self.vocab) \n Tct_pop = Tct_pop + len(self.vocab) \n \n \n print(\"PriorSod: \" + str(priorSOD))\n print(\"PriorPop: \" + str(priorPOP))\n print(\"LEN Docum: \" + str(len(documents)))\n \n self.priorSOD = priorSOD / len(documents)\n self.priorPOP = priorPOP / len(documents)\n \n for term in self.vocab:\n if term in term_freq_pop.keys():\n self.cond_prob_pop[term] = (term_freq_pop[term] + 1) / Tct_pop\n else:\n self.cond_prob_pop[term] = 1 / Tct_pop\n \n if term in term_freq_sod.keys():\n self.cond_prob_sod[term] = (term_freq_sod[term] + 1) / Tct_sod\n else:\n self.cond_prob_sod[term] = 1 / Tct_sod\n \n \n pass",
"def process(self, doc):\n # don't try to process null notes\n if not doc[1]:\n if self.verbose:\n print(\"Error segmenting doc\",doc[0])\n return []\n # odd notes may throw an error. Just continue rather than stopping the entire process\n try:\n sentences = self.sentence_tokenizer.segToSentenceSpans(doc[1])\n except KeyError:\n if self.verbose:\n print(\"Error segmenting doc\",doc[0])\n return []\n\n #context_doc = pyConTextGraph.ConTextDocument() # ConTextDoc not needed for simple usage\n\n doc_annots = list()\n\n for sentence in sentences:\n # run sentence tokenizer on input text, return the spans\n sentence_text = doc[1][sentence.begin:sentence.end]\n # process every sentence by adding markup\n markup = pyConTextGraph.ConTextMarkup()\n markup.setRawText(sentence_text)\n markup.cleanText()\n # apply targets and modifiers\n markup.markItems(self.targets, mode=\"target\")\n markup.markItems(self.modifiers, mode=\"modifier\")\n # address scope of modifiers to targets, remove inactive modifiers and self-modifying relationships\n markup.pruneMarks()\n markup.applyModifiers()\n markup.pruneSelfModifyingRelationships()\n markup.dropInactiveModifiers()\n\n marked_targets = markup.getMarkedTargets()\n for marked_target in marked_targets:\n modifiers = markup.getModifiers(marked_target)\n if not modifiers:\n span = (sentence.begin+marked_target.getSpan()[0],sentence.begin+marked_target.getSpan()[1])\n if self.mode == 'combined':\n annot = (doc[0], marked_target.getPhrase(), span[0], span[1], marked_target.getCategory()[0]+'_unspecified', marked_target.getCode())\n elif self.mode == 'separate':\n annot = (doc[0], marked_target.getPhrase(), span[0], span[1], marked_target.getCategory()[0], 'unspecified', marked_target.getCode())\n if annot not in doc_annots:\n doc_annots.append(annot)\n else:\n for modifier in modifiers:\n if marked_target.getSpan()[0] < modifier.getSpan()[0]:\n span = (sentence.begin+marked_target.getSpan()[0],sentence.begin+modifier.getSpan()[1])\n else:\n span = (sentence.begin+modifier.getSpan()[0],sentence.begin+marked_target.getSpan()[1])\n if self.mode == 'combined':\n annot = (doc[0], doc[1][span[0]:span[1]], span[0], span[1], marked_target.getCategory()[0]+'_'+modifier.getCategory()[0], marked_target.getCode())\n elif self.mode == 'separate':\n annot = (doc[0], doc[1][span[0]:span[1]], span[0], span[1], marked_target.getCategory()[0], modifier.getCategory()[0], marked_target.getCode())\n if annot not in doc_annots:\n doc_annots.append(annot)\n\n #context_doc.addMarkup(markup)\n\n return doc_annots",
"def intent_of_text_LnDOR(ChapterTextS, TargetQuestionsD, TestS, StopWords):\n \n # Chapter Text - stokenize\n StokensCT = stokenize(ChapterTextS, StopWords) \n\n # Test question - stokenize\n StokensTest = stokenize(TestS, StopWords)\n\n # Knowledge Base Dict - stokenize\n KBD_structure = stokenizeKBD(TargetQuestionsD, StopWords)\n\n # List (because list is mutable, set is not) of all stokens in document\n StokensDoc = StokensCT[:] # from chapter text\n StokensDoc.extend(StokensTest[:]) # += Test string\n\n # extend list of stokens in Doc\n for i in TargetQuestionsD:\n StokensDoc.extend(TargetQuestionsD[i][\"mq stokens\"][:]) # += KB target [matched Q]s\n StokensDoc.extend(TargetQuestionsD[i][\"ans stokens\"][:]) # += KB answers\n \n StokensTestV = set(StokensTest)\n StokensDocV = set(StokensDoc)\n StokensAntiTgtV = StokensDocV\n \n # Complement of all targets\n for i in TargetQuestionsD:\n StokensAntiTgtV = StokensAntiTgtV.difference(set(TargetQuestionsD[i][\"mq stokens\"]))\n \n # calculate confusion matrix and DOR etc.\n LnDORD = {}\n # Anti Target\n TP, FP, FN, TN = confusion_matrix(StokensDocV, StokensAntiTgtV, StokensTestV) \n LnDOR = lndor(TP, FP, FN, TN) \n someAngle = angleDOR(TP, FP, FN, TN) \n \n LnDORD[\"AntiTgt\"] = {'lndor': LnDOR, 'theta': someAngle}\n\n # total occurences\n total_occ = 0\n for i in TargetQuestionsD:\n total_occ += TargetQuestionsD[i]['count']\n\n for i in TargetQuestionsD:\n StokensTgtV = set(TargetQuestionsD[i][\"mq stokens\"][:])\n\n TP, FP, FN, TN = confusion_matrix(StokensDocV, StokensTgtV, StokensTestV) \n priorOR = TargetQuestionsD[i]['count'] / total_occ\n\n LnDOR = lndor(TP, FP, FN, TN) \n someAngle = angleDOR(TP, FP, FN, TN, priorOR) \n \n LnDORD[i] = {'lndor': LnDOR, 'theta': someAngle}\n # LnDORD = {i: {'lndor': , 'theta': }}, KB indices + \"AntiTgt\"\n\n return LnDORD",
"def similar_docs(self, doc=None, docs=[], count=10):\n #import ipdb; ipdb.set_trace()\n if doc is not None:\n docs = [doc]\n docs = [text_utils.lemmatize_text(doc) for doc in docs]\n vec = self.vectorizer.transform(docs)\n tvec = self.transformer.transform(vec)\n sims, docids = self.knn.kneighbors(tvec, return_distance=True)\n #return [self.docs[docid] for docid in docids[0][:count]], [1-sim for sim in sims[0][:count]]\n results = []\n for idx in range(len(docids[0])):\n docid = docids[0][idx]\n results.append({\n \"id\": docid,\n \"text\": self.docs[docid],\n \"score\": 1-sims[0][idx], #distance to similarity\n })\n results = sorted(results, key=lambda x: -x[\"score\"])\n return results[:count]",
"def clean_docs(self,docs):\n\n # Remove numbers, but not words that contain numbers.\n docs = [[token for token in doc if not token.isnumeric()] for doc in docs]\n\n # Remove words that are only one character.\n docs = [[token for token in doc if len(token) > 1 and token not in stop_words] for doc in docs]\n\n # lemmatizer = WordNetLemmatizer()\n # docs = [[lemmatizer.lemmatize(token) for token in doc] for doc in docs]\n\n # Add bigrams and trigrams to docs (only ones that appear 20 times or more).\n bigram = Phrases(docs, min_count=20)\n for idx in range(len(docs)):\n for token in bigram[docs[idx]]:\n if '_' in token:\n # Token is a bigram, add to document.\n docs[idx].append(token)\n\n # Create a dictionary representation of the documents.\n dictionary = Dictionary(docs)\n\n # Filter out words that occur less than 20 documents, or more than 50% of the documents.\n dictionary.filter_extremes(no_below=20, no_above=0.5)\n\n # Bag-of-words representation of the documents.\n corpus = [dictionary.doc2bow(doc) for doc in docs]\n\n return docs,dictionary,corpus",
"def preprocess(docs):\r\n # stop = set(stopwords.words('english'))\r\n tags = {'NN', 'NNS', 'NNP', 'NNP', 'NNPS', 'JJ', 'JJR', 'JJS'}\r\n for i in range(len(docs)):\r\n docs[i] = [(word.lower(), convert(tag)) for (word, tag) in nltk.pos_tag(nltk.word_tokenize(docs[i])) if tag in tags]\r\n return lemmatize_docs(docs)",
"def q_tokenize(document):\n final_words = []\n avoided_words = [] # WORDS WHICH ARE TO BE AVOIDED IN THE FINAL LIST\n \n # making the avoided_words list\n for word in string.punctuation: # the string library has a string of punctuations\n avoided_words.append(word)\n for word in nltk.corpus.stopwords.words(\"english\"): # the nltk lib. has a list of stopwords commonly used in english\n avoided_words.append(word)\n\n tokens = nltk.word_tokenize(document)",
"def parse_doc(self, doc_as_list):\n\n tweet_id = doc_as_list[0]\n tweet_date = doc_as_list[1]\n full_text = doc_as_list[2]\n url = doc_as_list[3]\n indice = doc_as_list[4]\n retweet_text = doc_as_list[5]\n retweet_url = doc_as_list[6]\n retweet_indice = doc_as_list[7]\n quote_text = doc_as_list[8]\n quote_url = doc_as_list[9]\n quoted_indice = doc_as_list[10]\n retweet_quoted_text = doc_as_list[11]\n retweet_quoted_url = doc_as_list[12]\n retweet_quoted_indice = doc_as_list[13]\n\n term_dict = {}\n\n tokenized_text = self.parse_sentence(full_text)\n tokenized_quote = self.parse_sentence(quote_text)\n # tokenized_url = self.handle_url(url)\n\n\n doc_length = len(tokenized_text) # after text operations - length of full_text\n\n new_tokenized_text = tokenized_text + tokenized_quote\n\n # spell checker\n # new_tokenized_text = self.spell.update(new_tokenized_text)\n\n for term in new_tokenized_text:\n if term is not \"\": # or (term.isalpha() and len(term) == 1)\n if term not in term_dict:\n term_dict[term] = 1\n else:\n term_dict[term] += 1\n\n document = Document(tweet_id, tweet_date, full_text, url, retweet_text, retweet_url, quote_text,\n quote_url, term_dict, doc_length)\n return document",
"def doc_analyzer(self, doc):\n\n if self.lowercase is None or self.lowercase == 'none':\n lowercase = set()\n elif self.lowercase in {'both', 'all'}:\n lowercase = {'char', 'word'}\n else: lowercase = {self.lowercase}\n\n # character n-grams\n if 'char' in lowercase:\n docfeat = self.get_ngrams(list(doc.lower()),\n self.c_ngmin, self.c_ngmax)\n else:\n docfeat = self.get_ngrams(list(doc),\n self.c_ngmin, self.c_ngmax)\n # word n-grams\n if 'word' in lowercase:\n docfeat.extend(self.get_ngrams(self.tokenizer(doc.lower()),\n self.w_ngmin, self.w_ngmax,\n suffix=\"⅏\", separator=\" \"))\n else:\n docfeat.extend(self.get_ngrams(self.tokenizer(doc),\n self.w_ngmin, self.w_ngmax,\n suffix=\"⅏\", separator=\" \"))\n return docfeat",
"def tokenize(doc):\n text = doc\n doc = doc.lower()\n doc = re.sub('[,;]', ' ', doc)\n doc = re.split('\\s+', doc)\n doc = sorted(list(filter(None, doc)))\n ent = le.stanfordTagger(text)\n print(ent)\n l = []\n for item in ent:\n if ent[item] in ['LOCATION', 'GPE','PERSON']:\n l.append(item)\n ent = l#ent = sorted(list(le.stanfordTagger(text).keys()))\n #print(ent)\n #ent = [e.lower() for e in ent]\n crime_type = fileCrimeClassify.extractCrimeWord(text, returnOnlyLabels=True)\n crime_type = [c.lower() for c in crime_type]\n #print(crime_type + ent)\n #print(doc)\n return doc, ent + crime_type",
"def extract_passages(s, docs):\n if os.name == 'windows':\n docs = [doc.replace('/', '\\\\') for doc in docs]\n \n query_terms = set(tokenize(s))\n passages = []\n for doc in docs:\n with io.open(doc, encoding='utf-8', errors='ignore') as f:\n for para in f:\n for sent in sent_tokenize(para):\n if len(query_terms.intersection(set(tokenize(sent)))) == 0:\n continue\n passages.append(sent)\n return passages",
"def terms_teach(dataset: str, vectors: str, seeds: List[str]):\n # Connect to the database using the settings from prodigy.json and add the\n # seed terms to the dataset\n DB = connect()\n if dataset and dataset in DB:\n seed_tasks = [set_hashes({\"text\": s, \"answer\": \"accept\"}) for s in seeds]\n DB.add_examples(seed_tasks, datasets=[dataset])\n\n # Load the spaCy model with vectors\n nlp = spacy.load(vectors)\n\n # Create two Doc objects for the accepted and rejected terms\n accept_doc = Doc(nlp.vocab, words=seeds)\n reject_doc = Doc(nlp.vocab, words=[])\n score = 0\n\n def predict(term):\n \"\"\"Score a term given the current accept_doc and reject_doc.\"\"\"\n if len(accept_doc) == 0 and len(reject_doc) == 0:\n return 0.5\n # Use spaCy's .similarity() method to compare the term to the\n # accepted and rejected Doc\n if len(accept_doc) and accept_doc.vector_norm != 0.0:\n accept_score = max(term.similarity(accept_doc), 0.0)\n else:\n accept_score = 0.0\n if len(reject_doc) and reject_doc.vector_norm != 0:\n reject_score = max(term.similarity(reject_doc), 0.0)\n else:\n reject_score = 0.0\n score = accept_score / (accept_score + reject_score + 0.2)\n return max(score, 0.0)\n\n def update(answers):\n # Called whenever Prodigy receives new annotations\n nonlocal accept_doc, reject_doc, score\n accept_words = [t.text for t in accept_doc]\n reject_words = [t.text for t in reject_doc]\n for answer in answers:\n # Increase or decrease score depending on answer and update\n # list of accepted and rejected terms\n if answer[\"answer\"] == \"accept\":\n score += 1\n accept_words.append(answer[\"text\"])\n elif answer[\"answer\"] == \"reject\":\n score -= 1\n reject_words.append(answer[\"text\"])\n # Update the target documents in place\n accept_doc = Doc(nlp.vocab, words=accept_words)\n reject_doc = Doc(nlp.vocab, words=reject_words)\n\n def score_stream(stream):\n # Get all lexemes in the vocab and score them\n lexemes = [lex for lex in stream if lex.is_alpha and lex.is_lower]\n while True:\n seen = set(w.orth for w in accept_doc)\n seen.update(set(w.orth for w in reject_doc))\n lexemes = [w for w in lexemes if w.orth not in seen and w.vector_norm]\n by_score = [(predict(lex), lex) for lex in lexemes]\n by_score.sort(reverse=True)\n for _, term in by_score:\n score = predict(term)\n # Return (score, example) tuples for the scored terms\n yield score, {\"text\": term.text, \"meta\": {\"score\": score}}\n\n # Sort the scored vocab by probability and return examples\n stream = Probability(score_stream(nlp.vocab))\n\n return {\n \"view_id\": \"text\", # Annotation interface to use\n \"dataset\": dataset, # Name of dataset to save annotations\n \"stream\": stream, # Incoming stream of examples\n \"update\": update, # Update callback, called with answers\n }",
"def __init__(self, text=None, bare=False, stem='gap', pos=False, roman = False, stopwords=False, punct=False, conjunction=False, article=False, demonstrative=False, preposition=False, question=False, pronoun=False, quantifier=False, date=False, number=False, ssn=False, telephone=False, name=False, address=False, sentiment=False, gender=False, age = False, dob=False, unit=False, standard=False, metric=False, spell=None ):\n self._text = text # raw text\n self._words = None # list of words\n self._punct = punct # keep/remove punctuation\n self._stemming = stem # on/off stemming\n self._pos = pos # on/off parts of speech\n self._roman = roman # on/off romanization \n self._porter = stopwords # keep/remove stopwords\n self._bare = bare # on/off bare tokenizing\n self._standard = standard # convert metric to standard units\n self._metric = metric # convert standard to metric units\n self._spell = None # spell checking\n self._bow = None # bag of words\n self._freq = None # word count frequency\n self._tf = None # term frequency\n \n # More than just bare tokenizing\n if self._bare == False:\n self._spell = spell # do (not) spell checking\n \n # Keep Stopwords\n if stopwords is True:\n self._quantifier = True # keep words indicating a size\n self._preposition = True # keep prepositions\n self._article = True # keep articles\n self._conjunction = True # keep conjunctions\n self._demonstrative = True # keep demonstratives\n self._question = True # keep question words\n self._pronoun = True # keep pronouns \n self._sentiment = True # keep sentiment words\n self._number = True # keep numbers \n self._date = True # keep date\n self._ssn = True # keep social security number\n self._telephone = True # keep telephone numbers\n self._address = True # keep street addresses\n self._name = True # keep proper names\n self._gender = True # keep gender words\n self._age = True # keep age\n self._dob = True # keep date of birth words\n self._unit = True # keep unit of measurement\n # Remove Stopwords\n else:\n self._quantifier = quantifier # keep/remove words indicating a size\n self._preposition = preposition # keep/remove prepositions\n self._article = article # keep/remove articles\n self._conjunction = conjunction # keep/remove conjunctions\n self._demonstrative = demonstrative # keep/remove demonstratives\n self._question = question # keep/remove question words\n self._pronoun = pronoun # keep/remove pronouns\n self._sentiment = sentiment # keep/remove sentiment words\n self._number = number # keep/remove numbers\n self._date = date # keep/remove date\n self._ssn = ssn # keep/remove social security number\n self._telephone = telephone # keep/remove telephone numbers\n self._address = address # keep/remove street addresses\n self._name = name # keep/remove proper names\n self._gender = gender # keep/remove gender words\n self._age = age # keep/remove age\n self._dob = dob # keep/remove date of birth words\n self._unit = unit # keep/remove unit of measurement words\n \n if isinstance(stopwords, bool) is False:\n raise TypeError(\"Stopwords must be a boolean\")\n if isinstance(bare, bool) is False:\n raise TypeError(\"Bare must be a boolean\")\n if isinstance(quantifier, bool) is False:\n raise TypeError(\"Quantifier must be a boolean\")\n if isinstance(preposition, bool) is False:\n raise TypeError(\"Preposition must be a boolean\")\n if isinstance(conjunction, bool) is False:\n raise TypeError(\"Conjunction must be a boolean\")\n if isinstance(article, bool) is False:\n raise TypeError(\"Article must be a 
boolean\")\n if isinstance(demonstrative, bool) is False:\n raise TypeError(\"Demonstrative must be a boolean\")\n if isinstance(question, bool) is False:\n raise TypeError(\"Question must be a boolean\")\n if isinstance(pronoun, bool) is False:\n raise TypeError(\"Pronoun must be a boolean\")\n if isinstance(number, bool) is False:\n raise TypeError(\"Number must be a boolean\")\n if isinstance(date, bool) is False:\n raise TypeError(\"Date must be a boolean\")\n if isinstance(ssn, bool) is False:\n raise TypeError(\"SSN must be a boolean\")\n if isinstance(telephone, bool) is False:\n raise TypeError(\"Telephone must be a boolean\")\n if isinstance(name, bool) is False:\n raise TypeError(\"Name must be a boolean\")\n if isinstance(address, bool) is False:\n raise TypeError(\"Address must be a boolean\")\n if isinstance(sentiment, bool) is False:\n raise TypeError(\"Sentiment must be a boolean\")\n if isinstance(gender, bool) is False:\n raise TypeError(\"Gender must be a boolean\")\n if isinstance(dob, bool) is False:\n raise TypeError(\"Gender must be a boolean\")\n if isinstance(age, bool) is False:\n raise TypeError(\"Age must be a boolean\")\n if isinstance(punct, bool) is False:\n raise TypeError(\"Punct must be a boolean\")\n if isinstance(unit, bool) is False:\n raise TypeError(\"Unit must be a boolean\")\n if isinstance(standard, bool) is False:\n raise TypeError(\"Standard must be a boolean\")\n if isinstance(metric, bool) is False:\n raise TypeError(\"Metric must be a boolean\")\n if text is not None:\n if isinstance(text, str) is False:\n raise TypeError(\"String expected for text\")\n if spell is not None:\n if spell not in ['en', 'fr', 'es', 'it', 'de']:\n raise ValueError(\"Wrong value for spell: en, es, fr, it or de\")\n \n if text is not None:\n self._split()\n if self._bare == False:\n # preprocess the tokens\n self._preprocess()\n # word stemming\n if self._stemming == 'gap':\n self._stem()\n elif self._stemming == 'porter':\n self._nltkStemmer('porter')\n elif self._stemming == 'snowball':\n self._nltkStemmer('snowball')\n elif self._stemming == 'lancaster':\n self._nltkStemmer('lancaster')\n elif self._stemming == 'lemma':\n self._lemma()\n # remove stop words\n self._stopwords()\n # Do unit conversions\n self._conversion()\n # Do POS tagging\n if self._pos == True:\n self._partsofspeech()",
"def learn(self, documents, labels):\n for i in xrange(len(documents)):\n text = documents[i]\n\n words = text.split()\n self.learn_from_one(words)",
"def set_doc_phrases(doc_phrases, docs, phrases):\n for doc in docs:\n if not doc in doc_phrases:\n doc_phrases[doc] = []\n doc_phrases[doc] = doc_phrases[doc] + phrases",
"def _tokenize(self, raw_text):\n\n doc = self.nlp(raw_text.strip())\n\n # Loop through tokens and find known entities aren't already marked\n for token in doc:\n # Is this word in our known_entities, but is not recognized by the spaCy parser?\n if token.text.lower() in self.known_entities and token.ent_type not in self.entities:\n # We need to set the new entity to doc.ents directly (I believe the getter for doc.ents does\n # some important massaging. However, counter to the online docs, setting doc.ents wipes out\n # all of the previously recognized ents, so we stash the value, then we combine and reset.\n stash = doc.ents\n doc.ents = [(token.text.title(), doc.vocab.strings['PERSON'], token.i, token.i + 1)]\n doc.ents = doc.ents + stash\n\n # Find proper noun n-grams: (a) find a known entity, (b) is the next word also a known entity?,\n # (c) merge, (d) repeat\n # TODO: Joining multi-word named entities sometimes causes us trouble.\n doc_len = len(doc) # Helps us know when to exit the 'for loop' (since we change the # of items via merge)\n for token in doc:\n # if we're not at the end of the loop, and we recognize this as a proper noun and it's not a stop word\n # and the token isn't a space...\n if token.i + 1 < doc_len and token.ent_type in self.entities and \\\n token.text.lower() not in self.stop_words and token.text not in ' ':\n next_token = doc[token.i + 1]\n # keep looping while we're not at the end of the loop and this token has the same entity type as\n # the previous token and it's not a stop word or a space.\n while token.i + 1 < doc_len and next_token.ent_type == token.ent_type and \\\n next_token.text.lower() not in self.stop_words and next_token.text not in ' ':\n n_gram = doc[token.i:token.i + 2]\n n_gram.merge()\n doc_len -= 1 # the merge changes the list length, so we just shrunk the list!\n # print(x)\n if token.i + 1 >= doc_len:\n break\n\n return doc",
"def create_new_doc(self, doc: Doc, min_prob: float = 0.25) -> Doc:\n\n # print(\"running on\", doc[:10])\n\n if not self.form_frequencies:\n raise RuntimeError(\n \"Cannot truecase without a dictionary of form frequencies\")\n\n tokens = []\n spaces = []\n doctext = doc.text\n for tok in doc:\n toktext = tok.text\n\n # We only change casing for words in Title or UPPER\n if tok.is_alpha and toktext[0].isupper():\n cond1 = tok.is_upper and len(toktext) > 2 # word in uppercase\n cond2 = toktext[0].isupper(\n ) and not tok.is_sent_start # titled word\n if cond1 or cond2:\n token_lc = toktext.lower()\n if token_lc in self.form_frequencies:\n frequencies = self.form_frequencies[token_lc]\n if frequencies.get(toktext, 0) < min_prob:\n alternative = sorted(\n frequencies.keys(), key=lambda x: frequencies[x])[-1]\n\n # We do not change from Title to to UPPER\n if not tok.is_title or not alternative.isupper():\n toktext = alternative\n\n tokens.append(toktext)\n\n # Spacy needs to know whether the token is followed by a space\n if tok.i < len(doc)-1:\n spaces.append(doctext[tok.idx+len(tok)].isspace())\n else:\n spaces.append(False)\n\n # Creates a new document with the tokenised words and space information\n doc2 = Doc(self.model.vocab, words=tokens, spaces=spaces) #type: ignore\n # print(\"finished with doc\", doc2[:10])\n return doc2",
"def pos_text(text):\n nlp = spacy.load('en')\n doc = nlp(text)\n # all tokens that arent stop words or punctuations\n words = [token.text.encode('ascii', 'ignore') for token in doc if token.is_stop != True and token.is_punct != True]\n\n # noun tokens that arent stop words or punctuations\n final_tokens = [token.text.encode('ascii', 'ignore') for token in doc if token.is_stop != True and \\\n token.is_punct != True and (token.pos_ == \"NOUN\" or token.pos_ == \"VERB\")]\n\n # frequency dictionary for all tokens\n word_freq = Counter(words)\n\n #top 100 words to display in wordcloud which are noun or verb\n #frequency will be used to show big/small words in wordcloud\n final_tokens_freq = Counter(final_tokens)\n result = final_tokens_freq.most_common(config.config[\"MAX_FREQUENCY\"])\n #print result\n return result",
"def guide(self, doc_list=None):\r\n\r\n with pyro.plate(\"topics\", self.K) as k_vec:\r\n\r\n # Lambda => latent variable for the per-topic word q distribution\r\n Lamda = torch.stack([\r\n pyro.param(\r\n f\"lamda_q_{k}\",\r\n (1 + 0.01*(2*torch.rand(self.V)-1)),\r\n constraint=constraints.positive)\r\n for k in k_vec\r\n ])\r\n\r\n # Beta_q => per-topic word q distribtion\r\n Beta_q = pyro.sample(f\"beta\", dist.Dirichlet(Lamda))\r\n\r\n Theta_q = []\r\n for d in pyro.plate(\"documents\", self.D, subsample_size=self.S):\r\n\r\n # gamma => q for the per-doc topic vector\r\n gamma = pyro.param(f\"gamma_q_{d}\",\r\n (1+0.01*(2*torch.rand(self.K)-1))/self.K,\r\n constraint=constraints.positive)\r\n\r\n # theta_q => posterior per-doc topic vector\r\n theta_q = pyro.sample(f\"theta_{d}\", dist.Dirichlet(gamma))\r\n\r\n phi = pyro.param(\r\n f\"phi_q_{d}\",\r\n (1+0.01*(2*torch.rand(self.K)-1))/self.K,\r\n constraint=constraints.positive\r\n )\r\n\r\n with pyro.plate(f\"words_{d}\", self.N[d]) as w_vec:\r\n\r\n phi = torch.stack([\r\n pyro.param(\r\n f\"phi_q_{d}_{w}\",\r\n (1+0.01*(2*torch.rand(self.K)-1))/self.K,\r\n constraint=constraints.positive)\r\n for w in w_vec\r\n ])\r\n\r\n # assign a topic\r\n pyro.sample(f\"z_assignment_{d}\", dist.Categorical(phi))\r\n\r\n Theta_q.append(theta_q)\r\n\r\n Theta_q = torch.stack(Theta_q)\r\n\r\n return Beta_q, Theta_q",
"def gen_words(self, doc):\n pattern = re.compile(u'[\\\\s\\\\d,.<>/?:;\\'\\\"[\\\\]{}()\\\\|~!@#$%^&*\\\\-_=+a-zA-Z,。《》、?:;“”‘’{}【】()…¥!—┄-]+')\n doc = re.sub(pattern, ' ', doc)\n suffix_indexes = index_of_sorted_suffix(doc, self.max_word_len)\n word_cands = {}\n # compute frequency and neighbors\n for suf in suffix_indexes:\n word = doc[suf[0]:suf[1]]\n if word not in word_cands:\n word_cands[word] = WordInfo(word)\n word_cands[word].update(doc[suf[0] - 1:suf[0]], doc[suf[1]:suf[1] + 1])\n # compute probability and entropy\n length = len(doc)\n for k in word_cands:\n word_cands[k].compute(length)\n word_cands[k].compute_pp(self.pos_prop)\n # compute aggregation of words whose length > 1\n values = sorted(word_cands.values(), key=lambda x: len(x.text))\n for v in values:\n if len(v.text) == 1:\n continue\n v.compute_cohesion(word_cands)\n\n return sorted(values, key=lambda v: v.freq, reverse=True)",
"def doc_topics(self, docs):\n assert(self.has_vocab)\n assert(self.is_trained)\n tknzd = [self.tokenizer.tokenize(doc) for doc in docs]\n bows = [self.vocab.doc2bow(tkns) for tkns in tknzd]\n return [self._lda_model[bow] for bow in bows]",
"def add_new_doc(self, document, documents_list_length=10000):\n\n try:\n document_dictionary = document.term_doc_dictionary\n # self.countDoc += 1\n for term in document_dictionary.keys():\n if self.stemming == 'y':\n my_stemmer = Stemmer()\n term = my_stemmer.stem_term(term)\n # Update inverted index and posting\n if term not in self.inverted_idx.keys():\n self.inverted_idx[term] = [1, [\n (document_dictionary[term], document.tweet_id)]] # amount of doc, freq in the doc, doc id.\n\n else:\n self.inverted_idx[term][0] += 1 # amount of doc\n self.inverted_idx[term][1].append((document_dictionary[term],\n document.tweet_id)) # freq in the doc # doc id\n\n if term not in self.postingDict.keys():\n self.postingDict[term] = [(document.tweet_id, document_dictionary[term])]\n else:\n self.postingDict[term].append((document.tweet_id, document_dictionary[term]))\n # self.countTweet -= 1\n\n if document.tweet_id not in self.tweet_dict.keys():\n self.tweet_dict[document.tweet_id] = [[term, document_dictionary[term]], 1,\n 0] # [term,freq in tweet], amount of unique terms in tweet, amount of terms in tweet\n elif document_dictionary[term] > self.tweet_dict[document.tweet_id][0][\n 1]: # tweet exist, compering between freq in two terms\n if self.tweet_dict[document.tweet_id][0][\n 1] == 1: # before change term check if the last term is unique\n self.tweet_dict[document.tweet_id][\n 1] += 1 # last term is unique: add to the amount of uniqe terms in tweet\n self.tweet_dict[document.tweet_id][0] = [term,\n document_dictionary[term]] # change between the terms\n self.tweet_dict[document.tweet_id][2] += 1\n elif document_dictionary[term] == 1: # tweet exist, not most common, check if unique\n self.tweet_dict[document.tweet_id][1] += 1\n self.tweet_dict[document.tweet_id][2] += 1\n except:\n # print('problem in indexer : add_new_doc')\n # print(traceback.print_exc())\n pass",
"def analyze_text(self, text, doc_num=None):\n doc_words = frozenset(x[0] for x in text)\n top_ids_in_doc = self.relevant_ids.intersection(doc_words)\n for word_id in top_ids_in_doc:\n self._inverted_index[self.id2contiguous[word_id]].add(self._num_docs)",
"def get_terms(document):\n q = get_mapped(document)\n tokens = tockenizer(q)\n terms = analizer(tokens)\n\n return terms",
"def __call__(self, doc: Doc):\n # Find matches in doc\n matches = self.matcher(doc)\n\n # If none are found then return None\n if not matches:\n return doc\n\n for match_id, start, end in matches:\n predicate = self.nlp.vocab.strings[match_id]\n\n # if the predicate is in the list where the hypernym is last, else hypernym is first\n if predicate in self.last:\n hypernym = doc[end - 1]\n hyponym = doc[start]\n else:\n # An inelegent way to deal with the \"such_NOUN_as pattern\"\n # since the first token is not the hypernym.\n if doc[start].lemma_ == \"such\":\n start += 1\n hypernym = doc[start]\n hyponym = doc[end - 1]\n\n hypernym = self.find_noun_compound_head(hypernym)\n hyponym = self.find_noun_compound_head(hyponym)\n\n # For the document level, we expand to contain noun phrases.\n hypernym_extended = self.expand_to_noun_compound(hypernym, doc)\n hyponym_extended = self.expand_to_noun_compound(hyponym, doc)\n\n doc._.hearst_patterns.append(\n (predicate, hypernym_extended, hyponym_extended)\n )\n\n for token in hyponym.conjuncts:\n token_extended = self.expand_to_noun_compound(token, doc)\n if token != hypernym and token is not None:\n doc._.hearst_patterns.append(\n (predicate, hypernym_extended, token_extended)\n )\n\n return doc",
"def __parse_docs(self, docs, analyses=True):\n # iter over docs\n for i, doc in enumerate(docs):\n _meta = doc.attrib['title']\n # iter over examples in *doc*\n for snip in doc.getchildren()[1:]:\n _text = str()\n _idx = 0\n _target_idxs = list()\n _ana = list()\n # iter over words in cur example\n for word in snip.getchildren():\n if word.tag == 'text':\n _text += word.text\n _idx += len(word.text)\n \n if len(word.attrib) > 0:\n _text += word.attrib['text']\n # process target\n if word.attrib.get('target') is not None:\n _target_idxs.append((_idx, _idx + len(word.attrib['text'])))\n if analyses:\n _ana.append(self.__get_ana(word))\n \n _idx += len(word.attrib['text'])\n \n if _target_idxs:\n for i, ixs in enumerate(_target_idxs):\n if analyses:\n yield _text, ixs, _meta, _ana[i]\n else:\n yield _text, ixs, _meta, _ana\n else:\n continue",
"def postprocess_docs(self, doc_scores, docs, input_strings, add_eos, prefix, print_docs=False):\n\n def cat_input_and_doc(doc_score, domain, entity_name, doc_title, doc_text, input_string, add_eos, prefix, print_docs=False):\n # TODO(Patrick): if we train more RAG models, I want to put the input first to take advantage of effortless truncation\n # TODO(piktus): better handling of truncation\n if doc_title.startswith('\"'):\n doc_title = doc_title[1:]\n if doc_title.endswith('\"'):\n doc_title = doc_title[:-1]\n if prefix is None:\n prefix = \"\"\n if entity_name is None:\n entity_name = \"*\"\n suffix = self.generator_tokenizer.eos_token if add_eos else \"\"\n out = (\n prefix + domain + self.config.title_sep + entity_name + self.config.title_sep + doc_title + self.config.title_sep + doc_text + self.config.doc_sep + input_string + suffix\n ).replace(\" \", \" \")\n if print_docs:\n logger.info(\"{} {}\".format(doc_score, out))\n return out\n\n rag_input_strings = [\n cat_input_and_doc(\n doc_scores[i][j],\n docs[i][j]['domain'],\n docs[i][j]['entity_name'],\n docs[i][j]['doc']['title'],\n docs[i][j]['doc']['body'],\n input_strings[i],\n add_eos,\n prefix,\n print_docs,\n )\n for i in range(len(docs))\n for j in range(self.n_docs)\n ]\n\n contextualized_inputs = self.generator_tokenizer.batch_encode_plus(\n rag_input_strings,\n max_length=self.config.max_combined_length,\n return_tensors=\"pt\",\n padding=\"longest\",\n truncation=False,\n ).to(doc_scores.device)\n\n return contextualized_inputs[\"input_ids\"], contextualized_inputs[\"attention_mask\"]",
"def train(self, corpus):\n for sentence in corpus.corpus:\n cleanSentence = sentence.cleanSentence()\n for datum in cleanSentence.data:\n token = datum.word\n self.unigramCounts[token] = self.unigramCounts[token] + 1\n self.total += 1\n\n i = 0\n while i < len(sentence.data) - 1:\n token = str(cleanSentence.get(i))\n self.followingWords[token].add(str(cleanSentence.get(i+1)))\n i += 1\n\n i = 1\n while i < len(sentence.data):\n bigram = str(cleanSentence.get(i-1)) + \" \" + str(cleanSentence.get(i))\n self.bigramCounts[bigram] = self.bigramCounts[bigram] + 1\n\n self.precedingWords[str(cleanSentence.get(i))].add(str(cleanSentence.get(i-1)))\n i += 1\n self.precedingWordsTotal = sum(map(lambda x: len(x), self.precedingWords.values()))\n\n i = 2\n while i < len(sentence.data):\n trigram = str(cleanSentence.get(i-2)) + \" \" + str(cleanSentence.get(i-1)) + \" \" + str(cleanSentence.get(i))\n self.trigramCounts[trigram] = self.trigramCounts[trigram] + 1\n i += 1\n\n #print('precedingWords')\n #print(self.precedingWords)\n #print('followingWords')\n #print(self.followingWords)\n #print('unigrams')\n #print(self.unigramCounts)\n #print('bigrams')\n #print(self.bigramCounts)\n\n #self.discount(self.trigramCounts)\n #self.discount(self.bigramCounts)\n #self.discount(self.unigramCounts)",
"def make_vocab_from_docs(docs):\n vocab_words=set()\n for doc in docs:\n doc=doc.lower()\n doc=re.sub(r'-',' ',doc)\n doc=re.sub(r' +',' ',doc) # turn multiple spaces into a single space\n doc=re.sub(r'[^a-z ]','',doc) # remove anything that is not a-z or space\n words=set(doc.split())\n vocab_words=vocab_words.union(words)\n vocab=dict(zip(vocab_words,range(len(vocab_words))))\n return vocab",
"def trainInternal():\n\n con_counts = Counter()\n deflike = Counter()\n\n for record in records:\n data = [re.split(\"\\t\", d) for d in re.split(\"\\n\", record)]\n tokens, tags = zip(*data)\n\n for i, token in enumerate(tokens):\n denom = len(token)\n for indices, f in fqs(token, 0.5): #perform analysis on one word at a time\n context, numer = internalContext(indices, token)\n if tags[i] != \"O\": #only want the named entities\n deflike[context] += f * numer/denom #need to normalize by word length\n con_counts[context] += f * numer/denom\n\n deflike = Counter({context: deflike[context]/con_counts[context] for context in deflike}) #perform division on each entry\n\n return deflike"
] | [
"0.6524535",
"0.6303925",
"0.63031244",
"0.6283015",
"0.62118506",
"0.61696297",
"0.6147306",
"0.6121404",
"0.610831",
"0.61022097",
"0.6042071",
"0.6039818",
"0.59821063",
"0.5974946",
"0.5971",
"0.5961817",
"0.58901966",
"0.58670616",
"0.5834076",
"0.58197343",
"0.58094156",
"0.5794027",
"0.5781877",
"0.5761632",
"0.57606167",
"0.57583195",
"0.5749307",
"0.572466",
"0.57217175",
"0.5717187"
] | 0.7074772 | 0 |
returns `top_n` keywords for a list of articles. keywords are returned as (keyword, score) tuples. | def keywords(articles, top_n=25):
# compute term idfs
token_docs = [lemma_tokenize(clean(a.text)) for a in articles]
local_term_idf = IDF(token_docs)
token_docs, phrases = extract_phrases(token_docs, [a.text for a in articles], global_term_idf)
titles = [a.title for a in articles]
title_tokens = [lemma_tokenize(clean(t)) for t in titles]
term_counts = defaultdict(int)
for doc in token_docs:
for t in set(doc):
if t:
term_counts[t] += 1
title_terms = set()
for title_tks in title_tokens:
title_terms = title_terms | set(title_tks)
for ph in phrases:
if any(ph in title.lower() for title in titles):
title_terms.add(ph)
# Score terms
term_scores = []
for t, count in term_counts.items():
# Ignore numbers, they are very specific to a particular event and
# introduce noise
try:
float(t)
continue
except ValueError:
# TODO This is a troublesome token, not sure why it's not filtered out by
# IDF. needs more investigation
if t == 'n\'t':
continue
score = count * (global_term_idf[t] - local_term_idf[t])
if t in title_terms:
score *= 1.5
term_scores.append((t, score))
return sorted(term_scores, key=lambda t: t[1], reverse=True)[:top_n] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def top_keywords(urls, count=10):\n try:\n res = Counter()\n for url in urls:\n res += Counter(get_keyword_dict(url))\n return [w[0] for w in res.most_common(count)]\n except:\n print('Error finding top keywords')",
"def extract_keywords(article_list, n=10):\n vectorizer = TfidfVectorizer()\n tfidf = vectorizer.fit_transform(article_list)\n words = vectorizer.get_feature_names()\n # check N > total_words_length or not\n maxn = tfidf.shape[1] if tfidf.shape[1] < n else n\n weights = tfidf.toarray()\n # sort by decrease order\n indices = map(lambda w: np.argsort(-w)[:maxn], weights)\n keywords = [list(map(lambda i: words[i], indy)) for indy in indices]\n return keywords",
"def get_top_keywords_from_articles(self, kwords_list):\n _all_keywords = []\n for a in kwords_list:\n if a != []:\n for w in a:\n _all_keywords.append([w['keyword'],w['weight'],w['label']])\n _df_g = pd.DataFrame(_all_keywords, columns=[\"Keyword\", \"Count\",\"Label\"])\n _df_g.sort_values(by=\"Count\", inplace=True, ascending=False)\n _df_g.reset_index(drop=True, inplace=True)\n _df_g.to_csv('test.csv')\n print(len(_df_g))\n\n _df_g['Keyword'] = _df_g['Keyword'].apply(self.remove_repeat_words)\n _df_g.dropna(axis=0, inplace=True)\n p1,p2 = self.pos_taggers(_df_g)\n _df_g['c_POS'] = p1\n _df_g['s_POS'] = p2\n _df_g['c_POS_score'] = _df_g['c_POS'].apply(self.combine_pos_score)\n _df_g['s_POS_score'] = _df_g['s_POS'].apply(self.specific_pos_score)\n _df_g['Count'] = _df_g['Count'] + _df_g['c_POS_score'] + _df_g['s_POS_score'] \n print(len(_df_g))\n _df_g.sort_values(by='Count',inplace=True, ascending=False)\n print(len(_df_g))\n _df_g = _df_g.reset_index(drop=True)\n _df_g = _df_g[:10]\n response_dict = dict()\n response_dict['nc'] = \", \".join(_df_g['Keyword'].to_list())\n return response_dict",
"def get_top_keywords(entries):\n # Extract text for processing\n\n raw_text = [] # raw text in sentences\n for entry in entries:\n # Its a post\n if 'title' in entry:\n raw_text.append(entry['title'])\n raw_text += tokenize.sent_tokenize(entry['selftext'])\n else:\n raw_text += tokenize.sent_tokenize(entry['body'])\n \n # Tokenize\n tokens = tokenize_posts_keywords(raw_text)\n\n # 1-gram\n fdist_1 = FreqDist(tokens)\n top_keywords_1 = fdist_1.most_common(100)\n \n # 2-gram\n bigrams = ngrams(tokens, 2)\n fdist_2 = FreqDist(bigrams)\n top_keywords_2 = fdist_2.most_common(100)\n top_keywords_2 = [(f'{keywords[0]} {keywords[1]}', mentions) for keywords, mentions in top_keywords_2]\n\n # 3-gram\n trigrams = ngrams(tokens, 3)\n fdist_3 = FreqDist(trigrams)\n top_keywords_3 = fdist_3.most_common(100)\n top_keywords_3 = [(f'{keywords[0]} {keywords[1]} {keywords[2]}', mentions) for keywords, mentions in top_keywords_3]\n\n top_keywords = top_keywords_1 + top_keywords_2 + top_keywords_3\n return [{ 'keyword' : keyword, 'mentions' : mentions } for keyword, mentions in top_keywords]",
"def get_relevant_topics(model, keywords, topn=None, weight_threshold=None):\n if topn is None and weight_threshold is None:\n raise ValueError('One of topn or weight_threshold required')\n topic_term = model.get_topics() #topic term matrix of weights num_topics x num terms\n keywords = np.array(model.id2word.doc2idx(keywords)) #makes keywords into id format\n relevant_topics = []\n i= 0\n for topic in topic_term:\n if topn is not None:\n top = np.argsort(topic)[-topn:]\n if pd.Series(keywords).isin(top).any():\n relevant_topics.append(i)\n else:\n eligible = np.argwhere(topic > weight_threshold)\n if pd.Series(keywords).isin(eligible).any():\n relevant_topics.append(i)\n\n i+=1\n return relevant_topics",
"def top_k_frequent(top_k, words, list_of_texts):\n dict_top_freq = {}\n for word in words:\n dict_top_freq[word.lower()] = 0\n for string in list_of_texts:\n if word.lower() in string.lower():\n counter = string.lower().count(word.lower())\n dict_top_freq[word.lower()] += counter\n\n list_top_sorted = sorted(dict_top_freq.items(), key=lambda item: item[1], reverse=True)\n print(list_top_sorted)\n\n list_k = []\n for i in list_top_sorted:\n list_k.append(i[0])\n\n return list_k[:top_k]",
"def get_top_n_words(word_list, n):\n\t\n\t#Uses Counter function to create tuples of words and number of instances of word\n\twordCount = Counter(word_list)\n\ttopWords = []\n\n\torderedByFrequency = sorted(wordCount, key=wordCount.get, reverse=True)\n\n\t#create list of inputted 'n' top words\n\tfor i in range (0 , n):\n\t\ttopWords.append(orderedByFrequency[i])\n\n\treturn topWords",
"def top3_articles():\n\n cur.execute(\"\"\"\n SELECT title, COUNT(*) AS article_title\n FROM article_summary\n GROUP BY title\n ORDER BY article_title DESC\n LIMIT 3;\n \"\"\")\n result = cur.fetchall()\n return result",
"def most_similar(self, article: str, topn: int = 5):\n return [article[0] for article in self._model.similar_by_word(article, topn)]",
"def get_top_n_words(topic_words_dict, n):\n score_wordlist = topic_words_dict.items()\n score_wordlist.sort(key=lambda x: x[1], reverse=True)\n return [word for (word,score) in score_wordlist[:n]]",
"def get_top_n_words(topic_dict, n=5):\n top_words = []\n for num, data in topic_dict.items():\n sorted_words = {k: v for k, v in sorted(data['words'].items(),\n key=lambda x: x[1],\n reverse=True\n )}\n words = sorted_words.keys()\n top_n_words = list(words)[:n]\n top_words.append(', '.join(top_n_words))\n return top_words",
"def keywords(text):\r\n from operator import itemgetter # for sorting\r\n text = split_words(text)\r\n numWords = len(text) # of words before removing blacklist words\r\n text = [x for x in text if x not in stopWords]\r\n freq = Counter()\r\n for word in text:\r\n freq[word] += 1\r\n\r\n minSize = min(10, len(freq))\r\n keywords = tuple(freq.most_common(minSize)) # get first 10\r\n keywords = dict((x, y) for x, y in keywords) # recreate a dict\r\n\r\n for k in keywords:\r\n articleScore = keywords[k]*1.0 / numWords\r\n keywords[k] = articleScore * 1.5 + 1\r\n\r\n keywords = sorted(keywords.iteritems(), key=itemgetter(1))\r\n keywords.reverse()\r\n return dict(keywords)",
"def get_top_n_words(word_list, n):\n d = dict()\n for w in word_list:\n d[w] = d.get(w, 0) + 1\n ordered_by_frequency = sorted(d, key=d.get, reverse=True)\n return ordered_by_frequency[0:n]",
"def display_top_n_words(total_count__of_words, n): # Considering n=10 here as specified in the requirements\n return sorted(total_count__of_words.items(), key=lambda i: i[1], reverse=True)[:n]",
"def get_top_n_words(word_list, n):\n word_counts = dict()\n\n for word in word_list:\n freq = word_counts.get(word, 1)\n word_counts[word] = freq + 1\n\n ordered_by_frequency = sorted(word_counts, key=word_counts.get, reverse=True)\n\n return ordered_by_frequency[0:n]",
"def get_top_n_words(word_list, n):\n\tfreq_dict = make_freq_dict (word_list) # get a dictionary\n\tordered_by_frequency = sorted(freq_dict, key=freq_dict.get, reverse=True) # sort\n\tprint ordered_by_frequency[0:n] # print\n\treturn ordered_by_frequency[0:n]",
"def get_top_n_words(word_list, n):\n\tfreqs = get_word_frequencies(word_list)\n\tfreq_words = sorted(freqs, key=freqs.get, reverse=False)\n\treturn freq_words[:n]",
"def get_top_n_words(word_list, n):\n words = []\n\n # Change all words to lowercase\n for word in word_list:\n word = str.lower(word)\n if word not in words:\n words.append(word)\n\n # Calculate frequency of each word\n frequency = []\n for word in words:\n word_count = 0\n for test in word_list:\n if word == test:\n word_count += 1\n frequency.append(word_count)\n\n dic = dict()\n for i, word in enumerate(words):\n dic[frequency[i]] = word\n\n # Sort dictionary to return ranks\n keys = dic.keys()\n keys = sorted(keys)\n words_ranked = []\n for key in keys:\n words_ranked.append(dic.get(key))\n words_ranked = words_ranked[::-1]\n words_ranked = words_ranked[:n]\n return words_ranked",
"def get_top_n_motif_scores(score_list,top_n):\r\n\treturn score_list.argsort()[-top_n:],score_list[score_list.argsort()[-top_n:]]",
"def topArticles():\n c = db.cursor()\n c.execute(\"select titles.title, tophits.hits\\\n from tophits, titles\\\n where tophits.path = titles.slug\\\n order by hits desc limit 3;\")\n results = c.fetchall()\n c.close()\n return results",
"def three_most_popular_articles():\n\n # To print information\n information_string = '1. The 3 most popular articles of all time are:\\n'\n\n # Query string\n query = \"\"\"select title,count(*) as num from\n articles,log where\n log.path=CONCAT('/article/',articles.slug)\n group by articles.title\n order by num DESC limit 3;\"\"\"\n\n print(information_string)\n for result in query_db(query):\n print('\\t\"' + str(result[0]) + '\" - ' + str(result[1]) + ' views')\n\n print(\"\\n\")",
"def top_terms(self, nterms):\n return self.sql_session.query(Term)\\\n .filter(Term.term != '*')\\\n .order_by(desc(Term.relevance))[:nterms]",
"def filter_top_n_words(topic_words_dict, n, word_list):\n # First remove any redundant words in word_list\n words = set(word_list)\n # Now get the intersection with words, that appear as keys in the dict\n topic_words_intersect = set(topic_words_dict.keys()).intersection(words)\n # Now get the words with their scores, sort descending for the scores\n # and return the first n words:\n score_wordlist = [(x, topic_words_dict[x]) for x in topic_words_intersect]\n score_wordlist.sort(key=lambda x: x[1], reverse=True)\n return [word for (word,score) in score_wordlist[:n]]",
"def topTags(db, topN=1000):\n c=db.cursor()\n c.execute(\"\"\"\n SELECT\n tag\n FROM tags\n GROUP BY tag\n ORDER BY COUNT(*) DESC\n LIMIT %d\n \"\"\" % topN)\n tops = [tag0[0] for tag0 in c.fetchall()]\n c.close()\n return tops",
"def textrank(sentences, top_n, stopwords=None):\n S = build_similarity_matrix(sentences, stopwords) \n sentence_ranking = page_rank(S)\n \n # Sort the sentence ranks\n ranked_sentence_indexes = [item[0] for item in sorted(enumerate(sentence_ranking), key=lambda item: -item[1])]\n selected_sentences = sorted(ranked_sentence_indexes[:top_n])\n summary = itemgetter(*selected_sentences)(sentences)\n return summary",
"def top_sentences(query, sentences, idfs, n):\n\n # claculate idfs of each sentence\n sent_score = dict()\n for sentence in sentences:\n sent_score[sentence] = 0\n for query_word in query:\n if query_word in sentences[sentence]:\n sent_score[sentence] += idfs[query_word]\n\n # create sorted list of sentences\n sorted_sentences = sorted(sent_score, key= lambda item: sent_score[item], reverse= True)\n\n # re-order sentences with the same rank of idfs according to query term density\n loop_sentences = sorted_sentences.copy()\n for sentence1 in loop_sentences:\n for sentence2 in loop_sentences:\n if sentence1 != sentence2:\n if sent_score[sentence1] == sent_score[sentence2]:\n qtd1 = query_term_density(sentence1, query, sentences)\n qtd2 = query_term_density(sentence2, query, sentences)\n index1 = sorted_sentences.index(sentence1)\n index2 = sorted_sentences.index(sentence2)\n if qtd1 > qtd2:\n if index1 > index2:\n sorted_sentences[index2], sorted_sentences[index1] = sorted_sentences[index1], sorted_sentences[index2]\n elif qtd1 < qtd2:\n if index1 < index2:\n sorted_sentences[index2], sorted_sentences[index1] = sorted_sentences[index1], sorted_sentences[index2]\n\n # get list contains top n sentences\n top_sentences = []\n for index in range(n):\n top_sentences.append(sorted_sentences[index]) \n\n return top_sentences",
"def print_top_words(components, feature_names, n_top_words: int = 10):\n for topic_idx, topic in enumerate(components):\n message = \"Topic #%d: \" % topic_idx\n message += \" \".join(\n [feature_names[i] for i in topic.argsort()[: -n_top_words - 1 : -1]]\n )\n print(message)\n print()",
"def get_relevant_documents(self, n_top_hits: Optional[int] = 10) -> List[str]:\n search_string_clean = [self.clean_document(self.search_string)]\n q_vec = self.vectorizer.transform(search_string_clean).toarray().reshape(self.df_tdm.shape[0], )\n sim = {} # Calculate the similarity\n for i in range(n_top_hits):\n print(i)\n sim[i] = np.dot(self.df_tdm.loc[:, i].values, q_vec) / np.linalg.norm(\n self.df_tdm.loc[:, i]) * np.linalg.norm(q_vec)\n # Sort the values\n sim_sorted = sorted(sim.items(), key=lambda item: item[1],\n reverse=True) # Print the articles and their similarity values\n for k, v in sim_sorted:\n if v != 0.0:\n self.search_results.append(self.documents_names[k])\n # print(docs[k])\n return self.search_results",
"def get_top_tags(tags):\n d = {i: tags.count(i) for i in tags if tags.count(i)}\n return ((k, d[k]) for k in sorted(d, key=d.get, reverse=True)[:TOP_NUMBER])",
"def print_top_articles(popular_articles):\n\n print('\\nThe three top most articles viewed are:\\n')\n for article in popular_articles:\n print(article[0] + '\\t-\\t' + str(article[1]) + ' views \\n')\n print('-------------------------------------------------------\\n')"
] | [
"0.78360444",
"0.7450141",
"0.6965421",
"0.694177",
"0.6873025",
"0.68340343",
"0.6781121",
"0.6672731",
"0.66101223",
"0.6608954",
"0.659795",
"0.6593758",
"0.6579277",
"0.6569332",
"0.6565945",
"0.65614015",
"0.64829606",
"0.6468362",
"0.64578843",
"0.6425248",
"0.63832724",
"0.6370794",
"0.6320914",
"0.6286867",
"0.62842464",
"0.62804496",
"0.6278238",
"0.6277115",
"0.6266947",
"0.6263113"
] | 0.826135 | 0 |
return current thread name. | def get_threadname():
cur_thread = threading.current_thread()
return cur_thread.name | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_thread_name(self) -> Optional[str]:\n return self.thread_name",
"def get_name(thread_id):\r\n for thread in threading.enumerate():\r\n if thread.ident == thread_id:\r\n return thread.name",
"def get_uname(self):\n return Server.t_usernames.get(threading.get_ident())",
"def task_name(self) -> str:\n return self._task_name",
"def get_process_name(self):\n\n return self._args.t",
"def name_thread( cls, id, ):\n if cls.main_thread_id is None:\n y= 1/0 # cheap exception when main_thread not set up\n\n if id == cls.main_thread_id:\n ret = f\"Main\"\n else:\n ret = f\"Helper\"\n\n return ret",
"def curr_name(self):\n return self.name_stack[-1]",
"def name(self):\n try:\n return self._name\n except AttributeError:\n if self.is_task:\n try:\n return self.pos_str\n except:\n return os.path.basename(self.workdir)\n else:\n return os.path.basename(self.workdir)",
"def name(self):\n return self._job",
"def get_name():\n return __name__",
"def get_job_name(self) -> Text:\n return self._job_name",
"def job_name(self) -> str:\n return pulumi.get(self, \"job_name\")",
"def getTaskName(self):\n return self._taskName",
"def get_current_window_name():\n\n hwnd = get_current_window_hwnd()\n length = GetWindowTextLength(hwnd)\n buff = ctypes.create_unicode_buffer(length + 1)\n GetWindowText(hwnd, buff, length + 1)\n\n return buff.value",
"def get_name(self):\n\n\t\treturn self.__name",
"def getname(self):\n return self.__class__.__name__",
"def extract_current_thread(maybe_thread_str: str) -> Optional[str]:\n match = CURRENT_THREAD_RE.search(maybe_thread_str)\n if match is not None:\n return match.group(1)\n return None",
"def get_name(self):\n\t\treturn self.__name",
"def thread_id(self):\n return self._thread_id",
"def GetCurrentFuncName():\n return sys._getframe(1).f_code.co_name",
"def task_name(self):\n pass",
"def get_self_name(self) -> str:\n return self.__class__.__name__.lower()",
"def get_active_name(self):\n return self.get_name()",
"def get_name(self):\n return self.__name",
"def get_name(self):\n return self.__name",
"def get_name(self):\n return self.__name",
"def current_method_name():\n return inspect.currentframe().f_back.f_code.co_name",
"def GetCurrent():\n global ENV\n return ENV[threading.current_thread().ident]",
"def get_name(self):\n return \"{0}: \".format(self.__class__.__name__)",
"def _get_thread_id() -> int:\n # NOTICE:\n # we do not use threading.get_ident() to identify a thread, as Python recycles these identifiers\n return id(threading.current_thread())"
] | [
"0.80912495",
"0.77005726",
"0.69585633",
"0.6890283",
"0.6844813",
"0.67890924",
"0.674684",
"0.670793",
"0.6689467",
"0.668167",
"0.6630126",
"0.6606617",
"0.6594408",
"0.6587215",
"0.65800315",
"0.65798545",
"0.6551568",
"0.6512428",
"0.6499626",
"0.648431",
"0.6480289",
"0.64627475",
"0.6462688",
"0.64577925",
"0.64577925",
"0.64577925",
"0.6453045",
"0.6444775",
"0.6438291",
"0.64344996"
] | 0.90061885 | 0 |
remove duplicate keys while preserving order. optionally return values. | def find_uniq_preserve_order(orig_keys, orig_values=None):
seen = {}
keys = []
values = []
for i, item in enumerate(orig_keys):
if item in seen:
continue
seen[item] = 1
keys.append(item)
if orig_values:
values.append(orig_values[i])
return keys, values | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete_duplicate(x):\n return list(dict.fromkeys(x))",
"def _remove_duplicates(input_list):\n return list(OrderedDict.fromkeys(input_list))",
"def remove_duplicates(input_list):\n return list(dict.fromkeys(input_list))",
"def removeDups(lst):\n\n return list(dict.fromkeys(lst) )",
"def duplicates(self, x):\n return list(dict.fromkeys(x))",
"def dedup(iterable):\n return iter(OrderedDict.fromkeys(iterable))",
"def deduped(items):\n \n return list(set(items))",
"def delDoublon(values):\n\treturn list(set(values))",
"def remove_duplicate_dicts(data: List[dict]) -> List[dict]:\n return [dict(y) for y in set(tuple(x.items()) for x in data)]",
"def dedupe(items):\n seen = set()\n for item in items:\n if item not in seen:\n yield item\n seen.add(item)",
"def remove_duplicates(data):\n already_used_items = {}\n return_data = []\n\n for item in data:\n # Yes, I know that I can find used items in the return_data,\n # but HW requires this logic.\n if not already_used_items.get(item):\n return_data.append(item)\n already_used_items[item] = True\n\n return return_data",
"def unique(self, key, lst=None):\n d = self.find(key, lst)\n vals = set(d.values())\n return sorted(list(vals))",
"def _unique(iterable):\n return list(dict.fromkeys(iterable))",
"def unique(self):\n seen = {}\n result = []\n for p in map(tuple, self):\n if p not in seen:\n seen[p] = True\n result.append(p)\n return Pairs(result)",
"def remove_duplicates(words):\n\n no_duplicates = {}\n\n # adding all words to a dictionary, since dictionaries cannot contain duplicate keys.\n for word in words:\n no_duplicates[word] = None\n\n return no_duplicates.keys()",
"def remove_duplicates(self):\n cur = self.head\n prev = None\n\n dup_values = dict()\n\n while cur:\n if cur.data in dup_values:\n # Remove node:\n prev.next = cur.next\n else:\n # Have not encountered element before.\n dup_values[cur.data] = 1\n prev = cur\n cur = prev.next",
"def uniq(seq: Iterable):\n seen, result = {}, []\n for item in seq:\n if item in seen:\n continue\n seen[item] = None\n result.append(item)\n return result",
"def __removeDuplicateDictsFromList(self, listOfDicts: List[Dict[str, str]]) -> List[Dict[str, str]]:\n return list({frozenset(item.items()): item for item in listOfDicts}.values())",
"def keys_sorted_by_value_unique(d, **sort_kwargs):\n values = d.values()\n values.sort(**sort_kwargs)\n i = inverse(d)\n return [i[val] for val in values]",
"def find_remove_duplicates(list_of_values):\r\n output = []\r\n seen = set()\r\n for value in list_of_values:\r\n if value not in seen:\r\n output.append(value)\r\n seen.add(value)\r\n return output",
"def get_duplicates(lines):\n duplicates = []\n keys_checked = {}\n for line in lines:\n key, value = get_key_and_value_from_line(line=line)\n if key:\n if key in keys_checked:\n duplicates.append(u\"{key}={value}\".format(key=key, value=value))\n translation_in_list = u\"{key}={value}\".format(key=key, value=keys_checked[key])\n if translation_in_list not in duplicates:\n duplicates.append(translation_in_list)\n else:\n keys_checked[key] = value\n return duplicates",
"def Deduplicate(items):\n seen = set()\n for it in items:\n if it not in seen:\n seen.add(it)\n yield it",
"def unique(list_: List) -> List:\n return list(collections.OrderedDict.fromkeys(list_))",
"def deleteduplicates(iterable):\n seen = []\n for x in iterable:\n if x not in seen:\n yield x\n seen.append(x)",
"def _remove_duplicates(seq):\n d = {}\n for item in seq:\n item = item.lower()\n if item not in d:\n d[item] = True\n yield item",
"def _uniq(self, lst):\n h = {}\n for e in lst:\n h[e] = 1\n return sorted(h.keys())",
"def uniq(val, key=None):\n if not isinstance(val, list):\n return val\n if key is None:\n try:\n return list(set(val))\n except TypeError:\n pass\n keys = []\n values = []\n for value in val:\n try:\n thiskey = value[key]\n except:\n thiskey = repr(value)\n if thiskey not in keys:\n keys.append(thiskey)\n values.append(value)\n return values",
"def remdup_preserve_order(lst):\n val = set()\n val_add = val.add\n return [x for x in lst if not ((x in val) or val_add(x))]",
"def dedupe_list(input):\n return list(set(input))",
"def remove_duplicates_in_items(items: list, id_key: str) -> list:\n ids = {}\n new_items = []\n for item in items:\n item_id = item.get(id_key)\n if item_id not in ids:\n ids[item_id] = True\n new_items.append(item)\n\n return new_items"
] | [
"0.75429296",
"0.72203714",
"0.7036629",
"0.7017405",
"0.6760333",
"0.6731918",
"0.6579014",
"0.65164787",
"0.64843994",
"0.6472747",
"0.6425572",
"0.6410459",
"0.6388204",
"0.6381012",
"0.63454247",
"0.6309064",
"0.62704206",
"0.62684274",
"0.6259605",
"0.61603624",
"0.6158878",
"0.6149532",
"0.61385286",
"0.61254144",
"0.61216986",
"0.60641205",
"0.6054935",
"0.60513294",
"0.60300004",
"0.6008779"
] | 0.775089 | 0 |
resets server figure by deleting lines and clearing legend. | def _handle_reset(self):
stream_data = self.server.stream_data
# remove lines from graph, and reset legends
for name in stream_data:
stream_data[name]['line'].remove()
for name in self.server.axes:
self.server.axes[name].legend([]) # TODO: find a better way.
    self.server.stream_data = {} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reset(self):\n\n self.fig.clear()\n self.ax = self.fig.add_subplot(111)\n self.hasLegend.set(False)\n self.title(Graph.default_title)\n # Lines is a list of DataSet objects. The user should take care to make\n # DataSet names unique, as there is no error checking done by Graph. \n # If a DataSet line is deleted by its formal name, Graph will delete the\n # first line in the list that matches the name.\n self.lines = {}\n self.line_counter = 1",
"def plot_clear():\n plt.cla()",
"def reset(self):\n self.G = nx.Graph()\n self.form.plot_canvas.plot(self.G)",
"def clear_figure(self):\n self.figure.clf()",
"def clear(self):\n self._fig = go.Figure()",
"def reset_graph(self):\n self.sick_per_timestep = []\n self.steps = []\n self.ax.clear()\n self.ax.set_xlabel(self.xlabel)\n self.ax.set_ylabel(self.ylabel)\n self.ax.set_title(self.title)",
"def clear(self):\n self._plt.clear()\n self._layer_items = {}",
"def __del__(self):\n pyplot.clf()",
"def clear_graphs(self):\n for ax in (self.master_plot, self.time_velocity, self.time_power, self.power_velocity):\n ax.cla()",
"def _clear_drawing(self) -> None:\n self.vertices.clear()\n self.edges.clear()\n self.subplot.clear()\n self.subplot2.clear()",
"def update_figure(self):\n # if number of kinetics in model did not change\n # update just last lines\n if self.N_lines - 1 == len(self.model.spectra.keys()) * 2:\n self.dataplot.lines[-1].remove()\n self.dataplot.lines[-1].remove()\n self.draw_figure_first()\n # delete all and redraw\n else:\n n = int((self.N_lines - 1) / 2)\n for _ in range(n):\n self.dataplot.lines[-1].remove()\n self.dataplot.lines[-1].remove()\n self.draw_figure_total()\n\n self.dataplot.relim()\n\n self.dataplot.autoscale_view(True, True, True)\n\n self.draw()",
"def clear(self):\n\n # Clear\n self.axes.cla()\n try:\n self.figure.clf()\n except KeyError:\n FlatCAMApp.App.log.warning(\"KeyError in MPL figure.clf()\")\n\n # Re-build\n self.figure.add_axes(self.axes)\n self.axes.set_aspect(1)\n self.axes.grid(True)\n\n # Re-draw\n self.canvas.draw_idle()",
"def plot_finalize():\n global figure\n global axes\n\n plot_refresh()\n plt.ioff()\n plt.show()\n\n figure, axes = None, None",
"def clear(self):\n self._plots[:] = []",
"def reset(self):\n self.dims.clear()\n self.xlabels.clear()\n self.annotators.clear()\n self._figTitle = None\n self.tbmTitle = None\n self._isSubplot = False\n self._universal_xlabel = False\n self._plotter = None\n self.Nsp = 0",
"def clear_visualization(self) -> None:\n if self._drawing_handle is not None:\n sim.simAddDrawingObjectItem(self._drawing_handle, None)",
"def clearAllPlots(self):\n self.dataPoints = [[{'x': 0, 'y': 0}]]\n self.sendPreviousDataPoints()",
"def reset(self):\n try:\n self.ax.cla()\n except Exception as e:\n print 'Exception BasePlot:', e\n raise e\n \n self._plotbuffer = { pat: [0 for _ in range(self._plotlength)] for pat in self._patterns }\n self._timestampbuffer = { pat: [0 for _ in range(self._plotlength)] for pat in self._patterns }\n self.ax.set_axis_bgcolor('black')\n self.ax.set_xticks([])\n self.ax.set_yticks([])",
"def reset_graph(self):\n log.debug(\"reset graph\")\n self.auto_scale = True\n self.select_tool.action.setChecked(True)\n\n dgplot = self.main_curve_dialog.get_plot()\n dgplot.do_autoscale()\n\n dgimage = self.main_curve_dialog.get_plot()\n dgimage.do_autoscale()",
"def clear_legends(self):\n self.fig.legends[:] = []\n for ax in self.axes.flat:\n legend = ax.get_legend()\n if legend:\n legend.remove()\n return self",
"def reset(self):\r\n self.myOutputs = list()\r\n self.myPlots = list()\r\n self.pause = 0\r\n self.doMPL = False\r\n self.graphLabelsX = []\r\n self.graphLabelsY = []\r\n for i in self.xData.iterkeys():\r\n self.xData[i] = []\r\n self.yData[i] = []\r\n self.xyData[i] = []\r\n self.graphs[i] = Gnuplot(debug=0)\r\n self.figures[i] = 0\r\n self.mplFigCount = 0",
"def erase_plot(self, line_position=0):\n self.axplot.lines.pop(line_position).remove\n self.fig.canvas.draw()\n return",
"def resetGraph(self):\n self.colours = [self.uncompletedColor] * self.num_points\n self.setData(pos=self.pos, symbolBrush=self.colours, size=1, symbol=self.symbols, pxMode=False, text=self.text)",
"def redraw(self):\n dummy_figure = plt.figure()\n new_manager = dummy_figure.canvas.manager\n new_manager.canvas.figure = self.figure\n self.figure.set_canvas(new_manager.canvas)\n plt.show(block=False)",
"def redraw_figures(self):\n pass",
"def redraw_figures(self):\n pass",
"def redraw(self, **kwargs):\n #src_dict = self.data_sources\n #self.remove_sources(src_dict.keys())\n self.renderers = {}\n #self.renderers = {}\n self.figure = self.draw_figure(**kwargs)\n #self.add_sources(src_dict)\n # todo does the old figure linger on?\n self.render_sources(self.data_sources)\n self.bk_pane.object = self.figure",
"def plot_refresh():\n figure.canvas.draw()",
"def clear(self):\n self._plot_data_cache = {}\n self._outstanding_requests = {}",
"def clear(self):\n\n # Inform the user\n log.info(\"Clearing the scatter plotter ...\")"
] | [
"0.7666481",
"0.72009104",
"0.7048835",
"0.7034993",
"0.6928158",
"0.6892367",
"0.6811622",
"0.6684505",
"0.664847",
"0.6554894",
"0.65438944",
"0.653004",
"0.65285474",
"0.64676446",
"0.6457668",
"0.6457389",
"0.6455251",
"0.6446467",
"0.64300036",
"0.6376522",
"0.63337445",
"0.62947136",
"0.6294008",
"0.6293521",
"0.62560004",
"0.62560004",
"0.6237085",
"0.62195015",
"0.6203188",
"0.6189872"
] | 0.7724574 | 0 |
Updates the legend for single_axes, listing duplicate labels once. | def _handle_update_legend(self, single_axes):
# lines are bundled with an axes.
# legends are printed per axes.
# line data is in stream_data without reference to axes sets.
# for each current line, get label, get axes
# for unique axes-labels create a list to pass to legend()
artists, labels = single_axes.get_legend_handles_labels()
uniq_labels, uniq_artists = find_uniq_preserve_order(labels, artists)
leg = single_axes.legend(uniq_artists, uniq_labels,
bbox_to_anchor=(0., 0.91, 1., .09),
loc=2, borderaxespad=0.)
for text in leg.get_texts():
text.set_fontsize('small') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def legend (self, **kwargs):\n axes = self.twin_axes or self.axes\n self.mpl_legend = axes.legend (self.mpl_lines, self.labels, **kwargs)",
"def legend(self):\n if self.nplots == 1:\n lax = self.ax\n loff = 0.2\n else:\n lax = self.ax1\n loff = 0.4\n box = lax.get_position()\n\n lax.figure.subplots_adjust(bottom=loff) # make space on bottom for legend\n lax.legend(self.plots, self.labels, loc='upper center', bbox_to_anchor=(0.5, -loff), fancybox=True, shadow=True, ncol=3, prop={'size': 8})",
"def remove_legend_duplicates(figure):\n seen = []\n for n,i in enumerate(figure['data']):\n name = figure['data'][n]['name']\n if name in seen:\n figure.data[n].update(showlegend=False)\n else:\n figure.data[n].update(showlegend=True)\n seen.append(name)",
"def _declare_legend(self):\n self.legend = []\n for idx, name_el in enumerate(self.name_elements):\n if idx is not None and idx in self.axes_idx.to_first.map_idx:\n current_legend = f\"{self.name}_{name_el}\"\n for i in range(self.ocp.n_phases):\n if self.as_states:\n current_legend += f\"-{self.ocp.nlp[i].use_states_from_phase_idx}\"\n if self.as_controls:\n current_legend += f\"-{self.ocp.nlp[i].use_controls_from_phase_idx}\"\n self.legend += [current_legend]",
"def set_legend(ax):\n l = ax.legend()\n plt.setp(l.get_texts(), fontsize=8)",
"def _collect_legend(self) -> Dict[str, Any]:\n legend_labels = dict()\n for ax in self.axes_active:\n for handle, label in zip(*ax.get_legend_handles_labels()): # type: ignore\n if label in legend_labels:\n # TODO: Add warning/exception if color conflict is found\n pass\n else:\n legend_labels[label] = handle\n return legend_labels",
"def plot_one_axes(self, fig_num: int, title: str, y_label: str, labeled: np.ndarray, filled: np.ndarray,\n smoothed: np.ndarray, legend_entries: Dict[str, str]) -> matplotlib.figure.Figure:\n fig = plt.figure(fig_num)\n ax = fig.subplots(1, 1)\n labeled_lines = kine_graph_init(ax, labeled, y_label, self.frame_nums, [{'ls': '', 'marker': 'o', 'ms': 2,\n 'fillstyle': 'none', 'mew': 0.5}] * 3)\n ax.set_prop_cycle(None)\n filled_lines = kine_graph_add(ax, filled, self.frame_nums, [{'ls': '-', 'lw': 0.75}] * 3)\n ax.set_prop_cycle(None)\n smoothed_lines = kine_graph_add(ax, smoothed, self.frame_nums, [{'ls': '-'}] * 3)\n plt.tight_layout()\n fig.suptitle(title, x=0.7)\n fig.legend((labeled_lines[0], smoothed_lines[2], filled_lines[1]),\n (legend_entries['labeled'], legend_entries['smoothed'], legend_entries['filled']),\n ncol=2, handlelength=0.75, handletextpad=0.25, columnspacing=0.5, loc='upper left')\n make_interactive()\n return fig",
"def legend(self, legend):\n\n self.container['legend'] = legend",
"def modify_legend_handles(ax, **kwargs):\r\n hndls, labls = ax.get_legend_handles_labels()\r\n _hndls = []\r\n for h in hndls:\r\n _h = copy(h)\r\n _h.update(kwargs)\r\n _hndls.append(_h)\r\n return _hndls, labls",
"def _plot1d(self, axes: List[Axes], data: DataDict, style: str):\n xname = data.axes()[0]\n x = data.data_vals(xname)\n depnames = data.dependents()\n deps = [data.data_vals(d) for d in depnames]\n hasLabels = False\n\n for i, d in enumerate(depnames):\n if style == 'singlepanel' and len(depnames) > 1:\n ax = axes[0]\n lbl = data.label(d)\n ylbl = None\n hasLabels = True\n else:\n ax = axes[i]\n lbl = None\n ylbl = data.label(d)\n\n if self.plotDataType is PlotDataType.scatter1d:\n fmt = 'o'\n else:\n fmt = 'o-'\n\n ax.plot(x, deps[i], fmt, mfc='None', mew=1, lw=0.5, label=lbl)\n ax.set_xlabel(xname)\n ax.set_ylabel(ylbl)\n\n if style == 'singlepanel' and hasLabels:\n ax.legend(fontsize='small', loc=1)",
"def test_manual_legend(self):\n # Draw a random scatter plot\n random = np.random.RandomState(42)\n\n Ax, Ay = random.normal(50, 2, 100), random.normal(50, 3, 100)\n Bx, By = random.normal(42, 3, 100), random.normal(44, 1, 100)\n Cx, Cy = random.normal(20, 10, 100), random.normal(30, 1, 100)\n\n _, ax = plt.subplots()\n ax.scatter(Ax, Ay, c=\"r\", alpha=0.35, label=\"a\")\n ax.scatter(Bx, By, c=\"g\", alpha=0.35, label=\"b\")\n ax.scatter(Cx, Cy, c=\"b\", alpha=0.35, label=\"c\")\n\n # Add the manual legend\n manual_legend(\n ax, (\"a\", \"b\", \"c\"), (\"r\", \"g\", \"b\"), frameon=True, loc=\"upper left\"\n )\n\n # Assert image similarity\n self.assert_images_similar(ax=ax, tol=0.5)",
"def toggle_span_legend(self, x):\r\n self.konfig.span.set_legend(x)\r\n self.spanGraf.toggle_legend(x)",
"def add_master_legend(mp, exclude_panels=None, loc='upper center', \n exclude_labels=[], **kwargs):\n\n handles, labels = [], []\n\n if 'bbox_to_anchor' not in kwargs:\n kwargs['bbox_to_anchor'] = (0.5, 1.0)\n\n if isinstance(mp, MultiPanel):\n for k, ax in enumerate(mp.grid):\n \n if exclude_panels is not None:\n if k in exclude_panels:\n continue\n \n h, l = ax.get_legend_handles_labels()\n\n for i, lab in enumerate(l):\n if lab in labels:\n continue\n if lab in exclude_labels:\n continue \n\n handles.append(h[i])\n labels.append(l[i])\n\n mp.fig.legend(handles, labels, loc=loc, **kwargs) \n\n else:\n h, l = mp.get_legend_handles_labels()\n\n for i, lab in enumerate(l):\n if lab in labels:\n continue\n if lab in exclude_labels:\n continue\n\n handles.append(h[i])\n labels.append(l[i])\n\n mp.legend(handles, labels, loc=loc, **kwargs) \n\n return mp",
"def plot_one_axes(self, fig_num: int, title: str, y_label: str, raw: np.ndarray, smoothed: np.ndarray,\n legend_entries: Sequence[str]) -> matplotlib.figure.Figure:\n fig = plt.figure(fig_num)\n ax = fig.subplots(1, 1)\n raw_lines = kine_graph_init(ax, raw, y_label, self.frame_nums, [{'ls': ':', 'lw': 2}] * 3)\n ax.set_prop_cycle(None)\n smoothed_lines = kine_graph_add(ax, smoothed, self.frame_nums, [{'ls': '-'}] * 3)\n plt.tight_layout()\n fig.suptitle(title, x=0.7)\n legend_text = ('Raw (' + legend_entries[0] + ')', 'Smoothed (' + legend_entries[1] + ')',\n 'Smoothed (' + legend_entries[2] + ')')\n fig.legend((raw_lines[0], smoothed_lines[1], smoothed_lines[2]), legend_text, ncol=3, handlelength=0.75,\n handletextpad=0.25, columnspacing=0.5, loc='lower left')\n make_interactive()\n return fig",
"def make_final_legend():\n fig = plt.figure(figsize=(10, 1))\n me.get_final_graph_legend(fig)\n fig.savefig(\"cumul_shuttle_leg.pdf\")",
"def _draw_legend(self, labels, title=None):\n\n if len(self.pos) < 1:\n print 'Legend can not be plotted for Gleckler, as no data available!'\n return\n\n pmax = max(self.pos.values())\n\n # generate separate figure for legend\n f = plt.figure()\n ax = f.add_subplot(111, frameon=True, aspect='equal', axisbg='grey')\n f.subplots_adjust(bottom=0.25, top=0.75, left=0.25, right=0.75)\n\n for k in labels.keys():\n if k == 1:\n pos = 'top'\n elif k == 2:\n pos = 'bottom'\n elif k == 3:\n pos = 'left'\n elif k == 4:\n pos = 'right'\n else:\n raise ValueError('Can not draw Gleckler legend! Invalid position value! %s' % str(k))\n\n oldval = self.show_value\n self.show_value = False\n self.__plot_triangle(ax, np.random.random(), pos=pos)\n self.show_value = oldval\n ax.set_xticks([])\n ax.set_yticks([])\n\n fontsize = 16\n linewidth = 3\n\n for k in labels.keys():\n if k == 1: # top\n ax.annotate(labels[k], xy=(0.5, 0.9), xycoords='axes fraction', xytext=(0., 1.2), textcoords='axes fraction', arrowprops=dict(arrowstyle=\"->\", connectionstyle=\"angle3,angleA=0,angleB=-90\", linewidth=linewidth), horizontalalignment='left', size=fontsize)\n elif k == 2:\n ax.annotate(labels[k], xy=(0.5, 0.1), xycoords='axes fraction', xytext=(0., -0.3), textcoords='axes fraction', arrowprops=dict(arrowstyle=\"->\", connectionstyle=\"angle3,angleA=0,angleB=-90\", linewidth=linewidth), horizontalalignment='left', size=fontsize)\n elif k == 3:\n ax.annotate(labels[k], xy=(0.1, 0.5), xycoords='axes fraction', xytext=(-0.6, 0.2), textcoords='axes fraction', arrowprops=dict(arrowstyle=\"->\", connectionstyle=\"angle3,angleA=0,angleB=-90\", linewidth=linewidth), horizontalalignment='left', size=fontsize)\n elif k == 4:\n ax.annotate(labels[k], xy=(0.9, 0.5), xycoords='axes fraction', xytext=(1.1, 0.8), textcoords='axes fraction', arrowprops=dict(arrowstyle=\"->\", connectionstyle=\"angle3,angleA=0,angleB=-90\", linewidth=linewidth), horizontalalignment='left', size=fontsize)\n\n if title is not None:\n f.suptitle(title, size=fontsize)\n\n return f",
"def set_legend(self, **lgdkwargs):\n\n if 'loc' not in lgdkwargs.keys(): \n lgdkwargs['loc'] = 'upper right'\n \n if 'scatterpoints' not in lgdkwargs.keys(): \n lgdkwargs['scatterpoints'] = 1 \n\n self.sub.legend(**lgdkwargs) \n \n return None",
"def addLegend(ax, lines, impls, legendPos):\n\n # If there's only one piece of data being plotted, there's no need for a legend\n # since all the parameters will be in the title.\n # Compute the length (in characters) of the longest implementation.\n legendLen = max(list(map(len, impls)))\n if legendLen == 0:\n return\n legendItems = len(impls)\n fontSize = 10 if legendLen < 20 and legendItems <= 4 else 8\n prop = matplotlib.font_manager.FontProperties(size=fontSize)\n if legendPos in (\n \"best\",\n \"upper right\",\n \"upper left\",\n \"lower right\",\n \"lower left\",\n \"right\",\n \"center right\",\n \"center left\",\n \"lower center\",\n \"upper center\",\n \"center\",\n ):\n ax.legend(lines, impls, prop=prop, loc=legendPos)\n elif legendPos == \"below\":\n # Place the legend below the x-axis\n axisShrink = 0.15 if legendItems < 7 else 0.2\n box = ax.get_position()\n newHeight = box.height * (1 - axisShrink)\n ax.set_position([box.x0, box.y0 + box.height - newHeight, box.width, newHeight])\n ax.legend(\n lines,\n impls,\n prop=prop,\n bbox_to_anchor=(0, -0.1),\n borderaxespad=0.0,\n loc=\"upper left\",\n )\n else:\n # Place the legend on the right\n # Shink current axis by 15% to make room for the legend on the right.\n # If we were smarter we'd work out how much we need to shrink based on the\n # size of the legend box and so on, but this is OK for now.\n # See how much we think we need to shrink to fit in the legend\n axisShrink = 0.15 if legendLen < 20 else 0.2\n box = ax.get_position()\n ax.set_position([box.x0, box.y0, box.width * (1 - axisShrink), box.height])\n ax.legend(\n lines,\n impls,\n prop=prop,\n bbox_to_anchor=(1.02, 1),\n borderaxespad=0.0,\n loc=\"upper left\",\n )",
"def add_to_legend(axes, text, **kwargs):\n text = mpatches.Patch(color='none', label=text)\n handles, labels = axes.get_legend_handles_labels()\n if 'handles' in kwargs:\n handles.append(kwargs.pop('handles'))\n handles.append(text)\n axes.legend(\n handles=handles,\n prop=kwargs.pop('prop', {'family': 'monospace'}),\n **kwargs\n )",
"def legend(self, include: bool = None):\n \n if include == None:\n if self.hasLegend.get() == True:\n include = True\n else:\n include = False\n \n if include == True:\n labels = []\n for line in self.lines.values():\n labels.append(line.name)\n self.ax.legend(labels).set_draggable(True)\n self.hasLegend.set(True)\n else:\n self.ax.legend().remove() # This line complains to the console if no legend exists when it's removed\n self.hasLegend.set(False)\n self.canvas.draw()",
"def clear_legends(self):\n self.fig.legends[:] = []\n for ax in self.axes.flat:\n legend = ax.get_legend()\n if legend:\n legend.remove()\n return self",
"def toggle_minutni_legend(self, x):\r\n self.konfig.minutni.set_legend(x)\r\n self.minutniGraf.toggle_legend(x)",
"def plot_legend(ax):\n\tlines = 4 * [None]\n\tcolors = [\"black\", \"deepskyblue\", \"lime\", \"crimson\"]\n\tlabels = [r\"Constant $y_\\text{Sr}^\\text{CC}$\",\n\t\tr\"$y_\\text{Sr}^\\text{CC} \\propto 1 - e^{-kZ}$\",\n\t\tr\"$y_\\text{Sr}^\\text{CC} \\propto Z$\",\n\t\tr\"$y_\\text{Sr}^\\text{CC}$ = 0\"]\n\tfor i in range(4):\n\t\tlines[i] = ax.plot([1, 2], [1, 2], c = visuals.colors()[\"white\"],\n\t\t\tlabel = labels[i])[0]\n\tleg = ax.legend(loc = visuals.mpl_loc()[\"upper left\"], ncol = 1,\n\t\tbbox_to_anchor = (0.0, 0.99), frameon = False, handlelength = 0)\n\tfor i in range(4):\n\t\tlines[i].remove()\n\t\tleg.get_texts()[i].set_color(colors[i])",
"def decorate(**options):\n ax = plt.gca()\n ax.set(**options)\n\n handles, labels = ax.get_legend_handles_labels()\n if handles:\n ax.legend(handles, labels)\n\n plt.tight_layout()",
"def test_legend():\n fig = plt.figure()\n ax = fig.add_subplot(projection='ternary')\n\n for seed in [1, 9, 6, 8]:\n ax.scatter(*get_scatter_points(11, seed=seed), alpha=0.5, label=seed)\n\n ax.legend()",
"def add_labels_to_legend(_label_to_color, _ax=None):\n\tif _ax is None:\n\t\t_ax = plt.gca()\n\n\thandles, labels = _ax.get_legend_handles_labels()\n\n\tpatches = []\n\tfor label, color in _label_to_color.items():\n\t\ttry:\n\t\t\tname = dataset.class_name(label)\n\t\texcept KeyError:\n\t\t\tname = 'Label'\n\t\tpatches.append(mpatches.Patch(color=color, alpha=0.5, label=name + ' (%d)' % label))\n\t_ax.legend(handles=handles + patches)",
"def set_facetgrid_legend(facet_grid, **kwargs) -> None:\n #from matplotlib.collections import PathCollection\n legend_data = dict()\n for ax in facet_grid.axes.flat:\n handles, labels = ax.get_legend_handles_labels()\n for label, h in zip(labels, handles):\n #if type(h) is PathCollection:\n # From inspecting facet_grid._legend_data in cases where some labels\n # pointed to empty lines (the phenotype in the case where things\n # weren't behaving as I wanted), the empty lines had this empty\n # facecolor.\n facecolor = h.get_facecolor()\n if len(facecolor) == 0:\n continue\n #else:\n # print(type(h))\n # import ipdb; ipdb.set_trace()\n\n if label in legend_data:\n # TODO maybe assert a wide variety of properties of the\n # matplotlib.collections.PathCollection objects are the same\n # (line width, dash, etc)\n past_facecolor = legend_data[label].get_facecolor()\n # TODO TODO TODO fix! this is failing again 2020-08-25\n # (after re-installing requirements.txt, when running\n # kc_mix_analysis.py w/ no just -j arg)\n assert np.array_equal(facecolor, past_facecolor), \\\n f'{facecolor} != {past_facecolor}'\n else:\n legend_data[label] = h\n\n facet_grid.add_legend(legend_data, **kwargs)",
"def multi_plot(x, y, y_legend=[] ,title=\"Title\", xlab=\"x-axis\", ylab=\"y-axis\"):\n\n if y_legend==[]:\n for i in range(0, np.size(y,0)):\n plt.plot(x, y[i][:], linewidth=2)\n else:\n for i in range(0, np.size(y,0)):\n plt.plot(x, y[i][:], label=y_legend[i], linewidth=2)\n plt.legend(prop={'size': 12}) #legend details\n\n plt.title(title)\n plt.xlabel(xlab)\n plt.ylabel(ylab)",
"def _update_axislabels(self, x='x', **kwargs):\n if x not in 'xy':\n return\n # Update label on this axes\n axis = getattr(self, x + 'axis')\n axis.label.update(kwargs)\n kwargs.pop('color', None)\n\n # Defer to parent (main) axes if possible, then get the axes\n # shared by that parent\n ax = self._panel_parent or self\n ax = getattr(ax, '_share' + x) or ax\n\n # Apply to spanning axes and their panels\n axs = [ax]\n if getattr(ax.figure, '_span' + x):\n s = axis.get_label_position()[0]\n if s in 'lb':\n axs = ax._get_side_axes(s)\n for ax in axs:\n getattr(ax, x + 'axis').label.update(kwargs) # apply to main axes\n pax = getattr(ax, '_share' + x)\n if pax is not None: # apply to panel?\n getattr(pax, x + 'axis').label.update(kwargs)",
"def saliva_plot_combine_legend(fig: plt.Figure, ax: plt.Axes, saliva_types: Sequence[str], **kwargs):\n legend_loc = kwargs.get(\"legend_loc\", \"upper center\")\n legend_size = kwargs.get(\"legend_size\", \"small\")\n rect = kwargs.get(\"rect\", (0, 0, 1.0, 0.95))\n labels = [ax.get_legend_handles_labels()[1] for ax in fig.get_axes()]\n\n if all(len(label) == 1 for label in labels):\n # only one group\n handles = [ax.get_legend_handles_labels()[0] for ax in fig.get_axes()]\n handles = [h[0] for handle in handles for h in handle]\n labels = [_saliva_plot_params.get(\"legend_title\")[b] for b in saliva_types]\n ncol = len(handles)\n fig.legend(\n handles,\n labels,\n loc=legend_loc,\n ncol=ncol,\n prop={\"size\": legend_size},\n )\n else:\n handles = [ax.get_legend_handles_labels()[0] for ax in fig.get_axes()]\n handles = [h[0] for handle in handles for h in handle]\n labels = [ax.get_legend_handles_labels()[1] for ax in fig.get_axes()]\n labels = [\n \"{}: {}\".format(_saliva_plot_params.get(\"legend_title\")[b], \" - \".join(label))\n for b, label in zip(saliva_types, labels)\n ]\n ncol = len(handles)\n\n fig.legend(\n list(zip(handles[::2], handles[1::2])),\n labels,\n loc=legend_loc,\n ncol=ncol,\n numpoints=1,\n handler_map={tuple: HandlerTuple(ndivide=None)},\n prop={\"size\": legend_size},\n )\n ax.legend().remove()\n fig.tight_layout(pad=1.0, rect=rect)"
] | [
"0.663479",
"0.6562808",
"0.6533729",
"0.6064412",
"0.60395277",
"0.60051537",
"0.597655",
"0.597276",
"0.59519356",
"0.5942864",
"0.5882607",
"0.57779515",
"0.57668364",
"0.5744277",
"0.5723438",
"0.5700704",
"0.57001144",
"0.56860155",
"0.5684757",
"0.5676725",
"0.5665561",
"0.56645167",
"0.56515515",
"0.56415576",
"0.56410706",
"0.5634117",
"0.562058",
"0.5573123",
"0.5559093",
"0.5541701"
] | 0.8137135 | 0 |
creates a line on the given axes using style_args. returns line_name | def _handle_create_line(self, axes, style_args):
stream_data = self.server.stream_data
# sample data for initial create
x_data = numpy.arange(0, 2, 1)
y_data = numpy.array([0]*2)
line, = axes.plot(x_data, y_data, '-', **style_args)
# NOTE: client may set 'label'
line_name = style_args['label']
if line_name in stream_data:
# preserve old line data with a new name
stream_data[line_name+"_old_"+timestamp()] = stream_data[line_name]
# always start with no data for the new line
stream_data[line_name] = {'y': [], 'line': line, 'last_len': 0}
if FLAGS.timestamp:
stream_data[line_name]['x'] = []
return line_name | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def addLineStyle(dist, focus, axis, pupil):\n r = 0 #focus / 2\n g = 0 #np.log10(dist) / (25 / 3)\n b = 0 #axis / 20\n a = 0.4\n rgb = [r, g, b, a]\n line = {'style': '-', 'color': rgb}\n return line",
"def create_line(uniform = True, *args):\n axis = cmds.radioButtonGrp(widgets[\"lineAxisRBG\"], q=True, sl=True)\n length = cmds.floatFieldGrp(widgets[\"lineLenFFG\"], q=True, v1=True)\n density = cmds.floatFieldGrp(widgets[\"lineDenFFG\"], q=True, v1=True)\n\n numCvs = length * density\n if numCvs < 3.0: # curve needs 3 cvs (for 3 dg curve)\n numCvs = 3.0\n\n cvDist = length/numCvs\n\n # make a list of pt dist along some axis\n axisList = []\n for x in range(0,int(numCvs)+1):\n axisList.append(x)\n\n pts = []\n\n if axis == 1:\n for y in range(0, int(numCvs)+1):\n pt = [axisList[y]*cvDist, 0, 0]\n pts.append(pt)\n\n if axis == 2:\n for y in range(0, int(numCvs)+1):\n pt = [0, axisList[y]*cvDist, 0]\n pts.append(pt)\n\n if axis == 3:\n for y in range(0, int(numCvs)+1):\n pt = [0, 0, axisList[y]*cvDist]\n pts.append(pt)\t\t\t\n \n line = cmds.curve(name = \"line_01\", d=3, p=pts)\n shp = cmds.listRelatives(line, s=True)[0]\n cmds.rename(shp, \"{0}Shape\".format(line))\n if uniform:\n line = cmds.rebuildCurve(line, rebuildType = 0, spans = 0, keepRange = 0, replaceOriginal=True, end=1, keepControlPoints=0)[0]\n\n cmds.select(line, r=True)",
"def create_line(self, x1, y1, x2, y2, style=None, parent=None):\n attrs = {'d': 'M %5f %5f L %5f %5f' % (x1, y1, x2, y2)}\n return self.create_path(attrs, style, parent)",
"def my_simple_line(master, name, r, c, rsp, csp, px, py) -> object:\n line = tk.Label(master=master, text=name, anchor='w')\n line.grid(row=r, column=c, rowspan=rsp, columnspan=csp, padx=px, pady=py)\n return line",
"def setAxisLineColor(idx=-1, axes='XYZ'):\n dislin.axclrs(idx, 'LINE', axes)",
"def DrawLine(*args, **kwargs):\n return _gdi_.PseudoDC_DrawLine(*args, **kwargs)",
"def proxy_line(**kwargs):\r\n return matplotlib.lines.Line2D(range(1), range(1), **kwargs)",
"def draw_line(tick_length, tick_label=''):\n line = \"_\" * tick_length\n if tick_label:\n line += ' ' + tick_label\n print(line)",
"def make_line(self, name, selection, prescale, postscale, **kwargs):\n # Only create the line with positive pre- and postscales\n # You can disable each line by setting either to a negative value\n if prescale > 0 and postscale > 0:\n line = StrippingLine(\n name,\n selection=selection,\n prescale=prescale,\n postscale=postscale,\n **kwargs\n )\n self.registerLine(line)\n return line\n else:\n return False",
"def _createline(self):\n return self.cv.create_line(0, 0, 0, 0, fill=\"\", width=2,\n capstyle = TK.ROUND)",
"def _format_line_axes_legends(*args, **kw):\n #format all axes\n for ax in args:\n #apply legend formatting\n leg = ax.get_legend()\n if(leg):\n rec = leg.get_frame()\n if(not _leg_edge_on):\n rec.set_edgecolor('white')\n #apply axis formatting\n box = ax.get_position()\n ax.set_position([box.x0, box.y0, box.width*0.7, box.height])\n ax.set_frame_on(_ax_frameon)\n if(not _ax_ticks_on):\n ax.tick_params(axis = 'both', which = 'both',\n bottom = 'off', top = 'off', left = 'off', right = 'off')",
"def line(l, color='k', **kwargs):\n ax.plot(wfl(nth(l, 0)), hfl(nth(l, 1)), color=color, **kwargs)",
"def add_line(self, axis, value):\n\n if axis == 'x':\n self.axes.axvline(value)\n elif axis == 'y':\n self.axes.axhline(value)\n else:\n raise ValueError(\"Axis must be one of X or Y.\")",
"def _create_line_plot(experiment_param, nus, norms, ax, subtitle):\n for name in sorted(norms):\n errors = [experiment_param[nu][name] for nu in nus]\n ax.plot(nus, errors, label=name)\n\n ax.legend()\n ax.set_xticks(nus[1::2])\n ax.set_xticklabels(nus[1::2])\n ax.set_ylabel('Average error (%)', fontsize=15)\n ax.set_ylim([0,5])\n ax.set_title('Estimating {}\\n'.format(subtitle), fontsize=15)",
"def plot_line(self,x_0,y_0,x_1,y_1,col=\"black\",line_width=1,line_type=\"solid\"):\n self._fig.add_shape(\n go.layout.Shape(\n type=\"line\",\n x0=x_0,\n y0=y_0,\n x1=x_1,\n y1=y_1,\n line=dict(\n color=col,\n width=line_width,\n dash=line_type\n )\n )\n )",
"def DrawLine(*args, **kwargs):\n return _gdi_.DC_DrawLine(*args, **kwargs)",
"def _apply_lines(\n self, *args,\n stack=None, stacked=None,\n negpos=False, negcolor=None, poscolor=None,\n color=None, colors=None,\n linestyle=None, linestyles=None,\n lw=None, linewidth=None, linewidths=None,\n **kwargs\n):\n # Parse input arguments\n method = kwargs.pop('_method')\n name = method.__name__\n stack = _not_none(stack=stack, stacked=stacked)\n colors = _not_none(color=color, colors=colors)\n linestyles = _not_none(linestyle=linestyle, linestyles=linestyles)\n linewidths = _not_none(lw=lw, linewidth=linewidth, linewidths=linewidths)\n args = list(args)\n if len(args) > 3:\n raise ValueError(f'Expected 1-3 positional args, got {len(args)}.')\n if len(args) == 3 and stack:\n warnings._warn_proplot(\n f'{name}() cannot have three positional arguments with stack=True. '\n 'Ignoring second argument.'\n )\n if len(args) == 2: # empty possible\n args.insert(1, np.array([0.0])) # default base\n\n # Support \"negative\" and \"positive\" lines\n x, y1, y2, *args = args # standardized\n if not negpos:\n # Plot basic lines\n kwargs['stack'] = stack\n if colors is not None:\n kwargs['colors'] = colors\n result = method(self, x, y1, y2, *args, **kwargs)\n objs = (result,)\n else:\n # Plot negative and positive colors\n _check_negpos(name, stack=stack, colors=colors)\n y1neg, y2neg = _mask_array(y2 < y1, y1, y2)\n color = _not_none(negcolor, rc['negcolor'])\n negobj = method(self, x, y1neg, y2neg, color=color, **kwargs)\n y1pos, y2pos = _mask_array(y2 >= y1, y1, y2)\n color = _not_none(poscolor, rc['poscolor'])\n posobj = method(self, x, y1pos, y2pos, color=color, **kwargs)\n objs = result = (negobj, posobj)\n\n # Apply formatting unavailable in matplotlib\n for obj in objs:\n if linewidths is not None:\n obj.set_linewidth(linewidths) # LineCollection setters\n if linestyles is not None:\n obj.set_linestyle(linestyles)\n\n return result",
"def line(points):\n return LineString(points)",
"def HorizLine(self, parent, depth=3):\n line = sppasStaticLine(parent, orient=wx.LI_HORIZONTAL)\n line.SetMinSize(wx.Size(-1, depth))\n line.SetSize(wx.Size(-1, depth))\n line.SetPenStyle(wx.PENSTYLE_SOLID)\n line.SetDepth(depth)\n line.SetForegroundColour(self.GetForegroundColour())\n return line",
"def StrokeLine(*args, **kwargs):\n return _gdi_.GraphicsContext_StrokeLine(*args, **kwargs)",
"def line(\n self, x: Hashable | None = None, y: Hashable | None = None, **kwargs\n ) -> PlotAccessor:\n return self(kind=\"line\", x=x, y=y, **kwargs)",
"def yline(y,farright, width, dash, grayamount):\r\n aline([[0,y],[farright,y]],width, dash, grayamount)",
"def assign_linestyles(data, aes, gg):\r\n\r\n if 'linestyle' in aes:\r\n linestyle_col = aes['linestyle']\r\n possible_linestyles = np.unique(data[linestyle_col])\r\n linestyle = line_gen()\r\n linestyle_mapping = {value: six.next(linestyle) for value in possible_linestyles}\r\n data['linestyle_mapping'] = data[linestyle_col].apply(lambda x: linestyle_mapping[x])\r\n gg.add_to_legend('linestyle', {v: k for k, v in linestyle_mapping.items()})\r\n\r\n return data",
"def create_new_line(self, coords, **options):\n\n if 'fill' not in options:\n options['fill'] = self.variables.foreground_color\n if 'width' not in options:\n options['width'] = self.variables.line_width\n\n shape_id = self.create_line(*coords, **options)\n self.variables.vector_objects[str(shape_id)] = VectorObject(SHAPE_TYPES.LINE, options)\n self.variables.shape_ids.append(shape_id)\n self.set_shape_pixel_coords_from_canvas_coords(shape_id, coords)\n self.variables.current_shape_id = shape_id\n return shape_id",
"def line_style(self, line_style):\n\n self.container['line_style'] = line_style",
"def _timeseries_scatter_plot_lines(axes):\n axes.axvline(\n x=0,\n ymin=-1000,\n ymax=1000,\n color=\"grey\",\n linestyle=\"dotted\",\n alpha=0.6,\n )\n axes.axhline(\n y=0,\n xmin=-1000,\n xmax=1000,\n color=\"grey\",\n linestyle=\"dotted\",\n alpha=0.6,\n )",
"def add_line(self, x0, y0, x1, y1, style=None):\n style = self.__prepare_style(style, 'o')\n if x0 > x1:\n # swap A and B\n x1, x0 = x0, x1\n y1, y0 = y0, y1\n # get delta x, y\n dx = x1 - x0\n dy = y1 - y0\n # if a length of line is zero just add point\n if dx == 0 and dy == 0:\n if self.check_coord_in_range(x0, y0):\n self.canvas[y0][x0] = style\n return\n # when dx >= dy use fill by x-axis, and use fill by y-axis otherwise\n if abs(dx) >= abs(dy):\n for x in range(x0, x1 + 1):\n y = y0 if dx == 0 else y0 + int(round((x - x0) * dy / float((dx))))\n if self.check_coord_in_range(x, y):\n self.canvas[y][x] = style\n else:\n if y0 < y1:\n for y in range(y0, y1 + 1):\n x = x0 if dy == 0 else x0 + int(round((y - y0) * dx / float((dy))))\n if self.check_coord_in_range(x, y):\n self.canvas[y][x] = style\n else:\n for y in range(y1, y0 + 1):\n x = x0 if dy == 0 else x1 + int(round((y - y1) * dx / float((dy))))\n if self.check_coord_in_range(x, y):\n self.canvas[y][x] = style",
"def set_line_style(label, line, itr=None):\n style = get_line_style(label)\n line.set_linestyle(style.style)\n if style.marker is not None:\n line.set_marker(style.marker)\n if style.markersize is not None:\n line.set_markersize(style.markersize)\n line.set_linewidth(style.width)\n if style.dashes is not None:\n line.set_dashes(style.dashes)\n if itr is not None and itr % 2 != 0:\n line.set_linestyle('--')",
"def line(value):\r\n return '({}, {}), ({}, {})'.format(value.x1(), value.y1(), value.x2(), value.y2())",
"def draw_line(self, DISP, side:str, indizes:tuple, pink = False):\r\n offset = 1 #< Just to draw the line nicely\r\n pos = (indizes[0] - 1) * self.grid_size, indizes[1] * self.grid_size\r\n # Check if it's a pink line\r\n if pink:\r\n start_pos = pos[0], pos[1] + self.grid_size // 2\r\n end_pos = pos[0] + self.grid_size, pos[1] + self.grid_size // 2\r\n # Check if the line should be vertically. u for up\r\n elif side == 'u':\r\n start_pos = pos[0] + self.width - offset + self.grid_size // 2, pos[1] + self.grid_size // 2\r\n end_pos = pos[0] + self.grid_size + offset + self.grid_size // 2 - self.width, pos[1] + self.grid_size // 2\r\n # Check if the line should be horizontally. l for left\r\n elif side == 'l':\r\n start_pos = pos[0] + self.grid_size // 2, pos[1] + self.width - offset + self.grid_size // 2\r\n end_pos = pos[0] + self.grid_size // 2, pos[1] - self.width + self.grid_size + offset + self.grid_size // 2\r\n if not pink:\r\n pg.draw.line(DISP, Colors.colors['BLACK'], start_pos,end_pos, self.width + 2 * offset) \r\n else:\r\n pg.draw.line(DISP, Colors.colors['PINK'], start_pos,end_pos, self.width + 2 * offset)"
] | [
"0.62994033",
"0.6173251",
"0.61364174",
"0.6051291",
"0.5881077",
"0.58236307",
"0.5810255",
"0.58049214",
"0.5775597",
"0.5750251",
"0.5716448",
"0.57113814",
"0.5684895",
"0.5669201",
"0.5652392",
"0.56158364",
"0.5613863",
"0.55943674",
"0.5562825",
"0.55502826",
"0.5534071",
"0.55304205",
"0.5527313",
"0.5525373",
"0.55230933",
"0.5517411",
"0.5513424",
"0.55131274",
"0.5499167",
"0.54463416"
] | 0.7652667 | 0 |
Add a new axis, if axis_args are not already created. | def _handle_setup_axis(self, axis_args):
axis_name = axis_args['name']
axes_dict = self.server.axes
if axis_name not in [name for name, _ in axes_dict.items()]:
print "Adding a new axis:", axis_name
axis_count = len(axes_dict)
newaxis = self.server.figure.add_subplot(axis_count+1, 1, axis_count+1)
axes_dict[axis_name] = newaxis
axes_dict[axis_name].grid(True)
axes_dict[axis_name].set_xlabel(axis_args['x_label'])
axes_dict[axis_name].set_ylabel(axis_args['y_label'])
# TODO: support *.set_title("Title")
if FLAGS.logy:
axes_dict[axis_name].set_yscale('log', nonposy='clip')
if axis_count != 0:
# Resize other axes if the above wasn't the first.
axis_count = len(axes_dict)
for row,(name, _) in enumerate(axes_dict.items(), 1):
print name, axis_count, row
axes_dict[name].change_geometry(axis_count, 1, row) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _appendAxisDefinition(self, axis):\n length = len(axis)\n\n self.na_dict[\"NX\"].append(length)\n self.na_dict[\"XNAME\"].append(xarray_utils.getBestName(axis))\n\n # If only one item in axis values\n if length < 2:\n self.na_dict[\"DX\"].append(0)\n self.na_dict[\"NXDEF\"].append(length)\n self.na_dict[\"X\"].append(axis.data.tolist()) \n return\n\n incr = xarray_utils.get_interval(axis, 0, 1)\n\n for i in range(1, length):\n if (axis[i] - axis[i - 1]) != incr:\n self.na_dict[\"DX\"].append(0)\n self.na_dict[\"NXDEF\"].append(length)\n self.na_dict[\"X\"].append(axis.data.tolist())\n break\n\n else: # If did not break out of the loop\n max_length = length\n if length > 3: \n max_length = 3\n\n self.na_dict[\"DX\"].append(incr)\n self.na_dict[\"NXDEF\"].append(max_length)\n self.na_dict[\"X\"].append(axis[:max_length])",
"def addExtraAxis(slab,newaxis=None,axis=0,verbose=False):\n\n import cdms2 as cdms\n import MV2 as MV\n\n if newaxis is None:\n newaxis=cdms.createAxis([1,])\n newaxis.units=''\n\n # add new axis to axis list of input <slab>\n axislist=slab.getAxisList()\n axislist.insert(axis,newaxis)\n\n #----------------Reshape----------------\n shape=list(slab.shape)\n shape.insert(axis,len(newaxis))\n slab2=MV.reshape(slab,shape)\n\n #------------Create variable------------\n att_dict=attribute_obj2dict(slab)\n slab2=cdms.createVariable(slab2,axes=axislist,attributes=att_dict,\\\n typecode='f')\n slab2.id=slab.id\n\n if verbose:\n print('\\n# <addExtraAxis>: Originial variable shape:',slab.shape)\n print('# <addExtraAxis>: New variable shape:',slab2.shape)\n\n return slab2",
"def add_to(self, axisloads, writer):\n WC.ArgsConstraint.add_to(self, axisloads, writer)\n axisloads.add_to_mesh_change(self.name)",
"def add_to(self, axisloads, writer):\n WC.ArgsConstraint.add_to(self, axisloads, writer)\n axisloads.add_to_mesh_change(self.name)",
"def add_to(self, axisloads, writer):\n WC.ArgsConstraint.add_to(self, axisloads, writer)\n axisloads.add_to_mesh_change(self.name)",
"def addAxis(self, tag, name, minimum, maximum, default, warpMap=None):\n axisElement = ET.Element(\"axis\")\n axisElement.attrib['name'] = name\n axisElement.attrib['tag'] = tag\n axisElement.attrib['minimum'] = str(minimum)\n axisElement.attrib['maximum'] = str(maximum)\n axisElement.attrib['default'] = str(default)\n if warpMap is not None:\n for a, b in warpMap:\n warpPt = ET.Element(\"map\")\n warpPt.attrib['input'] = str(a)\n warpPt.attrib['output'] = str(b)\n axisElement.append(warpPt)\n self.root.findall('.axes')[0].append(axisElement)",
"def add_axis(fig, axis_range, ax_label, ax_name=None):\n if fig.extra_x_ranges is None:\n fig.extra_x_ranges = {}\n\n if ax_name is None:\n ax_name = \"p_extra_xaxis\"\n\n fig.extra_x_ranges[ax_name] = Range1d(start=axis_range[0],\n end=axis_range[-1])\n\n linaxis = LinearAxis(x_range_name=ax_name, axis_label=ax_label,\n axis_label_text_font=\"monospace\",\n axis_label_text_font_style=\"normal\",\n axis_label_text_font_size=\"8pt\",\n major_tick_out=2,\n major_tick_in=2,\n major_label_text_font_size=\"8pt\",\n minor_tick_line_width=1,\n major_label_orientation='horizontal',\n name=ax_name,\n ticker=BasicTicker(desired_num_ticks=8))\n fig.add_layout(linaxis, 'above')\n return fig",
"def addAxes(self):\n numDims = len(self.relation.fieldNames) - 1\n angle = 360 / numDims\n axisDomains = self.relation.axisDomains\n for i in range(numDims):\n axis = PlotAxis(self)\n self.scene().addItem(axis)\n if self.axisAngles and i < len(self.axisAngles):\n axis.setRotation(self.axisAngles[i])\n else:\n axis.setRotation(angle * i)\n self.axes.append(axis)\n\n domain = axisDomains[i]\n text = PlotAxisLabel(\"{}\\n[{:.2f},{:.2f}]\".format(self.relation.fieldNames[i], domain[0], domain[1]))\n text.setFont(self.labelFont)\n self.axisLabels.append(text)\n text.setParentItem(axis)",
"def upAxis(*args, axis: Union[AnyStr, bool]=\"\", rotateView: bool=True, q=True, query=True,\n **kwargs)->Union[None, Any]:\n pass",
"def _add_new_ax(self, name=None):\n if name in self.ax_names:\n return self.get_ax_by_name(name)\n\n self.ncols += 1\n for i, (ax, ax_name) in enumerate(zip(self.fig.axes, self.ax_names)):\n #print(f'Changing ax {ax_name} geom to: {(1,self.ncols,i+1)}')\n ax.change_geometry(1, self.ncols, i+1)\n\n new_ax = self.fig.add_subplot(1, self.ncols, self.ncols)\n self.ax.append(new_ax)\n self.ax_names.append(name)\n\n return new_ax",
"def add_argument(self, *args: Any, **kwargs: Any) -> None:\n self._arguments.append((args, kwargs))",
"def add_axes(self, ax):\n self._canvas.cd()\n self._axes = ax\n self._canvas.Modified()",
"def add_argument(self, *args, **kwargs):\n self.arguments[args[0]] = self._Argument(*args, **kwargs)",
"def __init__(self, axes=()):\n self._axes = []\n self._dimension = 0\n for axis in axes:\n self.add_axis(axis)",
"def _set_axis(axis):\n\n def axis_setter(self, labels):\n new_qc = DataFrameDefault.register(pandas.DataFrame.set_axis)(\n self, axis=axis, labels=labels\n )\n self.__dict__.update(new_qc.__dict__)\n\n return axis_setter",
"def _add_pos_args(self, *args):\n arg_array = self._body.setdefault('args', [])\n arg_array.extend(args)",
"def put_along_axis(x1, indices, values, axis):\n\n x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False)\n indices_desc = dpnp.get_dpnp_descriptor(\n indices, copy_when_nondefault_queue=False\n )\n values_desc = dpnp.get_dpnp_descriptor(\n values, copy_when_nondefault_queue=False\n )\n if x1_desc and indices_desc and values_desc:\n if x1_desc.ndim != indices_desc.ndim:\n pass\n elif not isinstance(axis, int):\n pass\n elif axis >= x1_desc.ndim:\n pass\n elif indices_desc.size != values_desc.size:\n pass\n else:\n return dpnp_put_along_axis(x1_desc, indices_desc, values_desc, axis)\n\n return call_origin(\n numpy.put_along_axis, x1, indices, values, axis, dpnp_inplace=True\n )",
"def add_args(self):\n raise NotImplementedError",
"def add_arg(self, *args: Any, **kwargs: Any) -> None:\n # Normalize\n if len(args) == 1 and isinstance(args[0], Argument):\n arg = args[0]\n else:\n arg = Argument(*args, **kwargs)\n # Uniqueness constraint: no name collisions\n for name in arg.names:\n if name in self.args:\n msg = \"Tried to add an argument named {!r} but one already exists!\" # noqa\n raise ValueError(msg.format(name))\n # First name used as \"main\" name for purposes of aliasing\n main = arg.names[0] # NOT arg.name\n self.args[main] = arg\n # Note positionals in distinct, ordered list attribute\n if arg.positional:\n self.positional_args.append(arg)\n # Add names & nicknames to flags, args\n self.flags[to_flag(main)] = arg\n for name in arg.nicknames:\n self.args.alias(name, to=main)\n self.flags.alias(to_flag(name), to=to_flag(main))\n # Add attr_name to args, but not flags\n if arg.attr_name:\n self.args.alias(arg.attr_name, to=main)\n # Add to inverse_flags if required\n if arg.kind == bool and arg.default is True:\n # Invert the 'main' flag name here, which will be a dashed version\n # of the primary argument name if underscore-to-dash transformation\n # occurred.\n inverse_name = to_flag(\"no-{}\".format(main))\n self.inverse_flags[inverse_name] = to_flag(main)",
"def __init__(self, axis=-1):\n self.axis = axis",
"def series_axis(self, series_axis):\n\n self.container['series_axis'] = series_axis",
"def add_argument(*args, **kwargs):\n return _Argument(args, frozenset(kwargs.items()))",
"def category_axis(self, category_axis):\n\n self.container['category_axis'] = category_axis",
"def _increment_dims_right(self, axis: int = None):\n if axis is None:\n axis = self.last_used\n self.set_current_step(axis, self.current_step[axis] + 1)",
"def _add_argument(self, args=''):\n\n sys.argv += args.split(' ')",
"def __init__(self, *args: Any, **kwargs: Any) -> None:\n\n super().__init__(*args, **kwargs)\n if len(args) and \"\" in self.axes.name:\n raise RuntimeError(\n f\"Each axes in the {self.__class__.__name__} instance should have a name\"\n )",
"def add_args(*args):\n num_args = len(args)\n for i in range(0,num_args,2):\n add_arg(args[i], args[i+1])",
"def _increment_dims_left(self, axis: int = None):\n if axis is None:\n axis = self.last_used\n self.set_current_step(axis, self.current_step[axis] - 1)",
"def __init__(self, *args: Any, **kwargs: Any) -> None:\n\n super().__init__(*args, **kwargs)\n if args and \"\" in self.axes.name:\n raise RuntimeError(\n f\"Each axes in the {self.__class__.__name__} instance should have a name\"\n )",
"def add_argument(self, *args, **kwds):\n # no argument to add to stack\n if not args:\n return self\n\n # consume Command objects if exists\n if isinstance(args[0], Command):\n self._arg_stack.extend(args[0]._arg_stack)\n target = args[0]\n return self.add_argument(*args[1:], **kwds)\n\n # stack args, kwds to pass to parser.add_argument\n self._arg_stack.append(('normal', args, kwds))\n return self"
] | [
"0.66521394",
"0.6354571",
"0.62128806",
"0.62128806",
"0.62128806",
"0.6071286",
"0.59581596",
"0.58640105",
"0.5820328",
"0.5758695",
"0.56854737",
"0.5659909",
"0.5653431",
"0.56397295",
"0.5619788",
"0.56062645",
"0.5583612",
"0.5512482",
"0.54416555",
"0.54299825",
"0.5412411",
"0.5393915",
"0.5347431",
"0.5325681",
"0.5320823",
"0.52917093",
"0.529135",
"0.5258208",
"0.52554166",
"0.52323735"
] | 0.7232592 | 0 |
create instance data for figure, axes, and stream data. | def setup(self, flags):
self.figure = pylab.figure(1)
self.axes = {}
self.stream_data = {}
self.flags = flags | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _make_data(self):\n pdf_datasets_all = make_pdf_datasets(self.pdf_list, self.xlims, self.ylims, self.tlims, self.dims, 9)\n self.pdf_dataset = np.concatenate(pdf_datasets_all, axis = 0)\n self.PDE_dataset = make_PDE_dataset(self.num_collocation, self.xlims, self.ylims, self.tlims, self.dims)\n self.BC_dataset = make_BC_dataset(self.num_BC, self.xlims, self.ylims, self.tlims, self.dims)",
"def initialiseData(self):\n self.currentPosition = 0\n self.xs = scipy.linspace(0.0, self.numberOfPoints*self.resolution, self.numberOfPoints)\n self.cursorXS = self.getCurrentPositionArray()\n self.cursorVertical = scipy.array([self.verticalLimit,0.0])\n self.array0 = scipy.zeros(self.numberOfPoints)\n self.array1 = scipy.zeros(self.numberOfPoints)\n self.array2 = scipy.zeros(self.numberOfPoints)\n self.array3 = scipy.zeros(self.numberOfPoints)\n self.array4 = scipy.zeros(self.numberOfPoints)\n self.array5 = scipy.zeros(self.numberOfPoints)\n self.array6 = scipy.zeros(self.numberOfPoints)\n self.array7 = scipy.zeros(self.numberOfPoints)\n self.channels = [self.array0,self.array1,self.array2,self.array3,\n self.array4,self.array5,self.array6,self.array7]\n self.arrayPlotData = chaco.ArrayPlotData(xs=self.xs,channel0=self.array0,channel1=self.array1,\n channel2=self.array2,channel3=self.array3,\n channel4=self.array4,channel5=self.array5,\n channel6=self.array6,channel7=self.array7,\n cursorXS = self.cursorXS, cursorVertical=self.cursorVertical)#will be the ArrayPlotData We need",
"def __init__(self):\n super().__init__()\n\n # general attributes\n self.printTag = 'OUTSTREAM PLOT'\n self.options = {} # outstreaming options # no addl info from original developer\n self.counter = 0 # keeps track of how many times the same plot has been plotted\n self.dim = None # default plot is 2D\n self.sourceName = [] # list of source names\n self.sourceData = None # source of data\n self.outStreamTypes = [] # list of the outstream types\n self.destinations = None # where plots should go (screen, file, etc.)\n\n # plot types key is data dimension, value is list of available plot types\n self.availableOutStreamTypes = {2: ['scatter',\n 'line',\n 'histogram',\n 'stem',\n 'step',\n 'pseudocolor',\n 'dataMining',\n 'contour',\n 'filledContour'],\n 3: ['scatter',\n 'line',\n 'histogram',\n 'stem',\n 'surface',\n 'wireframe',\n 'tri-surface',\n 'contour',\n 'filledContour']}\n\n # interpolators that can be used in plots\n self.availableInterpolators = ['nearest',\n 'linear',\n 'cubic',\n 'multiquadric',\n 'inverse',\n 'gaussian',\n 'Rbflinear',\n 'Rbfcubic',\n 'quintic',\n 'thin_plate']\n\n # plot components\n self.fig = None # figure\n self.ax = None # axes\n self.actPlot = None # plot action, ie., ax.plot()\n self.gridSpace = None # subplot setup\n self.actcm = None # colormap\n self.xCoordinates = None # x coordinate name\n self.yCoordinates = None # y coordinate name\n self.zCoordinates = None # z coordinate name\n self.xValues = None # dictionary of x values\n self.yValues = None # dictionary of y values\n self.zValues = None # dictionary of z values\n self.colorMapCoordinates = {} # color map coordinates\n self.colorMapValues = {} # color map values\n\n # For the data-mining plot, I think?\n self.clusterLabels = None\n self.clusterValues = None\n\n # Gaussian Mixtures\n self.mixtureLabels = None\n self.mixtureValues = None\n self.mixtureMeans = None\n self.mixtureCovars = None",
"def __init__(self, shape, data):\n self.shape = shape\n self.data = data",
"def __init__(self, datafiles, plotter):\n self.datafiles = datafiles\n self.datasets = dict()\n self.plotter = plotter",
"def __init__(self, dat):\n self.data = dat",
"def __init__(self):\n\n data_extract=DataExtracter()\n self.data = tuple()",
"def _init_data(self) -> None:\n self.dtype = dict()\n self.shape = dict()\n self.size = dict()\n self.attrs = dict()\n self.data_ptr = dict()\n\n if self.mode == 'r':\n for k in self.fp.keys():\n self.dtype[k] = self.fp[k].dtype\n self.shape[k] = self.fp[k].shape\n self.size[k] = self.fp[k].shape[0]\n self.data_ptr[k] = 0",
"def init(self, data_len):\n self._t = 0\n self._data_len = data_len\n self._data = np.empty((data_len, 0))\n self._plots = [self._ax.plot([], [], '.', markersize=4, color='black', \n alpha=self._alpha)[0] for _ in range(data_len)]\n\n self._init = True",
"def __init__(self):\n self.data_set = []\n self.finalized_data = LogData()",
"def __init__(self, data):\n self.data = data\n self.columns = Columns(data)\n self.rows = Rows(data)",
"def __init__(self, data):\n self.data = data\n return",
"def __init__(self, data=None):\n self.data = data",
"def __init__(self):\n self.__dataset = None",
"def __init__(self, data):\n self.data = data",
"def __init__(self, data):\n self.data = data",
"def __init__(self, data):\n self.data = data",
"def __init__(self, data):\n self.data = data",
"def __init__(self, dataset: ds.Dataset, settings):\r\n self.dataset = dataset\r\n self.settings = settings\r\n\r\n self.visualizer = visualizer.Visualizer()",
"def __init__(self, ds=None, **kwargs) :\n self._name = self.__class__.__name__\n print('In %s.__init__' % self._name)\n\n HexDataIO.__init__(self, **kwargs)\n\n DIO = self\n if ds is None :\n DIO.open_input_data(self.DSNAME, **kwargs)\n else :\n DIO.use_psana_dataset(ds, pbits=0o377 if self.VERBOSE else 0)\n \n self._init_calib_and_sorter()\n\n self.t0_sec = self.t1_sec = time()",
"def __init__(self, x, y, data):\n super().__init__(x=x, y=y, data=data, has_analytic_ft=False)\n self._ee = {}\n self._mtf = None\n self._nu_p = None\n self._dnx = None\n self._dny = None",
"def __init__(self, args, data_path, data_dir, device, log, x_shape):\r\n self._args = args\r\n self._data_path = data_path\r\n self._data_dir = data_dir\r\n self._device = device\r\n self._x_shape = x_shape\r\n self._log = log",
"def __init__(self, dataset):\n self._dataset = dataset",
"def __init__(self, xy, **kwds):\n self.data = xy",
"def __init__(self, data, logger):\n self.data = data\n self.logger = logger",
"def __init__(self, data):\n # check if dataset contains time information\n # (fetched from bootloader storage)\n if len(data) == 61:\n (_, seconds, minutes, hours, days, months, years) = struct.unpack(\n '<55sBBBBBB', data)\n self.date = datetime(2000 + years, months, days, hours, minutes,\n seconds)\n\n # Only parse preceding data\n data = data[:55]\n power = [0, 0]\n kWh = [0, 0]\n MWh = [0, 0]\n (_, digital, speed, active, power[0], kWh[0], MWh[0], power[1], kWh[1],\n MWh[1]) = struct.unpack('<32sH4sBLHHLHH', data)\n\n analog = struct.unpack(\n '<{}{}'.format('H' * 16, 'x' * (len(data) - 32)), data)\n\n self.analog = {}\n for channel in range(0, 16):\n self.analog[channel + 1] = round(\n self._convert_analog(analog[channel]), 3)\n\n self.digital = {}\n for channel in range(0, 16):\n self.digital[channel + 1] = self._convert_digital(digital, channel)\n\n '''\n self.speed = {}\n for channel in range(0, 4):\n self.speed[channel + 1] = round(\n self._convert_speed(speed[channel]), 3)\n \n\n self.energy = {}\n for channel in range(0, 2):\n self.energy[channel + 1] = round(\n self._convert_energy(MWh[channel], kWh[channel], active,\n channel), 3)\n \n\n self.power = {}\n for channel in range(0, 2):\n self.power[channel + 1] = round(\n self._convert_power(power[channel], active, channel), 3)\n '''",
"def __init__(self, data, chunksize, axis, **kwargs):\n\n self.data = data\n self._chunksize = int(chunksize)\n self.axis = axis\n self.kwargs = kwargs",
"def __init__(self, data, data_class, projection_dim=2):\n # data\n self.data = data\n self.data_class = data_class\n self.data_ninstances = data.shape[0]\n self.data_dim = data.shape[1]\n # projection\n self.projection = np.zeros((self.data_ninstances, projection_dim))\n self.projection_dim = projection_dim",
"def __init__(self, data):\n\n self.data = data\n self.calculator = Calculator(descriptors, ignore_3D=True)\n self.described_molecules = self.featurize()",
"def __init__(self):\n self.reader = vtk.vtkImageData()\n\n self.dims = self.reader.GetDimensions()\n self.bounds = self.reader.GetBounds()\n self.spacing = self.reader.GetSpacing()\n self.origin = self.reader.GetOrigin()\n self.value_range = self.reader.GetScalarRange()\n\n # self.plane_widget_x = vtk.vtkImagePlaneWidget()\n # self.plane_widget_y = vtk.vtkImagePlaneWidget()\n # self.plane_widget_z = vtk.vtkImagePlaneWidget()\n\n self.flag_read = False"
] | [
"0.69154173",
"0.6703749",
"0.6548121",
"0.64630646",
"0.63672024",
"0.63393867",
"0.6333037",
"0.63126576",
"0.63034093",
"0.6290425",
"0.6271378",
"0.626019",
"0.62436026",
"0.6238506",
"0.6222409",
"0.6222409",
"0.6222409",
"0.6222409",
"0.62218726",
"0.6218369",
"0.6214298",
"0.6166581",
"0.614592",
"0.6138107",
"0.6137707",
"0.61060166",
"0.6105074",
"0.60878736",
"0.60649186",
"0.605243"
] | 0.67423743 | 1 |
Return the last 5 published polls(Not including those to be published in the future) that have at least 2 choices | def get_queryset(self):
#Old get_queryset() method.
#Return last 5 published polls
#return Poll.objects.order_by('-pub_date')[:5]
#New get_queryset() method.
#return Poll.objects.filter(pub_date__lte=timezone.now()).order_by('-pub_date')[:5]
return Poll.objects.annotate(num_choices=Count('choice')).filter(pub_date__lte=timezone.now(), num_choices__gte=2).order_by('-pub_date')[:5] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_queryset(self):\n #return Poll.objects.filter(pub_date__lte=timezone.now())\n return Poll.objects.annotate(num_choices=Count('choice')).filter(pub_date__lte=timezone.now(), num_choices__gte=2)",
"def get_queryset(self):\n #.1 below code was showing future poll/questions\n #.1 return Question.objects.order_by('-pub_date')[:5]\n\n #re-defining\n \"\"\"\n Return the last five published questions (not including those set to be\n published in the future).\n \"\"\" \n #imported timezone\n \n return Question.objects.filter(pub_date__lte=timezone.now()).order_by('-pub_date')[:5]",
"def get_queryset(self):\n return Poll.objects.order_by('-pub_date')[:5]",
"def get_queryset(self):\n # the filter says that it only returns those w/ the pub_date\n # less or equal to timezone.now() (earlier or now)\n questions = Question.objects.filter(\n pub_date__lte=timezone.now()\n ).order_by('-pub_date')\n\n excludes = []\n for question in questions:\n if not question.choice_set.all().exists():\n excludes.append(question.id)\n \n return Question.objects.exclude(pk__in=excludes).filter(\n pub_date__lte=timezone.now()\n ).order_by('-pub_date')[:5]",
"def latest_question(questions):\n return questions.order_by('-pub_date')[:5]",
"def get_queryset(self):\n\t\t# version 1: \"\"\"Return the last five published questions.\"\"\"\n\t\t# version 1: # return Question.objects.order_by('-pub_date')[:5]\n\t\treturn Question.objects.filter(pub_date__lte=timezone.now()).order_by('-pub_date')[:5]",
"def result_poll(votes):\n return sum(votes) >= 2 / 3 * len(votes)",
"def get_five_latest(self):\r\n selection = []\r\n sorted(self.tweets, key=lambda tweet: tweet.date, reverse=True)\r\n amount = 5\r\n if self.get_length() < 5:\r\n amount = self.get_length()\r\n for i in range(amount):\r\n selection.append(self.tweets[i])\r\n return selection",
"def get_best_five(self):\n return sorted(self.speakers.iteritems(),\n key=lambda (key, val): (val, key),\n reverse=True)[:5]",
"def get_queryset(self):\n\t\treturn Question.objects.filter(pub_date__lte=timezone.now()).order_by('-pub_date')[:5]",
"def get_queryset(self):\n\t\treturn Question.objects.filter(pub_date__lte=timezone.now()).order_by('-pub_date')[:5]",
"def get_queryset(self):\n return Question.objects.filter(pub_date__lte=timezone.now()).order_by(\n \"-pub_date\"\n )[:5]",
"def get_queryset(self):\n\t\treturn Question.objects.order_by('-pub_date')[:5]",
"def get_queryset(self):\n return Question.objects.filter(pub_date__lte=timezone.now()).order_by('-pub_date')[:5]",
"def get_queryset(self):\n return Question.objects.filter(pub_date__lte=timezone.now()).order_by('-pub_date')[:5]",
"def get_queryset(self):\n return Question.objects.filter(\n pub_date__lte=timezone.now()\n ).order_by('-pub_date')[:5]",
"def get_queryset(self):\n return Question.objects.order_by('-pub_date')[:5]",
"def get_queryset(self):\n return Question.objects.order_by('-pub_date')[:5]",
"def get_queryset(self):\n return Question.objects.order_by('-pub_date')[:5]",
"def get_queryset(self):\n return Question.objects.order_by('-pub_date')[:5]",
"def get_queryset(self):\n return Question.objects.order_by('-pub_date')[:5]",
"def get_queryset(self):\n return Question.objects.order_by('-pub_date')[:5]",
"def test_question_with_out_choices(self):\n create_question(question_text='Question with out choices', days=0, choices=[])\n response = self.client.get(reverse('polls:index'))\n self.assertEqual(response.status_code, 200)\n self.assertQuerysetEqual(response.context['latest_questions_list'], [])",
"def get_queryset(self):\n return Question.objects.filter(\n pub_date__lte=timezone.now()\n ).order_by('-pub_date')[:5]",
"def get_queryset(self):\n return Question.objects.filter(\n pub_date__lte=timezone.now()\n ).order_by('-pub_date')[:5]",
"def get_queryset(self):\n return Question.objects.filter(\n pub_date__lte=timezone.now()\n ).order_by('-pub_date')[:5]",
"def get_queryset(self):\n return Question.objects.filter(\n pub_date__lte=timezone.now()\n ).order_by('-pub_date')[:5]",
"def get_queryset(self):\n return Question.objects.filter(\n pub_date__lte=timezone.now()\n ).order_by('-pub_date')[:5]",
"def test_many_past_questions(self):\n create_question(question_text=\"Past question 1\", days=-20)\n create_question(question_text=\"Past question 2\", days=-30)\n res = self.client.get(reverse(\"polls:index\"))\n self.assertEqual(res.status_code, 200)\n self.assertQuerysetEqual(\n res.context[\"latest_questions\"],\n [\"<Question: Past question 1>\", \"<Question: Past question 2>\"],\n )",
"def get_queryset(self):\n\n return Question.objects.filter(\n pub_date__lte = timezone.now()\n ).order_by('-pub_date')[:5]"
] | [
"0.66441554",
"0.6153953",
"0.61504656",
"0.5914476",
"0.59008074",
"0.5822036",
"0.58044267",
"0.56992376",
"0.5674518",
"0.56489867",
"0.56489867",
"0.5606918",
"0.5585309",
"0.55579126",
"0.55579126",
"0.5490999",
"0.5477059",
"0.5477059",
"0.5477059",
"0.5477059",
"0.5477059",
"0.5477059",
"0.54471785",
"0.5435236",
"0.5435236",
"0.5435236",
"0.5435236",
"0.54341054",
"0.5378102",
"0.53755856"
] | 0.64896417 | 1 |
Constructor for decisions having just the low and high value | def __init__(self, low, high):
self.low = low
self.high = high | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, low_score=0, high_score=0):\n self.low_score = low_score\n self.high_score = high_score",
"def __init__(self):\n self.low = []\n self.high = []",
"def __init__(self):\n self.high_low = []",
"def __init__(self, min_val, max_val):\n self.values = (min_val, max_val)",
"def __init__(self, min_val, max_val):\n self.values = (min_val, max_val)",
"def __init__(self, min_val, max_val):\n self.values = (min_val, max_val)",
"def __init__(self, min_val, max_val):\n self.values = (min_val, max_val)",
"def __init__(self, rng, low, high):\n\t\tself.rng = rng\n\t\tself.low = low\n\t\tself.high = high",
"def __init__(self, low, high):\n self.low, self.high = low, high\n self.__diff = self.high - self.low\n if self.low >= self.high:\n raise ValueError(\n \"Invalid ordering of low and high dimensions:\\t{} >= {}\".format(\n self.low, self.high\n )\n )",
"def __init__(self, allowable_min, allowable_max):\n self.allowable_min = allowable_min\n self.allowable_max = allowable_max\n self.value = None\n # Do it this way because we'll override in reset\n self.min_found = None\n self.max_found = None\n self.avg_found = None\n self.count = 0\n\n # override reset for the different data types\n self.reset()",
"def __init__(self, low, high, step_name, variable_name):\n super().__init__(step_name, variable_name, list(range(low, high + 1)))\n self.low = min(low, high)\n self.high = max(low, high)",
"def __init__(self, low, high, step_name, variable_name):\n super().__init__(step_name, variable_name, sp_uniform(low, high - low))\n self.low = min(low, high)\n self.high = max(low, high)",
"def __init__(self, low, high):\n\n\t\tself.__low = np.array(low).reshape(1, -1)\n\t\tself.__high = np.array(high).reshape(1, -1)\n\t\tself.__ndims = self.__low.size\n\t\tself.__range = self.__high - self.__low",
"def __init__(self):\n super().__init__()\n self.low = 0.0\n self.high = 1.0\n self.alpha = 0.0\n self.beta = 0.0\n self.type = 'Beta'\n self.distType = 'Continuous'\n self.hasInfiniteBound = True\n self.compatibleQuadrature.append('Jacobi')\n self.compatibleQuadrature.append('CDF')\n self.preferredQuadrature = 'Jacobi'\n self.preferredPolynomials = 'Jacobi'",
"def __init__(self, values: dict):\n self.options = [\n \"lower\",\n \"regression\",\n \"upper\"\n ]",
"def __init__(self, threshold: float = 0.3, initial_val: float = 0.0) -> None:\n self.threshold = threshold\n self.initial_val = initial_val",
"def __init__(self):\n self.data=0\n self.greater=None\n self.lesser=None\n self.classCounter=[0,0,0,0]\n self.leaf=False\n self.split=None",
"def __init__(self, limits):\n\t\tself.low = limits[0]\n\t\tself.high = limits[1]\n\t\tself.squares = []\n\t\tself.square_palindromes = []\n\t\tself.fair_squares = []",
"def __init__(self, lower=None, upper=None, param=None):\n\n if param is not None:\n\n self.lower = param.lower\n self.upper = param.upper\n self.var = param.var\n\n else:\n\n if lower > upper:\n raise Exception(f'{lower} > {upper}') \n\n self.lower = lower\n self.upper = upper\n self.var = 0.5 * (upper - lower)\n\n self.val = cap( (self.upper-self.lower) * random.random() + self.lower, self.lower, self.upper )",
"def __init__(self,\n low,\n high,\n clipping_lower_bound=-np.inf,\n clipping_upper_bound=np.inf):\n super().__init__()\n self._low = low\n self._high = high\n self._clipping_lower_bound = clipping_lower_bound\n self._clipping_upper_bound = clipping_upper_bound",
"def __init__(self):\n\t\tself.upper, self.lower = 0,0\n\t\tself.timestamp = 0",
"def __init__(self, values=None, min_value=None, max_value=None):\n if values is not None:\n # If list if provided, use it to determine min and max values\n self.min = min(values)\n self.max = max(values)\n\n else:\n self.min = min_value\n self.max = max_value",
"def __init__(self, unique_values, is_categorical):\n\n self.is_categorical = is_categorical\n self.is_binary = len(unique_values) == 2\n self.unique_values = unique_values\n self.min = min(unique_values)\n self.max = max(unique_values)",
"def __init__(self):\n self.counts = [0] * 10\n self.values = [0] * 10\n self.ucb_values = [0] * 10\n self.minmax = 0",
"def __init__(self, value):\n self.value = max(min(value,1.0),-1.0)",
"def __init__(self, value):\n self.value = max(min(value,1.0),-1.0)",
"def __init__(\n self, low=None, high=None, exclude_low=False, exclude_high=False\n ):\n vtype = type(high)\n if (low is not None) and (vtype is not float):\n vtype = type(low)\n if vtype not in RangeTypes:\n raise TraitError(\n \"TraitRange can only be use for int, long or \"\n \"float values, but a value of type %s was \"\n \"specified.\" % vtype\n )\n if vtype is float:\n self.validate = self.float_validate\n kind = 4\n self._type_desc = \"a floating point number\"\n if low is not None:\n low = float(low)\n if high is not None:\n high = float(high)\n elif vtype is LONG_TYPE:\n self.validate = self.long_validate\n self._type_desc = \"a long integer\"\n if low is not None:\n low = LONG_TYPE(low)\n if high is not None:\n high = LONG_TYPE(high)\n else:\n self.validate = self.int_validate\n kind = 3\n self._type_desc = \"an integer\"\n if low is not None:\n low = int(low)\n if high is not None:\n high = int(high)\n exclude_mask = 0\n if exclude_low:\n exclude_mask |= 1\n if exclude_high:\n exclude_mask |= 2\n if vtype is not LONG_TYPE:\n self.fast_validate = (kind, low, high, exclude_mask)\n\n # Assign type-corrected arguments to handler attributes\n self._low = low\n self._high = high\n self._exclude_low = exclude_low\n self._exclude_high = exclude_high",
"def __init__(self, factor: float = 0.5, threshold: float = 0.3, initial_val: float = 0.0) -> None:\n self.factor = factor\n self.threshold = threshold\n self.initial_val = initial_val",
"def __init__( # pylint: disable=too-many-arguments\n self,\n min: Optional[float] = None,\n max: Optional[float] = None,\n step: Optional[int] = None, # pylint: disable=redefined-outer-name\n include_min: bool = True,\n include_max: bool = True,\n ) -> None:\n #: The optional minimal allowed value.\n self.min = min\n\n #: The optional maximal allowed value.\n self.max = max\n\n #: The optional step between values.\n self.step = step\n\n #: Whether the minimal value is allowed.\n self.include_min = include_min\n\n #: Whether the maximal value is allowd.\n self.include_max = include_max",
"def __init__(self, values: dict):\n \n self.perform_analysis = bool\n self.percentage_exceedance = Percentage\n self.exclude_uncertain_values = bool\n self.exclude_radius_meters = PositiveFloat"
] | [
"0.7121283",
"0.6842653",
"0.6765476",
"0.66799957",
"0.66799957",
"0.66799957",
"0.66799957",
"0.64936614",
"0.6454951",
"0.6441182",
"0.635217",
"0.6306492",
"0.6245966",
"0.62103117",
"0.61977905",
"0.61973435",
"0.61709034",
"0.6144477",
"0.61384565",
"0.61371243",
"0.61002576",
"0.60703856",
"0.60626644",
"0.6059051",
"0.60318124",
"0.60318124",
"0.599797",
"0.5997579",
"0.5926057",
"0.59157056"
] | 0.7508133 | 0 |
Constructor for model having objectives, constraints and decisions | def __init__(self, objectives, constraints, decisions):
self.objectives = objectives
self.constraints = constraints
self.decisions = decisions | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self):\n self.name = \"Osyczka\"\n objectives = [ob_os_1, ob_os_2]\n constraints = [con_os_1, con_os_2, con_os_3, con_os_4, con_os_5, con_os_6]\n decisions = [Decision(0, 10), Decision(0, 10), Decision(1, 5), Decision(0, 6), Decision(1, 5), Decision(0, 10)]\n Model.__init__(self, objectives, constraints, decisions)",
"def __init__(self):\n self.name = \"Schaffer\"\n objectives = [o_sh_1, o_sh_2]\n decisions = [Decision(-10 ** 5, 10 ** 5)]\n Model.__init__(self, objectives, None, decisions)",
"def __init__(self):\n self.name = \"Kursawe\"\n objectives = [o_ku_1, o_ku_2]\n decisions = [Decision(-5, 5), Decision(-5, 5), Decision(-5, 5)]\n Model.__init__(self, objectives, None, decisions)",
"def __init__(self, param_dictionary):\n\n BaseModel.__init__(self)\n\n # set starting compartment values\n self.set_compartment(\"susceptible\",\n param_dictionary[\"population\"] - param_dictionary[\"start_infectious\"])\n self.set_compartment(\"infectious\", param_dictionary[\"start_infectious\"])\n self.set_compartment(\"immune\", 0.)\n\n # set model parameters\n self.set_param(\"infection_beta\",\n param_dictionary[\"r0\"]\n / (param_dictionary[\"duration_infectious\"] * param_dictionary[\"population\"]))\n self.set_param(\"infection_rate_recover\", 1. / param_dictionary[\"duration_infectious\"])",
"def __init__(self, param_dictionary):\n\n BaseModel.__init__(self)\n\n # set starting compartment values\n self.set_compartment(\"susceptible\",\n param_dictionary[\"population\"] - param_dictionary[\"start_infectious\"])\n self.set_compartment(\"preinfectious\", 0.)\n self.set_compartment(\"infectious\", param_dictionary[\"start_infectious\"])\n self.set_compartment(\"immune\", 0.)\n\n # set model parameters\n self.set_param(\"infection_beta\",\n param_dictionary[\"r0\"]\n / (param_dictionary[\"duration_infectious\"] * param_dictionary[\"population\"]))\n self.set_param(\"infection_rate_progress\", 1. / param_dictionary[\"duration_preinfectious\"])\n self.set_param(\"infection_rate_recover\", 1. / param_dictionary[\"duration_infectious\"])",
"def __init__(self,model,alpha=0,head_min=0,head_max=1,k=1,\r\n variables=[],priors=[]):\r\n \r\n import numpy as np\r\n \r\n # Append the base to the elementlist\r\n self.model = model\r\n model.elementlist.append(self)\r\n \r\n # Set orientation value\r\n self.alpha = alpha\r\n \r\n # Set potential scaling variables\r\n self.head_min = head_min\r\n self.head_max = head_max\r\n \r\n # Assign the hydraulic conductivity of the base model\r\n self.k = k\r\n \r\n # The model requires the base flow in terms of hydraulic potential (phi)\r\n # The function head_to_potential extracts the following variables:\r\n # phi_min hydraulic potential corresponding to head_min\r\n # phi_max hydraulic potential corresponding to head_max\r\n self.head_to_potential()\r\n \r\n # Check input for validity\r\n self.check_input()\r\n \r\n self.variables = variables\r\n self.priors = priors\r\n \r\n if len(self.variables) > 0:\r\n # There are some model variables specified\r\n for idx,var in enumerate(self.variables):\r\n self.model.num_params += 1\r\n exec(\"self.model.params += [self.%s]\" % var)\r\n self.model.variables += [var]\r\n self.model.priors += [self.priors[idx]]\r\n if 'name' in list(self.priors[idx].keys()):\r\n self.model.param_names += [self.priors[idx]['name']] \r\n else: \r\n self.model.param_names += ['unknown']",
"def __init__(self, objectives, constraints, declarations = None):\n \n self.objectives = objectives\n self.constraints = constraints\n self.declarations = declarations",
"def __init__(self, constraints={}):\n self.constraints = constraints",
"def __init__(self, model: Model1D):\n self._model = model",
"def __init__(self, model: Model1D):\n super().__init__(model=model)",
"def __init__(self, model: Model1D):\n super().__init__(model=model)",
"def __init__(self, model: Model1D):\n super().__init__(model=model)",
"def __init__(self, model: Model1D):\n super().__init__(model=model)",
"def __init__(self, model: Model1D):\n super().__init__(model=model)",
"def __init__(self):\n logger.debug('Initializing %s model.' % self.__class__.__name__)\n self.dependent_attributes = ['_alpha',\n '_log_like',\n '_gradient','_K',\n '_log_det']\n self._previous_parameters = None # previous parameters from last call\n self.grad_method = None # could be {'finite_difference','adjoint'}\n self.noise_var_constraint = '+ve' # Gaussian noise variance constraint\n return",
"def __init__(self, model, X_lower, X_upper):\n self.model = model\n self.X_upper = X_upper\n self.X_lower = X_lower",
"def __init__(self, model = None, cso = None, fast_classification = True, paper = None):\n self.cso = cso #Stores the CSO Ontology\n self.paper = paper #Paper to analyse\n self.model = model #contains the cached model\n self.min_similarity = 0.90 #Initialises the min_similarity\n self.fast_classification = fast_classification # if will use the full model or not\n self.explanation = dict()",
"def __init__(self, **kwargs):\n super(Model, self).__init__(**kwargs)",
"def __init__(self, **kwargs):\n\n # Identify the mode to start the model in\n if \"x\" in kwargs and \"y\" in kwargs:\n x = kwargs.get(\"x\")\n y = kwargs.get(\"y\")\n if \"model_name\" not in kwargs:\n self.__mode = \"train\"\n else:\n self.__mode = \"retrain\"\n elif \"model_name\" in kwargs:\n self.__mode = \"test\"\n else:\n raise NameError(\"Cannot infer mode from arguments.\")\n\n print(\"Initializing model in %s mode.\" % self.__mode)\n\n if self.mode == \"train\":\n # Infer input type from type(x)\n if type(x[0]) == np.bytes_:\n print(\"Input type is 'binary mols'.\")\n self.__input_type = \"mols\" # binary RDKit mols\n else:\n print(\"Input type is 'molecular descriptors'.\")\n self.__input_type = \"descriptors\" # other molecular descriptors\n\n # If scaling is required\n if kwargs.get(\"scaling\", False) is True:\n # Normalize the input\n print(\"Applying scaling on input.\")\n self.__scaler = StandardScaler()\n x = self.__scaler.fit_transform(x)\n else:\n self.__scaler = None\n\n # If PCA is required\n if kwargs.get(\"pca\", False) is True:\n print(\"Applying PCA on input.\")\n self.__pca = PCA(\n n_components=x.shape[1]\n ) # n_components=n_features for now\n x = self.__pca.fit_transform(x)\n else:\n self.__pca = None\n\n self.__maxlen = (\n kwargs.get(\"dataset_info\")[\"maxlen\"] + 10\n ) # Extend maxlen to avoid breaks in training\n self.__charset = kwargs.get(\"dataset_info\")[\"charset\"]\n self.__dataset_name = kwargs.get(\"dataset_info\")[\"name\"]\n self.__lstm_dim = kwargs.get(\"lstm_dim\", 256)\n self.__h_activation = kwargs.get(\"h_activation\", \"relu\")\n self.__bn = kwargs.get(\"bn\", True)\n self.__bn_momentum = kwargs.get(\"bn_momentum\", 0.9)\n self.__noise_std = kwargs.get(\"noise_std\", 0.01)\n self.__td_dense_dim = kwargs.get(\n \"td_dense_dim\", 0\n ) # >0 squeezes RNN connections with Dense sandwiches\n self.__batch_size = kwargs.get(\"batch_size\", 256)\n self.__dec_layers = kwargs.get(\"dec_layers\", 2)\n\n if self.input_type == \"descriptors\":\n self.__codelayer_dim = x.shape[1] # features\n if \"codelayer_dim\" in kwargs:\n print(\n \"Ignoring requested codelayer_dim because it is inferred from the cardinality of the descriptors.\"\n )\n else:\n self.__codelayer_dim = kwargs.get(\"codelayer_dim\", 128)\n \n # Create the left/right-padding vectorizers\n self.__smilesvec1 = SmilesVectorizer(\n canonical=False,\n augment=True,\n maxlength=self.maxlen,\n charset=self.charset,\n binary=True,\n )\n\n self.__smilesvec2 = SmilesVectorizer(\n canonical=False,\n augment=True,\n maxlength=self.maxlen,\n charset=self.charset,\n binary=True,\n leftpad=False,\n )\n\n # self.train_gen.next() #This line is needed to set train_gen.dims (to be fixed in HetSmilesGenerator)\n self.__input_shape = self.smilesvec1.dims\n self.__dec_dims = list(self.smilesvec1.dims)\n self.__dec_dims[0] = self.dec_dims[0] - 1\n self.__dec_input_shape = self.dec_dims\n self.__output_len = self.smilesvec1.dims[0] - 1\n self.__output_dims = self.smilesvec1.dims[-1]\n\n # Build all sub-models as untrained models\n if self.input_type == \"mols\":\n self.__build_mol_to_latent_model()\n else:\n self.__mol_to_latent_model = None\n\n self.__build_latent_to_states_model()\n self.__build_batch_model()\n\n # Build data generators\n self.__build_generators(x, y)\n\n # Retrain or Test mode\n else:\n self.__model_name = kwargs.get(\"model_name\")\n\n # Load the model\n self.__load(self.model_name)\n \n if self.mode == \"retrain\":\n # If scaling is required\n if self.scaler is not None:\n 
print(\"Applying scaling on input.\")\n x = self.scaler.transform(x)\n\n # If PCA is required\n if self.pca is not None:\n print(\"Applying PCA on input.\")\n x = self.pca.transform(x)\n \n # Build data generators\n self.__build_generators(x, y)\n\n # Build full model out of the sub-models\n self.__build_model()\n\n # Show the resulting full model\n print(self.model.summary())",
"def __init__(self,\n constraints\n ):\n self.constraints = constraints\n self.num_constraints = sum([con.num_constraints for con in self.constraints])\n self.state_constraints = [con for con in self.constraints if con.constraint_input_type == ConstraintInputType.STATE]\n self.num_state_constraints = sum([con.num_constraints for con in self.state_constraints])\n self.input_constraints = [con for con in self.constraints if con.constraint_input_type == ConstraintInputType.INPUT]\n self.num_input_constraints = sum([con.num_constraints for con in self.input_constraints])\n self.input_state_constraints = [con for con in self.constraints if con.constraint_input_type == ConstraintInputType.INPUT_AND_STATE]\n self.num_input_state_constraints = sum([con.num_constraints for con in self.input_state_constraints])",
"def __init__(self):\n self.model_description: Dict[str, Any] = get_model_description()\n self.model_name: str = self.model_description['name']\n self.model_version: str = self.model_description['version']\n\n # Make sure we do not have a trailing slash to muck up processing later.\n self.event_dir: Optional[str] = None\n self.zone_name: Optional[str] = None\n self.fault_time: Optional[str] = None\n\n self.example: Example = None\n self.validator: ExampleValidator = ExampleValidator()\n self.common_features_df: pd.DataFrame = None\n\n self.cavity_onnx_session: rt.InferenceSession = rt.InferenceSession(os.path.join(os.path.dirname(__file__),\n 'model_files',\n 'cavity_model.onnx'))\n self.fault_onnx_session: rt.InferenceSession = rt.InferenceSession(os.path.join(os.path.dirname(__file__),\n 'model_files',\n 'fault_model.onnx'))",
"def __init__(self, modelwithparams=None, random_number=-1, problem_type='infer'):\n self.modelwithparams = modelwithparams\n self.oldpara = self.modelwithparams\n self.random_number = random_number\n self.flag = True\n self.problem_type = problem_type",
"def __init__(self, model):\n self.model = model",
"def __init__(self, model):\n self.model = model",
"def __init__(self, model):\n self.model = model",
"def __init__(self, model):\n self.model = model",
"def __init__(self, state_size, action_size, action_low, action_high):\n self.state_size = state_size\n self.action_size = action_size\n self.action_low = action_low\n self.action_high = action_high\n self.action_range = self.action_high - self.action_low\n\n ###\n\n self.build_model()",
"def __init__(self, target_model: Classifier):\n\n super().__init__(target_model, None, None, None, None)",
"def __init__(self, model):\n self._model = model",
"def build_model():"
] | [
"0.8211604",
"0.783042",
"0.7588628",
"0.71783847",
"0.711893",
"0.71124303",
"0.7095124",
"0.7054429",
"0.67368513",
"0.6669032",
"0.6669032",
"0.6669032",
"0.6669032",
"0.6669032",
"0.66268575",
"0.65448666",
"0.6534245",
"0.6528547",
"0.6521854",
"0.6510179",
"0.64817524",
"0.64674944",
"0.64661175",
"0.64661175",
"0.64661175",
"0.64661175",
"0.6462623",
"0.6450976",
"0.64507973",
"0.64317834"
] | 0.8309128 | 0 |
Evaluates the score for a given solution using all objectives | def evaluate(self, solution, total = 0):
for objective in self.objectives:
total = total + objective(solution)
return total | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_score(self, solution: np.array) -> float:\n pass",
"def score_solution(g, s):\n pass",
"def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()",
"def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()",
"def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()",
"def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()",
"def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()",
"def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()",
"def get_score(self, solution: np.array) -> float:\n score = 0\n for vehicle_count, vehicle_solution in enumerate(solution):\n distances = self.distance_matrix[vehicle_solution[0:-1], vehicle_solution[1:]]\n costs = distances * self.selected_transportation_cost[vehicle_count]\n score += np.sum(costs)\n return score",
"def scoreEvaluationFunction(currentGameState):\r\n return currentGameState.getScore()",
"def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()",
"def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()",
"def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()",
"def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()",
"def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()",
"def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()",
"def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()",
"def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()",
"def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()",
"def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()",
"def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()",
"def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()",
"def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()",
"def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()",
"def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()",
"def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()",
"def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()",
"def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()",
"def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()",
"def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()"
] | [
"0.6913881",
"0.67433435",
"0.6517173",
"0.6517173",
"0.6517173",
"0.6517173",
"0.6517173",
"0.6517173",
"0.6514553",
"0.65127504",
"0.64637536",
"0.64637536",
"0.64637536",
"0.64637536",
"0.64637536",
"0.64637536",
"0.64637536",
"0.64637536",
"0.64637536",
"0.64637536",
"0.64637536",
"0.64637536",
"0.64637536",
"0.64637536",
"0.64637536",
"0.64637536",
"0.64637536",
"0.64637536",
"0.64637536",
"0.64637536"
] | 0.7744005 | 0 |
Validates if given solutions is as per the constraints | def ok(self, solution):
if self.constraints is not None:
for constraint in self.constraints:
if not constraint(solution):
return False
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_constraints ( A, S, complete ) :\n\t\n\tok = True\n\t\n\tfor i in range(len(complete)) :\n\t\tif complete[i] :\n\t\t\tif not (dot(A[i],S) == 0) :\n\t\t\t\tok = False\n\t\t\t\tprint '\\n'\n\t\t\t\tprint '*** warning *** constraint %d not verified' % (i)\n\t\t\t\tvars_inds = (where(abs(A[i]) == 1))[0]\n\t\t\t\tprint 'variables involved:', vars_inds\n\t\t\t\tprint 'displacements:', S[vars_inds]\n\t\t\t\tprint\n\t\t\t\t#programPause = raw_input(\"Press the <ENTER> key to continue...\")\n\t\t\t\t\n\treturn ok",
"def validateSolution(solution) -> bool:\r\n # Does not use shortcut return, if invalidation found, to print all errors.\r\n isValid = True\r\n\r\n if not validateTeacherTimeConstraints(solution):\r\n logger.debug(\"Solution: %4i, TeacherTime Constraint Fail!\" % solution.solutionIndex)\r\n isValid = False\r\n\r\n if not validateRoomTimeConstraints(solution):\r\n logger.debug(\"Solution: %4i, RoomTime Constraint Fail!\" % solution.solutionIndex)\r\n isValid = False\r\n\r\n if not validateSemesterGroupTimeConstraints(solution, solution.orm):\r\n logger.debug(\"Solution: %4i, SemesterGroupTime Constraint Fail!\" % solution.solutionIndex)\r\n isValid = False\r\n\r\n if not validateStudyDays(solution, solution.orm):\r\n logger.debug(\"Solution: %4i, StudyDay Constraint Fail!\" % solution.solutionIndex)\r\n isValid = False\r\n\r\n if not validateAllLessonsAsBlockCourses(solution, solution.orm):\r\n logger.debug(\"Solution: %4i, AllLessonsAsBlock Constraint Fail!\" % solution.solutionIndex)\r\n isValid = False\r\n\r\n if not validateBlocksOnlyInSameRoom(solution, solution.orm):\r\n logger.debug(\"Solution: %4i, BlocksOnlyInSameRoom Constraint Fail!\" % solution.solutionIndex)\r\n isValid = False\r\n\r\n if not validateConsecutiveLessons(solution, solution.orm):\r\n logger.debug(\"Solution: %4i, ConsecutiveLessons Constraint Fail!\" % solution.solutionIndex)\r\n isValid = False\r\n\r\n if not validateForenoonLessons(solution, solution.orm):\r\n logger.debug(\"Solution: %4i, ForenoonLessons Constraint Fail!\" % solution.solutionIndex)\r\n isValid = False\r\n\r\n if not validateGivenTimeslots(solution, solution.orm):\r\n logger.debug(\"Solution: %4i, GivenTimeslots Constraint Fail!\" % solution.solutionIndex)\r\n isValid = False\r\n\r\n if not validateLessonTakePlaceOnOneDay(solution, solution.orm):\r\n logger.debug(\"Solution: %4i, LessonTakePlaceOnOneDay Constraint Fail!\" % solution.solutionIndex)\r\n isValid = False\r\n\r\n if not validateLessonTime(solution, solution.orm):\r\n logger.debug(\"Solution: %4i, LessonTime Constraint Fail!\" % solution.solutionIndex)\r\n isValid = False\r\n\r\n if not validateMaxLessonsPerDayPerTeacher(solution, solution.orm):\r\n logger.debug(\"Solution: %4i, MaxLessonsPerDayPerTeacher Constraint Fail!\" % solution.solutionIndex)\r\n isValid = False\r\n\r\n if not validateMaxLessonsPerDayPerSemesterGroup(solution, solution.orm):\r\n logger.debug(\"Solution: %4i, MaxLessonsPerDayPerSemesterGroup Constraint Fail!\" % solution.solutionIndex)\r\n isValid = False\r\n\r\n if not validateMaxLecturesPerDayPerTeacher(solution, solution.orm):\r\n logger.debug(\"Solution: %4i, MaxLecturesPerDayPerTeacher Constraint Fail!\" % solution.solutionIndex)\r\n isValid = False\r\n\r\n if not validateMaxLecturesAsBlockForTeacher(solution, solution.orm):\r\n logger.debug(\"Solution: %4i, MaxLecturesAsBlockForTeacher Constraint Fail!\" % solution.solutionIndex)\r\n isValid = False\r\n\r\n if not validateOneCoursePerDayPerTeacher(solution, solution.orm):\r\n logger.debug(\"Solution: %4i, OneCoursePerDayPerTeacher Constraint Fail!\" % solution.solutionIndex)\r\n isValid = False\r\n\r\n if not validateOnlyOneNotAllInOneBlockLessonPerDay(solution, solution.orm):\r\n logger.debug(\"Solution: %4i, NotAllInOneBlockLessonsPerDay Constraint Fail!\" % solution.solutionIndex)\r\n isValid = False\r\n\r\n if not validateRoomNotAvailableTimes(solution, solution.orm):\r\n logger.debug(\"Solution: %4i, RoomNotAvailable Constraint Fail!\" % solution.solutionIndex)\r\n isValid = False\r\n\r\n if not 
validateTeacherNotAvailableTimes(solution, solution.orm):\r\n logger.debug(\"Solution: %4i, TeacherNotAvailable Constraint Fail!\" % solution.solutionIndex)\r\n isValid = False\r\n\r\n if not validateSameTimeLessons(solution, solution.orm):\r\n logger.debug(\"Solution: %4i, SameTimeLessons Constraint Fail!\" % solution.solutionIndex)\r\n isValid = False\r\n\r\n if not validateTimeslotVarHelperVariables(solution, solution.orm):\r\n logger.debug(\"Solution: %4i, TimeslotBoolVars Wrong Values!\" % solution.solutionIndex)\r\n isValid = False\r\n\r\n return isValid",
"def check_valid_solution(self, weights, proportions, assets, data):\n # Checking whether correct number of solutions has been picked\n if np.sum(weights != 0) != K:\n raise ValueError(\"More than \" + str(K) + \" assets selected (\", weights.tolist(), \") in solution: \" + str(weights))\n # Checking whether number and size of proportions is correct\n if np.any(proportions > 1) or np.any(proportions < 0) or len(proportions) != K:\n raise ValueError(\"The values of proportions are not valid: \" + str(proportions))\n # Checking whether proportions sum up to 1\n elif not np.isclose(weights.sum(), 1):\n raise ValueError(\"Proportions don't sum up to 1 (\" + str(weights.sum()) + \") in solution: \" + str(weights))\n # Checking whether maximum investment amount has not been exceeded\n elif np.any(weights > data.delta):\n raise ValueError(\"There's at least one proportion larger than delta: \" + str(weights))\n # Checking for duplicate assets in a solution\n elif len(np.unique(assets)) != len(assets):\n raise ValueError(\"Duplicated assets in the portfolio: \" + str(assets))",
"def solved(self):\n if not self.all_variables_assigned():\n return False\n for constraint in self.constraints:\n if not constraint.satisfied(*[self.var_dict[name] for name in constraint.var_names]):\n return False\n return True",
"def _check_if_satisfiable(self):\n # Search for a satisfying assignment\n all_variables = self.all_variables()\n\n # Try to find some assignment of the constrained vars\n counter = count()\n next_count = next(counter)\n queue = [(0, 0, next_count, {})]\n\n while queue:\n num_attempts, _, _, assignments = hq.heappop(queue)\n num_attempts += 1\n # Full assignment?\n # keep out of loop for empty constraint edge case\n if len(assignments) == len(all_variables):\n return True\n for v in sorted(all_variables - set(assignments.keys())):\n if isinstance(v, DiscreteVariable):\n possible_assignments = self.get_possible_assignments(v)\n else:\n possible_assignments = [v.sample() \\\n for _ in range(10*(1+num_attempts))]\n for assignment in possible_assignments:\n new_assignments = assignments.copy()\n new_assignments[v] = assignment\n # Constraint violated\n if not self.check(new_assignments):\n continue\n # Finish early\n if len(new_assignments) == len(all_variables):\n return True\n next_count = next(counter)\n hq.heappush(queue, (num_attempts, -len(new_assignments),\n -next_count, new_assignments))\n\n if next_count > gc.max_satisfy_tries:\n import ipdb; ipdb.set_trace()\n break\n\n return False",
"def check_constraints(Px,pk1,pk2,mu1,mu2,mu3):\n # Constraint 1: Check polarisation basis probabilities are valid.\n if (Px >= 1.0 or Px <= 0.0):\n print(\"Error! Constraint 1 < Px < 0: \", Px)\n exit(1)\n # Constraint 2: Check probability of pulse with intensity 1 is in bounds.\n if (pk1 >= 1.0 or pk1 <= 0.0):\n print(\"Error! Constraint 1 < pk1 < 0: \", pk1)\n exit(1)\n # Constraint 3: Check probability of pulse with intensity 2 is in bounds.\n if (pk2 >= 1.0 or pk2 <= 0.0):\n print(\"Error! Constraint 1 < pk2 < 0: \", pk2)\n exit(1)\n # Constraint 4: Check sum of probabilities for intensity 1 & 2 are less\n # than unity.\n if ((pk1 + pk2) >= 1.0):\n print(\"Error! Constraint (pk1 + pk2) < 1: \", pk1 + pk2)\n exit(1)\n # Constraint 5: Check value of intensity 1 is in bounds.\n if (mu1 >= 1.0 or mu1 <= 0.0):\n print(\"Error! Constraint 1 < mu1 < 0: \", mu1)\n exit(1)\n # Constraint 6: Check value of intensity 2 is in bounds.\n if (mu2 >= 1.0 or mu2 <= 0.0):\n print(\"Error! Constraint 1 < mu2 < 0: \", mu2)\n exit(1)\n # Constraint 7: Check values of all intensities are in bounds.\n if ((mu1 - mu3) <= mu2):\n print(\"Error! Constraint (mu1-mu3) > mu2: \", (mu1-mu3), mu2)\n exit(1)\n # Constraint 8: Check values of intensities 2 & 3 are in bounds.\n if (mu2 <= mu3):\n print(\"Error! Constraint mu2 > mu3: \", mu2, mu3)\n exit(1)\n return None",
"def feasibility_check(self,domains,constraint):\n # print(\"HEEEREEE\")\n left_var = constraint.left[0]\n left_const_mult = constraint.left[1]\n left_val = constraint.left[2]\n\n right_var = constraint.right[0]\n right_const_mult = constraint.right[1]\n right_val = constraint.right[2]\n\n\n # Simple Variable-Value Labeling\n if (left_val == [0] and left_const_mult == [1]) and (right_const_mult == [0]):\n if (right_val[0] in domains[left_var[0]]):\n return True\n else:\n return False\n\n # Simple Variable-Variable Labeling\n elif (left_val == [0] and left_const_mult == [1]) and (right_val == [0] and right_const_mult == [1]):\n if len(set(domains[left_var[0]]) | set(domains[right_var[0]])) > 0:\n return True\n else:\n return False\n \n # Equation\n else:\n l = 0\n for var,mult in zip(left_var,left_const_mult):\n l += mult*max(domains[var])\n for const in left_val:\n l += const\n\n r = 0\n for var,mult in zip(right_var,right_const_mult):\n r += mult*min(domains[var])\n for const in right_val:\n r += const\n\n # For Equations (Equal sign)\n total_vars = left_var+right_var\n total_count = len(left_var+right_var)\n count = 0\n for var in total_vars:\n if len(domains[var]) == 1:\n count += 1\n\n # For Equations (Equal sign)\n if (count == total_count):\n if l == r:\n return True\n else:\n return False\n\n if l >= r:\n # print(l,r)\n return True\n else:\n return False",
"def check_all_constraints(csp) :\n\n for constraint in csp.get_all_constraints():\n assigned1 = csp.get_assigned_value(constraint.var1)\n assigned2 = csp.get_assigned_value(constraint.var2)\n check = constraint.check(assigned1,assigned2)\n if check==False and assigned1!=None and assigned2!=None:\n return False \n return True",
"def check_solutions(eq):\n s = diophantine(eq)\n\n factors = Mul.make_args(eq)\n\n var = list(eq.free_symbols)\n var.sort(key=default_sort_key)\n\n while s:\n solution = s.pop()\n for f in factors:\n if diop_simplify(f.subs(zip(var, solution))) == 0:\n break\n else:\n return False\n return True",
"def check_all_constraints(csp) :\n constraints=csp.get_all_constraints()\n for constraint in constraints:\n var1 = constraint.var1\n var2 = constraint.var2\n val1=csp.get_assigned_value(var1)\n val2=csp.get_assigned_value(var2)\n if val1!=None and val2!=None:\n if not constraint.check(val1,val2):\n return False\n return True",
"def check_validity(self):\n if len(self.constraints) < 2: # pragma: nocover\n raise ValueError(\n \"Invalid input value for type '{}': number of \"\n \"subexpression must be at least 2.\".format(type(self).__name__)\n )\n for constraint in self.constraints:\n constraint.check_validity()",
"def check_validity(self):\n if len(self.constraints) < 2: # pragma: nocover\n raise ValueError(\n \"Invalid input value for type '{}': number of \"\n \"subexpression must be at least 2.\".format(type(self).__name__)\n )\n for constraint in self.constraints:\n constraint.check_validity()",
"def is_solvable(self):\n for row, col in np.ndindex(9, 9):\n if len(self.possible_values[row][col]) < 1 and self.final_values[row][col] == 0:\n return False\n return True",
"def is_legal_solution(self, solution):\r\n if self.sorting_order is ScoresSortingOrder.ASCENDING:\r\n return self.fit_score(solution) == 0\r\n else:\r\n return self.fit_score(solution) == sum(x for x in range(1, 12))",
"def input_check(self):\n\n if self.species == 'He': assert self.line_model == 'voigt'\n n_upper_range, e_dens_range, temp_range, b_field_range = get_param_ranges(self.line_model)\n\n if np.isnan(n_upper_range).sum() <= 1:\n assert (self.n_upper in range(n_upper_range[0], n_upper_range[1]))\n if np.isnan(e_dens_range).sum() <= 1:\n assert (e_dens_range[0] <= self.e_dens <= e_dens_range[1])\n if np.isnan(temp_range).sum() <= 1:\n assert (temp_range[0] <= self.temp <= temp_range[1])\n if np.isnan(b_field_range).sum() <= 1:\n assert (b_field_range[0] <= self.b_field <= b_field_range[1])",
"def allConstraintsSatisfied(self):\n # loop through all of the constraints\n for constraint in self.constraints:\n # if any of the constraints are not satisfied, then return False\n if (not constraint.satisfied(constraint.tail.value, constraint.head.value)):\n return False\n # no violations, so return true\n return True",
"def is_solution(self, csp):\n return self.is_consistent(csp.get_constraints()) and self.is_complete(csp.get_variables())",
"def _is_valid(self):\n\n if (\n self.poly.weight_0 != 0\n or len(self.poly.weight_1) != self.num_qubits\n or len(self.poly.weight_2) != int(self.num_qubits * (self.num_qubits - 1) / 2)\n or len(self.poly.weight_3)\n != int(self.num_qubits * (self.num_qubits - 1) * (self.num_qubits - 2) / 6)\n ):\n return False\n if (\n (self.linear).shape != (self.num_qubits, self.num_qubits)\n or len(self.shift) != self.num_qubits\n or not np.allclose((np.linalg.det(self.linear) % 2), 1)\n ):\n return False\n if (\n not (set(self.poly.weight_1.flatten())).issubset({0, 1, 2, 3, 4, 5, 6, 7})\n or not (set(self.poly.weight_2.flatten())).issubset({0, 2, 4, 6})\n or not (set(self.poly.weight_3.flatten())).issubset({0, 4})\n ):\n return False\n if not (set(self.shift.flatten())).issubset({0, 1}) or not (\n set(self.linear.flatten())\n ).issubset({0, 1}):\n return False\n return True",
"def validate_parameters(side_1, side_2, side_3):\n if side_1 > 0 and side_2 > 0 and side_3 > 0 and (side_1 + side_2 > side_3) and \\\n (side_1 + side_3 > side_2) and (side_3 + side_2 > side_1):\n return True\n else:\n return False",
"def solve(num_wizards, num_constraints, wizards, constraints):\n\n # print(num_wizards)\n # print(num_constraints)\n # print(wizards)\n # print(constraints)\n # node_set = set(wizards)\n \n\n\n def cost(sol,num_constraints,constraints):\n constraints_satisfied = 0\n constraints_failed = []\n output_ordering_map = {k: v for v, k in enumerate(sol)}\n for c in constraints:\n\n m = output_ordering_map # Creating an alias for easy reference\n\n wiz_a = m[c[0]]\n wiz_b = m[c[1]]\n wiz_mid = m[c[2]]\n\n if (wiz_a < wiz_mid < wiz_b) or (wiz_b < wiz_mid < wiz_a):\n constraints_failed.append(c)\n else:\n constraints_satisfied += 1\n return num_constraints - constraints_satisfied\n\n def neighbors(sol):\n wiz1 = random.randint(0,num_wizards-1)\n wiz2 = random.randint(0,num_wizards-1)\n\n new_sol = copy.copy(sol)\n temp = new_sol[wiz1]\n new_sol[wiz1] = new_sol[wiz2]\n new_sol[wiz2] = temp\n \n return new_sol\n\n def acceptance_probability(old_cost,new_cost,T):\n exponent = (old_cost - new_cost) / T\n \n try:\n ans = math.exp(exponent)\n except OverflowError:\n ans = float('inf')\n return ans\n\n\n def anneal(solution, num_constraints, constraints):\n old_cost = 0\n new_cost = 0\n old_cost = cost(solution,num_constraints,constraints)\n T = 1.0\n T_min = 0.000001\n alpha = 0.98\n while T > T_min:\n i = 1\n while i <= 1000:\n new_solution = neighbors(solution)\n new_cost = cost(new_solution,num_constraints,constraints)\n if new_cost == 0:\n return new_solution,new_cost\n ap = acceptance_probability(old_cost, new_cost, T)\n if ap > random.random():\n solution = new_solution\n old_cost = new_cost\n i += 1\n T = T*alpha\n return solution, old_cost\n\n s = copy.copy(wizards)\n random.shuffle(s)\n ret = anneal(s,num_constraints,constraints)\n \n for i in range(10):\n if ret[1] == 0:\n break\n random.shuffle(s)\n new_ret = anneal(s,num_constraints,constraints)\n print(i)\n if new_ret[1] < ret[1]:\n ret = new_ret\n print(\"constraints failed: {0}\".format(ret[1]))\n return ret[0]",
"def check_all_constraints(Instance: dict):\r\n\r\n print(\"Checking constraints...\")\r\n # Schedule constraints\r\n check_schedule(Instance)\r\n # Resources constraints\r\n check_resources(Instance)\r\n # Exclusions constraints\r\n check_exclusions(Instance)\r\n if (\r\n check_exclusions(Instance)\r\n and check_resources(Instance)\r\n and check_schedule(Instance)\r\n ):\r\n print(\"Done\")\r\n return True\r\n else:\r\n return False",
"def check_occuring_variables(formula,variables_to_consider,allowed_variables) :\n variable_set=set(allowed_variables)\n for clause in formula :\n variables_in_clause = {abs(l) for l in clause if abs(l) in variables_to_consider}\n if not variables_in_clause <= variable_set:\n return False, [v for v in variables_in_clause if not v in variable_set] \n return True, []",
"def isLegal(self):\n # checks for same values in rows\n for n in range(9):\n rows = set()\n for m in range(9):\n if self.puzzle[n][m] != 0:\n size = len(rows)\n rows.add(self.puzzle[n][m])\n if size == len(rows):\n return False\n\n #checks for same values in columns\n for m in range(9):\n cols = set()\n for n in range(9):\n if self.puzzle[n][m] != 0:\n size = len(cols)\n cols.add(self.puzzle[n][m])\n if size == len(cols):\n return False\n\n #checks for same values in sections\n sections = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]\n for r in sections:\n for c in sections:\n sects = set()\n for n in r:\n for m in c:\n if self.puzzle[n][m] != 0:\n size = len(sects)\n sects.add(self.puzzle[n][m])\n if size == len(sects):\n return False\n return True",
"def has_solution(self) -> bool:\n pass",
"def check_ml_constraints(vector):\n\tglobal __ml_constraints\n\n\tfor con in __ml_constraints:\n\t\t# a vector must be either contained or missing for both instances *con* tuple\n\t\tif not (vector[con[0]] == vector[con[1]]):\n\t\t\treturn False\n\treturn True",
"def test_constraint_satisfaction(self, velocity_pc_data):\n data, pc = velocity_pc_data\n path, ss, vlim = data\n\n constraint_param = pc.compute_constraint_params(path, ss)\n _, _, _, _, _, _, xlimit = constraint_param\n\n qs = path(ss, 1)\n N = ss.shape[0] - 1\n\n sd = cvx.Variable()\n\n for i in range(0, N + 1):\n # 2. Compute max sd from the data\n constraints = [qs[i] * sd <= vlim[:, 1],\n qs[i] * sd >= vlim[:, 0],\n sd >= 0, sd <= JVEL_MAXSD]\n prob = cvx.Problem(cvx.Maximize(sd), constraints)\n try:\n prob.solve(solver=cvx.ECOS, abstol=1e-9)\n xmax = sd.value ** 2\n\n prob = cvx.Problem(cvx.Minimize(sd), constraints)\n prob.solve(solver=cvx.ECOS, abstol=1e-9)\n xmin = sd.value ** 2\n except cvx.SolverError:\n continue\n\n # 3. They should agree\n npt.assert_allclose([xmin, xmax], xlimit[i], atol=SMALL)\n\n # Assert non-negativity\n assert xlimit[i, 0] >= 0",
"def has_solution(self) -> bool:\n if self in [self.SATISFIED, self.ALL_SOLUTIONS, self.OPTIMAL_SOLUTION]:\n return True\n return False",
"def is_valid(self):\n sum_prob_per_var = {}\n for rule in self.rules:\n var, prob = rule.variable, rule.probability\n if prob < 0:\n return False\n sum_prob_per_var[var] = sum_prob_per_var.get(var, 0) + prob\n return all(sum_prob == 1.0 for sum_prob in sum_prob_per_var.values())",
"def test_validate_answer(self):\r\n sample_dict = {'x': (1, 2)}\r\n problem = self.build_problem(\r\n sample_dict=sample_dict,\r\n num_samples=10,\r\n tolerance=\"1%\",\r\n answer=\"x\"\r\n )\r\n self.assertTrue(problem.responders.values()[0].validate_answer('14*x'))\r\n self.assertFalse(problem.responders.values()[0].validate_answer('3*y+2*x'))",
"def check_optimization_sanity(self):\n if len(self.parameters) == 0:\n msg = \"No parameters defined. Optimization not possible.\"\n raise ValueError(msg)\n\n if len(self.constraints) == 0:\n msg = \"No constraints defined. Optimization not possible.\"\n raise ValueError(msg)"
] | [
"0.7118976",
"0.7054554",
"0.6993789",
"0.6782743",
"0.66744995",
"0.6646843",
"0.6623786",
"0.66098017",
"0.65714705",
"0.6541072",
"0.6465648",
"0.6465648",
"0.6367656",
"0.6321812",
"0.63074505",
"0.6303715",
"0.62613684",
"0.62440664",
"0.6223591",
"0.62150496",
"0.62084955",
"0.62018764",
"0.61924404",
"0.61883605",
"0.60416585",
"0.6001071",
"0.5963198",
"0.5943802",
"0.59436697",
"0.5940148"
] | 0.7211056 | 0 |
Constructor for Osyczka2 model | def __init__(self):
self.name = "Osyczka"
objectives = [ob_os_1, ob_os_2]
constraints = [con_os_1, con_os_2, con_os_3, con_os_4, con_os_5, con_os_6]
decisions = [Decision(0, 10), Decision(0, 10), Decision(1, 5), Decision(0, 6), Decision(1, 5), Decision(0, 10)]
Model.__init__(self, objectives, constraints, decisions) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self):\n self.name = \"Kursawe\"\n objectives = [o_ku_1, o_ku_2]\n decisions = [Decision(-5, 5), Decision(-5, 5), Decision(-5, 5)]\n Model.__init__(self, objectives, None, decisions)",
"def __init__(self):\n self.name = \"Schaffer\"\n objectives = [o_sh_1, o_sh_2]\n decisions = [Decision(-10 ** 5, 10 ** 5)]\n Model.__init__(self, objectives, None, decisions)",
"def __init__(self, model: Model1D):\n super().__init__(model=model)",
"def __init__(self, model: Model1D):\n super().__init__(model=model)",
"def __init__(self, model: Model1D):\n super().__init__(model=model)",
"def __init__(self, model: Model1D):\n super().__init__(model=model)",
"def __init__(self, model: Model1D):\n super().__init__(model=model)",
"def __init__(self, model: Model1D):\n self._model = model",
"def __init__(self):\n self.scaler = None\n self.model = None\n self.encoder = {}\n\n self._load_model()\n return",
"def __init__(self):\n self.model = None",
"def __init__(self):\n self.model = None",
"def __init__(self, model = None, cso = None, fast_classification = True, paper = None):\n self.cso = cso #Stores the CSO Ontology\n self.paper = paper #Paper to analyse\n self.model = model #contains the cached model\n self.min_similarity = 0.90 #Initialises the min_similarity\n self.fast_classification = fast_classification # if will use the full model or not\n self.explanation = dict()",
"def __init__(self, model):\n self.model = model",
"def __init__(self, model):\n self.model = model",
"def __init__(self, model):\n self.model = model",
"def __init__(self, model):\n self.model = model",
"def __init__(self, model):\n\t\tself.model = model",
"def __init__(self, n_lm, n_ang):\n super(MVCNet, self).__init__()\n self.convM1_sag = conv_bn_prelu_dropout(1, 64, 4, 2, 1, 64, 64, 0.25)\n self.convM1_cor = conv_bn_prelu_dropout(1, 64, 4, 2, 1, 64, 64, 0.25)\n self.xModule1 = xModule([64, 128, 64], 64, 4, 2, 1, 128, 128, 0.25)\n self.xModule2 = xModule([128, 64, 32], 128, 4, 2, 1, 256, 256, 0.25)\n self.xModule3 = xModule([256, 32, 16], 256, 4, 2, 1, 512, 512, 0.25)\n self.SLE_sag = SLE([512, 16, 8], 512, n_lm)\n self.SLE_cor = SLE([512, 16, 8], 512, n_lm)\n self.CAE_sag = CAE(512, n_lm, n_ang)\n self.CAE_cor = CAE(512, n_lm, n_ang)",
"def __init__(self, number_of_cheeses, number_of_stools):\n self.model = TOAHModel(number_of_stools)\n self.model.fill_first_stool(number_of_cheeses)",
"def __init__(self,model,X_test, y_test):\n self.model = model\n self.X_test = X_test\n self.y_test = y_test",
"def __init__(self, controlparams, schizparams):\n self.controlparams = controlparams\n self.schizparams = schizparams\n super(BeemanNML2Model, self).__init__(\n controlparams=controlparams, schizparams=schizparams\n )",
"def __init__(self, swc=0.1, sor=0.05, kro0=0.9, no=2.0, krw0=0.4, nw=2.0):\n self.kro0 = kro0\n self.krw0 = krw0\n self.no = no\n self.nw = nw\n self.swc = swc\n self.sor = sor",
"def __init__(self, num_of_cheeses, num_of_stools):\n\n self.toah_model = TOAHModel(num_of_stools)\n self.toah_model.fill_first_stool(num_of_cheeses)",
"def __init__(self, model: object):\n self.model = model",
"def __init__(self, model=None):\n self.model = model\n self.model_f = None\n self.model_h = None\n self.cav = None\n self.sensitivity = None\n self.tcav_score = []\n self.y_labels = None",
"def __init__(self, word2vec_model):\n self._model = word2vec_model",
"def __init__(self, model: Optional[Model] = None) -> None:\n self.model = model",
"def __init__(self, model):\n self._model = model",
"def __init__(self):\n super(SiLU, self).__init__()",
"def __init__(self, n_feature, n_hidden, n_output):\n super(simpleAE, self).__init__()\n self.Encoder = Encoder(n_feature, n_hidden)\n self.Decoder = Decoder(n_hidden, n_output)\n self.model_name = 'simpleAE'"
] | [
"0.73778796",
"0.7025361",
"0.6996897",
"0.6996897",
"0.6996897",
"0.6996897",
"0.6996897",
"0.6893153",
"0.656395",
"0.6520004",
"0.6520004",
"0.6498464",
"0.64387596",
"0.64387596",
"0.64387596",
"0.64387596",
"0.6397256",
"0.6388983",
"0.63875407",
"0.63851994",
"0.6379331",
"0.6376362",
"0.6332776",
"0.6323819",
"0.6323531",
"0.6308108",
"0.6305836",
"0.63035005",
"0.62864214",
"0.6257174"
] | 0.7899179 | 0 |
Constructor for Schaffer model | def __init__(self):
self.name = "Schaffer"
objectives = [o_sh_1, o_sh_2]
decisions = [Decision(-10 ** 5, 10 ** 5)]
Model.__init__(self, objectives, None, decisions) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, model):\n self._model = model",
"def __init__(self, model):\n self.model = model",
"def __init__(self, model):\n self.model = model",
"def __init__(self, model):\n self.model = model",
"def __init__(self, model):\n self.model = model",
"def __init__(self, model: str, **kwargs):\n super().__init__(model=model)",
"def __init__(self, model):\n\t\tself.model = model",
"def __init__(self, model: object):\n self.model = model",
"def __init__(self, **kwargs):\n super(Model, self).__init__(**kwargs)",
"def __init__(self) -> None:\n # TODO: Provide the complete constructor for this object",
"def __init__(self, model: Optional[Model] = None) -> None:\n self.model = model",
"def __init__(self):\n self.model = None",
"def __init__(self):\n self.model = None",
"def __init__(self, **kwargs):\n raise NotImplementedError",
"def __init__(self, **kwargs):\n pass",
"def __init__(self, **kwargs):\n pass",
"def __init__(self, **kwargs):\n pass",
"def __init__(self, **kwds):\n raise NotImplementedError",
"def __init__(self, model: Model1D):\n super().__init__(model=model)",
"def __init__(self, model: Model1D):\n super().__init__(model=model)",
"def __init__(self, model: Model1D):\n super().__init__(model=model)",
"def __init__(self, model: Model1D):\n super().__init__(model=model)",
"def __init__(self, model: Model1D):\n super().__init__(model=model)",
"def __init__(self, model: Model1D):\n self._model = model",
"def __init__ (self):\n pass",
"def __init__(self,\n model: Union[str, io.IOBase, DM, None] = None,\n name: Optional[str] = None,\n database = None):\n super().__init__(model=model, name=name, database=database)",
"def __init__(self):\n raise NotImplementedError",
"def __init__(self):\n raise NotImplementedError",
"def __init__(self):\n raise NotImplementedError",
"def __init__(self):\n raise NotImplementedError"
] | [
"0.7268544",
"0.7181175",
"0.7181175",
"0.7181175",
"0.7181175",
"0.7096036",
"0.707674",
"0.70726955",
"0.70252573",
"0.70025045",
"0.6913892",
"0.6901609",
"0.6901609",
"0.68665624",
"0.68252325",
"0.68252325",
"0.68252325",
"0.67947894",
"0.67506754",
"0.67506754",
"0.67506754",
"0.67506754",
"0.67506754",
"0.67315733",
"0.67090905",
"0.6701765",
"0.6637201",
"0.6637201",
"0.6637201",
"0.6637201"
] | 0.74169093 | 0 |
Constructor for Kursawe model | def __init__(self):
self.name = "Kursawe"
objectives = [o_ku_1, o_ku_2]
decisions = [Decision(-5, 5), Decision(-5, 5), Decision(-5, 5)]
Model.__init__(self, objectives, None, decisions) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__( self, weights, topics ):\n\n # Number of topics and dictionary size\n self.W, self.K = topics.shape\n assert( self.W > self.K )\n\n self.topics = topics\n MixtureModel.__init__(self, weights, topics)",
"def __init__(self, corpus: Corpus):\n\n # the legomena counts parametrize this model\n self.M = corpus.M\n self.N = corpus.N\n self.k = corpus.k",
"def __init__( self, alphas, topics ):\n\n # Number of topics and dictionary size\n self.W, self.K = topics.shape\n assert( self.W > self.K )\n\n TopicModel.__init__(self, alphas, topics)\n self.alphas = alphas\n self.topics = topics",
"def __init__(self, mu, sig):\n self.mu = mu\n self.sig = sig",
"def __init__(self):\n self.scaler = None\n self.model = None\n self.encoder = {}\n\n self._load_model()\n return",
"def __init__(self):\n self.weights = None\n self._epsilon = None\n self._num_training = None\n self._lambda = None\n return None",
"def __init__(self, samples):\n self.samples = samples",
"def __init__(self, encut, ldaul, Uparam, Jparam, name=\"DFTU_settings\"):\n\n dftu_settings = {\"LDAU\": \".TRUE.\" , \"LDAUU\": Uparam, \"LDATYPE\": 2, \"LADAUL\": ldaul, \"LDAUJ\": Jparam , \"LMAXMIX\": 4}\n InputParameters.__init__(self, name=name, hubbard_settings=dftu_settings)\n self.update_electronic_settings(\"ENCUT\", encut)",
"def __init__(self, weights):\n self._weights = weights",
"def __init__(self, wks, bks, wqs, bqs):\r\n self.wks = wks\r\n self.bks = bks\r\n self.wqs = wqs\r\n self.bqs = bqs",
"def __init__(self, likelifun, params, sigma=0.0):\n self.likeli = likelifun\n self.params = sp.array(params)\n self.sigma = sp.array(sigma)\n self.Nparams = len(params)\n self.fixcov = False\n self.Nsamples = 200\n self.weigmin = 0.0\n self.blow = 1.0 #increase the enveloping Gauss\n\n self.tweight = 2.0\n self.maxGaus = 40\n self.mineffsam = self.Nsamples*1\n\n self.effsample = 0.0\n self.weightmax = 0.0\n self.maxlike = 0.0\n\n self.pickleBetween = False\n\n random.seed(100)\n #For plotting purposes\n self.plot = False",
"def __init__(self, trainset, k=3):\n self._trainset = trainset\n self.k = k",
"def __init__(self, params, model, name=\"ds2_encoder\", mode='train'):\n super(DeepSpeech2Encoder, self).__init__(params, model, name, mode)",
"def __init__(self, samples, analysis):\r\n self.samples = samples\r\n self.analysis = analysis",
"def __init__(self, model, **kwargs):\n\n super().__init__(model)\n\n self._ut = UnscentedTransform(model, **kwargs)",
"def __init__(self, swc=0.1, sor=0.05, kro0=0.9, no=2.0, krw0=0.4, nw=2.0):\n self.kro0 = kro0\n self.krw0 = krw0\n self.no = no\n self.nw = nw\n self.swc = swc\n self.sor = sor",
"def __init__(self,m):\n # initialize model parameters\n \n # w is the m x 1 vector of weights.\n # m: num of features\n self.w = np.random.rand(m)",
"def __init__(self, model_name):\n\n self.name = model_name\n self.words = {}\n self.word_lengths = {}\n self.stems={}\n self.sentence_lengths={}\n self.endings={}\n self.total = 0",
"def __init__(self, x_train, model):\n self.x_train = x_train\n self.model = model",
"def __init__(self, units):\n super(BahdanauAttention, self).__init__()\n self.W1 = tf.keras.layers.Dense(units)\n self.W2 = tf.keras.layers.Dense(units)\n self.V = tf.keras.layers.Dense(1)",
"def __init__(self, hparams_string=None):\n ################################\n # Experiment Parameters #\n ################################\n self.epochs = 100\n self.precision = 32\n self.cudnn_enabled = True\n self.cudnn_benchmark = False\n self.use_labels = 'intended'\n self.model_version = '0.6.1'\n # v0.6: Introduced the ability to train the model next to the augmented data from GANtron\n # v0.6.1: GANtron data is only part of the training set.\n\n ################################\n # Data Parameters #\n ################################\n self.training_files = ['filelists/vesus_train.txt', 'filelists/cremad_train.txt', 'filelists/ravdess_train.txt']\n self.validation_files = ['filelists/vesus_val.txt', 'filelists/cremad_val.txt', 'filelists/ravdess_val.txt']\n self.test_files = ['filelists/vesus_test.txt', 'filelists/cremad_test.txt', 'filelists/ravdess_test.txt']\n self.n_emotions = 5\n\n ################################\n # Audio Parameters #\n ################################\n self.sampling_rate = 22050\n self.n_ftt = 1024\n self.hop_length = 256\n self.n_mel_channels = 80\n self.mel_offset = 0\n\n ################################\n # Model Parameters #\n ################################\n self.linear_model = True\n self.model_size = 256\n self.n_frames = 80\n\n ################################\n # Optimization Hyperparameters #\n ################################\n self.lr = 0.001\n self.weight_decay = 1e-6\n self.batch_size = 8\n self.max_noise = 5\n\n if hparams_string:\n self.add_params_string(hparams_string)",
"def __init__(self):\n super(SiLU, self).__init__()",
"def __init__(self):\n self.tokenizer = BOWTokenizer(\n English()\n ) # the tokenizer must have a tokenize() and parse() function.\n self.labelEncoder = LabelEncoder()\n self.vectorizer = CountVectorizer(\n tokenizer=self.tokenizer.tokenize, ngram_range=(1, 1)\n )\n self.decode_params = {}",
"def __init__(self, model=None):\n self.model = model\n self.model_f = None\n self.model_h = None\n self.cav = None\n self.sensitivity = None\n self.tcav_score = []\n self.y_labels = None",
"def __init__(self):\n self.name = \"Schaffer\"\n objectives = [o_sh_1, o_sh_2]\n decisions = [Decision(-10 ** 5, 10 ** 5)]\n Model.__init__(self, objectives, None, decisions)",
"def __init__(self):\n self.bigramCounts = collections.defaultdict(lambda : 0)\n self.trigramCounts = collections.defaultdict(lambda : 0)\n self.unigramCounts = collections.defaultdict(lambda : 1)\n self.continuationCounts = collections.defaultdict(lambda: 0)\n self.followingCounts = collections.defaultdict(lambda: 0)\n self.total = 1\n self.totalBigramCounts = 0\n print \"Training Language Model...\"\n self.train(brown.sents())\n print \"--Training Complete--\"",
"def __init__(self, model_name):\r\n self.name = model_name\r\n self.words = ({})\r\n self.word_lengths = ({})\r\n self.stems = ({})\r\n self.sentence_lengths = ({})\r\n self.punctuation = ({})",
"def __init__(\n self,\n compressed_model: NNCFNetwork,\n weight_quantizers: Dict[WeightQuantizerId, WeightQuantizerInfo],\n non_weight_quantizers: Dict[NonWeightQuantizerId, NonWeightQuantizerInfo],\n ):\n self._compressed_model = compressed_model\n self._weight_quantizers = weight_quantizers\n self._non_weight_quantizers = non_weight_quantizers",
"def __init__(self, model_name):\n self.name = model_name\n self.words = {}\n self.word_lengths = {}\n self.stems = {}\n self.sentence_lengths = {}\n self.punctuation = {}",
"def __init__(self, n_feature, n_hidden, n_output):\n super(simpleAE, self).__init__()\n self.Encoder = Encoder(n_feature, n_hidden)\n self.Decoder = Decoder(n_hidden, n_output)\n self.model_name = 'simpleAE'"
] | [
"0.6457493",
"0.6087304",
"0.60511214",
"0.5994956",
"0.5893758",
"0.5884114",
"0.5880795",
"0.5876523",
"0.5846621",
"0.58328944",
"0.5819378",
"0.5818917",
"0.5792546",
"0.5777222",
"0.5762234",
"0.57456267",
"0.5744699",
"0.57347846",
"0.5731228",
"0.572777",
"0.5710261",
"0.57030547",
"0.5702452",
"0.5685435",
"0.56818503",
"0.5679245",
"0.5667335",
"0.56666934",
"0.56645775",
"0.5650469"
] | 0.67541903 | 0 |
Add a phone number as a subscriber to the current Topic | def addSubscriber(self, phoneNumber):
if self.topicArn is None:
print 'ERROR: Notification topic not set!'
return
protocol = 'sms'
subscribeResponse = self.snsClient.subscribe(
TopicArn=self.topicArn,
Protocol=protocol,
Endpoint=phoneNumber
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _registerSubscriber(self, callerId, topic, topicType, callerApi):\n if topic not in self.FilterSubscribedTopics:\n self.__docWriter.addSub(callerId, topic, topicType)",
"def subscribe(self, subscriber):\n self.subscribers.append(subscriber)",
"def subscribe(self, transport, data):\r\n\r\n self.add(transport, address=data.get('hx_subscribe'))\r\n\r\n self.send(\r\n data.get('hx_subscribe'),\r\n {'message': \"%r is listening\" % transport}\r\n )",
"def phone(self, new_number):\n self._phone.number = new_number",
"def subscribe(self, topic):\n\t\tself.topic=topic\n\t\tself.client.subscribe(self.topic)",
"def subscribe_topic(self):\n req = {\n \"op\": \"subscribe\",\n \"args\": [\n \"instrument\",\n \"trade\",\n \"orderBook10\",\n \"execution\",\n \"order\",\n \"position\",\n \"margin\",\n ],\n }\n self.send_packet(req)",
"def subscribe( self, topic ):\n logging.info( \"Subscribing to topic %s\" %topic )\n try:\n self.client.subscribe( topic )\n except Exception as error:\n print( error )",
"def on_subscribe( client, userdata, mid, granted_qos ):\n logging.info( \"Topic successfully subcribed with QoS: %s\" %granted_qos )",
"def subscribe(self, user_token, topic):\n response = _request('POST',\n url=self.url_v1('/user/subscriptions/' + topic),\n user_agent=self.user_agent,\n user_token=user_token,\n )\n _raise_for_status(response)",
"def subscribe_mqtt(self, topic):\n if topic not in self.subscriptions:\n self.subscriptions.append(topic)\n self.mqtt.subscribe(topic)",
"def cbMqtt_on_subscribe(client, userdata, mid, granted_qos):\n # logger.debug('Subscribed to MQTT topic with message id %d', mid)\n pass",
"def telephone(self, telephone):\n\n self._telephone = telephone",
"def subscribe(self, topic: str, call_back):\n self.callback_dict[topic] = call_back\n self.client.subscribe(topic)",
"def publish_number(self):\n msg = Int64() # creating message\n msg.data = self.number_ # assigning message data\n self.publisher_.publish(msg) # publishing message data",
"def send(self, phone_number):\n #response = self.client.publish(PhoneNumber=phone_number, Message=self.message)\n return True",
"def add_subscriber(self, subscriber):\n # Append the subscriber to the list of subscribers\n if subscriber not in self.subscribers:\n self.subscribers.append(subscriber)",
"def telephone(self, telephone: str):\n\n self._telephone = telephone",
"def sms_phone_number(self, sms_phone_number):\n\n self._sms_phone_number = sms_phone_number",
"def subscribe(self, subject):\n pass",
"async def subscribe(self, topic: str, callback: aiowamp.SubscriptionHandler, *,\n match_policy: aiowamp.MatchPolicy = None,\n node_key: str = None,\n options: aiowamp.WAMPDict = None) -> int:\n ...",
"def AddTopic(self, topic_obj):\n self.topics.append(topic_obj)",
"def on_subscribe(self, client, userdata, mid, granted_qos):\n\t\tprint (\"[{}] Client subscribed to {}\".format(\n\t\t\tint(time.time()),\n\t\t\tself.topic\n\t\t))\n\t\t#the following lines are here and not in on_connect() only for printing purpose\n\t\tif not self.printed_sub:\n\t\t\tself.printed_sub = True\n\t\t\tself.subscribe(\"measure/people\")",
"def subscribe(request):\n address = request.POST.get('address')\n\n new_sub = Subscription(**{\n \"address\": address\n })\n new_sub.save()\n\n return HttpResponse(json.dumps({\n \"status\": \"success\"\n }, default=helpers.json_custom_parser), content_type='application/json')",
"def number(self, new_phone):\n returned_num = self.get_valid_num(new_phone)\n if returned_num is None:\n raise ValueError\n self._phone = returned_num",
"def service_phone_number(self, service_phone_number):\n\n self._service_phone_number = service_phone_number",
"def topic(self, topic):\n self.connection.topic(str(self), topic)",
"def phone_number(self, phone_number):\n\n self._phone_number = phone_number",
"def phone_number(self, phone_number):\n\n self._phone_number = phone_number",
"def phone_number(self, phone_number):\n\n self._phone_number = phone_number",
"def phone_number(self, phone_number):\n\n self._phone_number = phone_number"
] | [
"0.638156",
"0.6316568",
"0.6203704",
"0.6152549",
"0.61257267",
"0.5983778",
"0.59511983",
"0.59360534",
"0.59137255",
"0.58507955",
"0.5845568",
"0.58303136",
"0.5795692",
"0.5792568",
"0.57643414",
"0.5711302",
"0.57093185",
"0.5682131",
"0.5657824",
"0.5638501",
"0.5638212",
"0.5634427",
"0.5601938",
"0.5592495",
"0.5559785",
"0.55436075",
"0.5525535",
"0.5525535",
"0.5525535",
"0.5525535"
] | 0.76084113 | 0 |
Execute http probe and count metrics | async def exec_probes(self, session: aiohttp.ClientSession,
counter: dict):
self._logger.debug('Start exec probe %s', self.url)
regexp_metrics = [RegexpMetrics(pattern) for pattern in self._patterns]
status_code_metrics = StatusCodeMetrics()
time_metrics = TimeMetrics()
timestamp = time()
with ExitStack() as stack:
pattern_metrics = [stack.enter_context(
metric) for metric in regexp_metrics]
body = None
with status_code_metrics as set_status_code:
with time_metrics:
async with session.get(self.url) as response:
set_status_code(response.status)
body = await response.text()
for pattern_metric in pattern_metrics:
pattern_metric(body)
self._logger.debug('Publishing results (latency: %f)',
time_metrics.get_value())
counter[self.url.human_repr()] -= 1
return ModelV1(
timestamp=timestamp,
url=self.url.human_repr(),
latency=time_metrics.get_value(),
code=status_code_metrics.get_value(),
matches=list([Match(
pattern=regexp_metric.pattern.pattern,
count=regexp_metric.get_value(),
) for regexp_metric in regexp_metrics]),
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def poll_health():\n global timesCalled\n\n # Poll /health\n session = requests.Session()\n retry = Retry(connect=3, backoff_factor=0.5)\n adapter = HTTPAdapter(max_retries=retry)\n session.mount('http://', adapter)\n response = session.get(health_url)\n\n # Check HTTP status code\n status_code = response.status_code\n if status_code != status_ok:\n exit(1)\n\n # Get metrics values\n metrics = response.json()['metrics']\n requestLatencyValues.append(metrics['requestLatency'])\n dbLatencyValues.append(metrics['dbLatency'])\n cacheLatencyValues.append(metrics['cacheLatency'])\n\n # If 60 seconds has passed, send data to STDOUT\n timesCalled += 1\n if timesCalled == 6:\n output_data()\n\n timesCalled = 0\n requestLatencyValues.clear()\n dbLatencyValues.clear()\n cacheLatencyValues.clear()",
"def get(self):\n log.debug('/x-tree/FSMonitor.html: invoked')\n try:\n log.info('application health check...')\n host_name = socket.gethostname()\n url = \"http://localhost:8086/query\"\n querystring = {\"pretty\": \"true\", \"db\": \"IOT\",\n \"q\": \"SELECT count(*) FROM \\\"ttd_devices\\\" \"}\n response = requests.request(\"GET\", url, params=querystring)\n D=json.loads(response.text)\n total_recs=str(max(D['results'][0]['series'][0]['values'][0][1:]))\n except:\n result = {}\n log.exception('Exception while doing HealthCheck')\n return Response ('<html><body>THE SERVER IS DOWN</body></html>', mimetype=\"text/html\", status=500)\n return Response('<html><body>INFLUX DB <p/> Count:' + total_recs + '</body></html>', mimetype=\"text/html\")",
"def main():\n print(\"runner\")\n runner = Runner()\n stop_on_idle = True\n probes = []\n for url in urls:\n probe_cls = random.choice((HttpProbe, ThreadProbe, ShellProbe))\n runner.probes.append(probe_cls(url))\n\n runner.run()",
"def test_success_metrics(self):\n @self.graph.route(self.ns.collection_path, Operation.Search, self.ns)\n def foo():\n return \"\"\n\n response = self.client.get(\"api/v1/foo\")\n assert_that(response.status_code, is_(equal_to(200)))\n\n self.graph.metrics.histogram.assert_called_with(\n \"route\",\n ANY,\n tags=[\n \"endpoint:foo.search.v1\",\n \"backend_type:microcosm_flask\",\n ],\n )\n self.graph.metrics.increment.assert_called_with(\n \"route.call.count\",\n tags=[\n \"endpoint:foo.search.v1\",\n \"backend_type:microcosm_flask\",\n \"classifier:2xx\",\n ],\n )",
"def periodically_request_websites_metrics() -> int:\n # Making a get request\n configuration_path = os.path.abspath('configuration/service_configuration.json')\n list_of_websites_to_check = read_service_configuration(\n configuration_file=configuration_path, section='list_of_websites_to_check')\n try:\n\n remote_service_url = os.environ.get('websites_checker_service_url','http://192.168.1.101:8080/api/v1/websites_metrics')\n response = requests.post(url=remote_service_url, json=list_of_websites_to_check)\n if response:\n logger.info(f\"The request has been sent to {remote_service_url} with payload: {list_of_websites_to_check}\")\n\n else:\n logger.error(f\"Error contacting the service {remote_service_url}\")\n except Exception as error:\n logger.error(f\"The Exception {error} occurred\")\n return 1",
"def test_metrics(client):\n response = client.get(\"/metrics\")\n assert response.status_code == 200",
"def test_all_http_stats(self):\n client = Client()\n response = client.get(reverse('home'))\n self.assertEqual(200, response.status_code)\n response = client.get(reverse('browse_produce'))\n self.assertEqual(200, response.status_code)\n response = client.get(reverse('browse_locations'))\n self.assertEqual(200, response.status_code)\n response = client.get(reverse('search'))\n self.assertEqual(200, response.status_code)\n response = client.get(reverse('faq'))\n self.assertEqual(200, response.status_code)",
"def test_health(self):\n self.assert_request('get', '/_health')",
"def read_callback(data=None):\n\n hits_by_domain = get_hits_by_domain()\n\n if not hits_by_domain:\n collectd.info('hits_by_domain not collected successfully')\n pass\n else:\n for key in hits_by_domain:\n metric = collectd.Values()\n metric.plugin = 'hits_by_domain'\n metric.type = 'count'\n metric.type_instance = key\n metric.values = [hits_by_domain[key]]\n metric.dispatch()",
"def test_health_get(self):\n pass",
"def probe(self):\n index, perfdat = self.update_performance_data(indexes=INDEXES, perfdata=PERFDATA,\n force_update_perfdata_from_host=True) # Getting Monitoring Data\n perfdat['timestamp'] = time.time()\n\n diff_results = self.check_diff(primary_perfdata=perfdat)\n\n return self.yield_metrics(diff_results['data'])",
"def healthcheck(parameters): \n\n print(\"In healthcheck module\")",
"def test_health_check(self):\n result = self.app.get('/v1/health')\n\n # assert the status code of the response 200 (OK)\n self.assertEqual(result.status_code, 200)\n self.assertEqual(result.data, b'UP')",
"def fetch(self):\n\n\n # Update Prometheus metrics with application metrics\n self.current_requests.set(get_current_requests())\n self.total_uptime.set(get_uptime())\n self.health.state(get_health())",
"def health():\n return jsonify(hostname=hostname, uptime=uptime(), \\\n cpu_percent=int(cpu_percent(interval=None, percpu=False)))",
"def run_diagnostics(self):\n request = {\n 'jsonrpc': '2.0',\n 'id': 0,\n 'method': 'ping'\n }\n result = CurlTestBase.send_request('&diag=1', request)\n response = '<html><body><pre>'\n response += cgi.escape(result.content)\n response += '</pre></body></html>'\n self.response.out.write(response)",
"def after_request(response):\n if hasattr(request, \"_prometheus_metrics_request_start_time\"):\n request_latency = max(\n default_timer() - request._prometheus_metrics_request_start_time, 0\n )\n REQUEST_DURATION_HISTOGRAM.labels(\n APP_NAME,\n request.method,\n request.endpoint,\n response.status_code,\n ).observe(request_latency)\n REQUESTS_COUNT.labels(\n APP_NAME,\n request.method,\n request.endpoint,\n response.status_code,\n ).inc()\n return response",
"def status(event):\n e = ''\n\n try:\n logger.setLevel(event.get('loglevel'))\n logging.getLogger('urllib3').setLevel(event.get('loglevel'))\n except:\n pass\n try:\n pool = urllib3.PoolManager()\n except Exception as e:\n raise CreatePoolManagerFailure(e)\n\n if event.get('url', None) is None:\n raise AttributeError(\"url not specified\")\n\n # The code doesn't know how to handle POST\n # The code doesn't know how to handle these yet\n\n st = time.perf_counter()\n try:\n response = pool.request(\n event.get('method', 'GET'),\n event.get('url', None),\n retries=int(event.get('retries', 3)),\n timeout=float(event.get('timeout', 3)))\n except Exception as e:\n raise HttpRequestError(e)\n\n responseTime = (time.perf_counter() - st) * 1000\n\n logger.debug(\"checking endpoint: %s:%s status=%s bytes=%s time=%.3fms\",\n event.get('method', 'GET'),\n event.get('url', None), response.status,\n response._fp_bytes_read, responseTime)\n\n if response.status >= 200 and response.status <= 299:\n statusMessage = \"2xx\"\n elif response.status >= 300 and response.status <= 399:\n statusMessage = \"3xx\"\n elif response.status >= 400 and response.status <= 499:\n statusMessage = \"4xx\"\n elif response.status >= 500 and response.status <= 599:\n statusMessage = \"5xx\"\n endpointStatus = response.status\n\n ts = datetime.datetime.timestamp(datetime.datetime.now())\n \n logging.getLogger('urllib3').setLevel(logging.WARNING)\n \n return {\n 'statusCode': 200,\n 'body': \"OK\",\n 'url': event.get('url', None),\n 'error': e,\n 'timestamp': ts,\n 'endpoint': {\n 'status': endpointStatus,\n 'message': statusMessage,\n 'time': responseTime\n }\n }",
"def mymetrics(): \n _update_metric_counters()\n logging.debug(prom_objects_seen.collect())\n return flask.Response(generate_latest(), mimetype='text/plain')",
"def url_health():\n return \"OK\"",
"def before_request():\n request._prometheus_metrics_request_start_time = time.time()",
"def test_http_speed(self):\n log.msg(\"timing retrival time for %s\"\n %self.http_url)\n def got_response(body):\n self.report['http_response_time'] = (datetime.now() - self.http_request_start_time).total_seconds()\n self.report['http_success'] = True\n log.msg(\"Successful http request\")\n\n self.http_request_start_time = datetime.now()\n return self.doRequest(self.http_url, method=\"GET\", \n body_processor=got_response)",
"def test_health(self) -> None:\n self._response = self._app.get('/health')\n\n self.assertEqual(self._response.status, '200 OK')",
"def test(request, backend_usages, application, client, setup, expect_ok, expect_not_found):\n\n request.getfixturevalue(setup)\n\n assert len(backend_usages) == 2\n\n analytics = application.threescale_client.analytics\n\n for path in expect_ok:\n hits_before = hits(application, analytics)\n\n response = client.get(path)\n assert response.status_code == 200, f\"For path {path} expected status_code 200\"\n\n hits_after = resilient.stats_service_usage(\n application.threescale_client, application[\"service_id\"], \"hits\", \"total\", hits_before+1)\n\n assert hits_before + 1 == hits_after, f\"For path {path} expected hits to be increased by 1\"\n\n for path in expect_not_found:\n hits_before = hits(application, analytics)\n\n response = client.get(path)\n assert response.status_code == 404, f\"For path {path} expected status_code 400\"\n\n hits_after = hits(application, analytics)\n assert hits_before == hits_after, f\"For path {path} expected hits to be same before and after\"",
"def run(self, directories, params):\n\n ps = subprocess.Popen(\n \"ab -n 2000 -c 100 %s\" % (params[\"url\"]),\n shell=True,\n stdout=subprocess.PIPE,\n cwd=directories.builtins\n )\n\n out, err = ps.communicate()\n\n open(\n os.path.join(\n directories.artifacts,\n self.out\n ),\"w\"\n ).write(out)\n\n timetaken = re.findall('Time taken for tests:\\s+(\\d+\\.\\d+) seconds', out)[0]\n return {\n \"return_code\": ps.returncode,\n \"raw_output\": out,\n \"metrics\": [\n (\"load_test_seconds\", \"number\", timetaken)\n ]\n }",
"def hourly_stats():\r\n count_total.delay()\r\n count_unique.delay()\r\n count_tags.delay()",
"async def test_all_samples(self):\n response = await self.collect(get_request_json_return_value=self.JMETER_JSON)\n self.assert_measurement(response, value=\"248\", entities=[])",
"def health_check():\n app.logger.info(\"Health Check!\")\n return Response(\"All Good!\", status=200)",
"def get_kong_node_usage_metrics(opts):\n\n url = \"{0}/status\".format(opts['base_url'])\n\n r = requests.get(url)\n try:\n r.raise_for_status()\n except requests.exceptions.RequestException as e:\n logging.debug(\"http response body - %s\", r.text)\n logging.error(\"An exception occurred: (%s)\", e)\n sys.exit(2)\n\n print r.text\n\n return True",
"def test_get_virtual_machine_count_metrics1(self):\n pass"
] | [
"0.623642",
"0.5890013",
"0.5889076",
"0.588622",
"0.58364296",
"0.58144206",
"0.5729184",
"0.5712603",
"0.57124424",
"0.56684583",
"0.5641274",
"0.5602359",
"0.5582319",
"0.55592555",
"0.55570114",
"0.55567306",
"0.5549612",
"0.55015385",
"0.54746914",
"0.5471975",
"0.5470048",
"0.54614276",
"0.54493815",
"0.54462326",
"0.54241073",
"0.53972554",
"0.53933525",
"0.5389662",
"0.53876084",
"0.5377056"
] | 0.636973 | 0 |
Method to write the XML file containing the information regarding the stop condition for branching in DET method @ In, filename, string, filename (with absolute path) of the XML file that needs to be printed out @ In, trigger, string, the name of the trigger variable | def writeXmlForDET(filename,trigger,listDict,stopInfo):
# trigger == 'variable trigger'
# Variables == 'variables changed in the branch control logic block'
# associated_pb = 'CDF' in case multibranch needs to be performed
# stopInfo {'end_time': end simulation time (already stopped), 'end_ts': end time step}
root=ET.Element('Branch_info')
root.set("end_time",str(stopInfo['end_time']))
if "end_ts" in stopInfo.keys():
root.set("end_ts",str(stopInfo['end_ts']))
triggerNode=ET.SubElement(root,"Distribution_trigger")
triggerNode.set("name",trigger)
for varInfo in listDict:
var=ET.SubElement(triggerNode,'Variable')
var.text=varInfo['name']
var.set('type',varInfo['type'])
var.set('old_value',str(varInfo['old_value']))
var.set('actual_value',str(varInfo['new_value']))
if 'associated_pb' in varInfo.keys():
var.set('probability',str(varInfo['associated_pb']))
with open(filename,'w') as fileObject:
fileObject.write(minidom.parseString(ET.tostring(root, 'utf-8')).toprettyxml(indent="\t")) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_gen_xml(self, out_file):\n\n param_list = []\n msg = []\n msg_type = []\n dep_node = []\n for line in self.full_ed_lines:\n param_list.append(line.text())\n dep_pkg = param_list[6].split(', ')\n if dep_pkg[len(dep_pkg) - 1] == '':\n dep_pkg.pop()\n for dep in self.manager.wid.sub_list:\n dep_node.append(dep['msg_type'])\n for dep in self.manager.wid.pub_list:\n dep_node.append(dep['msg_type'])\n for dep in dep_node:\n a, b = dep.split('/')\n msg.append(a)\n msg_type.append(b)\n f = open('../genkernel/templates/package_rosgen.xml')\n o = open(out_file, 'a')\n flag = 0\n while 1:\n line = f.readline()\n if not line: break\n for i in range(6):\n line = line.replace('[{0}]'.format(i), param_list[i])\n line = line.replace('[7]', param_list[7])\n if line.find('[6]') != -1:\n for dep in dep_pkg:\n line_dep = '\\t<depend>{0}</depend>\\n'.format(dep)\n o.write(line_dep)\n flag = 1\n elif line.find('[8]') != -1:\n for dep, tp in zip(msg, msg_type):\n line_dep = '\\t\\t<depend type=\"{1}\">{0}</depend>\\n'.format(dep, tp)\n o.write(line_dep)\n flag = 1\n elif line.find('<subscribers>') != -1:\n o.write('\\t\\t<subscribers>\\n')\n for sub in self.manager.wid.sub_list:\n o.write('\\t\\t\\t<sub>\\n')\n o.write('\\t\\t\\t\\t<name>{0}</name>\\n'.format(sub['name']))\n o.write('\\t\\t\\t\\t<msg_type>{0}</msg_type>\\n'.format(sub['msg_type']))\n o.write('\\t\\t\\t\\t<topic_name>{0}</topic_name>\\n'.format(sub['topic_name']))\n o.write('\\t\\t\\t\\t<queue_size>{0}</queue_size>\\n'.format(sub['queue_size']))\n o.write('\\t\\t\\t</sub>\\n')\n o.write('\\t\\t</subscribers>\\n')\n flag = 1\n elif line.find('<publishers>') != -1:\n o.write('\\t\\t<publishers>\\n')\n for pub in self.manager.wid.pub_list:\n o.write('\\t\\t\\t<pub>\\n')\n o.write('\\t\\t\\t\\t<name>{0}</name>\\n'.format(pub['name']))\n o.write('\\t\\t\\t\\t<msg_type>{0}</msg_type>\\n'.format(pub['msg_type']))\n o.write('\\t\\t\\t\\t<topic_name>{0}</topic_name>\\n'.format(pub['topic_name']))\n o.write('\\t\\t\\t\\t<queue_size>{0}</queue_size>\\n'.format(pub['queue_size']))\n o.write('\\t\\t\\t</pub>\\n')\n o.write('\\t\\t</publishers>\\n')\n flag = 1\n if flag == 0:\n o.write(line)\n else:\n flag = 0\n o.close()\n f.close()\n self.changed = False",
"def export_to_file(self, filename):\n if len(filename.split(\".\")) == 1:\n filename += \".xml\"\n xmlstring = self._dommodel.toprettyxml(\" \", \"\\n\")\n with open(filename, \"w\") as f:\n f.write(xmlstring)",
"def print_xml(self, filename):\n\n # TODO: check what happens when input is not an xml file\n # TODO: add xmldec, processing instructions and comments\n\n xml_string = u'' # TODO: use a string buffer\n offset = 0\n stack = []\n\n for char in self.text:\n\n # any tags on the stack that can be closed?\n (stack, matching) = self._matching_closing_tags(offset, stack, [])\n for t in matching:\n xml_string += \"</%s>\" % t.name\n\n # any new opening tags?\n for t in self.source_tags.opening_tags.get(offset,[]):\n stack.append(t)\n xml_string += \"<%s%s>\" % (t.name, t.attributes_as_string())\n\n # any of those need to be closed immediately (non-consuming tags)?\n (stack, matching) = self._matching_closing_tags(offset, stack, [])\n for t in matching:\n xml_string += \"</%s>\" % t.name\n\n xml_string += escape(char)\n offset += 1\n\n fh = open(filename, 'w')\n fh.write(xml_string.encode('utf-8'))",
"def write_xml(self, xmlfile):\n system.xml.write_file(xmlfile, self.status, 'status')",
"def XML_EC_PL(Name, InputsFile, OutputFile, emin,emax):\n\n\t#On commence par afficher ce qu'on fait\r\n\tprint \" Build xml file \"\r\n\r\tprint InputsFile\n\t#ouverture du fichier dans lequel on place le source model\n\ttry:\n\t\tfresult = open(OutputFile, 'w')\n\texcept:\n\t\tprint \"Coucou\"\r\n \t#ecriture des premieres lignes invariantes\n\tfresult.write('<?xml version=\"1.0\" ?>')\r\n\tfresult.write(\"<source_library title=\\\"source library\\\">\\n\")\n\r\n \t#ouverture du fichier avec les entrees\r\n\tf = open(InputsFile,\"r\")\r\n\tlines = f.readlines()\r\n\t\r\n \t#Ajout des sources detectees dans le catalogue\n\t#Pour chaque ligne du fichier d'entree\r\n\tfor line in range(len(lines)):\n\t\t#Lire les donnees de la ligne\t\t\r\n\t\tdata = lines[line].split()\r\n\t\tname = data[0]\n\n\t\t#Verification : est on en train de traiter la source que l'on veut etudier ou une autre ?\r\n\t\tif str(name) == Name :\r\n\t\t\tmysource = 1\r\n\t\telse:\r\n\t\t\tmysource = 0\n\n\t\t#recuperation des donnees\r\n\t\tRA = data[1]\r\n\t\tDEC = data[2]\r\n\t\tIntegral = float(data[3])*float(Frac)\r\n\t\tGamma= data[4]\n\n\t\t\r\n\t\ttry:\n\t\t\t#essai de definition des donnees pour un PL avec ExpCut\n\t\t\tPrefactor = float(data[5])*float(Frac)\r\n\t\t\tEnergy = float(data[6])\r\n\t#\t\tPrefactor = Prefactor/pow(Energy/100., float(Gamma)) #Densite de flux calculee a Epivot\r\n\t#\t\tPrefactor = Prefactor*pow(1000./100., float(Gamma)) #We do the calculation with (E/1000.)^Gamma\n\t\t\tvariabilite=float(data[8])\n\n#\t\t\tprint variabilite\n\n\n\n\r\n\t\t\tcut = float(data[7]) # Cut est la variable qui nous permettra de savoir si il faut utiliser un cut off (1) ou une loi de puissance normale (2)\r\n\t\texcept:\r\n\t\t\ttry:\r\n\t\t\t\tcut = float(data[5])\r\n\t\t\texcept:\r\n\t\t\t\tprint \" Wrong size of list \"\r\n\t\t\t\tsys.exit()\r\n \t#Si on considere un ccut off exponentiel pour la source :\r\n\t\tif cut == 1:\n\t\t\t#ecriture du nom de la source consideree\r\n\t\t\tresult_line=\" <source \"\r\n\t\t\tresult_line += \"name=\\\"\"+name+\"\\\"\"\r\n\t\t\tresult_line += \" type=\\\"PointSource\\\">\\n\"\r\n\t\t\tspectrum_type = \"PLSuperExpCutoff\"\n\t\t\t#Utilisation de la modelisation PLSuperExpCutoff car plus simple et plus intuitive pour nous et pour la modelisation des pulsars si il faut en modeliser\n\r\n\t\t\t#definition des parametres spectraux a prendre en comtpe et de la chaine de caractere a integrer\r\n\n\n\n\t\t\tif variabilite==0.0 or variabilite==2.0:\n\t\t\t\tspectrum_lines = \" <parameter free=\\\"0\\\" max=\\\"10000000.0\\\" min=\\\"0.0000001\\\"\"\n\n\t\t\t\t#d'ou vient ce 1e-12\r\n\t\t\t\tIntegral = float(Prefactor)*1.0e10\r\n\t\t\t\tscale = 1.0e-10\n\r\n\t\t\t\tspectrum_lines += \" name=\\\"Prefactor\\\" scale=\\\"\"+str(scale)+\"\\\" value=\\\"\"\r\n\t\t\t\tspectrum_lines += str(Integral)+\"\\\" />\\n\"\r\n \r\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"1\\\" max=\\\"5.0\\\" min=\\\"0.\\\"\"\r\n\t\t\t\tspectrum_lines += \" name=\\\"Index1\\\" scale=\\\"-1.0\\\" value=\\\"\"\r\n\t\t\t\tspectrum_lines += str(Gamma)+\"\\\"/>\\n\"\r\n \r\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"20000.0\\\" min=\\\"1.0\\\"\"\r\n\t\t\t\tspectrum_lines += \" name=\\\"Scale\\\" scale=\\\"1.0\\\" value=\\\"\"+str(Energy)+\"\\\"/>\\n\"\r\n \r\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"1\\\" max=\\\"100.0\\\" min=\\\"0.001\\\"\"\n\t\t\t\tspectrum_lines += \" name=\\\"Cutoff\\\" scale=\\\"1000.0\\\" value=\\\"30.0\\\"/>\\n\"\n\r\n\t\t\t\tspectrum_lines += \" 
<parameter free=\\\"0\\\" max=\\\"5.0\\\" min=\\\"0.0\\\"\"\r\n\t\t\t\tspectrum_lines += \" name=\\\"Index2\\\" scale=\\\"1.0\\\" value=\\\"1.0\\\"/>\\n\"\n\t\t\telif variabilite==1.0 :\n\t\t\t\tspectrum_lines = \" <parameter free=\\\"1\\\" max=\\\"10000000.0\\\" min=\\\"0.0\\\"\"\n\n\t\t\t\t#d'ou vient ce 1e-12\r\n\t\t\t\tIntegral = float(Prefactor)*1.0e10\r\n\t\t\t\tscale = 1.0e-10\n\n\t\t\t\tspectrum_lines += \" name=\\\"Prefactor\\\" scale=\\\"\"+str(scale)+\"\\\" value=\\\"\"\r\n\t\t\t\tspectrum_lines += str(Integral)+\"\\\" />\\n\"\r\n \r\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"1\\\" max=\\\"5.0\\\" min=\\\"0.\\\"\"\r\n\t\t\t\tspectrum_lines += \" name=\\\"Index1\\\" scale=\\\"-1.0\\\" value=\\\"\"\r\n\t\t\t\tspectrum_lines += str(Gamma)+\"\\\"/>\\n\"\r\n \r\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"20000.0\\\" min=\\\"1.0\\\"\"\r\n\t\t\t\tspectrum_lines += \" name=\\\"Scale\\\" scale=\\\"1.0\\\" value=\\\"\"+str(Energy)+\"\\\"/>\\n\"\r\n \r\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"1\\\" max=\\\"100.0\\\" min=\\\"0.0001\\\"\"\r\t\t\t\tspectrum_lines += \" name=\\\"Cutoff\\\" scale=\\\"1000.0\\\" value=\\\"30.0\\\"/>\\n\"\r\n \r\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"5.0\\\" min=\\\"0.0\\\"\"\r\n\t\t\t\tspectrum_lines += \" name=\\\"Index2\\\" scale=\\\"1.0\\\" value=\\\"1.0\\\"/>\\n\"\n\n\r\n \r\n\n# <spectrum type=\"PLSuperExpCutoff\">\n# <parameter free=\"1\" max=\"100000\" min=\"0\" name=\"Prefactor\" scale=\"1e-10\" value=\"Prefactor*1e-10\"/>\n# <parameter free=\"1\" max=\"0\" min=\"5\" name=\"Index1\" scale=\"-1\" value=\"valeur du catalogue\"/>\n# <parameter free=\"0\" max=\"20000\" min=\"1.0\" name=\"Scale\" scale=\"1\" value=\"Epivot\"/>\n# <parameter free=\"1\" max=\"300000\" min=\"100\" name=\"Cutoff\" scale=\"1\" value=\"3000\"/>\n# <parameter free=\"0\" max=\"5\" min=\"0\" name=\"Index2\" scale=\"1\" value=\"1.5\"/>\n# </spectrum>\n\n\r\n\t\telse:\n\t\t#Sinon (si on considere une loi de puissance simple)\n\t\t#definition de la chaine de caractere comportant le nom de la source\r\n\t\t\tresult_line=\" <source \"\r\n\t\t\tresult_line += \"name=\\\"\"+name+\"\\\"\"\n\t\t\tif mysource == 0:\r\t\t\t\tresult_line += \" type=\\\"PointSource\\\">\\n\"\n\t\t\telse:\n\t\t\t\tresult_line += \" type=\\\"PointSource\\\">\\n\"\t\t\t\t\n\n\t\t\t#definition de la chaine de caractere correspondant a la forme de fit que l'on souhaite utiliser (Loi de puissance)\r\n\t\t\tspectrum_type = \"PowerLaw2\"\r\n\r\n\t\t\tif mysource == 0 and variabilite!=1.0:\n\t\t\t#si ce n'est pas la source que l'on etudie on fige le parametre Integrale\n\t\t\t\tspectrum_lines = \" <parameter free=\\\"0\\\" max=\\\"1000000.0\\\" min=\\\"0.0\\\"\"\r\n\t\t\telse:\n\t\t\t#sinon on le libere\r\n\t\t\t\tspectrum_lines = \" <parameter free=\\\"1\\\" max=\\\"1000000.0\\\" min=\\\"0.0\\\"\"\n\n\n\n\n\n\t\t\t#Toujours ce facteur....\r\n\t\t\tIntegral = float(Integral)*1e10\r\n\t\t\tscale = 1e-10\n\n\n\t\n\r\n\t\t\tspectrum_lines += \" name=\\\"Integral\\\" scale=\\\"\"+str(scale)+\"\\\" value=\\\"\"\r\n\t\t\tspectrum_lines += str(Integral)+\"\\\" />\\n\"\n\r\n\t\t\tif mysource == 0 and variabilite!=1.0:\n\t\t\t\t#si ce n'est pas la source que l'on etudie on fige le parametre gamma\r\n\t\t \t\tspectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"5.0\\\" min=\\\"0.\\\"\"\r\n\t\t\telse:\n\t\t\t\t#si c'est pas la source que l'on etudie on le laisse libre\r\n\t\t \t\tspectrum_lines += \" <parameter free=\\\"1\\\" max=\\\"5.0\\\" min=\\\"0.\\\"\"\n\n\t\t\t#fin 
de la chaine de parametres sur le modele spectral\r\n\t\t\tspectrum_lines += \" name=\\\"Index\\\" scale=\\\"-1.0\\\" value=\\\"\"\r\n\t\t\tspectrum_lines += str(Gamma)+\"\\\"/>\\n\"\r\n \r\n\t\t\tif mysource == 0 and variabilite!=1.0:\n\t \n\t\t\t spectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"200000.0\\\" min=\\\"20.0\\\"\"\r\n\t\t\t spectrum_lines += \" name=\\\"LowerLimit\\\" scale=\\\"1.0\\\" value=\\\"1000.0\\\"/>\\n\"\r\n \r\n\t\t\t spectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"1000000.0\\\" min=\\\"20.0\\\"\"\r\n\t\t\t spectrum_lines += \" name=\\\"UpperLimit\\\" scale=\\\"1.0\\\" value=\\\"100000.0\\\"/>\\n\"\n\t\t\telse:\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"200000.0\\\" min=\\\"20.0\\\"\"\n\t\t\t\tspectrum_lines += \" name=\\\"LowerLimit\\\" scale=\\\"1.0\\\" value=\\\"100\\\"/>\\n\"\n\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"100000.0\\\" Min =\\\"20.0\\\"\"\n\t\t\t\tspectrum_lines += \" name=\\\"UpperLimit\\\" scale=\\\"1.0\\\" value=\\\"100000.0\\\"/>\\n\"\n\n \t\t#ajout du modele spectral a la liste de parametres \r\n\t\tresult_line += \" <spectrum type=\\\"\"+spectrum_type+\"\\\">\\n\"\r\t\tresult_line += spectrum_lines\r\n\t\tresult_line += \" </spectrum>\\n\"\n\n\t\t\n\n\t\tif mysource==0 and variabilite!=1.0:\n \t\t\t#ajout du modele spatial a la liste de parametres \r\n\t\t\tresult_line += \" <spatialModel type=\\\"SkyDirFunction\\\">\\n\"\r\n\t\t\tresult_line += \" <parameter free=\\\"0\\\" max=\\\"360\\\" min=\\\"-360\\\"\"\r\n\t\t\tresult_line += \" name=\\\"RA\\\" scale=\\\"1\\\" value=\\\"\"+RA+\"\\\"/>\\n\"\r\n\t\t\tresult_line += \" <parameter free=\\\"0\\\" max=\\\"90\\\" min=\\\"-90\\\"\"\r\n\t\t\tresult_line += \" name=\\\"DEC\\\" scale=\\\"1\\\" value=\\\"\"+DEC+\"\\\"/>\\n\"\r\n\t\t\tresult_line += \" </spatialModel>\\n\"\n\t\telif mysource==0 and variabilite==1.0:\n \t\t\t#ajout du modele spatial a la liste de parametres \r\n\t\t\tresult_line += \" <spatialModel type=\\\"SkyDirFunction\\\">\\n\"\r\n\t\t\tresult_line += \" <parameter free=\\\"1\\\" max=\\\"360\\\" min=\\\"-360\\\"\"\r\n\t\t\tresult_line += \" name=\\\"RA\\\" scale=\\\"1\\\" value=\\\"\"+RA+\"\\\"/>\\n\"\r\n\t\t\tresult_line += \" <parameter free=\\\"1\\\" max=\\\"90\\\" min=\\\"-90\\\"\"\r\n\t\t\tresult_line += \" name=\\\"DEC\\\" scale=\\\"1\\\" value=\\\"\"+DEC+\"\\\"/>\\n\"\r\n\t\t\tresult_line += \" </spatialModel>\\n\"\n\t\telse:\n #ajout du modele spatial a la liste de parametres \n\t\t\tresult_line += \" <spatialModel type=\\\"SkyDirFunction\\\">\\n\"\n\t\t\tresult_line += \" <parameter free=\\\"1\\\" max=\\\"360\\\" min=\\\"-360\\\"\"\n\t\t\tresult_line += \" name=\\\"RA\\\" scale=\\\"1\\\" value=\\\"\"+RA+\"\\\"/>\\n\"\n\t\t\tresult_line += \" <parameter free=\\\"1\\\" max=\\\"90\\\" min=\\\"-90\\\"\"\n\t\t\tresult_line += \" name=\\\"DEC\\\" scale=\\\"1\\\" value=\\\"\"+DEC+\"\\\"/>\\n\"\n\t\t\tresult_line += \" </spatialModel>\\n\"\n\t\t\t\n\t\tresult_line += \" </source>\\n\"\r\n\t\tfresult.write(result_line+\"\\n\")\r\n #Ajout du fond diffus galactique\n\tresult_line=\" <source \"\r\n\tresult_line += \"name=\\\"gal_v02\\\"\"\r\n\tresult_line += \" type=\\\"DiffuseSource\\\">\\n\"\r\n\tspectrum_type = \"ConstantValue\"\r\n\r\n\tspectrum_lines = \" <parameter free=\\\"1\\\" max=\\\"10.0\\\" min=\\\"0\\\"\"\r\n\tspectrum_lines += \" name=\\\"Value\\\" scale=\\\"1.0\\\" value=\\\"\"+str(Frac)+\"\\\" />\\n\"\r\n\r\n\tresult_line += \" <spectrum type=\\\"\"+spectrum_type+\"\\\">\\n\"\r\n\tresult_line += 
spectrum_lines\r\n\tresult_line += \" </spectrum>\\n\"\r\n\r\n\tresult_line += \" <spatialModel file=\\\"/nfs/farm/g/glast/u31/marianne/VelaX/July09_Pointed/gll_iem_v02.fit\\\" type=\\\"MapCubeFunction\\\">\\n\"\r\n\tresult_line += \" <parameter free=\\\"0\\\" max=\\\"1000.0\\\" min=\\\"0.0\\\"\"\r\n\tresult_line += \" name=\\\"Normalization\\\" scale=\\\"1\\\" value=\\\"1.0\\\"/>\\n\"\r\n\tresult_line += \" </spatialModel>\\n\"\r\n\tresult_line += \" </source>\\n\"\r\n\tfresult.write(result_line+\"\\n\")\r\n\r\n \t#Ajout du fond diffus extragalactique\r\n\tresult_line=\" <source \"\r\n\tresult_line += \"name=\\\"eg_v02\\\"\"\r\n\tresult_line += \" type=\\\"DiffuseSource\\\">\\n\"\r\n\tspectrum_type = \"FileFunction\"\r\n\r\tspectrum_lines = \" <parameter free=\\\"1\\\" max=\\\"10.0\\\" min=\\\"0\\\"\"\r\n\tspectrum_lines += \" name=\\\"Normalization\\\" scale=\\\"1.0\\\" value=\\\"\"+str(Frac)+\"\\\" />\\n\"\r\n\r\n\tresult_line += \" <spectrum file=\\\"/nfs/farm/g/glast/u31/marianne/VelaX/July09_Pointed/isotropic_iem_v02.txt\\\" type=\\\"\"+spectrum_type+\"\\\">\\n\"\r\n\tresult_line += spectrum_lines\r\n\tresult_line += \" </spectrum>\\n\"\r\n \r\n\tresult_line += \" <spatialModel type=\\\"ConstantValue\\\">\\n\"\r\n\tresult_line += \" <parameter free=\\\"0\\\" max=\\\"100.0\\\" min=\\\"0.0\\\"\"\r\n\tresult_line += \" name=\\\"Value\\\" scale=\\\"1\\\" value=\\\"1.0\\\"/>\\n\"\r\n\tresult_line += \" </spatialModel>\\n\"\r\n\tresult_line += \" </source>\\n\"\r\n\tfresult.write(result_line+\"\\n\")\r\n\n \t#Fermeture des fichiers \r\n\tf.close() \r\n\tfresult.write(\"\\n</source_library>\\n\")\r\n\tfresult.close()\r\n\treturn",
"def xmlwrite(self, doc, filename):\n pathname = os.path.join(self.session.session_dir, filename)\n f = open(pathname, \"w\")\n doc.writexml(writer=f, indent=\"\", addindent=\" \", newl=\"\\n\", encoding=\"UTF-8\")\n f.close()",
"def write(self, filename):\n \n return self.model.write(filename,xml_declaration=True, encoding='utf-8')",
"def writeFile(self, filename):\n s = ET.tostring(self._root)\n\n #Remove all formatting\n s = s.replace('\\n','')\n s = s.replace('\\t','')\n s = s.replace('\\r','')\n\n f = open(filename, 'w')\n f.write(minidom.parseString(s).toprettyxml())\n f.close()",
"def createOutput():\n\n firstPeriod = True\n # get edge No\n edgesNo = 0\n edgesSet = set()\n for timestep, taxiList in vtypeDict.iteritems():\n for tup in taxiList:\n edgesSet.add(tup[1])\n edgesNo = len(edgesSet)\n\n outputFile = open(path.FQoutput, 'w')\n outputFile.write('<?xml version=\"1.0\"?>\\n')\n outputFile.write('<paramEffects aggregationInterval=\"%d\" vehicles=\"%d\" edges=\"%d\">\\n' % (\n aggInterval, vehSum, edgesNo))\n for period, quota, vtypeDictR, taxiSum in generatePeriodQuotaSets(True):\n if quota is None:\n if not firstPeriod:\n outputFile.write(\"\\t</periods>\\n\")\n else:\n firstPeriod = False\n outputFile.write('\\t<periods period=\"%d\">\\n' % (period))\n else:\n simpleTaxiMeanVList = [0, 1]\n simpleEdgeMeanVList = [0, 1]\n drivenEdgesSet = set()\n\n if len(vtypeDictR) == 0: # if the processed FCD returns no Values\n print(\"noData p\", period, \" q\", quota)\n drivenEdgesSet.add(0)\n else: # create mean from all taxi speed values\n for timestep, taxiList in vtypeDictR.iteritems():\n for tup in taxiList: # all elements in this timestep\n simpleTaxiMeanVList[0] += tup[2]\n simpleTaxiMeanVList[1] += 1\n drivenEdgesSet.add(tup[1])\n # create mean from all edge speed values which are driven by the\n # chosen taxis\n drivenEdgesList = list(drivenEdgesSet)\n drivenEdgesList.sort()\n # print \"dataSets \",simpleTaxiMeanVList[1]\n\n # --edgeDump-- #\n \"\"\"\n for i in edgeDumpDict.keys(): #all intervals\n for edge,v in edgeDumpDict[i]:\n if BinarySearch.isElmInList(drivenEdgesList,edge):\n simpleEdgeMeanVList[0]+=v\n simpleEdgeMeanVList[1]+=1\n \"\"\"\n # --vtype-- #\n\n for timestep, taxiList in vtypeDict.iteritems():\n for tup in taxiList:\n if BinarySearch.isElmInList(drivenEdgesList, tup[1]):\n simpleEdgeMeanVList[0] += tup[2]\n simpleEdgeMeanVList[1] += 1\n\n # calc values for output\n detectedEdges = len(drivenEdgesSet)\n relDetectedEdges = detectedEdges * 100.0 / edgesNo\n vSim = simpleEdgeMeanVList[0] / simpleEdgeMeanVList[1]\n vSimFCD = simpleTaxiMeanVList[0] / simpleTaxiMeanVList[1]\n vAbsDiff = vSimFCD - vSim\n if vSim != 0:\n vRelDiff = vAbsDiff / vSim * 100\n else:\n vRelDiff = 100\n if vRelDiff < -40:\n vRelDiff = -35\n\n outputFile.write('\\t\\t<values taxiQuota=\"%f\" taxis=\"%d\" simMeanSpeed=\"%f\" simFcdMeanSpeed=\"%f\" ' % (\n quota, taxiSum, vSim, vSimFCD,))\n outputFile.write('detectedEdges=\"%d\" notDetectedEdges=\"%d\" ' % (\n detectedEdges, edgesNo - detectedEdges))\n outputFile.write('absSpeedDiff=\"%f\" relSpeedDiff=\"%f\" relDetectedEdges=\"%f\" relNotDetectedEdges=\"%f\"/>\\n' %\n (vAbsDiff, vRelDiff, relDetectedEdges, 100 - relDetectedEdges))\n outputFile.write(\"\\t</periods>\\n</paramEffects>\")\n outputFile.close()",
"def write(self, fd):\n indent = \" \"\n in2 = indent + indent\n print >>fd, \"<?xml version=\\\"1.0\\\" encoding=\\\"UTF-8\\\"?>\"\n if self.__topComment is not None:\n print >>fd, \"<!--%s-->\" % self.__topComment\n print >>fd, \"<runConfig>\"\n for d in self.__domCfgList:\n print >>fd, d.xml(indent)\n for n in self.__domCfgNames:\n print >>fd, n.xml(indent)\n if self.__replayBaseDir is not None:\n print >>fd, \"%s<hubFiles baseDir=\\\"%s\\\">\" % \\\n (indent, self.__replayBaseDir)\n for r in self.__replayHubList:\n print >>fd, r.xml(in2)\n print >>fd, \"%s</hubFiles>\" % indent\n print >>fd, \"%s<triggerConfig>%s</triggerConfig>\" % \\\n (indent, self.__trigCfg)\n for c in self.__comps:\n if not c.isHub():\n print >>fd, \"%s<runComponent name=\\\"%s\\\"/>\" % \\\n (indent, c.name())\n\n if self.__strayStream is not None:\n (name, prescale) = self.__strayStream\n in3 = in2 + indent\n\n print >>fd, \"%s<stream name=\\\"%s\\\">\" % (in2, name)\n print >>fd, \"%s<prescale>%d</prescale>\" % (in3, prescale)\n print >>fd, \"%s</stream>\" % in2\n\n if self.__senderOption is not None:\n (hub, fwdIsolatedHits) = self.__senderOption\n fwdName = \"forwardIsolatedHitsToTrigger\"\n if fwdIsolatedHits:\n fwdVal = \"true\"\n else:\n fwdVal = \"false\"\n\n in3 = in2 + indent\n in4 = in3 + indent\n\n print >>fd, \"%s<stringHub hubId=\\\"%d\\\">\" % (in2, hub)\n print >>fd, \"%s<sender>\" % in3\n print >>fd, \"%s<%s>%s</%s>\" % (in4, fwdName, fwdVal, fwdName)\n print >>fd, \"%s</sender>\" % in3\n print >>fd, \"%s</stringHub>\" % in2\n\n print >>fd, \"</runConfig>\"",
"def write_xosc(self, generated_xml):\n reparsed_xml = minidom.parseString(generated_xml).toprettyxml(indent=\" \")\n xosc_file = open(self._filepath, \"w\")\n xosc_file.write(reparsed_xml)\n xosc_file.close()\n\n msg = QMessageBox()\n if self._warning_message:\n msg.setIcon(QMessageBox.Warning)\n text = f\"Exported OpenSCENARIO file {self._filepath} has warnings!\\n\\n\"\n text += \"\\n\".join(self._warning_message)\n else:\n msg.setIcon(QMessageBox.Information)\n text = f\"Successfully exported OpenSCENARIO file to {self._filepath}\"\n msg.setText(text)\n msg.setWindowTitle(\"OpenSCENARIO Export\")\n msg.setStandardButtons(QMessageBox.Ok)\n msg.exec()",
"def Write_XML(gui): \n # lock buttons\n gui.action_lock('Lock', gui.save_button)\n \n # clear output\n gui.output_clear() \n \n # get the desired delay from the gui.\n delay_time = gui.get_delay()\n \n \n # get the desired ascii delay from the gui.\n ascii_time = gui.get_ascii_delay()\n \n \n # get the desired I2C address from the gui.\n addr = \"0x%X\" % gui.get_i2c_address()\n \n \n # get the list of commands from the gui\n command_list = gui.get_command_list()\n \n # wrap up the writing directives\n directives = pySCPI_config.write_directives(command_list, addr,\n delay_time, ascii_time)\n \n # create the xml file\n filename = create_XML(directives, gui)\n \n # update the filename display window to show the filename saved\n gui.update_filename(filename = filename) \n \n # unlock the buttons\n gui.action_lock('Unlock')",
"def writexml(file):\n OUTFILE=open(file,\"w\")\n doc = xml.dom.minidom.Document()\n\n # Create the <dec_reg_list> base element\n decl_reg_list = doc.createElement(\"decl_reg_list\")\n doc.appendChild(decl_reg_list)\n\n regname_old=\"\"\n rows.pop(0)\n for row in rows:\n (regdesc,regname,offset,default,regtype,expose_reg,depth,incsz,bitdesc,bitname,loc,bittype)= row\n if regname != regname_old:\n # Create the register element\n register = doc.createElement(\"register\")\n register.setAttribute(\"name\", regname)\n register.setAttribute(\"offset\", offset)\n if default != \"\" : register.setAttribute(\"default\", default)\n register.setAttribute(\"type\", regtype)\n if expose_reg == \"1\": register.setAttribute(\"usr\", expose_reg)\n if depth != \"\": register.setAttribute(\"size\", depth)\n if incsz != \"\": register.setAttribute(\"incsz\", incsz)\n text = doc.createTextNode(regdesc)\n register.appendChild(text)\n decl_reg_list.appendChild(register)\n \n # Create the field element\n if bitname != \"\":\n field = doc.createElement(\"field\")\n field.setAttribute(\"name\", bitname)\n if loc !=\"\": field.setAttribute(\"loc\", addcolon(loc))\n if bittype != \"\": field.setAttribute(\"type\", bittype)\n if bitdesc != \"\":\n text = doc.createTextNode(bitdesc)\n field.appendChild(text)\n register.appendChild(field)\n regname_old = regname\n\n\n # Print our newly created XML\n #print doc.toprettyxml(indent=\" \")\n #OUTFILE.write(doc.saveXML(decl_reg_list))\n OUTFILE.write(doc.toprettyxml(indent=\" \"))\n OUTFILE.close()",
"def write_evt(evt, ev_id):\n print(\"Writing xml file\")\n print(\"../../refined_events/{:}.xml\".format(ev_id))\n evt.write(\"../../refined_events/{:}.xml\".format(ev_id), format=\"QUAKEML\")",
"def write(self, file_or_filename):\n etMap = revert(self)\n xmlTree = ET.ElementTree(etMap)\n xmlTree.write(file_or_filename)",
"def writeToTempXml(self):\n name = self.fileToProcess.name\n all_tokens = ET.Element(\"tokens\")\n for token in self.tokensTable:\n if token.getType() == KEYWORD:\n keyword = ET.SubElement(all_tokens, \"keyword\")\n keyword.text = ' '+token.getValue()+' '\n elif token.getType() == IDENTIFIER:\n identifier = ET.SubElement(all_tokens, \"identifier\")\n identifier.text = ' '+token.getValue()+' '\n elif token.getType() == SYMBOL:\n symbol = ET.SubElement(all_tokens, \"symbol\")\n symbol.text = ' '+token.getValue()+' '\n elif token.getType() == STRING_CONST:\n stringConstant = ET.SubElement(all_tokens, \"stringConstant\")\n stringConstant.text = ' '+token.getValue()+' '\n elif token.getType() == INT_CONST:\n integerConstant = ET.SubElement(all_tokens, \"integerConstant\")\n integerConstant.text = ' '+token.getValue()+' '\n tree = ET.ElementTree(all_tokens)\n tree.write(name + 'T' + '.xml')",
"def write_tour(graph, tsp_model, filename):\n with open(filename, 'w') as file: # open the textfile\n for decision_variable in tsp_model.getVars(): # for every decision variable in the model\n if decision_variable.getAttr(\"X\"): # if the value is true\n variable_name = decision_variable.getAttr(\"VarName\") # get the variable name\n i, j = (int(num) for num in variable_name.split(\"_\")) # retrieve the node names\n file.write(\" \".join([str(i), str(j), str(graph[i][j])]) + \"\\n\") # store the edge in a new line\n # store the cost of the optimal tour as the final line\n file.write(\"The cost of the best tour is: \" + str(tsp_model.getAttr(\"ObjVal\")) + \"\\n\")",
"def _write_particle_information(\n xml_file, structure, xyz, forcefield, ref_distance, ref_mass, ref_energy\n):\n xml_file.write('<position units=\"sigma\" num=\"{}\">\\n'.format(xyz.shape[0]))\n for pos in xyz:\n xml_file.write(\"{}\\t{}\\t{}\\n\".format(*pos / ref_distance))\n xml_file.write(\"</position>\\n\")\n if forcefield:\n types = [atom.type for atom in structure.atoms]\n else:\n types = [atom.name for atom in structure.atoms]\n\n xml_file.write(\"<type>\\n\")\n for atom_type in types:\n xml_file.write(\"{}\\n\".format(atom_type))\n xml_file.write(\"</type>\\n\")\n\n masses = [atom.mass for atom in structure.atoms]\n xml_file.write(\"<mass>\\n\")\n for mass in masses:\n if mass == 0:\n mass = 1.0\n xml_file.write(\"{}\\n\".format(mass / ref_mass))\n xml_file.write(\"</mass>\\n\")\n\n charges = [atom.charge for atom in structure.atoms]\n xml_file.write(\"<charge>\\n\")\n e0 = 2.396452e-04 # e^2 mol/(kcal A), permittivity of free space\n charge_factor = (4.0 * np.pi * e0 * ref_distance * ref_energy) ** 0.5\n for charge in charges:\n xml_file.write(\"{}\\n\".format(charge / charge_factor))\n xml_file.write(\"</charge>\\n\")\n if forcefield:\n pair_coeffs = list(\n set(\n (atom.type, atom.epsilon, atom.sigma)\n for atom in structure.atoms\n )\n )\n pair_coeffs.sort(key=lambda pair_type: pair_type[0])\n xml_file.write(\"<pair_coeffs>\\n\")\n for param_set in pair_coeffs:\n xml_file.write(\n \"{}\\t{:.4f}\\t{:.4f}\\n\".format(\n param_set[0],\n param_set[1] / ref_energy,\n param_set[2] / ref_distance,\n )\n )\n xml_file.write(\"</pair_coeffs>\\n\")",
"def to_xml(self):\n xml_strings = [' <trigger name=\"%s\">\\n' % self.name]\n xml_strings.append(' <scope>%s</scope>\\n' % self.scope)\n if self.level:\n xml_strings.append(' <level>%s</level>\\n' % self.level)\n xml_strings.append(' <events>%s</events>\\n' % self.events)\n xml_strings.append(' <sql>%s</sql>\\n' % self.sql)\n xml_strings.append(' </trigger>\\n')\n return \"\".join(xml_strings)",
"def write_output_file(self, xml_text, xml_file):\n xml_fo = open(xml_file, 'w')\n xml_fo.write(xml_text+'</xml>')\n xml_fo.close()\n return",
"def writeToXml(imageName, imageSize, imagePath, allCellInfo, outputFile, files):\n\n root = Element('annotation')\n root.set('verified', 'no')\n\n filesUsed = SubElement(root, 'files')\n # folder.text = 'WBC'\n filesUsed.text = str(files)\n filename = SubElement(root, 'filename')\n filename.text = imageName\n path = SubElement(root, 'path')\n path.text = imagePath\n source = SubElement(root, 'source')\n database = SubElement(source, 'database')\n database.text = 'Unknown'\n size = SubElement(root, 'size')\n width = SubElement(size, 'width')\n width.text = str(imageSize[0])\n height = SubElement(size, 'height')\n height.text = str(imageSize[1])\n depth = SubElement(size, 'depth')\n depth.text = '3'\n segmented = SubElement(root, 'segmented')\n segmented.text = \"0\"\n\n for cell in allCellInfo:\n name_str, xmin_str, ymin_str, xmax_str, ymax_str = cell\n objectTag = SubElement(root, 'object')\n name = SubElement(objectTag, 'name')\n name.text = name_str\n pose = SubElement(objectTag, 'pose')\n pose.text = 'Unspecified'\n truncated = SubElement(objectTag, 'truncated')\n truncated.text = '0'\n difficult = SubElement(objectTag, 'difficult')\n difficult.text = '0'\n bndbox = SubElement(objectTag, 'bndbox')\n xmin = SubElement(bndbox, 'xmin')\n xmin.text = xmin_str\n ymin = SubElement(bndbox, 'ymin')\n ymin.text = ymin_str\n xmax = SubElement(bndbox, 'xmax')\n xmax.text = xmax_str\n ymax = SubElement(bndbox, 'ymax')\n ymax.text = ymax_str\n\n tree = ET.ElementTree(root)\n tree.write(outputFile)",
"def write_body(file_ptr, attribute, inverse_relation):\n depth = 0\n # The body will only consist of the main function.\n # Function def\n fwrite(\"def main(argv):\", file_ptr, depth)\n\n # Docstring\n depth += 1\n fwrite(\"\\\"\\\"\\\"Read the data from the provided file then attempt to \"\n \"classify it using\", file_ptr, depth)\n fwrite(\"a 1D binary sorting method. This function uses either height or \"\n \"age and\", file_ptr, depth)\n fwrite(\"uses a simple threshold test for sorting.\", file_ptr, depth, 2)\n\n fwrite(\":param data_file_arg: <str> Name of the CSV file of CDC data to\",\n file_ptr, depth)\n fwrite(\"make predictions for.\", file_ptr, depth)\n fwrite(\":return: None\", file_ptr, depth)\n fwrite(\"\\\"\\\"\\\"\", file_ptr, depth)\n fwrite(\"testing_data = read_data_file(argv[0])\", file_ptr, depth, 2)\n fwrite(\"relevant_data = testing_data[[\\\"{0}\\\"]]\".format(attribute),\n file_ptr, depth)\n fwrite(\"for row in relevant_data.itertuples(index=False):\",\n file_ptr, depth)\n # For loop\n depth += 1\n fwrite(\"if row.{0} > 0:\".format(attribute), file_ptr, depth)\n # Conditional\n if not inverse_relation:\n fwrite(\"print(\\\"1\\\")\", file_ptr, tabs=depth+1)\n fwrite(\"else:\", file_ptr, depth)\n fwrite(\"print(\\\"0\\\")\", file_ptr, tabs=depth+1, newlines=2)\n else:\n fwrite(\"print(\\\"0\\\")\", file_ptr, tabs=depth+1)\n fwrite(\"else:\", file_ptr, depth)\n fwrite(\"print(\\\"1\\\")\", file_ptr, tabs=depth+1, newlines=2)\n # End Conditional\n # End of function\n depth -= 2",
"def to_file(self, file_path, smirnoff_data):\n xml_string = self.to_string(smirnoff_data)\n with open(file_path, \"w\") as of:\n of.write(xml_string)",
"def create_xml_regression(lfiles, lsbj, foxml):\n\n impl = xml.dom.minidom.getDOMImplementation()\n doc = impl.createDocument(None, \"some_tag\", None)\n top_element = doc.documentElement\n\n e = doc.createElement('subject')\n e.setAttribute('id', 'case')\n\n for i, fn in enumerate(lfiles):\n v = doc.createElement('visit')\n v.setAttribute('id', \"subj{}\".format(i))\n\n f = doc.createElement('filename')\n f.setAttribute('object_id', \"face\")\n t = doc.createTextNode(fn)\n f.appendChild(t)\n\n a = doc.createElement('age')\n x = doc.createTextNode(str(lsbj[i][\"age\"]))\n a.appendChild(x)\n\n\n v.appendChild(f)\n v.appendChild(a)\n e.appendChild(v)\n\n top_element.appendChild(e)\n\n with open(foxml, \"w\") as fo:\n fo.write(doc.toprettyxml())",
"def save_xunit(self,filename):\n f = open(filename,'w')\n f.write('<?xml version=\"1.0\" encoding=\"UTF-8\"?>')\n f.write('<testsuite name=\"fbtest\" tests=\"%i\" errors=\"%i\" failures=\"%i\" untested=\"%i\" skip=\"%i\">' %\n (len(self.results),self.get_error_count(),self.get_fail_count(),\n self.get_untested_count(),self.get_skipped_count()))\n for result in self.values():\n if result.outcome == Result.PASS:\n f.write('<testcase classname=\"Test\" name=\"%s\" time=\"%.3f\" />' % (\n result.id,result.get_elapsed()))\n else:\n f.write('<testcase classname=\"Test\" name=\"%s\" time=\"%.3f\">' % (\n result.id,result.get_elapsed()))\n if result.outcome == Result.ERROR:\n if result.has_key(Result.EXCEPTION):\n e = result[Result.EXCEPTION]\n exc = e[:e.find(':')]\n msg = e[e.find(':')+2:]\n exc = exc[exc.find(\"'\")+1:exc.rfind(\"'\")]\n msg = msg.lstrip()\n f.write('<error type=%s message=%s>' % (self._quoteattr(exc),\n self._quoteattr(msg)))\n f.write('</error>')\n else:\n msg = result.get_cause()\n f.write('<error type=\"error\" message=%s>' % (self._quoteattr(msg)))\n f.write('</error>')\n elif result.outcome == Result.FAIL:\n for key in ['ISQL_stripped_diff','Python_stripped_diff',\n 'ISQL_stderr_stripped_diff',\n 'Python_stderr_stripped_diff']:\n if result.has_key(key):\n cdata = as_utf8(result[key])\n f.write('<failure type=\"fail\" message=%s>' % self._quoteattr(result.get_cause()))\n f.write('<![CDATA[%s]]>' % escape_cdata(cdata))\n f.write('</failure>')\n elif result.outcome == Result.UNTESTED:\n f.write('<failure type=\"untested\" message=%s>' % self._quoteattr(result.get_cause()))\n f.write('</failure>')\n elif result.outcome == Result.SKIPPED:\n f.write('<failure type=\"skipped\" message=%s>' % self._quoteattr(result.get_cause()))\n f.write('</failure>')\n f.write('</testcase>')\n f.write('</testsuite>')\n f.close()",
"def GenerateXML(dictionary, fileName=\"labelling.xml\") : \n root = gfg.Element(\"annotation\") \n #the big section is called Annotation\n for key in dictionary:\n #for every polygon list in inside object witho subelement name and attributes and the type \"polygon\"\n objectElement = gfg.Element(\"object\") \n root.append(objectElement) \n subElement1 = gfg.SubElement(objectElement, \"name:\".strip(\":\"))\n subElement1.text = str(dictionary[key][\"name\"])\n subElement2 = gfg.SubElement(objectElement, \"attributes\".strip(\":\"))\n subElement2.text = str(dictionary[key][\"attributes\"])\n subElement3 = gfg.SubElement(objectElement, \"polygon\")\n \n for i in range(0, len(dictionary[key])-2):\n #for every vertex of the polygon list it's rounded x, y on xml\n SubInsidePolygon = gfg.SubElement(subElement3, \"pt\")\n sub_x = gfg.SubElement(SubInsidePolygon, \"x\")\n sub_y = gfg.SubElement(SubInsidePolygon, \"y\")\n sub_x.text = str(int(round(dictionary[key][\"x_y_\" + str(i)][0])))\n sub_y.text = str(int(round(dictionary[key][\"x_y_\" + str(i)][1])))\n tree = gfg.ElementTree(root) \n #create the xml tree\n with open (fileName, \"wb\") as files : \n tree.write(files) \n #if xml does not exist create one otherwise rewrite to it",
"def print_xml(tree, file):\n tree.write(file, encoding=\"utf-8\", xml_declaration=True)",
"def make_outputfile(self, solved_status, filename):\n filename = filename.split(\".\")\n filename[0] = filename[0].replace(\"Input\",\"Output\")\n str_filename = \".\"\n str_filename = str_filename.join(filename)\n # print(str_filename)\n\n f = open(str_filename,\"w+\")\n\n if(solved_status):\n string_rep = self.values_to_grid()\n ptr = 0\n for row in range(0,9):\n for col in range(0,9):\n f.write(string_rep[ptr]+ \" \")\n ptr += 1\n f.write(\"\\r\\n\") #windows compatiable formatting...\n else:\n f.write(\"Unable to solve this puzzle.\")\n\n f.close()",
"def saveauto(self):\n self.inp.getedge()\n ss=ss=strftime(\"_%Y-%m-%d_%H:%M:%S\", gmtime())\n fn=os.environ['VMEWORKDIR'] +\"/WORK/phases/\"+self.name+ss+self.inp.edge+\"_\"+self.inp.inpnum+\"_\"+self.inp.ctpnum+\".ps\"\n rc=self.c1.postscript(file=fn)\n if rc is not '':\n MywError(errmsg=\"File \"+fn+\" cannot be created.\")\n print \"rc=\",rc,len(rc)\n else:\n print \"File \",fn, \" saved.\"",
"def write_input_file(y,z,fname):\n file = open('c:/4nec2/out/' + fname + '.nec', 'w')\n file.write('CM Seeddesign \\n')\n file.write('CM Zigzag Antenna \\n')\n file.write('CE File generated by python \\n')\n seg = 1\n\n #write the antenna\n for i in range(0,len(y)-1):\n file.write('GW %3i %3i %8.4f %8.4f %8.4f %8.4f %8.4f %8.4f %8.4f\\n' % (1,seg,0,y[i],z[i],0,y[i+1],z[i+1],1))\n\n file.write('GE 0 \\n')\n file.write('EK \\n')\n file.write('EX %3i %3i %3i %3i %3i %3i %3i\\n' % (0,1,1,1,1,0,0))\n file.write('GN -1 \\n')\n \n file.write('FR %3i %3i %3i %3i %8.4f %8.4f\\n' % (0,1,0,0,900,0))\n file.write('FR %3i %3i %3i %3i %8.4f %8.4f\\n' % (0,11,0,0,850,10))\n\n file.write('LD %3i %3i %3i %3i %8.4f %8.4f\\n' % (5,1,0,0,58000000,2))\n file.write('RP %3i %3i %3i %3i %8.4f %8.4f %8.4f %8.4f\\n' % (0,1,1,1000,90,0,0,0))\n\n file.write('EN \\n')\n file.close()"
] | [
"0.5683157",
"0.5659544",
"0.55591834",
"0.55065686",
"0.5498814",
"0.5428093",
"0.54104936",
"0.5392612",
"0.53216076",
"0.5316584",
"0.5289246",
"0.52678",
"0.5199608",
"0.5194961",
"0.51714367",
"0.51580346",
"0.5156104",
"0.5138284",
"0.5084493",
"0.50813687",
"0.5064787",
"0.503962",
"0.50330055",
"0.50268286",
"0.5022817",
"0.50033885",
"0.5000793",
"0.49890828",
"0.4972994",
"0.49713236"
] | 0.79320467 | 0 |
Draw circle for face, sized relative to window size | def drawFace(win, winW, winH):
# face: filled circle centred in the window; radius is 11/24 of the smaller window dimension
face = Circle(Point(winW/2, winH/2), min(winW, winH)*11/24)
face.setOutline("black")
face.setFill("burlywood")
face.draw(win) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __drawCircle(self, center, radius, color, drawwidth=1):\n radius *= self.viewZoom\n if radius < 1: radius = 1\n else: radius = int(radius)\n\n pygame.draw.circle(self.screen, color, center, radius, drawwidth)",
"def draw(self, window):\n radius = SQUARE_SIZE // 2 - PADDING\n if self.stack_size == 2:\n x1, y1 = self.x - SQUARE_SIZE//8, self.y - SQUARE_SIZE//8\n x2, y2 = self.x + SQUARE_SIZE//8, self.y + SQUARE_SIZE//8\n pygame.draw.circle(window, BLACK, (x1, y1), radius + OUTLINE)\n pygame.draw.circle(window, self.color, (x1, y1), radius)\n pygame.draw.circle(window, BLACK, (x2, y2), radius + OUTLINE)\n pygame.draw.circle(window, self.color, (x2, y2), radius)\n else:\n pygame.draw.circle(window, BLACK, (self.x, self.y), radius + OUTLINE)\n pygame.draw.circle(window, self.color, (self.x, self.y), radius)",
"def DrawCircle(self, center, radius, color, drawwidth=1):\r\n radius *= self.zoom\r\n if radius < 1:\r\n radius = 1\r\n else: radius = int(radius)\r\n\r\n pygame.draw.circle(self.surface, color.bytes, center, radius, drawwidth)",
"def draw(self):\r\n arcade.draw_circle_filled(self.center.x, self.center.y, self.radius, TARGET_COLOR)",
"def draw(self):\n radius = self.width / 2\n center_x = self.x + radius\n center_y = self.y + radius\n arcade.draw_circle_filled(center_x, center_y, radius, self.fill.color)\n arcade.draw_circle_outline(\n center_x, center_y, radius, self.pen.color, 3)",
"def draw(self, screen):\n\t\tpygame.draw.circle(screen, self.color, self.pos, self.radius)",
"def circle(self, center, radius, color=(255, 255, 255), width=0):\n center = self._transform(center)\n pygame.draw.circle(self.screen, color, center, radius, width)",
"def drawCenter(self):\n pygame.draw.circle(display, self.color, (self.screenx, self.screeny), 1, 0)",
"def draw_circle(self, x, y, radius, color=Color['white']):\n pygame.draw.circle(self.display, color, (x, y), radius)",
"def draw_circle(c):\n turtle.circle(c.radius)",
"def draw_circle(c):\n turtle.circle(c.radius)",
"def draw(self):\n pygame.draw.circle(screen, self.color, (int(self.x), int(self.y)),\n self.radius)",
"def draw_circle(color, position, radius, width=0):\n #print('(color={}, position={}, radius={}, width={})')\n pygame.draw.circle(screen, color, position, radius, width)",
"def draw(self, surface):\n color = pygame.Color(255, 255, 255)\n pygame.draw.circle(surface, color, self.position, Molecule.radius, 2)",
"def draw(self):\r\n arcade.draw_circle_filled(self.center.x, self.center.y, BULLET_RADIUS, BULLET_COLOR)",
"def draw( self ):\n\t\t\t\n\t\ttransposition = lambda point: (point[0] + WINDOW_X, WINDOW_Y - point[1])\n\t\t\t \n\t\tx, y = transposition( self.position.xy )\n\t\tpygame.draw.circle(self.screen, self.color, ( int(x + 0.5), int(y + 0.5) ), self.r)",
"def draw_circle(self, color, center, radius, width):\n _c = self.T.itrans(center)\n pg.draw.circle(self.screen, color, _c(), radius, width)",
"def circle(self, center, rad):\n self.gc.show_circles(center[0], center[1], rad, facecolor='none', edgecolor=self.color, linewidth=0.5)",
"def render(self, screen):\n x,y = self.getBallPos()\n pygame.draw.circle(screen, (255, 255, 255), (x, y), self.RADIUS)",
"def drawCircle(x, y, r):\n pen1.up()\n pen1.goto(x,y)\n pen1.down()\n pen1.circle(r)",
"def draw_circle(self, color, position, radius, width = 0, anchor= 'topleft'):\n color = spyral.color._determine(color)\n offset = self._calculate_offset(anchor)\n pygame.draw.circle(self._surf, color, position + offset, radius, width)",
"def draw(self, draw_circle):\n draw_circle(self.color, (int(self.position[0]), int(self.position[1])), self.size)",
"def draw_circle(t, circle):\n t.pu()\n t.goto(circle.center.x, circle.center.y)\n t.pd()\n polygon.circle(t, circle.radius)",
"def plot_circle(self):\n if self.lastmouse is not None:\n pygame.gfxdraw.circle(self.screen,\n self.lastmouse[0], self.lastmouse[1],\n int(self.drawsize), (255, 0, 255))",
"def draw_circle(mat, center, radius, color=(0, 0, 255), thickness=1):\n cv2.circle(mat, center, radius, color, thickness=thickness)",
"def DrawSolidCircle(self, center, radius, axis, color):\r\n radius *= self.zoom\r\n if radius < 1:\r\n radius = 1\r\n else: radius = int(radius)\r\n\r\n pygame.draw.circle(self.surface, (color/2).bytes+[127],\r\n center, radius, 0)\r\n pygame.draw.circle(self.surface, color.bytes, center, radius, 1)\r\n pygame.draw.aaline(self.surface, (255, 0, 0), center,\r\n (center[0] - radius*axis[0], center[1] +\r\n radius*axis[1]))",
"def draw(self):\n arcade.draw_circle_filled(self.position_x, self.position_y, self.radius,self.player_color)",
"def draw_circle(self, color, position, radius, width=0, anchor='topleft'):\n offset = self._calculate_offset(anchor)\n pygame.draw.circle(self._surf, color, (position + offset).floor(),\n radius, width)\n self._version += 1\n spyral.util.scale_surface.clear(self._surf)\n return self",
"def drawCircle(t, x, y, radius):\r\n t.up()\r\n t.goto(x + radius, y)\r\n t.setheading(90)\r\n t.down()\r\n for count in range(120):\r\n t.left(3)\r\n t.forward(2.0 * math.pi * radius / 120.0)",
"def draw_circle(self, circle, color, thickness=2):\n center = self._format_point(circle.center())\n opencv.circle(self.img, center.tuple(), int(circle.radius()), color.bgra(), thickness=thickness)"
] | [
"0.7119598",
"0.67873436",
"0.67307967",
"0.65992916",
"0.6582572",
"0.65336627",
"0.65306664",
"0.65284276",
"0.6486938",
"0.64669365",
"0.64669365",
"0.6432975",
"0.64328283",
"0.6406504",
"0.6402449",
"0.638859",
"0.63861144",
"0.63558817",
"0.6353007",
"0.63493526",
"0.63474345",
"0.6332529",
"0.6322617",
"0.62872577",
"0.62813514",
"0.62570417",
"0.623778",
"0.6231833",
"0.6222739",
"0.6213484"
] | 0.7234741 | 0 |
Draws eyes for face | def drawEyes(win, winW, winH):
# leftEye = Oval(Point(300-120-40, 300-80-20), Point(300-120+40, 300-80+20))
# left eye: white oval with a dark-cyan iris and black pupil, positioned relative to window size
leftEye = Oval(Point(winW/2-winW/5-winW/15, winH/2-winH/7.5-winH/30),
Point(winW/2-winW/5+winW/15, winH/2-winH/7.5+winH/30))
leftEye.setFill("white")
leftEye.setOutline("black")
leftEye.draw(win)
leftIris = Circle(Point(winW/2-winW/5, winH/2-winH/7.5), winH/40)
leftIris.setOutline("black")
leftIris.setFill("darkcyan")
leftIris.draw(win)
leftPupil = Circle(Point(winW/2-winW/5, winH/2-winH/7.5), winH/120)
leftPupil.setOutline("black")
leftPupil.setFill("black")
leftPupil.draw(win)
# right eye: clone the left-eye shapes and shift them to the other side of the face
rightEye = leftEye.clone()
rightEye.move(winW/2-winW/10,0)
rightEye.draw(win)
rightIris = leftIris.clone()
rightIris.move(winW/2-winW/10,0)
rightIris.draw(win)
rightPupil = leftPupil.clone()
rightPupil.move(winW/2-winW/10,0)
rightPupil.draw(win) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def draw_eyes(self):\n GREEN = (0, 255, 0)\n for eye in self.eyes:\n if eye:\n cv2.circle(self.eyes_frame, eye, 8, GREEN, 1)",
"def draw(self, context):\n # TODO: Add this color to Add-on option\n color = (1.0, 1.0, 0.5, 1.0)\n alpha = 2.0 * math.atan((18.0 / 2.0) / self.lens.value[0])\n dist = 0.5 / (math.tan(alpha / 2.0))\n if self.height.value[0] == 0:\n width = 0.7\n else:\n width = self.width.value[0] / self.height.value[0]\n \n points = dict()\n points['border'] = [None, None, None, None]\n points['center'] = [None]\n \n # Points of face\n points['right_eye'] = [\n mathutils.Vector((0.25, 0.25, self.distance.value[0] - dist)),\n mathutils.Vector((0.3, 0.25, self.distance.value[0] - dist)),\n mathutils.Vector((0.3, 0.0, self.distance.value[0] - dist)),\n mathutils.Vector((0.25, 0.0, self.distance.value[0] - dist)),\n mathutils.Vector((0.25, 0.25, self.distance.value[0] - dist))\n ]\n points['left_eye'] = [\n mathutils.Vector((-0.25, 0.25, self.distance.value[0] - dist)),\n mathutils.Vector((-0.3, 0.25, self.distance.value[0] - dist)),\n mathutils.Vector((-0.3, 0.0, self.distance.value[0] - dist)),\n mathutils.Vector((-0.25, 0.0, self.distance.value[0] - dist)),\n mathutils.Vector((-0.25, 0.25, self.distance.value[0] - dist))\n ]\n \n points['mouth'] = [\n mathutils.Vector((-0.40912365913391113, -0.11777058243751526, self.distance.value[0] - dist)),\n mathutils.Vector((-0.3441678285598755, -0.15873458981513977, self.distance.value[0] - dist)),\n mathutils.Vector((-0.2563667893409729, -0.1998385488986969, self.distance.value[0] - dist)),\n mathutils.Vector((-0.18191590905189514, -0.22385218739509583, self.distance.value[0] - dist)),\n mathutils.Vector((-0.10375960171222687, -0.23957833647727966, self.distance.value[0] - dist)),\n mathutils.Vector((0.0, -0.2464955747127533, self.distance.value[0] - dist)),\n mathutils.Vector((0.10375960171222687, -0.23957833647727966, self.distance.value[0] - dist)),\n mathutils.Vector((0.18191590905189514, -0.22385218739509583, self.distance.value[0] - dist)),\n mathutils.Vector((0.2563667893409729, -0.1998385488986969, self.distance.value[0] - dist)),\n mathutils.Vector((0.3441678285598755, -0.15873458981513977, self.distance.value[0] - dist)),\n mathutils.Vector((0.40912365913391113, -0.11777058243751526, self.distance.value[0] - dist))\n ]\n \n # Put border points of camera to basic position\n points['border'][0] = mathutils.Vector((\n -width / 2.0,\n -0.5,\n self.distance.value[0] - dist,\n 1.0\n ))\n points['border'][1] = mathutils.Vector((\n width / 2.0,\n -0.5,\n self.distance.value[0] - dist,\n 1.0\n ))\n points['border'][2] = mathutils.Vector((\n width / 2.0,\n 0.5,\n self.distance.value[0] - dist,\n 1.0\n ))\n points['border'][3] = mathutils.Vector((\n -width / 2.0,\n 0.5,\n self.distance.value[0] - dist,\n 1.0\n ))\n \n # Center of view\n points['center'][0] = mathutils.Vector((\n 0.0,\n 0.0,\n self.distance.value[0],\n 1.0\n ))\n \n # Create transformation (rotation) matrix\n rot_matrix = mathutils.Quaternion(self.rotation.value).to_matrix().to_4x4()\n \n # Transform points in all point groups\n for point_group in points.values():\n for index in range(len(point_group)):\n # Rotate points\n point_group[index] = (rot_matrix * point_group[index]).to_3d()\n # Move points\n point_group[index] += mathutils.Vector(self.location.value)\n\n border = points['border']\n center = points['center']\n\n # Store glColor4f\n col_prev = bgl.Buffer(bgl.GL_FLOAT, [4])\n bgl.glGetFloatv(bgl.GL_COLOR, col_prev)\n\n bgl.glColor4f(color[0], color[1], color[2], color[3])\n\n # Draw username\n coord_2d = location_3d_to_region_2d(\n context.region,\n context.space_data.region_3d,\n 
center[0])\n\n # When coordinates are not outside window, then draw the name of avatar\n if coord_2d is not None:\n # TODO: add to Add-on options\n font_id, font_size, my_dpi = 0, 12, 72\n blf.size(font_id, font_size, my_dpi)\n blf.position(font_id, coord_2d[0] + 2, coord_2d[1] + 2, 0)\n blf.draw(font_id, str(self.username))\n\n # Get & convert the Perspective Matrix of the current view/region.\n persp_matrix = context.space_data.region_3d.perspective_matrix\n temp_mat = [persp_matrix[j][i] for i in range(4) for j in range(4)]\n persp_buff = bgl.Buffer(bgl.GL_FLOAT, 16, temp_mat)\n \n # Store previous OpenGL settings.\n # Store MatrixMode\n matrix_mode_prev = bgl.Buffer(bgl.GL_INT, [1])\n bgl.glGetIntegerv(bgl.GL_MATRIX_MODE, matrix_mode_prev)\n matrix_mode_prev = matrix_mode_prev[0]\n \n # Store projection matrix\n proj_matrix_prev = bgl.Buffer(bgl.GL_DOUBLE, [16])\n bgl.glGetFloatv(bgl.GL_PROJECTION_MATRIX, proj_matrix_prev)\n \n # Store Line width\n line_width_prev = bgl.Buffer(bgl.GL_FLOAT, [1])\n bgl.glGetFloatv(bgl.GL_LINE_WIDTH, line_width_prev)\n line_width_prev = line_width_prev[0]\n \n # Store GL_BLEND\n blend_prev = bgl.Buffer(bgl.GL_BYTE, [1])\n bgl.glGetFloatv(bgl.GL_BLEND, blend_prev)\n blend_prev = blend_prev[0]\n \n # Store GL_DEPTH_TEST\n depth_test_prev = bgl.Buffer(bgl.GL_BYTE, [1])\n bgl.glGetFloatv(bgl.GL_DEPTH_TEST, depth_test_prev)\n depth_test_prev = depth_test_prev[0]\n \n # Store GL_LINE_STIPPLE\n line_stipple_prev = bgl.Buffer(bgl.GL_BYTE, [1])\n bgl.glGetFloatv(bgl.GL_LINE_STIPPLE, line_stipple_prev)\n line_stipple_prev = line_stipple_prev[0]\n \n # Prepare for 3D drawing\n bgl.glLoadIdentity()\n bgl.glMatrixMode(bgl.GL_PROJECTION)\n bgl.glLoadMatrixf(persp_buff)\n bgl.glEnable(bgl.GL_BLEND)\n bgl.glEnable(bgl.GL_DEPTH_TEST)\n \n # Draw \"Look At\" point\n bgl.glLineWidth(1)\n bgl.glBegin(bgl.GL_LINES)\n bgl.glColor4f(color[0], color[1], color[2], color[3])\n \n bgl.glVertex3f(\n self.location.value[0] + 0.1,\n self.location.value[1],\n self.location.value[2]\n )\n bgl.glVertex3f(\n self.location.value[0] - 0.1,\n self.location.value[1],\n self.location.value[2]\n )\n \n bgl.glVertex3f(\n self.location.value[0],\n self.location.value[1] + 0.1,\n self.location.value[2]\n )\n bgl.glVertex3f(\n self.location.value[0],\n self.location.value[1] - 0.1,\n self.location.value[2]\n )\n \n bgl.glVertex3f(\n self.location.value[0],\n self.location.value[1],\n self.location.value[2] + 0.1\n )\n bgl.glVertex3f(\n self.location.value[0],\n self.location.value[1],\n self.location.value[2] - 0.1\n )\n \n bgl.glEnd()\n\n # Draw border of camera\n bgl.glBegin(bgl.GL_LINE_STRIP)\n bgl.glVertex3f(border[0][0], border[0][1], border[0][2])\n bgl.glVertex3f(border[1][0], border[1][1], border[1][2])\n bgl.glVertex3f(border[2][0], border[2][1], border[2][2])\n bgl.glVertex3f(border[3][0], border[3][1], border[3][2])\n bgl.glVertex3f(border[0][0], border[0][1], border[0][2])\n bgl.glEnd()\n \n # Draw left eye\n bgl.glBegin(bgl.GL_LINE_STRIP)\n for point in points['left_eye']:\n bgl.glVertex3f(point[0], point[1], point[2])\n bgl.glEnd()\n\n # Draw right eye\n bgl.glBegin(bgl.GL_LINE_STRIP)\n for point in points['right_eye']:\n bgl.glVertex3f(point[0], point[1], point[2])\n bgl.glEnd()\n \n # Draw mouth\n bgl.glBegin(bgl.GL_LINE_STRIP)\n for point in points['mouth']:\n bgl.glVertex3f(point[0], point[1], point[2])\n bgl.glEnd()\n \n # Draw dashed lines from center of \"camera\" to border of camera \n bgl.glEnable(bgl.GL_LINE_STIPPLE)\n bgl.glBegin(bgl.GL_LINES)\n 
bgl.glVertex3f(border[0][0], border[0][1], border[0][2])\n bgl.glVertex3f(center[0][0], center[0][1], center[0][2])\n bgl.glVertex3f(border[1][0], border[1][1], border[1][2])\n bgl.glVertex3f(center[0][0], center[0][1], center[0][2])\n bgl.glVertex3f(border[2][0], border[2][1], border[2][2])\n bgl.glVertex3f(center[0][0], center[0][1], center[0][2])\n bgl.glVertex3f(border[3][0], border[3][1], border[3][2])\n bgl.glVertex3f(center[0][0], center[0][1], center[0][2])\n bgl.glEnd()\n \n # Draw dashed line from Look At point and center of camera\n bgl.glBegin(bgl.GL_LINES)\n bgl.glVertex3f(\n self.location.value[0],\n self.location.value[1],\n self.location.value[2]\n )\n bgl.glVertex3f(center[0][0], center[0][1], center[0][2])\n bgl.glEnd()\n bgl.glDisable(bgl.GL_LINE_STIPPLE)\n\n # Restore previous OpenGL settings\n bgl.glLoadIdentity()\n bgl.glMatrixMode(matrix_mode_prev)\n bgl.glLoadMatrixf(proj_matrix_prev)\n bgl.glLineWidth(line_width_prev)\n if not blend_prev:\n bgl.glDisable(bgl.GL_BLEND)\n if not line_stipple_prev:\n bgl.glDisable(bgl.GL_LINE_STIPPLE)\n if not depth_test_prev:\n bgl.glDisable(bgl.GL_DEPTH_TEST)\n\n bgl.glColor4f(col_prev[0], col_prev[1], col_prev[2], col_prev[3])",
"def head_with_eyes_and_mouth(eyefunc, mouthfunc):\n print (hair_longer())\n print (eyefunc())\n print (nose_leftwards())\n print (mouthfunc())\n print (chin_curvy())",
"def resetEyes(self):\n\n\t\tself.leds.on(\"FaceLeds\")",
"def resetEyes(self):\n\n\t\tself.leds.on(\"FaceLeds\")",
"def update_eyes(self, up_down_part, left_right_part):\n if up_down_part and abs(up_down_part) > 5:\n y = up_down_part/abs(up_down_part)\n else:\n y = 0\n if left_right_part and abs(left_right_part) > 5:\n x = left_right_part/abs(left_right_part)\n else:\n x = 0\n self.looking = (x, y)",
"def _analyze(self, face, frame):\n try:\n landmarks = self._predictor(frame, face)\n self.eye_left = Eye(frame, landmarks, 0, self.calibration)\n self.eye_right = Eye(frame, landmarks, 1, self.calibration)\n\n except IndexError:\n self.eye_left = None\n self.eye_right = None",
"def highlight_faces(image, faces, output_filename, terminal_print=True):\n im = Image.open(image)\n draw = ImageDraw.Draw(im)\n\n for (face_ind, face) in enumerate(faces):\n\n # compute emotions\n list_emotion_scores = [face.sorrow_likelihood,\n face.joy_likelihood,\n face.anger_likelihood,\n face.surprise_likelihood]\n\n list_emotions = [\"SORROW\",\n \"JOY\",\n \"ANGER\",\n \"SURPRISE\"]\n\n string_label = generate_string_label(list_emotions, list_emotion_scores)\n\n if terminal_print:\n # print emotions on terminal\n print(\"\\n\")\n print(\"-----------------------\")\n print(\"Face {}\".format(face_ind))\n\n for (crrt_emotion, crrt_score) in zip(list_emotions, list_emotion_scores):\n print(\"{}: {}\".format(crrt_emotion, crrt_score))\n\n print(string_label)\n\n print(\"-----------------------\")\n\n # draw box around face\n box = [(vertex.x, vertex.y)\n for vertex in face.bounding_poly.vertices]\n draw.line(box + [box[0]], width=5, fill='#00ff00')\n\n # add legend in the face box\n fontsize = 35\n font = ImageFont.truetype(\"/usr/share/fonts/truetype/freefont/FreeMono.ttf\", fontsize)\n\n offset = 5\n heigth_text = 40\n length_text = box[1][0] - box[0][0] - 2 * offset\n draw.rectangle(((box[0][0] + offset, box[0][1] + offset), (box[0][0] + length_text + offset, box[0][1] + heigth_text + offset)), fill=\"black\")\n draw.text((box[0][0] + offset, box[0][1] + offset), string_label, font=font, fill=(255, 255, 255, 255))\n\n # highlight significant points\n point_nbr = 0\n half_width_sqare = 2\n\n list_point_coords = []\n\n for point in face.landmarks:\n x = point.position.x\n y = point.position.y\n\n list_point_coords.append((x, y))\n\n draw.rectangle(((x - half_width_sqare, y - half_width_sqare), (x + half_width_sqare, y + half_width_sqare)), fill=\"red\")\n\n # fontsize = 15\n # font = ImageFont.truetype(\"/usr/share/fonts/truetype/freefont/FreeMono.ttf\", fontsize)\n # draw.text((x, y), str(point_nbr), font=font, fill=(255, 255, 0, 0))\n\n point_nbr += 1\n\n all_lists_points = [\n [10, 11, 9],\n [10, 12, 11],\n [14, 7, 13, 15],\n [7, 6],\n [14, 6, 13, 7, 14],\n [16, 17, 18, 19],\n [21, 22, 23, 24],\n [30, 6],\n ]\n\n for crrt_list_points in all_lists_points:\n draw_line_list_points(draw, crrt_list_points, list_point_coords)\n\n draw_line_list_points(draw, [2, 26, 3], list_point_coords, close=False)\n draw_line_list_points(draw, [4, 27, 5], list_point_coords, close=False)\n draw_line_list_points(draw, [10, 8, 11], list_point_coords, close=False)\n\n im.save(output_filename)",
"def change_eyes(self, look_direction):\n self.image, _ = self.norm_images.get_image()\n self.curr_eye, _ = self.eyes.get_image(key=look_direction)\n self.image.blit(self.curr_eye, (0, 0)) # combine eyes and body",
"def main():\n # background\n background = background_maker()\n\n # face\n face = face_maker()\n\n # eye\n eye_l = eye_maker()\n eye_r = eye_maker()\n\n # mouth\n mouth = mouth_maker()\n mouth_1 = GArc(60, 60, 290, 60)\n mouth_2 = GArc(60, 60, 190, 60)\n\n # nose\n nose = GOval(10, 10)\n nose.filled = True\n\n # ear\n ear_l = ear_maker()\n ear_r = ear_maker()\n ear_ll = ear2_maker()\n ear_rr = ear2_maker()\n\n # body\n body = body_maker()\n body2 = body2_maker()\n body3 = body3_maker()\n\n # label\n label = label_maker('Rilakkuma', 70)\n label2 = label_maker('Min', 10, font='Dialog')\n\n # arm\n arm_l = arm1_maker()\n arm_r = arm2_maker()\n\n # leg\n leg = leg_maker()\n leg2 = leg_maker()\n\n # show my draw\n window.add(background)\n window.add(leg, (window.width - leg.width) / 2 - body.width/3.7, (window.height - leg.height) / 2 + body.height*1.1)\n window.add(leg2, (window.width - leg2.width) / 2 + body.width / 3.7,\n (window.height - leg2.height) / 2 + body.height * 1.1)\n window.add(body, (window.width - body.width) / 2, (window.height - body.height) / 2 + face.height/1.4)\n window.add(body2, (window.width - body2.width) / 2,\n (window.height - body2.height) / 2 + face.height/1.4 + body.height/3.3)\n window.add(body3, (window.width - body3.width) / 2, (window.height - body3.height) / 2 + face.height/1.2)\n window.add(arm_l, (window.width - arm_l.width) / 2 - body.width / 2.9,\n (window.height - arm_l.height) / 2 + face.height / 1.5)\n window.add(arm_r, (window.width - arm_r.width) / 2 + body.width / 2.9,\n (window.height - arm_r.height) / 2 + face.height / 1.5)\n window.add(label, (window.width-label.width)/2, window.height/4)\n window.add(ear_l, (window.width - ear_l.width) / 2 - face.width / 2.25,\n (window.height - ear_l.height) / 2 - face.height / 3)\n window.add(ear_ll, (window.width - ear_ll.width) / 2 - face.width / 2.25,\n (window.height - ear_ll.height) / 2 - face.height / 3.5)\n window.add(ear_r, (window.width - ear_r.width) / 2 + face.width / 2.25,\n (window.height - ear_r.height) / 2 - face.height / 3)\n window.add(ear_rr, (window.width - ear_rr.width) / 2 + face.width / 2.25,\n (window.height - ear_rr.height) / 2 - face.height / 3.5)\n window.add(face, (window.width - face.width) / 2, (window.height - face.height) / 2)\n window.add(eye_l, (window.width - eye_l.width) / 2 - face.width / 5, (window.height - eye_l.height) / 2)\n window.add(eye_r, (window.width - eye_r.width) / 2 + face.width / 5, (window.height - eye_r.height) / 2)\n window.add(mouth, (window.width - mouth.width) / 2, (window.height - mouth.height) / 2 + face.height / 8)\n window.add(nose, (window.width - nose.width) / 2, (window.height - nose.height) / 2 + face.height / 12)\n window.add(mouth_1, (window.width - mouth_1.width) / 2 - face.width / 20,\n (window.height - mouth_1.height) / 2 + face.height / 11)\n window.add(mouth_2, (window.width - mouth_2.width) / 2 + face.width / 20,\n (window.height - mouth_2.height) / 2 + face.height / 11)\n window.add(label2, window.width-label2.width, window.height)\n\n # kuma2\n kuma2_color = '0xFFEEDD'\n face2 = face_maker(140, color=kuma2_color)\n\n eye2_l = eye_maker(size=15)\n eye2_r = eye_maker(size=15)\n\n mouth2 = mouth_maker(size=40)\n mouth2_1 = GArc(60, 60, 290, 60)\n mouth2_2 = GArc(60, 60, 190, 60)\n\n nose2 = GOval(8, 8)\n nose2.filled = True\n\n ear2_l = ear_maker(size=50, color=kuma2_color)\n ear2_r = ear_maker(size=50, color=kuma2_color)\n ear2_ll = ear2_maker(size=30, color='0xFFC1E0')\n ear2_rr = ear2_maker(size=30, color='0xFFC1E0')\n\n body_2 = 
body_maker(size=100, color=kuma2_color)\n body2_2 = body2_maker(size=85, color=kuma2_color)\n body3_2 = body3_maker(size=60)\n\n arm2_l = arm1_maker(size=40, color=kuma2_color)\n arm2_r = arm2_maker(size=40, color=kuma2_color)\n\n leg_2 = leg_maker(size=25, color=kuma2_color)\n leg2_2 = leg_maker(size=25, color=kuma2_color)\n\n buttons = GOval(15, 15)\n buttons.filled = True\n buttons.fill_color = 'red'\n\n window.add(leg_2, (window.width - leg_2.width) / 2 - face.width / 1.05 - body_2.width/3.3,\n (window.height - leg_2.height) / 2 + face.height / 1.4 + body2.height * 0.82)\n window.add(leg2_2, (window.width - leg2_2.width) / 2 - face.width / 1.05 + body_2.width/3.3,\n (window.height - leg2_2.height) / 2 + face.height / 1.4 + body2.height * 0.82)\n window.add(body_2, (window.width - body_2.width) / 2 - face.width/1.05,\n (window.height - body_2.height) / 2 + face.height / 1.4)\n window.add(body2_2, (window.width - body2_2.width) / 2 - face.width/1.05,\n (window.height - body2_2.height) / 2 + face.height / 1.4 + body_2.height / 3.3)\n window.add(body3_2, (window.width - body3_2.width) / 2 - face.width/1.05,\n (window.height - body3_2.height) / 2 + face.height / 1.2)\n window.add(arm2_l, (window.width - arm2_l.width) / 2 - face.width / 1.05 - body_2.width/2.9,\n (window.height - arm2_l.height) / 2 + face2.height / 1.06)\n window.add(arm2_r, (window.width - arm2_r.width) / 2 - face.width / 1.05 + body_2.width/2.9,\n (window.height - arm2_r.height) / 2 + face2.height / 1.06)\n window.add(ear2_l, (window.width - ear2_l.width) / 2 - face.width / 0.8,\n (window.height - ear2_l.height) / 2 - face2.height / 9)\n window.add(ear2_ll, (window.width - ear2_ll.width) / 2 - face.width / 0.8,\n (window.height - ear2_ll.height) / 2 - face2.height / 15)\n window.add(ear2_r, (window.width - ear2_r.width) / 2 - face.width / 1.5,\n (window.height - ear2_r.height) / 2 - face2.height / 9)\n window.add(ear2_rr, (window.width - ear2_rr.width) / 2 - face.width / 1.52,\n (window.height - ear2_rr.height) / 2 - face2.height / 15)\n window.add(face2, (window.width-face2.width)/2 - face.width/1.05, (window.height-face2.height)/2 + face2.height/4)\n window.add(eye2_l, (window.width - eye2_l.width) / 2 - face.width / 0.9,\n (window.height - eye2_l.height) / 2 + face2.height/4)\n window.add(eye2_r, (window.width - eye2_r.width) / 2 - face.width / 1.25,\n (window.height - eye2_r.height) / 2 + face2.height/4)\n window.add(mouth2, (window.width - mouth2.width) / 2 - face.width/1.05,\n (window.height - mouth2.height) / 2 + face2.height / 2.4)\n window.add(nose2, (window.width - nose2.width) / 2 - face.width/1.05,\n (window.height - nose2.height) / 2 + face2.height / 2.5)\n window.add(mouth2_1, (window.width - mouth2_1.width) / 2 - face.width / 1,\n (window.height - mouth2_1.height) / 2 + face2.height / 2.5)\n window.add(mouth2_2, (window.width - mouth2_2.width) / 2 - face.width / 1.1,\n (window.height - mouth2_2.height) / 2 + face2.height / 2.5)\n window.add(buttons, (window.width-buttons.width)/2 - face.width/1.05,\n (window.height-buttons.height)/2 + face.height/1.62)",
"def drawFace():\r\n\tglPushMatrix()\r\n\tglTranslatef(-0.5,-0.5,0)\r\n\tglBegin(GL_LINE_LOOP)\r\n\t\r\n\tglVertex3f(0,VALUE,0)\r\n\tglVertex3f(VALUE,0,0)\r\n\t\r\n\tglVertex3f(LENGTH-VALUE,0,0)\r\n\tglVertex3f(LENGTH,VALUE,0)\r\n\t\r\n\tglVertex3f(LENGTH,LENGTH-VALUE,0)\r\n\tglVertex3f(LENGTH-VALUE,LENGTH,0)\r\n\t\r\n\tglVertex3f(VALUE,LENGTH,0)\r\n\tglVertex3f(0,LENGTH-VALUE,0)\r\n\t\r\n\tglEnd()\r\n\tglPopMatrix()",
"def draw(self):\r\n #method provided https://content.byui.edu/file/856c5360-ff89-4409-a7ae-bca07f06f19c/1/week06/skeet.html\r\n arcade.draw_circle_outline(self.center.x, self.center.y, self.radius, TARGET_COLOR)\r\n text_x = self.center.x - (self.radius / 2)\r\n text_y = self.center.y - (self.radius / 2)\r\n arcade.draw_text(repr(self.life), text_x, text_y, TARGET_COLOR, font_size=20)",
"def draw():",
"def draw_self(self, x, y):\n noStroke()\n fill(1.0, 0.5, 0.6)\n ellipse(x, y, 100, 100)\n bottom_half = createShape()\n bottom_half.beginShape()\n bottom_half.vertex(x, y)\n bottom_half.vertex(x+100, y)\n bottom_half.vertex(x+100, y+50)\n bottom_half.vertex(x+50, y+25)\n bottom_half.vertex(x, y+50)\n bottom_half.endShape()\n shape(bottom_half, -50, 0)\n\n self.eyes.display(x, y - 15, self.looking)",
"def draw_flower_advanced():\n draw_flower()\n turtle.left(90)\n turtle.up()\n turtle.forward(150)\n turtle.left(90)\n turtle.forward(150)\n turtle.right(90)\n turtle.down()",
"def draw_flower_advanced():\n draw_flower()\n turtle.left(90)\n turtle.up() #Raise pen for movement\n turtle.forward(150)\n turtle.left(90)\n turtle.forward(150)\n turtle.right(90)\n turtle.down() #lower pen for drawing",
"def drawFace(win, winW, winH):\n face = Circle(Point(winW/2, winH/2), min(winW, winH)*11/24)\n face.setOutline(\"black\")\n face.setFill(\"burlywood\")\n face.draw(win)",
"def draw(img, p):\n\t\t\n\t########### First of all we gotta define which point connects together, there are points for the head(5), the shoulders, elbows, hands, top of thighs, knees, feet\n\t########### top of thighs, knees and feet. We also gonna add 2 extra points for the neck and the pelvis, to make everythonh look better.\n\t########### We also provide the color of each line here\n\tconnexions = [\t\n\t\t\t\t\t(5, 7, 'navy'),\t\t# shoulder => elbow\n\t\t\t\t\t(7, 9, 'navy'),\t\t# elbow => hand\n\t\t\t\t\t(6, 8, 'navy'),\t\t# same on the other side\n\t\t\t\t\t(8, 10, 'navy'),\n\t\t\t\t\t(11, 13, 'lime'),\t# thigh => knee\n\t\t\t\t\t(13, 15, 'lime'),\t# knee => foot\n\t\t\t\t\t(12, 14, 'lime'),\t# same on the other side\n\t\t\t\t\t(14, 16, 'lime'),\n\n\t\t\t\t\t###### With The Extra points :\n\n\t\t\t\t\t(0, 17, 'aqua'),\t# head => neck\n\t\t\t\t\t(17, 5, 'aqua'),\t# neck => shoulders\n\t\t\t\t\t(17, 6, 'aqua'),\n\t\t\t\t\t(17, 18, 'teal'),\t# neck => pelvis\n\t\t\t\t\t(18, 11, 'teal'),\t# pelvis => thighs\n\t\t\t\t\t(18, 12, 'teal')\n\t\t\t\t\t]\n\n\t###### now let's find out how many objects were detected \n\t\n\tl = len(p[0][\"scores\"])\n\n\t##### time to draw now, we'll only select objects with a score over .9\n\n\td = idw.Draw(img)\n\n\tfor k in range(l):\n\n\t\tif p[0][\"scores\"][k] > 0.98:\n\n\t\t\t##### Let's add the neck and pelvis:\n\t\t\tneck = (p[0][\"keypoints\"][k][5] + p[0][\"keypoints\"][k][6])/2\n\t\t\tpelv = (p[0][\"keypoints\"][k][11] + p[0][\"keypoints\"][k][12])/2\n\n\t\t\t#### it's getting tricky here\n\n\t\t\tnepe = t.zeros((2, 3))\n\t\t\tnepe[0] = neck ; nepe[1] = pelv \n\n\t\t\t### Now let's put everything into a single tensor\n\t\t\tbody = t.cat((p[0][\"keypoints\"][k], nepe))\n\n\t\t\t#### We can start drawing now, for real\n\n\t\t\tfor tp in connexions:\n\n\t\t\t\tp0 = (int(body[tp[0], 0]), int(body[tp[0], 1]))\n\t\t\t\tp1 = (int(body[tp[1], 0]), int(body[tp[1], 1]))\n\t\t\t\td.line([p0, p1], fill=tp[2], width=2)\n\n\t\t\t#### Now the points\n\n\t\t\tfor ts in t.cat((body[0:1], body[5:])):\n\t\t\t\td.ellipse((int(ts[0]-2), int(ts[1]-2), int(ts[0]+2), int(ts[1]+2)), 'fuchsia')\n\n\t### and finally\n\t#plt.imshow(np.asarray(img)) Not Like That\n\timg.show()",
"def draw_aim(self):\n polygon(screen, self.color, [(self.x, self.y), (self.x + self.r * 1.71 / 2, self.y - self.r / 2),\n (self.x + self.r * 1.71, self.y), (self.x + self.r * 1.71, self.y + self.r),\n (self.x + self.r * 1.71 / 2, self.y + 3 * self.r / 2), (self.x, self.y + self.r)])",
"def head_hearteyes():\n print (hair_longer())\n print (eye_heart())\n print (nose_rightwards())\n print (mouth_smile())\n print (chin_curvy())",
"def draw(self):\n self.screen.fill(pygame.Color(0,0,0))\n for brick in self.model.bricks:\n pygame.draw.rect(self.screen, brick.color, pygame.Rect(brick.x,brick.y,brick.width,brick.height))\n pygame.draw.rect(self.screen, pygame.Color(255,255,255), pygame.Rect(self.model.paddle.x,self.model.paddle.y,self.model.paddle.width,self.model.paddle.height))\n pygame.draw.ellipse(self.screen, pygame.Color(128,128,128),(self.model.ball.x-self.model.ball.r, self.model.ball.y-self.model.ball.r, 2*self.model.ball.r,2*self.model.ball.r))\n pygame.display.update()",
"def draw_foe_mines(self):\n self.foe_top.draw(self.foe_top_rect.topleft)\n self.foe_middle.draw(self.foe_middle_rect.topleft)\n self.foe_midbot.draw(self.foe_midbot_rect.topleft)\n self.foe_bottom.draw(self.foe_bottom_rect.topleft)",
"def draw_pose(self, image):\r\n\t\tif self.nose:\r\n\t\t\tcv.circle(image, (self.nose[0], self.nose[1]), 5, [0, 0, 255], -1, cv.LINE_AA)\r\n\t\tif self.neck:\r\n\t\t\tcv.circle(image, (self.neck[0], self.neck[1]), 5, [0, 0, 255], -1, cv.LINE_AA)\r\n\t\tif self.r_shoulder:\r\n\t\t\tcv.circle(image, (self.r_shoulder[0], self.r_shoulder[1]), 5, [0, 0, 255], -1, cv.LINE_AA)\r\n\t\tif self.r_elbow:\r\n\t\t\tcv.circle(image, (self.r_elbow[0], self.r_elbow[1]), 5, [0, 0, 255], -1, cv.LINE_AA)\r\n\t\tif self.r_wrist:\r\n\t\t\tcv.circle(image, (self.r_wrist[0], self.r_wrist[1]), 5, [0, 0, 255], -1, cv.LINE_AA)\r\n\t\tif self.r_hip:\r\n\t\t\tcv.circle(image, (self.r_hip[0], self.r_hip[1]), 5, [0, 0, 255], -1, cv.LINE_AA)\r\n\t\tif self.r_knee:\r\n\t\t\tcv.circle(image, (self.r_knee[0], self.r_knee[1]), 5, [0, 0, 255], -1, cv.LINE_AA)\r\n\t\tif self.r_eye:\r\n\t\t\tcv.circle(image, (self.r_eye[0], self.r_eye[1]), 5, [0, 0, 255], -1, cv.LINE_AA)\r\n\t\tif self.r_ear:\r\n\t\t\tcv.circle(image, (self.r_ear[0], self.r_ear[1]), 5, [0, 0, 255], -1, cv.LINE_AA)\r\n\t\tif self.l_shoulder:\r\n\t\t\tcv.circle(image, (self.l_shoulder[0], self.l_shoulder[1]), 5, [0, 0, 255], -1, cv.LINE_AA)\r\n\t\tif self.l_elbow:\r\n\t\t\tcv.circle(image, (self.l_elbow[0], self.l_elbow[1]), 5, [0, 0, 255], -1, cv.LINE_AA)\r\n\t\tif self.l_wrist:\r\n\t\t\tcv.circle(image, (self.l_wrist[0], self.l_wrist[1]), 5, [0, 0, 255], -1, cv.LINE_AA)\r\n\t\tif self.l_hip:\r\n\t\t\tcv.circle(image, (self.l_hip[0], self.l_hip[1]), 5, [0, 0, 255], -1, cv.LINE_AA)\r\n\t\tif self.l_knee:\r\n\t\t\tcv.circle(image, (self.l_knee[0], self.l_knee[1]), 5, [0, 0, 255], -1, cv.LINE_AA)\r\n\t\tif self.l_eye:\r\n\t\t\tcv.circle(image, (self.l_eye[0], self.l_eye[1]), 5, [0, 0, 255], -1, cv.LINE_AA)\r\n\t\tif self.l_ear:\r\n\t\t\tcv.circle(image, (self.l_ear[0], self.l_ear[1]), 5, [0, 0, 255], -1, cv.LINE_AA)",
"def add_eye(self):\n self.scenes[self.current_scene].add_object(Eye())\n self.redraw()",
"def draw(self):\r\n #if the UFO has only 1 life left, turn it red\r\n if(self.life <= 1):\r\n TARGET_UFO_COLOR = arcade.color.RED\r\n #If UFO has more than 1 life left, keep it silver\r\n else:\r\n TARGET_UFO_COLOR = arcade.color.SILVER\r\n arcade.draw_circle_outline(self.center.x, self.center.y, TARGET_UFO_HEIGHT, TARGET_UFO_COLOR, 3)\r\n arcade.draw_ellipse_filled(self.center.x, self.center.y, TARGET_UFO_WIDTH, TARGET_UFO_HEIGHT, TARGET_UFO_COLOR, 15)",
"def paintHair(self):\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(os.path.join(PATH_EDITOR_IMG, self.avatarConfiguration[\"gender\"], self.avatarConfiguration[\"headSize\"], \"hair_\"+self.avatarConfiguration[\"hairStyle\"], self.avatarConfiguration[\"hairColor\"] + IMG_EXTENSION))\n self.newAvatarImage(imgPath, \"hair\")",
"def graphics(env, fovea, objects, unit):\n plt.clf()\n\n env = environment.redraw(env, unit, objects)\n fovea_im = fovea.get_focus_image(env)\n\n plt.subplot(121)\n plt.title('Training environment')\n plt.xlim(0, unit)\n plt.ylim(0, unit)\n plt.imshow(env)\n\n # PLOT DESK EDGES\n plt.plot([0.2*unit, 0.2*unit, 0.8*unit, 0.8*unit, 0.2*unit],\n [0.2*unit, 0.8*unit, 0.8*unit, 0.2*unit, 0.2*unit], 'w-'\n )\n\n # PLOT FOVEA EDGES\n fov_indices = fovea.get_index_values()\n plt.plot([fov_indices[0][0], fov_indices[0][0], fov_indices[0][1],\n fov_indices[0][1], fov_indices[0][0]],\n [fov_indices[1][0], fov_indices[1][1], fov_indices[1][1],\n fov_indices[1][0], fov_indices[1][0]], 'w-'\n )\n\n plt.subplot(122)\n plt.title('Focus image')\n plt.imshow(fovea_im)\n\n plt.draw()\n plt.pause(0.01)",
"def draw_flower():\n turtle.right(45)\n draw_petal()\n turtle.right(90)\n draw_petal()\n turtle.right(90)\n draw_petal()\n turtle.right(90)\n draw_petal()\n turtle.right(135)\n turtle.forward(150) # draws the stem",
"def draw(self, screen):",
"def draw_flower_bed():\n turtle.up()\n turtle.left(180)\n turtle.forward(200)\n turtle.right(180)\n turtle.down()\n for x in range(3):\n draw_flower_advanced()"
] | [
"0.8009308",
"0.6916443",
"0.63930106",
"0.6312382",
"0.6312382",
"0.6248364",
"0.62259424",
"0.61367136",
"0.61108315",
"0.6107865",
"0.60468125",
"0.60296154",
"0.6011443",
"0.59987676",
"0.59953946",
"0.5938486",
"0.58893174",
"0.5887674",
"0.58734787",
"0.5868707",
"0.5858498",
"0.5846393",
"0.5822241",
"0.57960147",
"0.5788597",
"0.5676866",
"0.5659516",
"0.56542337",
"0.5652197",
"0.5646316"
] | 0.7662611 | 1 |
Draws arc for mouth | def drawMouth(win, winW, winH):
drawArc(win, winW/2, winH/2, winH/4, 60, 1.5) # draw mouth | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def draw_arc(self, center_x, center_y, radius, thickness, start_angle, end_angle, edge_shine=False):\n\n if end_angle >= start_angle:\n pass\n else:\n start_angle, end_angle = end_angle, start_angle\n\n rad = radius\n while rad <= radius + thickness:\n angle = start_angle\n while angle <= end_angle:\n x = center_x + rad * cos(radians(angle))\n y = center_y - rad * sin(radians(angle))\n if self.image_width >= x >= 0 and self.image_height >= y >= 0: # for the frames' limit protection.\n distance = int(sqrt((center_x - x) ** 2 + (center_y - y) ** 2))\n x = int(x)\n y = int(y)\n if radius <= distance <= radius + thickness:\n [b, g, r] = self.image[y, x] = numpy.array(self.image[y, x]) * numpy.array([0, 0, 1.1])\n\n # Following lines are for increase the visibility when the \"mark\" comes on the dark areas.\n if r <= 100:\n if r == 0:\n r = 1\n self.image[y, x] = [0, 0, 1]\n redness_rate = (255 / r) / 0.12\n self.image[y, x] = numpy.array(self.image[y, x]) * numpy.array([0, 0, redness_rate])\n\n if edge_shine:\n for thick in range(60, 100, 4):\n if radius + thickness * thick / 100 <= distance <= radius + thickness:\n # [b, g, r] = self.image[y, x]\n self.image[y, x] = numpy.array(self.image[y, x]) + numpy.array([thick * 0.06, thick * 0.06, 255])\n angle += 0.25\n rad += 1",
"def draw_self(self, x, y):\n noStroke()\n fill(1.0, 1.0, 0.0)\n\n arc(x, y, self.CHAR_WIDTH, self.CHAR_HEIGHT,\n radians(self.rot_begin + self.mouth_angle),\n radians(self.rot_end - self.mouth_angle))",
"def draw_arc_outline(center_x, center_y, width, height, color, start_angle,\n end_angle, border_width=1, tilt_angle=0):\n num_segments = 128\n GL.glEnable(GL.GL_BLEND)\n GL.glBlendFunc(GL.GL_SRC_ALPHA, GL.GL_ONE_MINUS_SRC_ALPHA)\n GL.glEnable(GL.GL_LINE_SMOOTH)\n GL.glHint(GL.GL_LINE_SMOOTH_HINT, GL.GL_NICEST)\n GL.glHint(GL.GL_POLYGON_SMOOTH_HINT, GL.GL_NICEST)\n\n GL.glLoadIdentity()\n GL.glTranslatef(center_x, center_y, 0)\n GL.glRotatef(tilt_angle, 0, 0, 1)\n GL.glLineWidth(border_width)\n\n # Set color\n if len(color) == 4:\n GL.glColor4ub(color[0], color[1], color[2], color[3])\n elif len(color) == 3:\n GL.glColor4ub(color[0], color[1], color[2], 255)\n\n GL.glBegin(GL.GL_LINE_STRIP)\n\n start_segment = int(start_angle / 360 * num_segments)\n end_segment = int(end_angle / 360 * num_segments)\n\n for segment in range(start_segment, end_segment + 1):\n theta = 2.0 * 3.1415926 * segment / num_segments\n\n x = width * math.cos(theta)\n y = height * math.sin(theta)\n\n GL.glVertex3f(x, y, 0.5)\n\n GL.glEnd()\n GL.glLoadIdentity()",
"def draw(self):\r\n arcade.draw_circle_filled(self.center.x, self.center.y, self.radius, TARGET_COLOR)",
"def draw(self):\n radius = self.width / 2\n center_x = self.x + radius\n center_y = self.y + radius\n arcade.draw_circle_filled(center_x, center_y, radius, self.fill.color)\n arcade.draw_circle_outline(\n center_x, center_y, radius, self.pen.color, 3)",
"def draw_arc(self, color, position, size, start_angle, end_angle, border_width = 0, anchor = 'topleft'):\n color = spyral.color._determine(color)\n offset = self._calculate_offset(anchor, size)\n pygame.draw.arc(self._surf, color, (position + offset, size), start_angle, end_angle, border_width)",
"def draw_housing_2():\r\n tom.pensize(3)\r\n tom.color(\"black\", \"darkgrey\")\r\n tom.begin_fill()\r\n tom.forward(80)\r\n tom.left(90)\r\n tom.forward(200)\r\n tom.circle(40, 180)\r\n tom.forward(200)\r\n tom.left(90)\r\n tom.end_fill()\r\n tom.hideturtle()",
"def draw_housing():\r\n green.pensize(3)\r\n green.color(\"black\", \"darkgrey\")\r\n green.begin_fill()\r\n green.forward(80)\r\n green.left(90)\r\n green.forward(200)\r\n green.circle(40, 180)\r\n green.forward(200)\r\n green.left(90)\r\n green.end_fill()",
"def DrawArc(*args, **kwargs):\n return _gdi_.PseudoDC_DrawArc(*args, **kwargs)",
"def DrawArc(*args, **kwargs):\n return _gdi_.DC_DrawArc(*args, **kwargs)",
"def arc(r, mv_direction):\n \n vert_amount = 80\n arc_vert_amount = int(vert_amount / 2);\n edge = 2 * r * math.sin(math.radians(360 / (2 * vert_amount)))\n polygon_angle = (vert_amount - 2) / vert_amount * 180\n angle = 180 - polygon_angle\n \n for i in range(arc_vert_amount):\n if i == 0: \n rotate_turtle(polygon_angle / 2, not mv_direction)\n else:\n rotate_turtle(angle, mv_direction)\n turtle.forward(edge)",
"def draw_arc_filled(center_x, center_y,\n width, height,\n color,\n start_angle, end_angle,\n tilt_angle=0):\n num_segments = 128\n GL.glEnable(GL.GL_BLEND)\n GL.glBlendFunc(GL.GL_SRC_ALPHA, GL.GL_ONE_MINUS_SRC_ALPHA)\n GL.glEnable(GL.GL_LINE_SMOOTH)\n GL.glHint(GL.GL_LINE_SMOOTH_HINT, GL.GL_NICEST)\n GL.glHint(GL.GL_POLYGON_SMOOTH_HINT, GL.GL_NICEST)\n\n GL.glLoadIdentity()\n GL.glTranslatef(center_x, center_y, 0)\n GL.glRotatef(tilt_angle, 0, 0, 1)\n\n # Set color\n if len(color) == 4:\n GL.glColor4ub(color[0], color[1], color[2], color[3])\n elif len(color) == 3:\n GL.glColor4ub(color[0], color[1], color[2], 255)\n\n GL.glBegin(GL.GL_TRIANGLE_FAN)\n\n start_segment = int(start_angle / 360 * num_segments)\n end_segment = int(end_angle / 360 * num_segments)\n GL.glVertex3f(0, 0, 0.5)\n\n for segment in range(start_segment, end_segment + 1):\n theta = 2.0 * 3.1415926 * segment / num_segments\n\n x = width * math.cos(theta)\n y = height * math.sin(theta)\n\n GL.glVertex3f(x, y, 0.5)\n\n GL.glEnd()\n GL.glLoadIdentity()",
"def _draw_arc(c, a, theta, **kwargs):\n s = np.arange(0, abs(theta), 0.01)\n s = np.sign(theta) * s\n d = a - c\n r = np.linalg.norm(d)\n alpha = DubinsUAV2D._angle(d)\n w = np.empty((len(s), 2))\n for i, t in enumerate(s):\n w[i] = c + r * np.array([[np.cos(alpha), -np.sin(alpha)], [np.sin(alpha), np.cos(alpha)]]) @ np.array(\n [np.cos(t), np.sin(t)])\n plt.plot(w[:, 0], w[:, 1], **kwargs)",
"def draw_arc(ax, center, height, width, theta1=0, theta2=numpy.pi, color='k', direction='down',\n num_points=DEFAULT_ARC_POINTS):\n LEFT_END_THETA = numpy.pi / 2\n RIGHT_END_THETA = numpy.pi * 1.5\n MIDPOINT_THETA = numpy.pi\n\n vertical_baseline = center[1]\n\n assert LEFT_END_THETA <= theta1 <= theta2 <= RIGHT_END_THETA\n\n b = height\n a = width / 2\n\n # determine how to allocate points\n left_angle_span = min(max(MIDPOINT_THETA - theta1, 0), theta2 - theta1)\n right_angle_span = min(max(theta2 - MIDPOINT_THETA, 0), theta2 - theta1)\n total_angle_span = left_angle_span + right_angle_span\n left_points = int(num_points * left_angle_span / total_angle_span)\n right_points = num_points - left_points\n\n x_coords = numpy.empty(num_points)\n y_coords = numpy.empty(num_points)\n\n if left_points:\n # plot upper left quadrant\n left_theta2 = theta1 + left_angle_span\n x, y = compute_half_arc_points(center=(center[0], 0),\n a=a, b=b,\n theta1=theta1, theta2=left_theta2,\n num_points=left_points)\n x_coords[:left_points] = x[:]\n y_coords[:left_points] = y[:]\n if right_points:\n # plot upper right quadrant\n right_theta1 = theta2 - right_angle_span\n x, y = compute_half_arc_points(center=(center[0], 0),\n a=a, b=b,\n theta1=right_theta1, theta2=theta2,\n num_points=right_points)\n x_coords[left_points:] = x[:]\n y_coords[left_points:] = y[:]\n\n if direction == 'down':\n y_coords = - y_coords\n\n y_coords += vertical_baseline\n\n ax.plot(x_coords, y_coords, color=color)",
"def rotate_arcs(self):\n\n if self.arc_direction:\n self.thick_arc_start_angle -= 5\n self.thick_arc_end_angle -= 5\n\n self.thin_arc_start_angle += 5\n self.thin_arc_end_angle += 5\n else:\n self.thick_arc_start_angle += 5\n self.thick_arc_end_angle += 5\n\n self.thin_arc_start_angle -= 5\n self.thin_arc_end_angle -= 5",
"def renderOnCanvas(self, canvas, radius=moonfieldViewer.R):\n\n for x in range(self.__xmax):\n for y in range(self.__ymax):\n # Make the halfmoon at x,y\n canvas.create_arc(\n radius*2*x, radius*2*y, radius*2*(x+1)-1, radius*2*(y+1)-1,\n start = self.__data[(x,y)] * self.i2d, extent = 180.0,\n fill=\"Black\")",
"def draw(self, DISPLAYSURF, frame:int):\r\n\r\n #Pac-Man gets drawn\r\n pg.draw.circle(DISPLAYSURF, Colors.colors['YELLOW'], (self.pos[0] + self.grid_size // 2, self.pos[1] + self.grid_size // 2), self.radius)\r\n\r\n\r\n #The rest of the function has to do with Pacmans mouth animation\r\n # The offsets are there so Pac-Man might look in the correct direction while moving\r\n x_offset = 0\r\n y_offset = 0\r\n x_offset2 = 0\r\n y_offset2 = 0\r\n\r\n frame %= 20 #< Frame is made %20 so Pac-Man opens and closes his mouth 3 times per second\r\n\r\n if self.direction == 'u':\r\n y_offset = -1\r\n x_offset2 = 1\r\n elif self.direction == 'd':\r\n y_offset = 1\r\n x_offset2 = 1\r\n elif self.direction == 'r':\r\n x_offset = 1\r\n y_offset2 = 1\r\n elif self.direction == 'l':\r\n x_offset = -1\r\n y_offset2 = 1\r\n\r\n pacman_center = (self.pos[0] + self.grid_size//2, self.pos[1] + self.grid_size // 2)\r\n\r\n # When Pac-Man moves, his mouth will open and close depending on the current frame number \r\n if self.state == 'm' and (frame >= 5 and frame < 20): \r\n length_open_mouth = 0 #< This variable sets the width of Pacmans mouth\r\n\r\n if (frame >= 5 and frame < 10) or (frame >= 15 and frame < 20):\r\n length_open_mouth = self.grid_size - 10\r\n\r\n elif frame >= 10 and frame < 15:\r\n length_open_mouth = self.grid_size\r\n\r\n # Pacmans mouth gets drawn\r\n triangle_p1 = (pacman_center[0] + x_offset * self.radius, pacman_center[1] + y_offset * self.radius)\r\n triangle_p2 = triangle_p1[0] + x_offset2 * length_open_mouth, triangle_p1[1] + y_offset2 * length_open_mouth\r\n triangle_p3 = triangle_p1[0] - x_offset2 * length_open_mouth, triangle_p1[1] - y_offset2 * length_open_mouth\r\n pg.draw.polygon(DISPLAYSURF, Colors.colors['BLACK'], (pacman_center, triangle_p2, triangle_p3))",
"def draw_car(self):\n a = self.h / 50\n ellipse(screen, BLACK, (self.x - 15 * a, self.y + 35 * a, 30 * a, 10 * a))\n rect(screen, LIGHT_BLUE, (self.x, self.y, self.dir * 260 * a, self.h))\n rect(screen, LIGHT_BLUE, (self.x + self.dir * 40 * a, self.y - 40 * a, self.dir * 130 * a, 40 * a))\n rect(screen, LIGHT_GREY, (self.x + self.dir * 50 * a, self.y - 30 * a, self.dir * 45 * a, 30 * a))\n rect(screen, LIGHT_GREY, (self.x + self.dir * 120 * a, self.y - 30 * a, self.dir * 48 * a, 30 * a))\n rect(screen, LIGHT_GREY, (self.x + self.dir * 248 * a, self.y + 2 * a, self.dir * 10 * a, 10 * a))\n circle(screen, BLACK, (self.x + self.dir * int(220 * a), self.y + int(50 * a)), int(25 * a))\n circle(screen, BLACK, (self.x + self.dir * int(50 * a), self.y + int(50 * a)), int(25 * a))",
"def draw_shape_arc(self, arc, xform, colour):\n x, y, r = arc.x, arc.y, arc.radius\n # if the arc segment were extended to draw a full circle, box would\n # enclose that circle\n minpt, maxpt = [xform.chain(Point(px, py)) for (px, py)\n in [(x - r, y - r), (x + r, y + r)]]\n xs, ys = [minpt.x, maxpt.x], [minpt.y, maxpt.y]\n box = (min(xs), min(ys), max(xs), max(ys))\n\n center = xform.chain(Point(x, y))\n\n def pt_to_deg(pt):\n # given a point, give the angle w.r.t. to the xform'd center of the\n # arc (ie. where it will be when drawn)\n # 3 o'clock is angle of 0, angles increase clockwise\n opp, adj = pt.y - center.y, pt.x - center.x\n if adj == 0:\n if opp > 0:\n return 90\n return 270\n angle = 180 * atan(opp / float(adj)) / pi\n if pt.x < center.x:\n angle += 180\n return int(angle % 360)\n\n # create a point in the middle of the arc (used to detect that the xform\n # has flipped the arc around. In that case, drawing from start_angle to\n # end_angle will go in the wrong direction, and draw out exactly the\n # wrong part of the circle)\n mid_ang = (arc.start_angle + arc.end_angle) / 2\n if arc.start_angle > arc.end_angle:\n mid_ang = (mid_ang - 1) % 2\n mid_pt = xform.chain(Point(cos((2 - mid_ang) * pi) * arc.radius + x,\n sin((2 - mid_ang) * pi) * arc.radius + y))\n\n start, end = [xform.chain(pt) for pt in arc.ends()]\n if pt_to_deg(start) < pt_to_deg(end):\n if not (pt_to_deg(start) < pt_to_deg(mid_pt) < pt_to_deg(end)):\n # swap start and end so that the arc traces through the\n # transformed midpoint\n start, end = end, start\n elif (pt_to_deg(end) < pt_to_deg(mid_pt) < pt_to_deg(start)):\n # swap start and end so that the arc traces through the\n # transformed midpoint\n start, end = end, start\n\n # by using the arc.ends() points, any rotation in xform gets handled\n # properly.\n self.canvas.arc(box, pt_to_deg(start), pt_to_deg(end), fill=colour)",
"def _arc(i, j, width=1, linestyle='-', color='black'):\n\treturn Arc(((i+j)/2., 0), abs(i-j), abs(i-j), 0, 0, 180, linewidth=width, \n\t\tedgecolor=color, fill=False, linestyle=linestyle)",
"def _draw_arc(file, start_x, end_x, y, height_in_units, deprel, css_class):\n height = height_in_units * _ARC_HEIGHT_UNIT\n radius = _arc_radius(height_in_units)\n length = _arc_min_length(height_in_units)\n\n # Start.\n file.write(u' <g class=\"%s\">\\n' % css_class)\n\n # Path.\n path = (\n 'M %.2f %.2f'\n 'A %.2f %.2f 0 0 1 %.2f %.2f'\n 'L %.2f %.2f'\n 'A %.2f %.2f 0 0 1 %.2f %.2f'\n ) % (\n min(start_x, end_x), y,\n radius, radius, min(start_x, end_x) + length / 2, y - height,\n max(start_x, end_x) - length / 2, y - height,\n radius, radius, max(start_x, end_x), y\n )\n file.write(u' <path d=\"%s\" class=\"arc\" />\\n' % path)\n file.write(u' <path d=\"%s\" class=\"arc hid\" />\\n' % path)\n\n # Arrow.\n arrow_angle = _ANGLE if start_x > end_x else math.pi - _ANGLE\n _draw_arrow(file, end_x, y, arrow_angle)\n\n # Role.\n deprel = cgi.escape(deprel)\n file.write(u' <text x=\"%i\" y=\"%i\" class=\"role\">%s</text>\\n' %\n ((start_x + end_x) / 2, y - height - 0.2 * _SMALL_FONT, deprel))\n\n # End.\n file.write(u' </g>\\n')",
"def draw_arc(self, color, start_angle, end_angle,\n position, size=None, border_width=0, anchor='topleft'):\n if size is None:\n rect = spyral.Rect(position)\n else:\n rect = spyral.Rect(position, size)\n offset = self._calculate_offset(anchor, rect.size)\n pygame.draw.arc(self._surf, color, (rect.pos + offset, rect.size),\n start_angle, end_angle, border_width)\n self._version += 1\n spyral.util.scale_surface.clear(self._surf)\n return self",
"def draw(self):\n arcade.draw_circle_filled(self.position_x, self.position_y, self.radius,self.player_color)",
"def draw(self):\r\n arcade.draw_rectangle_filled(self.center.x, self.center.y, self.radius, self.radius, TARGET_SAFE_COLOR)",
"def circle(draw, centrex, centrey, radius, color=\"#AAAAAAFF\") -> None:\n # convert cartesian centre to pixel centre\n cx, cy = pixelcoord(centrex, centrey)\n # top left and bottom right coordinates\n rect = [(cx-radius, cy-radius), (cx+radius, cy+radius)]\n # draw\n draw.arc(rect, 0, 360, color)",
"def draw(self):\n arcade.draw_rectangle_outline(self.position_x, self.position_y, self.radius, self.color)",
"def draw(self):\r\n #method provided https://content.byui.edu/file/856c5360-ff89-4409-a7ae-bca07f06f19c/1/week06/skeet.html\r\n arcade.draw_circle_outline(self.center.x, self.center.y, self.radius, TARGET_COLOR)\r\n text_x = self.center.x - (self.radius / 2)\r\n text_y = self.center.y - (self.radius / 2)\r\n arcade.draw_text(repr(self.life), text_x, text_y, TARGET_COLOR, font_size=20)",
"def horizontal_arcs_iglu():\n arc(screen, BLACK, (50, 560, 300, 20), 3.14, 0)\n arc(screen, BLACK, (60, 510, 280, 20), 3.14, 0)\n arc(screen, BLACK, (80, 460, 240, 20), 3.14, 0)\n arc(screen, BLACK, (120, 420, 160, 20), 3.14, 0)",
"def _arc_radius(height_in_units):\n return height_in_units * _ARC_HEIGHT_UNIT / (1 - math.cos(_ANGLE))",
"def makeCircleOutline(self):\n #circle defined\n global circ_main\n circ_main = Circle(stroke_color=BLUE).scale(2).shift(LEFT*5)\n\n #dot at circle and dot at center\n global dot_circ\n dot_circ = always_redraw(\n lambda : Dot(circ_main.get_end())\n )\n global dot_center\n dot_center = Dot(LEFT*5)\n \n #line from origin to circle\n global line_circ\n line_circ = always_redraw(\n lambda : Line(start=dot_center.get_center(), end=dot_circ.get_center())\n )\n \n #write stuff\n self.play(Write(dot_circ), Write(line_circ), Write(dot_center))\n self.play(Write(circ_main), run_time=3, rate_func=double_smooth)"
] | [
"0.7010531",
"0.67628413",
"0.6670524",
"0.646824",
"0.644992",
"0.6396002",
"0.63777846",
"0.63353235",
"0.6235798",
"0.6210867",
"0.6182836",
"0.61770433",
"0.6169239",
"0.61444896",
"0.6139231",
"0.61139756",
"0.61103326",
"0.60893536",
"0.60711163",
"0.6058924",
"0.6048405",
"0.60454524",
"0.60454255",
"0.59931046",
"0.59486663",
"0.5920647",
"0.5887013",
"0.58827996",
"0.5882688",
"0.58748823"
] | 0.73307204 | 0 |
Draws arcs for eyebrows | def drawEyebrows(win, winW, winH):
drawArc(win, winW/2-winW/5, winH/2-winH/7.5+winH/10, winH/6, 30, 0.5) # left eyebrow
drawArc(win, winW/2+winW/5, winH/2-winH/7.5+winH/10, winH/6, 30, 0.5) # right eyebrow | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def horizontal_arcs_iglu():\n arc(screen, BLACK, (50, 560, 300, 20), 3.14, 0)\n arc(screen, BLACK, (60, 510, 280, 20), 3.14, 0)\n arc(screen, BLACK, (80, 460, 240, 20), 3.14, 0)\n arc(screen, BLACK, (120, 420, 160, 20), 3.14, 0)",
"def draw_edges():\n\n def bezier(p0, p1, p2, **kwargs):\n x0, y0 = p0\n x1, y1 = p1\n x2, y2 = p2\n xb = [\n (1 - t) ** 2 * x0 + 2 * t * (1 - t) * x1 + t ** 2 * x2\n for t in np.linspace(0.0, 1.0, n_bezier)\n ]\n yb = [\n (1 - t) ** 2 * y0 + 2 * t * (1 - t) * y1 + t ** 2 * y2\n for t in np.linspace(0.0, 1.0, n_bezier)\n ]\n ax.plot(xb, yb, **kwargs)\n\n for edge in self._edges:\n\n u, v = edge\n\n x0, y0, a0 = (\n node_properties[\"node_x\"][u],\n node_properties[\"node_y\"][u],\n node_properties[\"theta\"][u],\n )\n x2, y2, a2 = (\n node_properties[\"node_x\"][v],\n node_properties[\"node_y\"][v],\n node_properties[\"theta\"][v],\n )\n\n angle = a0 + (a2 - a0) / 2\n\n # if angle > np.pi:\n # angle_corr = angle - np.pi\n # else:\n # angle_corr = angle\n\n distance = np.abs(a2 - a0)\n if distance > np.pi:\n distance = distance - np.pi\n distance = (1.0 - 1.0 * distance / np.pi) * R / 2.5\n x1 = distance * np.cos(angle)\n y1 = distance * np.sin(angle)\n x1 = 0\n y1 = 0\n\n ## dibuja los arcos\n bezier(\n [x0, y0], [x1, y1], [x2, y2], **self._edges[edge],\n )",
"def drawEllipticArc(x,y,a,b,alpha,beta,theta,ucoords=1):\n if ucoords:\n dislin.rlarc(x,y,a,b,alpha,beta,theta)\n else:\n dislin.arcell(x,y,a,b,alpha,beta,theta)",
"def draw_equitriangle(t,sz):\r\n\r\n\tdraw_poly(t, 3, sz)",
"def _drawRays(self):\r\n for rayID, ray in self.rayDict.items():\r\n ray.drawPath()",
"def __draw_eras(self):\n xmin = self.ax.transLimits.transform((1-.5, 0))[0]\n xmax = self.ax.transLimits.transform((self.xmax+.5, 0))[0]\n for era in self.eras:\n for y in range(era.start.y, era.end.y+1):\n if y == era.start.y:\n axesUnits = self.ax.transLimits.transform(\n (era.start.x-.5, era.start.y))\n self.ax.axhspan(y-.5, y+.5, facecolor=era.color,\n alpha=era.alpha, xmin=axesUnits[0], xmax=xmax)\n elif y == era.end.y:\n axesUnits = self.ax.transLimits.transform(\n (era.end.x+.5, era.end.y))\n self.ax.axhspan(y-.5, y+.5, facecolor=era.color,\n alpha=era.alpha, xmin=xmin, xmax=axesUnits[0])\n else:\n self.ax.axhspan(y-.5, y+.5, facecolor=era.color,\n alpha=era.alpha, xmin=xmin, xmax=xmax)",
"def draw_aquarium(aq: Aquarium):\n penup()\n goto(-aq.width/2, aq.height/2)\n seth(0)\n fillcolor('darkblue')\n begin_fill()\n for _ in range(2):\n forward(aq.width)\n right(90)\n forward(aq.height)\n right(90)\n end_fill()\n draw_fish_list(aq.fishes)",
"def output(self):\n xpos, ypos = self.arcpoints[2]\n startxy = np.array([xpos, ypos]) # start point\n xpos, ypos = self.arcpoints[1]\n pointxy = np.array([xpos, ypos]) # a point on the curve\n xpos, ypos = self.arcpoints[0]\n endxy = np.array([xpos, ypos]) # end point\n\n a_norm = np.linalg.norm(endxy - pointxy)\n b_norm = np.linalg.norm(endxy - startxy)\n c_norm = np.linalg.norm(pointxy - startxy)\n \"\"\"\n s_factor = (a_norm + b_norm + c_norm) / 2\n radius = a_norm * b_norm * c_norm / 4\n / np.sqrt(s_factor * (s_factor - a_norm)\n * (s_factor - b_norm)\n * (s_factor - c_norm))\n \"\"\"\n b_factor1 = a_norm * a_norm * (b_norm * b_norm\n + c_norm * c_norm\n - a_norm * a_norm)\n b_factor2 = b_norm * b_norm * (a_norm * a_norm\n + c_norm * c_norm\n - b_norm * b_norm)\n b_factor3 = c_norm * c_norm * (a_norm * a_norm\n + b_norm * b_norm\n - c_norm * c_norm)\n centerxy = np.column_stack((startxy,\n pointxy,\n endxy)).dot(np.hstack((b_factor1,\n b_factor2,\n b_factor3)))\n centerxy /= b_factor1 + b_factor2 + b_factor3 # arc center\n\n self.def_field['XY_center'] = (centerxy)\n self.def_field['XY_arcpoints'].append(startxy) # start point\n self.def_field['XY_arcpoints'].append(endxy) # end point\n\n to_write = 'A '\n xpos, ypos = self.def_field['XY_center']\n\n to_write += str(int(xpos)) + ' ' + str(int(ypos)) + ' '\n to_write += str(self.def_field['radius']) + ' '\n to_write += str(self.def_field['angle1']) + ' '\n to_write += str(self.def_field['angle2']) + ' '\n to_write += str(self.def_field['unit']) + ' '\n to_write += str(self.def_field['convert']) + ' '\n to_write += str(self.def_field['width']) + ' '\n to_write += str(self.def_field['fill']) + ' '\n for xpos, ypos in self.def_field['XY_arcpoints']:\n to_write += str(self.offset[0] + xpos) + ' ' \\\n + str(self.offset[1] + ypos) + ' '\n to_write += '\\n'\n return to_write",
"def draw_lines(asr,ax):\n r = asr.value\n y = 475.\n x = (r**2-y**2)**(.5)\n xs = np.linspace(-x,x,10)\n yt = np.zeros(xs.size)+y\n yb = np.zeros(xs.size)-y\n ax.plot(xs,yt,'-.',color='red',alpha=1.,linewidth=2,zorder=5000)\n ax.plot(xs,yb,'-.',color='red',alpha=1.,linewidth=2,zorder=5000)\n return ax",
"def _arc(i, j, width=1, linestyle='-', color='black'):\n\treturn Arc(((i+j)/2., 0), abs(i-j), abs(i-j), 0, 0, 180, linewidth=width, \n\t\tedgecolor=color, fill=False, linestyle=linestyle)",
"def drawE8(\n pl=57, ## Parallel length\n gw=12.5, ## Gauge width\n tw=20, ## Grip width\n tl=50.0, ## Grip length\n rd=12.5, ## Radius\n):\n import numpy as np\n A = tw/2. - gw/2.\n th = -90.-np.arccos((rd-A)/rd)*180./np.pi\n x = rd * np.sin(th*np.pi/180.)\n ## Round...\n th0=-90\n th_delta=np.arccos((rd-A)/rd)*180/np.pi\n th1=th0+th_delta\n ths=np.linspace(th0*np.pi/180.,th1*np.pi/180.)\n xs = rd*np.cos(ths)\n ys = rd*np.sin(ths)\n ## translate xs,ys\n xs = xs + (x-xs[-1])\n ys = ys + (-A+tw-ys[-1])\n xyRound=[xs.tolist(),ys.tolist()]\n \n \n ## parallel\n x0,y0=xs[0],ys[0]\n xyParallel = [[x0-0.5*pl,x0],[y0,y0]]\n \n ## Right grip\n XS=[x+tl,x+tl,x][::-1]\n YS=[-A+0.5*tw,-A+tw,-A+tw][::-1]\n xyRG=[XS,YS]\n \n x=xyParallel[0]+xyRound[0]+xyRG[0]\n y=xyParallel[1]+xyRound[1]+xyRG[1]\n \n xyCoords=np.array([x,y])\n \n # print xyCoords.shape\n \n ## translate the coordinate so that the center of gravity is (0,0)\n xyCoords[0]=xyCoords[0]-xyCoords[0][0]\n xyCoords[1]=xyCoords[1]-xyCoords[1][-1]\n # plot(xyCoords[0],xyCoords[1],'-')\n \n ## Apply 2-fold symmetry.\n sym0 =[[ 1,0],[0, 1]] ## Identical\n sym1 =[[-1,0],[0, 1]] ## Reflect y axis\n sym2 =[[ 1,0],[0,-1]] ## Reflect x axis\n sym3 =[[-1,0],[0,-1]] ## Reflect origin\n \n sym = np.array([sym0,sym2,sym3,sym1])\n # plot(xyCoords[0,0],xyCoords[1,0],'x')\n\n xyTot=[[],[]]\n for i in range(len(sym)):\n symOp = sym[i][:,:]# (2,2)\n temp = np.tensordot(symOp,xyCoords,axes=[1,0])\n if i==1 or i==3:\n temp[0]=temp[0][::-1]\n temp[1]=temp[1][::-1]\n elif i==0 or i==2:\n temp=temp[::]\n\n for j in range(len(temp[0])):\n xyTot[0].append(temp[0][j])\n xyTot[1].append(temp[1][j])\n\n xyTot=np.array(xyTot)\n\n\n x0=min(xyTot[0])\n y0=min(xyTot[1])+tw/2.\n\n xyTot[0] = xyTot[0] - x0\n xyTot[1] = xyTot[1] - y0\n\n return xyTot",
"def draw_a(self):\r\n pen.down()\r\n pen.left(90)\r\n pen.forward(40)\r\n pen.right(90)\r\n pen.forward(40)\r\n pen.right(90)\r\n pen.forward(40)\r\n pen.up()\r\n pen.back(20)\r\n pen.right(90)\r\n pen.down()\r\n pen.forward(40)\r\n pen.up()\r\n pen.left(90)\r\n pen.forward(20)\r\n pen.left(90)\r\n pen.forward(50)",
"def DrawEllipticArc(*args, **kwargs):\n return _gdi_.DC_DrawEllipticArc(*args, **kwargs)",
"def draw(self):\n arcade.draw_rectangle_outline(self.position_x, self.position_y, self.radius, self.color)",
"def rotate_arcs(self):\n\n if self.arc_direction:\n self.thick_arc_start_angle -= 5\n self.thick_arc_end_angle -= 5\n\n self.thin_arc_start_angle += 5\n self.thin_arc_end_angle += 5\n else:\n self.thick_arc_start_angle += 5\n self.thick_arc_end_angle += 5\n\n self.thin_arc_start_angle -= 5\n self.thin_arc_end_angle -= 5",
"def draw_edges(self):\n pass",
"def create_rink():\n\n # RINK\n coords = OFFSET, OFFSET, OFFSET+22*SCALE, OFFSET+22*SCALE\n canvas.create_arc(coords, start=90, extent=90, fill=WHITE, outline=\"\")\n coords = OFFSET, HEIGHT-OFFSET-22*SCALE, OFFSET+22*SCALE, HEIGHT-OFFSET\n canvas.create_arc(coords, start=180, extent=90, fill=WHITE, outline=WHITE)\n coords = WIDTH-OFFSET-22*SCALE, HEIGHT-OFFSET-22*SCALE, WIDTH-OFFSET, HEIGHT-OFFSET\n canvas.create_arc(coords, start=270, extent=90, fill=WHITE, outline=WHITE)\n coords = WIDTH-OFFSET-22*SCALE, OFFSET, WIDTH-OFFSET, OFFSET+22*SCALE\n canvas.create_arc(coords, start=0, extent=90, fill=WHITE, outline=WHITE)\n coords = OFFSET+11*SCALE, OFFSET, WIDTH-OFFSET-11*SCALE, OFFSET, WIDTH-OFFSET-11*SCALE, HEIGHT-OFFSET, OFFSET+11*SCALE, HEIGHT-OFFSET\n canvas.create_polygon(coords, fill=WHITE, outline=WHITE)\n coords = OFFSET, OFFSET+11*SCALE, WIDTH-OFFSET, OFFSET+11*SCALE, WIDTH-OFFSET, HEIGHT-OFFSET-11*SCALE, OFFSET, HEIGHT-OFFSET-11*SCALE\n canvas.create_polygon(coords, fill=WHITE, outline=WHITE)\n\n # CENTER CIRCLE\n coords = WIDTH/2-15*SCALE, HEIGHT/2-15*SCALE, WIDTH/2+15*SCALE, HEIGHT/2+15*SCALE\n canvas.create_oval(coords, outline=BLUE, width=2, fill=WHITE)\n\n # HALF CENTER CIRCLE\n coords = WIDTH/2-10*SCALE, HEIGHT-OFFSET-10*SCALE, WIDTH/2+10*SCALE, HEIGHT-OFFSET+10*SCALE\n canvas.create_arc(coords, outline=RED, width=2, start=0, extent=180)\n\n # GOAL AREA\n # - Left\n # - - Crease\n coords = OFFSET+5*SCALE, HEIGHT/2-6*SCALE, OFFSET+17*SCALE, HEIGHT/2+6*SCALE\n canvas.create_arc(coords, fill=LIGHT_BLUE, start=318, extent=84, outline=\"\")\n canvas.create_arc(coords, outline=RED, start=318, extent=84, style=ARC)\n coords = OFFSET+11*SCALE, HEIGHT/2-4*SCALE, OFFSET+15.5*SCALE, HEIGHT/2-4*SCALE, OFFSET+15.5*SCALE, HEIGHT/2+4*SCALE, OFFSET+11*SCALE, HEIGHT/2+4*SCALE\n canvas.create_polygon(coords, fill=LIGHT_BLUE, outline=\"\")\n coords = OFFSET+11*SCALE, HEIGHT/2-4*SCALE, OFFSET+15.2*SCALE+1, HEIGHT/2-4*SCALE\n canvas.create_line(coords, fill=RED)\n coords = OFFSET+15.2*SCALE+1, HEIGHT/2+4*SCALE, OFFSET+11*SCALE, HEIGHT/2+4*SCALE\n canvas.create_line(coords, fill=RED)\n # - - Restricted Area\n coords = OFFSET, HEIGHT/2-14*SCALE, OFFSET+11*SCALE, HEIGHT/2-9*SCALE\n canvas.create_line(coords, fill=RED)\n coords = OFFSET, HEIGHT/2+14*SCALE, OFFSET+11*SCALE, HEIGHT/2+9*SCALE\n canvas.create_line(coords, fill=RED)\n # - - Goal\n coords = OFFSET+8*SCALE, HEIGHT/2-3*SCALE, OFFSET+11*SCALE, HEIGHT/2-3*SCALE, OFFSET+11*SCALE, HEIGHT/2+3*SCALE, OFFSET+8*SCALE, HEIGHT/2+3*SCALE\n canvas.create_polygon(coords, fill=GRAY, outline=RED)\n # - Right\n # - - Crease\n coords = WIDTH-(OFFSET+5*SCALE), HEIGHT/2-6*SCALE, WIDTH-(OFFSET+17*SCALE), HEIGHT/2+6*SCALE\n canvas.create_arc(coords, fill=LIGHT_BLUE, start=138, extent=84, outline=\"\")\n canvas.create_arc(coords, outline=RED, start=138, extent=84, style=ARC)\n coords = WIDTH-(OFFSET+11*SCALE), HEIGHT/2-4*SCALE, WIDTH-(OFFSET+15.5*SCALE), HEIGHT/2-4*SCALE, WIDTH-(OFFSET+15.5*SCALE), HEIGHT/2+4*SCALE, WIDTH-(OFFSET+11*SCALE), HEIGHT/2+4*SCALE\n canvas.create_polygon(coords, fill=LIGHT_BLUE, outline=\"\")\n coords = WIDTH-(OFFSET+11*SCALE), HEIGHT/2-4*SCALE, WIDTH-(OFFSET+15.2*SCALE+1), HEIGHT/2-4*SCALE\n canvas.create_line(coords, fill=RED)\n coords = WIDTH-(OFFSET+15.2*SCALE+1), HEIGHT/2+4*SCALE, WIDTH-(OFFSET+11*SCALE), HEIGHT/2+4*SCALE\n canvas.create_line(coords, fill=RED)\n # - - Restricted Area\n coords = WIDTH-OFFSET, HEIGHT/2-14*SCALE, WIDTH-OFFSET-11*SCALE, HEIGHT/2-9*SCALE\n canvas.create_line(coords, fill=RED)\n coords = WIDTH-OFFSET, 
HEIGHT/2+14*SCALE, WIDTH-OFFSET-11*SCALE, HEIGHT/2+9*SCALE\n canvas.create_line(coords, fill=RED)\n # - - Goal\n coords = WIDTH-(OFFSET+8*SCALE), HEIGHT/2-3*SCALE, WIDTH-(OFFSET+11*SCALE), HEIGHT/2-3*SCALE, WIDTH-(OFFSET+11*SCALE), HEIGHT/2+3*SCALE, WIDTH-(OFFSET+8*SCALE), HEIGHT/2+3*SCALE\n canvas.create_polygon(coords, fill=GRAY, outline=RED)\n\n # LINES\n # - Left Baseline\n coords = OFFSET+11*SCALE, OFFSET, OFFSET+11*SCALE, HEIGHT-OFFSET\n canvas.create_line(coords, fill=RED, width=1.5)\n # - Right Baseline\n coords = WIDTH-OFFSET-11*SCALE, OFFSET, WIDTH-OFFSET-11*SCALE, HEIGHT-OFFSET\n canvas.create_line(coords, fill=RED, width=1.5)\n # - Left Blueline\n coords = OFFSET+70*SCALE, OFFSET, OFFSET+70*SCALE, HEIGHT-OFFSET\n canvas.create_line(coords, fill=BLUE, width=7)\n # - Right Blueline\n coords = WIDTH-(OFFSET+70*SCALE), OFFSET, WIDTH-(OFFSET+70*SCALE), HEIGHT-OFFSET\n canvas.create_line(coords, fill=BLUE, width=7)\n # - Redline\n coords = WIDTH/2, OFFSET, WIDTH/2, HEIGHT-OFFSET\n canvas.create_line(coords, fill=RED, width=7)\n coords = WIDTH/2, OFFSET, WIDTH/2, HEIGHT-OFFSET\n canvas.create_line(coords, fill=WHITE, width=5, dash=(9,9))\n\n # RINK OUTLINE\n coords = OFFSET, OFFSET, OFFSET+22*SCALE, OFFSET+22*SCALE\n canvas.create_arc(coords, start=90, extent=90, outline=BLACK, style=ARC, width=2)\n coords = OFFSET, HEIGHT-OFFSET-22*SCALE, OFFSET+22*SCALE, HEIGHT-OFFSET\n canvas.create_arc(coords, start=180, extent=90, outline=BLACK, style=ARC, width=2)\n coords = WIDTH-OFFSET-22*SCALE, HEIGHT-OFFSET-22*SCALE, WIDTH-OFFSET, HEIGHT-OFFSET\n canvas.create_arc(coords, start=270, extent=90, outline=BLACK, style=ARC, width=2)\n coords = WIDTH-OFFSET-22*SCALE, OFFSET, WIDTH-OFFSET, OFFSET+22*SCALE\n canvas.create_arc(coords, start=0, extent=90, outline=BLACK, style=ARC, width=2)\n coords = OFFSET+11*SCALE, OFFSET, WIDTH-OFFSET-11*SCALE, OFFSET\n canvas.create_line(coords, fill=BLACK, width=2)\n coords = WIDTH-OFFSET, OFFSET+11*SCALE, WIDTH-OFFSET, HEIGHT-OFFSET-11*SCALE\n canvas.create_line(coords, fill=BLACK, width=2)\n coords = WIDTH-OFFSET-11*SCALE, HEIGHT-OFFSET, OFFSET+11*SCALE, HEIGHT-OFFSET\n canvas.create_line(coords, fill=BLACK, width=2)\n coords = OFFSET, OFFSET+11*SCALE, OFFSET, HEIGHT-OFFSET-11*SCALE\n canvas.create_line(coords, fill=BLACK, width=2)\n\n\n # CENTER DOT\n coords = WIDTH/2-1*SCALE-1, HEIGHT/2-1*SCALE-1, WIDTH/2+1*SCALE+1, HEIGHT/2+1*SCALE+1\n canvas.create_oval(coords, outline=WHITE, fill=BLUE)\n\n # FACEOFF\n # - Top Left\n # - - Ticks\n coords = OFFSET+29.5*SCALE, HEIGHT/2-39*SCALE, OFFSET+29.5*SCALE, HEIGHT/2-5*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n coords = OFFSET+32.5*SCALE, HEIGHT/2-39*SCALE, OFFSET+32.5*SCALE, HEIGHT/2-5*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n # - - Circles\n coords = OFFSET+16*SCALE, HEIGHT/2-37*SCALE, OFFSET+46*SCALE, HEIGHT/2-7*SCALE\n canvas.create_oval(coords, outline=RED, width=2, fill=WHITE)\n coords = OFFSET+30*SCALE, HEIGHT/2-23*SCALE, OFFSET+32*SCALE, HEIGHT/2-21*SCALE\n canvas.create_oval(coords, fill=RED, outline=\"\")\n # - - Cross\n coords = OFFSET+25*SCALE, HEIGHT/2-22.8*SCALE, OFFSET+29*SCALE, HEIGHT/2-22.8*SCALE, OFFSET+29*SCALE, HEIGHT/2-25.8*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n coords = OFFSET+25*SCALE, HEIGHT/2-21.2*SCALE, OFFSET+29*SCALE, HEIGHT/2-21.2*SCALE, OFFSET+29*SCALE, HEIGHT/2-18.2*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n coords = OFFSET+37*SCALE, HEIGHT/2-22.8*SCALE, OFFSET+33*SCALE, HEIGHT/2-22.8*SCALE, OFFSET+33*SCALE, HEIGHT/2-25.8*SCALE\n 
canvas.create_line(coords, fill=RED, width=2)\n coords = OFFSET+37*SCALE, HEIGHT/2-21.2*SCALE, OFFSET+33*SCALE, HEIGHT/2-21.2*SCALE, OFFSET+33*SCALE, HEIGHT/2-18.2*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n # - Bottom Left\n # - - Ticks\n coords = OFFSET+29.5*SCALE, HEIGHT/2+39*SCALE, OFFSET+29.5*SCALE, HEIGHT/2+5*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n coords = OFFSET+32.5*SCALE, HEIGHT/2+39*SCALE, OFFSET+32.5*SCALE, HEIGHT/2+5*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n # - - Circles\n coords = OFFSET+16*SCALE, HEIGHT/2+37*SCALE, OFFSET+46*SCALE, HEIGHT/2+7*SCALE\n canvas.create_oval(coords, outline=RED, width=2, fill=WHITE)\n coords = OFFSET+30*SCALE, HEIGHT/2+23*SCALE, OFFSET+32*SCALE, HEIGHT/2+21*SCALE\n canvas.create_oval(coords, fill=RED, outline=\"\")\n # - - Cross\n coords = OFFSET+25*SCALE, HEIGHT/2+22.8*SCALE, OFFSET+29*SCALE, HEIGHT/2+22.8*SCALE, OFFSET+29*SCALE, HEIGHT/2+25.8*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n coords = OFFSET+25*SCALE, HEIGHT/2+21.2*SCALE, OFFSET+29*SCALE, HEIGHT/2+21.2*SCALE, OFFSET+29*SCALE, HEIGHT/2+18.2*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n coords = OFFSET+37*SCALE, HEIGHT/2+22.8*SCALE, OFFSET+33*SCALE, HEIGHT/2+22.8*SCALE, OFFSET+33*SCALE, HEIGHT/2+25.8*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n coords = OFFSET+37*SCALE, HEIGHT/2+21.2*SCALE, OFFSET+33*SCALE, HEIGHT/2+21.2*SCALE, OFFSET+33*SCALE, HEIGHT/2+18.2*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n # - Top Right\n # - - Ticks\n coords = WIDTH-(OFFSET+29.5*SCALE), HEIGHT/2-39*SCALE, WIDTH-(OFFSET+29.5*SCALE), HEIGHT/2-5*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n coords = WIDTH-(OFFSET+32.5*SCALE), HEIGHT/2-39*SCALE, WIDTH-(OFFSET+32.5*SCALE), HEIGHT/2-5*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n # - - Circles\n coords = WIDTH-(OFFSET+16*SCALE), HEIGHT/2-37*SCALE, WIDTH-(OFFSET+46*SCALE), HEIGHT/2-7*SCALE\n canvas.create_oval(coords, outline=RED, width=2, fill=WHITE)\n coords = WIDTH-(OFFSET+30*SCALE), HEIGHT/2-23*SCALE, WIDTH-(OFFSET+32*SCALE), HEIGHT/2-21*SCALE\n canvas.create_oval(coords, fill=RED, outline=\"\")\n # - - Cross\n coords = WIDTH-(OFFSET+25*SCALE), HEIGHT/2-22.8*SCALE, WIDTH-(OFFSET+29*SCALE), HEIGHT/2-22.8*SCALE, WIDTH-(OFFSET+29*SCALE), HEIGHT/2-25.8*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n coords = WIDTH-(OFFSET+25*SCALE), HEIGHT/2-21.2*SCALE, WIDTH-(OFFSET+29*SCALE), HEIGHT/2-21.2*SCALE, WIDTH-(OFFSET+29*SCALE), HEIGHT/2-18.2*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n coords = WIDTH-(OFFSET+37*SCALE), HEIGHT/2-22.8*SCALE, WIDTH-(OFFSET+33*SCALE), HEIGHT/2-22.8*SCALE, WIDTH-(OFFSET+33*SCALE), HEIGHT/2-25.8*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n coords = WIDTH-(OFFSET+37*SCALE), HEIGHT/2-21.2*SCALE, WIDTH-(OFFSET+33*SCALE), HEIGHT/2-21.2*SCALE, WIDTH-(OFFSET+33*SCALE), HEIGHT/2-18.2*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n # - Bottom Right\n # - - Ticks\n coords = WIDTH-(OFFSET+29.5*SCALE), HEIGHT/2+39*SCALE, WIDTH-(OFFSET+29.5*SCALE), HEIGHT/2+5*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n coords = WIDTH-(OFFSET+32.5*SCALE), HEIGHT/2+39*SCALE, WIDTH-(OFFSET+32.5*SCALE), HEIGHT/2+5*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n # - - Circles\n coords = WIDTH-(OFFSET+16*SCALE), HEIGHT/2+37*SCALE, WIDTH-(OFFSET+46*SCALE), HEIGHT/2+7*SCALE\n canvas.create_oval(coords, outline=RED, width=2, fill=WHITE)\n coords = WIDTH-(OFFSET+30*SCALE), HEIGHT/2+23*SCALE, WIDTH-(OFFSET+32*SCALE), 
HEIGHT/2+21*SCALE\n canvas.create_oval(coords, fill=RED, outline=\"\")\n # - - Cross\n coords = WIDTH-(OFFSET+25*SCALE), HEIGHT/2+22.8*SCALE, WIDTH-(OFFSET+29*SCALE), HEIGHT/2+22.8*SCALE, WIDTH-(OFFSET+29*SCALE), HEIGHT/2+25.8*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n coords = WIDTH-(OFFSET+25*SCALE), HEIGHT/2+21.2*SCALE, WIDTH-(OFFSET+29*SCALE), HEIGHT/2+21.2*SCALE, WIDTH-(OFFSET+29*SCALE), HEIGHT/2+18.2*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n coords = WIDTH-(OFFSET+37*SCALE), HEIGHT/2+22.8*SCALE, WIDTH-(OFFSET+33*SCALE), HEIGHT/2+22.8*SCALE, WIDTH-(OFFSET+33*SCALE), HEIGHT/2+25.8*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n coords = WIDTH-(OFFSET+37*SCALE), HEIGHT/2+21.2*SCALE, WIDTH-(OFFSET+33*SCALE), HEIGHT/2+21.2*SCALE, WIDTH-(OFFSET+33*SCALE), HEIGHT/2+18.2*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n\n # NEUTRAL ZONE FACEOFF\n # - Top Left\n coords = WIDTH/2-21*SCALE, HEIGHT/2-23*SCALE, WIDTH/2-19*SCALE, HEIGHT/2-21*SCALE\n canvas.create_oval(coords, outline=\"\", fill=RED)\n # - Bottom Left\n coords = WIDTH/2-21*SCALE, HEIGHT/2+23*SCALE, WIDTH/2-19*SCALE, HEIGHT/2+21*SCALE\n canvas.create_oval(coords, outline=\"\", fill=RED)\n # - Top Right\n coords = WIDTH/2+21*SCALE, HEIGHT/2-23*SCALE, WIDTH/2+19*SCALE, HEIGHT/2-21*SCALE\n canvas.create_oval(coords, outline=\"\", fill=RED)\n # - Bottom Right\n coords = WIDTH/2+21*SCALE, HEIGHT/2+23*SCALE, WIDTH/2+19*SCALE, HEIGHT/2+21*SCALE\n canvas.create_oval(coords, outline=\"\", fill=RED)\n\n\n canvas.grid(row=1, columnspan=5)",
"def plotArc(self):\n\n # plot the spectra\n self.spcurve,=self.axes.plot(self.xarr,self.farr,linewidth=0.5,linestyle='-',marker='None',color='b')",
"def draw_axes(self, cr):\n # en gris\n cr.set_line_width(0.02)\n cr.set_source_rgb(0.3, 0.3, 0.3)\n cr.move_to( -1,0 )\n cr.line_to( 1,0 )\n cr.move_to( 0, -1 )\n cr.line_to( 0, 1 )\n cr.stroke()\n #self.draw_value( cr, \"0\", 0, 0 )\n #self.draw_value( cr, \"1\", 5-0.3, 0 )\n #self.draw_value( cr, \"2\", 2+0.3, 4-0.5 )",
"def DrawEllipticArc(*args, **kwargs):\n return _gdi_.PseudoDC_DrawEllipticArc(*args, **kwargs)",
"def draw_eyes(self):\n GREEN = (0, 255, 0)\n for eye in self.eyes:\n if eye:\n cv2.circle(self.eyes_frame, eye, 8, GREEN, 1)",
"def draw(self):\n self.draw_occupied_cells()\n self.draw_open_cells()\n self.draw_edges()\n plt.xlabel(\"Red\")\n plt.ylabel(\"Black\")\n plt.title('Hex')\n self.camera.snap()",
"def draw(self):\n #for (x, y) in self.coords:\n # pyxel.rect(\n # (x + self.x) * 4,\n # (y + self.y) * 4,\n # (x + self.x) * 4 + 3,\n # (y + self.y) * 4 + 3,\n # self.color)",
"def test_ethene(self):\n def draw(image: ShapeImage):\n image.add_line((400, 400), (500, 400))\n image.add_line((400, 410), (500, 410))\n\n self._test_shape(\n image_size=(1000, 1000),\n expected_corners=np.array([\n [[400, 400]],\n [[500, 400]]\n ]),\n drawer=draw,\n expected_edges=np.array([\n [[400, 400, 500, 400]],\n [[400, 410, 500, 410]]\n ])\n )",
"def draw(self):\n arcade.draw_xywh_rectangle_filled(\n self.x, self.y, self.width, self.height, self.fill.color\n )\n arcade.draw_xywh_rectangle_outline(\n self.x, self.y, self.width, self.height, self.pen.color, 3\n )",
"def draw_car(self):\n a = self.h / 50\n ellipse(screen, BLACK, (self.x - 15 * a, self.y + 35 * a, 30 * a, 10 * a))\n rect(screen, LIGHT_BLUE, (self.x, self.y, self.dir * 260 * a, self.h))\n rect(screen, LIGHT_BLUE, (self.x + self.dir * 40 * a, self.y - 40 * a, self.dir * 130 * a, 40 * a))\n rect(screen, LIGHT_GREY, (self.x + self.dir * 50 * a, self.y - 30 * a, self.dir * 45 * a, 30 * a))\n rect(screen, LIGHT_GREY, (self.x + self.dir * 120 * a, self.y - 30 * a, self.dir * 48 * a, 30 * a))\n rect(screen, LIGHT_GREY, (self.x + self.dir * 248 * a, self.y + 2 * a, self.dir * 10 * a, 10 * a))\n circle(screen, BLACK, (self.x + self.dir * int(220 * a), self.y + int(50 * a)), int(25 * a))\n circle(screen, BLACK, (self.x + self.dir * int(50 * a), self.y + int(50 * a)), int(25 * a))",
"def create_rink():\n \n fig, ax = plt.subplots(figsize=(12, 9), dpi=600)\n # Нейтральная зона\n # Центральная линия\n line = plt.Line2D((0, 0), (-42.5, 42.5), lw=5, color='red', linestyle='-')\n plt.gca().add_line(line)\n\n line = plt.Line2D((0, 0), (-42.5, 42.5), lw=2, color='white', linestyle='--')\n plt.gca().add_line(line)\n\n # синяя линия\n line = plt.Line2D((25, 25), (-42.5, 42.5), lw=5, color='blue', linestyle='-')\n plt.gca().add_line(line)\n\n # Центральный круг\n ax.add_patch(Arc((0, 0), 30, 30, theta1=-90, theta2=90, lw=2, edgecolor='blue'))\n ax.add_patch(Circle((0, 0), 1.5, lw=2.5, edgecolor='blue', facecolor='blue'))\n\n # точки\n ax.add_patch(Circle((20, 22), 1, lw=5, edgecolor='red', facecolor='red'))\n ax.add_patch(Circle((20, -22), 1, lw=5, edgecolor='red', facecolor='red'))\n\n # Верхний круг вбрасывания\n line = plt.Line2D((75, 71, 71), (23, 23, 26), lw=2, color='red', linestyle='-')\n plt.gca().add_line(line)\n line = plt.Line2D((63, 67, 67), (23, 23, 26), lw=2, color='red', linestyle='-')\n plt.gca().add_line(line)\n line = plt.Line2D((63, 67, 67), (21, 21, 18), lw=2, color='red', linestyle='-')\n plt.gca().add_line(line)\n line = plt.Line2D((75, 71, 71), (21, 21, 18), lw=2, color='red', linestyle='-')\n plt.gca().add_line(line)\n\n line = plt.Line2D((71, 71), (7, 5), lw=2, color='red', linestyle='-')\n plt.gca().add_line(line)\n line = plt.Line2D((67, 67), (7, 5), lw=2, color='red', linestyle='-')\n plt.gca().add_line(line)\n line = plt.Line2D((67, 67), (37, 39), lw=2, color='red', linestyle='-')\n plt.gca().add_line(line)\n line = plt.Line2D((71, 71), (37, 39), lw=2, color='red', linestyle='-')\n plt.gca().add_line(line)\n\n ax.add_patch(Circle((69, 22), 1, lw=5, edgecolor='red', facecolor='red'))\n ax.add_patch(Arc((69, 22), 30, 30, theta1=0, theta2=360, lw=2, edgecolor='red'))\n \n # Нижний круг вбрасывания\n line = plt.Line2D((75, 71, 71), (-23, -23, -26), lw=2, color='red', linestyle='-')\n plt.gca().add_line(line)\n line = plt.Line2D((63, 67, 67), (-23, -23, -26), lw=2, color='red', linestyle='-')\n plt.gca().add_line(line)\n line = plt.Line2D((63, 67, 67), (-21, -21, -18), lw=2, color='red', linestyle='-')\n plt.gca().add_line(line)\n line = plt.Line2D((75, 71, 71), (-21, -21, -18), lw=2, color='red', linestyle='-')\n plt.gca().add_line(line)\n\n line = plt.Line2D((71, 71), (-7, -5), lw=2, color='red', linestyle='-')\n plt.gca().add_line(line)\n line = plt.Line2D((67, 67), (-7, -5), lw=2, color='red', linestyle='-')\n plt.gca().add_line(line)\n line = plt.Line2D((67, 67), (-37, -39), lw=2, color='red', linestyle='-')\n plt.gca().add_line(line)\n line = plt.Line2D((71, 71), (-37, -39), lw=2, color='red', linestyle='-')\n plt.gca().add_line(line)\n\n ax.add_patch(Circle((69, -22), 1, lw=5, edgecolor='red', facecolor='red'))\n ax.add_patch(Arc((69, -22), 30, 30, theta1=0, theta2=360, lw=2, edgecolor='red'))\n\n\n #Зона ворот\n line = plt.Line2D((89, 89), (-40.7, 40.7), lw=2, color='red', linestyle='-')\n plt.gca().add_line(line)\n ax.add_patch(Arc((89, 0), 16, 16, theta1=90, theta2=270, lw=2, edgecolor='red', facecolor='blue'))\n ax.add_patch(Rectangle((85.5,-4), 3.5, 8, lw=2 ,edgecolor='red', facecolor='blue', alpha=0.7))\n\n ax.add_patch(Arc((90, 1), 4, 4, theta1=-30, theta2=90, lw=2, edgecolor='red', facecolor='blue'))\n ax.add_patch(Arc((90, -1), 4, 4, theta1=270, theta2=30, lw=2, edgecolor='red', facecolor='blue'))\n line = plt.Line2D((89, 90), (3, 3), lw=2, color='red', linestyle='-')\n plt.gca().add_line(line)\n line = plt.Line2D((89, 90), (-3, -3), 
lw=2, color='red', linestyle='-')\n plt.gca().add_line(line)\n\n\n # Борта\n line = plt.Line2D((0, 80), (-42.6, -42.6), lw=5, color='black')\n plt.gca().add_line(line)\n\n line = plt.Line2D((0, 80), (42.6, 42.6), lw=5, color='black')\n plt.gca().add_line(line)\n\n line = plt.Line2D((100, 100), (-22.6, 22.6), lw=5, color='black')\n plt.gca().add_line(line)\n\n ax.add_patch(Arc((80, 22.6), 40, 40,\n theta1=0, theta2=90, edgecolor='black', lw=5))\n ax.add_patch(Arc((80, -22.6), 40, 40,\n theta1=270, theta2=360, edgecolor='black', lw=5))\n\n plt.xlim(0, 120)\n\n #plt.axis('auto')\n #plt.show()\n return ax, fig",
"def draw(self):\n radius = self.width / 2\n center_x = self.x + radius\n center_y = self.y + radius\n arcade.draw_circle_filled(center_x, center_y, radius, self.fill.color)\n arcade.draw_circle_outline(\n center_x, center_y, radius, self.pen.color, 3)",
"def draw_raft_orientations(img_bgr, rafts_loc, rafts_ori, rafts_radii, num_of_rafts):\n\n line_thickness = int(2)\n line_color = (255, 0, 0)\n\n output_img = img_bgr\n for raft_id in np.arange(num_of_rafts):\n line_start = (rafts_loc[raft_id, 0], rafts_loc[raft_id, 1])\n line_end = (int(rafts_loc[raft_id, 0] + np.cos(rafts_ori[raft_id] * np.pi / 180) * rafts_radii[raft_id]),\n int(rafts_loc[raft_id, 1] - np.sin(rafts_ori[raft_id] * np.pi / 180) * rafts_radii[raft_id]))\n output_img = cv.line(output_img, line_start, line_end, line_color, line_thickness)\n\n return output_img",
"def draw_arc_outline(center_x, center_y, width, height, color, start_angle,\n end_angle, border_width=1, tilt_angle=0):\n num_segments = 128\n GL.glEnable(GL.GL_BLEND)\n GL.glBlendFunc(GL.GL_SRC_ALPHA, GL.GL_ONE_MINUS_SRC_ALPHA)\n GL.glEnable(GL.GL_LINE_SMOOTH)\n GL.glHint(GL.GL_LINE_SMOOTH_HINT, GL.GL_NICEST)\n GL.glHint(GL.GL_POLYGON_SMOOTH_HINT, GL.GL_NICEST)\n\n GL.glLoadIdentity()\n GL.glTranslatef(center_x, center_y, 0)\n GL.glRotatef(tilt_angle, 0, 0, 1)\n GL.glLineWidth(border_width)\n\n # Set color\n if len(color) == 4:\n GL.glColor4ub(color[0], color[1], color[2], color[3])\n elif len(color) == 3:\n GL.glColor4ub(color[0], color[1], color[2], 255)\n\n GL.glBegin(GL.GL_LINE_STRIP)\n\n start_segment = int(start_angle / 360 * num_segments)\n end_segment = int(end_angle / 360 * num_segments)\n\n for segment in range(start_segment, end_segment + 1):\n theta = 2.0 * 3.1415926 * segment / num_segments\n\n x = width * math.cos(theta)\n y = height * math.sin(theta)\n\n GL.glVertex3f(x, y, 0.5)\n\n GL.glEnd()\n GL.glLoadIdentity()"
] | [
"0.6601741",
"0.62746024",
"0.5991124",
"0.59876937",
"0.5959159",
"0.591964",
"0.5901125",
"0.58788854",
"0.58297336",
"0.58249354",
"0.58104646",
"0.5792224",
"0.5779965",
"0.5762585",
"0.5755536",
"0.57407904",
"0.5722869",
"0.5716876",
"0.5698326",
"0.5696385",
"0.56717557",
"0.5650573",
"0.56302893",
"0.56242156",
"0.5621043",
"0.56115556",
"0.5588152",
"0.5585434",
"0.55753803",
"0.55712503"
] | 0.6681989 | 0 |
Draws red nose with reflection spot (polygon) | def drawNose(win, winW, winH):
    noseRad = winW/12
    # red circle for the nose, centred just below the middle of the face
    nose = Circle(Point(winW/2, winH/2+winH/15), noseRad)
    nose.setOutline("red4")
    nose.setFill("red")
    nose.draw(win)
    # small white polygon on the upper right of the nose as a reflection spot
    spot = Polygon(Point(winW/2+noseRad*0.7, winH/2+noseRad*0.6),
                   Point(winW/2+noseRad*0.7, winH/2+noseRad*0.4),
                   Point(winW/2+noseRad*0.8, winH/2+noseRad*0.5),
                   Point(winW/2+noseRad*0.8, winH/2+noseRad*0.7))
    spot.setOutline("white")
    spot.setFill("white")
    spot.draw(win)
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def drawPoles(wn):\n wn.setworldcoordinates(-1, -5, 3, 20)\n t = turtle.Turtle()\n t.speed(0)\n t.pensize(3)\n t.up()\n t.goto(-.5, 0)\n t.down()\n t.goto(2.5, 0)\n t.up()\n for i in range(3):\n t.goto(i, 0)\n t.down()\n t.goto(i, 10)\n t.up()\n t.hideturtle()",
"def draw():",
"def circlePrimitive(self):\n\n def drawWireframe(self):\n\n # Draw line\n glFT.glBegin(OpenMayaRender.MGL_LINE_STRIP)\n\n # Get Color\n # self.getColor(\"edge\")\n\n def_circle = 100\n for idx in range(def_circle + 1):\n\n theta = 2 * 3.141592 * idx / def_circle\n x = self.shape_size * math.cos(theta)\n z = self.shape_size * math.sin(theta)\n\n point = OpenMaya.MVector(x, 0.0, z)\n point = self.setUpAxis(point)\n point = self.addOffsetRotation(point)\n point = self.addOffsetPosition(point)\n self.getBoundingBoxSize(point)\n\n glFT.glVertex3f(point.x, point.y, point.z)\n\n glFT.glEnd()\n \n def drawShaded(self):\n\n # Draw quad\n glFT.glBegin(OpenMayaRender.MGL_POLYGON)\n\n # Get Color\n # self.getColor(\"polygon\")\n\n def_circle = 100\n for idx in range(def_circle + 1):\n\n theta = 2 * 3.141592 * idx / def_circle\n x = self.shape_size * math.cos(theta)\n z = self.shape_size * math.sin(theta)\n\n point = OpenMaya.MVector(x, 0.0, z)\n point = self.setUpAxis(point)\n point = self.addOffsetRotation(point)\n point = self.addOffsetPosition(point)\n self.getBoundingBoxSize(point)\n\n glFT.glVertex3f(point.x, point.y, point.z)\n\n glFT.glEnd()\n\n glFT.glNewList(self.vList_id, OpenMayaRender.MGL_COMPILE)\n\n # Draw lines\n if self.draw_type == 0 or self.draw_type == 2:\n drawWireframe(self)\n \n # Draww Polygons\n if self.draw_type == 1 or self.draw_type == 2:\n drawShaded(self)\n\n glFT.glEndList()",
"def draw_housing():\n tess.pensize(3)\n tess.color(\"black\", \"darkgrey\")\n tess.begin_fill()\n tess.forward(80)\n tess.left(90)\n tess.forward(200)\n tess.circle(40, 180)\n tess.forward(200)\n tess.left(90)\n tess.end_fill()",
"def draw_housing():\r\n green.pensize(3)\r\n green.color(\"black\", \"darkgrey\")\r\n green.begin_fill()\r\n green.forward(80)\r\n green.left(90)\r\n green.forward(200)\r\n green.circle(40, 180)\r\n green.forward(200)\r\n green.left(90)\r\n green.end_fill()",
"def render_shore_noise(self, points):\n point_list = [(x + 50, -y + 800) for x, y in points] # Up is -ve\n pygame.draw.line(self.surface, CYAN, (50, 800), (410, 800), 1) # x-axis\n pygame.draw.line(self.surface, CYAN, (50, 800), (50, 700), 1) # y-axis\n\n for x, y in point_list: # points\n self.surface.set_at((int(x), int(y)), RED)",
"def visualize(self):\n colors = {'outline': (220, 220, 220),\n 'inlier': (0, 255, 0),\n 'outlier': (0, 0, 255),\n 'lines': (128, 220, 128)}\n # Create output image for visualization\n gap = 5\n h1, w1 = self.target.image.shape[:2]\n h2, w2 = self.image.shape[:2]\n vis = np.zeros((max(h1, h2), w1 + w2 + gap, 3), np.uint8)\n vis[:h1, :w1, :] = self.target.image\n w1 += gap\n vis[:h2, w1:w1+w2, :] = self.image\n \n # Draw the located object \n quad = np.float32(self.quad) + np.float32([w1, 0])\n self.draw(vis, colors['outline'], 2, quad)\n \n # draw point details\n inliers = [(x0, y0, x1 + w1, y1) for (x0, y0), (x1, y1) in self.inliers]\n outliers = [(x0, y0, x1 + w1, y1) for (x0, y0), (x1, y1) in self.outliers]\n if colors['outlier'] is not None: # draw x on each point\n r = 2 # radius\n thickness = 2\n for x0, y0, x1, y1 in outliers:\n cv2.line(vis, (x0 - r, y0 - r), (x0 + r, y0 + r), colors['outlier'], thickness)\n cv2.line(vis, (x0 + r, y0 - r), (x0 - r, y0 + r), colors['outlier'], thickness)\n cv2.line(vis, (x1 - r, y1 - r), (x1 + r, y1 + r), colors['outlier'], thickness)\n cv2.line(vis, (x1 + r, y1 - r), (x1 - r, y1 + r), colors['outlier'], thickness)\n if colors['lines'] is not None:\n for x0, y0, x1, y1 in inliers:\n cv2.line(vis, (x0, y0), (x1, y1), colors['lines'], 1)\n if colors['inlier'] is not None:\n for x0, y0, x1, y1 in inliers:\n cv2.circle(vis, (x0, y0), 2, colors['inlier'], -1)\n cv2.circle(vis, (x1, y1), 2, colors['inlier'], -1)\n return vis",
"def draw_eyes(self):\n GREEN = (0, 255, 0)\n for eye in self.eyes:\n if eye:\n cv2.circle(self.eyes_frame, eye, 8, GREEN, 1)",
"def paint_square(self, pos, color, cr):\n cr.set_source_rgb(*color)\n i, j = pos\n cr.rectangle(i*DOT_SIZE+1, j*DOT_SIZE-1, DOT_SIZE-2, DOT_SIZE-2)\n cr.fill()",
"def decorate_scene():\n make_polygon( (100,100),(120,140),(270,70) )\n make_polygon( (300,10), (300,550), (340,452),(380,300), (330,50))\n make_polygon( (200,450), (100,450), (100,500), (200,500) )\n make_polygon( (130,320), (150,300), (140,280) )\n return",
"def draw_pose(self, image):\r\n\t\tif self.nose:\r\n\t\t\tcv.circle(image, (self.nose[0], self.nose[1]), 5, [0, 0, 255], -1, cv.LINE_AA)\r\n\t\tif self.neck:\r\n\t\t\tcv.circle(image, (self.neck[0], self.neck[1]), 5, [0, 0, 255], -1, cv.LINE_AA)\r\n\t\tif self.r_shoulder:\r\n\t\t\tcv.circle(image, (self.r_shoulder[0], self.r_shoulder[1]), 5, [0, 0, 255], -1, cv.LINE_AA)\r\n\t\tif self.r_elbow:\r\n\t\t\tcv.circle(image, (self.r_elbow[0], self.r_elbow[1]), 5, [0, 0, 255], -1, cv.LINE_AA)\r\n\t\tif self.r_wrist:\r\n\t\t\tcv.circle(image, (self.r_wrist[0], self.r_wrist[1]), 5, [0, 0, 255], -1, cv.LINE_AA)\r\n\t\tif self.r_hip:\r\n\t\t\tcv.circle(image, (self.r_hip[0], self.r_hip[1]), 5, [0, 0, 255], -1, cv.LINE_AA)\r\n\t\tif self.r_knee:\r\n\t\t\tcv.circle(image, (self.r_knee[0], self.r_knee[1]), 5, [0, 0, 255], -1, cv.LINE_AA)\r\n\t\tif self.r_eye:\r\n\t\t\tcv.circle(image, (self.r_eye[0], self.r_eye[1]), 5, [0, 0, 255], -1, cv.LINE_AA)\r\n\t\tif self.r_ear:\r\n\t\t\tcv.circle(image, (self.r_ear[0], self.r_ear[1]), 5, [0, 0, 255], -1, cv.LINE_AA)\r\n\t\tif self.l_shoulder:\r\n\t\t\tcv.circle(image, (self.l_shoulder[0], self.l_shoulder[1]), 5, [0, 0, 255], -1, cv.LINE_AA)\r\n\t\tif self.l_elbow:\r\n\t\t\tcv.circle(image, (self.l_elbow[0], self.l_elbow[1]), 5, [0, 0, 255], -1, cv.LINE_AA)\r\n\t\tif self.l_wrist:\r\n\t\t\tcv.circle(image, (self.l_wrist[0], self.l_wrist[1]), 5, [0, 0, 255], -1, cv.LINE_AA)\r\n\t\tif self.l_hip:\r\n\t\t\tcv.circle(image, (self.l_hip[0], self.l_hip[1]), 5, [0, 0, 255], -1, cv.LINE_AA)\r\n\t\tif self.l_knee:\r\n\t\t\tcv.circle(image, (self.l_knee[0], self.l_knee[1]), 5, [0, 0, 255], -1, cv.LINE_AA)\r\n\t\tif self.l_eye:\r\n\t\t\tcv.circle(image, (self.l_eye[0], self.l_eye[1]), 5, [0, 0, 255], -1, cv.LINE_AA)\r\n\t\tif self.l_ear:\r\n\t\t\tcv.circle(image, (self.l_ear[0], self.l_ear[1]), 5, [0, 0, 255], -1, cv.LINE_AA)",
"def drawEyes(win, winW, winH):\n# leftEye = Oval(Point(300-120-40, 300-80-20), Point(300-120+40, 300-80+20))\n leftEye = Oval(Point(winW/2-winW/5-winW/15, winH/2-winH/7.5-winH/30),\n Point(winW/2-winW/5+winW/15, winH/2-winH/7.5+winH/30))\n leftEye.setFill(\"white\")\n leftEye.setOutline(\"black\")\n leftEye.draw(win)\n leftIris = Circle(Point(winW/2-winW/5, winH/2-winH/7.5), winH/40)\n leftIris.setOutline(\"black\")\n leftIris.setFill(\"darkcyan\")\n leftIris.draw(win)\n leftPupil = Circle(Point(winW/2-winW/5, winH/2-winH/7.5), winH/120)\n leftPupil.setOutline(\"black\")\n leftPupil.setFill(\"black\")\n leftPupil.draw(win)\n rightEye = leftEye.clone()\n rightEye.move(winW/2-winW/10,0)\n rightEye.draw(win)\n rightIris = leftIris.clone()\n rightIris.move(winW/2-winW/10,0)\n rightIris.draw(win)\n rightPupil = leftPupil.clone()\n rightPupil.move(winW/2-winW/10,0)\n rightPupil.draw(win)",
"def draw(self):\n self.draw_occupied_cells()\n self.draw_open_cells()\n self.draw_edges()\n plt.xlabel(\"Red\")\n plt.ylabel(\"Black\")\n plt.title('Hex')\n self.camera.snap()",
"def draw_rubiks_points(disp_image, obj, color, radius=3, thickness=1):\n\tcv2.circle(disp_image, obj, radius, color=color, thickness=thickness)",
"def house ():\n\n poly (3,300,\"red\")\n penup()\n setposition(0,-300)\n pendown()\n poly (4,300,\"brown\")\n penup()\n setposition(100,-300)\n pendown()\n poly(4,100,\"green\") \n\n return None",
"def drawCoordinatePlane_region():\r\n turtle2 = t.Screen()\r\n turtle2.title(\"Life Expectancy versus Region\")\r\n t2.speed(0)\r\n t3.speed(0)\r\n setTurtle(t0)\r\n setTurtle(t1)\r\n setTurtle(t2)\r\n setTurtle(t3)\r\n drawAxes(t0)\r\n t1.left(90)\r\n drawAxes(t1)\r\n t0.pu()\r\n t0.fd(-80)\r\n t0.lt(90)\r\n drawlabels(t0, t1)\r\n drawPoints(t0, t1)\r\n t0.pu()\r\n t1.pu()\r\n t2.pu()\r\n t3.pu()\r\n t0.goto(initialCoordinates())\r\n t1.goto(initialCoordinates())\r\n t2.goto(initialCoordinates())\r\n t3.goto(initialCoordinates())\r\n t1.lt(90)",
"def draw_housing():\n tess.pensize(3)\n tess.color(\"black\", \"darkgrey\")\n tess.begin_fill()\n tess.forward(80)\n tess.left(90)\n tess.forward(200)\n tess.circle(40, 180)\n tess.forward(200)\n tess.left(90)\n tess.end_fill()\n tess.hideturtle()",
"def test_propene(self):\n def draw(image: ShapeImage):\n image.add_line((400, 400), (500, 400))\n image.add_line((400, 410), (500, 410))\n image.add_line((500, 400), (587, 350))\n\n self._test_shape(\n image_size=(1000, 1000),\n expected_corners=np.array([\n [[400, 400]],\n [[500, 400]],\n [[587, 350]]\n ]),\n drawer=draw,\n expected_edges=np.array([\n [[400, 400, 500, 400]],\n [[400, 410, 500, 410]],\n [[500, 400, 587, 350]]\n ])\n )",
"def draw(self):",
"def draw_housing_2():\r\n tom.pensize(3)\r\n tom.color(\"black\", \"darkgrey\")\r\n tom.begin_fill()\r\n tom.forward(80)\r\n tom.left(90)\r\n tom.forward(200)\r\n tom.circle(40, 180)\r\n tom.forward(200)\r\n tom.left(90)\r\n tom.end_fill()\r\n tom.hideturtle()",
"def draw(src, src_point):\n dst = src.copy()\n b = random.randint(0, 255)\n g = random.randint(0, 255)\n r = random.randint(0, 255)\n for i in range(src_point.shape[0]):\n cv2.circle(dst, (int(src_point[i, 0]), int(src_point[i, 1])), 5, color=(b, g, r), thickness=1)\n return dst",
"def squarePrimitive(self):\n\n def drawWireframe(self):\n\n # Draw line\n glFT.glBegin(OpenMayaRender.MGL_LINE_STRIP)\n\n # Get Color\n # self.getColor(\"edge\")\n\n for edges in SQUARE[\"EDGES\"]:\n for edge in edges:\n\n point = OpenMaya.MVector(edge[0], edge[1], edge[2])\n point *= self.shape_size\n point = self.setUpAxis(point)\n point = self.addOffsetRotation(point)\n point = self.addOffsetPosition(point)\n self.getBoundingBoxSize(point)\n \n \n glFT.glVertex3f(point.x, point.y, point.z)\n\n glFT.glEnd()\n\n def drawShaded(self):\n\n # Draw quad\n glFT.glBegin(OpenMayaRender.MGL_POLYGON)\n\n # Get Color\n # self.getColor(\"polygon\")\n\n for polygons in SQUARE[\"POLYGONS\"]:\n for polygon in polygons:\n\n point = OpenMaya.MVector(polygon[0], polygon[1], polygon[2])\n point *= self.shape_size\n point = self.setUpAxis(point)\n point = self.addOffsetRotation(point)\n point = self.addOffsetPosition(point)\n self.getBoundingBoxSize(point)\n\n glFT.glVertex3f(point.x, point.y, point.z)\n\n glFT.glEnd()\n\n glFT.glNewList(self.vList_id, OpenMayaRender.MGL_COMPILE)\n\n # Draw lines\n if self.draw_type == 0 or self.draw_type == 2:\n drawWireframe(self)\n \n # Draww Polygons\n if self.draw_type == 1 or self.draw_type == 2:\n drawShaded(self)\n\n glFT.glEndList()",
"def drawRegularSurface(matrix, nx, ny, xinterp, yinterp):\n dislin.surmat(matrix, nx, ny, xinterp, yinterp)",
"def removeDots(image,points,coeff2,newColorArray):\r\n for i in range(1,coeff2):\r\n if (px[3,points*i] != (0,0,0)):\r\n im.putpixel((0,points*i-1),newColorArray[0][i-1])\r\n im.putpixel((1,points*i-1),newColorArray[0][i-1])\r\n im.putpixel((0,points*i),newColorArray[0][i])\r\n im.putpixel((1,points*i),newColorArray[0][i])\r\n im.putpixel((0,points*i+1),newColorArray[0][i])\r\n im.putpixel((1,points*i+1),newColorArray[0][i])\r\n if lineWidth==5:\r\n im.putpixel((0,points*i-2),newColorArray[0][i-1])\r\n im.putpixel((1,points*i-2),newColorArray[0][i-1])\r\n im.putpixel((0,points*i+2),newColorArray[0][i])\r\n im.putpixel((1,points*i+2),newColorArray[0][i])\r\n\r\n for i in range(1,coeff2):\r\n for j in range(1,coeff2):\r\n if (px[points*i-3,points*j] != (0,0,0) and px[points*i+3,points*j] != (0,0,0) and px[points*i,points*j-3] != (0,0,0) and px[points*i,points*j+3] != (0,0,0)):\r\n draw.line((points*i-5,points*j,points*i+5,points*j),fill=newColorArray[i][j],width=lineWidth)\r\n for i in range(1,coeff2):\r\n if px[points*i,500]!=(0,0,0):\r\n draw.line((points*i,500,points*i,511),fill=newColorArray[i][15],width=lineWidth)\r\n if px[500,points*i]!=(0,0,0):\r\n draw.line((500,points*i,511,points*i),fill=newColorArray[15][i],width=lineWidth)",
"def draw_ball():\n\n draw_circle(ball, 'yellow')",
"def draw(self, grille):\n grille.clear_highlight()\n for x in range(8):\n for y in range(8):\n self.draw_c(x, y, grille)",
"def paint_a_picture():\n # Make a training set (many random i,j coord and an x by y box around that coord to start with)\n # Throw it into the net\n # Test how it does for some random coordinate inputs\n pass",
"def drawContour(im,draw):\r\n img = im.filter(ImageFilter.BLUR)\r\n img = im.filter(ImageFilter.SMOOTH)\r\n img = cv.cvtColor(numpy.array(img), cv.COLOR_RGB2BGR)\r\n edges = cv.Canny(img,100,200)\r\n pos = numpy.nonzero(edges)\r\n pos2 = [(pos[0][i],pos[1][i]) for i in range(0,len(pos[0]))]\r\n pos3=[tuple(map(lambda x:int(round(x/32)),i)) for i in pos2]\r\n pos3 = [(i[1],i[0]) for i in pos3]\r\n for i in pos3:\r\n if pos3.count((i[0]+1,i[1]))>20 and i[0]<16 and i[1]<16:\r\n draw.line([(32*i[0],32*i[1]),(32*(i[0]+1),32*i[1])],fill=(0,0,0),width=5)\r\n if pos3.count((i[0],i[1]+1))>20 and i[0]<16 and i[1]<16:\r\n draw.line([(32*i[0],32*i[1]),(32*(i[0]),32*(i[1]+1))],fill=(0,0,0),width=5)",
"def draw(self):\r\n\r\n\r\n\t\tself.predict()\r\n\t\t#print np.shape(self.gray)\r\n\t\t#cv2.rectangle(self.gray, (self.bb[0], self.bb[1]), (self.bb[0] + self.bb[2], self.bb[1] + self.bb[3]))\r\n\r\n\t\t# draw points as green circles\r\n\t\tfor point in self.features:\r\n\t\t\tcv2.circle(self.gray,(int(point[0][0]),int(point[0][1])),3,(255),-1)\r\n\t\t\t\r\n\t\tcv2.imshow('image',self.gray)\r\n\t\tcv2.waitKey(1)",
"def draw_aim(self):\n polygon(screen, self.color, [(self.x, self.y), (self.x + self.r * 1.71 / 2, self.y - self.r / 2),\n (self.x + self.r * 1.71, self.y), (self.x + self.r * 1.71, self.y + self.r),\n (self.x + self.r * 1.71 / 2, self.y + 3 * self.r / 2), (self.x, self.y + self.r)])"
] | [
"0.6396361",
"0.63340116",
"0.6125329",
"0.6106198",
"0.60511094",
"0.6015702",
"0.5982887",
"0.5980764",
"0.59279174",
"0.59251344",
"0.5907863",
"0.59069467",
"0.5906906",
"0.58955175",
"0.5889194",
"0.5886915",
"0.58502054",
"0.58475804",
"0.58288246",
"0.58231246",
"0.5812225",
"0.5809682",
"0.58050114",
"0.5803667",
"0.5801261",
"0.5781157",
"0.5779912",
"0.57655185",
"0.573416",
"0.57281244"
] | 0.6485777 | 0 |
Save the saveable objects to a checkpoint with `file_prefix`. | def save(self, file_prefix, options=None):
options = options or checkpoint_options.CheckpointOptions()
tensor_names = []
tensors = []
slice_specs = []
for checkpoint_key, tensor_slices in self._tensor_slice_dict.items():
for slice_spec, tensor in tensor_slices.items():
if isinstance(tensor, saveable_object.SaveSpec):
tensor_value = tensor.tensor
# A tensor value of `None` indicates that this SaveableObject gets
# recorded in the object graph, but that no value is saved in the
# checkpoint.
if tensor_value is not None:
tensor_names.append(tensor.name)
tensors.append(tensor_value)
slice_specs.append(tensor.slice_spec)
else:
tensor_names.append(checkpoint_key)
tensors.append(tensor)
slice_specs.append(slice_spec)
save_device = options.experimental_io_device or (
len(tensors) and saveable_object_util.set_cpu0(tensors[0].device))
save_device = save_device or "cpu:0"
with ops.device(save_device):
return io_ops.save_v2(file_prefix, tensor_names, slice_specs, tensors) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save(self, file_prefix, options=None):\n options = options or checkpoint_options.CheckpointOptions()\n\n # IMPLEMENTATION DETAILS: most clients should skip.\n #\n # Suffix for any well-formed \"checkpoint_prefix\", when sharded.\n # Transformations:\n # * Users pass in \"save_path\" in save() and restore(). Say \"myckpt\".\n # * checkpoint_prefix gets fed <save_path><sharded_suffix>.\n #\n # Example:\n # During runtime, a temporary directory is first created, which contains\n # files\n #\n # <train dir>/myckpt_temp/\n # part-?????-of-?????{.index, .data-00000-of-00001}\n #\n # Before .save() finishes, they will be (hopefully, atomically) renamed to\n #\n # <train dir>/\n # myckpt{.index, .data-?????-of-?????}\n #\n # Filesystems with eventual consistency (such as S3), don't need a\n # temporary location. Using a temporary directory in those cases might\n # cause situations where files are not available during copy.\n #\n # Users only need to interact with the user-specified prefix, which is\n # \"<train dir>/myckpt\" in this case. Save() and Restore() work with the\n # prefix directly, instead of any physical pathname. (On failure and\n # subsequent restore, an outdated and orphaned temporary directory can be\n # safely removed.)\n with ops.device(\"CPU\"):\n sharded_suffix = array_ops.where(\n string_ops.regex_full_match(file_prefix, \"^s3://.*\"),\n constant_op.constant(\".part\"),\n constant_op.constant(\"_temp/part\"))\n tmp_checkpoint_prefix = string_ops.string_join(\n [file_prefix, sharded_suffix])\n registered_paths = {\n saver_name: registered_saver_filename(file_prefix, saver_name)\n for saver_name in self._registered_savers\n }\n\n def save_fn():\n saved_prefixes = []\n # Save with the registered savers. These run before default savers due to\n # the API contract.\n for saver_name, (save_fn, _) in self._registered_savers.items():\n maybe_saved_prefixes = save_fn(registered_paths[saver_name])\n if maybe_saved_prefixes is not None:\n flattened_saved_prefixes = nest.flatten(maybe_saved_prefixes)\n if not all(\n tensor_util.is_tf_type(x) and x.dtype == dtypes.string\n for x in flattened_saved_prefixes):\n raise ValueError(\n \"Registered saver must return a (maybe empty) list of \"\n f\"string type tensors. Got {maybe_saved_prefixes}.\")\n saved_prefixes.extend(flattened_saved_prefixes)\n\n # (Default saver) Save with single device savers.\n num_shards = len(self._single_device_savers)\n sharded_saves = []\n num_shards_tensor = constant_op.constant(num_shards, name=\"num_shards\")\n last_device = None\n for shard, (device, saver) in enumerate(\n sorted(self._single_device_savers.items())):\n last_device = device\n with ops.device(saveable_object_util.set_cpu0(device)):\n shard_prefix = sharded_filename(tmp_checkpoint_prefix, shard,\n num_shards_tensor)\n saved_prefixes.append(shard_prefix)\n with ops.device(device):\n # _SingleDeviceSaver will use the CPU device when necessary, but\n # initial read operations should be placed on the SaveableObject's\n # device.\n sharded_saves.append(saver.save(shard_prefix, options))\n\n with ops.control_dependencies(sharded_saves):\n # Merge on the io_device if specified, otherwise co-locates the merge op\n # with the last device used.\n merge_device = (\n options.experimental_io_device or\n saveable_object_util.set_cpu0(last_device))\n with ops.device(merge_device):\n # V2 format write path consists of a metadata merge step. 
Once\n # merged, attempts to delete the temporary directory,\n # \"<user-fed prefix>_temp\".\n return gen_io_ops.merge_v2_checkpoints(\n saved_prefixes, file_prefix, delete_old_dirs=True)\n\n # Since this will causes a function re-trace on each save, limit this to the\n # cases where it is needed: eager and when there are multiple tasks/single\n # device savers. Note that the retrace is needed to ensure we pickup the\n # latest values of options like experimental_io_device.\n if context.executing_eagerly() and len(self._single_device_savers) > 1:\n # Explicitly place the identity op on the first device.\n @def_function.function(jit_compile=False)\n def tf_function_save():\n save_fn()\n tf_function_save()\n else:\n return save_fn()",
"def _save_checkpoint(checkpoint, model_dir, checkpoint_prefix):\n\n checkpoint_path = os.path.join(model_dir, checkpoint_prefix)\n saved_path = checkpoint.save(checkpoint_path)\n logging.info('Saving model as TF checkpoint: %s', saved_path)\n return",
"def save(self, prefix_file):\n self.save_encoder(prefix_file)\n sklearn.externals.joblib.dump(\n self.classifier,\n prefix_file + '_' + self.architecture + '_classifier.pkl'\n )",
"def save(self, checkpoint_path: str):\r\n raise NotImplementedError",
"def save_states(self, checkpoint):\n raise NotImplementedError()",
"def save_checkpoint(checkpoint_dir, epoch, iteration, save_dict):\n os.makedirs(checkpoint_dir, exist_ok=True)\n path = opj(checkpoint_dir, str(epoch) + '.' + str(iteration) + '.ckpt')\n assert epoch == save_dict['epoch'], \"`epoch` != save_dict's `start_epoch`\"\n assert iteration == save_dict['iteration'], \"`iteration` != save_dict's `start_iteration`\"\n if os.path.isfile(path):\n print(\"Overwrite checkpoint in epoch %d, iteration %d :exclamation:\" % (epoch, iteration))\n try:\n torch.save(save_dict, path)\n except Exception:\n raise Exception(\"Fail to save checkpoint\")\n \n print(\"Checkpoint %s saved :heavy_check_mark:\" % (str(epoch) + '.' + str(iteration) + '.ckpt'))",
"def save_checkpoint(self):\n \n if not os.path.isdir(self.path + '/checkpoint/'):\n os.makedirs(self.path + '/checkpoint/')\n\n if self.saver == None:\n with self.graph.as_default():\n self.saver = tf.train.Saver(tf.global_variables())\n\n self.saver.save(self.session, self.path + '/checkpoint/model.ckpt')",
"def save(self, model_dir, model_prefix):\n self.saver.save(self.sess, os.path.join(model_dir, model_prefix))\n self.logger.info('Model saved in {}, with prefix {}.'.format(model_dir, model_prefix))",
"def save(self, model_dir, model_prefix):\n self.saver.save(self.sess, os.path.join(model_dir, model_prefix))\n self.logger.info('Model saved in {}, with prefix {}.'.format(model_dir, model_prefix))",
"def save(self, model_dir, model_prefix):\n self.saver.save(self.sess, os.path.join(model_dir, model_prefix))\n self.logger.info('Model saved in {}, with prefix {}.'.format(model_dir, model_prefix))",
"def checkpoint(self):\n save()",
"def save(self, checkpoint) -> None:\r\n self.model.save(checkpoint)",
"def save_ckpt(objects, epoch, score, ckpt_file):\n state_dicts = {name: obj.state_dict() for name, obj in objects.items() if obj is not None}\n ckpt = dict(state_dicts=state_dicts,\n epoch=epoch,\n score=score)\n may_make_dir(osp.dirname(ckpt_file))\n torch.save(ckpt, ckpt_file)\n msg = '=> Checkpoint Saved to {}'.format(ckpt_file)\n print(msg)",
"def save_checkpoint(self):\n checkpoin_path = self.get_checkpoint_path()\n _logger.info('Save checkpoint ignored by tuner, checkpoint path: %s', checkpoin_path)",
"def save_checkpoint(self, checkpoint_info):\n torch.save(checkpoint_info, os.path.join(self.checkpoint_path, self.checkpoint_file))",
"def save_objects(self):\n try:\n base_path = \"/tmp/objects.bkp\"\n # Open backup file for write\n file = open(base_path, \"w\")\n # Remove old content from file\n file.truncate()\n # Write a string with the execution objects\n file.write(json.dumps(self.SAVED_OBJECTS))\n file.close()\n except Exception as err:\n logging.error(f\"[ERROR] Couldn't save file. Traceback: {err}\")\n return False\n else:\n return True",
"def save_checkpoint(self, fname, save_optimizer=True):\n # -- Set the network to the full MultiHead_Module network to save everything in the class not only the current model -- #\n self.network = self.mh_network\n\n # -- Use parent class to save checkpoint for MultiHead_Module model consisting of self.model, self.body and self.heads -- #\n super().save_checkpoint(fname, save_optimizer)\n\n # -- Set the flag in already_trained_on -- #\n if not self.already_trained_on[str(self.fold)]['checkpoint_should_exist']:\n # -- Set the flag to True -- #\n self.already_trained_on[str(self.fold)]['checkpoint_should_exist'] = True\n # -- Add the current head keys for restoring (should be in correct order due to OrderedDict type of heads) -- #\n self.already_trained_on[str(self.fold)]['tasks_at_time_of_checkpoint'] = list(self.mh_network.heads.keys())\n # -- Add the current active task for restoring -- #\n self.already_trained_on[str(self.fold)]['active_task_at_time_of_checkpoint'] = self.mh_network.active_task\n # -- Save the updated dictionary as a json file -- #\n save_json(self.already_trained_on, join(self.trained_on_path, self.extension+'_trained_on.json'))\n\n # -- Reset network to the assembled model to continue training -- #\n self.network = self.mh_network.model",
"def saveCheckpoint(self):\n time_stamp = time.strftime('%Y%m%d%H%M%S', time.gmtime())\n state_filename = os.path.join(self.saving_dir, 'checkpoint.' + time_stamp + '.pth.tar')\n mem_filename = os.path.join(self.saving_dir, 'memory.' + time_stamp + '.pth.tar')\n state = self.getSavingState()\n memory = {\n 'memory': self.memory\n }\n torch.save(state, state_filename)\n torch.save(memory, mem_filename)",
"def save_checkpoint(self, checkpoint: str) -> str:\n\n # Some model might need to aggregate variables during checkpointing\n # which requires both the chief and workers to participate in the\n # allreduce communication protocol.\n # So we need to call get_state on every remote workers, otherwise\n # it might get stuck\n state_refs = [w.get_state.remote() for w in self.remote_workers]\n\n state = ray.get(state_refs[0])\n\n with open(checkpoint, \"wb\") as f:\n SafePickle.dump(state, f)\n\n return checkpoint",
"def save(self, prefix):\n model_file = prefix + \".json\"\n weight_file = prefix + \".h5\"\n json.dump(self.model.to_json(), open(model_file, \"w\"))\n self.model.save_weights(weight_file)\n return self",
"def save(self):\n\n if self.ckpt_manager is not None:\n save_path = self.ckpt_manager.save()\n print(\"Saved checkpoint at: {}\".format(save_path))\n else:\n print(\"There is no checkpoint manager supplied for saving the \"\n \"network weights, optimizer, or other trackables.\")\n print(\"Therefore these will not be saved and the training will \"\n \"start from default values in the future.\")\n print(\"Consider using a checkpoint manager to save the network \"\n \"weights and optimizer.\")",
"def save(self, fname, io=None):\n ckpt_path = self.manager.save()\n logging.info(f'Saved to {ckpt_path}')\n\n print_summary(self.model)\n\n if io is not None:\n io._upload_dir_to_bucket(self.save_path, self.save_path, ['ckpt', 'checkpoint'])",
"def checkpoint(self, epoch, losses, path):\n dct = {'epoch': epoch, \n 'losses': losses, \n 'model_state_dict': self.TrajectoryAutoencoder.state_dict()}\n torch.save(dct, path)",
"def save_checkpoint(self, filename=None):\n filename = os.path.join(self.args.checkpoint_dir, filename)\n state = {\n 'epoch': self.current_epoch + 1,\n 'iteration': self.current_iter,\n 'state_dict': self.model.state_dict(),\n 'optimizer': self.optimizer.state_dict(),\n 'best_MIou':self.best_MIou\n }\n torch.save(state, filename)",
"def save_checkpoint(self, name=''):\n self.checkpoint_path.mkdir(exist_ok=True)\n if name:\n path = self.checkpoint_path / f'{name}_{self.epoch}.tar'\n else:\n path = self.checkpoint_path / f'{self.epoch}.tar'\n torch.save(self.get_state(), path)",
"def save_checkpoint(self, folder='checkpoint', filename='checkpoint.pth.tar'):\n data = dict()\n data[\"inst\"] = \"save\"\n data[\"folder\"] = folder\n data[\"filename\"] = filename\n\n q_idx, data_id = self.put(data, q_idx=0) # Send instruction to first nnet\n self.get(q_idx, data_id) # Blocks here\n\n # Done",
"def save_checkpoint(state, filename):\n torch.save(state, filename) # save checkpoint",
"def checkpoint_save(self, epoch, model, label=None, checkpoint=None, path=\"\"):\n\n if label is None:\n label = f\"checkpoint-{epoch}\"\n else:\n label = f\"{label}-checkpoint-{epoch}\"\n\n if checkpoint is None:\n pass\n elif checkpoint == -1:\n Potentials.save(model=model, label=label, path=path)\n elif epoch % checkpoint == 0:\n Potentials.save(model=model, label=label, path=path)",
"def SaveToFile(self,filePrefix):\n\n for item in ['statsDatabase', 'fileDatabase',\n 'postedContractDatabase', 'proposedContractDatabase',\n 'peerDatabase', 'recoveryDatabase', 'probeDatabase',\n 'sortedPeerList','peersRespondingToRecoverAll']:\n fileName = filePrefix + '.' + item + '.CRASH'\n fd = open(fileName,'wb')\n cPickle.dump(self.__dict__[item],fd)\n fd.close()\n\n for item in ['statsDatabase', 'fileDatabase',\n 'postedContractDatabase', 'proposedContractDatabase',\n 'peerDatabase', 'recoveryDatabase', 'probeDatabase',\n 'sortedPeerList','peersRespondingToRecoverAll']:\n fileName = filePrefix + '.' + item\n if (os.path.exists(fileName)):\n os.remove(fileName)\n os.rename(fileName + '.CRASH', fileName)\n\n dibs_logger.Logger.PrintAndLog('Saved database with prefix ' +\n filePrefix + '.',\n dibs_logger.LOG_DEBUG)",
"def save_checkpoint(self, name):\n timestamp = datetime.datetime.now(self.time_zone).strftime(\"%Y-%m-%d_%H:%M:%S\")\n backup_dir = os.path.join(os.path.dirname(self.path), \"backups\")\n checkpoint_path = os.path.join(backup_dir, \"%s_%s.pickle\" % (timestamp, name))\n pickle.dump(self, open(checkpoint_path, \"wb\"))"
] | [
"0.8050191",
"0.72655725",
"0.70114654",
"0.68767583",
"0.6402732",
"0.63886064",
"0.6338492",
"0.6300955",
"0.6300955",
"0.6300955",
"0.62696517",
"0.6260468",
"0.6259008",
"0.62584656",
"0.6258426",
"0.6252518",
"0.6250101",
"0.62088394",
"0.6168359",
"0.6159923",
"0.61346674",
"0.61235327",
"0.6095281",
"0.6093391",
"0.60916567",
"0.6077908",
"0.6073504",
"0.6071068",
"0.60649776",
"0.6057456"
] | 0.7847717 | 1 |
Restore the saveable objects from a checkpoint with `file_prefix`. | def restore(self, file_prefix, options=None):
options = options or checkpoint_options.CheckpointOptions()
tensor_names = []
tensor_dtypes = []
slice_specs = []
for checkpoint_key, tensor_slices in self._tensor_slice_dict.items():
for slice_spec, tensor in tensor_slices.items():
tensor_dtypes.append(tensor.dtype)
if isinstance(tensor, saveable_object.SaveSpec):
slice_specs.append(tensor.slice_spec)
tensor_names.append(tensor.name)
else:
slice_specs.append(slice_spec)
tensor_names.append(checkpoint_key)
restore_device = options.experimental_io_device or "cpu:0"
with ops.device(restore_device):
restored_tensors = io_ops.restore_v2(
file_prefix, tensor_names, slice_specs, tensor_dtypes)
restored_tensor_dict = {}
for checkpoint_key, tensor_slices in self._tensor_slice_dict.items():
for slice_spec in tensor_slices:
restored_tensor = restored_tensors.pop(0)
restored_tensor_dict.setdefault(checkpoint_key, {})[slice_spec] = (
restored_tensor)
return restored_tensor_dict | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def restore(self, file_prefix, options=None):\n options = options or checkpoint_options.CheckpointOptions()\n\n def restore_fn():\n restore_fn_inputs = {}\n restore_fn_input_count = {\n fn: len(keys) for fn, keys in self._restore_fn_to_keys.items()}\n\n restore_ops = {}\n # Sort by device name to avoid propagating non-deterministic dictionary\n # ordering in some Python versions.\n for device, saver in sorted(self._single_device_savers.items()):\n with ops.device(device):\n # Load values from checkpoint\n restored_tensor_dict = saver.restore(file_prefix, options)\n\n # Map restored tensors to the corresponding restore_fn, and see if all\n # inputs have all been loaded. Call `restore_fn` if that is the case.\n for checkpoint_key, slice_and_tensor in restored_tensor_dict.items():\n for slice_spec, tensor in slice_and_tensor.items():\n restore_fn = self._keys_to_restore_fn[(checkpoint_key,\n slice_spec)]\n\n # Processing the returned restored_tensor_dict to prepare for the\n # Trackable `restore` function. The `restore` function expects a\n # map of `string name (checkpoint_key) -> Tensor`. Unless there is\n # a slice_spec, in which case the map will be of\n # `string name (checkpoint_key)-> slice_spec -> Tensor`.\n if slice_spec:\n (restore_fn_inputs.setdefault(restore_fn, {}).setdefault(\n checkpoint_key, {})[slice_spec]) = tensor\n else:\n restore_fn_inputs.setdefault(restore_fn,\n {})[checkpoint_key] = tensor\n restore_fn_input_count[restore_fn] -= 1\n\n if restore_fn_input_count[restore_fn] == 0:\n restored_tensors = {}\n # Extracts the substring after the \"/.ATTRIBUTES/\" in the\n # ckpt_key from restore_fn_inputs[restore_fn] to\n # restored_tensors. For example, if restore_fn_input[restore_fn]\n # is dict { \"/.ATTIBUTES/a\": Tensor}, restored_tensors will be\n # changed to dict {\"a\": Tensor}\n for ckpt_key, tensor in restore_fn_inputs[restore_fn].items():\n restored_tensors[trackable_utils.extract_local_name(\n ckpt_key)] = tensor\n ret = restore_fn(restored_tensors)\n if isinstance(ret, dict):\n restore_ops.update(ret)\n # Run registered restore methods after the default restore ops.\n for _, (_, restore_fn) in self._registered_savers.items():\n restore_fn(file_prefix)\n return restore_ops\n\n has_custom_device_saver = any([\n context.is_custom_device(d) for d in self._single_device_savers.keys()\n ])\n # Since this will cause a function re-trace on each restore, limit this to\n # cases where it is needed: eager and when there are multiple tasks/single\n # device savers or any single device saver is a custom device. Note that the\n # retrace is needed to ensure we pickup the latest values of options like\n # experimental_io_device.\n #\n # We run in a function when there is a custom device saver because custom\n # devices, such as DTensor, usually do a sharded save and restore.\n # Doing a sharded save and restore requires knowledge about what shards\n # of variables we are restoring to. In practice, this means that custom\n # devices need the AssignVariableOps along with the Restore op within the\n # same graph to infer shapes and shard specs for Restore op.\n if context.executing_eagerly() and (len(self._single_device_savers) > 1 or\n has_custom_device_saver):\n @def_function.function(jit_compile=False, autograph=False)\n def tf_function_restore():\n restore_fn()\n return {}\n\n restore_ops = tf_function_restore()\n else:\n restore_ops = restore_fn()\n\n return restore_ops",
"def restore(self, checkpoint):\n raise NotImplementedError",
"def restore(self, checkpoint_path: str):\r\n raise NotImplementedError",
"def restore(self, model_dir, model_prefix):\n self.saver.restore(self.sess, os.path.join(model_dir, model_prefix))\n self.logger.info('Model restored from {}, with prefix {}'.format(model_dir, model_prefix))",
"def restore(self, model_dir, model_prefix):\n self.saver.restore(self.sess, os.path.join(model_dir, model_prefix))\n self.logger.info('Model restored from {}, with prefix {}'.format(model_dir, model_prefix))",
"def restore(self, model_dir, model_prefix):\n self.saver.restore(self.sess, os.path.join(model_dir, model_prefix))\n self.logger.info('Model restored from {}, with prefix {}'.format(model_dir, model_prefix))",
"def save(self, file_prefix, options=None):\n options = options or checkpoint_options.CheckpointOptions()\n\n # IMPLEMENTATION DETAILS: most clients should skip.\n #\n # Suffix for any well-formed \"checkpoint_prefix\", when sharded.\n # Transformations:\n # * Users pass in \"save_path\" in save() and restore(). Say \"myckpt\".\n # * checkpoint_prefix gets fed <save_path><sharded_suffix>.\n #\n # Example:\n # During runtime, a temporary directory is first created, which contains\n # files\n #\n # <train dir>/myckpt_temp/\n # part-?????-of-?????{.index, .data-00000-of-00001}\n #\n # Before .save() finishes, they will be (hopefully, atomically) renamed to\n #\n # <train dir>/\n # myckpt{.index, .data-?????-of-?????}\n #\n # Filesystems with eventual consistency (such as S3), don't need a\n # temporary location. Using a temporary directory in those cases might\n # cause situations where files are not available during copy.\n #\n # Users only need to interact with the user-specified prefix, which is\n # \"<train dir>/myckpt\" in this case. Save() and Restore() work with the\n # prefix directly, instead of any physical pathname. (On failure and\n # subsequent restore, an outdated and orphaned temporary directory can be\n # safely removed.)\n with ops.device(\"CPU\"):\n sharded_suffix = array_ops.where(\n string_ops.regex_full_match(file_prefix, \"^s3://.*\"),\n constant_op.constant(\".part\"),\n constant_op.constant(\"_temp/part\"))\n tmp_checkpoint_prefix = string_ops.string_join(\n [file_prefix, sharded_suffix])\n registered_paths = {\n saver_name: registered_saver_filename(file_prefix, saver_name)\n for saver_name in self._registered_savers\n }\n\n def save_fn():\n saved_prefixes = []\n # Save with the registered savers. These run before default savers due to\n # the API contract.\n for saver_name, (save_fn, _) in self._registered_savers.items():\n maybe_saved_prefixes = save_fn(registered_paths[saver_name])\n if maybe_saved_prefixes is not None:\n flattened_saved_prefixes = nest.flatten(maybe_saved_prefixes)\n if not all(\n tensor_util.is_tf_type(x) and x.dtype == dtypes.string\n for x in flattened_saved_prefixes):\n raise ValueError(\n \"Registered saver must return a (maybe empty) list of \"\n f\"string type tensors. Got {maybe_saved_prefixes}.\")\n saved_prefixes.extend(flattened_saved_prefixes)\n\n # (Default saver) Save with single device savers.\n num_shards = len(self._single_device_savers)\n sharded_saves = []\n num_shards_tensor = constant_op.constant(num_shards, name=\"num_shards\")\n last_device = None\n for shard, (device, saver) in enumerate(\n sorted(self._single_device_savers.items())):\n last_device = device\n with ops.device(saveable_object_util.set_cpu0(device)):\n shard_prefix = sharded_filename(tmp_checkpoint_prefix, shard,\n num_shards_tensor)\n saved_prefixes.append(shard_prefix)\n with ops.device(device):\n # _SingleDeviceSaver will use the CPU device when necessary, but\n # initial read operations should be placed on the SaveableObject's\n # device.\n sharded_saves.append(saver.save(shard_prefix, options))\n\n with ops.control_dependencies(sharded_saves):\n # Merge on the io_device if specified, otherwise co-locates the merge op\n # with the last device used.\n merge_device = (\n options.experimental_io_device or\n saveable_object_util.set_cpu0(last_device))\n with ops.device(merge_device):\n # V2 format write path consists of a metadata merge step. 
Once\n # merged, attempts to delete the temporary directory,\n # \"<user-fed prefix>_temp\".\n return gen_io_ops.merge_v2_checkpoints(\n saved_prefixes, file_prefix, delete_old_dirs=True)\n\n # Since this will causes a function re-trace on each save, limit this to the\n # cases where it is needed: eager and when there are multiple tasks/single\n # device savers. Note that the retrace is needed to ensure we pickup the\n # latest values of options like experimental_io_device.\n if context.executing_eagerly() and len(self._single_device_savers) > 1:\n # Explicitly place the identity op on the first device.\n @def_function.function(jit_compile=False)\n def tf_function_save():\n save_fn()\n tf_function_save()\n else:\n return save_fn()",
"def _restore(self, restore_folder):\n tf.reset_default_graph()\n self.init_session()\n ckpt = tf.train.get_checkpoint_state(restore_folder)\n self.saver = tf.train.import_meta_graph('{}.meta'.format(ckpt.model_checkpoint_path))\n self.saver.restore(self.sess, ckpt.model_checkpoint_path)\n print(\"Model restored from {}\".format(restore_folder))",
"def _restore(self, checkpoint):\n checkpoint_path = os.path.join(checkpoint, \"model_weights\")\n self.model.load_weights(checkpoint_path)",
"def restore(self, checkpoint_frame=None):\n\n if checkpoint_frame:\n self.saver.restore(self.sess, self.path + '/tensorflow-model-%d' % checkpoint_frame)\n else:\n self.saver.restore(self.sess, self.saver.latest_checkpoint())",
"def restore(self):\n\n self.brain.restore_checkpoint()",
"def restore(self, sess, path=None, var_list=None):\n\n saver = tf.train.Saver(var_list)\n if path is None:\n path = tf.train.latest_checkpoint(os.path.dirname(self.config.CHECKPOINTS_PATH))\n saver.restore(sess, path)\n print(\"model restored from %s\" % path)",
"def restore_model(self, prefix):\n model_file = prefix + \".json\"\n weight_file = prefix + \".h5\"\n self.model = model_from_json(json.load(open(model_file)))\n self.model.load_weights(weight_file)\n return self\n model.load_weights(\"./output/model.h5\")",
"def load_checkpoint(self, session, model_dir):\n assert self.params.cp_load == True, (\"cp_load must be set to true to load a checkpoint\")\n self.loader.restore(session, model_dir)",
"def restore(self, checkpoint_path):\n start_time = time.time()\n latest_checkpoint = train_util.get_latest_chekpoint(checkpoint_path)\n if latest_checkpoint is not None:\n checkpoint = tf.train.Checkpoint(model=self)\n checkpoint.restore(latest_checkpoint).expect_partial()\n logging.info('Loaded checkpoint %s', latest_checkpoint)\n logging.info('Loading model took %.1f seconds', time.time() - start_time)\n else:\n logging.info('Could not find checkpoint to load at %s, skipping.',\n checkpoint_path)",
"def restore_fn(flags):\n # if flags.tf_initial_checkpoint is None:\n # return None\n\n # Warn the user if a checkpoint exists in the train_dir. Then ignore.\n # if tf.train.latest_checkpoint(flags.train_dir):\n # tf.logging.info(\n # 'Ignoring --checkpoint_path because a checkpoint already exists in %s'\n # % flags.train_dir)\n # return None\n\n exclusions = []\n if flags.checkpoint_exclude_scopes:\n exclusions = [scope.strip()\n for scope in flags.checkpoint_exclude_scopes.split(',')]\n\n variables_to_restore = []\n for var in slim.get_model_variables():\n excluded = False\n for exclusion in exclusions:\n if var.op.name.startswith(exclusion):\n excluded = True\n break\n if not excluded:\n variables_to_restore.append(var)\n # Change model scope if necessary.\n if flags.checkpoint_model_scope is not None:\n variables_to_restore = \\\n {var.op.name.replace(flags.model_name,\n flags.checkpoint_model_scope): var\n for var in variables_to_restore}\n\n tf.compat.v1.logging.info('++++++++++++++++++++')\n tf.compat.v1.logging.info('Fine-tuning from %s. Ignoring missing vars: %s' %\n (flags.pre_trained_checkpoint, flags.ignore_missing_vars))\n slim.assign_from_checkpoint_fn(flags.pre_trained_checkpoint,\n variables_to_restore,\n ignore_missing_vars=flags.ignore_missing_vars)",
"def maybe_restore_train_and_indicator_state(\n train_state,\n indicator_state,\n *,\n ckpt_manager,\n mesh,\n):\n latest_step = ckpt_manager.latest_step()\n if latest_step is None:\n return train_state, indicator_state\n logging.info('Restoring from step %d', latest_step)\n\n # Check if the directory is empty, Orbax could have failed to save\n # the checkpoint alltogether\n save_dir = checkpoint.utils.get_save_directory(\n latest_step,\n ckpt_manager.directory,\n )\n # If there's no files in the directory we should remove it and try\n # again with the checkpoint before that.\n if not any(save_dir.iterdir()):\n logging.info(\n 'Save directory %s is empty, removing and recursing restore',\n save_dir,\n )\n save_dir.rmdir()\n return maybe_restore_train_and_indicator_state(\n train_state,\n indicator_state,\n ckpt_manager=ckpt_manager,\n mesh=mesh,\n )\n\n def restore_arguments_with_mesh_axes(\n mesh_axes):\n if not mesh:\n mesh_axes = None\n\n def closure(_):\n return checkpoint.ArrayRestoreArgs(\n lazy=True,\n restore_type=jax.Array,\n mesh=mesh,\n mesh_axes=mesh_axes,\n )\n\n return closure\n\n # Evaluate the shape and filter empty nodes\n # We save the entire PyTree so there's no need to further filter\n train_state_shape = jax.eval_shape(lambda x: x, train_state)\n train_state_shape = tree_utils.filter_empty_nodes(train_state_shape,\n train_state_shape)\n train_state_pspec = create_train_state_partition_spec_from_shape(\n train_state_shape)\n train_state_restore_args = jax.tree_util.tree_map(\n restore_arguments_with_mesh_axes(train_state_pspec), train_state_shape)\n\n indicator_state_shape = jax.eval_shape(lambda x: x, indicator_state)\n indicator_state_shape = tree_utils.tree_map_with_regex(\n lambda _: None, indicator_state_shape, [(r'.*params/encoder/.*',)],\n lambda leaf: leaf)\n indicator_state_shape = tree_utils.filter_empty_nodes(indicator_state_shape,\n indicator_state_shape)\n indicator_state_pspec = create_indicator_state_partition_spec_from_shape(\n indicator_state_shape)\n indicator_state_restore_args = jax.tree_util.tree_map(\n restore_arguments_with_mesh_axes(indicator_state_pspec),\n indicator_state_shape)\n\n restored_state = ckpt_manager.restore(\n latest_step,\n items={\n 'train': train_state_shape,\n 'indicator': indicator_state_shape\n },\n restore_kwargs={\n 'train': {\n 'restore_args': train_state_restore_args\n },\n 'indicator': {\n 'restore_args': indicator_state_restore_args\n }\n })\n\n restored_state = checkpoint.apply_transformations(\n original_tree=restored_state,\n transformations=dict(),\n new_tree={\n 'train': train_state,\n 'indicator': indicator_state\n },\n default_to_original=False)\n restored_state = checkpoint.lazy_utils.maybe_get_tree(restored_state)\n logging.info('Restore finished')\n\n return operator.itemgetter('train', 'indicator')(restored_state)",
"def restore(self, sess: tf.Session) -> None:\n super().restore(sess)\n BaseModel._restore_checkpoint(self.pretrained_saver, sess, path=FLAGS.pretrained_checkpoint)",
"def restore_from_dir(self, checkpoint_dir: str):\r\n\r\n pattern = self.CKPT_FILE_TMPL.format(\"*\")\r\n full_paths = glob.glob(os.path.join(checkpoint_dir, pattern))\r\n if not full_paths:\r\n raise RuntimeError(\r\n \"Searcher unable to find checkpoint in {}\".format(\r\n checkpoint_dir)) # TODO\r\n most_recent_checkpoint = max(full_paths)\r\n self.restore(most_recent_checkpoint)",
"def restore(self):\n # For multi-worker training, it should not restore a model in certain\n # worker setting (e.g. non-chief worker in ParameterServerStrategy).\n # pylint: disable=protected-access\n if self._model._in_multi_worker_mode() and not multi_worker_util.should_load_checkpoint():\n return\n self.read_checkpoint_manager.restore_or_initialize()",
"def Restore(binary_file, format='default'):\n from dragon.config import logger\n assert os.path.exists(binary_file), \\\n 'Binary file({}) does not exist.'.format(binary_file)\n\n if format == 'default':\n try:\n state_dict = cPickle.load(open(binary_file, 'rb'))\n except UnicodeDecodeError:\n state_dict = cPickle.load(open(binary_file, 'rb'), encoding='iso-8859-1')\n logger.info('Restore From Model@: ' + binary_file)\n logger.info('Model Format: cPickle')\n for k, v in state_dict.items():\n if not HasTensor(k):\n logger.info('[Warning]: Tensor({}) does not exist in any Graphs, skip.'.format(k))\n else:\n FeedTensor(k, v)\n logger.info('[Info]: Tensor({}) is restored.'.format(k))\n\n elif format == 'caffe':\n # Caffe models can't save the tensor name\n # We simply use \"layer_name/param:X\"\n RestoreCC(binary_file, 1)\n\n else:\n raise TypeError('Unknown binary format: {}'.format(format))",
"def restore_checkpoint(self, checkpoint_id, name, path=''):\n\n\t\tself.log.debug(\"restoring Notebook %s from checkpoint %s\", name, checkpoint_id)\n\t\tnb_path = self._get_os_path(name, path)\n\t\tcp_path = self.get_checkpoint_path(checkpoint_id, name, path)\n\n\t\tif not key_exists(self.bucket, cp_path):\n\t\t\tself.log.debug(\"checkpoint file does not exist: %s\", cp_path)\n\t\t\traise web.HTTPError(404,\n\t\t\t\tu'Notebook checkpoint does not exist: %s-%s' % (name, checkpoint_id)\n\t\t\t)\n\t\t# ensure notebook is readable (never restore from an unreadable notebook)\n\t\tkey = self.bucket.get_key(cp_path)\n\t\tnb = current.reads(key.get_contents_as_string(), u'json')\n\t\tself._copy(cp_path, nb_path)\n\t\tself.log.debug(\"copying %s -> %s\", cp_path, nb_path)",
"def _restore_variables(self, checkpoint):\n checkpoint_variables_map = list_variables(checkpoint)\n valid_variable = lambda name: name.startswith('model/encoder') or \\\n name.startswith('model/decoder')\n checkpoint_variable_names = [name for (name, _) in checkpoint_variables_map\n if valid_variable(name)]\n\n variables = get_variables_to_restore()\n variable_names = [v.name.split(':')[0] for v in variables]\n assignment_map = {}\n for var in checkpoint_variable_names:\n if var in variable_names:\n assignment_map[var] = var\n\n init_from_checkpoint(checkpoint, assignment_map)",
"def restore(self,\n sess,\n ckpt_file,\n ckpt_type):\n if ckpt_file is None:\n raise FileNotFoundError(\"checkpoint file doesn't exist\")\n \n if ckpt_type == \"debug\":\n self.ckpt_debug_saver.restore(sess, ckpt_file)\n elif ckpt_type == \"epoch\":\n self.ckpt_epoch_saver.restore(sess, ckpt_file)\n else:\n raise ValueError(\"unsupported checkpoint type {0}\".format(ckpt_type))",
"def __restoreBackup(self):\n pass #FIXME!!!",
"def load_variables_from_checkpoint(sess, start_checkpoint):\n saver = tf.train.Saver(tf.global_variables())\n saver.restore(sess, start_checkpoint)",
"def load_variables_from_checkpoint(sess, start_checkpoint):\n saver = tf.train.Saver(tf.global_variables())\n saver.restore(sess, start_checkpoint)",
"def _dump_checkpoint(args):\n\n # Determine checkpoint to use\n if os.path.isfile(args.source):\n checkpoint_file = args.source\n else:\n checkpoint_file = Config.get_best_or_last_checkpoint(args.source)\n\n # Load the checkpoint and strip some fieleds\n checkpoint = torch.load(checkpoint_file, map_location=\"cpu\")\n\n # Dump it\n print(f\"# Dump of checkpoint: {checkpoint_file}\")\n excluded_keys = {\"model\", \"optimizer_state_dict\"}\n if args.keys is not None:\n excluded_keys = {key for key in excluded_keys if key not in args.keys}\n excluded_keys = excluded_keys.union(\n {key for key in checkpoint if key not in args.keys}\n )\n excluded_keys = {key for key in excluded_keys if key in checkpoint}\n for key in excluded_keys:\n del checkpoint[key]\n if excluded_keys:\n print(f\"# Excluded keys: {excluded_keys}\")\n yaml.dump(checkpoint, sys.stdout)",
"def load_from_path(self, checkpoint_dir):\n\n vars = self.save_var_names\n saver = tf.train.Saver(vars)\n\n def load_aux(ckpt_path):\n \"\"\"Helper function to not repeat the same code in the following lines.\"\"\"\n\n ckpt_name = os.path.basename(ckpt_path)\n saver.restore(self.sess, ckpt_path)\n counter = int(next(re.finditer(\"(\\d+)(?!.*\\d)\", ckpt_name)).group(0))\n self.counter = counter\n print(\" [*] Loaded {}\".format(ckpt_name))\n return True, counter\n\n ckpt = tf.train.get_checkpoint_state(checkpoint_dir)\n try:\n if ckpt and ckpt.model_checkpoint_path:\n ckpt_name = os.path.basename(ckpt.model_checkpoint_path)\n return load_aux(os.path.join(checkpoint_dir, ckpt_name))\n else:\n print(\n \" [!] Failed to find a checkpoint within directory {}\".format(\n FLAGS.ckpt_path))\n return False, 0\n except:\n print(\" [!] Failed to find a checkpoint, Exception!\")\n return False, 0",
"def Restore(binary_file, format='default'):\n assert os.path.exists(binary_file), \\\n 'Binary file({}) does not exist.'.format(binary_file)\n\n if format == 'default':\n try:\n state_dict = pickle.load(open(binary_file, 'rb'))\n except UnicodeDecodeError:\n state_dict = pickle.load(open(binary_file, 'rb'), encoding='iso-8859-1')\n logging.info('Restore From Model@: ' + binary_file)\n logging.info('Model Format: Pickle')\n for k, v in state_dict.items():\n if HasTensor(k):\n FeedTensor(k, v)\n logging.info('[Info]: Tensor({}) is restored.'.format(k))\n elif format == 'caffe':\n # Caffe models can't save the tensor name\n # We simply use \"layer_name/param:X\"\n _C.Restore(binary_file, 1)\n else:\n raise TypeError('Unknown binary format: {}'.format(format))"
] | [
"0.7489",
"0.7427531",
"0.71940327",
"0.7081849",
"0.7081849",
"0.7081849",
"0.69074863",
"0.6745683",
"0.6576598",
"0.64617085",
"0.6417714",
"0.63264495",
"0.6171721",
"0.61581063",
"0.61405045",
"0.6077467",
"0.6041426",
"0.60319626",
"0.60011774",
"0.59856737",
"0.5977744",
"0.5968534",
"0.5946902",
"0.5918395",
"0.5874637",
"0.5870645",
"0.5870645",
"0.5866355",
"0.5860199",
"0.58531475"
] | 0.77099335 | 0 |
Append sharding information to a filename. | def sharded_filename(filename_tensor, shard, num_shards):
return gen_io_ops.sharded_filename(filename_tensor, shard, num_shards) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def shard_filename(path, tag, shard_num, total_shards):\n return os.path.join(\n path, \"%s-%s-%.5d-of-%.5d\" % (_PREFIX, tag, shard_num, total_shards))",
"def shard_path(self, shard_id, training=True):\n sub_dir = 'train' if training else 'validation'\n fname = 'shard-{}.tfrecord'.format(shard_id)\n return os.path.join(self.args.output, sub_dir, fname)",
"def _write_shard(filename, dataset, indices):\n with tf.python_io.TFRecordWriter(filename) as writer:\n for j in indices:\n writer.write(dataset[j])",
"def _write_shard(filename, dataset, indices):\n with tf.io.TFRecordWriter(filename) as writer:\n for j in indices:\n writer.write(dataset[j])",
"def __appendToFile(self, st):\n fh = open(self.__fileName, \"a\")\n line = st.get_id_student() + \" \" + st.get_nume_student()\n fh.write(\"\\n\")\n fh.write(line)\n fh.close()",
"def write_hashes(filename):\n\n [head, tail] = os.path.split(filename)\n new_filename = f\"{head}/hashed-{tail}\"\n\n shutil.copy(filename, new_filename)\n\n with open(filename) as in_file:\n data = json.loads(in_file.read())\n\n click.echo(f\"Event ID: {data['event_id']}\")\n click.echo(\"Writing span hashes\")\n\n config = load_span_grouping_config({\"id\": DEFAULT_CONFIG_ID})\n results = config.execute_strategy(data)\n\n with open(new_filename, \"w\") as out_file:\n results.write_to_event(data)\n out_file.write(json.dumps(data, indent=4))\n\n click.echo(\"Done\")\n click.echo(\"\\n\")",
"def _add_filename_metadata(self, extra_metadata): \n \n # Make sure product_info section exists\n extra_metadata.setdefault('product_info', {})\n \n file_name = os.path.basename(self.fname)\n fn_comps = file_name.split(\"_\")\n \n if self.__class__ == SAFESentinel1:\n component = fn_comps[2]\n if len(component) < 4: \n resolution = 'N/A'\n else:\n resolution = component[-1]\n \n extra_metadata['product_info']['Resolution'] = resolution\n \n # Add file/scan name \n extra_metadata['product_info']['Name'] = os.path.splitext(file_name)[0]\n \n # Add Satellite and Mission from the file path\n comp_1 = fn_comps[0].upper()\n extra_metadata['platform']['Mission'] = \"Sentinel-%s\" % comp_1[1]\n extra_metadata['platform']['Satellite'] = \"Sentinel-%s\" % comp_1[1:]",
"def _write_dataset(name, dataset, num_shards, output_dir):\n borders = np.int32(np.linspace(0, len(dataset), num_shards + 1))\n indices = list(range(len(dataset)))\n\n for i in range(num_shards):\n filename = os.path.join(\n output_dir, '%s-%.5d-of-%.5d' % (name, i, num_shards))\n shard_indices = indices[borders[i]:borders[i + 1]]\n _write_shard(filename, dataset, shard_indices)\n logging.info('Wrote dataset indices [%d, %d) to output shard %s',\n borders[i], borders[i + 1], filename)",
"def put(name, snippet, filename):\n\tlogging.info(\"writing {}:{} to {}\".format(name, snippet, filename))\n\tlogging.debug(\"Opening file\")\n\twith open(filename, \"a\") as f:\n\t\twriter = csv.writer(f)\n\t\tlogging.debug(\"Writing snippet to file\")\n\t\twriter.writerow([name,snippet])\n\tlogging.debug(\"Write successful\")\n\treturn name, snippet",
"def update_destination_file_name (file_name):\n\tglobal COUNTER \n\tCOUNTER += 1\n\tsplitted = file_name.split('/')\n\treturn file_name[:len(file_name)-len(splitted[-1])] + 'Image%05d' % COUNTER +'_'+splitted[-1]",
"def __appendToFile(self, sub):\n fh = open(self.__fileName, \"a\")\n line = sub.get_id_disciplina() + \" \" + sub.get_nume_disciplina() + \" \" + sub.get_profesor()\n fh.write(\"\\n\")\n fh.write(line)\n fh.close()",
"def save_to_file(content, song_name):\n file = open(\"./assets/homemade_partitions.txt\", \"a+\")\n # Move to the start of the file\n file.seek(0)\n # Read the total lines\n total_lines = len(file.readlines())\n # Move to the end of the file\n file.seek(0, 2)\n # Write the song's name\n file.write(f\"#{int(total_lines / 2 + 1)} {song_name}\\n\")\n # Write the song's partition\n file.write(content + \"\\n\")\n file.close()",
"def _record_all_snps(self, filename, chromosome, position, ref, alt):\n\n if filename in self.snp_positions.keys():\n if chromosome in self.snp_positions[filename].keys():\n self.snp_positions[filename][chromosome].update(\n {\n str(position): {\n \"ref\": ref,\n \"alt\": str(alt)\n .replace(\"[\", \"\")\n .replace(\"]\", \"\")\n .replace(\" \", \"\"),\n }\n }\n )\n else:\n self.snp_positions[filename].update(\n {\n chromosome: {\n str(position): {\n \"ref\": ref,\n \"alt\": str(alt)\n .replace(\"[\", \"\")\n .replace(\"]\", \"\")\n .replace(\" \", \"\"),\n }\n }\n }\n )\n else:\n self.snp_positions.update(\n {\n filename: {\n chromosome: {\n str(position): {\n \"ref\": ref,\n \"alt\": str(alt)\n .replace(\"[\", \"\")\n .replace(\"]\", \"\")\n .replace(\" \", \"\"),\n }\n }\n }\n }\n )",
"def append_random_number_to_filename(self, local_img_file):\n date = datetime.datetime.now()\n date_string = date.strftime(\"%m-%d-%Y\")\n return \"%s-glitched.%s\" % (local_img_file.split(\".\")[0], local_img_file.split(\".\")[1])",
"def create_filename (self):\n\t\tassert self.__patient_name and self.__location_name, \"New filename could not be determined, one or more needed arguments is empty!\"\n\t\t_patient_name = self.__patient_name.split(' ')\n\t\t_patient_name.reverse()\n\t\t\n\t\treturn os.path.join(os.path.dirname(self.file._path), \"%s MR %s%s\" % (self.__location_name, ', '.join(_patient_name).upper(), self._file.extension))",
"def putfilenameontop(idf, lines):\n openfile = '<%s>%s</%s>' % ('h4', idf.idfname, 'h4')\n lines = [openfile, '<hr>'] + lines\n return lines",
"def store(self, filename):",
"def __newFileName(self):\n now = datetime.now()\n dateTimeAppend = now.strftime('%y%m%d_%H%M%S')\n self.__fileName = '{}/{}_{}.wav'.format(RECORDING,\n FILE_NAME_PREFIX, \n dateTimeAppend)",
"def format(self, f):\n INFO = self.logger.info\n\n f.write(\"# Greenplum Database identifier for this master/segment.\\n\")\n f.write(\"# Do not change the contents of this file.\\n\")\n f.write('dbid = %d\\n' % self.dbid)\n INFO('wrote dbid: %d' % self.dbid)\n\n if self.standby_dbid:\n f.write('standby_dbid = %d\\n' % self.standby_dbid)\n INFO('wrote standby_dbid: %d' % self.standby_dbid)",
"def create_samfile(self):",
"def write_scat(fname, data):\n from esutil.recfile import Recfile\n if os.path.exists(fname):\n os.remove(fname)\n with Recfile(fname,'w',delim=' ') as robj:\n robj.write(data)",
"def _generate_a_seg_file(self, filename, first_col_name):\n f_desc = open(filename, 'w')\n f_desc.write(self.get_seg_header())\n line = self._segments[0].get_line(self.get_name())[:]\n line[0] = first_col_name\n line[2] = 0\n line[3] = self._frames - 1\n #line[-1] = self._speaker\n f_desc.write(\"%s %s %s %s %s %s %s %s\\n\" % tuple(line))\n f_desc.close()",
"def write(self, filename):\n pass",
"def write(self, filename):\n pass",
"def write(self, filename, data, hdr):\n pass",
"def shard(xrec=None):\n\n xrec = conf.get_current()\n\n print \"FIX_DAT: shard()\", xrec\n zip_ver = xrec['zip_dir']\n\n xindex = []\n\n regEx = re.compile(\"[A-Z]{5}\")\n\n inputfile = conf.work_dir(\"/xplane_unzipped/%s/earth_fix.dat\" % (xrec['zip_dir']))\n c = 0\n print inputfile\n\n\n with open(inputfile) as readnav:\n\n for line in readnav:\n c += 1\n\n # Skip first three lines, hope Robin Peel will never change this behaviour ;-)\n if c < 4:\n pass\n else:\n\n if not line.startswith(\"99\"):\n\n lst = line.strip().split()\n fix_ident = str(lst[2])\n\n fixblob = None\n\n if fix_ident == \"NPOLE\":\n pass\n\n else:\n\n ## Write shard\n blob_path = conf.raw_fix_path( xrec, fix_ident)\n #print file_path, xrec\n\n f = open(blob_path + \".txt\", \"w\")\n f.write(line)\n f.close()\n\n ## make dic\n maj = True if regEx.match(fix_ident) else False\n data = dict(\n ident=fix_ident, major=maj, src=line,\n lat=str(lst[0]), lon=str(lst[1])\n )\n json_path = blob_path + \".json\"\n conf.write_json(json_path, data)\n\n\n\n xindex.append(fix_ident)\n\n\n if c % 5000 == 0:\n print \" > fix: %s - %s of %s\" % (fix_ident, c, MAX_LINES_GUESS)\n #sys.exit(0)\n #db.Con.commit()\n\n ## commit any outstanding after rows at end of loop\n #db.Con.commit()",
"def append(self, filename):\n\n self.db.single_insert_camera(filename)\n self.db.batch_insert_camera(filename)",
"def write_eneheader(self,filename,replica):\n \n fheader = open(filename,'w')\n fheader.write('E_pot\\tE_rest(D)\\tD\\tcontact_state\\ttemp\\n')\n fheader.write('# Energy units: Joules/mol\\n')\n fheader.write('# Restrained contact state: ' + repr(replica.mc.restraint.contacts) + '\\n')\n fheader.write('# kspring: '+str(replica.mc.restraint.kspring) + '\\n')\n\tfheader.close()",
"def update_filename(instance, filename):\n path = os.path.join(\"documents_analizer\", \"documents\")\n name = \"{}{}\".format(highly_random_name(),\n os.path.splitext(filename)[1])\n return os.path.join(path, name)",
"def writefile(name, instream, start=None, end=None, append=False):"
] | [
"0.6643691",
"0.5697363",
"0.56757563",
"0.5666619",
"0.54714036",
"0.5333628",
"0.53004414",
"0.5286781",
"0.5286214",
"0.52476746",
"0.5242944",
"0.5241004",
"0.5194285",
"0.5178036",
"0.5169993",
"0.5164789",
"0.51546425",
"0.51322526",
"0.51283985",
"0.5119813",
"0.5096584",
"0.5063505",
"0.5055201",
"0.5055201",
"0.5041818",
"0.5029561",
"0.50265366",
"0.50128025",
"0.5005558",
"0.500121"
] | 0.57318276 | 1 |
Save the saveable objects to a checkpoint with `file_prefix`. | def save(self, file_prefix, options=None):
options = options or checkpoint_options.CheckpointOptions()
# IMPLEMENTATION DETAILS: most clients should skip.
#
# Suffix for any well-formed "checkpoint_prefix", when sharded.
# Transformations:
# * Users pass in "save_path" in save() and restore(). Say "myckpt".
# * checkpoint_prefix gets fed <save_path><sharded_suffix>.
#
# Example:
# During runtime, a temporary directory is first created, which contains
# files
#
# <train dir>/myckpt_temp/
# part-?????-of-?????{.index, .data-00000-of-00001}
#
# Before .save() finishes, they will be (hopefully, atomically) renamed to
#
# <train dir>/
# myckpt{.index, .data-?????-of-?????}
#
# Filesystems with eventual consistency (such as S3), don't need a
# temporary location. Using a temporary directory in those cases might
# cause situations where files are not available during copy.
#
# Users only need to interact with the user-specified prefix, which is
# "<train dir>/myckpt" in this case. Save() and Restore() work with the
# prefix directly, instead of any physical pathname. (On failure and
# subsequent restore, an outdated and orphaned temporary directory can be
# safely removed.)
with ops.device("CPU"):
sharded_suffix = array_ops.where(
string_ops.regex_full_match(file_prefix, "^s3://.*"),
constant_op.constant(".part"),
constant_op.constant("_temp/part"))
tmp_checkpoint_prefix = string_ops.string_join(
[file_prefix, sharded_suffix])
registered_paths = {
saver_name: registered_saver_filename(file_prefix, saver_name)
for saver_name in self._registered_savers
}
def save_fn():
saved_prefixes = []
# Save with the registered savers. These run before default savers due to
# the API contract.
for saver_name, (save_fn, _) in self._registered_savers.items():
maybe_saved_prefixes = save_fn(registered_paths[saver_name])
if maybe_saved_prefixes is not None:
flattened_saved_prefixes = nest.flatten(maybe_saved_prefixes)
if not all(
tensor_util.is_tf_type(x) and x.dtype == dtypes.string
for x in flattened_saved_prefixes):
raise ValueError(
"Registered saver must return a (maybe empty) list of "
f"string type tensors. Got {maybe_saved_prefixes}.")
saved_prefixes.extend(flattened_saved_prefixes)
# (Default saver) Save with single device savers.
num_shards = len(self._single_device_savers)
sharded_saves = []
num_shards_tensor = constant_op.constant(num_shards, name="num_shards")
last_device = None
for shard, (device, saver) in enumerate(
sorted(self._single_device_savers.items())):
last_device = device
with ops.device(saveable_object_util.set_cpu0(device)):
shard_prefix = sharded_filename(tmp_checkpoint_prefix, shard,
num_shards_tensor)
saved_prefixes.append(shard_prefix)
with ops.device(device):
# _SingleDeviceSaver will use the CPU device when necessary, but
# initial read operations should be placed on the SaveableObject's
# device.
sharded_saves.append(saver.save(shard_prefix, options))
with ops.control_dependencies(sharded_saves):
# Merge on the io_device if specified, otherwise co-locates the merge op
# with the last device used.
merge_device = (
options.experimental_io_device or
saveable_object_util.set_cpu0(last_device))
with ops.device(merge_device):
# V2 format write path consists of a metadata merge step. Once
# merged, attempts to delete the temporary directory,
# "<user-fed prefix>_temp".
return gen_io_ops.merge_v2_checkpoints(
saved_prefixes, file_prefix, delete_old_dirs=True)
# Since this will causes a function re-trace on each save, limit this to the
# cases where it is needed: eager and when there are multiple tasks/single
# device savers. Note that the retrace is needed to ensure we pickup the
# latest values of options like experimental_io_device.
if context.executing_eagerly() and len(self._single_device_savers) > 1:
# Explicitly place the identity op on the first device.
@def_function.function(jit_compile=False)
def tf_function_save():
save_fn()
tf_function_save()
else:
return save_fn() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save(self, file_prefix, options=None):\n options = options or checkpoint_options.CheckpointOptions()\n tensor_names = []\n tensors = []\n slice_specs = []\n for checkpoint_key, tensor_slices in self._tensor_slice_dict.items():\n for slice_spec, tensor in tensor_slices.items():\n if isinstance(tensor, saveable_object.SaveSpec):\n tensor_value = tensor.tensor\n # A tensor value of `None` indicates that this SaveableObject gets\n # recorded in the object graph, but that no value is saved in the\n # checkpoint.\n if tensor_value is not None:\n tensor_names.append(tensor.name)\n tensors.append(tensor_value)\n slice_specs.append(tensor.slice_spec)\n else:\n tensor_names.append(checkpoint_key)\n tensors.append(tensor)\n slice_specs.append(slice_spec)\n save_device = options.experimental_io_device or (\n len(tensors) and saveable_object_util.set_cpu0(tensors[0].device))\n save_device = save_device or \"cpu:0\"\n with ops.device(save_device):\n return io_ops.save_v2(file_prefix, tensor_names, slice_specs, tensors)",
"def _save_checkpoint(checkpoint, model_dir, checkpoint_prefix):\n\n checkpoint_path = os.path.join(model_dir, checkpoint_prefix)\n saved_path = checkpoint.save(checkpoint_path)\n logging.info('Saving model as TF checkpoint: %s', saved_path)\n return",
"def save(self, prefix_file):\n self.save_encoder(prefix_file)\n sklearn.externals.joblib.dump(\n self.classifier,\n prefix_file + '_' + self.architecture + '_classifier.pkl'\n )",
"def save(self, checkpoint_path: str):\r\n raise NotImplementedError",
"def save_states(self, checkpoint):\n raise NotImplementedError()",
"def save_checkpoint(checkpoint_dir, epoch, iteration, save_dict):\n os.makedirs(checkpoint_dir, exist_ok=True)\n path = opj(checkpoint_dir, str(epoch) + '.' + str(iteration) + '.ckpt')\n assert epoch == save_dict['epoch'], \"`epoch` != save_dict's `start_epoch`\"\n assert iteration == save_dict['iteration'], \"`iteration` != save_dict's `start_iteration`\"\n if os.path.isfile(path):\n print(\"Overwrite checkpoint in epoch %d, iteration %d :exclamation:\" % (epoch, iteration))\n try:\n torch.save(save_dict, path)\n except Exception:\n raise Exception(\"Fail to save checkpoint\")\n \n print(\"Checkpoint %s saved :heavy_check_mark:\" % (str(epoch) + '.' + str(iteration) + '.ckpt'))",
"def save_checkpoint(self):\n \n if not os.path.isdir(self.path + '/checkpoint/'):\n os.makedirs(self.path + '/checkpoint/')\n\n if self.saver == None:\n with self.graph.as_default():\n self.saver = tf.train.Saver(tf.global_variables())\n\n self.saver.save(self.session, self.path + '/checkpoint/model.ckpt')",
"def save(self, model_dir, model_prefix):\n self.saver.save(self.sess, os.path.join(model_dir, model_prefix))\n self.logger.info('Model saved in {}, with prefix {}.'.format(model_dir, model_prefix))",
"def save(self, model_dir, model_prefix):\n self.saver.save(self.sess, os.path.join(model_dir, model_prefix))\n self.logger.info('Model saved in {}, with prefix {}.'.format(model_dir, model_prefix))",
"def save(self, model_dir, model_prefix):\n self.saver.save(self.sess, os.path.join(model_dir, model_prefix))\n self.logger.info('Model saved in {}, with prefix {}.'.format(model_dir, model_prefix))",
"def checkpoint(self):\n save()",
"def save_ckpt(objects, epoch, score, ckpt_file):\n state_dicts = {name: obj.state_dict() for name, obj in objects.items() if obj is not None}\n ckpt = dict(state_dicts=state_dicts,\n epoch=epoch,\n score=score)\n may_make_dir(osp.dirname(ckpt_file))\n torch.save(ckpt, ckpt_file)\n msg = '=> Checkpoint Saved to {}'.format(ckpt_file)\n print(msg)",
"def save(self, checkpoint) -> None:\r\n self.model.save(checkpoint)",
"def save_checkpoint(self):\n checkpoin_path = self.get_checkpoint_path()\n _logger.info('Save checkpoint ignored by tuner, checkpoint path: %s', checkpoin_path)",
"def save_checkpoint(self, checkpoint_info):\n torch.save(checkpoint_info, os.path.join(self.checkpoint_path, self.checkpoint_file))",
"def save_objects(self):\n try:\n base_path = \"/tmp/objects.bkp\"\n # Open backup file for write\n file = open(base_path, \"w\")\n # Remove old content from file\n file.truncate()\n # Write a string with the execution objects\n file.write(json.dumps(self.SAVED_OBJECTS))\n file.close()\n except Exception as err:\n logging.error(f\"[ERROR] Couldn't save file. Traceback: {err}\")\n return False\n else:\n return True",
"def save_checkpoint(self, fname, save_optimizer=True):\n # -- Set the network to the full MultiHead_Module network to save everything in the class not only the current model -- #\n self.network = self.mh_network\n\n # -- Use parent class to save checkpoint for MultiHead_Module model consisting of self.model, self.body and self.heads -- #\n super().save_checkpoint(fname, save_optimizer)\n\n # -- Set the flag in already_trained_on -- #\n if not self.already_trained_on[str(self.fold)]['checkpoint_should_exist']:\n # -- Set the flag to True -- #\n self.already_trained_on[str(self.fold)]['checkpoint_should_exist'] = True\n # -- Add the current head keys for restoring (should be in correct order due to OrderedDict type of heads) -- #\n self.already_trained_on[str(self.fold)]['tasks_at_time_of_checkpoint'] = list(self.mh_network.heads.keys())\n # -- Add the current active task for restoring -- #\n self.already_trained_on[str(self.fold)]['active_task_at_time_of_checkpoint'] = self.mh_network.active_task\n # -- Save the updated dictionary as a json file -- #\n save_json(self.already_trained_on, join(self.trained_on_path, self.extension+'_trained_on.json'))\n\n # -- Reset network to the assembled model to continue training -- #\n self.network = self.mh_network.model",
"def saveCheckpoint(self):\n time_stamp = time.strftime('%Y%m%d%H%M%S', time.gmtime())\n state_filename = os.path.join(self.saving_dir, 'checkpoint.' + time_stamp + '.pth.tar')\n mem_filename = os.path.join(self.saving_dir, 'memory.' + time_stamp + '.pth.tar')\n state = self.getSavingState()\n memory = {\n 'memory': self.memory\n }\n torch.save(state, state_filename)\n torch.save(memory, mem_filename)",
"def save_checkpoint(self, checkpoint: str) -> str:\n\n # Some model might need to aggregate variables during checkpointing\n # which requires both the chief and workers to participate in the\n # allreduce communication protocol.\n # So we need to call get_state on every remote workers, otherwise\n # it might get stuck\n state_refs = [w.get_state.remote() for w in self.remote_workers]\n\n state = ray.get(state_refs[0])\n\n with open(checkpoint, \"wb\") as f:\n SafePickle.dump(state, f)\n\n return checkpoint",
"def save(self, prefix):\n model_file = prefix + \".json\"\n weight_file = prefix + \".h5\"\n json.dump(self.model.to_json(), open(model_file, \"w\"))\n self.model.save_weights(weight_file)\n return self",
"def save(self):\n\n if self.ckpt_manager is not None:\n save_path = self.ckpt_manager.save()\n print(\"Saved checkpoint at: {}\".format(save_path))\n else:\n print(\"There is no checkpoint manager supplied for saving the \"\n \"network weights, optimizer, or other trackables.\")\n print(\"Therefore these will not be saved and the training will \"\n \"start from default values in the future.\")\n print(\"Consider using a checkpoint manager to save the network \"\n \"weights and optimizer.\")",
"def save(self, fname, io=None):\n ckpt_path = self.manager.save()\n logging.info(f'Saved to {ckpt_path}')\n\n print_summary(self.model)\n\n if io is not None:\n io._upload_dir_to_bucket(self.save_path, self.save_path, ['ckpt', 'checkpoint'])",
"def checkpoint(self, epoch, losses, path):\n dct = {'epoch': epoch, \n 'losses': losses, \n 'model_state_dict': self.TrajectoryAutoencoder.state_dict()}\n torch.save(dct, path)",
"def save_checkpoint(self, filename=None):\n filename = os.path.join(self.args.checkpoint_dir, filename)\n state = {\n 'epoch': self.current_epoch + 1,\n 'iteration': self.current_iter,\n 'state_dict': self.model.state_dict(),\n 'optimizer': self.optimizer.state_dict(),\n 'best_MIou':self.best_MIou\n }\n torch.save(state, filename)",
"def save_checkpoint(self, name=''):\n self.checkpoint_path.mkdir(exist_ok=True)\n if name:\n path = self.checkpoint_path / f'{name}_{self.epoch}.tar'\n else:\n path = self.checkpoint_path / f'{self.epoch}.tar'\n torch.save(self.get_state(), path)",
"def save_checkpoint(self, folder='checkpoint', filename='checkpoint.pth.tar'):\n data = dict()\n data[\"inst\"] = \"save\"\n data[\"folder\"] = folder\n data[\"filename\"] = filename\n\n q_idx, data_id = self.put(data, q_idx=0) # Send instruction to first nnet\n self.get(q_idx, data_id) # Blocks here\n\n # Done",
"def save_checkpoint(state, filename):\n torch.save(state, filename) # save checkpoint",
"def checkpoint_save(self, epoch, model, label=None, checkpoint=None, path=\"\"):\n\n if label is None:\n label = f\"checkpoint-{epoch}\"\n else:\n label = f\"{label}-checkpoint-{epoch}\"\n\n if checkpoint is None:\n pass\n elif checkpoint == -1:\n Potentials.save(model=model, label=label, path=path)\n elif epoch % checkpoint == 0:\n Potentials.save(model=model, label=label, path=path)",
"def SaveToFile(self,filePrefix):\n\n for item in ['statsDatabase', 'fileDatabase',\n 'postedContractDatabase', 'proposedContractDatabase',\n 'peerDatabase', 'recoveryDatabase', 'probeDatabase',\n 'sortedPeerList','peersRespondingToRecoverAll']:\n fileName = filePrefix + '.' + item + '.CRASH'\n fd = open(fileName,'wb')\n cPickle.dump(self.__dict__[item],fd)\n fd.close()\n\n for item in ['statsDatabase', 'fileDatabase',\n 'postedContractDatabase', 'proposedContractDatabase',\n 'peerDatabase', 'recoveryDatabase', 'probeDatabase',\n 'sortedPeerList','peersRespondingToRecoverAll']:\n fileName = filePrefix + '.' + item\n if (os.path.exists(fileName)):\n os.remove(fileName)\n os.rename(fileName + '.CRASH', fileName)\n\n dibs_logger.Logger.PrintAndLog('Saved database with prefix ' +\n filePrefix + '.',\n dibs_logger.LOG_DEBUG)",
"def save_checkpoint(self, name):\n timestamp = datetime.datetime.now(self.time_zone).strftime(\"%Y-%m-%d_%H:%M:%S\")\n backup_dir = os.path.join(os.path.dirname(self.path), \"backups\")\n checkpoint_path = os.path.join(backup_dir, \"%s_%s.pickle\" % (timestamp, name))\n pickle.dump(self, open(checkpoint_path, \"wb\"))"
] | [
"0.78489673",
"0.72668874",
"0.70105153",
"0.6881262",
"0.6406851",
"0.6393002",
"0.6343004",
"0.6299223",
"0.6299223",
"0.6299223",
"0.6275086",
"0.62643564",
"0.6264212",
"0.6264104",
"0.62639993",
"0.6254322",
"0.6253772",
"0.62144583",
"0.61729413",
"0.61586666",
"0.61399275",
"0.61275464",
"0.6100151",
"0.6099091",
"0.60959506",
"0.60833955",
"0.60795546",
"0.6075538",
"0.6064517",
"0.6061441"
] | 0.80512387 | 0 |
Restore the saveable objects from a checkpoint with `file_prefix`. | def restore(self, file_prefix, options=None):
options = options or checkpoint_options.CheckpointOptions()
def restore_fn():
restore_fn_inputs = {}
restore_fn_input_count = {
fn: len(keys) for fn, keys in self._restore_fn_to_keys.items()}
restore_ops = {}
# Sort by device name to avoid propagating non-deterministic dictionary
# ordering in some Python versions.
for device, saver in sorted(self._single_device_savers.items()):
with ops.device(device):
# Load values from checkpoint
restored_tensor_dict = saver.restore(file_prefix, options)
# Map restored tensors to the corresponding restore_fn, and see if all
# inputs have all been loaded. Call `restore_fn` if that is the case.
for checkpoint_key, slice_and_tensor in restored_tensor_dict.items():
for slice_spec, tensor in slice_and_tensor.items():
restore_fn = self._keys_to_restore_fn[(checkpoint_key,
slice_spec)]
# Processing the returned restored_tensor_dict to prepare for the
# Trackable `restore` function. The `restore` function expects a
# map of `string name (checkpoint_key) -> Tensor`. Unless there is
# a slice_spec, in which case the map will be of
# `string name (checkpoint_key)-> slice_spec -> Tensor`.
if slice_spec:
(restore_fn_inputs.setdefault(restore_fn, {}).setdefault(
checkpoint_key, {})[slice_spec]) = tensor
else:
restore_fn_inputs.setdefault(restore_fn,
{})[checkpoint_key] = tensor
restore_fn_input_count[restore_fn] -= 1
if restore_fn_input_count[restore_fn] == 0:
restored_tensors = {}
# Extracts the substring after the "/.ATTRIBUTES/" in the
# ckpt_key from restore_fn_inputs[restore_fn] to
# restored_tensors. For example, if restore_fn_input[restore_fn]
# is dict { "/.ATTIBUTES/a": Tensor}, restored_tensors will be
# changed to dict {"a": Tensor}
for ckpt_key, tensor in restore_fn_inputs[restore_fn].items():
restored_tensors[trackable_utils.extract_local_name(
ckpt_key)] = tensor
ret = restore_fn(restored_tensors)
if isinstance(ret, dict):
restore_ops.update(ret)
# Run registered restore methods after the default restore ops.
for _, (_, restore_fn) in self._registered_savers.items():
restore_fn(file_prefix)
return restore_ops
has_custom_device_saver = any([
context.is_custom_device(d) for d in self._single_device_savers.keys()
])
# Since this will cause a function re-trace on each restore, limit this to
# cases where it is needed: eager and when there are multiple tasks/single
# device savers or any single device saver is a custom device. Note that the
# retrace is needed to ensure we pickup the latest values of options like
# experimental_io_device.
#
# We run in a function when there is a custom device saver because custom
# devices, such as DTensor, usually do a sharded save and restore.
# Doing a sharded save and restore requires knowledge about what shards
# of variables we are restoring to. In practice, this means that custom
# devices need the AssignVariableOps along with the Restore op within the
# same graph to infer shapes and shard specs for Restore op.
if context.executing_eagerly() and (len(self._single_device_savers) > 1 or
has_custom_device_saver):
@def_function.function(jit_compile=False, autograph=False)
def tf_function_restore():
restore_fn()
return {}
restore_ops = tf_function_restore()
else:
restore_ops = restore_fn()
return restore_ops | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def restore(self, file_prefix, options=None):\n options = options or checkpoint_options.CheckpointOptions()\n tensor_names = []\n tensor_dtypes = []\n slice_specs = []\n\n for checkpoint_key, tensor_slices in self._tensor_slice_dict.items():\n for slice_spec, tensor in tensor_slices.items():\n tensor_dtypes.append(tensor.dtype)\n if isinstance(tensor, saveable_object.SaveSpec):\n slice_specs.append(tensor.slice_spec)\n tensor_names.append(tensor.name)\n else:\n slice_specs.append(slice_spec)\n tensor_names.append(checkpoint_key)\n\n restore_device = options.experimental_io_device or \"cpu:0\"\n with ops.device(restore_device):\n restored_tensors = io_ops.restore_v2(\n file_prefix, tensor_names, slice_specs, tensor_dtypes)\n\n restored_tensor_dict = {}\n for checkpoint_key, tensor_slices in self._tensor_slice_dict.items():\n for slice_spec in tensor_slices:\n restored_tensor = restored_tensors.pop(0)\n restored_tensor_dict.setdefault(checkpoint_key, {})[slice_spec] = (\n restored_tensor)\n return restored_tensor_dict",
"def restore(self, checkpoint):\n raise NotImplementedError",
"def restore(self, checkpoint_path: str):\r\n raise NotImplementedError",
"def restore(self, model_dir, model_prefix):\n self.saver.restore(self.sess, os.path.join(model_dir, model_prefix))\n self.logger.info('Model restored from {}, with prefix {}'.format(model_dir, model_prefix))",
"def restore(self, model_dir, model_prefix):\n self.saver.restore(self.sess, os.path.join(model_dir, model_prefix))\n self.logger.info('Model restored from {}, with prefix {}'.format(model_dir, model_prefix))",
"def restore(self, model_dir, model_prefix):\n self.saver.restore(self.sess, os.path.join(model_dir, model_prefix))\n self.logger.info('Model restored from {}, with prefix {}'.format(model_dir, model_prefix))",
"def save(self, file_prefix, options=None):\n options = options or checkpoint_options.CheckpointOptions()\n\n # IMPLEMENTATION DETAILS: most clients should skip.\n #\n # Suffix for any well-formed \"checkpoint_prefix\", when sharded.\n # Transformations:\n # * Users pass in \"save_path\" in save() and restore(). Say \"myckpt\".\n # * checkpoint_prefix gets fed <save_path><sharded_suffix>.\n #\n # Example:\n # During runtime, a temporary directory is first created, which contains\n # files\n #\n # <train dir>/myckpt_temp/\n # part-?????-of-?????{.index, .data-00000-of-00001}\n #\n # Before .save() finishes, they will be (hopefully, atomically) renamed to\n #\n # <train dir>/\n # myckpt{.index, .data-?????-of-?????}\n #\n # Filesystems with eventual consistency (such as S3), don't need a\n # temporary location. Using a temporary directory in those cases might\n # cause situations where files are not available during copy.\n #\n # Users only need to interact with the user-specified prefix, which is\n # \"<train dir>/myckpt\" in this case. Save() and Restore() work with the\n # prefix directly, instead of any physical pathname. (On failure and\n # subsequent restore, an outdated and orphaned temporary directory can be\n # safely removed.)\n with ops.device(\"CPU\"):\n sharded_suffix = array_ops.where(\n string_ops.regex_full_match(file_prefix, \"^s3://.*\"),\n constant_op.constant(\".part\"),\n constant_op.constant(\"_temp/part\"))\n tmp_checkpoint_prefix = string_ops.string_join(\n [file_prefix, sharded_suffix])\n registered_paths = {\n saver_name: registered_saver_filename(file_prefix, saver_name)\n for saver_name in self._registered_savers\n }\n\n def save_fn():\n saved_prefixes = []\n # Save with the registered savers. These run before default savers due to\n # the API contract.\n for saver_name, (save_fn, _) in self._registered_savers.items():\n maybe_saved_prefixes = save_fn(registered_paths[saver_name])\n if maybe_saved_prefixes is not None:\n flattened_saved_prefixes = nest.flatten(maybe_saved_prefixes)\n if not all(\n tensor_util.is_tf_type(x) and x.dtype == dtypes.string\n for x in flattened_saved_prefixes):\n raise ValueError(\n \"Registered saver must return a (maybe empty) list of \"\n f\"string type tensors. Got {maybe_saved_prefixes}.\")\n saved_prefixes.extend(flattened_saved_prefixes)\n\n # (Default saver) Save with single device savers.\n num_shards = len(self._single_device_savers)\n sharded_saves = []\n num_shards_tensor = constant_op.constant(num_shards, name=\"num_shards\")\n last_device = None\n for shard, (device, saver) in enumerate(\n sorted(self._single_device_savers.items())):\n last_device = device\n with ops.device(saveable_object_util.set_cpu0(device)):\n shard_prefix = sharded_filename(tmp_checkpoint_prefix, shard,\n num_shards_tensor)\n saved_prefixes.append(shard_prefix)\n with ops.device(device):\n # _SingleDeviceSaver will use the CPU device when necessary, but\n # initial read operations should be placed on the SaveableObject's\n # device.\n sharded_saves.append(saver.save(shard_prefix, options))\n\n with ops.control_dependencies(sharded_saves):\n # Merge on the io_device if specified, otherwise co-locates the merge op\n # with the last device used.\n merge_device = (\n options.experimental_io_device or\n saveable_object_util.set_cpu0(last_device))\n with ops.device(merge_device):\n # V2 format write path consists of a metadata merge step. 
Once\n # merged, attempts to delete the temporary directory,\n # \"<user-fed prefix>_temp\".\n return gen_io_ops.merge_v2_checkpoints(\n saved_prefixes, file_prefix, delete_old_dirs=True)\n\n # Since this will causes a function re-trace on each save, limit this to the\n # cases where it is needed: eager and when there are multiple tasks/single\n # device savers. Note that the retrace is needed to ensure we pickup the\n # latest values of options like experimental_io_device.\n if context.executing_eagerly() and len(self._single_device_savers) > 1:\n # Explicitly place the identity op on the first device.\n @def_function.function(jit_compile=False)\n def tf_function_save():\n save_fn()\n tf_function_save()\n else:\n return save_fn()",
"def _restore(self, restore_folder):\n tf.reset_default_graph()\n self.init_session()\n ckpt = tf.train.get_checkpoint_state(restore_folder)\n self.saver = tf.train.import_meta_graph('{}.meta'.format(ckpt.model_checkpoint_path))\n self.saver.restore(self.sess, ckpt.model_checkpoint_path)\n print(\"Model restored from {}\".format(restore_folder))",
"def _restore(self, checkpoint):\n checkpoint_path = os.path.join(checkpoint, \"model_weights\")\n self.model.load_weights(checkpoint_path)",
"def restore(self, checkpoint_frame=None):\n\n if checkpoint_frame:\n self.saver.restore(self.sess, self.path + '/tensorflow-model-%d' % checkpoint_frame)\n else:\n self.saver.restore(self.sess, self.saver.latest_checkpoint())",
"def restore(self):\n\n self.brain.restore_checkpoint()",
"def restore(self, sess, path=None, var_list=None):\n\n saver = tf.train.Saver(var_list)\n if path is None:\n path = tf.train.latest_checkpoint(os.path.dirname(self.config.CHECKPOINTS_PATH))\n saver.restore(sess, path)\n print(\"model restored from %s\" % path)",
"def restore_model(self, prefix):\n model_file = prefix + \".json\"\n weight_file = prefix + \".h5\"\n self.model = model_from_json(json.load(open(model_file)))\n self.model.load_weights(weight_file)\n return self\n model.load_weights(\"./output/model.h5\")",
"def load_checkpoint(self, session, model_dir):\n assert self.params.cp_load == True, (\"cp_load must be set to true to load a checkpoint\")\n self.loader.restore(session, model_dir)",
"def restore(self, checkpoint_path):\n start_time = time.time()\n latest_checkpoint = train_util.get_latest_chekpoint(checkpoint_path)\n if latest_checkpoint is not None:\n checkpoint = tf.train.Checkpoint(model=self)\n checkpoint.restore(latest_checkpoint).expect_partial()\n logging.info('Loaded checkpoint %s', latest_checkpoint)\n logging.info('Loading model took %.1f seconds', time.time() - start_time)\n else:\n logging.info('Could not find checkpoint to load at %s, skipping.',\n checkpoint_path)",
"def restore_fn(flags):\n # if flags.tf_initial_checkpoint is None:\n # return None\n\n # Warn the user if a checkpoint exists in the train_dir. Then ignore.\n # if tf.train.latest_checkpoint(flags.train_dir):\n # tf.logging.info(\n # 'Ignoring --checkpoint_path because a checkpoint already exists in %s'\n # % flags.train_dir)\n # return None\n\n exclusions = []\n if flags.checkpoint_exclude_scopes:\n exclusions = [scope.strip()\n for scope in flags.checkpoint_exclude_scopes.split(',')]\n\n variables_to_restore = []\n for var in slim.get_model_variables():\n excluded = False\n for exclusion in exclusions:\n if var.op.name.startswith(exclusion):\n excluded = True\n break\n if not excluded:\n variables_to_restore.append(var)\n # Change model scope if necessary.\n if flags.checkpoint_model_scope is not None:\n variables_to_restore = \\\n {var.op.name.replace(flags.model_name,\n flags.checkpoint_model_scope): var\n for var in variables_to_restore}\n\n tf.compat.v1.logging.info('++++++++++++++++++++')\n tf.compat.v1.logging.info('Fine-tuning from %s. Ignoring missing vars: %s' %\n (flags.pre_trained_checkpoint, flags.ignore_missing_vars))\n slim.assign_from_checkpoint_fn(flags.pre_trained_checkpoint,\n variables_to_restore,\n ignore_missing_vars=flags.ignore_missing_vars)",
"def maybe_restore_train_and_indicator_state(\n train_state,\n indicator_state,\n *,\n ckpt_manager,\n mesh,\n):\n latest_step = ckpt_manager.latest_step()\n if latest_step is None:\n return train_state, indicator_state\n logging.info('Restoring from step %d', latest_step)\n\n # Check if the directory is empty, Orbax could have failed to save\n # the checkpoint alltogether\n save_dir = checkpoint.utils.get_save_directory(\n latest_step,\n ckpt_manager.directory,\n )\n # If there's no files in the directory we should remove it and try\n # again with the checkpoint before that.\n if not any(save_dir.iterdir()):\n logging.info(\n 'Save directory %s is empty, removing and recursing restore',\n save_dir,\n )\n save_dir.rmdir()\n return maybe_restore_train_and_indicator_state(\n train_state,\n indicator_state,\n ckpt_manager=ckpt_manager,\n mesh=mesh,\n )\n\n def restore_arguments_with_mesh_axes(\n mesh_axes):\n if not mesh:\n mesh_axes = None\n\n def closure(_):\n return checkpoint.ArrayRestoreArgs(\n lazy=True,\n restore_type=jax.Array,\n mesh=mesh,\n mesh_axes=mesh_axes,\n )\n\n return closure\n\n # Evaluate the shape and filter empty nodes\n # We save the entire PyTree so there's no need to further filter\n train_state_shape = jax.eval_shape(lambda x: x, train_state)\n train_state_shape = tree_utils.filter_empty_nodes(train_state_shape,\n train_state_shape)\n train_state_pspec = create_train_state_partition_spec_from_shape(\n train_state_shape)\n train_state_restore_args = jax.tree_util.tree_map(\n restore_arguments_with_mesh_axes(train_state_pspec), train_state_shape)\n\n indicator_state_shape = jax.eval_shape(lambda x: x, indicator_state)\n indicator_state_shape = tree_utils.tree_map_with_regex(\n lambda _: None, indicator_state_shape, [(r'.*params/encoder/.*',)],\n lambda leaf: leaf)\n indicator_state_shape = tree_utils.filter_empty_nodes(indicator_state_shape,\n indicator_state_shape)\n indicator_state_pspec = create_indicator_state_partition_spec_from_shape(\n indicator_state_shape)\n indicator_state_restore_args = jax.tree_util.tree_map(\n restore_arguments_with_mesh_axes(indicator_state_pspec),\n indicator_state_shape)\n\n restored_state = ckpt_manager.restore(\n latest_step,\n items={\n 'train': train_state_shape,\n 'indicator': indicator_state_shape\n },\n restore_kwargs={\n 'train': {\n 'restore_args': train_state_restore_args\n },\n 'indicator': {\n 'restore_args': indicator_state_restore_args\n }\n })\n\n restored_state = checkpoint.apply_transformations(\n original_tree=restored_state,\n transformations=dict(),\n new_tree={\n 'train': train_state,\n 'indicator': indicator_state\n },\n default_to_original=False)\n restored_state = checkpoint.lazy_utils.maybe_get_tree(restored_state)\n logging.info('Restore finished')\n\n return operator.itemgetter('train', 'indicator')(restored_state)",
"def restore(self, sess: tf.Session) -> None:\n super().restore(sess)\n BaseModel._restore_checkpoint(self.pretrained_saver, sess, path=FLAGS.pretrained_checkpoint)",
"def restore_from_dir(self, checkpoint_dir: str):\r\n\r\n pattern = self.CKPT_FILE_TMPL.format(\"*\")\r\n full_paths = glob.glob(os.path.join(checkpoint_dir, pattern))\r\n if not full_paths:\r\n raise RuntimeError(\r\n \"Searcher unable to find checkpoint in {}\".format(\r\n checkpoint_dir)) # TODO\r\n most_recent_checkpoint = max(full_paths)\r\n self.restore(most_recent_checkpoint)",
"def restore(self):\n # For multi-worker training, it should not restore a model in certain\n # worker setting (e.g. non-chief worker in ParameterServerStrategy).\n # pylint: disable=protected-access\n if self._model._in_multi_worker_mode() and not multi_worker_util.should_load_checkpoint():\n return\n self.read_checkpoint_manager.restore_or_initialize()",
"def Restore(binary_file, format='default'):\n from dragon.config import logger\n assert os.path.exists(binary_file), \\\n 'Binary file({}) does not exist.'.format(binary_file)\n\n if format == 'default':\n try:\n state_dict = cPickle.load(open(binary_file, 'rb'))\n except UnicodeDecodeError:\n state_dict = cPickle.load(open(binary_file, 'rb'), encoding='iso-8859-1')\n logger.info('Restore From Model@: ' + binary_file)\n logger.info('Model Format: cPickle')\n for k, v in state_dict.items():\n if not HasTensor(k):\n logger.info('[Warning]: Tensor({}) does not exist in any Graphs, skip.'.format(k))\n else:\n FeedTensor(k, v)\n logger.info('[Info]: Tensor({}) is restored.'.format(k))\n\n elif format == 'caffe':\n # Caffe models can't save the tensor name\n # We simply use \"layer_name/param:X\"\n RestoreCC(binary_file, 1)\n\n else:\n raise TypeError('Unknown binary format: {}'.format(format))",
"def restore_checkpoint(self, checkpoint_id, name, path=''):\n\n\t\tself.log.debug(\"restoring Notebook %s from checkpoint %s\", name, checkpoint_id)\n\t\tnb_path = self._get_os_path(name, path)\n\t\tcp_path = self.get_checkpoint_path(checkpoint_id, name, path)\n\n\t\tif not key_exists(self.bucket, cp_path):\n\t\t\tself.log.debug(\"checkpoint file does not exist: %s\", cp_path)\n\t\t\traise web.HTTPError(404,\n\t\t\t\tu'Notebook checkpoint does not exist: %s-%s' % (name, checkpoint_id)\n\t\t\t)\n\t\t# ensure notebook is readable (never restore from an unreadable notebook)\n\t\tkey = self.bucket.get_key(cp_path)\n\t\tnb = current.reads(key.get_contents_as_string(), u'json')\n\t\tself._copy(cp_path, nb_path)\n\t\tself.log.debug(\"copying %s -> %s\", cp_path, nb_path)",
"def _restore_variables(self, checkpoint):\n checkpoint_variables_map = list_variables(checkpoint)\n valid_variable = lambda name: name.startswith('model/encoder') or \\\n name.startswith('model/decoder')\n checkpoint_variable_names = [name for (name, _) in checkpoint_variables_map\n if valid_variable(name)]\n\n variables = get_variables_to_restore()\n variable_names = [v.name.split(':')[0] for v in variables]\n assignment_map = {}\n for var in checkpoint_variable_names:\n if var in variable_names:\n assignment_map[var] = var\n\n init_from_checkpoint(checkpoint, assignment_map)",
"def restore(self,\n sess,\n ckpt_file,\n ckpt_type):\n if ckpt_file is None:\n raise FileNotFoundError(\"checkpoint file doesn't exist\")\n \n if ckpt_type == \"debug\":\n self.ckpt_debug_saver.restore(sess, ckpt_file)\n elif ckpt_type == \"epoch\":\n self.ckpt_epoch_saver.restore(sess, ckpt_file)\n else:\n raise ValueError(\"unsupported checkpoint type {0}\".format(ckpt_type))",
"def __restoreBackup(self):\n pass #FIXME!!!",
"def load_variables_from_checkpoint(sess, start_checkpoint):\n saver = tf.train.Saver(tf.global_variables())\n saver.restore(sess, start_checkpoint)",
"def load_variables_from_checkpoint(sess, start_checkpoint):\n saver = tf.train.Saver(tf.global_variables())\n saver.restore(sess, start_checkpoint)",
"def _dump_checkpoint(args):\n\n # Determine checkpoint to use\n if os.path.isfile(args.source):\n checkpoint_file = args.source\n else:\n checkpoint_file = Config.get_best_or_last_checkpoint(args.source)\n\n # Load the checkpoint and strip some fieleds\n checkpoint = torch.load(checkpoint_file, map_location=\"cpu\")\n\n # Dump it\n print(f\"# Dump of checkpoint: {checkpoint_file}\")\n excluded_keys = {\"model\", \"optimizer_state_dict\"}\n if args.keys is not None:\n excluded_keys = {key for key in excluded_keys if key not in args.keys}\n excluded_keys = excluded_keys.union(\n {key for key in checkpoint if key not in args.keys}\n )\n excluded_keys = {key for key in excluded_keys if key in checkpoint}\n for key in excluded_keys:\n del checkpoint[key]\n if excluded_keys:\n print(f\"# Excluded keys: {excluded_keys}\")\n yaml.dump(checkpoint, sys.stdout)",
"def load_from_path(self, checkpoint_dir):\n\n vars = self.save_var_names\n saver = tf.train.Saver(vars)\n\n def load_aux(ckpt_path):\n \"\"\"Helper function to not repeat the same code in the following lines.\"\"\"\n\n ckpt_name = os.path.basename(ckpt_path)\n saver.restore(self.sess, ckpt_path)\n counter = int(next(re.finditer(\"(\\d+)(?!.*\\d)\", ckpt_name)).group(0))\n self.counter = counter\n print(\" [*] Loaded {}\".format(ckpt_name))\n return True, counter\n\n ckpt = tf.train.get_checkpoint_state(checkpoint_dir)\n try:\n if ckpt and ckpt.model_checkpoint_path:\n ckpt_name = os.path.basename(ckpt.model_checkpoint_path)\n return load_aux(os.path.join(checkpoint_dir, ckpt_name))\n else:\n print(\n \" [!] Failed to find a checkpoint within directory {}\".format(\n FLAGS.ckpt_path))\n return False, 0\n except:\n print(\" [!] Failed to find a checkpoint, Exception!\")\n return False, 0",
"def Restore(binary_file, format='default'):\n assert os.path.exists(binary_file), \\\n 'Binary file({}) does not exist.'.format(binary_file)\n\n if format == 'default':\n try:\n state_dict = pickle.load(open(binary_file, 'rb'))\n except UnicodeDecodeError:\n state_dict = pickle.load(open(binary_file, 'rb'), encoding='iso-8859-1')\n logging.info('Restore From Model@: ' + binary_file)\n logging.info('Model Format: Pickle')\n for k, v in state_dict.items():\n if HasTensor(k):\n FeedTensor(k, v)\n logging.info('[Info]: Tensor({}) is restored.'.format(k))\n elif format == 'caffe':\n # Caffe models can't save the tensor name\n # We simply use \"layer_name/param:X\"\n _C.Restore(binary_file, 1)\n else:\n raise TypeError('Unknown binary format: {}'.format(format))"
] | [
"0.7710502",
"0.7426955",
"0.71934044",
"0.70837396",
"0.70837396",
"0.70837396",
"0.690655",
"0.6745984",
"0.6577562",
"0.646206",
"0.6419114",
"0.63269955",
"0.6172737",
"0.61575806",
"0.6141102",
"0.60786045",
"0.60416305",
"0.60324484",
"0.6001584",
"0.59863573",
"0.5976694",
"0.5966559",
"0.59486943",
"0.5917606",
"0.5874949",
"0.587232",
"0.587232",
"0.58658606",
"0.58591676",
"0.5851999"
] | 0.74891186 | 1 |
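The comment block inside the restore code above explains when the restore is wrapped in a tf.function: eager execution with several per-device savers, or any saver on a custom device. Below is a minimal, hypothetical sketch of that same dispatch pattern using the public TensorFlow API; the names run_restore, restore_fn, and single_device_savers are illustrative, not TensorFlow internals.

import tensorflow as tf

def run_restore(restore_fn, single_device_savers, has_custom_device_saver=False):
    # Re-trace on every call only when needed, so per-call options such as
    # experimental_io_device are picked up; otherwise call restore_fn directly.
    if tf.executing_eagerly() and (len(single_device_savers) > 1
                                   or has_custom_device_saver):
        @tf.function(jit_compile=False, autograph=False)
        def tf_function_restore():
            restore_fn()
            return {}
        return tf_function_restore()
    return restore_fn()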
Main entry point for landing zone command. | def run(config, toml_config, args, parser, subparser):
if not args.landingzone_cmd: # pragma: nocover
return run_nocmd(config, args, parser, subparser)
else:
config = LandingZoneConfig.create(args, config, toml_config)
return args.landingzone_cmd(config, toml_config, args, parser, subparser) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def main() -> None:\n\n data = Ground(sys.argv[1])\n DeliveryMan.show_route(data.coordinates)",
"def Run(self, args):\n project = properties.VALUES.core.project.Get(required=True)\n zone = {}\n zone['dnsName'] = args.dns_name\n zone['name'] = args.zone\n zone['description'] = args.description\n\n really = console_io.PromptContinue('Creating %s in %s' % (zone, project))\n if not really:\n return\n\n dns = self.context['dns']\n request = dns.managedZones().create(project=project, body=zone)\n try:\n result = request.execute()\n return result\n except errors.HttpError as error:\n raise exceptions.HttpException(util.GetError(error))\n except errors.Error as error:\n raise exceptions.ToolException(error)",
"def main():\n\n args = parse_args()\n setup_logging(args.verbose)\n\n logging.debug('Args: %s', args)\n\n latitude, longitude = get_geo_coordinates(address_string=args.address)\n response = query_dark_sky(latitude, longitude, use_cache=args.use_cache)\n weather = parse_weather(response)\n build_text_to_speak(weather)",
"def main():\n\n # Fixes the startup process.\n util.replace_command('Launcher.exe', 'Borderlands2.exe')\n util.append_argument('-NoSplash')\n\n # Disables esync prevents crashes.\n util.disable_esync()\n\n # Enables NVIDIA PhysX in Borderlands 2.\n util.protontricks('physx')",
"def entry_point():\n\n\n plac.call(main)",
"def main():\r\n # establishes start end and time data\r\n query_user()\r\n # returns optimal subway stop\r\n subway_stop_location = determine_optimal_CTA()\r\n # returns the time you need to arrive at the CTA\r\n arrival_time_transit = determine_arrival_time(subway_stop_location)\r\n # launches directions in google maps, with two windows for directions to and from CTA\r\n # launch_directions(arrival_time_transit, subway_stop_location)\r",
"def main():\n\n # Has oceanview done something? If this is still false by the end,\n # Display the Usage information.\n did_something = False\n\n # The user wants to clear the database.\n if 'cleardb' in sys.argv:\n did_something = True\n print(\"It's sqlite, just delete the file.\")\n\n # The user wants the test data added to the database.\n if 'maketestdb' in sys.argv:\n did_something = True\n database = data.Database(\"db.sqlite\", \"database/build_db.sql\")\n dbutil.add_test_data(database)\n\n # The user wants the front end launched\n if 'front' in sys.argv or 'both' in sys.argv:\n did_something = True\n frontend = front.init()\n frontend.run(INTERFACE, 8000)\n\n # The user wants the back end launched.\n if 'back' in sys.argv or 'both' in sys.argv:\n did_something = True\n backend = back.init()\n backend.run(INTERFACE, 80)\n\n # did_something is False, nothing was done, show the usage info.\n if did_something is False:\n print(\"Usage: python oceanview.py [command]\")\n print(\"COMMANDS:\")\n print(\" front - start the frontend\")\n print(\" back - start the backend\")\n print(\" both - start both\")\n print(\" maketestdb - add test data to the database\")",
"def Main():\n argument_parser = argparse.ArgumentParser(description=(\n u'Extracts the MSIE zone information from a NTUSER.DAT or SYSTEM '\n u'Registry file.'))\n\n argument_parser.add_argument(\n u'-d', u'--debug', dest=u'debug', action=u'store_true', default=False,\n help=u'enable debug output.')\n\n argument_parser.add_argument(\n u'source', nargs=u'?', action=u'store', metavar=u'PATH', default=None,\n help=(\n u'path of the volume containing C:\\\\Windows, the filename of '\n u'a storage media image containing the C:\\\\Windows directory,'\n u'or the path of a SOFTWARE Registry file.'))\n\n options = argument_parser.parse_args()\n\n if not options.source:\n print(u'Source value is missing.')\n print(u'')\n argument_parser.print_help()\n print(u'')\n return False\n\n logging.basicConfig(\n level=logging.INFO, format=u'[%(levelname)s] %(message)s')\n\n output_writer = StdoutWriter()\n\n if not output_writer.Open():\n print(u'Unable to open output writer.')\n print(u'')\n return False\n\n collector_object = msie_zone_info.MSIEZoneInfoCollector(\n debug=options.debug)\n\n if not collector_object.ScanForWindowsVolume(options.source):\n print((\n u'Unable to retrieve the volume with the Windows directory from: '\n u'{0:s}.').format(options.source))\n print(u'')\n return False\n\n collector_object.Collect(output_writer)\n output_writer.Close()\n\n # TODO: implement.\n # if not collector_object.key_found:\n # print(u'No lockdown and zones key found.')\n\n return True",
"def main():\n docopt = docoptinit(__doc__)\n logging.basicConfig(level=logging.INFO,\n format='[%(asctime)s] [%(levelname)s] [ %(filename)s:%(lineno)s - %(name)s ] %(message)s ')\n logging.info('basic config')\n # qb.set_logger(__file__, debug=docopt['--debug'])\n host = docopt['--host']\n port = int(docopt['--port'])\n if not (1 <= port <= 65535):\n raise Exception('port must be 1-65535')\n\n global verbose\n verbose = int(docopt['--verbose'])\n loop = asyncio.get_event_loop()\n try:\n loop.run_until_complete(start_warp_server(host, port))\n loop.run_forever()\n except OSError:\n pass\n except KeyboardInterrupt:\n print('bye')\n finally:\n loop.close()",
"def main_cli():\n pass",
"def main():\n args = parse_args()\n json_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"zones.json\")\n tzprops_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),\n \"..\", \"locales\", \"en-US\", \"chrome\", \"calendar\",\n \"timezones.properties\")\n zoneinfo_path = tempfile.mkdtemp(prefix=\"zones\")\n zoneinfo_pure_path = tempfile.mkdtemp(prefix=\"zones\")\n\n updater = TimezoneUpdater(args.tzdata_path, zoneinfo_path, zoneinfo_pure_path)\n updater.run(json_file, tzprops_file, args.vzic_path)\n\n # Clean up.\n shutil.rmtree(zoneinfo_path)\n shutil.rmtree(zoneinfo_pure_path)",
"def main(*args):\r\n print(START_MESSAGE)\r\n print(\"Script Location:\", location)\r\n print(\"Arguments Passed:\", args)",
"def main():\n CLI_APP.run()",
"def cli():\n config, auth, execute_now = read_command_line_arguments()\n main(config, auth, execute_now)",
"def main() -> None:\n _ = (\n command_line_argument_parser().parse_args()\n ) # configuration only, no arg processing\n print(description())\n py_trees.logging.level = py_trees.logging.Level.DEBUG\n py_trees.blackboard.Blackboard.enable_activity_stream(maximum_size=100)\n root = Remap(name=\"Remap\", remap_to={\"/foo/bar/wow\": \"/parameters/wow\"})\n\n ####################\n # Execute\n ####################\n root.tick_once()\n print(root.blackboard)\n print(py_trees.display.unicode_blackboard())\n print(py_trees.display.unicode_blackboard_activity_stream())",
"def land(self, *args, **kwargs):\n self.log.info('Landing drone...')\n self.dc.land()\n self.log.info('Landed!')",
"def cli():\n pass",
"def show_landing(self):\n print(\"Hooray, the Eagle has landed!\")",
"def main(options):\n home = Address(options.name,\n options.address,\n options.city,\n options.state,\n options.zip_code)",
"def cli():\r\n pass",
"def cli():\n\n pass",
"def cli():\n pass",
"def cli():\n pass",
"def cli():\n pass",
"def cli():\n pass",
"def cli():\n pass",
"def cli():\n pass",
"def cli():\n pass",
"def cli():\n pass",
"def cli():\n pass"
] | [
"0.69619477",
"0.6414897",
"0.6252262",
"0.6175504",
"0.59503347",
"0.59487396",
"0.59453166",
"0.5835306",
"0.58340424",
"0.58282",
"0.5816947",
"0.5801014",
"0.5773889",
"0.5770805",
"0.5750875",
"0.5741733",
"0.5728984",
"0.5725193",
"0.5716316",
"0.56993407",
"0.5698184",
"0.56597507",
"0.56597507",
"0.56597507",
"0.56597507",
"0.56597507",
"0.56597507",
"0.56597507",
"0.56597507",
"0.56597507"
] | 0.7543086 | 0 |
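The run entry point in the record above simply dispatches to args.landingzone_cmd or falls back to a no-command handler. A self-contained sketch of that argparse dispatch pattern follows; the subcommand, handler functions, and config dict are invented for illustration and are not part of the original code.

import argparse

def run_list(config, args):
    # Hypothetical subcommand handler.
    print("listing landing zones for project", config["project"])
    return 0

def run_nocmd(config, args, parser):
    # Fallback used when no subcommand was given.
    parser.print_help()
    return 1

def main(argv=None):
    parser = argparse.ArgumentParser(prog="landingzone")
    subparsers = parser.add_subparsers(dest="subcommand")
    parser_list = subparsers.add_parser("list", help="list landing zones")
    parser_list.set_defaults(landingzone_cmd=run_list)
    args = parser.parse_args(argv)
    config = {"project": "example-project"}
    landingzone_cmd = getattr(args, "landingzone_cmd", None)
    if not landingzone_cmd:
        return run_nocmd(config, args, parser)
    return landingzone_cmd(config, args)

if __name__ == "__main__":
    raise SystemExit(main())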
Override method from the dj-rest-auth library to log in with username or email | def login(self):
self.user = self.serializer.validated_data['user'] or self.serializer.validated_data['email'] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def user_login(request):\n if \"email\" in request.DATA and \"password\" in request.DATA:\n user = authenticate(\n request,\n username=request.DATA[\"email\"],\n password=request.DATA[\"password\"],\n )\n if user is not None:\n login(request, user)\n return JsonResponse(\"OK - User logged in.\", status=200, safe=False)\n return JsonResponse(\n \"Not Found - User not found.\", status=404, safe=False\n )\n return JsonResponse(\n \"Bad Request - Please provide the required json body.\",\n status=400,\n safe=False,\n )",
"def authenticate(self, request, username=None, password=None, **kwargs):\n user_model = get_user_model()\n try:\n user = user_model.objects.get(email=username)\n except user_model.DoesNotExist:\n return None\n else:\n if user.check_password(password):\n return user\n return None",
"def authenticate(self, username=None, password=None):\n\n if '@' in username:\n kwargs = {'email': username}\n else:\n kwargs = {'username': username}\n try:\n user = User.objects.get(**kwargs)\n if True:\n return user\n except User.DoesNotExist:\n return None",
"def authenticate_credentials(self, payload):\n username = payload.get('username')\n email = payload.get('email')\n if not username and not email:\n msg = _('Invalid payload.')\n raise exceptions.AuthenticationFailed(msg)\n try:\n # Username query is case insensitive\n user_queryset = User.objects.filter(\n Q(username__iexact=username)|\n Q(email__iexact=email)\n ).distinct()\n if user_queryset.exists() and user_queryset.count() == 1:\n user = user_queryset.first()\n return user\n except User.DoesNotExist:\n return None",
"def user_login(request):\n try:\n email = request.data['email']\n password = request.data['password']\n except KeyError:\n return Response(\n messages.REQUIRED_EMAIL_AND_PASSWORD,\n status=status.HTTP_400_BAD_REQUEST)\n try:\n # response = validations_utils.login_user_existence_validation(email)\n user = authenticate(email=email, password=password) # Validates credentials of user.\n except ValidationException:\n return Response(messages.INVALID_EMAIL_OR_PASSWORD, status=status.HTTP_401_UNAUTHORIZED)\n try:\n login_user = utils.authenticate_user(user, request.data) # Authorizes the user and returns appropriate data.\n # token = utils.fetch_token(user) # fetches the token for authorized user.\n except ValidationException as e: # Generic exception\n return Response(e.errors, status=e.status)\n return Response(login_user, status=status.HTTP_200_OK)",
"def requestlogin(email, **kwargs): # pragma: no cover\n\n payload = {'email': email}\n # Sending the payload as params is intentional to match the snafu of the upstream API\n post('/user/requestlogin', params=payload, **kwargs)",
"def login(request):\n\n if request.user.is_authenticated:\n return redirect('dashboard')\n \n else:\n if request.method == 'POST':\n email = request.POST['email']\n senha = request.POST['senha']\n\n if email == \"\" or senha == \"\":\n messages.error(request, 'Os campos de email e senha não devem ficar em branco')\n return redirect('login')\n\n if User.objects.filter(email=email).exists:\n try:\n nome = User.objects.filter(email=email).values_list('username', flat=True).get()\n\n user = auth.authenticate(request, username = nome, password = senha)\n except:\n user = None\n\n if user is not None:\n auth.login(request, user)\n messages.success(request, 'login realizado com sucesso')\n return redirect('dashboard')\n else:\n messages.error(request,'Senha ou Usuário incorretos')\n return redirect('login')\n else:\n messages.error(request,'Email não cadastrado')\n return redirect('login')\n\n return render(request, 'usuarios/login.html')",
"def login_user():\n pass",
"def do_login(self, backend, user):",
"def do_login(self, request, email, password, **extra_fields):\n user = authenticate(email=email, password=password)\n\n if not user or not user.is_active:\n return False\n\n login(request, user)\n token = self.generate_auth_token(user)\n\n message = {\n 'token': token,\n 'user': user\n }\n return message",
"def login(self, request):\n email = request.data.get('email', None)\n password = request.data.get('password', None)\n if email is None and password is None:\n LOGGER.error(\"Either email or password not matched.\")\n return Response(\n data=\"Please enter a valid email address and password\",\n status=status.HTTP_400_BAD_REQUEST)\n data = {'email': email, 'password': password}\n resp = requests.post(url=TOKEN_GET_ENDPOINT, data=data)\n if resp.status_code != 200:\n LOGGER.error(\"Invalid credentials, Login failed!.\")\n return Response(data=\"Invalid Credentials\",\n status=status.HTTP_401_UNAUTHORIZED)\n LOGGER.debug(\"Login successful.\")\n user = {'user_type': User.objects.get(email=email).user_type,\n 'first_name': User.objects.get(email=email).first_name,\n 'last_name': User.objects.get(email=email).last_name,\n 'emp_id': User.objects.get(email=email).emp_id,\n 'email': email}\n response = json.loads(resp.text)\n response.update(user)\n return Response(json.dumps(response), status=status.HTTP_200_OK)",
"def login_user(request, error=\"\"): # pylint: disable-msg=too-many-statements,unused-argument\r\n\r\n backend_name = None\r\n email = None\r\n password = None\r\n redirect_url = None\r\n response = None\r\n running_pipeline = None\r\n third_party_auth_requested = settings.FEATURES.get('ENABLE_THIRD_PARTY_AUTH') and pipeline.running(request)\r\n third_party_auth_successful = False\r\n trumped_by_first_party_auth = bool(request.POST.get('email')) or bool(request.POST.get('password'))\r\n user = None\r\n\r\n if third_party_auth_requested and not trumped_by_first_party_auth:\r\n # The user has already authenticated via third-party auth and has not\r\n # asked to do first party auth by supplying a username or password. We\r\n # now want to put them through the same logging and cookie calculation\r\n # logic as with first-party auth.\r\n running_pipeline = pipeline.get(request)\r\n username = running_pipeline['kwargs'].get('username')\r\n backend_name = running_pipeline['backend']\r\n requested_provider = provider.Registry.get_by_backend_name(backend_name)\r\n\r\n try:\r\n user = pipeline.get_authenticated_user(username, backend_name)\r\n third_party_auth_successful = True\r\n except User.DoesNotExist:\r\n AUDIT_LOG.warning(\r\n u'Login failed - user with username {username} has no social auth with backend_name {backend_name}'.format(\r\n username=username, backend_name=backend_name))\r\n return JsonResponse({\r\n \"success\": False,\r\n # Translators: provider_name is the name of an external, third-party user authentication service (like\r\n # Google or LinkedIn).\r\n \"value\": _('There is no {platform_name} account associated with your {provider_name} account. Please use your {platform_name} credentials or pick another provider.').format(\r\n platform_name=settings.PLATFORM_NAME, provider_name=requested_provider.NAME)\r\n }) # TODO: this should be a status code 401 # pylint: disable=fixme\r\n\r\n else:\r\n\r\n if 'email' not in request.POST or 'password' not in request.POST:\r\n return JsonResponse({\r\n \"success\": False,\r\n \"value\": _('There was an error receiving your login information. Please email us.'), # TODO: User error message\r\n }) # TODO: this should be status code 400 # pylint: disable=fixme\r\n\r\n email = request.POST['email']\r\n password = request.POST['password']\r\n try:\r\n user = User.objects.get(email=email)\r\n except User.DoesNotExist:\r\n if settings.FEATURES['SQUELCH_PII_IN_LOGS']:\r\n AUDIT_LOG.warning(u\"Login failed - Unknown user email\")\r\n else:\r\n AUDIT_LOG.warning(u\"Login failed - Unknown user email: {0}\".format(email))\r\n\r\n # check if the user has a linked shibboleth account, if so, redirect the user to shib-login\r\n # This behavior is pretty much like what gmail does for shibboleth. 
Try entering some @stanford.edu\r\n # address into the Gmail login.\r\n if settings.FEATURES.get('AUTH_USE_SHIB') and user:\r\n try:\r\n eamap = ExternalAuthMap.objects.get(user=user)\r\n if eamap.external_domain.startswith(external_auth.views.SHIBBOLETH_DOMAIN_PREFIX):\r\n return JsonResponse({\r\n \"success\": False,\r\n \"redirect\": reverse('shib-login'),\r\n }) # TODO: this should be status code 301 # pylint: disable=fixme\r\n except ExternalAuthMap.DoesNotExist:\r\n # This is actually the common case, logging in user without external linked login\r\n AUDIT_LOG.info(\"User %s w/o external auth attempting login\", user)\r\n\r\n # see if account has been locked out due to excessive login failures\r\n user_found_by_email_lookup = user\r\n if user_found_by_email_lookup and LoginFailures.is_feature_enabled():\r\n if LoginFailures.is_user_locked_out(user_found_by_email_lookup):\r\n return JsonResponse({\r\n \"success\": False,\r\n \"value\": _('This account has been temporarily locked due to excessive login failures. Try again later.'),\r\n }) # TODO: this should be status code 429 # pylint: disable=fixme\r\n\r\n # see if the user must reset his/her password due to any policy settings\r\n if PasswordHistory.should_user_reset_password_now(user_found_by_email_lookup):\r\n return JsonResponse({\r\n \"success\": False,\r\n \"value\": _('Your password has expired due to password policy on this account. You must '\r\n 'reset your password before you can log in again. Please click the '\r\n 'Forgot Password\" link on this page to reset your password before logging in again.'),\r\n }) # TODO: this should be status code 403 # pylint: disable=fixme\r\n\r\n # if the user doesn't exist, we want to set the username to an invalid\r\n # username so that authentication is guaranteed to fail and we can take\r\n # advantage of the ratelimited backend\r\n username = user.username if user else \"\"\r\n\r\n if not third_party_auth_successful:\r\n try:\r\n user = authenticate(username=username, password=password, request=request)\r\n # this occurs when there are too many attempts from the same IP address\r\n except RateLimitException:\r\n return JsonResponse({\r\n \"success\": False,\r\n \"value\": _('Too many failed login attempts. 
Try again later.'),\r\n }) # TODO: this should be status code 429 # pylint: disable=fixme\r\n\r\n if user is None:\r\n # tick the failed login counters if the user exists in the database\r\n if user_found_by_email_lookup and LoginFailures.is_feature_enabled():\r\n LoginFailures.increment_lockout_counter(user_found_by_email_lookup)\r\n\r\n # if we didn't find this username earlier, the account for this email\r\n # doesn't exist, and doesn't have a corresponding password\r\n if username != \"\":\r\n if settings.FEATURES['SQUELCH_PII_IN_LOGS']:\r\n loggable_id = user_found_by_email_lookup.id if user_found_by_email_lookup else \"<unknown>\"\r\n AUDIT_LOG.warning(u\"Login failed - password for user.id: {0} is invalid\".format(loggable_id))\r\n else:\r\n AUDIT_LOG.warning(u\"Login failed - password for {0} is invalid\".format(email))\r\n return JsonResponse({\r\n \"success\": False,\r\n \"value\": _('Email or password is incorrect.'),\r\n }) # TODO: this should be status code 400 # pylint: disable=fixme\r\n\r\n # successful login, clear failed login attempts counters, if applicable\r\n if LoginFailures.is_feature_enabled():\r\n LoginFailures.clear_lockout_counter(user)\r\n\r\n if user is not None and user.is_active:\r\n try:\r\n # We do not log here, because we have a handler registered\r\n # to perform logging on successful logins.\r\n login(request, user)\r\n if request.POST.get('remember') == 'true':\r\n request.session.set_expiry(604800)\r\n log.debug(\"Setting user session to never expire\")\r\n else:\r\n request.session.set_expiry(0)\r\n except Exception as e:\r\n AUDIT_LOG.critical(\"Login failed - Could not create session. Is memcached running?\")\r\n log.critical(\"Login failed - Could not create session. Is memcached running?\")\r\n log.exception(e)\r\n raise\r\n\r\n redirect_url = try_change_enrollment(request)\r\n\r\n if third_party_auth_successful:\r\n redirect_url = pipeline.get_complete_url(backend_name)\r\n\r\n response = JsonResponse({\r\n \"success\": True,\r\n \"redirect_url\": redirect_url,\r\n })\r\n\r\n # set the login cookie for the edx marketing site\r\n # we want this cookie to be accessed via javascript\r\n # so httponly is set to None\r\n\r\n if request.session.get_expire_at_browser_close():\r\n max_age = None\r\n expires = None\r\n else:\r\n max_age = request.session.get_expiry_age()\r\n expires_time = time.time() + max_age\r\n expires = cookie_date(expires_time)\r\n\r\n response.set_cookie(\r\n settings.EDXMKTG_COOKIE_NAME, 'true', max_age=max_age,\r\n expires=expires, domain=settings.SESSION_COOKIE_DOMAIN,\r\n path='/', secure=None, httponly=None,\r\n )\r\n\r\n return response\r\n\r\n if settings.FEATURES['SQUELCH_PII_IN_LOGS']:\r\n AUDIT_LOG.warning(u\"Login failed - Account not active for user.id: {0}, resending activation\".format(user.id))\r\n else:\r\n AUDIT_LOG.warning(u\"Login failed - Account not active for user {0}, resending activation\".format(username))\r\n\r\n reactivation_email_for_user(user)\r\n not_activated_msg = _(\"This account has not been activated. We have sent another activation message. Please check your e-mail for the activation instructions.\")\r\n return JsonResponse({\r\n \"success\": False,\r\n \"value\": not_activated_msg,\r\n }) # TODO: this should be status code 400 # pylint: disable=fixme\r",
"def _login(self, *args, **kwargs):\n pass",
"def login_user(self, email=\"[email protected]\", password=\"test1234\"):\n user_data = {\n 'email': email,\n 'password': password\n }\n return self.client().post('/auth/login', data=user_data)\n\n ############################################\n ##### ALL OUR TESTS METHODS LIE HERE #######",
"def authenticate(self, request, **kwargs):\n\n login = kwargs.get(\n 'login',\n kwargs.get('username', None))\n password = kwargs.get('password', None)\n\n if login and password:\n\n lookup_obj = Q()\n\n authentication_methods = app_settings.AUTHENTICATION_METHODS\n if (AuthenticationMethod.PHONE in authentication_methods and\n PhoneValidationForm({'phone': login}).is_valid()):\n lookup_obj |= Q(phonenumber__phone=login)\n elif (AuthenticationMethod.EMAIL in authentication_methods and\n EmailValidationForm({'email': login}).is_valid()):\n lookup_obj |= Q(emailaddress__email=login)\n elif (AuthenticationMethod.USERNAME in authentication_methods and\n UsernameValidationForm({'username': login}).is_valid()):\n lookup_obj |= Q(username=login)\n else:\n return None\n\n if lookup_obj:\n try:\n user = User.objects.get(lookup_obj)\n if (user.check_password(password)\n and self.user_can_authenticate(user)):\n return user\n except User.DoesNotExist:\n return None\n\n return None",
"def auth(self, user):",
"def user_login(request):\n username = request.POST.get(\"username\", \"\")\n password = request.POST.get(\"password\", \"\")\n print(type(password))\n if username == \"\" or password == \"\":\n return JsonResponse({\"code\": 10103, \"message\": \"username or password is null\"})\n\n if username != \"admin\" or password != \"123\":\n return JsonResponse({\"code\": 10104, \"message\": \"username or password error\"})\n\n return JsonResponse({\"code\": 10200, \"message\": \"login success\"})",
"def login():\r\n if not request.is_json or 'phone_number' not in request.get_json() or 'password' not in request.get_json():\r\n return bad_request('Missing required data.')\r\n return login_user(request)",
"def user_auth(self, email=None, password=None):\n email = raw_input(\"Email: \") if email is None else email\n password = getpass.getpass() if password is None else password\n\n login_url = self.base_api_url\n login_data = {\n \"method\": \"user.login\",\n \"params\": {\n \"email\": email,\n \"pass\": password\n }\n }\n login_params = {\"method\": \"user.login\"}\n # If the user/password match, the server respond will contain a\n # session cookie that you can use to authenticate future requests.\n r = requests.post(\n login_url,\n data=json.dumps(login_data),\n params=login_params\n )\n if r.json()[\"result\"] not in [\"OK\"]:\n raise AuthenticationError(\n \"Could not authenticate.\\n{}\".format(r.json())\n )\n self.cookies = r.cookies",
"def authenticate(self, request, username=None, password=None, **kwargs):\n try:\n user = Account.objects.get(email=username)\n if user.check_password(password):\n return user\n except Account.DoesNotExist:\n try:\n user = Account.objects.get(username=username)\n if user.check_password(password):\n return user\n except Account.DoesNotExist:\n Account().set_password(password)",
"def login_user(self, email=\"[email protected]\", password=\"test1234\"):\n user_data = {\n 'email': email,\n 'password': password\n }\n return self.client().post('api/v1/auth/login', data=user_data)",
"def login(self, email: typing.Union[str, None], password: typing.Union[str, None]) -> tuple:\n pass",
"def authenticate(self, username: str, password: str) -> Optional[str]:",
"def authenticate(self, username: str, password: str) -> Optional[str]:",
"def authenticate(user, request):",
"def login_user(self):\n return self.client.post(\n '/api/v1/user/auth/signin',\n data=json.dumps(dict(\n email='[email protected]',\n password='42qwR@#'\n )),\n content_type='application/json'\n )",
"def login(self, email='[email protected]', password='password'):\n return login(self.client, email, password)",
"def login(self, request): \n user = Account.find_by_username(request.username)\n if user is None:\n print \"User not found\" \n return AccountResponse(errmsg=\"Username not recognized\")\n return AccountResponse(id=user.key.id())",
"def _authenticate_and_get_user(self, email, password, user=None):\n # Authenticate the user\n emailBackend = EmailBackend()\n user = emailBackend.authenticate(username=email, password=password, user=user)\n return user",
"def _login(self, email, password):\r\n resp = self.client.post(\r\n reverse('login_post'),\r\n {'email': email, 'password': password}\r\n )\r\n self.assertEqual(resp.status_code, 200)\r\n return resp"
] | [
"0.68783975",
"0.66309136",
"0.66066015",
"0.66062045",
"0.6598922",
"0.6579894",
"0.6574915",
"0.6515354",
"0.65140116",
"0.65127194",
"0.65111333",
"0.6499497",
"0.6488138",
"0.646653",
"0.6461104",
"0.64396834",
"0.64242405",
"0.6423767",
"0.6421206",
"0.64155567",
"0.6401461",
"0.6363541",
"0.63222563",
"0.63222563",
"0.63112646",
"0.6306887",
"0.6306601",
"0.62871164",
"0.62744457",
"0.62138647"
] | 0.68613046 | 1 |
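The login() override in the record above takes the authenticated user from the serializer's validated data. A common companion piece for username-or-email login, sketched here under the assumption of a plain Django project (the backend class name is illustrative, and the email field is assumed unique), is a custom authentication backend:

from django.contrib.auth import get_user_model
from django.contrib.auth.backends import ModelBackend
from django.db.models import Q

class UsernameOrEmailBackend(ModelBackend):
    def authenticate(self, request, username=None, password=None, **kwargs):
        UserModel = get_user_model()
        try:
            # Accept either the username or the email address as the login.
            user = UserModel.objects.get(
                Q(username__iexact=username) | Q(email__iexact=username)
            )
        except (UserModel.DoesNotExist, UserModel.MultipleObjectsReturned):
            return None
        if user.check_password(password) and self.user_can_authenticate(user):
            return user
        return None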
Convert numpy array of noisy coefs to dataframe for plotting. | def arr_to_df(coefs_noisy, n_arr, coefs_id):
out = pd.DataFrame(coefs_noisy, columns=n_arr)
out = pd.DataFrame(out.stack()).reset_index()
out.columns = ['component', 'n', 'value']
out = out.assign(id=coefs_id)
return out | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def vec_to_df(coefs, n_arr, coefs_id):\n out = pd.DataFrame({'component': 'L2_dist', 'n': n_arr, 'value': coefs,\n 'id': coefs_id})\n return out",
"def as_DF(self):\n\n gs_df = pd.DataFrame(self.P, columns=self.xvec, index=self.yvec)\n gs_df.columns.name = 'x'\n gs_df.index.name = 'y'\n\n return gs_df",
"def generate_features(self):\n bars = self.portfolio.data_handler.bars.ix[:, -15:, :]\n prices = bars[\"adj_price_close\"]\n weights = np.array([1.0, -1.])\n feats = pd.DataFrame(index=bars.minor_axis)\n ts = prices.dot(weights)\n feats[\"z-score\"] = (ts.ix[-1] - ts.mean()) / ts.std()\n return feats",
"def to_dataframe(self):\n import pandas as pd\n\n d = OrderedDict({\"y\": self.y})\n for pred in self.predictors:\n d[pred.name] = pred.x\n \n df = pd.DataFrame(d)\n return df",
"def from_nparray_to_df(nparray: np.ndarray) -> pd.DataFrame:\n return pd.DataFrame(data=nparray[1:, 1:],\n index=nparray[1:, 0],\n columns=nparray[0, 1:])",
"def _coeff_to_df(self):\n dct = self._coeff_to_dict()\n\n return (\n pd.DataFrame(data=dct.items(), columns=[\"feature\", \"coeff\"])\n .sort_values(by=\"coeff\", ascending=False)\n .reset_index(drop=True)\n )",
"def extract_y(tracer_file, tracer_network):\n y_table = pd.DataFrame(tracer_file['Y'])\n y_table.columns = list(tracer_network['isotope'])\n return y_table",
"def inverse_transform(self, df):\n try:\n df = df.astype(float)\n except Exception:\n raise ValueError(\"Data Cannot Be Converted to Numeric Float\")\n\n X = date_part(df.index, method=self.datepart_method)\n y = pd.DataFrame(self.model.predict(X))\n y.columns = df.columns\n y.index = df.index\n df = df + y\n return df",
"def epics_data_plot(data):\n if isinstance(data, (xr.DataArray, xr.Dataset)):\n data = data.to_dataframe()",
"def SweepFrame(*args, **kwargs):\n underride(kwargs, dtype=float)\n return pd.DataFrame(*args, **kwargs)",
"def make_noised_dataframe(dataframe, noise_list, noised_path=\"noised_images\"):\r\n original_image_names = []\r\n noised_image_names = []\r\n\r\n init_dir(noised_path)\r\n\r\n for index, row in tqdm(dataframe.iterrows()):\r\n rand_noise = random.choice(noise_list)\r\n\r\n if not issubclass(type(rand_noise), Noise) and rand_noise is not None:\r\n raise ValueError(\"noise is not of valid type Noise\")\r\n\r\n img = cv2.imread(row['path'], cv2.IMREAD_GRAYSCALE)\r\n img = np.array(img, np.float32)\r\n\r\n noised_img = rand_noise.add(img)\r\n noised_img = np.array(noised_img, np.float32)\r\n noised_class_name = type(rand_noise).__name__\r\n path = os.path.join(noised_path, f\"img{index}_{noised_class_name}.jpg\")\r\n\r\n cv2.imwrite(path, noised_img)\r\n original_image_names.append(row['path'])\r\n noised_image_names.append(path)\r\n return pd.DataFrame({'original_path': original_image_names, 'noised_path': noised_image_names})",
"def transform(self, X: np.ndarray) -> pd.core.frame.DataFrame:\r\n return pd.DataFrame(X, columns=self.attributes_names)",
"def tsne_2d(data_matrix, labels):\n\tprint(\"tsne 2d....\")\n\ttsne = manifold.TSNE(n_components=2, init='pca', method=\"exact\",random_state=0)\n\tpos = tsne.fit_transform(data_matrix)\n\txs, ys = pos[:, 0], pos[:, 1]\n\tdf = pd.DataFrame(dict(x=xs, y=ys, label=labels))\n\treturn df",
"def format_and_annualise(self, raw_cov_array):\n assets = self.X.columns\n return (\n pd.DataFrame(raw_cov_array, index=assets, columns=assets) * self.frequency\n )",
"def curve_to_df(data):\n\n column = [\"interval start\", \"interval end\", \"newNTC\"]\n\n data = [[*row[0].split(\" - \"), row[1]] for row in data]\n\n df = pd.DataFrame(data, columns=column)\n\n return df",
"def feature_class_to_pandas_data_frame(feature_class, field_list):\r\n return DataFrame(\r\n arcpy.da.FeatureClassToNumPyArray(\r\n in_table=feature_class,\r\n field_names=field_list,\r\n skip_nulls=False,\r\n null_value=-99999\r\n )\r\n )",
"def dataTimeSeries(timesteps,df,predictors,target,dropnan,out=2,dropVars=True): \r\n \r\n series = series_to_supervised(df[predictors+[target]].copy(),timesteps,out,dropnan=dropnan)\r\n \r\n if dropnan==False:\r\n series.replace(pd.np.nan,0,inplace=True)\r\n \r\n # Dropping other variables:\r\n if dropVars:\r\n index = list(np.arange(series.shape[1]-2,\r\n series.shape[1]-len(predictors)-2,\r\n -1))\r\n \r\n labels = [item for idx,item in enumerate(series.columns) \r\n if idx in index]\r\n \r\n #print(\"Eliminando variáveis: {}\".format(labels))\r\n series.drop(labels,axis=1,inplace=True) \r\n \r\n return series",
"def convert_to_df(data):\r\n ans = pd.DataFrame(data)\r\n return ans",
"def _data_array_to_dataframe(self, prices_data_array: QFDataArray, frequency: Frequency):\n original_dates = list(prices_data_array.dates.to_index())\n dates = prices_data_array.resample(dates='1D').first().dates.to_index()\n market_open_datetimes = [price_datetime + MarketOpenEvent.trigger_time() for price_datetime in dates if\n price_datetime + MarketOpenEvent.trigger_time() in original_dates]\n shifted_open_datetimes = [price_datetime - frequency.time_delta() for price_datetime in market_open_datetimes]\n\n new_dates = list(set(original_dates + shifted_open_datetimes))\n new_dates = sorted(new_dates)\n prices_df = PricesDataFrame(index=new_dates, columns=prices_data_array.tickers)\n\n prices_df.loc[shifted_open_datetimes, :] = \\\n prices_data_array.loc[market_open_datetimes, :, PriceField.Open].values\n prices_df.loc[original_dates, :] = prices_data_array.loc[original_dates, :, PriceField.Close].values\n\n return prices_df",
"def df_model(self):\n return self.Kernel.df(self.xdata)",
"def as_dataframe(self) -> \"pd.DataFrame\":\n import pandas as pd\n\n df = pd.DataFrame([row.as_series() for row in self.rows])\n return df",
"def impulse_fix():\n df = impulse_test_data()\n return df",
"def transform(self, X, y=None):\n\n X = self._prepare(X)\n\n z_data = self._z_scaler.transform(X.values, y)\n\n transformed_ndarray = super(self.__class__, self).transform(z_data)\n\n pandas_df = pd.DataFrame(transformed_ndarray)\n pandas_df.columns = [\"pca_{}\".format(i) for i in range(len(pandas_df.columns))]\n\n return pandas_df",
"def construct_df(t,y):\n\n df = np.zeros((3,3))\n\n df[0][0] = 77.27*(1.0 - y(1) -2.*8.375e-6*y(0))\n df[0][1] = 77.27*(1.0 -y(0) )\n df[0][2] = 0.0;\n df[1][0] = -1.0/77.27;\n df[1][1] = (-1.0/77.27)*(1.0+y(0))\n df[1][2] = 1.0/77.27\n df[2][0] = 0.161\n df[2][1] = 0.0\n df[2][2] = -0.161\n\n return df",
"def plot_coefs(results):\n coefs_noisy = pd.concat([\n arr_to_df(results['obj_noisy'], n_arr, 'obj'),\n vec_to_df(results['dist_obj'], n_arr, 'obj'),\n arr_to_df(results['pos_noisy'], n_arr, 'pos'),\n vec_to_df(results['dist_pos'], n_arr, 'pos'),\n arr_to_df(results['neg_noisy'], n_arr, 'neg'),\n vec_to_df(results['dist_neg'], n_arr, 'neg')\n ])\n\n xlim = (min(n_arr), max(n_arr))\n ylim = (-1.1, 1.1)\n\n g = sns.FacetGrid(coefs_noisy, row = 'id', col = 'component', xlim = xlim,\n ylim = ylim)\n g.map(sns.pointplot, 'n', 'value', order = n_arr)\n g.set_xticklabels(rotation = 45)\n\n for i, val in enumerate(results['obj_true']):\n ax = g.axes[0, i]\n ax.hlines(val, *ax.get_xlim())\n for i, val in enumerate(results['pos_true']):\n ax = g.axes[1, i]\n ax.hlines(0, *ax.get_xlim(), linestyle = '--', color = 'red')\n ax.hlines(val, *ax.get_xlim())\n for i, val in enumerate(results['neg_true']):\n ax = g.axes[2, i]\n ax.hlines(0, *ax.get_xlim(), linestyle = '--', color = 'red')\n ax.hlines(val, *ax.get_xlim())",
"def timeseries_dataframe(self):\n return",
"def invert(self):\r\n return pd.DataFrame(\r\n np.linalg.pinv(self.data, hermitian=True),\r\n index=self.data.index,\r\n columns=self.data.columns,\r\n ) # do not return CategoryCov because variance can be negative\r",
"def inverse_transform(self, df):\n try:\n df = df.astype(float)\n except Exception:\n raise ValueError(\"Data Cannot Be Converted to Numeric Float\")\n X = pd.to_numeric(df.index, errors='coerce', downcast='integer').values\n\n sin_df = pd.DataFrame()\n # make this faster\n for index, row in self.sin_params.iterrows():\n yy = pd.DataFrame(\n row['amp'] * np.sin(row['omega'] * X + row['phase']) + row['offset'],\n columns=[index],\n )\n sin_df = pd.concat([sin_df, yy], axis=1)\n df_index = df.index\n df = df.astype(float).reset_index(drop=True) + sin_df.reset_index(drop=True)\n df.index = df_index\n return df",
"def get_transformed_data(self, df):\n temp_df = pd.DataFrame(self.fa.transform(df))\n return temp_df",
"def get_ts_df( N):\n df = pd.DataFrame()\n df['value'] = N.reshape(-1)\n df['time'] = list(range( N.shape[1])) * N.shape[0]\n df['unit'] = np.repeat( range( N.shape[0]), N.shape[1])\n return df"
] | [
"0.5676058",
"0.5650214",
"0.5565525",
"0.55317235",
"0.55146766",
"0.5458486",
"0.5452004",
"0.54493064",
"0.54056114",
"0.539738",
"0.5363231",
"0.5359971",
"0.5354889",
"0.5327784",
"0.5323152",
"0.5283996",
"0.5249769",
"0.52473086",
"0.52459276",
"0.5223076",
"0.52193254",
"0.5209716",
"0.5177063",
"0.51735616",
"0.517173",
"0.5136947",
"0.5134284",
"0.5133183",
"0.51262444",
"0.511717"
] | 0.7141686 | 0 |
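A short, hypothetical usage example for the arr_to_df helper in the record above (it assumes arr_to_df and its pandas import are available in the same module): it reshapes a (components x sample sizes) array of noisy coefficient estimates into long format for plotting.

import numpy as np

n_arr = [100, 1000, 10000]                      # sample sizes, used as column labels
rng = np.random.default_rng(0)
coefs_noisy = rng.normal(size=(5, len(n_arr)))  # 5 components x 3 sample sizes
df_long = arr_to_df(coefs_noisy, n_arr, coefs_id="obj")
print(df_long.columns.tolist())                 # ['component', 'n', 'value', 'id']
print(df_long.shape)                            # (15, 4)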
Plot risk and fairness gaps. | def plot_metrics(results, epsilon_pos, epsilon_neg):
## Plot risk and fairness gaps as a function of sample size,
## with true minimum risk and true fairness gaps for reference.
metrics_Y0 = pd.concat(results['metrics_Y0_noisy'], keys=n_arr)
metrics_Y0 = metrics_Y0.reset_index().drop(columns='level_1').rename(
columns={'level_0': 'n'})
metrics_Y = pd.concat(results['metrics_Y_noisy'], keys=n_arr)
metrics_Y = metrics_Y.reset_index().drop(columns='level_1').rename(
columns={'level_0': 'n'})
metrics = pd.concat([metrics_Y0, metrics_Y])
m = results['metrics_Y0_best']
risk = m.loc[m.Metric == 'Risk', 'Value'].values[0]
g = sns.FacetGrid(metrics, row='Outcome', col='Metric',
col_order=['risk', 'gap_FPR', 'gap_FNR'])
g.map(sns.pointplot, 'n', 'value', order=n_arr)
g.set_xticklabels(rotation=45)
g.axes[0, 0].hlines(risk, *g.axes[0, 0].get_xlim())
g.axes[1, 0].hlines(risk, *g.axes[1, 0].get_xlim())
g.axes[0, 1].hlines(epsilon_pos, *g.axes[0, 1].get_xlim())
g.axes[1, 1].hlines(epsilon_pos, *g.axes[1, 1].get_xlim())
g.axes[0, 2].hlines(epsilon_neg, *g.axes[0, 2].get_xlim())
g.axes[1, 2].hlines(epsilon_neg, *g.axes[1, 2].get_xlim()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def plot_scenario_distribution(self):\n x = self.arms\n\n y = self.df.groupby('price').mean().Converted[x]\n y_sex_0 = self.df[self.df.Sex == 0].groupby('price').mean().Converted[x]\n y_sex_1 = self.df[self.df.Sex == 1].groupby('price').mean().Converted[x]\n y_age_0 = self.df[self.df.Under_30 == 0].groupby('price').mean().Converted[x]\n y_age_1 = self.df[self.df.Under_30 == 1].groupby('price').mean().Converted[x]\n\n fig, ax_list = plt.subplots(2,1, figsize=(12, 9))\n\n for ax in ax_list:\n ax.grid(alpha=0.3, linestyle='--')\n\n ax.set_ylim(bottom=0, top=0.6)\n ax.set_xlim(left=50, right=104)\n\n ax.set_xlabel(\"Price\", fontsize=14)\n ax.set_ylabel(\"Conversion Rate\", fontsize=14)\n\n ax.set_xticks(self.arms)\n ax.set_xticklabels(self.arms.astype(np.int64), fontsize=12, alpha=0.7)\n ax.set_yticks(np.linspace(0, 0.7, 8))\n ax.set_yticklabels([str((i * 100).astype(np.int64)) + \"%\" for i in np.linspace(0, 0.7, 8)], fontsize=12, alpha=0.7)\n\n ax.spines['right'].set_alpha(0)\n ax.spines['left'].set_alpha(0.3)\n ax.spines['top'].set_alpha(0)\n ax.spines['bottom'].set_alpha(0.3)\n\n ax_list[0].plot(x, y, label='Global')\n ax_list[0].plot(x, y_sex_0, label='Male', color='moccasin')\n ax_list[0].plot(x, y_sex_1, label='Female', color='darkorange')\n\n ax_list[1].plot(x, y, label='Global')\n ax_list[1].plot(x, y_age_0, label='Under 30', color='red')\n ax_list[1].plot(x, y_age_1, label='Over 30', color='darkred')\n\n ax_list[0].legend()\n ax_list[1].legend()\n\n fig.suptitle(\"Conversion Rate\", fontsize=22)\n\n fig.show()\n\n plt.savefig('chapter5_pricing.png')",
"def dif_plot(sp, x, y, seqmin, seqmax):\n sp.bar(x, y, color='gray')\n sp.set_xlim(seqmin, seqmax)\n sp.xaxis.set_major_locator(MaxNLocator(15))\n sp.set_xlabel('Residue')\n sp.set_ylabel('Difference')\n sp.grid(False)\n sp.axhline(y=0, linewidth=1, color='black')",
"def fig_primegap_race(dir, ext):\n X = 7000\n gap_sizes = [2, 4, 6, 8]\n # X = 100000\n # gap_sizes = [i for i in range(2,50) if i%2==0]\n v = prime_gap_plots(X, gap_sizes)\n\n P = sum(line(x) for x in v)\n P += sum(text(\"Gap %s\" % gap_sizes[i], (v[i][-1][0] * 1.04, v[i][-1][1]), color='black', fontsize=8)\n for i in range(len(v)))\n\n P.save(dir + '/primegap_race.%s' % ext, figsize=[9, 3], gridlines=True)\n return P",
"def _plot(self, step, rewards, losses):\n plt.figure(figsize=(20, 5))\n plt.subplot(131)\n plt.title('Total Episode Reward')\n plt.plot(rewards)\n plt.subplot(132)\n plt.title('MSE Loss')\n plt.plot(losses)\n plt.show()",
"def plotCrossflows():\n \n crossFlow = pd.read_csv('Data_crossflow.csv', index_col = 'Unnamed: 0')\n axialno = [i for i in range(1,15)]\n gapno = len(crossFlow.index)\n gapscatter = []\n gapplots = [i for i in range(0, gapno)]\n for i in gapplots:\n leg = \"gap\" + str(i+1)\n gapscatter.append(plt.plot(axialno, crossFlow.iloc[i,:],\\\n label=leg, linewidth=0.5))\n plt.legend(loc='lower right', ncol=4, fontsize=7)\n plt.grid(True, linestyle=':')\n plt.xticks(axialno)\n plt.xlabel('Axial node number')\n plt.ylabel('lateral velocity, m/s')\n plt.savefig(\"lateral velocity in gaps CFD.jpg\", dpi=300)\n plt.close() \n \n return 0",
"def plot_stay_prob(ax, stay_prob):\n mean_stay_prob = np.mean(stay_prob, 2)\n std_stay_prob = sem(stay_prob, 2)\n # set width of bar\n bar_width = 0.25\n\n # Set position of bar on X axis\n r1 = np.arange(2)\n r2 = [x + bar_width for x in r1]\n\n ax.bar(r1, mean_stay_prob[0, :], yerr=std_stay_prob[0, :],\n color='b', width=bar_width, edgecolor='white', label='Common')\n ax.bar(r2, mean_stay_prob[1, :], yerr=std_stay_prob[1, :],\n color='r', width=bar_width, edgecolor='white', label='Uncommon')\n\n # Add xticks on the middle of the group bars\n ax.set_xticks([r + bar_width/2 for r in range(2)])\n ax.set_xticklabels(['Rewarded', 'Unrewarded'])\n ax.set_ylabel('Stay Probability')\n ax.set_title('A2C-LSTM Model')\n ax.set_ylim(0, 1)\n ax.legend()",
"def plot_hillclimber(scores, hillclimb_students_scores = None):\n\n\tplt.plot(range(0, len(scores)), scores)\n\tif hillclimb_students_scores:\n\t\tplt.plot(range(len(scores), len(scores) + len(hillclimb_students_scores)), hillclimb_students_scores)\n\tplt.ylabel(\"Score\")\n\tplt.xlabel(\"Amount of swaps\")\n\tplt.title(\"Hillclimber\")\n\tplt.show()",
"def visualize_data(dqn_rewards, ddqn_rewards):\n \n fig, ax = plt.subplots()\n x_values = list(range(1, dqn_rewards.size + 1))\n ax.plot(x_values, dqn_rewards, label='dqn rewards')\n ax.plot(x_values, ddqn_rewards, label='ddqn rewards')\n plt.xlabel('episodes')\n plt.title('Cumulative Reward per Game')\n plt.legend()\n plt.show()",
"def create_graphs(hpo_names_to_display, num_races_for_legend,\n racial_percentages, img_name):\n num_sites_to_display = len(hpo_names_to_display)\n bar_width = 2 / num_sites_to_display\n\n idxs = []\n for x in range(num_sites_to_display):\n idxs.append(x)\n\n prev_bottom = [0] * num_sites_to_display\n\n race_cids = list(racial_percentages.keys())\n\n for racial_id in race_cids:\n\n list_by_hpo = racial_percentages[racial_id]\n plt.bar(idxs, list_by_hpo, bottom=prev_bottom, width=bar_width)\n prev_bottom = list(map(add, prev_bottom, list_by_hpo))\n\n plt.xticks(idxs, hpo_names_to_display, rotation=90)\n\n # allow user to show how many to display; otherwise overwhelming\n plt.legend(labels=most_popular_race_cnames[:num_races_for_legend],\n bbox_to_anchor=(1, 1))\n plt.ylabel('Percentage of Racial Breakdown for the Site')\n plt.xlabel('Health Provider Organization (HPO)')\n plt.title('Racial Distribution By Site - Person Table from EHR')\n\n plt.savefig(img_name, bbox_inches=\"tight\")\n\n plt.show()",
"def plot_metrics2(df, n_arr, risk_best, epsilon_pos, epsilon_neg, row, col,\n **kwargs):\n xlim = (min(n_arr), max(n_arr))\n # g = sns.FacetGrid(df, row = row, col = col,\n # col_order = ['risk', 'gap_FPR', 'gap_FNR'], xlim = xlim,\n # ylim = (0, 1), **kwargs)\n g = sns.FacetGrid(df, row=row, col=col,\n col_order=['risk', 'gap_FPR', 'gap_FNR'], xlim=xlim,\n **kwargs)\n g.map(sns.pointplot, 'n', 'value', order=n_arr, ci='sd')\n g.set_xticklabels(rotation=45)\n\n risk_best = to_iterable(risk_best)\n epsilon_pos = to_iterable(epsilon_pos)\n epsilon_neg = to_iterable(epsilon_neg)\n\n for i, rr in enumerate(risk_best):\n g.axes[i, 0].hlines(rr, *g.axes[i, 0].get_xlim())\n g.axes[i, 0].hlines(rr, *g.axes[i, 0].get_xlim())\n\n for i, ee in enumerate(epsilon_pos):\n g.axes[i, 1].hlines(ee, *g.axes[i, 1].get_xlim())\n g.axes[i, 1].hlines(ee, *g.axes[i, 1].get_xlim())\n\n for i, ee in enumerate(epsilon_neg):\n g.axes[i, 2].hlines(ee, *g.axes[i, 1].get_xlim())\n g.axes[i, 2].hlines(ee, *g.axes[i, 1].get_xlim())\n\n g.set_titles(template='')\n\n for ax, m in zip(g.axes[0, :], ['risk', 'gap_FPR', 'gap_FNR']):\n ax.set_title(m)\n for ax, l in zip(g.axes[:, 0], df[row].unique()):\n ax.set_ylabel(l, rotation=90, ha='center', va='center')\n\n return g",
"def plot_risk():\n mua,vra,pra = pgen.get_pdf(plot=False,write=False)\n deadline = ptsl.D\n\n a1 = range(1,17)\n a2 = range(1,17)\n shape = (len(a1),len(a2))\n X,Y = meshgrid(a1,a2)\n Z1 = np.full(shape,-1.0) # Risk Power Surface\n Z2 = np.full(shape,-1.0) # Peak Power Surface\n\n for x in a1 :\n for y in a2 :\n mu = mua[x-1] + mua[y-1]\n vr = vra[x-1] + vra[y-1]\n\n dist = stats.norm(loc=mu,scale=np.sqrt(vr))\n risk = dist.sf(deadline)\n\n Z1[x-1,y-1] = risk\n Z2[x-1,y-1] = np.max([pra[x-1],pra[y-1]])\n\n\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n surf = ax.plot_surface(X, Y, Z1, rstride=1, cstride=1, \n cmap=cm.Greys,linewidth=0, antialiased=False)\n ax.set_xlabel(\"alloc ph1\")\n ax.set_ylabel(\"alloc ph2\")\n ax.set_zlabel(\"Risk\")\n fig.colorbar(surf, shrink=0.5, aspect=5)\n plt.savefig(\"risk-surface.pdf\",bbox_inches='tight')\n plt.close()\n\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n surf = ax.plot_surface(X, Y, Z2, rstride=1, cstride=1, \n cmap=cm.Greys_r,linewidth=0, antialiased=False)\n ax.set_xlabel(\"alloc ph1\")\n ax.set_ylabel(\"alloc ph2\")\n ax.set_zlabel(\"peak power\")\n # print(ax.azim)\n ax.view_init(azim=-30)\n fig.colorbar(surf, shrink=0.5, aspect=5)\n plt.savefig(\"pkp-surface.pdf\",bbox_inches='tight')\n plt.close()\n\n\n\n # fig = plt.figure()\n # ax = fig.add_subplot(111)\n # levels=[Z3[7,7]]\n # print(levels)\n # # levels.sort()\n # # print(levels)\n # qset = plt.contour(X,Y,Z3,levels=levels)\n # # plt.plot(a1,a2,marker=\"*\")\n # # print(qset.levels)\n # # for l in qset.collections : \n # # print(l.get_array())\n # # ax.annotate(\"(%s,%s,%s)\"% (14,14,1.82), xy=(14,14), textcoords='data')\n # # ax.annotate(\"(%s,%s,%s)\"% (17,17,4.63), xy=(17,17), textcoords='data')\n # # ax.annotate(\"(%s,%s,%s)\"% (19,19,6.75), xy=(19,19), textcoords='data')\n # # ax.annotate(\"(%s,%s,%s)\"% (20,20,7.36), xy=(20,20), textcoords='data')\n # ax.set_xlabel(\"alloc ph1\")\n # ax.set_ylabel(\"alloc ph2\")\n # ax.set_title(\"Contour plots for 2-phase allocations\")\n # plt.savefig(\"Util2.pdf\")\n # plt.close()",
"def plot(self, x_feature=\"ratio\", y_feature=\"fold_change\", ax=None):\n\n if ax is None:\n ax = plt.gca()\n\n # - Data\n x, y = (\n self.bed_seg.query(f\"sgRNA_ID >= {self.n_sgrna}\")[x_feature],\n self.bed_seg.query(f\"sgRNA_ID >= {self.n_sgrna}\")[y_feature],\n )\n x_, y_ = (\n self.bed_seg.query(f\"sgRNA_ID < {self.n_sgrna}\")[x_feature],\n self.bed_seg.query(f\"sgRNA_ID < {self.n_sgrna}\")[y_feature],\n )\n\n x_pred = np.arange(0, x.max(), 0.1)\n y_pred, y_pred_std = self.predict(x_pred.reshape(-1, 1), return_std=True)\n\n # - Plot\n # Segments used for fitting\n ax.scatter(\n x,\n y,\n c=cy.QCplot.PAL_DBGD[0],\n alpha=0.7,\n edgecolors=\"white\",\n lw=0.3,\n label=f\"#(sgRNA_ID) >= {self.n_sgrna}\",\n )\n\n # Segments not used for fitting\n plt.scatter(\n x_,\n y_,\n c=cy.QCplot.PAL_DBGD[0],\n marker=\"X\",\n alpha=0.3,\n edgecolors=\"white\",\n lw=0.3,\n label=f\"#(sgRNA_ID) < {self.n_sgrna}\",\n )\n\n # Plot GP fit\n # GP fit\n plt.plot(\n x_pred, y_pred, ls=\"-\", lw=1.0, c=cy.QCplot.PAL_DBGD[1], label=\"GPR mean\"\n )\n plt.fill_between(\n x_pred,\n y_pred - y_pred_std,\n y_pred + y_pred_std,\n alpha=0.2,\n color=cy.QCplot.PAL_DBGD[1],\n lw=0,\n )\n\n # Misc\n plt.axhline(0, ls=\":\", color=cy.QCplot.PAL_DBGD[2], lw=0.3, zorder=0)\n\n plt.xlabel(f\"Segment\\n{x_feature}\")\n plt.ylabel(f\"Segment\\nmean {y_feature}\")\n\n plt.title(f\"{self.kernel_}\", fontsize=6)\n\n plt.legend(frameon=False)\n\n return ax",
"def _plot(self, rewards, losses, epsilons):\n plt.figure(figsize=(20,5))\n plt.subplot(131)\n plt.title('Episodic Reward')\n plt.plot(rewards)\n plt.subplot(132)\n plt.title('TD Loss')\n plt.plot(losses)\n plt.subplot(133)\n plt.title('Epsilon')\n plt.plot(epsilons)\n plt.tight_layout()\n plt.show()",
"def gaps(self):\n return self.gaps_L + self.gaps_R",
"def plot_timecourse_repair_counts(repair_counts_over_gen_df, title, file_path, file_name):\n sns.set_style(\"darkgrid\")\n sns.set_context(\"talk\")\n\n fig, ax = plt.subplots()\n\n palette = sns.color_palette(\"colorblind\")\n\n sns.lineplot(x=\"generation\", y=\"independent_repair_proportion\", data=repair_counts_over_gen_df, palette=palette)\n # sns.lineplot(x=\"generation\", y=\"proportion\", hue=\"class\", data=lang_class_prop_over_gen_df, palette=palette, ci=95, err_style=\"bars\")\n\n plt.tick_params(axis='both', which='major', labelsize=18)\n plt.tick_params(axis='both', which='minor', labelsize=18)\n plt.ylim(-0.05, 1.05)\n plt.title(title, fontsize=22)\n plt.xlabel('Generation', fontsize=20)\n plt.ylabel('Mean proportion', fontsize=20)\n # handles, labels = ax.get_legend_handles_labels()\n #\n # labels = ['D', 'H', 'H+Div.', 'C', 'C+Red.-part', 'C+Red.-whole', 'O']\n\n # # ax.legend(handles=handles[1:], labels=labels[1:])\n # ax.legend(handles=handles, labels=labels)\n plt.tight_layout()\n plt.savefig(file_path + \"Timecourse_plot_repairs_\" + file_name + \".png\")\n plt.show()",
"def visualize_confidence_level(prediction_proba):\n data = (prediction_proba[0]*100).round(2)\n grad_percentage = pd.DataFrame(data = data,columns = ['Porcentage'],index = ['Est','Int','Int_Est','Rob','Rob_Est','Rob_Int','Rob_Int_Est'])\n ax = grad_percentage.plot(kind='barh', figsize=(7, 4), color='#0067e7', zorder=10, width=0.8)\n ax.legend().set_visible(False)\n ax.set_xlim(xmin=0, xmax=100)\n \n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.spines['left'].set_visible(True)\n ax.spines['bottom'].set_visible(True)\n\n ax.tick_params(axis=\"both\", which=\"both\", bottom=\"off\", top=\"off\", labelbottom=\"on\", left=\"off\", right=\"off\", labelleft=\"on\")\n \n vals = ax.get_xticks()\n for tick in vals:\n ax.axvline(x=tick, linestyle='dashed', alpha=0.4, color='#eeeeee', zorder=1)\n\n ax.set_xlabel(\" Porcentage(%) Nivel de confianza\", labelpad=2, weight='bold', size=12)\n ax.set_ylabel(\"Victimización\", labelpad=10, weight='bold', size=12)\n ax.set_title('Nivel de confianza de la predicción ', fontdict=None, loc='center', pad=None, weight='bold')\n\n st.pyplot()\n \n return",
"def plot_example_rat_and_cumrat_subplots(data_1, data_2):\n color_1 = rgb_to_matplot_lib(light_green)\n color_2 = rgb_to_matplot_lib(light_red)\n\n # create the fig. and axes.\n f, (ax1, ax2) = plt.subplots(1, 2)\n f.set_size_inches(16,7)\n\n # plot small dash lines to follow the grading \n for y in range(1, 6): \n ax1.plot(range(0, 45), [y] * len(range(0, 45)), \"--\", lw=0.5, color=\"black\", alpha=0.3)\n ax2.plot(range(0, 45), [y] * len(range(0, 45)), \"--\", lw=0.5, color=\"black\", alpha=0.3)\n\n # plot data for the first book\n indices_1 = data_1.reset_index().index.tolist()\n overall_1 = data_1.overall.tolist()\n\n ax1.plot(indices_1, overall_1, color=color_1)\n\n cum_avg = np.divide(np.cumsum(overall_1),np.add(indices_1, [1] * len(indices_1)))\n ax2.plot(indices_1, cum_avg, color=color_1, label=\"Book 1\")\n\n # plot data for the second book\n indices_2 = data_2.reset_index().index.tolist()\n overall_2 = data_2.overall.tolist()\n\n ax1.plot(indices_2, overall_2, color=color_2)\n\n cum_avg = np.divide(np.cumsum(overall_2),np.add(indices_2, [1] * len(indices_2)))\n ax2.plot(indices_2, cum_avg, color=color_2, label=\"Book 2\")\n\n # remove some plot frameline\n ax1.spines[\"top\"].set_visible(False) \n ax1.spines[\"right\"].set_visible(False)\n ax2.spines[\"top\"].set_visible(False) \n ax2.spines[\"right\"].set_visible(False)\n\n # axis \n ax1.set_ylabel('Rating', fontsize = 14)\n ax2.set_ylabel('Rating', fontsize = 14)\n ax1.set_xlabel('Rating Index', fontsize = 14)\n ax2.set_xlabel('Rating Index', fontsize = 14)\n ax1.tick_params(axis='both', labelsize=14)\n ax2.tick_params(axis='both', labelsize=14)\n\n # set titles\n ax1.set_title('Ratings', fontsize = 14)\n ax2.set_title('Cumulative Average Ratings', fontsize = 14)\n\n # limit the axis to what's necessary\n ax1.set_ylim(1.0, 5.1)\n ax2.set_ylim(1.0, 5.1)\n ax1.set_xlim(0, 45)\n ax2.set_xlim(0, 45)\n\n # change ticks on y axis\n ax1.set_yticks(range(1,6))\n ax2.set_yticks(range(1,6))\n\n # add a legend\n ax2.legend(loc=\"lower right\", fontsize=12)\n\n plt.show()",
"def plot__error_plot_round(ys, xs=None, k=8): # 2020-09-03\n\n if xs is None:\n xs = np.arange(ys.shape[0])\n\n ys_pad = np.pad(ys, pad_width=(k // 2, k // 2), mode='edge')\n ys_avg = list()\n ys_std1 = list()\n ys_std2 = list()\n for i in range(len(ys)):\n ys_part = ys_pad[i:i + k]\n avg = ys_part.mean()\n ys_avg.append(avg)\n ys_std1.append((ys_part[ys_part > avg] - avg).mean())\n ys_std2.append((ys_part[ys_part <= avg] - avg).mean())\n\n # if is_padding:\n # plt.plot(xs[:-k//2], ys[:-k//2], color='royalblue')\n # else:\n plt.plot(xs, ys, color='royalblue')\n\n plt.plot(xs, ys_avg, color='lightcoral')\n ys_avg = np.array(ys_avg)\n ys_std1 = np.array(ys_std1)\n ys_std2 = np.array(ys_std2)\n plt.fill_between(xs, ys_avg + ys_std1, ys_avg + ys_std2, facecolor='lightcoral', alpha=0.3)\n plt.show()",
"def timeSpaceDiagramMethod(self):\n fig, ax1 = plt.subplots()\n\n ax1.set_xlabel('Time (s)', fontsize=24, fontweight='bold')\n ax1.set_ylabel('Distance (m)', fontsize=24, fontweight='bold')\n max_x_limit = self.xAxisRange-100\n plt.xlim([0, max_x_limit])\n plt.ylim([0, max(self.distance_Green)+400])\n plt.xticks(np.arange(0, self.xAxisRange-75, 50), fontsize=24)\n ax1.tick_params(axis='y', labelsize=18)\n for axis in ['top', 'bottom', 'left', 'right']:\n ax1.spines[axis].set_linewidth(4)\n # ax1.set_yticks(ticks=np.arange(0, 100, 20),fontsize = 24)\n #newYlabel = ['-400','0','395','810','1225']\n # plt.gca().set_yticklabels(newYlabel)\n # plt.yticks([])\n req_phase_length = len(self.greenRectangleStartPoint)\n for i in range(0, req_phase_length):\n x = self.greenRectangleStartPoint[i]\n y = self.distance_Green[i]\n ax1.add_patch(Rectangle(\n (x, y), self.greenTime[i], 30, angle=0.0, color='green', linewidth=2,))\n\n req_phase_length = len(self.clearanceRectangleStartPoint)\n for i in range(0, req_phase_length):\n x = self.clearanceRectangleStartPoint[i]\n y = self.distance_Clearance[i]\n ax1.add_patch(Rectangle(\n (x, y), self.clearanceTime[i], 30, angle=0.0, color='red', linewidth=2))\n\n\n if len(self.evTrajectoryTimePoint) > 0:\n ax1.scatter(self.evTrajectoryTimePoint, self.evTrajectoryDistancePoint, c=\"black\", linewidths=4,\n marker=\".\", edgecolor=\"none\", s=50, label='Connected Vehicles Trajectory', zorder=2)\n\n if len(self.transitTrajectoryTimePoint) > 0:\n ax1.scatter(self.transitTrajectoryTimePoint, self.transitTrajectoryDistancePoint, c=\"black\",\n linewidths=4, marker=\".\", edgecolor=\"none\", s=50, label='Connected Vehicles Trajectory', zorder=2)\n\n if len(self.truckTrajectoryTimePoint) > 0:\n ax1.scatter(self.truckTrajectoryTimePoint, self.truckTrajectoryDistancePoint, c=\"black\",\n linewidths=4, marker=\".\", edgecolor=\"none\", s=50, label='Connected Vehicles Trajectory', zorder=2)\n\n if len(self.carTrajectoryTimePoint) > 0:\n ax1.scatter(self.carTrajectoryTimePoint, self.carTrajectoryDistancePoint, c=\"black\", linewidths=4,\n marker=\".\", edgecolor=\"none\", s=50, label='Connected Vehicles Trajectory', zorder=2)\n\n if len(self.connectedVehicleTrajectoryTimePoint) > 0:\n ax1.scatter(self.connectedVehicleTrajectoryTimePoint, self.connectedVehicleTrajectoryDistancePoint, c=\"black\", linewidths=4,\n marker=\".\", edgecolor=\"none\", s=50, label='Connected Vehicles Trajectory', zorder=2) \n\n ax1.legend(loc='upper right', prop={\"size\": 16})\n ax1.set_title(\"Time-Space Diagram\", fontsize=20, fontweight='bold')\n fig.tight_layout() # otherwise the right y-label is slightly clipped\n plt.grid(color='black', linestyle='-', linewidth=0.5)\n plt.show()",
"def plot_pretty():\n\n ts, ys, lin_model, K, us, dt_control, biass, end_time = simulate()\n plt.style.use('seaborn-deep')\n\n black = '#2B2B2D'\n red = '#E90039'\n orange = '#FF1800'\n white = '#FFFFFF'\n yellow = '#FF9900'\n\n plt.figure(figsize=(12.8, 9.6))\n plt.rcParams.update({'font.size': 16, 'text.color': white, 'axes.labelcolor': white,\n 'axes.edgecolor': white, 'xtick.color': white, 'ytick.color': white})\n\n plt.gcf().set_facecolor(black)\n\n plt.subplot(2, 3, 1)\n plt.plot(ts, ys[:, 2], color=orange)\n plt.axhline(lin_model.yd2n(K.ysp)[1], color=white)\n plt.title(r'$C_{FA}$')\n plt.xlim([0, ts[-1]])\n plt.gca().set_facecolor(black)\n\n plt.subplot(2, 3, 2)\n plt.plot(ts, ys[:, 0], color=orange)\n plt.axhline(lin_model.yd2n(K.ysp)[0], color=white)\n plt.title(r'$C_{G}$')\n plt.xlim([0, ts[-1]])\n plt.gca().set_facecolor(black)\n\n plt.subplot(2, 3, 3)\n plt.plot(ts, ys[:, 3], color=orange)\n plt.title(r'$C_{E}$')\n plt.xlim([0, ts[-1]])\n plt.gca().set_facecolor(black)\n\n plt.subplot(2, 3, 4)\n plt.plot(ts, us[:, lin_model.inputs[1]], color=red)\n plt.title(r'$F_{m, in}$')\n plt.xlim([0, ts[-1]])\n plt.gca().set_facecolor(black)\n\n plt.subplot(2, 3, 5)\n plt.plot(ts, us[:, lin_model.inputs[0]], color=red)\n plt.title(r'$F_{G, in}$')\n plt.xlim([0, ts[-1]])\n plt.gca().set_facecolor(black)\n\n plt.subplot(2, 3, 6)\n plt.plot(\n numpy.arange(dt_control, end_time, dt_control),\n biass[:, 1],\n color=red\n )\n plt.plot(\n numpy.arange(dt_control, end_time, dt_control),\n biass[:, 0],\n color=yellow\n )\n plt.legend([r'$C_{FA}$', r'$C_G$'], facecolor=black)\n plt.title('bias')\n plt.xlim([0, ts[-1]])\n plt.gca().set_facecolor(black)\n\n # plt.suptitle('Closedloop bioreactor without noise')\n plt.tight_layout(rect=[0, 0.03, 1, 0.95])\n plt.savefig('no_noise_pretty.png', transparent=True)\n plt.show()",
"def plot_schedulability_rates_fairgen():\n modes = [\n # name, color, linestyle, marker\n ('dSMC', 'blue', 'dashed', 'o'),\n ('dAMC', 'red', 'dashed', 'd'),\n ('EDF-VD', 'green', 'dashed', 's'),\n ('pSMC', 'orange', 'dashed', '^'),\n ('pAMC-BB', 'magenta', 'dashed', 'D'),\n ('pAMC-BB+', 'purple', 'dashed', 'v')\n ]\n\n task_sets_list = pickle.load(open(task_sets_path + 'task_sets_fairgen', 'rb'))\n rates = {}\n fig = plt.figure(figsize=(12, 6), dpi=300)\n fig.suptitle('Evaluation: MC-Fairgen (n=%d)' % len(task_sets_list[0]))\n ax1 = fig.add_subplot(111)\n for name, color, linestyle, marker in modes:\n rates[name] = pickle.load(open(eval_fairgen_path + name, 'rb'))\n ax1.plot(utils, rates[name], label=name, color=color, linestyle=linestyle, marker=marker)\n ax1.set_xlabel('LO mode utilization')\n ax1.set_ylabel('Percentage of task sets schedulable')\n ax1.set_xticks([k * 0.5 for k in range(5)])\n ax1.set_yticks([k * 20 for k in range(6)])\n ax1.xaxis.set_minor_locator(ticker.AutoMinorLocator(5))\n ax1.yaxis.set_minor_locator(ticker.AutoMinorLocator(1))\n ax1.minorticks_on()\n ax1.grid(which='both', linestyle='dashed')\n\n # Plot average system util:\n avg_utils = []\n for i, _ in enumerate(utils):\n avg_utils.append(np.average([task_set.u_avg for task_set in task_sets_list[i]]))\n ax2 = ax1.twinx()\n ax2.plot(utils, avg_utils, label='Avg Sys Util (right scale)', color='black', linestyle='dashed',\n marker=None)\n ylim = ax1.get_ylim()\n ax2.set_ylim(ylim[0] / 100, ylim[1] / 100)\n ax2.set_ylabel('U(Avg)')\n\n plt.axvline(1.0, color='black', linewidth=0.8)\n lines1, labels1 = ax1.get_legend_handles_labels()\n lines2, labels2 = ax2.get_legend_handles_labels()\n plt.xlim(0.1, 2.1)\n plt.legend(lines1 + lines2, labels1 + labels2, loc='upper right')\n plt.savefig('./figures/schedulability_rates_fairgen.png')\n # plt.show()",
"def plot_comparison_GHR(data, data1):\n # Loads the different datasets\n runs = data[data.columns[0]]\n distance = data[data.columns[1]]\n\n runs1 = data1[data1.columns[0]]\n distance1 = data1[data1.columns[1]]\n\n # Forms the histogram\n plt.plot(runs, distance, label=\"Simulated Annealing\")\n plt.plot(runs1, distance1, color = 'orange', label=\"Hillclimber\")",
"def get_stability_plot(self):\n fig, ax = plt.subplots()\n first_episode = self.get_convergence_episode()\n\n values = self.stats['return_stats']['episode_totals']\n _, _, (y_lower, _) = self._moving_average(\n values, window=_ROLLING_WINDOW, p=_CONFIDENCE_LEVEL)\n episodes = np.arange(len(values))\n unstable_episodes = np.where(\n np.logical_and(values < y_lower[-1], episodes > first_episode))[0]\n\n ax.plot(episodes, values, color='steelblue', lw=2, alpha=.9,\n label='Return')\n for i, episode in enumerate(unstable_episodes):\n ax.axvline(episode, color='salmon', lw=2,\n label='Unstable' if i == 0 else None)\n ax.axvline(first_episode, color='seagreen', lw=2, label='Converged')\n\n ax.set_title('Normalized instability = {:.3f}%'.format(\n self.get_normalized_instability() * 100.))\n ax.legend()\n ax.set_ylabel('Return')\n ax.set_xlabel('Episode')\n return fig",
"def investment_line(self):\n inv, marks = self._get_marks()\n fig = plt.figure(figsize=(4, 2), dpi=200)\n fig.patch.set_facecolor('#ececec')\n ax = fig.add_subplot(111)\n investmentValues = inv['Invested']\n #investmentValues = pd.Series([0], index=[investmentValues.index[0]-timedelta(1)]).append(investmentValues)\n ax.plot(investmentValues, lw=1.2, color=\"blue\", label='Invested', marker=\"o\", markersize=3, markerfacecolor=\"grey\")\n ax.set_xlabel('Time')\n ax.set_ylabel('Investments (€)')\n ax.set_title('Investment Amount (€) - Daily')\n ax.xaxis.set_major_locator(dates.MonthLocator())\n ax.xaxis.set_major_formatter(dates.DateFormatter('%b-%Y'))\n for x, y, mark in zip(marks.index, marks['Invested'], marks['Marks']):\n a = ax.get_ylim()\n if x == marks.index[0]:\n ax.annotate(str(mark) + \" €\", xy=(x + timedelta(abs((self.data.index[0] - self.data.index[-1]).days) / 80), y + (a[1]-a[0])/35), fontsize=5)\n else:\n ax.annotate(str(mark) + \" €\", xy=(x + timedelta(abs((self.data.index[0] - self.data.index[-1]).days) / 50), y - (a[1]-a[0])/35), fontsize=5)\n ax.grid(True)\n fig.autofmt_xdate()\n ax.legend()\n return fig, ax",
"def visualize_data(total_rewards):\n\n x_values = arange(0, len(total_rewards), 1)\n y_values = total_rewards\n plot(x_values, y_values)\n xlabel('episodes')\n ylabel('cumulative rewards')\n title('Reward by Episode')\n grid(True)\n show()",
"def parity_plot(y_pred, y_act):\n\n fig = plt.figure(figsize=FIG_SIZE)\n plt.scatter(y_act, y_pred)\n plt.plot([y_act.min(), y_act.max()], [y_act.min(), y_act.max()],\n lw=4, color='r')\n plt.xlabel('Actual')\n plt.ylabel('Predicted')\n\n return fig",
"def plotThresholds (df, attack_df): \n global episod_limit\n \n ret = getThresholds (df, attack_df)\n thresholds = ret[0]\n rewards = ret[1]\n rewards_constant = ret[2]\n\n plt.plot(np.arange (0, episod_limit + 2, 1), thresholds, marker = 'None',\n linestyle = '-', color = 'k', label = 'Threshold')\n plt.xlabel ('Time')\n plt.ylabel ('Threshold')\n plt.grid ()\n plt.legend (loc='best')\n plt.savefig (\"figures/threshold.png\")\n plt.close ()\n return (rewards, rewards_constant, thresholds)",
"def plotScopes12p(day,shot):\n axs = []\n for p in range(4):\n if p == 0:\n ax = plt.subplot(4,3,3*p+1)\n plt.title(\"day %d, shot %d, scope 1\"%(day,shot))\n else:\n plt.subplot(4,3,3*p+1,sharex=ax)\n\n x = findReadData(day,1,p+1,shot)\n plt.plot(x.Time,x.Ampl)\n plt.ylabel(\"channel %d\"%(p+1))\n fudgePlotLimits(x.Time,x.Ampl)\n if p==3:\n plt.xlabel(\"time ($\\mu$s)\")\n\n plt.subplot(4,3,3*p+2,sharex=ax)\n if p == 0:\n plt.title(\"day %d, shot %d, scope 2\"%(day,shot))\n x = findReadData(day,2,p+1,shot)\n plt.plot(x.Time,x.Ampl)\n plt.ylabel(\"channel %d\"%(p+1))\n fudgePlotLimits(x.Time,x.Ampl)\n if p==3:\n plt.xlabel(\"time ($\\mu$s)\")\n\n plt.subplot(4,3,3*p+3,sharex=ax)\n if p == 0:\n plt.title(\"day %d, shot %d, scope 3\"%(day,shot))\n x = findReadData(day,3,p+1,shot)\n plt.plot(x.Time,x.Ampl)\n plt.ylabel(\"channel %d\"%(p+1))\n fudgePlotLimits(x.Time,x.Ampl)\n if p==3:\n plt.xlabel(\"time ($\\mu$s)\")",
"def draw_yield_curve(self):\n data = Bootstrapping.get_ytm_discount_data(self)\n fig = plt.figure(figsize=[10, 6])\n ax = fig.add_subplot(1, 1, 1)\n ax.plot(data['Yield to maturity'])\n ax.set_xlabel('year')\n ax.set_ylabel('rate')\n ax.set_title('Zero-coupon yield curve')\n plt.show()",
"def risk_prob_plot(climastartyear, climaendyear, forecastyear, forecastmonth, forecastday,\n stat, sta_name, wth_path, weights, weight_var, wf_year, wf_month, wf_day,\n w_leadtime, climafile, forecastfile, weightfile):\n # creating folders to put output data and plot\n if not os.path.isdir(\"./plot_output\"):\n os.makedirs(\"./plot_output\")\n if not os.path.isdir(\"./plot_output/gaussian\"):\n os.makedirs(\"./plot_output/gaussian\")\n if not os.path.isdir(\"./plot_output/ecdf\"):\n os.makedirs(\"./plot_output/ecdf\")\n \n # set up actual dates for the x axis representation\n date = dt.datetime(forecastyear, forecastmonth, forecastday).date()\n f_date = date.strftime('%d-%b-%Y')\n\n climayears = np.arange(climastartyear, climaendyear+1)\n\n # warning that certain number of years have been removed from the\n # climatology to make the length divisible by len(weight)\n ny_del = len(climayears) % len(weights)\n if ny_del != 0:\n climayears = climayears[:(len(climayears) - ny_del)]\n message = \"WARNING: The last %s year of climatology years has been removed \\n\" \\\n \"only %s ensembles are used!\" % (ny_del, len(climayears))\n print message\n else:\n climayears = climayears\n\n # reading necessary files required for tamsat-alert\n # generate weighting metric file\n # This function will make a weighting file from rainfall or temperature\n # from the given GLAM weather inputs. If one wants to weight with a different\n # variable the text file should be given in tamsat_alert directory with two\n # column 1= climayears 2= weighing metric values. File should have one line of header.\n weighting.weight_metric_prep(climayears, wth_path, sta_name, f_date, weight_var,\n wf_year, wf_month, wf_day, w_leadtime, weightfile)\n\n # read climatology time series (This file is created during crop yield forecast)\n climametric = np.genfromtxt(climafile, skip_header=1)[:, 1]\n\n # read forecast ensemble time series (This file is created during crop yield forecast)\n forecametric = np.genfromtxt(forecastfile, skip_header=1)[:, 1]\n\n # read Weighting metric time series (This file is created by weighting metric prep)\n # this only work for GLAM format so one has to write its own code or provide specific\n # file with two column 1, climayears 2, weight metric value (header must be given in the file)\n wmetric = np.genfromtxt(weightfile, skip_header=1)[:, 1]\n\n # calculating probability distribution\n # threshold probability \n thresholds = np.arange(0.01, 1.01, 0.01)\n\n # calculate the mean and sd of the climatology\n climamean = np.mean(climametric)\n climasd = np.std(climametric)\n \n # calculate the mean and sd of the the projected\n # yield based on climatology weather data\n # we need the weighted yield forecast\n projmean, projsd = weight_forecast(forecametric, wmetric, weights)\n projsd = np.maximum(projsd, 0.001) # avoid division by zero\n\n if stat == 'normal':\n # calculate the normal distribution\n probabilityyields = []\n for z in range(0, len(thresholds)):\n thres = sps.norm.ppf(thresholds[z], climamean, climasd)\n probyield = sps.norm.cdf(thres, projmean, projsd)\n probabilityyields = np.append(probabilityyields, probyield)\n del probyield \n np.savetxt('./data_output/probyield_normal.txt', probabilityyields.T, fmt='%0.2f')\n \n elif stat == 'ecdf':\n # calculate the empirical distribution\n ecdf_clima = ECDF(climametric)\n probabilityyields = []\n for z in range(0, len(ecdf_clima.x)):\n thres = ecdf_clima.x[z] # (thresholds[z])\n ecdf_proj = ECDF(forecametric)\n probyield = ecdf_proj(thres)\n 
probabilityyields = np.append(probabilityyields, probyield)\n del probyield\n np.savetxt('./data_output/probyield_ecdf.txt', probabilityyields.T, fmt='%0.2f')\n else:\n raise ValueError('Please use only \"normal\" or \"ecdf\" stat method')\n\n # Plots of results\n # Risk probability plot (original format ECB)\n sns.set_style(\"ticks\")\n fig = plt.figure(figsize=(8, 6))\n ax = plt.subplot(111)\n if stat == 'normal':\n # Plot using normal distribution\n plt.plot(thresholds*100, thresholds, '--k', lw=1, label='Climatology')\n line = plt.plot(thresholds*100, probabilityyields, 'k', lw=1, label='Projected')\n # indicating critical points\n highlight_point(ax, line[0], [thresholds[79]*100, probabilityyields[79]], 'g') # below average\n highlight_point(ax, line[0], [thresholds[59]*100, probabilityyields[59]], 'y') # below average\n highlight_point(ax, line[0], [thresholds[39]*100, probabilityyields[39]], 'm') # below average\n highlight_point(ax, line[0], [thresholds[19]*100, probabilityyields[19]], 'r') # well below average\n \n elif stat == 'ecdf':\n # Plot using empirical cumulative distribution\n plt.plot(ecdf_clima.y*100, ecdf_clima.y, '--k', lw=1, label='Climatology')\n line = plt.plot(ecdf_clima.y*100, probabilityyields, 'k', lw=1, label='Projected')\n # identifying the index for the critical points\n nn = int(round(len(climayears)/5., 0)) # this should be an integer\n wba_i = nn \n ba_i = (nn * 2) \n a_i = (nn * 3) \n av_i = (nn * 4) \n # indicating critical points\n highlight_point(ax, line[0], [ecdf_clima.y[av_i]*100, probabilityyields[av_i]], 'g') # below average\n highlight_point(ax, line[0], [ecdf_clima.y[a_i]*100, probabilityyields[a_i]], 'y') # below average\n highlight_point(ax, line[0], [ecdf_clima.y[ba_i]*100, probabilityyields[ba_i]], 'm') # below average\n highlight_point(ax, line[0], [ecdf_clima.y[wba_i]*100, probabilityyields[wba_i]], 'r') # well below average\n \n else:\n raise ValueError('Please use only \"normal\" or \"ecdf\" stat method')\n\n plt.title('Theme: Probability of yield estimate (against ' + str(climastartyear) + '-' + str(climaendyear) +\n ' climatology)\\nLocation: ' + sta_name + '\\nForecast date: ' + f_date, loc='left', fontsize=14)\n plt.xlabel('Climatology', fontsize=14)\n plt.ylabel('Probability <= Climatological percentile', fontsize=14)\n plt.yticks(fontsize=14)\n plt.xticks(fontsize=14)\n plt.legend()\n plt.tight_layout()\n if stat == 'normal': \n path = './plot_output/gaussian/'\n elif stat == 'ecdf':\n path = './plot_output/ecdf/'\n else:\n raise ValueError('Please use only \"normal\" or \"ecdf\" stat method')\n fig.savefig(path + sta_name+'_'+f_date+'_yieldprob.png', dpi=300)\n plt.close()\n\n # Risk probability plot (Pentiles bar plot format DA)\n pp = []\n sns.set_style(\"ticks\")\n fig = plt.figure(figsize=(8, 6))\n if stat == 'normal':\n verylow = probabilityyields[19] \n low = probabilityyields[39] - verylow\n average = probabilityyields[59] - (verylow+low) \n high = probabilityyields[79] - (verylow+low+average)\n veryhigh = 1 - (verylow+low+average+high)\n elif stat == 'ecdf':\n\n # identifying the index for the critical points\n nn = int(round(len(climayears)/5., 0)) # this should be an integer\n wba_i = nn \n ba_i = (nn * 2) \n a_i = (nn * 3) \n av_i = (nn * 4) \n \n verylow = probabilityyields[wba_i] \n low = probabilityyields[ba_i] - probabilityyields[wba_i] # verylow\n average = probabilityyields[a_i] - probabilityyields[ba_i] # (verylow+low)\n high = probabilityyields[av_i] - probabilityyields[a_i] # (verylow+low+average)\n veryhigh = 
1 - probabilityyields[av_i] # (verylow+low+average+high)\n else:\n raise ValueError('Please use only \"normal\" or \"ecdf\" stat method') \n\n val = [verylow, low, average, high, veryhigh] # the bar lengths\n pos = np.arange(5)+.5 # the bar centers on the y axis\n plt.barh(pos[0], val[0]*100, align='center', color='r', label='Very low (0-20%)')\n plt.barh(pos[1], val[1]*100, align='center', color='m', label='Low (20-40%)')\n plt.barh(pos[2], val[2]*100, align='center', color='grey', label='Average (40-60%)')\n plt.barh(pos[3], val[3]*100, align='center', color='b', label='High (60-80%)')\n plt.barh(pos[4], val[4]*100, align='center', color='g', label='Very high (80-100%)')\n \n plt.annotate(str(round(val[0]*100, 1))+'%', ((val[0]*100)+1, pos[0]), xytext=(0, 1), textcoords='offset points', fontsize=20)\n plt.annotate(str(round(val[1]*100, 1))+'%', ((val[1]*100)+1, pos[1]), xytext=(0, 1), textcoords='offset points', fontsize=20)\n plt.annotate(str(round(val[2]*100, 1))+'%', ((val[2]*100)+1, pos[2]), xytext=(0, 1), textcoords='offset points', fontsize=20)\n plt.annotate(str(round(val[3]*100, 1))+'%', ((val[3]*100)+1, pos[3]), xytext=(0, 1), textcoords='offset points', fontsize=20)\n plt.annotate(str(round(val[4]*100, 1))+'%', ((val[4]*100)+1, pos[4]), xytext=(0, 1), textcoords='offset points', fontsize=20)\n\n plt.yticks(pos, ('Very low', 'Low', 'Average', 'High', 'Very high'), fontsize=14)\n plt.xticks(fontsize=14)\n plt.xlabel('Probability', fontsize=14)\n plt.title('Theme: Probability of yield estimate (against ' + str(climastartyear) + '-' + str(climaendyear) +\n ' climatology)\\nLocation: ' + sta_name+'\\nForecast date: ' + f_date, loc='left', fontsize=14)\n plt.xlim(0, 101)\n plt.legend()\n plt.tight_layout()\n if stat == 'normal': \n path = './plot_output/gaussian/'\n elif stat == 'ecdf':\n path = './plot_output/ecdf/'\n else:\n raise ValueError('Please use only \"normal\" or \"ecdf\" stat method')\n\n # append the probabilities to pp\n pp = np.append(pp, round(val[0]*100, 1))\n pp = np.append(pp, round(val[1]*100, 1))\n pp = np.append(pp, round(val[2]*100, 1))\n pp = np.append(pp, round(val[3]*100, 1))\n pp = np.append(pp, round(val[4]*100, 1))\n \n fig.savefig(path + sta_name+'_'+f_date+'_pentile.png', dpi=300)\n plt.close()\n \n # save the probabilities of each category on a text file\n headval = '1 = Very low(0-20%) 2 = Low(20-40%) 3 = Average(40-60%) 4 = High(60-80%) 5 = Very high(80-100%)\\n\\\nCategory Probability'\n category = [1, 2, 3, 4, 5]\n rp = np.array([category, pp])\n rp = rp.T\n np.savetxt('./data_output/RiskProbability.txt', rp, delimiter=' ', header=headval, fmt='%i %6.2f')\n\n # probability density plot\n sns.set_style(\"ticks\")\n fig = plt.figure(figsize=(8, 6))\n if stat == 'normal':\n # Plot using normal distribution\n sns.kdeplot(climametric, bw=10, shade=True, label='Climatology', cumulative=False)\n sns.kdeplot(forecametric, bw=10, shade=False, color='g', label='Projected', cumulative=False)\n\n elif stat == 'ecdf':\n # Plot using empirical cumulative distribution\n sns.kdeplot(climametric, bw=10, shade=True, label='Climatology', cumulative=False)\n sns.kdeplot(forecametric, bw=10, shade=False, label='Projected', cumulative=False)\n\n else:\n raise ValueError('Please use only \"normal\" or \"ecdf\" stat method')\n plt.title('Theme: Probability of yield estimate (against ' + str(climastartyear)+'-' + str(climaendyear) +\n ' climatology)\\nLocation: ' + sta_name + '\\nForecast date: ' + f_date, loc='left', fontsize=14)\n plt.xlabel('Yield (Kg/ha)', 
fontsize=14)\n plt.ylabel('Probability density', fontsize=14)\n plt.yticks(fontsize=14)\n plt.xticks(fontsize=14)\n plt.legend()\n plt.tight_layout()\n if stat == 'normal': \n path = './plot_output/gaussian/'\n elif stat == 'ecdf':\n path = './plot_output/ecdf/'\n else:\n raise ValueError('Please use only \"normal\" or \"ecdf\" stat method')\n fig.savefig(path + sta_name + '_' + f_date + '_ked_plot.png', dpi=300)\n plt.close()\n\n # histogram plot\n sns.set_style(\"ticks\")\n fig = plt.figure(figsize=(8, 6))\n binboundaries = np.linspace(min(forecametric)-(0.01*(min(forecametric))), max(forecametric)+(0.01*(max(forecametric))), 10)\n sns.distplot(forecametric, bins=binboundaries, hist=True, kde=False, label=f_date, hist_kws={\"color\": \"b\"})\n plt.xlabel('Yield ($\\mathregular{Kg ha^{-1}}$)', fontsize=14)\n plt.ylabel('Frequency', fontsize=14)\n plt.title('Theme: Probability of yield estimate (against ' + str(climastartyear) + '-' + str(climaendyear) +\n ' climatology)\\nLocation: ' + sta_name + '\\nForecast date: ' + f_date, loc='left', fontsize=14)\n plt.xticks(fontsize=14)\n plt.yticks(fontsize=14)\n plt.xlim(min(forecametric)-(0.01*(min(forecametric))), max(forecametric)+(0.01*(max(forecametric))))\n plt.ylim(0, len(forecametric)+1)\n plt.tight_layout() \n fig.savefig(path + sta_name + '_' + f_date + '_hist_plot.png', dpi=300)\n plt.close()\n\n # plot additional variables of the input data\n cum_plots(climastartyear, climaendyear, forecastyear, sta_name, wth_path, weights)\n return pp"
] | [
"0.62412286",
"0.6003811",
"0.5952074",
"0.59471977",
"0.5879272",
"0.5722639",
"0.55827194",
"0.5509448",
"0.54747474",
"0.54674083",
"0.5467399",
"0.54673696",
"0.54360753",
"0.5435962",
"0.5407168",
"0.540194",
"0.5389398",
"0.53769875",
"0.537178",
"0.53634727",
"0.5351095",
"0.53479415",
"0.5337056",
"0.53301865",
"0.5329621",
"0.53256965",
"0.5325542",
"0.5295568",
"0.52933353",
"0.52914006"
] | 0.6439876 | 0 |
Plot the residuals between measured and predicted values. | def residuals(y_true, y_pred, ax=None):
_check_parameter_validity(y_true, y_pred)
if ax is None:
ax = plt.gca()
# horizontal line for residual=0
ax.axhline(y=0)
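    # residuals (y_true - y_pred) against the predicted values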
ax.scatter(y_pred, y_true - y_pred)
_set_ax_settings(ax, "Predicted Value", "Residuals", "Residuals Plot")
return ax | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _plot_resid_vs_fitted(self, ax):\n\n res = self._model.fit()\n\n ax.plot(res.fittedvalues, res.resid, '.')\n ax.set_xlabel('Fitted ' + self._model.endog_names)\n ax.set_ylabel('Raw residual')\n plt.sca(ax)\n plt.axhline(color='k')",
"def residual_plot(targets, predictions, filename):\n\tfig, ax = plt.subplots()\n\tfig.suptitle(str(targets.shape[0]) + ' samples, Residual Plot', fontsize=12)\n\tresiduals = targets - predictions\n\taxes = plt.gca()\n\taxes.set_ylim(np.min(residuals), np.max(residuals))\n\taxes.set_xlim(np.min(predictions), np.max(predictions))\n\tax.scatter(predictions, residuals, edgecolors=(0, 0, 0))\n\tax.set_xlabel('Predictions')\n\tax.set_ylabel('Residuals')\n\tplt.savefig(filename)\n\tplt.close()",
"def draw_residual_plot(x: pd.Series, y: pd.Series, x_label: str, y_label: str):\n\n plt.title(\"Residual plot of '%s' and '%s'\" % (x_label, y_label))\n sns.residplot(x=x, y=y, scatter_kws={\"s\": 80})\n plt.show()",
"def _plot_residuals(self, y, yhat, _id):\n try:\n assert self.model_fit is not None\n except AssertionError:\n self._uvts_cls_logger.exception(\"Model has to be fitted first! Please call ts_fit(...)\")\n\n fig, axes = plt.subplots(2, 1, figsize=(20, 5), sharex=True)\n\n axes[0].plot(pd.Series(yhat, index=self._train_dt.index), color='y', linewidth=2.0)\n axes[0].plot(pd.Series(y, index=self._train_dt.index), color='b')\n\n axes[0].set_ylabel(\"Model Fit\")\n axes[0].set_title(\"Real (blue) and estimated values, \" + str(_id))\n #\n axes[1].plot(self.residuals, color=\"r\")\n \"\"\"\n if self.forecast is not None and self.residuals_forecast is None \\\n and self.lower_conf_int is not None and self.upper_conf_int is not None:\n axes[0].fill_between(self.lower_conf_int.index, self.lower_conf_int, self.upper_conf_int, color='k',\n alpha=.15)\n \"\"\"\n if self.lower_conf_int is not None and self.upper_conf_int is not None:\n axes[0].fill_between(self.lower_conf_int.index, self.lower_conf_int, self.upper_conf_int, color='k',\n alpha=.15)\n if self.upper_whisker_res is not None:\n axes[1].axhline(y=self.upper_whisker_res, xmin=0, xmax=1, color='m', label='upper_whisker', linestyle='--',\n linewidth=1.5)\n axes[1].axhline(y=-self.upper_whisker_res, xmin=0, xmax=1, color='m', label='upper_whisker', linestyle='--',\n linewidth=1.5)\n\n axes[1].set_ylabel('Residuals')\n axes[1].set_title('Difference between model output and the real data and +/- upper whisker, ' + str(_id))\n\n return fig, axes",
"def plot_residuals(self, series):\n model = ARIMA(series, order=(self.p, self.d, self.q))\n model_fit = model.fit(disp=0)\n print(model_fit.summary())\n\n residuals = pd.DataFrame(model_fit.resid)\n residuals.plot()\n plt.title(self.ticker + ' ARIMA residuals')\n plt.savefig('plots/ARIMA/{0}Resid_{1}{2}{3}.pdf'.format(self.ticker, self.p, self.d, self.q))\n plt.close()\n print(residuals.describe())",
"def residual_vs_actual(\n y_true: ArrayLike | str,\n y_pred: ArrayLike | str,\n df: pd.DataFrame | None = None,\n ax: plt.Axes | None = None,\n xlabel: str = r\"Actual value\",\n ylabel: str = r\"Residual ($y_\\mathrm{true} - y_\\mathrm{pred}$)\",\n **kwargs: Any,\n) -> plt.Axes:\n y_true, y_pred = df_to_arrays(df, y_true, y_pred)\n assert isinstance(y_true, np.ndarray)\n assert isinstance(y_pred, np.ndarray)\n ax = ax or plt.gca()\n\n y_err = y_true - y_pred\n\n ax.plot(y_true, y_err, \"o\", alpha=0.5, label=None, mew=1.2, ms=5.2, **kwargs)\n ax.axline(\n [1, 0], [2, 0], linestyle=\"dashed\", color=\"black\", alpha=0.5, label=\"ideal\"\n )\n\n ax.set(xlabel=xlabel, ylabel=ylabel)\n ax.legend(loc=\"lower right\")\n\n return ax",
"def regression_analysis(cls, y_true, y_pred, path=None):\n residual = y_true - y_pred\n print(\"Histogram\")\n cls.histogram(residual, \"Residual\")\n print(\"Scatter\")\n cls.scatter_plot(y_pred, residual, \"pred\", \"residual\", path=path)\n print(\"Scatter\")\n cls.scatter_plot( y_true, y_pred, \"y_test\", \"pred\", path=path)",
"def plot_actual_predicted(self):\n predicted = [self.f(x, self.coefficients) for x in self.x_values]\n\n plt.scatter(self.x_values, self.y_values, label = \"Actual data\", c = 'b')\n plt.plot(self.x_values, predicted, label = \"Predicted data\", c = 'r')\n plt.title(f\"Graph of Prediected and Actual data points.\")\n plt.xlabel('x-axis')\n plt.ylabel('y-axis')\n plt.legend()\n plt.show()",
"def plotPredictedError():\n\tglobal normalized\n\n\twarmthPred = []\n\twarmthObserved = []\n\tcompPred = []\n\tcompObserved = []\n\tSStotalWarmth = 0\n\tSSresWarmth = 0\n\tSStotalComp = 0\n\tSSresComp = 0\n\tkeys = parser.getMappings(normalized)[0].keys()\n\tfor key in keys:\n\n\t\tif \"_\" in key:\n\t\t\twarmthAxis, compAxis = getPlotData(key)\n\t\t\twarmthPred.append(warmthAxis[3])\n\t\t\twarmthObserved.append(warmthAxis[2])\n\t\t\tcompPred.append(compAxis[3])\n\t\t\tcompObserved.append(compAxis[2])\n\n\tmeanObservedWarmth = np.mean(warmthObserved)\n\tmeanObservedComp = np.mean(compObserved)\n\tfor i in range(0, len(warmthObserved)):\n\t\tSStotalWarmth += (warmthObserved[i] - meanObservedWarmth)**2\n\t\tSSresWarmth += (warmthObserved[i] - warmthPred[i])**2\n\t\tSStotalComp += (compObserved[i] - meanObservedComp)**2\n\t\tSSresComp += (compObserved[i] - compPred[i])**2\n\n\n\tplt.axis([0, 100, 0, 100])\n\tfig = plt.figure(1)\n\tax = fig.add_subplot(111)\n\tslope, intercept, r_value, p_value, std_err = stats.linregress(warmthObserved, warmthPred)\n\tprint(r_value**2)\n\ttext = ax.text(60, 20, \"R^2 value: \" + str(r_value**2) , \\\n fontsize = 12, color = 'black')\n\tplt.title(\"Observed vs Predicted Warmth\")\n\tplt.ylabel(\"Predicted Value\")\n\tplt.xlabel(\"Observed Value\")\n\tplt.scatter(warmthObserved, warmthPred)\n\tplt.plot([0,100], [0,100])\n\tplt.show()\n\n\tfig = plt.figure(1)\n\tax = fig.add_subplot(111)\n\tslope, intercept, r_value, p_value, std_err = stats.linregress(compObserved, compPred)\n\tprint(r_value**2)\n\ttext = ax.text(60, 20, \"R^2 value: \" + str(r_value**2) , \\\n fontsize = 12, color = 'black')\n\tplt.axis([0, 100, 0, 100])\n\tplt.title(\"Observed vs Predicted Competence\")\n\tplt.ylabel(\"Predicted Value\")\n\tplt.xlabel(\"Observed Value\")\n\tplt.scatter(compObserved, compPred)\n\tplt.plot([0,100], [0,100])\n\tplt.show()",
"def analysis_plot(predictions, ys):\n fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(20, 5))\n\n residuals = ys - predictions\n\n # Plot 1 - Predicted vs Actual\n sns.scatterplot(predictions, ys, ax=ax1)\n ax1.set_title('Predicted vs Actual', fontsize=20)\n ax1.set(xlabel='Predicted Ys', ylabel='Actual Ys')\n\n # Plot 2 - Residuals PLot (predicted vs residuals)\n sns.scatterplot(predictions, residuals, ax=ax2)\n ax2.set_title('Residuals Plot', fontsize=20)\n ax2.set(xlabel='Predicted Ys', ylabel='Residuals')\n\n # Plot 3 - QQ Plot\n sm.qqplot(residuals, ax=ax3, line='s')\n ax3.set_title('QQ Plot- Distribution of Residuals', fontsize=20)\n\n plt.show();",
"def plot_residuals(turnstile_weather, predictions):\n plt.figure()\n (turnstile_weather['ENTRIESn_hourly'] - predictions).hist()\n return plt",
"def plot_true_predicted(train_test_sets, radii_test_RF,\n radii_test_output_error):\n\n X_train, X_test, y_train, y_test = train_test_sets\n plt.figure()\n plt.errorbar(radii_test_RF, y_test.values,\n xerr=radii_test_output_error,\n fmt='.', c='C1', elinewidth=0.5,\n label='Random forest')\n # 1:1 line and labels\n plt.plot(np.sort(y_test.values), np.sort(y_test.values), 'k-', lw=0.25)\n\n plt.ylabel(r'True radius ($R_\\oplus$)')\n plt.ylabel(r'Predicted radius ($R_\\oplus$)')\n plt.legend(loc='lower right')\n return None",
"def _single_residual_plot(self, scatter_data, color):\n # figure\n p = default_figure(\n {\n \"tools\": \"pan,wheel_zoom,box_zoom,reset\",\n \"toolbar_location\": \"right\",\n \"width\": 900,\n \"height\": 300\n }\n )\n\n # Residuals\n p.scatter(scatter_data[0], scatter_data[1], color=color, size=10, fill_alpha=0.8)\n\n # Baseline\n baseline = Span(location=0, dimension=\"width\", line_color=self.plot_design.models_dummy_color, line_width=2)\n p.add_layout(baseline)\n\n # plot specific styling\n p.xaxis.axis_label = \"Predicted\"\n p.yaxis.axis_label = \"Residual\"\n\n formatter = FuncTickFormatter(code=self._formatter_code) # negative numbers are having a wacky formatting\n # formatters must be created independently, cannot be reused\n p.xaxis.formatter = formatter\n p.yaxis.formatter = formatter\n\n p.toolbar.autohide = True\n\n return p",
"def _plot_resid_vs_time(self, ax):\n\n res = self._model.fit()\n\n ax.plot(res.resid.index, res.resid, '.')\n ax.set_xlabel('Time')\n ax.set_ylabel('Residual')\n\n plt.sca(ax)\n plt.axhline(color='k')",
"def plot_residuals(result_file,output_fig=None,fig=None,**kwargs):\n \n \n #### Read file\n \n dic_result=file2dic(result_file)\n\n ### Get xbin range\n \n rms_val=np.append(dic_result['RMS_P'],dic_result['RMS_S'])\n x_bins=np.linspace(np.min(rms_val),np.max(rms_val),100)\n \n ### Plots\n \n if fig is None:\n fig, (ax1, ax2) = plt.subplots(2, sharex=True)\n else: \n (ax1,ax2)=fig.get_axes()\n \n ax1.hist(dic_result['RMS_P'],x_bins,edgecolor='black',**kwargs)\n ax1.set_title('RMS for %i models tested'%(len(dic_result['RMS_P'])))\n ax1.set_ylabel('P')\n ax1.tick_params('x',direction='in')\n ax2.hist(dic_result['RMS_S'],x_bins,edgecolor='black',**kwargs)\n ax2.invert_yaxis()\n ax2.set_xlabel('RMS [s]')\n ax2.set_ylabel('S')\n \n fig.subplots_adjust(hspace=0)\n plt.setp([a.get_xticklabels() for a in fig.axes[:-1]], visible=False)\n \n if output_fig:\n plt.savefig(output_fig)\n \n \n return fig",
"def plot_observed_predicted(y_data, y_predict, ols_line=False, model_fit=None, figsize=(15, 10), save=False, end_name_fig='', folder='Charts'): \r\n\r\n end_name_fig = end_name_fig + '_' if end_name_fig is not None else ''\r\n\r\n fig, ax = plt.subplots(figsize=figsize)\r\n ax.scatter(y_data, y_predict)\r\n \r\n if ols_line == False:\r\n ax.plot([y_data.min(), y_data.max()], [y_data.min(), y_data.max()], 'k--', lw=4)\r\n\r\n else:\r\n line_fit = sm.OLS(y_data, sm.add_constant(y_predict, prepend=True)).fit()\r\n abline_plot(model_results=line_fit, ax=ax)\r\n\r\n ax.set_title('Predicted vs Observed')\r\n ax.set_ylabel('Observed values')\r\n ax.set_xlabel('Predicted values')\r\n\r\n if save == True:\r\n plt.savefig(folder + '/predict_observed_' + end_name_fig + '.png')\r\n\r\n if model_fit is not None:\r\n \r\n fig, ax = plt.subplots(figsize=figsize)\r\n ax.scatter(y_predict, model_fit.resid_pearson)\r\n ax.hlines(0, 0, 1)\r\n ax.set_xlim(0, 1)\r\n ax.set_title('Residual Dependence Plot')\r\n ax.set_ylabel('Pearson Residuals')\r\n ax.set_xlabel('Fitted values') \r\n\r\n if save == True:\r\n plt.savefig(folder + '/pearson_residuals_' + end_name_fig + '.png')\r\n\r\n\r\n fig, ax = plt.subplots(figsize=figsize)\r\n res_dev_residuals = model_fit.resid_deviance.copy()\r\n res_dev_residuals_std = stats.zscore(res_dev_residuals)\r\n ax.hist(res_dev_residuals_std, bins=25)\r\n ax.set_title('Histogram of standardized deviance residuals')\r\n\r\n if save == True:\r\n plt.savefig(folder + '/standard_deviance_residuals_' + end_name_fig + '.png')\r\n\r\n graphics.gofplots.qqplot(res_dev_residuals, line='r')\r\n\r\n if save == True:\r\n plt.savefig(folder + '/gofplot_' + end_name_fig + '.png')",
"def _plotModelResiduals(id='simulated800nmJoint1', folder='results/', out='Residual.pdf', individual=False):\n #data\n if individual:\n data = pf.getdata(folder+id+'small.fits')\n data[data < 1] = 1.\n data = np.log10(data)\n else:\n data = pf.getdata(folder+id+'datafit.fits')\n data[data < 1] = 1.\n data = np.log10(data)\n #model\n model = pf.getdata(folder+id+'model.fits ')\n model[model < 1] = 1.\n model = np.log10(model)\n #residual\n residual = pf.getdata(folder+id+'residual.fits')\n #squared residual\n residualSQ = pf.getdata(folder+id+'residualSQ.fits')\n\n max = np.max((data.max(), model.max()))\n\n #figure\n fig = plt.figure(figsize=(12, 12))\n ax1 = fig.add_subplot(221)\n ax2 = fig.add_subplot(222)\n ax3 = fig.add_subplot(223)\n ax4 = fig.add_subplot(224)\n ax = [ax1, ax2, ax3, ax4]\n fig.subplots_adjust(hspace=0.05, wspace=0.3, top=0.95, bottom=0.02, left=0.02, right=0.9)\n ax1.set_title('Data')\n ax2.set_title('Model')\n ax3.set_title('Residual')\n ax4.set_title('$L^{2}$ Residual')\n\n im1 = ax1.imshow(data, interpolation='none', vmax=max, origin='lower', vmin=0.1)\n im2 = ax2.imshow(model, interpolation='none', vmax=max, origin='lower', vmin=0.1)\n im3 = ax3.imshow(residual, interpolation='none', origin='lower', vmin=-100, vmax=100)\n im4 = ax4.imshow(residualSQ, interpolation='none', origin='lower', vmin=0., vmax=10)\n\n divider = make_axes_locatable(ax1)\n cax1 = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n divider = make_axes_locatable(ax2)\n cax2 = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n divider = make_axes_locatable(ax3)\n cax3 = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n divider = make_axes_locatable(ax4)\n cax4 = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n cbar1 = plt.colorbar(im1, cax=cax1)\n cbar1.set_label(r'$\\log_{10}(D_{i, j} \\quad [e^{-}]$)')\n cbar2 = plt.colorbar(im2, cax=cax2)\n cbar2.set_label(r'$\\log_{10}(M_{i, j} \\quad [e^{-}]$)')\n cbar3 = plt.colorbar(im3, cax=cax3)\n cbar3.set_label(r'$M_{i, j} - D_{i, j} \\quad [e^{-}]$')\n cbar4 = plt.colorbar(im4, cax=cax4)\n cbar4.set_label(r'$\\frac{(M_{i, j} - D_{i, j})^{2}}{\\sigma_{CCD}^{2}}$')\n\n for tmp in ax:\n plt.sca(tmp)\n plt.xticks(visible=False)\n plt.yticks(visible=False)\n\n plt.savefig(out)\n plt.close()",
"def plot_regression(e_predict, e_true, ax=None, label='', e_max=0.5):\n ax = ax or plt.gca()\n ax.scatter(e_true, e_predict, lw=0, alpha=0.5, s=10)\n ax.set_xlabel('True shear')\n ax.set_ylabel('Predicted shear')\n ax.set_xlim(0, e_max)\n ax.set_ylim(0, e_max)\n ax.plot([0, e_max], [0, e_max], 'r--')\n R2 = sklearn.metrics.r2_score(e_true, e_predict)\n ax.text(0.05, 0.9, f'{label}$R^2 = {R2:.2f}$',\n transform=ax.transAxes, fontsize=16, color='r')",
"def plot_actual_vs_predicted_by_equations(df, x_variable, y_variables, plot_title):\n #Plot results\n df.plot(x=x_variable, y=y_variables, title=plot_title)\n plt.show()",
"def plot_regression_results(ax, y_true, y_pred, title, scores, elapsed_time):\n ax.plot([y_true.min(), y_true.max()],\n [y_true.min(), y_true.max()],\n '--r', linewidth=2)\n ax.scatter(y_true, y_pred, alpha=0.2)\n\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.get_xaxis().tick_bottom()\n ax.get_yaxis().tick_left()\n ax.spines['left'].set_position(('outward', 10))\n ax.spines['bottom'].set_position(('outward', 10))\n ax.set_xlim([y_true.min(), y_true.max()])\n ax.set_ylim([y_true.min(), y_true.max()])\n ax.set_xlabel('Measured')\n ax.set_ylabel('Predicted')\n extra = plt.Rectangle((0, 0), 0, 0, fc=\"w\", fill=False,\n edgecolor='none', linewidth=0)\n ax.legend([extra], [scores], loc='upper left')\n title = title + '\\n Evaluation in {:.2f} seconds'.format(elapsed_time)\n ax.set_title(title)",
"def plot_errors(self):\n\n plt.title(\"Prediction Error\")\n plt.plot(self.errors)\n plt.ylabel(\"MSE (Mean Squared Error)\")\n plt.xlabel(\"Iteration\")\n plt.show()",
"def plot_predictions(self):\n\n plt.title(\"Targets vs. Predictions\")\n plt.plot(self.T, label=\"Targets\")\n plt.plot(self.Y, label=\"Predictions\")\n plt.xlabel(\"Sample number\")\n plt.legend()\n plt.show()",
"def report(self, X, y):\n predict = self.model.predict(X)\n\n skplt.estimators.plot_feature_importances(\n self.model, x_tick_rotation=90)\n plt.show()\n\n fig, ax = plt.subplots(figsize=(7, 7))\n sns.scatterplot(x=y, y=predict)\n lims = [\n np.min([ax.get_xlim(), ax.get_ylim()]), # min of both axes\n np.max([ax.get_xlim(), ax.get_ylim()]), # max of both axes\n ]\n ax.plot(lims, lims, 'k-', alpha=0.75, zorder=0)\n ax.set_aspect('equal')\n ax.set_xlim(lims)\n ax.set_ylim(lims)\n ax.set_xlabel(\"Observed\")\n ax.set_ylabel(\"Predict\")\n ax.set_title(\"Predict vs. Observed\")\n plt.show()\n\n residuals = y - predict\n\n fig, ax = plt.subplots(figsize=(7, 7))\n sns.scatterplot(x=y, y=residuals)\n plt.title(\"Residuals vs. Observed\")\n plt.xlabel(\"Obserbed\")\n plt.ylabel(\"Residuals\")\n plt.show()\n\n plt.hist(residuals)\n plt.title(\"Residuals distribution\")\n plt.xlabel(\"Residuals value\")\n plt.ylabel(\"Count\")\n plt.show()\n\n display(\n pd.DataFrame({\n \"explained_variance_score\":\n metrics.explained_variance_score(y, predict),\n \"mean_absolute_error\":\n metrics.mean_absolute_error(y, predict),\n \"mean_squared_log_error\":\n metrics.mean_squared_log_error(y, predict),\n \"median_absolute_error\":\n metrics.median_absolute_error(y, predict),\n \"r2_score\":\n metrics.r2_score(y, predict)\n },\n index=[0]))",
"def residual_plot(self, residual_tuples):\n residual_tuples = assess_models_names(residual_tuples)\n _ = []\n i = 0\n for model, scatter_points in residual_tuples:\n if i == 0:\n color = self.plot_design.models_color_tuple[0]\n i += 1\n else:\n color = self.plot_design.models_color_tuple[1]\n plot = self._single_residual_plot(scatter_points, color)\n _.append(Panel(child=plot, title=model))\n\n main_plot = Tabs(tabs=_)\n return main_plot",
"def _plot_model_pred_vs_obs(self, ax):\n\n res = self._model.fit()\n\n ax.plot(self._model.endog, res.fittedvalues, '.', label='Observation')\n\n x_lim = ax.get_xlim()\n\n ax.plot(x_lim, x_lim, 'k:', label='1:1 line')\n\n x_label = 'Observed ' + self._model.endog_names\n y_label = 'Predicted ' + self._model.endog_names\n\n ax.set_xlabel(x_label)\n ax.set_ylabel(y_label)\n\n ax.legend(loc='best', numpoints=1)",
"def _projected_residuals(model, plot_width=400, plot_height=400):\n warnings.warn(\"This visualization are deprecated.\", DeprecationWarning)\n hover = HoverTool(tooltips=[(\"#SampleID\", \"@index\")])\n pcvar = model.percent_explained()\n resid = model.residuals()\n p = figure(plot_width=plot_width, plot_height=plot_height,\n tools=[hover, BoxZoomTool(), ResetTool(),\n WheelZoomTool(), SaveTool(), PanTool()])\n resid_source = ColumnDataSource(resid)\n\n p.circle(resid.columns[0], resid.columns[1], size=7,\n source=resid_source, fill_color='blue', legend='residuals')\n\n p.title.text = 'Projected Residuals'\n p.title_location = 'above'\n p.xaxis.axis_label = '{} ({:.2%})'.format(pcvar.index[0], pcvar.iloc[0])\n p.yaxis.axis_label = '{} ({:.2%})'.format(pcvar.index[1], pcvar.iloc[1])\n return p",
"def plot_inv_rv_dep(\n outpath,\n inv_RVs,\n alavs,\n alav_uncs,\n waves,\n plot_waves,\n slopes,\n intercepts,\n dense,\n norm=\"V\",\n):\n fig, ax = plt.subplots(\n len(plot_waves), figsize=(7, len(plot_waves) * 4), sharex=True\n )\n if norm != \"V\":\n norm = str(norm) + r\"\\mu m\"\n\n for j, wave in enumerate(plot_waves):\n indx = np.abs(waves - wave).argmin()\n # plot the data and the fitted line, give the dense sightlines a different color and marker\n handle1 = ax[j].errorbar(\n inv_RVs[dense][:, 0] - 1 / 3.1,\n alavs[indx][dense],\n xerr=(\n inv_RVs[dense][:, 1],\n inv_RVs[dense][:, 2],\n ),\n yerr=alav_uncs[indx][dense],\n ms=5,\n fmt=\"s\",\n color=\"magenta\",\n elinewidth=0.5,\n )\n handle2 = ax[j].errorbar(\n inv_RVs[~dense][:, 0] - 1 / 3.1,\n alavs[indx][~dense],\n xerr=(\n inv_RVs[~dense][:, 1],\n inv_RVs[~dense][:, 2],\n ),\n yerr=alav_uncs[indx][~dense],\n ms=5,\n fmt=\"ok\",\n elinewidth=0.5,\n )\n xs = np.arange(-0.155, 0.12, 0.01)\n ax[j].plot(\n xs,\n slopes[indx] * xs + intercepts[indx],\n color=\"forestgreen\",\n ls=\"-\",\n label=r\"$%5.3f\\, %+5.3f\\, [1/R(V)-1/3.1]$\"\n % (intercepts[indx], slopes[indx]),\n )\n ax[j].set_ylabel(r\"$A(\" + \"{:1.2f}\".format(wave) + \"\\mu m)/A(\" + norm + \")$\")\n ax[j].legend(loc=\"lower left\", fontsize=fs * 0.8)\n\n # add literature curves, only if normalized to A(V)\n if norm == \"V\":\n styles = [\"--\", \":\"]\n for i, cmodel in enumerate([CCM89, F19]):\n if wave > 3.3:\n continue\n yvals = []\n for xval in xs:\n ext_model = cmodel(Rv=1 / (xval + 1 / 3.1))\n yvals.append(ext_model(wave * u.micron))\n ax[j].plot(\n xs,\n yvals,\n lw=1.5,\n ls=styles[i],\n alpha=0.8,\n )\n line_handles = [\n Line2D([0], [0], color=\"tab:blue\", lw=1.5, ls=\"--\"),\n Line2D([0], [0], color=\"tab:orange\", lw=1.5, ls=\":\"),\n ]\n line_labels = [\n \"Cardelli et al. (1989)\",\n \"Fitzpatrick et al. (2019)\",\n ]\n\n # finalize the plot\n plt.xlabel(r\"$\\frac{1}{R(V)} - \\frac{1}{3.1}$\", fontsize=fs * 1.3)\n plt.subplots_adjust(hspace=0)\n handles = [handle1, handle2]\n labels = (\"dense\", \"diffuse\")\n fig.legend(handles, labels, bbox_to_anchor=(0.4, 0.19), fontsize=fs * 0.8)\n if norm == \"V\":\n fig.legend(\n line_handles, line_labels, bbox_to_anchor=(0.65, 0.8), fontsize=fs * 0.8\n )\n plt.savefig(\n outpath + \"inv_RV_dep\" + norm.split(\"\\\\\")[0] + \".pdf\", bbox_inches=\"tight\"\n )",
"def interpPlot(self):\n self.expInt, a = self.interpData(None, self.expData)\n self.simInt, b = self.interpData(self.optimSim)\n self.residual = abs(self.expInt.data - self.simInt.data)\n\n plt.figure()\n self.sasPlot(self.expInt, sim=self.simInt.data, resid=self.residual)\n\n return",
"def plotPrefitResiduals(self, R_obs_noise, labels, units, colors, filename):\n obs_time_vec = self._obs_time_vec\n prefit = self._prefit_res_vec\n\n nmbrTotalObs = obs_time_vec.size\n nmbrObs = np.shape(R_obs_noise)[0] # Number of observations per unit time\n\n plt.figure()\n plt.hold(True)\n for i in range(0, nmbrObs):\n prefit_obs_i = prefit[:,i]\n prefit_RMS = np.sqrt(np.sum(prefit_obs_i**2)/nmbrTotalObs)\n plt.plot(obs_time_vec, prefit_obs_i/np.sqrt(R_obs_noise[i,i]), '.', color=colors[i], label= labels[i] + ' RMS = ' + str(round(prefit_RMS,4)) + ' ' + units[i])\n plt.axhline(3, color='k',linestyle='--')\n plt.axhline(-3, color='k',linestyle='--')\n plt.legend(prop={'size':8})\n plt.xlim([obs_time_vec[0], obs_time_vec[-1]])\n plt.ylim([-6,6])\n plt.xlabel('Observation Time $[s]$')\n plt.ylabel('Normalized Pre-fit Residuals')\n plt.savefig(filename, bbox_inches='tight', dpi=300)\n plt.close()\n\n return",
"def test_plots_residuals_as_quivers() -> None:\n cam = Camera(imgsz=(4288, 2848), f=(3100, 3200), c=(5, -4), k=(0.1, -0.05, 0.02))\n xcam = Matlab(imgsz=(4288, 2848), fc=(3100, 3200))\n converter = Converter(xcam, cam, uv=100)\n quivers = converter.plot()\n np.testing.assert_equal(quivers.X, converter.uv[:, 0])\n np.testing.assert_equal(quivers.Y, converter.uv[:, 1])\n residuals = converter.residuals()\n np.testing.assert_equal(quivers.U, residuals[:, 0])\n np.testing.assert_equal(quivers.V, residuals[:, 1])"
] | [
"0.76177293",
"0.74306864",
"0.73046005",
"0.7271648",
"0.71746546",
"0.70974493",
"0.70785415",
"0.7046744",
"0.70070755",
"0.69879323",
"0.68859196",
"0.6638787",
"0.6637704",
"0.65629137",
"0.6533907",
"0.6488404",
"0.64341474",
"0.64010227",
"0.63988537",
"0.63487905",
"0.6315562",
"0.62580705",
"0.62427074",
"0.623606",
"0.6202014",
"0.6196581",
"0.61923844",
"0.61865777",
"0.6154134",
"0.61501896"
] | 0.7854943 | 0 |
Complete the images table of the database with the relevant values from the images in the directory. | def complete_images_table(self, table):
        # Counters: cmp_img = images newly inserted, cmp_double = images already in the table
        cmp_double = 0
        cmp_img = 0
# Connection to database
conn, cursor = connection_database(self.db_name, self.host, self.user, self.password, self.local, self.ssl_ca)
# Get images paths
images = glob.glob(self.img_dir + "/*jpg")
pbar = progressbar.ProgressBar()
for image_path in pbar(images):
image_name = os.path.basename(image_path)
img = Ci(image_path)
# Get image information
lon, lat, k, h, datetime, global_key = img.get_info_from_filename()
date_sql = mapillary_to_sql_date(datetime)
columns = ("image_key", "latitude", "longitude", "datetime", "filename")
values = (global_key, lat, lon, date_sql, image_name)
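            # Try to insert the image row; an IntegrityError means the image is already in the table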
try:
insert_into(cursor, table, columns, values)
cmp_img += 1
except mysql.connector.errors.IntegrityError:
# print("Image already in table", image_name)
cmp_double += 1
continue
# Validate queries
commit_query(conn)
# End connection to database
end_connection(conn, cursor)
# Display information
print("Number of images inserted into database: {} ".format(cmp_img))
print("Number of images already inserted in database : {} ".format(cmp_double)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_images(image_filename):\n\n # Write code here to loop over image data and populate DB.",
"def show_images(images, db):\n images = [int(image) for image in images]\n files = get_img_files(images, db)\n show_files(files)",
"def add_images(imagefiles, description, tags, users_r, \n users_w, groups_r, groups_w, table, db_file):\n \n try:\n conn, c = connect_to_db(db_file)\n sql_cmd = \"\"\"INSERT INTO {t} \n (id, imagefile, description, tags, time_photo, \n time_added, users_r, users_w, groups_r, groups_w)\n VALUES (NULL, ?, ?, ?, ?, ?, ?, ?, ?, ?);\"\"\".format(t=safe(table))\n time_added = time.asctime(time.localtime(time.time()))\n print(\"Adding images to table \" + table + \":\")\n for img in imagefiles:\n if img[-4:].lower() == '.zip': # Don't include our own zip\n continue\n time_photo = get_image_info(img).get('DateTimeOriginal', '(no time)')\n c.execute(sql_cmd, (os.path.basename(img), description, tags, time_photo,\n time_added, users_r, users_w, groups_r, groups_w,))\n print(img)\n conn.commit()\n conn.close()\n except Exception as e:\n print(\"Error when trying to add images in table\", table, \"in\", db_file)\n print(e)\n return False\n else:\n return True",
"def load_images(subdir):\n with perform(\n name='dbutils load_images',\n before='Loading images to gallery',\n fail='Error occured while loading images to gallery',\n after='Images succesfully loaded'\n ):\n load_dummy_images(subdir)",
"def populate_images(self):\n print \"Populating images info...\"\n images = self.get_all_images()\n for i in images:\n\n associated_snapshots = self.get_snapshots_of(i)\n\n self.spreadsheet[i.id] = dict(name=i.name, Name_tag=self.get_name_tag(i), id=i.id,\n KEEP_tag=self.get_keep_tag(i), PROD_tag=self.is_production(i),\n region=i.region.name,\n created=i.creationDate,\n associated_snapshots=associated_snapshots,\n description=i.description)",
"def load_images():\n\n print \"Images\"\n\n for i, row in enumerate(open(\"seed_data/image_id_and_label.csv\")):\n row = row.rstrip()\n image_id, label = row.split(\",\")\n image_id = image_id.strip()\n\n try:\n image_id = int(image_id)\n except ValueError:\n pass # or whatever\n\n image = Image(id=image_id,\n image_label=label)\n\n # We need to add to the session or it won't ever be stored\n db.session.add(image)\n\n # provide some sense of progress\n if i % 100 == 0:\n print i\n\n # Once we're done, we should commit our work\n db.session.commit()",
"def _table(self, images):\n header = ['Name', 'Pulled', 'Saved']\n data = []\n for item in images:\n if item not in self._missing:\n data.append((item, True, True if self._save else 'N/A'))\n else:\n data.append((item, self._missing[item]['pulled'], self._missing[item]['saved']))\n return self._check_table(header, {'Name': 'l'}, data)",
"def store_images(database):\n # image folder\n folder_path = os.getcwd() + '/Data/Test'\n img_width, img_height = 224, 224\n images = []\n label = []\n\n for _, dirs, _ in os.walk(folder_path, topdown=True):\n for directory in dirs:\n sub_folder_path = os.path.join(folder_path, directory)\n for _, _, files in os.walk(sub_folder_path):\n for name in files:\n if name != '.DS_Store':\n img = os.path.join(sub_folder_path, name)\n img = image.load_img(img, target_size=(img_width, img_height))\n img = image.img_to_array(img)\n img = np.expand_dims(img, axis=0)\n images.append(img)\n label.append(directory)\n\n images = np.vstack(images)\n model = Model()\n predictions = model.model.predict(images, batch_size=10)\n db_actions.reinitialize_table(database)\n for i in range(100):\n prediction = predictions[i, :]\n normalized_prediction = prediction / np.sum(prediction)\n db_actions.add_encoding(database, normalized_prediction, label[i])\n print(\"Sum is: {}\".format(np.sum(normalized_prediction)))",
"def populateImagesList(self):\n \n self._gui_server.getImagesList(self._populateImagesList)",
"def get_all_images_from_database():\r\n\r\n logging.debug('get_all_images_from_database()')\r\n\r\n dir_path = os.path.join(os.environ['LOCALAPPDATA'],'WarietyWallpaperImages')\r\n os.makedirs(dir_path, exist_ok=True)\r\n db_file = os.path.join(dir_path,'wariety.db')\r\n full_image_paths = []\r\n conn = sqlite3.connect(db_file)\r\n c = conn.cursor()\r\n\r\n # Select a row\r\n c.execute(\"SELECT ipath FROM wallpapers\", ())\r\n result = c.fetchall()\r\n conn.close()\r\n for item in result:\r\n full_image_paths.append(os.path.abspath(item[0]))\r\n logging.debug('get_all_images_from_database - full_image_paths = {}'.format(full_image_paths))\r\n return full_image_paths",
"def process():\n config = read_config()\n \n\n img_dir = config['DEFAULT']['images_directory']\n results_dict = {}\n images = list(get_image_files(img_dir))\n for image in tqdm.tqdm(images):\n info = hash_file(image)\n if info == 0:\n continue\n\n hash_value = info['hash']\n\n if hash_value not in results_dict:\n file_name = os.path.basename(info['_id'])\n results_dict[hash_value] = [file_name, 1]\n else:\n results_dict[hash_value][1] += 1\n\n count = list(results_dict.values())\n sorted_count = sorted(count, key=lambda x: x[1], reverse=True)\n \n with ImagesDB(IMG_INFO_DB_FILENAME) as imgDb: \n imgDb.insert_batch(sorted_count)",
"def scan_images(self):\n rtn = 0\n mime_list = self.db.get_mime_list()\n (results,count) = datastore.find({})\n for f in results:\n dict = f.get_metadata().get_dictionary()\n if dict[\"mime_type\"] in mime_list:\n #record the id, file size, file date, in_ds\n self.db.create_picture_record(f.object_id, f.get_file_path())\n rtn += 1\n f.destroy()\n self.db.commit()\n _logger.debug('%s entries found in journal. Number of pictures %s'%(count,rtn,))\n return rtn",
"def read_image_data(self):\n\n for sequence_name in self.sequence_name_list:\n sequence = self.sequences[sequence_name]\n for image_id in sequence.image_id_list:\n sequence.image_dict[image_id].image_path = '{}{}/{}'.format(self.root_dir, self.name, sequence.image_dict[image_id].filename)",
"def convert(dir_path, table, dataset, quality, test_case):\n final_path = dir_path.joinpath(\"images\")\n if test_case:\n size = 20\n else:\n size = len(table)\n for i in tqdm(range(size)):\n img = Image.fromarray(table[i])\n if dataset == 1:\n img = img.convert(\"L\")\n nom = final_path.joinpath(str(i) + \".jpg\")\n img.save(nom, quality=quality)",
"def ImgDirDialog(self):\r\n \r\n self.img_dir = tk.filedialog.askdirectory(title = \"Select Destination Directory for image data\")\r\n self.file_names = [fn for fn in sorted(os.listdir(self.img_dir)) if any(fn.endswith(ext) for ext in file_extensions)]\r\n self.paths = [self.img_dir + '/' + file_name for file_name in self.file_names]\r\n \r\n # Number of labels and paths\r\n self.n_labels = len(self.labels)\r\n self.n_paths = len(self.paths)\r\n \r\n # set image container to first image\r\n self.set_image(self.paths[self.index])\r\n \r\n # if copy_or_move == 'copy':\r\n # try:\r\n # df = pd.read_csv(df_path, header=0)\r\n # # Store configuration file values\r\n # except FileNotFoundError:\r\n # df = pd.DataFrame(columns=[\"im_path\", 'sorted_in_folder'])\r\n # df.im_path = self.paths\r\n # df.sorted_in_folder = self.paths\r\n \r\n if copy_or_move == 'move':\r\n self.df = pd.DataFrame(columns=[\"im_path\", 'sorted_in_folder'])\r\n self.df.im_path = self.paths\r\n self.df.sorted_in_folder = self.paths",
"def load_images(self):\n for image in self.gltf.images:\n self.images.append(image.load(self.path.parent))",
"def help_add_db():\n global picture_lst\n picture_lst = [\"test/db1.jpg\", \"test/db2.jpg\", \"test/db3.jpg\", \"test/db4.jpg\", \"test/db5.jpg\",\n \"test/db6.jpg\", \"test/db7.jpg\", \"test/db8.jpg\", \"test/db9.jpg\", \"test/db10.jpg\"]\n help_main()",
"def save_images(images, db, path):\n images = [int(image) for image in images]\n files = get_img_files(images, db)\n copy_files(files, path)",
"def setup_database():\n database = {}\n\n # for filename in glob.glob(os.path.join(IMAGES_PATH, '')):\n for root, dirs, files in os.walk(IMAGES_PATH):\n for file in files:\n filename = os.path.join(IMAGES_PATH, file)\n # Load image\n image_rgb = face_recognition.load_image_file(filename)\n\n # Use name in filename as the identity key\n identity = os.path.splitext(os.path.basename(filename))[0]\n\n # Get face encoding and link it to the identity\n locations, encodings = get_face_embeddings_from_image(image_rgb)\n database[identity] = encodings[0]\n\n return database",
"def copy_database(path_images, path_labels, path_final_images):\n\n try:\n labels = sorted(os.listdir(path_labels))\n except FileNotFoudError:\n print(\"No such file or directory \", path_labels)\n\n try:\n images = sorted(os.listdir(path_images)) #+ \"RetinaNet_I04590/\"))\n except FileNotFoudError:\n print(\"No such file or directory \", path_images)\n\n \"\"\"if not os.path.exists(path_final_images + \"I04590/\"):\n os.mkdir(path_final_images + \"I04590/\")\n\n if not os.path.exists(path_final_images + \"I045135/\"):\n os.mkdir(path_final_images + \"I045135/\")\n\n if not os.path.exists(path_final_images + \"I090135/\"):\n os.mkdir(path_final_images + \"I090135/\")\n\n if not os.path.exists(path_final_images + \"I4590135/\"):\n os.mkdir(path_final_images + \"I4590135/\")\n\n if not os.path.exists(path_final_images + \"Params/\"):\n os.mkdir(path_final_images + \"Params/\")\n\n if not os.path.exists(path_final_images + \"Pauli2/\"):\n os.mkdir(path_final_images + \"Pauli2/\")\n\n if not os.path.exists(path_final_images + \"Pauli3/\"):\n os.mkdir(path_final_images + \"Pauli3/\")\n\n if not os.path.exists(path_final_images + \"Stokes/\"):\n os.mkdir(path_final_images + \"Stokes/\")\n\n if not os.path.exists(path_final_images + \"Rachel/\"):\n os.mkdir(path_final_images + \"Rachel/\")\n\n if not os.path.exists(path_final_images + \"Rachel2/\"):\n os.mkdir(path_final_images + \"Rachel2/\")\"\"\"\n\n for k in range(len(images)):\n if str(k) + \".xml\" in labels:\n copyfile(path_images + \"/\" + images[k],\n path_final_images + \"/\" + images[k])\n \"\"\"copyfile(path_images + \"RetinaNet_I04590/\" + str(k) + \".png\",\n path_final_images + \"I04590/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_I045135/\" + str(k) + \".png\",\n path_final_images + \"I045135/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_I090135/\" + str(k) + \".png\",\n path_final_images + \"I090135/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_I4590135/\" + str(k) + \".png\",\n path_final_images + \"I4590135/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_Params/\" + str(k) + \".png\",\n path_final_images + \"Params/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_Pauli2/\" + str(k) + \".png\",\n path_final_images + \"Pauli2/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_Pauli3/\" + str(k) + \".png\",\n path_final_images + \"Pauli3/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_Stokes/\" + str(k) + \".png\",\n path_final_images + \"Stokes/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_Rachel/\" + str(k) + \".png\",\n path_final_images + \"Rachel/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_Rachel2/\" + str(k) + \".png\",\n path_final_images + \"Rachel2/\" + str(k) + \".png\")\n copyfile(path_labels + str(k) + \".xml\",\n path_final_labels + str(k) + \".xml\")\"\"\"\n print(k)",
"def __extract_images(self, images_file, labels_file, phase):\n images, max_size = self.__readImages(\n os.path.join(self.outdir, images_file))\n assert len(labels) == len(images), '%d != %d' % (\n len(labels), len(images))\n\n map_size = len(images) * 28 * 28 * 10\n env = lmdb.open(self.outdir, map_size=map_size)\n\n with env.begin(write=True) as txn:\n # txn is a Transaction object\n for i, image in enumerate(images):\n datum = annfab.utils.image_to_datum(image, labels[i])\n str_id = '{:08}'.format(i)\n\n # The encode is only essential in Python 3\n txn.put(str_id.encode('ascii'), datum.SerializeToString())",
"def full_image_list(self):\n for architecture in self.database.architectures:\n yield md.item(md.link(_format_architecture(architecture)))\n\n for architecture in self.database.architectures:\n yield \"\"\n yield md.header(_format_architecture(architecture), 3)\n yield \"\"\n yield \"Supported platforms:\"\n yield \"\"\n\n for platform in self.database.platforms:\n releases = self._release_list(architecture, platform)\n if releases:\n yield md.item(releases)\n\n for platform in self.database.platforms:\n for release in self.database.releases(platform):\n if not self.database.has(\n architecture=architecture, platform=platform, release=release\n ):\n continue\n\n yield \"\"\n yield md.header(\n _format_platform(platform, release, architecture), 4\n )\n yield \"\"\n\n for version in self.database.versions:\n image = self.database.get(\n version=version,\n architecture=architecture,\n platform=platform,\n release=release,\n )\n if not image:\n continue\n\n tags = [\n tag\n for tag in self.database.tags(image)\n if len(tag.version) < 4\n ]\n\n yield _format_image(image, tags)",
"def add_images(self, ims, width=400, im_names=None, no_names=False):\n self.t = table(border=1, style=\"table-layout: fixed;\") # Insert a table\n self.doc.add(self.t)\n with self.t:\n with tr():\n for i, im in enumerate(ims):\n if im_names is None:\n im_name = im.split('/')[-1]\n else:\n im_name = im_names[i]\n with td(style=\"word-wrap: break-word; width:%dpx\" % width, halign=\"center\", valign=\"top\"):\n with p():\n if not no_names:\n p(im_name)\n with a(href=im):\n img(style=\"width:%dpx\" % width, src=im)",
"def write_img_to_db():\n with lite.connect(\"test.db\") as con:\n cur = con.cursor()\n data = read_image_from_fs()\n binary = lite.Binary(data)\n cur.execute(\"INSERT INTO Images(Data) VALUES (?)\", (binary,))",
"def _append_img_dir(self, data, image_dir, image_col='images'):\n import os\n df = data.copy()\n df[image_col] = df['image_url'].apply(lambda x: os.path.join(\n image_dir, os.path.basename(x) + '.jpg'))\n return df",
"def show_images(plate_full_name, well):\n if not IPYTHON:\n return\n\n src_dir = op.join(cp_config[\"Paths\"][\"SrcPath\"], plate_full_name)\n ctrl_images = load_control_images(src_dir)\n image_dir = op.join(src_dir, \"images\")\n templ_dict = {}\n for ch in range(1, 6):\n im = load_image(image_dir, well, ch)\n templ_dict[\"Img_{}_Cpd\".format(ch)] = img_tag(\n im, options='style=\"width: 250px;\"')\n templ_dict[\"Img_{}_Ctrl\".format(ch)] = ctrl_images[ch]\n tbody_templ = Template(cprt.IMAGES_TABLE)\n table = cprt.TABLE_INTRO + \\\n tbody_templ.substitute(templ_dict) + cprt.HTML_EXTRO\n return HTML(table)",
"def initImages(self):\n pass",
"def initImages(self):\n pass",
"def initImages(self):\n pass",
"def display_imgs(img_dir,img_list):\n for img in img_list:\n display_img(img_dir, img)"
] | [
"0.6612568",
"0.65215194",
"0.6491594",
"0.6421915",
"0.6367421",
"0.62234783",
"0.61760515",
"0.61658376",
"0.6102105",
"0.6085422",
"0.5987134",
"0.59791154",
"0.5875186",
"0.58479464",
"0.5841986",
"0.583527",
"0.5827648",
"0.5797089",
"0.578143",
"0.57734877",
"0.57726663",
"0.5733225",
"0.5724313",
"0.5694982",
"0.568736",
"0.5662635",
"0.56548107",
"0.56548107",
"0.56548107",
"0.56320006"
] | 0.77361715 | 0 |
Connects to the SQL server and stores the results of the comparisons in a csv. | def get_duels(self, csv_file, table):
conn, curs = connection_database(self.db_name, self.host, self.user, self.password, self.local, self.ssl_ca)
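        # Fetch every comparison row stored in the given table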
query = "SELECT * FROM {};".format(table)
curs.execute(query)
result = curs.fetchall()
end_connection(conn, curs)
        # Write the comparison results to the csv file
with open(csv_file, mode='w') as file:
file_writer = csv.writer(file, delimiter=',', lineterminator='\n')
for comparison in result:
file_writer.writerow(comparison) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_sql_to_csv():\n csv_outfile = 'optwrf_database.csv'\n db_conn = conn_to_db('optwrf.db')\n sql_to_csv(csv_outfile, db_conn)\n close_conn_to_db(db_conn)\n assert os.path.exists(csv_outfile) == 1",
"def main():\n #use automationassets to get credentials \n cred = automationassets.get_automation_credential(\"xxxx\")\n username = cred[\"username\"] \n password = cred[\"password\"]\n driver = '{SQL Server}'\n \n #declare database connection\n conn = pyodbc.connect('DRIVER={0};SERVER=xxxxx;DATABASE=xxxxx;UID={1};PWD={2}'.format(driver,username,password)) \n cursor = conn.cursor()\n\n #execute [App].[RptStagClientFileStatus] procedure and write to csv file\n cursor.execute(\"SET NOCOUNT ON; EXEC [App].[RptStagClientFileStatus] active\")\n results_activeclient = cursor.fetchall()\n activecsv = datetime.now().strftime('activeclientfile-%Y-%m-%d-%H-%M.csv')\n with open(activecsv,'wb') as fp:\n a = csv.writer(fp, delimiter=',')\n a.writerow(['CreateDate','Institution','Status','RecCount'])\n a.writerows(results_activeclient)\n\n #execute [App].[RptStagClientFileStatus] procedure and write to csv file\n cursor.execute(\"SET NOCOUNT ON; EXEC [App].[RptStagClientFileStatus] writeoff\")\n results_writeoff = cursor.fetchall()\n writeoffcsv = datetime.now().strftime('writeoff-%Y-%m-%d-%H-%M.csv')\n with open(writeoffcsv,'wb') as fp:\n a = csv.writer(fp, delimiter=',')\n a.writerow(['CreateDate','Institution','Status','RecCount'])\n a.writerows(results_writeoff)\n\n #execute [App].[RptMFILog] procedure and write to csv file\n cursor.execute(\"SET NOCOUNT ON; EXEC [App].[RptMFILog]\")\n results_mfilog = cursor.fetchall()\n mficsv = datetime.now().strftime('mfilog-%Y-%m-%d-%H-%M.csv')\n with open(mficsv,'wb') as fp:\n a = csv.writer(fp, delimiter=',')\n a.writerow(['CreateDate','MFILog'])\n a.writerows(results_mfilog)\n\n #execute [Client].[GetAccountStatistics] procedure and write to csv file \n cursor.execute(\"SET NOCOUNT ON; EXEC [Client].[GetAccountStatistics]\")\n results_percent = cursor.fetchall()\n #create row_as_list for encoding process (changed pyodbc.Row to list data type)\n row_as_list = [x for x in results_percent]\n rawlist = list()\n #create rawlist to build manually list of pyodbc row for easily write to csv\n for x in row_as_list:\n lst2 = list()\n for y in x:\n res=encode(y)\n lst2.append(res)\n rawlist.append(lst2)\n\n percentcsv = datetime.now().strftime('80percent-%Y-%m-%d-%H-%M.csv')\n with codecs.open(percentcsv,'wb',encoding='utf-8') as fp:\n a = csv.writer(fp, delimiter=',')\n header = ['MFIName','ClientCountAtSignUp','UploadCountLastMonth','UploadCount','80%','Status']\n a.writerow(header)\n a.writerows(rawlist)\n \n #Execute [App].[RptDailyStatusSummary] and show in body of mail\n cursor.execute(\"SET NOCOUNT ON; EXEC [App].[RptDailyStatusSummary]\")\n summary = cursor.fetchall()\n #get summary data to show in mail's body\n sum1 = strip_data(summary[0])\n sum2 = strip_data(summary[1])\n sum3 = strip_data(summary[2])\n sum4 = strip_data(summary[3])\n sum5 = strip_data(summary[4])\n sum6 = strip_data(summary[5])\n sum7 = strip_data(summary[6])\n\n csvlist = list()\n for file in glob.glob(\"*.csv\"):\n csvlist.append(file)\n\n #credentials of sender's address \n cred1 = automationassets.get_automation_credential(\"RunBookEmailCred\") \n name = cred1[\"username\"] \n passw = cred1[\"password\"]\n\n sender = name\n receiver = xxxxxxxx\n smtpsrv = \"smtp.office365.com\"\n\n SUBJECT = 'MMCIX Daily Status ({0})'.format(datetime.now().strftime('%Y-%m-%d-%H:%M:%S'))\n #get the location of the script \n FILEPATH = os.path.dirname(os.path.abspath(__file__))\n #build mail's body\n msg = MIMEMultipart()\n msg['From'] = sender\n msg['To'] = COMMASPACE.join(receiver)\n msg['Subject'] = 
SUBJECT\n b1 = 'Daily Status job schedule are processed successfully.'\n b2 = 'Daily Status Summary'\n btle = 'Title, Record'\n body =\"\"\"\n {0}\n\n {1}\n\n {2}\n {3}\n {4}\n {5}\n {6}\n {7}\n {8}\n {9}\n\n Regards,\n MMCIX Team\n\n \"\"\".format(b1,b2,btle,sum1,sum2,sum3,sum4,sum5,sum6,sum7)\n body = MIMEText(body)\n msg.attach(body)\n #attach multiple csv in csvlist\n for f in csvlist:\n #file_path = os.path.join(FILEPATH, f)\n part = MIMEBase('application', \"octet-stream\")\n part.set_payload(open(f, \"rb\").read())\n encoders.encode_base64(part)\n part.add_header('Content-Disposition', 'attachment', filename=f) # or\n msg.attach(part)\n\n smtpserver = smtplib.SMTP(smtpsrv,587)\n smtpserver.ehlo()\n smtpserver.starttls()\n smtpserver.ehlo\n smtpserver.login(sender, passw)\n smtpserver.sendmail(sender, receiver, msg.as_string())\n print 'Successfully sent mail'\n smtpserver.close()",
"def export_comparisons(self):\n print(\"Exporting comparisons:\")\n\n return",
"def test_connection(self):\n with vertica_python.connect(**conn_info) as connection:\n print(\"Connected to {} on host{} \".format(conn_info['database'],conn_info['host']))\n cur = connection.cursor()\n cur.execute(self.TEST_QUERY)\n for row in cur.iterate():\n print(\"The row is {}\".format(row))",
"def save_csv(connection, query, columns, name):\n\n try:\n df = pd.read_sql(query, connection, columns=columns)\n df.to_csv(name, index=False)\n except Exception as ex:\n print(type(ex))\n print(ex)",
"def connect(sql, query, conn):\n cursor = conn.cursor() # Return the cursor and use it to perform queries.\n\n # Execute the query.\n if query == 'no_query': # If we're just running an SQL query without a variable, only execute the sql\n cursor.execute(sql)\n else:\n cursor.execute(sql, query) # If we have a variable (e.g. an FBgn) be sure to include it in the execute command.\n \n records = cursor.fetchall() # Grab the results.\n cursor.close() # Close the cursor.\n return records # Return a list of tuples.",
"def get_rows(self, logger):\n engine = self.connecting_database(logger)\n\n # Returning all rows from the table using Pandas\n logger.info(\"Getting rows . . .\")\n df = pd.read_sql('bt_challenge_boa', con=engine)\n\n logger.info(\"Total Rows: {}\".format(str(len(df))))\n logger.info(\"Saving the CSV file . . .\")\n df.to_csv(self.path + self.outpufile, index=False, doublequote=True)",
"def querySciDB2(cmd):\n #startT = benchmark.startTimer(cmd)\n proc = subprocess.Popen([\"/opt/scidb/12.10/bin/iquery\", \"-o\", \"csv\", \"-a\", \"-q\", cmd], stdout = subprocess.PIPE)\n out,err = proc.communicate()\n\n lines = out.split(\"\\n\")\n\n header = lines[0] #.split(\",\")\n rows = lines[1:-1]\n\n #benchmark.endTimer(cmd, startT)\n return header, rows",
"def etl_operations():\n tap = SQLTaps(db_type='mysql',\n username='root',\n password='',\n host='localhost',\n db_name='ETLtestDb')\n\n conn = tap.get_connection()\n\n query = 'SELECT id, filename, student_xml FROM StudentsData'\n\n rows = tap.get_rows(conn, query)\n\n rows_json = tap.covert_ResultProxy_to_JSON(rows)\n\n result_list = rows_json.get('result')\n converter = Convert()\n\n csv_row_list = list()\n\n headers = list()\n\n for row in result_list:\n xml_content = base64.b64decode(row.get('student_xml').encode())\n csv_content = converter.xml_to_csv(xml_content)\n headers = csv_content.get('columns')\n csv_row_list.append(csv_content.get('values'))\n\n csv_target('students.csv', csv_row_list, headers)",
"def export_sensor_data_to_csv(self):\n df = pd.read_sql('SELECT * FROM sensor_data', self.conn)\n df.to_csv('output/sensor_data.csv', index=False)",
"def doQuery( connection ): # function definition\r\n cursor = connection.cursor()\r\n \r\n query = \"\"\"select Title, sum( UnitPrice ) as cost\r\n from Album natural join Track\r\n group by AlbumId\r\n order by cost desc\"\"\"\r\n \r\n cursor.execute( query )\r\n \r\n print( \"Album titles and cost of tracks\" )\r\n print( \"Title\\t\\t\\t\\tCost\" )\r\n for (Title, cost) in cursor:\r\n print( Title, \"\\t\\t\\t$\", cost )\r\n \r\n cursor.close()",
"def cmpEquals(self, conn1, sql1, conn2, sql2):\n for row in self.get_query_results(conn1, sql1):\n res1 = row[0]\n for row in self.get_query_results(conn2, sql2):\n res2 = row[0]\n self.log.info(\n \"cmpEquals:: task: {}, value1: {}, value2: {}\".format(\n self.task_id, str(res1), str(res2)\n )\n )\n\n if res1 != res2:\n raise AirflowException(\n \"EtlValidation cmpEqualsError: query {}\".format(sql1 + \"<>\" + sql2)\n )",
"def run_analytics_queries(cur, conn):\n \n output = []\n\n for query in analytics_queries:\n cur.execute(query)\n records = cur.fetchall()\n column_names = list(map(lambda x: x[0], cur.description))\n output.append(pd.DataFrame(records, columns=column_names))\n \n for table in output:\n print(table, end='\\n\\n')",
"def dbtocsv():\n connection = sqlite3.connect(\"sensordata.db\")\n cursor = connection.cursor()\n cursor.execute(\"Select * from sensordata\")\n roadstationdata = cursor.fetchall()\n\n with open('roadstationdata.csv', 'w') as f:\n writer = csv.writer(f)\n writer.writerow(['id','name','value','unit','time'])\n writer.writerows(roadstationdata)",
"def test_fetchall(self):\n result = export.processExport(houseId=1)\n #We should have 2 locations * 1 sensor * 10 days of data here\n # 2 * 1 * (288 * 10) == 5670\n #print result.shape\n\n #result.to_csv(\"temp.csv\")\n #Do we get the right object\n self.assertEqual(type(result), pandas.DataFrame)\n #And is it the right size\n self.assertEqual(result.shape, (2880, 2)) #So 2880 samples from two sensors\n #And the right range of data\n self.assertEqual(result.index[0], datetime.datetime(2013, 01, 01))\n self.assertEqual(result.index[-1], datetime.datetime(2013, 01, 10, 23, 55))",
"def test_connection(self):\n print('\\ntest_connection')\n connector = btc_price.db.ConnectPSQL(host=DB_HOST, user=DB_USER, port=DB_PORT, db=DB_NAME)\n print(connector.show_table_name())\n print(connector.show_column_name('ticker'))",
"def querySciDB(cmd):\n #startT = benchmark.startTimer(cmd)\n\n proc = subprocess.Popen([\"/opt/scidb/12.10/bin/iquery\", \"-o\", \"csv+\", \"-a\", \"-q\", cmd], stdout = subprocess.PIPE)\n out,err = proc.communicate()\n\n lines = out.split(\"\\n\")\n # first line is header, last line is empty\n header = lines[0].split(\",\")\n rows = [line.split(\",\") for line in lines[1:-1]]\n #benchmark.endTimer(cmd, startT)\n\n return header, rows",
"def query_DB_satellites(outputpath=\"../data/\", user=\"anonimo\", passwd=\"secreto\"):\n #define the output file\n outputfile=outputpath+\"milky_way_satellites.csv\"\n # Build the SQL query\n \n query = \"with milky_way_halos as (select * from Bolshoi..BDMW where snapnum=416 and Mvir > 5.0E11 and Mvir < 6.0E11 ) select sub.* from milky_way_halos mwh, Bolshoi..BDMW sub where sub.snapnum = 416 and sub.hostFlag = mwh.bdmId\"\n\n # Build the wget command to query the database\n website = \"http://wget.multidark.org/MyDB?action=doQuery&SQL=\"\n username = user\n password = passwd\n \n wget_options=\" --content-disposition --cookies=on --keep-session-cookies --save-cookies=cookie.txt --load-cookies=cookie.txt --auth-no-challenge\" \n wget_options=wget_options+\" -O \"+outputfile +\" \"\n wget_command=\"wget --http-user=\"+username+\" --http-passwd=\"+password+\" \"+wget_options \n command=wget_command + \"\\\"\"+ website + query+\"\\\"\"\n print \"\"\n print query\n print \"\"\n print command\n print \"\"\n # execute wget in shell\n retcode = call(command,shell=True)",
"def create_query_csv(self):\n\n self.query_df.to_csv(self.query_output_file)",
"def compare_all(category, rent_type, minSize, maxPrice, minRooms, maxRooms, exc, balcony, pets, furnished):\n wg_api = WgGesucht()\n wg_results = wg_api.search(category, rent_type, minSize, maxPrice, minRooms, maxRooms, exc, balcony, pets, furnished)\n final_df = prepare_wg_data(wg_results)\n final_df.to_csv('Flat_search_results.csv', sep=str('\\t'), encoding='utf-8') # saving into csv file\n return final_df",
"def outputData(tname):\n\n table = pd.read_sql(\"SELECT * FROM {0}\".format(tname), ENGINE)\n table.to_csv(\"data/{0}.csv\".format(tname), sep=\",\", header=True, index=False, quoting=csv.QUOTE_NONNUMERIC)",
"def to_csv(self, path):\n for table in ['datasets', 'dataruns', 'hyperpartitions', 'classifiers']:\n df = pd.read_sql('SELECT * FROM %s' % table, self.session.bind)\n df.to_csv(os.path.join(path, '%s.csv' % table), index=False)",
"def main():\n\n # open a connection to Cloud SQL\n conn_str = (\n f\"DRIVER={{{ODBC_DRIVER}}};SERVER={PROXY_ADDR};UID={DB_USER};PWD={DB_PASS}\"\n )\n with pyodbc.connect(conn_str, autocommit=True).cursor() as cursor:\n\n # create the database\n database_create(cursor=cursor, database=DB_NAME, drop=True)\n\n # create the tables\n sql_script = f\"\"\"USE {DB_NAME};\nCREATE TABLE jobdef (jobtype CHAR(20) PRIMARY KEY, start_page VARCHAR(120),\n single_domain BIT, subpath VARCHAR(120), max_pages INT,\n daily BIT);\nCREATE TABLE jobhist (job_id INT IDENTITY(1,1) PRIMARY KEY, jobtype CHAR(20),\n queued DATETIME2, jobstart DATETIME2, jobend DATETIME2,\n elapsed INT, links INT, pages INT, missing INT);\nCREATE TABLE crawled (job_id INT, page_url VARCHAR(120), crawled DATETIME2);\nCREATE TABLE notfound (job_id INT, found DATETIME2, source VARCHAR(120),\n target VARCHAR(120), link_text VARCHAR(120));\"\"\"\n cursor.execute(sql_script)\n cursor.commit()\n\n # load sample data from CSV files into each of the tables\n for table in [\"jobdef\", \"jobhist\", \"crawled\", \"notfound\"]:\n csv_insert(\n cursor=cursor,\n database=DB_NAME,\n table=table,\n filename=f\"initdata\\\\{table}.csv\",\n )\n table_print(cursor=cursor, table=table)",
"def setup_compare_functional_loci(con):\n\n cur = con.cursor()\n\n sql = \"delete from Compare_DNDS_Fscores\"\n cur.execute(sql)\n con.commit()\n\n sql = \"select id from DNDS_Models where name='Nsites_branch'\"\n cur.execute(sql)\n x = cur.fetchall()\n if x.__len__() == 0:\n write_log(\n con, \"There are no DNDS_Models in the database, so I'm skipping the comparison of DNDS to Df.\")\n return\n nsites_id = x[0][0]\n\n sql = \"select id from AlignmentMethods where name='muscle'\"\n cur.execute(sql)\n muscleid = cur.fetchone()\n if muscleid is None:\n print \"\\n. Warning - the comparison of functional loci is hardcoded to use the MUSCLE alignment\"\n print \"but it appears that the muscle method wasn't used with your data.\"\n print \"I'm skipping the comparison\"\n return\n muscleid = muscleid[0]\n ml_modelid = get_ml_model(con, muscle)\n\n sql = \"select id, almethod, anc1, anc2 from DNDS_Tests where dnds_model=\" + \\\n nsites_id.__str__() + \" and phylomodel=\" + ml_modelid.__str__()\n cur.execute(sql)\n x = cur.fetchall()\n for ii in x:\n dnds_testid = ii[0]\n almethod = ii[1]\n phylomodel = ml_modelid\n anc1 = ii[2]\n anc2 = ii[3]\n\n if anc1 == anc2:\n write_log(con, \"I'm skipping the dnds comparison \" +\n dnds_testid.__str__() + \" because anc 1 and 2 match.\")\n continue\n\n \"\"\"Find the matching Fscore test\"\"\"\n sql = \"select id from FScore_Tests where almethod=\" + almethod.__str__() + \" and phylomodel=\" + \\\n phylomodel.__str__() + \" and ancid1=\" + anc1.__str__() + \\\n \" and ancid2=\" + anc2.__str__()\n cur.execute(sql)\n y = cur.fetchall()\n if y.__len__() > 0:\n fscore_testid = y[0][0]\n\n sql = \"insert into Compare_DNDS_Fscores (dnds_testid, fscore_testid) values(\" + \\\n dnds_testid.__str__()\n sql += \",\" + fscore_testid.__str__() + \")\"\n cur.execute(sql)\n con.commit()",
"def make_sql_call(self):\n c_data = {'db_host': self.server,\n 'db_user': self.user,\n 'db_password': self.password,\n 'db_database': self.database}\n db_conn = self.SH.sql.helper.sql_conn_obj(c_data)\n result, detail = db_conn.connect()\n self.print_to_log(detail)\n result, detail = db_conn.execute(self.sql)\n db_conn.shutdown()\n self.print_to_log(detail)",
"def _execute_query(self, sql):\n url = self.db_url + \"?\" + urlencode({'action': 'doQuery', 'SQL': sql})\n install_opener(build_opener(self.auth_handler, self.cookie_handler))\n response = urlopen(url)\n cookie_jar.save(ignore_discard=True)\n\n # Check for OK response\n line = response.readline()\n if bytes(line) != b\"#OK\\n\":\n raise Exception(response.readlines())\n\n # Skip rows until we reach QUERYTIMEOUT\n while True:\n line = bytes(response.readline())\n if line == b\"\":\n raise Exception(\"Unexpected end of file while reading result\"\n \"header\")\n elif line.startswith(b\"#QUERYTIMEOUT\"):\n break\n\n # Skip QUERYTIME\n if not(bytes(response.readline()).startswith(b\"#QUERYTIME\")):\n raise Exception(\"Don't understand result header!\")\n\n # Read column info\n # (also discards line with full list of column names)\n columns = []\n while True:\n line = bytes(response.readline())\n if not line.startswith(b\"#\"):\n break\n else:\n m = re.match(b\"^#COLUMN ([0-9]+) name=([\\w]+) \"\n b\"JDBC_TYPE=(-?[0-9]+) JDBC_TYPENAME=([\\w]+)\\n$\",\n line)\n if m is not None:\n columns.append(m.groups())\n else:\n raise Exception(\"Don't understand column info: \"+line)\n\n # Construct record type for the output\n types = [numpy_dtype[col[3]] for col in columns]\n try:\n # Python 2 compatible\n names = [col[1] for col in columns]\n dtype = np.dtype([(n, t) for n, t in zip(names, types)])\n except TypeError:\n # Python 3 compatible\n names = [col[1].decode() for col in columns]\n dtype = np.dtype([(n, t) for n, t in zip(names, types)])\n\n # Return the data as a record array\n return np.genfromtxt(response, dtype=dtype, delimiter=\",\")",
"def write_csv(self):\n with open(paths.CSV_FILE, 'w', newline='') as csv_file:\n writer = csv.writer(csv_file)\n assg = AssignmentConfig().get_assignment()\n writer.writerow([\"Student\"] + assg.get_test_list() + assg.get_programs_list() +\n [\"normalised_test_score\"] + [\"normalised_prog_score\"] + [\"total\"] + [\"total_rounded\"])\n\n for (submitter, submitter_data) in sorted(self.snapshot['results'].items()):\n total_score = submitter_data[\"normalised_test_score\"] + submitter_data[\"normalised_prog_score\"]\n total_rounded = round(total_score * 2) / 2 # total score rounded to nearest 0.5\n writer.writerow([submitter] +\n [submitter_data[\"tests\"][test] for test in sorted(submitter_data[\"tests\"])] +\n [submitter_data[\"progs\"][prog] for prog in sorted(submitter_data[\"progs\"])] +\n [submitter_data[\"normalised_test_score\"]] +\n [submitter_data[\"normalised_prog_score\"]] +\n [round(total_score, 2)] +\n [total_rounded])",
"def run (self):\n if self.testing:\n return\n conn = Connection(self.db, self.host, self.user, self.passwd)\n conn.execute(self.sql)\n self.table = conn.fetch()\n conn.commit()",
"def query(self, sql):\r\n\r\n result_sets = []\r\n self.messages = \"\"\r\n messages = \"\"\r\n\r\n # self.batched_query(sql)\r\n\r\n with self.conn.cursor() as cur:\r\n self.cur = cur\r\n\r\n self.request_cancel = False\r\n try:\r\n cur.execute(sql)\r\n except Exception as ex:\r\n self.cur = None\r\n self.messages = str(ex)\r\n return None\r\n while True:\r\n try:\r\n description = cur.description\r\n except:\r\n self.messages = \"Error reading description\"\r\n if self.metadata() is not None and description is not None:\r\n description = list(map(lambda c: c+(self._better_description(c),),description))\r\n # print(description)\r\n try:\r\n # data = cur.fetchmany(10000)\r\n # while True:\r\n # d = cur.fetchmany(10000)\r\n # if d is None or len(d) == 0:\r\n # break\r\n # if self.request_cancel:\r\n # cur.cancel()\r\n # self.request_cancel = False\r\n # data = data + d\r\n data = cur.fetchall()\r\n\r\n # select @var = 'tto' does not produce any resultset and raised an exception\r\n # we catch it and ignore it.\r\n # TODO: is there a better way to handle that?\r\n except pytds.ProgrammingError as ex:\r\n data = None\r\n if str(ex) == \"Previous statement didn't produce any results\":\r\n pass\r\n else:\r\n break\r\n \r\n except Exception as ex:\r\n data = None\r\n self.messages = str(ex) + \" Error while fetching data\"\r\n break\r\n\r\n\r\n\r\n if data is not None:\r\n result_sets.append(ResultSet(description, data))\r\n\r\n try:\r\n have_more_set = cur.nextset()\r\n except:\r\n self.messages = \"Error reading next set\"\r\n break\r\n if have_more_set is None or have_more_set is False:\r\n break\r\n\r\n try:\r\n for msg in cur.messages:\r\n messages = messages + str(msg[1]) + \"\\n\"\r\n except:\r\n self.messages = \"Error reading messages\"\r\n self.messages = messages + self.messages\r\n\r\n # print(\"End \",str(len(result_sets)))\r\n\r\n self.cur = None\r\n\r\n # if not result_sets:\r\n # return None\r\n return result_sets",
"def export_csv( self, db_device_adapters, db_start, db_end, min_points = 10, csv_file_name = \"data.csv\" ):\n msg = f\"Export data to csv file {csv_file_name}...\"\n AppGlobal.gui.display_info_string( msg )\n sep = \"\\t\"\n for i_device_adapter in db_device_adapters:\n #time_data, inst_pw_data, total_power_data, = self._prep_data( i_device_adapter, db_start, db_end, min_points )\n i_device_adapter.retrived_data_cache = self._prep_data( i_device_adapter, db_start, db_end, min_points )\n time_data, inst_pw_data, total_power_data, = i_device_adapter.retrived_data_cache\n\n device_name = i_device_adapter.name\n\n if time_data is None:\n msg = f\"No data for {device_name}.\"\n AppGlobal.gui.display_info_string( msg )\n else:\n with open( csv_file_name, \"a\" ) as a_file: # we are appending\n a_file.write( f'\"device\"{sep}\"time_data\"{sep}\"inst_pw_data\"{sep}\"total_power_data\"\\n' )\n for ix_list, i_time in enumerate( time_data ):\n a_file.write( f\"{device_name}{sep}{time_data[ ix_list ]}{sep}{inst_pw_data[ ix_list ]}{sep}{total_power_data[ ix_list ]}\\n\" )\n\n msg = f\"...CSV file complete.\"\n AppGlobal.gui.display_info_string( msg )"
] | [
"0.60724294",
"0.5836433",
"0.56998223",
"0.5647265",
"0.55306256",
"0.55271506",
"0.551484",
"0.54908764",
"0.53698635",
"0.5367699",
"0.5367317",
"0.5350177",
"0.53416455",
"0.53352654",
"0.5332152",
"0.53131926",
"0.52831393",
"0.52660966",
"0.52494246",
"0.5238187",
"0.51945364",
"0.5180478",
"0.5155265",
"0.5155129",
"0.5137062",
"0.5120749",
"0.51071393",
"0.5083967",
"0.50728935",
"0.50665474"
] | 0.5951407 | 1 |
Commit the queries of a connection instance. | def commit_query(conn):
conn.commit() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def commitQuery(self):\r\n\t\tself.session.commit()",
"def commit(self):\n self.conn.commit()",
"def commit(self):\n self.__connection.commit()",
"def commit(self):\n self._connection.execute_nonquery(\"sql\", \"COMMIT\", True)",
"def commit(self) -> None:\n self._connector.commit_transaction()",
"def commit(self):\n self.connection.commit()",
"def commit( self ) :\n self.oracle_connection.commit()",
"def commit(self):\n self._check_connection()\n\n for s in self._buffer_insert:\n self._connection.execute(s)\n del self._buffer_insert[:]\n\n self._connection.commit()",
"def commit(self):\n if self.transaction:\n self.conn.commit()\n self.transaction = False",
"def commit_changes(self):\n self.connection.commit()",
"def commit(self):\n self.sql_session.commit()",
"def commit(self):\n self.getSession().commit()",
"def commit_and_close(self):\n self.connection.commit()\n self.cursor.close()\n self.connection.close()",
"def commit(self):\n self.lock.acquire()\n self.__Session.commit()\n self.lock.release()",
"def commit(cls, sql, **kwargs):\n conn = kwargs['conn']\n\n cursor = conn.cursor(dictionary=True, buffered=False)\n if CHECKS_OFF:\n sql = TURN_CHECKS_OFF + sql\n\n for _ in cursor.execute(sql, kwargs.get('args'), multi=True):\n pass\n\n cls.close(conn, cursor)",
"def _commit(self):\n if self.__session is not None:\n self.__session.commit()",
"def finish(self):\n self.conn.commit()\n self.conn.close()",
"def save_query(self):\r\n self.conn.commit()",
"def commit(self):",
"def commit(self):\n curs = self.cursor()\n self.clearTempTables(curs)\n super(_MockConnection, self).commit()",
"def Commit(self):\n sql_cnxn = self.GetMasterConnection()\n try:\n sql_cnxn.commit()\n except MySQLdb.DatabaseError:\n logging.exception('Commit failed for cnxn, rolling back')\n sql_cnxn.rollback()",
"def commit(self):\n self.session.commit()",
"def commit(self):\n self.execute_sql(sql.commit)\n self.under_transaction = False",
"def commit(self):\n return self.connection.commit",
"def commit(self):\n return self.conn.commit()",
"def commit(self):\n self.cnx.commit()",
"def commit(self):\n pass",
"def commit(self):\n pass",
"def commit(self):\n pass",
"def commit(self):\n pass"
] | [
"0.7059963",
"0.69966274",
"0.6971735",
"0.6961581",
"0.6949486",
"0.6940861",
"0.6940398",
"0.6907652",
"0.6823024",
"0.681941",
"0.67607075",
"0.6757692",
"0.6726335",
"0.6708127",
"0.6663223",
"0.66234124",
"0.6611488",
"0.65827453",
"0.6555924",
"0.65552056",
"0.65522164",
"0.65371674",
"0.65311325",
"0.65216696",
"0.65136147",
"0.65101004",
"0.6506583",
"0.6506583",
"0.6506583",
"0.6506583"
] | 0.7103915 | 0 |
nice_name tag returns the username when the full name is not available | def test_nice_name_returns_username(self):
class UserNoName():
username = 'my_username'
def get_full_name(self):
return None
rendered = self.render_nice_name(UserNoName())
self.assertEquals(rendered, 'my_username') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def nice_name(self):\n if self.first_name or self.last_name:\n return \"%s %s\" % (self.first_name, self.last_name)\n else:\n key = \"profile.nice_name\"\n cache_key = \"%s.%s.%s\" % (settings.SITE_CACHE_KEY, key, self.pk) \n cached = cache.get(cache_key)\n if cached is None:\n cached = self.user.username\n cache.set(cache_key, cached)\n return cached",
"def _username_from_name(self, name):\r\n return name.replace(' ', '_')",
"def get_full_name(self):\n return self.username",
"def get_full_name(self):\n return self.username",
"def username(self) -> str:",
"def username(self) -> str:",
"def get_username(self):\n full_name = '%s %s' % (self.user.first_name.strip(), self.user.last_name.strip()[0:1])\n if len(full_name.strip()) == 0:\n full_name = self.user.username\n return full_name.strip()",
"def get_short_name(self):\n return f\"{self.first_name} {self.last_name[:1]}\" if self.first_name else self.username",
"def get_displayname(self):\n return self.full_name or self.user.username",
"def username(self) -> undefined.UndefinedOr[str]:",
"def get_short_name(self):\n return self.username",
"def get_short_name(self):\n return self.username",
"def get_short_name(self):\n return self.username",
"def get_full_name(self):\n full_name = f'{self.first_name} {self.last_name}' if self.first_name and self.last_name else self.username\n return full_name.strip()",
"def get_full_name(self):\n full_name = '%s %s' % (self.user.first_name.strip(), self.user.last_name.strip())\n if len(full_name.strip()) == 0:\n full_name = self.user.username\n return full_name.strip()",
"def test_nice_name_returns_full_namename(self):\n\n class User():\n username = 'my_username'\n\n def get_full_name(self):\n return 'my_full_name'\n\n rendered = self.render_nice_name(User())\n\n self.assertEquals(rendered, 'my_full_name')",
"def get_user_name(self):\n full_name = f'{self.f_name} {self.l_name}'\n return full_name",
"def generateUsername(self):\n retval= \"{0}.{1}\".format( self.first_name.split()[0].lower(),\n self.last_name.split()[-1].lower() )\n \n return toAscii(retval)",
"def get_name(self):\n user = self.user\n name = \"%s %s\" % (user.first_name, user.last_name)\n name = name.strip()\n\n return self.display_name or name or user.email or user.username",
"def nice_name():\n\n pass",
"def label_from_instance(obj):\n if len(obj.first_name) > 0 and len(obj.last_name) > 0:\n return \"{} {}\".format(obj.first_name, obj.last_name)\n else:\n return \"<{}>\".format(obj.username)",
"def clean_username (self):\n return self.instance.username",
"def username(self) -> str:\n raise NotImplementedError",
"def handle_public_name(user, request):\n\n if not user.is_authenticated:\n return 'Anonymous User'\n\n if 'ANONYMIZE' in request.COOKIES:\n request.__dict__.setdefault('scrub_names', {})\n request.scrub_names[user] = user.id\n return 'User Name_%d' % user.id\n else:\n return (user.get_full_name() or user.username).replace('\"', \"'\")",
"def ldap_get_fullname(self, user):\n result = super(Auth42, self)._search_not_empty(user)\n if result is not None:\n fullname = (result.get(\"first-name\")[0], result.get(\"last-name\")[0])\n return ' '.join(str(name) for name in fullname)\n\n return None",
"def get_user_display_name(self):\n return self.user.get_full_name() or self.user.get_username()",
"def full_name(self):\n return self.user.get_full_name() or None",
"def get_name(username):\n print(\"We halo \" + username + \" , piye kabare?\")",
"def full_name(self):\n return \"{} {}\".format(self.user.first_name, self.user.last_name)",
"def second_name(self, instance):\r\n return instance.user.profile.second_name"
] | [
"0.75342166",
"0.73341274",
"0.7307055",
"0.7307055",
"0.7300437",
"0.7300437",
"0.72588587",
"0.7255315",
"0.7189447",
"0.7167019",
"0.71295244",
"0.71295244",
"0.71295244",
"0.7097332",
"0.70863456",
"0.70732045",
"0.7000764",
"0.69829106",
"0.69238865",
"0.6918486",
"0.69040084",
"0.6895128",
"0.6866983",
"0.684702",
"0.6846003",
"0.6823926",
"0.6771629",
"0.670287",
"0.6693635",
"0.6679865"
] | 0.7638717 | 0 |
nice_name tag returns the full name when it is available | def test_nice_name_returns_full_namename(self):
class User():
username = 'my_username'
def get_full_name(self):
return 'my_full_name'
rendered = self.render_nice_name(User())
self.assertEquals(rendered, 'my_full_name') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def nice_name():\n\n pass",
"def test_nice_name_returns_username(self):\n\n class UserNoName():\n username = 'my_username'\n\n def get_full_name(self):\n return None\n\n rendered = self.render_nice_name(UserNoName())\n\n self.assertEquals(rendered, 'my_username')",
"def get_nice_name(self, part):\n part = os.path.basename(part)\n nice_name = part.title().replace(\"_\", \" \")\n return self.nice_name_dictionary.get(part, nice_name)",
"def get_name() -> str:",
"def nice_name(self):\n if self.first_name or self.last_name:\n return \"%s %s\" % (self.first_name, self.last_name)\n else:\n key = \"profile.nice_name\"\n cache_key = \"%s.%s.%s\" % (settings.SITE_CACHE_KEY, key, self.pk) \n cached = cache.get(cache_key)\n if cached is None:\n cached = self.user.username\n cache.set(cache_key, cached)\n return cached",
"def get_name():",
"def get_name_tag(obj):\n if 'Name' in obj.tags:\n return obj.tags['Name']\n else:\n return \"\"",
"def friendly_name(self) -> str:\n return pulumi.get(self, \"friendly_name\")",
"def full_name(self) -> str:\n # return self.separator.join(map(lambda x: x.name, self.path()))\n return self.separator.join(map(lambda x: x.tagged_name, self.path()))",
"def get_full_name(self):\n return self.name #self is base and it hits name filed",
"def get_full_name(self):\r\n full_name = '%s' % (self.name)\r\n return full_name.strip()",
"def get_name_tag(obj):\n if 'Name' in obj.tags:\n return obj.tags['Name']\n else:\n return \"\"",
"def get_full_name(self):\n\n return self.name",
"def get_name() -> str:\n pass",
"def print_name(nome, sobrenome):\r\n return nome + \" \" + sobrenome",
"def getName():",
"def getName():",
"def getName():",
"def getName():",
"def getName():",
"def getName():",
"def full_name(self) -> Optional[str]:\n return pulumi.get(self, \"full_name\")",
"def friendly_name(self) -> Optional[str]:\n return pulumi.get(self, \"friendly_name\")",
"def display_name(self):",
"def name(self) -> str:",
"def name(self) -> str:",
"def name(self) -> str:",
"def name(self) -> str:",
"def name(self) -> str:",
"def display_name(cls):\n return cls.name.replace('_', ' ').title()"
] | [
"0.7759537",
"0.7210823",
"0.71292967",
"0.7072953",
"0.70387155",
"0.69336873",
"0.688742",
"0.67883503",
"0.6753615",
"0.67322063",
"0.6729379",
"0.67096",
"0.67054737",
"0.6651297",
"0.66264844",
"0.6609411",
"0.6609411",
"0.6609411",
"0.6609411",
"0.6609411",
"0.6609411",
"0.6577421",
"0.6575875",
"0.65483785",
"0.65450674",
"0.65450674",
"0.65450674",
"0.65450674",
"0.65450674",
"0.6519995"
] | 0.73069805 | 1 |
Forwards output, extracts the Thonny message, and replaces normal prompts with raw prompts. This is executed when some code is running or just after requesting the raw prompt. After submitting commands to the raw REPL, the output should look like {stdout}\x04\{stderr}\x04\n\>. At the end of {stdout} there may be \x02{valueforthonny}. Interrupts will alter the execution, but from the response-parsing perspective they don't matter, as they look like any other exception. Things get complicated because of soft reboots, which always end with a regular prompt. Soft reboots can occur because of Ctrl+D or machine.soft_reset() | def _process_until_raw_prompt(self, capture_output=False):
# TODO: experiment with Ctrl+C, Ctrl+D, reset
eot_count = 0
value = None
done = False
out = b""
err = b""
while not done:
if (self._connection.num_bytes_received == 0
and time.time() - self._startup_time > 2):
self._send_output("[Device seems to be busy. Use Ctrl+C to interrupt.]\n", "stdout")
# There may be an input submission waiting
# and we can't progress without resolving it first
self._check_for_side_commands()
# Process input in chunks (max 1 parsing marker per chunk).
# Prefer whole lines (to reduce the number of events),
# but don't wait too long for eol.
output = self._connection.soft_read_until(BLOCK_CLOSERS, timeout=0.01)
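            # The first EOT (\x04) separates the stdout part from the stderr part of the
            # raw-REPL response ({stdout}\x04{stderr}\x04>), so route output accordingly.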
stream_name = "stderr" if eot_count == 1 else "stdout"
if output.endswith(THONNY_MSG_START):
debug("MSGSTA: " + str(output))
output = output[: -len(THONNY_MSG_START)]
# Low chance of failure (eg. because of precisely timed reboot),
# therefore it's safe to use big timeout
temp = self._connection.soft_read_until(THONNY_MSG_END, timeout=3)
if temp.endswith(THONNY_MSG_END):
value = temp[: -len(THONNY_MSG_END)]
debug("GOTVALUE: " + str(value))
else:
# failure, restore everything to help diagnosis
output = output + THONNY_MSG_START + temp
elif output.endswith(EOT):
debug("EOT: " + str(output))
output = output[: -len(EOT)]
eot_count += 1
if eot_count == 2:
# Normal completion of the command
# big chance of being at the raw prompt
temp = self._connection.soft_read_until(RAW_PROMPT, timeout=0.1)
if temp == RAW_PROMPT and self._connection.incoming_is_empty():
done = True
elif temp:
# Failure, temp needs to be parsed again
self._connection.unread(temp)
elif output.endswith(FIRST_RAW_PROMPT) and self._connection.incoming_is_empty():
debug("FIRAPRO: " + str(output))
output = output[: -len(FIRST_RAW_PROMPT)]
done = True
elif (
output.endswith(NORMAL_PROMPT)
and self._connection.peek_incoming() == b"\r\n" + FIRST_RAW_PROMPT
):
debug("NOPRO: " + str(output))
output = output + self._connection.read_until(FIRST_RAW_PROMPT)
# skip both normal and raw prompt together
# (otherwise they get processed separately)
output = output[: -len(NORMAL_PROMPT + b"\r\n" + FIRST_RAW_PROMPT)]
done = True
elif output.endswith(NORMAL_PROMPT) and self._connection.incoming_is_empty():
debug("NOPRO2: " + str(output))
output = output[: -len(NORMAL_PROMPT)]
# switch to raw mode and continue
self._connection.write(RAW_MODE_CMD)
if capture_output:
if stream_name == "stdout":
out += output
else:
assert stream_name == "stderr"
err += output
else:
# TODO: deal with partial UTF-8 chars
self._send_output(output.decode(ENCODING), stream_name)
debug("doneproc")
return (
out.decode(ENCODING),
err.decode(ENCODING),
None if value is None else value.decode(ENCODING),
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_prompt(self, timeout=30):\n #self.tc.expect(self.tool_prompt, timeout=timeout)\n #self.tf = self.tc.after.split()\n #return {'status': int(self.tf[self.tool_status_index]), 'output': self.tc.before}\n output = \"\"\n # Loop until we receive the special spt prompt while in pipe mode.\n while True:\n line = self.tc.stdout.readline()\n if re.search(self.tool_prompt, line):\n self.tf = line.split()\n break\n elif not len(line):\n # We've reached EOF or spt exited abnormally, usually a core dump!\n raise RuntimeError\n else:\n output += line\n #if self._debug:\n # print('Response: {0}'.format(self.tf))\n # print('Output: {0}'.format(output), end=None)\n #import pdb; pdb.set_trace()\n return {'status': int(self.tf[self.tool_status_index]), 'output': output}",
"def prompt(self, question):\n self.output(' ')\n self.output(question)\n self.output(self.parse_response(str(self.ui())))",
"def speech_response_prompt(output, reprompt_text, endsession):\n\n return {\n 'outputSpeech': {\n 'type': 'PlainText',\n 'text': output\n },\n 'reprompt': {\n 'outputSpeech': {\n 'type': 'PlainText',\n 'text': reprompt_text\n }\n },\n 'shouldEndSession': endsession\n }",
"def speech_response_prompt(output, reprompt_text, endsession):\n\n return {\n 'outputSpeech': {\n 'type': 'PlainText',\n 'text': output\n },\n 'reprompt': {\n 'outputSpeech': {\n 'type': 'PlainText',\n 'text': reprompt_text\n }\n },\n 'shouldEndSession': endsession\n }",
"def pseudo_raw_input(self, prompt):\n\n # Deal with the vagaries of readline and ANSI escape codes\n safe_prompt = self._surround_ansi_escapes(prompt)\n\n if self.use_rawinput:\n try:\n if sys.stdin.isatty():\n line = sm.input(safe_prompt)\n else:\n line = sm.input()\n if self.echo:\n sys.stdout.write('{}{}\\n'.format(safe_prompt, line))\n except EOFError:\n line = 'eof'\n else:\n if self.stdin.isatty():\n # on a tty, print the prompt first, then read the line\n self.poutput(safe_prompt, end='')\n self.stdout.flush()\n line = self.stdin.readline()\n if len(line) == 0:\n line = 'eof'\n else:\n # we are reading from a pipe, read the line to see if there is\n # anything there, if so, then decide whether to print the\n # prompt or not\n line = self.stdin.readline()\n if len(line):\n # we read something, output the prompt and the something\n if self.echo:\n self.poutput('{}{}'.format(safe_prompt, line))\n else:\n line = 'eof'\n return line.strip()",
"def exec_raw_no_follow(self, command) -> None:\n\n if isinstance(command, bytes):\n command_bytes = command\n else:\n command_bytes = bytes(command.encode(\"utf-8\"))\n\n # check we have a prompt\n data = self.read_until(1, b\">\")\n if not data.endswith(b\">\"):\n raise PyboardError(\"could not enter raw repl 5\")\n\n if self.use_raw_paste:\n # Try to enter raw-paste mode.\n self.con.write(b\"\\x05A\\x01\")\n data = self.con.read(2)\n if data == b\"R\\x00\":\n # Device understood raw-paste command but doesn't support it.\n pass\n elif data == b\"R\\x01\":\n # Device supports raw-paste mode, write out the command using this mode.\n return self.raw_paste_write(command_bytes)\n else:\n # Device doesn't support raw-paste, fall back to normal raw REPL.\n data = self.read_until(1, b\"w REPL; CTRL-B to exit\\r\\n>\")\n if not data.endswith(b\"w REPL; CTRL-B to exit\\r\\n>\"):\n print(data)\n raise PyboardError(\"could not enter raw repl\")\n # Don't try to use raw-paste mode again for this connection.\n self.use_raw_paste = False\n\n # write string\n debug(f'self.con.write \"{command_bytes}\"')\n self.con.write(command_bytes)\n\n # Alternative for write string above, do it in chuncks of max 256 bytes.\n # Write command using standard raw REPL, 256 bytes every 10ms.\n # for i in range(0, len(command_bytes), 256):\n # self.serial.write(command_bytes[i: min(i + 256, len(command_bytes))])\n # time.sleep(0.01)\n\n # Terminate command\n debug(r'self.con.write \"\\r\\x04\"')\n self.con.write(b\"\\x04\")\n\n # check if we could exec command\n data = self.read_until(2, b\"OK\", timeout=0.5)\n if data != b\"OK\":\n raise PyboardError(\"could not exec command (response: %r)\" % data)",
"def prompt(self):\n return input(self.message + \": \").strip()",
"def strip_prompt(self, a_string):\n output = super().strip_prompt(a_string)\n lines = output.split(self.RESPONSE_RETURN)\n if \"Done\" in lines[-1]:\n return self.RESPONSE_RETURN.join(lines[:-1])\n else:\n return output",
"def print_response(prompt, response, sep=' '):\n print(bold(prompt), end=sep)\n print(response)",
"def _dumb_prompt(self, message: AnyFormattedText = \"\") -> Iterator[Application[_T]]:\n # Send prompt to output.\n self.output.write(fragment_list_to_text(to_formatted_text(self.message)))\n self.output.flush()\n\n # Key bindings for the dumb prompt: mostly the same as the full prompt.\n key_bindings: KeyBindingsBase = self._create_prompt_bindings()\n if self.key_bindings:\n key_bindings = merge_key_bindings([self.key_bindings, key_bindings])\n\n # Create and run application.\n application = cast(\n Application[_T],\n Application(\n input=self.input,\n output=DummyOutput(),\n layout=self.layout,\n key_bindings=key_bindings,\n ),\n )\n\n def on_text_changed(_: object) -> None:\n self.output.write(self.default_buffer.document.text_before_cursor[-1:])\n self.output.flush()\n\n self.default_buffer.on_text_changed += on_text_changed\n\n try:\n yield application\n finally:\n # Render line ending.\n self.output.write(\"\\r\\n\")\n self.output.flush()\n\n self.default_buffer.on_text_changed -= on_text_changed",
"def say(output='', reprompt_text='', title='', should_end_session=True):\n if reprompt_text == '':\n reprompt_text = output\n\n return {\n 'version': '1.0',\n 'sessionAttributes': session_attributes,\n 'response': {\n 'outputSpeech': {\n 'type': 'PlainText',\n 'text': output\n },\n 'card': {\n 'type': 'Simple',\n 'title': title,\n 'content': output\n },\n 'reprompt': {\n 'outputSpeech': {\n 'type': 'PlainText',\n 'text': reprompt_text\n }\n },\n 'shouldEndSession': should_end_session\n }\n }",
"def prompt(self, console: io.IO, step: str,\n args: Dict[str, Any]) -> Dict[str, Any]:\n pass",
"def prompt(self, console: io.IO, step: str,\n args: Dict[str, Any]) -> Dict[str, Any]:\n pass",
"def pseudo_raw_input(self, prompt):\n\n if self.use_rawinput:\n try:\n line = sm.input(prompt)\n except EOFError:\n line = 'EOF'\n else:\n self.stdout.write(prompt)\n self.stdout.flush()\n line = self.stdin.readline()\n if not len(line):\n line = 'EOF'\n else:\n if line[-1] == '\\n': # this was always true in Cmd\n line = line[:-1]\n return line",
"def test_unknown(self):\n msg = \"*** Unknown syntax: asd\\n\"\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd(\"asd\")\n st = f.getvalue()\n self.assertEqual(msg, st)",
"def test_strip_prompt():\n string = \"\"\"MyRouter version 1.25.9\nmyhostname>\"\"\"\n connection = FakeBaseConnection(RESPONSE_RETURN=\"\\n\", base_prompt=\"myhostname>\")\n result = connection.strip_prompt(string)\n assert result == \"MyRouter version 1.25.9\"",
"async def terminal(event):\r\n command = utils.raw(event.message)\r\n await event.edit(f\"**Running command:**\\n`{command}`\")\r\n result = subprocess.getoutput(command)\r\n await event.edit(f\"**Running command:**\\n`{command}`\\n**Result:**\\n`{result}`\")",
"def showPrompt(self):\r\n self.terminal.nextLine()\r\n self.terminal.write(self.ps[self.pn])",
"def response(self, data, response_type = \"terminal\"):\n if (response_type == \"terminal\"):\n print(data, end=\"\\n\")",
"def __alt_prompt(self, prompt_text: str):\r\n if self.__use_windows_prompt:\r\n sys.stdout.write(prompt_text)\r\n sys.stdout.flush()\r\n i = sys.stdin.readline()\r\n return i.strip()\r\n return input(prompt_text)",
"def test_single_dialog_prompt_extra_line(monkeypatch, capsys):\n monkeypatch.setattr(\"sys.stdin\", io.StringIO(\"value\" + \"\\n\"))\n _dialog_prompt(\n parameter=DialogParameter(\"Title\", comment=\"Comment\"),\n )\n captured = capsys.readouterr()\n assert captured.out.count(\"\\n\") == 2",
"def lets_get_punny():\n \n text = '\\033[35;1m' # text color\n background = '\\033[30;1;45m'\n \n chat = True\n while chat:\n\n # Get a message from the user\n msg = input(background + 'You say \\U0001F4AC:\\t')\n out_msg = None\n \n #Checks if input has question mark\n question = is_question(msg)\n defined_question = how_question(msg)\n \n # Checks if input has exclamation point\n exclamation = is_screaming(msg)\n\n # Prepare the input message\n msg = prepare_text(msg)\n\n # Check for an end msg = \n if end_chat(msg):\n out_msg = '¡Adiós! \\U0001F44B'\n print(out_msg)\n break\n \n # all my message outputs here \n if not out_msg:\n \n outs = []\n \n outs.append(selector(msg, GREETING_IN, GREETING_OUT)) # Greetings\n \n outs.append(selector(msg, QUESTION_GREETING_IN, QUESTION_GREETING_OUT))\n \n outs.append(selector(msg, JOKE_REQUEST_IN, JOKE_REQUEST_OUT)) # Responses for certain questions\n outs.append(selector(msg, NO_JOKE_IN, NO_JOKE_OUT))\n outs.append(selector(msg, NO_JOKE_REPLY_IN, NO_JOKE_REPLY_OUT))\n \n outs.append(selector(msg, YES_JOKE_IN, YES_JOKE_OUT))\n outs.append(selector(msg, YES_JOKE_REPLY_IN, JOKE_REPLY_OUT))\n \n # How jokes get responses works\n msg_str = ' '.join(msg)\n msg_str = msg_str.lower()\n \n if msg_str in JOKE_REPLY_IN_2:\n name = find_in_list(msg, JOKE_REPLY_IN_2)\n outs.append(joke_reply_2(msg))\n \n outs.append(respond_echo(selector(msg, LAUGH_IN, LAUGH_OUT), 1, \"\\U0001F923 \"))\n \n options = list(filter(None, outs))\n \n if options:\n out_msg = random.choice(options)\n \n if not out_msg and exclamation: \n out_msg = random.choice(SCREAMING)\n \n if not out_msg and question:\n out_msg = text + random.choice(UNKNOWN_QUESTION)\n\n # Catch-all to say something if msg not caught & processed so far\n if not out_msg:\n out_msg = random.choice(UNKNOWN)\n\n print(text + 'JokeBot \\U0001F47E:\\t', out_msg + '\\n')",
"def prompt(self, task, text='', print_=False):\n template = self.prompts[task]['prompt']\n res = self.format_prompt(task, template, text)\n if print_:\n print(res)\n else:\n return res",
"def test_sanitize_output():\n\n output = \"\"\"\nshow cdp neighbors\nCapability Codes: R - Router, T - Trans Bridge, B - Source Route Bridge\n S - Switch, H - Host, I - IGMP, r - Repeater, P - Phone,\n D - Remote, C - CVTA, M - Two-port Mac Relay\n\nDevice ID Local Intrfce Holdtme Capability Platform Port ID\n\nTotal cdp entries displayed : 0\ncisco3#\"\"\"\n output = output.lstrip()\n\n expected = \"\"\"\nCapability Codes: R - Router, T - Trans Bridge, B - Source Route Bridge\n S - Switch, H - Host, I - IGMP, r - Repeater, P - Phone,\n D - Remote, C - CVTA, M - Two-port Mac Relay\n\nDevice ID Local Intrfce Holdtme Capability Platform Port ID\n\nTotal cdp entries displayed : 0\"\"\"\n expected = expected.lstrip()\n\n connection = FakeBaseConnection(\n RESPONSE_RETURN=\"\\n\",\n RETURN=\"\\n\",\n ansi_escape_codes=False,\n base_prompt=\"cisco3#\",\n )\n\n result = connection._sanitize_output(\n output,\n strip_command=True,\n command_string=\"show cdp neighbors\\n\",\n strip_prompt=True,\n )\n assert result == expected",
"def _prepare_state_prompts_without_proxy_pc(self):\n hops_config = self._configurations[TextualDevice.connection_hops]\n cfg_ux2adb = hops_config[UnixRemote.unix_remote][AdbRemote.adb_shell]\n cfg_adb2adbroot = hops_config[AdbRemote.adb_shell][AdbRemote.adb_shell_root]\n adb_shell_cmd_params = cfg_ux2adb[\"command_params\"]\n adb_shell_prompt = self._get_adb_shell_prompt(adb_shell_cmd_params)\n adb_shell_root_prompt = cfg_adb2adbroot[\"command_params\"][\"expected_prompt\"]\n if adb_shell_root_prompt is None:\n if adb_shell_prompt.endswith(\"$\"):\n adb_shell_root_prompt = adb_shell_prompt[:-1] + \"#\"\n else:\n consequence = \"Won't be able to detect {} state\".format(AdbRemote.adb_shell_root)\n fix = \"Please provide configuration with 'expected_prompt' for {} state\".format(AdbRemote.adb_shell_root)\n self._log(logging.WARNING, \"Unknown prompt for {} state. {}. {}.\".format(AdbRemote.adb_shell_root,\n consequence, fix))\n adb_shell_root_prompt = \"Unknown_adb_root_prompt\"\n\n state_prompts = {\n AdbRemote.adb_shell: adb_shell_prompt,\n AdbRemote.adb_shell_root: adb_shell_root_prompt,\n }\n return state_prompts",
"def output(self, response: str):\n\n # Try to output through the prefered medium, but revert to\n # backup if need to and log any errors found, for example:\n # logging.error(\"Problem!\")\n\n IO.stdout(response)",
"def test_strip_no_prompt():\n string = \"\"\"MyRouter version 1.25.9\nadditional text\"\"\"\n connection = FakeBaseConnection(RESPONSE_RETURN=\"\\n\", base_prompt=\"myhostname>\")\n result = connection.strip_prompt(string)\n assert result == string",
"def get_help_response():\n card_title = \"Welcome\"\n speech_output = \"Please give a command for your painting lights.\"\n\n response = response_builders.build_response(session_attributes,\n response_builders.build_speechlet_response(card_title,\n speech_output, reprompt_text, should_end_session))\n return response",
"def enter_repl(self):\n text_input = ''\n while True:\n text_input = input('>>')\n if text_input == 'exit':\n break\n #An alias for querying an instrument error string\n elif text_input == 'err?':\n self.write_to_serial(':SYST:ERR?')\n print(self.read_from_serial())\n else:\n self.write_to_serial(text_input)\n print(self.read_from_serial())",
"def prompt(self):\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\tif self.rf4ce_frame.frame_ciphered:\n\t\t\t\t\tciphered_status = \"ciphered\"\n\t\t\t\telse:\n\t\t\t\t\tciphered_status = \"plain\"\n\t\t\t\ta = hue.lightblue(\"{}\".format(self.rf4ce_frame.frame_counter))\n\t\t\t\tb = hue.lightblue(\"0x{:02x}\".format(self.rf4ce_frame.profile_indentifier))\n\t\t\t\tc = hue.lightblue(ciphered_status)\n\t\t\t\tcmd = raw_input(\"({} - {} - {})>>> \".format(a, b, c))\n\t\t\texcept KeyboardInterrupt:\n\t\t\t\traise StopIteration\n\n\t\t\tif cmd.startswith(\"profile\"):\n\t\t\t\ttry:\n\t\t\t\t\tyield InjectorCmd(InjectorCmd.PROFILE, cmd.split()[1])\n\t\t\t\texcept:\n\t\t\t\t\tself.log(\"Malformed command\", hue.bad)\n\t\t\t\t\tcontinue\n\n\t\t\telif cmd.startswith(\"counter\"):\n\t\t\t\ttry:\n\t\t\t\t\tyield InjectorCmd(InjectorCmd.COUNTER, cmd.split()[1])\n\t\t\t\texcept:\n\t\t\t\t\tself.log(\"Malformed command\", hue.bad)\n\t\t\t\t\tcontinue\n\n\t\t\telif cmd.startswith(\"delay\"):\n\t\t\t\ttry:\n\t\t\t\t\tyield InjectorCmd(InjectorCmd.DELAY, cmd.split()[1])\n\t\t\t\texcept:\n\t\t\t\t\tself.log(\"Malformed command\", hue.bad)\n\t\t\t\t\tcontinue\n\n\t\t\telif cmd.startswith(\"ciphered\"):\n\t\t\t\ttry:\n\t\t\t\t\tyield InjectorCmd(InjectorCmd.CIPHERED, cmd.split()[1])\n\t\t\t\texcept:\n\t\t\t\t\tself.log(\"Malformed command\", hue.bad)\n\t\t\t\t\tcontinue\n\n\t\t\telif cmd.startswith(\"help\"):\n\t\t\t\tyield InjectorCmd(InjectorCmd.HELP, None)\n\n\t\t\telif cmd.startswith(\"exit\"):\n\t\t\t\traise StopIteration\n\n\t\t\telse:\n\t\t\t\tfor packet in cmd.split():\n\t\t\t\t\ttry:\n\t\t\t\t\t\tdata = binascii.unhexlify(packet)\n\t\t\t\t\texcept:\n\t\t\t\t\t\tself.log(\"Malformed command\", hue.bad)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tyield InjectorCmd(InjectorCmd.PACKET, data)"
] | [
"0.59774065",
"0.59364516",
"0.5923323",
"0.5923323",
"0.5696055",
"0.56759435",
"0.56740737",
"0.562927",
"0.55930865",
"0.5571562",
"0.5549954",
"0.55442077",
"0.55442077",
"0.5541861",
"0.5516136",
"0.55004907",
"0.54771006",
"0.5472862",
"0.54632956",
"0.5442928",
"0.5430304",
"0.5398319",
"0.53692234",
"0.53671795",
"0.53634316",
"0.5352779",
"0.5349102",
"0.53478515",
"0.53377676",
"0.5327826"
] | 0.6925466 | 0 |
given a file of predictions and a file of targets, compute evaluation metrics and return a dict containing results (including a sum of squared errors so that errors can be added across multiple chromosomes). Metrics can be computed on gene/enhancer/promoter subsets by passing lists of bin ids matching these subsets (optionally after filtering by e.g. presence on a blacklist via interval_filter) | def evaluate_predictions(unfiltered_preds, unfiltered_targets, gene_bins=None,
enhancer_bins=None, promoter_bins=None, retain_bins=None):
unfiltered_errors = np.square(unfiltered_preds-unfiltered_targets)
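    # if no subset of bins to retain is given, evaluate over every bin on the chromosome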
if retain_bins is None:
retain_bins = np.arange(unfiltered_errors.shape[0])
# print('unfiltered errors shape, unfiltered errors size', unfiltered_errors.shape, unfiltered_errors.size)
gene_weights = np.zeros(unfiltered_preds.shape) # just (binned_chrom_length,)
enh_weights = np.zeros(unfiltered_preds.shape)
prom_weights = np.zeros(unfiltered_preds.shape)
sse_dict = {}
count_dict = {} # store number of bins we're summing over for each metric
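    # gwcorr / gwspear (helpers defined elsewhere) presumably compute Pearson and Spearman correlation over all bins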
corr = gwcorr(unfiltered_targets, unfiltered_preds)
spear = gwspear(unfiltered_targets, unfiltered_preds)
sse_dict['cwcorr'] = corr
sse_dict['cwspear'] = spear
count_dict['cwcorr'] = 1
count_dict['cwspear'] = 1
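    # squared errors restricted to gene / promoter / enhancer bins, when those bin lists are supplied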
if gene_bins is not None:
gene_weights[gene_bins] = 1.0
gene_errors = gene_weights*unfiltered_errors
gene_errors = gene_errors[retain_bins].sum()
sse_dict['gene'] = gene_errors
count_dict['gene'] = np.sum(gene_weights[retain_bins])
if promoter_bins is not None:
prom_weights[promoter_bins] = 1.0
prom_errors = prom_weights*unfiltered_errors
prom_errors = prom_errors[retain_bins].sum()
sse_dict['prom'] = prom_errors
count_dict['prom'] = np.sum(prom_weights[retain_bins])
if enhancer_bins is not None:
enh_weights[enhancer_bins] = 1.0
enh_errors = enh_weights*unfiltered_errors
enh_errors = enh_errors[retain_bins].sum()
sse_dict['enh'] = enh_errors
count_dict['enh'] = np.sum(enh_weights[retain_bins])
errors = unfiltered_errors[retain_bins]
targets = unfiltered_targets[retain_bins]
assert targets.shape == errors.shape
del unfiltered_targets
# TODO - use np.percentile
n_1pc = int(targets.shape[0] * 0.01)
y_true_sorted = np.sort(targets)
y_true_top1 = y_true_sorted[-n_1pc]
# y_true_top1 = np.percentile(targets, 99)
# print(y_true_top1, np.percentile(targets, 99))
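    # 'top1_obs': squared error over bins whose observed signal is in the top 1%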
idx_obs = targets >= y_true_top1
del targets
top1_obs = errors[idx_obs]
count_dict['top1_obs'] = top1_obs.size #.shape[0] would be exactly the same
sse_dict['top1_obs'] = top1_obs.sum()
preds = unfiltered_preds[retain_bins]
assert preds.shape == errors.shape
del unfiltered_preds
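    # 'top1_preds': same idea, but for bins whose predicted signal is in the top 1%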
n_1pc = int(preds.shape[0] * 0.01)
y_pred_sorted = np.sort(preds)
y_pred_top1 = y_pred_sorted[-n_1pc]
idx_pred = preds >= y_pred_top1
del preds
top1_preds = errors[idx_pred]
count_dict['top1_preds'] = top1_preds.size
sse_dict['top1_preds'] = top1_preds.sum()
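    # 'global': squared error summed over every retained bin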
count_dict['global'] = errors.size
sse_dict['global'] = errors.sum()
return sse_dict, count_dict | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def eval(\n task1_prediction_filename: str,\n task2_prediction_filename: str,\n target_filename: str,\n output_dir: str,\n case_ids_source: Optional[Union[str, List[str]]] = \"target\",\n) -> Tuple[OrderedDict, str]:\n create_dir(output_dir)\n\n # eval task1, task2 or both\n task1 = task1_prediction_filename is not None and task1_prediction_filename != \"\"\n task2 = task2_prediction_filename is not None and task2_prediction_filename != \"\"\n\n if case_ids_source is None:\n if task1:\n case_ids_source = \"task1_pred\"\n else:\n case_ids_source = \"task2_pred\"\n\n dataframes_dict = {}\n metrics = {}\n post_proc = partial(post_processing, task1=task1, task2=task2)\n # task 1\n if task1:\n # metrics to evaluate\n metrics.update(\n {\n \"task1_auc\": CI(\n MetricAUCROC(\n pred=\"task1_pred.array\",\n target=\"target.Task1-target\",\n class_names=TASK1_CLASS_NAMES,\n pre_collect_process_func=post_proc,\n ),\n stratum=\"target.Task1-target\",\n ),\n \"task1_roc_curve\": MetricROCCurve(\n pred=\"task1_pred.array\",\n target=\"target.Task1-target\",\n class_names=[None, \"\"],\n pre_collect_process_func=post_proc,\n output_filename=os.path.join(output_dir, \"task1_roc.png\"),\n ),\n }\n )\n # read files\n task1_pred_df = pd.read_csv(\n task1_prediction_filename, dtype={PRED_CASE_ID_NAME: object}\n )\n # verify input\n assert set(task1_pred_df.keys()).issubset(\n EXPECTED_TASK1_PRED_KEYS\n ), f\"Expecting task1 prediction file {os.path.abspath(task1_prediction_filename)} to include also the following keys: {EXPECTED_TASK1_PRED_KEYS - set(task1_pred_df.keys())}\"\n task1_pred_df[\"id\"] = task1_pred_df[PRED_CASE_ID_NAME]\n dataframes_dict[\"task1_pred\"] = task1_pred_df\n\n # task 2\n if task2:\n # metrics to evaluate\n metrics.update(\n {\n \"task2_auc\": CI(\n MetricAUCROC(\n pred=\"task2_pred.array\",\n target=\"target.Task2-target\",\n class_names=TASK2_CLASS_NAMES,\n pre_collect_process_func=post_proc,\n ),\n stratum=\"target.Task2-target\",\n ),\n \"task2_roc_curve\": MetricROCCurve(\n pred=\"task2_pred.array\",\n target=\"target.Task2-target\",\n class_names=TASK2_CLASS_NAMES,\n output_filename=os.path.join(output_dir, \"task2_roc.png\"),\n pre_collect_process_func=post_proc,\n ),\n }\n )\n # read files\n task2_pred_df = pd.read_csv(\n task2_prediction_filename, dtype={PRED_CASE_ID_NAME: object}\n )\n # verify input\n assert set(task2_pred_df.keys()).issubset(\n EXPECTED_TASK2_PRED_KEYS\n ), f\"Expecting task2 prediction file {os.path.abspath(task2_prediction_filename)} to include also the following keys: {EXPECTED_TASK2_PRED_KEYS - set(task2_pred_df.keys())}\"\n task2_pred_df[\"id\"] = task2_pred_df[PRED_CASE_ID_NAME]\n dataframes_dict[\"task2_pred\"] = task2_pred_df\n\n # read files\n target_df = pd.read_csv(target_filename, dtype={TARGET_CASE_ID_NAME: object})\n # verify input\n assert set(target_df.keys()).issubset(\n EXPECTED_TARGET_KEYS\n ), f\"Expecting target file {os.path.abspath(target_filename)} to include also the following keys: {EXPECTED_TARGET_KEYS - set(target_df.keys())}\"\n target_df[\"id\"] = target_df[TARGET_CASE_ID_NAME]\n dataframes_dict[\"target\"] = target_df\n\n # analyze\n evaluator = EvaluatorDefault()\n results = evaluator.eval(\n ids=list(dataframes_dict[case_ids_source][\"id\"]),\n data=dataframes_dict,\n metrics=metrics,\n output_dir=None,\n )\n\n # output\n return decode_results(results, output_dir=output_dir, task1=task1, task2=task2)",
"def compute_metrics_from_files(p_path_to_reference_file,\r\n p_path_to_candidate_file,\r\n p_max_bleu_order):\r\n\r\n reference_dictionary, reference_no_answer_query_ids = \\\r\n load_file(p_path_to_reference_file)\r\n candidate_dictionary, candidate_no_answer_query_ids = load_file(p_path_to_candidate_file)\r\n query_id_answerable = set(reference_dictionary.keys())-reference_no_answer_query_ids\r\n query_id_answerable_candidate = set(candidate_dictionary.keys())-candidate_no_answer_query_ids\r\n \r\n true_positives = len(query_id_answerable_candidate.intersection(query_id_answerable))\r\n false_negatives = len(query_id_answerable)-true_positives\r\n true_negatives = len(candidate_no_answer_query_ids.intersection(reference_no_answer_query_ids))\r\n false_positives = len(reference_no_answer_query_ids)-true_negatives\r\n precision = float(true_positives)/(true_positives+false_positives) if (true_positives+false_positives)>0 else 1.\r\n recall = float(true_positives)/(true_positives+false_negatives) if (true_positives+false_negatives)>0 else 1.\r\n F1 = 2 *((precision*recall)/(precision+recall))\r\n filtered_reference_dictionary = \\\r\n {key: value for key, value in reference_dictionary.items() \\\r\n if key not in reference_no_answer_query_ids}\r\n\r\n filtered_candidate_dictionary = \\\r\n {key: value for key, value in candidate_dictionary.items() \\\r\n if key not in reference_no_answer_query_ids}\r\n\r\n for query_id, answers in filtered_candidate_dictionary.items():\r\n assert \\\r\n len(answers) <= 1, \\\r\n 'query_id %d contains more than 1 answer \\\"%s\\\" in candidate file' % \\\r\n (query_id, str(answers))\r\n\r\n reference_query_ids = set(filtered_reference_dictionary.keys())\r\n candidate_query_ids = set(filtered_candidate_dictionary.keys())\r\n common_query_ids = reference_query_ids.intersection(candidate_query_ids)\r\n assert (len(common_query_ids) == len(reference_query_ids)) and \\\r\n (len(common_query_ids) == len(candidate_query_ids)), \\\r\n 'Reference and candidate files must share same query ids'\r\n\r\n all_scores = {}\r\n bleu_scores, _ = \\\r\n Bleu(p_max_bleu_order).compute_score(filtered_reference_dictionary, \\\r\n filtered_candidate_dictionary)\r\n for i, bleu_score in enumerate(bleu_scores):\r\n all_scores['bleu_%d' % (i+1)] = bleu_score\r\n\r\n rouge_score, _ = Rouge().compute_score(filtered_reference_dictionary, \\\r\n filtered_candidate_dictionary)\r\n all_scores['rouge_l'] = rouge_score\r\n all_scores['F1'] = F1\r\n similarity = 0\r\n for key in filtered_reference_dictionary:\r\n candidate_answer = nlp(filtered_candidate_dictionary[key][0])\r\n reference_answer = filtered_reference_dictionary[key]\r\n answersimilarity = 0\r\n for answer in reference_answer:\r\n answersimilarity += candidate_answer.similarity(nlp(answer))\r\n similarity += answersimilarity/len(reference_answer)\r\n semantic_similarity = similarity/len(filtered_reference_dictionary)\r\n all_scores['Semantic_Similarity'] = semantic_similarity\r\n return all_scores",
"def _run_evaluation(\n sess, experiment, eval_config, output_dir, min_range, max_range, num_bins,\n torsion_bins):\n tf.io.gfile.makedirs(os.path.join(output_dir, 'pickle_files'))\n\n logging.info('Eval config is %s\\nnum_bins: %d', eval_config, num_bins)\n num_examples = 0\n num_crops = 0\n start_all_time = time.time()\n\n # Either do the whole test set, or up to a specified limit.\n max_examples = experiment.num_eval_examples\n if eval_config.max_num_examples > 0:\n max_examples = min(max_examples, eval_config.max_num_examples)\n\n while num_examples < max_examples:\n one_prediction = compute_one_prediction(\n num_examples, experiment, sess, eval_config, num_bins, torsion_bins)\n\n single_message = one_prediction.single_message\n num_crops_local = one_prediction.num_crops_local\n sequence = one_prediction.sequence\n filebase = one_prediction.filebase\n softmax_probs = one_prediction.softmax_probs\n ss = one_prediction.ss\n asa = one_prediction.asa\n torsions = one_prediction.torsions\n\n num_examples += 1\n num_crops += num_crops_local\n\n # Save the output files.\n filename = os.path.join(output_dir,\n 'pickle_files', '%s.pickle' % filebase)\n distogram_io.save_distance_histogram(\n filename, softmax_probs, filebase, sequence,\n min_range=min_range, max_range=max_range, num_bins=num_bins)\n\n if experiment.model.torsion_multiplier > 0:\n torsions_dir = os.path.join(output_dir, 'torsions')\n tf.io.gfile.makedirs(torsions_dir)\n distogram_io.save_torsions(torsions_dir, filebase, sequence, torsions)\n\n if experiment.model.secstruct_multiplier > 0:\n ss_dir = os.path.join(output_dir, 'secstruct')\n tf.io.gfile.makedirs(ss_dir)\n secstruct.save_secstructs(ss_dir, filebase, None, sequence, ss)\n\n if experiment.model.asa_multiplier > 0:\n asa_dir = os.path.join(output_dir, 'asa')\n tf.io.gfile.makedirs(asa_dir)\n secstruct.save_secstructs(asa_dir, filebase, None, sequence,\n np.expand_dims(asa, 1), label='Deepmind 2D ASA')\n\n time_spent = time.time() - start_all_time\n logging.info(\n 'Evaluate %d examples, %d crops %.1f crops/ex. '\n 'Took %.1fs, %.3f s/example %.3f crops/s\\n%s',\n num_examples, num_crops, num_crops / float(num_examples), time_spent,\n time_spent / num_examples, num_crops / time_spent, single_message)\n\n logging.info('Tested on %d', num_examples)",
"def main(targets):\n # Parse through the datasets and select only relevant columns\n cpu_df = data_exploration.parse_cpu_data(\"data/raw/hw_metric_histo.csv000\")\n sys_df = data_exploration.parse_sys_data(\"data/raw/system_sysinfo_unique_normalized.csv000\")\n\n # Create a new reference to the optimized DataFrame\n optimized_df = data_exploration.optimize_dataframe(cpu_df)\n\n # grab the specific column \"HW::CORE:C0:PERCENT\" as a feature\n cpu = data_exploration.get_stats(optimized_df, \"name\", \"HW::CORE:C0:PERCENT:\")\n\n # grab the specific column \"HW::CORE:TEMPERATURE:CENTIGRADE\" as a feature\n temp = data_exploration.get_stats(optimized_df, \"name\", \"HW::CORE:TEMPERATURE:CENTIGRADE:\")\n\n # grab the GUIDs from each dataset and put them into lists\n sys_guid = data_exploration.get_guid(sys_df, 'guid')\n hw_guid = data_exploration.get_guid(cpu_df, 'guid')\n\n # checking for the GUID overlap in both datasets\n syshw_overlap = [guid for guid in sys_guid if guid in hw_guid]\n\n # objective is to create a dataframe of only matching GUIDs\n hwcpu_match = data_exploration.get_cpu_guid(cpu, syshw_overlap)\n\n # only grabbing the relevant columns to be matched on\n hwtemp_match = data_exploration.get_temp_guid(temp, syshw_overlap)\n\n # instantiating our dataframes to be joined\n hwtemp = pd.DataFrame(hwtemp_match.groupby('guid')['temp_mean'].mean())\n hwcpu = pd.DataFrame(hwcpu_match.groupby('guid')['utilization_mean'].mean())\n\n # joining our matched dataframes together, only using relevant columns\n combined = sys_df.join(hwcpu, on=['guid'], how='left')\n combined = combined.join(hwtemp, on=['guid'], how='left')\n combined = combined.drop(columns=['guid', 'model_normalized', \"processornumber\"])\n\n # create copy of our joined dataframe to be used for modelling\n feature_columns = combined.copy()\n\n # selecting only relevant columns to use for features\n feature_columns = feature_columns[['os','cpu_family', 'cpuvendor',\n 'graphicscardclass', 'persona']]\n\n # creating a completely one-hot encoded dataframe only containing relevant columns\n dummy = pd.get_dummies(feature_columns)\n\n # converting our categorical variables to be predicted on into numerical values\n cleanup_nums = {'persona': {'Web User': 0, 'Casual User': 1, 'Gamer':2, 'Casual Gamer': 3,\n 'Office/Productivity':4, 'Content Creator/IT': 5,\n 'Communication': 6, 'Win Store App User': 7, 'Entertainment': 8,\n 'File & Network Sharer':9, 'Unknown': 10}}\n\n # replacing the values in the column 'persona' to be numerical\n encode_persona = combined['persona'].to_frame().replace(cleanup_nums)\n\n # putting our old means back into the dummy dataframe\n dummy['util_mean'] = combined['utilization_mean']\n dummy['temp_mean'] = combined['temp_mean']\n # dummy = dummy.drop(columns=['persona'])\n dummy['persona'] = encode_persona['persona']\n\n dummy = dummy.dropna()\n nona_test = dummy.copy()\n\n # we want to predict on Y\n Y = nona_test['persona']\n X = nona_test.drop(columns=['persona'])\n\n # creating our test/train split\n X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)\n\n # all the models we are going to use\n names = [\"Nearest_Neighbors\", \"Linear_SVM\", \"Polynomial_SVM\", \"RBF_SVM\", \"Gradient_Boosting\"]\n\n # all of our predictors scaled to the degree of our datasets\n classifiers = [KNeighborsClassifier(3),\n SVC(kernel=\"linear\", C=0.025),\n SVC(kernel=\"poly\", degree=3, C=0.025),\n SVC(kernel=\"rbf\", C=1, gamma=2),\n GradientBoostingClassifier(n_estimators=100, 
learning_rate=1.0)]\n\n scores = []\n # we write in our accuracy scores to [scores]\n for name, clf in zip(names, classifiers):\n clf.fit(X_train, Y_train)\n score = clf.score(X_test, Y_test)\n scores.append(score)\n\n show = data_exploration.get_model_scores(names, scores)\n model_scores = data_exploration.plot_graphical_model_scores(show)",
"def main(argv):\n from argparse import ArgumentParser\n argparser = ArgumentParser(description=\"\")\n argparser.add_argument(\"--germeval\",\n help=\"use GermEval format\", action=\"store_true\")\n argparser.add_argument(\"-g\", \"--glob-ptrn\",\n help=\"globbing pattern to use for finding files in\"\n \" directories\", type=str, default=\"*.tsv\")\n argparser.add_argument(\"-v\", \"--verbose\",\n help=\"output prediction errors\",\n action=\"store_true\")\n argparser.add_argument(\"gold_file\",\n help=\"file or directory containing gold data\")\n argparser.add_argument(\"pred_file\",\n help=\"file or directory containing predicted\"\n \" labels\")\n args = argparser.parse_args(argv)\n\n gold_data = read_data(args.gold_file, args.glob_ptrn, args.germeval)\n pred_data = read_data(args.pred_file, args.glob_ptrn, args.germeval)\n\n # check whether we have the same set of ids in both datasets\n gold_ids = gold_data.loc[:, ID]\n gold_ids_set = set(gold_ids)\n pred_ids = pred_data.loc[:, ID]\n pred_ids_set = set(pred_ids)\n xor_ids = (gold_ids_set - pred_ids_set) | (pred_ids_set - gold_ids_set)\n assert len(gold_ids) == len(pred_ids) \\\n and all(gold_ids.values == pred_ids.values), \\\n \"Unmatched ids: {!r} vs. {!r} (ids present in just one\" \\\n \" of the sets: {!r})\".format(gold_ids, pred_ids, xor_ids)\n\n # compute and output classification statistics\n compute_stat(gold_data, pred_data, args.verbose)",
"def evaluate_and_dump_predictions(pred, qids, qfile, afile, ix_ans_dict, filename):\n assert len(pred) == len(qids), \"Number of predictions need to match number of question IDs\"\n answers = []\n for i, val in enumerate(pred):\n qa_pair = {}\n qa_pair['question_id'] = int(qids[i])\n qa_pair['answer'] = ix_ans_dict[str(val + 1)] # note indexing diff between python and torch\n answers.append(qa_pair)\n vqa = VQA(afile, qfile)\n fod = open(filename, 'wb')\n json.dump(answers, fod)\n fod.close()\n # VQA evaluation\n vqaRes = vqa.loadRes(filename, qfile)\n vqaEval = VQAEval(vqa, vqaRes, n=2)\n vqaEval.evaluate()\n acc = vqaEval.accuracy['overall']\n print(\"Overall Accuracy is: %.02f\\n\" % acc)\n return acc",
"def evaluate_bm(all_metrics):\n f_gt, n_gt, n_st = 0, 0, 0\n nbox_gt, nbox_st = 0, 0\n c, g, fp, missed, ids = 0, 0, 0, 0, 0\n IDTP, IDFP, IDFN = 0, 0, 0\n MT, ML, PT, FRA = 0, 0, 0, 0\n overlap_sum = 0\n for i in range(len(all_metrics)):\n nbox_gt += all_metrics[i].idmetrics.nbox_gt\n nbox_st += all_metrics[i].idmetrics.nbox_st\n\n # Total ID Measures\n IDTP += all_metrics[i].idmetrics.IDTP\n IDFP += all_metrics[i].idmetrics.IDFP\n IDFN += all_metrics[i].idmetrics.IDFN\n\n # Total ID Measures\n MT += all_metrics[i].MT\n ML += all_metrics[i].ML\n PT += all_metrics[i].PT\n FRA += all_metrics[i].FRA\n f_gt += all_metrics[i].f_gt\n n_gt += all_metrics[i].n_gt\n n_st += all_metrics[i].n_st\n c += all_metrics[i].c\n g += all_metrics[i].g\n fp += all_metrics[i].fp\n missed += all_metrics[i].missed\n ids += all_metrics[i].mme\n overlap_sum += sum(sum(all_metrics[i].d))\n\n # IDP = IDTP / (IDTP + IDFP)\n IDP = IDTP / (IDTP + IDFP) * 100\n\n # IDR = IDTP / (IDTP + IDFN)\n IDR = IDTP / (IDTP + IDFN) * 100\n\n # IDF1 = 2 * IDTP / (2 * IDTP + IDFP + IDFN)\n IDF1 = 2 * IDTP / (nbox_gt + nbox_st) * 100\n FAR = fp / f_gt\n MOTP = (overlap_sum / c) * 100\n\n # MOTAL = 1 - (# fp + # fn + #log10(ids)) / # gts\n MOTAL = (1 - (fp + missed + np.log10(ids + 1)) / g) * 100\n\n # MOTA = 1 - (# fp + # fn + # ids) / # gts\n MOTA = (1 - (fp + missed + ids) / g) * 100\n\n # recall = TP / (TP + FN) = # corrected boxes / # gt boxes\n recall = c / g * 100\n\n # precision = TP / (TP + FP) = # corrected boxes / # det boxes\n precision = c / (fp + c) * 100\n metrics = [IDF1, IDP, IDR, recall, precision, FAR, n_gt,\n MT, PT, ML, fp, missed, ids, FRA, MOTA, MOTP, MOTAL]\n return metrics",
"def analyse_results(self, \n target_dir, \n param_file = \"TRAINED_PARAMS_END.model\",\n w_norm_file = \"W_NORMS.dat\",\n num_to_test = 100,\n get_means_from = {\n 'NOISY' :'RECONST_NOISY_ERRORS.dat',\n 'MISSING':'RECONST_MISSING_ERRORS.dat'\n },\n given_inputs = []):\n \n path_to_json = os.path.join(target_dir, \"PARAMETERS.json\")\n \n with open(path_to_json, 'r') as json_file:\n \n param_dict = json.load(json_file)\n \n self.num_hidden = param_dict['GLOBAL']['num_hidden']\n \n num_runs = param_dict['GLOBAL']['num_runs']\n \n path_to_mix_params = os.path.join(target_dir, \"MIX_PARAMS.dat\")\n \n reg_dict = {}\n \n for f_name in param_dict.keys():\n \n if f_name != \"GLOBAL\":\n reg_dict[param_dict[f_name]['algorithm']] =\\\n param_dict[f_name]['regressor']\n \n self.mixture = False\n \n E_gaps = {}\n \n recon_errors = {}\n \n p_tilda_data = {}\n \n end_w_norms = {}\n \n for field in get_means_from.keys():\n \n recon_errors[field] = {}\n \n if os.path.exists(path_to_mix_params):\n \n self.mixture = True\n \n self.mix_params = np.loadtxt(path_to_mix_params)\n \n for run_ind in range(num_runs):\n \n sub_f1 = \"run%s\"%run_ind\n \n sub_dir = os.path.join(target_dir, sub_f1)\n \n if os.path.isdir(sub_dir): \n print(\"Processing %s\"%sub_dir)\n tr_file = \"TRAIN_IMAGES.dat\"\n check_input_file = os.path.join(sub_dir, tr_file)\n \n if os.path.exists(check_input_file):\n inputs = np.loadtxt(check_input_file)\n # to make fair comparison, number of is samples\n # is set to the number of test inputs\n self.num_samples = inputs.shape[0]\n else:\n \n if isinstance(given_inputs, np.ndarray):\n N = given_inputs.shape[0]\n inds = self.np_rand_gen.choice(N, \n num_to_test,\n replace = False)\n \n inputs = given_inputs[inds,:]\n \n self.num_samples = num_to_test\n \n else:\n print(\"Error: %s does not contain file %s\"\n %(sub_dir2, tr_file) +\" and given_inputs is []\")\n print(\"Script execution will terminate\") \n sys.exit()\n \n self.batch_size = inputs.shape[0]\n \n if self.mixture:\n self.set_mixture_means(inputs = inputs)\n \n is_samples, _ = self.is_sampler()\n \n for sub_f2 in os.listdir(sub_dir):\n \n sub_dir2 = os.path.join(sub_dir, sub_f2)\n \n if os.path.isdir(sub_dir2):\n \n if \"_\" in sub_f2:\n spl_str = sub_f2.split(\"_\")\n if len(spl_str) > 3:\n algorithm = spl_str[0]+\"_\"+spl_str[1]\n else:\n algorithm = spl_str[0]\n \n else:\n algorithm = sub_f2\n \n field_name =algorithm \n \n if reg_dict[algorithm] != None:\n if reg_dict[algorithm] in sub_f2:\n if \"_\" in reg_dict[algorithm]:\n reg_name_s = reg_dict[algorithm].split(\"_\")\n \n reg_name_s = reg_name_s[0][0].upper() +\\\n reg_name_s[1][0].upper()\n field_name +=\" %s\"%reg_name_s\n else:\n field_name +=\" %s\"%reg_dict[algorithm]\n \n get_val = sub_f2\n get_val = get_val.split(reg_dict[algorithm])[1]\n \n field_name +=\" %s\"%get_val\n \n if field_name not in E_gaps.keys():\n \n E_gaps[field_name] = []\n \n if field_name not in p_tilda_data.keys():\n \n p_tilda_data[field_name] = []\n \n if field_name not in end_w_norms.keys():\n \n end_w_norms[field_name] = []\n \n par_path = os.path.join(sub_dir2, param_file)\n \n w_norms_path = os.path.join(sub_dir2, w_norm_file)\n \n w_norms = np.loadtxt(w_norms_path)\n \n end_w_norms[field_name].append(w_norms[-1])\n \n if os.path.exists(par_path):\n \n css_diff, p_tilda_vals =\\\n self.compare_css_terms(x_inputs = inputs,\n x_samples = is_samples,\n full_path = par_path)\n \n E_gaps[field_name].append(css_diff)\n \n mean_val = np.mean(p_tilda_vals[0:self.batch_size])\n \n 
p_tilda_data[field_name].append(mean_val)\n \n else:\n \n print(\"Error: %s does not exist\"%par_path)\n sys.exit()\n \n for f_exp in get_means_from.keys():\n \n file_name = get_means_from[f_exp]\n \n check_err_file = os.path.join(sub_dir2, file_name)\n \n errors = np.loadtxt(check_err_file)\n \n mean_val = np.mean(errors)\n \n if field_name not in recon_errors[f_exp].keys():\n \n recon_errors[f_exp][field_name] = []\n \n recon_errors[f_exp][field_name].append(mean_val)\n \n return E_gaps, recon_errors, p_tilda_data, end_w_norms",
"def evaluate_individual(predictions, test_files, models):\n\n print(\"\\nAccuracy for individual models\\n\")\n \n # Fix Location\n correct_predictions = [0, 0, 0]\n total_predictions = [0, 0, 0]\n num_failed_predictions = 0\n\n for prediction in predictions:\n if prediction[\"correct_data\"][\"correct_location\"] == prediction[\"predicted_location\"]:\n correct_predictions[FixType[prediction[\"correct_data\"][\"correct_type\"]].value] = correct_predictions[FixType[\n prediction[\"correct_data\"][\"correct_type\"]].value] + 1\n if prediction[\"predicted_location\"] is None:\n num_failed_predictions = num_failed_predictions + 1\n total_predictions[FixType[prediction[\"correct_data\"][\"correct_type\"]].value] = total_predictions[FixType[\n prediction[\"correct_data\"][\"correct_type\"]].value] + 1\n\n for i in range(3):\n if total_predictions[i] == 0: # If the type was never predicted\n accuracy = 0\n else:\n accuracy = correct_predictions[i] / total_predictions[i]\n print(f\"Fix Location accuracy for class {FixType(i).name}: {accuracy * 100} %\")\n\n accuracy = sum(correct_predictions) / (len(predictions) - num_failed_predictions)\n print(f\"Fix Location accuracy overall is {accuracy * 100} %\")\n \n # Fix type\n correct_predictions = [0, 0, 0]\n total_predictions = [0, 0, 0]\n num_failed_predictions = 0\n\n for prediction in predictions:\n if prediction[\"correct_data\"][\"correct_type\"] == prediction[\"predicted_type\"]:\n correct_predictions[FixType[prediction[\"predicted_type\"]].value] = correct_predictions[FixType[\n prediction[\"predicted_type\"]].value] + 1\n if prediction[\"predicted_type\"] is None:\n num_failed_predictions = num_failed_predictions + 1\n total_predictions[FixType[prediction[\"predicted_type\"]].value] = total_predictions[FixType[\n prediction[\"predicted_type\"]].value] + 1\n\n for i in range(3):\n if total_predictions[i] == 0: # If the type was never predicted\n accuracy = 0\n else:\n accuracy = correct_predictions[i] / total_predictions[i]\n print(f\"Fix Type accuracy for class {FixType(i).name}: {accuracy * 100} %\")\n\n accuracy = sum(correct_predictions) / (len(predictions) - num_failed_predictions)\n print(f\"Fix Type accuracy overall is {accuracy * 100} %\")\n \n # We repeat the predictions to evaluate the insert and modify models individually, regardless of the predicted fix type \n\n raw_training_samples = []\n\n if test_files.endswith(\".json\"): # Single JSON file\n with open(test_files) as file:\n logging.info(\"Source ending in .json. 
Predicting on single JSON file.\")\n raw_training_samples = json.load(file)\n else: # Folder path\n for filename in listdir(test_files):\n with open(test_files + filename) as file:\n raw_training_samples.extend(json.load(file))\n \n correct_predictions_insert = 0\n total_predictions_insert = 0\n correct_predictions_modify = 0\n total_predictions_modify = 0\n insert_tokens = []\n modify_tokens = []\n\n for sample in raw_training_samples:\n # Insert\n if sample[\"metadata\"][\"fix_type\"] == \"insert\":\n actual_sample, tokens = IOProcessor.preprocess(sample[\"wrong_code\"])\n pred = predict_single(actual_sample, models[2])\n token = IOProcessor.postprocess(pred, 2)\n if token == sample[\"metadata\"][\"fix_token\"]: # Correct Prediction\n correct_predictions_insert = correct_predictions_insert + 1\n else: # Incorrect prediction\n insert_tokens.append([token, sample[\"metadata\"][\"fix_token\"]])\n total_predictions_insert = total_predictions_insert + 1\n # Modify\n if sample[\"metadata\"][\"fix_type\"] == \"modify\":\n actual_sample, tokens = IOProcessor.preprocess(sample[\"wrong_code\"])\n pred = predict_single(actual_sample, models[3])\n token = IOProcessor.postprocess(pred, 3)\n if token == sample[\"metadata\"][\"fix_token\"]: # Correct Prediction\n correct_predictions_modify = correct_predictions_modify + 1\n else: # Incorrect prediction\n modify_tokens.append([token, sample[\"metadata\"][\"fix_token\"]])\n total_predictions_modify = total_predictions_modify + 1\n\n insert_accuracy = correct_predictions_insert / total_predictions_insert\n modify_accuracy = correct_predictions_modify / total_predictions_modify\n print(f\"Fix Token accuracy for insert is {insert_accuracy * 100} %\")\n print(f\"Fix Token accuracy for modify is {modify_accuracy * 100} %\")\n\n # The following code may be used to create a swarm plot of the erroneous predictions for fix locations\n # This does, however, require the installation of the pandas, seaborn, and matplotlib libraries.\n \n # import seaborn as sns\n # import matplotlib.pyplot as plt\n # import pandas as pd\n # location_distance_array = []\n # for prediction in predictions:\n # actual_sample, tokens = IOProcessor.preprocess(prediction[\"correct_data\"][\"wrong_code\"])\n # label = get_token_index(prediction[\"correct_data\"][\"wrong_code\"], tokens, prediction[\"correct_data\"][\"correct_location\"])\n # if prediction[\"predicted_token_location\"] - label == 0:\n # pass\n # else:\n # location_distance_array.append([prediction[\"predicted_token_location\"] - label, prediction[\"correct_data\"][\"correct_type\"]])\n \n # df = pd.DataFrame(data=location_distance_array)\n # sns.set_theme(style=\"whitegrid\")\n # f, ax = plt.subplots(figsize=(6, 4))\n # sns.despine(bottom=True, left=True)\n # sns.swarmplot(y=0, x=1, data=df, palette=\"dark\", size=6)\n # ax.set_xlabel('')\n # ax.set_ylabel('')\n # plt.ylim([-15, 16])\n \n # plt.savefig('line_plot.pdf', bbox_inches='tight', pad_inches=0)",
"def evaluate(self, benchmark_data, metrics={}, options={}):\n dir_name = list(benchmark_data.keys())[0]\n relations = list(benchmark_data.values())[0]\n \n if \"relation\" in options :\n relations = [options[\"relation\"]]\n \n precision_total = 0\n for relation in relations :\n if relation.endswith(\"_inv\") :\n continue\n relation = relation.replace(\":\", \"_\")\n evaluate.setup(relation, dir_name)\n precision_total += evaluate.evaluate_logic()\n \n result = {}\n result[\"MAP\"] = precision_total/len(relations)\n\n return result",
"def compute_metrics(\n self,\n preds: Dict[str, torch.Tensor],\n targets: Dict[str, torch.Tensor],\n phase: str,\n ) -> Dict[str, torch.Tensor]:\n if phase == \"train\":\n metrics_dict = self.train_metrics\n elif phase == \"val\":\n metrics_dict = self.val_metrics\n elif phase == \"test\":\n metrics_dict = self.test_metrics\n\n ret = {}\n for metric_name, metric in metrics_dict.items():\n if metric is not None:\n branch = metric_name.split(\"_\")[0]\n ret[metric_name] = metric(preds[branch], targets[branch])\n\n return ret",
"def evaluate_predictions_from_filepaths(\n ground_truth_filepath: str, prediction_filepath: str, node_types: List[str]\n) -> Dict[str, float]:\n\n ground_truth_tree_jsons = read_jsonl(ground_truth_filepath)\n prediction_tree_jsons = read_jsonl(prediction_filepath)\n\n return evaluate_predictions_from_jsons(\n ground_truth_tree_jsons, prediction_tree_jsons, node_types\n )",
"def calculateMeasures(\n folder_gold=\"data/dev/\", folder_pred=\"data_pred/dev/\", remove_anno=\"\"\n):\n\n flist_gold = os.listdir(folder_gold)\n res_all_gold = []\n res_all_pred = []\n targets = []\n\n if type(remove_anno) == str:\n remove_anno = [remove_anno]\n if \"types\" in remove_anno:\n remove_anno.append(\"rel\")\n\n for f in flist_gold:\n # ignoring non-.ann files, should there be any\n if not str(f).endswith(\".ann\"):\n continue\n f_gold = open(os.path.join(folder_gold, f), \"r\")\n try:\n f_pred = open(os.path.join(folder_pred, f), \"r\")\n res_full_pred, res_pred, spans_pred, rels_pred = normaliseAnnotations(\n f_pred, remove_anno\n )\n except IOError:\n print(\n f\n + \" file missing in \"\n + folder_pred\n + \". Assuming no predictions are available for this file.\"\n )\n res_full_pred, res_pred, spans_pred, rels_pred = [], [], [], []\n\n res_full_gold, res_gold, spans_gold, rels_gold = normaliseAnnotations(\n f_gold, remove_anno\n )\n\n spans_all = set(spans_gold + spans_pred)\n\n for i, r in enumerate(spans_all):\n if r in spans_gold:\n target = res_gold[spans_gold.index(r)].split(\" \")[0]\n res_all_gold.append(target)\n if not target in targets:\n targets.append(target)\n else:\n # those are the false positives, contained in pred but not gold\n res_all_gold.append(\"NONE\")\n\n if r in spans_pred:\n target_pred = res_pred[spans_pred.index(r)].split(\" \")[0]\n res_all_pred.append(target_pred)\n else:\n # those are the false negatives, contained in gold but not pred\n res_all_pred.append(\"NONE\")\n\n if \"keys\" in remove_anno:\n targets = [\"Hyponym-of\", \"Synonym-of\"]\n # y_true, y_pred, labels, targets\n prec, recall, f1, support = precision_recall_fscore_support(\n res_all_gold, res_all_pred, labels=targets, average=None\n )\n # unpack the precision, recall, f1 and support\n metrics = {}\n for k, target in enumerate(targets):\n metrics[target] = {\n \"precision\": prec[k],\n \"recall\": recall[k],\n \"f1-score\": f1[k],\n \"support\": support[k],\n }\n\n # now micro-averaged\n if not \"types\" in remove_anno:\n prec, recall, f1, s = precision_recall_fscore_support(\n res_all_gold, res_all_pred, labels=targets, average=\"micro\"\n )\n metrics[\"overall\"] = {\n \"precision\": prec,\n \"recall\": recall,\n \"f1-score\": f1,\n \"support\": sum(support),\n }\n else:\n # just binary classification, nothing to average\n metrics[\"overall\"] = metrics[\"KEYPHRASE-NOTYPES\"]\n\n print_report(metrics, targets)\n return metrics",
"def compute_metrics(self, results: list) -> dict:",
"def compute_metrics(self, results: list) -> Dict[str, float]:\n logger: MMLogger = MMLogger.get_current_instance()\n\n # pred_coords: [N, K, D]\n pred_coords = np.concatenate(\n [result['pred_coords'] for result in results])\n if pred_coords.ndim == 4 and pred_coords.shape[1] == 1:\n pred_coords = np.squeeze(pred_coords, axis=1)\n # gt_coords: [N, K, D]\n gt_coords = np.stack([result['gt_coords'] for result in results])\n # mask: [N, K]\n mask = np.concatenate([result['mask'] for result in results])\n # action_category_indices: Dict[List[int]]\n action_category_indices = defaultdict(list)\n for idx, result in enumerate(results):\n action_category = result['action'].split('_')[0]\n action_category_indices[action_category].append(idx)\n\n error_name = self.mode.upper()\n\n logger.info(f'Evaluating {self.mode.upper()}...')\n metrics = dict()\n\n metrics[error_name] = keypoint_mpjpe(pred_coords, gt_coords, mask,\n self.ALIGNMENT[self.mode])\n\n for action_category, indices in action_category_indices.items():\n metrics[f'{error_name}_{action_category}'] = keypoint_mpjpe(\n pred_coords[indices], gt_coords[indices], mask[indices])\n\n return metrics",
"def evaluate(pred_file, ref_file):\n ref_dict, pred_dict, query_dict, id_dict = build_pred_ref_dict(ref_file, pred_file, ref_file)\n total, acc, scores = res_eval_with_type_acc(query_dict, pred_dict, ref_dict, id_dict, save=False)\n em = calculate_exact_match(pred_dict, ref_dict)\n print('Comp Acc: {:.3f}%\\tBleu-4: {:.3f}\\tRouge-L: {:.3f}'.format(acc, scores['Bleu-4'], scores['Rouge-L']))\n print('EM: {:.3f}%'.format(em))\n # calculate_sketch_type_acc(ref_file, pred_file)\n # calculate_exact_match_for_each_q_type(ref_file, pred_file)\n return total, acc, scores, em",
"def compute(self, result_file_dict):\r\n for part in self.parts:\r\n #=====================Need to change, temporal=========================\r\n if part == 'train':\r\n continue # because the train not have the label\r\n #=======================================================================\r\n gt = self.gt_dict[part]\r\n result_file = result_file_dict[part]\r\n # import ipdb; ipdb.set_trace()\r\n for key, item in result_file.items():\r\n self._result_name = item\r\n # score_records, num_videos = self.load_results(result_file)\r\n score_records, num_videos = self.load_results(item)\r\n logger.info(f'Compute Metric of {item}')\r\n assert num_videos == len(gt), f'the number of saved videos does not match the ground truth, {num_videos} != {len(gt)}'\r\n temp_result = self.eval_method(score_records, gt, str(key))\r\n if temp_result > self.optimal_resulst:\r\n self.optimal_resulst = temp_result\r\n \r\n return self.optimal_resulst",
"def evaluate(gt_file, re_file, logger=None):\n gts = json.load(open(gt_file, 'r'))\n scorers = [\n (Bleu(4), [\"Bleu_1\", \"Bleu_2\", \"Bleu_3\", \"Bleu_4\"]),\n (Meteor(), \"METEOR\"),\n (Rouge(), \"ROUGE_L\"),\n (Cider(), \"CIDEr\")\n ]\n metrics = []\n res = json.load(open(re_file, 'r'))\n res = {c['image_id']: [c['caption']] for c in res}\n gts = {k: v for k, v in zip(gts['image_ids'], gts['captions']) if k in res}\n for scorer, method in scorers:\n if logger is not None:\n logger.info('computing %s score...' % (scorer.method()))\n score, scores = scorer.compute_score(gts, res)\n if type(method) == list:\n for sc, scs, m in zip(score, scores, method):\n if logger is not None:\n logger.info(\"%s: %0.3f\" % (m, sc))\n metrics.extend(score)\n else:\n if logger is not None:\n logger.info(\"%s: %0.3f\" % (method, score))\n metrics.append(score)\n return metrics",
"def get_accuracy(path_targets, path_pred, key_to_infer, path_accuracy):\n list_targets = []\n list_preds = []\n with open(path_targets, 'r') as f:\n for line in f.readlines():\n list_targets.append(line[:-1])\n with open(path_pred, 'r') as f:\n for line in f.readlines():\n list_preds.append(line[:-1])\n \n len_list = len(list_targets)\n \n if key_to_infer[1] == str or key_to_infer[1] == float:\n correct = 0\n incorrect = 0\n for idx in range(len_list):\n if list_targets[idx] == list_preds[idx]:\n correct +=1\n else:\n incorrect += 1\n with open(path_accuracy, 'w') as f:\n f.write(\"Test Accuracy:\"+'\\n')\n f.wirte(\"Total tests: {}\".format(len_list)+'\\n')\n f.write(\"Correct predictions: {}\".format(str(correct))+'\\n')\n f.write(\"Incorrect predictions: {}\".format(str(incorrect))+'\\n')\n f.write(\"Accuracy: {}\".format(str(correct/len_list*100)+'%'))\n \n else:\n correct = 0\n incorrect = 0\n for idx in range(len_list):\n if int(list_targets[idx]) == int(round(float(list_preds[idx]))):\n correct +=1\n else:\n incorrect += 1\n with open(path_accuracy, 'w') as f:\n f.write(\"Test Accuracy:\"+'\\n')\n f.write(\"Total tests: {}\".format(len_list)+'\\n')\n f.write(\"Correct predictions: {}\".format(str(correct))+'\\n')\n f.write(\"Incorrect predictions: {}\".format(str(incorrect))+'\\n')\n f.write(\"Accuracy: {}\".format(str(correct/float(len_list)*100)+'%'))",
"def evaluate_predictions_from_jsons( # TODO: Change name to end2end_evaluate ?\n ground_truth_tree_jsons: List[Dict],\n prediction_tree_jsons: List[Dict],\n node_types: List[str] = None,\n) -> Dict[str, float]:\n node_types = node_types or [\"ml\", \"module\", \"model\"]\n\n ground_truth_trees = TreeNode.read_from_jsons(ground_truth_tree_jsons, [])\n predictions_trees = TreeNode.read_from_jsons(prediction_tree_jsons, [])\n\n node_type_to_percentage_errors = {}\n for node_type in node_types:\n assert node_type in (\"model\", \"module\", \"ml\")\n\n id_to_gold_energy = {}\n for tree in ground_truth_trees:\n for attribute_object in tree.get_subtree_nodes_attributes(\n [node_type], [\"id\", \"gold_energy\"]\n ):\n id_to_gold_energy[attribute_object[\"id\"]] = attribute_object[\n \"gold_energy\"\n ]\n\n id_to_predicted_energy = {}\n for tree in predictions_trees:\n for attribute_object in tree.get_subtree_nodes_attributes(\n [node_type], [\"id\", \"predicted_energy\"]\n ):\n id_to_predicted_energy[attribute_object[\"id\"]] = attribute_object[\n \"predicted_energy\"\n ]\n\n expected_ids = id_to_gold_energy.keys()\n gold_energies = [id_to_gold_energy[id_] for id_ in expected_ids]\n predicted_energies = []\n for id_ in expected_ids:\n predicted_energy = id_to_predicted_energy.get(id_, None)\n\n if not predicted_energy:\n print(\n f\"WARNING: No predicted energy found for node-id {id_}. Force setting 0.\"\n )\n predicted_energy = 0\n\n predicted_energies.append(predicted_energy)\n\n percentage_error = get_percentage_error_list(gold_energies, predicted_energies)\n node_type_to_percentage_errors[node_type] = round(percentage_error, 2)\n\n return node_type_to_percentage_errors",
"def compute_metrics(self, outputs: List[Dict[str, torch.Tensor]]) -> dict:\n distance_pos, distance_neg = [], []\n for minibatch in outputs:\n minibatch = minibatch[\"val_prediction\"]\n src_embedding = minibatch[\"src_sentemb\"]\n ref_embedding = minibatch[\"ref_sentemb\"]\n pos_embedding = minibatch[\"pos_sentemb\"]\n neg_embedding = minibatch[\"neg_sentemb\"]\n\n distance_src_pos = F.pairwise_distance(pos_embedding, src_embedding)\n distance_ref_pos = F.pairwise_distance(pos_embedding, ref_embedding)\n harmonic_distance_pos = (2 * distance_src_pos * distance_ref_pos) / (\n distance_src_pos + distance_ref_pos\n )\n distance_pos.append(harmonic_distance_pos)\n\n distance_src_neg = F.pairwise_distance(neg_embedding, src_embedding)\n distance_ref_neg = F.pairwise_distance(neg_embedding, ref_embedding)\n harmonic_distance_neg = (2 * distance_src_neg * distance_ref_neg) / (\n distance_src_neg + distance_ref_neg\n )\n distance_neg.append(harmonic_distance_neg)\n\n return {\n \"kendall\": self.metrics.compute(\n torch.cat(distance_pos), torch.cat(distance_neg)\n )\n }",
"def study_targets(root_folder=None):\n # Read targets file\n if root_folder is None:\n root_folder = Path.cwd()\n files = glob.glob(str(root_folder / '*TARGETS.csv'))\n assert len(files) == 1, ('Root data folder must contain one targets '\n 'csv file.')\n targets = pd.read_csv(root_folder / files[0])\n\n # Cleanup and remove invalid targets\n path_scores = []\n path_scores.append(pd.to_numeric(targets['Corwyn Score']\n .replace('W', np.nan)))\n path_scores.append(pd.to_numeric(targets['Cathy Score']\n .replace('W', np.nan)))\n valid = ~(path_scores[0].isna() & path_scores[1].isna())\n has_both = ~(path_scores[0].isna() | path_scores[1].isna())\n assert not np.any(has_both), \"Implement averaging of pathological scores\"\n\n # Use whichever pathologist score is available without averaging\n path_score = pd.concat([path_scores[0][valid], path_scores[1][valid]],\n axis=1).max(axis=1)\n path_score.name = 'Trauma Score' # 0, 1, 2, 3 categorical\n\n # Get pathologist names\n paths = pd.Series(index=path_scores[0].index,\n name='Pathologist')\n for i in range(len(path_scores)):\n scores = path_scores[i]\n name = scores.name.split()[0]\n scores = ~scores.isna()\n paths[scores] = name\n paths = paths[valid]\n\n id_labels = ['Patient Code', 'Protocol', 'Tissue', 'Load (g)']\n abs_deltas = targets.loc[valid, 'Absolute Delta (um)']\n deltas = targets.loc[valid, 'Percent Delta']\n pscores = targets.loc[valid, 'P Score']\n ids = targets.loc[valid, id_labels]\n for label in id_labels:\n if label == 'Load (g)':\n func = int\n else:\n func = str.upper\n ids[label] = ids[label].apply(func)\n targets = pd.concat([ids,\n abs_deltas,\n deltas,\n pscores,\n paths,\n path_score], axis=1)\n\n return targets",
"def run_evaluation(forecast_probabilities, observed_labels, output_dir_name):\n\n file_system_utils.mkdir_recursive_if_necessary(\n directory_name=output_dir_name)\n\n # TODO(thunderhoser): Make binarization threshold an input argument to this\n # method.\n (binarization_threshold, best_csi\n ) = model_eval.find_best_binarization_threshold(\n forecast_probabilities=forecast_probabilities,\n observed_labels=observed_labels,\n threshold_arg=model_eval.THRESHOLD_ARG_FOR_UNIQUE_FORECASTS,\n criterion_function=model_eval.get_csi,\n optimization_direction=model_eval.MAX_OPTIMIZATION_DIRECTION,\n unique_forecast_precision=FORECAST_PRECISION_FOR_THRESHOLDS)\n\n print (\n 'Best binarization threshold = {0:.4f} ... corresponding CSI = {1:.4f}'\n ).format(binarization_threshold, best_csi)\n\n print 'Binarizing forecast probabilities...'\n forecast_labels = model_eval.binarize_forecast_probs(\n forecast_probabilities=forecast_probabilities,\n binarization_threshold=binarization_threshold)\n\n print 'Creating contingency table...'\n contingency_table_as_dict = model_eval.get_contingency_table(\n forecast_labels=forecast_labels, observed_labels=observed_labels)\n print '{0:s}\\n'.format(str(contingency_table_as_dict))\n\n print 'Computing performance metrics...'\n pod = model_eval.get_pod(contingency_table_as_dict)\n pofd = model_eval.get_pofd(contingency_table_as_dict)\n success_ratio = model_eval.get_success_ratio(contingency_table_as_dict)\n focn = model_eval.get_focn(contingency_table_as_dict)\n accuracy = model_eval.get_accuracy(contingency_table_as_dict)\n csi = model_eval.get_csi(contingency_table_as_dict)\n frequency_bias = model_eval.get_frequency_bias(contingency_table_as_dict)\n peirce_score = model_eval.get_peirce_score(contingency_table_as_dict)\n heidke_score = model_eval.get_heidke_score(contingency_table_as_dict)\n\n print (\n 'POD = {0:.4f} ... POFD = {1:.4f} ... success ratio = {2:.4f} ... '\n 'FOCN = {3:.4f} ... accuracy = {4:.4f} ... CSI = {5:.4f} ... frequency '\n 'bias = {6:.4f} ... Peirce score = {7:.4f} ... Heidke score = {8:.4f}\\n'\n ).format(pod, pofd, success_ratio, focn, accuracy, csi, frequency_bias,\n peirce_score, heidke_score)\n\n auc, scikit_learn_auc = _create_roc_curve(\n forecast_probabilities=forecast_probabilities,\n observed_labels=observed_labels, output_dir_name=output_dir_name)\n print '\\n'\n\n bss_dict = _create_attributes_diagram(\n forecast_probabilities=forecast_probabilities,\n observed_labels=observed_labels, output_dir_name=output_dir_name)\n print '\\n'\n\n aupd = _create_performance_diagram(\n forecast_probabilities=forecast_probabilities,\n observed_labels=observed_labels, output_dir_name=output_dir_name)\n print '\\n'\n\n evaluation_file_name = '{0:s}/model_evaluation.p'.format(output_dir_name)\n print 'Writing results to: \"{0:s}\"...'.format(evaluation_file_name)\n model_eval.write_results(\n forecast_probabilities=forecast_probabilities,\n observed_labels=observed_labels,\n binarization_threshold=binarization_threshold, pod=pod, pofd=pofd,\n success_ratio=success_ratio, focn=focn, accuracy=accuracy, csi=csi,\n frequency_bias=frequency_bias, peirce_score=peirce_score,\n heidke_score=heidke_score, auc=auc, scikit_learn_auc=scikit_learn_auc,\n aupd=aupd, bss_dict=bss_dict, pickle_file_name=evaluation_file_name)",
"def eval_detection(gt_mask, r):\n ids = np.unique(r['class_ids'])\n merged_masks = {}\n qualities = dict([(1,0), (2,0), (3,0)])\n\n for i in ids:\n merged_masks[i] = (np.zeros_like(r['masks'][:,:,0]) > 0)\n n_masks = r['masks'].shape[-1]\n for c in range(n_masks):\n if i != r['class_ids'][c] or r['scores'][c] < .7:\n continue\n\n mask = r['masks'][:,:,c]\n merged_masks[i] |= mask\n\n gt_mask_i = (gt_mask[:,:,i-1] > 0)\n intersection = gt_mask_i & merged_masks[i]\n union = gt_mask_i | merged_masks[i]\n qualities[i] = np.sum(intersection) / np.sum(union)\n \n return qualities, merged_masks",
"def _run(prediction_file_name, num_bootstrap_reps, output_dir_name):\n\n file_metadata_dict = prediction_io.file_name_to_metadata(\n prediction_file_name\n )\n output_file_name = evaluation.find_file(\n directory_name=output_dir_name,\n zenith_angle_bin=file_metadata_dict[prediction_io.ZENITH_ANGLE_BIN_KEY],\n albedo_bin=file_metadata_dict[prediction_io.ALBEDO_BIN_KEY],\n month=file_metadata_dict[prediction_io.MONTH_KEY],\n grid_row=file_metadata_dict[prediction_io.GRID_ROW_KEY],\n grid_column=file_metadata_dict[prediction_io.GRID_COLUMN_KEY],\n raise_error_if_missing=False\n )\n\n result_table_xarray = evaluation.get_scores_all_variables(\n prediction_file_name=prediction_file_name,\n num_bootstrap_reps=num_bootstrap_reps\n )\n print(SEPARATOR_STRING)\n\n t = result_table_xarray\n scalar_target_names = t.coords[evaluation.SCALAR_FIELD_DIM].values\n\n for k in range(len(scalar_target_names)):\n print((\n 'Variable = \"{0:s}\" ... stdev of target and predicted values = '\n '{1:f}, {2:f} ... MSE and skill score = {3:f}, {4:f} ... '\n 'MAE and skill score = {5:f}, {6:f} ... bias = {7:f} ... '\n 'correlation = {8:f} ... KGE = {9:f}'\n ).format(\n scalar_target_names[k],\n numpy.nanmean(t[evaluation.SCALAR_TARGET_STDEV_KEY].values[k, :]),\n numpy.nanmean(\n t[evaluation.SCALAR_PREDICTION_STDEV_KEY].values[k, :]\n ),\n numpy.nanmean(t[evaluation.SCALAR_MSE_KEY].values[k, :]),\n numpy.nanmean(t[evaluation.SCALAR_MSE_SKILL_KEY].values[k, :]),\n numpy.nanmean(t[evaluation.SCALAR_MAE_KEY].values[k, :]),\n numpy.nanmean(t[evaluation.SCALAR_MAE_SKILL_KEY].values[k, :]),\n numpy.nanmean(t[evaluation.SCALAR_BIAS_KEY].values[k, :]),\n numpy.nanmean(t[evaluation.SCALAR_CORRELATION_KEY].values[k, :]),\n numpy.nanmean(t[evaluation.SCALAR_KGE_KEY].values[k, :])\n ))\n\n print(SEPARATOR_STRING)\n\n vector_target_names = t.coords[evaluation.VECTOR_FIELD_DIM].values\n heights_m_agl = t.coords[evaluation.HEIGHT_DIM].values\n\n for k in range(len(vector_target_names)):\n print('Variable = \"{0:s}\" ... PRMSE = {1:f}'.format(\n vector_target_names[k],\n numpy.nanmean(t[evaluation.VECTOR_PRMSE_KEY].values[k, :])\n ))\n\n print(SEPARATOR_STRING)\n\n for k in range(len(vector_target_names)):\n for j in range(len(heights_m_agl)):\n print((\n 'Variable = \"{0:s}\" at {1:d} m AGL ... '\n 'stdev of target and predicted values = {2:f}, {3:f} ... '\n 'MSE and skill score = {4:f}, {5:f} ... '\n 'MAE and skill score = {6:f}, {7:f} ... bias = {8:f} ... '\n 'correlation = {9:f} ... 
KGE = {10:f}'\n ).format(\n vector_target_names[k], int(numpy.round(heights_m_agl[j])),\n numpy.nanmean(\n t[evaluation.VECTOR_TARGET_STDEV_KEY].values[j, k, :]\n ),\n numpy.nanmean(\n t[evaluation.VECTOR_PREDICTION_STDEV_KEY].values[j, k, :]\n ),\n numpy.nanmean(t[evaluation.VECTOR_MSE_KEY].values[j, k, :]),\n numpy.nanmean(\n t[evaluation.VECTOR_MSE_SKILL_KEY].values[j, k, :]\n ),\n numpy.nanmean(t[evaluation.VECTOR_MAE_KEY].values[j, k, :]),\n numpy.nanmean(\n t[evaluation.VECTOR_MAE_SKILL_KEY].values[j, k, :]\n ),\n numpy.nanmean(t[evaluation.VECTOR_BIAS_KEY].values[j, k, :]),\n numpy.nanmean(\n t[evaluation.VECTOR_CORRELATION_KEY].values[j, k, :]\n ),\n numpy.nanmean(t[evaluation.VECTOR_KGE_KEY].values[j, k, :])\n ))\n\n print(SEPARATOR_STRING)\n\n try:\n aux_target_field_names = (\n t.coords[evaluation.AUX_TARGET_FIELD_DIM].values\n )\n aux_predicted_field_names = (\n t.coords[evaluation.AUX_PREDICTED_FIELD_DIM].values\n )\n except:\n aux_target_field_names = []\n aux_predicted_field_names = []\n\n for k in range(len(aux_target_field_names)):\n print((\n 'Target variable = \"{0:s}\" ... predicted variable = \"{1:s}\" ... '\n 'stdev of target and predicted values = {2:f}, {3:f} ... '\n 'MSE and skill score = {4:f}, {5:f} ... '\n 'MAE and skill score = {6:f}, {7:f} ... bias = {8:f} ... '\n 'correlation = {9:f} ... KGE = {10:f}'\n ).format(\n aux_target_field_names[k], aux_predicted_field_names[k],\n numpy.nanmean(t[evaluation.AUX_TARGET_STDEV_KEY].values[k, :]),\n numpy.nanmean(t[evaluation.AUX_PREDICTION_STDEV_KEY].values[k, :]),\n numpy.nanmean(t[evaluation.AUX_MSE_KEY].values[k, :]),\n numpy.nanmean(t[evaluation.AUX_MSE_SKILL_KEY].values[k, :]),\n numpy.nanmean(t[evaluation.AUX_MAE_KEY].values[k, :]),\n numpy.nanmean(t[evaluation.AUX_MAE_SKILL_KEY].values[k, :]),\n numpy.nanmean(t[evaluation.AUX_BIAS_KEY].values[k, :]),\n numpy.nanmean(t[evaluation.AUX_CORRELATION_KEY].values[k, :]),\n numpy.nanmean(t[evaluation.AUX_KGE_KEY].values[k, :])\n ))\n\n print(SEPARATOR_STRING)\n\n print('Writing results to: \"{0:s}\"...'.format(output_file_name))\n evaluation.write_file(\n result_table_xarray=result_table_xarray,\n netcdf_file_name=output_file_name\n )",
"def error_analysis(predictions, gold, result_collector):\n # scores = defaultdict(list)\n for iteration_id, texts in predictions.items():\n # map iteration id to fold\n fold = str(int(iteration_id) / 5)\n for tid, pred_tree in texts.items():\n gold_tree = gold[tid]\n print(iteration_id, fold, tid)\n print(gold_tree.get_triples())\n print(pred_tree.get_triples())\n for level, scores in eval_prediction([gold_tree], [pred_tree]):\n result_collector.add_result(tid, fold, level, scores)\n print(\"Done.\")",
"def eval_metrics_for_seqtags(self, predicted_answers):\n total_correct_in_all = 0\n label_pred = []\n label_true = []\n label_weights = []\n digits = 3\n metrics = {}\n\n for e_id, sample in predicted_answers.iteritems():\n # get all correct ids, include padding ids.\n correct_label_indices = sample['correct_seq_labels']\n # use extend to add all the labels in the seq, include the head padding and tail padding\n label_true.extend(correct_label_indices)\n # counting all correct for each sample\n total_correct_in_all += len(correct_label_indices)\n # select topK\n label_pred.extend(sample['pred_seq_tags'])\n\n if total_correct_in_all != 0:\n p, r, f1, s = precision_recall_fscore_support(label_true, label_pred, beta=1.0, labels=range(self.num_classes), average=None)\n total_s = np.sum(s)\n p_micro, r_micro, f1_micro, _ = precision_recall_fscore_support(label_true, label_pred, beta=1.0, labels=range(self.num_classes), average='micro')\n last_lines_heading = ['macro / total', 'weighted_mac / total', 'micro / total']\n target_names = self.classes\n name_width = max(len(cn) for cn in target_names)\n width = max(name_width, max([len(x) for x in last_lines_heading]), digits)\n\n headers = [\"precision\", \"recall\", \"f1-score\", \"support\"]\n head_fmt = u'{:>{width}s} ' + u' {:>9}' * len(headers)\n report = head_fmt.format(u'', *headers, width=width)\n report += u'\\n\\n'\n row_fmt = u'{:>{width}s} ' + u' {:>9.{digits}f}' * 3 + u' {:>9}\\n'\n rows = zip(target_names, p, r, f1, s)\n for row in rows:\n label_weights.append(row[4])\n report += row_fmt.format(*row, width=width, digits=digits)\n metrics['P_{}'.format(row[0])] = (1, row[1])\n metrics['R_{}'.format(row[0])] = (1, row[2])\n metrics['F1_{}'.format(row[0])] = (1, row[3])\n report += u'\\n'\n\n # compute macro averages\n p_macro = np.average(p, weights = None)\n r_macro = np.average(r, weights = None)\n f1_macro = np.average(f1, weights = None)\n metrics['P_{}'.format(\"macro\")] = (1, p_macro)\n metrics['R_{}'.format(\"macro\")] = (1, r_macro)\n metrics['F1_{}'.format(\"macro\")] = (1, f1_macro)\n report += row_fmt.format(last_lines_heading[0],\n p_macro,\n r_macro,\n f1_macro,\n total_s,\n width=width, digits=digits)\n\n # compute weighted macro average\n label_weights = map(lambda x : x/(total_s * 1.0), label_weights)\n p_weighted_average = np.average(p, weights = label_weights)\n r_weighted_average = np.average(r, weights = label_weights)\n f1_weighted_average = np.average(f1, weights = label_weights)\n metrics['P_{}'.format(\"weighted_macro\")] = (1, p_weighted_average)\n metrics['R_{}'.format(\"weighted_macro\")] = (1, r_weighted_average)\n metrics['F1_{}'.format(\"weighted_macro\")] = (1, f1_weighted_average)\n report += row_fmt.format(last_lines_heading[1],\n p_weighted_average,\n r_weighted_average,\n f1_weighted_average,\n total_s,\n width=width, digits=digits)\n # micro average\n metrics['P_{}'.format(\"micro\")] = (1, p_micro)\n metrics['R_{}'.format(\"micro\")] = (1, r_micro)\n metrics['F1_{}'.format(\"micro\")] = (1, f1_micro)\n report += row_fmt.format(last_lines_heading[2],\n p_micro,\n r_micro,\n f1_micro,\n total_s,\n width=width, digits=digits)\n\n self.logger.info(\"P,R,F1 report as follows:\\n {}\".format(report))\n # only plot it at dev and test time, not during training.\n if self.gen_confusing_matrix:\n\n self.logger.info(\"Generate confusing matrix photo.\")\n # Compute confusion matrix\n conf_matrix = confusion_matrix(label_true, label_pred)\n np.set_printoptions(precision=2)\n\n # Plot non-normalized confusion 
matrix\n plt.figure()\n self.plot_confusion_matrix(conf_matrix, classes=self.brief_classes,\n title='Confusion matrix when seq labeling, without normalization')\n wo_norm_fig_path = os.path.join(self.result_dir, '{}_wo_norm.png'.format(self.result_prefix))\n plt.savefig(wo_norm_fig_path)\n\n # Plot normalized confusion matrix\n plt.figure()\n self.plot_confusion_matrix(conf_matrix, classes=self.brief_classes, normalize=True,\n title='Normalized confusion matrix when seq labeling')\n\n norm_fig_path = os.path.join(self.result_dir, '{}_w_norm.png'.format(self.result_prefix))\n plt.savefig(norm_fig_path)\n\n else:\n self.logger.warn('invalid total_correct_in_all')\n\n return metrics",
"def evaluate(self) -> Dict[str, Any]:\n kwargs = {\"ids\": self._ids}\n return {\n metric.value: self._metric_funcs[metric](\n self._targets, self._preds, **kwargs\n )\n for metric in self._metrics\n }",
"def get_eval_metric_ops(targets, predictions, tensors):\n # TODO(seominjoon): yp should also consider no answer case.\n yp1 = tf.expand_dims(predictions['yp1'], -1)\n yp2 = tf.expand_dims(predictions['yp2'], -1)\n answer_mask = tf.sequence_mask(targets['num_answers'])\n start_correct = tf.reduce_any(\n tf.equal(targets['word_answer_starts'], yp1) & answer_mask, 1)\n end_correct = tf.reduce_any(\n tf.equal(targets['word_answer_ends'], yp2) & answer_mask, 1)\n correct = start_correct & end_correct\n em = tf.py_func(\n _enum_fn(_exact_match_score, dtype='float32'), [\n predictions['a'], targets['answers'], predictions['has_answer'],\n answer_mask\n ], 'float32')\n f1 = tf.py_func(\n _enum_fn(_f1_score, dtype='float32'), [\n predictions['a'], targets['answers'], predictions['has_answer'],\n answer_mask\n ], 'float32')\n\n eval_metric_ops = {\n 'acc1': tf.metrics.mean(tf.cast(start_correct, 'float')),\n 'acc2': tf.metrics.mean(tf.cast(end_correct, 'float')),\n 'acc': tf.metrics.mean(tf.cast(correct, 'float')),\n 'em': tf.metrics.mean(em),\n 'f1': tf.metrics.mean(f1),\n }\n\n for key in tensors:\n if key.startswith('skim_rate_'):\n skim_rate = tf.py_func(\n _enum_fn(\n lambda x: x,\n dtype='float32'), [tensors[key]], 'float32')\n eval_metric_ops[key] = tf.metrics.mean(skim_rate)\n\n return eval_metric_ops",
"def compute_score(predictions, references, vocab=None, beam_size=4):\n assert np.rank(predictions) == 3\n assert predictions.shape[0] == references.shape[0]\n batch_size = predictions.shape[0]\n predictions = tf.make_ndarray(tf.make_tensor_proto(predictions)).tolist()\n references = tf.make_ndarray(tf.make_tensor_proto(references)).tolist()\n hypotheses_list = []\n references_list = []\n\n for index in range(batch_size):\n # All predictions in beam search are used for validation scores.\n for b_index in range(beam_size):\n h = predictions[index][b_index]\n try:\n eos_index = h.index(input_utils.EOS)\n except ValueError:\n eos_index = len(h)\n hypotheses_list.append(h[:eos_index])\n\n ref = references[index][0].decode().split('|')\n ref_list = [r.strip().split(' ') for r in ref if r.strip()]\n references_list.append(ref_list)\n\n all_scores = collections.defaultdict(list)\n for hypothesis, references in zip(hypotheses_list, references_list):\n if vocab is not None and len(vocab):\n # Skip PADDING, UNK, EOS, START (0-3).\n hypothesis = [\n vocab[word_id].numpy().decode()\n for word_id in hypothesis\n if word_id > 3\n ]\n\n h_str = ' '.join(str(e) for e in hypothesis)\n r_str = [' '.join(str(e) for e in ref) for ref in references]\n\n print('hypothesis: ', str(h_str))\n print('references: ', str(r_str))\n print('\\n')\n scores = screen2words_eval.coco_evaluate(r_str, h_str)\n for key, score in scores.items():\n all_scores[key].append(score)\n score_names = [\n 'BLEU-1', 'BLEU-2', 'BLEU-3', 'BLEU-4', 'ROUGE-1-f1-mean',\n 'ROUGE-1-f1-min', 'ROUGE-1-f1-max', 'ROUGE-2-f1-mean', 'ROUGE-2-f1-min',\n 'ROUGE-2-f1-max', 'ROUGE-L-f1-mean', 'ROUGE-L-f1-min', 'ROUGE-L-f1-max'\n ]\n\n return [np.array(all_scores[name], dtype=np.float32) for name in score_names]"
] | [
"0.622663",
"0.6073436",
"0.58134985",
"0.5804928",
"0.5719069",
"0.5711731",
"0.5710975",
"0.568237",
"0.5680572",
"0.5669467",
"0.5663991",
"0.5612322",
"0.5609203",
"0.5606721",
"0.55780536",
"0.5572466",
"0.55717826",
"0.55348396",
"0.54948217",
"0.548214",
"0.5472467",
"0.547049",
"0.5464292",
"0.5462596",
"0.5460109",
"0.5458652",
"0.54495853",
"0.5436114",
"0.5431877",
"0.54314184"
] | 0.62762123 | 0 |
Assumption is that bins which, when sorted, lie outside top_bottom_bin_range are outliers, and the relevant 'robust' minimum and maximum are percentiles within the top_bottom_bin_range. This is a bit like the scikit-learn robust scaler idea, though not exactly | def find_robust_min_max(x, pct_thresh=0.05, top_bottom_bin_range=2000000):
y = x[x > 0]
idxs = np.argsort(y)
abs_max = y[idxs[-1]]
abs_min = y[idxs[0]]
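    # Robust bounds: index pct_thresh * top_bottom_bin_range positions in from each end
    # of the sorted non-zero values, so the most extreme bins are treated as outliers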
robust_max = y[idxs[-int(pct_thresh * top_bottom_bin_range)]]
robust_min = y[idxs[int(pct_thresh * top_bottom_bin_range)]]
log.info('Array length original, non-zero: {}, {}'.format(len(x), len(y)))
log.info('Absolute min, max: {}, {}'.format(abs_min, abs_max))
log.info('Robust min, max: {}, {}'.format(robust_min, robust_max))
return robust_min, robust_max | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def binning(data, low, high):\n if len(data) == 0: return 1\n\n mask1 = (data >= low)\n mask2 = (data < high)\n mask3 = numpy.logical_and(mask1, mask2)\n data = data[mask3]\n\n if len(data) == 0: return 10\n\n data.sort()\n q1 = data[int(math.floor(0.25*len(data)))]\n q3 = data[int(math.floor(0.75*len(data)))]\n binwidth = 2. * (q3 - q1) / len(data)**(1./3.)\n if binwidth > 0.:\n return max(10, int(math.ceil((high - low)/binwidth)))\n else:\n return 10",
"def _get_optimal_threshold(arr, num_bins=1001, num_quantized_bins=255):\n if not isinstance(arr, np.ndarray):\n raise TypeError('get_optimal_threshold only supports input type of np.ndarray,'\n ' while received type=%s' % (str(type(arr))))\n min_val = np.min(arr)\n max_val = np.max(arr)\n th = max(abs(min_val), abs(max_val))\n\n hist, hist_edges = np.histogram(arr, bins=num_bins, range=(-th, th))\n zero_bin_idx = num_bins // 2\n num_half_quantized_bins = num_quantized_bins // 2\n assert np.allclose(hist_edges[zero_bin_idx] + hist_edges[zero_bin_idx + 1],\n 0, rtol=1e-5, atol=1e-7)\n\n thresholds = np.zeros(num_bins // 2 + 1 - num_quantized_bins // 2)\n divergence = np.zeros_like(thresholds)\n quantized_bins = np.zeros(num_quantized_bins, dtype=np.int32)\n # i means the number of bins on half axis excluding the zero bin.\n for i in range(num_quantized_bins // 2,\n num_bins // 2 + 1):\n p_bin_idx_start = zero_bin_idx - i\n p_bin_idx_stop = zero_bin_idx + i + 1\n thresholds[i - num_half_quantized_bins] = hist_edges[p_bin_idx_stop]\n sliced_nd_hist = hist[p_bin_idx_start:p_bin_idx_stop]\n\n # generate reference distribution p\n p = sliced_nd_hist.copy()\n assert p.size % 2 == 1\n assert p.size >= num_quantized_bins\n # put left outlier count in p[0]\n left_outlier_count = np.sum(hist[0:p_bin_idx_start])\n p[0] += left_outlier_count\n # put right outlier count in p[-1]\n right_outlier_count = np.sum(hist[p_bin_idx_stop:])\n p[-1] += right_outlier_count\n # is_nonzeros[k] indicates whether hist[k] is nonzero\n is_nonzeros = (sliced_nd_hist != 0).astype(np.int32)\n\n # calculate how many bins should be merged to generate quantized distribution q\n num_merged_bins = p.size // num_quantized_bins\n # merge hist into num_quantized_bins bins\n for j in range(num_quantized_bins):\n start = j * num_merged_bins\n stop = start + num_merged_bins\n quantized_bins[j] = sliced_nd_hist[start:stop].sum()\n quantized_bins[-1] += sliced_nd_hist[num_quantized_bins * num_merged_bins:].sum()\n # expand quantized_bins into p.size bins\n q = np.zeros(p.size, dtype=np.float32)\n for j in range(num_quantized_bins):\n start = j * num_merged_bins\n if j == num_quantized_bins - 1:\n stop = -1\n else:\n stop = start + num_merged_bins\n norm = is_nonzeros[start:stop].sum()\n if norm != 0:\n q[start:stop] = float(quantized_bins[j]) / float(norm)\n q[sliced_nd_hist == 0] = 0\n p = _smooth_distribution(p)\n # There is a chance that q is an invalid probability distribution.\n try:\n q = _smooth_distribution(q)\n except ValueError:\n divergence[i - num_half_quantized_bins] = float(\"inf\")\n else:\n divergence[i - num_half_quantized_bins] = stats.entropy(p, q)\n quantized_bins[:] = 0\n\n min_divergence_idx = np.argmin(divergence)\n min_divergence = divergence[min_divergence_idx]\n opt_th = thresholds[min_divergence_idx]\n return min_val, max_val, min_divergence, opt_th",
"def test_bins(self):\n\n \n for filename in ['data/population_padang_1.asc', \n 'data/test_grid.asc']: \n \n R = read_coverage(filename)\n \n min, max = R.get_extrema() #use_numeric=True)\n \n for N in [2,3,5,7,10,16]:\n linear_intervals = R.get_bins(N=N, quantiles=False) \n \n assert linear_intervals[0] == min\n assert linear_intervals[-1] == max \n \n d = (max-min)/N\n for i in range(N):\n assert numpy.allclose(linear_intervals[i], min + i*d) \n \n \n quantiles = R.get_bins(N=N, quantiles=True)\n\n A = R.get_data(nan=True).flat[:] \n \n mask = numpy.logical_not(numpy.isnan(A)) # Omit NaN's\n l1 = len(A)\n A = A.compress(mask) \n l2 = len(A)\n \n if filename == 'data/test_grid.asc':\n # Check that NaN's were removed\n \n assert l1 == 35\n assert l2 == 30\n \n \n # Assert that there are no NaN's \n assert not numpy.alltrue(numpy.isnan(A))\n \n number_of_elements = len(A)\n average_elements_per_bin = number_of_elements/N\n \n # Count elements in each bin and check\n\n i0 = quantiles[0]\n for i1 in quantiles[1:]:\n count = numpy.sum((i0 < A) & (A < i1))\n if i0 == quantiles[0]:\n refcount = count\n \n \n if i1 < quantiles[-1]:\n # Number of elements in each bin must vary by no more than 1\n assert abs(count - refcount) <= 1 \n assert abs(count - average_elements_per_bin) <= 3\n \n \n else:\n # The last bin is allowed vary by more\n pass\n \n i0 = i1",
"def min_max_normalization(\n data: np.ndarray, top: int = 255, floor: bool = True\n) -> np.ndarray:\n # Converting dtype can prevent Overflow Error\n data = data.astype(float)\n\n lmin = data.min()\n lmax = data.max()\n if floor:\n return np.floor((data - lmin) / (lmax - lmin) * top)\n else:\n return (data - lmin) / (lmax - lmin)",
"def autobin_stats(x,y,n_bins=8,stat='average',n_points=None):\n \n if not ascend(x):\n ix=argsort(x)\n x=take(x,ix)\n y=take(y,ix)\n n=len(x)\n if n_points==None: \n #This throws out some points\n n_points=n/n_bins\n else: \n n_bins=n/n_points\n #if there are more that 2 points in the last bin, add another bin\n if n%n_points>2: n_bins=n_bins+1\n \n if n_points<=1:\n print('Only 1 or less points per bin, output will be sorted input vector with rms==y')\n return x,y\n xb,yb=[],[]\n \n #print 'stat', stat\n if stat=='average' or stat=='mean': func=mean\n elif stat=='median': func=median\n elif stat=='rms' or stat=='std' : func=std\n elif stat=='std_robust' or stat=='rms_robust': func=std_robust\n elif stat=='mean_robust': func=mean_robust\n elif stat=='median_robust': func=median_robust\n elif stat=='p2p': func=p2p # --DC\n elif stat=='min': func=min # --DC\n elif stat=='max': func=max # --DC\n \n for i in range(n_bins):\n xb.append(mean(x[i*n_points:(i+1)*n_points]))\n if func==std and n_points==2:\n print('n_points==2; too few points to determine rms')\n print('Returning abs(y1-y2)/2. in each bin as rms')\n yb.append(abs(y[i*n_points]-y[i*n_points+1])/2.)\n else:\n yb.append(func(y[i*n_points:(i+1)*n_points]))\n if i>2 and xb[-1]==xb[-2]: \n yb[-2]=(yb[-2]+yb[-1])/2.\n xb=xb[:-1]\n yb=yb[:-1]\n return array(xb),array(yb)",
"def remove_outliers(data):\n upper_boundary = np.quantile(data, 0.992)\n lower_boundary = np.quantile(data, 0.008)\n selection = data[(data > lower_boundary) & (data < upper_boundary)]\n standard_dev = np.std(selection)\n median = np.median(selection)\n data[(median + 4.5 * standard_dev < data) | (data < median - 4.5 * standard_dev)] = median\n return data",
"def get_bins(val: List[float]) -> List[float]:\n r_min = np.min(val)\n r_max = np.max(val)\n min_bins = 2\n max_bins = 50\n # Calculate bin width using either Freedman-Diaconis or Sturges estimator\n bin_edges = np.histogram_bin_edges(val, bins=\"auto\")\n if len(bin_edges) < min_bins:\n return list(np.linspace(start=r_min, stop=r_max, num=min_bins))\n elif len(bin_edges) <= max_bins:\n return list(bin_edges)\n # Clamp to max_bins by estimating a good bin range to be more robust to outliers\n q75, q25 = np.percentile(val, [75, 25])\n iqr = q75 - q25\n width = 2 * iqr / max_bins\n start = max((q75 + q25) / 2 - iqr, r_min)\n stop = min(start + max_bins * width, r_max)\n # Take the minimum of range and 2x IQR to account for outliers\n edges = list(np.linspace(start=start, stop=stop, num=max_bins))\n prefix = [r_min] if start > r_min else []\n suffix = [r_max] if stop < r_max else []\n return prefix + edges + suffix",
"def outlier_thresholds(dataframe, col_name, low_quantile, up_quantile):\n quartile1 = dataframe[col_name].quantile(low_quantile)\n quartile3 = dataframe[col_name].quantile(up_quantile)\n interquantile_range = quartile3 - quartile1\n up_limit = quartile3 + 1.5 * interquantile_range\n low_limit = quartile1 - 1.5 * interquantile_range\n return low_limit, up_limit",
"def bin_data(y, num_bins, std_away):\n mean = np.mean(y)\n std = np.std(y)\n pitch_shifts = np.arange(-num_bins, num_bins + 1)\n thresholds = (std * std_away) * pitch_shifts + mean\n\n result = []\n for point in y:\n if point < thresholds[0]:\n result.append(pitch_shifts[0] - 1)\n elif point > thresholds[-1]:\n result.append(pitch_shifts[-1] + 1)\n else:\n for i in range(len(thresholds) - 1):\n if point >= thresholds[i] and point < thresholds[i + 1]:\n result.append(i - num_bins)\n return np.array(result)",
"def window_standardize(img, lower_bound, upper_bound):\n img = np.clip(img, lower_bound, upper_bound)\n # x=x*2-1: map x to [-1,1]\n img = 2 * (img - lower_bound) / (upper_bound - lower_bound) - 1\n return img",
"def get_weighted_bins(data_df, x_key = 'ecc', y_key = 'size', weight_key = 'rsq', n_bins = 10):\n \n # sort values by eccentricity\n data_df = data_df.sort_values(by=[x_key])\n\n #divide in equally sized bins\n bin_size = int(len(data_df)/n_bins) \n \n mean_x = []\n mean_x_std = []\n mean_y = []\n mean_y_std = []\n \n # for each bin calculate rsq-weighted means and errors of binned ecc/gain \n for j in range(n_bins): \n \n mean_x.append(weightstats.DescrStatsW(data_df[bin_size * j:bin_size * (j+1)][x_key],\n weights = data_df[bin_size * j:bin_size * (j+1)][weight_key]).mean)\n mean_x_std.append(weightstats.DescrStatsW(data_df[bin_size * j:bin_size * (j+1)][x_key],\n weights = data_df[bin_size * j:bin_size * (j+1)][weight_key]).std_mean)\n\n mean_y.append(weightstats.DescrStatsW(data_df[bin_size * j:bin_size * (j+1)][y_key],\n weights = data_df[bin_size * j:bin_size*(j+1)][weight_key]).mean)\n mean_y_std.append(weightstats.DescrStatsW(data_df[bin_size * j:bin_size * (j+1)][y_key],\n weights = data_df[bin_size * j:bin_size * (j+1)][weight_key]).std_mean)\n\n return mean_x, mean_x_std, mean_y, mean_y_std",
"def find_outlier_range(dfs: pd.Series, iqr_factor=1.5, clip_low=None, clip_high=None):\n quant1 = dfs.quantile(0.25)\n quant3 = dfs.quantile(0.75)\n IQR = quant3 - quant1\n low = quant1 - iqr_factor * IQR\n high = quant3 + iqr_factor * IQR\n\n if clip_low is not None:\n low = max(clip_low, low)\n if clip_high is not None:\n high = min(clip_high, high)\n return (low, high)",
"def remove_outliers(lst):\n slst = sorted(lst)\n three_iqr = 3 * get_IQR(lst)\n low_boundary = float(np.percentile(lst, 25)) - three_iqr\n high_boundary = float(np.percentile(lst, 75)) + three_iqr\n\n return filter(lambda x: x >= low_boundary and x <= high_boundary, slst)",
"def test_bins(self):\n\n for filename in ['%s/population_padang_1.asc' % TESTDATA,\n '%s/test_grid.asc' % TESTDATA]:\n\n R = read_layer(filename)\n rmin, rmax = R.get_extrema()\n\n for N in [2, 3, 5, 7, 10, 16]:\n linear_intervals = R.get_bins(N=N, quantiles=False)\n\n assert linear_intervals[0] == rmin\n assert linear_intervals[-1] == rmax\n\n d = (rmax - rmin) / N\n for i in range(N):\n assert numpy.allclose(linear_intervals[i], rmin + i * d)\n\n quantiles = R.get_bins(N=N, quantiles=True)\n A = R.get_data(nan=True).flat[:]\n\n mask = numpy.logical_not(numpy.isnan(A)) # Omit NaN's\n l1 = len(A)\n A = A.compress(mask)\n l2 = len(A)\n\n if filename == '%s/test_grid.asc' % TESTDATA:\n # Check that NaN's were removed\n assert l1 == 35\n assert l2 == 30\n\n # Assert that there are no NaN's\n assert not numpy.alltrue(numpy.isnan(A))\n\n number_of_elements = len(A)\n average_elements_per_bin = number_of_elements / N\n\n # Count elements in each bin and check\n i0 = quantiles[0]\n for i1 in quantiles[1:]:\n count = numpy.sum((i0 < A) & (A < i1))\n if i0 == quantiles[0]:\n refcount = count\n\n if i1 < quantiles[-1]:\n # Number of elements in each bin must vary by no\n # more than 1\n assert abs(count - refcount) <= 1\n assert abs(count - average_elements_per_bin) <= 3\n else:\n # The last bin is allowed vary by more\n pass\n\n i0 = i1",
"def test_range_argument_ignored(self):\n bins_range = (1, 2)\n\n bin_edges, hist, _, _ = hist_w_unc(\n self.input,\n bins=self.bin_edges,\n bins_range=bins_range,\n normed=False,\n )\n\n # check if we end up with the same bin edges anyway\n np.testing.assert_array_almost_equal(self.bin_edges, bin_edges)\n np.testing.assert_array_almost_equal(self.hist, hist)",
"def get_continuum_in_range(w,s,low_low, low_high, high_low, high_high,\n pmin=12,pmax=88, only_correct_negative_values = False,\n fit_degree=2, plot = True, verbose = True, warnings=True) :\n s_low = s[np.where((w <= low_low))] \n s_high = s[np.where((w >= high_high))] \n \n w_fit = w[np.where((w > low_low) & (w < high_high))]\n w_fit_low = w[np.where((w > low_low) & (w < low_high))]\n w_fit_high = w[np.where((w > high_low) & (w < high_high))]\n\n y_fit = s[np.where((w > low_low) & (w < high_high))]\n y_fit_low = s[np.where((w > low_low) & (w < low_high))]\n y_fit_high = s[np.where((w > high_low) & (w < high_high))]\n\n # Remove outliers\n median_y_fit_low = np.nanmedian(y_fit_low)\n for i in range(len(y_fit_low)):\n if np.nanpercentile(y_fit_low,2) > y_fit_low[i] or y_fit_low[i] > np.nanpercentile(y_fit_low,98): y_fit_low[i] =median_y_fit_low\n\n median_y_fit_high = np.nanmedian(y_fit_high)\n for i in range(len(y_fit_high)):\n if np.nanpercentile(y_fit_high,2) > y_fit_high[i] or y_fit_high[i] > np.nanpercentile(y_fit_high,98): y_fit_high[i] =median_y_fit_high\n \n w_fit_cont = np.concatenate((w_fit_low,w_fit_high))\n y_fit_cont = np.concatenate((y_fit_low,y_fit_high))\n \n try:\n fit = np.polyfit(w_fit_cont,y_fit_cont, fit_degree)\n yfit = np.poly1d(fit)\n y_fitted = yfit(w_fit)\n \n y_fitted_low = yfit(w_fit_low)\n median_low = np.nanmedian(y_fit_low-y_fitted_low)\n rms=[]\n for i in range(len(y_fit_low)):\n rms.append(y_fit_low[i]-y_fitted_low[i]-median_low)\n \n # rms=y_fit-y_fitted\n lowlimit=np.nanpercentile(rms,pmin)\n highlimit=np.nanpercentile(rms,pmax)\n \n corrected_s_ =copy.deepcopy(y_fit)\n for i in range(len(w_fit)):\n if w_fit[i] >= low_high and w_fit[i] <= high_low: # ONLY CORRECT in [low_high,high_low] \n if only_correct_negative_values:\n if y_fit[i] <= 0 : \n corrected_s_[i] = y_fitted[i]\n else:\n if y_fit[i]-y_fitted[i] <= lowlimit or y_fit[i]-y_fitted[i] >= highlimit: corrected_s_[i] = y_fitted[i]\n \n \n corrected_s = np.concatenate((s_low,corrected_s_))\n corrected_s = np.concatenate((corrected_s,s_high))\n \n \n if plot:\n ptitle = \"CorrectionBase in range \"+np.str(np.round(low_low,2))+\" - [ \"+np.str(np.round(low_high,2))+\" - \"+np.str(np.round(high_low,2))+\" ] - \"+np.str(np.round(high_high,2))\n plot_plot(w_fit,[y_fit,y_fitted,y_fitted-highlimit,y_fitted-lowlimit,corrected_s_], color=[\"r\",\"b\", \"black\",\"black\",\"green\"], alpha=[0.3,0.7,0.2,0.2,0.5],xmin=low_low-40, xmax=high_high+40,vlines=[low_low,low_high,high_low,high_high],ptitle=ptitle, ylabel=\"Normalized flux\") \n #plot_plot(w,[s,corrected_s],xmin=low_low-40, xmax=high_high+40,vlines=[low_low,low_high,high_low,high_high])\n except Exception:\n if warnings: print(\" Fitting the continuum failed! Nothing done.\")\n corrected_s = s\n\n return corrected_s",
"def all_bucket_boundaries(self):\n\n lower = self._lower_bounds[0]\n for i in xrange(1, self.total_buckets):\n upper = self._lower_bounds[i]\n yield (lower, upper)\n lower = upper\n\n yield (lower, float('Inf'))",
"def _bound(x, min_value, max_value):\n return np.maximum(min_value, np.minimum(x, max_value))",
"def createDecile(resp:pd.Series,score:pd.Series,buckets:int=10) -> pd.DataFrame: \n \n input_df=pd.DataFrame({'target_1':resp,'score':score}) \n input_df['target_0'] = 1-input_df['target_1'] \n input_df['decile'] = pd.qcut(input_df['score'],buckets,duplicates='drop')\n binned_df = input_df.groupby('decile', as_index = False)\n \n aggregated_df = pd.DataFrame()\n aggregated_df['min_score'] = binned_df.min().score.apply('{0:.3f}'.format)\n aggregated_df['max_score'] = binned_df.max().score.apply('{0:.3f}'.format)\n aggregated_df['target_1'] = binned_df.sum().target_1\n aggregated_df['target_0'] = binned_df.sum().target_0\n aggregated_df['total'] = (aggregated_df['target_1'] + aggregated_df['target_0'])\n aggregated_df['target_1_ratio'] = (aggregated_df['target_1'] / aggregated_df['total']).apply('{0:.1%}'.format)\n aggregated_df['mean_score'] = binned_df.mean().score.apply('{0:.3f}'.format) \n \n sorted_df = (aggregated_df.sort_values(by = 'max_score', ascending = False)).reset_index(drop = True)\n sorted_df['gain'] = (sorted_df['target_1'].cumsum()/sorted_df['target_1'].sum()).apply('{0:.1%}'.format)\n sorted_df['lift'] = ((sorted_df['target_1']/sorted_df.total)/(sorted_df['target_1'].sum()/sorted_df.total.sum())).apply('{0:.2f}'.format)\n sorted_df['KS'] = np.round(((sorted_df['target_1'] / sorted_df['target_1'].sum()).cumsum() - (sorted_df['target_0'] / sorted_df['target_0'].sum()).cumsum()), 4) * 100\n \n mark = lambda x: '◄─ ' if x == sorted_df.KS.max() else ''\n sorted_df['max_KS'] = sorted_df.KS.apply(mark)\n sorted_df.index +=1\n \n return sorted_df",
"def bin_medscat(x, y, percentiles=[5,50,95], for_errorbar_plot=False, dropna=True, bins=\"auto\", **kwargs):\n if dropna: x, y = get_finite(x, y)\n h, xe = np.histogram(x, bins=bins, **kwargs)\n xout = (xe[1:]+xe[:-1])/2.\n indices = np.digitize(x, xe)\n yout = np.zeros((len(xe)-1,len(percentiles)))+np.nan\n for ix in np.unique(indices):\n # Skip things outside the bin range\n if ix >= len(yout): continue\n # Percentile in this bin\n ii = ix==indices\n yout[ix,:] = np.percentile(y[ii], percentiles)\n if for_errorbar_plot:\n e1 = yout[:,1] - yout[:,0]\n e2 = yout[:,2] - yout[:,1]\n return xout, yout[:,1], [e1,e2]\n return xout, yout",
"def test_under_overflow_values(self):\n values_with_inf = np.array([-1, 1, 2, 100, np.inf])\n\n with self.subTest(\"Under/overflow values without under/overflow bins.\"):\n bins, hist, _, _ = hist_w_unc(values_with_inf, bins=5, bins_range=(0, 5))\n np.testing.assert_almost_equal(bins, np.linspace(0, 5, 6))\n # in this case only 40% of the values are shown in the plot\n np.testing.assert_almost_equal(hist, np.array([0, 0.2, 0.2, 0, 0]))\n\n with self.subTest(\"Under/overflow values with under/overflow bins.\"):\n bins, hist, _, _ = hist_w_unc(\n values_with_inf, bins=5, bins_range=(0, 5), underoverflow=True\n )\n np.testing.assert_almost_equal(bins, np.linspace(0, 5, 6))\n np.testing.assert_almost_equal(hist, np.array([0.2, 0.2, 0.2, 0, 0.4]))",
"def split_into_classes(self, bounds=None, percentile=95):\n\n if self._sample_storage is not None:\n occr = self.volumise_occr(self._sample_storage)\n moccr = self.marginalise_occr_period(self._sample_storage)\n else:\n raise AttributeError(\"No stored samples found.\")\n\n if bounds is None:\n bounds = []\n for b in [1.25, 2.25, 3.75, 5.75]:\n if b in self._R_boundaries:\n bounds.append(b)\n for b in [1.0, 2.0, 4.0, 6.0]:\n if b in self._R_boundaries:\n bounds.append(b)\n\n bound_pairs = []\n bound_args = []\n\n for i in range(len(bounds) - 1):\n bound_pairs.append([bounds[i], bounds[i+1]])\n bound_args.append(\n [np.argwhere(self._R_boundaries == bound_pairs[-1][0])[0, 0],\n np.argwhere(self._R_boundaries == bound_pairs[-1][1])[0, 0]])\n\n if pd.isnull(bound_args[-1]).any():\n raise ValueError(\"Boundary not found; given: {}, found:{}\"\n \"\".format(bound_pairs[-1], bound_args[-1]))\n\n # Add two more bound pairs\n if 1.5 in self._R_boundaries:\n bound_pairs.append([1.5, 2.0])\n bound_args.append(\n [np.argwhere(self._R_boundaries == 1.5)[0, 0],\n np.argwhere(self._R_boundaries == 2.0)[0, 0]])\n bound_pairs.append([1.5, 4.0])\n bound_args.append(\n [np.argwhere(self._R_boundaries == 1.5)[0, 0],\n np.argwhere(self._R_boundaries == 4.0)[0, 0]])\n\n bound_pairs.append([bound_pairs[0][0], bound_pairs[1][1]])\n bound_args.append([bound_args[0][0], bound_args[1][1]])\n\n # period_pairs = []\n # for i in range(len(self._P_boundaries) - 1):\n period_pairs = self.occr_p_names\n\n full_df = pd.DataFrame(index=range(np.shape(moccr)[0]))\n #split_df = pd.DataFrame()\n # columns=[\"{}-{}\".format(*bp) for bp in bound_pairs])\n\n for i, ((b1, b2), (a1, a2)) in enumerate(zip(bound_pairs, bound_args)):\n full_df[\"{}-{}\".format(b1, b2)] = moccr[:, a1:a2].sum(axis=-1)\n\n full_df = full_df.describe(percentiles=[0.16, 0.50, 0.84,\n 0.25, 0.75, 0.95])\n\n for i, ((b1, b2), (a1, a2)) in enumerate(zip(bound_pairs, bound_args)):\n int_occr = occr[:, a1:a2, :].sum(axis=1)\n\n for j, pp in enumerate(period_pairs):\n ulim = np.percentile(int_occr[:,j], percentile)\n full_df.loc[pp, \"{}-{}\".format(b1, b2)] = ulim\n\n # The med+-sig rate at the planet discovery\n if len(self.occr_p_names) > 1:\n for i, ((b1, b2), (a1, a2)) in enumerate(zip(bound_pairs,\n bound_args)):\n int_occr = occr[:, a1:a2, :].sum(axis=1)\n\n j = 0\n for check_P in self._P_boundaries[1:]:\n if check_P > 3.49:\n break\n else:\n j += 1\n\n pp = 'mean: ' + self.occr_p_names[j]\n\n # try:\n # j = np.argwhere(\n # np.isclose(self._P_boundaries, 5, atol=0.001))[0,0] - 1\n # pp = 'mean' + self.occr_p_names[j]\n # except IndexError:\n # continue\n\n med = np.percentile(int_occr[:,j], 50)\n low = np.percentile(int_occr[:,j], 16)\n hi = np.percentile(int_occr[:,j], 84)\n\n full_df.loc[pp, '{}-{}'.format(b1, b2)] = \\\n \"{:.3g}-{:.3g}+{:.3g}\".format(med, med-low, hi-med) \n\n\n #full_df = pd.concat([full_df, split_df])\n\n return full_df",
"def rebin (self, bins, tol=1e-4):\n\n bins = np.copy (np.sort (bins))\n for (i, b) in enumerate (bins):\n misses = np.abs (b - self.bins)\n j = np.argmin (misses)\n closest = np.min (misses)\n if closest > tol:\n raise ValueError (\n '{0} is not among current bin edges'.format (b))\n bins[i] = self.bins[j]\n if bins[0] != self.bins[0]:\n raise ValueError (\n 'binning startpoint should match ({0} vs {1})'.format (\n bins[0], self.bins[0]))\n if bins[-1] != self.bins[-1]:\n raise ValueError (\n 'binning endpoint should match ({0} vs {1})'.format (\n bins[-1], self.bins[-1]))\n\n n_newbins = len (bins) - 1\n newbin_indices = np.digitize (self.bins, bins)[:-1] - 1\n values = np.array ([\n np.sum (self.values[newbin_indices == i])\n for i in range (n_newbins)\n ])\n if self.errors is not None:\n errors = np.array ([\n np.sqrt (np.sum (self.errors[newbin_indices == i]**2))\n for i in range (n_newbins)\n ])\n else:\n errors = None\n return Hist (bins, values, errors)",
"def get_bucket_boundaries(feature):\n return np.unique(np.percentile(feature, range(0, 100))).tolist()",
"def set_bin_(data, bin_num, scale=100.):\n temp_data = -numpy.sort(-numpy.abs(data))\n\n bin_size = len(temp_data) / bin_num * 2\n bins = numpy.array([temp_data[int(i * bin_size)] for i in range(1, int(bin_num / 2))])\n bins = numpy.sort(numpy.append(numpy.append(-bins, [0.]), bins))\n bound = numpy.max(numpy.abs(data)) * scale\n bins = numpy.sort(numpy.append(-bound, numpy.append(bins, bound)))\n return bins",
"def test_range_argument(self):\n # we test with range from 0 to 2, with 3 bins -> [0, 0.66, 1.33, 2] exp. bins\n bins_range = (0, 2)\n bins_exp = np.array([0, 2 / 3, 1 + 1 / 3, 2])\n hist_exp = np.array([1, 2, 0])\n\n bin_edges, hist, _, _ = hist_w_unc(\n self.input,\n bins=self.n_bins,\n bins_range=bins_range,\n normed=False,\n )\n\n # check if we end up with the same bin edges anyway\n np.testing.assert_array_almost_equal(bins_exp, bin_edges)\n np.testing.assert_array_almost_equal(hist_exp, hist)",
"def remove_out_of_bounds(self, data, low_bound, high_bound):\n data = data.dropna()\n data = data[(data > low_bound).all(axis=1) & (data < high_bound).all(axis=1)] \n return data",
"def _find_limits(hist, treated, untreated):\n # Treated Sample\n # Set the lowest frequency observed in the treated subsample\n # as the default for the lower limit of the common support\n lower_limit = np.min(treated)\n\n # The following algorithm checks for any empty histogram bins\n # (starting from 0 going up to 0.5).\n # If an empty histogram bin is found, the lower_limit is set to\n # the corresponding P(Z) value of the next bin above.\n for low in range(len(hist[0][0])):\n\n # Only consider values in the interval [0, 0.5)\n if hist[1][low] > 0.5:\n break\n\n # If the algorithm starts below the sample minimum,\n # move on to the next bin\n elif hist[1][low] < np.min(treated):\n continue\n\n else:\n # If the current bin is non-empty, we have still continuous\n # support and the sample minimum remains our lower limit\n if hist[0][0][low] > 0:\n pass\n\n # If an empty bin is found, set the lower limit to the next bin above\n # and move on to the next bin until P(Z) = 0.5 is reached\n else:\n lower_limit = hist[1][low + 1]\n\n # Untreated Sample\n # Set the highest frequency observed in the untreated subsample\n # as the default for the upper limit of the common support\n upper_limit = np.max(untreated)\n\n # The following algorithm checks for any empty histogram bins\n # (starting from 1 going down to 0.5).\n # If an empty histogram bin is found, the upper_limit is set to the\n # current next bin.\n for up in reversed(range(len(hist[0][1]))):\n\n # Only consider values in the interval (0.5, 1]\n if hist[1][up] < 0.5:\n break\n\n # If the algorithm starts above the sample maximum, move on to the next bin\n elif hist[1][up] > np.max(untreated):\n continue\n\n else:\n # If the current bin is non-empty, we have still continuous support and\n # the sample maximum remains our upper limit\n if hist[0][1][up] > 0:\n pass\n\n # If an empty bin is found, set the upper limit to the next bin below\n # and move on to the next bin until P(Z) = 0.5 is reached\n else:\n upper_limit = hist[1][up]\n\n return lower_limit, upper_limit",
"def histogram(hist_data, bins, minimum, maximum):\r\n hist_data2 = [x for x in hist_data if x <= maximum and x >= minimum]\r\n y, x = np.histogram(hist_data2, bins=bins)\r\n x_new = [((x[i] + x[i - 1]) / 2) for i in range(1, len(x))]\r\n max_freq = max(y)\r\n mean = x_new[y.argmax()]\r\n sigma = (x[1] - x[0]) * 4\r\n\r\n gauss = models.GaussianModel()\r\n fit = gauss.fit(y, x=x_new, center = mean, amplitude = max_freq, sigma = sigma, nan_policy= 'omit')\r\n\r\n return x, y, x_new, fit.best_fit",
"def normalize(slice, bottom=99, down=1):\n #有点像“去掉最低分去掉最高分”的意思,使得数据集更加“公平”\n b = np.percentile(slice, bottom)\n t = np.percentile(slice, down)\n slice = np.clip(slice, t, b) #限定范围numpy.clip(a, a_min, a_max, out=None)\n\n #除了黑色背景外的区域要进行标准化\n image_nonzero = slice[np.nonzero(slice)]\n if np.std(slice) == 0 or np.std(image_nonzero) == 0:\n return slice\n else:\n tmp = (slice - np.mean(image_nonzero)) / np.std(image_nonzero)\n # since the range of intensities is between 0 and 5000 ,\n # the min in the normalized slice corresponds to 0 intensity in unnormalized slice\n # the min is replaced with -9 just to keep track of 0 intensities\n # so that we can discard those intensities afterwards when sampling random patches\n tmp[tmp == tmp.min()] = -9 #黑色背景区域\n return tmp"
] | [
"0.6620887",
"0.6594978",
"0.62357104",
"0.6183821",
"0.61543703",
"0.61226857",
"0.61187845",
"0.6117019",
"0.61008453",
"0.6096406",
"0.6086877",
"0.6066685",
"0.60292286",
"0.6000606",
"0.5948171",
"0.5942545",
"0.59317714",
"0.5907111",
"0.59046483",
"0.58851564",
"0.58800495",
"0.58703244",
"0.5838277",
"0.58366513",
"0.58327174",
"0.582422",
"0.58081174",
"0.5791248",
"0.57813597",
"0.5779384"
] | 0.7037333 | 0 |
The method used to make sure that the number of starting chips for each player can be set. | def test_set_starting_chips(self):
        # Set up new games and attempt to set each valid number of starting chips
valid_chips = [
1,
10,
100,
9999,
]
for chips in valid_chips:
game = Game()
game.setup_new_game()
game.set_pack_number(1)
game.set_starting_chips(chips)
self.assertEqual(game.starting_chips, chips, msg="The number of player starting chips was not correctly set with " + str(chips) + " chips.")
# Make sure that the new game state was correctly set
self.assertEqual(game.state.name, "get_number_of_players", msg="The game state was not correctly set after setting the number of starting chips.")
# Try to set invalid chip numbers
invalid_chips = [
0,
-1,
-100,
1.5,
-1.5,
]
for chips in invalid_chips:
game = Game()
game.setup_new_game()
game.set_pack_number(1)
success = False
try:
game.set_starting_chips(chips)
except InvalidGameStartingChips:
success = True
self.assertTrue(success, msg="An invalid number of starting chips " + str(chips) + " was able to be set.")
# Try to reset the number of starting chips to throw an error
game = Game()
game.setup_new_game()
game.set_pack_number(1)
game.set_starting_chips(100)
success = False
try:
game.set_starting_chips(200)
except InvalidGameMethodOrder:
success = True
self.assertTrue(success, msg="The number of starting chips was incorrectly able to be reset.") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_partial_deck_has_fewer_cards(self):\n self.assertEqual(len(self.partialDeck.deck), 46)",
"def enough_players():\n return True",
"def set_n_players(self):\n complain = \"\"\n while True:\n clear_output()\n try:\n self.n_players = int(\n input(f\"{complain}Please insert the number of players (between 2 to 6): \\n\"))\n if self.n_players >= 2 and self.n_players < 7:\n self.start_troops = 120 / self.n_players\n break\n elif self.n_players < 2:\n complain = \"Not enough players!\\n\"\n elif self.n_players >= 7:\n complain = \"Too many players!\\n\"\n except:\n complain = \"Not a valid number!\\n\"\n pass",
"def min_players(self):\n return 2",
"def test_shared_cards_len(self):\n self.assertEqual(len(self.hand.sharedCards), 3)",
"def count_chips(board, player):\n cont = 0\n for row in board:\n for col in row:\n if col == PLAYER_CHIPS[player]:\n cont += 1\n return cont",
"def checkForPickup(self):\n if self.counter == 0:\n if self.game.player.reticule in self.overlapping_sprites and (games.keyboard.is_pressed(games.K_a) \\\n or games.keyboard.is_pressed(games.K_d)):\n self.counter = 15\n if self.held == 0:\n self.game.player.held_item = self\n self.held = 1\n self.y = self.game.player.y\n else:\n self.game.player.held_item = None\n self.held = 0",
"def populate_states(self, list, player):\n if self.pre_state is None:\n for action in list:\n self.occupied[action] = player\n self.available_moves.remove(action)\n return 1\n print(\"you can only populate at the init state\")\n return 0",
"def test_initial_limit(self):\n\n g = test_setup.simple_two_player()\n\n p1, p2 = g.players\n\n self.assertEqual(g._clientele_limit(p1), 2)\n self.assertEqual(g._clientele_limit(p2), 2)",
"def player_win(self):\n global chips\n global placed_bet\n\n chips = (self.final_bet*2 + chips)\n self.victory = True\n placed_bet = False",
"def check_upgrades(self):\n if \"HMI-25\" in self.player.get_item_names():\n self.player.set_capacity(DEFAULT_CAPACITY + 25)\n if \"HMI-50\" in self.player.get_item_names():\n self.player.set_capacity(DEFAULT_CAPACITY + 50)",
"def test_set_players_number(self):\n\n # Setup new games and attempt to set thier number of players\n valid_players = [\n 1,\n 2,\n 10,\n 999,\n ]\n for players in valid_players:\n game = Game()\n game.setup_new_game()\n game.set_pack_number(1)\n game.set_starting_chips(100)\n game.set_players_number(players)\n self.assertEqual(game.players_number, players, msg=\"The number of players was not correctly set with \" + str(players) + \" players.\")\n\n # Make sure that the new game state is correctly set\n self.assertEqual(game.state.name, \"get_player_names\", msg=\"The game state was not correctly set after setting the number of players in the game.\")\n\n # Try to set invalid player numbers\n invalid_players = [\n 0,\n -1,\n -100,\n 1.5,\n ]\n for players in invalid_players:\n game = Game()\n game.setup_new_game()\n game.set_pack_number(1)\n game.set_starting_chips(100)\n success = False\n try:\n game.set_players_number(players)\n except InvalidGamePlayersNumber:\n success = True\n self.assertTrue(success, msg=\"An invalid number of players \" + str(players) + \" was able to be set.\")\n\n # Try to reset the number of players to throw an error\n game = Game()\n game.setup_new_game()\n game.set_pack_number(1)\n game.set_starting_chips(100)\n game.set_players_number(3)\n success = False\n try:\n game.set_players_number(2)\n except InvalidGameMethodOrder:\n success = True\n self.assertTrue(success, msg=\"The number of players was incorrectly able to be reset.\")",
"def tile_picked(self):\n assert len(self.hand) == 5\n self.turn_count += 1",
"def test_deal_insufficient_cards(self):\n cards = self.deck._deal(100)\n self.assertEqual(len(cards), 52)\n self.assertEqual(self.deck.count(), 0)",
"def slot_choke(self):\n if self.choke:\n _choke = [1 for x in range(8)]\n else:\n _choke = [random.randint(0,4) for x in range(8)]\n \n return _choke",
"def noOfPlayers(self):\n\t\tnumber = 0\n\t\tfor n in range(6):\n\t\t\tif self.playerList[n] != None:\n\t\t\t\tnumber = number + 1\n\t\treturn number",
"def create_number_of_players(self):\n self.number_of_players = pyip.inputInt(\n prompt='\\nEnter number of players (1 to 4):\\n', min=1, max=4)",
"def init_devices(self):\n self.hp_nb = int(self.rs_nb* self.hp_proportion/(1- self.hp_proportion))\n self.defense_cost = self.hp_nb * self.hp_unit_cost\n rs_devices = [True for i in range(self.rs_nb)] #rs --> True\n hp_devices = [False for i in range(self.hp_nb)] #hp --> False\n self.devices = rs_devices + hp_devices\n shuffle(self.devices)",
"def initialize_round(list_of_players):\n Card.rejected_cards = []\n Card.initialize_deck_from_json()\n for player in list_of_players:\n player.initialize_cards()\n player.thalom = False",
"def _pre_calc_player(self):\n self._player_scan_unshifted = []\n\n # Only 1,2,3 required, but 0..3 calculated\n NUMBER_RANGE = 4\n\n # Only 1,2,4 required, but 0..4 calculated\n SIZE_RANGE = 5\n\n # Gaps are 0, 2, 4, 8\n GAP_RANGE = 9\n\n GRAPHIC_RANGE = 256\n\n # Create enough empty lists to allow direct indexing.\n self._player_scan_unshifted = [[] for x in range(NUMBER_RANGE)]\n for number in [1,2,3]:\n\n self._player_scan_unshifted[number] = [[] for x in range(SIZE_RANGE)]\n for size in [1,2,4]:\n\n self._player_scan_unshifted[number][size] = [[] for x in range(GAP_RANGE)]\n for gap in [0,2,4,8]:\n self._player_scan_unshifted[number][size].append([])\n for reflect in range(2):\n self._player_scan_unshifted[number][size][gap].append([])\n for g in range(GRAPHIC_RANGE):\n # Create the 8-bit 'graphic'\n graphic = [False] * 8\n for i in range(8):\n if (g >> i) & 0x01:\n graphic[i] = True\n\n if reflect:\n graphic.reverse()\n\n # Scale the graphic, so each pixel is 'size' big\n graphic = [x for x in graphic for _ in [0] * size]\n\n scan = [False] * Stella.FRAME_WIDTH\n for n in range(number):\n offset = n*gap*8\n scan[offset:offset + len(graphic)] = graphic\n\n self._player_scan_unshifted[number][size][gap][reflect].append(scan)",
"def test_consumed_cards(self):\n game = TestGames.replay(9, [3, 1, 0, 0])\n consumed_cards = game.consumed_cards()\n self.assertEqual(len(consumed_cards), 8)\n\n self.assertListEqual(list(consumed_cards),\n [2 / 5, # guards\n 0 / 2, # priest\n 1 / 2, # baron\n 0 / 2, # handmaid\n 1 / 2, # prince\n 0 / 1, # king\n 0 / 1, # countess\n 0 / 1]) # princess",
"def _test_player_list_size(self):\n return len(self.player_list)",
"def check_winner(self):\n if self.player1.chips <= BIG_BLIND_BET:\n return 2\n elif self.player2.chips <= BIG_BLIND_BET:\n return 1\n else:\n return 0",
"def __init__(self, min_player_count):\n self.min_player_count = min_player_count",
"def __init__(self, board_width, board_height):\n # Create board using generate_grid_dict method with given width and height.\n board = self.generate_grid_dict(board_width, board_height)\n self.draw = False\n pygame.init()\n\n # Set the caption for the board and the font for the win prompt.\n pygame.display.set_caption('Connect4 - Player 1')\n self.game_font = pygame.freetype.Font(\"SF Distant Galaxy.ttf\", 40)\n\n # size of each square of the grid:\n self.square_size = 80\n # generate board width (amount of squares and square width):\n self.width = board_width * self.square_size\n # generate board height (amount of squares and square height):\n self.height = board_height * self.square_size\n # generate the radius of the chips depending on the square size:\n self.radius = int(self.square_size / 4)\n # find the middle of the square for chip placement:\n self.square_mid = int(self.square_size / 2)\n # set the screen size with the board width and height:\n self.screen = pygame.display.set_mode((self.width, self.height))\n\n # Fill the screen with a white background.\n background = pygame.Surface(self.screen.get_size())\n background.fill((255, 255, 255))\n self.background = background.convert()\n\n # Build the grid.\n for i in range(0, self.width, self.square_size):\n pygame.draw.rect(self.background, (0, 0, 0), (i, 0, 0, self.height))\n for i in range(0, self.height, self.square_size):\n pygame.draw.rect(self.background, (0, 0, 0), (0, i, self.width, 0))\n self.screen.blit(self.background, (0, 0))\n\n # Setup, so player one starts.\n self.playerOne = True\n self.red = 250\n self.blue = 0\n\n # Help dict for logic, when drawing a chip. Maps 0 -> size, 1 -> size - 1...\n self.draw_dict_mapping = {}\n for i in range(self.height//80 + 1):\n self.draw_dict_mapping[i] = self.height//80 - i\n\n # Start the game with the run game method.\n self.run_game(board)",
"def test_consumed_cards_longer(self):\n game = TestGames.replay(9, [3, 1, 0, 0, 1, 2, 2, 0, 6, 3,\n 0, 0, 1, 2, 6, 0, 0, 0, 0, 0])\n consumed_cards = game.consumed_cards()\n self.assertEqual(len(consumed_cards), 8)\n\n self.assertListEqual(list(consumed_cards),\n [3 / 5, # guards\n 0 / 2, # priest\n 1 / 2, # baron\n 1 / 2, # handmaid\n 1 / 2, # prince\n 1 / 1, # king\n 0 / 1, # countess\n 0 / 1]) # princess",
"def setUp(self) -> None:\n self.max = 100\n self.multiples_of_3_and_5 = [n for n in range(1, self.max + 1) if n % 3 == 0 and n % 5 == 0]\n self.multiples_of_3 = [n for n in range(1, self.max + 1) if n % 3 == 0 and n not in self.multiples_of_3_and_5]\n self.multiples_of_5 = [n for n in range(1, self.max + 1) if n % 5 == 0 and n not in self.multiples_of_3_and_5]",
"def selectables_length(self) -> int:\n\n return 1",
"def startState(self):\n\n n_squares_per_row = int(math.ceil(math.sqrt(self.n_snakes))**2)\n square_size = self.grid_size // int(n_squares_per_row)\n assignment = random.sample(range(n_squares_per_row ** 2), self.n_snakes)\n\n\n assert self.grid_size >= 3*n_squares_per_row\n\n snakes = {}\n for snake, assign in enumerate(assignment):\n head = (random.randint(1, square_size-2) + (assign // n_squares_per_row) * square_size,\n random.randint(1, square_size-2) + (assign % n_squares_per_row) * square_size)\n snakes[snake] = newSnake([head, utils.add(head, random.sample(DIRECTIONS, 1)[0])], snake)\n\n fruits_to_put = 2 * int(self.fruit_ratio) + 1\n start_state = State(snakes, {})\n start_state.addNRandomfruits(fruits_to_put, self.grid_size)\n return start_state",
"def initial_track_check(self, track) -> List[int]:\n init_pos = DataSupporter.get_track_initial_position(track)\n collided_indexes = []\n for bot_index, bot_car in enumerate(self.bot_cars):\n if DataSupporter.dist(init_pos, bot_car.position_PLAY) < 5:\n collided_indexes.append(bot_index)\n\n if self.car is not None:\n if DataSupporter.dist(self.car.position_PLAY, init_pos) < 5:\n collided_indexes.append(-1)\n\n return collided_indexes"
] | [
"0.5986601",
"0.5935184",
"0.591481",
"0.58434373",
"0.57822806",
"0.5757061",
"0.5752133",
"0.5748309",
"0.57169116",
"0.56981397",
"0.56899023",
"0.56770945",
"0.56551063",
"0.56398237",
"0.5611947",
"0.5601338",
"0.55866843",
"0.55602324",
"0.55322117",
"0.5494507",
"0.5480017",
"0.5473797",
"0.5456246",
"0.54514533",
"0.5444946",
"0.54306406",
"0.54159105",
"0.54146665",
"0.5393185",
"0.5370512"
] | 0.76785713 | 0 |
The method used to make sure that the names of the players in the game can be set. | def test_set_player_names(self):
# Setup new games and attempt to set their players' names
valid_players = [
["Bob", "Sam", "Cal", "Kris"],
["Player 1", "Player 2", "Player 3", "Player 4", "Player 5"],
["Bot"],
["P1", "P2", "P3"],
]
for players in valid_players:
game = Game()
game.setup_new_game()
game.set_pack_number(1)
game.set_starting_chips(100)
game.set_players_number(len(players))
game.set_player_names(players)
self.assertEqual(game.player_names, players, msg="The game's player names were not correctly set with: " + str(players))
        # Make sure that the new game state is correctly set
self.assertEqual(game.state.name, "start_game", msg="The game's state was not correctly set after setting the player names.")
# Try to set invalid players
invalid_players = [
None,
[None, None],
[123, 456, 789],
["Bob", "Sam", 123],
["John", ""],
]
for players in invalid_players:
game = Game()
game.setup_new_game()
game.set_pack_number(1)
game.set_starting_chips(100)
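            # players may be None here; 'players or "1"' falls back to a one-character string so len() gives 1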
game.set_players_number(len(players or "1"))
success = False
try:
game.set_player_names(players)
except InvalidGamePlayerNames:
success = True
self.assertTrue(success, msg="The following invalid series of player names was able to be set: " + str(players))
# Test the case where the number of players given is not the same as the number of names given
game = Game()
game.setup_new_game()
game.set_pack_number(1)
game.set_starting_chips(100)
game.set_players_number(2)
success = False
try:
game.set_player_names(["P1", "P2", "P3"])
except InvalidGamePlayerNames:
success = True
self.assertTrue(success, msg="A number of player names unequal to the number to the number of players in the game was able to be set.")
# Try to reset the names of the players to throw an error
game = Game()
game.setup_new_game()
game.set_pack_number(1)
game.set_starting_chips(100)
game.set_players_number(3)
game.set_player_names(["P1", "P2", "P3"])
success = False
try:
game.set_player_names(["P01", "P02", "P03"])
except InvalidGameMethodOrder:
success = True
self.assertTrue(success, msg="The names of the players was incorrectly able to be reset.") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_names_users(self):\n user_1 = self.view.entry_player_1.get()\n user_2 = self.view.entry_player_2.get()\n if len(user_1) == 0 or len(user_2) == 0:\n\n tk.messagebox.showwarning(\"Warning\", \"Please enter players name\")\n self.logger.warning(\"Please enter players name\")\n return False\n self.update_players_name(user_1, user_2)\n return True",
"def set_players(self, player_min: int, player_max: int):\n name: str = ' '\n print('Please give between %i and %i names for your players'\n % (player_min, player_max))\n while (name != '') and (len(self.players) < player_max):\n name = input('Players {}: '.format(len(self.players)+1))\n if name != '':\n self.players.append(Player(name))\n elif len(self.players) < player_min:\n name = ' '\n\n print()\n print('{} players registered.'.format(len(self.players)))\n print()",
"def set_name(self):\n player1 = input('Enter a name for player 1: ')\n self._players.append(player1)\n player2 = input('Enter a name for player 2: ')\n self._players.append(player2)\n print()\n return self._players",
"def test_get_player_names(self):\n INPUT.side_effect = ['A', 'M', 'Z', '']\n names = game.pig.get_player_names()\n self.assertEqual(names, ['A', 'M', 'Z'])",
"def create_players(self):\n for i in range(self.number_of_players):\n self.players_names.append(pyip.inputStr(\n prompt=f'\\nEnter name of player {i + 1}:\\n'))",
"def setup_players(self, players):\n\t\tself.players.clear()\n\t\tids = set([p.get_player_id() for p in players])\n\t\tfor p in self.state.get_players():\n\t\t\tif p not in ids:\n\t\t\t\traise PlayerException(p)\n\t\tfor p in players:\n\t\t\tself.players[p.get_player_id()] = p",
"def initialize_players():\n while True:\n nb_of_players = input(\"\\nEntrez le nombre de joueurs : \")\n if not nb_of_players.isdigit():\n print(\"You have to enter a number!\")\n else:\n nb_of_players = int(nb_of_players)\n if nb_of_players < 2:\n print(\"You have to enter at least two!\")\n else:\n break\n nb_of_players = int(nb_of_players)\n list_of_players = [] #This list is going to be returned\n names_secure = [] #stores player's names in lower mode for security\n for index in range(1, nb_of_players+1):\n while True:\n player_name = input(\"Entrer le nom du joueur {} \".format(index))\n if (player_name.lower() == 'end' or player_name.lower() in names_secure):\n print(\"Incorrect Name\")\n else:\n names_secure.append(player_name.lower())\n new_player = Player(player_name)\n list_of_players.append(new_player)\n break\n return list_of_players",
"def player_name(player: Character) -> None:\r\n global censored_words\r\n while player.name == \"\":\r\n playerName = str(input(\"please enter player one's name: \"))\r\n for i in censored_words:\r\n if playerName == i:\r\n print(\"please choose another name\")\r\n playerName = \" \"\r\n elif playerName == playerTwo.name or playerName == playerOne.name:\r\n print(\"please choose another name\")\r\n playerName = \" \"\r\n else:\r\n player.name = playerName",
"def test_valid_game_setup(self):\n self.assertEqual(self._game.active_players(), 1)\n for x in xrange(1, 4):\n self._game.add_player(self._users[x], x)\n self.assertEqual(self._game.active_players(), 4)\n self.assertIsREADY(self._game)",
"def enter_game_played(self, players_names, winners_names, game, date, group):\n try:\n game_played = GamePlayed()\n game_played.game = Game.objects.get(name__exact=game)\n game_played.date = date\n game_played.group = group\n game_played.save()\n\n for player in players_names:\n game_played.players.add(Player.objects.get(user__first_name__exact=player))\n for winner in winners_names:\n game_played.winners.add(Player.objects.get(user__first_name__exact=winner))\n except:\n print(\"Error entering game\", game)\n pass",
"def _update_players(self):\n if not self.game_state:\n raise RuntimeError(\"Cannot call update_players when the game has not started!\")\n for player in self.player_list:\n # Do not update a player that has exited or been expelled from the level\n if not self.game_state.is_character_expelled(player.entity) and not \\\n player.entity in self.game_state.get_completed_characters():\n self._update_player(player)",
"def get_game_ready():\n\tnum_players = int(input(\"\"\"How many players will be playing today? (between 2 and 5): \"\"\"))\n\twhile num_players > 5 or num_players < 2:\n\t\tnum_players = int(input(\"\"\"Between 2 and 5 players please: \"\"\"))\n\tnum_number_of_people = int(input(\"\"\"How many of these players will be humans?: \"\"\"))\n\twhile num_number_of_people > num_players or num_number_of_people < 0:\n\t\tnum_number_of_people = int(input(f\"\"\"Please enter a number equal to or less than the number of players ({num_players}): \"\"\"))\n\tnum_people = num_number_of_people\n\twhile num_people > 0:\n\t\tNAMES[abs(num_people - num_number_of_people)] = input(f\"\"\"Name of player {abs(num_people - num_number_of_people)+1}: \"\"\")\n\t\tnum_people -= 1\n\twhile len(NAMES) > num_players:\n\t\tNAMES.pop()\n\treturn NAMES",
"def setNewPlayerName(self, playerName):\n if(self.__playerName==\"???\" and not(self.__playerAlreadyExists(playerName)) ):\n #verify that there isn't already a player by this name\n fileLocs=FileLocations()\n filename = fileLocs.playerProfiles+r\"\\profiles.p\"\n playerList = []\n index = -1\n f = None\n try:\n #load list of players -- if there's no file, then we skip this step \n f = open(filename, \"r\")\n playerList = pickle.load(f)\n f.close() \n except IOError: \n pass\n finally:\n #add name to list\n self.__playerName=playerName\n playerList.append(playerName)\n try:\n f = open(filename,\"w\")\n pickle.dump(playerList, f)\n f.close()\n self.savePlayerInfo()\n except IOError:\n raise PlayerIOError(\"Unable to add player name to profile.p\")\n return True\n else:\n return False ##unsuccessful -- return false",
"def update_players_name(self, player_1, player_2):\n self.model.player_1 = player_1\n self.model.player_2 = player_2\n self.logger.info(\"User_1 has name %s, user_2 has name %s\", player_1, player_2)",
"def enough_players():\n return True",
"def get_player_name(self):\n if self.name_entered is False:\n self.name = self.input_name(\"Please enter your name:\")\n self.name_entered = True\n self.score_file()",
"def create_players_list(self):\n for p in self.players_names:\n self._players_list.append(Player(p))",
"def init_players(self):\n complain = \"\"\n players_turn = random.sample(range(self.n_players), self.n_players)\n players_created = {}\n picked_colors = []\n for x in range(self.n_players):\n while True:\n clear_output()\n try:\n color = input(\n f\"{complain}Player {x+1}, please type in one of the following colors: ({', '.join([x.capitalize() for x in self.world.player_colors if x not in picked_colors])}):\\n\").lower()\n if color in self.world.player_colors and color not in picked_colors:\n picked_colors.append(color)\n players_created[players_turn[x]] = Player(\n color.capitalize(), self.start_troops)\n break\n else:\n complain = \"Please enter a valid color\\n\"\n except:\n pass\n\n self.players = [players_created[y] for x in range(\n self.n_players) for y in players_created.keys() if int(y) == x]",
"async def players(ctx):\n if ctx.message.channel.name.lower() not in tod_channels:\n return\n\n room = ctx.message.channel.name.lower()\n if room not in tod_games:\n await amor_manager.say(\"Truth Or Dare not in progress in {}\".format(room))\n return\n\n await amor_manager.say(\"Current Players: {}\".format(\", \".join(tod_games[room]['participants'].keys())))",
"def setExistingPlayerName(self, playerName):\n if self.__playerAlreadyExists(playerName):\n self.__playerName = playerName\n return True\n else:\n return False",
"def check_player_name(words):\n\n # check for a player by looking at all the words as a string then working backwards.\n for x in reversed(range(len(words) + 1)):\n\n name = words[:x]\n remaining = words[x:]\n\n if name == []:\n continue\n\n player = nhl_players.get_player(_make_name(name))\n if player:\n # print (\"player hit %s\" % player )\n return player, remaining\n\n return None, words",
"def test_name_must_be_present(self):\n response = self.client.post(url_for('teams'),\n data={\n 'capacity': 10,\n 'number_players': 6,\n 'pitch_postcode': 'E1 6LT',\n 'time': '2019-01-01 13:00'\n })\n self.assertEqual(response.status_code, 400)",
"def __init__(self):\r\n self.players = {}",
"def seed_players(self):\n raise NotImplementedError()",
"def initialize_players():\n return [Player(name, Hand([]), 0) for name in PLAYER_NAMES]",
"def ready_new_round_players(self):\n for player in self.players:\n if player.is_playing:\n player.has_played = False",
"def test_boxscore_player_stats(self):\n test_v_player_stat = self.BS.vTeam_player_stats[0]['firstName']\n test_answer_v = 'Isaac'\n test_h_player_stat = self.BS.hTeam_player_stats[0]['firstName']\n test_answer_h = 'Pascal'\n self.assertEqual(test_v_player_stat, test_answer_v)\n self.assertEqual(test_h_player_stat, test_answer_h)",
"def test_set_players_number(self):\n\n # Setup new games and attempt to set thier number of players\n valid_players = [\n 1,\n 2,\n 10,\n 999,\n ]\n for players in valid_players:\n game = Game()\n game.setup_new_game()\n game.set_pack_number(1)\n game.set_starting_chips(100)\n game.set_players_number(players)\n self.assertEqual(game.players_number, players, msg=\"The number of players was not correctly set with \" + str(players) + \" players.\")\n\n # Make sure that the new game state is correctly set\n self.assertEqual(game.state.name, \"get_player_names\", msg=\"The game state was not correctly set after setting the number of players in the game.\")\n\n # Try to set invalid player numbers\n invalid_players = [\n 0,\n -1,\n -100,\n 1.5,\n ]\n for players in invalid_players:\n game = Game()\n game.setup_new_game()\n game.set_pack_number(1)\n game.set_starting_chips(100)\n success = False\n try:\n game.set_players_number(players)\n except InvalidGamePlayersNumber:\n success = True\n self.assertTrue(success, msg=\"An invalid number of players \" + str(players) + \" was able to be set.\")\n\n # Try to reset the number of players to throw an error\n game = Game()\n game.setup_new_game()\n game.set_pack_number(1)\n game.set_starting_chips(100)\n game.set_players_number(3)\n success = False\n try:\n game.set_players_number(2)\n except InvalidGameMethodOrder:\n success = True\n self.assertTrue(success, msg=\"The number of players was incorrectly able to be reset.\")",
"def __add_players(self):\n players_list = []\n players_list.extend([(\"NEW PLAYER\", \"**new**\")])\n players_list.extend(self._roster.get_roster())\n players_list.extend([(\"BACK TO MENU\", \"**menu**\")])\n\n players = [\n inquirer.List(\n 'selection',\n message=\"ADD/REMOVE (Use ↑ and ↓ to select, ENTER to confirm)\",\n choices=players_list,\n default=\"NEW PLAYER\",\n carousel=True)\n ]\n\n self.clear_screen()\n self.__print_logo()\n selection = inquirer.prompt(players)['selection']\n\n if selection == \"**menu**\":\n pass\n elif selection == \"**new**\":\n name = self.__prompt_name()\n if name:\n self._roster.add_player(name)\n else:\n delete = inquirer.confirm(\n f\"Do you want to remove '{selection}'?\", default=True\n )\n if delete:\n self._roster.remove_player(selection)\n input(f\"'{selection}' removed. Press ENTER to continue.\")",
"def _load_player_map(self) -> None:\n # Loading people that have had ab appearance in the year specified\n # This might not be general enough as some players get paid even if they don't play\n sql = \"\"\"\\\n select p.playerid, p.namefirst, p.namelast, p.namegiven, a.team_id\n from people p\n INNER JOIN appearances a ON p.playerid = a.playerid and a.yearid = %s\n \"\"\"\n\n self._cursor.execute(sql, (self._yearid,))\n duplicates = 0\n all_players = self._cursor.fetchall()\n for player in all_players:\n r = {'playerid': player[0], 'namefirst': player[1], 'namelast': player[2],\n 'namegiven': player[3], 'team_id': player[4]}\n\n # Build a key from namefirst, namelast and team_id, then remove all spaces\n # Make sure we don't already have the player loaded, count and report duplicates.\n key = \"{}{}{}\".format(player[1], player[2], player[4]).replace(\" \", \"\")\n if self._player_map.get(key) is None:\n self._player_map[key] = r\n else:\n duplicates += 1\n\n # We'll add the player again using his given first name if different from namefirst\n given_first = player[3].split()[0]\n if given_first != player[1]:\n key2 = \"{}{}{}\".format(given_first, player[2], player[4]).replace(\" \", \"\")\n if self._player_map.get(key2) is None:\n self._player_map[key2] = r\n else:\n duplicates += 1\n\n if duplicates > 0:\n raise RuntimeError(\"Duplicates found building player map: \" + str(duplicates))"
] | [
"0.7112906",
"0.6798745",
"0.67947006",
"0.67672193",
"0.6759732",
"0.66581255",
"0.6556097",
"0.65228826",
"0.64666003",
"0.64418626",
"0.63995534",
"0.63855034",
"0.6371685",
"0.6351709",
"0.63382185",
"0.63223565",
"0.63055414",
"0.62601405",
"0.61606365",
"0.6153325",
"0.6150226",
"0.61350065",
"0.61110663",
"0.61060596",
"0.60981554",
"0.6094168",
"0.60940397",
"0.60913616",
"0.6086816",
"0.6058583"
] | 0.77651775 | 0 |
Selects two customers that are nearest to each other and their neighbours and removes them from the solution. See ``customers_to_remove`` for the degree of destruction done. Similar to cross route removal in Hornstra et al. (2020). | def cross_route(current: Solution, rnd_state: Generator) -> Solution:
problem = Problem()
destroyed = deepcopy(current)
customers = set(range(problem.num_customers))
removed = SetList()
while len(removed) < customers_to_remove():
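# Pick a random customer that has not been removed yet and take it (and its neighbours) out of its route.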
candidate = rnd_state.choice(tuple(customers))
route_candidate = destroyed.find_route(candidate)
_remove(destroyed, candidate, removed, customers)
# Find the nearest customer that is not yet removed and in a different
# route, and remove it and its neighbours as well.
for other in problem.nearest_customers[candidate]:
if other not in route_candidate and other not in removed:
_remove(destroyed, other, removed, customers)
break
destroyed.unassigned = removed.to_list()
return destroyed | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove_existing_customers(self):\n # remove the customers which are not active (.is_active )\n self.to_move = False\n #for cust in self.customers:\n # print(cust.state)\n self.customers = [cust for cust in self.customers if cust.state != 'checkout']\n #if cust.to_move():\n # self.to_move = True",
"def delete_customer(self, customer_to_del):\n customer_list = self._customer_repo.get_customer_list()\n for customer in customer_list:\n if customer.get_customer_id() == customer_to_del: #Maybe need to find a more efficient way\n customer_list.remove(customer)\n self._customer_repo.overwrite_customer_list(customer_list)\n credit_card_list = self._customer_repo.get_credit_card_list()\n for credit_card in credit_card_list:\n if credit_card.get_customer_id() == customer_to_del: #Maybe need to find a more efficient way\n credit_card_list.remove(credit_card)\n self._customer_repo.overwrite_credit_card_list(credit_card_list)",
"def remove_existing_customers(self):\n\n for i in range(len(self.customers)):\n if self.customers[i].is_active() == False:\n self.customers[i]= 'out'\n self.customers = [item for item in self.customers if item!='out' ]",
"def intraroute_2opt(route, customers):\n if route.ncustomers < 2: return False\n r = copy.deepcopy(route)\n c1 = random.randint(1,r.ncustomers-1)\n c2 = random.randint(c1+2,r.ncustomers+1)\n #print(c1, c2)\n r.customers[c1:c2] = r.customers[c1:c2][::-1]\n #print(r.customers)\n r.update(customers)\n if r.violate_windows(customers):\n return False\n else:\n route.customers[c1:c2] = r.customers[c1:c2]\n route.update(customers)\n print(\"succeed intraroute 2opt\")\n return True",
"def interroute_2opt(route1, route2, customers):\n r1 = copy.deepcopy(route1)\n r2 = copy.deepcopy(route2)\n c1 = random.randint(1,r1.ncustomers)\n c2 = random.randint(1,r2.ncustomers)\n r1cs = r1.customers[:]\n r1.customers[c1:] = r2.customers[c2:]\n r1.ncustomers = len(r1.customers) - 2\n #print(r1cs, r1.customers, r1.ncustomers)\n r1.update(customers)\n if r1.violate_windows(customers): return False\n r2.customers[c2:] = r1cs[c1:]\n r2.ncustomers = len(r2.customers) - 2\n r2.update(customers)\n if r2.violate_windows(customers): return False\n route1.customers[:] = r1.customers\n route1.ncustomers = r1.ncustomers\n route1.update(customers)\n route2.customers[:] = r2.customers\n route2.ncustomers = r2.ncustomers\n route2.update(customers)\n print(\"succeed interroute 2opt\")\n return True",
"def movable_intraroute_customers(route, customers):\n mcust = []\n for c in range(route.ncustomers):\n if len(factible_route_positions(route.customers[c+1],\n route,customers)) > 1:\n mcust.append(c)\n return mcust",
"def prune_to(self, names, merge_monotomies=True):\n self.prune_to_nodes(self.get_nodes(names), merge_monotomies)",
"def remove_neighbor(self):\n self.fono -= 1",
"def get_dists_to_from_new_cust(\n customer: Customer,\n known_customers: List[Customer],\n osr_api_key: str,\n) -> Dict[Tuple[str, str], float]:\n\n # If there are no existing customers, just return a 0-distance dict for the\n # single self-directed arc.\n if not known_customers:\n return {(customer.cust_id, customer.cust_id): 0.0}\n\n # Instantiate our client to pull the distance matrix.\n osr_client = client.Client(key=osr_api_key)\n\n # Get the list of customer coordinates, along with their IDs.\n # Note that they're in (lon, lat) order instead of (lat, lon).\n cust_coords_w_id = [(known_cust.cust_id, (known_cust.lon, known_cust.lat)) for known_cust in known_customers]\n\n # Append the new customer.\n cust_coords_w_id.append((customer.cust_id, (customer.lon, customer.lat)))\n\n # Note the number of total customers, along with the index of the new customer that we need distances for.\n n = len(cust_coords_w_id)\n new_cust_idx = n-1\n\n # Define the basic request we'll make to fetch the distance matrix.\n request = {\n 'locations': [cust_coords[1] for cust_coords in cust_coords_w_id],\n 'profile': 'driving-car',\n 'metrics': ['duration'],\n }\n\n # First, get the distances FROM the new customer to all the existing customers\n request['sources'] = [new_cust_idx]\n response = osr_client.distance_matrix(**request)['durations']\n logging.info(\"Distances *FROM* new customer retrieved\")\n new_distances = {\n (cust_coords_w_id[new_cust_idx][0], cust_coords_w_id[i][0]): response[0][i]\n for i in range(n) # This also includes the distance from the new customer to itself\n }\n\n # Next, get the distances TO the new customer from all the existing customers\n request.pop('sources') # no longer want to specify a single source\n request['destinations'] = [new_cust_idx] # but we do want to specify a destination\n response = osr_client.distance_matrix(**request)['durations']\n logging.info(\"Distances *TO* new customer retrieved\")\n new_distances.update({\n (cust_coords_w_id[i][0], cust_coords_w_id[new_cust_idx][0]): response[i][0]\n for i in range(new_cust_idx) # don't include the dist to itself this time\n })\n \n return new_distances",
"def eliminiateEmptyEdges(self, distance = 100):\n print \"Edge elimination started\"\n \n selected_edge_ids = []\n # let us \n \n for point in self.gps_points:\n results = self.idx.nearest(((point.getPoint().x-distance/2), \n (point.getPoint().y-distance/2),\n (point.getPoint().x+distance/2),\n (point.getPoint().y+distance/2)), objects=True)\n for result in results:\n from_node = self.node_counter__node.get(result.object.from_node.getAttributes().get(\"nodecounter\"))\n to_node = self.node_counter__node.get(result.object.to_node.getAttributes().get(\"nodecounter\"))\n edge_counter = self.G.edge[from_node][to_node].get(\"edgecounter\")\n if edge_counter not in selected_edge_ids:\n selected_edge_ids.append(edge_counter)\n print str(len(selected_edge_ids)) + \" edges found to keep.\"\n \n elimination_counter = 0\n for edge in self.G.edges():\n edgecounter = self.G.edge[edge[0]][edge[1]].get(\"edgecounter\")\n if edgecounter not in selected_edge_ids:\n edge_tuple = (self.G.edge[edge[0]][edge[1]].get(\"edge\").from_node, self.G.edge[edge[0]][edge[1]].get(\"edge\").to_node)\n self.G.remove_edge(*edge_tuple)\n elimination_counter = elimination_counter + 1\n \n print str(elimination_counter) + \" edges eliminated.\"",
"def removeNeighbor(self, neighborID):",
"def __remove_clients__(self, r, clients_to_remove,\r\n prune=True, reason=None):\r\n for entry in clients_to_remove:\r\n r.clients.remove(entry)\r\n if entry in r.clients:\r\n print >> sys.stderr, 'ERROR: DUPLICATE CLIENT ENTRY...'\r\n print >> sys.stderr, ' ENTRY', repr(entry), type(entry[0])\r\n print >> sys.stderr, ' CLIENTS', repr(r.clients)\r\n assert entry not in r.clients # an op,i pair should be unique\r\n if not r.clients:\r\n if prune:\r\n self.__prune_r__([r], reason)\r\n return False\r\n return True\r\n return False",
"def remove_outliers(self, tolerance: int = 2):\r\n\r\n # Find the median distance between the rows of the field in pixels\r\n d = []\r\n for i in range(1, len(self.lines)):\r\n d.append(abs(self.lines[i][1] - self.lines[i-1][1]))\r\n \r\n row_dist = median(d)\r\n\r\n # Iterate through all of the rows\r\n for row_num in range(len(self.rows)):\r\n i = 0\r\n\r\n # Iterate through each plant in the row\r\n while i < len(self.rows[row_num]):\r\n\r\n # Find each plants distance from it's line\r\n c = self.rows[row_num][i].get_center()\r\n dis = abs(c[0]\r\n * self.lines[row_num][0]\r\n + self.lines[row_num][1]\r\n - c[1])\r\n\r\n # If it's too far off, remove it\r\n if dis < row_dist/tolerance:\r\n i += 1\r\n else:\r\n self.remove_plant_by_center(c)",
"def remove_nodes_connections(self, nodes):\n nodes = ensure_list(nodes)\n for nd in nodes:\n for nd_in in self.successors[nd.name]:\n self.predecessors[nd_in.name].remove(nd)\n self.edges.remove((nd, nd_in))\n self.successors.pop(nd.name)\n self.predecessors.pop(nd.name)\n self._node_wip.remove(nd)",
"def remove_adjacent_grid(self, pg1, pg2, adj_grids):\n\n if pg1.size()>pg2.size():\n bigger = pg1\n smaller = pg2\n else:\n bigger = pg2\n smaller = pg1\n \n for adj in adj_grids:\n \n\n # If the adjacent grids are a subset of the secondary grids (i.e. not necessary)\n # remove them from each\n if adj in bigger.secondary_grids:\n debug.info(3,\"Removing {} from bigger secondary {}\".format(adj, bigger))\n bigger.grids.remove(adj)\n bigger.secondary_grids.remove(adj)\n self.blocked_grids.add(adj)\n elif adj in smaller.secondary_grids:\n debug.info(3,\"Removing {} from smaller secondary {}\".format(adj, smaller))\n smaller.grids.remove(adj)\n smaller.secondary_grids.remove(adj)\n self.blocked_grids.add(adj)\n else:\n # If we couldn't remove from a secondary grid, we must remove from the primary\n # grid of at least one pin\n if adj in bigger.grids:\n debug.info(3,\"Removing {} from bigger primary {}\".format(adj, bigger))\n bigger.grids.remove(adj)\n elif adj in smaller.grids:\n debug.info(3,\"Removing {} from smaller primary {}\".format(adj, smaller))\n smaller.grids.remove(adj)",
"def disconnect_lowest_ecc(G, num_remove):\n num_removed = []\n spectral_gap = []\n\n g = G.copy()\n vs = np.random.choice(list(g.nodes()), num_remove, replace=False)\n for i, v in enumerate(vs):\n neighbors = list(g.neighbors(v))\n if len(neighbors) == 0:\n continue\n ecc = np.array([nx.eccentricity(G, n) for n in neighbors])\n remove = np.argmin(ecc)\n g.remove_edge(v, neighbors[remove])\n\n num_removed.append(i)\n spectral_gap.append(get_spectral_gap(g))\n\n return num_removed, spectral_gap",
"def shift_2_cust(self, sol_in2, cust, c_loc, curr_temp, sol_type2, sa_lns):\r\n\r\n route_ing = copy.deepcopy(sol_in2[c_loc[0]])\r\n route_new = route_ing\r\n move_to_route = c_loc[0]\r\n orgn_type1 = sol_type2[c_loc[0]]\r\n cust_folw = route_ing[c_loc[1]+1]\r\n origin_cost1 = check_violation(route_ing, orgn_type1)[1]\r\n route_ing.remove(cust) # remove c in the current route\r\n del route_ing[c_loc[1]] # remove customer following c\r\n new_type1 = route_type(route_ing)\r\n adjust_cost1 = check_violation(route_ing, new_type1)[1]\r\n best_cut_cost0 = -1000\r\n best_cut_cost = best_cut_cost0 # best cost cut of moving this customer\r\n for j, rou in enumerate(sol_in2):\r\n orgn_type2 = sol_type2[j]\r\n origin_cost2 = check_violation(rou, orgn_type2)[1]\r\n if j == c_loc[0]: # moving in the same route\r\n for k in range(1, len(route_ing)):\r\n if k == c_loc[1]:\r\n continue\r\n rou_test = route_ing[:k] + [cust, cust_folw] + route_ing[k:]\r\n if check_violation(rou_test, orgn_type2)[0]:\r\n adjust_cost2 = check_violation(rou_test, orgn_type2)[1]\r\n cost_cut_test = origin_cost1 - adjust_cost2\r\n if cost_cut_test > best_cut_cost:\r\n best_cut_cost = cost_cut_test\r\n route_new = rou_test\r\n move_to_route = j\r\n\r\n\r\n else: # moving to a different route\r\n for k in range(1, len(rou)):\r\n rou_test = rou[:k] + [cust, cust_folw] + rou[k:]\r\n if check_violation(rou_test, 5)[0]:\r\n new_type2 = route_type(rou_test)\r\n adjust_cost2 = check_violation(rou_test, new_type2)[1]\r\n cost_cut_test = origin_cost1 + origin_cost2 - adjust_cost1 - adjust_cost2\r\n if cost_cut_test > best_cut_cost:\r\n best_cut_cost = cost_cut_test\r\n route_new = rou_test\r\n move_to_route = j\r\n\r\n\r\n if best_cut_cost > 1e-5:\r\n # print('shift2 good', best_cut_cost)\r\n sol_in2[move_to_route] = route_new\r\n sol_type2[move_to_route] = route_type(route_new)\r\n if move_to_route != c_loc[0]: # moving to a different route\r\n sol_in2[c_loc[0]] = route_ing\r\n sol_type2[c_loc[0]] = route_type(route_ing)\r\n\r\n elif sa_lns and best_cut_cost < -1e-5:\r\n prb = random.uniform(0, 1)\r\n if np.exp(best_cut_cost / curr_temp) > prb:\r\n # print('shift2', best_cut_cost)\r\n sol_in2[move_to_route] = route_new\r\n sol_type2[move_to_route] = route_type(route_new)\r\n if move_to_route != c_loc[0]: # moving to a different route\r\n sol_in2[c_loc[0]] = route_ing\r\n sol_type2[c_loc[0]] = route_type(route_ing)\r\n\r\n # return sol_in2\r",
"def remove_route(g, origin, destination, choice_dir):\n origin_code = g.convert[origin]\n destination_code = g.convert[destination]\n \n # Removes both directions and returns \n if(choice_dir == \"y\"):\n \n \n for key in g.city_dict:\n if(key == origin_code):\n \n old_flights_in = g.city_dict[key].get_flights_in()\n new_flights_in = []\n for flight in old_flights_in:\n if(flight[0] != destination_code):\n new_flights_in.append(flight)\n \n old_flights_out = g.city_dict[key].get_flights_out()\n new_flights_out = []\n for flight in old_flights_out:\n if(flight[0] != destination_code):\n new_flights_out.append(flight)\n \n g.city_dict[key].set_flights_in(new_flights_in)\n g.city_dict[key].set_flights_out(new_flights_out)\n \n if(key == destination_code):\n old_flights_in = g.city_dict[key].get_flights_in()\n new_flights_in = []\n for flight in old_flights_in:\n if(flight[0] != origin_code):\n new_flights_in.append(flight)\n \n old_flights_out = g.city_dict[key].get_flights_out()\n new_flights_out = []\n for flight in old_flights_out:\n if(flight[0] != origin_code):\n new_flights_out.append(flight)\n \n g.city_dict[key].set_flights_in(new_flights_in)\n g.city_dict[key].set_flights_out(new_flights_out)\n \n \n # Removes one direction and returns\n if(choice_dir == \"n\"):\n for key in g.city_dict:\n if(key == origin_code):\n \n old_flights_out = g.city_dict[key].get_flights_out()\n new_flights_out = []\n for flight in old_flights_out:\n if(flight[0] != destination_code):\n new_flights_out.append(flight)\n \n g.city_dict[key].set_flights_out(new_flights_out)\n \n if(key == destination_code):\n old_flights_in = g.city_dict[key].get_flights_in()\n new_flights_in = []\n for flight in old_flights_in:\n if(flight[0] != origin_code):\n new_flights_in.append(flight)\n g.city_dict[key].set_flights_in(new_flights_in)\n \n return g",
"def delete_outlayers(input_list, price_diff):\n \n # Get maximum and minimum indices\n max_pos = price_diff.index(max(price_diff))\n min_pos = price_diff.index(min(price_diff))\n \n # Remove correspondant values from input list\n input_list.remove(input_list[max_pos])\n input_list.remove(input_list[min_pos])",
"def remove_self(self):\n if self.game.rules[\"trapping\"]:\n [neighbor.untrap() for neighbor in self.get_neighbors() if neighbor.trapped and self in neighbor.get_sandwichers() and len(neighbor.get_sandwichers()) == 2]\n self.game.empty_square(self.position)\n self.position = None",
"def hill_climb(data, selected_cities):\n #Picking a random solution as the current best.\n shortest_tour = random.sample(selected_cities, len(selected_cities))\n shortest_distance = get_distance(data, shortest_tour)\n\n if(len(shortest_tour) > 1):\n\n fair_evaluations = 1000 #Number of evaluations\n worse_neighbors = 0\n\n #Compare to neighbor solution(s) and repeat until no better solution is found.\n while worse_neighbors <= fair_evaluations:\n copy = shortest_tour.copy()\n new_tour = swap_random_cities(copy) #Choose neighbor\n new_distance = get_distance(data, new_tour)\n\n if new_distance < shortest_distance:\n shortest_tour = new_tour\n shortest_distance = new_distance\n worse_neighbors = 0\n\n else: worse_neighbors += 1\n\n return shortest_tour, shortest_distance",
"def _reduce_distances(self, threshold):\n reduced = self.orig_dists.copy()\n reduced[reduced <= threshold] = 0\n # Remove ignored from all consideration\n ignrd_indices = [self.index[name] for name in self.ignored]\n if ignrd_indices:\n reduced[:,ignrd_indices] = np.inf\n reduced[ignrd_indices,:] = np.inf\n # Check if the given parameters are feasible\n chsn_indices = set(self.index[name] for name in self.chosen)\n avail_indices = set(self.index[name] for name in self.available)\n ca_indices = chsn_indices | avail_indices\n unassigned_indices = np.array(list(self._not_ignored_inds - ca_indices))\n if len(unassigned_indices) == 0:\n unassigned_orphans = unassigned_indices\n else:\n ca_indices = list(ca_indices)\n avail_in_range = np.count_nonzero(reduced[np.ix_(unassigned_indices,ca_indices)] == 0, axis=1)\n unassigned_orphans = unassigned_indices[avail_in_range == 0]\n return reduced, unassigned_orphans",
"def remove_backedges(self):\n\n # Add virtual super node\n super_node = 1000000\n self.add_node(super_node)\n\n # connect super node to all nodes that don't have a provider through c2p\n for np_node in self.no_provider_nodes:\n self.add_edge(super_node, np_node, relationship=-1, as1=super_node, as2=np_node)\n\n qnode = namedtuple('Node', 'asn path')\n\n q = list()\n q.append(qnode(super_node, list()))\n\n visited = list()\n\n num_deleted_edges = 0\n j = 0\n while q:\n node = q.pop()\n\n # debug output\n if node.asn in self.no_provider_nodes:\n j += 1\n print str(j) + \"/\" + str(len(self.no_provider_nodes))\n\n # update list of visited nodes and copy it\n if node.path:\n path = list(node.path)\n else:\n path = list()\n\n path.append(node.asn)\n visited.append(node.asn)\n\n # first we mark backedges and after checking all neighbors, we remove them\n edges_to_remove = []\n\n for neighbor in nx.all_neighbors(self, node.asn):\n edge_data = self.get_edge_data(node.asn, neighbor)\n # node is a customer of neighbor - only follow provider to customer links\n if edge_data[\"relationship\"] == -1 and edge_data[\"as1\"] == node.asn:\n # if we see a backedge, mark it and continue search\n if neighbor in path:\n edges_to_remove.append((node.asn, neighbor))\n # if we haven't looked at this node yet, we add it to the list of nodes\n elif neighbor not in visited:\n q.append(qnode(neighbor, list(path)))\n\n # remove the marked edges\n for edge in edges_to_remove:\n num_deleted_edges += 1\n self.remove_edge(edge[0], edge[1])\n\n # Remove virtual node and all edges\n for np_node in self.no_provider_nodes:\n self.remove_edge(super_node, np_node)\n self.remove_node(super_node)\n\n print \"Removed \" + str(num_deleted_edges) + \" backedges\"",
"def floor_remove(points, set_max_window_size = 20, set_slope = 1.0, set_initial_distance = 0.5, set_max_distance = 3.0):\n PointCloud = points\n ind = pclpy.pcl.vectors.Int()\n pmf = pclpy.pcl.segmentation.ApproximateProgressiveMorphologicalFilter.PointXYZ()\n pmf.setInputCloud(PointCloud)\n pmf.setMaxWindowSize(set_max_window_size)\n pmf.setSlope(set_slope)\n pmf.setInitialDistance(set_initial_distance)\n pmf.setMaxDistance(set_max_distance)\n pmf.extract(ind)\n ext = pclpy.pcl.filters.ExtractIndices.PointXYZ()\n ground = pclpy.pcl.PointCloud.PointXYZ()\n non_ground_points = pclpy.pcl.PointCloud.PointXYZ()\n ext.setInputCloud(PointCloud)\n ext.setIndices(ind)\n ext.filter(ground)\n ext.setNegative(True)\n ext.filter(non_ground_points)\n\n return non_ground_points.xyz, ground.xyz",
"def _remove_connection(user, friend):\n # stat\n removed_count = 0\n updated_count = 0\n dep_count = 0\n # global flags\n updating = False\n updating_rev = False\n # conn's without shortest paths\n deps_lost = []\n deps_rev_lost = []\n # current\n current = Degree.objects.filter(from_user=user, to_user=friend)\n current_rev = Degree.objects.filter(to_user=user, from_user=friend)\n current.delete()\n current_rev.delete()\n\n\n # find all dependants\n # path traversing\n #deps = Degree.objects.extra(where=[\"path like '%%\"+str(user.id)+\"%%\"+str(friend.id)+\"%%'\"]).order_by('distance')\n #deps_rev = Degree.objects.extra(where=[\"path like '%%\"+str(friend.id)+\"%%\"+str(user.id)+\"%%'\"]).order_by('distance')\n deps = Degree.objects.filter(path__iregex=r'(,|\\A)%s,%s(,|\\Z)' % (user.id, friend.id))\n deps_rev = Degree.objects.filter(path__iregex=r'(,|\\A)%s,%s(,|\\Z)' % (friend.id, user.id))\n\n if deps.count():\n updating = True\n dep_count += deps.count()\n if deps_rev.count():\n updating_rev = True\n dep_count += deps_rev.count()\n\n \"\"\"\n logic here:\n ask all nighbours if they have path to old node\n if no, wait until loop is finished\n if someone made new connection\n look again\n \"\"\"\n deps = list(deps)\n deps_lost = list(deps)\n i=1\n while updating:\n i+=1\n # flag for global update\n # we need to run all dependants at least once\n y=0\n updating = False\n for dep in deps:\n #deps.remove(dep)\n neighs = Degree.objects.filter(from_user=dep.from_user, distance = 0)\n for neigh in neighs:\n try:\n shortest = Degree.objects.get(from_user=neigh.to_user, to_user=dep.to_user)\n # hooray! shortest pass\n #if '%s,%s' % (user.id, friend.id) not in shortest.path:\n if not re.match(r'.*(,|\\A)%s,%s(,|\\Z).*' % (user.id, friend.id), shortest.path):\n # check length (and current path)\n #if '%s,%s' % (user.id, friend.id) in dep.path:\n if re.match(r'.*(,|\\A)%s,%s(,|\\Z).*' % (user.id, friend.id), dep.path):\n # if we have wrong path\n dep.path = \"%s,%s\" % (dep.from_user.id, shortest.path)\n dep.distance = shortest.distance + 1\n dep.save()\n deps_lost.remove(dep)\n updating = True\n else:\n # path already updated from neighbour\n # we need to check current length\n if shortest.distance + 1 < dep.distance:\n dep.path = \"%s,%s\" % (dep.from_user.id, shortest.path)\n dep.distance = shortest.distance + 1\n dep.save()\n updating = True\n except Degree.DoesNotExist:\n continue\n except:\n logger = logging.getLogger(__name__)\n logger.warning('Error in updating, id of degree = %s' % dep.id)\n raise\n # if no shortest found\n # append to lost list\n #if not updated:\n #deps_lost.append(dep)\n\n deps_rev = list(deps_rev)\n deps_rev_lost = list(deps_rev)\n i=1\n while updating_rev:\n i+=1\n y=0\n updating_rev = False\n for dep in deps_rev:\n y+=1\n #deps_rev.remove(dep)\n neighs = Degree.objects.filter(from_user=dep.from_user, distance = 0)\n for neigh in neighs:\n try:\n shortest = Degree.objects.get(from_user=neigh.to_user, to_user=dep.to_user)\n #if '%s,%s' % (friend.id, user.id) not in shortest.path:\n if not re.match(r'.*(,|\\A)%s,%s(,|\\Z).*' % (friend.id, user.id), shortest.path):\n #if '%s,%s' % (friend.id, user.id) in dep.path:\n if re.match(r'.*(,|\\A)%s,%s(,|\\Z).*' % (friend.id, user.id), dep.path):\n dep.path = \"%s,%s\" % (dep.from_user.id, shortest.path)\n dep.distance = shortest.distance + 1\n dep.save()\n updated_count += 1\n updating_rev = True\n deps_rev_lost.remove(dep)\n else:\n if shortest.distance + 1 < dep.distance:\n dep.path = \"%s,%s\" % (dep.from_user.id, 
shortest.path)\n dep.distance = shortest.distance + 1\n dep.save()\n updated_count += 1\n updating_rev = True\n except Degree.DoesNotExist:\n continue\n except:\n logger = logging.getLogger(__name__)\n logger.warning('Error in updating rev, id of degree = %s' % dep.id)\n raise\n\n\n #if not deps_lost and not deps_rev_lost:\n\n # remove all connections\n # we can't do that\n # since although we have losters\n # there still could be connection\n # exmple: triangle connection\n #removed_count += len(deps_lost)\n #for conn in deps_lost:\n #conn.delete()\n #deps_lost.remove(conn)\n #else:\n # restore original\n restored = 0\n neighs = Degree.objects.filter(from_user=user, distance = 0)\n for neigh in neighs:\n try:\n shortest = Degree.objects.get(from_user=neigh.to_user, to_user=friend)\n #if '%s,%s' % (user.id, friend.id) not in shortest.path:\n if not re.match(r'.*(,|\\A)%s,%s(,|\\Z).*' % (user.id, friend.id), shortest.path):\n obj, created = Degree.objects.get_or_create(from_user=user, to_user=friend)\n if created:\n obj.path=\"%s,%s\" % (user.id, shortest.path)\n obj.distance = shortest.distance + 1\n obj.save()\n restored +=1\n else:\n if shortest.distance + 1 < obj.distance:\n obj.path=\"%s,%s\" % (user.id, shortest.path)\n obj.distance = shortest.distance + 1\n obj.save()\n except Degree.DoesNotExist:\n continue\n\n # reverse\n\n neighs = Degree.objects.filter(from_user=friend, distance = 0)\n for neigh in neighs:\n try:\n shortest = Degree.objects.get(from_user=neigh.to_user, to_user=user)\n #if '%s,%s' % (friend.id, user.id) not in shortest.path:\n if not re.match(r'.*(,|\\A)%s,%s(,|\\Z).*' % (friend.id, user.id), shortest.path):\n obj, created = Degree.objects.get_or_create(from_user=friend, to_user=user)\n if created:\n obj.path=\"%s,%s\" % (friend.id, shortest.path)\n obj.distance = shortest.distance + 1\n obj.save()\n restored +=1\n else:\n if shortest.distance + 1 < obj.distance:\n obj.path=\"%s,%s\" % (friend.id, shortest.path)\n obj.distance = shortest.distance + 1\n obj.save()\n\n except Degree.DoesNotExist:\n continue\n\n # if we still have someone left\n # check last time through neigh\n if deps_lost:\n for dep in deps_lost:\n neighs = Degree.objects.filter(from_user=dep.from_user, distance = 0)\n for neigh in neighs:\n try:\n shortest = Degree.objects.get(from_user=neigh.to_user, to_user=dep.to_user)\n # hooray! 
shortest pass\n #if '%s,%s' % (user.id, friend.id) not in shortest.path:\n if not re.match(r'.*(,|\\A)%s,%s(,|\\Z).*' % (user.id, friend.id), shortest.path):\n # check length (and current path)\n #if '%s,%s' % (user.id, friend.id) in dep.path:\n if re.match(r'.*(,|\\A)%s,%s(,|\\Z).*' % (user.id, friend.id), dep.path):\n # if we have wrong path\n dep.path = \"%s,%s\" % (dep.from_user.id, shortest.path)\n dep.distance = shortest.distance + 1\n dep.save()\n # FIXME\n # THis will not remove all elements\n # maybe we can use\n # for item in mylist[:]:\n # but needs to be tested\n deps_lost.remove(dep)\n updating = True\n else:\n # path already updated from neighbour\n # we need to check current length\n if shortest.distance + 1 < dep.distance:\n dep.path = \"%s,%s\" % (dep.from_user.id, shortest.path)\n dep.distance = shortest.distance + 1\n dep.save()\n updating = True\n except Degree.DoesNotExist:\n continue\n\n if deps_rev_lost:\n for dep in deps_rev_lost:\n neighs = Degree.objects.filter(from_user=dep.from_user, distance = 0)\n for neigh in neighs:\n try:\n shortest = Degree.objects.get(from_user=neigh.to_user, to_user=dep.to_user)\n #if '%s,%s' % (friend.id, user.id) not in shortest.path:\n if not re.match(r'.*(,|\\A)%s,%s(,|\\Z).*' % (friend.id, user.id), shortest.path):\n #if '%s,%s' % (friend.id, user.id) in dep.path:\n if re.match(r'.*(,|\\A)%s,%s(,|\\Z).*' % (friend.id, user.id), dep.path):\n dep.path = \"%s,%s\" % (dep.from_user.id, shortest.path)\n dep.distance = shortest.distance + 1\n dep.save()\n updated_count += 1\n updating_rev = True\n # FIXME\n # look above\n deps_rev_lost.remove(dep)\n else:\n if shortest.distance + 1 < dep.distance:\n dep.path = \"%s,%s\" % (dep.from_user.id, shortest.path)\n dep.distance = shortest.distance + 1\n dep.save()\n updated_count += 1\n updating_rev = True\n except Degree.DoesNotExist:\n continue\n\n # if we still have losters,\n # delete them\n\n if deps_lost:\n removed_count += len(deps_lost)\n for conn in deps_lost:\n conn.delete()\n #deps_lost.remove(conn)\n\n if deps_rev_lost:\n removed_count += len(deps_rev_lost)\n for conn in deps_rev_lost:\n\n conn.delete()\n #deps_rev_lost.remove(conn)\n\n logger = logging.getLogger(__name__)\n logger.warning('We removed: %s records, updated: %s, restored %s, dependants: %s/2 ' % (removed_count, updated_count, restored, dep_count))\n\n return True",
"def _remove_points(self, points_to_remove, teams_population):\n for team in teams_population:\n for point in points_to_remove:\n if point.point_id_ in team.results_per_points_:\n team.results_per_points_.pop(point.point_id_)",
"def disconnect_nodes(self):\n for src_id, trg_id in itertools.product(self.selected_nodes, repeat=2):\n if src_id != trg_id:\n # `discard` ignores non-existing elements (unlike `remove`)\n app.edges[src_id].discard(trg_id)\n self.mark_as_unsaved()\n self.update()",
"def remove_previous_connections(self, nodes):\n nodes = ensure_list(nodes)\n for nd in nodes:\n for nd_out in self.predecessors[nd.name]:\n if nd_out.name in self.successors:\n self.successors[nd_out.name].remove(nd)\n self.edges.remove((nd_out, nd))\n self.successors.pop(nd.name)\n self.predecessors.pop(nd.name)\n self._node_wip.remove(nd)",
"def exchange_2_cust(self, sol_in4, cust, c_loc, curr_temp, sol_type4, sa_lns):\r\n\r\n route_ing = copy.deepcopy(sol_in4[c_loc[0]])\r\n route_new_1 = route_ing\r\n route_new_2 = route_ing\r\n cust_folw = route_ing[c_loc[1] + 1]\r\n exch_to_route = c_loc[0]\r\n origin_cost1 = check_violation(route_ing, sol_type4[c_loc[0]])[1]\r\n # route_ing.remove(cust) # move c in the current route\r\n # adjust_cost1 = check_violation(route_ing)[1]\r\n best_cut_cost0 = -1000\r\n best_cut_cost = best_cut_cost0 # best cost cut of moving this customer\r\n for j, rou in enumerate(sol_in4):\r\n origin_cost2 = check_violation(rou, sol_type4[j])[1]\r\n if j != c_loc[0] and len(rou) >= 4: # exchange to a different route\r\n for k in range(1, len(rou) - 2):\r\n rou_test_1 = copy.deepcopy(sol_in4[c_loc[0]])\r\n rou_test_2 = copy.deepcopy(rou)\r\n rou_test_1[c_loc[1]], rou_test_1[c_loc[1] + 1] = rou[k], rou[k + 1]\r\n rou_test_2[k], rou_test_2[k + 1] = cust, cust_folw\r\n if check_violation(rou_test_1, 5)[0] and check_violation(rou_test_2, 5)[0]:\r\n new_type1 = route_type(rou_test_1)\r\n new_type2 = route_type(rou_test_2)\r\n adjust_cost1 = check_violation(rou_test_1, new_type1)[1]\r\n adjust_cost2 = check_violation(rou_test_2, new_type2)[1]\r\n cost_cut_test = origin_cost1 + origin_cost2 - adjust_cost1 - adjust_cost2\r\n if cost_cut_test > best_cut_cost:\r\n best_cut_cost = cost_cut_test\r\n route_new_1 = rou_test_1\r\n route_new_2 = rou_test_2\r\n exch_to_route = j\r\n\r\n\r\n\r\n if best_cut_cost > 1e-5:\r\n # print('exchange2 good', best_cut_cost)\r\n sol_in4[c_loc[0]] = route_new_1\r\n sol_in4[exch_to_route] = route_new_2\r\n sol_type4[c_loc[0]] = route_type(route_new_1)\r\n sol_type4[exch_to_route] = route_type(route_new_2)\r\n\r\n elif sa_lns and best_cut_cost < -1e-5:\r\n prb = random.uniform(0, 1)\r\n if np.exp(best_cut_cost / curr_temp) > prb:\r\n # print('exchange2', best_cut_cost)\r\n sol_in4[c_loc[0]] = route_new_1\r\n sol_in4[exch_to_route] = route_new_2\r\n sol_type4[c_loc[0]] = route_type(route_new_1)\r\n sol_type4[exch_to_route] = route_type(route_new_2)\r\n\r\n # return sol_in4\r",
"def zoned_clean(self, x1_coord: int, y1_coord: int,\n x2_coord: int, y2_coord: int, iterations: int):\n return self.send(\"app_zoned_clean\",\n [x1_coord, y1_coord, x2_coord, y2_coord, iterations])"
] | [
"0.58851576",
"0.5448354",
"0.5397266",
"0.53144395",
"0.5311116",
"0.5295682",
"0.5100967",
"0.5089005",
"0.50736344",
"0.5059185",
"0.50466216",
"0.5021134",
"0.49826962",
"0.49800837",
"0.49682873",
"0.49234816",
"0.49223632",
"0.49205166",
"0.48926115",
"0.48870838",
"0.48719734",
"0.48697945",
"0.48616716",
"0.48501122",
"0.4822798",
"0.4802596",
"0.48021707",
"0.47957414",
"0.47685134",
"0.4753412"
] | 0.653128 | 0 |
Tidies a string `time` into a `date` in `datetime64[D]` format, and records the status of the conversion (`date_status`). | def tidy_time_string(time):
# TODO - :return date_range: Where date_status is "centred", date_range is a tuple (`first_date`, `last_date`) of
# `datetime64[D]` objects. Otherwise will return a tuple of Not a Time objects.
# TODO - warnings/logging
# TODO - change date offsets to rounding using MonthEnd/MonthBegin
# https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html
# TODO - allow multiple `date_status`es (circa and centred).
date_status = 'not_converted'
date = pd.NaT
original_time_string = str(time)
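# Try increasingly fuzzy interpretations: an exact date, a 'circa' date, then decades and date ranges.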
# IS THE STRING ALREADY PARSABLE AS AN EXACT TIME:
if '-' not in time: # to avoid accidentally parsing ranges as exact times. e.g. "25-27 june".
try:
date = pd.to_datetime(time)
date_status = 'exact'
return date, date_status
except:
pass
# IS THE STRING "CIRCA" SOMETHING:
if (('c' in time) or (('[' in time) or (']' in time))):
if 'c' in time: # contains 'c' (not in a month, e.g. Dec), so " c. ", "c ", etc.
time = re.sub(r'(?<!\w)(c[.]?\s?)', '', time)
if ('[' in time) and (']' in time): # contains square brackets
# We don't attempt to fix multiple pairs of brackets with one missing bracket
num_sq_brackets = time.count('[') + time.count(']')
if num_sq_brackets >= 3 and (num_sq_brackets % 2) != 0:
logging.info("Cannot fix multiple pairs of brackets with one missing bracket.")
return date, date_status
reg2 = re.findall(r'\[(.*?)\]', time)
if reg2 is not None:
# remove square brackets
for in_brackets in reg2:
time = time.replace(f"[{in_brackets}]", in_brackets)
elif '[' in time:
time = time.replace('[', '')
elif ']' in time:
time = time.replace(']', '')
time = time.strip()
try:
date = pd.to_datetime(time)
date_status = 'circa'
return date, date_status
except:
pass
# IS THE STRING A RANGE OF DATES? WHICH WE CAN AVERAGE OR CENTRE:
# We are assuming an '([1,2]\d{2}0)s' pattern (e.g. 1970s, 1980s, 1730s, 1900s) implies a decade.
if ('s' in time) or ('-') in time:
if ('s' in time) and ('-' not in time):
reg3 = re.findall(r'([1,2]\d{2}0)s', time)
for reg in reg3:
time = time.replace(f"{reg}s", str(int(reg) + 5)) # centre is 5 years later
date = pd.to_datetime(time, format='%Y')
date_status = 'centred'
elif ('-' in time):
if time.count('-') > 1:
print('many hyphens', original_time_string)
# Not attempting to deal with multiple hyphens at the moment.
pass
else:
time = re.sub(r'\s?-\s?', '-', time)
reg4 = re.match(r'(.*?)-(.*)$', time)
first = time.replace(reg4.group(0), reg4.group(1))
last = time.replace(reg4.group(0), reg4.group(2))
if 's' in first:
reg5 = re.findall(r'([1,2]\d{2}0)s', time)
for reg in reg5:
first = first.replace(f"{reg}s", reg)
if not re.search(r'[1,2]\d{3}', first): # no year:
if not re.search(r'\d+', first): # no days in `first` => varying month:
# Take the year from last and add it on
reg5 = re.findall(r'[1,2]\d{3}', last)
first = f"{first} {reg5[0]}"
else: # days in `first` => varying days:
# Take the month and year from last and add it on.
reg6 = re.findall(r'\w+ [1,2]\d{3}', last)
if len(reg6) > 0:
first = f"{first} {reg6[0]}"
if 's' in last:
reg7 = re.findall(r'([1,2]\d{2}0)s', time)
for reg in reg7:
last = last.replace(f"{reg}s", str(int(reg) + 10)) # end is 10 years later.
if re.match(r'\w+\s\d+', last): # assuming month and year
time_delta = pd.tseries.offsets.DateOffset(months=1)
elif re.match(r'[a-zA-Z]', last): # assuming it's a month
time_delta = pd.tseries.offsets.DateOffset(months=1)
elif re.match(r'[1,2]\d{3}', last): # assuming it's a year
time_delta = pd.tseries.offsets.DateOffset(months=12)
elif re.match(r'\d+', last).span()[1] - re.match(r'\d+', last).span()[0] <= 2: # assuming it's a day:
time_delta = pd.tseries.offsets.DateOffset(months=0)
else:
logging.info(f"Can't guess format of {last} from {original_time_string}")
return date, date_status
try:
last = pd.to_datetime(last)
except:
logging.info(f"Could not parse `last` ({last}) into `datetime` format.")
return date, date_status
last = last + time_delta
try:
first = pd.to_datetime(first)
except:
logging.info(f"Could not parse `first` ({first}) into `datetime` format.")
return date, date_status
centre_date = first + (last - first) / 2
date_status = 'centred'
return centre_date, date_status
return date, date_status | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def time_convert(time):\n try:\n time_data = str(time)\n if time_data:\n try:\n time_data = datetime.strptime(time_data, '%Y%m%d')\n except Exception:\n time_data = datetime.strptime(time_data, '%Y%m%d%H%M%S')\n time_data = time_data.strftime('%Y-%m-%d')\n return time_data\n except Exception:\n return False",
"def convert_time(cls, time_str):\n if cls.date_ignore_pattern:\n time_str = re.sub(cls.date_ignore_pattern, '', time_str)\n return datetime.strptime(time_str, cls.date_format)",
"def convert_datetime(date, time):\n return datetime.datetime.strptime(date + \" \" + time, '%Y-%m-%d %H:%M:%S')",
"def convert_string_to_datetime(time_string):\n int_string = [int(i) for i in time_string.split('-')]\n return datetime.date(int_string[0], int_string[1], int_string[2])",
"def decode_datetime(self, string):\n if isinstance(string, str):\n if 'T' in string:\n return datetime.strptime(string, \"%Y%m%dT%H%M%S\")\n else:\n return datetime.strptime(string, \"%Y%m%d\")\n else:\n return string",
"def time_string2dt(time_string: str)-> datetime:\n return parse(time_string, fuzzy=True)",
"def datetime_from_string(time):\n try:\n if type(time) == datetime.datetime:\n return time\n else:\n try:\n return datetime.datetime.strptime(time, '%Y-%m-%d %H:%M:%S')\n except ValueError:\n return datetime.datetime.strptime(time, '%Y-%m-%d %H:%M:%S.%f')\n except ValueError:\n return time\n except TypeError:\n return time",
"def clean_date(raw_time):\n time_stamp = raw_time.split(\" \")\n time_stamp = str(time_stamp[1]+' '+time_stamp[2]+' '+time_stamp[3]+' '+time_stamp[5])\n clean_date_time = parser.parse(time_stamp)\n return clean_date_time",
"def parse_date_time(date, time):\n dt = '%s %s' % (date, time)\n return datetime.strptime(dt, '%Y-%m-%d %H:%M:%S')",
"def str2date(s):\n return datetime.strptime(s.decode(), \"%Y-%m-%d %H:%M:%S\")",
"def parse_twitter_datetime(timestr):\r\n return pd.datetime.strptime(timestr, \"%a %b %d %H:%M:%S %z %Y\")",
"def parse_time(time: Union[str, datetime]) -> datetime:\n if isinstance(time, str):\n try:\n from ciso8601 import parse_datetime # pylint: disable=wrong-import-position # noqa: F401\n return parse_datetime(time)\n except (ImportError, ValueError): # pragma: no cover\n return dateutil.parser.parse(time)\n\n return time",
"def convert_datetime(t):\r\n try:\r\n if isinstance(t, str):\r\n converted = datetime.strptime(t, '%d-%m-%Y')\r\n # to get time in seconds:\r\n t = int(time.mktime(converted.timetuple()))\r\n return t\r\n except Exception as e:\r\n print(e)\r\n return None",
"def datetimeify(t):\n if type(t) in [datetime, Timestamp]:\n return t\n fmts = ['%Y-%m-%d %H:%M:%S', '%Y-%m-%d', '%Y %m %d %H %M %S',]\n for fmt in fmts:\n try:\n return datetime.strptime(t, fmt)\n except ValueError:\n pass\n raise ValueError(\"time data '{:s}' not a recognized format\".format(t))",
"def main():\n date_time_conversion('2018-12-30T09:37:56.000001Z', '2020-07-12T07:56:43.000001Z', 0, 0, 0, 0)",
"def extract_datetime(\n datetime_str: Text, type_: Union[datetime.datetime, datetime.date]\n):\n if type_ == datetime.datetime:\n return parse_datetime(datetime_str)\n elif type_ == datetime.date:\n return parse_date(datetime_str)\n elif type_ == datetime.time:\n return parse_time(datetime_str)",
"def str2date(string):\n if string is 'None':\n return None\n return datetime.strptime(string + '000', '%Y-%m-%dT%H:%M:%S.%f')",
"def __handleDateAttribute(self, timeString):\n try:\n if len(str(timeString)) == 13:\n return datetime.datetime.fromtimestamp(timeString / 1000)\n else:\n return datetime.datetime.fromtimestamp(timeString)\n except ValueError:\n return None\n except TypeError:\n return None",
"def to_date(str):\n \n try:\n return datetime.datetime.strptime(str,\"%Y/%m/%d %H:%M\")\n except ValueError:\n return datetime.datetime.strptime(str,\"%Y/%m/%d\")",
"def parse_time(time_string, time_format='', **kwargs):\n if isinstance(time_string, pandas.Timestamp):\n return time_string.to_pydatetime()\n elif isinstance(time_string, datetime) or time_format == 'datetime':\n return time_string\n elif isinstance(time_string, tuple):\n return datetime(*time_string)\n elif time_format == 'utime' or isinstance(time_string, (int, float)):\n return datetime(1979, 1, 1) + timedelta(0, time_string)\n elif isinstance(time_string, pandas.DatetimeIndex):\n return time_string._mpl_repr()\n elif isinstance(time_string, np.ndarray) and 'datetime64' in str(time_string.dtype):\n ii = [ss.astype(datetime) for ss in time_string]\n # Validate (in an agnostic way) that we are getting a datetime rather than a date\n return np.array([datetime(*(dt.timetuple()[:6])) for dt in ii])\n elif time_string is 'now':\n return datetime.utcnow()\n elif isinstance(time_string, astropy.time.Time):\n return time_string.datetime\n else:\n # remove trailing zeros and the final dot to allow any\n # number of zeros. This solves issue #289\n if '.' in time_string:\n time_string = time_string.rstrip(\"0\").rstrip(\".\")\n for time_format in TIME_FORMAT_LIST:\n try:\n try:\n ts, time_delta = _regex_parse_time(time_string,\n time_format)\n except TypeError:\n break\n if ts is None:\n continue\n return datetime.strptime(ts, time_format) + time_delta\n except ValueError:\n pass\n\n time_string_parse_format = kwargs.pop('_time_string_parse_format', None)\n if time_string_parse_format is not None:\n # Following a comment by the Lead Developer, the Try / except clause\n # is replaced. The Lead Developer thinks that this the try/except\n # clause is related to SunPy's database module.\n try:\n ts, time_delta = _regex_parse_time(time_string,\n time_string_parse_format)\n if ts and time_delta:\n return datetime.strptime(ts, time_string_parse_format) + time_delta\n else:\n return datetime.strptime(time_string, time_string_parse_format)\n except Exception:\n pass\n raise ValueError(\"'{tstr!s}' is not a valid time string!\".format(tstr=time_string))",
"def _get_date(str_time, time_formats = [\"%Y-%m-%d %H:%M:%S.%f\", \"%Y-%m-%d %H:%M:%S\"]):\r\n time = None\r\n for time_format in time_formats:\r\n try:\r\n time = datetime.strptime(str_time, time_format)\r\n if time:\r\n break\r\n except:\r\n pass\r\n return time",
"def time_convert(timestr):\n \n try:\n # Analyse given time str to seperate elements.\n struct_time = time.strptime(timestr[:-4], \"%a, %d %b %Y %H:%M:%S\")\n # Convert given time by secend unit.\n t = time.mktime(struct_time) \n # Re-construct time to isotime format.\n isot = time.strftime(\"%Y-%m-%d\", time.gmtime(t))\n return isot\n \n except:\n return ''",
"def test_prepare_datetime(time):\n assert SSLLabsClient().prepare_datetime(time) == \"2018-03-17\"",
"def get_time(text_time):\n # return Observer.datetime_to_astropy_time(dt.datetime.strptime(text_time, '%d/%m/%Y %H:%M'))\n the_time = dt.datetime.strptime(text_time, '%d/%m/%Y %H:%M')\n return Time(the_time.strftime('%Y-%m-%d %H:%M'))\n #date = [int(i) for i in date.split('/')]",
"def datetime_fromstr(timestr: str) -> GPSTime:\n\n return datetime.strptime(timestr, \"%Y-%m-%d\")",
"def timeConvert(time):\n\n FMTin = '%Y-%m-%d %H:%M:%S'\n FMTout = '%m/%d/%y'\n\n return datetime.strftime(datetime.strptime(time, FMTin), FMTout)",
"def tidy_time_df(df, time_col, new_tidy_col='date_tidy', new_status_col='date_status'):\n date_tidy_series = pd.Series(index=df.index, dtype='datetime64[D]')\n date_status_series = pd.Series(index=df.index, dtype='object')\n\n for ref_no, o_time in df[time_col].iteritems():\n time = str(o_time)\n # TODO: Add nd, n.d., n.d, no date to date_cleaner and remove from create_new_catalogue.\n date, date_status = tidy_time_string(time)\n if date_status == 'not_converted' and 'nd' not in o_time:\n print(f\"COULD NOT CONVERT {ref_no} with {o_time}, recording `date` {date} `date_status` {date_status}.\")\n\n date_tidy_series.loc[ref_no] = date\n date_status_series.loc[ref_no] = date_status\n\n df[new_tidy_col] = date_tidy_series\n df[new_status_col] = date_status_series\n\n return df",
"def _parse_date(date_str: str) -> datetime:\n datetime_obj = datetime.strptime(date_str, \"%Y-%m-%dT%H:%M:%SZ\")\n return f\"<t:{int(datetime_obj.timestamp())}:d>\"",
"def filter_to_date(date_time_val):\n if not isinstance(date_time_val, (datetime, date, time)):\n return date_time_val\n return date_time_val.date()",
"def convert_to_datetime(logs):\n extract_datetime = logs.split()[1]\n match_dt_string = '%Y-%m-%dT%H:%M:%S'\n final_datetime = datetime.strptime(extract_datetime, match_dt_string)\n return final_datetime"
] | [
"0.67671704",
"0.6561771",
"0.6454839",
"0.6385109",
"0.6359751",
"0.6207002",
"0.61763185",
"0.61540025",
"0.6093099",
"0.6084911",
"0.6053707",
"0.5992525",
"0.5986227",
"0.5974825",
"0.5956986",
"0.59523886",
"0.5942912",
"0.5880063",
"0.58654827",
"0.5819688",
"0.5815174",
"0.5804132",
"0.5776472",
"0.5775425",
"0.5772104",
"0.57670325",
"0.5766974",
"0.5766864",
"0.5761882",
"0.57484823"
] | 0.65892553 | 1 |
Test that noun_chunks raises Value Error for 'fr' language if Doc is not parsed. | def test_noun_chunks_is_parsed_fr(fr_tokenizer):
doc = fr_tokenizer("trouver des travaux antérieurs")
with pytest.raises(ValueError):
list(doc.noun_chunks) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_noun_chunks_is_parsed(fi_tokenizer):\n doc = fi_tokenizer(\"Tämä on testi\")\n with pytest.raises(ValueError):\n list(doc.noun_chunks)",
"def test_issue401(EN, text, i):\n tokens = EN(text)\n assert tokens[i].lemma_ != \"'\"",
"def test_issue3625():\n nlp = Hindi()\n doc = nlp(u\"hi. how हुए. होटल, होटल\")\n assert [token.text for token in doc] == ['hi', '.', 'how', 'हुए', '.', 'होटल', ',', 'होटल']",
"def test_extract_incorrect_embeddings():\n with pytest.raises(ValueError):\n model = BERTopic(language=\"Unknown language\")\n model._extract_embeddings([\"Some document\"])",
"def test_parse_simple_nonmember(self):\n lexed = [\n Token(\n value=\"qet\",\n token_type=KT.UNKNOWN,\n line_number=0,\n ),\n Token(\n value=\"be'\",\n token_type=KT.NOUN,\n line_number=0,\n ),\n ]\n self.assertFalse(parse(SimpleKlingonGrammar, lexed))",
"def test_lang_is_missing(app):\n rv = app.test_client().post('/tokenize', \n json={\n 'text': \"I still haven't found what i'm looking for\",\n })\n json_data = rv.get_json()\n tokens = json_data['tokens']\n lang = json_data['lang']\n assert tokens == ['I', 'still', 'have', 'not', 'found', 'what', 'i', 'am', 'looking', 'for']\n assert lang == 'en'",
"def test_unusual_misc():\n doc = CoNLL.conll2doc(input_str=RUSSIAN_SAMPLE)\n sentences = \"{:C}\".format(doc).split(\"\\n\\n\")\n assert len(sentences) == 2\n sentence = sentences[0].split(\"\\n\")\n assert len(sentence) == 14\n\n for word in sentence:\n pieces = word.split(\"\\t\")\n assert len(pieces) == 1 or len(pieces) == 10\n if len(pieces) == 10:\n assert all(piece for piece in pieces)",
"def test_lang_subset_unlikely_language(en_multilingual):\n sentences = [\"你好\" * 200]\n docs = [Document([], text=text) for text in sentences]\n en_multilingual(docs)\n assert [doc.lang for doc in docs] == [\"en\"]\n\n processor = en_multilingual.processors['langid']\n model = processor._model\n text_tensor = processor._text_to_tensor(sentences)\n en_idx = model.tag_to_idx['en']\n predictions = model(text_tensor)\n assert predictions[0, en_idx] < 0, \"If this test fails, then regardless of how unlikely it was, the model is predicting the input string is possibly English. Update the test by picking a different combination of languages & input\"",
"def test_text_cleaning(basic_multilingual, clean_multilingual):\n docs = [\"Bonjour le monde! #thisisfrench #ilovefrance\",\n \"Bonjour le monde! https://t.co/U0Zjp3tusD\"]\n docs = [Document([], text=text) for text in docs]\n \n basic_multilingual(docs)\n assert [doc.lang for doc in docs] == [\"it\", \"it\"]\n \n assert clean_multilingual.processors[\"langid\"]._clean_text\n clean_multilingual(docs)\n assert [doc.lang for doc in docs] == [\"fr\", \"fr\"]",
"def test_no_ngrams():\n tokenizer = Tokenizer(quadgram_freq=2)\n X = tokenizer.transform([[\"a b c d\"]])\n assert X[\"corpus\"][0] == [\"a\", \"b\", \"c\", \"d\"]\n assert tokenizer.quadgrams is None",
"def test_invalid_tokens(self):\n self.assertTrue(1 + 1)",
"def _get_vals(self, doc: Doc) -> Iterator[Span]:\n\n for ngram in doc._.ngrams:\n if ngram.text.isalpha() and OOV_PHONEMES not in ngram._.phonemes:\n yield ngram",
"def test_lang_is_not_supported(app):\n rv = app.test_client().post('/tokenize', json={\n 'text':'这是中文'})\n json_data = rv.get_json()\n msg = json_data['message']\n assert msg == 'Language not supported'",
"def test_no_brackets_in_words():\n raise SkipTest\n assert_raises(ParseError, grammar['word'].parse, ']')",
"def test_ngrams_valence_processing():\n language = \"ita\"\n letter = \"z\"\n\n valence_data = load_valence_data(language)\n\n temp_directory = \"{}/googlebooksdata\".format(PACKAGE_LOCATION)\n os.makedirs(temp_directory, exist_ok=True)\n\n ngrams_fpath = download_nrgams_file(temp_directory, language, letter)\n\n ngrams_valence_scores = merge_ngrams_and_ANEW_data(valence_data, ngrams_fpath)\n\n zucchero_data = ngrams_valence_scores[\n (ngrams_valence_scores['ngram'] == \"zucchero\") & (ngrams_valence_scores['year'] == 2009)]\n\n os.remove(ngrams_fpath)\n\n assert float(zucchero_data['valence']) == 6.55",
"def test_issue859(en_tokenizer, text):\n doc = en_tokenizer(text)\n assert doc.text == text",
"def test_unparse_invalid_examples(self):\n for description, example in INVALID_EXAMPLES.items():\n for mode in MODES:\n if example['trees'][mode] is None:\n continue\n with self.assertRaises(SyntaxError, msg=(description, mode)) as raised:\n typed_astunparse.unparse(example['trees'][mode])\n self.assertIn('PEP 526', str(raised.exception), msg=(description, mode))\n\n with self.assertRaises(SyntaxError, msg=(description, mode)):\n typed_ast.ast3.parse(source=example['code'], mode=mode)",
"def test_text_found_in_single_slide(collected_seg_motif):\n slide_not_found, error_records_future, error_records_seg, skip_test = collected_seg_motif\n if skip_test:\n pytest.skip(\"File does not have nti_data\")\n #print(\"error_records_future : \", error_records_future)\n print(\"error_records_seg : \", error_records_seg)\n if len(error_records_seg) > 0:\n print(\"error count : \", len(error_records_seg))\n print(\"error slides : \", (error_records_seg))\n pytest.fail(\n \"errors found. Count of missing rem slides in segments %s , count of remediation slides in nore than 1 slide %s\"\n % (slide_not_found, error_records_seg))",
"def test_word_positions_in_file(self):\n pass",
"def test_in_word(self):\n with self.assertRaises(ValueError):\n term, rmd = util.parse_date(\"notasearch1902foradatepartial\")",
"def test_no_key_words(self):\n for msg_test in MSG_TEST_NO_RESULT:\n result = self.parser.msg_analysis(msg_test)\n assert len(result) == 0",
"def test_langid(basic_multilingual):\n english_text = \"This is an English sentence.\"\n french_text = \"C'est une phrase française.\"\n docs = [english_text, french_text]\n\n docs = [Document([], text=text) for text in docs]\n basic_multilingual(docs)\n predictions = [doc.lang for doc in docs]\n assert predictions == [\"en\", \"fr\"]",
"def _check_pofiles_content(self):\n\n # The list of invalid chars is specific to Catalan language\n invalid_chars = {u'á', u'ñ', u'ë', u'ù', u'â', u'ê', u'î', u'ô', u'û',\n u'ë', u'ÿ', u'ä', u'ö'}\n\n try:\n\n THRESHOLD_PERCENTAGE = 1\n findFiles = FindFiles()\n for filename in findFiles.find(self.temp_dir, \"*.po\"):\n poFile = pofile(filename)\n\n invalid = 0\n for entry in poFile:\n # Only localized segments. Skips developers names,\n # untranslated country names, etc\n if entry.msgid == entry.msgstr:\n continue\n\n for char in entry.msgstr.lower():\n if char in invalid_chars:\n invalid = invalid + 1\n\n if len(poFile) > 100 and invalid > 0:\n percentage = 100.0 * invalid / len(poFile)\n if percentage > THRESHOLD_PERCENTAGE:\n self.errors = self.errors + 1\n print \"Unsual number of invalid chars at {0} ({1}%)\".\\\n format(filename, str(percentage))\n\n except Exception as detail:\n print detail",
"def test_parse_empty_is_never_part_of_grammar(self):\n self.assertFalse(parse(SimpleKlingonGrammar, []))",
"def test_spelling(self) -> None:\n misstakes: Dict[Word, List[str]] = self.report.spellcheck(\n self.rules.spelling_skip_wordclasses\n )\n for word, corrections in misstakes.items():\n if word.text.lower() in self.rules.forbidden_words:\n continue\n if word.text.lower() in [\n ab[\"word\"] for ab in self.rules.police_abbreviations\n ]:\n continue\n error_text: str = f\"Ordet {word.text} är felstavat.\"\n if corrections:\n error_text += \" Rättningsförslag: \" + \", \".join(corrections) + \".\"\n self.add_error(error_text, word=word)",
"def test_xfailed_not_mentioned_exception():\n assert False",
"def test_forbidden_words(self) -> None:\n pad_open: bool = False\n words: List[Word] = self.report.get_words()\n forbidden_words: List[Word] = []\n last_error: bool = False\n\n for word in words:\n if word.text in self.rules.citation_delimiters:\n pad_open = not pad_open\n continue\n if pad_open:\n continue\n if (word.text in self.rules.forbidden_words) or any(\n [b in self.rules.forbidden_words for b in word.baseform]\n ):\n forbidden_words.append(word)\n last_error = True\n continue\n if last_error:\n last_error = False\n combo = \" \".join([w.text for w in forbidden_words])\n start, _ = self.report.get_word_postion(forbidden_words[0])\n _, end = self.report.get_word_postion(forbidden_words[-1])\n self.add_error(\n f\"Ordet {combo} får endast förekomma i citat.\", position=(start,end)\n )",
"def test_corpus_is_not_present():\n with pytest.raises(ValueError, match=\".*not contain a text corpus.*\"):\n TextCleaner().transform(X_bin)",
"def test_unaffected(self, entries, errors, __):\n # Note that this checks that the plugin did not insert any Open\n # directive by itself where not required. This is correct behaviour.\n self.assertEqualEntries(self.test_unaffected.__input__, entries)",
"def test_get_lyrics_invalid_format(bot):\n assert get_lyrics('asdf', 1) == 'Invalid format!'"
] | [
"0.81967974",
"0.5773204",
"0.57079947",
"0.55603945",
"0.55467683",
"0.5531417",
"0.5456726",
"0.53696465",
"0.53655165",
"0.53548074",
"0.53134775",
"0.52308595",
"0.5148851",
"0.514678",
"0.5137788",
"0.5127326",
"0.5120156",
"0.51103795",
"0.50573343",
"0.5028861",
"0.5026243",
"0.50243574",
"0.5015622",
"0.50150603",
"0.4985942",
"0.49628356",
"0.49623683",
"0.4954984",
"0.4950811",
"0.49409038"
] | 0.846443 | 0 |
Add (multidimensional) samples to buffer. | def push(self, samples):
len_s = len(samples)
if self.idx + len_s < self.len:
self.data[self.idx:self.idx + len_s] = samples
self.idx += len_s
else:
if self.idx == self.len:
self.data[:-len_s] = self.data[len_s:]
else:
self.data[:-len_s] = self.data[len_s -
self.len + self.idx:self.idx]
self.idx = self.len
self.data[-len_s:] = samples | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def addSamples(self, samples):\n try:\n self.buf = np.append(\n self.buf,\n np.fromstring(\n samples,\n dtype=np.float32))\n self.bufcount += 1\n except:\n pass\n if self.bufcount >= self.numBuffers:\n self.bufcount = 0\n rms = np.sqrt(np.mean(np.square(self.buf)))\n self.buf = np.array([], dtype=np.float32)\n return self.addRMS(rms)\n return False",
"def add(self, datasetName, rows):\r\n\r\n rows = np.vstack(rows)\r\n\r\n\t\t# Add the sample into the buffer\r\n try:\r\n self.datasetBuffer[datasetName].extend(rows)\r\n except:\r\n # Initialize the buffer\r\n self.datasetBuffer[datasetName] = []\r\n self.datasetBuffer[datasetName].extend(rows)\r\n\r\n # Create the dataset\r\n self._createDatasets(datasetName)\r\n\r\n # Initiliaze dataset index count\r\n self.idxs[datasetName] = 0\r\n\r\n # Update the number of samples in the buffer\r\n self.totalFeatures += len(rows)\r\n\r\n\t\t# Check to see if we have reached the maximum buffer size\r\n if self.totalFeatures >= self.maxBufferSize:\r\n\r\n\t\t\t# write the buffers to file\r\n self._writeBuffers()",
"def add_sample(self, sample: Tuple):\n self.samples.append(sample)\n if len(self.samples) > self.max_memory:\n self.samples.pop(0)",
"def _add(self, sample):#obs_t, action, reward, obs_tp1, done):\n\n if self._next_idx >= len(self._buffer): #appends data if max capacity not reached yet\n self._buffer.append(sample)\n else:\n self._buffer[self._next_idx] = sample #drops old entry and appends new data if max capacity\n self._next_idx = (self._next_idx + 1) % self._capacity",
"def fill_buffer(self):\n num_of_smp = 0\n while num_of_smp < self.buf_size:\n c, t = self.inlet.pull_chunk(timeout=0.0)\n new_c = []\n new_t = []\n while c:\n new_c += c\n new_t += t\n c, t = self.inlet.pull_chunk(timeout=0.0)\n\n # add samples to buffer\n if any(new_c):\n # add samples\n num_of_smp += len(new_c)\n data_v = [item for sublist in new_c for item in sublist]\n self.gbuffer = np.roll(self.gbuffer, -len(data_v))\n self.gbuffer[-len(data_v):] = data_v\n # add timestamps\n if new_t:\n self.gtimes = np.roll(self.gtimes, -len(new_t))\n self.gtimes[-len(new_t):] = new_t",
"def add_sample(self, time, value):\n\t\tif self.buf_full:\n\t\t\tself.buf.pop(0)\n\t\t\n\t\tself.buf.append((time, value))",
"def add_to_buffer(self, values):\n self._buffer.extend(values)",
"def add(self, sample, **kwargs):\n if not self.samples:\n self.init(sample)\n self.samples.append(sample)",
"def addsample(self):\n if self.nsample >= len(self.samples):\n raise Exception(\"Max number of samples reached\")\n\n self.samples[self.nsample] = self.current()\n self.nsample += 1",
"def add_samples(self, samples):\n samples = [samples] if isinstance(samples, Sample) else samples\n for sample in samples:\n if isinstance(sample, Sample):\n self._samples.append(sample)\n self[SAMPLE_EDIT_FLAG_KEY] = True\n else:\n _LOGGER.warning(\"not a peppy.Sample object, not adding\")",
"def append_buffer(self, buffer):\n\n first_data_idx = self.data[0][-1] + 1 if self.__len__() > 0 else 0\n\n d0 = [first_data_idx + i for i, _ in enumerate(buffer.memory)] # indexes\n d1 = [b[0] for b in buffer.memory] # actions\n d2 = [b[1][0] for b in buffer.memory] # speeds\n d3 = [b[1][3] for b in buffer.memory] # images\n d4 = [b[3] or b[4] for b in buffer.memory] # eoes\n d5 = [b[2] for b in buffer.memory] # rewards\n d6 = [b[5] for b in buffer.memory] # infos\n d7 = [b[1][1] for b in buffer.memory] # gears\n d8 = [b[1][2] for b in buffer.memory] # rpms\n d9 = [b[3] for b in buffer.memory] # terminated\n d10 = [b[4] for b in buffer.memory] # truncated\n\n if self.__len__() > 0:\n self.data[0] += d0\n self.data[1] += d1\n self.data[2] += d2\n self.data[3] += d3\n self.data[4] += d4\n self.data[5] += d5\n self.data[6] += d6\n self.data[7] += d7\n self.data[8] += d8\n self.data[9] += d9\n self.data[10] += d10\n else:\n self.data.append(d0)\n self.data.append(d1)\n self.data.append(d2)\n self.data.append(d3)\n self.data.append(d4)\n self.data.append(d5)\n self.data.append(d6)\n self.data.append(d7)\n self.data.append(d8)\n self.data.append(d9)\n self.data.append(d10)\n\n to_trim = self.__len__() - self.memory_size\n if to_trim > 0:\n self.data[0] = self.data[0][to_trim:]\n self.data[1] = self.data[1][to_trim:]\n self.data[2] = self.data[2][to_trim:]\n self.data[3] = self.data[3][to_trim:]\n self.data[4] = self.data[4][to_trim:]\n self.data[5] = self.data[5][to_trim:]\n self.data[6] = self.data[6][to_trim:]\n self.data[7] = self.data[7][to_trim:]\n self.data[8] = self.data[8][to_trim:]\n self.data[9] = self.data[9][to_trim:]\n self.data[10] = self.data[10][to_trim:]\n\n return self",
"def add_data(self, sample, data_id):\n sample = np.array(sample)\n if len(sample.shape) == 1:\n sample = sample.reshape(1, -1)\n self.feat_in_pipe_in.send(sample)",
"def append(self, sample):\n self.samples.append(sample)\n self.total += sample\n while len(self.samples) > self.maxlen:\n self.total -= self.samples.popleft()\n self.mean = float(self.total) / len(self.samples)",
"def _copy_buffer_samples(self, buffer_info, nSamples, arr, copy_all_samples=False, last_read=False):\n\t\tcAvailable = c_int()\n\t\tcLost = c_int()\n\t\tcCorrupted = c_int()\n\t\tcSamples = buffer_info[0]\n\n\t\t# get DigitalIn status because we want to read from buffer\n\t\tself._get_DigitalIn_status(read_data=True)\n\n\t\t# record info about the data collection process (filling of the buffer)\n\t\tdwf.FDwfDigitalInStatusRecord(self.interface_handler, byref(cAvailable), byref(cLost), byref(cCorrupted))\n\n\t\tif copy_all_samples:\n\t\t\tdwf.FDwfDigitalInStatusData(self.interface_handler, byref(arr), c_int(2*4096))\n\t\t\treturn [0, 0, 0]\n\n\t\tcSamples += cLost.value\n\t\tif cSamples + cAvailable.value > nSamples:\n\t\t\tcAvailable = c_int(nSamples - cSamples)\n\n\t\t# copy samples to arr on computer\n\t\tdwf.FDwfDigitalInStatusData(self.interface_handler, byref(arr, 2*cSamples), c_int(2*cAvailable.value))\n\n\t\tcSamples += cAvailable.value\n\n\t\tbuffer_info = [cSamples, buffer_info[1] + cLost.value, buffer_info[2] + cCorrupted.value]\n\t\treturn buffer_info",
"def add_memory(self, **kwarg):\n for name, obs in kwarg.items():\n self.buffers[name] = np.concatenate((self.buffers[name], obs), axis=0)\n # get recent memory\n return self",
"def append_buffer(self, buffer):\n\n first_data_idx = self.data[0][-1] + 1 if self.__len__() > 0 else 0\n\n d0 = [first_data_idx + i for i, _ in enumerate(buffer.memory)] # indexes\n d1 = [b[0] for b in buffer.memory] # actions\n d2 = [b[1][0] for b in buffer.memory] # speeds\n d3 = [b[1][2] for b in buffer.memory] # lidar\n d4 = [b[3] or b[4] for b in buffer.memory] # eoes\n d5 = [b[2] for b in buffer.memory] # rewards\n d6 = [b[5] for b in buffer.memory] # infos\n d7 = [b[1][1] for b in buffer.memory] # progress\n d8 = [b[3] for b in buffer.memory] # terminated\n d9 = [b[4] for b in buffer.memory] # truncated\n\n if self.__len__() > 0:\n self.data[0] += d0\n self.data[1] += d1\n self.data[2] += d2\n self.data[3] += d3\n self.data[4] += d4\n self.data[5] += d5\n self.data[6] += d6\n self.data[7] += d7\n self.data[8] += d8\n self.data[9] += d9\n else:\n self.data.append(d0)\n self.data.append(d1)\n self.data.append(d2)\n self.data.append(d3)\n self.data.append(d4)\n self.data.append(d5)\n self.data.append(d6)\n self.data.append(d7)\n self.data.append(d8)\n self.data.append(d9)\n\n to_trim = self.__len__() - self.memory_size\n if to_trim > 0:\n self.data[0] = self.data[0][to_trim:]\n self.data[1] = self.data[1][to_trim:]\n self.data[2] = self.data[2][to_trim:]\n self.data[3] = self.data[3][to_trim:]\n self.data[4] = self.data[4][to_trim:]\n self.data[5] = self.data[5][to_trim:]\n self.data[6] = self.data[6][to_trim:]\n self.data[7] = self.data[7][to_trim:]\n self.data[8] = self.data[8][to_trim:]\n self.data[9] = self.data[9][to_trim:]\n\n return self",
"def append_buffer(self, buffer):\n\n first_data_idx = self.data[0][-1] + 1 if self.__len__() > 0 else 0\n\n d0 = [first_data_idx + i for i, _ in enumerate(buffer.memory)] # indexes\n d1 = [b[0] for b in buffer.memory] # actions\n d2 = [b[1][0] for b in buffer.memory] # speeds\n d3 = [b[1][1] for b in buffer.memory] # lidar\n d4 = [b[3] or b[4] for b in buffer.memory] # eoes (terminated or truncated)\n d5 = [b[2] for b in buffer.memory] # rewards\n d6 = [b[5] for b in buffer.memory] # infos\n d7 = [b[3] for b in buffer.memory] # terminated\n d8 = [b[4] for b in buffer.memory] # truncated\n\n if self.__len__() > 0:\n self.data[0] += d0\n self.data[1] += d1\n self.data[2] += d2\n self.data[3] += d3\n self.data[4] += d4\n self.data[5] += d5\n self.data[6] += d6\n self.data[7] += d7\n self.data[8] += d8\n else:\n self.data.append(d0)\n self.data.append(d1)\n self.data.append(d2)\n self.data.append(d3)\n self.data.append(d4)\n self.data.append(d5)\n self.data.append(d6)\n self.data.append(d7)\n self.data.append(d8)\n\n to_trim = self.__len__() - self.memory_size\n if to_trim > 0:\n self.data[0] = self.data[0][to_trim:]\n self.data[1] = self.data[1][to_trim:]\n self.data[2] = self.data[2][to_trim:]\n self.data[3] = self.data[3][to_trim:]\n self.data[4] = self.data[4][to_trim:]\n self.data[5] = self.data[5][to_trim:]\n self.data[6] = self.data[6][to_trim:]\n self.data[7] = self.data[7][to_trim:]\n self.data[8] = self.data[8][to_trim:]\n\n return self",
"def addSample(self, time, x, y, z):\n\t\tself.numSamples += 1\n\t\tif self.prevTime != None:\n\t\t\tdt = abs(time - self.prevTime)\n\t\t\tself.timeDifferences[dt] += 1\n\t\t\tif dt > self.highDT:\n\t\t\t\tif getTimeDifference(self.rawData[self.currIdx]) >= self.minSampleTime:\n\t\t\t\t\tself.currIdx += 1\n\t\t\t\t\tself.rawData.append(list())\n\t\t\t\telse:\n\t\t\t\t\tself.rawData[self.currIdx] = list()\n\t\t\t\t\n\t\t\telse: \n\t\t\t\tself.rawData[self.currIdx].append(preProcess.resultantAcceleration(time, x, y, z))\n\n\t\tself.prevTime = time",
"def addData(self,data,x,y):\n self.nSamples+=1\n if self.nSamples == 1:\n self.indata.append(data)\n (self.ny,self.nx)=data.shape\n self.ny=int(data.shape[0])\n self.x=x\n self.y=y\n self.lx=self.x[-1:][0]\n self.ly=self.y[-1:][0]\n\n\n else:\n if data.shape == self.indata[0].shape and x.all() == self.x.all() and y.all() == self.y.all():\n self.indata.append(data)\n else:\n logging.warning('Inconsistent data input!')\n logging.warning(\"Check data shape and X,Y sampling!\")",
"def add_data(self, data: np.ndarray):\n data = np.asarray(data)\n if data.ndim < 2:\n data = np.reshape(data, (-1, 1))\n\n self.create_storage(data)\n\n start = self._count\n finish = start + data.shape[0]\n self._data_store[start:finish, :] = data\n self._count += data.shape[0]",
"def append(self, value):\n value = np.asarray(value)\n\n if self.__buffer is None:\n newsize = (ArrayBuffer.initial_size,) + value.shape # tuples\n self.__buffer = np.zeros(newsize, value.dtype)\n\n assert(value.ndim == self.__buffer.ndim - 1)\n assert(value.shape == self.__buffer.shape[1:])\n\n self.__count += 1\n\n if self.__count >= self.__buffer.shape[0]:\n growth_factor = 2.0\n newsize = list(self.__buffer.shape)\n newsize[0] = int(np.floor(growth_factor * newsize[0] + 2.0))\n self.__buffer = np.resize(self.__buffer, newsize)\n\n self.__buffer[self.__count - 1] = value\n self.__value = self.__buffer[:self.__count]",
"def update(self):\n # pull all available chunks\n c, t = self.inlet.pull_chunk(timeout=0.0)\n new_c = []\n new_t = []\n while c:\n new_c += c\n new_t += t\n c, t = self.inlet.pull_chunk(timeout=0.0)\n\n # add samples to buffer\n if any(new_c):\n # add samples\n data_v = [item for sublist in new_c for item in sublist]\n self.gbuffer = np.roll(self.gbuffer, -len(data_v))\n self.gbuffer[-len(data_v):] = data_v\n # add timestamps\n if new_t:\n self.gtimes = np.roll(self.gtimes, -len(new_t))\n self.gtimes[-len(new_t):] = new_t\n\n # update graph handles\n if self.gbuffer.any():\n for k in range(0, self.channel_count):\n self.handles[k].setData(self.gtimes,\n self.gbuffer[k::self.channel_count])",
"def addData( self, anValue ):\n nNbrData = len(anValue) \n nNbrSample = nNbrData/self.nNbrChannel\n if( self.nNbrChannel*nNbrSample != nNbrData ):\n logging.error( \"You should provide a number of data multiple of you channel number ! (data:%d, nbrChannel: %d)\" % ( nNbrData, self.nNbrChannel ) )\n return False\n\n if( not isinstance( anValue, np.ndarray ) ):\n anValue = np.array( anValue, dtype = self.dataType )\n \n if( len( self.data ) == 0 ):\n self.data = np.copy( anValue ) # so we copy the type !!!\n else:\n self.data = np.concatenate( ( self.data, anValue) )\n self.updateHeaderSizeFromDataLength() \n return True",
"def add_data(self, data):\n num_data = None\n for key, val in data.items():\n if key not in self.data_dict:\n raise Exception(\"ReplayBuffer doesn't have key: {}\".format(key))\n \n # check if we are extending by a batch of data or just a single\n # data point\n if len(val.shape) == len(self.data_dict[key].shape):\n if num_data is None:\n num_data = len(val) \n assert(num_data < self.max_entries)\n else:\n assert(num_data == len(val))\n if (self.start_idx + num_data > self.max_entries):\n split_num_1 = self.max_entries - self.start_idx\n split_num_2 = num_data - split_num_1\n self.data_dict[key][self.start_idx:, ...] = val[:split_num_1, ...]\n self.data_dict[key][:split_num_2, ...] = val[split_num_1:, ...]\n else:\n self.data_dict[key][self.start_idx:self.start_idx+num_data, ...] = val\n else:\n if num_data is None:\n num_data = 1\n else:\n assert(num_data == 1)\n self.data_dict[key][self.start_idx, ...] = val\n\n self.start_idx += num_data\n if (self.start_idx >= self.max_entries):\n self.start_idx -= self.max_entries\n self.num_entries += num_data\n if self.num_entries > self.max_entries:\n self.num_entries = self.max_entries",
"def new_sample(self):\n\n self.u_seq.append([])\n self.r_exp.append(0)",
"def append_to_dataset(dataset, sample):\n old_size = dataset.shape[0] # this function always appends samples on the first axis\n dataset.resize(old_size + 1, axis=0)\n dataset[old_size, ...] = sample\n return old_size",
"def samples(self, samples):\n\n self._samples = samples",
"def _add_buffer(self, p_buffer_element:PyTorchIOElement):\r\n\r\n self._buffer.add_element(p_buffer_element)",
"def sample_batch(self) -> List:\n return self.buffer.sample(self.batch_size)",
"def sample(self, count):\n batch = deepcopy(random.sample(self.buffer, count))\n batch = [np.array(arr) for arr in zip(*batch)]\n\n return batch"
] | [
"0.7309586",
"0.68047845",
"0.6730027",
"0.669488",
"0.6684545",
"0.6589268",
"0.64348847",
"0.64089745",
"0.6260624",
"0.62548214",
"0.62530696",
"0.6174509",
"0.6046224",
"0.6001303",
"0.59894323",
"0.5982165",
"0.59481347",
"0.59215266",
"0.5910251",
"0.58930796",
"0.58878094",
"0.58535105",
"0.58478403",
"0.5810935",
"0.58067787",
"0.57962465",
"0.57371926",
"0.573252",
"0.56140447",
"0.5612399"
] | 0.6881006 | 1 |
Pop a number of samples from buffer. | def pop(self, idx=None):
if not idx:
samples = np.copy(self.data[:self.idx])
self.data[:] = np.empty(self.data.shape)
self.idx = 0
else:
if idx > self.idx:
raise ValueError()
samples = np.copy(self.data[:idx])
data = np.copy(self.data[idx:self.idx])
self.data[:] = np.empty(self.data.shape)
self.data[:self.idx - idx] = data
self.idx -= idx
return samples | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _popN(self, n):\n for _ in range(n):\n self._buffer.popleft()",
"def pop(self):\n while self.number > self.maxlength:\n self.buffer.popleft()\n self.number -= 1",
"def pop_memory(self, **kwarg):\n for name, obs in kwarg.items():\n self.buffers[name] = obs[-self.memory_size:]\n return self",
"def pop_item(self, index):\n ix, obj = self.items\n if index < len(ix):\n self.d_buffer.pop(ix[index])\n else:\n raise IndexError('Buffer does not have {0} elements'.format(index))",
"def pop(self):\n data = self.buffer.getvalue()\n self.buffer.seek(0)\n self.buffer.truncate()\n return data",
"def pop(self, batch_size: int) -> List[Transition]:\n return random.sample(self.memory, batch_size)",
"def pop(self):\n value = self.buffer[self.end - 1]\n self.buffer[self.end - 1] = None\n self.end = (self.end - 1) % len(self.buffer)\n return value",
"def sample(self, batch_size):\n if len(self._buffer) <= batch_size:\n print(\"There are only %d batches in the experience buffer.\" % len(self._buffer))\n return self._buffer\n idxes = [random.randint(0, len(self._buffer) - 1) for _ in range(batch_size)]\n return [self._buffer[idx] for idx in idxes]",
"def populate_buffer(self, num_transitions):\n while len(self.replay_buffer) < self.buffer_sample_size:\n self.play(num_transitions)",
"def sample(self, batch_size):\n buffer_size = len(self.buffer)\n print(\"**\",buffer_size)\n index = np.random.choice(np.arange(buffer_size), size=batch_size, replace=False)\n return [self.buffer[i] for i in index]",
"def samples_keep(self,index):\n\n\t\tif isinstance(index, (int, long)): index = range(self.samples)[-index:]\n\n\t\tself.sampled_topics = np.take(self.sampled_topics,index,axis=0)\n\t\tself.tt = np.take(self.tt,index,axis=2)\n\t\tself.dt = np.take(self.dt,index,axis=2)\n\n\t\tself.samples = len(index)",
"def read(self, size: int) -> np.ndarray:\n if size > self._buffer_size:\n raise ValueError('Cannot read more samples than the size of the buffer.')\n elif size <= 0:\n raise ValueError('Size must be positive.')\n\n start_index = self._buffer_size - size\n return np.copy(self._buffer[start_index:])",
"def remove_buffered_packets(self):\n seq = self.next_seq\n while True:\n p = self.buffer.pop(seq, None)\n if p is None:\n break\n else:\n seq += len(p.data)\n yield p",
"def sample(self, count):\n batch = deepcopy(random.sample(self.buffer, count))\n batch = [np.array(arr) for arr in zip(*batch)]\n\n return batch",
"def sample(self, size) -> []:\n size = min(len(self.buffer), size)\n return random.sample(self.buffer, size)",
"def pop(self):\r\n return self.buff.pop(-1)",
"def pop(self, n):\n try:\n self._load(False)\n except KeyError:\n return\n\n # Delete the items we no longer need,\n # and most importantly decrease self.count\n key = (self.head - self.count) % self.size\n while n > 0 and self.count > 0:\n del self.db[key]\n key += 1\n if key == self.size:\n key = 0\n n -= 1\n self.count -= 1\n self.db['count'] = self.count",
"def recv_samples(rx_streamer, total_num_samps, skip_samples):\n metadata = uhd.types.RXMetadata()\n result = np.empty((1, total_num_samps), dtype=np.complex64)\n total_samps_recvd = 0\n timeouts = 0 # This is a bit of a hack, until we can pass timeout values to\n # Python\n max_timeouts = 20\n buffer_samps = rx_streamer.get_max_num_samps()\n recv_buffer = np.zeros(\n (1, buffer_samps), dtype=np.complex64)\n while total_samps_recvd < total_num_samps:\n samps_recvd = rx_streamer.recv(recv_buffer, metadata)\n if metadata.error_code == uhd.types.RXMetadataErrorCode.timeout:\n timeouts += 1\n if timeouts >= max_timeouts:\n print(\"[ERROR] Reached timeout threshold. Exiting.\")\n return None\n elif metadata.error_code != uhd.types.RXMetadataErrorCode.none:\n print(\"[ERROR] \" + metadata.strerror())\n return None\n if samps_recvd:\n samps_recvd = min(total_num_samps - total_samps_recvd, samps_recvd)\n result[:, total_samps_recvd:total_samps_recvd + samps_recvd] = \\\n recv_buffer[:, 0:samps_recvd]\n total_samps_recvd += samps_recvd\n if skip_samples:\n print(\"Skipping {} samples.\".format(skip_samples))\n return result[0][skip_samples:]",
"def pull_read(self, prng): \n if not self.sampling:\n self.convert_to_array()\n index = prng.random.randint(0, self.total)\n return self.reads[index, :]",
"def downsample(self, number):\n for num, ss in enumerate(self.samples):\n self.samples[num], self.extra_kwargs[num] = _downsample(\n ss, number, extra_kwargs=self.extra_kwargs[num]\n )",
"def cut_sample(whole_audio_data, num_samples):\n len_audio_data = len(whole_audio_data)\n if num_samples >= len_audio_data:\n raise Exception(\"Length of to be generated signal cannot be greater and equal to original audio signal\")\n sys.exit(-1)\n\n # generate a random number which is used as a first index to cut off\n ind = random.randint(0, len_audio_data-num_samples)\n gen_data = whole_audio_data[ind:ind+num_samples]\n return gen_data",
"def sample_from_buffer(self, batch_size):\n samples = self.initial_dist.sample((batch_size, ))\n inds = torch.randint(0, len(self.buffer), (batch_size, ), device=self.device)\n samples_from_buffer = self.buffer[inds]\n rand_mask = (torch.rand(batch_size, device=self.device) < self.reuse_freq)\n samples[rand_mask] = samples_from_buffer[rand_mask]\n return samples, inds",
"def sample_from_buffer(self, batch_size):\n samples = self.initial_dist.sample((batch_size, ))\n inds = torch.randint(0, len(self.buffer), (batch_size, ), device=self.device)\n samples_from_buffer = self.buffer[inds]\n rand_mask = (torch.rand(batch_size, device=self.device) < self.reuse_freq)\n samples[rand_mask] = samples_from_buffer[rand_mask]\n return samples, inds",
"def push(self, samples):\n len_s = len(samples)\n if self.idx + len_s < self.len:\n self.data[self.idx:self.idx + len_s] = samples\n self.idx += len_s\n else:\n if self.idx == self.len:\n self.data[:-len_s] = self.data[len_s:]\n else:\n self.data[:-len_s] = self.data[len_s -\n self.len + self.idx:self.idx]\n self.idx = self.len\n self.data[-len_s:] = samples",
"def sample(self, length):\n pass",
"def pop(self, index):\n self.dataBlocks.pop(index)\n self._n_data_blocks -= 1",
"def sample(self, size=1):\n pass",
"def fill_buffer(self):\n num_of_smp = 0\n while num_of_smp < self.buf_size:\n c, t = self.inlet.pull_chunk(timeout=0.0)\n new_c = []\n new_t = []\n while c:\n new_c += c\n new_t += t\n c, t = self.inlet.pull_chunk(timeout=0.0)\n\n # add samples to buffer\n if any(new_c):\n # add samples\n num_of_smp += len(new_c)\n data_v = [item for sublist in new_c for item in sublist]\n self.gbuffer = np.roll(self.gbuffer, -len(data_v))\n self.gbuffer[-len(data_v):] = data_v\n # add timestamps\n if new_t:\n self.gtimes = np.roll(self.gtimes, -len(new_t))\n self.gtimes[-len(new_t):] = new_t",
"def downsample(self, number):\n self.samples, self.extra_kwargs = _downsample(\n self.samples, number, extra_kwargs=self.extra_kwargs\n )",
"def read_buffer(self):\n data = []\n\n # loop until the buffer was completely read out\n while True:\n new = [float(n) for n in self.read(\":FETC:ARR? MAX\").split(\",\") if n]\n data += new\n\n if len(new) < self.batch_size:\n break\n\n return data"
] | [
"0.67280066",
"0.6347082",
"0.6287858",
"0.60025215",
"0.59872806",
"0.5924517",
"0.59227526",
"0.58538824",
"0.5841255",
"0.58254236",
"0.58163065",
"0.58092636",
"0.58041793",
"0.5779683",
"0.5754109",
"0.5721759",
"0.568317",
"0.5670812",
"0.5636273",
"0.5605704",
"0.5604474",
"0.56021166",
"0.56021166",
"0.55888176",
"0.55857325",
"0.55654323",
"0.5559767",
"0.55589145",
"0.55306965",
"0.5484224"
] | 0.70006067 | 0 |
Returns the dictionary of genome fasta | def getseq(genomefasta):
genomedict = {}
for i in SeqIO.parse(open(genomefasta), "fasta"):
genomedict[i.id] = str(i.seq)
return genomedict | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read_fasta_to_dictionary(genome_file):\n filename = genome_file\n dct = {}\n\n id_name = \"\"\n sequence = \"\"\n first_pass = 1\n\n read_fh = open(filename, 'r')\n for i, line in enumerate(read_fh):\n line = line.rstrip()\n if re.search(r'^>(\\S+)(\\s+)(\\S+)(\\s+)(\\S+)(\\s+)(\\S+)(\\s+)(\\S+)(\\s+)(\\S+)(.*)', line):\n\n match_obj = re.search(r'^>(\\S+)(\\s+)(\\S+)(\\s+)(\\S+)(\\s+)(\\S+)(\\s+)(\\S+)(\\s+)(\\S+)(.*)', line)\n if not first_pass:\n dct[id_name] = sequence\n id_name = match_obj.group(1)\n id_name = re.sub(r',', \"\", id_name)\n first_pass = 0\n sequence = \"\"\n\n elif re.search(r'^>(\\S+)(.*)', line):\n\n match_obj = re.search(r'^>(\\S+)(.*)', line)\n if not first_pass:\n dct[id_name] = sequence\n id_name = match_obj.group(1)\n id_name = re.sub(r'(\\d+)_', \"\", id_name)\n id_name = re.sub(r'.*\\|', \"\", id_name)\n first_pass = 0\n sequence = \"\"\n else:\n sequence += line\n dct[id_name] = sequence\n\n return dct",
"def return_fasta_dic(file):\n seq_dict = {rec.id: rec.seq for rec in SeqIO.parse(file, \"fasta\")}\n return seq_dict",
"def sequenceDict(self):\n\t\twith open(self.ff) as fastaFile:\n\t\t\tsequences = {}\n\t\t\tfor name, seq in self.readFasta(fastaFile):\n\t\t\t\tsequences[name] = seq\n\t\treturn sequences",
"def FASTAfile_to_dict(FASTAfile):\n FASTADict = {}\n for line in FASTAfile:\n if '>' in line:\n FASTALabel = line\n FASTADict[FASTALabel] = \"\"\n else:\n FASTADict[FASTALabel] += line\n return FASTADict",
"def get_fastg_seqs_dict(fastg_name, G):\n fp = open(fastg_name, 'r')\n seqs = {}\n for name,seq,qual in readfq(fp):\n name_parts = re.sub('[:,]',\" \", name[:-1]).split()\n node = name_parts[0]\n seqs[node] = seq\n return seqs",
"def load_gene_dict(reference_genbank_name=\"data/covid-19-genbank.gb\"):\n recs = [rec for rec in SeqIO.parse(reference_genbank_name, \"genbank\")]\n gene_dict = {}\n for rec in recs:\n feats = [feat for feat in rec.features if feat.type == \"CDS\"]\n for feat in feats:\n content = '{}: {}'.format(feat.qualifiers['protein_id'][0], feat.qualifiers['product'][0])\n if feat.qualifiers['product'][0] == 'ORF1a polyprotein':\n continue\n if feat.location_operator == 'join':\n for item in feat.location.parts:\n key = (item.start.position, item.end.position)\n if 'translation' in feat.qualifiers:\n seq = feat.qualifiers['translation']\n if len(seq) == 1:\n amino_acid_seq = seq[0]\n gene_dict[key] = (content, amino_acid_seq)\n else:\n key = (feat.location.start.position, feat.location.end.position)\n if 'translation' in feat.qualifiers:\n seq = feat.qualifiers['translation']\n if len(seq) == 1:\n amino_acid_seq = seq[0]\n gene_dict[key] = (content, amino_acid_seq)\n return gene_dict",
"def fastaDictionary(inFile, chrName=None):\n\n d = {}\n for (title, seq) in FastaIterator(inFile):\n title = title.split()[0]\n if not chrName:\n d[title] = seq\n elif chrName == title:\n d[title] = seq\n return d\n\n if chrName:\n print \"NOT ABLE TO FIND!\", chrName\n return d",
"def fasta_to_dict(fasta_file):\n deflines = []\n sequences = []\n sequence = \"\"\n with open(fasta_file, \"r\") as file:\n for line in file:\n if line.startswith(\">\"):\n deflines.append(line.rstrip().lstrip('>'))\n if sequence:\n sequences.append(sequence)\n sequence = \"\"\n else:\n sequence += line.rstrip()\n sequences.append(sequence)\n fasta_dict = {}\n for x, defline in enumerate(deflines):\n fasta_dict[defline]=sequences[x]\n return fasta_dict",
"def get_fasta_dict(input_fasta_path):\n\n\ttry:\n\t\tnew_file = open(input_fasta_path, \"rU\")\n\t\tsequence_record_dict = SeqIO.to_dict(SeqIO.parse(new_file, \"fasta\"))\n\t\tnew_file.close()\n\t\treturn sequence_record_dict\n\texcept IOError as e:\n\t\tprint(str(e))\n\t\tsys.exit(1) # Aborts program. (exit(1) indicates that an error occurred)",
"def gather_strand_by_geneID_dict(genome_gtf):\n strand_by_geneID_dict = {}\n with open(genome_gtf) as f: \n for line in f: \n current_line = line.split('\\t')\n if current_line[2] == \"CDS\":\n current_orf = current_line[8].split(';')[2].split()[1].strip('\\\"')\n current_strand = current_line[6]\n strand_by_geneID_dict[current_orf] = current_strand\n return strand_by_geneID_dict",
"def read_fasta(fasta_file):\n\n seq_dict = dict() # Declare a new dictionary\n\n with open(fasta_file,'r') as f:\n lines = f.readlines()\n defline = \"\"\n for li in lines:\n li = li.rstrip() # remove newlines\n if '>' in li:\n defline = li # if i use 'id' it is blue; why?\n seq_dict[defline] = \"\"\n else:\n li = li.upper() # just to clean up sequence\n seq_dict[defline] += li\n\n return seq_dict",
"def get_ref_seq_dict(ref_seq):\n return SeqIO.to_dict(SeqIO.parse(ref_seq, 'fasta')) if ref_seq else None",
"def GenomeReader(GenomeFile):\n GenomeScaffolds = {}\n key = []\n with open(GenomeFile, 'r') as f:\n for line in f:\n line = line.strip()\n if line.startswith(\">\"):\n NamedSeq = line.replace('>', '')\n key.append(NamedSeq)\n GenomeScaffolds[NamedSeq] = \"\"\n else:\n GenomeScaffolds[NamedSeq] += line\n return GenomeScaffolds # Returns a Dictionary object",
"def make_tRNA_fasta_dict(tRNAdf):\n\n\n\ttRNA_fasta_outdict = OrderedDict()\n\n\tfor i in tRNAdf.index:\n\n\t\tif tRNAdf.loc[i,'feature'] == 'tRNA':\n\t\t\tchrom = tRNAdf.loc[i,'#chrom']\n\t\t\tchrStart = int(tRNAdf.loc[i,'chromStart'])\n\t\t\tchrEnd = int(tRNAdf.loc[i,'chromEnd'])\n\t\t\tstrand = tRNAdf.loc[i,'strand']\n\t\t\t\n\t\t\tif strand == \"+\":\n\t\t\t\tchrStart = chrStart-1 ### gtf files are 1-based, convert to 0-based\n\t\t\t\ttrSeq = SeqIO.Seq(genome[chrom][chrStart:chrEnd])\n\t\t\t\ttrdict = parse_entry(tRNAdf.loc[i,'transcript_id'])\n\t\t\t\n\t\t\telse: # for neg strand\n\t\t\t\tchrStart = chrStart-1\n\t\t\t\ttrSeq = SeqIO.Seq(genome[chrom][chrStart:chrEnd])\n\t\t\t\ttrSeq = trSeq.reverse_complement()\n\t\t\t\ttrdict = parse_entry(tRNAdf.loc[i,'transcript_id'])\n\n\t\t\ttrID = \"tRNA_\"+trdict['gene_id'][0]\n\t\t\tdesc = \"| tRNA | \"+trdict['gene_type'][0] + \" | %s; %s; %s:%s\" % (chrom, strand, chrStart, chrEnd)\n\n\t\t\ttrSeqRec = SeqRecord(trSeq, id=trID, name=trdict['gene_name'][0], description=desc)\n\t\t\ttRNA_fasta_outdict[trID] = trSeqRec\n\t\n\treturn tRNA_fasta_outdict",
"def load_fasta(filepath):\n chromosomes = {}\n\n seqs = SeqIO.parse(filepath, format='fasta', \n alphabet=IUPAC.ambiguous_dna)\n\n # iterate over seqs and add to chromosome dictionary\n for seq in seqs:\n # determine chromosome number\n match = re.search('\\d+', seq.name)\n chromosome_number = int(match.group())\n\n chromosomes[chromosome_number] = seq\n\n return chromosomes",
"def read_fasta_to_dict(path_to_file):\n if options.verbose:\n syserr(\"Reading sequences from %s \\n\" % (path_to_file))\n try:\n seq_obj = open(path_to_file, 'Ur')\n seqs = {}\n for seq in SeqIO.parse(seq_obj, 'fasta'):\n seqs[str(seq.id)] = str(seq.seq)\n except IOError:\n raise IOError('Cannot read from %s' % (path_to_file))\n\n return seqs",
"def read_fasta_file(path):\n with open(path) as data_file:\n output = {}\n sequence_name = None\n for line in data_file.readlines():\n if line.startswith(\">\"):\n sequence_name = line[1:].strip()\n else:\n output.setdefault(sequence_name, \"\")\n line = \"\".join(re.findall(\"[acgtACGT]+\", line))\n\n output[sequence_name]+=line.upper()\n return output",
"def find_GC_content(fasta_file_name):\n\twith open(fasta_file_name) as fasta:\n\t\tGC_content = {}\n\t\tfor line in fasta:\n\n\t\t\t# Each line (bar the last) ends with '\\n'\n\t\t\tloc_line = line.replace('\\n', '')\n\n\t\t\t# Finds '>' at opening of line (FASTA seq title)\n\t\t\tif re.match(r'^>', loc_line):\n\t\t\t\tGC_content[loc_line] = 0\n\t\t\t\tG_count = 0\n\t\t\t\tC_count = 0\n\t\t\t\tcount = 0\n\t\t\t\tcurrent = loc_line\n\t\t\telse:\n\t\t\t\tG_count += loc_line.count('G')\n\t\t\t\tC_count += loc_line.count('C')\n\t\t\t\tcount += len(loc_line)\n\t\t\t\tGC_content[current] = float((G_count + C_count)) / count\n\treturn GC_content",
"def genome_index_to_dict(self, index):\n chrom_pos = self.chrom_and_pos(index)\n return {'Chromosome': chrom_pos[0], 'Position': chrom_pos[1]}",
"def fasta_reader(path, fasta_file):\n fasta_dict = dict()\n try:\n for seq_record in SeqIO.parse(path + fasta_file, \"fasta\"):\n id_fasta = seq_record.id\n sequence = seq_record.seq\n fasta_dict[id_fasta] = sequence\n except FileNotFoundError:\n GRAPH_LOGGER.debug('External fasta file not exist!')\n return None\n\n return fasta_dict",
"def readFastaFile(filename):\n if os.path.exists(filename)==False:return {}\n sequences={}\n fhr=open(filename,\"r\")\n for line in fhr:\n if line[0]==\">\":\n sequences[line.strip()[1:].split()[0]]=fhr.readline().strip()\n fhr.close()\n return sequences",
"def fetchRefSeqDict(RefSeqBed=\"/fg/compbio-t/lgoff/magda/references/human/transcriptome/hg18/hg18_RefSeq.bed\"):\n res = {}\n iter = parseBed(RefSeqBed)\n for i in iter:\n res[i.name] = i\n return res",
"def get_fasta_dictionary(interaction_dict):\n fasta_dict={}\n for name, structure in interaction_dict.items():\n chains =[]\n sequences = get_residues_sequence(structure)\n if len(sequences) == 2: #there are 2 chains\n A = sequences[0]\n B = sequences [1]\n for model in structure:\n for chain in model:\n chains.append(chain.id)\n name_fastaA = name + \"_\" + chains[0]\n name_fastaB = name + \"_\" + chains[1]\n fasta_dict[name_fastaA] = A\n fasta_dict[name_fastaB] = B\n elif len(sequences) ==1: #there is only 1 chain there\n A = A = sequences[0]\n for model in structure:\n for chain in model:\n chains.append(chain.id)\n name_fastaA = name + \"_\" + chains[0]\n fasta_dict[name_fastaA] = A\n else: #there are no chains\n continue\n return fasta_dict",
"def Parse_Fasta(filename):\n dic = {}\n name = None\n seq = ''\n with open(filename) as F:\n for line in F:\n if line.startswith('>'):\n if name is not None:\n dic[name] = seq\n seq = ''\n name = line.strip()\n else:\n seq += line\n if not name in dic:\n dic[name] = seq\n return dic",
"def read_cDNA_file_to_dict(filename):\n \n #initialize dictionary\n cDNA_dictionary = {}\n\n #open file\n with open(cDNA_file) as f:\n \n #loop through file line by line\n for line in f:\n\n #remove newline\n line = line.rstrip()\n \n #get gene name\n if line.startswith(\">\"):#If the line starts with the character \">\" then,\n gene_name = line.split(\"|\")[1]#I separate the line by the character \"|\" and assign index 1 to gene_name\n \n #read in sequence in uppercase\n if not line.startswith(\">\"):#If the line does not start with the character \">\" then,\n line = line.upper()#I make all of the characters within the line uppercase\n\n #put name and sequence in dictionary\n cDNA_dictionary[gene_name] = line#I assign the gene_name as the key and the line (sequence) as the value\n\n #return dictionary \n return cDNA_dictionary",
"def read_fasta(sequence_file :str):\n\n #for gziped files:\n\n if sequence_file.endswith(\".gz\"):\n with gzip.open(sequence_file, \"rt\") as file:\n seqDict = SeqIO.to_dict(SeqIO.parse(file, 'fasta'))\n ident = ident.split(\"|\")[1]\n return seqDict\n\n # for no gziped fasta files:\n else:\n seqRecord = SeqIO.read(sequence_file, \"fasta\")\n sequence = seqRecord.seq\n ident = seqRecord.id\n ident = ident.split(\"|\")[1]\n return ident, sequence",
"def read_fasta_gff(\r\n fasta: str,\r\n gff: str,\r\n as_dict: bool = False,\r\n circular: bool = False) \\\r\n -> Union[List[Chromosome], Dict[str, Chromosome]]:\r\n chromosomes = []\r\n\r\n feature_dict = read_gff(gff, as_dict=True)\r\n # feature_dict has the data structure:\r\n # {\r\n # seqname: [GffFeature, ...], ...\r\n # }\r\n\r\n with FastaParser(fasta) as parser:\r\n for seqname, sequence in parser:\r\n\r\n gff_features = feature_dict.get(seqname, [])\r\n\r\n features = list(map(gff_to_generic_feature, gff_features))\r\n\r\n features = FeatureArray(\r\n seqname=seqname,\r\n chromosome_size=len(sequence),\r\n features=features,\r\n circular=circular)\r\n\r\n c = Chromosome(\r\n seqname=seqname,\r\n sequence=sequence,\r\n features=features,\r\n circular=False)\r\n\r\n chromosomes.append(c)\r\n\r\n if as_dict:\r\n return {chrom.seqname: chrom for chrom in chromosomes}\r\n\r\n return chromosomes",
"def get_gene_biotype_map(db_path, table=Annotation.__tablename__, index_col='TranscriptId'):\n df = read_attrs(db_path, table, index_col)\n return dict(list(zip(df.GeneId, df.GeneBiotype)))",
"def parse_fasta(fasta_file):\n\n fasta_dct = {}\n\n with open(fasta_file,'r') as text:\n label = ''\n for line in text:\n if line.startswith('>'):\n if label in fasta_dct.keys():\n fasta_dct[current_line] = str(''.join(fasta_dct[current_line]))\n label = line.strip()[1:]\n fasta_dct[label] = []\n current_line = label\n else:\n fasta_dct[current_line].append(line.strip())\n fasta_dct[current_line] = str(''.join(fasta_dct[current_line]))\n\n return fasta_dct",
"def fastadict(fasta, plaintext=False):\n d = FastaDict.from_text(fasta, plaintext)"
] | [
"0.7508443",
"0.7360985",
"0.71590203",
"0.689614",
"0.6895492",
"0.6875781",
"0.6870282",
"0.6815837",
"0.680902",
"0.67469376",
"0.6740498",
"0.6521526",
"0.64860785",
"0.6435831",
"0.64199185",
"0.64125013",
"0.63991344",
"0.63906515",
"0.6364206",
"0.6346941",
"0.6306387",
"0.629229",
"0.6249013",
"0.62404144",
"0.6205474",
"0.61862946",
"0.61752963",
"0.61751634",
"0.6164989",
"0.6149631"
] | 0.7972919 | 0 |
Program to read a gff and create dictionary of exons from a transcript | def read_gff(gff):
genome = getseq(args.genome)
dictoftranscripts = {}
for k in open(gff):
if not k.startswith("#"):
lines = k.strip().split("\t")
if lines[2] == "exon":
strand = lines[6]
chromosome = lines[0]
start = lines[3]
end = lines[4]
transcriptid = re.search("Parent=transcript:(.*)", lines[8]).group(1)
if transcriptid + "#" + chromosome in dictoftranscripts:
dictoftranscripts[transcriptid + "#" + chromosome].extend([start, end])
else:
dictoftranscripts[transcriptid + "#" + chromosome] = []
dictoftranscripts[transcriptid + "#" + chromosome].extend([start, end])
for key, value in dictoftranscripts.iteritems():
value.sort()
print value
for coord1 in value:
for coord2 in value[1:]:
#print coord1, coord2
if int(coord1) != int(value[-1]) and value.index(coord2) != value.index(coord1)+1 and value.index(coord2) > value.index(coord1):
exon1_start = int(coord1)
exon1_end = int(coord2)
#print exon1_start, exon1_end
#print key.split("#")[1]
#print value.index(coord1), value.index(coord2)
exon_seq = genome.get(key.split("#")[1],"NA")
if exon_seq != "NA":
sequence_exon = exon_seq[exon1_start:exon1_end+1]
#print exon1_start, exon1_end, sequence_exon
for start, end, strand, frame, pro in translate(sequence_exon):
print start, end, strand, frame, pro | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def GFFParse(gff_file):\n genes, utr5, exons=dict(), dict(), dict()\n transcripts, utr3, cds=dict(), dict(), dict()\n # TODO Include growing key words of different non-coding/coding transcripts \n features=['mrna', 'transcript', 'ncrna', 'mirna', 'pseudogenic_transcript', 'rrna', 'snorna', 'snrna', 'trna', 'scrna', 'mrna_te_gene']\n gff_handle=open(gff_file, \"rU\")\n for gff_line in gff_handle:\n gff_line=gff_line.strip('\\n\\r').split('\\t')\n if re.match(r'#|>', gff_line[0]): # skip commented line or fasta identifier line \n continue\n if len(gff_line)==1: # skip fasta sequence/empty line if present \n continue \n assert len(gff_line)==9, '\\t'.join(gff_line) # not found 9 tab-delimited fields in this line \n if '' in gff_line: # skip this line if there any field with an empty value\n print 'Skipping..', '\\t'.join(gff_line)\n continue\n if gff_line[-1][-1]==';': # trim the last ';' character \n gff_line[-1]=gff_line[-1].strip(';')\n if gff_line[2].lower() in ['gene', 'pseudogene', 'transposable_element_gene']:\n gid, gene_info=None, dict()\n gene_info['start']=int(gff_line[3])\n gene_info['stop']=int(gff_line[4])\n gene_info['chr']=gff_line[0]\n gene_info['source']=gff_line[1]\n gene_info['strand']=gff_line[6]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=') # gff attributes are separated by key=value pair \n if attb[0]=='ID':\n gid=attb[1]\n break\n genes[(gff_line[0], gid)]=gene_info # store gene information based on the chromosome and gene symbol.\n elif gff_line[2].lower() in features: \n gid, mrna_info=None, dict() \n mrna_info['start']=int(gff_line[3])\n mrna_info['stop']=int(gff_line[4])\n mrna_info['chr']=gff_line[0]\n mrna_info['strand']=gff_line[6]\n mrna_info['type'] = gff_line[2]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=')\n if attb[0]=='Parent':\n gid=attb[1]\n elif attb[0]=='ID':\n mrna_info[attb[0]]=attb[1]\n for fid in gid.split(','): # child may be mapped to multiple parents ex: Parent=AT01,AT01-1-Protein \n if (gff_line[0], fid) in transcripts:\n transcripts[(gff_line[0], fid)].append(mrna_info)\n else:\n transcripts[(gff_line[0], fid)]=[mrna_info]\n elif gff_line[2].lower() in ['exon', 'pseudogenic_exon']:\n tids, exon_info=None, dict()\n exon_info['start']=int(gff_line[3])\n exon_info['stop']=int(gff_line[4])\n exon_info['chr']=gff_line[0]\n exon_info['strand']=gff_line[6]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=')\n if attb[0]=='Parent':\n tids=attb[1]\n break\n for tid in tids.split(','):\n if (gff_line[0], tid) in exons:\n exons[(gff_line[0], tid)].append(exon_info)\n else:\n exons[(gff_line[0], tid)]=[exon_info]\n elif gff_line[2].lower() in ['five_prime_utr']:\n utr5_info, tids=dict(), None\n utr5_info['start']=int(gff_line[3])\n utr5_info['stop']=int(gff_line[4])\n utr5_info['chr']=gff_line[0]\n utr5_info['strand']=gff_line[6]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=')\n if attb[0]=='Parent':\n tids=attb[1]\n break\n for tid in tids.split(','):\n if (gff_line[0], tid) in utr5:\n utr5[(gff_line[0], tid)].append(utr5_info)\n else:\n utr5[(gff_line[0], tid)]=[utr5_info]\n elif gff_line[2].lower() in ['cds']:\n cds_info, tids=dict(), None\n cds_info['start']=int(gff_line[3])\n cds_info['stop']=int(gff_line[4])\n cds_info['chr']=gff_line[0]\n cds_info['strand']=gff_line[6]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=')\n if attb[0]=='Parent':\n tids=attb[1]\n break\n for tid in tids.split(','):\n if (gff_line[0], tid) in cds:\n cds[(gff_line[0], tid)].append(cds_info)\n else:\n 
cds[(gff_line[0], tid)]=[cds_info]\n elif gff_line[2].lower() in ['three_prime_utr']:\n utr3_info, tids=dict(), None\n utr3_info['start']=int(gff_line[3])\n utr3_info['stop']=int(gff_line[4])\n utr3_info['chr']=gff_line[0]\n utr3_info['strand']=gff_line[6]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=')\n if attb[0]=='Parent':\n tids=attb[1]\n break\n for tid in tids.split(','):\n if (gff_line[0], tid) in utr3:\n utr3[(gff_line[0], tid)].append(utr3_info)\n else:\n utr3[(gff_line[0], tid)]=[utr3_info]\n gff_handle.close()\n return genes, transcripts, exons, utr3, utr5, cds",
"def read_gtf_file(gtf_file):\n genes = {}\n transcripts = {}\n exons = {}\n\n with open(gtf_file) as gtf:\n for line in gtf:\n line = line.strip()\n\n # Ignore header\n if line.startswith(\"#\"):\n continue\n\n # Split into constitutive fields on tab\n tab_fields = line.split(\"\\t\")\n chrom = tab_fields[0]\n entry_type = tab_fields[2]\n\n # Entry is a gene\n if entry_type == \"gene\":\n gene = Gene.get_gene_from_gtf(tab_fields)\n native_id = gene.identifier\n genes[native_id] = gene\n\n # Entry is a transcript\n elif entry_type == \"transcript\":\n transcript = Transcript.get_transcript_from_gtf(tab_fields)\n gene_id = transcript.gene_id\n if gene_id in genes:\n genes[gene_id].add_transcript(transcript)\n native_id = transcript.identifier\n transcripts[native_id] = transcript\n \n # Entry is an edge\n elif entry_type == \"exon\":\n exon = Edge.create_edge_from_gtf(tab_fields)\n # This ID is used because of a rare GENCODE bug\n location_exon_id = exon.identifier\n exons[location_exon_id] = exon \n\n transcript_id = list(exon.transcript_ids)[0]\n gene_id = exon.annotations[\"gene_id\"]\n \n if location_exon_id not in exons:\n # Add the new edge to the data structure\n exons[location_exon_id] = exon\n else:\n # Update existing exon entry, including its transcript set\n exon = exons[location_exon_id]\n exon.transcript_ids.add(transcript_id)\n \n if transcript_id in transcripts: \n currTranscript = transcripts[transcript_id]\n currTranscript.add_exon(exon)\n\n return genes, transcripts, exons",
"def gtf_to_transcript_exons(gtf, transcript_type):\n gft = HTSeq.GFF_Reader(gtf)\n\n transcripts = {}\n\n for gtf_line in gft:\n if gtf_line.type == 'exon':\n try:\n tr_id = gtf_line.attr['transcript_id']\n tr_type = gtf_line.attr['transcript_biotype']\n except:\n sys.stderr.write(f\"Problem with: {gtf_line}. Exiting.{os.linesep}\")\n sys.exit(1)\n\n if transcript_type != \"all\":\n if tr_type != transcript_type:\n continue\n\n if tr_id not in transcripts:\n transcripts[tr_id] = [gtf_line]\n else:\n transcripts[tr_id].append(gtf_line)\n\n return transcripts",
"def parse_gff3(filename):\n genes = OrderedDict()\n transcript_to_locus = {}\n\n count_per_transcript = defaultdict(lambda: 1)\n\n with open(filename) as gff_in:\n for line in gff_in:\n # Skip comments\n if not line.strip()[0] == '#':\n line_data = parse_line(line)\n\n # Parts (e.g. CDS or Exon) might not have an ID. One will be added here\n if ID_ATTRIBUTE not in line_data['attributes'].keys() and line_data['feature'] in PARTS_FEATURES:\n if PARENT_ATTRIBUTE in line_data['attributes'].keys():\n counter_id = line_data['attributes'][PARENT_ATTRIBUTE] + '.' + line_data['feature'] + '.'\n new_id = counter_id + str(count_per_transcript[counter_id])\n count_per_transcript[counter_id] += 1\n line_data['attributes'][ID_ATTRIBUTE] = new_id\n\n # Every line needs a valid ID\n if ID_ATTRIBUTE in line_data['attributes'].keys():\n\n if line_data['feature'] in LOCUS_FEATURES:\n genes[line_data['attributes'][ID_ATTRIBUTE]] = {\n 'data': line_data,\n 'transcripts': OrderedDict()\n }\n\n elif line_data['feature'] in TRANSCRIPT_FEATURES:\n if PARENT_ATTRIBUTE in line_data['attributes'].keys():\n parent_id = line_data['attributes'][PARENT_ATTRIBUTE]\n\n if parent_id in genes.keys():\n genes[parent_id]['transcripts'][line_data['attributes'][ID_ATTRIBUTE]] = {\n 'data': line_data,\n 'parts': []\n }\n\n transcript_to_locus[line_data['attributes'][ID_ATTRIBUTE]] = \\\n line_data['attributes'][PARENT_ATTRIBUTE]\n\n elif line_data['feature'] in PARTS_FEATURES:\n\n if PARENT_ATTRIBUTE in line_data['attributes'].keys():\n parent_id = line_data['attributes'][PARENT_ATTRIBUTE]\n grandparent_id = transcript_to_locus[parent_id]\n\n genes[grandparent_id]['transcripts'][parent_id]['parts'].append(line_data)\n\n return genes",
"def feature_table(chr_id, source, orient, genes, transcripts, cds, exons, unk):\n for gname, ginfo in genes.items():\n line = [str(chr_id), \n 'gbk_to_gff',\n ginfo[3],\n str(ginfo[0]),\n str(ginfo[1]),\n '.',\n ginfo[2],\n '.',\n 'ID='+str(gname)+';Name='+str(gname)+';Note='+ginfo[-1]]\n print '\\t'.join(line) \n ## construct the transcript line is not defined in the original file \n t_line = [str(chr_id), 'gbk_to_gff', source, 0, 1, '.', ginfo[2], '.'] \n\n if not transcripts:\n t_line.append('ID=Transcript:'+str(gname)+';Parent='+str(gname))\n\n if exons: ## get the entire transcript region from the defined feature\n t_line[3] = str(exons[gname][0][0])\n t_line[4] = str(exons[gname][0][-1])\n elif cds:\n t_line[3] = str(cds[gname][0][0])\n t_line[4] = str(cds[gname][0][-1])\n print '\\t'.join(t_line) \n\n if exons:\n exon_line_print(t_line, exons[gname], 'Transcript:'+str(gname), 'exon')\n\n if cds:\n exon_line_print(t_line, cds[gname], 'Transcript:'+str(gname), 'CDS')\n if not exons:\n exon_line_print(t_line, cds[gname], 'Transcript:'+str(gname), 'exon')\n\n else: ## transcript is defined \n for idx in transcripts[gname]: \n t_line[2] = idx[3]\n t_line[3] = str(idx[0])\n t_line[4] = str(idx[1])\n t_line.append('ID='+str(idx[2])+';Parent='+str(gname))\n print '\\t'.join(t_line) \n \n ## feature line print call \n if exons:\n exon_line_print(t_line, exons[gname], str(idx[2]), 'exon')\n if cds:\n exon_line_print(t_line, cds[gname], str(idx[2]), 'CDS')\n if not exons:\n exon_line_print(t_line, cds[gname], str(idx[2]), 'exon')\n\n if len(genes) == 0: ## feature entry with fragment information \n \n line = [str(chr_id), 'gbk_to_gff', source, 0, 1, '.', orient, '.'] \n fStart = fStop = None \n\n for eid, ex in cds.items(): \n fStart = ex[0][0] \n fStop = ex[0][-1]\n\n for eid, ex in exons.items(): \n fStart = ex[0][0] \n fStop = ex[0][-1]\n\n if fStart or fStart:\n\n line[2] = 'gene'\n line[3] = str(fStart)\n line[4] = str(fStop)\n line.append('ID=Unknown_Gene_' + str(unk) + ';Name=Unknown_Gene_' + str(unk))\n print \"\\t\".join(line)\n\n if not cds:\n line[2] = 'transcript'\n else:\n line[2] = 'mRNA'\n line[8] = 'ID=Unknown_Transcript_' + str(unk) + ';Parent=Unknown_Gene_' + str(unk)\n print \"\\t\".join(line)\n \n if exons:\n exon_line_print(line, cds[None], 'Unknown_Transcript_' + str(unk), 'exon')\n \n if cds:\n exon_line_print(line, cds[None], 'Unknown_Transcript_' + str(unk), 'CDS')\n if not exons:\n exon_line_print(line, cds[None], 'Unknown_Transcript_' + str(unk), 'exon')\n \n unk +=1 \n\n return unk",
"def readGenes(gtf, tid=False):\n gs = {}\n #get all genes information\n print(\"reading annotaions from %s\" % gtf)\n for line in tqdm(open(gtf).read().split(\"\\n\")[:-1]):\n if line.startswith(\"#\"):\n continue\n line = line.split(\"\\n\")[0].split(\"\\t\")\n if line[2] != \"exon\":\n continue\n e = parseGtfLine(line, tid)\n if e.name not in gs:\n g = Gene()\n g.chrom = e.chrom\n g.start = e.start\n g.end = e.end\n g.strand = e.strand\n g.name = e.name\n g.id = e.id\n g.exons = {(e.start, e.end): e}\n gs[g.name] = g\n else:\n #same position exons\n if (e.start, e.end) in gs[e.name].exons:\n continue\n else:\n g = gs[e.name]\n if e.start < g.start:\n g.start = e.start\n if e.end > g.end:\n g.end = e.end\n g.exons[(e.start, e.end)] = e\n #get all genes information\n ngs = {} #key is chromosome\n for k, g in gs.items():\n if g.chrom not in ngs:\n ngs[g.chrom] = {}\n if g.strand == \"+\":\n tss = g.start\n else:\n tss = g.end\n #tss position is key, other information is value, for following search\n if tss not in ngs[g.chrom]:\n ngs[g.chrom][tss] = g\n return ngs",
"def parse_gff(g):\n # We also want to store the mRNA->gene information!\n mrna_par = {}\n # And the CDS->mRNA information\n cds_dat = {}\n with open(g, 'r') as f:\n for line in f:\n # if the line is empty or starts with a #, we will skip it\n if line.startswith('#') or line == '\\n':\n continue\n else:\n tmp = line.strip().split('\\t')\n feat_type = tmp[2]\n if feat_type == 'mRNA':\n meta = tmp[8].split(';')\n for m in meta:\n if m.startswith('ID='):\n tx_id = m.split('=')[1]\n if m.startswith('Parent='):\n tx_par = m.split('=')[1]\n mrna_par[tx_id] = tx_par\n elif feat_type == 'CDS':\n scaf = tmp[0]\n start = tmp[3]\n end = tmp[4]\n strand = tmp[6]\n phase = tmp[7]\n meta = tmp[8].split(';')\n for m in meta:\n if m.startswith('ID='):\n cds_id = m.split('=')[1]\n if m.startswith('Parent='):\n cds_par = m.split('=')[1]\n if strand == '-':\n strand = -1\n else:\n strand = 1\n # Watch out for transcripts where there are multiple CDS.\n # This will require a nested dictionary of lists.\n if cds_par in cds_dat:\n pass\n else:\n cds_dat[cds_par] = {}\n if cds_id in cds_dat[cds_par]:\n pass\n else:\n cds_dat[cds_par][cds_id] = []\n # We want to make a SequenceFeature for each CDS chunk\n # Keep in mind that GFF is 1-based, so we have to adjust\n # the start position!\n cds_feat = SeqFeature(\n FeatureLocation(int(start)-1, int(end), strand=strand),\n type=\"CDS\",\n id=cds_id)\n # Add some qualifiers to modify the behavior\n # Use the \"standard\" genetic code from NCBI\n cds_feat.qualifiers['transl_tabl'] = [1]\n # Then, append it into the corresponding dictionary item\n # keeping the chromosome (scaffold) name and phase with it\n cds_dat[cds_par][cds_id].append((cds_feat, scaf, phase))\n else:\n continue\n return (mrna_par, cds_dat)",
"def gff3_parsed (gff3_file, sam_dic):\n\n #A special type of dictionary in which the values were saved in a list\n gff_dic = defaultdict(list)\n\n gff3_file = open(arg.gff3_infile)\n gff3_dic = {}\n\n gene_dic = {}\n exon_list = []\n gene_idx = 1\n\n counter_1 = 0\n counter_2 = 0\n counter_3 = 0\n counter_4 = 0\n counter_5 = 0\n counter_6 = 0\n counter_7 = 0\n idx_pseudogene = 0\n\n #A dictionary\n gene_idexes = {\"gene\": gene_idx, \"exon\": gene_idx,\n \"pseudogene\": \"pseudogene\"}\n\n\n for line in gff3_file:\n if line.startswith(\"##\"):\n pass\n elif line.startswith(\"#!\"):\n pass\n else:\n line_information = line.strip().split()\n\n # Make a dic with the genes present on Gg genome and its anotattion\n if line_information[2] == (\"gene\"):\n # deal with the PREVIOUS gene\n #This peace of code add to the gff3_dic(the main dic of gff3 file)\n #the information of which are the exons of one particular gene\n #Note: this happends at the same time that the gene information\n #were parsed\n if exon_list:\n gff3_dic[gene_idx][\"exon_list\"] = exon_list\n gene_idx += 1\n\n exon_list = []\n #parse the gene information and add this information to a new dic (gff3_dic)\n #with all the information related to the genes present in gff3 file (Cg_Nara5)\n # deal with CURRENT gene\n scaffold = line_information [0]\n gene_beg = line_information[3]\n gene_end = line_information [4]\n gene_loc = [gene_beg, gene_end]\n gene_strand = line_information[6]\n gene_information = line_information [8]\n gene_information = line.strip().split(\";\")\n gene_description = [gene_information[2]]\n gff3_dic[gene_idx] = {\"scaffold\": scaffold,\n \"gene_range\": gene_loc,\n \"description\": gene_description,\n \"exon_list\": None,\n \"strand\": gene_strand}\n\n # Make a list with the exons-genes present on Gg genome and its anotattion\n # If in this line the \"gene\" keyword is not present but the \"exon\"\n #keyword are append the range information to the exon list which\n # will be added to main gff3 dic\n elif line_information[2] == (\"exon\"):\n exon_beg = line_information[3]\n exon_end = line_information [4]\n exon_loc = (exon_beg, exon_end)\n exon_list.append(exon_loc)\n\n exon_information = line_information [8]\n exon_information = line.strip().split()[8].split(\";\")[0]\n gff3_dic[gene_idx][\"exon_reference\"] = exon_information\n #At the same time - regardless the previous code if the line has\n #any of this keywords the information of the gene_range were added\n # to the gff_dic.\n if line_information[2] in [\"gene\", \"exon\", \"pseudogene\"]:\n\n gene_range = (line_information[3], line_information[4])\n\n #Note: this peace of code happends because the gene description\n #of the gene is not the same as the exon description. Therefore,\n #the gene description has to be recovered\n\n if line_information[2] == \"gene\":\n gene_information = line_information [8]\n gene_information = line.strip().split(\";\")\n gene_description = [gene_information[2]]\n\n # Example:\n # gff_dic[scaffold1] = [[1, \"gene\", (82, 1159), description],\n # 1, \"exon\", (82, 603), description],\n # 2, \"gene\", (1440, 4998), description\n # pseudogene_idx, pseudogene, (1999, 3000)]]\n\n #To keep only the information regardless gene_idx (gene index)\n #to the gene or the exons present in this gene. 
When I have\n #pseudogenes, the gene index is replaced for pseudogene\n if line_information[2] in [\"exon\", \"gene\"]:\n idx = gene_idx\n else:\n idx_pseudogene += 1\n idx = \"pseudogene_\"+ str(idx_pseudogene)\n\n #add the previous information in a different format in which\n #the key is the sacffold and the values are the index (to easly\n #acess the information present in gff3 dictionary), the keyword\n #(gene, exon, pseudogene), the range, and the description.\n #All these informations will be used to perfome the SNP range\n # discover only within the true scaffold and not in all the scaffolds\n #present in the gff3 file. Making the code mor efficient and realibel\n gff_dic[line_information[0]].append([idx,\n line_information[2],\n gene_range,\n gene_description])\n\n # Add last exon list to last gene index\\\n else:\n if exon_list:\n gff3_dic[gene_idx][\"exon_list\"] = exon_list\n\n print (\"Step 3a - Parse the .gff3 file -- Done\")\n\n\n for locus, info_dict in sam_dic.items():\n\n # Get all info from current scaffold\n # scaffold_info is a list containing all genes, exons and pseudogenes\n # of the scaffold in sam_dic\n\n scaffold_info = gff_dic[info_dict[\"scaffold\"]]\n #we create two different \"values\" in the sam_dic dictionary with the len\n #of the real snp location in which all the \"values\" begin with \"intergenic\" or None\n #and as we make the check codes this values will be replaced for new\n # values or will be remain like this\n\n info_dict[\"element_type\"] = [\"intergenic\"] * len(info_dict[\"real_snp_localization\"])\n info_dict[\"element_range\"] = [None] * len(info_dict[\"real_snp_localization\"])\n info_dict[\"gene_index\"] = \"intergenic\"\n\n # Check if locus is in any range\n # The enumerate function give the value of the \"value\" as well as the\n #position of the value. Example: l = [\"a\", \"b\", \"c\"]\n #enumerate (l) --- (0, \"a\"); (1, \"b\"); (2, \"c\")\n #pos - the position of the snp in the list\n #snp - is the real snp localization under analyse\n\n # Get the position of the snp in the list. 
This position will\n # be used to create a key for the gene_inf_dic.\n for pos, snp in enumerate(info_dict[\"real_snp_localization\"]):\n # The \"element\" is the several lists present in the gff_dic.\n #Note: all the lists regardless the type has exactly the same length.\n # Example : [10459, \"gene\", (\"18930\", \"23805\"), [\"description=LysM domain-containing protein\"]\n #So for each list we will check if the SNP is in the range\n for element in scaffold_info:\n element_beg = int(element[2][0])\n element_end = int(element[2][1])\n element_range= range(element_beg, element_end)\n\n\n # YAY, one of the SNP matches one element of the scaffold\n if snp in element_range:\n\n info_dict[\"gene_index\"] = element[0]\n\n # ELEMENT KEY:\n # \"exon\": The SNP is in a coding region\n # \"gene\": The SNP is in an intron\n # \"pseudogene\": The SNP is in a pseudogene\n info_dict[\"element_type\"][pos] = element[1]\n\n info_dict[\"element_range\"][pos] = element[2]\n\n info_dict[\"description\"] = element[3]\n\n\n\n #Get the main statistics from our dataset\n\n for locus, locus_info in sam_dic.items():\n\n element_type = locus_info[\"element_type\"]\n\n # Adding information for loci in a intergenic region\n #The set return an object with only 1 \"element\" in that case \"intergenic\"\n #So if the locus has 2 snps 1 in a intergenic region and other in a gene\n # this locus will not count as a intergenic locus, because the set will\n #have two elenets {\"intergenic\", \"gene\"} and not only 1 {\"intergenic\"}.\n #Note: The set works for each element_type present in sam_dic (loop)\n if set(element_type) == {\"intergenic\"}:\n counter_1 += 1\n\n # Adding information for SNPs in intergenic region\n #This counter gives the number of times the intergenic word appears\n counter_2 += element_type.count(\"intergenic\")\n\n # Adding information for loci in pseudogenes\n if \"pseudogene\" in element_type:\n counter_3 += 1\n\n #Adding information for SNPs in pseudogene\n counter_4 += element_type.count(\"pseudogene\")\n\n #Adding information for loci in genes\n #As previously refered the gene information were recorded in two different formats\n #gene- when the SNP were in a gene but not in a exon (aka intron)\n #exon - when the SNP were in a gene and in a specific exon\n #So in order to have the statistics for the gene we need to search\n #booth keywords on the element_type . Not in this particular case the set\n #doesn\"t work because the set don\"t has an order (gene, exon) or (exon, gene)\n\n if \"gene\" in element_type or \"exon\" in element_type:\n counter_5 += 1\n\n #Adding information for SNPs in gene\n\n counter_6 += element_type.count(\"exon\") + element_type.count(\"gene\")\n\n #Adding information for SNPs in exons\n\n counter_7 += element_type.count(\"exon\")\n\n\n\n print(\"Data resume:\")\n print(\"Number of loci in a non coding region: {}\".format(counter_1))\n print(\"Number of SNPs in a non coding region: {}\".format(counter_2))\n\n print(\"Number of loci located in pseudogenes:{}\".format(counter_3))\n print(\"Number of SNPs located in pseudogenes:{}\".format(counter_4))\n\n print(\"Number of loci located in genes: {}\".format(counter_5))\n print(\"Number of SNPs located in genes: {}\".format(counter_6))\n print(\"Number of SNPs located in exons: {}\".format(counter_7))\n\n\n\n# print(gff3_dic[6207])\n return (sam_dic, gff3_dic)",
"def index_gff(gff, logger):\n f_in = open(gff, \"r\")\n gene_start_stop_dict = dict()\n gene_scaff_dict = dict()\n gene_first_exon_dict = dict()\n gene_direction = dict()\n gene_gff_line = dict()\n gene_set = set([])\n for line in f_in:\n if line.startswith(\"#\"):\n continue\n if not line.strip():\n continue\n assert len(line.split(\"\\t\")) == 9 , \"GFF fields wrong length should be 9\"\n scaff, source, feature, start, stop, score, \\\n direction, frame, gene_info = line.split(\"\\t\")\n gene = split_gene_name(gene_info)\n scaff = scaff.rstrip()\n if feature == \"gene\":\n gene_gff_line[gene] = line\n gene_set.add(gene)\n start_stop = \"%s\\t%s\" % (start, stop)\n gene_start_stop_dict[gene] = start_stop\n gene_scaff_dict[gene] = scaff\n gene_direction[gene] = direction\n if not gene in gene_first_exon_dict.keys():\n if feature == \"exon\" or feature == \"CDS\":\n start_stop = \"%s\\t%s\" % (start, stop)\n gene_first_exon_dict[gene] = start_stop\n f_in.close()\n logger.info(\"Number of genes = %d\", len(gene_set))\n return gene_start_stop_dict, gene_first_exon_dict, \\\n gene_scaff_dict, gene_direction, gene_set, gene_gff_line",
"def exon_desc(gff3, fasta):\n seqs = {}\n for defline, seq in LocusPocus.fasta.parse(fasta):\n exonpos = defline[1:].split(' ')[1]\n seqs[exonpos] = seq\n\n rnaid_to_accession = dict()\n reported_exons = {}\n exons, cdss = [], {}\n start, stop = None, None\n moltypes = ['mRNA', 'tRNA', 'ncRNA', 'transcript', 'primary_transcript',\n 'V_gene_segment', 'D_gene_segment', 'J_gene_segment',\n 'C_gene_segment']\n for entry in gff3:\n for moltype in moltypes:\n if ('\\t%s\\t' % moltype) in entry:\n accession = re.search(r'accession=([^;\\n]+)', entry).group(1)\n tid = re.search(r'ID=([^;\\n]+)', entry).group(1)\n rnaid_to_accession[tid] = accession\n\n if '\\texon\\t' in entry:\n exons.append(entry)\n elif '\\tCDS\\t' in entry:\n fields = entry.split('\\t')\n pos = '%s_%s-%s%s' % (fields[0], fields[3], fields[4], fields[6])\n cdss[pos] = entry\n elif '\\tstart_codon\\t' in entry:\n start = entry\n elif '\\tstop_codon\\t' in entry:\n stop = entry\n elif entry.startswith('###'):\n if len(exons) == 0:\n continue\n xcept = False\n for exonpos in cdss:\n if ';exception=ribosomal slippage' in cdss[exonpos]:\n xcept = True\n if xcept:\n exons, cdss = [], {}\n start, stop = None, None\n continue\n assert start, 'No start codon for exon(s): %s' % exons[0]\n assert stop, 'No stop codon for exon(s): %s' % exons[0]\n for exon in exons:\n fields = exon.split('\\t')\n assert len(\n fields) == 9, 'entry does not have 9 fields: %s' % exon\n mrnaid = re.search(r'Parent=([^;\\n]+)', fields[8]).group(1)\n exonpos = '%s_%s-%s%s' % (fields[0],\n fields[3], fields[4], fields[6])\n if exonpos in reported_exons:\n continue\n exonlength = int(fields[4]) - int(fields[3]) + 1\n exonseq = seqs[exonpos]\n assert len(exonseq) == exonlength, \\\n 'exon \"%s\": length mismatch; gff=%d, fa=%d' % (\n exonpos, exonlength, len(exonseq))\n gccontent = gc_content(exonseq)\n gcskew = gc_skew(exonseq)\n ncontent = n_content(exonseq)\n context = exon_context(exon, start, stop)\n phase = None\n remainder = None\n if context == 'cds':\n cexon = cdss[exonpos]\n phase = int(cexon.split('\\t')[7])\n remainder = (exonlength - phase) % 3\n values = '%s %s %d %.3f %.3f %.3f %s %r %r' % (\n exonpos, rnaid_to_accession[mrnaid], exonlength, gccontent,\n gcskew, ncontent, context, phase, remainder)\n reported_exons[exonpos] = 1\n yield values.split(' ')\n exons, cdss = [], {}\n start, stop = None, None",
"def stampaGTFEsIn(dictTranscript, dictGenes, dictInput, fileOut, geneNames):\n\n\tstringaGTF \t\t\t\t= \t\t'%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n'\t\t\t\t\t# Formato della riga da stampare nel file\n\texonF\t\t\t\t\t= \t\t'exon_number \"%d\"'\t\t\t\t\t\t\t# Formato della stringa di tipo exon (True)\n\tintronF\t\t\t\t\t=\t\t'intron_number \"%d\"'\t\t\t\t\t\t# Formato della stringa di tipo intron (False)\n\t\n\t# Indici all'interno del dizionario dei transcript\n\t#\n\tidx_transcriptName = 0\n\tidx_geneID = 1\n\t\n\t# Indici all'interno del dizionari dei geni\n\t#\n\tidx_geneName = 0\n\tidx_cromosoma = 1\n\n\t# Indici all'interno del dizionario degli introni e degli esoni\n\t#\n\tidx_start = 0\n\tidx_end = 1\n\tidx_tipo = 2\t\n\n\t# Tipo di regioni\n\tesone = True\n\tintrone = False\n\n\n\t# Apertura e preparazione dei file da scrivere (un file gtf con\n\t# esoni/introni per ogni gene e uno totale con tutte le regioni per tutti\n\t# i geni passati dall'utente\n\t#\t\n\tfiles = {}\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \t\n\tfor gene in geneNames:\t\t\t\t\t\t\t\t\t\t\t\t \n\t\tcod = geneNames[gene]\n\t\t# Avendo tanti geni, ad ogni nome di gene si associa la relativa\n\t\t# cartella del gene corrente tra quelli passati dall'utente\n\t\t#\n\t\tif not path.exists(cartella % cod):\n\t\t\tsystem('mkdir ' + cartella % cod)\n\t\tfiles[gene] = open(str(cartella % cod + fileOut), 'w')\n\t\t\n\t# File contenente le regioni esoniche/introniche di tutti i geni\n\t# passati dall'utente (serve per mappare le reads)\n\t#\n\tfileGtf = open(str(fileOut), 'w')\t\t\t\t\t\t\t \n\n\tfor transcriptID in dictInput:\n\t\tgeneID \t\t\t= dictTranscript[transcriptID][idx_geneID]\n\t\tcromosoma\t\t= dictGenes[geneID][idx_cromosoma]\n\t\tgeneName\t\t= dictGenes[geneID][idx_geneName]\n\t\ttranscriptName \t= dictTranscript[transcriptID][idx_transcriptName]\n\t\t# Inizializzazione del numero di esone/introne da stampare nel file\n\t\t#\n\t\tnrEs \t\t\t= 1\n\t\tnrIn \t\t\t= 1\n\t\t\n\t\tfor i in range(0, len(dictInput[transcriptID][idx_start])):\n\t\t\tstart\t\t= dictInput[transcriptID][idx_start][i]\n\t\t\tend\t\t\t= dictInput[transcriptID][idx_end][i]\n\t\t\ttipo\t\t= dictInput[transcriptID][idx_tipo][i]\n\n\t\t\tif tipo == esone:\n\t\t\t\tregione = exonF % (nrEs)\t\t\t\t\t\t\t\t\t\t# Stampa della stringa in formato exon\n\t\t\t\tnrEs += 1\n\t\t\telse:\n\t\t\t\tregione = intronF % (nrIn)\t\t\t\t\t\t\t\t\t\t# Stampa della stringa in formato intron\n\t\t\t\tnrIn += 1\n\t\t\t\t\n\t\t\tstrGtf = stringaGTF % (cromosoma, str(start), str(end), regione,\t\t\n\t\t\t\t\t\t\t\t geneName, transcriptName)\t\t\t\t\t# Creazione della riga del file\n\t\t\t\n\t\t\tif geneName in geneNames:\t\t\t\t\t\t\t\t\t\t\t# Se il gene presenta regioni introniche..\n\t\t\t\tfiles[geneName].write(strGtf)\t\t\t\t\t\t\t\t\t# ..si stampa il file gtf relativo alle proprie..\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# ..regioni introniche nella propria cartella\n\n\t\t\tfileGtf.write(strGtf)\n\t\t\t\t\n\tif geneNames:\n\t\tfor gene in files:\n\t\t\tfiles[gene].close()\n\n\tfileGtf.close()",
"def build_transcript_search_dict(path_to_gtf):\n search_dict = {}\n with open(path_to_gtf) as fp:\n for line in fp:\n if line.startswith(\"#\"):\n continue\n gtf_record = line.split(\"\\t\")\n if gtf_record[2] == \"transcript\" or gtf_record[2] == \"tRNA\":\n chromosome = str(gtf_record[0])\n position = int(gtf_record[3])\n transcript_id_raw = gtf_record[8].split()[3]\n transcript_id = re.sub('\"|;', \"\", transcript_id_raw)\n search_dict[transcript_id] = TranscriptInfo(chromosome, position)\n return search_dict",
"def intron_desc(gff3, fasta):\n seqs = {}\n for defline, seq in LocusPocus.fasta.parse(fasta):\n intronpos = defline[1:].split(' ')[1]\n seqs[intronpos] = seq\n\n reported_introns = {}\n introns = []\n mrnaid = None\n start, stop = None, None\n for entry in gff3:\n if '\\tmRNA\\t' in entry:\n mrnaid = re.search(r'accession=([^;\\n]+)', entry).group(1)\n elif '\\tintron\\t' in entry:\n introns.append(entry)\n elif '\\tstart_codon\\t' in entry:\n start = entry\n elif '\\tstop_codon\\t' in entry:\n stop = entry\n elif entry.startswith('###'):\n if mrnaid is None:\n continue\n assert start, 'No start codon for introns(s): %s' % introns[0]\n assert stop, 'No stop codon for introns(s): %s' % introns[0]\n if len(introns) > 0:\n for intron in introns:\n fields = intron.split('\\t')\n assert len(fields) == 9, \\\n 'entry does not have 9 fields: %s' % intron\n intronpos = '%s_%s-%s%s' % (fields[0], fields[3],\n fields[4], fields[6])\n if intronpos in reported_introns:\n continue\n intronlength = int(fields[4]) - int(fields[3]) + 1\n intronseq = seqs[intronpos]\n assert len(intronseq) == intronlength, \\\n 'intron \"%s\": length mismatch; gff=%d, fa=%d' % (\n intronpos, intronlength, len(intronseq))\n gccontent = gc_content(intronseq)\n gcskew = gc_skew(intronseq)\n ncontent = n_content(intronseq)\n context = intron_context(intron, start, stop)\n values = '%s %s %d %.3f %.3f %.3f %s' % (\n intronpos, mrnaid, intronlength, gccontent, gcskew,\n ncontent, context)\n reported_introns[intronpos] = 1\n yield values.split(' ')\n mrnaid = None\n introns = []\n start, stop = None, None",
"def main():\n\n (options, args) = parse_options(sys.argv)\n\n iterator = GFFParser.GFFAddingIterator() \n examiner = GFFParser.GFFExaminer()\n\n exon_map = dict()\n\n id_dict = examiner.available_limits(options.anno)['gff_id']\n intron_lists = dict()\n\n ### collect all available sources from gff-file\n source_dict = examiner.available_limits(options.anno)['gff_source_type']\n taken_sources = set()\n #types = ['gene', 'mRNA', 'exon', 'CDS']\n types = ['exon']\n\n ### parse only for exons and let the GFFparser \n ### infer the respective parents (otherwise doubled entries occured)\n ### we sanitize the structure later on anyways\n for key in [source[0] for source in source_dict.keys() if source[1] in types]:\n taken_sources.add(key)\n\n ### try different type, if sources are empty \n if len(taken_sources) == 0:\n types = ['CDS']\n for key in [source[0] for source in source_dict.keys() if source[1] in types]:\n taken_sources.add(key)\n\n ### print taken_sources\n if len(taken_sources) == 0:\n print >> sys.stderr, 'No suitable sources found!'\n sys.exit(-1)\n\n ### only show available sources - if neccessary\n if options.show_sources:\n print 'Parsed file %s\\n' % options.anno\n print 'Following sources are available:\\n'\n for source in taken_sources:\n print source \n print '\\nUse option -s to specify a comma-separated list of sources (-s source1,source2,source3), otherwise all sources are taken'\n sys.exit(0)\n\n if options.sources != '':\n user_sources = set(options.sources.split(','))\n taken_sources = taken_sources.intersection(user_sources)\n if len(taken_sources) == 0:\n print >> sys.stderr, 'The specified sources do not match any of the available sources - Please use option -S to get a list of available sources'\n sys.exit(-1)\n\n if options.verbose:\n print \"take sources %s\" % str(list(taken_sources))\n\n ### build up gff-parsing filter\n gff_sources = []\n for source in taken_sources:\n gff_sources.extend(zip([source] * len(types), types))\n\n ### parse gff-file\n for idx in id_dict.keys():\n print 'parsing chromosome %s' % idx\n if len(gff_sources) > 0:\n trans_dict = iterator.get_all_features(options.anno, {'gff_source_type':gff_sources, 'gff_id':idx})\n else:\n trans_dict = iterator.get_all_features(options.anno, {'gff_id':idx})\n ### since we parse only one chromosome, this loop is evaluated only once\n for chrm in trans_dict.keys():\n ### verify/sanitize the created dictionairy\n fix_structure(trans_dict[chrm])\n intron_lists[chrm] = dict()\n for gene in trans_dict[chrm].features:\n for trans in gene.sub_features:\n if trans.type == 'exon':\n print \"WARNING: Exon on transcript level:\"\n print trans\n print 'will continue\\n'\n continue\n elif len(trans.sub_features) > 1: ### at least two exons for one intron ...\n strand = trans.sub_features[0].strand\n contig_list = [(trans.sub_features[i].location.nofuzzy_start, trans.sub_features[i].location.nofuzzy_end) for i in range(len(trans.sub_features))]\n contig_list.sort(lambda u, v:u[0]-v[0])\n for exon in range(len(contig_list) - 1):\n ### update intron lists\n if contig_list[exon][1] - contig_list[exon + 1][0] == 0:\n continue\n try:\n assert(contig_list[exon][1] < contig_list[exon + 1][0])\n except AssertionError:\n print >> sys.stderr, 'exon_1 %i, exon_2 %i' % (contig_list[exon][1], contig_list[exon + 1][0]) \n print >> sys.stderr, contig_list[exon]\n print >> sys.stderr, contig_list[exon+1]\n print >> sys.stderr, exon\n sys.exit(-1)\n ### for now strand information is only dummy\n intron_lists[chrm][(0, 
contig_list[exon][1], contig_list[exon + 1][0])] = strand\n \n ### update exon map\n for exon in range(len(contig_list)):\n if not exon_map.has_key(chrm):\n exon_map[chrm] = dict()\n\n if not exon_map[chrm].has_key(trans.id):\n exon_map[chrm][trans.id] = dict()\n ### we assume, that an exon cannot occurr twice in the same transcript!\n ### the value in the dict is a binary encoding, if the left/right end is intronic 10 = 2 means, 5' end is intronic\n if len(contig_list) == 1:\n exon_map[chrm][trans.id][contig_list[exon]] = 0 ### 00 -> should never occurr\n elif exon == 0:\n exon_map[chrm][trans.id][contig_list[exon]] = 2 ### 10\n elif exon == len(contig_list) - 1:\n exon_map[chrm][trans.id][contig_list[exon]] = 1 ### 01\n else:\n exon_map[chrm][trans.id][contig_list[exon]] = 3 ### 11 \n\n outfile = open(options.outfile, 'w')\n cPickle.dump(intron_lists, outfile)\n outfile.close()\n \n outfile = open(options.outfile + '.' + 'cov', 'w')\n cPickle.dump(exon_map, outfile)\n outfile.close()",
"def parse_anno_from_gff3(options, contigs):\n\n anno = dict()\n idx2gene = dict()\n gene2idx = dict()\n\n if options.verbose:\n print >> sys.stderr, \"Parsing annotation from %s ...\" % options.anno\n \n ### initial run to get the transcript to gene mapping\n if options.verbose:\n print >> sys.stderr, \"... init structure\"\n\n trans2gene = dict() ### dict with: keys = transcript IDs, values = gene IDs\n for line in open(options.anno, 'r'):\n if line[0] == '#':\n continue\n sl = line.strip().split('\\t')\n if sl[2] in ['mRNA', 'transcript', 'mrna', 'miRNA', 'tRNA', 'snRNA', 'snoRNA', 'ncRNA', 'mRNA_TE_gene', 'rRNA', 'pseudogenic_transcript', 'transposon_fragment']:\n tags = get_tags_gff(sl[8])\n trans2gene[tags['ID']] = tags['Parent']\n\n ### init genome structure\n for c in contigs:\n if options.verbose:\n print >> sys.stderr, 'reserving memory for contig %s of len %s' % (c, contigs[c])\n anno[c] = sp.zeros((contigs[c] + 1,), dtype = 'int32')\n\n ### init list of considered GFF fields\n fields = options.fields.split(',')\n\n ### generate a list of exons with attached gene/transcript information\n ### one list per chromsome\n counter = 1\n gene_counter = 2 ### 0 is default for no coverage and 1 is mask for overlap\n\n exons = dict() # contains the exon list per transcript, only need this for mask_alternative_overlap\n\n t0 = time.time()\n for line in open(options.anno, 'r'):\n if options.verbose and counter % 10000 == 0:\n print >> sys.stderr, '.',\n if counter % 100000 == 0:\n t1 = time.time() - t0\n print >> sys.stderr, \"%i - took %.2f secs\" % (counter, t1)\n t0 = time.time()\n counter += 1 \n\n if line[0] == '#':\n continue\n sl = line.strip().split('\\t')\n \n if not sl[2] in fields:\n continue\n\n tags = get_tags_gff(sl[8])\n if sl[2] == 'exon':\n trans_id = tags['Parent']\n gene_id = trans2gene[trans_id]\n else:\n print >> sys.stderr, 'Currently only >exon< is supported'\n sys.exit(1)\n\n if not gene2idx.has_key(tuple([gene_id])):\n gene2idx[tuple([gene_id])] = gene_counter\n idx2gene[gene_counter] = tuple([gene_id])\n gene_counter += 1\n\n ### store for each position of the transcriptome a tuple containing all overlapping gene IDs\n ### assume positions are 1 based and in closed intervals\n try:\n start = int(sl[3]) - 1\n except ValueError:\n start = 0\n try:\n stop = int(sl[4])\n except ValueError:\n stop = 1\n\n if not sl[0] in exons:\n exons[sl[0]] = dict()\n\n if options.mask_alternative_overlap:\n try:\n exons[sl[0]][trans_id].append([start, stop])\n except KeyError:\n exons[sl[0]][trans_id] = [[start, stop]]\n\n ### check, if there is already a different gene ID present, form a combination ID\n if sp.any(anno[sl[0]][start:stop] > 0):\n for p in range(start, stop):\n if anno[sl[0]][p] == 0:\n new_set = tuple([gene_id])\n else:\n new_set = tuple(set(idx2gene[anno[sl[0]][p]]) | set([gene_id]))\n try:\n anno[sl[0]][p] = gene2idx[new_set]\n except KeyError:\n anno[sl[0]][p] = gene_counter\n gene2idx[new_set] = gene_counter\n idx2gene[gene_counter] = new_set\n gene_counter += 1\n else:\n anno[sl[0]][start:stop] = sp.array([gene2idx[tuple([gene_id])]] * (stop - start), dtype = 'int32')\n if options.verbose:\n print >> sys.stderr, \"... 
done\"\n\n ### mask all positions in the genome, where we have more than one annotated gene\n if options.mask_gene_overlap:\n total_pos = 0\n total_masked = 0\n if options.verbose:\n print >> sys.stderr, '\\nMasking positions due to gene overlap:'\n for c in anno:\n masked_pos = 0\n p_idx = sp.where(anno[c] > 1)[0]\n pos = p_idx.shape[0]\n for p in p_idx:\n if len(idx2gene[anno[c][p]]) > 1:\n anno[c][p] = 1\n masked_pos += 1\n total_pos += pos\n total_masked += masked_pos\n if options.verbose:\n print >> sys.stderr, '\\t%s: %i (%i) masked (total) - %.2f %%' % (c, masked_pos, pos, masked_pos / float(max(1, pos)) * 100)\n if options.verbose:\n print >> sys.stderr, \"Total positions: %i\\nMasked positions: %i (%.2f %%)\" % (total_pos, total_masked, total_masked / float(max(1, total_pos)) * 100)\n print >> sys.stderr, \"... done\"\n\n ### mask all positions in the genome, where exonic and intronic positions are annotated\n if options.mask_alternative_overlap:\n if options.verbose:\n print >> sys.stderr, '\\nMasking positions due to exon/intron overlap:'\n for c in exons:\n masked_pos = 0\n for t in exons[c]:\n if len(exons[c][t]) < 2:\n continue\n ### pre-process exon\n tmp = sp.array(exons[c][t], dtype='int')\n s_idx = sp.argsort(tmp[:, 0])\n tmp = tmp[s_idx, :]\n ### mask positions that are intronic and exonic\n for e in range(1, tmp.shape[0]):\n p_idx = sp.where(anno[c][tmp[e - 1, 1] + 1:tmp[e, 0]] > 1)[0]\n if p_idx.shape[0] > 0:\n anno[c][p_idx + tmp[e - 1, 1] + 1] = 1\n masked_pos += p_idx.shape[0]\n total_masked += masked_pos\n if options.verbose:\n print >> sys.stderr, '\\t%s: %i pos masked' % (c, masked_pos)\n if options.verbose:\n print >> sys.stderr, 'Masked positions: %i' % total_masked\n print >> sys.stderr, \"... done\"\n\n \n if options.verbose:\n print >> sys.stderr, \"Storing exon array in HDF5 %s ...\" % (options.anno_hdf5 + '.exons.hdf5')\n\n ### store annotation in hdf5\n hdf_out = h5py.File(options.anno_hdf5 + '.exons.hdf5', 'w')\n for c in anno.keys():\n hdf_out.create_dataset(name = c, data = anno[c])\n hdf_out.close()\n\n if options.verbose:\n print >> sys.stderr, \"... pickling gene ID map\"\n\n cPickle.dump((idx2gene, gene2idx), open(options.anno_hdf5 + '.pickle', 'w'))\n\n if options.verbose:\n print >> sys.stderr, \"... done\"\n\n return (anno, idx2gene, gene2idx)",
"def load_gtf_data(fil):\n ofunc = get_open_function(fil)\n\n gene_data = {}\n exon_data = {}\n with ofunc(fil, 'rt') as fh:\n for line in fh:\n if line.startswith('#'):\n continue\n cols = line.rstrip('\\r\\n').split('\\t')\n fclass = cols[2]\n if fclass == 'gene':\n gene_id, gene_type = extract_metadata(cols[8])\n gene_data[gene_id] = gene_type\n elif fclass == 'exon':\n gene_id, gene_type = extract_metadata(cols[8])\n if gene_id not in exon_data:\n exon_data[gene_id] = []\n val = (int(cols[3]), int(cols[4]))\n exon_data[gene_id].append(val)\n return gene_data, exon_data",
"def _read_gtf(gtf):\n if not gtf:\n return gtf\n db = defaultdict(list)\n with open(gtf) as in_handle:\n for line in in_handle:\n if line.startswith(\"#\"):\n continue\n cols = line.strip().split(\"\\t\")\n name = [n.split(\"=\")[1] for n in cols[-1].split(\";\") if n.startswith(\"Name\")]\n chrom, start, end, strand = cols[0], cols[3], cols[4], cols[6]\n if cols[2] == \"miRNA_primary_transcript\":\n db[name[0]].append([chrom, int(start), int(end), strand])\n return db",
"def findgene(fname, dbpaths=dbpaths):\n scaf = []\n gbeg = []\n gend = []\n gfor = []\n gsta = []\n gdif = []\n cuffgenes = {}\n\n fobj = open(fname)\n for line in fobj:\n col = line.split()\n scaf.append( re.search('[sCcafold]*[0-9]+', col[3]).group() )\n gbeg.append( int(re.search(':(.*)-', col[3]).groups()[0]) )\n gend.append( int(re.search('-(.*)', col[3]).groups()[0]) )\n gfor.append(float(col[7]))\n gsta.append(float(col[8]))\n gdif.append(float(col[9]))\n\n fobj.close()\n print \"Significant transcripts read\"\n\n\n for result in range(len(scaf)):\n cur_scaf = scaf[result]\n cur_gbeg = gbeg[result]\n cur_gend = gend[result]\n cur_gfor = gfor[result]\n cur_gsta = gsta[result]\n cur_gdif = gdif[result]\n fobj = open(dbpaths['gff'])\n for line in fobj:\n col = line.split()\n if col[2] == \"mRNA\":\n if col[0] == cur_scaf:\n if float(col[3]) <= cur_gend and float(col[4]) >= cur_gbeg:\n try:\n cuffgenes[(cur_scaf, cur_gbeg)] = (re.search('ID=([^;]*);', col[8]).groups()[0], cur_scaf, cur_gbeg, cur_gend, cur_gfor, cur_gsta, cur_gdif)\n except AttributeError:\n print col[8]\n fobj.close()\n\n return cuffgenes",
"def readGenes(gtf):\n #read gtf\n genes = HTSeq.GenomicArrayOfSets(\"auto\", stranded=False)\n gs = {}\n for line in open(gtf):\n if line.startswith(\"#\"):\n continue\n line = line.split(\"\\n\")[0].split(\"\\t\")\n if line[2] != 'exon':\n continue\n ds = parseGtfFeature(line[8])\n key = \"|\".join([ds[\"gene_id\"], ds[\"gene_name\"]])\n nline = [\n line[0], line[3], line[4],\n \"|\".join([ds[\"gene_id\"], ds[\"gene_name\"]]), \".\", line[6]\n ]\n if key not in gs:\n gs[key] = [line[0], int(line[3]), int(line[4])]\n else:\n if int(line[3]) < gs[key][1]:\n gs[key][1] = int(line[3])\n if int(line[4]) > gs[key][2]:\n gs[key][2] = int(line[4])\n for g, v in gs.items():\n iv = HTSeq.GenomicInterval(v[0], v[1], v[2])\n genes[iv] += g\n return genes",
"def parse_anno_from_gtf(options, contigs):\n\n anno = dict()\n idx2gene = dict()\n gene2idx = dict()\n\n if options.verbose:\n print >> sys.stderr, \"Parsing annotation from %s ...\" % options.anno\n \n ### init genome structure\n for c in contigs:\n if options.verbose:\n print >> sys.stderr, 'reserving memory for chr %s of len %s' % (c, contigs[c])\n anno[c] = sp.zeros((contigs[c] + 1, ), dtype = 'int32')\n\n ### init list of considered GFF fields\n fields = options.fields.split(',')\n\n ### generate a list of exons with attached gene/transcript information\n ### one list per chromsome\n counter = 1\n gene_counter = 2 ### 0 is default for no coverage and 1 is mask for overlap\n\n exons = dict()\n\n t0 = time.time()\n for line in open(options.anno, 'r'):\n if options.verbose and counter % 10000 == 0:\n print >> sys.stderr, '.',\n if counter % 100000 == 0:\n t1 = time.time() - t0\n print >> sys.stderr, \"%i - took %.2f secs\" % (counter, t1)\n t0 = time.time()\n counter += 1 \n\n if line[0] == '#':\n continue\n sl = line.strip().split('\\t')\n \n if not sl[2] in fields:\n continue\n\n if sl[2] != 'exon':\n print >> sys.stderr, 'Currently only >exon< is supported'\n sys.exit(1)\n\n tags = get_tags_gtf(sl[8])\n gene_id = tags['gene_id']\n trans_id = tags['transcript_id']\n\n if not gene2idx.has_key(tuple([gene_id])):\n gene2idx[tuple([gene_id])] = gene_counter\n idx2gene[gene_counter] = tuple([gene_id])\n gene_counter += 1\n\n try:\n start = int(sl[3]) - 1\n except ValueError:\n start = 0\n try:\n stop = int(sl[4])\n except ValueError:\n stop = 1\n\n chrm = sl[0]\n if chrm == 'chrM_rCRS':\n chrm = 'chrM'\n\n if not chrm in exons:\n exons[chrm] = dict()\n\n if options.mask_alternative_overlap:\n try:\n exons[chrm][trans_id].append([start, stop])\n except KeyError:\n exons[chrm][trans_id] = [[start, stop]]\n\n ### check, if there is already a different gene ID present, form a combination ID\n if sp.any(anno[chrm][start:stop] > 0):\n for p in range(start, stop):\n if anno[chrm][p] == 0:\n new_set = tuple([gene_id])\n else:\n new_set = tuple(set(idx2gene[anno[chrm][p]]) | set([gene_id]))\n try:\n anno[chrm][p] = gene2idx[new_set]\n except KeyError:\n anno[chrm][p] = gene_counter\n gene2idx[new_set] = gene_counter\n idx2gene[gene_counter] = new_set\n gene_counter += 1\n else:\n anno[chrm][start:stop] = sp.array([gene2idx[tuple([gene_id])]] * (stop - start), dtype = 'int32')\n if options.verbose:\n print >> sys.stderr, \"... done\"\n\n ### mask all positions in the genome, where we have more than one annotated gene\n if options.mask_gene_overlap:\n total_pos = 0\n total_masked = 0\n if options.verbose:\n print >> sys.stderr, '\\nMasking positions due to gene overlap:'\n for c in anno:\n masked_pos = 0\n p_idx = sp.where(anno[c] > 1)[0]\n pos = p_idx.shape[0]\n #print >> sys.stderr, 'found %i positions' % p_idx.shape[0]\n for p in p_idx:\n if len(idx2gene[anno[c][p]]) > 1:\n anno[c][p] = 1\n masked_pos += 1\n total_pos += pos\n total_masked += masked_pos\n if options.verbose:\n print >> sys.stderr, '\\t%s: %i (%i) masked (total) - %.2f %%' % (c, masked_pos, pos, masked_pos / float(max(1, pos)) * 100)\n if options.verbose:\n print >> sys.stderr, \"Total positions: %i\\nMasked positions: %i (%.2f %%)\" % (total_pos, total_masked, total_masked / float(max(1, total_pos)) * 100)\n print >> sys.stderr, \"... 
done\"\n\n ### mask all positions in the genome, where exonic and intronic positions are annotated\n if options.mask_alternative_overlap:\n total_masked = 0\n if options.verbose:\n print >> sys.stderr, '\\nMasking positions due to exon/intron overlap:'\n for c in exons:\n masked_pos = 0\n for t in exons[c]:\n if len(exons[c][t]) < 2:\n continue\n ### pre-process exon\n tmp = sp.array(exons[c][t], dtype='int')\n s_idx = sp.argsort(tmp[:, 0])\n tmp = tmp[s_idx, :]\n ### mask positions that are intronic and exonic\n for e in range(1, tmp.shape[0]):\n p_idx = sp.where(anno[c][tmp[e - 1, 1] + 1:tmp[e, 0]] > 1)[0]\n if p_idx.shape[0] > 0:\n anno[c][p_idx + tmp[e - 1, 1] + 1] = 1\n masked_pos += p_idx.shape[0]\n total_masked += masked_pos\n if options.verbose:\n print >> sys.stderr, '\\t%s: %i pos masked' % (c, masked_pos)\n if options.verbose:\n print >> sys.stderr, 'Masked positions: %i' % total_masked\n print >> sys.stderr, \"... done\"\n\n if options.verbose:\n print >> sys.stderr, \"Storing exon array in HDF5 %s ...\" % (options.anno_hdf5 + '.exons.hdf5')\n\n ### store annotation in hdf5\n hdf_out = h5py.File(options.anno_hdf5 + '.exons.hdf5', 'w')\n for c in anno.keys():\n hdf_out.create_dataset(name = c, data = anno[c])\n hdf_out.close()\n\n if options.verbose:\n print >> sys.stderr, \"... pickling gene ID map\"\n\n cPickle.dump((idx2gene, gene2idx), open(options.anno_hdf5 + '.pickle', 'w'))\n\n if options.verbose:\n print >> sys.stderr, \"... done\"\n\n return (anno, idx2gene, gene2idx)",
"def inizializzazione(fileInput, geneNames):\n\t\n\tdictTranscript \t= {}\n\tdictGenes \t\t= {}\n\tdictEsoni \t\t= {}\n\tdictIntroni \t= {}\n\tdictGeneChr \t= {}\n\n\t# - Filtraggio file di annotazione in input per 'exon' e per nome gene\n\t# - Calcolo delle coordinate dei geni nei cromosomi\n\t#\n\tlines, dictGeneChr = filtraFileDiAnn(fileInput, geneNames)\n\t\n\t\n\t# Indici all'interno del dizionario degli esoni\n\t#\n\tidx_starts \t= 0\n\tidx_ends \t= 1\n\tidx_strand \t= 2\n\t\n\t# Indici all'interno del dizionario dei Geni\n\t#\n\tidx_transcripts = 2\n\n\n\t# Creazione dei dizionari utili alla risoluzione del problema B\n\t#\n\tfor riga in lines:\n\t\tcromosoma \t\t= riga[0]\n\t\tstart_esone \t= riga[3]\n\t\tend_esone \t\t= riga[4]\n\t\tstrand \t\t\t= riga[6]\n\t\tgeneName \t\t= riga[11]\n\t\ttranscriptName \t= riga[12]\n\t\t\n\t\tTranscriptID \t= riga[9]\n\t\tGeneID \t\t\t= riga[8]\n\t\n\t\t# Creazione del dizionario dei transcritti\n\t\t#\n\t\tdictTranscript[TranscriptID] = [transcriptName, GeneID]\n\t\t\n\t\t# Creazione del dizionario dei geni\n\t\t#\n\t\tif not dictGenes.has_key(GeneID):\t\t\t\t\t\t\t\t\t\t# Se il GeneID non e' presente..\n\t\t\tdictGenes[GeneID] = [geneName, cromosoma, [TranscriptID]]\t\t\t# ..nel dizionario (come key)\n\t\telif TranscriptID not in dictGenes[GeneID][idx_transcripts]:\t\t\t# Se il GeneID e' presente ma non lo e'..\n\t\t\tdictGenes[GeneID][idx_transcripts].append(TranscriptID)\t\t\t\t# ..il TranscriptID questo si aggiunge alla lista\n\t\t\n\t\t# Creazione del dizionario degli esoni\n\t\t#\n\t\tif not dictEsoni.has_key(TranscriptID):\t\t\t\t\t\t \t# Se il TranscriptID non e' presente.. \n\t\t\tdictEsoni[TranscriptID] = [[start_esone],[end_esone],strand] \t# ..nel dizionario (come key)\n\t\telse:\n\t\t\tdictEsoni[TranscriptID][idx_starts].append(start_esone)\t\t\t \t# Il TranscriptID e' gia' presente quindi..\n\t\t\tdictEsoni[TranscriptID][idx_ends].append(end_esone)\t\t\t \t# ..si aggiunge l'esone alla lista degli esoni\n\t\t\t\n\t\t\t\n\t# Creazione del dizionario degli introni\n\t#\n\tfor TranscriptID in dictEsoni:\n\t\tesoniPerTranscript = len(dictEsoni[TranscriptID][idx_starts])\t \t# Si valuta il nr di esoni per TranscriptID corrente\n\t\t\n\t\tif int(esoniPerTranscript) > 1:\n\t\t\tstart_introni \t= []\t\t\t\t\t\t\t\t\t\t\t # Si preparano le variabili necessarie\n\t\t\tend_introni \t= []\n\t\t\t\n\t\t\tstart_esoni \t= []\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\tend_esoni \t\t= []\n\t\t\t\n\t\t\t# Si considera lo strand relativo al TranscriptID\n\t\t\t#\n\t\t\tif dictEsoni[TranscriptID][idx_strand] == '+':\t\t\t\t\t \t# Strand positivo -> esoni scritti in ordine crescente\n\t\t\t\tstrand = True\n\t\t\t\tstart_esoni = dictEsoni[TranscriptID][idx_starts]\n\t\t\t\tend_esoni \t= dictEsoni[TranscriptID][idx_ends]\n\t\t\t\t\n\t\t\telse:\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \t# Strand negativo -> esoni scritti in ordine inverso..\n\t\t\t\tstrand = False\t\t\t\t\t\t\t\t\t\t\t\t \t# ..e per comodita' sono invertiti in ordine crescente\n\t\t\t\tstart_esoni = dictEsoni[TranscriptID][idx_starts][::-1] \t \n\t\t\t\tend_esoni \t= dictEsoni[TranscriptID][idx_ends][::-1]\n\n\t\t\t# Calcolo delle regioni introniche\n\t\t\t#\n\t\t\ti = 0\n\t\t\twhile i < int(esoniPerTranscript) - 1:\t\t\t\t\t\t\t \t# Per ogni coppia di esoni\n\t\t\t\tif (int(start_esoni[i+1]) - int(end_esoni[i])) > 2:\t\t\t \t# Se la regione tra due esoni consecutivi e' > 2..\n\t\t\t\t\tstart_introni.append(int(end_esoni[i]) + 1)\t\t\t \t# ..(considerando che gli estremi dell'introne 
sono..\n\t\t\t\t\tend_introni.append(int(start_esoni[i+1]) - 1)\t\t \t \t#..interni a quelli dei due esoni consecutivi correnti)\n\t\t\t\ti += 1\n\t\t\t\n\t\t\tif not strand:\t\t\t\t\t\t\t\t\t\t\t\t \t# Si mantiene traccia del fatto che derivano da un..\n\t\t\t\tstart_introni.reverse()\t\t\t\t\t\t\t\t\t \t# ..TranscriptID con strand negativo..\n\t\t\t\tend_introni.reverse()\t\t\t\t\t\t\t\t\t\t\t# ..(si inverte l'ordine degli introni)\n\t\t\n\t\t\tdictIntroni[TranscriptID] = [start_introni, end_introni]\n\n\n\t# Si eliminano i geni che non presentano regioni introniche:\n\t# \t- dalla lista di tutti i geni si rimuovono quelli che hanno introni;\n\t#\t- dal dizionario si rimuovono quelli rimasti nella lista.\n\t#\n\ttuttiIGeni = geneNames.keys()\n\tfor TranscriptID in dictIntroni:\n\t\tgeneID = dictTranscript[TranscriptID][1]\n\t\tnomeGene = dictGenes[geneID][0]\n\t\t\n\t\tif nomeGene in tuttiIGeni:\n\t\t\ttuttiIGeni.remove(nomeGene)\n\n\n\tfor nomeGene in tuttiIGeni:\n\t\tdel geneNames[nomeGene]\n\t\tprint 'Il gene %s non presenta regioni introniche.' % nomeGene\n\n\n\treturn [dictTranscript, dictGenes, dictEsoni, dictIntroni, dictGeneChr]",
"def read_gene_families(gftxt, protfile = None, cdsfile = None, wrkdir = None):\n gene_families = []\n if protfile is None and cdsfile is None:\n logging.info(\"Gene families need to have sequences!\")\n with open(gftxt, 'r') as f:\n for line in f:\n line = line.rstrip()\n x = line.split()\n gf_id = x.pop(0)[:-1]\n gf_genes = x\n gene_families.append(GeneFamily(gf_id=gf_id, gf_members=gf_genes))\n return gene_families\n \n if protfile is not None:\n prot = SeqIO.to_dict(SeqIO.parse(protfile, \"fasta\"))\n\n if cdsfile is not None:\n cds = SeqIO.to_dict(SeqIO.parse(cdsfile, \"fasta\"))\n \n with open(gftxt, 'r') as handle:\n for line in handle:\n line = line.rstrip()\n x = line.split()\n gf_id = x.pop(0)[:-1]\n gf_genes = x\n gf_prot = {}\n gf_cds = {}\n for gid in x:\n if prot[gid][-1:].seq == '*':\n gf_prot[gid] = prot[gid][:-1]\n else:\n gf_prot[gid] =prot[gid]\n if cds[gid][-3:].seq == \"TAA\" or \\\n cds[gid][-3:].seq == \"TAG\" or \\\n cds[gid][-3:].seq == \"TGA\":\n gf_cds[gid] = cds[gid][:-3]\n else:\n gf_cds[gid] = cds[gid]\n gene_families.append(GeneFamily(gf_id = gf_id, gf_members = gf_genes, \n prot_seqs = gf_prot, cds_seqs = gf_cds, wrkdir=wrkdir))\n return gene_families",
"def create_t2g_from_gtf(gtf_path, t2g_path, intron=False):\n logger.info('Creating transcript-to-gene mapping at {}'.format(t2g_path))\n gtf = GTF(gtf_path)\n with open_as_text(t2g_path, 'w') as f:\n for entry in gtf.entries():\n if entry['feature'] == 'transcript':\n transcript_id = entry['group']['transcript_id']\n transcript_version = entry['group'].get(\n 'transcript_version', None\n )\n transcript = '{}.{}'.format(\n transcript_id, transcript_version\n ) if transcript_version else transcript_id\n gene_id = entry['group']['gene_id']\n gene_version = entry['group'].get('gene_version', None)\n gene = '{}.{}'.format(\n gene_id, gene_version\n ) if gene_version else gene_id\n gene_name = entry['group'].get('gene_name', '')\n f.write('{}\\t{}\\t{}\\n'.format(transcript, gene, gene_name))\n\n if intron:\n f.write(\n '{}\\t{}\\t{}\\n'.format(\n transcript + '-I', gene, gene_name\n )\n )\n\n return {'t2g': t2g_path}",
"def read_GFF(gff_filename):\n gff_info = {} # loci --> LocusInfo\n tmp = {} # loci PB.X --> list of GFF records for PB.X.Y\n\n for r in collapseGFFReader(gff_filename):\n m = rex_pbid.match(r.seqid)\n if m is None:\n raise Exception(f\"Expected PBID format PB.X.Y but saw {r.seqid}\")\n locus = m.group(1) # ex: PB.1\n if locus not in tmp:\n tmp[locus] = [r]\n gff_info[locus] = LocusInfo(\n chrom=r.chr, strand=r.strand, regions=None, isoforms=None\n )\n else:\n if gff_info[locus].chrom != r.chr:\n logger.warning(\n f\"WARNING: Expected {r.seqid} to be on {gff_info[locus].chrom} but saw {r.chr}. Could be minimap2 multi-mapping inconsistency for repetitive genes. Check later.\\n\"\n )\n tmp[locus].append(r)\n\n # now figure out the exonic regions for each gene PB.X\n for locus, records in tmp.items():\n c = ClusterTree(0, 0)\n for r in records:\n for e in r.ref_exons:\n c.insert(\n max(0, e.start - extra_bp_around_junctions),\n e.end + extra_bp_around_junctions,\n 1,\n )\n\n regions = [(a, b) for (a, b, junk) in c.getregions()]\n regions[0] = (max(0, regions[0][0] - __padding_before_after__), regions[0][1])\n regions[-1] = (\n max(0, regions[-1][0]),\n regions[-1][1] + __padding_before_after__,\n )\n gff_info[locus] = LocusInfo(\n chrom=gff_info[locus].chrom,\n strand=gff_info[locus].strand,\n regions=regions,\n isoforms=[r.seqid for r in records],\n )\n\n return gff_info",
"def Extract_gene_type(gtf_file):\n gene_type_dic = {}\n for i in range(0,len(gtf_file)):\n if '##' not in gtf_file[i]:\n row = gtf_file[i].strip().split('\\t')\n if row[2] == 'transcript':\n trans_id = row[8].split('transcript_id \"')[1].split('\";')[0]\n #print trans_id\n gene_type_dic[trans_id] = row[8].split('transcript_type \"')[1].split('\";')[0]\n return gene_type_dic",
"def generate_gff( mapfile, funtax_orf_file ):\n annotation2assembly_map = pd.read_table(mapfile,\n names=['annotation','assembly','length'],\n index_col='annotation')\n funtax_gff = pd.read_table( funtax_orf_file.name, engine='python', encoding='ISO-8859-1', quoting=3)\n funtax_gff['seqid'] = funtax_gff.join(annotation2assembly_map, on='Contig_Name')['assembly']\n funtax_gff['source'] = 'Prodigal_v2.00'\n funtax_gff['type'] = 'CDS'\n funtax_gff['score'] = 100.0\n funtax_gff['phase'] = 0\n funtax_gff['attributes'] = funtax_gff['ORF_ID'].str.replace(r'(.*)', r'ID=\\1;')\n return funtax_gff[['seqid','source', 'type','start', 'end', 'score', 'strand','phase','attributes']]",
"def process_gff(gff_file, feat='CDS', id_sym=\"gene_id=\"):\n gene_to_gene_length = {}\n with open(gff_file, \"r\") as fh:\n for line in fh:\n line = line.strip()\n if line.startswith('>'):\n break\n elif line.startswith((\"#\", \" \")) or len(line) == 0:\n continue\n elif line.split('\\t')[2] != feat:\n continue\n else:\n start = int(line.split(\"\\t\")[3].strip())\n end = int(line.split(\"\\t\")[4].strip())\n gene_length = abs(end - start)/1000\n #prokka = line.split(\"\\t\")[-1].split(\";\")[0].strip(id_sym)\n prokka = line.split(\"\\t\")[-1].split(id_sym)[1].split(\";\")[0]\n # This would give me the prokka id\n gene_to_gene_length[prokka] = gene_length\n return gene_to_gene_length",
"def write_exons(input_file, gtf_chrom_dict):\n\n dotexon = open(input_file + '.exon', 'w')\n if input_file[0:3] == 'ref':\n interval_best_matches = MatchingDicts.ref_best_matches\n dotexon.write(\"ExonID\\tChromosome\\tReference(Coordinates[strand]|Transcript[exon_number])\\tMatch_Type\\t\" +\n \"Query(Best_Match_Coordinates|Transcript[exon_number])\\tShared\\tBase_Difference\\tNotes\\n\")\n else:\n interval_best_matches = MatchingDicts.interval_best_matches\n dotexon.write(\"ExonID\\tChromosome\\tQuery(Coordinates[strand]|Transcript[exon_number])\\tMatch_Type\\t\" +\n \"Reference(Best_Match_Coordinates|Transcript[exon_number])\\tShared\\tBase_Difference\\tNotes\\n\")\n gtf_exons = {}\n for chrom in gtf_chrom_dict:\n for strand in gtf_chrom_dict[chrom]:\n len_after = len(gtf_exons) + len(gtf_chrom_dict[chrom][strand][1])\n gtf_exonc = gtf_exons.copy()\n gtf_exons.update(gtf_chrom_dict[chrom][strand][1])\n if len(gtf_exons) < len_after:\n print(\"Dictionary was OVERRITTEN\");\n ids = [(keyid, valid.id, valid.chrom, valid.strand, valid.begin, valid.end, \"next\", gtf_exonc[keyid].id, gtf_exonc[keyid].chrom, gtf_exonc[keyid].strand, gtf_exonc[keyid].begin, gtf_exonc[keyid].end) for keyid, valid in gtf_chrom_dict[chrom][strand][1].items() if keyid in gtf_exonc]\n print(ids)\n exit()\n for exon_id in sorted(gtf_exons):\n exon = gtf_exons[exon_id]\n cinter = Interval(exon.begin, exon.end, exon.gtf_interval)\n bests = interval_best_matches.get(cinter, None)\n # If a match (best match) was found write each match in .exon file\n if bests:\n for bintr, bval in bests.items():\n dotexon.write('{}\\t{}\\t{}-{}[{}]|{}[{}]\\t{}\\t{}-{}[{}]|{}\\t{}\\t({},{})\\t({})\\n'.format(\n exon_id, exon.chrom, cinter.begin, cinter.end - 1, cinter.data.strand, exon.transcript_id,\n cinter.data.transcriptIds[exon.transcript_id], bval[1], bintr.begin, bintr.end - 1,\n bintr.data.strand, '|'.join(['{}[{}]'.format(k, v) for k, v in bintr.data.transcriptIds.items()]),\n bval[0], bintr.begin - cinter.begin, cinter.end - bintr.end, NOTES[cinter.data.note]\n ))\n else:\n dotexon.write('{}\\t{}\\t{}-{}[{}]|{}[{}]\\tNovel\\t-\\t-\\t-\\t-\\n'.format(\n exon_id, exon.chrom, cinter.begin, cinter.end - 1, cinter.data.strand, exon.transcript_id,\n cinter.data.transcriptIds[exon.transcript_id]\n ))\n dotexon.close()",
"def read_ann_file(fileid, ann_dir):\n ann_file = \"%s/%s.ann\"%(ann_dir,fileid)\n with codecs.open(ann_file, 'r', 'utf-8') as f:\n data = f.read()\n rows = data.split('\\n')\n entities = {}\n ent_count = 0\n relations = {}\n #annotations = []\n for row in rows:\n cols = row.split(\"\\t\")\n ann_id = cols[0]\n if(u\"#\" in cols[0]):\n tmp = cols[1].split()[1:],\" \",cols[2]\n annotations.append(tmp)\n elif(len(cols)==3 and u\"T\" in cols[0]):\n # is an entity\n ent_count += 1\n ent_type = cols[1].split()[0]\n ranges = cols[1].replace(\"%s\"%ent_type,\"\")\n if \";\" in ranges:\n ranges = [{\"start\":int(r.split()[0]),\"end\":int(r.split()[1])} for r in ranges.split(';')]\n else:\n ranges = [{\"start\":int(ranges.split()[0]),\"end\":int(ranges.split()[1])}]\n entities[cols[0]] = {\"ann_id\":ann_id\n ,\"entity_type\": ent_type\n ,\"positions\": ranges\n ,\"surface\":cols[2]\n ,\"continuation\":False}\n elif(len(cols)>=2 and u\"R\" in cols[0]):\n rel_type, arg1, arg2 = cols[1].split()\n relations[cols[0]] = {\"ann_id\":ann_id\n ,\"arguments\":(arg1.split(\":\")[1], arg2.split(\":\")[1])\n ,\"relation_type\":rel_type}\n else:\n if(len(cols)>1):\n if(cols[1].split()[0]==\"Continuation\"):\n continued_entity_id = cols[1].split()[1]\n #print cols[1].split()[0],continued_entity_id\n entities[continued_entity_id][\"continuation\"] = True\n return entities, relations",
"def readSoft2Dict(softFileName,index=11):\n import gzip\n probe2Entrez = {}\n Flag = False\n if softFileName[-2:] == \"gz\":\n softHandle = gzip.open(softFileName,\"rt\")\n else:\n softHandle = open(softFileName,\"r\")\n softMatrix = softHandle.readlines()\n for line in softMatrix:\n line = line.split(\"\\t\")\n #if len(line[0]) <5 :\n # print(line[0].lower())\n if len(line) <= index:\n continue\n if Flag:\n #print(line)\n if line[0] in probe2Entrez.keys():\n probe2Entrez[line[0]].append(line)\n else:\n probe2Entrez[line[0]] = [line]\n if line[0].lower() == 'id':\n Flag = True\n multipleKeyList = []\n for key in probe2Entrez: #discard probs refer to multiple genes\n if len(probe2Entrez[key]) > 1:\n multipleKeyList.append(key)\n for key in multipleKeyList: #can't del keys of dictionary when iterating it\n del probe2Entrez[key]\n return probe2Entrez"
] | [
"0.75707626",
"0.73930323",
"0.7180306",
"0.6962576",
"0.6809232",
"0.6784794",
"0.67178106",
"0.6689208",
"0.6666986",
"0.6539142",
"0.6533102",
"0.635201",
"0.62793416",
"0.62397146",
"0.6231889",
"0.6221245",
"0.6214062",
"0.61779433",
"0.61518013",
"0.6121378",
"0.6117332",
"0.60378075",
"0.60252684",
"0.59683895",
"0.5954734",
"0.590473",
"0.58986235",
"0.58907664",
"0.584574",
"0.58084035"
] | 0.77669865 | 0 |
Show all or a specific predefined statistic. | def show_predefined_statistics(idx: int = -1) -> None:
    # With a negative index (the default), list all predefined statistics;
    # otherwise print only the name of the statistic at the given index.
    if idx < 0:
        print(PermutationStatistic._predefined_statistics())
    else:
        print(PermutationStatistic._STATISTICS[idx][0]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def showStat(self):\n print \">>[Stat Information]:\"\n if self.gid != DEFALUT_GROUP_ID:\n print \"Gid = %u\" % self.gid\n print \"[Queries] Arp = %u, Original_to_controller= %u, Current_to_controller = %u\" % (self.query_arp, self.query_control_origin, self.query_control_current)\n print \"TP = %u, TN = %u, FP = %u\" % (self.tp, self.tn, self.fp)\n print \"[Flow] local_switch = %u, within the group = %u,across groups = %u\" % (self.flow_local, self.flow_within_group, self.flow_cross_group)\n print \"[Traffic] local_switch = %u byte, within the group = %u byte,across groups = %u byte\" % (self.byte_local, self.byte_within_group, self.byte_cross_group)",
"def print_stat(self):\n all_stat = self.get_all_stat()\n for stat_type, stat in all_stat.items():\n print(stat_type,\":\",stat, end=' / ')",
"def stat(**kwargs):\n print(\"output stats\")",
"def showStatistics(self):\n\n deviceName = self.deviceName()\n\n if deviceName:\n stats = a.sys.net.lnx.device.DeviceUtils.getStatistics(self.name, self._log, deviceName) \n if stats:\n for key in stats:\n print \"%s: %s\" % (key, stats[key])",
"def displayStatistics(self):\n return \"\"",
"def show_stats(self, output_type='count'):\n if not self._stats:\n raise TypeError(\"self._stats is not defined. Try running run_parser first!\")\n self._stats.print_spec(output_type)",
"def advancedStats():",
"def show_stats(self):\n print(self.team_one.name + \" stats: \")\n self.team_one.stats()\n print(self.team_two.name + \" stats: \")\n self.team_two.stats()",
"def showBestStats(self) :\n Scenario.messageBestStats()\n self.showBestStatLevelReached()\n self.showNbCoupFindFirstAttempt()\n self.showBestGainWon()\n self.showBestBetUse()\n self.showNbLevelWon()",
"def statistics(self, **kwargs) -> None:\n print(\n tabulate.tabulate(\n list(self._iter_statistics(**kwargs)),\n headers=[\"path\", \"type\", \"occurences\", \"%\"],\n floatfmt=\".3f\",\n )\n )",
"def show_stats(x):\n print(\"min =\", x.min())\n print(\"max =\", x.max())\n print(\"median =\", np.median(x))\n print(\"average =\", x.mean())\n print(\"std =\", x.std())",
"def show_stats(self):\n print(\"\\nName: \" + self.name)\n print(\"Element Type: \" + self.element)\n print(\"Health: \" + str(self.current_health) + \" / \" + str(self.max_health))\n print(\"Speed: \" + str(self.speed))",
"def print_stats(info):\n print_row(10, [\"Loss\", \"Err\", \"% Optimal\", \"% Success\"])\n print_row(10, [\n info[\"avg_loss\"], info[\"avg_error\"],\n info[\"avg_optimal\"], info[\"avg_success\"]])\n return info",
"def print_summary_stats(self) -> None:\n print(\"Number of Users: {}\".format(len(self.all_users)))\n print(\"Number of Utterances: {}\".format(len(self.utterances)))\n print(\"Number of Conversations: {}\".format(len(self.conversations)))",
"def dataset_statistics(dataset):\n print (dataset.describe())",
"def print_stats(self):\n if self.df_avg is None:\n self.collect_stats()\n\n print(\"Simulation Results\")\n print(tabulate(self.df_avg, headers=\"keys\", showindex=False, tablefmt=\"fancy_grid\"))\n print(\"FleetManager stats\")\n print(tabulate(self.manager_df, headers=\"keys\", showindex=False, tablefmt=\"fancy_grid\"))\n print(\"Customer stats\")\n print(tabulate(self.customer_df, headers=\"keys\", showindex=False, tablefmt=\"fancy_grid\"))\n print(\"Transport stats\")\n print(tabulate(self.transport_df, headers=\"keys\", showindex=False, tablefmt=\"fancy_grid\"))\n print(\"Station stats\")\n print(tabulate(self.station_df, headers=\"keys\", showindex=False, tablefmt=\"fancy_grid\"))",
"def descriptive_stats(array, verbose=True, label='', mean=False, plot=False):\n if mean:\n mean_ = np.mean(array)\n median = np.median(array)\n mini = np.min(array)\n maxi = np.max(array)\n first_qu = np.percentile(array, 25)\n third_qu = np.percentile(array, 75)\n\n if verbose:\n if mean:\n label += 'min={:.1f} / 1st QU={:.1f} / ave={:.1f} / med={:.1f} / '\n label += '3rd QU={:.1f} / max={:.1f}'\n print(label.format(mini, first_qu, mean_, median, third_qu, maxi))\n else:\n label += 'min={:.1f} / 1st QU={:.1f} / med={:.1f} / 3rd QU={:.1f} '\n label += '/ max={:.1f}'\n print(label.format(mini, first_qu, median, third_qu, maxi))\n\n if plot:\n boxplot(array, vert=False, meanline=mean, showfliers=True, sym='.')\n\n if mean:\n return mini, first_qu, mean_, median, third_qu, maxi\n else:\n return mini, first_qu, median, third_qu, maxi",
"def print_stat(self, returnTable=False):\n summary = PrettyTable([\"Set\", \"Name\", \"Number [-]\", \"Fraction [%]\"])\n summary.align = 'l'\n for name, df in self.subsets.items():\n summary.add_row([name, 'Normal', df[df.abnormal_XR == 0].shape[0], '{:.2%}'.format(df[df.abnormal_XR == 0].shape[0] / df.shape[0])])\n summary.add_row([name, 'Abnormal', df[df.abnormal_XR == 1].shape[0], '{:.2%}'.format(df[df.abnormal_XR == 1].shape[0] / df.shape[0])])\n summary.add_row([name, 'Normal known', df[df.semi_label == 1].shape[0], '{:.2%}'.format(df[df.semi_label == 1].shape[0] / df.shape[0])])\n summary.add_row([name, 'Abnormal known', df[df.semi_label == -1].shape[0], '{:.2%}'.format(df[df.semi_label == -1].shape[0] / df.shape[0])])\n summary.add_row([name, 'Unknown', df[df.semi_label == 0].shape[0], '{:.2%}'.format(df[df.semi_label == 0].shape[0] / df.shape[0])])\n if name != 'test' : summary.add_row(['----']*4)\n if returnTable:\n return summary\n else:\n print(summary)",
"def dataset_statistics(dataset):\n print(dataset.describe())",
"def dataset_statistics(dataset):\n print(dataset.describe())",
"def print_stats(stat_type: str, patient_dict: dict):\n print(\"{} stats\".format(stat_type), end=\"\\n---------------------------\\n\")\n for stat, value in patient_dict.items():\n print(stat, value, sep=\":\")\n print()",
"def statistics():\n return render_template('statistics.html'), 200",
"def show_summary(self, lang):\n return self.summary % self.vars",
"def _predefined_statistics() -> str:\n return \"\\n\".join(\n f\"[{i}] {name}\"\n for i, (name, _) in enumerate(PermutationStatistic._STATISTICS)\n )",
"def summary(self, tmin=None, tmax=None, stats='basic'):\n output = {\n 'basic': {\n 'evp': 'Explained variance percentage',\n 'rmse': 'Root mean squared error',\n 'avg_dev': 'Average Deviation',\n 'rsq': 'Pearson R^2',\n 'bic': 'Bayesian Information Criterion',\n 'aic': 'Akaike Information Criterion'},\n }\n\n # get labels and method names for stats output\n if stats == 'all':\n # sort by key, label, method name\n selected_output = sorted([(k, l, f) for k, d in output.items()\n for f, l in d.items()])\n else:\n # sort by name, method name\n selected_output = sorted([(0, l, f) for f, l in\n output[stats].items()])\n\n # compute statistics\n labels_and_values = [(l, getattr(self, f)(tmin=tmin, tmax=tmax))\n for _, l, f in selected_output]\n labels, values = zip(*labels_and_values)\n\n stats = pd.DataFrame(index=list(labels), data=list(values),\n columns=['Value'])\n stats.index.name = 'Statistic'\n return stats",
"def print_summary(column, data):\n print(data[column].describe())\n print()\n print('Количество уникальных значений:', data[column].nunique())\n print('Количество пустых значений:', data[column].isnull().sum())",
"def showWorstStats(self) :\n Scenario.messageWorstStats()\n self.showWorstGainWon()\n self.showWorstBetUse()\n self.showNbLevelLose()",
"def show_info(self, occurrence_item, all_events_opt, all_metrics_opt,\n all_opt):\n if all_opt:\n self.__show_all()\n sys.exit(0)\n\n if all_events_opt:\n self.__show_all_events()\n sys.exit(0)\n\n if all_metrics_opt:\n self.__show_all_metrics()\n sys.exit(0)\n\n occurrence = occurrence_item[0]\n if occurrence in self.events_list:\n self.__print_events_info(occurrence)\n elif occurrence in self.metrics_list:\n self.__print_metrics_info(occurrence)\n else:\n print(\"Event or Metric \\\"{}\\\" not found.\".format(occurrence))\n sys.exit(1)\n sys.exit(0)",
"def printStats(m2, m3, actual, top):\n print(\"\\nThe actual categories for this page are: %s\" % \", \".join(sorted(actual)))\n print(\"\\nThe suggested categories for this page are: %s\" % \", \".join(sorted([v for v, count in top])))\n print(\"\\nBOOLEAN MEASURE = %s\" %(m2 != 0))\n print(\"FRACTIONAL MEASURE = %0.2f\" %(m2))\n print(\"HIERARCHICAL MEASURE = %0.2f\\n\" %(m3))\n print(\"*\" * 150)",
"def show_stat_desc(self):\n qtw.QMessageBox.information(\n self, \"Stat Descriptions\", '*This list is not comprehensive, and '\n 'will vary depending on race/(sub)class.*\\n\\nStrength\\n\\t-damage '\n 'and attack bonuses for most melee and thrown weapons\\n\\t-carry '\n 'capacity\\n\\t-Athletics checks\\nDexterity\\n\\t-damage and attack '\n 'bonuses for ranged/finesse weapons\\n\\t-Acrobatics, Stealth, '\n 'Sleight of Hand, and Initiative checks\\nConstitution\\n\\t-hit '\n 'points\\n\\t-resistance to poisons, etc.\\nIntelligence\\n\\t-spell '\n 'save DC/attack bonus for Wizards\\n\\t-Arcana, History, '\n 'Investigation, Nature, and Religion checks\\nWisdom\\n\\t-spell save'\n ' DC/attack bonus for Druid, Cleric, and Ranger\\n\\t-Animal '\n 'Handling, Insight, Medicine, Perception, and Survival checks\\n'\n 'Charisma\\n\\t-spell save DC/attack bonus for Bard, Paladin, '\n 'Warlock and Sorceror\\n\\t-Deception, Intimidation, Performance and'\n ' Persuasion checks'\n )"
] | [
"0.6680718",
"0.65894514",
"0.65171486",
"0.65093523",
"0.6490466",
"0.6477537",
"0.64123726",
"0.640252",
"0.6308846",
"0.63020253",
"0.6296237",
"0.62802315",
"0.6250845",
"0.62471896",
"0.6246921",
"0.6233278",
"0.62177813",
"0.6141919",
"0.6141851",
"0.6141851",
"0.6107252",
"0.6067043",
"0.6040342",
"0.60283345",
"0.60269135",
"0.6024919",
"0.60227287",
"0.6004769",
"0.59943414",
"0.5992094"
] | 0.70260644 | 0 |
Check if the statistic (self) is preserved in a bijection. | def preserved_in(self, bijection: BijectionType) -> bool:
return all(self.func(k) == self.func(v) for k, v in bijection.items()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def invariant(self):\n\t\treturn (self.demand.popId != self.dstPopId)",
"def is_bijective(self):\n return self.is_injective() and self.is_surjective()",
"def check_all_transformed(cls, bijection: BijectionType) -> Dict[str, List[str]]:\n transf = defaultdict(list)\n all_stats = cls._get_all()\n for stat1, stat2 in product(all_stats, all_stats):\n if all(stat1.func(k) == stat2.func(v) for k, v in bijection.items()):\n transf[stat1.name].append(stat2.name)\n return dict(transf)",
"def over(self):\n return self.result is not None",
"def isBlade(self) -> bool:\n\n grade = None\n\n for i in range(self.layout.gaDims):\n if abs(self.value[i]) > _eps:\n if grade is None:\n grade = self.layout.gradeList[i]\n elif self.layout.gradeList[i] != grade:\n return False\n\n Vhat = self.gradeInvol()\n Vrev = ~self\n Vinv = Vrev/(self*Vrev)[0]\n\n gpres = grades_present(Vhat*Vinv, 0.000001)\n if len(gpres) == 1:\n if gpres[0] == 0:\n if np.sum(np.abs((Vhat*Vinv).value - (Vinv*Vhat).value)) < 0.0001:\n for e in basis_vectors(self.layout).values():\n gpres = grades_present(Vhat*e*Vrev, 0.000001)\n if not (len(gpres) == 1 and gpres[0] == 1):\n return False\n return True\n return False",
"def _no_improve(self):\n improve = [p-f for (f,p),_ in self.population]\n return np.mean(improve) < 1.0",
"def invariant(self):\n\t\treturn ((self.vcdnId > 0) and (self.popId > 0))",
"def __bool__(self):\n return not(self.outcome != 0 or self.filled)",
"def __invert__(self):\n return NotAny(self)",
"def has_data(self):\n return ([0] != self.__contexts) and ([0] != self.__weights)",
"def is_cyclically_reduced(self):\n if not self:\n return True\n return self[0] != self[-1]**-1",
"def __bool__(self):\n return _osgAnimation.mapVertexInfluence___bool__(self)",
"def uniform_dz(self):\n return self.dz is not None",
"def check_hermitian(self):\n adjoint = self.mat.conj().T\n return np.allclose(self.mat, adjoint)",
"def validate(self):\n if self.isEmpty(): return False\n\n sum = 0\n for item in self.mask:\n sum += item.prob\n return sum == 1",
"def isVersor(self) -> bool:\n\n Vhat = self.gradeInvol()\n Vrev = ~self\n Vinv = Vrev/(self*Vrev)[0]\n\n gpres = grades_present(Vhat*Vinv, 0.000001)\n if len(gpres) == 1:\n if gpres[0] == 0:\n if np.sum(np.abs((Vhat*Vinv).value - (Vinv*Vhat).value)) < 0.0001:\n for e in basis_vectors(self.layout).values():\n gpres = grades_present(Vhat*e*Vrev, 0.000001)\n if not (len(gpres) == 1 and gpres[0] == 1):\n return False\n gpres = grades_present(self, 0.000001)\n if len(gpres) == 1:\n return False\n else:\n return True\n return False",
"def _ok(self, assignment_graph, source, value, target):\n target_values = assignment_graph[target]\n return len(target_values - set([value])) > 0",
"def is_equivalence(self) -> bool:",
"def __bool__(self):\n return _osgAnimation.BoneMap___bool__(self)",
"def is_already_processed(self):\n\n return self.genotype_probabilities is not None",
"def goal_test(self):\n if -1 in self.state:\n return False\n else:\n return True",
"def assumed_state(self) -> bool:\n return self._optimistic",
"def maximize(self):\n return [not self.minimize]",
"def guard_liberate_transition(self):\n if self.get_free_positions:\n return True",
"def isGoal(self):\n for index in range(self.DIM):\n if not self.values('r',index).count(0) is 0:\n return False\n if not self.isValid():\n return False\n return True",
"def is_FSAL(self):\n if np.all(self.A[-1,:]==self.b): return True\n else: return False",
"def __isub__(self,value):\n if isinstance(value,LiveStat):\n raise Exception(\"Cannot sum statistics\")\n if value.vcount < 1 or self.vcount < 1:\n raise Exception(\"Cannot sum empty statistics\")\n else:\n # sum of two considered pairwise: z_i = stat(x_i - y_i)\n #\n # data have different weights due to number of samples.. TODO\n self.vmin = self.vmin-value.vmax\n self.vmax = self.vmax-value.vmin\n self.vmean -= value.vmean\n self.vsum -= value.vsum\n # variance is sum of variance in any case\n self.vm2 += value.vm2\n # TODO vm3 vm4\n self.vcount = min(self.vcount,value.vcount)\n self.vcountsq = self.vcount**2\n self.dirty = True\n print (\"sub Missing: M3 and M4\")\n else:\n # constant bias\n if self.vmin is not None:\n self.vmin -= value\n self.vmax -= value\n self.vmean -= value\n self.vsum -= self.vcount*value\n self.dirty = True\n print (\"sub Missing: M3 and M4\")\n return self",
"def is_complete(self):\n acquired_points = self.dset.shape[0]\n total_nr_pts = np.shape(self.get_sweep_points())[0]\n if acquired_points < total_nr_pts:\n return False\n elif acquired_points >= total_nr_pts:\n if self.soft_avg() != 1 and self.soft_iteration == 0:\n return False\n else:\n return True",
"def __bool__(self):\n return not self.undefine",
"def balanced(self):\n tmp = self.distro.values()\n tmp.sort()\n bal = [tmp == [2, 3, 4, 4],\n tmp == [2, 3, 3, 5],\n tmp == [3, 3, 3, 4]]\n if any(bal):\n return True\n else:\n return False"
] | [
"0.59036845",
"0.58631575",
"0.5650684",
"0.5457113",
"0.53898907",
"0.5371066",
"0.53598166",
"0.53580296",
"0.52650684",
"0.524763",
"0.52252597",
"0.5104863",
"0.5102853",
"0.5074673",
"0.5071287",
"0.5058628",
"0.5055895",
"0.5042884",
"0.5037415",
"0.5028868",
"0.50263524",
"0.5005157",
"0.49964356",
"0.49902397",
"0.49891308",
"0.49864143",
"0.4983314",
"0.4982919",
"0.4966422",
"0.49629584"
] | 0.72229594 | 0 |
Return the distribution of the statistic for a fixed length of permutations. If a class is not provided, we use the set of all permutations. | def distribution_for_length(
self, n: int, perm_class: Optional[Av] = None
) -> List[int]:
iterator = perm_class.of_length(n) if perm_class else Perm.of_length(n)
cnt = Counter(self.func(p) for p in iterator)
lis = [0] * (max(cnt.keys(), default=0) + 1)
for key, val in cnt.items():
lis[key] = val
return lis | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sampling_class_portion(data,classes,others=None,class_portion=None,rng=np.random.RandomState(100)):\n u, indices = np.unique(classes,return_inverse=True)\n indices=np.asarray(indices)\n num_u=len(u)\n sample_sizes=dict()\n \n # get sample size of each class\n size_min=float(\"inf\")\n for i in range(num_u):\n sample_size_this=np.sum(indices==i)\n sample_sizes[u[i]]=sample_size_this\n if class_portion[u[i]]==1 and sample_size_this<size_min:\n size_min=sample_size_this\n print(size_min)\n\n indices_all=np.array([],dtype=indices.dtype)\n indices_range=np.array(range(len(indices)))\n \n # sampling\n for i in range(num_u):\n ind_this_num=indices_range[indices==i]\n replacetf=True if sample_sizes[u[i]]<(size_min*class_portion[u[i]]) else False\n ind_this_reduced=ind_this_num[rng.choice(sample_sizes[u[i]],size=size_min*class_portion[u[i]],replace=replacetf)]\n indices_all=np.append(indices_all,ind_this_reduced)\n \n # get the sampled data \n data=data[indices_all,:]\n classes=classes[indices_all]\n if np.any(others):\n others=others[indices_all]\n return data,classes,indices_all,others",
"def sampling(data,classes,others=None,portion=0.9,max_size_given=None,rng=np.random.RandomState(100)): \n u, indices = np.unique(classes,return_inverse=True)\n indices=np.asarray(indices)\n num_u=len(u)\n sample_sizes=[]\n \n # get sample size of each class\n for i in range(num_u):\n sample_size_this=np.sum(indices==i)\n sample_sizes.append(sample_size_this)\n sample_sizes=np.array(sample_sizes,dtype=int)\n sample_sizes=sample_sizes*portion\n sample_sizes=np.array(sample_sizes,dtype=int)\n # set a ceiling/limit\n if max_size_given is not None:\n sample_sizes[sample_sizes>max_size_given]=max_size_given \n\n indices_all=np.array([],dtype=indices.dtype)\n indices_range=np.array(range(len(indices)))\n\n # sampling\n for i in range(num_u):\n ind_this_num=indices_range[indices==i]\n ind_this_reduced=ind_this_num[rng.choice(len(ind_this_num),size=sample_sizes[i],replace=False)]\n indices_all=np.append(indices_all,ind_this_reduced)\n \n # reduce the data \n data=data[indices_all,:]\n classes=classes[indices_all]\n if np.any(others):\n others=others[indices_all]\n return data,classes,indices_all,others",
"def _class_distribution(y):\n unique, counts = np.unique(y, return_counts = True)\n\n percentages = counts / np.sum(counts)\n\n return unique, counts, percentages",
"def get_permutatation_by_length(length, permutation_set):\n pass",
"def distribution_up_to(\n self, n: int, perm_class: Optional[Av] = None\n ) -> List[List[int]]:\n return [self.distribution_for_length(i, perm_class) for i in range(n + 1)]",
"def prob(self, tple, class_counts, feature_counts):\n feats = self.dataset.input_features\n unnorm = [prod(feature_counts[i][feat(tple)][c]\n for (i,feat) in enumerate(feats))\n /(class_counts[c]**(len(feats)-1))\n for c in range(self.num_classes)]\n thesum = sum(unnorm)\n return [un/thesum for un in unnorm]",
"def __call__(self, p, size=1):\n\n # Get a uniform distribution over the elements to fill each group.\n uni_gen = MappingDiscreteUniformDistributionGenerator(*self.args)\n\n # Draw the sizes for each group\n group_sizes = numpy.random.geometric(p, size)\n\n # Using the sizes draw element to fill groups up to the right size\n results = [\n uni_gen(group_sizes[i]) for i in nanshe.util.iters.irange(size)\n ]\n\n return(results)",
"def _generate_distribution_samples(self, set_count, parameter_count):\n self._samples = numpy.zeros((set_count, parameter_count))\n for i, distribution in enumerate(self.parameter_distributions.values()):\n self._samples[:, i] = distribution.ppf(self._quantiles[:, i])",
"def strategize(generator):\r\n @functools.wraps(generator)\r\n def strategy_generator(random, args):\r\n candidate = generator(random, args)\r\n n = len(candidate)\r\n candidate.extend([random.random() for _ in range(n)])\r\n return candidate\r\n return strategy_generator",
"def sampleClass(classgroup):\n return classgroup.sample(frac = fraction)",
"def equally_distributed(cls, class1: Av, class2: Av, n: int = 6) -> Iterator[str]:\n return (\n stat.name\n for stat in cls._get_all()\n if all(\n stat.distribution_for_length(i, class1)\n == stat.distribution_for_length(i, class2)\n for i in range(n + 1)\n )\n )",
"def _construct_classical_mutation_classes(n):\n from sage.combinat.cluster_algebra_quiver.quiver import ClusterQuiver\n data = {}\n\n # finite A\n data[ ('A',n) ] = ClusterQuiver(['A',n]).mutation_class(data_type='dig6')\n # affine A\n for j in range(1, n//2+1):\n data[ ('A',(n-j,j),1) ] = ClusterQuiver(['A',[n-j,j],1]).mutation_class(data_type='dig6')\n # finite B\n if n > 1:\n data[ ('B',n) ] = ClusterQuiver(['B',n]).mutation_class(data_type='dig6')\n # affine B\n if n > 2:\n data[ ('BB',n-1,1) ] = ClusterQuiver(['BB',n-1,1]).mutation_class(data_type='dig6')\n # finite C\n if n > 2:\n data[ ('C',n) ] = ClusterQuiver(['C',n]).mutation_class(data_type='dig6')\n # affine C\n if n > 1:\n data[ ('BC',n-1,1) ] = ClusterQuiver(['BC',n-1,1]).mutation_class(data_type='dig6')\n # affine CC\n if n > 2:\n data[ ('CC',n-1,1) ] = ClusterQuiver(['CC',n-1,1]).mutation_class(data_type='dig6')\n # affine BD\n if n > 3:\n data[ ('BD',n-1,1) ] = ClusterQuiver(['BD',n-1,1]).mutation_class(data_type='dig6')\n # affine CD\n if n > 3:\n data[ ('CD',n-1,1) ] = ClusterQuiver(['CD',n-1,1]).mutation_class(data_type='dig6')\n # finite D\n if n > 3:\n data[ ('D',n) ] = ClusterQuiver(['D',n]).mutation_class(data_type='dig6')\n # affine D\n if n > 4:\n data[ ('D',n-1,1) ] = ClusterQuiver(['D',n-1,1]).mutation_class(data_type='dig6')\n\n return data",
"def class_distribution(y): \n # ===================== PLEASE WRITE HERE =====================\n \n bin_array = np.bincount(y)\n n_class1 = bin_array[1]\n n_class2 = bin_array[2]\n n_class3 = bin_array[3]\n \n # ===================== PLEASE WRITE HERE =====================\n \n print('Number of samples in class_1:', n_class1)\n print('Number of samples in class_2:', n_class2)\n print('Number of samples in class_3:', n_class3)",
"def summarize_by_class(self, dset, n_classes):\n final_summarized = []\n sep_dset = self.separate_classes(dset, n_classes)\n for class_dset in sep_dset:\n summarized = self.summarize(class_dset)\n final_summarized.append(summarized)\n return final_summarized",
"def class_size(self):\n if not self.is_mutation_finite():\n return infinity\n else:\n components = []\n multiplicities = []\n for x in self.irreducible_components():\n if components.count(x) == 0:\n components.append(x)\n multiplicities.append(1)\n else:\n y = components.index(x)\n multiplicities[y] = multiplicities[y]+1\n\n sizes = [ x.class_size() for x in components ]\n if NotImplemented in sizes:\n print(\"Size unknown\")\n return NotImplemented\n else:\n return prod( [binomial(sizes[i]+multiplicities[i]-1,\n multiplicities[i] ) for i in range (0,len(sizes))])",
"def _get_all(cls) -> Iterator[\"PermutationStatistic\"]:\n yield from (cls(name, func) for name, func in PermutationStatistic._STATISTICS)",
"def fairness_discrepancy(props, n_classes, norm=0):\n # unique, freq = np.unique(data, return_counts=True)\n # props = freq / len(data) #Proportion of data that belongs to that data\n \n # #------------------Modification to correct the zero support problem------------------------------------------------\n # temp=np.zeros(n_classes)\n # temp[unique]=props\n # props=temp\n # #------------------------------------------------------------------------------\n \n # print (freq)\n truth = 1./n_classes\n\n\n # L2 and L1=================================================================================================\n #(Remove Normalisation)\n l2_fair_d = np.sqrt(((props - truth)**2).sum())\n l1_fair_d = abs(props - truth).sum()\n\n # q = props, p = truth\n # kl_fair_d = (props * (np.log(props) - np.log(truth))).sum()\n\n #Cross entropy\n p=np.ones(n_classes) \n # ce=cross_entropy(p,props,n_classes)-cross_entropy(p,p,n_classes)\n \n #information specificity=====================================================================================\n rank=np.linspace(1,n_classes-1,n_classes-1)\n rank[::-1].sort() #Descending order\n perc=np.array([i/np.sum(rank) for i in rank])\n \n \n props[::-1].sort()\n alpha=props[1:]\n specificity=abs(props[0]-np.sum(alpha*perc))\n info_spec=(l1_fair_d+specificity)/2\n \n #Wasstertein Distance\n wd=wasserstein_distance(props,np.ones(len(props))*truth)\n \n #Wassertein Specificity\n wds=(wd+specificity)/2\n if norm==0:\n return l2_fair_d, l1_fair_d,info_spec,specificity,wd,wds\n # return l2_fair_d, l1_fair_d,info_spec,specificity\n else:\n return l2_fair_d/metric_max(n_classes,\"L2\"), l1_fair_d/metric_max(n_classes,\"L1\"),info_spec/metric_max(n_classes,\"Is\"),specificity,wd/metric_max(n_classes,\"Wd\")\n # return l2_fair_d/metric_max(n_classes,\"l2\"), l1_fair_d/metric_max(n_classes,\"l1\"),info_spec/metric_max(n_classes,\"is\"),specificity",
"def jointly_equally_distributed(\n class1: Av, class2: Av, n: int = 6, dim: int = 2\n ) -> Iterator[Tuple[str, ...]]:\n return (\n tuple(stat[0] for stat in stats)\n for stats in combinations(PermutationStatistic._STATISTICS, dim)\n if all(\n Counter(\n tuple(stat[1](p) for stat in stats) for p in class1.of_length(i)\n )\n == Counter(\n tuple(stat[1](p) for stat in stats) for p in class2.of_length(i)\n )\n for i in range(n + 1)\n )\n )",
"def _perm_stat(self, index): # pragma: no cover\n\n permu = np.random.permutation(self.u)\n permv = np.random.permutation(self.v)\n\n # calculate permuted statics, store in null distribution\n perm_stat = self.indep_test._statistic(permu, permv)\n\n return perm_stat",
"def generate_probabilities(self):\n k = 1\n v= 10\n for g in self.class_probabilities:\n curr_list = self.class_probabilities[g]\n for l in range(0,28):\n for w in range(0,28):\n total = float(curr_list[l][w][0] + curr_list[l][w][1] + curr_list[l][w][2])\n curr_list[l][w][0] = (float(curr_list[l][w][0])+k)/(total + k*v) \n curr_list[l][w][1] = (float(curr_list[l][w][1])+k)/(total + k*v)\n curr_list[l][w][2] = (float(curr_list[l][w][2])+k)/(total + k*v)\n curr_list[l][w][3] = curr_list[l][w][0] + curr_list[l][w][1] + curr_list[l][w][2]",
"def permutation(data, dataLabel=None, nperm=10000, decimals=4):\n\n # test calling values\n if data is None or not isinstance(data, dict) or len(data.keys()) != 2:\n raise ValueError('RSTATS.permutation: data must be'\n + ' a dictionary with at exactly 2 keys'\n + '\\nUse KW (anova) for more than 2 groups')\n\n k = list(data.keys())\n\n g1 = data[k[0]]\n g2 = data[k[1]]\n # (w1, p1) = Stats.shapiro(g1, a=None, reta=False)\n # (w2, p2) = Stats.shapiro(g2, a=None, reta=False)\n\n combined = np.concatenate((g1, g2))\n diffobs = np.mean(g2)-np.mean(g1)\n diffs = np.zeros(nperm)\n nperm = nperm\n index = range(0, combined.shape[0])\n for i in range(nperm):\n # draw from combined data set without replacement\n #shuff = np.random.randint(combined.shape[0], size=combined.shape[0])\n shuff = np.random.permutation(index)\n ar = combined[shuff[0:len(g1)]]\n br = combined[shuff[len(g1):]]\n diffs[i] = np.mean(br) - np.mean(ar)\n pvalue = np.sum(np.abs(diffs) >= np.abs(diffobs)) / float(nperm)\n if dataLabel is not None:\n print ('\\n%s: Permutation Test (Nperm = %d)' % (dataLabel, nperm))\n # if p1 < 0.05 and p2 < 0.05:\n # print(u' Both data sets appear normally distributed: Shapiro-Wilk Group 1 p = {:6.3f}, Group2 p = {:6.3f}'.format(p1, p2))\n # else:\n # print(u' ****At least one Data set is NOT normally distributed****\\n Shapiro-Wilk Group 1 p = {:6.3f}, Group2 p = {:6.3f}'.format(p1, p2))\n # print (u' (Permutation test does not depend on distribution)')\n \n n = max([len(l) for l in k])\n print(u' {:s}={:8.{pc}f} \\u00B1{:.{pc}f}, {:d} (mean, SD, N)'.\n format(k[0].rjust(n), np.mean(g1), np.std(g1, ddof=1),\n len(g1), pc=decimals))\n print(u' {:s}={:8.{pc}f} \\u00B1{:.{pc}f}, {:d} (mean, SD, N)'.\n format(k[1].rjust(n), np.mean(g2), np.std(g2, ddof=1),\n len(g2), pc=decimals))\n summarizeData(data, decimals=decimals)\n # iqr1 = np.subtract(*np.percentile(g1, [75, 25]))\n # iqr2 = np.subtract(*np.percentile(g2, [75, 25]))\n # print(u' {:s}: median={:8.4f} IQR={:8.4f}'.format(k[0].rjust(n), np.median(g1), iqr1))\n # print(u' {:s}: median={:8.4f} IQR={:8.4f}'.format(k[1].rjust(n), np.median(g2), iqr2))\n print(u' Observed difference: {:8.4f}'.format(diffobs))\n print(u' p={:8.6f}, Nperm={:8d}\\n'.format(float(pvalue), int(nperm)))\n return(pvalue, nperm)",
"def summarize_classes(classes):\n u, indices = np.unique(classes,return_inverse=True)\n num_u=len(u)\n print(\"****************************\")\n print(\"Number of samples: {0}\".format(len(classes)))\n print(\"Number of Classes:{0}\".format(num_u))\n for c in u:\n num_c=np.sum(classes==c)\n print(\"Class {0}: {1} Samples\".format(c,num_c))\n print(\"****************************\")",
"def manual_perm_test(model: 'Fitted sklearn estimator',\n X: 'Pandas df',\n y: 'Pandas series',\n true_score: float,\n n_permutations: int=10000,\n plot: bool=True,\n clf: bool=False) -> 'p-value, null_counts':\n\n scores = [] # Empty list for null distribution scores\n n_perms = range(1, n_permutations, 1) # Range of values to permute\n for n in tqdm(n_perms, desc='Permutation test'): # tqdm for progress bar\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, stratify=y, test_size=0.90, random_state=n\n )\n model.fit(X_train, y_train)\n y_test_perm = np.random.permutation(y_test) # Permuting class labels\n chance_scores = round(model.score(X=X_test, y=y_test_perm), 4)\n scores.append(chance_scores)\n\n # Converting to a pandas dataframe\n perm_scores_df = pd.DataFrame(data=scores, columns=['null_dist'])\n perm_scores_df['null_dist'] *= 100\n null_counts = (\n perm_scores_df # Counts greater than or equal to our test set score\n .loc[(perm_scores_df['null_dist']) >= true_score]\n .count()\n .iloc[0]\n )\n p_value = (null_counts + 1) / (n_permutations + 1)\n p_value = np.round(p_value, decimals=5)\n\n if plot is True: # Plotting a histogram of permutation scores\n plt.figure(figsize=(10, 10))\n sns.distplot(a=perm_scores_df['null_dist'],\n hist=True,\n label='Permutation scores')\n ylim = plt.ylim()\n if clf is False:\n # True classifier score and p-value\n plt.plot(2 * [true_score],\n ylim,\n '--g',\n linewidth=3,\n label='R2 score %s (pvalue : %s)' %\n (true_score, p_value))\n else:\n plt.plot(2 * [true_score],\n ylim,\n '--g',\n linewidth=3,\n label='Multimodal AUC score: %s (pvalue = %s)' %\n (true_score, p_value))\n n_classes = np.unique(y).size\n chance = 2 * [100. / n_classes]\n plt.plot(chance,\n ylim,\n '--k',\n linewidth=3,\n label='Null model mean AUC score: %s' % 50.00)\n \n plt.ylim(ylim)\n plt.legend(loc='lower center', bbox_to_anchor=(0.5, -0.38))\n plt.tight_layout()\n\n if clf is False:\n plt.xlabel(xlabel='R2 Scores')\n else:\n plt.xlabel(xlabel='AUC Scores')\n plt.title(label='Null Distribution')\n plt.savefig('quadratic_null_dist.png', dpi=300, bbox_inches='tight')\n plt.show()\n\n return p_value, null_counts",
"def generatePopulations(num_pops):\n distGenerator = DistributionGenerator()\n populations = []\n for i in range(num_pops):\n dist_type = random.sample(config.dist_types, 1)[0]\n populations.append(distGenerator.generateDistributions(dist_type, config.MaxDistributionSize))\n return populations",
"def set_class_distribution(ub_dict, percentage_dict, name):\n tot_percent = 0\n for x in percentage_dict:\n tot_percent += percentage_dict[x]\n label_ctr_dict = defaultdict(int)\n for x in ub_dict['labels']:\n label_ctr_dict[x] += 1\n \n if abs(tot_percent - 1) > 1e-15:\n sys.exit(\"Total percentages != 1\")\n if len(ub_dict['meta_data'][0]) != len(percentage_dict):\n sys.exit(\"Mismatch between expected and given number of classes\")\n if set(ub_dict['meta_data'][0]) != set(percentage_dict):\n sys.exit(\"Mismatch between classes given and those expected\")\n\n batch_size = int(min([label_ctr_dict[x]/percentage_dict[x] for x in percentage_dict]))\n class_trgt_distrib = {x: int(batch_size*percentage_dict[x]) for x in percentage_dict}\n class_actual_distrib = {x: 0 for x in percentage_dict}\n tot_trgts = sum([class_trgt_distrib[x] for x in class_trgt_distrib])\n if tot_trgts < batch_size:\n key, val = min(class_trgt_distrib.iteritems(), key=operator.itemgetter(1))\n class_trgt_distrib[key] += (batch_size - tot_trgts)\n\n tot_rows = batch_size\n\n bal_dict = dict()\n bal_data = np.zeros((tot_rows, 3072), dtype=ub_dict['data'].dtype)\n bal_labels = [0] * tot_rows\n bal_filenames = [\"\"] * tot_rows\n\n bal_ctr = 0\n for idx in range(len(ub_dict['labels'])):\n curr_label = ub_dict['labels'][idx]\n if class_actual_distrib[curr_label] < class_trgt_distrib[curr_label]:\n bal_data[bal_ctr, :] = ub_dict['data'][idx, :]\n bal_labels[bal_ctr] = ub_dict['labels'][idx]\n bal_filenames[bal_ctr] = ub_dict['filenames'][idx]\n \n bal_ctr += 1\n class_actual_distrib[curr_label] += 1\n\n bal_dict['data'] = bal_data\n bal_dict['labels'] = bal_labels\n bal_dict['filenames'] = bal_filenames\n bal_dict['name'] = name\n bal_dict['src_meta_data'] = ub_dict['meta_data']\n\n return bal_dict",
"def get_decks_per_class(filtering=None, sorting=None, count=None, patch=None):\n # For some strange reason, HearthPwn assigns each class a \"power of two\"\n # value for filtering by class. For example, Warrior is filter-class=1024.\n # I'm not getting too granular at the moment, so just calculating powers\n # of two is fine.\n classes = [2**x for x in range(2, 11)]\n decks = []\n\n if not count:\n # Substitute a default count in here so that all classes return the\n # same number of decks.\n url = generate_url(filtering, sorting, patch)\n pagecount = get_pagecount(get_pagetree(url))\n count = int((pagecount * DECKS_PER_PAGE * 0.1) / len(classes))\n for classid in classes:\n decks += get_decks(filtering, sorting, count, patch, classid)\n return decks",
"def fit_dist(self, instances):\n dists = []\n for i in range(len(instances[0])):\n component = [instances[k][i] for k in instances.keys()]\n dist = norm.fit(component)\n dists.append(dist)\n\n def sample():\n instance = []\n for d in dists:\n instance.append(np.random.normal(d[0], d[1]))\n return instance\n\n return sample",
"def __call__(self, num_perms=999):\r\n res = super(Best, self).__call__()\r\n cats = self.Categories\r\n dm = self.DistanceMatrices[0]\r\n dm_flat = dm.condensed_form()\r\n\r\n row_count = dm.shape[0]\r\n col_count = len(cats)\r\n sum = 0\r\n stats = [(-777777777, '') for c in range(col_count + 1)]\r\n for i in range(1, col_count + 1):\r\n combo = combinations([j for j in range(1, col_count + 1)], i)\r\n\r\n for element in combo:\r\n cat_mat = self._make_cat_mat(cats, element)\r\n cat_dm = self._derive_euclidean_dm(cat_mat, row_count)\r\n cat_dm_flat = cat_dm.condensed_form()\r\n r = spearman(dm_flat, cat_dm_flat)\r\n if r > stats[i - 1][0]:\r\n stats[i - 1] = (r, ','.join(str(s) for s in element))\r\n\r\n res['method_name'] = 'BEST'\r\n res['num_vars'] = col_count\r\n res['vars'] = ['%s = %d' % (name, val + 1)\r\n for val, name in enumerate(cats)]\r\n res['rho_vals'] = stats[:-1]\r\n\r\n return res",
"def optimal_instances_per_class(df, factor=1.0, draw=False):\n # `bincount` returns the number of instances we have for each website\n counts = np.bincount(df.class_label.tolist())\n hist, bin_edges = np.histogram(counts)\n if draw:\n inst_counts = get_num_instances(df)\n inst_counts.hist(cumulative=-1, bins=100)\n plt.xlabel('Num of instances')\n plt.ylabel('Num of classes with x or more insts')\n plt.show()\n\n # scale the y-axis\n dx = bin_edges[1] - bin_edges[0]\n cum_hist = np.cumsum(hist) * dx\n\n # get the inverse cumulative sum\n inv_cum_hist = max(cum_hist) - cum_hist\n\n # compute the harmonic mean of tuples (y=f(x), x)\n hms = [harmonic_mean(x, y, factor) if y > 0 and x > 0 else 0\n for x, y in zip(bin_edges[1:], inv_cum_hist)]\n\n print(hms)\n\n # find index for max harmonic mean\n i = np.argmax(hms)\n\n # this is the optimal number of instances:\n opt_num_insts = int(bin_edges[i])\n\n # which leaves us with this number of classes:\n opt_num_classes = len(counts[counts >= opt_num_insts])\n\n if draw:\n print(\"Optimal number of instances:\", opt_num_insts)\n print(\"Optimal number of classes:\", opt_num_classes)\n\n return opt_num_insts, opt_num_classes",
"def jointly_transformed_equally_distributed(\n class1: Av, class2: Av, n: int = 6, dim: int = 2\n ) -> Iterator[Tuple[Tuple[str, ...], Tuple[str, ...]]]:\n return (\n (tuple(stat[0] for stat in stats1), tuple(stat[0] for stat in stats2))\n for stats1, stats2 in combinations(\n permutations(PermutationStatistic._STATISTICS, dim), 2\n )\n if all(\n Counter(\n tuple(stat[1](p) for stat in stats1) for p in class1.of_length(i)\n )\n == Counter(\n tuple(stat[1](p) for stat in stats2) for p in class2.of_length(i)\n )\n for i in range(n + 1)\n )\n )"
] | [
"0.61888975",
"0.61321306",
"0.5902894",
"0.5809527",
"0.57847863",
"0.5612866",
"0.55107516",
"0.5496993",
"0.5490976",
"0.54454756",
"0.53975725",
"0.537233",
"0.5363853",
"0.5340827",
"0.5332886",
"0.5306523",
"0.525835",
"0.5203852",
"0.51803553",
"0.51737016",
"0.5172415",
"0.51612705",
"0.51503783",
"0.51295125",
"0.5108784",
"0.50902754",
"0.50849235",
"0.5049614",
"0.5049114",
"0.5041385"
] | 0.6423975 | 0 |
Return all statistics that are equally distributed between two classes up to a max length. | def equally_distributed(cls, class1: Av, class2: Av, n: int = 6) -> Iterator[str]:
return (
stat.name
for stat in cls._get_all()
if all(
stat.distribution_for_length(i, class1)
== stat.distribution_for_length(i, class2)
for i in range(n + 1)
)
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def jointly_equally_distributed(\n class1: Av, class2: Av, n: int = 6, dim: int = 2\n ) -> Iterator[Tuple[str, ...]]:\n return (\n tuple(stat[0] for stat in stats)\n for stats in combinations(PermutationStatistic._STATISTICS, dim)\n if all(\n Counter(\n tuple(stat[1](p) for stat in stats) for p in class1.of_length(i)\n )\n == Counter(\n tuple(stat[1](p) for stat in stats) for p in class2.of_length(i)\n )\n for i in range(n + 1)\n )\n )",
"def jointly_transformed_equally_distributed(\n class1: Av, class2: Av, n: int = 6, dim: int = 2\n ) -> Iterator[Tuple[Tuple[str, ...], Tuple[str, ...]]]:\n return (\n (tuple(stat[0] for stat in stats1), tuple(stat[0] for stat in stats2))\n for stats1, stats2 in combinations(\n permutations(PermutationStatistic._STATISTICS, dim), 2\n )\n if all(\n Counter(\n tuple(stat[1](p) for stat in stats1) for p in class1.of_length(i)\n )\n == Counter(\n tuple(stat[1](p) for stat in stats2) for p in class2.of_length(i)\n )\n for i in range(n + 1)\n )\n )",
"def test_find_distance_classes_variable_size_bins(self):\r\n # Single distance class.\r\n exp = (array([[-1, 0, 0], [0, -1, 0], [0, 0, -1]]), [5.0])\r\n obs = self.small_mc_var_bins._find_distance_classes(\r\n self.small_mc_var_bins.DistanceMatrices[1], 1)\r\n self.compare_multiple_level_array(obs, exp)\r\n\r\n # Multiple distance classes (even #).\r\n exp = (array([[-1, 0, 0], [0, -1, 1], [0, 1, -1]]), [3.5, 6.5])\r\n obs = self.small_mc_var_bins._find_distance_classes(\r\n self.small_mc_var_bins.DistanceMatrices[1], 2)\r\n self.compare_multiple_level_array(obs, exp)\r\n\r\n # Multiple distance classes (odd #).\r\n exp = (array([[-1, 0, 1], [0, -1, 2], [1, 2, -1]]),\r\n [2.0, 3.5, 6.5])\r\n obs = self.small_mc_var_bins._find_distance_classes(\r\n self.small_mc_var_bins.DistanceMatrices[1], 3)\r\n self.compare_multiple_level_array(obs, exp)\r\n\r\n # More classes than distances.\r\n exp = (array([[-1, 0, 1], [0, -1, 2], [1, 2, -1]]),\r\n [2.0, 3.5, 6.5, 8])\r\n obs = self.small_mc_var_bins._find_distance_classes(\r\n self.small_mc_var_bins.DistanceMatrices[1], 4)\r\n self.compare_multiple_level_array(obs, exp)",
"def choose_split_value(attrs, classes):\n indices = np.argsort(attrs)\n classes = classes[indices]\n attrs = attrs[indices]\n max_gain = 0.0\n max_gain_value = None\n for i in range(len(attrs) - 1):\n if classes[i] != classes[i+1]:\n mean = (attrs[i] + attrs[i+1]) / 2.0\n gain = inform_gain(attrs, classes, mean)\n if gain > max_gain:\n max_gain = gain\n max_gain_value = mean\n return max_gain_value, max_gain",
"def _class_distribution(y):\n unique, counts = np.unique(y, return_counts = True)\n\n percentages = counts / np.sum(counts)\n\n return unique, counts, percentages",
"def test_calculate_class_2_individuals_best_response_simulation_equal_split():\n lambda_2 = 0.3\n equal_split = calculate_class_2_individuals_best_response(\n lambda_2=lambda_2,\n lambda_1_1=0.3,\n lambda_1_2=0.3,\n mu_1=0.2,\n mu_2=0.2,\n num_of_servers_1=4,\n num_of_servers_2=4,\n threshold_1=3,\n threshold_2=3,\n system_capacity_1=float(\"inf\"),\n system_capacity_2=float(\"inf\"),\n buffer_capacity_1=float(\"inf\"),\n buffer_capacity_2=float(\"inf\"),\n use_simulation=True,\n runtime=500,\n num_of_trials=5,\n warm_up_time=100,\n seed_num_1=0,\n seed_num_2=0,\n )\n\n assert np.isclose(equal_split, 0.5)",
"def fairness_discrepancy(props, n_classes, norm=0):\n # unique, freq = np.unique(data, return_counts=True)\n # props = freq / len(data) #Proportion of data that belongs to that data\n \n # #------------------Modification to correct the zero support problem------------------------------------------------\n # temp=np.zeros(n_classes)\n # temp[unique]=props\n # props=temp\n # #------------------------------------------------------------------------------\n \n # print (freq)\n truth = 1./n_classes\n\n\n # L2 and L1=================================================================================================\n #(Remove Normalisation)\n l2_fair_d = np.sqrt(((props - truth)**2).sum())\n l1_fair_d = abs(props - truth).sum()\n\n # q = props, p = truth\n # kl_fair_d = (props * (np.log(props) - np.log(truth))).sum()\n\n #Cross entropy\n p=np.ones(n_classes) \n # ce=cross_entropy(p,props,n_classes)-cross_entropy(p,p,n_classes)\n \n #information specificity=====================================================================================\n rank=np.linspace(1,n_classes-1,n_classes-1)\n rank[::-1].sort() #Descending order\n perc=np.array([i/np.sum(rank) for i in rank])\n \n \n props[::-1].sort()\n alpha=props[1:]\n specificity=abs(props[0]-np.sum(alpha*perc))\n info_spec=(l1_fair_d+specificity)/2\n \n #Wasstertein Distance\n wd=wasserstein_distance(props,np.ones(len(props))*truth)\n \n #Wassertein Specificity\n wds=(wd+specificity)/2\n if norm==0:\n return l2_fair_d, l1_fair_d,info_spec,specificity,wd,wds\n # return l2_fair_d, l1_fair_d,info_spec,specificity\n else:\n return l2_fair_d/metric_max(n_classes,\"L2\"), l1_fair_d/metric_max(n_classes,\"L1\"),info_spec/metric_max(n_classes,\"Is\"),specificity,wd/metric_max(n_classes,\"Wd\")\n # return l2_fair_d/metric_max(n_classes,\"l2\"), l1_fair_d/metric_max(n_classes,\"l1\"),info_spec/metric_max(n_classes,\"is\"),specificity",
"def getMaximumDistances(self):\n pass",
"def best_split(self):\n sub_group = []\n\n current_entropy = self.entropy(self._Passengers)\n best_gain = 0 # holds the best entropy difference so far\n best_split = self._Attr[0].get_name()\n relative_entropy = 0 # entropy while taking account for the size of the population\n\n for Attribute in self._Attr:\n relative_entropy = 0\n print(\"Attr considered: \" + Attribute.get_name())\n for Attr_option in Attribute.get_options():\n sub_group = []\n for Passenger in self._Passengers:\n if self.passenger_attr_option_check(Passenger,\n Attribute.get_name(),\n Attr_option): # if P.A = V\n sub_group.append(Passenger)\n if len(sub_group) > 0 and len(self._Passengers) > 0:\n relative_entropy += self.entropy(sub_group) * (len(sub_group)/len(self._Passengers))\n\n if current_entropy - relative_entropy > best_gain:\n best_gain = current_entropy - relative_entropy\n best_split = Attribute.get_name()\n\n print(f\"best split:{best_split} \\n with entropy gain of:\\n {best_gain}\")\n\n return best_split",
"def fairness_discrepancy(data, n_classes, norm=0):\n unique, freq = np.unique(data, return_counts=True)\n props = freq / len(data) #Proportion of data that belongs to that data\n \n #------------------Modification to correct the zero support problem------------------------------------------------\n temp=np.zeros(n_classes)\n temp[unique]=props\n props=temp\n #------------------------------------------------------------------------------\n \n # print (freq)\n truth = 1./n_classes\n\n\n # L2 and L1=================================================================================================\n l2_fair_d = np.sqrt(((props - truth)**2).sum())/n_classes\n l1_fair_d = abs(props - truth).sum()/n_classes\n\n # q = props, p = truth\n # kl_fair_d = (props * (np.log(props) - np.log(truth))).sum()\n\n #Cross entropy\n p=np.ones(n_classes)/n_classes \n # ce=cross_entropy(p,props,n_classes)-cross_entropy(p,p,n_classes)\n \n #information specificity=====================================================================================\n rank=np.linspace(1,n_classes-1,n_classes-1)\n rank[::-1].sort() #Descending order\n perc=np.array([i/np.sum(rank) for i in rank])\n \n #Create array to populate proportions\n # props2=np.zeros(n_classes)\n # props2[unique]=props\n \n props[::-1].sort()\n alpha=props[1:]\n specificity=abs(props[0]-np.sum(alpha*perc))\n info_spec=(l1_fair_d+specificity)/2\n \n #Wasstertein Distance\n wd=wasserstein_distance(props,np.ones(len(props))*truth)\n \n #Wassertein Specificity\n wds=(wd+specificity)/2\n if norm==0:\n for i in props:\n f.write(\"%f \"%(i))\n f.write(\"\\n\")\n return l2_fair_d, l1_fair_d,info_spec,specificity,wd,wds\n # return l2_fair_d, l1_fair_d,info_spec,specificity\n else:\n return l2_fair_d/metric_max(n_classes,\"l2\"), l1_fair_d/metric_max(n_classes,\"l1\"),info_spec/metric_max(n_classes,\"is\"),specificity,wd/metric_max(n_classes,\"wd\"),wds/metric_max(n_classes,\"wds\")\n # return l2_fair_d/metric_max(n_classes,\"l2\"), l1_fair_d/metric_max(n_classes,\"l1\"),info_spec/metric_max(n_classes,\"is\"),specificity",
"def _get_splitpoints_class_to_subclasses(self, l_adj_discr):\n std = numpy.std(l_adj_discr)\n return [i for i, discr in enumerate(l_adj_discr) if discr >= std * self.std_split]",
"def overrepresented_units(distribution, classes=None):\n # Regroup into classes if specified. Otherwise return categories indicated\n # in the data\n if not classes:\n classes = return_categories(distribution) \n\n\n ## Compute the representation of the different classes in all areal units\n rep = mb.representation(distribution, classes)\n\n ## Find the tracts where classes are overrepresented\n areal_units = {cl:[au for au in rep\n if rep[au][cl][0] > 1 + 2.57*math.sqrt(rep[au][cl][1])] \n for cl in classes}\n\n return areal_units",
"def class_distribution(y): \n # ===================== PLEASE WRITE HERE =====================\n \n bin_array = np.bincount(y)\n n_class1 = bin_array[1]\n n_class2 = bin_array[2]\n n_class3 = bin_array[3]\n \n # ===================== PLEASE WRITE HERE =====================\n \n print('Number of samples in class_1:', n_class1)\n print('Number of samples in class_2:', n_class2)\n print('Number of samples in class_3:', n_class3)",
"def _max_stat(X, X2, perms, dof_scaling):\n n_samples = len(X)\n mus = np.dot(perms, X) / float(n_samples)\n stds = np.sqrt(X2[None, :] - mus * mus) * dof_scaling # std with splitting\n max_abs = np.max(np.abs(mus) / (stds / sqrt(n_samples)), axis=1) # t-max\n return max_abs",
"def nms(bounds, classes, scores): # pylint: disable=too-many-locals, no-member\n best_bounds = []\n best_scores = []\n best_classes = []\n for i in list(np.unique(classes)):\n mask_class = classes == i\n bounds_class = bounds[mask_class, :]\n scores_class = scores[mask_class]\n while bounds_class.size > 0:\n max_index = np.argmax(scores_class)\n best_bound = bounds_class[max_index]\n best_bounds.append(best_bound)\n best_scores.append(scores_class[max_index])\n best_classes.append(i)\n bounds_class = np.delete(bounds_class, max_index, axis=0)\n scores_class = np.delete(scores_class, max_index)\n if bounds_class.size == 0:\n break\n best_area = (best_bound[2] - best_bound[0]) * (best_bound[3] - best_bound[1])\n areas = (bounds_class[:, 2] - bounds_class[:, 0]) * (bounds_class[:, 3] - bounds_class[:, 1])\n xmax = np.maximum(best_bound[0], bounds_class[:, 0])\n xmin = np.minimum(best_bound[2], bounds_class[:, 2])\n ymax = np.maximum(best_bound[1], bounds_class[:, 1])\n ymin = np.minimum(best_bound[3], bounds_class[:, 3])\n width = np.maximum(0, xmin - xmax + 1)\n height = np.maximum(0, ymin - ymax + 1)\n areas_intersection = width * height\n iou = areas_intersection / (best_area + areas - areas_intersection)\n mask_iou = iou < 0.45\n bounds_class = bounds_class[mask_iou, :]\n scores_class = scores_class[mask_iou]\n return best_bounds, best_classes, best_scores",
"def test_calculate_class_2_individuals_best_response_simulation_all_inds_in_one():\n all_individuals_to_first = calculate_class_2_individuals_best_response(\n lambda_2=0.3,\n lambda_1_1=0.1,\n lambda_1_2=3,\n mu_1=10,\n mu_2=2,\n num_of_servers_1=8,\n num_of_servers_2=4,\n threshold_1=6,\n threshold_2=3,\n system_capacity_1=float(\"inf\"),\n system_capacity_2=float(\"inf\"),\n buffer_capacity_1=float(\"inf\"),\n buffer_capacity_2=float(\"inf\"),\n use_simulation=True,\n runtime=500,\n num_of_trials=5,\n warm_up_time=100,\n seed_num_1=10,\n seed_num_2=10,\n )\n assert all_individuals_to_first == 1\n\n all_individuals_to_second = calculate_class_2_individuals_best_response(\n lambda_2=0.3,\n lambda_1_1=3,\n lambda_1_2=0.1,\n mu_1=2,\n mu_2=10,\n num_of_servers_1=4,\n num_of_servers_2=8,\n threshold_1=3,\n threshold_2=6,\n system_capacity_1=float(\"inf\"),\n system_capacity_2=float(\"inf\"),\n buffer_capacity_1=float(\"inf\"),\n buffer_capacity_2=float(\"inf\"),\n use_simulation=True,\n runtime=500,\n num_of_trials=5,\n warm_up_time=100,\n seed_num_1=10,\n seed_num_2=10,\n )\n assert all_individuals_to_second == 0",
"def get_var_pool(cls, data1: tuple, data2: tuple) -> float:\n cls._data_validation(data1)\n cls._data_validation(data2)\n n1 = cls.get_n(data1)\n var1 = cls.get_var(data1)\n n2 = cls.get_n(data2)\n var2 = cls.get_var(data2)\n return ((n1 - 1) * var1 + (n2 - 1) * var2) / (n1 + n2 - 2)",
"def get_n_best(self):\n pass",
"def run(self, max_clusters):\n sample_dist_matrix = self.matrix_dist()\n self.link.print_link()\n first_clus = self.clusters[0] # initialize first cluster to merge into\n second_clus = self.clusters[0] # initialize second cluster to merge\n max_samples_dist = max(sample_dist_matrix.values())\n # initialize minimun distance between two samples\n min_dist = max_samples_dist\n while len(self.clusters) > max_clusters: # clustering loop\n for clus in self.clusters: # iterate over every cluster\n for other_clus in self.clusters: # iterate over other clusters\n if clus.c_id > other_clus.c_id: # avoid duplicates and make sure to pass correct key to dictionary\n # compute distance between two clusters according to current link\n clus_dist = self.link.compute(clus, other_clus, sample_dist_matrix)\n if clus_dist < min_dist: # keep the minimum distance and its clusters\n min_dist = clus_dist\n first_clus = other_clus\n second_clus = clus\n self.clusters.remove(second_clus) # remove the cluster that's getting merged from clusters list\n first_clus.merge(second_clus) # merge the cluster with higher id into the other\n min_dist = max_samples_dist # restore high distance in order to start the search again\n\n sum_sil = self.compute_summery_silhouette(sample_dist_matrix)\n # print results\n for clus in self.clusters:\n clus.print_details(sum_sil[clus.c_id])\n print(f'Whole data: silhouette = {sum_sil[0]}, RI = {self.compute_rand_index()}')",
"def n_classes(self):\n raise NotImplementedError",
"def n_classes(self):\n raise NotImplementedError",
"def balance_sample_size(data, classes, others=None, min_size_given=None, rng=np.random.RandomState(100)): \n u, indices = np.unique(classes,return_inverse=True)\n indices=np.asarray(indices)\n num_u=len(u)\n sample_sizes=[]\n \n # get sample size of each class\n for i in range(num_u):\n sample_size_this=np.sum(indices==i)\n sample_sizes.append(sample_size_this) \n \n size_min=np.amin(sample_sizes) # smallest sample size\n \n if min_size_given and size_min>min_size_given:\n size_min=min_size_given \n \n indices_all=np.array([],dtype=indices.dtype)\n indices_range=np.array(range(len(indices)))\n \n for i in range(num_u):\n ind_this_num=indices_range[indices==i]\n ind_this_reduced=ind_this_num[rng.choice(len(ind_this_num),size=size_min,replace=False)]\n indices_all=np.append(indices_all,ind_this_reduced)\n \n # reduce the data \n data=data[indices_all]\n classes=classes[indices_all]\n if np.any(others):\n others=others[indices_all]\n return data,classes,others",
"def sampling_class_portion(data,classes,others=None,class_portion=None,rng=np.random.RandomState(100)):\n u, indices = np.unique(classes,return_inverse=True)\n indices=np.asarray(indices)\n num_u=len(u)\n sample_sizes=dict()\n \n # get sample size of each class\n size_min=float(\"inf\")\n for i in range(num_u):\n sample_size_this=np.sum(indices==i)\n sample_sizes[u[i]]=sample_size_this\n if class_portion[u[i]]==1 and sample_size_this<size_min:\n size_min=sample_size_this\n print(size_min)\n\n indices_all=np.array([],dtype=indices.dtype)\n indices_range=np.array(range(len(indices)))\n \n # sampling\n for i in range(num_u):\n ind_this_num=indices_range[indices==i]\n replacetf=True if sample_sizes[u[i]]<(size_min*class_portion[u[i]]) else False\n ind_this_reduced=ind_this_num[rng.choice(sample_sizes[u[i]],size=size_min*class_portion[u[i]],replace=replacetf)]\n indices_all=np.append(indices_all,ind_this_reduced)\n \n # get the sampled data \n data=data[indices_all,:]\n classes=classes[indices_all]\n if np.any(others):\n others=others[indices_all]\n return data,classes,indices_all,others",
"def compute_domain_sizes(self):\n print('Computing domain sizes...')\n domain_sizes = np.zeros(self.data_size).astype(int)\n for i in range(0, len(self)):\n cat_counts = self[i]\n print(cat_counts)\n domain_sizes = np.maximum(domain_sizes, cat_counts)\n return domain_sizes",
"def sampling(data,classes,others=None,portion=0.9,max_size_given=None,rng=np.random.RandomState(100)): \n u, indices = np.unique(classes,return_inverse=True)\n indices=np.asarray(indices)\n num_u=len(u)\n sample_sizes=[]\n \n # get sample size of each class\n for i in range(num_u):\n sample_size_this=np.sum(indices==i)\n sample_sizes.append(sample_size_this)\n sample_sizes=np.array(sample_sizes,dtype=int)\n sample_sizes=sample_sizes*portion\n sample_sizes=np.array(sample_sizes,dtype=int)\n # set a ceiling/limit\n if max_size_given is not None:\n sample_sizes[sample_sizes>max_size_given]=max_size_given \n\n indices_all=np.array([],dtype=indices.dtype)\n indices_range=np.array(range(len(indices)))\n\n # sampling\n for i in range(num_u):\n ind_this_num=indices_range[indices==i]\n ind_this_reduced=ind_this_num[rng.choice(len(ind_this_num),size=sample_sizes[i],replace=False)]\n indices_all=np.append(indices_all,ind_this_reduced)\n \n # reduce the data \n data=data[indices_all,:]\n classes=classes[indices_all]\n if np.any(others):\n others=others[indices_all]\n return data,classes,indices_all,others",
"def get_dists_2():\n d1 = Distribution(['0', '1'], [1 / 2, 1 / 2])\n d2 = Distribution(['0', '1'], [1 / 3, 2 / 3])\n d3 = Distribution(['0', '1'], [2 / 5, 3 / 5])\n return d1, d2, d3",
"def get_num_classes(self):",
"def optimal_instances_per_class(df, factor=1.0, draw=False):\n # `bincount` returns the number of instances we have for each website\n counts = np.bincount(df.class_label.tolist())\n hist, bin_edges = np.histogram(counts)\n if draw:\n inst_counts = get_num_instances(df)\n inst_counts.hist(cumulative=-1, bins=100)\n plt.xlabel('Num of instances')\n plt.ylabel('Num of classes with x or more insts')\n plt.show()\n\n # scale the y-axis\n dx = bin_edges[1] - bin_edges[0]\n cum_hist = np.cumsum(hist) * dx\n\n # get the inverse cumulative sum\n inv_cum_hist = max(cum_hist) - cum_hist\n\n # compute the harmonic mean of tuples (y=f(x), x)\n hms = [harmonic_mean(x, y, factor) if y > 0 and x > 0 else 0\n for x, y in zip(bin_edges[1:], inv_cum_hist)]\n\n print(hms)\n\n # find index for max harmonic mean\n i = np.argmax(hms)\n\n # this is the optimal number of instances:\n opt_num_insts = int(bin_edges[i])\n\n # which leaves us with this number of classes:\n opt_num_classes = len(counts[counts >= opt_num_insts])\n\n if draw:\n print(\"Optimal number of instances:\", opt_num_insts)\n print(\"Optimal number of classes:\", opt_num_classes)\n\n return opt_num_insts, opt_num_classes",
"def getMetricsClass(pred_bboxes, gt_bboxes, nclasses):\r\n aps = []\r\n iou = []\r\n for cls in range(nclasses):\r\n if bool(pred_bboxes):\r\n if len(pred_bboxes[0]) == 4: \r\n avg_precision_class, iou_class = getMetrics(pred_bboxes, gt_bboxes)\r\n if len(pred_bboxes[0]) == 5:\r\n avg_precision_class, iou_class = getMetrics(pred_bboxes, gt_bboxes, confidence = True)\r\n else:\r\n avg_precision_class = 0\r\n iou_class = 0\r\n\r\n aps.append(avg_precision_class)\r\n iou.append(iou_class)\r\n \r\n return np.mean(aps), np.mean(iou)",
"def SA(targetMDG):\n hill_climbers = []\n for i in range(NUM_Population):\n hill_climbers.append(SimulatedAnnealing(targetMDG))\n\n completed_climbers = []\n completed_max_climbers = []\n\n # k: int, number of neighbors to be considered\n k = 20\n i = 0\n not_increased = 0\n max_score = 0\n\n while True:\n for climber in hill_climbers[:]:\n result = climber.climb_with_annealing(k, i)\n if not result:\n completed_climbers.append(climber)\n hill_climbers.remove(climber)\n max_completed_climber = SimulatedAnnealing(targetMDG)\n max_completed_climber.result = climber.max_result\n max_completed_climber.update_score()\n completed_max_climbers.append(max_completed_climber)\n\n total_climbers = hill_climbers + completed_climbers + completed_max_climbers\n total_climbers.sort()\n print(\"Iteration \", i, \": \", total_climbers[-1].score)\n\n if total_climbers[-1].score - max_score != 0:\n not_increased = 0\n else:\n not_increased += 1\n\n if len(hill_climbers) == 0 or not_increased == 10:\n break\n i += 1\n max_score = total_climbers[-1].score\n\n total_climbers = hill_climbers + completed_climbers + completed_max_climbers\n total_climbers.sort()\n\n max_climber = total_climbers[-1]\n\n print(\"TurboMQ = \", max_climber.score)\n for c in max_climber.result: # print all clusters which are not singleton\n if 1 != len(c.get_nodes()):\n print(c.get_nodes())\n\n return max_climber.result"
] | [
"0.6598037",
"0.6402674",
"0.5585297",
"0.5560338",
"0.55585843",
"0.5543845",
"0.54998505",
"0.5416551",
"0.5411819",
"0.5411735",
"0.5398721",
"0.53787124",
"0.53559506",
"0.53210706",
"0.5216282",
"0.51838976",
"0.5151496",
"0.5138267",
"0.51368135",
"0.5108671",
"0.5108671",
"0.50892395",
"0.5078946",
"0.50690144",
"0.50632334",
"0.50625646",
"0.5057749",
"0.5033423",
"0.5024769",
"0.50130117"
] | 0.7223419 | 0 |
Check if a combination of statistics is equally distributed between two classes up to a max length. | def jointly_equally_distributed(
class1: Av, class2: Av, n: int = 6, dim: int = 2
) -> Iterator[Tuple[str, ...]]:
return (
tuple(stat[0] for stat in stats)
for stats in combinations(PermutationStatistic._STATISTICS, dim)
if all(
Counter(
tuple(stat[1](p) for stat in stats) for p in class1.of_length(i)
)
== Counter(
tuple(stat[1](p) for stat in stats) for p in class2.of_length(i)
)
for i in range(n + 1)
)
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def equally_distributed(cls, class1: Av, class2: Av, n: int = 6) -> Iterator[str]:\n return (\n stat.name\n for stat in cls._get_all()\n if all(\n stat.distribution_for_length(i, class1)\n == stat.distribution_for_length(i, class2)\n for i in range(n + 1)\n )\n )",
"def jointly_transformed_equally_distributed(\n class1: Av, class2: Av, n: int = 6, dim: int = 2\n ) -> Iterator[Tuple[Tuple[str, ...], Tuple[str, ...]]]:\n return (\n (tuple(stat[0] for stat in stats1), tuple(stat[0] for stat in stats2))\n for stats1, stats2 in combinations(\n permutations(PermutationStatistic._STATISTICS, dim), 2\n )\n if all(\n Counter(\n tuple(stat[1](p) for stat in stats1) for p in class1.of_length(i)\n )\n == Counter(\n tuple(stat[1](p) for stat in stats2) for p in class2.of_length(i)\n )\n for i in range(n + 1)\n )\n )",
"def discrete_one_samp_ks(distribution1: np.array, distribution2: np.array, num_samples: int) -> Tuple[float, bool]:\n cutoff = 1.36 / math.sqrt(num_samples)\n ecdf1 = np.array([sum(distribution1[:i + 1]) for i in range(len(distribution1))])\n ecdf2 = np.array([sum(distribution2[:i + 1]) for i in range(len(distribution2))])\n max_diff = np.absolute(ecdf1 - ecdf2).max()\n return max_diff, max_diff < cutoff",
"def test_find_distance_classes_variable_size_bins(self):\r\n # Single distance class.\r\n exp = (array([[-1, 0, 0], [0, -1, 0], [0, 0, -1]]), [5.0])\r\n obs = self.small_mc_var_bins._find_distance_classes(\r\n self.small_mc_var_bins.DistanceMatrices[1], 1)\r\n self.compare_multiple_level_array(obs, exp)\r\n\r\n # Multiple distance classes (even #).\r\n exp = (array([[-1, 0, 0], [0, -1, 1], [0, 1, -1]]), [3.5, 6.5])\r\n obs = self.small_mc_var_bins._find_distance_classes(\r\n self.small_mc_var_bins.DistanceMatrices[1], 2)\r\n self.compare_multiple_level_array(obs, exp)\r\n\r\n # Multiple distance classes (odd #).\r\n exp = (array([[-1, 0, 1], [0, -1, 2], [1, 2, -1]]),\r\n [2.0, 3.5, 6.5])\r\n obs = self.small_mc_var_bins._find_distance_classes(\r\n self.small_mc_var_bins.DistanceMatrices[1], 3)\r\n self.compare_multiple_level_array(obs, exp)\r\n\r\n # More classes than distances.\r\n exp = (array([[-1, 0, 1], [0, -1, 2], [1, 2, -1]]),\r\n [2.0, 3.5, 6.5, 8])\r\n obs = self.small_mc_var_bins._find_distance_classes(\r\n self.small_mc_var_bins.DistanceMatrices[1], 4)\r\n self.compare_multiple_level_array(obs, exp)",
"def fairness_discrepancy(props, n_classes, norm=0):\n # unique, freq = np.unique(data, return_counts=True)\n # props = freq / len(data) #Proportion of data that belongs to that data\n \n # #------------------Modification to correct the zero support problem------------------------------------------------\n # temp=np.zeros(n_classes)\n # temp[unique]=props\n # props=temp\n # #------------------------------------------------------------------------------\n \n # print (freq)\n truth = 1./n_classes\n\n\n # L2 and L1=================================================================================================\n #(Remove Normalisation)\n l2_fair_d = np.sqrt(((props - truth)**2).sum())\n l1_fair_d = abs(props - truth).sum()\n\n # q = props, p = truth\n # kl_fair_d = (props * (np.log(props) - np.log(truth))).sum()\n\n #Cross entropy\n p=np.ones(n_classes) \n # ce=cross_entropy(p,props,n_classes)-cross_entropy(p,p,n_classes)\n \n #information specificity=====================================================================================\n rank=np.linspace(1,n_classes-1,n_classes-1)\n rank[::-1].sort() #Descending order\n perc=np.array([i/np.sum(rank) for i in rank])\n \n \n props[::-1].sort()\n alpha=props[1:]\n specificity=abs(props[0]-np.sum(alpha*perc))\n info_spec=(l1_fair_d+specificity)/2\n \n #Wasstertein Distance\n wd=wasserstein_distance(props,np.ones(len(props))*truth)\n \n #Wassertein Specificity\n wds=(wd+specificity)/2\n if norm==0:\n return l2_fair_d, l1_fair_d,info_spec,specificity,wd,wds\n # return l2_fair_d, l1_fair_d,info_spec,specificity\n else:\n return l2_fair_d/metric_max(n_classes,\"L2\"), l1_fair_d/metric_max(n_classes,\"L1\"),info_spec/metric_max(n_classes,\"Is\"),specificity,wd/metric_max(n_classes,\"Wd\")\n # return l2_fair_d/metric_max(n_classes,\"l2\"), l1_fair_d/metric_max(n_classes,\"l1\"),info_spec/metric_max(n_classes,\"is\"),specificity",
"def dominates(self, other):\n at_least_as_good = all(s <= o for s, o in zip(self.scores, other.scores))\n better_in_some_respect = any(s < o for s, o in zip(self.scores, other.scores))\n\n return at_least_as_good and better_in_some_respect",
"def tuple_distance(t1, t2, var_attr, cat_sim, num_dis_norm, agg_col):\n dis = 0.0\n cnt = 0\n if var_attr is None:\n var_attr = t1.keys()\n max_dis = 0.0\n\n for v_col in var_attr:\n col = v_col.replace(' ', '')\n\n if col not in t1 and col not in t2:\n # if col == 'name':\n # dis += 10000\n # else:\n # dis += 100\n # cnt += 1\n continue\n if col not in t1 or col not in t2:\n # if col == 'name':\n # dis += 10000\n # else:\n # dis += 100\n # cnt += 1\n continue\n\n if col == 'name':\n if t1[col] != t2[col]:\n dis += 10000\n cnt += 1\n continue\n\n if col == 'venue' or col == 'pubkey':\n if t1[col] != t2[col]:\n dis += 0.25\n cnt += 1\n continue\n\n if cat_sim.is_categorical(col):\n\n t1_key = str(t1[col]).replace(\"'\", '').replace(' ', '')\n t2_key = str(t2[col]).replace(\"'\", '').replace(' ', '')\n s = 0\n if t1[col] == t2[col]:\n s = 1\n else:\n s = cat_sim.compute_similarity(col, t1_key, t2_key, agg_col)\n\n if s == 0:\n dis += 1\n max_dis = 1\n else:\n dis += (((1.0 / s)) * ((1.0 / s))) / 100\n # dis += (1-s) * (1-s)\n if math.sqrt((((1.0 / s)) * ((1.0 / s)) - 1) / 100) > max_dis:\n max_dis = math.sqrt((((1.0 / s)) * ((1.0 / s)) - 1) / 100)\n\n cnt += 1\n else:\n if col not in num_dis_norm or num_dis_norm[col]['range'] is None:\n if t1[col] == t2[col]:\n dis += 0\n else:\n dis += 1\n else:\n if col != agg_col and col != 'index':\n if isinstance(t1[col], datetime.date):\n diff = datetime.datetime(t1[col].year, t1[col].month, t1[col].day) - datetime.datetime.strptime(\n t2[col], \"%Y-%m-%d\")\n temp = diff.days\n else:\n temp = abs(float(t1[col]) - float(t2[col]))\n\n dis += 0.5 * math.pow(temp, 8)\n if temp > max_dis:\n max_dis = temp\n cnt += 1\n\n return math.pow(dis, 0.5)",
"def isMoreGeneral(self,cl):\n if len(self.specified_attributes) >= len(cl.specified_attributes):# and self.action != cl.action and self.prediction < cl.prediction and self.error > cl.error:\n return False\n for i in range(len(self.specified_attributes)): #Check each attribute specified in self.condition\n att_info = cons.env.format_data.attribute_info[self.specified_attributes[i]]\n if self.specified_attributes[i] not in cl.specified_attributes:\n return False\n #-------------------------------------------------------\n # CONTINUOUS ATTRIBUTE\n #-------------------------------------------------------\n otherRef = cl.specified_attributes.index(self.specified_attributes[i])\n if att_info[0]:\n #If self has a narrower ranger of values than it is a subsumer\n if self.condition[i][0] < cl.condition[otherRef][0]:\n return False\n if self.condition[i][1] > cl.condition[otherRef][1]:\n return False\n #else: # discrete attributes\n # if self.condition[i] != cl.condition[otherRef]:\n # return False\n\n return True",
"def _check_ensembles_are_same_size(p, q):\n if p.npdf != q.npdf:\n raise ValueError(\"Input ensembles should have the same number of distributions\")",
"def overrepresented_units(distribution, classes=None):\n # Regroup into classes if specified. Otherwise return categories indicated\n # in the data\n if not classes:\n classes = return_categories(distribution) \n\n\n ## Compute the representation of the different classes in all areal units\n rep = mb.representation(distribution, classes)\n\n ## Find the tracts where classes are overrepresented\n areal_units = {cl:[au for au in rep\n if rep[au][cl][0] > 1 + 2.57*math.sqrt(rep[au][cl][1])] \n for cl in classes}\n\n return areal_units",
"def fits(x, y):\n return y.memory <= x.memory and y.cores <= x.cores and y.disk <= x.disk",
"def compare(self, other, enforce_mask=False, enforce_grid=False,\n enforce_area=False, enforce_aream=False, enforce_all=False):\n eps_mask = 1.0e-6\n eps_grid = 1.0e-2\n eps_area = 1.0e-1\n\n # Do a global gather to create a non-distributed attribute vector\n debugPrint( \"self.lgrid:\\n\",self.lgrid )\n debugPrint( \"other.lgrid:\\n\",other.lgrid )\n gGrid1 = attributevector.AttributeVector(self.ifields, self.rfields, self.lsize())\n gGrid1.initv(self.lgrid, self.lgrid.lsize())\n gGrid1.gather(self.lgrid, self.gsMap, comm.world_pe0, comm.component_pid, comm.local_comm) \n gGrid2 = attributevector.AttributeVector(other.ifields, other.rfields, other.lsize())\n gGrid2.initv( other.lgrid, other.lgrid.lsize() )\n gGrid2.gather(other.lgrid, self.gsMap,comm.world_pe0, comm.component_pid, comm.local_comm)\n\n # From here on, everything is done by the root pe\n if( comm.component_pid != comm.world_pe0 ):\n return\n\n # Compare size of domain\n npts1 = gGrid1.lsize()\n npts2 = gGrid2.lsize()\n npts = npts1\n\n if ( npts1 == npts2 ):\n debugPrint( \"the domain size is \",npts )\n else:\n debugPrint( \"domain size #1 = \", npts1 )\n debugPrint( \"domain size #2 = \", npts2 )\n debugPrint( \"ERROR: domain size mis-match\" )\n # call shr_sys_abort(subName // \"ERROR: domain size mis-match\")\n # Exceptions?\n\n # If there was no problem, continue:\n # Compare Domain masks:\n debugPrint(\"gData1:\\n\",gGrid1)\n debugPrint(\"gData2:\\n\",gGrid2)\n data1,data1_size = gGrid1.exportRAttr(\"mask\")#rcode)?\n data2,data2_size = gGrid2.exportRAttr(\"mask\")#rcode)?\n \n ndiff = 0\n debugPrint( \"npts:\",npts )\n debugPrint( \"length of data1:\",data1_size )\n for n in xrange(0,npts-1):\n if ( (( (abs(data1[n])) > eps_mask ) and (abs(data1[n]) < eps_mask )) or \n ( (( abs(data1[n])) < eps_mask ) and (( abs(data1[n])) > eps_mask) ) ):\n ndiff = ndiff + 1\n\n # Enforce consistency: \n # Nested function declaration\n def enforce_consistency(msg,exception=None):\n if (enforce_mask or enforce_all):\n if (ndiff > 0):\n debugPrint( msg )\n # Raise Exception\n \n enforce_consistency(\"ERROR: incompatible domain masks\")\n \n # Compute Maximum Latitude and Longitude Differences\n mask = data1\n ndiff = 0\n data1,data1_size = gGrid1.exportRAttr(\"lat\")#,rcode))\n data2,data2_size = gGrid2.exportRAttr(\"lat\")#,rcode))\n diff = 0\n max_diff = 0.0\n for n in xrange(npts):\n if( abs( mask[n] ) > eps_mask ):\n diff = abs( data1[n] - data2[n] )\n max_diff = max(max_diff, diff)\n if( diff > eps_grid ):\n ndiff = ndiff + 1\n debugPrint( \"Maximum latitude difference = \",max_diff )\n\n data1,data1_size = gGrid1.exportRAttr(\"lon\")#,rcode))\n data2,data2_size = gGrid2.exportRAttr(\"lon\")#,rcode))\n max_diff = 0.0\n\n for n in xrange(npts):\n if( abs( mask[n] ) > eps_mask ):\n x1 = data1[n]\n x2 = data2[n]\n if( x1 > x2 ): #make sure x1 < x2\n # swap(x1,x2)\n x1 = data2[n]\n x2 = data1[n]\n while( (x1+360.0) < (x2+180.0) ):#longitude is periodic\n x1 = x1 + 360.0\n diff = abs( x2 - x1 )\n max_diff = max(max_diff,diff)\n \n if (diff > eps_grid):\n ndiff = ndiff + 1\n debugPrint( \"Maximum longitude difference = \",max_diff )\n\n enforce_consistency(\"ERROR: incompatible domain grid coordinates!\")\n\n # Compare Area:\n data1,data1_size = gGrid1.exportRAttr( \"area\" )#, rcode )\n data2,data2_size = gGrid2.exportRAttr( \"area\" )#, rcode )\n\n ndiff = 0\n max_diff = 0.0\n\n for n in xrange(npts):\n if( abs( mask[n] ) > eps_mask ):\n if( data2[n] != 0.0 ):\n diff = abs( (data2[n] - data1[n]) / data2[n] )\n max_diff = 
max(max_diff,diff)\n if( diff > eps_area ):\n ndiff = ndiff + 1\n debugPrint( \"Maxium relative error of area (model) = \", max_diff )\n\n enforce_consistency(\"ERROR: icompatible domain area(model)\")\n\n # Compare aream\n data1,data1_size = gGrid1.exportRAttr(\"aream\")#,rcode))\n data2,data2_size = gGrid2.exportRAttr(\"aream\")#,rcode))\n\n ndiff = 0\n max_diff = 0.0\n for n in xrange(npts):\n if ( abs( mask[n] ) > eps_mask ):\n if( data2[n] != 0.0 ):\n diff = abs((data2[n] - data1[n])/data2[n])\n max_diff = max(max_diff,diff)\n if( diff > eps_area ):\n ndiff = ndiff + 1\n debugPrint( \"maximum relative error of area(map) = \",max_diff )\n\n enforce_consistency(\"ERROR: incompatible domain area (map)\")\n\n # Clean up, we're finished!\n return",
"def evaluateDistribution(self, samples):\n self._unique_elements = len(np.unique(samples))\n \n if np.allclose(np.round(samples) - samples, 0):\n self._measure_type = mt.DISCRETE\n else:\n self._measure_type = mt.CONTINUOUS \n\n measure_type = eval('dg.{}'.format(self._distribution_type))().measure_type\n self._measure_type_match = measure_type == self._measure_type\n \n if self._distribution_type == dt.BERNOULLI.name.title():\n \n if self._unique_elements == 2:\n self._pass = True\n self._bernoulli = True\n np.seterr('ignore')\n if self._gof is not None and self._score != 1:\n if type(self._gof) is not str:\n if self._gof[0] != np.nan and self._gof[1] != np.nan and self._gof[0] is not None and self._gof[1] is not None: \n self._score = 1/np.exp((self._gof[0] - self._gof[1])/self._gof[1])\n if self._gof[0] < self._gof[1]:\n self._pass = True",
"def dominate(aver_size1, aver_size2, likelibest1, likelibest2):\n return (aver_size1 < aver_size2 and likelibest1 >= likelibest2) or (\n aver_size1 <= aver_size2 and likelibest1 > likelibest2\n )",
"def test_calculate_class_2_individuals_best_response_simulation_equal_split():\n lambda_2 = 0.3\n equal_split = calculate_class_2_individuals_best_response(\n lambda_2=lambda_2,\n lambda_1_1=0.3,\n lambda_1_2=0.3,\n mu_1=0.2,\n mu_2=0.2,\n num_of_servers_1=4,\n num_of_servers_2=4,\n threshold_1=3,\n threshold_2=3,\n system_capacity_1=float(\"inf\"),\n system_capacity_2=float(\"inf\"),\n buffer_capacity_1=float(\"inf\"),\n buffer_capacity_2=float(\"inf\"),\n use_simulation=True,\n runtime=500,\n num_of_trials=5,\n warm_up_time=100,\n seed_num_1=0,\n seed_num_2=0,\n )\n\n assert np.isclose(equal_split, 0.5)",
"def is_max_same_class_reached(chars, klass, max_same_class):\n return is_max_pred_reached(chars, lambda c: c in klass, max_same_class)",
"def fairness_discrepancy(data, n_classes, norm=0):\n unique, freq = np.unique(data, return_counts=True)\n props = freq / len(data) #Proportion of data that belongs to that data\n \n #------------------Modification to correct the zero support problem------------------------------------------------\n temp=np.zeros(n_classes)\n temp[unique]=props\n props=temp\n #------------------------------------------------------------------------------\n \n # print (freq)\n truth = 1./n_classes\n\n\n # L2 and L1=================================================================================================\n l2_fair_d = np.sqrt(((props - truth)**2).sum())/n_classes\n l1_fair_d = abs(props - truth).sum()/n_classes\n\n # q = props, p = truth\n # kl_fair_d = (props * (np.log(props) - np.log(truth))).sum()\n\n #Cross entropy\n p=np.ones(n_classes)/n_classes \n # ce=cross_entropy(p,props,n_classes)-cross_entropy(p,p,n_classes)\n \n #information specificity=====================================================================================\n rank=np.linspace(1,n_classes-1,n_classes-1)\n rank[::-1].sort() #Descending order\n perc=np.array([i/np.sum(rank) for i in rank])\n \n #Create array to populate proportions\n # props2=np.zeros(n_classes)\n # props2[unique]=props\n \n props[::-1].sort()\n alpha=props[1:]\n specificity=abs(props[0]-np.sum(alpha*perc))\n info_spec=(l1_fair_d+specificity)/2\n \n #Wasstertein Distance\n wd=wasserstein_distance(props,np.ones(len(props))*truth)\n \n #Wassertein Specificity\n wds=(wd+specificity)/2\n if norm==0:\n for i in props:\n f.write(\"%f \"%(i))\n f.write(\"\\n\")\n return l2_fair_d, l1_fair_d,info_spec,specificity,wd,wds\n # return l2_fair_d, l1_fair_d,info_spec,specificity\n else:\n return l2_fair_d/metric_max(n_classes,\"l2\"), l1_fair_d/metric_max(n_classes,\"l1\"),info_spec/metric_max(n_classes,\"is\"),specificity,wd/metric_max(n_classes,\"wd\"),wds/metric_max(n_classes,\"wds\")\n # return l2_fair_d/metric_max(n_classes,\"l2\"), l1_fair_d/metric_max(n_classes,\"l1\"),info_spec/metric_max(n_classes,\"is\"),specificity",
"def test_maximum_common_subgraph(graph1, graph2, attrs):\n expected = vermouth.graph_utils.categorical_maximum_common_subgraph(graph1, graph2, attrs)\n\n found = vermouth.graph_utils.maximum_common_subgraph(graph1, graph2, attrs)\n\n note((\"Attributes that must match\", attrs))\n note((\"Graph 1 nodes\", graph1.nodes(data=True)))\n note((\"Graph 1 edges\", graph1.edges))\n note((\"Graph 2 nodes\", graph2.nodes(data=True)))\n note((\"Graph 2 edges\", graph2.edges))\n # We don't find all MCS'es. See comment in\n # vermouth.graph_utils.maximum_common_subgraph\n found = make_into_set(found)\n expected = make_into_set(expected)\n\n if found == expected:\n event(\"Exact match\")\n assert found <= expected",
"def perform_wilcoxon_validation(series1, series2):\n differences, sorted_diffs = ExperimentUtil._calculate_differences(series1, series2)\n sorted_diffs.sort()\n position_diffs = ExperimentUtil._calculate_position_differences(differences, sorted_diffs)\n\n for index, score in enumerate(differences):\n if score < 0:\n position_diffs[index] = position_diffs[index] * -1\n\n sum_positive, sum_negative = ExperimentUtil._calculate_positive_negative_sum(position_diffs)\n T = min(sum_positive, sum_negative)\n # TODO: Se o tamanho de n for maior que 30, seria preciso usar a tabela T-Student\n if len(position_diffs) <= 30:\n # TODO: Com o valor de T, precisamos ver qual o valor critico e elaborar melhor a resposta no relatorio\n return T < ExperimentUtil.wilcox_table[len(position_diffs)]",
"def test_sufficient_statistics(self):\n assert (\n len(self.data),\n self.data.var(),\n self.data.mean(),\n ) == sufficient_statistics(self.data)",
"def isSubset(self, other):\n for val, freq in self.items():\n if freq > other.freq(val):\n return False\n return True",
"def pareto_better(self, other: \"EvalItem\") -> bool:\n return self.size <= other.size and other.result <= self.result",
"def lengthCriterion(segment1, segment2, mergedSegments, minValidData, inversedIndex):\n \n nullCrit = max(mergedSegments.loc[segment1].nonNullProp,mergedSegments.loc[segment2].nonNullProp)>minValidData/2\n nSegsCrit = inversedIndex[inversedIndex.isin([segment1,segment2])].size>10\n lengthCrit = (mergedSegments.loc[segment1].length+mergedSegments.loc[segment2].length)>4\n\n return np.array([np.sum([nullCrit+nSegsCrit+lengthCrit])])",
"def isStochasticallyDominated(wvalues1, wvalues2, probabilitiesForObjectives):\n not_equal = False\n for self_wvalue, other_wvalue, p in zip(wvalues1, wvalues2, probabilitiesForObjectives):\n r = random.random()\n if (r<=p):\n if self_wvalue > other_wvalue:\n return False\n elif self_wvalue < other_wvalue:\n not_equal = True\n return not_equal",
"def matchClasses(self, probable, actual = None):\r\n if actual == None:\r\n actual = self.classes\r\n return not probable or len([x for x in probable if x in actual]) == len(probable)",
"def test_within_length(self):\r\n\r\n flow1 = Flowgram(\"0 1.2 2.1 3.4 0.02 0.01 1.02 0.08\") # len 7\r\n flow2 = Flowgram('0.5 1.0 4.1 0.0 0.0 1.23 0.0 3.1') # len 10\r\n\r\n self.assertTrue(within_length(flow1, 0, 10))\r\n self.assertFalse(within_length(flow1, 10, 20))\r\n self.assertFalse(within_length(flow2, 0, 5))\r\n self.assertTrue(within_length(flow2, 5, 20))\r\n self.assertTrue(within_length(flow2, 5, 11))",
"def testAllInputOptions(self):\n num_batches = 5\n num_channels = 3\n num_rows = 20\n num_cols = 30\n for pseudo_random in True, False:\n for overlapping in True, False:\n tensor_shape = (num_batches, num_rows, num_cols, num_channels)\n # random tensor with value in [-500.0, 500.0)\n rand_mat = self._PRNG.random_sample(tensor_shape) * 1000 - 500\n self._ValidateFractionalMaxPoolResult(\n rand_mat, [1, math.sqrt(3), math.sqrt(2), 1], pseudo_random,\n overlapping)",
"def dissimilarity(clusters):\n totDist = 0\n for c in clusters:\n totDist += c.variability()\n return totDist",
"def arecloseenough(x1, x2):\n\n if abs(x1 - x2) <= VERYSMALL:\n return True\n \n return False",
"def is_best(self, metric: float) -> bool:"
] | [
"0.66576874",
"0.6199647",
"0.59378564",
"0.5789172",
"0.5702513",
"0.56209457",
"0.5614542",
"0.5610369",
"0.56012976",
"0.5587149",
"0.5558948",
"0.5496086",
"0.5490565",
"0.54831845",
"0.54587644",
"0.5453182",
"0.5418705",
"0.5415977",
"0.5410122",
"0.53655595",
"0.5350248",
"0.5323014",
"0.52976376",
"0.5277377",
"0.52688116",
"0.52520114",
"0.52285206",
"0.5228446",
"0.5224637",
"0.52155477"
] | 0.66751057 | 0 |
Get all predefined statistics as an instance of PermutationStatistic. | def _get_all(cls) -> Iterator["PermutationStatistic"]:
yield from (cls(name, func) for name, func in PermutationStatistic._STATISTICS) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _predefined_statistics() -> str:\n return \"\\n\".join(\n f\"[{i}] {name}\"\n for i, (name, _) in enumerate(PermutationStatistic._STATISTICS)\n )",
"def mutation_probabilities(self):\n return list(self.mutation_pool.values())",
"def show_predefined_statistics(idx: int = -1) -> None:\n if idx < 0:\n print(PermutationStatistic._predefined_statistics())\n else:\n print(PermutationStatistic._STATISTICS[idx][0])",
"def _perm_stat(self, index): # pragma: no cover\n\n permu = np.random.permutation(self.u)\n permv = np.random.permutation(self.v)\n\n # calculate permuted statics, store in null distribution\n perm_stat = self.indep_test._statistic(permu, permv)\n\n return perm_stat",
"def get_by_index(cls, idx: int) -> \"PermutationStatistic\":\n return cls(*PermutationStatistic._STATISTICS[idx])",
"def get_general_stats() ->List[BaseStat]:\n return [PositionalTendencies(),\n SpeedTendencies(),\n ItemGoals(),\n DropshotGoals(),\n DropshotBallPhaseTimes(),\n DropshotStats()\n ]",
"def getStats(self):\n\n raise NotImplementedError",
"def statistics(self):\n return StatisticsCollection(self._statistics)",
"def statistics(self):\n return self.get_statistics()",
"def get_player_stats() -> List[BaseStat]:\n return [BoostStat(),\n PositionalTendencies(),\n Averages(),\n BallDistanceStat(),\n ControlsStat(),\n SpeedTendencies(),\n CarryStat(),\n PerPossessionStat(),\n SpeedTendencies(),\n RumbleItemStat(),\n KickoffStat(),\n DropshotStats(),\n DemoStat()\n ]",
"def statistics(self, **_):\n raise NotImplementedError(\"{} doesn't support statistics.\".format(__class__.__name__))",
"def statistics(self):\n return self._statistics",
"def _get_stats(self):\n self.stats = set()\n self._bstats = set()\n self._h_bstats = set()\n self._tstats = set()\n self._ftstats = set()\n for cl in self.data_classes:\n for stat in cl._bstats:\n self.stats.add(stat)\n self._bstats.add(stat)\n for stat in cl._hbstats:\n self.stats.add(stat)\n self._h_bstats.add(stat)\n for stat in cl._tstats:\n self._tstats.add(stat)\n self.stats.add(stat)\n try:\n trips = cl.triples\n f_stats = cl.read_tfstats(trips,eq=False,lande=False)\n for trip in f_stats:\n for stat in f_stats[trip]:\n self._ftstats.add(stat)\n self.stats.add(stat)\n except:\n AttributeError",
"def get_statistics(self):\n return self.results",
"def statistics(self):\n raise NotImplemented()",
"def get_profile_stats():\n return p_stats",
"def PermutationTest(self):\n # U = union of B and T\n union_sample = np.concatenate((self.x_benchmark, self.x_trial), axis=0)\n n_samples = self.NB + self.NT\n \n # Initialize array of test statistic values\n self.TS_tilde = np.zeros(self.n_perm, dtype=np.float)\n \n count=0\n print(\"Running {:d} Permutations... 0%\".format(self.n_perm))\n \n # loop over different samplings\n for i in range(self.n_perm):\n \n # Print progress\n progress = int(round(((i+1)/self.n_perm)*100,0))\n progress_list = [25, 50, 75, 100]\n if count < len(progress_list) and progress == progress_list[count]:\n count+=1\n print(\"Running {:d} Permutations... {:d}%\".format(self.n_perm, progress))\n \n # Random permutations of U (sampling without replacement)\n x_resampled = shuffle(union_sample)\n # Assign first NB elements to Benchmark\n B_resampled = x_resampled[:self.NB]\n # Assign remaning NT elements to Trial\n T_resampled = x_resampled[self.NB:]\n \n # Compute the test statistic\n self.TS_tilde[i] = self.TestStatistic(B_resampled, T_resampled)",
"def get_stats(self):\n return self.stats",
"def returnPlayerStats(self):\n\t\tplayerStats = [self.name, \n\t\t\t\t\t self.agility, \n\t\t\t\t\t self.personality, \n\t\t\t\t\t self.sanity, \n\t\t\t\t\t self.strength, \n\t\t\t\t\t self.progress]\n\t\treturn playerStats",
"def get_stats(self):\n return self.manager.get_stats(self)",
"def stats(self):\n pass",
"def base_stats(self):\n return (\n self._get_seconds_played_stats_items()\n + self._get_possessions_played_stats_items()\n )",
"def theoretical_stats_selectivity(self) -> np.ndarray:\n warn('This method will likely be phased out', category=FutureWarning)\n grand_final = []\n all_of_it = []\n for elt in self.final_comb_table:\n for elt2 in self.mean_and_sd_dic.keys():\n if str(elt[:self.mutation_number]) == str(elt2):\n elt = np.append(elt, list(self.mean_and_sd_dic[elt2]))\n for elt3 in self.combs_only:\n if np.array_equal(elt[len(self.mutations_list)], elt3) == True:\n theor_mean = np.array([0])\n replicate_values = np.zeros((1, len(self.replicate_matrix[0])))\n for elt4 in elt3:\n target = self.mean_and_sd_array[elt4 - 1][0]\n theor_mean = np.add(theor_mean, target)\n target2 = self.replicate_matrix[elt4 - 1]\n replicate_values = np.add(replicate_values, target2)\n theor_sd = (np.std(replicate_values)) / math.sqrt(self.replicate_number)\n elt = np.append(elt, list(theor_mean))\n elt = np.append(elt, theor_sd)\n grand_final.append(elt)\n if self.verbose:\n print('mutationlist', self.mutations_list)\n print('grand_final', grand_final)\n for elt5 in grand_final:\n at_last = (elt5[len(self.mutations_list) + 1:][0]) - (elt5[len(self.mutations_list) + 1:][2])\n elt5 = np.append(elt5, at_last)\n all_of_it.append(elt5)\n return np.array(all_of_it)",
"def stats(self):\r\n return {}",
"def stats(self) -> Dict:\n return self._stats",
"def get_stats(self) -> Dict[str, Any]:\r\n stats = {}\r\n for attr in [attr for attr in self.__dict__ if attr not in Stats.PRINT_IGNORES]:\r\n stats[attr] = self.get_stat(attr)\r\n stats[\"level\"] = self.level\r\n return stats",
"def mem(self) -> List[float]:\n return list(map(attrgetter(\"mem\"), self.stats))",
"def stats(self):\n return self._stats",
"def collect_all_perms(cls):\n permissions = filter(lambda perm: perm.startswith('biom_perm') or perm.startswith('entity_perm'), dir(cls))\n\n result = [{\n 'perm_name': perm,\n 'description': getattr(cls, perm).__doc__,\n 'perm_type': getattr(cls, perm).action_type if hasattr(getattr(cls, perm), 'action_type') else None,\n 'default_value': getattr(cls, perm).default_value if hasattr(getattr(cls, perm), 'default_value') else None,\n\n } for perm in permissions]\n return result",
"def _samples(self):\n finite_types = \\\n [QuiverMutationType(t) for t in [['A', 1], ['A', 5], ['B', 2], ['B', 5],\n ['C', 3], ['C', 5], ['D', 2], ['D', 5],\n [\"E\", 6], [\"E\", 7], [\"E\", 8], [\"F\", 4],\n [\"G\", 2]]]\n affine_types = \\\n [QuiverMutationType(t) for t in [['A', [1,1], 1], ['A', [4,5], 1], ['D', 4, 1], ['BB', 5, 1]]]\n elliptic_types = \\\n [QuiverMutationType(t) for t in [['E', 6, [1,1]], ['E', 7, [1,1]]]]\n mutation_finite_types = \\\n [QuiverMutationType(t) for t in [['R2',(1,5)], ['R2',(3,5)]]]\n mutation_infinite_types = \\\n [QuiverMutationType(t) for t in [['E',10], ['BE',5], ['GR',(3,10)], ['T',(3,3,4)]]]\n\n return finite_types + affine_types + elliptic_types + mutation_finite_types + mutation_infinite_types"
] | [
"0.70551056",
"0.6512036",
"0.6451701",
"0.6330738",
"0.62371397",
"0.6094051",
"0.60733724",
"0.59972787",
"0.59330034",
"0.587885",
"0.5836238",
"0.57374936",
"0.56783426",
"0.5667591",
"0.5646153",
"0.55885726",
"0.5553806",
"0.5530332",
"0.55006117",
"0.54704416",
"0.5459323",
"0.54377365",
"0.5434017",
"0.54305214",
"0.5413591",
"0.5406655",
"0.53762865",
"0.5366028",
"0.53556323",
"0.53511226"
] | 0.7527292 | 0 |
Given a bijection, check what statistics transform into others. | def check_all_transformed(cls, bijection: BijectionType) -> Dict[str, List[str]]:
transf = defaultdict(list)
all_stats = cls._get_all()
for stat1, stat2 in product(all_stats, all_stats):
if all(stat1.func(k) == stat2.func(v) for k, v in bijection.items()):
transf[stat1.name].append(stat2.name)
return dict(transf) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def preserved_in(self, bijection: BijectionType) -> bool:\n return all(self.func(k) == self.func(v) for k, v in bijection.items())",
"def analyse(self):\n self.__try_fitting()\n self.second.rotate()\n self.__try_fitting()",
"def test_sufficient_statistics(self):\n assert (\n len(self.data),\n self.data.var(),\n self.data.mean(),\n ) == sufficient_statistics(self.data)",
"def check_all_preservations(cls, bijection: BijectionType) -> Iterator[str]:\n return (stats.name for stats in cls._get_all() if stats.preserved_in(bijection))",
"def test_estimate_statistics_ppswor(self):\n s = private_sampling.ThresholdSample(1.0,\n private_sampling.PpsworSamplingMethod)\n element_weight = math.log(FAILURE_PROBABILITY_INVERSE, math.e)\n s.process(\"a\", element_weight)\n sampling_probability = (FAILURE_PROBABILITY_INVERSE -\n 1) / FAILURE_PROBABILITY_INVERSE\n self.assertEqual(s.estimate_statistics(),\n element_weight / sampling_probability)",
"def check_collisions(self):",
"def testTicket1025(self):\n \n # check the exact example in the ticket\n values = [1.0, 2.0, 3.0, 2.0]\n self.assertEqual(afwMath.makeStatistics(values, afwMath.MEDIAN).getValue(), 2)\n self.assertEqual(afwMath.makeStatistics(sorted(values), afwMath.MEDIAN).getValue(), 2)\n\n # check some other possible ways it could show up\n values = range(10)\n self.assertEqual(afwMath.makeStatistics(values, afwMath.MEDIAN).getValue(), 4.5)\n values = range(11)\n self.assertEqual(afwMath.makeStatistics(values, afwMath.MEDIAN).getValue(), 5.0)",
"def testoptdone(self):\r\n\r\n assert self.data.optdone\r\n\r\n targets = self.data.geotargets\r\n values = numpy.abs(self.data.geovalues[-1])\r\n\r\n # Since the other criteria are not used and are not printed in this case, they should\r\n # be parsed as numpy.inf, for which we can check.\r\n assert numpy.isinf(targets[2])\r\n assert numpy.isinf(targets[4])\r\n\r\n conv = values[1] < targets[1] and (values[0] < targets[0] or values[3] < targets[3])\r\n assert conv",
"def aggregate_behavior(Z):\n nsamp, nsen = Z.shape\n median_trace = np.median(Z, axis=1)\n dev = np.std(Z - np.repeat(np.matrix(median_trace).transpose(),\n nsen, axis=1), axis=1)\n cmpr_high_variability = [(Z[:, sen_i] > median_trace + 2 * dev\n ).sum()/nsamp > 0.5 for sen_i in range(nsen)]\n return nsamp, nsen, cmpr_high_variability, median_trace, dev",
"def check_to_clause(shot, k, dic):\n if ('_to' in k and isinstance(shot,(list, tuple, ndarray)) \n and dic[k][1] == 0):\n print('******** Warning - valid shot of 0 in to clause?')",
"def is_bijective(self):\n return self.is_injective() and self.is_surjective()",
"def matches_p2(info: Mapping[str, int],\n result: Mapping[str, int]) -> bool:\n for key, value in info.items():\n if key in ['cats', 'trees']:\n if value <= result[key]:\n return False\n elif key in ['pomeranians', 'goldfish']:\n if value >= result[key]:\n return False\n else:\n if result[key] != value:\n return False\n return True",
"def testVarianceClip(self):\n stats = afwMath.makeStatistics(self.image, afwMath.VARIANCECLIP)\n self.assertEqual(stats.getValue(afwMath.VARIANCECLIP), 0)",
"def ct_bis(env, a, median, mad):\n\n\tb1 = (a.T < median - env.threshold * mad).T\n\tb2 = a[:, :a.shape[1] - 1] < a[:, 1:]\n\tb3 = a[:, 1:] < a[:, :a.shape[1] - 1]\n\tb1[:, :a.shape[1] - 1] = b1[:, :a.shape[1] - 1] & b2\n\tb1[:, 1:] = b1[:, 1:] & b3\n\treturn (b1)",
"def test_equivalence_to_filter_output(self):\n error = np.linalg.norm(self.postmean - self.mean[-1])/np.linalg.norm(self.mean[-1])\n self.assertLess(error, 1e-12)",
"def test_equivalence_to_filter_output(self):\n error = np.linalg.norm(self.postmean - self.mean[-1])/np.linalg.norm(self.mean[-1])\n self.assertLess(error, 1e-12)",
"def test_equivalence_to_filter_output(self):\n error = np.linalg.norm(self.postmean - self.mean[-1])/np.linalg.norm(self.mean[-1])\n self.assertLess(error, 1e-12)",
"def extract_belief(ontology, state, threshold=0.3):\n\n \"\"\"\n need to check if the value at argmax is bigger than a threshold\n \"\"\"\n\n request_idx = np.argmax([item[1] for item in state[\"request\"].items()])\n frequency_idx = np.argmax([item[1] for item in state[\"frequency\"].items()])\n illness_type_idx = np.argmax([item[1] for item in state[\"type\"].items()])\n symptom_idx = np.argmax([item[1] for item in state[\"symptom\"].items()])\n escalation_idx = np.argmax([item[1]\n for item in state[\"escalation\"].items()])\n duration_idx = np.argmax([item[1] for item in state[\"duration\"].items()])\n confirmation_idx = np.argmax([item[1]\n for item in state[\"confirmation\"].items()])\n\n # then it is neccessary to map the indices back to words\n\n request = is_plausible(state[\"request\"].items(\n ), ontology, \"request\", request_idx, threshold)\n frequency = is_plausible(\n state[\"frequency\"].items(), ontology, \"frequency\", frequency_idx, threshold)\n illness_type = is_plausible(\n state[\"type\"].items(), ontology, \"type\", illness_type_idx, threshold)\n symptom = is_plausible(state[\"symptom\"].items(\n ), ontology, \"symptom\", symptom_idx, threshold)\n\n escalation = is_plausible(state[\"escalation\"].items(\n ), ontology, \"escalation\", escalation_idx, threshold)\n\n duration = is_plausible(state[\"duration\"].items(\n ), ontology, \"duration\", duration_idx, threshold)\n confirmation = is_plausible(state[\"confirmation\"].items(\n ), ontology, \"confirmation\", confirmation_idx, threshold)\n\n if request is not None:\n request = \"request \"+request\n if frequency is not None:\n frequency = \"frequency \"+frequency\n if illness_type is not None:\n illness_type = \"type \"+illness_type\n if symptom is not None:\n symptom = \"symptom \"+symptom\n if escalation is not None:\n escalation = \"escalation \"+escalation\n if duration is not None:\n duration = \"duration \"+duration\n if confirmation is not None:\n confirmation = \"confirmation \"+confirmation\n values = [request, frequency, illness_type,\n symptom, escalation, duration, confirmation]\n values = [val for val in values if val is not None]\n return sorted(values)",
"def get_most_probable_bit_with_thres(filtered_capture, prob_threshold):\n # Enumerate challenge/responses to build a list of\n # (probabilities, challenge, value)\n sorted_prob = []\n for challenge, response in filtered_capture.items():\n n0, n1 = response\n n = n0 + n1\n if n < MIN_COUNT_FOR_MEANING:\n # Ignore challenges with not enough measures\n continue\n # q0 = ncr(n, n0) * pow(1 - PROB, n0) * pow(PROB, n1)\n # q1 = ncr(n, n0) * pow(PROB, n0) * pow(1 - PROB, n1)\n # unnormalize_q0 = pow(1 - PROB, n0) * pow(PROB, n1)\n # unnormalize_q1 = pow(PROB, n0) * pow(1 - PROB, n1)\n # p_b0 = unnormalized_q0 / (unnormalized_q1 + unnormalized_q0)\n # p_b1 = unnormalized_q1 / (unnormalized_q1 + unnormalized_q0)\n if n >= 500:\n # Prevent \"OverflowError: (34, 'Numerical result out of range')\"\n p_b0 = n0 / (n0 + n1)\n p_b1 = n1 / (n1 + n0)\n else:\n p_b0 = 1. / (1 + pow(PROB / (1 - PROB), n0) * pow((1 - PROB) / PROB, n1))\n p_b1 = 1. / (1. + pow(PROB / (1 - PROB), n1) * pow((1 - PROB) / PROB, n0))\n # print(f\"[{n0:2}+{n1:2}={n:2}] pb0={p_b0:.3}, pb1={p_b1:.3}\")\n if p_b1 > prob_threshold and p_b1 > p_b0:\n sorted_prob.append((p_b1, challenge, 1))\n elif p_b0 > prob_threshold and p_b0 > p_b1:\n sorted_prob.append((p_b0, challenge, 0))\n\n sorted_prob.sort()\n # print(f\"Trying to find a bit out of {len(sorted_prob)} challenges with p>={prob_threshold}...\")\n while sorted_prob:\n best_prob, best_chall, best_val = sorted_prob.pop()\n best_count = count_ones(best_chall)\n if best_count == 1:\n print(f\"Found a bit: {best_chall:#x} = {best_val} (proba {best_prob})\")\n return (best_chall, best_val)\n\n # Combine the probabilities\n for prob, chall, val in sorted_prob.copy():\n if chall == best_chall:\n continue\n # Reduce the masking with XOR\n if chall & ~best_chall == 0:\n # Ensure that the masking is reasonable\n count_xor = count_ones(best_chall ^ chall)\n if count_xor <= 2 and count_xor < best_count:\n new_prob = prob * best_prob\n if new_prob > prob_threshold:\n sorted_prob.append((new_prob, chall ^ best_chall, val ^ best_val))\n sorted_prob.sort()\n\n print(f\"Unable to found a bit with threshold={prob_threshold}\")\n return None",
"def check(self):\r\n self.check_probabilities()\r\n self.check_sum()",
"def test_avalanche_warning_by_region_obs(self):\n pass",
"def if_any(self, other):\n return self.weighted_by_sum(other)",
"def test_mis_output(self, graph, constrained, cost_hamiltonian, mixer_hamiltonian):\n\n cost_h, mixer_h = qaoa.max_independent_set(graph, constrained=constrained)\n\n assert decompose_hamiltonian(cost_hamiltonian) == decompose_hamiltonian(cost_h)\n assert decompose_hamiltonian(mixer_hamiltonian) == decompose_hamiltonian(mixer_h)",
"def _check_result(self, tesselation, orig_gdf, unique_id):\n # check against input layer\n ids_original = list(orig_gdf[unique_id])\n ids_generated = list(tesselation[unique_id])\n if len(ids_original) != len(ids_generated):\n\n self.collapsed = set(ids_original).difference(ids_generated)\n warnings.warn(\n f\"Tessellation does not fully match buildings. \"\n f\"{len(self.collapsed)} element(s) collapsed \"\n f\"during generation - unique_id: {self.collapsed}\"\n )\n\n # check MultiPolygons - usually caused by error in input geometry\n self.multipolygons = tesselation[tesselation.geometry.type == \"MultiPolygon\"][\n unique_id\n ]\n if len(self.multipolygons) > 0:\n warnings.warn(\n \"Tessellation contains MultiPolygon elements. Initial objects should \"\n f\"be edited. unique_id of affected elements: {list(self.multipolygons)}\"\n )",
"def _removeInsufficientTransformer(self, working_stats, params):\n\n for choice, subsets in working_stats.items():\n sufficient_values = [value for value in subsets if value > 0]\n if not sufficient_values:\n del working_stats[choice]\n\n return working_stats",
"def _no_improve(self):\n improve = [p-f for (f,p),_ in self.population]\n return np.mean(improve) < 1.0",
"def test_equivalence_to_filter_output(self):\n error = np.linalg.norm(self.postmean - self.mean[[-1, -10]])/np.linalg.norm(self.mean[-1])\n self.assertLess(error, 1e-12)",
"def test_equivalence_to_filter_output(self):\n error = np.linalg.norm(self.postmean - self.mean[[-1, -10]])/np.linalg.norm(self.mean[-1])\n self.assertLess(error, 1e-12)",
"def test_equivalence_to_filter_output(self):\n error = np.linalg.norm(self.postmean - self.mean[[-1, -10]])/np.linalg.norm(self.mean[-1])\n self.assertLess(error, 1e-12)",
"def compute_statistics(self):"
] | [
"0.5534995",
"0.4957287",
"0.4899984",
"0.4860593",
"0.4808464",
"0.47695872",
"0.47544986",
"0.47357872",
"0.47212732",
"0.4716885",
"0.46912074",
"0.4689866",
"0.468611",
"0.4674001",
"0.46639493",
"0.46639493",
"0.46639493",
"0.46483684",
"0.46417412",
"0.46233192",
"0.45975816",
"0.45965812",
"0.45875484",
"0.45744032",
"0.45665243",
"0.45637506",
"0.4546241",
"0.4546241",
"0.4546241",
"0.45327508"
] | 0.7150287 | 0 |
Yield all symmetric versions of a bijection. | def symmetry_duplication(
bijection: BijectionType,
) -> Iterator[BijectionType]:
return (
bij
for rotated in (
{k.rotate(angle): v.rotate(angle) for k, v in bijection.items()}
for angle in range(4)
)
for bij in (rotated, {k.inverse(): v.inverse() for k, v in rotated.items()})
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def symmetric(self):\n result = self.directed()\n result.extend([(down, up) for up, down in result])\n return Pairs(result)",
"def yield_symmetric_images(image):\n for h in (True, False): # horizontal\n for v in (True, False): # vertical\n for d in (True, False): # diagonal\n new_image = list(image)\n\n if v:\n new_image = list(reversed(new_image))\n\n if h:\n new_image = [row[::-1] for row in new_image]\n\n if d:\n new_image = [\n \"\".join([new_image[c][r] for c in range(len(new_image))])\n for r in range(len(new_image))\n ]\n\n yield tuple(new_image)",
"def symmetric(k):\r\n k_ = k.copy()\r\n k_.parts = [parts.symmetric.Symmetric(p) for p in k.parts]\r\n return k_",
"def _iter_pairs(graph):\n for u, v in set(graph.edges_iter()):\n yield u, v",
"def weakly_connected_components(G):\n seen = set()\n for v in G:\n if v not in seen:\n c = set(_plain_bfs(G, v))\n yield c\n seen.update(c)",
"def swp_combo_iter(self) -> Iterable[Tuple[Any, ...]]:\n return itertools.product(*(self._sweep_params[var] for var in self._swp_var_list))",
"def __iter__(self):\n from itertools import product\n\n if self._length == 1:\n if self._degree == 1:\n yield self([[0]])\n return\n\n S = self._sym\n for p in product(S, repeat=self._length - 1):\n if self._connected and not perms_are_connected(p, self._degree):\n continue\n yield self(list(p) + [None], check=False)",
"def all_pairs(self):\n return chain(self.nx_graph.edges(), nx.non_edges(self.nx_graph))",
"def __iter__(self):\n return iproduct(*self.sets)",
"def symmetrize(self):\n if self.is_symmetric:\n return self\n else:\n return self.append(self.reverse()).squash().scale(0.5)",
"def get_contradictory_pairs(graph):\n for u, v in _iter_pairs(graph):\n if pair_has_contradiction(graph, u, v):\n yield u, v",
"def iteritems(self):\n for aVal, bValues in self._forwardMap.iteritems():\n for bVal in bValues:\n yield aVal, bVal\n\n return",
"def get_all_possible_pairs(self, a, b):\n return itertools.product(a, b)",
"def indirect(stack):\n g = nx.Graph(stack)\n for group in nx.connected_components(g):\n yield from map(frozenset, combinations(group, 2))",
"def make_symmetric(prior):\n print \"making symmetric\"\n\n new_map = {}\n for key1 in prior.keys():\n for key2 in prior[key1].keys():\n if not key2 in prior:\n new_map.setdefault(key2, {})\n new_map[key2][key1] = prior[key1][key2]\n\n for key in new_map:\n assert not key in prior\n prior[key] = new_map[key]\n print \"fixed\", len(new_map), \"entries\"",
"def global_decomposition(iterable = None):\n\tfor graph in iterable:\n\t\tyield graph_decomposition(graph = graph)",
"def tamari_inversions_iter(self):\n n1 = self.size() + 1\n for a in range(1, self.size()): # a == n will never work\n ipa = self.increasing_parent(a)\n if ipa is None:\n max_b_1 = n1\n else:\n max_b_1 = ipa\n for b in range(a + 1, max_b_1):\n dpb = self.decreasing_parent(b)\n if dpb is None or dpb < a:\n yield (a, b)",
"def bipartite_sets(G):\n color=bipartite_color(G)\n X=set(n for n in color if color[n]==1)\n Y=set(n for n in color if color[n]==0)\n return (X,Y)",
"def comaIsSymmetric(self):\n\t\tfor i in range(2*self.totalBins):\n\t\t\tfor j in range(2*self.totalBins):\n\t\t\t\tif not self.coma[i,j] == self.coma[j,i]:\n\t\t\t\t\tprint i,j,self.coma[i,j],self.coma[j,i]\n\t\t\t\t\treturn False\n\t\treturn True",
"def is_symmetric(self):\n return self.all_equal(self.transpose())",
"def symmetry_reduction(s):\n for i in s:\n all_symmetry_items = symmetric_set(i)\n if i == '000001010':\n print(i, all_symmetry_items)\n for symmetry_item in all_symmetry_items:\n if symmetry_item in s:\n s.remove(symmetry_item)",
"def inequality_generator(self):\n for H in self.Hrepresentation():\n if H.is_inequality():\n yield H",
"def tripletGenerator(S):\n for a in S:\n for b in S:\n for c in S:\n yield (a, b, c)",
"def _symmetric_image(S_elems):\n image = S_elems[0]\n symmetric_image = cp.zeros(image.shape + (image.ndim, image.ndim))\n for idx, (row, col) in enumerate(\n combinations_with_replacement(range(image.ndim), 2)\n ):\n symmetric_image[..., row, col] = S_elems[idx]\n symmetric_image[..., col, row] = S_elems[idx]\n return symmetric_image",
"def get_consistent_edges(graph):\n for u, v in _iter_pairs(graph):\n if pair_is_consistent(graph, u, v):\n yield u, v",
"def __iter__(self):\n from sage.misc.mrange import cartesian_product_iterator\n\n if self._cd._length == 1:\n if self._cd._degree == 1:\n yield self([[0]])\n return\n\n S = self._cd._sym\n profile = list(self._profile)[:-1]\n for p in cartesian_product_iterator([S.conjugacy_class(pi)\n for pi in profile]):\n if self._cd._connected and not perms_are_connected(p, self._cd._degree):\n continue\n c = self._cd(list(p) + [None], check=False)\n if c.profile() == self._profile:\n yield c",
"def unique_iso(gs):\n # For speed, instead of using a separate isomorphism-checking\n # function, we use our own helper function ck_iso. This checks\n # isomorphism of 2 graphs, given the graphs and their _degree_verts\n # output.\n def ck_iso(gc, gcdv, hc, hcdv):\n # Compare nbr-degree sequences\n if len(gcdv) != len(hcdv):\n return False\n for k in gcdv:\n if k not in hcdv:\n return False\n if len(gcdv[k]) != len(hcdv[k]):\n return False\n # Now we know that gc, hc have the same order\n\n # Try all permutations of the vertex set of graph g that take\n # each vertex to a vertex whose neighbors have the same degree\n # sequence.\n n = len(gc)\n hcsets = list(map(set, hc))\n for p in _partition_perms(list(gcdv.values()), n):\n for v in range(n):\n if hcsets[p[v]] != set([p[w] for w in gc[v]]):\n # A set comprehension would be nice above\n break\n else:\n return True\n return False\n\n # canons is list of pairs: (gc, gcdv)\n canons = []\n\n for g in gs:\n gdv = _degree_verts(g)\n gvp = list(itertools.chain.from_iterable(\n ( gdv[k] for k in sorted(gdv.keys()) )\n ))\n\n # Make semi-canonical form of g\n gc = [ sorted([gvp.index(w) for w in g[v]]) for v in gvp ]\n gcdv = _degree_verts(gc)\n\n # Check isomorphism w/ each graph in canons\n for hc, hcdv in canons:\n if ck_iso(gc, gcdv, hc, hcdv):\n break\n else:\n canons.append((gc, gcdv))\n yield g",
"def get_symmetrized_bond_set(bond_force):\n\n bond_set = set()\n n_bonds = bond_force.getNumBonds()\n\n for k in range(n_bonds):\n (i0, i1, r0, k0) = bond_force.getBondParameters(k)\n bond_set.add((i0, i1))\n bond_set.add((i1, i0))\n\n return bond_set",
"def abelian_invariants(self):\n if self.is_trivial:\n return []\n gns = self.generators\n inv = []\n G = self\n H = G.derived_subgroup()\n Hgens = H.generators\n for p in primefactors(G.order()):\n ranks = []\n while True:\n pows = []\n for g in gns:\n elm = g**p\n if not H.contains(elm):\n pows.append(elm)\n K = PermutationGroup(Hgens + pows) if pows else H\n r = G.order()//K.order()\n G = K\n gns = pows\n if r == 1:\n break\n ranks.append(multiplicity(p, r))\n\n if ranks:\n pows = [1]*ranks[0]\n for i in ranks:\n for j in range(i):\n pows[j] = pows[j]*p\n inv.extend(pows)\n inv.sort()\n return inv",
"def iter_atom_pairs(self, bound=True, unique=True, sort=True):\n blacklist = []\n for atom1 in self.iter_atoms(sort=sort):\n for atom2 in self.iter_atoms(sort=sort):\n if not atom1 == atom2:\n if not bound or is_bound(atom1.cart, atom1.element, atom2.cart, atom2.element):\n blackstring = '{}{}'.format(*sorted([atom1.name, atom2.name]))\n if unique and not blackstring in blacklist:\n yield atom1, atom2\n blacklist.append(blackstring)\n elif not unique:\n yield atom1, atom2"
] | [
"0.65729105",
"0.59622586",
"0.59610325",
"0.58295286",
"0.56446946",
"0.5581517",
"0.5525889",
"0.5328512",
"0.5313593",
"0.5273254",
"0.526318",
"0.5234545",
"0.5233796",
"0.52252924",
"0.52191466",
"0.51288265",
"0.5095979",
"0.5066039",
"0.50310177",
"0.50216424",
"0.49980775",
"0.49851397",
"0.49759027",
"0.49492675",
"0.4944563",
"0.49234745",
"0.48897827",
"0.48886913",
"0.48876488",
"0.48864982"
] | 0.7548317 | 0 |
Creates a binary model using the configuration above. | def create_model(
input_length, input_depth, num_conv_layers, conv_filter_sizes, conv_stride,
conv_depths, max_pool_size, max_pool_stride, num_fc_layers, fc_sizes,
num_tasks, batch_norm, conv_drop_rate, fc_drop_rate
):
bin_model = binary_models.BinaryPredictor(
input_length=input_length,
input_depth=input_depth,
num_conv_layers=num_conv_layers,
conv_filter_sizes=conv_filter_sizes,
conv_stride=conv_stride,
conv_depths=conv_depths,
max_pool_size=max_pool_size,
max_pool_stride=max_pool_stride,
num_fc_layers=num_fc_layers,
fc_sizes=fc_sizes,
num_tasks=num_tasks,
batch_norm=batch_norm,
conv_drop_rate=conv_drop_rate,
fc_drop_rate=fc_drop_rate
)
return bin_model | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_model():\n m = model_class(*argv[2:-1])\n modelobj[\"model\"] = m",
"def create_model():\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--DISC_LR', type=float, default=1e-4)\r\n parser.add_argument('--GEN_LR', type=float, default=1e-3)\r\n parser.add_argument('--GEN_BETA1', type=float, default=0.9)\r\n parser.add_argument('--GEN_BETA2', type=float, default=0.999)\r\n parser.add_argument('--IMAGE_SIZE', type=int, default=None)\r\n parser.add_argument('--QUES_SIZE', type=int, default=None)\r\n parser.add_argument('--QUES_EMBED_SIZE', type=int, default=2048)\r\n parser.add_argument('--WORD_EMBED_SIZE', type=int, default=512)\r\n parser.add_argument('--VOCAB_SIZE', type=int, default=1004)\r\n args, task_args = parser.parse_known_args()\r\n override_if_not_in_args('--max_steps', '1000', task_args)\r\n override_if_not_in_args('--batch_size', '64', task_args)\r\n override_if_not_in_args('--eval_set_size', '370', task_args)\r\n override_if_not_in_args('--eval_interval_secs', '2', task_args)\r\n override_if_not_in_args('--log_interval_secs', '2', task_args)\r\n override_if_not_in_args('--min_train_eval_rate', '2', task_args)\r\n\r\n return Model(args.DISC_LR, args.GEN_LR, args.GEN_BETA1, args.GEN_BETA2,\r\n args.IMAGE_SIZE, args.QUES_EMBED_SIZE, args.WORD_EMBED_SIZE,\r\n args.QUES_SIZE, args.VOCAB_SIZE), task_args",
"def create_model(self):\n pass",
"def create_model(self):\n pass",
"def build_model():",
"def _binary_app(self):\n self.make_binary()",
"def createModel(config_path, checkpoint_path, graph_path):\n\n global build_graph, prev_classes\n\n trt_graph = None\n input_names = None\n \n if build_graph:\n frozen_graph, input_names, output_names = build_detection_graph(\n config=config_path,\n checkpoint=checkpoint_path\n )\n \n trt_graph = trt.create_inference_graph(\n input_graph_def=frozen_graph,\n outputs=output_names,\n max_batch_size=1,\n max_workspace_size_bytes=1 << 25,\n precision_mode='FP16',\n minimum_segment_size=50\n )\n\n with open(graph_path, 'wb') as f:\n f.write(trt_graph.SerializeToString())\n\n with open('config.txt', 'r+') as json_file: \n data = json.load(json_file)\n data['model'] = []\n data['model'] = [{'input_names': input_names}]\n json_file.seek(0)\n json_file.truncate()\n json.dump(data, json_file)\n\n else:\n with open(graph_path, 'rb') as f:\n trt_graph = tf.GraphDef()\n trt_graph.ParseFromString(f.read())\n with open('config.txt') as json_file: \n data = json.load(json_file)\n input_names = data['model'][0]['input_names']\n\n return Model(trt_graph, input_names)",
"def create_model(self, model_config):\n\n return self.conn.create_model(\n **model_config)",
"def create_model(self):\r\n model = self.model_fn(self.flags)\r\n print(model)\r\n return model",
"def create_model(configuration):\n model = find_model_using_name(configuration['model_name'])\n instance = model(configuration)\n print(\"model [{0}] was created\".format(type(instance).__name__))\n return instance",
"def create_model(self):\n try:\n self.model = PPO2.load(self.save_path)\n self.model.set_env(self.env)\n print(\"Loading of the latest model successful!\")\n except:\n print(\"Creating new model...\")\n self.model = PPO2(CnnPolicy, self.env, verbose=1)",
"def create_model(self):\n self.create_model_file()\n self.create_model_unit_test()\n self.add_model_to_list()\n self.readme_reminder()",
"def create_model(self, **inputs):\n raise NotImplementedError('This method has to be overwritten.')",
"def construct_model(self, output_model_path):\n\n input_tensor = helper.make_tensor_value_info(\"input\", TensorProto.FLOAT, [1, 1, 7, 7])\n output_tensor = helper.make_tensor_value_info(\"output\", TensorProto.FLOAT, [1, 1, 8, 8])\n ini_w = helper.make_tensor(\"weight\", TensorProto.FLOAT, [1, 1, 2, 2], [1.0, 1.0, 1.0, 1.0])\n ini_b = helper.make_tensor(\"bias\", TensorProto.FLOAT, [1], [0.17])\n conv_tranpose_node = onnx.helper.make_node(\n \"ConvTranspose\",\n [\"input\", \"weight\", \"bias\"],\n [\"output\"],\n kernel_shape=[2, 2],\n output_padding=[0, 0],\n pads=[0, 0, 0, 0],\n strides=[1, 1],\n dilations=[1, 1],\n group=1,\n )\n graph = helper.make_graph(\n [conv_tranpose_node],\n \"conv_transpose_test\",\n [input_tensor],\n [output_tensor],\n initializer=[ini_w, ini_b],\n )\n model = helper.make_model(graph, opset_imports=[helper.make_opsetid(\"\", 13)])\n model.ir_version = 7 # use stable onnx ir version\n\n onnx.save(model, output_model_path)",
"def create_model(model_name, random_state, epoch, device, log_path, **hparams):\n model = eval(f'{model_name}')(\n **hparams, epoch=int(epoch), random_state=random_state, device=device,\n log_path=log_path\n )\n\n return model",
"def binary_model_dict() -> dict:\n return {\n \"LogisticRegression\": createModel(\n \"LogisticRegression\", \"binary\", solver=\"lbfgs\", max_iter=1000\n ),\n \"GaussianNB\": createModel(\"GaussianNB\", \"binary\"),\n \"KNeighborsClassifier\": createModel(\"KNeighborsClassifier\", \"binary\"),\n \"DecisionTreeClassifier\": createModel(\"DecisionTreeClassifier\", \"binary\"),\n \"AdaBoostClassifier\": createModel(\"AdaBoostClassifier\", \"binary\"),\n \"BaggingClassifier\": createModel(\"BaggingClassifier\", \"binary\"),\n \"ExtraTreesClassifier\": createModel(\n \"ExtraTreesClassifier\", \"binary\", n_estimators=100\n ),\n \"GradientBoostingClassifier\": createModel(\n \"GradientBoostingClassifier\", \"binary\"\n ),\n \"RandomForestClassifier\": createModel(\n \"RandomForestClassifier\", \"binary\", n_estimators=100\n ),\n \"XGBoost\": createModel(\"XGBoostBinary\", \"binary\", num_boost_round=100),\n \"LightGBM\": createModel(\"LightGBMBinary\", \"binary\", num_boost_round=100),\n }",
"def MakeModel(self):\n pass",
"def binary_classification_model() -> tf.keras.Model:\n\n # Build model\n model = tf.keras.Sequential(tf.keras.layers.Dense(1, activation='sigmoid'))\n model.compile(optimizer=tf.keras.optimizers.Adam(), loss=tf.keras.losses.BinaryCrossentropy())\n\n return model",
"def create_model(sess, FLAGS, mode):\n if FLAGS.model == \"vallina\":\n model = LinearModel(FLAGS, mode)\n model.build()\n else:\n pass\n # other model \n\n # create task file\n model_path = os.path.join(FLAGS.logdir, FLAGS.task_name)\n if not os.path.exists(model_path):\n os.makedirs(model_path)\n print (\"Save model to {}\".format(model_path))\n elif (FLAGS.reset):\n shutil.rmtree(model_path)\n os.makedirs(model_path)\n print (\"Remove existing model at {} and restart.\".format(model_path))\n else:\n raise ValueError(\"Fail to create the new model.\")\n\n # Save the current configurations\n config = dict(FLAGS.__flags.items())\n with open(\"/\".join([model_path, \"config.json\"]), \"w\") as file:\n json.dump(config, file)\n\n # initialize variables\n sess.run(tf.global_variables_initializer())\n\n return model",
"def build(model_name):\n return pretrain.factory.create(model_name)",
"def create_model(project_parameters):\n model = Net(project_parameters=project_parameters)\n if project_parameters.checkpoint_path is not None:\n model = load_checkpoint(model=model, num_classes=project_parameters.num_classes,\n use_cuda=project_parameters.use_cuda, checkpoint_path=project_parameters.checkpoint_path)\n return model",
"def test_simple_creation():\n # Get model file\n create.main(\"mlp\", \"10:12:8\", \"model_test.tar\")",
"def create_model():\n\n class Net(nn.Cell):\n def construct(self, x, y):\n return x\n\n net = Net()\n model_simple = Model(net)\n\n return model_simple",
"def create_model(self) -> None:\n self._model = create_model_with_temperature_scaling(self.config)",
"def build_model(self, model_def_path: Optional[str] = None) -> 'nn.Module':\n cfg = self.cfg\n model = cfg.model.build(\n num_classes=cfg.data.num_classes,\n in_channels=cfg.data.img_channels,\n save_dir=self.modules_dir,\n hubconf_dir=model_def_path,\n img_sz=cfg.data.img_sz)\n return model",
"def create_model(self):\n model = solph.Model(self.es)\n return model",
"def create_model(ModelName=None, PrimaryContainer=None, Containers=None, ExecutionRoleArn=None, Tags=None, VpcConfig=None, EnableNetworkIsolation=None):\n pass",
"def build_sys_rec_model():\n print(\"building model...\")\n model = Merchant2VecModel()\n model.train(final_training=True)\n model.save_model()",
"def createModel(self):\n outputs, inputs = baseUNet(self.input_shape,\n self.conv_depth,\n self.n_classes,\n self.init_w,\n self.dropout)\n \n if self.regression == True:\n outputs = Lambda(getPropOfGround)(outputs)\n \n model = Model(inputs = inputs,outputs = outputs)\n \n model.compile(optimizer = self.optimizer,\n loss=self.loss_function,\n metrics=self.metrics)\n\n if self.old_weights != None:\n model.set_weights(self.old_weights)\n self.model = model",
"def build_model(self):\n raise NotImplementedError"
] | [
"0.6976374",
"0.6738157",
"0.67200863",
"0.67200863",
"0.6598115",
"0.65837914",
"0.65531623",
"0.6531232",
"0.65258634",
"0.65250564",
"0.65024483",
"0.64977425",
"0.6426681",
"0.6377093",
"0.6304412",
"0.6303984",
"0.62989664",
"0.6277534",
"0.62273085",
"0.62052596",
"0.6200794",
"0.61999846",
"0.6172503",
"0.6126633",
"0.6122576",
"0.61184025",
"0.6111805",
"0.6108386",
"0.61005586",
"0.60902095"
] | 0.7062543 | 0 |
Gets environment variable as string. | def getenv_string(setting, default=''):
return os.environ.get(setting, default) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_env(key: str) -> str:\n value = os.getenv(key)\n assert isinstance(value, str), (\n f\"the {key} environment variable must be set and a string, \" f\"{value=}\"\n )\n return value",
"def env(var):\n return os.environ[var]",
"def windows_get_env_value(var_name: str) -> str:\n if var_name in os.environ.keys():\n return os.environ[var_name]",
"def env_var_line(key: str) -> str:\n return str(os.environ.get(key) or \"\").strip()",
"def GetEnvVariable(name):\n return os.environ.get(name)",
"def getenv(self, var):\n return os.environ[var]",
"def test_get_environment_string(self):\n pass",
"def get_from_environ(key: str, default: Any = None) -> str:\n return os.environ.get(key, default)",
"def env(key: str) -> Optional[Any]:\n return os.getenv(key)",
"def env_str(name: str, default: str) -> str:\n value = stringify(env.get(name))\n return default if value is None else value",
"def get(self):\n self.value = os.getenv(self.name, self.default)\n return self.value",
"def environment_value(self, name):\n if not os.environ.has_key(name):\n return None\n return os.environ[name]",
"def maybe_environ(key):\n try:\n return os.environ[key]\n except KeyError:\n return \"\"",
"def getenv(space, var):\n e = os.environ.get(var)\n if e is None:\n return space.w_False\n return space.newstr(e)",
"def get_var(var_name: str):\n return os.environ[var_name]",
"def get_envvar(name, silent=False):\n value = os.environ.get(name)\n if value is None:\n if not silent:\n raise RuntimeError(\n 'The environment variable %r is not set '\n 'and as such configuration could not be '\n 'loaded. Set this variable and make it '\n 'point to a configuration file' % name\n )\n else:\n return ''\n return value",
"def get_environment_variable(name):\n\n variable = None\n try:\n variable = os.environ[name]\n except KeyError:\n pass\n \n return variable",
"def getenv(self, key):\n return self._env[key]",
"def env_variable(self, name: str) -> Optional[str]:\n _args = [\n Arg(\"name\", name),\n ]\n _ctx = self._select(\"envVariable\", _args)\n return _ctx.execute_sync(Optional[str])",
"def get_os_env():\n env = os.environ\n# print(\"env \\n\" , env)\n return env",
"def environment_variable_string(self, name):\n return \"$(\" + name + \")\"",
"def get_env(self) -> str:\n return self.env or ENV",
"def get_value(key:str):\n value = environ.get(key)\n if value == None or len(str(value)) == 0:\n raise ValueError('Missing env: '+key)\n return value",
"def compute_env_var(self):\n return COMPUTE_SETTINGS_VARNAME",
"def show_current_value(variable=None):\n value = os.getenv(variable, None)\n return f\" ('{value}')\" if value is not None else ''",
"def getenv(self, name):\n return self.environment[name]",
"def get_environment_var(env_name, default_value):\n if env_name in os.environ:\n return os.environ[env_name]\n else:\n return default_value",
"def getenv(device, variable_name):\n command = 'getenv \"%s\" \"%s\"' % (device.udid, variable_name)\n variable = _run_command(command)\n # The variable has an extra new line at the end, so remove it when returning\n return variable[:-1]",
"def Environ(envstring):\n try:\n envint = int(envstring)\n except ValueError:\n return os.environ.get(envstring, \"\")\n # Is an integer - need to get the envint'th value\n try:\n return \"%s=%s\" % (list(os.environ.keys())[envint], list(os.environ.values())[envint])\n except IndexError:\n return \"\"",
"def __getitem__(self, key):\n return os.environ[key]"
] | [
"0.81528544",
"0.7418339",
"0.7405049",
"0.72382337",
"0.72329384",
"0.72261554",
"0.7057835",
"0.69984245",
"0.69965094",
"0.69708145",
"0.6922805",
"0.68801343",
"0.68776995",
"0.685753",
"0.68367887",
"0.68226147",
"0.6817827",
"0.6792033",
"0.6738943",
"0.67099005",
"0.66787946",
"0.66567904",
"0.6614596",
"0.6613439",
"0.65988135",
"0.65941525",
"0.65896887",
"0.65817666",
"0.6558416",
"0.654501"
] | 0.7531773 | 1 |
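A minimal usage sketch for the getenv_string helper shown in the row above: it is a thin wrapper over os.environ.get, so a missing variable simply falls back to the default (the variable names below are made up for illustration).

    import os

    def getenv_string(setting, default=''):
        return os.environ.get(setting, default)

    os.environ["APP_MODE"] = "production"
    assert getenv_string("APP_MODE") == "production"            # set -> returned as-is
    assert getenv_string("NOT_SET", "fallback") == "fallback"   # unset -> default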
Gets environment variable as boolean value. | def getenv_bool(setting, default=None):
result = os.environ.get(setting, None)
if result is None:
return default
return str2bool(result) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def eval_env_as_boolean(varname, standard_value) -> bool:\n return str(os.getenv(varname, standard_value)).lower() in (\"true\", \"1\", \"t\", \"y\")",
"def env_var_bool(key: str) -> bool:\n return env_var_line(key).upper() in (\"TRUE\", \"ON\", \"YES\")",
"def environ_bool(var, default=False):\n if var not in os.environ:\n return default\n else:\n return str_to_bool(os.environ[var])",
"def get_envbool(key, *default):\n return get_env(key, *default, coerce=_bool)",
"def to_bool(env, default='false'):\n return bool(util.strtobool(os.getenv(env, default)))",
"def env_var(key, default=None):\n val = os.environ.get(key, default)\n if val == 'True':\n val = True\n elif val == 'False':\n val = False\n return val",
"def env(key, default=None):\n val = os.getenv(key, default)\n\n if val == 'True':\n val = True\n elif val == 'False':\n val = False\n return val",
"def get_env_variable(var_name, prefix=VARIABLE_PREFIX, as_bool=False):\n if prefix is not None:\n var_name = prefix + var_name\n\n try:\n value = os.environ[var_name]\n if as_bool:\n value = value.lower() == 'true'\n return value\n except KeyError:\n raise ImproperlyConfigured('Set the %s environment variable' % var_name)",
"def _env_to_bool(val: str | bool) -> bool:\n if isinstance(val, bool):\n return val\n if val.strip().lower() in (\"1\", \"true\", \"yes\"):\n return True\n\n return False",
"def getenv(space, var):\n e = os.environ.get(var)\n if e is None:\n return space.w_False\n return space.newstr(e)",
"def GetBool(self, variable):\n value = self.GetString(variable)\n return value and value.strip() == 'true'",
"def envset(name, default):\n if name in os.environ:\n return os.environ[name].lower() not in ['no', 'n', 'false', 'off', '0', '0.0']\n else:\n return bool(default)",
"def getBoolean(self, key):\n self._check(key)\n return self.__config.value(key).toBool()",
"def config_get_bool(section, option):\n return __CONFIG.getboolean(section, option)",
"def get_env_variable(var_name, default=None):\n if default is not None:\n return _fix_booleans(environ.get(var_name, default))\n try:\n return _fix_booleans(environ[var_name])\n except KeyError:\n error_msg = \"Please set the %s environment variable\" % var_name\n raise KeyError(error_msg)",
"def getbool(self, key):\n try:\n return self.parser.getboolean(\"wpwatcher\", key)\n except ValueError as err:\n raise ValueError(\n \"Could not read boolean value in config file for key '{}' and string '{}'. Must be Yes/No\".format(\n key, self.parser.get(\"wpwatcher\", key)\n )\n ) from err",
"def bool_var(\n default: Any = RAISE, name: str | None = None, help: str | None = None\n) -> Any:\n return var(default=default, name=name, converter=_env_to_bool, help=help)",
"def getenv_check(e):\n res = os.getenv(e)\n if res == None:\n print(e, 'environment variable not set - stopping.')\n exit(1)\n else:\n return res",
"def _getbool(\n parser: configparser.ConfigParser,\n key: str,\n section: str = \"wpwatcher\",\n ) -> bool:\n try:\n return parser.getboolean(section, key)\n except ValueError as err:\n raise ValueError(\n f\"Could not read boolean value in config file for key '{key}' and string '{parser.get(section, key)}'. Must be Yes/No\"\n ) from err",
"def in_travis():\n return os.getenv(IN_TRAVIS_ENV) == 'true'",
"def get_bool(self, key, default):\n value = self.get(key, default)\n if isinstance(value, bool):\n return value\n return value.lower() in (\"true\", \"t\", \"yes\", \"y\")",
"def is_on(self):\n return bool(getattr(self.resource, self.variable))",
"def get_env_variable(self, var_name, optional=False):\n try:\n return environ[var_name]\n except KeyError:\n if optional:\n return False\n else:\n error_msg = f'Error: You must set the {var_name} environment variable.'\n raise Exception(error_msg)",
"def get_boolean_attribute_value(attrs, attr_name):\n return 1 if attrs.get(attr_name, 0) in [\"True\", \"1\"] else 0",
"def variable_boolean(self, value):\n\n text_value = to_text(value)\n text_value = text_value.lower()\n\n if text_value == 'true' or text_value == 'false':\n return True\n\n return False",
"def _read_bool_from_config(key, default):\n if config.has_option('docker', key):\n return config.getboolean('docker', key)\n else:\n return default",
"def _set(env_var: str) -> bool:\n return os.getenv(env_var) not in [None, \"0\"]",
"def _getBoolFeature(self):\n\n # create args\n valueToGet = c_bool()\n\n errorCode = VimbaDLL.featureBoolGet(self._handle,\n self._name,\n byref(valueToGet))\n if errorCode != 0:\n raise VimbaException(errorCode)\n\n return valueToGet.value",
"def getbool(self, strcommand):\n result = ct.c_bool()\n command = ct.c_wchar_p(strcommand)\n self.lib.AT_GetBool(self.AT_H, command, ct.addressof(result))\n return result.value",
"def retrieve_user_env(user_agent, fbs):\n\tif fbs:\n\t\treturn False#, True\n\telif user_agent:\n\t\tif 'Presto' in user_agent and 'Opera Mini' in user_agent:\n\t\t\treturn False#, False\n\t\telse:\n\t\t\treturn True#, False\n\telse:\n\t\treturn True#, False"
] | [
"0.82893",
"0.80127674",
"0.7843964",
"0.783441",
"0.7601062",
"0.75889426",
"0.75562733",
"0.73252225",
"0.7213979",
"0.6979548",
"0.6971869",
"0.68721783",
"0.6818791",
"0.6491351",
"0.6471654",
"0.64534855",
"0.6428621",
"0.6404237",
"0.6373961",
"0.6329838",
"0.6319821",
"0.6311249",
"0.6305491",
"0.63043374",
"0.6279751",
"0.6275216",
"0.6269637",
"0.6246462",
"0.6235152",
"0.6226668"
] | 0.81049186 | 1 |
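The getenv_bool snippet in the row above delegates string parsing to a str2bool helper that the snippet does not define. Below is a minimal sketch of how such a helper is commonly written; the accepted truthy spellings are an assumption for illustration, not the original project's implementation.

    import os

    def str2bool(value: str) -> bool:
        # Common convention: a small whitelist of "yes" spellings counts as True.
        return value.strip().lower() in ("1", "true", "t", "yes", "y", "on")

    def getenv_bool(setting, default=None):
        result = os.environ.get(setting, None)
        if result is None:
            return default
        return str2bool(result)

    os.environ["FEATURE_FLAG"] = "Yes"
    assert getenv_bool("FEATURE_FLAG") is True
    assert getenv_bool("MISSING_FLAG", default=False) is False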
Handle a change of the target object. This handler will remove the old observer and attach a new observer to the target attribute. If the target object is not an Atom object, an exception will be raised. | def __call__(self, change: ChangeDict) -> None:
old = None
new = None
ctype = change["type"]
if ctype == "create":
new = change["value"]
elif ctype == "update":
old = change["oldvalue"]
new = change["value"]
elif ctype == "delete":
old = change["value"]
attr = self.attr
owner = change["object"]
handler = getattr(owner, self.funcname)
if isinstance(old, Atom):
old.unobserve(attr, handler)
if isinstance(new, Atom):
new.observe(attr, handler)
elif new is not None:
msg = "cannot attach observer '%s' to non-Atom %s"
raise TypeError(msg % (attr, new)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def handle(self, object, name, old, new):\n raise NotImplementedError",
"def update(self, target):\n self.target = target.detach()",
"def handle_dst(self, object, name, old, new):\n self.next.unregister(old)\n object, name = self.next.register(new)\n if old is not Uninitialized:\n if object is None:\n raise TraitError(\n \"on_trait_change handler signature is \"\n \"incompatible with a change to an intermediate trait\"\n )\n\n wh = self.wrapped_handler_ref()\n if wh is not None:\n wh(object, name, old, getattr(object, name, Undefined))",
"def change_object(self, new_object):\n raise NotImplementedError",
"def update_target(self):\n pass",
"def _transformChanged(self, source):\n if source is not self:\n self.notify()",
"def listener(self, proxy, changed_properties, invalidated_properties):\n metadata = changed_properties.lookup_value('Metadata')\n # do not signal if the metadata is empty\n self.process_metadata(metadata, False)",
"def target(self, target) :\n\t\ttry :\n\t\t\tself._target = target\n\t\texcept Exception as e:\n\t\t\traise e",
"def target(self, target) :\n\t\ttry :\n\t\t\tself._target = target\n\t\texcept Exception as e:\n\t\t\traise e",
"def observe_model_modification(\n sender: type, instance: Model, created: bool = False, **kwargs\n):\n if isinstance(instance, Observable):\n if created:\n handle_permission_change(instance)\n elif not getattr(instance, suppress_notifications_attribute, False):\n Observer.observe_instance_changes(instance, ChangeType.UPDATE)\n Observer.observe_instance_container(instance, ChangeType.UPDATE)",
"def notifyChange(self, uri, observer, syncToNetwork=None, flags=None):\n pass",
"def modified_object(obj, event):\n now = datetime.now(tz=_zone)\n obj.modification_date = now",
"def putOn(self,obj):\n if obj not in self.on:\n self.on.append(obj)\n if self not in obj.on:\n obj.putOn(self)",
"def putOn(self,obj):\n if obj not in self.on:\n self.on.append(obj)\n if self not in obj.on:\n obj.putOn(self)",
"def set_observer(self, observer):\r\n\r\n observer = str(observer)\r\n if observer not in color_constants.OBSERVERS:\r\n raise InvalidObserverError(self)\r\n self.observer = observer",
"def after_update(self, obj, st):\n pass",
"def on_change(self, event):\n event_path = event.src_path\n observed_paths = []\n\n for watchdog_path, child_observed_paths in self._watch_dog_observed_paths.items():\n if event_path.startswith(watchdog_path):\n observed_paths += child_observed_paths\n\n if not observed_paths:\n return\n\n changed_paths = []\n for path in observed_paths:\n path_obj = Path(path)\n # The path got deleted\n if not path_obj.exists():\n self._observed_paths.pop(path, None)\n changed_paths += [path]\n else:\n new_checksum = calculate_checksum(path)\n if new_checksum != self._observed_paths.get(path, None):\n changed_paths += [path]\n self._observed_paths[path] = new_checksum\n if changed_paths:\n self._input_on_change(changed_paths)",
"def handle_actual_updated(self):\n self._actual_updated()",
"def _attr_updated(self, name, value):\n event = AttributeUpdateEvent(self, name, value)\n events.notify(event)",
"def dispatchObjectMovedEvent(ob, ev):\n if ob is not ev.object:\n if ev.oldParent is ev.newParent:\n notify(ObjectModifiedEvent(ob))",
"def handle_error(self, obj, name, old, new):\n if old is not None and old is not Uninitialized:\n raise TraitError(\n \"on_trait_change handler signature is \"\n \"incompatible with a change to an intermediate trait\"\n )",
"def target(self, target):\n\n self._target = target",
"def set(self, obj, value):\n raise NotImplementedError",
"def process_event(self, event):\n if not self.frozen:\n if event[\"event\"] in [self.event, self.devent]:\n if self.what is None or event[\"target\"].startswith(self.what):\n self.lastone = None\n if event[\"event\"] == self.event:\n val = event\n for key in self.subval:\n val = val[key]\n if val in self.addV:\n if event[\"target\"] not in self.entities:\n self.entities.add(event[\"target\"])\n self.lastone = event[\"target\"]\n if event[\"event\"] == self.devent:\n val = event\n for key in self.dsubval:\n val = val[key]\n if val in self.subV:\n try:\n self.entities.remove(event[\"target\"])\n self.lastone = event[\"target\"]\n except:\n pass\n if self.lastone is not None and bridgectl.log:\n bridgectl.log.debug(\n \"New value for {} is {}\".format(\n self.name,\n self.entities))\n if event['event'] == 'time tick':\n if self.period in event[\"starts\"]:\n self.reset()",
"def on_entity_update(self, event):\n self.entity.on_entity_update(event)",
"def register_observation(self, target: DriverTarget) -> None:\n self.register_observed_target(target=target)",
"def _data_updated_callback(self, attr, old, new):\n pass",
"def exogenous_change(self):\n pass",
"def exogenous_change(self):\n pass",
"def exogenous_change(self):\n pass"
] | [
"0.5505239",
"0.5447666",
"0.5287387",
"0.5242358",
"0.5202192",
"0.5201194",
"0.51845044",
"0.5054616",
"0.5054616",
"0.5051807",
"0.5024639",
"0.5023258",
"0.50199413",
"0.50199413",
"0.50012773",
"0.4991372",
"0.4981381",
"0.4964892",
"0.4939625",
"0.49377948",
"0.4923551",
"0.49190852",
"0.48473287",
"0.48319802",
"0.48207492",
"0.48136365",
"0.48103613",
"0.48090225",
"0.48090225",
"0.48090225"
] | 0.5871039 | 0 |
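The handler in the row above consumes the standard atom change dict (keys such as type, object, value, oldvalue) and re-attaches itself when the observed target is swapped. Below is a hedged sketch of the underlying observe/unobserve pattern it builds on, assuming the atom package's instance API; the Point class and on_x callback are made-up names.

    from atom.api import Atom, Int

    class Point(Atom):
        x = Int()

    def on_x(change):
        # change["type"] is e.g. 'create' or 'update'; 'update' changes
        # also carry the previous value in change["oldvalue"].
        print(change["type"], change["name"], change["value"])

    p = Point()
    p.observe("x", on_x)     # attach the handler to member 'x'
    p.x = 3                  # notifies on_x
    p.x = 4                  # an 'update' change; change["oldvalue"] == 3
    p.unobserve("x", on_x)   # detach the handler again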
Add or override a member after the class creation. | def add_member(cls: AtomMeta, name: str, member: Member) -> None:
existing = cls.__atom_members__.get(name)
if existing is not None:
member.set_index(member.index)
member.copy_static_observers(member)
else:
member.set_index(len(cls.__atom_members__))
member.set_name(name)
# The dict is mutable but we do not want to say it too loud
cls.__atom_members__[name] = member # type: ignore
cls.__atom_specific_members__ = frozenset(
set(cls.__atom_specific_members__) | {name}
)
setattr(cls, name, member) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_member_function(cls, methodName, newMethod):\n cls.add_registration_code('def(\"%s\",%s)'%(methodName, newMethod), True)",
"def add_to_class(cls, name, value):\n if hasattr(value, 'contribute_to_class'):\n value.contribute_to_class(cls, name)\n if not name.startswith('_'):\n cls._fields[name] = value\n else:\n setattr(cls, name, value)",
"def after_class_creation(cls):\n pass",
"def fm_append_member(cls, parent, child):\n parent.fm_append(child, cls.CHILD)\n child.fm_append(parent, cls.PARENT)",
"def __setattr__(self, name, value):\r\n self.assert_valid()\r\n\r\n\r\n\r\n\r\n self.__dict__.setdefault(\"_members\",{})[name] = value\r\n\r\n return _swig_setattr(self, self.__class__, name, value)",
"def __setattr__(self, name, value):\r\n self.assert_valid()\r\n\r\n\r\n\r\n\r\n self.__dict__.setdefault(\"_members\",{})[name] = value\r\n\r\n return _swig_setattr(self, self.__class__, name, value)",
"def __setattr__(self, name, value):\r\n self.assert_valid()\r\n\r\n\r\n\r\n\r\n self.__dict__.setdefault(\"_members\",{})[name] = value\r\n\r\n return _swig_setattr(self, self.__class__, name, value)",
"def __setattr__(self, name, value):\r\n self.assert_valid()\r\n\r\n\r\n\r\n\r\n self.__dict__.setdefault(\"_members\",{})[name] = value\r\n\r\n return _swig_setattr(self, self.__class__, name, value)",
"def __setattr__(self, name, value):\r\n self.assert_valid()\r\n\r\n\r\n\r\n\r\n self.__dict__.setdefault(\"_members\",{})[name] = value\r\n\r\n return _swig_setattr(self, self.__class__, name, value)",
"def __setattr__(self, name, value):\r\n self.assert_valid()\r\n\r\n\r\n\r\n\r\n self.__dict__.setdefault(\"_members\",{})[name] = value\r\n\r\n return _swig_setattr(self, self.__class__, name, value)",
"def __setattr__(self, name, value):\r\n self.assert_valid()\r\n\r\n\r\n\r\n\r\n self.__dict__.setdefault(\"_members\",{})[name] = value\r\n\r\n return _swig_setattr(self, self.__class__, name, value)",
"def __setattr__(self, name, value):\r\n self.assert_valid()\r\n\r\n\r\n\r\n\r\n self.__dict__.setdefault(\"_members\",{})[name] = value\r\n\r\n return _swig_setattr(self, self.__class__, name, value)",
"def __setattr__(self, name, value):\r\n self.assert_valid()\r\n\r\n\r\n\r\n\r\n self.__dict__.setdefault(\"_members\",{})[name] = value\r\n\r\n return _swig_setattr(self, self.__class__, name, value)",
"def __setattr__(self, name, value):\r\n self.assert_valid()\r\n\r\n\r\n\r\n\r\n self.__dict__.setdefault(\"_members\",{})[name] = value\r\n\r\n return _swig_setattr(self, self.__class__, name, value)",
"def __setattr__(self, name, value):\r\n self.assert_valid()\r\n\r\n\r\n\r\n\r\n self.__dict__.setdefault(\"_members\",{})[name] = value\r\n\r\n return _swig_setattr(self, self.__class__, name, value)",
"def __setattr__(self, name, value):\r\n self.assert_valid()\r\n\r\n\r\n\r\n\r\n self.__dict__.setdefault(\"_members\",{})[name] = value\r\n\r\n return _swig_setattr(self, self.__class__, name, value)",
"def member(self, member: object):\n\n self._member = member",
"def create_property_on_class(self, cls, internal_name):\n if not self._use_custom_properties:\n if self._is_mutable:\n setter = partial(_setattr, internal_name)\n else:\n setter = None\n setattr(\n cls,\n self._name,\n property(\n partial(_getattr, internal_name),\n setter,\n None,\n self._description\n )\n )",
"def __setattr__(self, attr, value):\n super().__setattr__(attr, value)",
"def _add(object, name, value):\n self.__added__.append(name)\n setattr(object, name, value)",
"def add_property(self, cls: type):\n return _add_property(cls)",
"def as_member(self, as_member):\n\n self._as_member = as_member",
"def add_property(python_name, default_value, name, type = \"\", doc = \"(no documentation available yet)\"):\n curframe = inspect.currentframe()\n try:\n calframe = inspect.getouterframes(curframe, 2)\n try:\n cls_locals = calframe[1][0].f_locals\n if not \"_properties\" in cls_locals:\n cls_locals[\"_properties\"] = OrderedDict()\n cls_locals[\"_properties\"][name] = (default_value, type, doc, python_name)\n finally:\n del calframe\n finally:\n del curframe",
"def set_attribute(self, attr, value):\n super().set_attribute(attr, value) # Keep this line, it triggers the parent class method.\n setattr(self, attr, value)",
"def add_method (self, cls, name) :\n wrapped = self._wrapped (cls, name)\n setattr (cls, name, pyk.new_instancemethod (wrapped, None, cls))",
"def add_member(self, persona):\n if persona not in self.members:\n self.members.append(persona)",
"def contribute_to_class(self, *args: Any, **kwargs: Any) -> None:\n super().contribute_to_class(*args, **kwargs)\n self.add_base_fields()\n self.add_sub_factories()\n self.add_related_factories()\n self.add_m2m_factories()\n # Reevaluated declarations:\n for k, v in vars(self.factory).items():\n if self._is_declaration(k, v):\n self.base_declarations[k] = v\n self.pre_declarations, self.post_declarations = (\n factory.builder.parse_declarations(self.declarations))",
"def __setattr__(self, attr, value):\r\n return setattr(self.__instance, attr, value)",
"def add_member(self, member):\n self.members[member.name] = member\n self.relationships[member.name] = []",
"def addMember(self, *args):\n return _libsbml.ListOfMembers_addMember(self, *args)"
] | [
"0.60400194",
"0.603651",
"0.5997592",
"0.5895129",
"0.58183837",
"0.58183837",
"0.58183837",
"0.58183837",
"0.58183837",
"0.58183837",
"0.58183837",
"0.58183837",
"0.58183837",
"0.58183837",
"0.58183837",
"0.58183837",
"0.57411194",
"0.56308806",
"0.56049895",
"0.5564861",
"0.5560791",
"0.5549321",
"0.5528876",
"0.5493454",
"0.54781973",
"0.5466337",
"0.5447941",
"0.54479116",
"0.5404679",
"0.5393345"
] | 0.6289327 | 0 |
Get the members dictionary for the type. Returns | def members(cls) -> Mapping[str, Member]:
return cls.__atom_members__ | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_members():",
"def members(self) -> object:\n return self._members",
"def _types(cls):\n return {}",
"def get_members(self):\n return self._members",
"def getMembers(self):\n outProperties = ctypes.c_void_p()\n _res = self.mAPIContext.SDTypeStruct_getMembers(self.mHandle, ctypes.byref(outProperties))\n if _res != SDApiError.NoError.value:\n if _res == SDApiError.NoErrorOutputParamNotSet.value:\n return None\n raise APIException(SDApiError(_res))\n constructor = self.mAPIContext.mTypeMap[SDAPIObject(self.mAPIContext, outProperties, ownHandle=False).getClassName()]\n return constructor(self.mAPIContext, outProperties.value, ownHandle=True)",
"def as_dict(self):\n\n output_dictionary = dict()\n\n for attribute_name, type_instance in inspect.getmembers(self):\n\n if attribute_name.startswith('__') or inspect.ismethod(type_instance):\n continue\n\n if isinstance(type_instance, bool):\n output_dictionary[attribute_name] = type_instance\n elif isinstance(type_instance, self.__class__):\n output_dictionary[attribute_name] = type_instance.as_dict()\n\n return output_dictionary",
"def collect_members_by_type(modules, type_checker, skip_underscored=True, predefined=None):\n\n accumulator = dict(predefined) if predefined else {}\n\n for module in modules:\n for name, member in module_members_by_type(module, type_checker, skip_underscored):\n accumulator[name] = member\n\n return accumulator",
"def getMembers():",
"def getMembers():",
"def getMembers():",
"def getMembers():",
"def to_dict(self):\n dct = dict(zip(self._fields, self))\n dct['type'] = type(self).__name__\n return dct",
"def to_dictionary(self):\n my_dict = {}\n for i in inspect.getmembers(self):\n if not i[0].startswith('_'):\n if not inspect.ismethod(i[1]) and not\\\n inspect.isfunction(i[1]):\n my_dict[i[0]] = i[1]\n return my_dict",
"def fields_dict(self):\n return self._declared_fields",
"def mof_metadata(self):\n\n mof_dict = collections.defaultdict(dict)\n\n mof_dict['class'] = self.name\n mof_dict['parent_class'] = self.parent_class\n mof_dict['qualifiers'] = self.qualifiers\n\n for func in self.members:\n mof_dict['functions'].update(func.mof_metadata)\n\n return dict(mof_dict)",
"def members(self):\n return self._members",
"def extract(self):\n if 'email' not in self._dict:\n raise ex.NoMemberEmailError\n\n extracted = dict(x for x in self._dict.items()\n if x[0] in ['member_id', 'email'])\n fields = dict(x for x in self._dict.items()\n if x[0] in self.account.fields.export_shortcuts())\n if fields:\n extracted['fields'] = fields\n\n return extracted",
"def to_dict(self) -> Dict:\n _dict = {}\n if hasattr(self, 'type') and self.type is not None:\n _dict['type'] = self.type\n return _dict",
"def to_dict(self) -> Dict:\n _dict = {}\n if hasattr(self, 'type') and self.type is not None:\n _dict['type'] = self.type\n return _dict",
"def members(self) -> \"List[str]\":\n return self._attrs.get(\"members\")",
"def members(self) -> \"List[str]\":\n return self._attrs.get(\"members\")",
"def members(self) -> \"List[str]\":\n return self._attrs.get(\"members\")",
"def member_types(self):\n raise exceptions.NotImplementedError()",
"def as_dict(self):\n return dict((\n (k, getattr(self, k)) for k in self.get_flat_type_info(self)\n if getattr(self, k) is not None\n ))",
"def members(self):\r\n return Members(self)",
"def registered_fields(self):\n return {key for mapping in self for key in mapping.mapping.keys()}",
"def whoAreYou(self):\n tempDict = {}\n tempDict['Class'] = '{0:15}'.format(self.__class__.__name__) +' from '+' '.join([str(base) for base in self.__class__.__bases__])\n tempDict['Type' ] = self.type\n tempDict['Name' ] = self.name\n return tempDict",
"def fields(self) -> Dict[str, Field]:\n return self._fields",
"def get_members(id): # pylint: disable=I0011,W0622\n\n l = Legacy.query.get_or_404(id)\n\n if current_app.config.get('IGNORE_AUTH') is not True: # pragma: no cover\n if not l.can_view(g.user.id):\n raise Http403('Access denied')\n\n return {'members': [m.to_dict(public_only=True) for m in l.members]}",
"def map_all_members():\n\n members_map = {}\n for member in Member.all():\n members_map[member.clockify_id] = {\n \"id\": member.id,\n \"acronym\": member.acronym,\n \"email\": member.email,\n }\n return members_map"
] | [
"0.63952565",
"0.6141157",
"0.61048436",
"0.6043802",
"0.59790736",
"0.59537697",
"0.59458727",
"0.59143937",
"0.59143937",
"0.59143937",
"0.59143937",
"0.588506",
"0.586556",
"0.58518934",
"0.582172",
"0.58169",
"0.5795562",
"0.579131",
"0.579131",
"0.5717494",
"0.5717494",
"0.5717494",
"0.571108",
"0.5704409",
"0.55954075",
"0.5590317",
"0.5549021",
"0.5541306",
"0.55322444",
"0.552737"
] | 0.6644524 | 0 |
Set the program details in the GUI. {Boolean} Always returns True. | def __setDetails(self):
self.MainWindow.setWindowTitle("{0} {1}".format(
const.APP_NAME, const.VERSION))
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setProgram(self, program):\n self.program = program",
"def set_program(self, prog):\n self.prog = prog",
"def pr_info(self):\n process = self.backend.get_process(str(self.processBox.currentText()))\n\n if not process:\n return\n\n self.infoWindow2 = QDialog(parent=self)\n hbox2 = QHBoxLayout()\n info_box = QTextEdit()\n\n if process.returns:\n info_box.setText(\n str(str(process.id) + ': ' + str(process.description) + \"\\n\\n Returns: \\n\" +\n str(process.get_return_type()) + \"\\n\" + process.returns[\"description\"]))\n else:\n info_box.setText(\n str(str(process.id) + ': ' + str(process.description)))\n\n info_box.setReadOnly(True)\n info_box.setMinimumWidth(500)\n info_box.setMinimumHeight(500)\n hbox2.addWidget(info_box)\n self.infoWindow2.setLayout(hbox2)\n self.infoWindow2.setWindowTitle('Process Information')\n self.infoWindow2.show()",
"def isprogram(self):\n return True",
"def set_by_gui(self):\n\n # Use the GetFromGui class (below):\n user_choice = GetFromGui(None, -1, 'Params')\n # success is achieved if the user presses 'done': \n if user_choice.success: \n user_params = {\n \"subject\" : user_choice.subject,\n \"orientation\" : user_choice.sc_ori,\n \"target_loc\": user_choice.target_loc,\n \"demo\": user_choice.demo,\n \"start_per_staircase\":float(user_choice.start_per),\n \"start_fix_staircase\":float(user_choice.start_fix),\n \"do_peripheral\":user_choice.do_per,\n \"do_fixation\":user_choice.do_fix\n }\n else:\n user_choice.Destroy()\n raise ValueError(\"Program stopped by user\")\n # Stop execution of the window\n user_choice.Destroy()\n \n for k in user_params.keys():\n self.__setattr__(k,user_params[k])",
"def isprogram(self):\n return False",
"def show_gui():\n pass",
"def update_has_data(self):\n self.main()",
"def guiMode(options):\n configuration = {'config_project_name': 'untitled', 'config_address': '0.0.0.0',\n 'config_port': 8081, 'config_multiple_instance': True, 'config_enable_file_cache': True,\n 'config_start_browser': True, 'config_resourcepath': './res/'}\n start(MainWindow, address=configuration['config_address'], port=configuration['config_port'],\n multiple_instance=configuration['config_multiple_instance'],\n enable_file_cache=configuration['config_enable_file_cache'],\n start_browser=configuration['config_start_browser'])",
"def set_defaults(self):\n if self.main_win.working_dir is None or self.main_win.id is None or \\\n len(self.main_win.working_dir) == 0 or len(self.main_win.id) == 0:\n msg_window('Working Directory or Reconstruction ID not configured')\n else:\n self.reconstructions.setText('1')\n self.device.setText('(0,1)')\n self.alg_seq.setText('((3,(\"ER\",20),(\"HIO\",180)),(1,(\"ER\",20)))')\n self.beta.setText('.9')\n self.support_area.setText('(0.5, 0.5, 0.5)')\n self.cont.setChecked(False)",
"def aboutmenu(self):\n tkMessageBox.showinfo(\"About This Program\", \"The project of PSIT subject in 2014.\\nThis program is unit converter program.\")",
"def on_about(self):\n MessageBox.showinfo(\"SuperSID\", self.controller.about_app())",
"def start(self):\n if self._system.get_settings().show_gui and not self.is_running():\n self.frame = True\n app = QtWidgets.QApplication(sys.argv)\n self.gui = GUI(self._system)\n if self.is_speech_enabled:\n self.gui.enable_speech(True)\n\n # if not self._system._domain._xml_file is None:\n # self.gui.open_domain(self._system._domain)\n # self._system.start_system()\n sys.exit(app.exec_())",
"def set_by_gui(self):\n\n # Use the GetFromGui class (below):\n user_choice = GetFromGui(None, -1, 'Params')\n # success is achieved if the user presses 'done': \n if user_choice.success: \n user_params = {\n \"subject\" : user_choice.subject,\n \"texture_dur\" : float(user_choice.SOA)/1000.,\n \"demo\": user_choice.demo,\n }\n else:\n user_choice.Destroy()\n raise ValueError(\"Program stopped by user\")\n # Stop execution of the window\n user_choice.Destroy()\n \n for k in user_params.keys():\n self.__setattr__(k,user_params[k])",
"def Update(self, mode = UPDATE_MODE.all):\r\n aux_versions = dstore.Get(\"versions\")\r\n \r\n if(aux_versions['hw'] != None): \r\n Ui().lineHwVersion.setText(str(aux_versions['hw'])) \r\n else:\r\n Ui().lineHwVersion.setText(\"- -\")\r\n \r\n if(aux_versions['fw'] != None): \r\n Ui().lineFwVersion.setText(str(aux_versions['fw'])) \r\n else:\r\n Ui().lineFwVersion.setText(\"- -\") \r\n \r\n \r\n \r\n \"\"\" TERMINAL INFO \"\"\"\r\n aux_terminal_info = dstore.Get(\"terminal_info\", \"GET\")\r\n \r\n \"\"\" number of cells \"\"\"\r\n if(aux_terminal_info['number_of_cells'] != None):\r\n Ui().lineCells.setText(str(aux_terminal_info['number_of_cells'])) \r\n else:\r\n Ui().lineCells.setText(\"-\") \r\n \r\n \r\n \"\"\" battery \"\"\"\r\n if(aux_terminal_info['battery'] != None):\r\n Ui().lineBattery.setText(str(aux_terminal_info['battery'])+\" %\") \r\n else:\r\n Ui().lineBattery.setText(\"-- %\") \r\n \r\n \"\"\" speaker \"\"\" \r\n if(aux_terminal_info['speaker']['keys'] == True):\r\n Ui().lineSpeakerKeys.setText(\"ON\")\r\n Ui().pushSpeakerKeys.setText(\"OFF\")\r\n Ui().pushSpeakerKeys.setEnabled(True)\r\n Ui().pushSpeakerSystem.setEnabled(True)\r\n Ui().pushSpeakerTiming.setEnabled(True)\r\n elif(aux_terminal_info['speaker']['keys'] == False):\r\n Ui().lineSpeakerKeys.setText(\"OFF\")\r\n Ui().pushSpeakerKeys.setText(\"ON\")\r\n Ui().pushSpeakerKeys.setEnabled(True)\r\n Ui().pushSpeakerSystem.setEnabled(True)\r\n Ui().pushSpeakerTiming.setEnabled(True)\r\n else:\r\n Ui().lineSpeakerKeys.setText(\"- -\")\r\n Ui().pushSpeakerKeys.setText(\"- -\")\r\n \r\n if(aux_terminal_info['speaker']['system'] == True):\r\n Ui().lineSpeakerSystem.setText(\"ON\")\r\n Ui().pushSpeakerSystem.setText(\"OFF\")\r\n Ui().pushSpeakerSystem.setEnabled(True)\r\n elif(aux_terminal_info['speaker']['system'] == False):\r\n Ui().lineSpeakerSystem.setText(\"OFF\")\r\n Ui().pushSpeakerSystem.setText(\"ON\")\r\n Ui().pushSpeakerSystem.setEnabled(True)\r\n else:\r\n Ui().lineSpeakerSystem.setText(\"- -\")\r\n Ui().pushSpeakerSystem.setText(\"- -\")\r\n Ui().pushSpeakerSystem.setEnabled(False)\r\n \r\n if(aux_terminal_info['speaker']['timing'] == True):\r\n Ui().lineSpeakerTiming.setText(\"ON\")\r\n Ui().pushSpeakerTiming.setText(\"OFF\")\r\n Ui().pushSpeakerTiming.setEnabled(True)\r\n elif(aux_terminal_info['speaker']['timing'] == False):\r\n Ui().lineSpeakerTiming.setText(\"OFF\")\r\n Ui().pushSpeakerTiming.setText(\"ON\")\r\n Ui().pushSpeakerTiming.setEnabled(True)\r\n else: \r\n Ui().lineSpeakerTiming.setText(\"- -\")\r\n Ui().pushSpeakerTiming.setText(\"- -\")\r\n Ui().pushSpeakerTiming.setEnabled(False)\r\n \r\n if(aux_terminal_info['speaker']['keys'] == None or aux_terminal_info['speaker']['timing']==None or aux_terminal_info['speaker']['system']==None): \r\n Ui().pushSpeakerKeys.setEnabled(False)\r\n Ui().pushSpeakerSystem.setEnabled(False)\r\n Ui().pushSpeakerTiming.setEnabled(False)\r\n else:\r\n Ui().pushSpeakerKeys.setEnabled(True)\r\n Ui().pushSpeakerSystem.setEnabled(True)\r\n Ui().pushSpeakerTiming.setEnabled(True)\r\n \r\n \r\n return True",
"def main():\n\tif mc.window( 'SetupOccUI', q = 1, ex = 1 ):\n\t\tmc.deleteUI( 'SetupOccUI' )\n\tPyForm=SetupOccUI()\n\tPyForm.show()",
"def show(self, caller_is_main: bool):\n pass",
"def showAppPreferencesGUI(script=None, inputEvent=None):\n\n try:\n module = __import__(settings.appGuiPreferencesModule,\n globals(),\n locals(),\n [''])\n module.showPreferencesUI()\n except:\n debug.printException(debug.LEVEL_SEVERE)\n\n return True",
"def __showDetails(self):\n self.buttonBox.button(QDialogButtonBox.Close).setEnabled(False)\n self.buttonBox.button(QDialogButtonBox.Cancel).setEnabled(True)\n self.buttonBox.button(QDialogButtonBox.Cancel).setDefault(True)\n self.__showDetailsButton.setEnabled(False)\n QApplication.setOverrideCursor(Qt.WaitCursor)\n QApplication.processEvents(QEventLoop.ExcludeUserInputEvents)\n \n self.__detailsData = {}\n \n itm = self.resultList.selectedItems()[0]\n packageVersions = itm.data(0, self.VersionRole)\n if len(packageVersions) == 1:\n packageVersion = packageVersions[0]\n elif len(packageVersions) == 0:\n packageVersion = \"\"\n else:\n packageVersion, ok = QInputDialog.getItem(\n self,\n self.tr(\"Show Package Details\"),\n self.tr(\"Select the package version:\"),\n packageVersions,\n 0, False)\n if not ok:\n return\n \n packageName = itm.text(0)\n self.__client.call(\n \"release_data\",\n (packageName, packageVersion),\n lambda d: self.__getPackageDownloadsData(packageVersion, d),\n self.__detailsError\n )",
"def set_info_text(self):\n if not self.vars[\"enabled\"].get():\n msg = \"{} disabled\".format(self.tabname.title())\n elif self.vars[\"enabled\"].get() and not self.vars[\"ready\"].get():\n msg = \"Waiting for {}...\".format(self.tabname)\n else:\n msg = \"Displaying {}\".format(self.tabname)\n logger.debug(msg)\n self.set_info(msg)",
"def userSetup(self):\n if self.user[\"Save\"] == \"\":\n self.ui.b_run.setEnabled(False)\n else:\n name_split = self.splitPath(self.user[\"Save\"])[-1]\n name = name_split.split(\".\")[0]\n self.ui.l_save.setText(\"Save to: \" + name)\n\n if self.user[\"GT\"] != \"\":\n self.ui.l_ground_truth.setText(self.splitPath(self.user[\"GT\"])[-1])\n\n self.ui.l_colour.setText(self.user[\"Colour\"])",
"def _program_key(self):\n prg_dialogue = _ProgrammingWindow(self)\n self.root.wait_window(prg_dialogue.top)",
"def showInfoWindow():\n\treturn 0",
"def optionsWindow():\n\t# create the main interface\n\tif cmds.window(kSetupOptionsWindow, q=True, ex=True):\n\t\tcmds.deleteUI(kSetupOptionsWindow)\n\tmainWindow = cmds.window(kSetupOptionsWindow, title='%s Options'%kToolName, menuBar=True, wh=(545,350))\n\t\n\t# build the menu bar\n\tcmds.menu(label='Help')\n\tamui.helpMenuItem(kToolName, __file__)\n\tamui.aboutMenuItem(kToolName, kVersionNumber, kVersionDate)\n\t\n\tmainForm = cmds.formLayout(nd=100)\n\t\n\t# build the section to get information about the new twist joints\n\tif_suffixName = cmds.textFieldGrp(text='_Twist', label='Suffix of New Twist Joints:')\n\tif_numberTwistJoints = cmds.intSliderGrp(v=3, min=1, max=10, fmn=1, fmx=100, label='Number of Twist Joints:', field=True)\n\t\n\t# position the input fields for the twist joints\n\tcmds.formLayout(mainForm, edit=True, attachForm=[(if_suffixName, 'left', 30), (if_suffixName, 'top', 5)], attachNone=[(if_suffixName, 'right'), (if_suffixName, 'bottom')])\n\tcmds.formLayout(mainForm, edit=True, attachForm=[(if_numberTwistJoints, 'left', 30)], attachNone=[(if_numberTwistJoints, 'right'), (if_numberTwistJoints, 'bottom')], attachControl=[(if_numberTwistJoints, 'top', 5, if_suffixName)])\n\t\n\t# build the section to get information for the hip constraint\n\tconstraintFrame = eval('cmds.frameLayout(collapsable=True, label=\"Hip Constraint Options:\" %s)'%amui.__frameAlignCenter__)\n\tconstraintForm = cmds.formLayout(nd=100)\n\t\n\t# attempt to guess what the pelvis is if there is a selection when the GUI is created\n\tpelvisText = 'CenterRoot'\n\tsel = cmds.ls(sl=True, l=True, type='transform')\n\tif sel and len(sel) > 0: # BUG: in Maya 8.5, a selection of length 0 returns None rather than an empty list\n\t\ttry:\n\t\t\thip = cmds.listRelatives(sel[0], p=True, f=True) # just use the first knee in the selection\n\t\t\tpelvis = cmds.listRelatives(hip[0], p=True, f=True)\n\t\t\tpelvisText = pelvis[0]\n\t\texcept: pass\n\t\t\n\tif_pelvis = cmds.textFieldGrp(label='Pelvis Object:', tx=pelvisText)\n\tif_hipAimAxis = cmds.floatFieldGrp(v1=1, v2=0, v3=0, nf=3, pre=4, label='Hip Aim Axis:')\n\tif_hipFrontAxis = cmds.floatFieldGrp(v1=0, v2=0, v3=1, nf=3, pre=4, label='Hip Front Axis:')\n\tif_pelvisAimAxis = cmds.floatFieldGrp(v1=0, v2=1, v3=0, nf=3, pre=4, label='Pelvis Aim Axis:')\n\tif_pelvisFrontAxis = cmds.floatFieldGrp(v1=0, v2=0, v3=1, nf=3, pre=4, label='Pelvis Front Axis:')\n\t\n\t# position the input fields for the hip constraint\n\tcmds.formLayout(constraintForm, edit=True, attachForm=[(if_pelvis, 'left', 30), (if_pelvis, 'top', 5)], attachNone=[(if_pelvis, 'right'), (if_pelvis, 'bottom')])\n\tcmds.formLayout(constraintForm, edit=True, attachForm=[(if_hipAimAxis, 'left', 30)], attachNone=[(if_hipAimAxis, 'right'), (if_hipAimAxis, 'bottom')], attachControl=[(if_hipAimAxis, 'top', 5, if_pelvis)])\n\tcmds.formLayout(constraintForm, edit=True, attachForm=[(if_hipFrontAxis, 'left', 30)], attachNone=[(if_hipFrontAxis, 'right'), (if_hipFrontAxis, 'bottom')], attachControl=[(if_hipFrontAxis, 'top', 5, if_hipAimAxis)])\n\tcmds.formLayout(constraintForm, edit=True, attachForm=[(if_pelvisAimAxis, 'left', 30)], attachNone=[(if_pelvisAimAxis, 'right'), (if_pelvisAimAxis, 'bottom')], attachControl=[(if_pelvisAimAxis, 'top', 5, if_hipFrontAxis)])\n\tcmds.formLayout(constraintForm, edit=True, attachForm=[(if_pelvisFrontAxis, 'left', 30)], attachNone=[(if_pelvisFrontAxis, 'right'), (if_pelvisFrontAxis, 'bottom')], attachControl=[(if_pelvisFrontAxis, 'top', 5, 
if_pelvisAimAxis)])\n\t\n\tcmds.setParent('..') # go up to constraintForm\n\tcmds.setParent('..') # go up to mainForm\n\t\n\t# position the frame for the hip constraint\n\tcmds.formLayout(mainForm, edit=True, attachPosition=[(constraintFrame, 'left', -1, 0), (constraintFrame, 'right', -1, 100)], attachControl=[(constraintFrame, 'top', 5, if_numberTwistJoints)], attachNone=[(constraintFrame, 'bottom')])\n\t\n\t# create the buttons to execute the script\n\tcmd_create='amTools.rigging.hipSetup.doOptions (\"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\")'%(\n\t\tif_suffixName, \n\t\tif_numberTwistJoints, \n\t\tif_pelvis, \n\t\tif_hipAimAxis, \n\t\tif_hipFrontAxis, \n\t\tif_pelvisAimAxis, \n\t\tif_pelvisFrontAxis)\n\tutils.ui.threeButtonLayout(mainForm, mainWindow, cmd_create)\n\t\n\tcmds.showWindow(mainWindow)",
"def show_setting_port():\r\n\r\n def verify_sensor():\r\n sensor_com = ui_setting_port.com_senser.text()\r\n try:\r\n ui_setting_port.textBrowser.setText(f'传感器串口: {sensor_com}')\r\n ser = Serial(sensor_com, 9600, timeout=2)\r\n temp_sensor = Sensor(ser)\r\n ui_setting_port.textBrowser.append(str(temp_sensor.current_t_rh))\r\n ser.close()\r\n input_parameters.sensor_comp = sensor_com\r\n except Exception as e:\r\n ui_setting_port.textBrowser.append(str(e))\r\n\r\n def verify_motor():\r\n motor_com = ui_setting_port.com_motor.text()\r\n try:\r\n ui_setting_port.textBrowser.setText(f'步进电机串口: {motor_com}')\r\n ser = Serial(motor_com, 9600, timeout=0.2)\r\n temp_sensor = StepMotor(ser)\r\n temp_sensor.move_forward()\r\n ser.close()\r\n input_parameters.motor_comp = motor_com\r\n except Exception as e:\r\n ui_setting_port.textBrowser.append(str(e))\r\n\r\n def verify_network_analyzer():\r\n na_identifier = ui_setting_port.com_na.text()\r\n try:\r\n ui_setting_port.textBrowser.setText(f'网分: {na_identifier}')\r\n ser = input_parameters.visa_rm.open_resource(na_identifier)\r\n NetworkAnalyzer(ser)\r\n ui_setting_port.textBrowser.append('没毛病嗷\\n┗|`O′|┛ 嗷~~')\r\n ser.close()\r\n input_parameters.NA_identifier = na_identifier\r\n except Exception as e:\r\n ui_setting_port.textBrowser.append(str(e))\r\n\r\n setting_port = QDialog()\r\n ui_setting_port = PortSetting.Ui_Dialog()\r\n ui_setting_port.setupUi(setting_port)\r\n ports = list(list_ports.comports())\r\n text = ' 当前已连接串口:\\n'\r\n for p in ports:\r\n text += f'{p[1]}\\n'\r\n text += ' 仪器\\n'\r\n for p in ResourceManager().list_resources():\r\n text += f'{p}\\n'\r\n ui_setting_port.current_comports.setText(text)\r\n ui_setting_port.com_motor.setText(input_parameters.motor_comp)\r\n ui_setting_port.com_senser.setText(input_parameters.sensor_comp)\r\n ui_setting_port.com_na.setText(input_parameters.NA_identifier)\r\n ui_setting_port.apply_sensor.clicked.connect(verify_sensor)\r\n ui_setting_port.apply_motor.clicked.connect(verify_motor)\r\n ui_setting_port.apply_NA.clicked.connect(verify_network_analyzer)\r\n setting_port.exec_()",
"def setProgram(self, *args):\n return _libsbml.SBMLExternalValidator_setProgram(self, *args)",
"def run_datalab_pyinstaller():\n\n app = QtWidgets.QApplication(sys.argv)\n win = DataLab()\n # debug_setup(win)\n win.show()\n sys.exit(app.exec_())",
"def showGUI(self,**kwargs):\n self.baxter.menu.select(self.modes[0])",
"def about( cls, ):\n url = r\"http://www.opencircuits.com/Python_Smart_Terminal\"\n __, mem_msg = cls.show_process_memory( )\n msg = ( f\"{cls.controller.app_name} version:{cls.controller.version} \\nmode: {cls.parameters.mode}\"\n f\"\\n by Russ Hensel\"\n f\"\\nMemory in use {mem_msg} \\nCheck <Help> or \\n{url} \\nfor more info.\" )\n messagebox.showinfo( \"About\", msg )",
"def set_program_name(program_name):\n global _PROGRAM_NAME\n _PROGRAM_NAME = program_name"
] | [
"0.60660547",
"0.59619004",
"0.59247255",
"0.58688223",
"0.5844067",
"0.5796609",
"0.57606316",
"0.57349515",
"0.56842154",
"0.5634517",
"0.5594872",
"0.55846405",
"0.5563236",
"0.55436593",
"0.554133",
"0.55260307",
"0.5519292",
"0.55175734",
"0.5494302",
"0.54732877",
"0.5459346",
"0.5453867",
"0.54437923",
"0.5434663",
"0.5433883",
"0.54327947",
"0.5432715",
"0.54296505",
"0.5420935",
"0.54162216"
] | 0.7473526 | 0 |
Delegate len() to the list | def __len__(self):
return len(self.list) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __len__(self):\n return len(self.lst)",
"def __len__(self) -> int:\n return len(self._list)",
"def __len__(self):\n return self._list_size",
"def __len__(self):\n return len(self._list)",
"def __len__(self, *args, **kwargs):\n return len(self._list(*args, **kwargs))",
"def len_list(self) -> int:\n return 1",
"def getLength(self):\r\n return len(self.list)",
"def __len__(self):\n return _libsbml.ListWrapperSBase___len__(self)",
"def __len__(self):\n \n return len(self.ilist)",
"def __len__(self):\n return self._number_of_items",
"def length(self):\n # TODO: Count number of items\n # print(\"self\", self.list)\n # print(\"type\", type(self.list))\n return len(self.list)",
"def length(self):\n return len(self.list)",
"def __len__(self):\n return _libsbml.ListOf___len__(self)",
"def __len__(self):\n return len(self.as_list())",
"def length(self):\n return self.list.length",
"def length(self):\n return self.list.length",
"def __len__(self):\n return len(list(iter(self)))",
"def __len__(self):\n return len(self.data_list)",
"def __len__(self):\n raise NotImplementedError(\"Not implmented!\")",
"def __len__(self):\n raise NotImplementedError",
"def __len__(self):\n raise NotImplementedError",
"def __len__(self):\n raise NotImplementedError",
"def __len__(self):\n raise NotImplementedError",
"def __len__(self):\n raise NotImplementedError",
"def __len__(self):\n raise NotImplementedError",
"def __len__(self):\n raise NotImplementedError",
"def length(self):\n return self.list.length()",
"def __len__(self):\n raise NotImplementedError() # pragma: no cover",
"def __len__(self): # noqa:D401\n raise NotImplementedError",
"def len (self):\n raise NotImplementedError('must be implemented by subclass')"
] | [
"0.82745445",
"0.82071984",
"0.803236",
"0.80218816",
"0.7948731",
"0.7840801",
"0.7820482",
"0.7789977",
"0.77131003",
"0.76979923",
"0.7642971",
"0.7616903",
"0.7570917",
"0.75674087",
"0.7518233",
"0.7518233",
"0.7495621",
"0.7467969",
"0.7455057",
"0.7443009",
"0.7443009",
"0.7443009",
"0.7443009",
"0.7443009",
"0.7443009",
"0.7443009",
"0.74392444",
"0.74373597",
"0.7376657",
"0.73477966"
] | 0.82164276 | 1 |
Delegate pop() to the list | def pop(self):
self.list.pop() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pop(self):",
"def pop(self):",
"def pop(self):\r\n return self.list.pop()",
"def pop(self): ##################### <-\n value = self.lst[-1]\n self.lst = self.lst[:-1]\n return value",
"def pop():",
"def pop(self):\n pass",
"def pop(self):\n pass",
"def pop(self):\n pass",
"def pop(self):\n return self.list.pop()",
"def pop(self):\n #print(self.list_x[0])\n #return self.list_x[0]\n #self.list_x.remove(self.list_x[0])\n pop = self.list_x[0]\n self.list_x = self.list_x[1:]\n return pop",
"def popitem(self):\n pass",
"def popitem(self):\n pass",
"def pop(self):\n raise NotImplementedError",
"def pop(self):\n popped = self.__list[-1]\n self.__list = self.__list[:-1]\n return popped",
"def popitem(self): # real signature unknown; restored from __doc__\n pass",
"def Pop(self):\n # Alternativly use built-in pop()\n #return self.list.pop()\n top = self.list[len(self.list) - 1]\n self.list.remove(top)\n return top",
"def pop(self, *args, **kwargs): # real signature unknown\n pass",
"def pop(self) -> Any:\n # TODO: Implement this method\n ...",
"def pop_from_deque(self):",
"def pop(self):\n return self.List_store.pop()",
"def pop(self):\n return super().remove_item_from_front()",
"def pop(self) -> T:\n pass",
"def remove(self):\n return self.stack_list.pop()",
"def pop_last(self):\n self.pop_item(-1)",
"def pop(self) -> object:\n if len(self) <= 0:\n raise EmptyListException(\"The list is empty.\")\n\n pop_node = self._head\n self._head = self._head.next()\n self._len -= 1\n\n return pop_node.value()",
"def pop(self):\n size = self._list.size()\n if size == 0:\n return None\n data = self._list.tail.data\n self._list.removeIndex(size-1)\n return data",
"def pop(self):\n if self.is_empty():\n raise ValueError('stack is empty')\n else:\n val = self.list.head.data\n self.list.delete(val)\n return val",
"def pop(self):\n if not self.head:\n raise IndexError(\"Empty list, unable to pop\")\n output = self.head.data\n self.head = self.head.next\n self._counter -= 1\n return output",
"def pop(self, index=None, last=True):\n if index == None:\n return super().pop(last)\n else:\n ret = self[index]\n self.remove(ret)\n return ret",
"def popitem(self):\n return self.pop(0)"
] | [
"0.8104019",
"0.8104019",
"0.79311556",
"0.7867167",
"0.7832172",
"0.77851623",
"0.77503335",
"0.77503335",
"0.77467185",
"0.7742908",
"0.77351236",
"0.77227414",
"0.767835",
"0.76257366",
"0.7521559",
"0.74597466",
"0.74431336",
"0.7318125",
"0.7282987",
"0.7224658",
"0.71769005",
"0.71109945",
"0.708467",
"0.7029329",
"0.695372",
"0.6939019",
"0.6929467",
"0.689615",
"0.688494",
"0.68569046"
] | 0.8389024 | 0 |
If avoid_repeats is False, delegates extend() to the list. Otherwise, appends all items that don't create a repeat of 2 items to the list. | def extend(self, other_list:list, avoid_repeats:bool=False):
if not avoid_repeats:
self.list.extend(other_list)
else:
for item in other_list:
if not self.list or not self.list[-1] == item:
self.list.append(item) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def extend(self, items):\n\t\tfor item in items:\n\t\t\tself.append(item)",
"def extend(self, item: Any) -> BaseList:\n super().extend(item)\n return self",
"def _maybe_repeat(self, x):\n if isinstance(x, list):\n assert len(x) == self.n\n return x\n else:\n return [x] * self.n",
"def listExtend(lst, items):\n if lst is None:\n return list(items)\n else:\n lst.extend(items)\n return lst",
"def extend(self, items):\n # type: (Iterable[Any]) -> None\n return list.extend(self, self._refs(items))",
"def extend(self, items):\n self.work.extend(items)",
"def collect(listing, item):\n\n listing.extend(item)\n return listing",
"def add_item(self, new_item):\n [self.item_list.append(new_item) for item in self.item_list\n if new_item not in self.item_list]",
"def _extend(cls, li1, li2):\n return li1 + li2",
"def append(self, items):\n self.__add__(items)",
"def __append_to_item_list(self):\n Item.get_item_list().append(self)",
"def _maybe_repeat(x, n):\n if isinstance(x, list):\n assert len(x) == n\n return x\n else:\n return [x] * n",
"def _expand_main_list(self):\n\n # Compute how much to extend underlying list by\n new_length = self.resizing_factor * len(self.main_list)\n change_in_length = new_length - len(self.main_list)\n\n # Entend underlying list\n self.main_list.extend([None] * change_in_length)",
"def appendAndOwn(self, *args):\n return _libsbml.ListOf_appendAndOwn(self, *args)",
"def callback_extend_list(item):\n fisher_contingency_pval_parallel_insertion.extend(item)",
"def right_merge(self,list_to_merge):\n self.items = self.items + list_to_merge\n return self.items",
"def extend(self, seq):\n for element in seq:\n self.append(element)",
"def extend(self, in_items):\n\n items = self.list\n items.extend(in_items)\n self.value = self.__class__.SEPARATOR.join(items)",
"def extend(self, sequence):\n self.__field.validate(sequence)\n return list.extend(self, sequence)",
"def extend_list(some_list):\n print(f\"This is our list: {some_list}\")\n one_el = [9]\n tre_el = [9, 8, 7]\n some_list.append(one_el)\n print(f\"Appending some_list with a single element iterable: {some_list}\")\n some_list.append(tre_el)\n print(f\"Appending some_list with a multi-element iterable: {some_list}\")\n some_list.extend(one_el)\n print(f\"extending some_list with a single element iterable: {some_list}\")\n some_list.extend(tre_el)\n print(f\"extending some_list with a multi-element iterable: {some_list}\")\n return \"done\"",
"def Deduplicate(items):\n seen = set()\n for it in items:\n if it not in seen:\n seen.add(it)\n yield it",
"def repopulate(self):\n new_items = self._list_populate_function()\n\n new_set = set(new_items.values() if isinstance(new_items, dict) else new_items)\n\n if len(new_items) != len(self._display_list):\n if isinstance(new_items, dict):\n # for dictionaries store the key as user role data\n for key in sorted(new_items.keys()):\n item = new_items[key]\n if item not in self._display_list:\n self.list_widget.addItem(item)\n self.list_widget.item(self.list_widget.count() - 1).setData(Qt.UserRole, key)\n else:\n for item in new_items:\n if item not in self._display_list:\n self._add_item(item)\n self._display_list = sorted(set(new_set) | set(self._display_list))",
"def append(self, other: Energy) -> None:\n\n for item in self:\n if other == item:\n logger.debug(\n f\"Not appending {other} to the energies - \"\n f\"already present. Moving to the end\"\n )\n self.append(self.pop(self.index(item)))\n return\n\n return super().append(other)",
"def extend(self, sequence):\n self.__field.validate(sequence)\n return list.extend(self, sequence)",
"def add_items_quantity_not_duplicates(request):\n all_items_no_duplicates = []\n\n for loop_index, item in enumerate(all_shopping_items(request)):\n item_dict = {\n 'item': item.item,\n 'quantity': item.quantity,\n 'category': item.category.category,\n 'id': item.id,\n 'user': {\n 'username': item.user.first_name\n }\n }\n\n if loop_index == 0:\n all_items_no_duplicates.append(item_dict)\n else:\n item_is_not_a_copy = True\n for list_item in all_items_no_duplicates:\n if list_item['item'] == item.item:\n item_is_not_a_copy = False\n list_item['quantity'] += item.quantity\n list_item['user']['username'] += ' / ' + item.user.first_name\n if item_is_not_a_copy:\n all_items_no_duplicates.append(item_dict)\n\n return all_items_no_duplicates",
"def extend(*, list1 : Union[List[Any], ConduitVariable], list2 : Union[List[Any], ConduitVariable]) -> None:\n list1.extend(list2)",
"def extend(self, extension):\n for element in extension:\n self.append(element)",
"def extended(self) -> List:\n raise NotImplementedError",
"def left_merge(self,list_to_merge):\n self.items = list_to_merge + self.items\n return self.items",
"def append(self, item: Any) -> BaseList:\n super().append(item)\n return self"
] | [
"0.62498456",
"0.58820844",
"0.57340544",
"0.5694424",
"0.56086457",
"0.5561484",
"0.5498792",
"0.5492445",
"0.54523814",
"0.5431818",
"0.5429031",
"0.5411647",
"0.540451",
"0.5382095",
"0.53687876",
"0.5337805",
"0.53252906",
"0.53235775",
"0.5305681",
"0.52904165",
"0.5287506",
"0.526407",
"0.51708496",
"0.51605487",
"0.5136493",
"0.51241493",
"0.51042634",
"0.50846076",
"0.5084302",
"0.5018895"
] | 0.7816751 | 0 |
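Note that the extend method in the row above only skips an item when it equals the element most recently appended, so avoid_repeats=True removes adjacent repeats rather than all duplicates. Below is a stand-alone sketch of that branch on a plain list (the wrapper class itself is not shown in the row, so the helper name here is invented for illustration).

    def extend_no_adjacent_repeats(target: list, other: list) -> None:
        # Mirrors the avoid_repeats=True branch: append an item only when it
        # differs from the last element already in the target list.
        for item in other:
            if not target or target[-1] != item:
                target.append(item)

    values = [1, 2]
    extend_no_adjacent_repeats(values, [2, 2, 3, 3, 2])
    assert values == [1, 2, 3, 2]   # adjacent repeats dropped, the later 2 kept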
Reverses the portion of the list between start and end indexes, inclusive. | def reverse(self, start:int=0, end:int=None):
if end == None:
if start == 0:
self.list.reverse()
return
end = len(self) - 1
left = start
right = end
while left < right:
self.swap(left, right)
left += 1
right -= 1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def rev_list_in_place(lst):\n\n for i in range(len(lst)//2):\n start = lst[i] #0, 1\n end = lst[-i-1] #-1, -2\n\n lst[i] = end\n lst[-i-1] = start\n return lst",
"def reverse_(data, start, stop):\n if start >= stop:\n return\n else:\n tmp = data[start]\n data[start] = data[stop]\n data[stop] = tmp\n reverse_(data, start +1, stop -1)",
"def reverse(S, start, stop):\n if start < stop - 1:\n S[start], S[stop-1] = S[stop-1], S[start]\n reverse(S, start+1, stop-1)",
"def partial_reverse(lst, start):\n for x in lst[start:]:\n lst.insert(start,x)\n lst.pop()",
"def reverse_enumerate(iterable, start=0):\n if(start == 0):\n return itertools.izip(reversed(xrange(len(iterable))), reversed(iterable))\n else:\n return itertools.izip(reversed(xrange(len(iterable[::start]))), reversed(iterable[::start]))",
"def reverse_list(items):\n\n return items[::-1]",
"def reverse_elements(seq):\n seq_copy = seq [::-1]\n return seq_copy",
"def reverse_list(self,list_):\r\n list_.reverse()",
"def elements_reversed(seq):\n new_seq = seq[::-1]\n return new_seq",
"def reverse_list_recursively_in_place(list1, first = 0, last = -1):\n\n while first <= len(list1) // 2 - 1: #midpoint\n \n #switch first and last indexes\n new_last_item = list1[0]\n new_first_item = list1[-1]\n list1[0] = new_first_item\n list1[-1] = new_last_item\n # print(list1)\n #redefine first and last indexes\n first = first + 1\n last = last - 1\n\n return reverse_list_recursively_in_place(list1, first, last)\n # return list1",
"def reverse_elements(seq):\n\n new_seq = []\n\n i = -1\n\n while i >= -len(seq):\n new_seq.append(seq[i])\n i -= 1\n\n return format_seq(seq, new_seq)",
"def reversed(x) -> List:\n pass",
"def reverse_enumerate(collection: [Iterable, Sized, Reversible], start_index: int = None) -> Iterator[Tuple]:\n # TODO: implement start_index functionality that mirrors enumerate builtin\n return zip(reversed(range(len(collection))), reversed(collection))",
"def reverse(lst):\n i = 0 # first item\n j = len(lst)-1 # last item\n while i<j:\n lst[i],lst[j] = lst[j],lst[i]\n i += 1\n j -= 1",
"def reverse(seq):\n return seq[::-1]",
"def reverse(seq):\n return seq[::-1]",
"def reverse_slice(n):\n return n[::-1]",
"def reverse_list(s):\r\n for i in range(len(s) // 2):\r\n s[i], s[-1-i] = s[-1-i], s[i]",
"def mirror_update(lst: list):\n lst[len(lst)//2:] = lst[len(lst)//2-1::-1]",
"def reverse(self) -> BaseList:\n super().reverse()\n return self",
"def reverseTheList(n):\n print(n[::-1])\n return(n[::-1])",
"def reverse_iterative(S):\n start, stop = 0, len(S) - 1\n while start < stop:\n S[start], S[stop] = S[stop], S[start]\n start, stop = start + 1, stop - 1",
"def reverse(self):\n left = 0 # Start at beginning of array\n right = self._length - 1 # Start at end of array\n while left <= right: # Swap values until pointers collide\n self._arr[left], self._arr[right] = self._arr[right], self._arr[left]\n left, right = left + 1, right - 1",
"def reverse_list(vals):\n fst = 0\n \"\"\"Last variable is in the -1 position\"\"\"\n lst = len(vals)-1\n \"\"\"formula to replace first positions variable with last position variable, while counting backward form this position\"\"\"\n while fst < lst:\n t = vals[fst]\n vals[fst] = vals[lst]\n vals[lst] = t\n fst += 1\n lst -= 1",
"def _string_reversial(string : list, start : int, end : int): # function is inplace\n if len(string) < 2:\n return\n\n while end > start:\n string[start], string[end] = string[end], string[start]\n start += 1\n end -=1",
"def my_reversed(my_list):\r\n i = 0\r\n my_reversed = []\r\n while i < len(my_list):\r\n my_reversed += my_list[len(my_list) - 1 - i:len(my_list) - i]\r\n i += 1\r\n return my_reversed",
"def reverse(arr: StaticArray) -> None:\n # Sets the length as an integer of half the size of the array.\n length = int(arr.size() / 2)\n\n for index in range(length):\n # Gets the first and last unsorted elements\n beginning = arr.get(index)\n end = arr.get((arr.size() - 1) - index)\n # Swaps the first and last unsorted elements\n arr.set(index, end)\n arr.set((arr.size() - 1) - index, beginning)",
"def flip(self):\n self._start, self._end = self._end, self._start",
"def reversed(lst) -> \"List\": # noqa: N805\n acc = Nil\n while lst:\n x, lst = lst.uncons\n acc = acc.cons(x)\n return acc",
"def reverse(list_of_chars):\n\n left = 0\n right = len(list_of_chars) - 1\n\n while left < right:\n list_of_chars[left], list_of_chars[right] = \\\n list_of_chars[right], list_of_chars [left]\n\n left += 1\n right -= 1\n\n return list_of_chars"
] | [
"0.75801146",
"0.7426304",
"0.73637533",
"0.72495973",
"0.69379836",
"0.6891903",
"0.6763126",
"0.6715727",
"0.66684985",
"0.6529971",
"0.65189874",
"0.651104",
"0.6492778",
"0.6492647",
"0.6479766",
"0.6479766",
"0.6469574",
"0.6395415",
"0.6336724",
"0.6288921",
"0.62816465",
"0.6269771",
"0.6254514",
"0.62441576",
"0.62241447",
"0.6215017",
"0.6196924",
"0.6161236",
"0.6146389",
"0.6137154"
] | 0.8482028 | 0 |
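A minimal standalone sketch of the same inclusive partial reverse, assuming a plain Python list in place of the record's self.list / self.swap wrapper (the helper name is hypothetical):

def reverse_slice_inplace(items, start=0, end=None):
    # Reverse items[start..end] in place, with end inclusive.
    if end is None:
        end = len(items) - 1
    left, right = start, end
    while left < right:
        # Swap the outermost pair and move both pointers inward.
        items[left], items[right] = items[right], items[left]
        left += 1
        right -= 1

data = [1, 2, 3, 4, 5]
reverse_slice_inplace(data, 1, 3)
assert data == [1, 4, 3, 2, 5]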
A generator that yields the tuples in the list satisfying all of the specified filter conditions; each filter is a (key, value, comparison) tuple. | def filter(self, filters:list)->list:
for item in self.list:
use_item = True
for filter in filters:
filter_key, filter_value, filter_type = filter
if filter_type == "<" and item[filter_key] >= filter_value:
use_item = False
break
elif filter_type == ">" and item[filter_key] <= filter_value:
use_item = False
break
elif filter_type == "<=" and item[filter_key] > filter_value:
use_item = False
break
elif filter_type == ">=" and item[filter_key] < filter_value:
use_item = False
break
elif filter_type == "=" and not item[filter_key] == filter_value:
use_item = False
break
if use_item:
yield item | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def combination2_with_pruning(items: Sequence[U], condition: Callable[[U, U], bool]) -> Iterator[Tuple[U, U]]:\n for i in range(len(items) - 1):\n item1 = items[i]\n if not condition(item1, item1):\n break\n for j in range(i + 1, len(items)):\n item2 = items[j]\n if not condition(item1, item2):\n break\n yield item1, item2",
"def sample(self):\n if self.do_filter:\n if len(self.t_filtered) > 0:\n yield self.t_filtered.popleft(), self.x_filtered.popleft()\n else:\n yield None, None\n else: # let's not filter\n if len(self.t) > 0:\n yield self.t.popleft(), self.x.popleft()\n else:\n yield None, None",
"def filter(self, fn: Callable[[Tuple[K, List[V]]], bool]) -> Iterator[Tuple[K, List[V]]]:\n raise NotImplementedError",
"def filter_reads(f, condition, riter):\n for r in riter:\n # TODO: looks like we don't need 'fpass'\n new_r = tuple(dict(mate, fpass=f(mate) and mate['fpass']) for mate in r)\n if condition(tuple(mate['fpass'] for mate in new_r)):\n yield new_r",
"def _model_filter_in_operator_generator(filter_operator: Operator) -> Generator:\n for operator in filter_operator:\n if isinstance(operator.unresolved_value, ModelFilter):\n yield operator",
"def filtered_xyz(self) -> tuple[int, int, int]:",
"def filter(self, fn: Callable[[Tuple[K, List[V]]], bool]) -> Iterator[Tuple[K, List[V]]]:\n return (entry for entry in iter(self) if fn(entry))",
"def filter(iterable, predicate):\n\n for x in iterable:\n if predicate(x):\n yield x",
"def filter(\n self, items: Iterable[Any], spec: Specification\n ) -> Generator[Any, None, None]:",
"def filter(iterable, filter_func):\n for item in iterable:\n item = filter_func(item)\n if item is not None:\n yield item",
"def filter_collection(collection, filter_tuples):\n\n for filter_tuple in filter_tuples:\n collection = collection[collection[filter_tuple[0]] == filter_tuple[1]]\n\n return collection",
"def items():\n for point in boolfunc.iter_points(inputs):\n # pylint: disable=C0103\n ab = self.restrict(point).pcdata[0]\n cd = other.restrict(point).pcdata[0]\n # a | c, b & d\n yield ((ab | cd) & 2) | ((ab & cd) & 1)",
"def items():\n for point in boolfunc.iter_points(inputs):\n # pylint: disable=C0103\n ab = self.restrict(point).pcdata[0]\n cd = other.restrict(point).pcdata[0]\n # a & c, b | d\n yield ((ab & cd) & 2) | ((ab | cd) & 1)",
"def filter(\n self, items: Iterable[Product], spec: Specification\n ) -> Generator[Product, None, None]:\n return (item for item in items if spec.is_satisfied(item))",
"def custom_filter(some_func, iterator_list):\n\n local_iterator = from_input_to_list(iterator_list)\n func_map = [some_func(i) for i in local_iterator]\n true_list = [j for j in func_map if j > 100] # here we can hardcode any condition\n\n return true_list",
"def test():\n # Separate generator objects.\n infilename = 'filter2.py'\n infile = open(infilename, 'r')\n g1 = filter(infile, skip_if_emptystring)\n g2 = filter(g1, add_double_mash)\n g3 = filter(g2, rstrip_line)\n for line in g3:\n print line\n infile.close()\n # Nested calls to generator functions.\n print '-' * 50\n infile = open(infilename, 'r')\n for line in filter(\n filter(\n filter(infile, skip_if_emptystring),\n add_double_mash),\n rstrip_line):\n print line\n infile.close()",
"def identity_filter(element_tuple):\r\n\treturn element_tuple",
"def filter(iteratee, seq):\n return _filter(fnc.iteratee(iteratee), seq)",
"def visitCriteria(self, ctx: ApiQLParser.CriteriaContext):\n return lmap(lambda c: c.accept(self), ctx.getChildren(self.filter_ignored))",
"def sync_filter(func, *iterables):\n return tuple(zip(*tuple(i for i in zip(*iterables) if func(*i)))) or ((),) * len(\n iterables\n )",
"def filterfalse(iterable, predicate):\n for x in iterable:\n if not predicate(x):\n yield x",
"def items():\n for point in boolfunc.iter_points(inputs):\n # pylint: disable=C0103\n ab = self.restrict(point).pcdata[0]\n cd = other.restrict(point).pcdata[0]\n # a & d | b & c, a & c | b & d\n a, b, c, d = ab >> 1, ab & 1, cd >> 1, cd & 1\n yield ((a & d | b & c) << 1) | (a & c | b & d)",
"def filter_or(filters):\n def filt(item):\n for f in filters:\n if f(item):\n return True\n return False\n return filt",
"def iter_ents(self, **cond: str) -> Iterator['Entity']:\n items = cond.items()\n for ent in self.entities[:]:\n for key, value in items:\n if key not in ent or ent[key] != value:\n break\n else:\n yield ent",
"def predicates(\n self,\n subject: Optional[\"_SubjectType\"] = None,\n object: Optional[\"_ObjectType\"] = None,\n ) -> Generator[\"_PredicateType\", None, None]:\n for t, c in self.triples((subject, None, object)):\n yield t[1]",
"def predicates(\n self,\n subject: Optional[\"_SubjectType\"] = None,\n object: Optional[\"_ObjectType\"] = None,\n ) -> Generator[\"_PredicateType\", None, None]:\n for t, c in self.triples((subject, None, object)):\n yield t[1]",
"def filterPick(list, filter, classification):\n y = []\n for job in list:\n x = [(job, classification) for l in job for m in (filter(l),) if m]\n y.append(x)\n return y",
"def filter_and(filters):\n def filt(item):\n for f in filters:\n if not f(item):\n return False\n return True\n return filt",
"def filter(self, op):\n def op_filter(seqs):\n r = [s for s in seqs if op(s)]\n if len(r) == 0:\n return None\n else:\n return r\n return self.element_wise(op_filter)",
"def predicate_objects(\n self, subject: Optional[\"_SubjectType\"] = None\n ) -> Generator[Tuple[\"_PredicateType\", \"_ObjectType\"], None, None]:\n for t, c in self.triples((subject, None, None)):\n yield t[1], t[2]"
] | [
"0.6575245",
"0.6373978",
"0.6246004",
"0.62212527",
"0.6165847",
"0.6111315",
"0.6110025",
"0.60950655",
"0.6079471",
"0.6078307",
"0.6066584",
"0.60238826",
"0.6008566",
"0.59748495",
"0.5969665",
"0.57931423",
"0.5792632",
"0.57537127",
"0.57456875",
"0.57093096",
"0.5704929",
"0.57012516",
"0.56888396",
"0.56248987",
"0.56085783",
"0.56085783",
"0.5600093",
"0.5592654",
"0.5572125",
"0.55517673"
] | 0.64063805 | 1 |
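For illustration, a standalone sketch of the same comparison-dispatch idea, assuming each filter is a (key, value, comparison) triple applied to positionally indexed tuples; the names below are hypothetical:

import operator

_OPS = {"<": operator.lt, "<=": operator.le, ">": operator.gt, ">=": operator.ge, "=": operator.eq}

def filter_tuples(rows, filters):
    # Yield every row for which all (key, value, op) conditions hold.
    for row in rows:
        if all(_OPS[op](row[key], value) for key, value, op in filters):
            yield row

rows = [(1, 5), (2, 9), (3, 7)]
assert list(filter_tuples(rows, [(1, 6, ">")])) == [(2, 9), (3, 7)]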
Quicksorts the list by outside_key, then divides the list into stable blocks sharing the same outside_key and quicksorts each block by inner_key. Essentially equivalent to the SQL statement SORT BY outside_key, inner_key. | def double_sort(self, outside_key:int, inner_key:int, start:int=0, end:int=None, reverse_outside:bool=False, reverse_inside:bool=False):
self.quicksort(outside_key, start, end)
if reverse_outside:
self.reverse(start, end)
self.sub_quicksort(outside_key, inner_key, start, end, reverse_inside) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def quick_sort(partition_list, low, high):\n if low >= high:\n return\n part_point = get_partition(partition_list, low, high)\n quick_sort(partition_list, low, part_point - 1)\n quick_sort(partition_list, part_point + 1, high)",
"def sub_quicksort(self, stable_key:int, sort_key:int, start:int=0, end:bool=None, reverse:bool=False):\n if end == None: \n end = len(self) - 1\n if start >= end:\n return\n first = start\n for index in range(start + 1, end + 1):\n if not self[index][stable_key] == self[first][stable_key]:\n self.quicksort(sort_key, first, index - 1)\n if reverse:\n self.reverse(first, index - 1)\n first = index\n if not first == end:\n self.quicksort(sort_key, first, end)\n if reverse:\n self.reverse(first, end)",
"def quicksort(self, key:int, start:int=0, end:int=None):\n if end == None:\n end = len(self) - 1\n if start >= end:\n return\n if start == end - 1:\n if self[start][key] > self[end][key]:\n self.swap(start, end)\n return\n work = [(start, end)]\n while work:\n first, last = work.pop()\n pivot = (first + last) // 2\n self.swap(pivot, last)\n pivot_to = first\n for index in range(first, last + 1):\n if self[index][key] < self[last][key]:\n self.swap(index, pivot_to)\n pivot_to += 1\n self.swap(pivot_to, last)\n if pivot_to > (first + 1):\n work.append((first, pivot_to - 1))\n if last > (pivot_to + 1):\n work.append((pivot_to + 1, last))",
"def partial_sort(seq):\n for i in range(1, int(0.75 * len(seq))):\n key = seq[i]\n low, up = 0, i\n while up > low:\n middle = (low + up) // 2\n if seq[middle] < key:\n low = middle + 1 \n else:\n up = middle\n seq[:] = seq[:low] + [key] + seq[low:i] + seq[i + 1:]",
"def quick_sort(items):\n if len(items) > 1:\n pivot_index = len(items) / 2\n smaller_items = []\n larger_items = []\n \n for i, val in enumerate(items):\n if i != pivot_index:\n if val < items[pivot_index]:\n smaller_items.append(val)\n else:\n larger_items.append(val)\n \n quick_sort(smaller_items)\n quick_sort(larger_items)\n items[:] = smaller_items + [items[pivot_index]] + larger_items",
"def qsort(my_list):\n\n comparisions = quickSortHelper(my_list,0,len(my_list)-1)\n return (my_list, comparisions)",
"def quick_sort(items):\n if len(items) > 1:\n pivot_index = len(items) / 2\n smaller_items = []\n larger_items = []\n\n for i, val in enumerate(items):\n if i != pivot_index:\n if val < items[pivot_index]:\n smaller_items.append(val)\n else:\n larger_items.append(val)\n\n quick_sort(smaller_items)\n quick_sort(larger_items)\n items[:] = smaller_items + [items[pivot_index]] + larger_items",
"def quicksort(list, low, high):\n if(low < high):\n partitionindex = partition(list, low, high)\n\n quicksort(list, low, partitionindex-1)\n quicksort(list, partitionindex + 1, high)",
"def quick_sort(unsorted_list):\r\n\tsorted_list = list(unsorted_list)\r\n\tless = []\r\n\tequal = []\r\n\tgreater = []\r\n\tif len(sorted_list) > 1:\r\n\t\tpivot = sorted_list[0]\r\n\t\tfor item in sorted_list:\r\n\t\t\tif item < pivot:\r\n\t\t\t\tless.append(item)\r\n\t\t\telif item == pivot:\r\n\t\t\t\tequal.append(item)\r\n\t\t\telif item > pivot:\r\n\t\t\t\tgreater.append(item)\r\n\t\treturn quick_sort(less) + equal + quick_sort(greater)\r\n\telse:\r\n\t\treturn sorted_list",
"def qsort2(list):\n if list == []: \n return []\n else:\n pivot = list[0]\n lesser, equal, greater = partition(list[1:], [], [pivot], [])\n return qsort2(lesser) + equal + qsort2(greater)",
"def quick_sort_v2(collection):\n\n def partition(lst, left, right):\n pivot = lst[left]\n i = left\n j = right\n while j > i:\n while lst[j] >= pivot and j > i:\n j -= 1\n lst[i] = lst[j]\n while lst[i] <= pivot and j > i:\n i += 1\n lst[j] = lst[i]\n lst[i] = pivot\n return i\n\n def sort(lst, left, right):\n if left >= right:\n return\n p = partition(lst, left, right)\n sort(lst, left, p - 1)\n sort(lst, p + 1, right)\n\n sort(collection, 0, len(collection) - 1)\n return collection",
"def helper_smartsort(grouped, key):\n df = grouped.get_group(key)\n head = df.loc[df.index[0], :]\n tail = df.loc[df.index[1:], :]\n return head, tail",
"def quick_sort_helper(a_list, first_position, last_position):\n if first_position < last_position:\n\n split_point = partition(a_list, first_position, last_position)\n\n quick_sort_helper(a_list, first_position, split_point)\n quick_sort_helper(a_list, split_point + 1, last_position)\n\n return a_list",
"def qsort3(items):\n\n # create an initial unit of work. Each unit work is marked by a starting index and its sub-partition so that\n # we know when we add up the starting index to compute the final location of the pivot in the final sorted list.\n work = (0, items)\n\n # we will be using works to track remaining partitions needed to be quick-sorted\n works = [work]\n\n # create a result list to store all of our pivots sorted thru qsort in our final sorted list\n result = [-1] * len(items)\n\n # keep working on partition till no remaining partitions left.\n while len(works) > 0:\n\n # remove a unit of partition to work on in this iteration\n startIndex, part = works.pop()\n\n\n # standard quick-sorting start here...\n\n pivot = part[0]\n lessPart = []\n morePart = []\n for x in part:\n if x < pivot:\n lessPart.append(x)\n elif x > pivot:\n morePart.append(x)\n else:\n # ignoring pivot\n pass\n\n if len(lessPart) > 0:\n # create a unit of work for the lesser partition.\n works.append((startIndex, lessPart))\n\n if len(morePart) > 0:\n # create a unit of work for the greater partition.\n works.append((startIndex + len(lessPart) + 1, morePart))\n\n # A pivot's location is final in standard quick-sort algorithm. Hence we put it back to the result.\n result[ startIndex + len(lessPart) ] = pivot\n\n return result",
"def quick_sort(data, head, tail, draw_data, time_tick):\n if head < tail:\n partition_index = partition(data, head, tail, draw_data, time_tick)\n\n # Left partition\n quick_sort(data, head, partition_index-1, draw_data, time_tick)\n\n # Right partition\n quick_sort(data, partition_index+1, tail, draw_data, time_tick)",
"def quick_sort(items, low=None, high=None):\r\n # TODO: Check if high and low range bounds have default values (not given)\r\n # TODO: Check if list or range is so small it's already sorted (base case)\r\n # TODO: Partition items in-place around a pivot and get index of pivot\r\n # TODO: Sort each sublist range by recursively calling quick sort\r",
"def quick_sort(mylist):\n _inplace_quick_sort(mylist, 0, len(mylist)-1)",
"def quick_sort(items, low = 0, high = -1):\n if high == -1:\n high = len(items) - 1 \n\n if low < high:\n pivot = partition(items, low, high)\n # Left side of pivot\n quick_sort(items, low, pivot - 1)\n # Right side of pivot\n quick_sort(items, pivot + 1, high)",
"def quick_sort(data):\n def partition(data,start,end):\n \"\"\" Creating the partition and returning the partition key to sort further\"\"\"\n i=start-1\n for j in range(start,end):\n if data[j]<=data[end]:\n i+=1\n data[i],data[j]=data[j],data[i]\n data[i+1],data[end]=data[end],data[i+1]\n return i+1\n \n def sort(data,start,end):\n \"\"\"\n Sorting the data provided \n \"\"\"\n if start < end:\n partition_index=partition(data,start,end)\n sort(data,start,partition_index-1)\n sort(data,partition_index+1,end)\n sort(data,0,len(data)-1)\n #print(data)\n return data",
"def quicksort(inputList):\n\tif not inputList:\n\t\treturn []\t\n\telse:\n\t\treturn quicksort(filter(lambda x: x < inputList[len(inputList)/2], inputList))+[inputList[len(inputList)/2]]+quicksort(filter(lambda x: x > inputList[len(inputList)/2], inputList))",
"def _quick_sort(l, start, end):\n if start < end:\n split_point = partition(l, start, end)\n\n _quick_sort(l, start, split_point - 1)\n _quick_sort(l, split_point + 1, end)\n\n return l",
"def __QuickSortHelper(ulist, start, stop):\n if start < stop:\n i = __Partition(ulist, start, stop)\n __QuickSortHelper(ulist, start, i -1)\n __QuickSortHelper(ulist, i+1, stop)",
"def quick_sort(collection):\n\n def partition(lst, left, right):\n pivot = lst[left]\n i = left\n j = right\n while j > i:\n while lst[j] >= pivot and j > i:\n j -= 1\n while lst[i] <= pivot and j > i:\n i += 1\n if j > i:\n lst[i], lst[j] = lst[j], lst[i]\n lst[left], lst[i] = lst[i], lst[left]\n return i\n\n def sort(lst, left, right):\n if left >= right:\n return\n p = partition(lst, left, right)\n sort(lst, left, p - 1)\n sort(lst, p + 1, right)\n\n sort(collection, 0, len(collection) - 1)\n return collection",
"def quick_sort(my_list):\n if len(my_list) == 0:\n return []\n else:\n pivot = my_list[0]\n left = [element for element in my_list if element[1] < pivot[1]]\n pivots = [element for element in my_list if element[1] == pivot[1]]\n right = [element for element in my_list if element[1] > pivot[1]]\n return quick_sort(left) + pivots + quick_sort(right)",
"def QuickSort(ulist):\n __QuickSortHelper(ulist, 0, len(ulist)-1)",
"def helper_smartsort2(grouped, key):\n df = grouped.get_group(key)\n return df.loc[df.index[0], :]",
"def quipSort(lst):\n i = 0\n first = lst[0]\n last = lst[len(lst)-1]\n middle = lst[len(lst)//2]\n if first < last:\n if first < middle:\n pivot = first\n else:\n pivot = middle\n elif last < middle:\n pivot = last\n else:\n pivot = middle\n less, same, more = list(), list(), list()\n N = len(lst)\n limit = int(math.log(N,2))\n quipSortRec(lst, limit, less, same, more, pivot, i)",
"def quicksort_slice (s, cmp, pivotalea=False):\n right=s['right']\n left=s['left']\n if right>left:\n s1,s2 = partition(s,cmp, pivotalea=pivotalea)\n s2['left']+=1\n quicksort_slice(s1,cmp, pivotalea=pivotalea)\n quicksort_slice(s2,cmp, pivotalea=pivotalea)",
"def quicksort_recursive_algo(numbers, left, right):\n i, j = partition(numbers, left, right)\n if left < j:\n quicksort_recursive_algo(numbers, left, j)\n if i < right:\n quicksort_recursive_algo(numbers, i, right)",
"def shell_sort(a_list):\n sublist_count = len(a_list) // 2\n while sublist_count > 0:\n for start_position in range(sublist_count):\n a_list = insertion_sort(\n a_list,\n start=start_position,\n gap=sublist_count\n )\n sublist_count = sublist_count // 2\n return a_list"
] | [
"0.64460015",
"0.6355268",
"0.63343626",
"0.61252177",
"0.61231464",
"0.60208863",
"0.59688854",
"0.59314775",
"0.59294957",
"0.5915863",
"0.58827007",
"0.58791566",
"0.5866802",
"0.58583575",
"0.58298904",
"0.58237565",
"0.5819016",
"0.57973635",
"0.579664",
"0.57711905",
"0.57646483",
"0.57514507",
"0.5726618",
"0.5654585",
"0.56275547",
"0.5584358",
"0.5574841",
"0.55734926",
"0.5546084",
"0.55418205"
] | 0.70455134 | 0 |
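As a cross-check of the documented two-key ordering (the docstring's SORT BY outside_key, inner_key analogy), the same result can be sketched with Python's built-in sort; the sample rows below are illustrative:

rows = [(2, "b"), (1, "z"), (2, "a"), (1, "a")]

# Order by the outer key first, then by the inner key within equal outer keys.
assert sorted(rows, key=lambda r: (r[0], r[1])) == [(1, "a"), (1, "z"), (2, "a"), (2, "b")]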
Quicksorts subsets of the list grouped by a stable key. In-place and non-recursive. This function maintains the order of blocks of tuples having the same stable_key. Within each block, items are re-sorted by sort_key using quicksort(). Since quicksort() is ascending, specifying reverse = True reverses the order within those blocks after quicksorting. | def sub_quicksort(self, stable_key:int, sort_key:int, start:int=0, end:int=None, reverse:bool=False):
if end == None:
end = len(self) - 1
if start >= end:
return
first = start
for index in range(start + 1, end + 1):
if not self[index][stable_key] == self[first][stable_key]:
self.quicksort(sort_key, first, index - 1)
if reverse:
self.reverse(first, index - 1)
first = index
if not first == end:
self.quicksort(sort_key, first, end)
if reverse:
self.reverse(first, end) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def quicksort(self, key:int, start:int=0, end:int=None):\n if end == None:\n end = len(self) - 1\n if start >= end:\n return\n if start == end - 1:\n if self[start][key] > self[end][key]:\n self.swap(start, end)\n return\n work = [(start, end)]\n while work:\n first, last = work.pop()\n pivot = (first + last) // 2\n self.swap(pivot, last)\n pivot_to = first\n for index in range(first, last + 1):\n if self[index][key] < self[last][key]:\n self.swap(index, pivot_to)\n pivot_to += 1\n self.swap(pivot_to, last)\n if pivot_to > (first + 1):\n work.append((first, pivot_to - 1))\n if last > (pivot_to + 1):\n work.append((pivot_to + 1, last))",
"def quick_sort(partition_list, low, high):\n if low >= high:\n return\n part_point = get_partition(partition_list, low, high)\n quick_sort(partition_list, low, part_point - 1)\n quick_sort(partition_list, part_point + 1, high)",
"def helper_smartsort(grouped, key):\n df = grouped.get_group(key)\n head = df.loc[df.index[0], :]\n tail = df.loc[df.index[1:], :]\n return head, tail",
"def quick_sort(items):\n if len(items) > 1:\n pivot_index = len(items) / 2\n smaller_items = []\n larger_items = []\n \n for i, val in enumerate(items):\n if i != pivot_index:\n if val < items[pivot_index]:\n smaller_items.append(val)\n else:\n larger_items.append(val)\n \n quick_sort(smaller_items)\n quick_sort(larger_items)\n items[:] = smaller_items + [items[pivot_index]] + larger_items",
"def quicksort_slice (s, cmp, pivotalea=False):\n right=s['right']\n left=s['left']\n if right>left:\n s1,s2 = partition(s,cmp, pivotalea=pivotalea)\n s2['left']+=1\n quicksort_slice(s1,cmp, pivotalea=pivotalea)\n quicksort_slice(s2,cmp, pivotalea=pivotalea)",
"def quick_sort_helper(a_list, first_position, last_position):\n if first_position < last_position:\n\n split_point = partition(a_list, first_position, last_position)\n\n quick_sort_helper(a_list, first_position, split_point)\n quick_sort_helper(a_list, split_point + 1, last_position)\n\n return a_list",
"def quick_sort(items):\n if len(items) > 1:\n pivot_index = len(items) / 2\n smaller_items = []\n larger_items = []\n\n for i, val in enumerate(items):\n if i != pivot_index:\n if val < items[pivot_index]:\n smaller_items.append(val)\n else:\n larger_items.append(val)\n\n quick_sort(smaller_items)\n quick_sort(larger_items)\n items[:] = smaller_items + [items[pivot_index]] + larger_items",
"def _quick_sort(l, start, end):\n if start < end:\n split_point = partition(l, start, end)\n\n _quick_sort(l, start, split_point - 1)\n _quick_sort(l, split_point + 1, end)\n\n return l",
"def quick_sort(mylist):\n _inplace_quick_sort(mylist, 0, len(mylist)-1)",
"def helper_smartsort2(grouped, key):\n df = grouped.get_group(key)\n return df.loc[df.index[0], :]",
"def quicksort(list, low, high):\n if(low < high):\n partitionindex = partition(list, low, high)\n\n quicksort(list, low, partitionindex-1)\n quicksort(list, partitionindex + 1, high)",
"def qsort3(items):\n\n # create an initial unit of work. Each unit work is marked by a starting index and its sub-partition so that\n # we know when we add up the starting index to compute the final location of the pivot in the final sorted list.\n work = (0, items)\n\n # we will be using works to track remaining partitions needed to be quick-sorted\n works = [work]\n\n # create a result list to store all of our pivots sorted thru qsort in our final sorted list\n result = [-1] * len(items)\n\n # keep working on partition till no remaining partitions left.\n while len(works) > 0:\n\n # remove a unit of partition to work on in this iteration\n startIndex, part = works.pop()\n\n\n # standard quick-sorting start here...\n\n pivot = part[0]\n lessPart = []\n morePart = []\n for x in part:\n if x < pivot:\n lessPart.append(x)\n elif x > pivot:\n morePart.append(x)\n else:\n # ignoring pivot\n pass\n\n if len(lessPart) > 0:\n # create a unit of work for the lesser partition.\n works.append((startIndex, lessPart))\n\n if len(morePart) > 0:\n # create a unit of work for the greater partition.\n works.append((startIndex + len(lessPart) + 1, morePart))\n\n # A pivot's location is final in standard quick-sort algorithm. Hence we put it back to the result.\n result[ startIndex + len(lessPart) ] = pivot\n\n return result",
"def quick_sort(items, low=None, high=None):\r\n # TODO: Check if high and low range bounds have default values (not given)\r\n # TODO: Check if list or range is so small it's already sorted (base case)\r\n # TODO: Partition items in-place around a pivot and get index of pivot\r\n # TODO: Sort each sublist range by recursively calling quick sort\r",
"def quick_sort(a_list):\n return quick_sort_helper(a_list, 0, len(a_list) - 1)",
"def quick_sort(lst, first, last):\r\n if first < last:\r\n split_marker = split_list(lst, first, last)\r\n\r\n quick_sort(lst, split_marker + 1, last)\r\n quick_sort(lst, first, split_marker - 1)",
"def quick_sort(items, low = 0, high = -1):\n if high == -1:\n high = len(items) - 1 \n\n if low < high:\n pivot = partition(items, low, high)\n # Left side of pivot\n quick_sort(items, low, pivot - 1)\n # Right side of pivot\n quick_sort(items, pivot + 1, high)",
"def quick_sort(unsorted_list):\r\n\tsorted_list = list(unsorted_list)\r\n\tless = []\r\n\tequal = []\r\n\tgreater = []\r\n\tif len(sorted_list) > 1:\r\n\t\tpivot = sorted_list[0]\r\n\t\tfor item in sorted_list:\r\n\t\t\tif item < pivot:\r\n\t\t\t\tless.append(item)\r\n\t\t\telif item == pivot:\r\n\t\t\t\tequal.append(item)\r\n\t\t\telif item > pivot:\r\n\t\t\t\tgreater.append(item)\r\n\t\treturn quick_sort(less) + equal + quick_sort(greater)\r\n\telse:\r\n\t\treturn sorted_list",
"def QuickSortStable(array):\n if len(array) <=1:\n return array\n pivot = array[0]\n greater = list()\n lesser = list()\n for eachItem in array[1:]:\n if eachItem > pivot:\n greater.append(eachItem)\n else:\n lesser.append(eachItem)\n sortedLesser = QuickSortStable(lesser)\n sortedGreater = QuickSortStable(greater)\n sortedLesser.append(pivot)\n sortedLesser.extend(sortedGreater)\n return sortedLesser",
"def quicksort(sortable):\n\n __quicksort(sortable, 0, len(sortable) - 1)\n return sortable",
"def quicksort (t,cmp, pivotalea=False):\n quicksort_slice({'data':t, 'left':0, 'right':len(t)-1}, cmp, pivotalea=pivotalea)",
"def quick_sort(data, head, tail, draw_data, time_tick):\n if head < tail:\n partition_index = partition(data, head, tail, draw_data, time_tick)\n\n # Left partition\n quick_sort(data, head, partition_index-1, draw_data, time_tick)\n\n # Right partition\n quick_sort(data, partition_index+1, tail, draw_data, time_tick)",
"def qsort(my_list):\n\n comparisions = quickSortHelper(my_list,0,len(my_list)-1)\n return (my_list, comparisions)",
"def _quicksort(self, l, r):\n\t\tif l < r:\n\t\t\tsplitpoint = self._partition(l, r)\n\t\t\tself._quicksort(l, splitpoint-1)\n\t\t\tself._quicksort(splitpoint+1, r)",
"def quick_sort(data):\n def partition(data,start,end):\n \"\"\" Creating the partition and returning the partition key to sort further\"\"\"\n i=start-1\n for j in range(start,end):\n if data[j]<=data[end]:\n i+=1\n data[i],data[j]=data[j],data[i]\n data[i+1],data[end]=data[end],data[i+1]\n return i+1\n \n def sort(data,start,end):\n \"\"\"\n Sorting the data provided \n \"\"\"\n if start < end:\n partition_index=partition(data,start,end)\n sort(data,start,partition_index-1)\n sort(data,partition_index+1,end)\n sort(data,0,len(data)-1)\n #print(data)\n return data",
"def quick_sort_v2(collection):\n\n def partition(lst, left, right):\n pivot = lst[left]\n i = left\n j = right\n while j > i:\n while lst[j] >= pivot and j > i:\n j -= 1\n lst[i] = lst[j]\n while lst[i] <= pivot and j > i:\n i += 1\n lst[j] = lst[i]\n lst[i] = pivot\n return i\n\n def sort(lst, left, right):\n if left >= right:\n return\n p = partition(lst, left, right)\n sort(lst, left, p - 1)\n sort(lst, p + 1, right)\n\n sort(collection, 0, len(collection) - 1)\n return collection",
"def __QuickSortHelper(ulist, start, stop):\n if start < stop:\n i = __Partition(ulist, start, stop)\n __QuickSortHelper(ulist, start, i -1)\n __QuickSortHelper(ulist, i+1, stop)",
"def quick_sort(my_list):\n if len(my_list) == 0:\n return []\n else:\n pivot = my_list[0]\n left = [element for element in my_list if element[1] < pivot[1]]\n pivots = [element for element in my_list if element[1] == pivot[1]]\n right = [element for element in my_list if element[1] > pivot[1]]\n return quick_sort(left) + pivots + quick_sort(right)",
"def quick_sort(l):\n return _quick_sort(l, 0, len(l) - 1)",
"def QuickSort(ulist):\n __QuickSortHelper(ulist, 0, len(ulist)-1)",
"def selection(items, k):\n lo = 0\n hi = len(items) - 1\n while hi > lo:\n pivot = quick_sort._partition(items, lo, hi)\n if pivot < k:\n lo = pivot + 1\n elif pivot > k:\n hi = pivot - 1\n else:\n return items[k]\n\n return items[k]"
] | [
"0.6481071",
"0.6124734",
"0.61035883",
"0.5960609",
"0.5938006",
"0.59239113",
"0.5895712",
"0.58804065",
"0.58605987",
"0.5831882",
"0.578833",
"0.5766656",
"0.5744841",
"0.5650269",
"0.5648659",
"0.56472456",
"0.56437397",
"0.56407726",
"0.56317866",
"0.56163543",
"0.5608906",
"0.55794036",
"0.55574673",
"0.55511236",
"0.5515631",
"0.5495882",
"0.54958785",
"0.5487476",
"0.54759854",
"0.5472677"
] | 0.7287639 | 0 |
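A hedged sketch of the same block-wise re-sort using itertools.groupby on a list whose outer key is already in order; this mirrors the documented behaviour without the in-place quicksort machinery (the function name is hypothetical):

from itertools import groupby

def sort_within_blocks(rows, stable_key, sort_key, reverse=False):
    # Keep the order of consecutive blocks sharing stable_key; sort each block by sort_key.
    out = []
    for _, block in groupby(rows, key=lambda r: r[stable_key]):
        out.extend(sorted(block, key=lambda r: r[sort_key], reverse=reverse))
    return out

rows = [(1, 9), (1, 3), (2, 5), (2, 1)]
assert sort_within_blocks(rows, 0, 1) == [(1, 3), (1, 9), (2, 1), (2, 5)]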
A non-recursive, in-place version of quicksort. Note that Python does not optimize tail recursion, so a recursive approach is not generally recommended. Sorting in place also saves memory. Otherwise, it is a straightforward ascending quicksort of the items between the start and end indexes, comparing the values in the "key" slot of the tuples in the list. There is a quick fallback if only two or fewer items need to be considered. | def quicksort(self, key:int, start:int=0, end:int=None):
if end == None:
end = len(self) - 1
if start >= end:
return
if start == end - 1:
if self[start][key] > self[end][key]:
self.swap(start, end)
return
work = [(start, end)]
while work:
first, last = work.pop()
pivot = (first + last) // 2
self.swap(pivot, last)
pivot_to = first
for index in range(first, last + 1):
if self[index][key] < self[last][key]:
self.swap(index, pivot_to)
pivot_to += 1
self.swap(pivot_to, last)
if pivot_to > (first + 1):
work.append((first, pivot_to - 1))
if last > (pivot_to + 1):
work.append((pivot_to + 1, last)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _quick_sort(l, start, end):\n if start < end:\n split_point = partition(l, start, end)\n\n _quick_sort(l, start, split_point - 1)\n _quick_sort(l, split_point + 1, end)\n\n return l",
"def quick_sort(items, low=None, high=None):\r\n # TODO: Check if high and low range bounds have default values (not given)\r\n # TODO: Check if list or range is so small it's already sorted (base case)\r\n # TODO: Partition items in-place around a pivot and get index of pivot\r\n # TODO: Sort each sublist range by recursively calling quick sort\r",
"def quick_sort(a, start=0, end=None):\n if end is None:\n end = len(a) - 1\n if (start < end):\n pIndex = _partition(a, start, end)\n quick_sort(a, start, pIndex - 1)\n quick_sort(a, pIndex + 1, end)\n return a",
"def quicksort(list, low, high):\n if(low < high):\n partitionindex = partition(list, low, high)\n\n quicksort(list, low, partitionindex-1)\n quicksort(list, partitionindex + 1, high)",
"def quick_sort(partition_list, low, high):\n if low >= high:\n return\n part_point = get_partition(partition_list, low, high)\n quick_sort(partition_list, low, part_point - 1)\n quick_sort(partition_list, part_point + 1, high)",
"def quick_sort(lst, first, last):\r\n if first < last:\r\n split_marker = split_list(lst, first, last)\r\n\r\n quick_sort(lst, split_marker + 1, last)\r\n quick_sort(lst, first, split_marker - 1)",
"def __QuickSortHelper(ulist, start, stop):\n if start < stop:\n i = __Partition(ulist, start, stop)\n __QuickSortHelper(ulist, start, i -1)\n __QuickSortHelper(ulist, i+1, stop)",
"def quick_sort(items, low = 0, high = -1):\n if high == -1:\n high = len(items) - 1 \n\n if low < high:\n pivot = partition(items, low, high)\n # Left side of pivot\n quick_sort(items, low, pivot - 1)\n # Right side of pivot\n quick_sort(items, pivot + 1, high)",
"def _quick_sort(self, start, end):\n if start >= end: # Length of 1 or less\n return\n\n pivot = self._arr[end] # Select pivot as last value in array\n left = start\n right = end - 1 # Begin one before the pivot\n\n while left <= right: # Continue until all values are ordered\n while left <= right and self._arr[left] < pivot: # Find first value greater than pivot\n left += 1\n while left <= right and pivot < self._arr[right]: # Find first value less than pivot\n right -= 1\n\n if left <= right: # If unordered, then swap two found values\n self._arr[left], self._arr[right] = self._arr[right], self._arr[left]\n left, right = left + 1, right - 1 # Increment for next iteration\n\n self._arr[left], self._arr[end] = self._arr[end], self._arr[left] # Move pivot to middle\n self._quick_sort(start, left - 1) # Sort left portion\n self._quick_sort(left + 1, end) # Sort right portion",
"def quick_sort(items):\n if len(items) > 1:\n pivot_index = len(items) / 2\n smaller_items = []\n larger_items = []\n \n for i, val in enumerate(items):\n if i != pivot_index:\n if val < items[pivot_index]:\n smaller_items.append(val)\n else:\n larger_items.append(val)\n \n quick_sort(smaller_items)\n quick_sort(larger_items)\n items[:] = smaller_items + [items[pivot_index]] + larger_items",
"def quicksort(self, Items, Start, End):\n if Start < End:\n p = self.partition(Items, Start, End)\n self.quicksort(Items, Start, p - 1)\n self.quicksort(Items, p + 1, End)",
"def quick_sort(items, low=None, high=None):\n # TODO: Check if high and low range bounds have default values (not given)\n if low == None and high == None:\n low = 0\n high = len(items) - 1\n\n # TODO: Check if list or range is so small it's already sorted (base case)\n if low < high:\n # TODO: Partition items in-place around a pivot and get index of pivot\n pivot = partition(items, low, high)\n # TODO: Sort each sublist range by recursively calling quick sort\n quick_sort(items, low, pivot - 1)\n quick_sort(items, pivot + 1, high)",
"def quick_sort(mylist):\n _inplace_quick_sort(mylist, 0, len(mylist)-1)",
"def sub_quicksort(self, stable_key:int, sort_key:int, start:int=0, end:bool=None, reverse:bool=False):\n if end == None: \n end = len(self) - 1\n if start >= end:\n return\n first = start\n for index in range(start + 1, end + 1):\n if not self[index][stable_key] == self[first][stable_key]:\n self.quicksort(sort_key, first, index - 1)\n if reverse:\n self.reverse(first, index - 1)\n first = index\n if not first == end:\n self.quicksort(sort_key, first, end)\n if reverse:\n self.reverse(first, end)",
"def quick_sort(items):\n if len(items) > 1:\n pivot_index = len(items) / 2\n smaller_items = []\n larger_items = []\n\n for i, val in enumerate(items):\n if i != pivot_index:\n if val < items[pivot_index]:\n smaller_items.append(val)\n else:\n larger_items.append(val)\n\n quick_sort(smaller_items)\n quick_sort(larger_items)\n items[:] = smaller_items + [items[pivot_index]] + larger_items",
"def quicksort(lst):\n n = len(lst)\n qsort(lst, 0, n - 1)",
"def quicksort(a,start,end):\n\n # base case\n if end - start <= 1:\n return a\n\n #Pivot is the last element\n pivot = end - 1\n\n i = start - 1\n\n for j in range(start,pivot):\n if a[j] <= a[pivot]:\n i += 1\n a[i], a[j] = a[j], a[i]\n a[i+1], a[pivot] = a[pivot], a[i+1]\n\n quicksort(a,start,i+1)\n quicksort(a,i+2,end) \n \n return a",
"def quick_sort(list):\n\n if list == []:\n return []\n else:\n pivot = list[0]\n lesser = quick_sort([x for x in list[1:] if x < pivot])\n greater = quick_sort([x for x in list[1:] if x > pivot])\n return lesser + [pivot] + greater",
"def quick_sort(a_list):\n return quick_sort_helper(a_list, 0, len(a_list) - 1)",
"def _inplace_quick_sort(mylist, a, b):\n if a >= b:\n return # range is trivially sorted\n pivot_index = _choose_pivot(mylist, a, b) # select pivot index\n pivot = mylist[pivot_index] # get pivot value\n _swap(mylist, pivot_index, b) # move pivot to edge\n left = a # scans rightward\n right = b-1 # scans leftward\n while left <= right:\n # scan until reaching value equal or larger than pivot (or right marker)\n while left <= right and mylist[left] < pivot:\n left += 1\n # scan until reaching value equal or smaller than pivot (or left marker)\n while left <= right and pivot < mylist[right]:\n right -= 1\n if left <= right: # scans did not cross\n _swap(mylist, left, right) # swap\n left += 1\n right -= 1 # shrink the range we're looking at\n\n # put pivot into its final place (currently marked by left index)\n _swap(mylist, left, b)\n # recurse\n _inplace_quick_sort(mylist, a, left-1)\n _inplace_quick_sort(mylist, left+1, b)",
"def quick_sort(l):\n return _quick_sort(l, 0, len(l) - 1)",
"def quick_sort_v2(collection):\n\n def partition(lst, left, right):\n pivot = lst[left]\n i = left\n j = right\n while j > i:\n while lst[j] >= pivot and j > i:\n j -= 1\n lst[i] = lst[j]\n while lst[i] <= pivot and j > i:\n i += 1\n lst[j] = lst[i]\n lst[i] = pivot\n return i\n\n def sort(lst, left, right):\n if left >= right:\n return\n p = partition(lst, left, right)\n sort(lst, left, p - 1)\n sort(lst, p + 1, right)\n\n sort(collection, 0, len(collection) - 1)\n return collection",
"def quicksort(L, left, right):\n if left >= right:\n return\n pivot = partition(L, left, right)\n quicksort(L, left, pivot - 1)\n quicksort(L, pivot + 1, right)",
"def quick_sort_helper(a_list, first_position, last_position):\n if first_position < last_position:\n\n split_point = partition(a_list, first_position, last_position)\n\n quick_sort_helper(a_list, first_position, split_point)\n quick_sort_helper(a_list, split_point + 1, last_position)\n\n return a_list",
"def quick_sort(my_list):\n if len(my_list) == 0:\n return []\n else:\n pivot = my_list[0]\n left = [element for element in my_list if element[1] < pivot[1]]\n pivots = [element for element in my_list if element[1] == pivot[1]]\n right = [element for element in my_list if element[1] > pivot[1]]\n return quick_sort(left) + pivots + quick_sort(right)",
"def qsort2(list):\n if list == []: \n return []\n else:\n pivot = list[0]\n lesser, equal, greater = partition(list[1:], [], [pivot], [])\n return qsort2(lesser) + equal + qsort2(greater)",
"def qsort(my_list):\n\n comparisions = quickSortHelper(my_list,0,len(my_list)-1)\n return (my_list, comparisions)",
"def iter_tuple_quick_sort(list, max, min):\n #push min and max indices onto a stack\n stack = LinkedList()\n stack.push(min)\n stack.push(max)\n\n #keep pushing min and max indices onto stack to\n #iteratively sort those lists\n while not stack.is_empty():\n #pop the next min and max indices to partition\n max = stack.pop()\n min = stack.pop()\n\n #partition the list and find the next pivot\n pivot_index = tuple_partition(list, max, min)\n\n #if there's more than one element in the list\n #check if LHS needs to be partitioned\n if pivot_index - 1 >= min:\n stack.push(min)\n stack.push(pivot_index - 1)\n #check if RHS needs to be partitioned\n if pivot_index + 1 <= max:\n stack.push(pivot_index + 1)\n stack.push(max)",
"def quick_sort(unsorted_list):\r\n\tsorted_list = list(unsorted_list)\r\n\tless = []\r\n\tequal = []\r\n\tgreater = []\r\n\tif len(sorted_list) > 1:\r\n\t\tpivot = sorted_list[0]\r\n\t\tfor item in sorted_list:\r\n\t\t\tif item < pivot:\r\n\t\t\t\tless.append(item)\r\n\t\t\telif item == pivot:\r\n\t\t\t\tequal.append(item)\r\n\t\t\telif item > pivot:\r\n\t\t\t\tgreater.append(item)\r\n\t\treturn quick_sort(less) + equal + quick_sort(greater)\r\n\telse:\r\n\t\treturn sorted_list",
"def quicksort(lst):\n if not lst:\n return []\n else:\n pivot = lst[0]\n lesser = quicksort([x for x in lst[1:] if x < pivot])\n greater = quicksort([x for x in lst[1:] if x >= pivot])\n return lesser + [pivot] + greater"
] | [
"0.8193505",
"0.77273595",
"0.76876134",
"0.76642364",
"0.76446205",
"0.76436085",
"0.7545366",
"0.7532147",
"0.748278",
"0.7454062",
"0.74537796",
"0.7445257",
"0.7441872",
"0.73965734",
"0.73959565",
"0.7365618",
"0.7348234",
"0.7309217",
"0.72909856",
"0.72894216",
"0.72791255",
"0.72694564",
"0.72363085",
"0.72300065",
"0.719532",
"0.7189933",
"0.71641564",
"0.716237",
"0.71540385",
"0.71021897"
] | 0.83892685 | 0 |
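For comparison, a standalone non-recursive quicksort over key-indexed tuples using the same explicit work stack; a sketch assuming a plain list rather than the record's class:

def iterative_quicksort(items, key=0):
    # Sort items in place, ascending by items[i][key], with an explicit stack instead of recursion.
    work = [(0, len(items) - 1)]
    while work:
        first, last = work.pop()
        if first >= last:
            continue
        # Move the middle element to the end and partition around it (Lomuto style).
        mid = (first + last) // 2
        items[mid], items[last] = items[last], items[mid]
        store = first
        for i in range(first, last):
            if items[i][key] < items[last][key]:
                items[i], items[store] = items[store], items[i]
                store += 1
        items[store], items[last] = items[last], items[store]
        work.append((first, store - 1))
        work.append((store + 1, last))

data = [(3, "c"), (1, "a"), (2, "b")]
iterative_quicksort(data)
assert data == [(1, "a"), (2, "b"), (3, "c")]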
Determines if the slope of a line is positive. | def positive_slope(line:tuple)->bool:
    return (line[0][1] < line[1][1]) == (line[0][0] < line[1][0]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_slope(self):\n\t\tif self.high_elevation != self.low_elevation:\n\t\t\treturn True\n\t\treturn False",
"def slope(self):\n if self.b == 0:\n return None\n else:\n return (-1) * self.a/self.b",
"def filter_slope(self,slope):\n if self.slope_interval[0] <= abs(slope) <= self.slope_interval[1]:\n return True\n return False",
"def is_vertical(self):\n return self.slope == float(\"+inf\")",
"def test_positive_slope(self):\n slopes = []\n for i in range(100):\n neighborhood, pc = create_point_cloud_in_plane_and_neighborhood()\n slopes += list(EigenValueVectorizeFeatureExtractor().extract(pc, neighborhood, None, None, None)[6])\n np.testing.assert_array_less(np.zeros_like(slopes), slopes)",
"def perpendicular(self, L):\n if self.slope() is None: # if the line is vertical, L must be horizontal\n return L.slope() == 0\n elif self.slope() == 0: # if the line is horizontal, L must be vertical\n return L.slope() is None\n else:\n return self.slope() * L.slope() == -1",
"def isunder(x, y, slope, yint):\r\n line_ypt = x * slope + yint\r\n return y < line_ypt",
"def is_left(self, line):\n return line.angle() < 0",
"def slope(a, b):\r\n if a[0] == b[0]: #If the x values are both 0\r\n return 0 #Technically, undefined, but doesn't matter for finding collinearity\r\n return (a[1] - b[1]) / (a[0] - b[0])",
"def slope(point_a, point_b, flip):\n\n x_a, y_a = point_a\n x_b, y_b = point_b\n\n dx = x_b - x_a\n dy = y_b - y_a\n\n return -dx / dy if flip else dy / dx",
"def _slope(A, B):\n if (B[0] - A[0]) == 0:\n return \"vertical\"\n slope = (B[1] - A[1]) / (B[0] - A[0])\n if slope == 0:\n return \"horizontal\"\n elif slope > 0:\n return \"inclined\"\n else:\n return \"declined\"",
"def test_point_positive_on_one_line(self):\n a = Point(1, 0)\n b = Point(34, 0)\n c = Point(42, 0)\n\n self.assertTrue(Point.on_one_line(a, b, c),\n \"Test of Point.on_one_line(a, b, c) failed, returned value != True.\")\n d = Point(1, 2)\n e = Point(34, 43)\n f = Point(42, 54)\n\n self.assertFalse(Point.on_one_line(d, e, f),\n \"Test of Point.on_one_line(d, e, f) failed, returned value != False.\")\n\n self.assertTrue(Point.on_one_line(a), \"Test of Point.on_one_line(a) failed, returned value != True.\")",
"def slope(l):\n if l[1] == l[0]:\n return float(\"inf\")\n else:\n return float(l[3]-l[2])/(l[1]-l[0])",
"def isnegative(x):\n if x < 0 :\n return True\n return False",
"def checkStraightLine(coordinates: List[List[int]]) -> bool:\n\t# initializing our comparison slope value\n\tnum = coordinates[1][1] - coordinates[0][1]\n\tden = coordinates[1][0] - coordinates[0][0]\n\tif den == 0:\n\t\tslope = math.inf\n\telse:\n\t\tslope = num / den\n\n\t# checking the initial slope against all other slopes\n\tslope_check = 0\n\tfor i in range(2, len(coordinates)):\n\t\tnum = coordinates[i][1] - coordinates[i-1][1]\n\t\tden = coordinates[i][0] - coordinates[i-1][0]\n\t\tif den == 0:\n\t\t\tslope_check = math.inf\n\t\telse:\n\t\t\tslope_check = num/den\n\n\t\tif slope_check != slope:\n\t\t\treturn False\n\n\treturn True",
"def is_point_on_same_line(self, x, y = None):\n x, y = y is not None and (x, y) or (x[0], x[1])\n\n if self.is_vertical():\n return Point(x, 0) == Point(self.x_value, 0)\n else:\n return Point(0, y) == Point(0, (self.slope * x + self.y_intercept))",
"def slope(self, x1, y1, x2, y2):\n if x1 == x2:\n slope = np.inf\n else:\n slope = (y2-y1)/(x2-x1)\n\n return np.math.atan(slope)",
"def is_negative(self, a):\n return a < 0",
"def are_vertices_clockwise(self,line):\r\n \r\n import numpy as np\r\n \r\n signed_area = 0\r\n for idx in range(line.shape[0]):\r\n \r\n x1 = line[idx,0]\r\n y1 = line[idx,1]\r\n if idx == line.shape[0]-1:\r\n x2 = line[0,0]\r\n y2 = line[0,1]\r\n else:\r\n x2 = line[idx+1,0]\r\n y2 = line[idx+1,1]\r\n \r\n signed_area += (x1 * y2 - x2 * y1)\r\n \r\n return (np.sign(signed_area) == -1.)",
"def are_vertices_clockwise(self,line):\r\n \r\n import numpy as np\r\n \r\n signed_area = 0\r\n for idx in range(line.shape[0]):\r\n \r\n x1 = line[idx,0]\r\n y1 = line[idx,1]\r\n if idx == line.shape[0]-1:\r\n x2 = line[0,0]\r\n y2 = line[0,1]\r\n else:\r\n x2 = line[idx+1,0]\r\n y2 = line[idx+1,1]\r\n \r\n signed_area += (x1 * y2 - x2 * y1)\r\n \r\n return (np.sign(signed_area) == -1.)",
"def positive_only(self):\n return True",
"def positive_only(self):\n return True",
"def positive_only(self):\n return True",
"def is_positive(self, a):\n return a > 0",
"def testSlopeSetNegative(self):\n def setSlope():\n self.node.slope = [-1.3782, 278.32, 0.738378233782]\n\n cdl_convert.config.HALT_ON_ERROR = True\n\n self.assertRaises(\n ValueError,\n setSlope\n )\n\n cdl_convert.config.HALT_ON_ERROR = False\n\n setSlope()\n\n self.assertEqual(\n (Decimal('0.0'), Decimal('278.32'), Decimal('0.738378233782')),\n self.node.slope\n )",
"def _calculate_slope(klass, p1, p2):\n xdiff = p1.x - p2.x\n if xdiff:\n return (p1.y - p2.y) / xdiff\n else:\n return float(\"+inf\")",
"def line(intercept, slope, x):\n return slope*x + intercept",
"def slope_from_origin(self):\n\n return self.y / self.x",
"def testSlopeSetNegative(self):\n def setSlope():\n self.cc.slope = [-1.3782, 278.32, 0.738378233782]\n\n cdl_convert.config.HALT_ON_ERROR = True\n\n self.assertRaises(\n ValueError,\n setSlope\n )\n\n cdl_convert.config.HALT_ON_ERROR = False\n\n setSlope()\n\n self.assertEqual(\n (Decimal('0.0'), Decimal('278.32'), Decimal('0.738378233782')),\n self.cc.slope\n )",
"def slope_from_origin(self):\n\n return (self.y / self.x)"
] | [
"0.736785",
"0.67845374",
"0.66309035",
"0.6547908",
"0.64874506",
"0.6483003",
"0.6478444",
"0.63426495",
"0.63259435",
"0.6320206",
"0.62273353",
"0.6205177",
"0.6158633",
"0.613933",
"0.61122525",
"0.6104726",
"0.6096476",
"0.6087337",
"0.6063536",
"0.6063536",
"0.59833056",
"0.59833056",
"0.59833056",
"0.59828097",
"0.5954577",
"0.5936968",
"0.5930071",
"0.5915811",
"0.5912727",
"0.5908369"
] | 0.83179325 | 0 |
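A quick worked check of the intended predicate: the slope is positive exactly when x and y change in the same direction between the two endpoints (the helper name is hypothetical):

def slope_is_positive(line):
    # line is ((x1, y1), (x2, y2)); True when y rises as x rises, or both fall.
    (x1, y1), (x2, y2) = line
    return (y1 < y2) == (x1 < x2)

assert slope_is_positive(((0, 0), (1, 1)))       # rising left to right
assert not slope_is_positive(((0, 1), (1, 0)))   # falling left to right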
Determines if a line moves up from left to right. | def is_upwards(line:tuple)->bool:
return line[1][1] > line[0][1] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _move_up(self) -> bool:\n current_agent_node = self._maze.get_player_node()\n\n if current_agent_node.y == 0:\n # Can't go up. Already on the top row\n return False\n else:\n next_node = self._maze.get_node_up(current_agent_node)\n return self._handle_movement(current_agent_node, next_node)",
"def moveUp(self):\n if self._position.y != 0:\n self._position.y -=1\n return True\n return False",
"def is_left(self, line):\n return line.angle() < 0",
"def move_up(self):\n if self.pointer != 0:\n logging.debug(\"moved up\")\n self.pointer -= 1\n self.refresh()\n return True\n else: \n return False",
"def _move_down(self) -> bool:\n current_agent_node = self._maze.get_player_node()\n\n if current_agent_node.y == self._settings.nrows - 1:\n # Can't go down. Already on the bottom row.\n return False\n else:\n next_node = self._maze.get_node_down(current_agent_node)\n return self._handle_movement(current_agent_node, next_node)",
"def move_up(self):\n if self.pointer != 0:\n logging.debug(\"moved up\")\n self.pointer -= 1\n self.refresh()\n self.reset_scrolling()\n return True\n else:\n return False",
"def move_up(event: EventType, widget: WidgetType) -> bool:\n return event.key == KEY_MOVE_UP",
"def up(self):\n if self.head.heading() != DOWN and self.last_direction != DOWN:\n self.head.setheading(UP)",
"def leftUp(self):",
"def lines_up(x1, y1, x2, y2):\n if x1 == x2 and y1 == y2:\n return 4\n if x1 <= x2 and y1 >= y2:\n return 3\n if x1 >= x2 and y1 <= y2:\n return 2\n if (x2 <= x1 and x1 <= y2) or (x2 <= y1 and y1 <= y2):\n return 1\n if y1 < x2 or y2 < x1:\n return 0\n return 5",
"def check_up_left(prev_button, cur_button):\r\n\r\n if button_lst[cur_button][0] == button_lst[prev_button][0] - 1 and \\\r\n button_lst[cur_button][1] == button_lst[prev_button][1] - 1:\r\n return True\r\n return False",
"def isUp ( self ) :\n return not self.isDown()",
"def _up_left(self, col, row):\n ones = 0\n twos = 0\n for step in range(4):\n\n current = self.layout[col + (step*-1)][row + (step)] #step up and left\n if current == 1: ones+=1\n if current == 2: twos+=1\n\n return self._score_a_quartet(ones, twos)",
"def is_horizontal(line:tuple)->bool:\n return line[0][1] == line[1][1]",
"def one_step_left(self):\n if (self.column-1 <0):\n return False\n elif (self.battery == 0):\n return False\n elif (self.maze[self.row][self.column-1] == False):\n return False\n else:\n self.column-=1\n self.battery -= 1\n return True",
"def moveDown(self):\n if self._position.y != 14:\n self._position.y +=1\n return True\n return False",
"def can_move_up(self, index):\n # If the index of the '0' tile is in the top-row then we cannot do the action\n if index in range(0, self.puzzle_width):\n return False\n return True",
"def move_up():\n return __maze.move_up()",
"def continues_to_left(self):\n if self.col_num == 0:\n return False\n return (self.master_grid.matrix[self.row_num][self.col_num-1] \n == self.character )",
"def move_up(self) -> None:\n try:\n line_start: int = self.buffer.reverse_index('\\n', end=self.index) + 1\n except ValueError:\n return\n\n previous_line_start: int\n try:\n previous_line_start = self.buffer.reverse_index('\\n', end=line_start - 1) + 1\n except ValueError:\n previous_line_start = 0\n\n previous_line_length = line_start - previous_line_start\n column: int = self.index - line_start\n if previous_line_length <= column:\n previous_line_end = line_start - 1\n self.index = previous_line_end\n else:\n self.index = previous_line_start + column",
"def time_to_move(self):\r\n if int(self.pix_pos.x+TOP_BOTTOM_BUFFER//2) % self.app.cell_width == 0:\r\n if self.direction == vec(1, 0) or self.direction == vec(-1, 0) or self.direction == vec(0, 0):\r\n return True\r\n # for the x-direction\r\n\r\n if int(self.pix_pos.y+TOP_BOTTOM_BUFFER//2) % self.app.cell_height == 0:\r\n if self.direction == vec(0, 1) or self.direction == vec(0, -1) or self.direction == vec(0, 0):\r\n return True\r\n # for the y-direction\r\n\r\n # checks to see if the player is still within the bounds\r",
"def is_up(self):\n \n return self.is_level('up')",
"def shift_board_up(self) -> bool:\n for c in range(self.board_size):\n if self.board[0][c] is not None:\n return False\n\n for r in range(0, self.board_size - 1):\n for c in range(self.board_size):\n self.board[r][c] = self.board[r + 1][c]\n\n for c in range(self.board_size):\n self.board[self.board_size - 1][c] = None\n\n return True",
"def check_up_right(prev_button, cur_button):\r\n\r\n if button_lst[cur_button][0] == button_lst[prev_button][0] - 1 and \\\r\n button_lst[cur_button][1] == button_lst[prev_button][1] + 1:\r\n return True\r\n return False",
"def rightUp(self):",
"def is_moving(self):\n return self.steps < self.max_steps",
"def no_more_move(self):\n if (self.p_no_move + self.c_no_move == 2):\n return True\n return False",
"def up_down(self, up):\n if up == 'u':\n up = 1\n elif up == 'n':\n up = 0\n elif up == 'd':\n up = -1\n else:\n raise ValueError(\"The heck you doing Servo?? u d or n ONLY\")\n self.h += up\n if self.get_pos() == blocks['wall']:\n self.h -= up",
"def _hasChangedDirection(motionPts: list) -> tuple:\n dispPts = Ball._getDisplacements(motionPts)\n xDir = yDir = None\n xChange = yChange = False\n for dispPt in dispPts:\n # Compute differences\n xDirNow = RIGHT if dispPt[0] > 0 else LEFT\n yDirNow = DOWN if dispPt[1] > 0 else UP\n # Look for x changes\n if xDir is None:\n xDir = xDirNow\n elif xDirNow != xDir:\n xChange = True\n # Look for y changes\n if yDir is None:\n yDir = yDirNow\n elif yDirNow != yDir:\n yChange = True\n return xChange, yChange",
"def continues_above(self):\n if self.row_num == 0:\n return False\n return (self.master_grid.matrix[self.row_num-1][self.col_num] \n == self.character)"
] | [
"0.70861965",
"0.6750818",
"0.6675489",
"0.6598131",
"0.65929246",
"0.6382831",
"0.6267976",
"0.6258866",
"0.618461",
"0.6151891",
"0.61254895",
"0.61195827",
"0.60737306",
"0.6071918",
"0.6045149",
"0.60422784",
"0.6026788",
"0.60164386",
"0.60142016",
"0.59950626",
"0.5994049",
"0.59792095",
"0.5969059",
"0.5962517",
"0.5929903",
"0.58903414",
"0.58746165",
"0.58725595",
"0.5863741",
"0.585474"
] | 0.7699524 | 0 |
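A tiny check of the predicate, assuming the line is given as ((x1, y1), (x2, y2)) with the left endpoint first (the helper name is hypothetical):

def line_is_upwards(line):
    # True when the second endpoint sits higher than the first.
    return line[1][1] > line[0][1]

assert line_is_upwards(((0, 0), (3, 2)))
assert not line_is_upwards(((0, 2), (3, 2)))   # horizontal line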
Determines if a line is horizontal. | def is_horizontal(line:tuple)->bool:
return line[0][1] == line[1][1] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_horizontal(self):\n return self.start.x == self.end.x",
"def _isLine(self):\n return (self.width == 0 and self.height > 1) or (self.height == 0 and self.width > 1)",
"def _isLine(self):\n return (self.width == 0 and self.height > 1) or (self.height == 0 and self.width > 1)",
"def check_horizontal_rule(line):\n if line.count('-') >= 3 and contains_only_char(line, '-'):\n return True, '<hr></hr>'\n if line.count('*') >= 3 and contains_only_char(line, '*'):\n return True, '<hr></hr>'\n if line.count('_') >= 3 and contains_only_char(line, '_'):\n return True, '<hr></hr>'\n return False, ''",
"def check_horizontal(line):\n\tline = line.rstrip('\\n')\n\tif line.rstrip() != line:\n\t\traise StyleError(\"Line has trailing white-space\")\n\tif '\\t' in line.lstrip('\\t'):\n\t\traise StyleError(\"Tabs may only be used for indentation\")",
"def is_horizontal(coordinates, field):\r\n return has_ship((coordinates[0], coordinates[1] + 1), field) or \\\r\n has_ship((coordinates[0], coordinates[1] - 1), field)",
"def horizontal(self):\n return self._horizontal",
"def isAnyLineAt(self, x, y):\n return (self.isLineAt(x, y, 1, 0) or # Horizontal\n self.isLineAt(x, y, 0, 1) or # Vertical\n self.isLineAt(x, y, 1, 1) or # Diagonal up\n self.isLineAt(x, y, 1, -1)) # Diagonal down",
"def horizontal_line(t,n, h):\n lt(t)\n pu(t)\n fd(t,h)\n pd(t)\n lt(t)\n fd(t,n)\n rt(t)",
"def is_left(self, line):\n return line.angle() < 0",
"def is_line(self):\n return True",
"def is_line(self):\n return True",
"def is_horizontal_win(self, checker):\n for row in range(self.height):\n for col in range(self.width - 3):\n # Check if the next four columns in this row\n # contain the specified checker.\n if self.slots[row][col] == checker and \\\n self.slots[row][col + 1] == checker and \\\n self.slots[row][col + 2] == checker and \\\n self.slots[row][col + 3] == checker:\n return True\n \n # if we make it here, there were no horizontal wins\n return False",
"def is_horizontal(self, tangent_vec, base_point, atol=gs.atol):\n return gs.all(\n gs.isclose(\n tangent_vec,\n self.horizontal_projection(tangent_vec, base_point),\n atol=atol,\n ),\n axis=(-2, -1),\n )",
"def is_line(self):\n return False",
"def get_horizontal(self, position):\n pass",
"def belongsToLine(self, index, direction, line): \n first_point = 1 \n if direction == 'Horizontal': # Check if index's y coordinate is the same as line's first point\n if self.getCoordinates(index)[1] == self.getCoordinates(line[first_point])[1]:\n return True\n elif direction == 'Vertical': # Check if index's x coordinate is the same as line's first point\n if self.getCoordinates(index)[0] == self.getCoordinates(line[first_point])[0]:\n return True\n else:\n x, y = self.getCoordinates(index)\n\n if direction == 'D-pos' and x == y: # points in positive diagonal have equal x and y coordinates\n return True\n if direction == 'D-neg' and x + y == self.size - 1: # some of coordinates negative diagonal point is n -1\n return True\n return False",
"def horizontal(self, ox, curRow, curCol):\n count = 0\n while count < 4 and curRow < self.height and self.board[curRow][curCol] == ox:\n count += 1\n curRow += 1\n return count == 4",
"def horizontal_line(numbers, p_current, relative = False):\n if len(numbers) != 1:\n return None\n\n if relative:\n p_next = Point(numbers[0] + p_current.x, p_current.y)\n else:\n p_next = Point(numbers[0], p_current.y)\n\n return Line(p_current, p_next)",
"def is_line(self): \n return False",
"def isline(l):\n return isinstance(l,list) and len(l) == 2 \\\n and ispoint(l[0]) and ispoint(l[1])",
"def draw_horizontal_winning_line(self, row, player):\n posY = row * GameData.square_size + GameData.square_size // 2\n\n if player == 1:\n color = GameData.circle_color\n elif player == 2:\n color = GameData.cross_color\n\n pygame.draw.line(\n self.game_screen,\n color, (15, posY),\n (GameData.screen_dim - 15, posY),\n GameData.win_line_width\n )",
"def hline(self, x, y, width, color):\n self.rect(x, y, width, 1, color, fill=True)",
"def is_diagonal(self):\n return self.is_upper() and self.is_lower()",
"def get_horizontal_line(self, point: Sequence[float], **kwargs) -> Line:\n\n return self.get_line_from_axis_to_point(1, point, **kwargs)",
"def is_point_on_same_line(self, x, y = None):\n x, y = y is not None and (x, y) or (x[0], x[1])\n\n if self.is_vertical():\n return Point(x, 0) == Point(self.x_value, 0)\n else:\n return Point(0, y) == Point(0, (self.slope * x + self.y_intercept))",
"def isLineData(self, line):\n\n if line is None or line.strip().startswith('#'):\n return False, None, 0\n\n dataType = self.getDataType()\n\n if dataType == 'Y':\n # Y with 1 column\n try:\n yValue = float(line)\n\n return True, 'Y', 1\n except:\n pass\n\n # Y with comma 2 to 5 column\n try:\n yValueList = []\n yValueList = line.split(',')\n\n if len(yValueList) > 1 and len(yValueList) <= 5:\n newYValues = []\n for yValue in yValueList:\n try:\n yValue = float(yValue)\n newYValues.append(yValue)\n except ValueError:\n pass\n\n return True, 'Y', len(newYValues)\n except:\n pass\n\n # Y with space 2 to 5 column\n try:\n yValueList = []\n yValueList = line.split()\n\n if len(yValueList) > 1 and len(yValueList) <= 5:\n for yValue in yValueList:\n yValue = float(yValue)\n\n return True, 'Y', len(yValueList)\n except:\n pass\n elif dataType == 'XY':\n # XY with comma\n try:\n (xValue, yValue) = line.split(',')\n\n xValue = float(xValue)\n yValue = float(yValue)\n\n return True, 'XY', 2\n except:\n pass\n\n # XY with comma\n try:\n xValue, yValue, dummy = line.split(',')\n\n xValue = float(xValue)\n yValue = float(yValue)\n\n return True, 'XY', 2\n except:\n pass\n\n # XY with space\n try:\n (xValue, yValue) = line.split()\n\n xValue = float(xValue)\n yValue = float(yValue)\n\n return True, 'XY', 2\n except:\n pass\n else:\n # Y with 1 column\n try:\n yValue = float(line)\n\n return True, 'Y', 1\n except:\n pass\n\n # Y with comma 2 to 5 column\n try:\n yValueList = []\n yValueList = line.split(',')\n\n if len(yValueList) > 1 and len(yValueList) <= 5:\n numberValues = 0\n for yValue in yValueList:\n try:\n yValue = float(yValue)\n numberValues += 1\n except ValueError:\n pass\n\n return True, 'Y', numberValues\n except:\n pass\n\n # Y with space 2 to 5 column\n try:\n yValueList = []\n yValueList = line.split()\n\n if len(yValueList) > 1 and len(yValueList) <= 5:\n for yValue in yValueList:\n yValue = float(yValue)\n\n return True, 'Y', len(yValueList)\n except:\n pass\n\n # XY with comma\n try:\n (xValue, yValue) = line.split(',')\n\n xValue = float(xValue)\n yValue = float(yValue)\n\n return True, 'XY', 2\n except:\n pass\n\n # XY with comma\n try:\n xValue, yValue, dummy = line.split(',')\n\n xValue = float(xValue)\n yValue = float(yValue)\n\n return True, 'XY', 2\n except:\n pass\n\n # XY with space\n try:\n (xValue, yValue) = line.split()\n\n xValue = float(xValue)\n yValue = float(yValue)\n\n return True, 'XY', 2\n except:\n pass\n\n return False, None, 0",
"def is_horizontal_win(self, checker):\r\n for row in range(self.height):\r\n for col in range(self.width - self.win_condition + 1):\r\n # Analyze every horizontal group of win_condition checkers \r\n # (eg. every horizontal group of 3 checkers if the winning \r\n # condition is 3 checkers in a row).\r\n # and calculate how many checkers that are in a row\r\n checker_group = self.grid[row, col : col + self.win_condition]\r\n num_checkers = sum(checker_group == checker)\r\n\r\n if num_checkers == self.win_condition:\r\n return True\r\n\r\n # if we get here, there's no horizontal win\r\n return False",
"def is_vertical(self):\n return self.slope == float(\"+inf\")",
"def IsHorizontal(self):\r\n\r\n return self.dock_direction in [AUI_DOCK_TOP, AUI_DOCK_BOTTOM]"
] | [
"0.7609686",
"0.72768867",
"0.72768867",
"0.6881701",
"0.68597597",
"0.6499938",
"0.6489282",
"0.64614856",
"0.63482904",
"0.63324815",
"0.6304397",
"0.6304397",
"0.61203885",
"0.61043155",
"0.6032632",
"0.6023981",
"0.6008056",
"0.59356326",
"0.5913236",
"0.59085",
"0.5905486",
"0.58542174",
"0.58424735",
"0.5822547",
"0.57851183",
"0.57650936",
"0.57321006",
"0.56378627",
"0.5589858",
"0.55643255"
] | 0.80236757 | 0 |
Determines the length and the cosine of the angle from a positive horizontal ray of a line segment. | def line_length_angle(line:tuple)->tuple:
squared_dist = point_sqr_distance(line[0], line[1])
if squared_dist == 0:
return 0,1
distance = math.sqrt(squared_dist)
angle_cosine = (line[1][0] - line[0][0]) / distance
return squared_dist, angle_cosine | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def determine_angle_slope(line, ax):\n x, y = line.get_data()\n\n sp1 = ax.transData.transform_point((x[0],y[0]))\n sp2 = ax.transData.transform_point((x[-1],y[-1]))\n\n rise = (sp2[1] - sp1[1])\n run = (sp2[0] - sp1[0])\n\n return degrees(atan(rise/run))",
"def get_angle(vert1, vert2):\n x_axis = np.array([1, 0])\n input_axis = vert2 - vert1\n input_axis = input_axis / np.linalg.norm(input_axis)\n return math.degrees(np.arccos(np.dot(x_axis, input_axis)))",
"def horizontal_angle(cX):\n\n return atan(((FRAME_CENTER[0] + .5) - cX) / FOCAL_LENGTH)",
"def hangle(dx, dy):\r\n\r\n l = m.sqrt(dx * dx + dy * dy)\r\n if dy > 0:\r\n return m.acos(dx / l)\r\n else:\r\n return 2 * m.pi - m.acos(dx / l)",
"def _acute_angle(point, line_point1, line_point2):\n base_line = np.linalg.norm(line_point1-line_point2)\n assert base_line > 0, \"check the library useage\"\n line1 = np.linalg.norm(point - line_point1)\n line2 = np.linalg.norm(point - line_point2)\n cos_angle_1 = (base_line**2 + line1**2 - line2**2)/(2*base_line*line1)\n cos_angle_2 = (base_line**2 + line2**2 - line1**2)/(2*base_line*line2)\n if cos_angle_1 * cos_angle_2 > 0:\n return True\n else:\n return False",
"def two_d_horizontal_angle(lower_point, upper_point):\n\n \"\"\"finds angle from the horizontal. It is good for scenarios such as jacking coeff and anti squat\"\"\"\n vect = np.subtract(upper_point, lower_point)\n np.ndarray.tolist(vect)\n # project to front view by deleting x term\n # vertical vect\n horiz_vect = [1, 0]\n\n # using this relation http://www.wikihow.com/Find-the-Angle-Between-Two-Vectors\n angle = np.arccos(np.divide(np.dot(vect, horiz_vect), (magnitude(lower_point, upper_point)))) * 180 / math.pi\n return angle",
"def find_angle(im_binary: np.ndarray) -> float:\n angles = np.linspace(-np.pi/2, np.pi/2, 360)\n h, theta, d = skimage.transform.hough_line(im_binary, theta=angles)\n _, angles, distances = skimage.transform.hough_line_peaks(\n h, theta, d, num_peaks=1)\n return angles, distances",
"def _line_intersection(self, line, point):\n den = euclidean_distance((line[0],line[1]), (line[2],line[3]))\n x1, y1, x2, y2 = line[0], line[1], line[2], line[3]\n x3, y3 = point[0], point[1]\n\n u = ( ((x3-x1) * (x2-x1)) + ((y3-y1) * (y2-y1)) ) / den\n\n x, y = (x1 + u * (x2-x1)), (y1 + u * (y2-y1))\n dist = euclidean_distance((x,y), point)\n\n # pygame.draw.circle(self.screen, SIM_COLORS['aqua'], \n # (int(x*SCALE), int(y*SCALE)), \n # int(40), \n # 0)\n # print dist*SCALE, (x*SCALE,y*SCALE)\n\n return dist, (x, y)",
"def _angle(self, a, b, c):\n divid = (a ** 2 + b ** 2 - c ** 2)\n divis = (2 * a * b)\n if (divis) > 0:\n result = float(divid) / divis\n if result <= 1.0 and result >= -1.0:\n return acos(result)\n return 0\n else:\n return 0",
"def get_line_distance(self, p):\n\n y = 1000 * p.y\n R = 1000 * self.geometry.R\n x = copysign(sqrt(y ** 2 + (R - sqrt(R ** 2 - y ** 2))), y)\n x = 2 * R * asin(x / (2 * R))\n #x=y\n b = -x / sqrt(R ** 2 - x ** 2)\n theta = atan(b) # grating tangent angle\n print b, theta\n d = 0\n for n, a in enumerate(self.an):\n d += a * x ** n\n d *= cos(theta)\n return 1e-3 / d",
"def lineShape(self,peakAngle,width,angle):\n x = abs(peakAngle - angle)/width\n if x == 0.0:\n return 1.0\n else:\n return (2*j1(x)/x)**2",
"def calculate_line_length(x1, y1, x2, y2):\n distance = np.sqrt((x2 - x1)**2 + (y2 - y1)**2)\n return distance",
"def acos (cls, x) :\n return Angle_R (math.acos (x))",
"def find_direction_vector(line):\n pt1, pt2 = line\n pt1 = np.array(pt1).reshape(2,)\n pt2 = np.array(pt2).reshape(2,)\n direct = pt2 - pt1\n direct_norm = normalize(direct)\n return direct_norm",
"def calculate_vector_angle(vector_1, vector_2):\n dot = dot_product(vector_1, vector_2)\n cos_angle = float(dot / (two_norm(vector_1) * two_norm(vector_2)))\n # Buffer for floating point errors\n if 1.2 > cos_angle > 1:\n cos_angle = 1\n elif -1.2 < cos_angle < -1:\n cos_angle = -1\n elif -1.2 > cos_angle or 1.2 < cos_angle:\n raise KeypointError(\"Ratio for angle is outside of the domain.\")\n if cos_angle > 0:\n multiplier = 1\n else:\n multiplier = -1\n angle_of_interest = (180 - math.degrees(math.acos(cos_angle))) * multiplier\n return angle_of_interest",
"def _calculate_angle(x0, y0, x1, y1):\n if x0 == y0 == x1 == y1 == 0:\n return 0\n\n if x1 - x0 > 0: # pointing to the right semi-plane\n angle = atan((y1 - y0) / (x1 - x0))\n elif x1 - x0 < 0 and y1 - y0 >= 0: # adding pi if pointing to the left-bottom quart\n angle = pi + atan((y1 - y0) / (x1 - x0))\n elif x1 - x0 < 0 and y1 - y0 < 0: # subtract pi if pointing to the left-upper quart\n angle = -pi + atan((y1 - y0) / (x1 - x0))\n else: # zerodevision handle\n if y1 - y0 > 0: # pointing down\n angle = pi / 2\n else: # pointing up\n angle = -pi / 2\n\n return angle",
"def get_normal_dist(line, point):\n \n # Rotate: \n x_rot = np.cos(line[1])*point[0] + np.sin(line[1])*point[1]\n \n # Normal distance: x_rot - rho:\n return x_rot - line[0]",
"def angle_between_vectors(x, y):\n first_step = abs(x[0] * y[0] + x[1] * y[1] + x[2] * y[2]) / (\n np.sqrt(x[0]**2 + x[1]**2 + x[2]**2) *\n np.sqrt(y[0]**2 + y[1]**2 + y[2]**2))\n second_step = np.arccos(first_step)\n return (second_step)",
"def findHAngle(dx, dy):\n l = m.sqrt(dx*dx + dy*dy)\n if dy > 0:\n return m.acos(dx/l)\n else:\n return 2*m.pi - m.acos(dx/l)",
"def arccos(x):\n raise NotImplementedError",
"def LinePlaneCollision(planeNormal, planePoint, rayDirection, rayPoint, epsilon=1e-12):\n\n ndotu = planeNormal.dot(rayDirection)\n if abs(ndotu) < epsilon:\n raise RuntimeError(\"no intersection or line is within plane\")\n\n w = rayPoint - planePoint\n si = -planeNormal.dot(w) / ndotu\n Psi = w + si * rayDirection + planePoint\n return Psi",
"def rayIntersection(self, ray):\n #t = \"what we are trying to find\"\n l = -ray.mDirection\n l0 = ray.mOrigin\n n = self.mNormal\n p0 = self.mDistance * n\n #p = l0 + l * t\n\n if l.dot(n) > 0:\n v = p0 - l0\n t = -(v.dot(n) / l.dot(n))\n return t\n\n else:\n return None",
"def find_line_through_point(center, theta, length):\n\n r = length\n cx, cy = center\n\n xo = int(r * math.sin(theta))\n yo = int(r * math.cos(theta))\n\n line_start = cx, cy\n line_end = cx + xo, cy + yo\n\n return line_start, line_end",
"def distance_point_to_line(x1, y1, a, b, c):\n d = abs((a * x1 + b * y1 + c)) / (math.sqrt(a * a + b * b))\n #print(\"Distance from ({}, {}) to line {}x+{}y+{}=0 is {}\".format(\n # x1, y1, a, b, c, d))\n return(d)",
"def dist_to_line(self, line, pt):\n return abs(line[0]*pt.x + line[1]*pt.y + line[2])/math.sqrt(line[0]**2 + line[1]**2)",
"def hough_line_own(image):\n\n # All possible angles and rhos\n thetas = np.deg2rad(np.arange(-90.0, 90.0))\n w, h = image.shape\n max_dist = int(np.ceil(np.sqrt(w**2 + h**2)))\n rhos = np.linspace(-max_dist, max_dist, max_dist*2)\n print(rhos.shape, thetas.shape)\n\n # Save shit for later\n theta_cos = np.cos(thetas)\n theta_sin = np.sin(thetas)\n num_thetas = len(thetas)\n\n result = np.zeros((2 * max_dist, num_thetas), dtype=np.uint64)\n\n # Only get the non zero indexes of the image\n yidx, xidx = np.nonzero(image)\n\n for i in range(len(xidx)):\n x = xidx[i]\n y = yidx[i]\n\n for theta_idx in range(num_thetas):\n rho = int(round(x * theta_cos[theta_idx] +\n y * theta_sin[theta_idx]) +\n max_dist)\n result[rho, theta_idx] += 1\n\n return result, thetas, rhos",
"def get_angle(v1,v2) :\n\n if (np.linalg.norm(v1)*np.linalg.norm(v2)) != 0 : \n cosangle = np.dot(v1,v2)/(np.linalg.norm(v1)*np.linalg.norm(v2))\n cosangle = np.maximum(-1,np.minimum(1, cosangle))\n angle = np.arccos(cosangle) \n if np.cross(v1,v2) < 0 :\n angle = 2*np.pi - angle \n return angle\n return None",
"def get_angle(pt1,pt2,pt3):\r\n a = float(get_distance(pt1,pt2))\r\n b = float(get_distance(pt2,pt3))\r\n c = float(get_distance(pt1,pt3))\r\n angle = np.arccos((a**2 + b**2 - c**2)/(2*a*b)) # Law of Cosines \r\n \r\n return angle",
"def calc_incidence_angle():\n \n Delta_r, lat_r, Omega_r, Zenith_r, Azimuth_r, Elev_angle = solar_model()\n \n # Beta is equal to angle of tilted surface to horizontal (in radians)\n roof_slopes_west = section_coordinates()\n Beta_r = np.arctan(roof_slopes_west) \n incidence_angles_west = np.zeros(101)\n \n \n for i in range(0,len(roof_slopes_west)):\n incidence_angles_west[i] = np.arccos(np.sin(Delta_r)* np.sin(lat_r) * np.cos(Beta_r[i]) - np.sin(Delta_r) * np.cos(lat_r) * np.sin(Beta_r[i]) * np.cos(Azimuth_r) + np.cos(Delta_r) * np.cos(lat_r) * np.cos(Beta_r[i]) * np.cos(Omega_r) + np.cos(Delta_r) * np.sin(lat_r) * np.sin(Beta_r[i]) * np.cos(Azimuth_r) * np.cos(Omega_r) + np.cos(Delta_r) * np.sin(Beta_r[i]) * np.sin(Azimuth_r) * np.sin(Omega_r))",
"def distance_point_line(point, line):\n a, b = line\n ab = subtract_vectors(b, a)\n pa = subtract_vectors(a, point)\n pb = subtract_vectors(b, point)\n l = length_vector(cross_vectors(pa, pb))\n l_ab = length_vector(ab)\n return l / l_ab"
] | [
"0.6568259",
"0.6058625",
"0.6033344",
"0.6004007",
"0.59449476",
"0.5929289",
"0.5880977",
"0.5880165",
"0.5810554",
"0.5711322",
"0.5710918",
"0.57101655",
"0.5706915",
"0.57055837",
"0.5633181",
"0.56073636",
"0.5591553",
"0.55895156",
"0.5588889",
"0.5581489",
"0.557368",
"0.5567853",
"0.5548421",
"0.5544951",
"0.5527049",
"0.5516206",
"0.5515117",
"0.5505676",
"0.54987705",
"0.5458828"
] | 0.7148612 | 0 |
Takes a sequential list of vertices and turns it into a list of edges. | def edgify(vertices:list)->list:
edges = []
for k in range(0, len(vertices) - 1):
edges.append([vertices[k], vertices[k + 1]])
return edges | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def to_edges(graph):\n return list(zip(graph[:-1], graph[1:]))",
"def incoming_edges(self, vertices, labels=True):\n return list(self.incoming_edge_iterator(vertices, labels=labels))",
"def getEdges(self):\n edgeList = []\n for v in self.adjList:\n for i in range(len(self.adjList[v])):\n edgeList.append((v, self.adjList[v][i]))\n return edgeList",
"def edges(self):\n es = []\n for vertex1 in self.vertices():\n for vertex2 in self.out_vertices(vertex1):\n es.append(self[vertex1][vertex2])\n return es",
"def edges(self):\n return [edge(self.vertices[i - 1], self.vertices[(i)]) for i in range(-1, len(self.vertices))]",
"def to_edges(l):\n it = iter(l)\n last = next(it)\n\n for current in it:\n yield last, current\n last = current",
"def createEdges(self, rawEdgesList, vertices):\n verticesLen = len(vertices)\n edges = [[0 for i in range(verticesLen)] for i in range((verticesLen))]\n for edgesData in list(rawEdgesList):\n row = [item.strip() for item in edgesData.split('/')]\n dataDict = {key : next((node for node in vertices if node['value'] == key), None) for key in row }\n temp = ''\n for index, key in enumerate(dataDict):\n if (index > 0):\n edges[dataDict[temp]['index']][dataDict[key]['index']] = 1\n edges[dataDict[key]['index']][dataDict[temp]['index']] = 1\n else:\n temp = key\n\n return edges",
"def get_edges(self) -> []:\n graph_edges = []\n\n for vertex in self.adj_list:\n for connection in self.adj_list[vertex]:\n if (vertex, connection) not in graph_edges and (connection, vertex) not in graph_edges:\n graph_edges.append((vertex, connection))\n\n return graph_edges",
"def generate_edges(graph):\n edges = []\n\n # for each node in graph\n for node in graph:\n\n # for each neighbour node of a single node\n for neighbour in graph[node]:\n # if edge exists then append\n edges.append((node, neighbour))\n return edges",
"def make_adjacency_list_from_edge_list(N, edges):\n adjacency_list = [[] for _ in range(N)]\n for e, (x, y, r) in enumerate(edges):\n adjacency_list[x].append((e, y, r))\n adjacency_list[y].append((e, x, r))\n return adjacency_list",
"def process_edges(edges_string_list):\n edge_list = []\n for line in edges_string_list:\n pair = line.split(',')\n edge_list.append([int(pair[0]), int(pair[1]), float(pair[2])])\n return edge_list",
"def edges_as_vertices(self) -> Iterable[Tuple[Vec3, Vec3]]:\n v = self.vertices\n for edge in self.edges:\n yield v[edge[0]], v[edge[1]]",
"def edges(self):\n vertices = self.vertices(closed=True)\n\n for i in range(len(self)):\n yield(vertices[:, i], vertices[:, i+1])",
"def get_edge_list(self):\n return [(edge.value, edge.node_from.value, edge.node_to.value) for edge in self.edges]",
"def outgoing_edges(self, vertices, labels=True):\n return list(self.outgoing_edge_iterator(vertices, labels=labels))",
"def get_related_edges(nodes_list, graph):\n\n node_id_list = map(lambda x: x.id, nodes_list)\n node_id_set = set(node_id_list)\n edges = []\n\n for node in nodes_list:\n if node.id in graph.incoming_edges:\n for edge in graph.incoming_edges[node.id]:\n\n if edge.start in node_id_set:\n edges.append(edge)\n\n return edges",
"def edge_vertices(edge):\n return [edge.vertex1, edge.vertex2]",
"def path_to_edges(path):\n return list((u, v) for u, v in zip(path[:-1], path[1:]))",
"def edges(adj_mat, vertices):\n return [(i,j) for i,j in\n vertices if (i < j and adj_mat[i][j] == 1)]",
"def get_edges(graph):\n edges = []\n for vertex in graph.keys():\n connected_nodes = graph[vertex]\n for node in connected_nodes:\n edges.append(str(vertex + node))\n\n return edges",
"def edges(self):\n edge_list = []\n for node1 in self.node_dict:\n for node2 in self.node_dict[node1]:\n edge_list.append((node1,\n node2,\n self.node_dict[node1][node2]))\n return edge_list",
"def createEdgesList(self, rawEdgesList, vertices):\n def createsList(item):\n tempObj = { 'name': item['value'].strip(), 'listVal': LinkedList() }\n tempNode = Node({ 'value': item['value']})\n tempObj['listVal'].addNode(tempNode)\n return tempObj\n self.edgesList = [createsList(item) for item in vertices]\n for item in rawEdgesList:\n nodesName = [i.strip() for i in item.split('/')]\n parentVal = None\n parentLinkedList = None\n childNode = None\n for index, item in enumerate(nodesName):\n if (index != 0):\n childNode = Node({'value': item})\n childNodeList = next((edge for edge in self.edgesList if item.strip() == edge['name']), None)['listVal']\n childNodeList.addNode(Node({'value': parentVal}))\n parentLinkedList.addNode(childNode)\n else:\n parentVal = item\n parentLinkedList = next((edge for edge in self.edgesList if item.strip() == edge['name']), None)['listVal']",
"def path_nodes_to_edges(path):\n \n # Edge sequence initialization\n edge_sequence = []\n \n for i in range(len(path) - 1):\n edge_sequence.append((path[i], path[i+1]))\n \n return edge_sequence",
"def populate_edges(self, edges_list):\n edges = []\n for edge in edges_list:\n source, target, weight = edge[4], edge[5], edge[6]\n freq, line, geom = edge[7], edge[1], edge[2]\n edges.append(Edge(source, target, weight,\n freq, line, geom))\n self.edges = edges",
"def __generate_edges(self):\n edges = []\n for vertex in self.__graph_dict:\n for neighbour in self.__graph_dict[vertex]:\n edges.append({vertex, neighbour})\n return edges",
"def adjacencyLists():\r\n anyNeighbor = lambda u: any(self.neighbors(u))\r\n verticesWithNeighbors = filter(anyNeighbor, sorted(self.vertices()))\r\n return map(edgesFromVertex, verticesWithNeighbors)",
"def adjacencyLists():\r\n anyNeighbor = lambda u: any(self.neighbors(u))\r\n verticesWithNeighbors = filter(anyNeighbor, sorted(self.vertices()))\r\n return map(edgesFromVertex, verticesWithNeighbors)",
"def addEdgeList(self, edges):\n for e in edges:\n self.addEdge(e[0], e[1], e[2] if len(e) > 2 else None)",
"def generate_edges(self):\n edges = []\n for vertex in self.graph_dict:\n for neighbour in self.graph_dict[vertex]:\n if {neighbour, vertex} not in edges:\n edges.append({neighbour, vertex})\n return edges",
"def _adjacency_to_edges(adjacency):\n edges = set()\n for u in adjacency:\n for v in adjacency[u]:\n try:\n edge = (u, v) if u <= v else (v, u)\n except TypeError:\n # Py3 does not allow sorting of unlike types\n if (v, u) in edges:\n continue\n edge = (u, v)\n\n edges.add(edge)\n return edges"
] | [
"0.74463135",
"0.6988424",
"0.6973376",
"0.69115984",
"0.69084823",
"0.6798895",
"0.6756665",
"0.67535317",
"0.67494607",
"0.6699782",
"0.66954184",
"0.66598827",
"0.6621105",
"0.6606147",
"0.65837735",
"0.6566445",
"0.650843",
"0.64819276",
"0.64663",
"0.6394724",
"0.6387192",
"0.6373782",
"0.6367641",
"0.63675374",
"0.63456523",
"0.63313216",
"0.63313216",
"0.6264241",
"0.62580293",
"0.6250558"
] | 0.83298373 | 0 |
Determines the closest point on the infinite line associated with the edge to the given point. The closest point on an infinite line to a point is determined by the intersection of that line (y=mx+b) and a perpendicular line through the | def closest_line_point(point:tuple, edge:tuple)->tuple:
d_y, d_x, b = line_equation((edge[0], edge[1]))
if b == None:
# The line is vertical, need different intercept formula.
return (edge[0][0], point[1])
if d_y == 0:
# The line is horizontal, we can use a faster formula:
return (point[0], edge[0][1])
term_1 = d_x * d_y * (point[1] - edge[1][1])
term_2 = (d_y ** 2) * edge[1][0]
term_3 = (d_x ** 2) * point[0]
denom = (d_y ** 2) + (d_x ** 2)
x_int = (term_1 + term_2 + term_3) / denom
y_int = (d_y / d_x) * x_int + b
return (x_int, y_int) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _nearest_point_on_line(begin, end, point):\n b2e = _vec_sub(end, begin)\n b2p = _vec_sub(point, begin)\n nom = _vec_dot(b2p, b2e)\n denom = _vec_dot(b2e, b2e)\n if denom == 0.0:\n return begin\n u = nom / denom\n if u <= 0.0:\n return begin\n elif u >= 1.0:\n return end\n else:\n return _vec_add(begin, _vec_scale(b2e, u))",
"def closest_point_on_line(point, line):\n a, b = line\n ab = subtract_vectors(b, a)\n ap = subtract_vectors(point, a)\n c = vector_component(ap, ab)\n return add_vectors(a, c)",
"def get_line_to(self, point):\n\n b = ((self.x - point.x)*point.y - (self.y - point.y)*point.x)/(self.x - point.x)\n\n a = (self.y - point.y)/(self.x - point.x)\n\n return a, b",
"def getClosestPointFromLine(origin, ray, point):\n # calculate the difference vector\n delta = point-origin\n # norm the ray\n ray /= np.linalg.norm(ray, axis=-1)[..., None]\n # calculate the scale product\n factor = np.sum(ray*delta, axis=-1)\n try:\n return origin + factor[:, None] * ray\n except IndexError:\n return origin + factor * ray",
"def LineClosestPoint(line, testpoint):\n line = rhutil.coerceline(line, True)\n testpoint = rhutil.coerce3dpoint(testpoint, True)\n return line.ClosestPoint(testpoint, False)",
"def project_point_to_line(point, line_start, line_end):\n line_magnitude = line_start.distance(line_end)\n \n u = ((point.x - line_start.x) * (line_end.x - line_start.x) +\n (point.y - line_start.y) * (line_end.y - line_start.y)) \\\n / (line_magnitude ** 2)\n\n # closest point does not fall within the line segment, \n # take the shorter distance to an endpoint\n if u < 0.00001 or u > 1:\n ix = point.distance(line_start)\n iy = point.distance(line_end)\n if ix > iy:\n return line_end\n else:\n return line_start\n else:\n ix = line_start.x + u * (line_end.x - line_start.x)\n iy = line_start.y + u * (line_end.y - line_start.y)\n return Point([ix, iy])",
"def getClosestPointToLine(self, A, B, P):\n AP = XYPoint(P.x - A.x, P.y - A.y)\n AB = XYPoint(B.x - A.x, B.y - A.y)\n ab2 = AB.x * AB.x + AB.y * AB.y\n ap_ab = AP.x * AB.x + AP.y * AB.y\n t = ap_ab / ab2\n\n if t < 0.0:\n t = 0.0\n elif t > 1.0:\n t = 1.0\n\n return XYPoint(A.x + AB.x * t, A.y + AB.y * t)",
"def _nearest_to_point(self, point):\n ptvertex = point.get_vertex(crs=self.crs)\n segments = zip(self.vertices.slice(0, -1), self.vertices.slice(1, 0))\n\n if isinstance(self.crs, CartesianCRS):\n func = _cvectorgeo.pt_nearest_planar\n def func(seg):\n return _cvectorgeo.pt_nearest_planar(ptvertex[0], ptvertex[1],\n seg[0][0], seg[0][1], seg[1][0], seg[1][1])\n else:\n fwd = self.crs.forward\n inv = self.crs.inverse\n def func(seg):\n return _cvectorgeo.pt_nearest_proj(fwd, inv, ptvertex,\n seg[0], seg[1], tol=0.01)\n\n point_dist = map(func, segments)\n min_point = None\n min_dist = -1.0\n for i, (point, dist) in enumerate(point_dist):\n if dist < min_dist or (i == 0):\n min_point = point\n min_dist = dist\n\n return min_dist, min_point",
"def findNearPointOnLine(node1, node2, point):\n p=point[0]\n q=point[1]\n a=node1[0]\n b=node1[1]\n c=node2[0]\n d=node2[1]\n \n x = ((a-p)*(d-b) + (q-b)*(c-a)) / ((d-b)**2+(c-a)**2) * (d-b) + p\n y = ((a-p)*(d-b) + (q-b)*(c-a)) / ((d-b)**2+(c-a)**2) * (a-c) + q\n \n return x, y",
"def get_line_to(self, provided_point):\n\n \"\"\"Calculate slope\"\"\"\n a = (provided_point.y - self.y) / (provided_point.x - self.x)\n\n \"\"\"Calculate b\"\"\"\n b = self.y - a * self.x\n\n return (a,b)",
"def pick_point_not_on_line(line: Line):\n return line.point1 + line.get_perpendicular_at_point(line.point1).get_direction_vector()",
"def _distance_to_line(begin, end, point):\n return _vec_distance(point, _nearest_point_on_line(begin, end, point))",
"def dist_to_point(self, point):\n\t\treturn dist_to_line2d_seg((self.a.to_tuple(),self.b.to_tuple()), point.to_tuple())",
"def getNearestEdge(self, point):\n edge = mm.idx.nearest((point.getPoint().x, point.getPoint().y), objects=True)\n edges = [e.object for e in edge]\n if len(edges) == 1:\n result = edges[0]\n else:\n dist = 99999999999999999999999999999999999999999\n for edge in edges:\n distance = point.getPoint().distance(edge.getGeometry())\n if distance < dist:\n dist = distance\n result = edge\n return result",
"def closest_point(self, l):\n cos = np.dot(self.direction, l.direction)\n n = 1 - cos ** 2\n if n < sys.float_info.epsilon:\n # Lines are parallel.\n return self.zero\n\n d0 = l.zero - self.zero\n a = np.dot(d0, self.direction)\n b = np.dot(d0, l.direction)\n return self.zero + self.direction * ( a - b * cos) / n",
"def distance_to_line(a, b, p):\n return distance(closest_point(a, b, p), p)",
"def edistw_to_line(point, edge, walls):\r\n#\tif min(x1,x2) <= x <= max(x1,x2) and min(y1,y2) <= y <= max(y1,y2):\r\n#\t\treturn 0\r\n\t(x,y) = point\r\n\t((x1,y1),(x2,y2)) = edge\r\n\tif x1 == x2:\r\n\t\tds = [math.sqrt((x1-x)**2 + (y3-y)**2) \\\r\n\t\t\tfor y3 in range(min(y1,y2),max(y1,y2)+1) \\\r\n\t\t\tif not racetrack.crash(((x,y),(x1,y3)), walls)]\r\n\telse:\r\n\t\tds = [math.sqrt((x3-x)**2 + (y1-y)**2) \\\r\n\t\t\tfor x3 in range(min(x1,x2),max(x1,x2)+1) \\\r\n\t\t\tif not racetrack.crash(((x,y),(x3,y1)), walls)]\r\n\tds.append(infinity)\r\n\treturn min(ds)",
"def nearest_on_boundary(self, point):\n _, minpt = self._nearest_to_point(point)\n return Point(minpt, crs=self.crs)",
"def _dist_point2line(self, point: ndarray,\n line: Tuple[ndarray, ndarray]) -> ndarray:\n\n assert isinstance(line, tuple)\n point1, point2 = line\n d = abs(np.cross(point2 - point1, point - point1)) / (\n norm(point2 - point1) + 1e-8)\n return d",
"def dist_to_line2d(line, point):\n\tx1,y1 = line[0]\n\tx2,y2 = line[1]\n\tx3,y3 = point\n\t\n\t# where on line the perpendicular is\n\tu = ( ((x3-x1)*(x2-x1) + (y3-y1)*(y2-y1))\n\t\t\t/ (math.pow(x1-x2,2) + math.pow(y1-y2,2)) )\n\t\n\t# intersection point\n\tx = x1 + u*(x2-x1)\n\ty = y1 + u*(y2-y1)\n\t\n\tdist = math.sqrt(math.pow(x-x3,2)+math.pow(y-y3,2))\n\t\n\treturn dist",
"def closest_point_on_segment(point, segment):\n a, b = segment\n p = closest_point_on_line(point, segment)\n d = distance_point_point_sqrd(a, b)\n d1 = distance_point_point_sqrd(a, p)\n d2 = distance_point_point_sqrd(b, p)\n if d1 > d or d2 > d:\n if d1 < d2:\n return a\n return b\n return p",
"def closest(self, x):\n # http://www.ahinson.com/algorithms_general/Sections/Geometry/PluckerLine.pdf\n # has different equation for moment, the negative\n\n x = arg.getvector(x, 3)\n\n lam = np.dot(x - self.pp, self.uw)\n p = self.point(lam) # is the closest point on the line\n d = np.linalg.norm( x - p)\n \n return namedtuple('closest', 'p d lam')(p, d, lam)",
"def closest_point(self, point, start_param=None, Ns=25):\n x, z = self.rotate_to_xz_plane(point)\n la = self._closest_point(x, z, start_param, Ns)\n return la",
"def dist_to_line(self, line, pt):\n return abs(line[0]*pt.x + line[1]*pt.y + line[2])/math.sqrt(line[0]**2 + line[1]**2)",
"def xymaxw_to_line(point, edge, walls):\r\n#\tif min(x1,x2) <= x <= max(x1,x2) and min(y1,y2) <= y <= max(y1,y2):\r\n#\t\treturn 0\r\n\t(x,y) = point\r\n\t((x1,y1),(x2,y2)) = edge\r\n\tif x1 == x2:\r\n\t\tds = [max(abs(x1-x), abs(y3-y)) \\\r\n\t\t\tfor y3 in range(min(y1,y2),max(y1,y2)+1) \\\r\n\t\t\tif not racetrack.crash(((x,y),(x1,y3)), walls)]\r\n\telse:\r\n\t\tds = [max(abs(x3-x), abs(y1-y)) \\\r\n\t\t\tfor x3 in range(min(x1,x2),max(x1,x2)+1) \\\r\n\t\t\tif not racetrack.crash(((x,y),(x3,y1)), walls)]\r\n\tds.append(infinity)\r\n\treturn min(ds)",
"def closest_point_to(self, x):\n x = np.array(x)\n v = self.p1 - self.p0\n b = self.p0 - x\n\n t = -np.dot(v, b) / np.dot(v, v)\n if (0 <= t <= 1):\n closest = t*(self.p1 - self.p0) + self.p0\n return closest\n else:\n if np.linalg.norm(x - self.p0) < np.linalg.norm(x - self.p1):\n return self.p0\n else:\n return self.p1",
"def shortest_line_to_point(point_a, point_b, point_c): # where a and b are on spin axis, c is the point spinning round\n axis_vect = np.subtract(point_a, point_b)\n axis_mag = magnitude(point_a, point_b)\n unit_axis = np.divide(axis_vect, axis_mag) # unit of pp\n # pp' constants - p\n\n # pp dot u\n t = np.sum(np.dot(unit_axis, unit_axis))\n c = np.sum(np.dot(np.subtract(point_b, point_c), unit_axis))\n p = -c / t\n project_point_on_axis_add = (np.multiply(unit_axis, p))\n project_point_on_axis = project_point_on_axis_add + point_b\n distance = magnitude(point_c, project_point_on_axis)\n return distance, project_point_on_axis",
"def distance_point_line(point, line):\n a, b = line\n ab = subtract_vectors(b, a)\n pa = subtract_vectors(a, point)\n pb = subtract_vectors(b, point)\n l = length_vector(cross_vectors(pa, pb))\n l_ab = length_vector(ab)\n return l / l_ab",
"def get_closest_point(path, point):\n np_path = convert_path_type(path) # modify path to be a numpy array\n np_point = convert_point_type(point) # modify point to be a [x,y,z] numpy array\n\n # compute the distance from current location to every point in path and find index of the min distance\n distances = ((np_path[:,0] - np_point[0])**2 + (np_path[:,1] - np_point[1])**2)**0.5\n closest_idx = np.argmin(distances)\n\n if closest_idx != len(np_path) - 1: # check if this point is behind current location, if so use index+1\n closest_point = np_path[closest_idx]\n next_closest_point = np_path[closest_idx+1]\n\n # create vectors between the three points\n path_vector = next_closest_point - closest_point\n current_vector = np_point - closest_point\n\n # compute dot product to figure out whether location is behind or in front of closest_point\n dot_prod = np.dot(path_vector, current_vector)\n\n if dot_prod >= 0: # closest point is behind current location\n closest_idx += 1\n\n closest_point = path[closest_idx] # retrieve point from original `path` argument for type consistency\n\n return closest_point, closest_idx",
"def findPointOnLine(node1, node2, distance):\n m, b, _ = geometry.lineSpec(node1, node2)\n \n xy = []\n if m == True: # parallel to y axis\n xy.append(node1[0])\n if node1[1] <= node2[1]:\n xy.append(node1[1] + distance)\n else:\n xy.append(node1[1] - distance)\n \n elif m == False: # parallel to x axis\n if node1[0] <= node2[0]:\n xy.append(node1[0] + distance)\n else:\n xy.append(node1[0] - distance)\n xy.append(node1[1])\n \n else:\n x = sp.Symbol('x')\n z = (x-node1[0])**2 + (m*x+b-node1[1])**2 - distance**2\n xSolution = sp.solve(z, x)\n \n for xSol in xSolution:\n if (xSol >= node1[0] and xSol <= node2[0]) or (xSol <= node1[0] and xSol >= node2[0]):\n xy.append(xSol)\n xy.append(xSol*m + b)\n return xy"
] | [
"0.7810633",
"0.7691588",
"0.7173058",
"0.71486485",
"0.7119254",
"0.7044393",
"0.69271195",
"0.69156563",
"0.6865664",
"0.68619066",
"0.6846239",
"0.68359554",
"0.67854995",
"0.67513555",
"0.67275",
"0.6722305",
"0.66625106",
"0.66622204",
"0.6593246",
"0.6526185",
"0.6519477",
"0.64910764",
"0.6487646",
"0.64810467",
"0.6476103",
"0.6451201",
"0.63972193",
"0.6351317",
"0.63398576",
"0.63192725"
] | 0.7811466 | 0 |