id (string, 1–7 chars) | text (string, 6–1.03M chars) | dataset_id (string, 1 class)
---|---|---|
1760061
|
import argparse, sys, os, time
from mitopipeline.pipeline_builder import PipelineBuilder
from mitopipeline.pipeline_runner import PipelineRunner
class CommandLineParser():
def __init__(self, argv=sys.argv[1:]):
self.__opts = self.parse_commands(argv)
def parse_commands(self, argv=sys.argv[1:]):
parser = self._build_parser()
opts = parser.parse_args(argv)
return opts
def build_and_run(self, steps):
pipeline_builder = PipelineBuilder()
if pipeline_builder.build_pipeline(tools=self.__opts.tools, directory=self.__opts.directory, steps=steps, output=self.__opts.output, refs=self.__opts.genomes, email=self.__opts.slurm):
PipelineRunner.run(self.__opts)
else:
raise RuntimeError("There was an error in building the pipeline. Please double check your command lien arguments")
def _build_parser(self):
parser = argparse.ArgumentParser()
#required arguments
required_args = parser.add_argument_group('required arguments')
required_args.add_argument('-s', '--directory', help="Path to the directory of files to be run", type=str)
required_args.add_argument('-g', '--genomes', help="Path of location of reference genomes", default=None)
required_args.add_argument('-t', '--tools', help="Path to the directory that contains all of the 3rd party packages", default=None)
#optional arguments
parser.add_argument('-o', '--output', help="Path to where you want the output to be stored", default=None)
parser.add_argument('-l', '--slurm', help="Use slurm jobs to run each step, include an email address for status", type=str, default=None)
parser.add_argument('-d', '--download', help="Specify softwares you want to download", default=False, action='store_true')
parser.add_argument('-r', '--remove', nargs='+', help="Steps to not run in this pipeline", default=None)
#parser.add_argument('-c', '--config', help="Use the config file to specify software options", default=None)
parser.add_argument('-w', '--workers', type=int, help="Number of workers to use to run the pipeline", default=1)
return parser
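# --- Usage sketch (not part of the original module; the paths and step names
# below are hypothetical) ------------------------------------------------------
# A minimal way the parser might be driven from a console entry point:
#
#   parser = CommandLineParser(['-s', '/data/samples', '-g', '/refs',
#                               '-t', '/tools', '-w', '4'])
#   parser.build_and_run(steps=['alignment', 'snpeff'])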
|
StarcoderdataPython
|
145793
|
from django import forms
class BaseBoostrapFormMixin:
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        for field in self.fields.values():
            field_widget = field.widget
            field_widget.attrs.update({'tabindex': 1})
            # not every widget exposes input_type (e.g. Textarea, Select)
            if getattr(field_widget, 'input_type', None) != 'checkbox':
                field_widget.attrs.update({'class': 'form-control'})
def _post_clean(self):
super()._post_clean()
if self.cleaned_data:
for error in filter(lambda err: err != '__all__', self.errors):
if 'class' in self.fields[error].widget.attrs:
self.fields[error].widget.attrs['class'] += ' is-invalid'
else:
self.fields[error].widget.attrs['class'] = 'is-invalid'
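# --- Usage sketch (assumption, not in the original module) --------------------
# The mixin is intended to be listed before the Django form base class so its
# __init__ and _post_clean hooks wrap Django's own; `LoginForm` and its fields
# below are hypothetical.
#
#   class LoginForm(BaseBoostrapFormMixin, forms.Form):
#       username = forms.CharField()
#       remember_me = forms.BooleanField(required=False)
#
# Every widget then receives tabindex=1, non-checkbox widgets get the
# `form-control` class, and invalid fields are re-rendered with `is-invalid`.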
|
StarcoderdataPython
|
3201170
|
<filename>neet/boolean/eca.py
"""
Elementary Cellular Automata
============================
The :class:`neet.automata.eca.ECA` class describes an `Elementary Cellular
Automaton <https://en.wikipedia.org/wiki/Elementary_cellular_automaton>`_
with an arbitrary rule.
.. rubric:: Examples
"""
import numpy as np
from .network import BooleanNetwork
class ECA(BooleanNetwork):
"""
ECA is a class to represent elementary cellular automaton rules. Each ECA
contains an 8-bit integral member variable ``code`` representing the
Wolfram code for the ECA rule and a set of boundary conditions which is
either ``None``, signifying periodic boundary conditions, or a pair of
cell states signifying fixed, open boundary conditions.
"""
def __init__(self, code, size, boundary=None):
"""
Construct an elementary cellular automaton rule.
.. rubric:: Examples
.. doctest:: automata
>>> ca = ECA(30, 5)
>>> ca.code
30
>>> ca.size
5
>>> ca.boundary
>>> ca = ECA(30, 5, boundary=(0,0))
>>> ca.boundary
(0, 0)
:param code: the Wolfram code for the ECA
:type code: int
:param size: the size of the ECA's lattice
:type size: int
:param boundary: the boundary conditions for the CA
:type boundary: tuple or None
:raises TypeError: if ``code`` is not an instance of int
:raises ValueError: if ``code`` is not in :math:`\\{0,1,\\ldots,255\\}`
        :raises TypeError: if ``boundary`` is neither ``None`` nor an instance of tuple
        :raises ValueError: if ``boundary`` is neither ``None`` nor a pair of binary states
"""
super(ECA, self).__init__(size)
self.code = code
self.boundary = boundary
@property
def code(self):
"""
The Wolfram code of the elementary cellular automaton
.. rubric:: Examples
.. doctest:: automata
>>> eca = ECA(30, 5)
>>> eca.code
30
>>> eca.code = 45
>>> eca.code
45
>>> eca.code = 256
Traceback (most recent call last):
...
ValueError: invalid ECA code
:type: int
:raises TypeError: if ``code`` is not an instance of int
:raises ValueError: if ``code`` is not in :math:`\\{0,1,\\ldots,255\\}`
"""
return self.__code
@code.setter
def code(self, code):
if not isinstance(code, int):
raise TypeError("ECA code is not an int")
if 255 < code or code < 0:
raise ValueError("invalid ECA code")
self.__code = code
@property
def size(self):
return self._size
@size.setter
def size(self, size):
if not isinstance(size, int):
raise TypeError("ECA size is not an int")
if size < 1:
raise ValueError("ECA size is negative")
self._size = size
self._volume = 2**size
self._shape = [2] * size
@property
def boundary(self):
"""
        The boundary conditions of the elementary cellular automaton
.. rubric:: Examples
.. doctest:: automata
        >>> eca = ECA(30, 5)
>>> eca.boundary
>>> eca.boundary = (0,1)
>>> eca.boundary
(0, 1)
>>> eca.boundary = None
>>> eca.boundary
>>> eca.boundary = [0,1]
Traceback (most recent call last):
...
        TypeError: ECA boundary is neither None nor a tuple
:type: ``None`` or tuple
        :raises TypeError: if ``boundary`` is neither ``None`` nor an instance of tuple
        :raises ValueError: if ``boundary`` is neither ``None`` nor a pair of binary states
"""
return self.__boundary
@boundary.setter
def boundary(self, boundary):
if boundary and not isinstance(boundary, tuple):
raise TypeError("ECA boundary are neither None nor a tuple")
if boundary:
if len(boundary) != 2:
raise ValueError("invalid ECA boundary conditions")
for x in boundary:
if x != 0 and x != 1:
raise ValueError("invalid ECA boundary value")
self.__boundary = boundary
def _unsafe_update(self, lattice, index=None, pin=None, values=None):
"""
Update the state of the ``lattice``, in place, without
checking the validity of the arguments.
.. rubric:: Basic Use:
.. doctest:: automata
        >>> ca = ECA(30, 5)
>>> xs = [0,0,1,0,0]
>>> ca._unsafe_update(xs)
[0, 1, 1, 1, 0]
>>> ca.boundary = (1,1)
>>> ca._unsafe_update([0,0,1,0,0])
[1, 1, 1, 1, 1]
.. rubric:: Single-Node Update:
.. doctest:: automata
>>> ca.boundary = None
>>> xs = [0,0,1,0,0]
>>> ca._unsafe_update(xs, index=1)
[0, 1, 1, 0, 0]
>>> xs
[0, 1, 1, 0, 0]
>>> ca.boundary = (1,1)
>>> ca._unsafe_update(xs, index=-1)
[0, 1, 1, 0, 1]
.. rubric:: State Pinning:
.. doctest:: automata
>>> ca.boundary = None
>>> xs = [0,0,1,0,0]
>>> ca._unsafe_update(xs, pin=[-2])
[0, 1, 1, 0, 0]
>>> ca.boundary = (1,1)
>>> ca._unsafe_update(xs, pin=[4])
[0, 1, 0, 1, 0]
.. rubric:: Value Fixing:
.. doctest:: automata
>>> ca.boundary = None
>>> xs = [0,0,1,0,0]
>>> ca._unsafe_update(xs, values={0:1,-2:0})
[1, 1, 1, 0, 0]
>>> ca.boundary = (1,1)
>>> xs = [1,1,1,0,0]
>>> ca._unsafe_update(xs, values={1:0,-1:0})
[0, 0, 0, 1, 0]
:param lattice: the one-dimensional sequence of states
:type lattice: sequence
:param index: the index to update (or None)
        :param pin: a sequence of indices to pin (or None)
:param values: a dictionary of index-value pairs to fix after update
:returns: the updated lattice
"""
pin_states = pin is not None and pin != []
if self.boundary:
left = self.__boundary[0]
right = self.__boundary[1]
else:
left = lattice[-1]
right = lattice[0]
code = self.code
if index is None:
if pin_states:
pinned = np.asarray(lattice)[pin]
temp = 2 * left + lattice[0]
for i in range(1, len(lattice)):
temp = 7 & (2 * temp + lattice[i])
lattice[i - 1] = 1 & (code >> temp)
temp = 7 & (2 * temp + right)
lattice[-1] = 1 & (code >> temp)
if pin_states:
for (j, i) in enumerate(pin):
lattice[i] = pinned[j]
else:
if index < 0:
index += len(lattice)
if index == 0:
temp = left
else:
temp = lattice[index - 1]
temp = 2 * temp + lattice[index]
if index + 1 == len(lattice):
temp = 2 * temp + right
else:
temp = 2 * temp + lattice[index + 1]
lattice[index] = 1 & (code >> (7 & temp))
if values is not None:
for key in values:
lattice[key] = values[key]
return lattice
def neighbors_in(self, index, *args, **kwargs):
"""
Return the set of all incoming neighbor nodes.
        In the case of lattices with fixed boundary conditions, the left
        boundary, sitting to the left of the leftmost cell (index 0), has an
        index of -1, while the right boundary's index is size+1. The full
        state of the lattice and its boundaries is equivalent to `[cell0,
        cell1, ..., cellN, right_boundary, left_boundary]` if it is ever
        presented as a single list in Python.
:param index: node index
:param size: size of ECA
:returns: the set of all node indices which point toward the index node
:raises ValueError: if `index < 0 or index > n - 1`
.. rubric:: Basic Use:
.. doctest:: automata
        >>> net = ECA(30, 3)
>>> net.neighbors_in(1, size=3)
{0, 1, 2}
>>> net.neighbors_in(2, size=3)
{0, 1, 2}
>>> net.boundary = (1,1)
>>> net.neighbors_in(2, size=3)
{1, 2, 3}
>>> net.neighbors_in(0, 3)
{0, 1, -1}
.. rubric:: Erroneous Usage:
.. doctest:: automata
        >>> net = ECA(30, 3, boundary=(1, 1))
>>> net.neighbors_in(5, 3)
Traceback (most recent call last):
...
ValueError: index must be a non-negative integer less than size
"""
if not isinstance(index, int):
raise TypeError("index must be a non-negative integer")
size = self.size
if index < 0 or index > size - 1:
msg = "index must be a non-negative integer less than size"
raise ValueError(msg)
left, right = index - 1, index + 1
if left < 0 and self.boundary is None:
left = size - 1
if right > size - 1 and self.boundary is None:
right = 0
return {left, index, right}
def neighbors_out(self, index, *args, **kwargs):
"""
Return the set of all outgoing neighbor nodes.
Fixed boundaries are excluded as they are not affected by internal
states.
:param index: node index
:param size: size of ECA
:returns: the set of all node indices which point from the index node
:raises ValueError: if `index < 0 or index > n - 1`
.. rubric:: Basic Use:
.. doctest:: automata
        >>> net = ECA(30, 3)
>>> net.neighbors_out(1, 3)
{0, 1, 2}
>>> net.neighbors_out(2, 3)
{0, 1, 2}
>>> net.boundary = (1, 1)
>>> net.neighbors_out(2, 3)
{1, 2}
>>> net.neighbors_out(0, 3)
{0, 1}
.. rubric:: Erroneous Usage:
.. doctest:: automata
        >>> net = ECA(30, 3, boundary=(1, 1))
>>> net.neighbors_out(5, 3)
Traceback (most recent call last):
...
ValueError: index must be a non-negative integer less than size
"""
if not isinstance(index, int):
raise TypeError("index must be a non-negative integer")
size = self.size
if index < 0 or index > size - 1:
msg = "index must be a non-negative integer less than size"
raise ValueError(msg)
left, right = index - 1, index + 1
if left < 0:
left = size - 1 if self.boundary is None else 0
if right > size - 1:
right = 0 if self.boundary is None else size - 1
return {left, index, right}
def to_networkx_graph(self, *args, **kwargs):
kwargs['code'] = self.code
kwargs['boundary'] = self.boundary
return super(ECA, self).to_networkx_graph(*args, **kwargs)
BooleanNetwork.register(ECA)
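# --- Usage sketch (not part of the original module) ---------------------------
# Evolving a small rule-30 lattice for a few steps using the same private
# helper the doctests above rely on; a public update() wrapper on
# BooleanNetwork, if available, would normally be preferred.
#
#   ca = ECA(30, 5)
#   state = [0, 0, 1, 0, 0]
#   for _ in range(3):
#       ca._unsafe_update(state)   # updates `state` in place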
|
StarcoderdataPython
|
3259829
|
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
import numpy as np
import re
class TextPreprocessing:
def __init__(self, name):
if name.lower() == 'amazon':
self.path = "../dataset/amazon_cells_labelled.txt"
elif name.lower() == 'yelp':
self.path = "../dataset/yelp_labelled.txt"
elif name.lower() == 'imdb':
self.path = "../dataset/imdb_labelled.txt"
self.stop_words = stopwords.words('english')
unwanted_stopwords = {'no', 'nor', 'not', 'ain', 'aren', "aren't", 'couldn', 'what', 'which', 'who',
'whom',
'why', 'how', "couldn't", 'didn', "didn't", 'doesn', "doesn't", 'hadn', "hadn't",
'hasn',
"hasn't", 'haven', "haven't", 'isn', "isn't", 'ma', 'mightn', "mightn't", 'mustn',
"mustn't", 'needn', "needn't", 'shan', "shan't", 'shouldn', "shouldn't", 'wasn',
"wasn't",
'weren', "weren't", 'won', "won't", 'wouldn', "wouldn't", 'don', "don't"}
self.stop_words = [ele for ele in self.stop_words if ele not in unwanted_stopwords]
self.wordnet_lemmatizer = WordNetLemmatizer()
self.embeddings_path = '../dataset/GloVe_Word_Embeddings/glove.6B.100d.txt'
# ------------------------------------ READ DATA FROM .txt FILES ------------------------------------
def get_data(self):
file = open(self.path, "r")
data = file.readlines()
corpus = []
labels = []
for d in data:
d = d.split("\t")
corpus.append(d[0])
labels.append(d[1].replace("\n", ""))
file.close()
return corpus, labels
# ------------------------------------ PREPROCESS TEXT ------------------------------------
def preprocess_text(self, user_text):
        # Remove punctuation and numbers
user_text = re.sub('[^a-zA-Z]', ' ', user_text)
# Remove single characters
user_text = re.sub(r"\s+[a-zA-Z]\s+", ' ', user_text)
# remove multiple spaces
user_text = re.sub(r'\s+', ' ', user_text)
user_text = user_text.lower()
# Convert Text sentence to Tokens
user_text = word_tokenize(user_text)
        # Remove unnecessary stopwords
        filtered_text = []
        for t in user_text:
            if t not in self.stop_words:
                filtered_text.append(t)
        # Word lemmatization (noun, then verb, then adjective form)
        processed_text = []
        for t in filtered_text:
            word1 = self.wordnet_lemmatizer.lemmatize(t, pos="n")
            word2 = self.wordnet_lemmatizer.lemmatize(word1, pos="v")
            word3 = self.wordnet_lemmatizer.lemmatize(word2, pos="a")
            processed_text.append(word3)
        result = " ".join(processed_text)
        return result
# ------------------------------------- GENERATE COUNT FEATURES AS VECTORS---------------------------------
def count_vectorize(self, X_train, X_test):
count_vect = CountVectorizer(analyzer='word', token_pattern=r'\w{1,}')
count_vect.fit(X_train)
# transform the training and validation data using count vectorizer object
xtrain_count = count_vect.transform(X_train)
xvalid_count = count_vect.transform(X_test)
return xtrain_count,xvalid_count
# ---------------------- GENERATE WORD LEVEL TF-IDF FEATURES AS VECTORS---------------------------------
def word_TF_IDF_vectorize(self, X_train, X_test):
tfidf_vect = TfidfVectorizer(analyzer='word', token_pattern=r'\w{1,}', max_features=10000)
tfidf_vect.fit(X_train)
xtrain_tfidf = tfidf_vect.transform(X_train)
xvalid_tfidf = tfidf_vect.transform(X_test)
return xtrain_tfidf, xvalid_tfidf
# ---------------------- GENERATE n-gram LEVEL TF-IDF FEATURES AS VECTORS---------------------------------
def n_gram_TF_IDF_vectorize(self, X_train, X_test):
tfidf_vect_ngram = TfidfVectorizer(analyzer='word', token_pattern=r'\w{1,}', ngram_range=(2, 3),
max_features=10000)
tfidf_vect_ngram.fit(X_train)
xtrain_tfidf_ngram = tfidf_vect_ngram.transform(X_train)
xvalid_tfidf_ngram = tfidf_vect_ngram.transform(X_test)
return xtrain_tfidf_ngram, xvalid_tfidf_ngram
# --------------------- GENERATE CHARACTER LEVEL TF-IDF FEATURES AS VECTORS------------------------------
def char_TF_IDF_vectorize(self, X_train, X_test):
        tfidf_vect_ngram_chars = TfidfVectorizer(analyzer='char', ngram_range=(2, 3),
                                                 max_features=10000)
tfidf_vect_ngram_chars.fit(X_train)
xtrain_tfidf_ngram_chars = tfidf_vect_ngram_chars.transform(X_train)
xvalid_tfidf_ngram_chars = tfidf_vect_ngram_chars.transform(X_test)
return xtrain_tfidf_ngram_chars, xvalid_tfidf_ngram_chars
# ------------------------------------- TOKENIZE AND PAD FOR TRAINING--------------------------------------
def tokenizer_and_pad_training(self, X_train, X_test, max_words, oov_word, padding_type, truncating_type, pad_len):
# Generate Tokens sequences
tokenizer = Tokenizer(num_words=max_words, oov_token=oov_word)
tokenizer.fit_on_texts(X_train)
X_train = tokenizer.texts_to_sequences(X_train)
X_test = tokenizer.texts_to_sequences(X_test)
# Pad the sequences
vocab_size = len(tokenizer.word_index) + 2
X_train_padded = np.array(pad_sequences(X_train, padding=padding_type, truncating=truncating_type, maxlen=pad_len))
X_test_padded = np.array(pad_sequences(X_test, padding=padding_type, truncating=truncating_type, maxlen=pad_len))
return tokenizer, vocab_size, X_train_padded, X_test_padded
# ------------------------------------- TOKENIZE AND PAD FOR TRAINING--------------------------------------
def preprocess_and_tokenize_test_case(self, tokenizer, test_case, padding_type, truncating_type, pad_len):
processed_test_case = [self.preprocess_text(test_case)]
instance = tokenizer.texts_to_sequences(processed_test_case)
flat_list = []
for sublist in instance:
for item in sublist:
flat_list.append(item)
flat_list = [flat_list]
instance = pad_sequences(flat_list, padding=padding_type, truncating=truncating_type, maxlen=pad_len)
return instance
# ------------------------------------- GET EMBEDDING MATRIX --------------------------------------
def get_embedding_metrix(self, vocab_size, tokenizer):
embeddings_dictionary = dict()
glove_file = open(self.embeddings_path, encoding="utf8")
for line in glove_file:
records = line.split()
word = records[0]
vector_dimensions = np.asarray(records[1:], dtype='float32')
embeddings_dictionary[word] = vector_dimensions
glove_file.close()
embedding_matrix = np.zeros((vocab_size, 100))
for word, index in tokenizer.word_index.items():
embedding_vector = embeddings_dictionary.get(word)
if embedding_vector is not None:
embedding_matrix[index] = embedding_vector
return embedding_matrix
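# --- Usage sketch (not part of the original module) ---------------------------
# A hypothetical end-to-end flow: load a labelled corpus, clean it, split it,
# and build word-level TF-IDF features. It assumes the dataset files and the
# NLTK corpora (stopwords, punkt, wordnet) are available locally.
#
#   from sklearn.model_selection import train_test_split
#
#   tp = TextPreprocessing('amazon')
#   corpus, labels = tp.get_data()
#   corpus = [tp.preprocess_text(text) for text in corpus]
#   X_train, X_test, y_train, y_test = train_test_split(corpus, labels,
#                                                        test_size=0.2)
#   xtrain_tfidf, xvalid_tfidf = tp.word_TF_IDF_vectorize(X_train, X_test)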
|
StarcoderdataPython
|
4815417
|
<filename>weather_where_you_are.py
import location, requests, speech
loc = location.reverse_geocode(location.get_location())
city_country = "{City},{Country}".format(**loc[0])
print(f"Weather in {city_country}:")
APPID = "beb97c1ce62559bba4e81e28de8be095&q="
URL = f"http://api.openweathermap.org/data/2.5/weather?APPID={APPID}&q="
weather = requests.get(URL + city_country).json()
print("\n".join(f"{key}: {value}" for key, value in weather.items()))
speech.say(f"It is currently {weather['weather'][0]['main']} in {city_country}.")
|
StarcoderdataPython
|
1681080
|
#!/usr/bin/env python3
# coding: utf-8
#
# duivesteyn // Python // bmdOilPriceFetch
# https://github.com/duivesteyn/bmdOilPriceFetch
#
# Testing Scripts for BMD Oil Price Fetch. Code that gets data from Yahoo Finance
#
from bmdOilPriceFetch import bmdPriceFetch
import logging
def printOilPrice():
'''Get and Print WTI Oil Price'''
data = bmdPriceFetch()
if data is not None:
outputString = 'The price of WTI is $' + str("%.2f" % data['regularMarketPrice'])
logging.info(outputString)
else:
raise ValueError('Data Error Talking to Server - printOilPrice')
def printAStockPrice():
'''Get and Print the Price of a Company Stock (Yahoo! Finance Format)'''
ticker='AAPL'
data = bmdPriceFetch(ticker)
if data is not None:
outputString = "The price of " + ticker + " is $" + str("%.2f" % data['regularMarketPrice'])
logging.info(outputString)
else:
raise ValueError(f'Data Error Talking to Server - {ticker}')
def printAStockPriceThatDoesntExist():
'''Get and Print the Price of a Company Stock (Yahoo! Finance Format)'''
ticker='DONTKNOWTHISCODEDOH'
data = bmdPriceFetch(ticker)
if data is not None:
outputString = "The price of " + ticker + " is $" + str("%.2f" % data['regularMarketPrice'])
logging.info(outputString)
else:
raise ValueError(f'Data Error Talking to Server - {ticker}')
# Main body
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)  # make logging.info output visible
    printAStockPrice()
    printAStockPriceThatDoesntExist()
|
StarcoderdataPython
|
1662548
|
<gh_stars>1-10
from sqlalchemy import MetaData
from sqlalchemy.orm import declarative_base
# SQLite allows constraints to exist in the database that have no identifying name. These
# unnamed constraints create problems for migration. Therefore, a naming_convention is passed
# to the declarative base metadata for SQLite:
# https://docs.sqlalchemy.org/en/13/core/constraints.html#configuring-constraint-naming-conventions
postgres_naming_convention = {
"ix": "ix_%(column_0_N_label)s",
"uq": "uq_%(table_name)s_%(column_0_N_name)s",
"ck": "ck_%(table_name)s_%(constraint_name)s",
"fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
"pk": "pk_%(table_name)s",
}
sqlite_naming_convention = {
"ix": "ix_%(column_0_label)s",
"uq": "uq_%(table_name)s_%(column_0_name)s",
"ck": "ck_%(table_name)s_%(column_0_name)s",
"fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
"pk": "pk_%(table_name)s",
}
meta_sqlite = MetaData(naming_convention=sqlite_naming_convention)
meta_postgres = MetaData(naming_convention=postgres_naming_convention, schema="pepys")
# define this as the base for all the DB tables here in a common module
BasePostGIS = declarative_base(metadata=meta_postgres)
BaseSpatiaLite = declarative_base(metadata=meta_sqlite)
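# --- Usage sketch (not part of the original module) ---------------------------
# Tables derived from BaseSpatiaLite/BasePostGIS inherit the naming convention,
# so autogenerated constraint names are deterministic (useful for Alembic
# migrations). `Sensor` below is a hypothetical example table.
#
#   from sqlalchemy import Column, Integer, String
#
#   class Sensor(BaseSpatiaLite):
#       __tablename__ = "Sensors"
#       sensor_id = Column(Integer, primary_key=True)
#       name = Column(String(150), unique=True)  # -> constraint "uq_Sensors_name"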
|
StarcoderdataPython
|
1603634
|
from os.path import join, isdir
import glob
from subprocess import call
import numpy as np
from rastervision.common.utils import _makedirs
from rastervision.common.settings import VALIDATION
from rastervision.semseg.tasks.utils import (
make_prediction_img, plot_prediction, predict_x)
from rastervision.semseg.models.factory import SemsegModelFactory
MAKE_VIDEOS = 'make_videos'
def make_videos(run_path, options, generator):
model_factory = SemsegModelFactory()
videos_path = join(run_path, 'videos')
_makedirs(videos_path)
checkpoints_path = join(run_path, 'delta_model_checkpoints')
if not isdir(checkpoints_path):
print('Cannot make videos without delta_model_checkpoints.')
return
model_paths = glob.glob(join(checkpoints_path, '*.h5'))
model_paths.sort()
models = []
for model_path in model_paths:
model = model_factory.make_model(options, generator)
model.load_weights(model_path, by_name=True)
models.append(model)
split_gen = generator.make_split_generator(
VALIDATION, target_size=options.eval_target_size,
batch_size=1, shuffle=False, augment_methods=None, normalize=True,
only_xy=False)
for video_ind, batch in \
enumerate(split_gen):
x = np.squeeze(batch.x, axis=0)
y = np.squeeze(batch.y, axis=0)
display_y = generator.dataset.one_hot_to_rgb_batch(y)
all_x = np.squeeze(batch.all_x, axis=0)
make_video(
x, display_y, all_x, models, videos_path, video_ind,
options, generator)
if video_ind == options.nb_videos - 1:
break
def make_video(x, y, all_x, models, videos_path, video_ind, options,
generator):
video_path = join(videos_path, str(video_ind))
_makedirs(video_path)
for frame_ind, model in enumerate(models):
y_pred = make_prediction_img(
x, options.target_size[0],
lambda x: generator.dataset.one_hot_to_rgb_batch(
predict_x(x, model)))
print(video_ind)
print(frame_ind)
frame_path = join(
video_path, 'frame_{:0>4}.png'.format(frame_ind))
plot_prediction(generator, all_x, y, y_pred, frame_path)
frames_path = join(video_path, 'frame_%04d.png')
video_path = join(videos_path, '{}.mp4'.format(video_ind))
call(['avconv',
'-r', '2',
'-i', frames_path,
'-vf', 'scale=trunc(in_w/2)*2:trunc(in_h/2)*2',
video_path])
|
StarcoderdataPython
|
1672074
|
import abc
from copy import copy
from dataclasses import dataclass, field
import functools
import multiprocessing
from multiprocessing import synchronize
import threading
import time
import typing as tp
import stopit
from pypeln import utils as pypeln_utils
from . import utils
from .queue import IterableQueue, OutputQueues
WorkerConstructor = tp.Callable[[int, "StageParams", IterableQueue], "Worker"]
Kwargs = tp.Dict[str, tp.Any]
T = tp.TypeVar("T")
class ProcessFn(pypeln_utils.Protocol):
def __call__(self, worker: "Worker", **kwargs):
...
class StageParams(tp.NamedTuple):
input_queue: IterableQueue
output_queues: OutputQueues
namespace: utils.Namespace
@classmethod
def create(
cls, input_queue: IterableQueue, output_queues: OutputQueues, total_workers: int
) -> "StageParams":
return cls(
namespace=utils.Namespace(active_workers=total_workers),
input_queue=input_queue,
output_queues=output_queues,
)
def worker_done(self):
with self.namespace:
self.namespace.active_workers -= 1
class WorkerInfo(tp.NamedTuple):
index: int
@dataclass
class Worker(tp.Generic[T]):
process_fn: ProcessFn
index: int
timeout: float
stage_params: StageParams
main_queue: IterableQueue
on_start: tp.Optional[tp.Callable[..., Kwargs]]
on_done: tp.Optional[tp.Callable[..., Kwargs]]
use_threads: bool
f_args: tp.List[str]
namespace: utils.Namespace = field(
default_factory=lambda: utils.Namespace(done=False, task_start_time=None)
)
process: tp.Optional[tp.Union[multiprocessing.Process, threading.Thread]] = None
def __call__(self):
worker_info = WorkerInfo(index=self.index)
on_start_args: tp.List[str] = (
pypeln_utils.function_args(self.on_start) if self.on_start else []
)
on_done_args: tp.List[str] = (
pypeln_utils.function_args(self.on_done) if self.on_done else []
)
try:
if self.on_start is not None:
on_start_kwargs = dict(worker_info=worker_info)
kwargs = self.on_start(
**{
key: value
for key, value in on_start_kwargs.items()
if key in on_start_args
}
)
else:
kwargs = {}
if kwargs is None:
kwargs = {}
kwargs.setdefault("worker_info", worker_info)
self.process_fn(
self,
**{key: value for key, value in kwargs.items() if key in self.f_args},
)
self.stage_params.worker_done()
if self.on_done is not None:
kwargs.setdefault(
"stage_status",
StageStatus(
namespace=self.stage_params.namespace,
),
)
self.on_done(
**{
key: value
for key, value in kwargs.items()
if key in on_done_args
}
)
self.stage_params.output_queues.worker_done()
except pypeln_utils.StopThreadException:
pass
except BaseException as e:
self.main_queue.raise_exception(e)
time.sleep(0.01)
finally:
self.done()
def start(self):
[self.process] = start_workers(self, use_threads=self.use_threads)
def stop(self):
if self.process is None:
return
if not self.process.is_alive():
return
if isinstance(self.process, multiprocessing.Process):
self.process.terminate()
else:
stopit.async_raise(
self.process.ident,
pypeln_utils.StopThreadException,
)
self.namespace.task_start_time = None
def done(self):
self.namespace.done = True
def did_timeout(self):
task_start_time = self.namespace.task_start_time
done = self.namespace.done
return (
self.timeout
and not done
and task_start_time is not None
and (time.time() - task_start_time > self.timeout)
)
@dataclass
class MeasureTaskTime:
worker: "Worker"
def __enter__(self):
self.worker.namespace.task_start_time = time.time()
def __exit__(self, *args):
self.worker.namespace.task_start_time = None
def measure_task_time(self):
return self.MeasureTaskTime(self)
class Applicable(pypeln_utils.Protocol):
def apply(self, worker: "Worker", elem: tp.Any, **kwargs):
...
class ApplyProcess(ProcessFn, Applicable):
def __call__(self, worker: Worker, **kwargs):
for elem in worker.stage_params.input_queue:
with worker.measure_task_time():
self.apply(worker, elem, **kwargs)
class StageStatus:
"""
Object passed to various `on_done` callbacks. It contains information about the stage in case book keeping is needed.
"""
def __init__(self, namespace):
self._namespace = namespace
@property
def done(self) -> bool:
"""
`bool` : `True` if all workers finished.
"""
return self._namespace.active_workers == 0
@property
def active_workers(self):
"""
`int` : Number of active workers.
"""
return self._namespace.active_workers
def __str__(self):
return (
f"StageStatus(done = {self.done}, active_workers = {self.active_workers})"
)
# ----------------------------------------------------------------
# create_daemon_workers
# ----------------------------------------------------------------
def start_workers(
target: tp.Callable,
n_workers: int = 1,
args: tp.Tuple[tp.Any, ...] = tuple(),
kwargs: tp.Optional[tp.Dict[tp.Any, tp.Any]] = None,
use_threads: bool = False,
) -> tp.Union[tp.List[multiprocessing.Process], tp.List[threading.Thread]]:
if kwargs is None:
kwargs = {}
workers = []
for _ in range(n_workers):
if use_threads:
t = threading.Thread(target=target, args=args, kwargs=kwargs)
else:
t = multiprocessing.Process(target=target, args=args, kwargs=kwargs)
t.daemon = True
t.start()
workers.append(t)
return workers
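# --- Usage sketch (not part of the original module) ---------------------------
# start_workers is a thin helper over Process/Thread; a minimal standalone call
# (the target function here is hypothetical):
#
#   def echo(msg):
#       print(msg)
#
#   threads = start_workers(echo, n_workers=2, args=("hello",), use_threads=True)
#   for t in threads:
#       t.join()
#
# Worker.start() uses the same helper with n_workers=1 to spawn its own daemon
# process or thread.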
|
StarcoderdataPython
|
3216201
|
#!/usr/bin/env python
##############################################################################
# Imports
##############################################################################
import rosdistro
import catkin_pkg
from rosjava_build_tools import catkin
##############################################################################
# Functions
##############################################################################
def scrape_for_release_message_packages(track):
url = rosdistro.get_index_url()
index = rosdistro.get_index(url)
cache = rosdistro.get_release_cache(index, 'kinetic')
packages = []
for package_name, package_string in cache.package_xmls.items():
package = catkin_pkg.package.parse_package_string(package_string)
#print(" Name: %s" % package_name)
#print(" Buildtool Depends %s" % package.build)
if catkin.has_build_depend_on_message_generation(package):
packages.append({'name': package_name, 'version': package.version})
return packages
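# --- Usage sketch (not part of the original module) ---------------------------
# Note that the `track` argument is currently unused above (the distro name is
# hard-coded to 'kinetic'); a hypothetical call:
#
#   packages = scrape_for_release_message_packages('kinetic')
#   for pkg in packages:
#       print(pkg['name'], pkg['version'])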
|
StarcoderdataPython
|
195672
|
<reponame>umyuu/Sample<gh_stars>0
# -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import sys
def main():
import platform
print(platform.architecture())
print(sys.version)
options = Options()
options.add_argument('--headless')
options.add_argument('--no-sandbox')
options.add_argument('--disable-gpu')
driver = webdriver.Chrome(chrome_options=options, executable_path=r"C:\selenium\chromedriver.exe")
driver.get('https://www.google.co.jp')
driver.save_screenshot('screen.png')
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
149134
|
import numpy as np
from .. import Geometry, Line, LineSegmentMaterial
class BoxHelper(Line):
"""A line box object. Commonly used to visualize bounding boxes.
Parameters:
        size (float): The length of the box's edges (default 1).
        thickness (float): The thickness of the lines (default 1 px).
"""
def __init__(self, size=1.0, thickness=1):
self._size = size
positions = np.array(
[
[0, 0, 0], # bottom edges
[1, 0, 0],
[0, 0, 0],
[0, 0, 1],
[1, 0, 1],
[1, 0, 0],
[1, 0, 1],
[0, 0, 1],
[0, 1, 0], # top edges
[1, 1, 0],
[0, 1, 0],
[0, 1, 1],
[1, 1, 1],
[1, 1, 0],
[1, 1, 1],
[0, 1, 1],
[0, 0, 0], # side edges
[0, 1, 0],
[1, 0, 0],
[1, 1, 0],
[0, 0, 1],
[0, 1, 1],
[1, 0, 1],
[1, 1, 1],
],
dtype="f4",
)
positions -= 0.5
positions *= self._size
geometry = Geometry(positions=positions)
material = LineSegmentMaterial(color=(1, 0, 0), thickness=thickness, aa=True)
super().__init__(geometry, material)
def set_transform_by_aabb(self, aabb):
"""Set the position and scale attributes
based on a given bounding box.
Parameters:
aabb (ndarray): The position and scale attributes
will be configured such that the helper
will match the given bounding box. The array
is expected to have shape (2, 3), where the
two vectors represent the minimum and maximum
coordinates of the axis-aligned bounding box.
"""
aabb = np.asarray(aabb)
if aabb.shape != (2, 3):
raise ValueError(
"The given array does not appear to represent "
"an axis-aligned bounding box, ensure "
"the shape is (2, 3). Shape given: "
f"{aabb.shape}"
)
diagonal = aabb[1] - aabb[0]
center = aabb[0] + diagonal * 0.5
scale = diagonal / self._size
self.position.set(*center)
self.scale.set(*scale)
def set_transform_by_object(self, object, space="world"):
"""Set the position and scale attributes
based on the bounding box of another object.
Parameters:
object (WorldObject): The position and scale attributes
will be configured such that the helper
will match the bounding box of the given object.
space (string, optional): If set to "world"
(the default) the world space bounding box will
be used as reference. If equal to "local", the
object's local space bounding box of its geometry
will be used instead.
:Examples:
World-space bounding box visualization:
.. code-block:: py
box = gfx.BoxHelper()
box.set_transform_by_object(mesh)
scene.add(box)
Local-space bounding box visualization:
.. code-block:: py
box = gfx.BoxHelper()
box.set_transform_by_object(mesh, space="local")
mesh.add(box)
"""
aabb = None
if space not in {"world", "local"}:
            raise ValueError(
                'Space argument must be either "world" '
                f'or "local". Given value: {space}'
            )
if space == "world":
aabb = object.get_world_bounding_box()
elif space == "local" and object.geometry is not None:
aabb = object.geometry.bounding_box()
if aabb is None:
raise ValueError(
"No bounding box could be determined "
"for the given object, it (and its "
"children) may not define any geometry"
)
self.set_transform_by_aabb(aabb)
|
StarcoderdataPython
|
35713
|
from .cs_loader import CSPointDataset
from .cs_class_loader import CSClassDataset
from .cs_seed_loader import CSSeedDataset
|
StarcoderdataPython
|
116935
|
<gh_stars>1-10
import json
import os
from tqdm import tqdm
import shapefile
import us
from geography.models import Division, Geometry
from geography.utils.lookups import township_states
class StateFixtures(object):
def create_state_fixtures(self):
SHP_SLUG = "cb_{}_us_state_500k".format(self.YEAR)
DOWNLOAD_PATH = os.path.join(self.DOWNLOAD_DIRECTORY, SHP_SLUG)
shape = shapefile.Reader(
os.path.join(DOWNLOAD_PATH, "{}.shp".format(SHP_SLUG))
)
fields = shape.fields[1:]
field_names = [f[0] for f in fields]
nation_obj = Division.objects.get(code="00", level=self.NATIONAL_LEVEL)
for shp in tqdm(shape.shapeRecords(), desc="States"):
state = dict(zip(field_names, shp.record))
postal = us.states.lookup(state["STATEFP"]).abbr
# Skip territories and DC
if int(state["STATEFP"]) > 56 or int(state["STATEFP"]) == 11:
continue
state_obj, created = Division.objects.update_or_create(
code=state["STATEFP"],
level=self.STATE_LEVEL,
parent=nation_obj,
defaults={
"name": state["NAME"],
"label": state["NAME"],
"code_components": {
"fips": {"state": state["STATEFP"]},
"postal": postal,
},
},
)
geodata = {
"type": "Feature",
"geometry": shp.shape.__geo_interface__,
"properties": {
"state": state["STATEFP"],
"name": state["NAME"],
},
}
geojson, created = Geometry.objects.update_or_create(
division=state_obj,
subdivision_level=self.STATE_LEVEL,
simplification=self.THRESHOLDS["state"],
source=os.path.join(
self.SHP_SOURCE_BASE.format(self.YEAR), SHP_SLUG
)
+ ".zip",
series=self.YEAR,
defaults={
"topojson": self.toposimplify(
geodata, self.THRESHOLDS["state"]
)
},
)
geojson, created = Geometry.objects.update_or_create(
division=state_obj,
subdivision_level=self.COUNTY_LEVEL,
simplification=self.THRESHOLDS["county"],
source=os.path.join(
self.SHP_SOURCE_BASE.format(self.YEAR), SHP_SLUG
)
+ ".zip",
series=self.YEAR,
defaults={
"topojson": self.get_state_county_shps(state["STATEFP"])
},
)
geojson, created = Geometry.objects.update_or_create(
division=state_obj,
subdivision_level=self.DISTRICT_LEVEL,
simplification=self.THRESHOLDS["district"],
source=os.path.join(
self.SHP_SOURCE_BASE.format(self.YEAR),
"cb_{}_us_cd{}_500k".format(self.YEAR, self.CONGRESS),
)
+ ".zip",
series=self.YEAR,
defaults={
"topojson": self.get_state_district_shps(state["STATEFP"])
},
)
if postal in township_states:
geojson, created = Geometry.objects.update_or_create(
division=state_obj,
subdivision_level=self.TOWNSHIP_LEVEL,
simplification=self.THRESHOLDS["county"],
source=os.path.join(
self.SHP_SOURCE_BASE.format(self.YEAR),
"cb_{}_{}_cousub_500k".format(
self.YEAR, state["STATEFP"]
),
)
+ ".zip",
series=self.YEAR,
defaults={
"topojson": self.get_state_township_shps(
state["STATEFP"]
)
},
)
tqdm.write(
self.TQDM_PREFIX
+ "> FIPS {} @ ~{}kb ".format(
state["STATEFP"],
round(len(json.dumps(geojson.topojson)) / 1000),
)
)
tqdm.write(self.style.SUCCESS("Done.\n"))
|
StarcoderdataPython
|
3371780
|
#!/usr/bin/python3
# set up the database
import os
import sys
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
Base = declarative_base()  # declarative base; each subclass maps to a database table
# create classes corresponding to the tables
class User(Base):
"""class to create the table user"""
__tablename__ = "user"
id = Column(Integer, primary_key=True)
name = Column(String(250), nullable=False)
email = Column(String(250), nullable=False)
class Category(Base):
"""class to create table category"""
__tablename__ = "category"
id = Column(Integer, primary_key=True)
name = Column(String(50), nullable=False)
user_id = Column(Integer, ForeignKey('user.id'))
user = relationship(User)
@property
def serialize(self):
"""return object data in easy format"""
return {
'id': self.id,
'name': self.name,
'user_id': self.user_id
}
class Item(Base):
"""class to create the table item"""
__tablename__ = "item"
id = Column(Integer, primary_key=True)
name = Column(String(80), nullable=False)
description = Column(String(250))
price = Column(String(8))
category_id = Column(Integer, ForeignKey('category.id'))
category = relationship(Category)
user_id = Column(Integer, ForeignKey('user.id'))
user = relationship(User)
@property
def serialize(self):
"""return object data in easy format"""
return {
'id': self.id,
'name': self.name,
'description': self.description,
'price': self.price,
'user_id': self.user_id,
'category_id': self.category_id
}
# added at the end: create the database tables
engine = create_engine('postgresql:///var/www/catlog/catalog2/itemcatalog.db')
Base.metadata.create_all(engine)
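# --- Usage sketch (not part of the original module) ---------------------------
# Opening a session against the engine above and inserting one category with
# one item; the names and values are illustrative only.
#
#   from sqlalchemy.orm import sessionmaker
#
#   Session = sessionmaker(bind=engine)
#   session = Session()
#   user = User(name="Alice", email="alice@example.com")
#   category = Category(name="Snowboarding", user=user)
#   item = Item(name="Goggles", description="Anti-fog goggles", price="$20",
#               category=category, user=user)
#   session.add_all([user, category, item])
#   session.commit()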
|
StarcoderdataPython
|
138340
|
"""
MIT License
Copyright (c) 2021 UltronRoBo
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from enum import IntEnum, unique
from UltronRoBo.modules.helper_funcs.string_handling import button_markdown_parser
from telegram import Message
@unique
class Types(IntEnum):
    TEXT = 0
    BUTTON_TEXT = 1
    STICKER = 2
    DOCUMENT = 3
    PHOTO = 4
    AUDIO = 5
    VOICE = 6
    VIDEO = 7
    VIDEO_NOTE = 8  # referenced by get_welcome_type/get_filter_type below
def get_note_type(msg: Message):
data_type = None
content = None
text = ""
raw_text = msg.text or msg.caption
args = raw_text.split(None, 2) # use python's maxsplit to separate cmd and args
note_name = args[1]
buttons = []
# determine what the contents of the filter are - text, image, sticker, etc
if len(args) >= 3:
offset = len(args[2]) - len(
raw_text
) # set correct offset relative to command + notename
text, buttons = button_markdown_parser(
args[2],
entities=msg.parse_entities() or msg.parse_caption_entities(),
offset=offset,
)
if buttons:
data_type = Types.BUTTON_TEXT
else:
data_type = Types.TEXT
elif msg.reply_to_message:
entities = msg.reply_to_message.parse_entities()
msgtext = msg.reply_to_message.text or msg.reply_to_message.caption
if len(args) >= 2 and msg.reply_to_message.text: # not caption, text
text, buttons = button_markdown_parser(msgtext, entities=entities)
if buttons:
data_type = Types.BUTTON_TEXT
else:
data_type = Types.TEXT
elif msg.reply_to_message.sticker:
content = msg.reply_to_message.sticker.file_id
data_type = Types.STICKER
elif msg.reply_to_message.document:
content = msg.reply_to_message.document.file_id
text, buttons = button_markdown_parser(msgtext, entities=entities)
data_type = Types.DOCUMENT
elif msg.reply_to_message.photo:
content = msg.reply_to_message.photo[-1].file_id # last elem = best quality
text, buttons = button_markdown_parser(msgtext, entities=entities)
data_type = Types.PHOTO
elif msg.reply_to_message.audio:
content = msg.reply_to_message.audio.file_id
text, buttons = button_markdown_parser(msgtext, entities=entities)
data_type = Types.AUDIO
elif msg.reply_to_message.voice:
content = msg.reply_to_message.voice.file_id
text, buttons = button_markdown_parser(msgtext, entities=entities)
data_type = Types.VOICE
elif msg.reply_to_message.video:
content = msg.reply_to_message.video.file_id
text, buttons = button_markdown_parser(msgtext, entities=entities)
data_type = Types.VIDEO
return note_name, text, data_type, content, buttons
# note: add own args?
def get_welcome_type(msg: Message):
data_type = None
content = None
text = ""
try:
if msg.reply_to_message:
if msg.reply_to_message.text:
args = msg.reply_to_message.text
else:
args = msg.reply_to_message.caption
else:
args = msg.text.split(
None, 1
) # use python's maxsplit to separate cmd and args
except AttributeError:
args = False
if msg.reply_to_message and msg.reply_to_message.sticker:
content = msg.reply_to_message.sticker.file_id
text = None
data_type = Types.STICKER
elif msg.reply_to_message and msg.reply_to_message.document:
content = msg.reply_to_message.document.file_id
text = msg.reply_to_message.caption
data_type = Types.DOCUMENT
elif msg.reply_to_message and msg.reply_to_message.photo:
content = msg.reply_to_message.photo[-1].file_id # last elem = best quality
text = msg.reply_to_message.caption
data_type = Types.PHOTO
elif msg.reply_to_message and msg.reply_to_message.audio:
content = msg.reply_to_message.audio.file_id
text = msg.reply_to_message.caption
data_type = Types.AUDIO
elif msg.reply_to_message and msg.reply_to_message.voice:
content = msg.reply_to_message.voice.file_id
text = msg.reply_to_message.caption
data_type = Types.VOICE
elif msg.reply_to_message and msg.reply_to_message.video:
content = msg.reply_to_message.video.file_id
text = msg.reply_to_message.caption
data_type = Types.VIDEO
elif msg.reply_to_message and msg.reply_to_message.video_note:
content = msg.reply_to_message.video_note.file_id
text = None
data_type = Types.VIDEO_NOTE
buttons = []
# determine what the contents of the filter are - text, image, sticker, etc
if args:
if msg.reply_to_message:
argumen = (
msg.reply_to_message.caption if msg.reply_to_message.caption else ""
)
offset = 0 # offset is no need since target was in reply
entities = msg.reply_to_message.parse_entities()
else:
argumen = args[1]
offset = len(argumen) - len(
msg.text
) # set correct offset relative to command + notename
entities = msg.parse_entities()
text, buttons = button_markdown_parser(
argumen, entities=entities, offset=offset
)
if not data_type:
if text and buttons:
data_type = Types.BUTTON_TEXT
elif text:
data_type = Types.TEXT
return text, data_type, content, buttons
def get_filter_type(msg: Message):
if not msg.reply_to_message and msg.text and len(msg.text.split()) >= 3:
content = None
text = msg.text.split(None, 2)[2]
data_type = Types.TEXT
elif (
msg.reply_to_message
and msg.reply_to_message.text
and len(msg.text.split()) >= 2
):
content = None
text = msg.reply_to_message.text
data_type = Types.TEXT
elif msg.reply_to_message and msg.reply_to_message.sticker:
content = msg.reply_to_message.sticker.file_id
text = None
data_type = Types.STICKER
elif msg.reply_to_message and msg.reply_to_message.document:
content = msg.reply_to_message.document.file_id
text = msg.reply_to_message.caption
data_type = Types.DOCUMENT
elif msg.reply_to_message and msg.reply_to_message.photo:
content = msg.reply_to_message.photo[-1].file_id # last elem = best quality
text = msg.reply_to_message.caption
data_type = Types.PHOTO
elif msg.reply_to_message and msg.reply_to_message.audio:
content = msg.reply_to_message.audio.file_id
text = msg.reply_to_message.caption
data_type = Types.AUDIO
elif msg.reply_to_message and msg.reply_to_message.voice:
content = msg.reply_to_message.voice.file_id
text = msg.reply_to_message.caption
data_type = Types.VOICE
elif msg.reply_to_message and msg.reply_to_message.video:
content = msg.reply_to_message.video.file_id
text = msg.reply_to_message.caption
data_type = Types.VIDEO
elif msg.reply_to_message and msg.reply_to_message.video_note:
content = msg.reply_to_message.video_note.file_id
text = None
data_type = Types.VIDEO_NOTE
else:
text = None
data_type = None
content = None
return text, data_type, content
|
StarcoderdataPython
|
57146
|
<reponame>dungdinhanh/datafreeinverse
import numpy as np
from sklearn.datasets import make_moons
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from utils import setup_dir
#%matplotlib inline
print("Using PyTorch Version %s" %torch.__version__)
from network import network
basedir = setup_dir('outputs/')
''' Training data generation '''
np.random.seed(0)
torch.manual_seed(0)
X, Y = make_moons(500, noise=0.05)
# Split into test and training data
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.25, random_state=73)
'''
plt.figure(figsize=(12,8))
plt.scatter(X_train[:,0], X_train[:,1], c=Y_train)
plt.title('Moon Data')
plt.show()
'''
''' algorithm's options '''
baseline = False #use baseline deep inversion.
discrete_label = True #use discrete labels or continous labels [true is best].
use_generator = True #use the generator with deepinversion model.
knowledge_distill = 0.00 #transfer knowledge to student network (default 0.02).
noisify_network = 0.00 #add noise to the pre-trained classifier, this value is the weight of noise. (default 0.1).
mutual_info = 0.00 #reconstruct the latent samples to be idential to the original latent inputs (default 0.1).
batchnorm_transfer = 0.00 #transfer batchnorm from classifier to generator (default 0.02).
use_discriminator = 0.00 #adversarial training with the discriminator based on batch-norm (default 0.01).
''' hyper-parameters '''
n_samples = 128 #batch size
if use_generator == True:
lr = 0.001 #* (n_samples / 128.)**0.5
else:
lr = 0.025
net = network(X, Y, n_hidden=15, lr=lr, n_samples=n_samples, basedir=basedir)
''' training the network '''
net.train(n_iters=1000, plotfig=False)
#net.plot_training_results()
# plot decision boundary
#net.plot_testing_results(X_train, Y_train)
#net.plot_testing_results(X_test, Y_test)
'''
Optimized with deep inversion, important hyper-parameters and observations:
1. The learning rate of classifier.
2. The learning rate of optimizer for generator or samples.
3. The size of generator network (finding the correct size is important).
4. The batch-norm normalization.
5. Adding noise to the pre-trained network is not too helpful on 2d-toy dataset.
6. Knowledge distillation does not help much on 2d-toy dataset.
'''
if baseline == True:
net.deepinversion(use_generator = use_generator, \
discrete_label = discrete_label, \
knowledge_distill = knowledge_distill, \
n_iters=1000)
else:
net.deepinversion_improved(use_generator = use_generator, \
discrete_label = discrete_label, \
knowledge_distill = knowledge_distill, \
noisify_network = noisify_network, \
mutual_info = mutual_info, \
batchnorm_transfer = batchnorm_transfer,\
use_discriminator = use_discriminator, \
n_iters = 1000)
|
StarcoderdataPython
|
5174
|
import os.path
from tron import g, hub
from tron.Hub.Command.Encoders.ASCIICmdEncoder import ASCIICmdEncoder
from tron.Hub.Nub.TCCShellNub import TCCShellNub
from tron.Hub.Reply.Decoders.ASCIIReplyDecoder import ASCIIReplyDecoder
name = 'tcc'
def start(poller):
stop()
initCmds = ('show version', 'show users', 'show time', 'show status', 'show inst/full',
'show object/full', 'show axisconfig', 'show focus', 'axis status', 'show scale',
'mir status')
safeCmds = r'(^show )|(status$)'
d = ASCIIReplyDecoder(EOL='\r', stripChars='\n', CIDfirst=False, debug=1)
e = ASCIICmdEncoder(EOL='\r', debug=1, CIDfirst=False)
tcc = TCCShellNub(poller, [
'/usr/bin/ssh', '-1', '-e', 'none', '-a', '-x', '-i',
os.path.expanduser('~/.ssh/tron'), '-T', 'tccuser@tcc25m'
],
initCmds=initCmds,
safeCmds=safeCmds,
needsAuth=True,
name=name,
encoder=e,
decoder=d,
logDir=os.path.join(g.logDir, name),
debug=1)
hub.addActor(tcc)
def stop():
n = hub.findActor(name)
if n:
hub.dropActor(n)
del n
|
StarcoderdataPython
|
12152
|
<filename>app/admin.py
from django.contrib import admin
from .models import Placement_Company_Detail,Profile,StudentBlogModel,ResorcesModel
admin.site.register(Placement_Company_Detail)
admin.site.register(Profile)
admin.site.register(StudentBlogModel)
admin.site.register(ResorcesModel)
|
StarcoderdataPython
|
143492
|
<filename>server/cough2.py
import librosa
import pandas as pd
import os
from keras.callbacks import ModelCheckpoint
from tensorflow.python.keras.models import load_model
import numpy as np
model = load_model('weights1109_4.best.basic_cnn.hdf5')
featuresdf = pd.read_pickle("featuresdf.pkl")
from sklearn.preprocessing import LabelEncoder
from keras.utils import to_categorical
def coughdetect(coughfile):
# Convert features and corresponding classification labels into numpy arrays
X = np.array(featuresdf.feature.tolist())
y = np.array(featuresdf.class_label.tolist())
# Encode the classification labels
le = LabelEncoder()
yy = to_categorical(le.fit_transform(y))
max_pad_len = 1000
num_rows = 40
num_columns = 1000
num_channels = 1
def extract_features(file_name):
try:
audio, sample_rate = librosa.load(file_name, res_type='kaiser_fast')
mfccs = librosa.feature.mfcc(y=audio, sr=sample_rate, n_mfcc=40)
pad_width = max_pad_len - mfccs.shape[1]
mfccs = np.pad(mfccs, pad_width=((0, 0), (0, pad_width)), mode='constant')
except Exception as e:
print("Error encountered while parsing file: ", file_name)
return None
return mfccs
def print_prediction(file_name):
prediction_feature = extract_features(file_name)
prediction_feature = prediction_feature.reshape(1, num_rows, num_columns, num_channels)
predicted_vector = model.predict_classes(prediction_feature)
predicted_class = le.inverse_transform(predicted_vector)
# print("The predicted class is:", predicted_class[0], '---','\n')
# return predicted_class[0]
# print(predicted_class[0])
if (predicted_class[0]=="cough") :
return 1
else:
return 0
# return predicted_class[0]=="cough"
# predicted_proba_vector = model.predict_proba(prediction_feature)
# predicted_proba = predicted_proba_vector[0]
# for i in range(len(predicted_proba)):
# category = le.inverse_transform(np.array([i]))
# print(category[0], "\t\t : ", format(predicted_proba[i], '.32f') )
return print_prediction(coughfile)==1
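# --- Usage sketch (not part of the original module) ---------------------------
# Classifying a single recording; the file path is hypothetical and the model
# weights and featuresdf.pkl loaded above must be present.
#
#   if coughdetect("uploads/recording_001.wav"):
#       print("cough detected")
#   else:
#       print("no cough detected")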
|
StarcoderdataPython
|
3232879
|
<reponame>TuDatTr/P4STA<gh_stars>10-100
# Copyright 2019-present <NAME>, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import rpyc
import subprocess
import time
import traceback
import zipfile
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.http import JsonResponse
from django.shortcuts import render
from pathlib import Path
# custom python modules
from core import P4STA_utils
# globals
from management_ui import globals
def setup_devices(request):
if request.method == "POST":
print(request.POST)
setup_devices_cfg = {}
if request.POST.get("enable_stamper") == "on":
setup_devices_cfg["stamper_user"] = request.POST["stamper_user"]
setup_devices_cfg["stamper_ssh_ip"] = request.POST["stamper_ip"]
setup_devices_cfg["selected_stamper"] = request.POST[
"selected_stamper"]
target_cfg = globals.core_conn.root.get_target_cfg(
setup_devices_cfg["selected_stamper"])
setup_devices_cfg["target_specific_dict"] = {}
if "config" in target_cfg \
and "stamper_specific" in target_cfg["config"]:
for cfg in target_cfg["config"]["stamper_specific"]:
if cfg["target_key"] in request.POST:
setup_devices_cfg["target_specific_dict"][
cfg["target_key"]] = request.POST[
cfg["target_key"]]
if request.POST.get(
"enable_ext_host") == "on" and "ext_host_user" in request.POST:
setup_devices_cfg["ext_host_user"] = request.POST["ext_host_user"]
setup_devices_cfg["ext_host_ssh_ip"] = request.POST["ext_host_ip"]
setup_devices_cfg["selected_extHost"] = request.POST[
"selected_extHost"]
setup_devices_cfg["selected_loadgen"] = request.POST[
"selected_loadgen"]
setup_devices_cfg["loadgens"] = []
for i in range(1, 99):
if ("loadgen_user_" + str(i)) in request.POST:
loadgen = {"loadgen_user": request.POST[
"loadgen_user_" + str(i)],
"loadgen_ssh_ip": request.POST[
"loadgen_ip_" + str(i)]}
setup_devices_cfg["loadgens"].append(loadgen)
print("===================================================")
print("=== Setup Device Config from management UI: ======")
print("===================================================")
print(setup_devices_cfg)
# only create install script if button is clicked
if "create_setup_script_button" in request.POST:
globals.core_conn.root.write_install_script(setup_devices_cfg)
# now write config.json with new data
if request.POST.get("enable_stamper") == "on":
path = globals.core_conn.root.get_template_cfg_path(
request.POST["selected_stamper"])
cfg = globals.core_conn.root.open_cfg_file(path)
cfg["stamper_ssh"] = request.POST["stamper_ip"]
cfg["stamper_user"] = request.POST["stamper_user"]
if request.POST.get(
"enable_ext_host") == "on" \
and "ext_host_user" in request.POST:
cfg["ext_host_user"] = request.POST["ext_host_user"]
cfg["ext_host_ssh"] = request.POST["ext_host_ip"]
cfg["selected_extHost"] = request.POST["selected_extHost"]
cfg["selected_loadgen"] = request.POST["selected_loadgen"]
# add all loadgens to loadgen group 1 and 2
cfg["loadgen_groups"] = [
{"group": 1, "loadgens": [], "use_group": "checked"},
{"group": 2, "loadgens": [], "use_group": "checked"}]
grp1 = setup_devices_cfg["loadgens"][
len(setup_devices_cfg["loadgens"]) // 2:]
grp2 = setup_devices_cfg["loadgens"][
:len(setup_devices_cfg["loadgens"]) // 2]
id_c = 1
for loadgen in grp1:
cfg["loadgen_groups"][0]["loadgens"].append(
{"id": id_c, "loadgen_iface": "", "loadgen_ip": "",
"loadgen_mac": "", "real_port": "",
"p4_port": "", "ssh_ip": loadgen["loadgen_ssh_ip"],
"ssh_user": loadgen["loadgen_user"]})
id_c = id_c + 1
id_c = 1
for loadgen in grp2:
cfg["loadgen_groups"][1]["loadgens"].append(
{"id": id_c, "loadgen_iface": "", "loadgen_ip": "",
"loadgen_mac": "", "real_port": "",
"p4_port": "", "ssh_ip": loadgen["loadgen_ssh_ip"],
"ssh_user": loadgen["loadgen_user"]})
id_c = id_c + 1
if globals.core_conn.root.check_first_run():
P4STA_utils.write_config(cfg)
globals.core_conn.root.first_run_finished()
return HttpResponseRedirect("/run_setup_script/")
# cancel case
globals.core_conn.root.first_run_finished()
return HttpResponseRedirect("/")
else: # request the page
print("### Setup Devices #####")
params = {}
params["stampers"] = P4STA_utils.flt(
globals.core_conn.root.get_all_targets())
params["stampers"].sort(key=lambda y: y.lower())
params["extHosts"] = P4STA_utils.flt(
globals.core_conn.root.get_all_extHost())
params["extHosts"].sort(key=lambda y: y.lower())
# bring python on position 1
if "PythonExtHost" in params["extHosts"]:
params["extHosts"].insert(0, params["extHosts"].pop(
params["extHosts"].index("PythonExtHost")))
params["loadgens"] = P4STA_utils.flt(
globals.core_conn.root.get_all_loadGenerators())
params["loadgens"].sort(key=lambda y: y.lower())
params["isFirstRun"] = globals.core_conn.root.check_first_run()
all_target_cfg = {}
for stamper in params["stampers"]:
# directly converting to json style because True
# would be uppercase otherwise => JS needs "true"
all_target_cfg[stamper] = P4STA_utils.flt(
globals.core_conn.root.get_stamper_target_obj(
target_name=stamper).target_cfg)
params["all_target_cfg"] = json.dumps(all_target_cfg)
return render(request, "middlebox/setup_page.html", {**params})
def skip_setup_redirect_to_config(request):
print("First run finished (skip setup): skip_setup_redirect_to_config")
globals.core_conn.root.first_run_finished()
return HttpResponseRedirect("/")
def run_setup_script(request):
def bash_command(cmd):
subprocess.Popen(['/bin/bash', '-c', cmd])
bash_command(
"sudo pkill shellinaboxd; shellinaboxd -p 4201 --disable-ssl "
"-u $(id -u) --service /:${USER}:${USER}:${PWD}:./core/scripts"
"/spawn_install_server_bash.sh")
return render(request, "middlebox/run_setup_script_page.html", {})
def stop_shellinabox_redirect_to_config(request):
def bash_command(cmd):
subprocess.Popen(['/bin/bash', '-c', cmd])
bash_command("sudo pkill shellinaboxd;")
print("stop_shellinabox_redirect_to_config")
return HttpResponseRedirect("/")
def setup_ssh_checker(request):
ssh_works = False
ping_works = (os.system("timeout 1 ping " + request.POST[
"ip"] + " -c 1") == 0) # if ping works it should be true
if ping_works:
answer = P4STA_utils.execute_ssh(request.POST["user"],
request.POST["ip"], "echo ssh_works")
answer = list(answer)
if len(answer) > 0 and answer[0] == "ssh_works":
ssh_works = True
return JsonResponse({"ping_works": ping_works, "ssh_works": ssh_works})
|
StarcoderdataPython
|
3379077
|
<reponame>zacksoliman/conditional-image-generation<gh_stars>0
# Original Version: <NAME> (http://carpedm20.github.io)
# Source: https://raw.githubusercontent.com/carpedm20/DCGAN-tensorflow/master/model.py
# Modifications for image inpainting: <NAME>
import os
import scipy.misc
import numpy as np
from model import DCGAN
from utils import pp, visualize, to_json
import tensorflow as tf
flags = tf.app.flags
flags.DEFINE_integer("epoch", 25, "Epoch to train [25]")
flags.DEFINE_float("learning_rate", 0.0002, "Learning rate of for adam [0.0002]")
flags.DEFINE_float("beta1", 0.5, "Momentum term of adam [0.5]")
flags.DEFINE_integer("train_size", np.inf, "The size of train images [np.inf]")
flags.DEFINE_integer("batch_size", 64, "The size of batch images [64]")
flags.DEFINE_integer("image_size", 64, "The size of image to use")
flags.DEFINE_string("dataset", "../../../datasets/coco/train2014", "Dataset directory.")
flags.DEFINE_string("checkpoint_dir", "checkpoint", "Directory name to save the checkpoints [checkpoint]")
flags.DEFINE_string("sample_dir", "samples", "Directory name to save the image samples [samples]")
flags.DEFINE_string("z_dist", "gaussian", "Distribution to sample noise from [gaussian]")
FLAGS = flags.FLAGS
if not os.path.exists(FLAGS.checkpoint_dir):
os.makedirs(FLAGS.checkpoint_dir)
if not os.path.exists(FLAGS.sample_dir):
os.makedirs(FLAGS.sample_dir)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
dcgan = DCGAN(sess, image_size=FLAGS.image_size, z_dist=FLAGS.z_dist, batch_size=FLAGS.batch_size,
checkpoint_dir=FLAGS.checkpoint_dir)
dcgan.train(FLAGS)
|
StarcoderdataPython
|
45749
|
<filename>api/utils/custom_jwt.py
import datetime
from rest_framework_jwt.settings import api_settings
from api.serializers import UserSerializer
def jwt_response_payload_handler(token, user=None, request=None):
""" Custom response payload handler.
    This function controls the custom payload after login or token refresh. This data is returned through the web API.
"""
return {
'access_token': token,
'token_type': 'Bearer',
'expired_at': datetime.datetime.utcnow() + api_settings.JWT_EXPIRATION_DELTA,
'user': UserSerializer(user, context={'request': request}).data
}
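# --- Hedged usage sketch (not part of the original module) ---
# rest_framework_jwt only calls this handler once it is registered in settings.py;
# the dotted path below follows this file's location at api/utils/custom_jwt.py:
#
# JWT_AUTH = {
#     'JWT_RESPONSE_PAYLOAD_HANDLER': 'api.utils.custom_jwt.jwt_response_payload_handler',
# }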
|
StarcoderdataPython
|
16729
|
<reponame>shansb/boss_grabbing<filename>boss_grabbing/pipelines.py
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
from boss_grabbing.sqlite import Sqlite
class BossGrabbingPipeline(object):
def process_item(self, item, spider):
print("process")
count = Sqlite.select_db(item['url'])[0][0]
print("count:" + str(count))
if count == 0:
Sqlite.insert_db(item)
return item
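# --- Hedged usage sketch (not part of the original module) ---
# As the header comment notes, the pipeline must be enabled in settings.py;
# the priority value 300 below is an arbitrary illustrative choice:
#
# ITEM_PIPELINES = {
#     'boss_grabbing.pipelines.BossGrabbingPipeline': 300,
# }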
|
StarcoderdataPython
|
117774
|
<reponame>delepoulle/rawls
import shutil
from setuptools import setup
import distutils.command.check
class TestCommand(distutils.command.check.check):
"""Custom test command."""
def run(self):
# run tests using doctest
import doctest
# filters folder
from rawls import scene
from rawls import rawls
from rawls import stats
from rawls import utils
print("==============================")
print("Runs test command...")
# pass test using doctest
doctest.testmod(scene)
print('Scene module')
doctest.testmod(rawls)
print('Rawls module')
doctest.testmod(stats)
print('Stats module')
doctest.testmod(utils)
print('Utils module')
distutils.command.check.check.run(self)
setup(
name='rawls',
version='1.1.7',
description='RAW Light Simulation file reader/converter package',
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering',
'Topic :: Utilities'
],
url='https://github.com/prise-3d/rawls',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
packages=['rawls', 'rawls.scene'],
install_requires=[
'numpy',
'Pillow',
'scipy',
'astropy',
'ipfml',
'pandas',
'OpenEXR'
],
cmdclass={
'test': TestCommand,
},
zip_safe=False)
|
StarcoderdataPython
|
1648559
|
# coding: utf-8
"""
Cloudbreak API
    Cloudbreak is a powerful left surf that breaks over a coral reef, a mile southwest of the island of Tavarua, Fiji. Cloudbreak is a cloud agnostic Hadoop as a Service API. It abstracts the provisioning and eases the management and monitoring of on-demand clusters. SequenceIQ's Cloudbreak is a RESTful application development platform with the goal of helping developers to build solutions for deploying Hadoop YARN clusters in different environments. Once it is deployed in your favourite servlet container it exposes a REST API allowing you to spin up Hadoop clusters of arbitrary sizes on different cloud providers. Provisioning Hadoop has never been easier. Cloudbreak is built on the foundation of cloud provider APIs (Amazon AWS, Microsoft Azure, Google Cloud Platform, Openstack), Apache Ambari, Docker lightweight containers, Swarm and Consul. For further product documentation follow the link: <a href=\"http://hortonworks.com/apache/cloudbreak/\">http://hortonworks.com/apache/cloudbreak/</a>
OpenAPI spec version: 2.7.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class FlexUsageComponentInstanceJson(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'guid': 'str',
'region': 'str',
'provider': 'str',
'flex_subscription_id': 'str',
'creation_time': 'str',
'usage_date': 'str',
'peak_usage': 'int'
}
attribute_map = {
'guid': 'guid',
'region': 'region',
'provider': 'provider',
'flex_subscription_id': 'flexSubscriptionId',
'creation_time': 'creationTime',
'usage_date': 'usageDate',
'peak_usage': 'peakUsage'
}
def __init__(self, guid=None, region=None, provider=None, flex_subscription_id=None, creation_time=None, usage_date=None, peak_usage=None):
"""
FlexUsageComponentInstanceJson - a model defined in Swagger
"""
self._guid = None
self._region = None
self._provider = None
self._flex_subscription_id = None
self._creation_time = None
self._usage_date = None
self._peak_usage = None
if guid is not None:
self.guid = guid
if region is not None:
self.region = region
if provider is not None:
self.provider = provider
if flex_subscription_id is not None:
self.flex_subscription_id = flex_subscription_id
if creation_time is not None:
self.creation_time = creation_time
if usage_date is not None:
self.usage_date = usage_date
if peak_usage is not None:
self.peak_usage = peak_usage
@property
def guid(self):
"""
Gets the guid of this FlexUsageComponentInstanceJson.
:return: The guid of this FlexUsageComponentInstanceJson.
:rtype: str
"""
return self._guid
@guid.setter
def guid(self, guid):
"""
Sets the guid of this FlexUsageComponentInstanceJson.
:param guid: The guid of this FlexUsageComponentInstanceJson.
:type: str
"""
self._guid = guid
@property
def region(self):
"""
Gets the region of this FlexUsageComponentInstanceJson.
:return: The region of this FlexUsageComponentInstanceJson.
:rtype: str
"""
return self._region
@region.setter
def region(self, region):
"""
Sets the region of this FlexUsageComponentInstanceJson.
:param region: The region of this FlexUsageComponentInstanceJson.
:type: str
"""
self._region = region
@property
def provider(self):
"""
Gets the provider of this FlexUsageComponentInstanceJson.
:return: The provider of this FlexUsageComponentInstanceJson.
:rtype: str
"""
return self._provider
@provider.setter
def provider(self, provider):
"""
Sets the provider of this FlexUsageComponentInstanceJson.
:param provider: The provider of this FlexUsageComponentInstanceJson.
:type: str
"""
self._provider = provider
@property
def flex_subscription_id(self):
"""
Gets the flex_subscription_id of this FlexUsageComponentInstanceJson.
:return: The flex_subscription_id of this FlexUsageComponentInstanceJson.
:rtype: str
"""
return self._flex_subscription_id
@flex_subscription_id.setter
def flex_subscription_id(self, flex_subscription_id):
"""
Sets the flex_subscription_id of this FlexUsageComponentInstanceJson.
:param flex_subscription_id: The flex_subscription_id of this FlexUsageComponentInstanceJson.
:type: str
"""
self._flex_subscription_id = flex_subscription_id
@property
def creation_time(self):
"""
Gets the creation_time of this FlexUsageComponentInstanceJson.
:return: The creation_time of this FlexUsageComponentInstanceJson.
:rtype: str
"""
return self._creation_time
@creation_time.setter
def creation_time(self, creation_time):
"""
Sets the creation_time of this FlexUsageComponentInstanceJson.
:param creation_time: The creation_time of this FlexUsageComponentInstanceJson.
:type: str
"""
self._creation_time = creation_time
@property
def usage_date(self):
"""
Gets the usage_date of this FlexUsageComponentInstanceJson.
:return: The usage_date of this FlexUsageComponentInstanceJson.
:rtype: str
"""
return self._usage_date
@usage_date.setter
def usage_date(self, usage_date):
"""
Sets the usage_date of this FlexUsageComponentInstanceJson.
:param usage_date: The usage_date of this FlexUsageComponentInstanceJson.
:type: str
"""
self._usage_date = usage_date
@property
def peak_usage(self):
"""
Gets the peak_usage of this FlexUsageComponentInstanceJson.
:return: The peak_usage of this FlexUsageComponentInstanceJson.
:rtype: int
"""
return self._peak_usage
@peak_usage.setter
def peak_usage(self, peak_usage):
"""
Sets the peak_usage of this FlexUsageComponentInstanceJson.
:param peak_usage: The peak_usage of this FlexUsageComponentInstanceJson.
:type: int
"""
self._peak_usage = peak_usage
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, FlexUsageComponentInstanceJson):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
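# --- Hedged usage sketch (not part of the generated module) ---
if __name__ == "__main__":
    # build an instance with made-up illustrative values and inspect it
    example = FlexUsageComponentInstanceJson(guid="g-1", region="eu-west-1",
                                             provider="AWS", peak_usage=4)
    print(example.to_dict())  # unset fields simply come back as None
    print(example)            # __repr__ delegates to to_str()/pformat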
|
StarcoderdataPython
|
57498
|
# 32. Write a program that reads any year and shows whether it is a leap year.
import datetime
def Main032():
    hoje = int(input('Which year would you like to check for leap year?'))
    # OR: hoje = datetime.date.today().year
if hoje % 4 == 0 and hoje % 100 != 0 or hoje % 400 == 0:
        print(f'The year {hoje} is a leap year!')
else:
        print(f'The year {hoje} is NOT a leap year!')
Main032()
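# --- Hedged sketch: the same rule factored into a reusable helper (not in the original) ---
def is_leap_year(year):
    # divisible by 4 and not by 100, or divisible by 400
    return year % 4 == 0 and year % 100 != 0 or year % 400 == 0

assert is_leap_year(2000) and is_leap_year(2016)
assert not is_leap_year(1900) and not is_leap_year(2019)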
|
StarcoderdataPython
|
106875
|
<reponame>SHIVJITH/Odoo_Machine_Test<gh_stars>0
# -*- coding: utf-8 -*-
from collections import defaultdict
from odoo import models, fields, api, _
from odoo.exceptions import UserError
class Base(models.AbstractModel):
_inherit = 'base'
def _valid_field_parameter(self, field, name):
return name == 'sparse' or super()._valid_field_parameter(field, name)
class IrModelFields(models.Model):
_inherit = 'ir.model.fields'
ttype = fields.Selection(selection_add=[
('serialized', 'serialized'),
], ondelete={'serialized': 'cascade'})
serialization_field_id = fields.Many2one('ir.model.fields', string='Serialization Field',
ondelete='cascade', domain="[('ttype','=','serialized'), ('model_id', '=', model_id)]",
help="If set, this field will be stored in the sparse structure of the "
"serialization field, instead of having its own database column. "
"This cannot be changed after creation.",
)
def write(self, vals):
# Limitation: renaming a sparse field or changing the storing system is
# currently not allowed
if 'serialization_field_id' in vals or 'name' in vals:
for field in self:
if 'serialization_field_id' in vals and field.serialization_field_id.id != vals['serialization_field_id']:
raise UserError(_('Changing the storing system for field "%s" is not allowed.', field.name))
if field.serialization_field_id and (field.name != vals['name']):
raise UserError(_('Renaming sparse field "%s" is not allowed', field.name))
return super(IrModelFields, self).write(vals)
def _reflect_fields(self, model_names):
super()._reflect_fields(model_names)
# set 'serialization_field_id' on sparse fields; it is done here to
# ensure that the serialized field is reflected already
cr = self._cr
# retrieve existing values
query = """
SELECT model, name, id, serialization_field_id
FROM ir_model_fields
WHERE model IN %s
"""
cr.execute(query, [tuple(model_names)])
existing = {row[:2]: row[2:] for row in cr.fetchall()}
# determine updates, grouped by value
updates = defaultdict(list)
for model_name in model_names:
for field_name, field in self.env[model_name]._fields.items():
field_id, current_value = existing[(model_name, field_name)]
try:
value = existing[(model_name, field.sparse)][0] if field.sparse else None
except KeyError:
msg = _("Serialization field %r not found for sparse field %s!")
raise UserError(msg % (field.sparse, field))
if current_value != value:
updates[value].append(field_id)
if not updates:
return
# update fields
query = "UPDATE ir_model_fields SET serialization_field_id=%s WHERE id IN %s"
for value, ids in updates.items():
cr.execute(query, [value, tuple(ids)])
records = self.browse(id_ for ids in updates.values() for id_ in ids)
self.pool.post_init(records.modified, ['serialization_field_id'])
def _instanciate_attrs(self, field_data):
attrs = super(IrModelFields, self)._instanciate_attrs(field_data)
if attrs and field_data.get('serialization_field_id'):
serialization_record = self.browse(field_data['serialization_field_id'])
attrs['sparse'] = serialization_record.name
return attrs
class TestSparse(models.TransientModel):
_name = 'sparse_fields.test'
_description = 'Sparse fields Test'
data = fields.Serialized()
boolean = fields.Boolean(sparse='data')
integer = fields.Integer(sparse='data')
float = fields.Float(sparse='data')
char = fields.Char(sparse='data')
selection = fields.Selection([('one', 'One'), ('two', 'Two')], sparse='data')
partner = fields.Many2one('res.partner', sparse='data')
|
StarcoderdataPython
|
122657
|
<reponame>gwangyi/pygritia
"""Pavement for Pygritia"""
import shlex
import sys
import paver.doctools # pylint: disable=unused-import
import paver.virtual # pylint: disable=unused-import
from paver.easy import * # pylint: disable=unused-wildcard-import,wildcard-import
from paver.options import Bunch
from paver.path import path
from paver.setuputils import setup
if sys.version_info < (3, 7):
sys.exit("Pygritia requires Python >= 3.7")
options = environment.options # pylint: disable=invalid-name
HERE = path(__file__).dirname().abspath()
setup_params = dict( # pylint: disable=invalid-name
name="pygritia",
version="0.2.0",
description="Pygritia: Lazy Symbolic Evaluation",
long_description=open(HERE / 'README.md').read(),
long_description_content_type="text/markdown",
url="https://github.com/gwangyi/pygritia",
author="<NAME>",
author_email="<EMAIL>",
license="MIT",
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
],
package_data={'pygritia': ['py.typed']},
packages=['pygritia'],
)
options(
minilib=Bunch(
extra_files=['doctools'],
versioned_name=False,
extra_packages=[],
),
sphinx=Bunch(
docroot='sphinx',
builddir='build',
sourcedir='source',
apidoc_opts=['-e'],
),
)
setup(**setup_params)
@task
@no_help
def env():
"""Ready env"""
import os
def pathify(key, *args):
paths = os.environ.get(key, '').split(os.path.pathsep)
os.environ[key] = os.path.pathsep.join(list(args) + paths)
pathify('MYPYPATH', HERE)
pathify('PYTHONPATH', HERE, *sys.path)
@task
@needs(['env'])
def test():
"""Run unittest"""
try:
import pytest # type: ignore
except ImportError:
raise BuildFailure('install pytest to test')
pytestopts = ['--cov=' + setup_params['name'], '--cov-report=html', '--cov-report=term']
dry('pytest {}'.format(' '.join(pytestopts)), pytest.main, pytestopts)
@task
@needs(['env'])
def typecheck():
"""Run mypy"""
try:
import mypy.main
except ImportError:
raise BuildFailure('install mypy to typecheck')
mypyopts = ['--strict', '-p', setup_params['name']]
dry('mypy {}'.format(' '.join(mypyopts)), mypy.main.main, None, mypyopts)
@task
@needs(['env'])
def lint():
"""Run mypy and pylint"""
try:
import pylint.lint # type: ignore
except ImportError:
raise BuildFailure('install pylint to lint')
pylintopts = ['pavement.py', 'paverlib', setup_params['name']]
dry('pylint {}'.format(' '.join(pylintopts)), pylint.lint.Run, pylintopts)
@task
@needs(['env'])
@consume_args
def shell(args):
"""Run shell"""
import os
sh(' '.join(shlex.quote(arg) for arg in args)
if args else os.environ.get('SHELL', '/bin/bash'))
@task
@needs(['env'])
def nvim():
"""Launch neovim with env"""
import os
os.environ['BULLETTRAIN_VIRTUALENV_PREFIX'] = 'py'
sh('nvim "+bot sp +terminal" +NERDTreeToggle')
@task
@needs(['paverlib.doctools.apidoc', 'paverlib.doctools.html'])
def html():
"""Override html task to copy result to 'docs' directory"""
import shutil
try:
shutil.rmtree(HERE / 'docs')
except FileNotFoundError:
pass
shutil.copytree(
HERE /
options.sphinx.docroot /
options.sphinx.builddir /
'html',
HERE /
'docs')
@task
@needs('generate_setup', 'minilib', 'setuptools.command.sdist')
def sdist():
"""Overrides sdist to make sure that our setup.py is generated."""
|
StarcoderdataPython
|
43248
|
from __future__ import print_function
from numpy import pi, arange, sin, cos
import numpy as np
import os.path
import time
from bokeh.objects import (Plot, DataRange1d, LinearAxis, DatetimeAxis,
ColumnDataSource, Glyph, PanTool, WheelZoomTool)
from bokeh.glyphs import Circle
from bokeh import session
x = arange(-2 * pi, 2 * pi, 0.1)
y = sin(x)
# Create an array of times, starting at the current time, and extending
# for len(x) number of hours.
times = np.arange(len(x)) * 3600000 + time.time()
source = ColumnDataSource(
data=dict(x=x, y=y, times=times)
)
xdr = DataRange1d(sources=[source.columns("times")])
ydr = DataRange1d(sources=[source.columns("y")])
circle = Circle(x="times", y="y", fill_color="red", size=5, line_color="black")
glyph_renderer = Glyph(
data_source=source,
xdata_range=xdr,
ydata_range=ydr,
glyph=circle,
)
plot = Plot(x_range=xdr, y_range=ydr, data_sources=[source],
border=80)
xaxis = DatetimeAxis(plot=plot, dimension=0, location="min")
yaxis = LinearAxis(plot=plot, dimension=1, location="min")
pantool = PanTool(dataranges=[xdr, ydr], dimensions=["width", "height"])
wheelzoomtool = WheelZoomTool(dataranges=[xdr, ydr], dimensions=("width", "height"))
plot.renderers.append(glyph_renderer)
plot.tools = [pantool, wheelzoomtool]
sess = session.HTMLFileSession("dateaxis.html")
sess.add(plot, recursive=True)
sess.plotcontext.children.append(plot)
sess.save(js="absolute", css="absolute")
sess.dumpjson(file="dateaxis.json")
print("Wrote %s" % sess.filename)
if __name__ == "__main__":
sess.view()
|
StarcoderdataPython
|
1715705
|
import torch
from torch import nn
import torch.nn.functional as F
from BahdanauAttnDecoderRNN import BahdanauAttnDecoderRNN
from CNNModels import CnnTextClassifier
class CtrlGenModel(nn.Module):
def __init__(self,config,vocab_size,batch_size,weights_matrix):
super(CtrlGenModel,self).__init__()
        # tensor shapes throughout: batch 64 x sequence length 16 (or 17) x embedding 100
embed_size = config.model["embedder"]["dim"]
hidden_size = config.model["encoder"]["rnn_cell"]["kwargs"]["num_units"]
self.hidden_size = hidden_size
num_layers = 2
self.softmax = F.log_softmax
self.embedder = nn.Embedding(vocab_size, embed_size)
self.embedder.load_state_dict({'weight': weights_matrix})
#Classifier pretrained Embedding
self.clas_embedder = nn.Embedding(vocab_size,embed_size)
self.clas_embedder.load_state_dict({'weight': weights_matrix})
self.vocab_size = vocab_size
self.vocab_tensor = torch.LongTensor([i for i in range(vocab_size)])
self.batch_size = batch_size
        # the number of encoder layers could be two (num_layers above is not passed to the GRU)
self.encoder = nn.GRU(input_size = embed_size,hidden_size = hidden_size,dropout = 0.5,batch_first = True)
self.dropout = nn.Dropout(0.5)
self.dim_c = config.model["dim_c"]
self.label_connector = nn.Sequential(nn.Linear(1,hidden_size),nn.Linear(hidden_size,self.dim_c))
self.connector = nn.Linear(700,hidden_size)
self.decoder = BahdanauAttnDecoderRNN(hidden_size,embed_size,vocab_size,dropout_p=0.5)
#Classifier
self.classifier = CnnTextClassifier(num_filters = 128,vocab_size = vocab_size,emb_size = embed_size,num_classes = 2)
self.lm = nn.GRU(input_size = embed_size,hidden_size = hidden_size,dropout = 0.5,batch_first = True)
self.lm_output = nn.Linear(hidden_size,vocab_size)
self.lm_embedder = nn.Embedding(vocab_size,embed_size)
self.lm_embedder.load_state_dict({'weight': weights_matrix})
def forward(self, inputs,sentence_length,if_dis = False,if_eval = False,if_lm = False,gamma = 1):
if if_dis:
probs,classes = self.classifier(self.clas_embedder(inputs["text_ids"]))
return probs,classes
#Train the language model
#Initial hidden state should be (num_layers * num_directions, batch, hidden_size)
if if_lm:
hidden_state_1 = inputs["hidden"]
hidden_state_2 = inputs["labels"].view(-1,1).float().expand(self.batch_size,(self.hidden_size-300))
hidden_state_lm = torch.cat((hidden_state_2,hidden_state_1),1).unsqueeze(0)
text_embedding = self.lm_embedder(inputs["text_ids"])
lm_outputs,_ = self.lm(text_embedding,hidden_state_lm)
lm_outputs = self.lm_output(lm_outputs)
return lm_outputs
input_length = len(inputs["text_ids"])
# Change the vocab_tensor
vocab_tensor = self.vocab_tensor.expand(input_length,self.vocab_size)
enc_text_ids = inputs["text_ids"][:,1:]
#enc_inputs shape(64,16,100)
#enc_outputs shape(64,16,700)
#final_state shape(1,64,700)
text_embedding = self.embedder(enc_text_ids)
enc_outputs,final_state = self.encoder(text_embedding)
#Get the final_state
z = final_state[0,:,self.dim_c:]
labels = inputs["labels"].view(-1,1).float()
c = self.label_connector(labels)
c_ = self.label_connector(1-labels)
h = torch.cat((c,z),1)
h_ = torch.cat((c_,z),1)
#h 64*700
#(self,embedding, word_input, initial_state, encoder_outputs):
        # get the regular decoder result at each step, feeding the target token as input, to calculate loss_ae
decoder_hidden = self.connector(h).unsqueeze(0)
decoder_outputs = torch.Tensor(sentence_length,input_length,self.vocab_size)
for di in range(len(inputs["text_ids"])):
decoder_output,decoder_hidden = self.decoder(embedding = self.embedder,word_input = inputs["text_ids"][:,di], initial_state = decoder_hidden ,encoder_outputs= enc_outputs)
#print("decoder_output: ",decoder_output.shape)
decoder_outputs[di] = decoder_output
        # soft_output.sample_id, called soft_outputs, has shape 64 x 16 x 9657 (batch x seq_len x vocab)
if if_eval:
decoder_gumbel_hidden = self.connector(h_).unsqueeze(0)
soft_outputs_ = torch.Tensor(sentence_length,input_length,self.vocab_size)
decoder_soft_outputs,decoder_gumbel_hidden = self.decoder(embedding = self.embedder,word_input = inputs["text_ids"][:,0],initial_state = decoder_gumbel_hidden,encoder_outputs = enc_outputs,gumbel = True,gamma = gamma)
soft_outputs_[0] = decoder_soft_outputs
for di in range(1,sentence_length):
decoder_soft_outputs,decoder_gumbel_hidden = self.decoder(embedding = self.embedder,word_input = torch.argmax(decoder_soft_outputs,1),initial_state = decoder_gumbel_hidden,encoder_outputs = enc_outputs,gumbel = True,gamma = gamma)
soft_outputs_[di] = decoder_soft_outputs
clas_input = torch.bmm(soft_outputs_.transpose(0,1),self.clas_embedder(vocab_tensor))
probs,classes = self.classifier(clas_input)
else:
decoder_gumbel_hidden = self.connector(h_).unsqueeze(0)
soft_outputs_ = torch.Tensor(sentence_length,input_length,self.vocab_size)
decoder_soft_outputs,decoder_gumbel_hidden = self.decoder(embedding = self.embedder,word_input = inputs["text_ids"][:,0],initial_state = decoder_gumbel_hidden,encoder_outputs = enc_outputs,gumbel = True,gamma = gamma)
soft_outputs_[0] = decoder_soft_outputs
for di in range(1,sentence_length):
decoder_soft_outputs,decoder_gumbel_hidden = self.decoder(embedding = self.embedder,word_input = torch.argmax(decoder_soft_outputs,1),initial_state = decoder_gumbel_hidden,encoder_outputs = enc_outputs,gumbel = True,gamma = gamma)
soft_outputs_[di] = decoder_soft_outputs
soft_outputs_new = soft_outputs_.transpose(0,1)
#soft_outputs_new is 64*17*9431
clas_input = torch.bmm(soft_outputs_new,self.clas_embedder(vocab_tensor))
probs,classes = self.classifier(clas_input)
        # language model on the generated input: drop the last output (produced from EOS) and prepend the BOS embedding
hidden_state_1 = inputs["hidden2"]
hidden_state_2 =(1-inputs["labels"]).view(-1,1).float().expand(self.batch_size,(self.hidden_size-300))
hidden_state_lm = torch.cat((hidden_state_2,hidden_state_1),1).unsqueeze(0)
lm_input_new = torch.bmm(soft_outputs_new,self.lm_embedder(vocab_tensor))
lm_test_input = torch.cat((self.lm_embedder(inputs["text_ids"][:,0]).unsqueeze(1),lm_input_new[:,:-1,:]),1)
lm_outputs,_ = self.lm(lm_test_input,hidden_state_lm)
lm_outputs = self.lm_output(lm_outputs)
lm_outputs = lm_outputs.transpose(0,1)
return decoder_outputs,soft_outputs_,probs,classes,lm_outputs
|
StarcoderdataPython
|
3220491
|
"""This module contains the general information for BiosVfCbsDfCmnDramNps ManagedObject."""
from ...imcmo import ManagedObject
from ...imccoremeta import MoPropertyMeta, MoMeta
from ...imcmeta import VersionMeta
class BiosVfCbsDfCmnDramNpsConsts:
VP_CBS_DF_CMN_DRAM_NPS_AUTO = "Auto"
VP_CBS_DF_CMN_DRAM_NPS_NPS0 = "NPS0"
VP_CBS_DF_CMN_DRAM_NPS_NPS1 = "NPS1"
VP_CBS_DF_CMN_DRAM_NPS_NPS2 = "NPS2"
VP_CBS_DF_CMN_DRAM_NPS_NPS4 = "NPS4"
VP_CBS_DF_CMN_DRAM_NPS_PLATFORM_DEFAULT = "platform-default"
class BiosVfCbsDfCmnDramNps(ManagedObject):
"""This is BiosVfCbsDfCmnDramNps class."""
consts = BiosVfCbsDfCmnDramNpsConsts()
naming_props = set([])
mo_meta = {
"classic": MoMeta("BiosVfCbsDfCmnDramNps", "biosVfCbsDfCmnDramNps", "nodes-per-socket", VersionMeta.Version421a, "InputOutput", 0x1f, [], ["admin"], ['biosPlatformDefaults', 'biosSettings'], [], [None]),
}
prop_meta = {
"classic": {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version421a, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version421a, MoPropertyMeta.READ_WRITE, 0x2, 0, 255, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version421a, MoPropertyMeta.READ_WRITE, 0x4, 0, 255, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version421a, MoPropertyMeta.READ_WRITE, 0x8, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
"vp_cbs_df_cmn_dram_nps": MoPropertyMeta("vp_cbs_df_cmn_dram_nps", "vpCbsDfCmnDramNps", "string", VersionMeta.Version421a, MoPropertyMeta.READ_WRITE, 0x10, None, None, None, ["Auto", "NPS0", "NPS1", "NPS2", "NPS4", "platform-default"], []),
},
}
prop_map = {
"classic": {
"childAction": "child_action",
"dn": "dn",
"rn": "rn",
"status": "status",
"vpCbsDfCmnDramNps": "vp_cbs_df_cmn_dram_nps",
},
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.child_action = None
self.status = None
self.vp_cbs_df_cmn_dram_nps = None
ManagedObject.__init__(self, "BiosVfCbsDfCmnDramNps", parent_mo_or_dn, **kwargs)
|
StarcoderdataPython
|
190888
|
<filename>Fundamentals/Exercises/Data_Types_Variables_More/1_exchange_integers.py
# Read two integer numbers and, after that, exchange their values. Print the variable values before and after the exchange, as shown below:
a = int(input())
b = int(input())
print(f'Before:\na = {a}\nb = {b}')
a, b = b, a
print(f'After:\na = {a}\nb = {b}')
|
StarcoderdataPython
|
1775448
|
<reponame>cys3c/viper-shell<filename>application/modules/post/windows-priv-check/wpc/shares.py<gh_stars>1-10
from wpc.share import share
import win32net
import wpc.conf
class shares:
def __init__(self):
self.shares = []
pass
def get_all(self):
if self.shares == []:
resume = 1;
while resume:
resume = 0
sharelist = None
try:
(sharelist, total, resume) = win32net.NetShareEnum(wpc.conf.remote_server, 0, resume, 9999)
except:
print "[E] Can't check shares - not enough privs?"
if sharelist:
for shareitem in sharelist:
s = share(shareitem['netname'])
self.shares.append(s)
return self.shares
|
StarcoderdataPython
|
3351709
|
<filename>tests/urlpatterns_reverse/included_named_urls.py
from django.conf.urls import include, url
from .views import empty_view
urlpatterns = [
url(r'^$', empty_view, name="named-url3"),
url(r'^extra/(?P<extra>\w+)/$', empty_view, name="named-url4"),
url(r'^(?P<one>[0-9]+)|(?P<two>[0-9]+)/$', empty_view),
url(r'^included/', include('urlpatterns_reverse.included_named_urls2')),
]
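# --- Hedged usage sketch (assumes Django is configured and this module is reachable
# from ROOT_URLCONF; the reversed paths depend on the including prefix) ---
# from django.urls import reverse
# reverse('named-url3')                          # the r'^$' pattern
# reverse('named-url4', kwargs={'extra': 'x'})   # the r'^extra/(?P<extra>\w+)/$' pattern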
|
StarcoderdataPython
|
3336471
|
<gh_stars>0
## LSDMap_VectorTools.py
##=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
## These functions are tools to deal with vector data using shapely
##=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
## FJC
## 26/06/17
##=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
from . import LSDMap_GDALIO as LSDMap_IO
from shapely.geometry import Point, Polygon
import os
from os.path import exists
from osgeo import ogr, osr
import LSDPlottingTools as LSDPT
import gdal as gdal
from osgeo.gdalconst import GA_ReadOnly
from LSDMapFigure import PlottingHelpers as Helper
#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
# BASIN FUNCTIONS
# These functions do various operations on basin polygons
#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
def GetBasinOutlines(DataDirectory, basins_fname):
"""
This function takes in the raster of basins and gets a dict of basin polygons,
where the key is the basin key and the value is a shapely polygon of the basin.
IMPORTANT: In this case the "basin key" is usually the junction number:
this function will use the raster values as keys and in general
the basin rasters are output based on junction indices rather than keys
Args:
DataDirectory (str): the data directory with the basin raster
basins_fname (str): the basin raster
Returns:
list of shapely polygons with the basins
Author: FJC
"""
# read in the basins raster
this_fname = basins_fname.split('.')
print(basins_fname)
OutputShapefile = this_fname[0]+'.shp'
# polygonise the raster
BasinDict = LSDMap_IO.PolygoniseRaster(DataDirectory, basins_fname, OutputShapefile)
return BasinDict
def GetMultipleBasinOutlines(DataDirectory):
"""
This function takes in multiple rasters of basins and gets a dict of basin polygons,
where the key is the basin key derived from the file name and the value is a shapely polygon of the basin.
IMPORTANT: In this case the "basin key" is usually the junction number:
this function will use the raster values as keys and in general
the basin rasters are output based on junction indices rather than keys
Args:
DataDirectory (str): the data directory with the basin raster
Returns:
list of shapely polygons with the basins
Author: MDH
"""
# get a list of basins and declare the dictionary to populate
basin_dict = Helper.MapBasinsToKeys(DataDirectory)
BasinsDict = {}
#loop across the basins
for outlet_jn, basin_key in basin_dict.iteritems():
this_fname = "basin"+str(outlet_jn)+"_AllBasins.bil"
TempBasins = GetBasinOutlines(DataDirectory,this_fname)
for temp_outlet, temp_basin_key in TempBasins.iteritems():
if len(TempBasins) > 1:
print("WARNING: MULTIPLE BASINS IN basin #", outlet_jn)
TempBasins[int(outlet_jn)] = TempBasins.pop(temp_outlet)
BasinsDict.update(TempBasins)
return BasinsDict
def GetBasinCentroids(DataDirectory, basins_fname):
"""
This function takes in the raster of basins and returns a dict where the
key is the basin key and the value is the shapely point of the centroid
In most cases the "basin key" is actually the junction index: it comes
from the basins labeled within the basin raster, which is output with
junction indices rather than junction keys
Args:
DataDirectory (str): the data directory with the basin raster
fname_prefix (str): the prefix for the DEM
Returns:
dict of centroid points
Author: FJC
"""
# get the basin polygons
BasinDict = GetBasinOutlines(DataDirectory, basins_fname)
# get the centroids
CentroidDict = {}
for basin_key, basin in BasinDict.iteritems():
CentroidDict[basin_key] = Point(basin.centroid)
return CentroidDict
def GetPointWithinBasins(DataDirectory,basins_fname):
"""
This function takes in the raster of basin and returns a dict where the
key is the basin key and the value is a shapely point that is representative
of the basin (guaranteed to be within the polygon)
In most cases the "basin key" is actually the junction index: it comes
from the basins labeled within the basin raster, which is output with
junction indices rather than junction keys
Args:
DataDirectory (str): the data directory with the basin raster
fname_prefix (str): the prefix for the DEM
Returns:
dict of representative points
Author: FJC
"""
# get the basin polygons
BasinDict = GetBasinOutlines(DataDirectory, basins_fname)
# get the centroids
PointDict = {}
for basin_key, basin in BasinDict.iteritems():
PointDict[basin_key] = Point(basin.representative_point())
return PointDict
def GetPointsWithinMultipleBasins(DataDirectory,basins_fname):
"""
This function takes in rasters of basins and returns a dict where the
key is the basin key and the value is a shapely point that is representative
of the basin (guaranteed to be within the polygon)
In most cases the "basin key" is actually the junction index: it comes
from the basins labeled within the basin raster, which is output with
junction indices rather than junction keys
Args:
DataDirectory (str): the data directory with the basin raster
fname_prefix (str): the prefix for the DEM
Returns:
dict of representative points
Author: FJC
"""
# get the basin polygons
BasinDict = GetMultipleBasinOutlines(DataDirectory)
print("BASIN DICT IS")
print(BasinDict)
# get the centroids
PointDict = {}
for basin_key, basin in BasinDict.iteritems():
PointDict[basin_key] = Point(basin.representative_point())
print("POINT DICT IS")
print(PointDict)
return PointDict
def GetPointWithinBasinsBuffered(DataDirectory,basins_fname, basin_list = [], buffer_frac=0.1):
"""
This function takes in the raster of basins, and buffers each basin
(makes each one smaller). It then gets the centroid of each buffered
basin and returns as a dict where the key is the basin key and the value
is a shapely point that is the centroid of the buffered basin.
In most cases the "basin key" is actually the junction index: it comes
from the basins labeled within the basin raster, which is output with
junction indices rather than junction keys
This doesn't work at the moment - need to think of a way to specify the buffer
distance appropriately.
Args:
DataDirectory (str): the data directory with the basin raster
fname_prefix (str): the prefix for the DEM
buffer_frac (float): the fraction of the basin to be removed by the
buffer, default = 0.1
Returns:
dict of representative points
Author: FJC
"""
# get the basin polygons
BasinDict = GetBasinOutlines(DataDirectory, basins_fname)
# buffer and get the centre of the buffered polygons
PointDict = {}
for basin_key, basin in BasinDict.iteritems():
# get the x and y lengths of the basin and append to list
print("This basin key is: "+str(basin_key))
lengths = []
bounds = basin.bounds
lengths.append(bounds[2] - bounds[0])
lengths.append(bounds[3] - bounds[1])
print(min(lengths))
# buffer with a fraction of the minimum length
new_basin = Polygon(basin.buffer(min(lengths)*buffer_frac*-1))
# get the centroid of the buffered basin
PointDict[basin_key] = Point(new_basin.centroid)
return PointDict
##### This part is copied from the LSD_GeologyTools.py file to make the functions accessible from other scripts and thus easier to ingest; it will be cleaned up at some point.
##### The aim of these functions is to rasterize a lithologic shapefile into a raster.
def readFile(filename):
print("Hey buddy, Reading the file: "+filename)
filehandle = gdal.Open(filename, GA_ReadOnly )
if filehandle == None:
raise Exception("Unable to read the data file")
band1 = filehandle.GetRasterBand(1)
geotransform = filehandle.GetGeoTransform()
geoproj = filehandle.GetProjection()
Z = band1.ReadAsArray()
xsize = filehandle.RasterXSize
ysize = filehandle.RasterYSize
return xsize,ysize,geotransform,geoproj,Z
def writeFile(filename,geotransform,geoprojection,data):
(x,y) = data.shape
format = "GTiff".encode('utf-8')
noDataValue = -9999
driver = gdal.GetDriverByName(format)
# you can change the dataformat but be sure to be able to store negative values including -9999
dst_datatype = gdal.GDT_Float32
#print(data)
dst_ds = driver.Create(filename,y,x,1,dst_datatype)
dst_ds.GetRasterBand(1).WriteArray(data)
dst_ds.GetRasterBand(1).SetNoDataValue( noDataValue )
dst_ds.SetGeoTransform(geotransform)
dst_ds.SetProjection(geoprojection)
return 1
def Rasterize_BGS_geologic_maps(shapefile_name):
# The shapefile to be rasterized:
print('Rasterize ' + shapefile_name)
    # get path and filename separately
shapefilefilepath = LSDPT.GetPath(shapefile_name)
shapefilename = LSDPT.GetFileNameNoPath(shapefile_name)
shapefileshortname = LSDPT.GetFilePrefix(shapefile_name)
print("Shapefile name is: "+shapefilename)
# now get the the fields from the shapefile
daShapefile = shapefile_name
dataSource = ogr.Open(daShapefile)
daLayer = dataSource.GetLayer(0)
# lets see what the layers are
print("Let me tell you what the names of the fields are!")
layerDefinition = daLayer.GetLayerDefn()
for i in range(layerDefinition.GetFieldCount()):
print(layerDefinition.GetFieldDefn(i).GetName())
# The raster file to be created and receive the rasterized shapefile
outrastername = shapefileshortname + '.tif'
outraster = shapefilefilepath+os.sep+ outrastername
outcsv = shapefilefilepath+os.sep+shapefileshortname+'_lithokey.csv'
print("Full name of out raster is: "+outraster)
# Rasterize!!
system_call = 'gdal_rasterize -a BGSREF -l ' + shapefileshortname +' -tr 90 -90 -a_nodata -9999 ' + shapefile_name + ' ' + outraster
print("System call is: ")
print(system_call)
os.system(system_call)
# now convert the raster to UTM, as well as delete the stupid TIF
# The raster file to be created and receive the rasterized shapefile
outrastername_bil = shapefileshortname + '.bil'
outraster_bil = shapefilefilepath+os.sep+ outrastername_bil
print("Full name of out raster is: "+outraster_bil)
# This assumes UTM zone 30, because why would we do any work in East Anglia?
system_call2 = 'gdalwarp -t_srs EPSG:32630 -of ENVI -dstnodata -9999 ' + outraster + ' ' + outraster_bil
os.system(system_call2)
# Now get rid of the tif
system_call3 = 'rm '+ outraster
os.system(system_call3)
# Make a key for the bedrock
geol_dict = dict()
for feature in daLayer:
ID = feature.GetField("BGSREF")
GEOL = feature.GetField("RCS_D")
if ID not in geol_dict:
print("I found a new rock type, ID: "+ str(ID)+ " and rock type: " + str(GEOL))
geol_dict[ID] = GEOL
print("The rocks are: ")
print(geol_dict)
with open(outcsv, 'wb') as f:
f.write('ID,rocktype\n')
for key in geol_dict:
f.write(str(key)+','+ str(geol_dict[key])+'\n')
print("All done")
def Rasterize_geologic_maps_pythonic(shapefile_name, raster_resolution = 400, geol_field = "xx"):
# The shapefile to be rasterized:
print('Rasterize ' + shapefile_name)
    # get path and filename separately
shapefilefilepath = LSDPT.GetPath(shapefile_name)
shapefilename = LSDPT.GetFileNameNoPath(shapefile_name)
shapefileshortname = LSDPT.GetFilePrefix(shapefile_name)
print("Shapefile name is: "+shapefilename)
# now get the the fields from the shapefile
daShapefile = shapefile_name
dataSource = ogr.Open(daShapefile)
daLayer = dataSource.GetLayer(0)
# lets see what the layers are
print("Let me tell you what the names of the fields are!")
layerDefinition = daLayer.GetLayerDefn()
for i in range(layerDefinition.GetFieldCount()):
print(layerDefinition.GetFieldDefn(i).GetName())
# The raster file to be created and receive the rasterized shapefile
outrastername = shapefileshortname + '.tif'
print("The new raster is: "+outrastername)
outraster = shapefilefilepath+ outrastername
outcsv = shapefilefilepath+shapefileshortname+'_lithokey.csv'
print("Full name of out raster is: "+outraster)
# Create the destination data source
inGridSize=float(raster_resolution)
xMin, xMax, yMin, yMax = daLayer.GetExtent()
xRes = int((xMax - xMin) / inGridSize)
yRes = int((yMax - yMin) / inGridSize)
rasterDS = gdal.GetDriverByName(str('GTiff')).Create(outraster, xRes, yRes, 1, gdal.GDT_Byte)
# Define spatial reference
NoDataVal = -9999
rasterDS.SetProjection(daLayer.GetSpatialRef().ExportToWkt())
rasterDS.SetGeoTransform((xMin, inGridSize, 0, yMax, 0, -inGridSize))
rBand = rasterDS.GetRasterBand(1)
rBand.SetNoDataValue(NoDataVal)
rBand.Fill(NoDataVal)
# Rasterize
gdal.RasterizeLayer(rasterDS, [1], daLayer, options = ["ATTRIBUTE=GEOL_CODE"])
# Make a key for the bedrock
geol_dict = dict()
geol_field = str(geol_field)
for feature in daLayer:
print(feature)
ID = feature.GetField(geol_field)
print(ID)
GEOL = feature.GetField('GEOL_CODE')
if ID not in geol_dict:
print("I found a new rock type, ID: "+ str(ID)+ " and rock type: " + str(GEOL))
geol_dict[ID] = GEOL
print("The rocks are: ")
print(geol_dict)
with open(outcsv, 'w') as f:
f.write('ID,rocktype\n')
for key in geol_dict:
f.write(str(key)+','+ str(geol_dict[key])+'\n')
print("Done rasterizing!")
return outraster
def Correct_Raterized_GLIM_map(tifname):
    # And now for a hack that converts the raster to integers and flags non-positive values as nodata (-9999)
print("The raster name is: "+tifname)
[xsize,ysize,geotransform,geoproj,Z] = readFile(tifname)
print("Before data check")
print(Z)
print("Data type is: "+ str(Z.dtype))
X = Z.astype(int)
# Set large negative values to -9999
X[X<=0] = -9999
#Z[np.isnan(Z)]= -9999
print("After_data_check")
print(X)
    # get path and filename separately
filepath = LSDPT.GetPath(tifname)
#filename = LSDPT.GetFileNameNoPath(tifname)
fileshortname = LSDPT.GetFilePrefix(tifname)
outraster2 = filepath+fileshortname + '2.tif'
writeFile(outraster2,geotransform,geoproj,X)
def geologic_maps_modify_shapefile(shapefile_name, geol_field = "xx"):
# The shapefile to be rasterized:
print('Rasterize ' + shapefile_name)
    # get path and filename separately
shapefilefilepath = LSDPT.GetPath(shapefile_name)
#shapefilename = LSDPT.GetFileNameNoPath(shapefile_name)
shapefileshortname = LSDPT.GetFilePrefix(shapefile_name)
# get the new shapefile name
new_shapefile_name = shapefilefilepath+shapefileshortname+"_new.shp"
    # copy the shapefile into the new shapefile--we don't want to mess up the original data
print("The New Shapefile name is: "+new_shapefile_name)
Copy_Shapefile(shapefile_name,new_shapefile_name)
# New shapefile is opened for writing.
dataSource = ogr.Open(new_shapefile_name,1)
daLayer = dataSource.GetLayer(0)
# add a new field
new_field = ogr.FieldDefn(str("GEOL_CODE"), ogr.OFTInteger)
daLayer.CreateField(new_field)
# lets see what the layers are
print("Let me tell you what the names of the fields are after I added one!")
layerDefinition = daLayer.GetLayerDefn()
for i in range(layerDefinition.GetFieldCount()):
print(layerDefinition.GetFieldDefn(i).GetName())
# Make a key for the bedrock
geol_dict = dict()
geol_iterator = 0
#geol_field = geol_field.encode('utf-8')
for feature in daLayer:
GEOL = feature.GetField(geol_field)
if GEOL not in geol_dict:
geol_iterator = geol_iterator+1
print("I found a new rock type, GEOL: "+ str(GEOL)+ " and rock type: " + str(geol_iterator))
geol_dict[GEOL] = geol_iterator
# now get the geol code
this_geol_code = geol_dict[GEOL]
# set the feature
feature.SetField("GEOL_CODE", this_geol_code)
# need to update the layer
daLayer.SetFeature(feature)
print("The rocks are: ")
print(geol_dict)
print("All done")
return new_shapefile_name, geol_dict
def Copy_Shapefile(shapefile_name,new_shapefile_name):
"""
Sweet Jesus why is this so difficult?
"""
if exists(shapefile_name) is False:
raise Exception('[Errno 2] No such file or directory: \'' + shapefile_name + '\'')
# get the short name of the new shapefile
shapefileshortname = LSDPT.GetFilePrefix(new_shapefile_name)
print("The shortname is: "+shapefileshortname)
# read in the data
src = ogr.Open(shapefile_name)
daLayer = src.GetLayer(0)
# lets see what the layers are
print("Let me tell you what the names of the fields are!")
layerDefinition = daLayer.GetLayerDefn()
for i in range(layerDefinition.GetFieldCount()):
print(layerDefinition.GetFieldDefn(i).GetName())
geom_type = layerDefinition.GetGeomType()
# get rid of previous copies
if exists(new_shapefile_name):
os.remove(new_shapefile_name)
# get the driver and create a new data source
cliffbaggu = "ESRI shapefile"
cliffbaggu = str(cliffbaggu)
driver = ogr.GetDriverByName(cliffbaggu)
#src.Destroy()
# Now write to the the outfile
out_ds = driver.CreateDataSource(new_shapefile_name)
# create the output layer
#out_lyr = out_ds.CreateLayer("yo",srs = daLayer.GetSpatialRef(),geom_type=ogr.wkbPolygon)
out_lyr = out_ds.CreateLayer(str("yo"),srs = daLayer.GetSpatialRef(),geom_type=geom_type)
# Add input Layer Fields to the output Layer if it is the one we want
for i in range(0, layerDefinition.GetFieldCount()):
fieldDefn = layerDefinition.GetFieldDefn(i)
#fieldName = fieldDefn.GetName()
out_lyr.CreateField(fieldDefn)
# Get the output Layer's Feature Definition
outLayerDefn = out_lyr.GetLayerDefn()
# Add features to the ouput Layer
for inFeature in daLayer:
# Create output Feature
outFeature = ogr.Feature(outLayerDefn)
# add in geometries
geom = inFeature.GetGeometryRef()
outFeature.SetGeometry(geom.Clone())
# Add new feature to output Layer
# Add field values from input Layer
for i in range(0, outLayerDefn.GetFieldCount()):
fieldDefn = outLayerDefn.GetFieldDefn(i)
#fieldName = fieldDefn.GetName()
outFeature.SetField(outLayerDefn.GetFieldDefn(i).GetNameRef(),
inFeature.GetField(i))
out_lyr.CreateFeature(outFeature)
#out_ds.Destroy()
def rasterize_shapefile(path_to_shp, res = 30, field = ""):
"""
    This function drives the rasterization of the shapefile into a tif raster.
    A bil version may be added at some point.
@param:
path_to_shapefile (str) the path and name of the shapefile
@returns: Nothing but write a raster
@Author: BG
@date: 28/09/2017
"""
print("I will raterize your shapefile:")
shapefile_name = path_to_shp
print(shapefile_name)
#launching the rasterization
new_shapefile_name, geol_dict = geologic_maps_modify_shapefile(shapefile_name, geol_field = field)
tifname = Rasterize_geologic_maps_pythonic(new_shapefile_name,raster_resolution = res, geol_field = field)
Correct_Raterized_GLIM_map(tifname)
print("Now removing the temporary files")
os.remove(new_shapefile_name)
os.remove(tifname)
print("done with the rasterization")
|
StarcoderdataPython
|
194617
|
"""
MIT License
Copyright (c) 2017 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
import itertools
class SOM(object):
def __init__(self,h,w,dim_feat):
"""
Construction of a zero-filled SOM.
h,w,dim_feat: constructs a (h,w,dim_feat) SOM.
"""
self.shape = (h,w,dim_feat)
self.som = np.zeros((h,w,dim_feat))
# Training parameters
self.L0 = 0.0
self.lam = 0.0
self.sigma0 = 0.0
self.data = []
self.hit_score = np.zeros((h,w))
def train(self,data,L0,lam,sigma0,initializer=np.random.rand,frames=None):
"""
Training procedure for a SOM.
data: a N*d matrix, N the number of examples,
d the same as dim_feat=self.shape[2].
L0,lam,sigma0: training parameters.
initializer: a function taking h,w and dim_feat (*self.shape) as
parameters and returning an initial (h,w,dim_feat) tensor.
frames: saves intermediate frames if not None.
"""
self.L0 = L0
self.lam = lam
self.sigma0 = sigma0
self.som = initializer(*self.shape)
self.data = data
for t in itertools.count():
if frames != None:
frames.append(self.som.copy())
if self.sigma(t) < 0.5:
print("final t:", t)
#print("quantization error:", self.quant_err())
break
i_data = np.random.choice(range(len(data)))
bmu = self.find_bmu(data[i_data])
self.hit_score[bmu] += 1
self.update_som(bmu,data[i_data],t)
def quant_err(self):
"""
Computes the quantization error of the SOM.
It uses the data fed at last training.
"""
bmu_dists = []
for input_vector in self.data:
bmu = self.find_bmu(input_vector)
bmu_feat = self.som[bmu]
bmu_dists.append(np.linalg.norm(input_vector-bmu_feat))
return np.array(bmu_dists).mean()
def find_bmu(self, input_vec):
"""
Find the BMU of a given input vector.
input_vec: a d=dim_feat=self.shape[2] input vector.
"""
list_bmu = []
for y in range(self.shape[0]):
for x in range(self.shape[1]):
dist = np.linalg.norm((input_vec-self.som[y,x]))
list_bmu.append(((y,x),dist))
list_bmu.sort(key=lambda x: x[1])
return list_bmu[0][0]
def update_som(self,bmu,input_vector,t):
"""
Calls the update rule on each cell.
bmu: (y,x) BMU's coordinates.
input_vector: current data vector.
t: current time.
"""
for y in range(self.shape[0]):
for x in range(self.shape[1]):
dist_to_bmu = np.linalg.norm((np.array(bmu)-np.array((y,x))))
self.update_cell((y,x),dist_to_bmu,input_vector,t)
def update_cell(self,cell,dist_to_bmu,input_vector,t):
"""
Computes the update rule on a cell.
cell: (y,x) cell's coordinates.
dist_to_bmu: L2 distance from cell to bmu.
input_vector: current data vector.
t: current time.
"""
self.som[cell] += self.N(dist_to_bmu,t)*self.L(t)*(input_vector-self.som[cell])
def update_bmu(self,bmu,input_vector,t):
"""
Update rule for the BMU.
bmu: (y,x) BMU's coordinates.
input_vector: current data vector.
t: current time.
"""
self.som[bmu] += self.L(t)*(input_vector-self.som[bmu])
def L(self, t):
"""
Learning rate formula.
t: current time.
"""
return self.L0*np.exp(-t/self.lam)
def N(self,dist_to_bmu,t):
"""
Computes the neighbouring penalty.
dist_to_bmu: L2 distance to bmu.
t: current time.
"""
curr_sigma = self.sigma(t)
return np.exp(-(dist_to_bmu**2)/(2*curr_sigma**2))
def sigma(self, t):
"""
Neighbouring radius formula.
t: current time.
"""
return self.sigma0*np.exp(-t/self.lam)
|
StarcoderdataPython
|
179457
|
import cv2
import numpy as np
def diff_density(image1, image2, x=0, y=0, w=-1, h=-1):
    # Returns how different image1 is from image2 within the ROI (x, y, width, height)
if(image1 is None or image2 is None):
print("Input not compatible", image1, image2)
exit()
roi1 = image1[y:y+h, x:x+w]
roi2 = image2[y:y+h, x:x+w]
diff_array = cv2.absdiff(roi1, roi2)
# print(np.sum(diff_array/float(w*h)))
return np.sum(diff_array/float(w*h))
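# --- Hedged usage sketch (assumed image paths; pass explicit w and h, since the
# -1 defaults do not normalise by the true ROI area) ---
if __name__ == "__main__":
    img1 = cv2.imread("frame1.png", cv2.IMREAD_GRAYSCALE)
    img2 = cv2.imread("frame2.png", cv2.IMREAD_GRAYSCALE)
    print(diff_density(img1, img2, x=10, y=10, w=100, h=100))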
|
StarcoderdataPython
|
57095
|
from wx.lib.mixins.listctrl import CheckListCtrlMixin, ListCtrlAutoWidthMixin
import parser
import subprocess
import sys
import wx
from entrypoint2 import entrypoint
class CheckListCtrl(wx.ListCtrl, CheckListCtrlMixin, ListCtrlAutoWidthMixin):
def __init__(self, parent):
wx.ListCtrl.__init__(
self, parent, -1, style=wx.LC_REPORT | wx.SUNKEN_BORDER)
CheckListCtrlMixin.__init__(self)
ListCtrlAutoWidthMixin.__init__(self)
class Repository(wx.Frame):
def __init__(self, parent, id, title, mandoc):
self.command = mandoc['command']
wx.Frame.__init__(self, parent, id, title, size=(600, 400))
panel = wx.Panel(self, -1)
vbox = wx.BoxSizer(wx.VERTICAL)
hbox = wx.BoxSizer(wx.HORIZONTAL)
leftPanel = wx.Panel(panel, -1)
rightPanel = wx.Panel(panel, -1)
self.log = wx.TextCtrl(rightPanel, -1, style=wx.TE_MULTILINE)
self.list = CheckListCtrl(rightPanel)
self.list.InsertColumn(0, 'flag', width=140)
self.list.InsertColumn(1, 'short flag')
self.list.InsertColumn(2, 'help')
for i in mandoc['options']:
flags = i[0]
flags.sort(key=len, reverse=True)
index = self.list.InsertStringItem(sys.maxint, flags[0])
self.list.SetStringItem(
index, 1, flags[1] if len(flags) > 1 else '')
self.list.SetStringItem(index, 2, i[1])
vbox2 = wx.BoxSizer(wx.VERTICAL)
sel = wx.Button(leftPanel, -1, 'Select All', size=(100, -1))
des = wx.Button(leftPanel, -1, 'Deselect All', size=(100, -1))
apply = wx.Button(leftPanel, -1, 'Run', size=(100, -1))
self.cb_close = wx.CheckBox(leftPanel, -1, 'Close', size=(100, -1))
self.cb_close.SetToolTip(
wx.ToolTip("close GUI after running the command"))
self.cb_term = wx.CheckBox(
leftPanel, -1, 'new terminal', size=(100, -1))
self.cb_term.SetToolTip(wx.ToolTip("run command in new terminal"))
bt_exit = wx.Button(leftPanel, -1, 'Exit', size=(100, -1))
self.Bind(wx.EVT_BUTTON, self.OnSelectAll, id=sel.GetId())
self.Bind(wx.EVT_BUTTON, self.OnDeselectAll, id=des.GetId())
self.Bind(wx.EVT_BUTTON, self.OnApply, id=apply.GetId())
self.Bind(wx.EVT_BUTTON, self.OnExit, id=bt_exit.GetId())
vbox2.Add(sel, 0, wx.TOP, 5)
vbox2.Add(des)
vbox2.Add(apply)
vbox2.Add(self.cb_close)
vbox2.Add(self.cb_term)
vbox2.Add(bt_exit)
leftPanel.SetSizer(vbox2)
vbox.Add(self.list, 1, wx.EXPAND | wx.TOP, 3)
vbox.Add((-1, 10))
vbox.Add(self.log, 0.5, wx.EXPAND)
vbox.Add((-1, 10))
rightPanel.SetSizer(vbox)
hbox.Add(leftPanel, 0, wx.EXPAND | wx.RIGHT, 5)
hbox.Add(rightPanel, 1, wx.EXPAND)
hbox.Add((3, -1))
panel.SetSizer(hbox)
self.Centre()
self.Show(True)
self.list.OnCheckItem = self.OnCheckItem
cmd = self.cmd()
self.log.SetValue(cmd)
def OnSelectAll(self, event):
num = self.list.GetItemCount()
for i in range(num):
self.list.CheckItem(i)
def OnDeselectAll(self, event):
num = self.list.GetItemCount()
for i in range(num):
self.list.CheckItem(i, False)
def OnApply(self, event):
# print os.getcwd()
cmd = self.log.GetValue()
term = 'xterm'
if self.cb_term.IsChecked():
cmd = '%s -hold -e "%s"' % (term, cmd)
# os.system( cmd )
subprocess.Popen(cmd, shell=1)
if self.cb_close.IsChecked():
exit(0)
def OnExit(self, event):
exit(0)
def cmd(self):
count = self.list.GetItemCount()
cmd = self.command + ' '
for row in range(count):
item = self.list.GetItem(itemId=row, col=0)
if self.list.IsChecked(row):
cmd += item.GetText() + ' '
return cmd
def OnCheckItem(self, index, flag):
cmd = self.cmd()
self.log.SetValue(cmd)
@entrypoint
def main(command):
mandoc = parser.command_info(command)
app = wx.App()
Repository(None, -1, 'mangui', mandoc)
app.MainLoop()
|
StarcoderdataPython
|
19958
|
class Solution(object):
def _dfs(self,num,res,n):
if num>n:
return
res.append(num)
num=num*10
if num<=n:
for i in xrange(10):
self._dfs(num+i,res,n)
def sovleOn(self,n):
res=[]
cur=1
for i in xrange(1,n+1):
res.append(cur)
if cur*10<=n:
cur=cur*10
            # if the number does not end with 9, just add 1
            # (e.g. after 19 the next value is 2, not 20, which the else branch handles)
elif cur%10!=9 and cur+1<=n:
cur+=1
else:
                # strip trailing 9s, e.g. 199 -> 2, 499 -> 5
while (cur/10)%10==9:
cur/=10
cur=cur/10+1
return res
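    # Worked example (assumption): for n = 13 the lexical order is
    # [1, 10, 11, 12, 13, 2, 3, 4, 5, 6, 7, 8, 9]; cur walks 1 -> 10 -> 11 -> 12
    # -> 13, then the final else branch turns 13 into 13/10 + 1 = 2.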
def lexicalOrder(self, n):
"""
:type n: int
:rtype: List[int]
"""
        return self.solveOn(n)
        # alternative DFS approach, kept for reference (unreachable after the return above):
        res=[]
        for i in xrange(1,10):
            self._dfs(i,res,n)
        return res
|
StarcoderdataPython
|
61591
|
'''
Task Scheduler
You are given a list of tasks the CPU needs to execute, represented by a character array tasks. Each letter stands for a different kind of task. Tasks can be executed in any order, and each task takes 1 unit of time to complete.
In any single unit of time the CPU can either finish one task or stay idle.
However, two tasks of the same kind must be separated by a cooldown of n units, i.e. there must be at least n consecutive units in which the CPU runs different tasks or stays idle.
Compute the minimum time needed to finish all tasks.
Constraints:
1 <= task.length <= 10^4
tasks[i] is an uppercase English letter
n is in the range [0, 100]
'''
from typing import List
'''
Approach: hashing + counted rounds
The scheduling window has size n, so each round takes the n + 1 most frequent pending tasks and runs them in turn.
Repeatedly popping the most frequent pending tasks can be implemented with a hash map plus a linked list; here we cut a corner and use collections.Counter instead.
Time complexity: O(m*n)
Space complexity: O(m)
'''
class Solution:
def leastInterval(self, tasks: List[str], n: int) -> int:
if n == 0:
return len(tasks)
import collections
counter = collections.Counter(tasks)
empty = collections.Counter()
times = 0
while len(counter) > 0:
topTasks = counter.most_common(n + 1)
counter.subtract(map(lambda t: t[0], topTasks))
            counter = counter - empty  # drop tasks whose remaining count is 0
if len(counter) > 0:
times += n + 1
else:
times += len(topTasks)
return times
s = Solution()
print(s.leastInterval(tasks=["A", "A", "A", "B", "B", "B"], n=2))
print(s.leastInterval(tasks=["A", "A", "A", "B", "B", "B"], n=0))
print(s.leastInterval(tasks=["A", "A", "A", "A", "A", "A", "B", "C", "D", "E", "F", "G"], n=2))
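# A common O(m) closed-form cross-check (assumption, not part of the solution above):
#   max_freq = highest frequency of any task, k = number of task kinds with that frequency
#   answer = max(len(tasks), (max_freq - 1) * (n + 1) + k)
# e.g. tasks = ["A","A","A","B","B","B"], n = 2: max(6, (3-1)*3 + 2) = 8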
|
StarcoderdataPython
|
1741170
|
<filename>pyauto/pyauto_correct.py
"""
Refer: https://github.com/PandaWhoCodes/pyautocorrect and https://github.com/phatpiglet/autocorrect/ and https://pypi.org/project/autocorrect/
Works in mappy2 env but might have to shift this to py3.
What about Unicode?
"""
import pyautocorrect
print(pyautocorrect.correct("this is a simple taste to see if this works peiperly"))
print(pyautocorrect.correct("Spellin is difficult, whch is wyh you need to study everyday."))
|
StarcoderdataPython
|
1654702
|
from typing import List, Optional
from sqlalchemy import desc, func
from sqlalchemy.ext.asyncio.session import AsyncSession
from sqlalchemy.sql.expression import select
from app.database.dbo.mottak import Arkivuttrekk as Arkivuttrekk_DBO
from app.domain.models.Arkivuttrekk import Arkivuttrekk
async def create(db: AsyncSession, arkivuttrekk: Arkivuttrekk) -> Arkivuttrekk_DBO:
dbo = Arkivuttrekk_DBO(**vars(arkivuttrekk))
db.add(dbo)
await db.flush()
return dbo
async def update(db: AsyncSession, id_: int, updated_fields: dict) -> Optional[Arkivuttrekk_DBO]:
arkivuttrekk = await db.get(Arkivuttrekk_DBO, id_)
if not arkivuttrekk:
return None
for key, value in updated_fields.items():
setattr(arkivuttrekk, key, value)
await db.flush()
return arkivuttrekk
async def get_all(db: AsyncSession, skip: int, limit: int) -> List[Arkivuttrekk_DBO]:
query = await db.execute(
select(Arkivuttrekk_DBO)
.order_by(desc(Arkivuttrekk_DBO.id))
# None is equivalent to no limit.
.limit(None if limit == -1 else limit)
.offset(skip)
)
return query.scalars().all()
async def get_count(db: AsyncSession) -> int:
return await db.scalar(select(func.count(Arkivuttrekk_DBO.id)))
async def get_by_id(db: AsyncSession, id_: int) -> Optional[Arkivuttrekk_DBO]:
return await db.get(Arkivuttrekk_DBO, id_)
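# Usage sketch (assumption: `db` is an AsyncSession from the app's session factory
# and `uttrekk` is a populated Arkivuttrekk domain object; the field name below is illustrative):
# async def example(db: AsyncSession, uttrekk: Arkivuttrekk):
#     dbo = await create(db, uttrekk)
#     await update(db, dbo.id, {"tittel": "new title"})
#     page = await get_all(db, skip=0, limit=20)
#     total = await get_count(db)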
|
StarcoderdataPython
|
1715382
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: MIT
# Copyright © 2021 <NAME>
from __future__ import print_function, unicode_literals
from hpl.parser import property_parser
from hplrv.rendering import TemplateRenderer
def main():
p = property_parser()
r = TemplateRenderer()
text = [
'#id: p1 globally: no b {x > 0}',
]
hp = [p.parse(ti) for ti in text]
topics = {
'a': 'geometry_msgs/Point',
'b': 'geometry_msgs/Point',
}
py = r.render_rospy_node(hp, topics)
print(py)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
3357853
|
#!/usr/bin/python
"""
TCP Communications Module
"""
import asyncore
import socket
import cPickle as pickle
from time import time as _time, sleep as _sleep
from StringIO import StringIO
from .debugging import ModuleLogger, DebugContents, bacpypes_debugging
from .core import deferred
from .task import FunctionTask, OneShotFunction
from .comm import PDU, Client, Server
from .comm import ServiceAccessPoint, ApplicationServiceElement
# some debugging
_debug = 0
_log = ModuleLogger(globals())
# globals
REBIND_SLEEP_INTERVAL = 2.0
#
# PickleActorMixIn
#
class PickleActorMixIn:
def __init__(self, *args):
if _debug: PickleActorMixIn._debug("__init__ %r", args)
super(PickleActorMixIn, self).__init__(*args)
# keep an upstream buffer
self.pickleBuffer = ''
def indication(self, pdu):
if _debug: PickleActorMixIn._debug("indication %r", pdu)
# pickle the data
pdu.pduData = pickle.dumps(pdu.pduData)
# continue as usual
super(PickleActorMixIn, self).indication(pdu)
def response(self, pdu):
if _debug: PickleActorMixIn._debug("response %r", pdu)
# add the data to our buffer
self.pickleBuffer += pdu.pduData
# build a file-like object around the buffer
strm = StringIO(self.pickleBuffer)
pos = 0
while (pos < strm.len):
try:
# try to load something
msg = pickle.load(strm)
except:
break
# got a message
rpdu = PDU(msg)
rpdu.update(pdu)
super(PickleActorMixIn, self).response(rpdu)
# see where we are
pos = strm.tell()
# save anything left over, if there is any
if (pos < strm.len):
self.pickleBuffer = self.pickleBuffer[pos:]
else:
self.pickleBuffer = ''
bacpypes_debugging(PickleActorMixIn)
#
# TCPClient
#
# This class is a mapping between the client/server pattern and the
# socket API. The ctor is given the address to connect as a TCP
# client. Because objects of this class sit at the bottom of a
# protocol stack they are accessed as servers.
#
class TCPClient(asyncore.dispatcher):
def __init__(self, peer):
if _debug: TCPClient._debug("__init__ %r", peer)
asyncore.dispatcher.__init__(self)
# ask the dispatcher for a socket
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
# save the peer
self.peer = peer
# create a request buffer
self.request = ''
# try to connect
try:
if _debug: TCPClient._debug(" - initiate connection")
self.connect(peer)
except socket.error, err:
if _debug: TCPClient._debug(" - connect socket error: %r", err)
# pass along to an error handler
self.handle_error(err)
def handle_connect(self):
if _debug: TCPClient._debug("handle_connect")
def handle_connect_event(self):
if _debug: TCPClient._debug("handle_connect_event")
# there might be an error
err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
if _debug: TCPClient._debug(" - err: %r", err)
# check for connection refused
if (err == 0):
if _debug: TCPClient._debug(" - no error")
elif (err == 111):
if _debug: TCPClient._debug(" - connection to %r refused", self.peer)
self.handle_error(socket.error(111, "connection refused"))
return
# pass along
asyncore.dispatcher.handle_connect_event(self)
def readable(self):
return self.connected
def handle_read(self):
if _debug: TCPClient._debug("handle_read")
try:
msg = self.recv(65536)
if _debug: TCPClient._debug(" - received %d octets", len(msg))
# no socket means it was closed
if not self.socket:
if _debug: TCPClient._debug(" - socket was closed")
else:
# send the data upstream
deferred(self.response, PDU(msg))
except socket.error, err:
if (err.args[0] in (61, 111)):
if _debug: TCPClient._debug(" - connection to %r refused", self.peer)
else:
if _debug: TCPClient._debug(" - recv socket error: %r", err)
# pass along to a handler
self.handle_error(err)
def writable(self):
return (len(self.request) != 0)
def handle_write(self):
if _debug: TCPClient._debug("handle_write")
try:
sent = self.send(self.request)
if _debug: TCPClient._debug(" - sent %d octets, %d remaining", sent, len(self.request) - sent)
self.request = self.request[sent:]
except socket.error, err:
if (err.args[0] == 32):
if _debug: TCPClient._debug(" - broken pipe to %r", self.peer)
return
elif (err.args[0] in (61, 111)):
if _debug: TCPClient._debug(" - connection to %r refused", self.peer)
else:
if _debug: TCPClient._debug(" - send socket error: %s", err)
# pass along to a handler
self.handle_error(err)
def handle_write_event(self):
if _debug: TCPClient._debug("handle_write_event")
# there might be an error
err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
if _debug: TCPClient._debug(" - err: %r", err)
# check for connection refused
if (err in (61, 111)):
if _debug: TCPClient._debug(" - connection to %r refused", self.peer)
self.handle_error(socket.error(err, "connection refused"))
self.handle_close()
return
# pass along
asyncore.dispatcher.handle_write_event(self)
def handle_close(self):
if _debug: TCPClient._debug("handle_close")
# close the socket
self.close()
# make sure other routines know the socket is closed
self.socket = None
def handle_error(self, error=None):
"""Trap for TCPClient errors, otherwise continue."""
if _debug: TCPClient._debug("handle_error %r", error)
# core does not take parameters
asyncore.dispatcher.handle_error(self)
def indication(self, pdu):
"""Requests are queued for delivery."""
if _debug: TCPClient._debug("indication %r", pdu)
self.request += pdu.pduData
bacpypes_debugging(TCPClient)
#
# TCPClientActor
#
# Actors are helper objects for a director. There is one actor for
# each connection.
#
class TCPClientActor(TCPClient):
def __init__(self, director, peer):
if _debug: TCPClientActor._debug("__init__ %r %r", director, peer)
TCPClient.__init__(self, peer)
# keep track of the director
self.director = director
# add a timer
self.timeout = director.timeout
if self.timeout > 0:
self.timer = FunctionTask(self.idle_timeout)
self.timer.install_task(_time() + self.timeout)
else:
self.timer = None
# this may have a flush state
self.flushTask = None
# tell the director this is a new actor
self.director.add_actor(self)
def handle_error(self, error=None):
"""Trap for TCPClient errors, otherwise continue."""
if _debug: TCPClientActor._debug("handle_error %r", error)
# pass along to the director
if error is not None:
self.director.actor_error(self, error)
else:
TCPClient.handle_error(self)
def handle_close(self):
if _debug: TCPClientActor._debug("handle_close")
# if there's a flush task, cancel it
if self.flushTask:
self.flushTask.suspend_task()
# cancel the timer
if self.timer:
self.timer.suspend_task()
# tell the director this is gone
self.director.del_actor(self)
# pass the function along
TCPClient.handle_close(self)
def idle_timeout(self):
if _debug: TCPClientActor._debug("idle_timeout")
# shut it down
self.handle_close()
def indication(self, pdu):
if _debug: TCPClientActor._debug("indication %r", pdu)
# additional downstream data is tossed while flushing
if self.flushTask:
            if _debug: TCPClientActor._debug(" - flushing")
return
# reschedule the timer
if self.timer:
self.timer.install_task(_time() + self.timeout)
# continue as usual
TCPClient.indication(self, pdu)
def response(self, pdu):
if _debug: TCPClientActor._debug("response %r", pdu)
# put the peer address in as the source
pdu.pduSource = self.peer
# reschedule the timer
if self.timer:
self.timer.install_task(_time() + self.timeout)
# process this as a response from the director
self.director.response(pdu)
def flush(self):
if _debug: TCPClientActor._debug("flush")
# clear out the old task
self.flushTask = None
# if the outgoing buffer has data, re-schedule another attempt
if self.request:
self.flushTask = OneShotFunction(self.flush)
return
# close up shop, all done
self.handle_close()
bacpypes_debugging(TCPClientActor)
#
# TCPPickleClientActor
#
class TCPPickleClientActor(PickleActorMixIn, TCPClientActor):
pass
#
# TCPClientDirector
#
# A client director presents a connection pool as one virtual
# interface. If a request should be sent to an address and there
# is no connection already established for it, it will create one
# and maintain it. PDU's from TCP clients have no source address,
# so one is provided by the client actor.
#
class TCPClientDirector(Server, ServiceAccessPoint, DebugContents):
_debug_contents = ('timeout', 'actorClass', 'clients', 'reconnect')
def __init__(self, timeout=0, actorClass=TCPClientActor, sid=None, sapID=None):
if _debug: TCPClientDirector._debug("__init__ timeout=%r actorClass=%r sid=%r sapID=%r", timeout, actorClass, sid, sapID)
Server.__init__(self, sid)
ServiceAccessPoint.__init__(self, sapID)
# check the actor class
if not issubclass(actorClass, TCPClientActor):
raise TypeError("actorClass must be a subclass of TCPClientActor")
self.actorClass = actorClass
# save the timeout for actors
self.timeout = timeout
# start with an empty client pool
self.clients = {}
# no clients automatically reconnecting
self.reconnect = {}
def add_actor(self, actor):
"""Add an actor when a new one is connected."""
if _debug: TCPClientDirector._debug("add_actor %r", actor)
self.clients[actor.peer] = actor
# tell the ASE there is a new client
if self.serviceElement:
self.sap_request(add_actor=actor)
def del_actor(self, actor):
"""Remove an actor when the socket is closed."""
if _debug: TCPClientDirector._debug("del_actor %r", actor)
del self.clients[actor.peer]
# tell the ASE the client has gone away
if self.serviceElement:
self.sap_request(del_actor=actor)
# see if it should be reconnected
if actor.peer in self.reconnect:
connect_task = FunctionTask(self.connect, actor.peer)
connect_task.install_task(_time() + self.reconnect[actor.peer])
def actor_error(self, actor, error):
if _debug: TCPClientDirector._debug("actor_error %r %r", actor, error)
# tell the ASE the actor had an error
if self.serviceElement:
self.sap_request(actor_error=actor, error=error)
def get_actor(self, address):
""" Get the actor associated with an address or None. """
return self.clients.get(address, None)
def connect(self, address, reconnect=0):
if _debug: TCPClientDirector._debug("connect %r reconnect=%r", address, reconnect)
if address in self.clients:
return
# create an actor, which will eventually call add_actor
client = self.actorClass(self, address)
if _debug: TCPClientDirector._debug(" - client: %r", client)
# if it should automatically reconnect, save the timer value
if reconnect:
self.reconnect[address] = reconnect
def disconnect(self, address):
if _debug: TCPClientDirector._debug("disconnect %r", address)
if address not in self.clients:
return
# if it would normally reconnect, don't bother
if address in self.reconnect:
del self.reconnect[address]
# close it
self.clients[address].handle_close()
def indication(self, pdu):
"""Direct this PDU to the appropriate server, create a
connection if one hasn't already been created."""
if _debug: TCPClientDirector._debug("indication %r", pdu)
# get the destination
addr = pdu.pduDestination
# get the client
client = self.clients.get(addr, None)
if not client:
client = self.actorClass(self, addr)
# send the message
client.indication(pdu)
bacpypes_debugging(TCPClientDirector)
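#
#   Usage sketch (assumption: an asyncore loop is running elsewhere; the address
#   and payload below are illustrative):
#
#       director = TCPClientDirector(timeout=30)
#       director.connect(('10.0.0.1', 47808), reconnect=5)
#       director.indication(PDU('hello', destination=('10.0.0.1', 47808)))
#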
#
# TCPServer
#
class TCPServer(asyncore.dispatcher):
def __init__(self, sock, peer):
if _debug: TCPServer._debug("__init__ %r %r", sock, peer)
asyncore.dispatcher.__init__(self, sock)
# save the peer
self.peer = peer
# create a request buffer
self.request = ''
def handle_connect(self):
if _debug: TCPServer._debug("handle_connect")
def readable(self):
return 1
def handle_read(self):
if _debug: TCPServer._debug("handle_read")
try:
msg = self.recv(65536)
if _debug: TCPServer._debug(" - received %d octets", len(msg))
# no socket means it was closed
if not self.socket:
if _debug: TCPServer._debug(" - socket was closed")
else:
# send the data upstream
deferred(self.response, PDU(msg))
except socket.error, err:
if (err.args[0] == 111):
if _debug: TCPServer._debug(" - connection to %r refused", self.peer)
else:
if _debug: TCPServer._debug(" - recv socket error: %s", err)
# pass along to a handler
self.handle_error(err)
def writable(self):
return (len(self.request) != 0)
def handle_write(self):
if _debug: TCPServer._debug("handle_write")
try:
sent = self.send(self.request)
if _debug: TCPServer._debug(" - sent %d octets, %d remaining", sent, len(self.request) - sent)
self.request = self.request[sent:]
except socket.error, err:
if (err.args[0] == 111):
if _debug: TCPServer._debug(" - connection to %r refused", self.peer)
else:
if _debug: TCPServer._debug(" - send socket error: %s", err)
# pass along to a handler
self.handle_error(err)
def handle_close(self):
if _debug: TCPServer._debug("handle_close")
if not self:
if _debug: TCPServer._debug(" - self is None")
return
if not self.socket:
if _debug: TCPServer._debug(" - socket already closed")
return
self.close()
self.socket = None
def handle_error(self, error=None):
"""Trap for TCPServer errors, otherwise continue."""
if _debug: TCPServer._debug("handle_error %r", error)
# core does not take parameters
asyncore.dispatcher.handle_error(self)
def indication(self, pdu):
"""Requests are queued for delivery."""
if _debug: TCPServer._debug("indication %r", pdu)
self.request += pdu.pduData
bacpypes_debugging(TCPServer)
#
# TCPServerActor
#
class TCPServerActor(TCPServer):
def __init__(self, director, sock, peer):
if _debug: TCPServerActor._debug("__init__ %r %r %r", director, sock, peer)
TCPServer.__init__(self, sock, peer)
# keep track of the director
self.director = director
# add a timer
self.timeout = director.timeout
if self.timeout > 0:
self.timer = FunctionTask(self.idle_timeout)
self.timer.install_task(_time() + self.timeout)
else:
self.timer = None
# this may have a flush state
self.flushTask = None
# tell the director this is a new actor
self.director.add_actor(self)
def handle_error(self, error=None):
"""Trap for TCPServer errors, otherwise continue."""
if _debug: TCPServerActor._debug("handle_error %r", error)
# pass along to the director
if error is not None:
self.director.actor_error(self, error)
else:
TCPServer.handle_error(self)
def handle_close(self):
if _debug: TCPServerActor._debug("handle_close")
# if there's a flush task, cancel it
if self.flushTask:
self.flushTask.suspend_task()
# tell the director this is gone
self.director.del_actor(self)
# pass it down
TCPServer.handle_close(self)
def idle_timeout(self):
if _debug: TCPServerActor._debug("idle_timeout")
# shut it down
self.handle_close()
def indication(self, pdu):
if _debug: TCPServerActor._debug("indication %r", pdu)
# additional downstream data is tossed while flushing
if self.flushTask:
if _debug: TCPServerActor._debug(" - flushing")
return
# reschedule the timer
if self.timer:
self.timer.install_task(_time() + self.timeout)
# continue as usual
TCPServer.indication(self, pdu)
def response(self, pdu):
if _debug: TCPServerActor._debug("response %r", pdu)
# upstream data is tossed while flushing
if self.flushTask:
if _debug: TCPServerActor._debug(" - flushing")
return
# save the source
pdu.pduSource = self.peer
# reschedule the timer
if self.timer:
self.timer.install_task(_time() + self.timeout)
# process this as a response from the director
self.director.response(pdu)
def flush(self):
if _debug: TCPServerActor._debug("flush")
# clear out the old task
self.flushTask = None
# if the outgoing buffer has data, re-schedule another attempt
if self.request:
self.flushTask = OneShotFunction(self.flush)
return
# close up shop, all done
self.handle_close()
bacpypes_debugging(TCPServerActor)
#
# TCPPickleServerActor
#
class TCPPickleServerActor(PickleActorMixIn, TCPServerActor):
pass
#
# TCPServerDirector
#
class TCPServerDirector(asyncore.dispatcher, Server, ServiceAccessPoint, DebugContents):
_debug_contents = ('port', 'timeout', 'actorClass', 'servers')
def __init__(self, address, listeners=5, timeout=0, reuse=False, actorClass=TCPServerActor, cid=None, sapID=None):
if _debug:
TCPServerDirector._debug("__init__ %r listeners=%r timeout=%r reuse=%r actorClass=%r cid=%r sapID=%r"
, address, listeners, timeout, reuse, actorClass, cid, sapID
)
Server.__init__(self, cid)
ServiceAccessPoint.__init__(self, sapID)
# save the address and timeout
self.port = address
self.timeout = timeout
# check the actor class
if not issubclass(actorClass, TCPServerActor):
raise TypeError("actorClass must be a subclass of TCPServerActor")
self.actorClass = actorClass
# start with an empty pool of servers
self.servers = {}
# continue with initialization
asyncore.dispatcher.__init__(self)
# create a listening port
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
if reuse:
self.set_reuse_addr()
# try to bind, keep trying for a while if its already in use
hadBindErrors = False
for i in range(30):
try:
self.bind(address)
break
except socket.error, err:
hadBindErrors = True
TCPServerDirector._warning('bind error %r, sleep and try again', err)
_sleep(REBIND_SLEEP_INTERVAL)
else:
TCPServerDirector._error('unable to bind')
raise RuntimeError("unable to bind")
        # if there were some bind errors, generate a message that all is OK now
if hadBindErrors:
TCPServerDirector._info('bind successful')
self.listen(listeners)
def handle_accept(self):
if _debug: TCPServerDirector._debug("handle_accept")
try:
client, addr = self.accept()
except socket.error:
TCPServerDirector._warning('accept() threw an exception')
return
except TypeError:
TCPServerDirector._warning('accept() threw EWOULDBLOCK')
return
if _debug: TCPServerDirector._debug(" - connection %r, %r", client, addr)
# create a server
server = self.actorClass(self, client, addr)
# add it to our pool
self.servers[addr] = server
# return it to the dispatcher
return server
def handle_close(self):
if _debug: TCPServerDirector._debug("handle_close")
# close the socket
self.close()
def add_actor(self, actor):
if _debug: TCPServerDirector._debug("add_actor %r", actor)
self.servers[actor.peer] = actor
# tell the ASE there is a new server
if self.serviceElement:
self.sap_request(add_actor=actor)
def del_actor(self, actor):
if _debug: TCPServerDirector._debug("del_actor %r", actor)
try:
del self.servers[actor.peer]
except KeyError:
TCPServerDirector._warning("del_actor: %r not an actor", actor)
# tell the ASE the server has gone away
if self.serviceElement:
self.sap_request(del_actor=actor)
def actor_error(self, actor, error):
if _debug: TCPServerDirector._debug("actor_error %r %r", actor, error)
# tell the ASE the actor had an error
if self.serviceElement:
self.sap_request(actor_error=actor, error=error)
def get_actor(self, address):
""" Get the actor associated with an address or None. """
return self.servers.get(address, None)
def indication(self, pdu):
"""Direct this PDU to the appropriate server."""
if _debug: TCPServerDirector._debug("indication %r", pdu)
# get the destination
addr = pdu.pduDestination
# get the server
server = self.servers.get(addr, None)
if not server:
raise RuntimeError("not a connected server")
# pass the indication to the actor
server.indication(pdu)
bacpypes_debugging(TCPServerDirector)
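#
#   Usage sketch (assumption: asyncore.loop() is driven elsewhere; the port is
#   illustrative). Incoming connections create TCPServerActor instances that are
#   tracked in director.servers.
#
#       director = TCPServerDirector(('', 9000), timeout=60, reuse=True)
#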
#
# StreamToPacket
#
class StreamToPacket(Client, Server):
def __init__(self, fn, cid=None, sid=None):
if _debug: StreamToPacket._debug("__init__ %r cid=%r, sid=%r", fn, cid, sid)
Client.__init__(self, cid)
Server.__init__(self, sid)
# save the packet function
self.packetFn = fn
# start with an empty set of buffers
self.upstreamBuffer = {}
self.downstreamBuffer = {}
def packetize(self, pdu, streamBuffer):
if _debug: StreamToPacket._debug("packetize %r ...", pdu)
def chop(addr):
if _debug: StreamToPacket._debug("chop %r", addr)
# get the current downstream buffer
buff = streamBuffer.get(addr, '') + pdu.pduData
if _debug: StreamToPacket._debug(" - buff: %r", buff)
# look for a packet
while 1:
packet = self.packetFn(buff)
if _debug: StreamToPacket._debug(" - packet: %r", packet)
if packet is None:
break
yield PDU(packet[0],
source=pdu.pduSource,
destination=pdu.pduDestination,
user_data=pdu.pduUserData,
)
buff = packet[1]
# save what didn't get sent
streamBuffer[addr] = buff
# buffer related to the addresses
if pdu.pduSource:
for pdu in chop(pdu.pduSource):
yield pdu
if pdu.pduDestination:
for pdu in chop(pdu.pduDestination):
yield pdu
def indication(self, pdu):
"""Message going downstream."""
if _debug: StreamToPacket._debug("indication %r", pdu)
# hack it up into chunks
for packet in self.packetize(pdu, self.downstreamBuffer):
self.request(packet)
def confirmation(self, pdu):
"""Message going upstream."""
if _debug: StreamToPacket._debug("StreamToPacket.confirmation %r", pdu)
# hack it up into chunks
for packet in self.packetize(pdu, self.upstreamBuffer):
self.response(packet)
bacpypes_debugging(StreamToPacket)
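#
#   Sketch of a packet function for the StreamToPacket constructor (assumption:
#   2-octet big-endian length-prefix framing). The function must return None when
#   no complete packet is buffered, or a (packet, remaining-buffer) tuple otherwise.
#
#       def length_prefixed(buff):
#           if len(buff) < 2:
#               return None
#           n = (ord(buff[0]) << 8) | ord(buff[1])
#           if len(buff) < 2 + n:
#               return None
#           return (buff[2:2 + n], buff[2 + n:])
#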
#
# StreamToPacketSAP
#
class StreamToPacketSAP(ApplicationServiceElement, ServiceAccessPoint):
def __init__(self, stp, aseID=None, sapID=None):
if _debug: StreamToPacketSAP._debug("__init__ %r aseID=%r, sapID=%r", stp, aseID, sapID)
ApplicationServiceElement.__init__(self, aseID)
ServiceAccessPoint.__init__(self, sapID)
# save a reference to the StreamToPacket object
self.stp = stp
def indication(self, add_actor=None, del_actor=None, actor_error=None, error=None):
if _debug: StreamToPacketSAP._debug("indication add_actor=%r del_actor=%r", add_actor, del_actor)
if add_actor:
# create empty buffers associated with the peer
self.stp.upstreamBuffer[add_actor.peer] = ''
self.stp.downstreamBuffer[add_actor.peer] = ''
if del_actor:
# delete the buffer contents associated with the peer
del self.stp.upstreamBuffer[del_actor.peer]
del self.stp.downstreamBuffer[del_actor.peer]
# chain this along
if self.serviceElement:
self.sap_request(
add_actor=add_actor,
del_actor=del_actor,
actor_error=actor_error, error=error,
)
bacpypes_debugging(StreamToPacketSAP)
|
StarcoderdataPython
|
128822
|
<reponame>Aghassi/rules_spa
"""A macro for creating a webpack federation route module"""
load("@aspect_rules_swc//swc:swc.bzl", "swc")
# Defines this as an importable module area for shared macros and configs
def build_route(name, entry, srcs, data, webpack, federation_shared_config):
"""
    Macro that allows easy composition of routes for a multi-route SPA
Args:
name: name of a route (route)
entry: the entry file to the route
srcs: source files to be transpiled and bundled
data: any dependencies the route needs to build
webpack: the webpack module to invoke. The users must provide their own load statement for webpack before this macro is called
federation_shared_config: a nodejs module file that exposes a map of dependencies to their shared module spec https://webpack.js.org/plugins/module-federation-plugin/#sharing-hints. An example of this is located within this repository under the private/webpack folder.
"""
build_name = name + "_route"
# list of all transpilation targets from SWC to be passed to webpack
deps = [
":transpile_" + files.replace("//", "").replace("/", "_").split(".")[0]
for files in srcs
] + data
# buildifier: disable=no-effect
[
swc(
name = "transpile_" + s.replace("//", "").replace("/", "_").split(".")[0],
args = [
"-C jsc.parser.jsx=true",
"-C jsc.parser.syntax=typescript",
"-C jsc.transform.react.runtime=automatic",
"-C jsc.transform.react.development=false",
"-C module.type=commonjs",
],
srcs = [s],
)
for s in srcs
]
route_config = Label("//spa/private/webpack:webpack.route.config.js")
webpack(
name = name,
args = [
"--env name=" + build_name,
"--env entry=./$(execpath :transpile_" + name + ")",
"--env SHARED_CONFIG=$(location %s)" % federation_shared_config,
"--env BAZEL_SRC_PATH=$(execpath :transpile_" + name + ")",
"--output-path=$(@D)",
"--config=$(rootpath %s)" % route_config,
],
data = [
route_config,
federation_shared_config,
Label("//spa/private/webpack:webpack.common.config.js"),
] + deps,
output_dir = True,
visibility = ["//src/client/routes:__pkg__"],
)
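# Hypothetical BUILD-file usage sketch (the load paths, labels and dependency
# names below are illustrative assumptions, not part of this repository):
#
# load("//spa:route.bzl", "build_route")
# load("@npm//webpack-cli:index.bzl", webpack = "webpack_cli")
#
# build_route(
#     name = "checkout",
#     entry = "index.jsx",
#     srcs = glob(["**/*.jsx"]),
#     data = ["@npm//react"],
#     webpack = webpack,
#     federation_shared_config = "//spa/config:federation_shared.js",
# )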
|
StarcoderdataPython
|
150970
|
# -*- coding: utf-8 -*-
def main():
n = int(input())
ans = set()
for i in range(n):
ai, bi = map(int, input().split())
if ai > bi:
ans.add((bi, ai))
else:
ans.add((ai, bi))
print(len(ans))
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
1685480
|
#!/usr/bin/python
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test host_remote_agent.py."""
from absl.testing import absltest
from pysc2.env import host_remote_agent
from pysc2.lib import remote_controller
from pysc2.lib import run_parallel
from pysc2.tests import utils
from s2clientprotocol import common_pb2 as sc_common
from s2clientprotocol import sc2api_pb2 as sc_pb
NUM_MATCHES = 2
STEPS = 100
class TestHostRemoteAgent(utils.TestCase):
def testVsBot(self):
bot_first = True
for _ in range(NUM_MATCHES):
with host_remote_agent.VsBot() as game:
game.create_game(
map_name="Simple64",
bot_difficulty=sc_pb.VeryHard,
bot_first=bot_first)
controller = remote_controller.RemoteController(
host=game.host,
port=game.host_port)
join = sc_pb.RequestJoinGame(options=sc_pb.InterfaceOptions(raw=True))
join.race = sc_common.Random
controller.join_game(join)
for _ in range(STEPS):
controller.step()
response_observation = controller.observe()
if response_observation.player_result:
break
controller.leave()
controller.close()
bot_first = not bot_first
def testVsAgent(self):
parallel = run_parallel.RunParallel()
for _ in range(NUM_MATCHES):
with host_remote_agent.VsAgent() as game:
game.create_game("Simple64")
controllers = [
remote_controller.RemoteController(
host=host,
port=host_port)
for host, host_port in zip(game.hosts, game.host_ports)]
join = sc_pb.RequestJoinGame(options=sc_pb.InterfaceOptions(raw=True))
join.race = sc_common.Random
join.shared_port = 0
join.server_ports.game_port = game.lan_ports[0]
join.server_ports.base_port = game.lan_ports[1]
join.client_ports.add(
game_port=game.lan_ports[2],
base_port=game.lan_ports[3])
parallel.run((c.join_game, join) for c in controllers)
for _ in range(STEPS):
parallel.run(c.step for c in controllers)
response_observations = [c.observe() for c in controllers]
if response_observations[0].player_result:
break
parallel.run(c.leave for c in controllers)
parallel.run(c.close for c in controllers)
if __name__ == "__main__":
absltest.main()
|
StarcoderdataPython
|
3289452
|
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import ast
import pasta
import pytest
from sagemaker.cli.compatibility.v2.modifiers import serde
from tests.unit.sagemaker.cli.compatibility.v2.modifiers.ast_converter import ast_call, ast_import
@pytest.mark.parametrize(
"src, expected",
[
("sagemaker.predictor._CsvSerializer()", True),
("sagemaker.predictor._JsonSerializer()", True),
("sagemaker.predictor._NpySerializer()", True),
("sagemaker.predictor._CsvDeserializer()", True),
("sagemaker.predictor.BytesDeserializer()", True),
("sagemaker.predictor.StringDeserializer()", True),
("sagemaker.predictor.StreamDeserializer()", True),
("sagemaker.predictor._NumpyDeserializer()", True),
("sagemaker.predictor._JsonDeserializer()", True),
("sagemaker.predictor.OtherClass()", False),
("sagemaker.amazon.common.numpy_to_record_serializer()", True),
("sagemaker.amazon.common.record_deserializer()", True),
("_CsvSerializer()", True),
("_JsonSerializer()", True),
("_NpySerializer()", True),
("_CsvDeserializer()", True),
("BytesDeserializer()", True),
("StringDeserializer()", True),
("StreamDeserializer()", True),
("_NumpyDeserializer()", True),
("_JsonDeserializer()", True),
("numpy_to_record_serializer()", True),
("record_deserializer()", True),
("OtherClass()", False),
],
)
def test_constructor_node_should_be_modified(src, expected):
modifier = serde.SerdeConstructorRenamer()
node = ast_call(src)
assert modifier.node_should_be_modified(node) is expected
@pytest.mark.parametrize(
"src, expected",
[
("sagemaker.predictor._CsvSerializer()", "serializers.CSVSerializer()"),
("sagemaker.predictor._JsonSerializer()", "serializers.JSONSerializer()"),
("sagemaker.predictor._NpySerializer()", "serializers.NumpySerializer()"),
("sagemaker.predictor._CsvDeserializer()", "deserializers.CSVDeserializer()"),
("sagemaker.predictor.BytesDeserializer()", "deserializers.BytesDeserializer()"),
("sagemaker.predictor.StringDeserializer()", "deserializers.StringDeserializer()",),
("sagemaker.predictor.StreamDeserializer()", "deserializers.StreamDeserializer()",),
("sagemaker.predictor._NumpyDeserializer()", "deserializers.NumpyDeserializer()"),
("sagemaker.predictor._JsonDeserializer()", "deserializers.JSONDeserializer()"),
(
"sagemaker.amazon.common.numpy_to_record_serializer()",
"sagemaker.amazon.common.RecordSerializer()",
),
(
"sagemaker.amazon.common.record_deserializer()",
"sagemaker.amazon.common.RecordDeserializer()",
),
("_CsvSerializer()", "serializers.CSVSerializer()"),
("_JsonSerializer()", "serializers.JSONSerializer()"),
("_NpySerializer()", "serializers.NumpySerializer()"),
("_CsvDeserializer()", "deserializers.CSVDeserializer()"),
("BytesDeserializer()", "deserializers.BytesDeserializer()"),
("StringDeserializer()", "deserializers.StringDeserializer()"),
("StreamDeserializer()", "deserializers.StreamDeserializer()"),
("_NumpyDeserializer()", "deserializers.NumpyDeserializer()"),
("_JsonDeserializer()", "deserializers.JSONDeserializer()"),
("numpy_to_record_serializer()", "RecordSerializer()"),
("record_deserializer()", "RecordDeserializer()"),
],
)
def test_constructor_modify_node(src, expected):
modifier = serde.SerdeConstructorRenamer()
node = ast_call(src)
modified_node = modifier.modify_node(node)
assert expected == pasta.dump(modified_node)
assert isinstance(modified_node, ast.Call)
@pytest.mark.parametrize(
"src, expected",
[
("sagemaker.predictor.csv_serializer", True,),
("sagemaker.predictor.json_serializer", True,),
("sagemaker.predictor.npy_serializer", True,),
("sagemaker.predictor.csv_deserializer", True,),
("sagemaker.predictor.json_deserializer", True,),
("sagemaker.predictor.numpy_deserializer", True,),
("csv_serializer", True,),
("json_serializer", True,),
("npy_serializer", True,),
("csv_deserializer", True,),
("json_deserializer", True,),
("numpy_deserializer", True,),
],
)
def test_name_node_should_be_modified(src, expected):
modifier = serde.SerdeObjectRenamer()
node = ast_call(src)
    assert modifier.node_should_be_modified(node) is expected
@pytest.mark.parametrize(
"src, expected",
[
("sagemaker.predictor.csv_serializer", "serializers.CSVSerializer()"),
("sagemaker.predictor.json_serializer", "serializers.JSONSerializer()"),
("sagemaker.predictor.npy_serializer", "serializers.NumpySerializer()"),
("sagemaker.predictor.csv_deserializer", "deserializers.CSVDeserializer()"),
("sagemaker.predictor.json_deserializer", "deserializers.JSONDeserializer()"),
("sagemaker.predictor.numpy_deserializer", "deserializers.NumpyDeserializer()"),
("csv_serializer", "serializers.CSVSerializer()"),
("json_serializer", "serializers.JSONSerializer()"),
("npy_serializer", "serializers.NumpySerializer()"),
("csv_deserializer", "deserializers.CSVDeserializer()"),
("json_deserializer", "deserializers.JSONDeserializer()"),
("numpy_deserializer", "deserializers.NumpyDeserializer()"),
],
)
def test_name_modify_node(src, expected):
modifier = serde.SerdeObjectRenamer()
node = ast_call(src)
modified_node = modifier.modify_node(node)
assert expected == pasta.dump(modified_node)
assert isinstance(modified_node, ast.Call)
@pytest.mark.parametrize(
"src, expected",
[
("from sagemaker.predictor import _CsvSerializer", True),
("from sagemaker.predictor import _JsonSerializer", True),
("from sagemaker.predictor import _NpySerializer", True),
("from sagemaker.predictor import _CsvDeserializer", True),
("from sagemaker.predictor import BytesDeserializer", True),
("from sagemaker.predictor import StringDeserializer", True),
("from sagemaker.predictor import StreamDeserializer", True),
("from sagemaker.predictor import _NumpyDeserializer", True),
("from sagemaker.predictor import _JsonDeserializer", True),
("from sagemaker.predictor import csv_serializer", True),
("from sagemaker.predictor import json_serializer", True),
("from sagemaker.predictor import npy_serializer", True),
("from sagemaker.predictor import csv_deserializer", True),
("from sagemaker.predictor import json_deserializer", True),
("from sagemaker.predictor import numpy_deserializer", True),
("from sagemaker.predictor import RealTimePredictor, _CsvSerializer", True),
("from sagemaker.predictor import RealTimePredictor", False),
("from sagemaker.amazon.common import numpy_to_record_serializer", False),
],
)
def test_import_from_predictor_node_should_be_modified(src, expected):
modifier = serde.SerdeImportFromPredictorRenamer()
node = ast_import(src)
assert modifier.node_should_be_modified(node) is expected
@pytest.mark.parametrize(
"src, expected",
[
("from sagemaker.predictor import _CsvSerializer", None),
("from sagemaker.predictor import _JsonSerializer", None),
("from sagemaker.predictor import _NpySerializer", None),
("from sagemaker.predictor import _CsvDeserializer", None),
("from sagemaker.predictor import BytesDeserializer", None),
("from sagemaker.predictor import StringDeserializer", None),
("from sagemaker.predictor import StreamDeserializer", None),
("from sagemaker.predictor import _NumpyDeserializer", None),
("from sagemaker.predictor import _JsonDeserializer", None),
("from sagemaker.predictor import csv_serializer", None),
("from sagemaker.predictor import json_serializer", None),
("from sagemaker.predictor import npy_serializer", None),
("from sagemaker.predictor import csv_deserializer", None),
("from sagemaker.predictor import json_deserializer", None),
("from sagemaker.predictor import numpy_deserializer", None),
(
"from sagemaker.predictor import RealTimePredictor, _NpySerializer",
"from sagemaker.predictor import RealTimePredictor",
),
],
)
def test_import_from_predictor_modify_node(src, expected):
modifier = serde.SerdeImportFromPredictorRenamer()
node = ast_import(src)
modified_node = modifier.modify_node(node)
assert expected == (modified_node and pasta.dump(modified_node))
@pytest.mark.parametrize(
"import_statement, expected",
[
("from sagemaker.amazon.common import numpy_to_record_serializer", True),
("from sagemaker.amazon.common import record_deserializer", True),
("from sagemaker.amazon.common import write_spmatrix_to_sparse_tensor", False),
],
)
def test_import_from_amazon_common_node_should_be_modified(import_statement, expected):
modifier = serde.SerdeImportFromAmazonCommonRenamer()
node = ast_import(import_statement)
assert modifier.node_should_be_modified(node) is expected
@pytest.mark.parametrize(
"import_statement, expected",
[
(
"from sagemaker.amazon.common import numpy_to_record_serializer",
"from sagemaker.amazon.common import RecordSerializer",
),
(
"from sagemaker.amazon.common import record_deserializer",
"from sagemaker.amazon.common import RecordDeserializer",
),
(
"from sagemaker.amazon.common import numpy_to_record_serializer, record_deserializer",
"from sagemaker.amazon.common import RecordSerializer, RecordDeserializer",
),
(
"from sagemaker.amazon.common import write_spmatrix_to_sparse_tensor, numpy_to_record_serializer",
"from sagemaker.amazon.common import write_spmatrix_to_sparse_tensor, RecordSerializer",
),
],
)
def test_import_from_amazon_common_modify_node(import_statement, expected):
modifier = serde.SerdeImportFromAmazonCommonRenamer()
node = ast_import(import_statement)
modified_node = modifier.modify_node(node)
assert expected == pasta.dump(modified_node)
@pytest.mark.parametrize(
"src, expected",
[
("serializers.CSVSerializer()", True),
("serializers.JSONSerializer()", True),
("serializers.NumpySerializer()", True),
("pass", False),
],
)
def test_serializer_module_node_should_be_modified(src, expected):
modifier = serde.SerializerImportInserter()
node = pasta.parse(src)
assert modifier.node_should_be_modified(node) is expected
@pytest.mark.parametrize(
"src, expected",
[
(
"serializers.CSVSerializer()",
"from sagemaker import serializers\nserializers.CSVSerializer()",
),
(
"serializers.JSONSerializer()",
"from sagemaker import serializers\nserializers.JSONSerializer()",
),
(
"serializers.NumpySerializer()",
"from sagemaker import serializers\nserializers.NumpySerializer()",
),
(
"pass\nimport random\nserializers.CSVSerializer()",
"pass\nfrom sagemaker import serializers\nimport random\nserializers.CSVSerializer()",
),
],
)
def test_serializer_module_modify_node(src, expected):
modifier = serde.SerializerImportInserter()
node = pasta.parse(src)
modified_node = modifier.modify_node(node)
assert expected == pasta.dump(modified_node)
@pytest.mark.parametrize(
"src, expected",
[
("deserializers.CSVDeserializer()", True),
("deserializers.BytesDeserializer()", True),
("deserializers.StringDeserializer()", True),
("deserializers.StreamDeserializer()", True),
("deserializers.NumpyDeserializer()", True),
("deserializers.JSONDeserializer()", True),
("pass", False),
],
)
def test_deserializer_module_node_should_be_modified(src, expected):
modifier = serde.DeserializerImportInserter()
node = pasta.parse(src)
assert modifier.node_should_be_modified(node) is expected
@pytest.mark.parametrize(
"src, expected",
[
(
"deserializers.CSVDeserializer()",
"from sagemaker import deserializers\ndeserializers.CSVDeserializer()",
),
(
"deserializers.BytesDeserializer()",
"from sagemaker import deserializers\ndeserializers.BytesDeserializer()",
),
(
"deserializers.StringDeserializer()",
"from sagemaker import deserializers\ndeserializers.StringDeserializer()",
),
(
"deserializers.StreamDeserializer()",
"from sagemaker import deserializers\ndeserializers.StreamDeserializer()",
),
(
"deserializers.NumpyDeserializer()",
"from sagemaker import deserializers\ndeserializers.NumpyDeserializer()",
),
(
"deserializers.JSONDeserializer()",
"from sagemaker import deserializers\ndeserializers.JSONDeserializer()",
),
(
"pass\nimport random\ndeserializers.CSVDeserializer()",
"pass\nfrom sagemaker import deserializers\nimport random\ndeserializers.CSVDeserializer()",
),
],
)
def test_deserializer_module_modify_node(src, expected):
modifier = serde.DeserializerImportInserter()
node = pasta.parse(src)
modified_node = modifier.modify_node(node)
assert expected == pasta.dump(modified_node)
@pytest.mark.parametrize(
"src, expected",
[
('estimator.create_model(entry_point="inference.py")', False),
("estimator.create_model(serializer=CSVSerializer())", True),
("estimator.create_model(deserializer=CSVDeserializer())", True),
(
"estimator.create_model(serializer=CSVSerializer(), deserializer=CSVDeserializer())",
True,
),
("estimator.deploy(serializer=CSVSerializer())", False),
],
)
def test_create_model_call_node_should_be_modified(src, expected):
modifier = serde.SerdeKeywordRemover()
node = ast_call(src)
assert modifier.node_should_be_modified(node) is expected
@pytest.mark.parametrize(
"src, expected",
[
(
'estimator.create_model(entry_point="inference.py", serializer=CSVSerializer())',
'estimator.create_model(entry_point="inference.py")',
),
(
'estimator.create_model(entry_point="inference.py", deserializer=CSVDeserializer())',
'estimator.create_model(entry_point="inference.py")',
),
],
)
def test_create_model_call_modify_node(src, expected):
modifier = serde.SerdeKeywordRemover()
node = ast_call(src)
modified_node = modifier.modify_node(node)
assert expected == pasta.dump(modified_node)
|
StarcoderdataPython
|
1663428
|
<reponame>andela/waitress<gh_stars>1-10
def serialize_meal(payload):
return dict(
date=str(payload[0]),
user_id=payload[2],
first_name=payload[3],
last_name=payload[4],
breakfast=payload[6],
lunch=payload[7],
)
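# Example (assumption about the shape of the row feeding this serializer; the
# values at index 1 and 5 are unused here):
# row = (date(2021, 5, 3), _, 42, "Jane", "Doe", _, True, False)
# serialize_meal(row) -> {"date": "2021-05-03", "user_id": 42, "first_name": "Jane",
#                         "last_name": "Doe", "breakfast": True, "lunch": False}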
|
StarcoderdataPython
|
3256227
|
from tqdm import tqdm
from itertools import chain
from torch.nn import BCEWithLogitsLoss
from torch_geometric.data import DataLoader
from sklearn.metrics import roc_auc_score, average_precision_score
from tensorboardX import SummaryWriter
import os
from models import DGCNN
import argparse
import os.path as osp
import numpy as np
import pandas as pd
import scipy.sparse as sp
from scipy.sparse.csgraph import shortest_path
import torch
import torch.nn.functional as F
# from gae_huawei import train, test
from models import GCN, SGC, GAT
from torch_geometric.nn import GAE
from torch_geometric.data import Data
from torch_geometric.datasets import Planetoid
from torch_geometric.utils import (negative_sampling, add_self_loops,
train_test_split_edges, k_hop_subgraph,
to_scipy_sparse_matrix)
import warnings
warnings.filterwarnings("ignore")
max_z = 0
def set_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
torch.backends.cudnn.deterministic = True
def load_data(path):
alarm_graph = np.load(path, allow_pickle=True)
node_list = list(alarm_graph.nodes)
edge_tmp = []
for edge in list(alarm_graph.edges):
if edge[0] != edge[1]:
            edge_tmp.append(edge)  # this step ensures there are no self-loops
edge_list = []
for i in range(len(edge_tmp)):
a = node_list.index(edge_tmp[i][0])
b = node_list.index(edge_tmp[i][1])
edge_list.append([a, b])
alarm_names = []
for ne_name in list(alarm_graph.nodes):
for alarm in alarm_graph.nodes[ne_name].keys():
if alarm != 'NE_TYPE' and alarm not in alarm_names:
alarm_names.append(alarm)
labels = np.zeros([len(node_list), 3])
for i in range(len(alarm_graph.nodes)):
if alarm_graph.nodes[list(alarm_graph.nodes)[i]]['NE_TYPE'] == 'NODEB':
labels[i][0] = 1
elif alarm_graph.nodes[list(alarm_graph.nodes)[i]]['NE_TYPE'] == 'MICROWAVE':
labels[i][1] = 1
elif alarm_graph.nodes[list(alarm_graph.nodes)[i]]['NE_TYPE'] == 'ROUTER':
labels[i][2] = 1
label_list = []
for i in range(len(alarm_graph.nodes)):
if alarm_graph.nodes[list(alarm_graph.nodes)[i]]['NE_TYPE'] == 'NODEB':
label_list.append(1)
elif alarm_graph.nodes[list(alarm_graph.nodes)[i]]['NE_TYPE'] == 'MICROWAVE':
label_list.append(2)
elif alarm_graph.nodes[list(alarm_graph.nodes)[i]]['NE_TYPE'] == 'ROUTER':
label_list.append(3)
attribute_length = len(alarm_names)
num_of_nodes = len(alarm_graph.nodes)
attribute_one_hot = np.zeros([num_of_nodes, attribute_length])
# one-hot
for i in range(len(alarm_graph.nodes)):
for alarm in alarm_graph.nodes[list(alarm_graph.nodes)[i]].keys():
if alarm != 'NE_TYPE':
attribute_one_hot[i][alarm_names.index(alarm)] = 1
return node_list, edge_list, attribute_one_hot, labels, label_list
def drnl_node_labeling(edge_index, src, dst, num_nodes=None):
global max_z
# Double-radius node labeling (DRNL).
src, dst = (dst, src) if src > dst else (src, dst)
adj = to_scipy_sparse_matrix(edge_index, num_nodes=num_nodes).tocsr()
idx = list(range(src)) + list(range(src + 1, adj.shape[0]))
adj_wo_src = adj[idx, :][:, idx]
idx = list(range(dst)) + list(range(dst + 1, adj.shape[0]))
adj_wo_dst = adj[idx, :][:, idx]
dist2src = shortest_path(adj_wo_dst, directed=False, unweighted=True,
indices=src)
dist2src = np.insert(dist2src, dst, 0, axis=0)
dist2src = torch.from_numpy(dist2src)
dist2dst = shortest_path(adj_wo_src, directed=False, unweighted=True,
indices=dst - 1)
dist2dst = np.insert(dist2dst, src, 0, axis=0)
dist2dst = torch.from_numpy(dist2dst)
dist = dist2src + dist2dst
dist_over_2, dist_mod_2 = dist // 2, dist % 2
z = 1 + torch.min(dist2src, dist2dst)
z += dist_over_2 * (dist_over_2 + dist_mod_2 - 1)
z[src] = 1.
z[dst] = 1.
z[torch.isnan(z)] = 0.
max_z = max(int(z.max()), max_z)
return z.to(torch.long)
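# Minimal sketch of calling drnl_node_labeling (assumption: a toy undirected
# 4-node path graph 0-1-2-3 given as a directed edge list):
#   ei = torch.tensor([[0, 1, 2], [1, 2, 3]], dtype=torch.long)
#   z = drnl_node_labeling(ei, 0, 1, num_nodes=4)
#   # z is a LongTensor of DRNL labels with z[0] == z[1] == 1 for the target pair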
def extract_enclosing_subgraphs(data, link_index, edge_index, y):
data_list = []
for src, dst in tqdm(link_index.t().tolist(), desc='Extracting...'):
# src: source dst: destination
sub_nodes, sub_edge_index, mapping, _ = k_hop_subgraph(
[src, dst], num_hops=2, edge_index=edge_index, relabel_nodes=True
)
src, dst = mapping.tolist()
# remove target link from the subgraph
mask1 = (sub_edge_index[0] != src) | (sub_edge_index[1] != dst)
mask2 = (sub_edge_index[0] != dst) | (sub_edge_index[1] != src)
sub_edge_index = sub_edge_index[:, mask1 & mask2]
# calculate node labeling
z = drnl_node_labeling(sub_edge_index, src, dst, num_nodes=sub_nodes.size(0))
sub_data = Data(x=data.x[sub_nodes], z=z, edge_index=sub_edge_index, y=y)
if 'pretrained_features' in data.keys:
sub_data.pretrained_features = data.pretrained_features[sub_nodes]
if 'alarm_features' in data.keys:
sub_data.alarm_features = data.alarm_features[sub_nodes]
data_list.append(sub_data)
return data_list
def load_huawei_dataset():
path = '../data/alarm_project_hitsz/preprocessed/G'
nodes, edge_list, attribute, node_labels, labels = load_data(path)
dataset = Data(x=torch.tensor(attribute, dtype=torch.float),
edge_index=torch.tensor(edge_list, dtype=torch.long).t().contiguous(),
y=torch.tensor(node_labels, dtype=torch.float), labels=labels)
return dataset
def load_disease_dataset():
path = '../data/disease_lp/'
edges = pd.read_csv(path + 'disease_lp.edges.csv')
labels = np.load(path + 'disease_lp.labels.npy')
features = sp.load_npz(path + 'disease_lp.feats.npz').todense()
dataset = Data(
x=torch.tensor(features, dtype=torch.float),
edge_index=torch.tensor(edges.values).t().contiguous(),
y=F.one_hot(torch.tensor(labels))
)
return dataset
def load_cora_dataset():
path = osp.join(osp.dirname(osp.relpath('seal_dataset.py')), '..', 'data', 'Planetoid')
dataset = Planetoid(path, 'Cora')[0]
dataset.y = F.one_hot(dataset.y).to(torch.float)
    # convert the labels to one-hot vectors here
return dataset
def pre_train(model, data, optimizer):
model.train()
optimizer.zero_grad()
z = model.encode(data.x, data.train_pos_edge_index)
loss = model.recon_loss(z, data.train_pos_edge_index)
loss.backward()
optimizer.step()
return float(loss), z
def pre_test(model, x, train_pos_edge_index, pos_edge_index, neg_edge_index):
model.eval()
with torch.no_grad():
z = model.encode(x, train_pos_edge_index)
return model.test(z, pos_edge_index, neg_edge_index)
def process(args):
seed = 327
set_seed(seed)
print('Loading dataset~~~')
if args.dataset == 'huawei':
dataset = load_huawei_dataset()
if args.use_alarm:
alarm_feature_path = '../data/alarm_construct_graph/embedding_10.pt'
dataset.alarm_features = torch.load(alarm_feature_path)
elif args.dataset == 'disease':
dataset = load_disease_dataset()
elif args.dataset == 'cora':
dataset = load_cora_dataset()
else:
raise ValueError("Invalid dataset type")
data = train_test_split_edges(dataset, val_ratio=args.val_ratio, test_ratio=args.test_ratio)
# =============================================================================================
# the flowing step is just for huawei dataset
if args.dataset == 'huawei' and args.hierarchical:
# data.val_pos_edge_index
edge_index = data.val_pos_edge_index.t().tolist()
mask = [False] * len(edge_index)
for i in range(len(edge_index)):
if (data.labels[edge_index[i][0]] == 1 and data.labels[edge_index[i][1]] != 1) \
or (data.labels[edge_index[i][0]] != 1 and data.labels[edge_index[i][1]] == 1):
mask[i] = True
data.val_pos_edge_index = torch.tensor(edge_index, dtype=torch.long)[mask].t().contiguous()
# data.val_neg_edge_index
edge_index = data.val_neg_edge_index.t().tolist()
mask = [False] * len(edge_index)
for i in range(len(edge_index)):
if (data.labels[edge_index[i][0]] == 1 and data.labels[edge_index[i][1]] != 1) \
or (data.labels[edge_index[i][0]] != 1 and data.labels[edge_index[i][1]] == 1):
mask[i] = True
data.val_neg_edge_index = torch.tensor(edge_index, dtype=torch.long)[mask].t().contiguous()
        # data.test_pos_edge_index
edge_index = data.test_pos_edge_index.t().tolist()
mask = [False] * len(edge_index)
for i in range(len(edge_index)):
if (data.labels[edge_index[i][0]] == 1 and data.labels[edge_index[i][1]] != 1) \
or (data.labels[edge_index[i][0]] != 1 and data.labels[edge_index[i][1]] == 1):
mask[i] = True
data.test_pos_edge_index = torch.tensor(edge_index, dtype=torch.long)[mask].t().contiguous()
# data.test_neg_edge_index
edge_index = data.test_neg_edge_index.t().tolist()
mask = [False] * len(edge_index)
for i in range(len(edge_index)):
if (data.labels[edge_index[i][0]] == 1 and data.labels[edge_index[i][1]] != 1) \
or (data.labels[edge_index[i][0]] != 1 and data.labels[edge_index[i][1]] == 1):
mask[i] = True
data.test_neg_edge_index = torch.tensor(edge_index, dtype=torch.long)[mask].t().contiguous()
# =================================================================================================
edge_index, _ = add_self_loops(data.train_pos_edge_index)
data.train_neg_edge_index = negative_sampling(
edge_index=edge_index,
num_nodes=data.num_nodes,
num_neg_samples=data.train_pos_edge_index.size(1)
)
print('The dataset and the split edges are done!!!')
if args.pretrain:
pretrained_data = data.clone()
pretrained_data.train_pos_edge_index = torch.cat(
(pretrained_data.train_pos_edge_index, pretrained_data.train_neg_edge_index), dim=1
)
pretrained_data.train_neg_edge_index = None
        # when pretraining is enabled, use negative injection and retrain to obtain the node features
print('-' * 60)
print('Pretraining')
if args.pre_encoder == 'GCN':
pre_model = GAE(GCN(dataset.num_features, 32))
else:
raise ValueError('Invalid model type!')
optimizer = torch.optim.Adam(pre_model.parameters(), lr=0.001)
best_auc = 0
patience = 0
for pretrained_epoch in range(1, args.pretrained_epochs):
train_loss, node_embedding = pre_train(pre_model, pretrained_data, optimizer)
val_auc, val_ap = pre_test(pre_model, data.x, data.train_pos_edge_index,
data.val_pos_edge_index, data.val_neg_edge_index)
print(f"Epoch: {pretrained_epoch:03d}, Loss: {train_loss:.4f}, Val_AUC: {val_auc:.4f}, Val_AP: {val_ap:.4f}")
if val_auc > best_auc:
best_auc = val_auc
patience = 0
else:
patience += 1
if patience > args.patience:
break
print('-' * 60)
print('Finished pretraining')
data.pretrained_features = node_embedding.detach()
if args.embedding == 'DRNL':
pass
else:
data.x = data.y
print('Starting extracting subgraphs~~~')
# collect a list of subgraphs of training, validation and test
train_pos_list = extract_enclosing_subgraphs(
data, data.train_pos_edge_index, data.train_pos_edge_index, 1
)
train_neg_list = extract_enclosing_subgraphs(
data, data.train_neg_edge_index, data.train_pos_edge_index, 0
)
val_pos_list = extract_enclosing_subgraphs(
data, data.val_pos_edge_index, data.train_pos_edge_index, 1
)
val_neg_list = extract_enclosing_subgraphs(
data, data.val_neg_edge_index, data.train_pos_edge_index, 0
)
test_pos_list = extract_enclosing_subgraphs(
data, data.test_pos_edge_index, data.train_pos_edge_index, 1
)
test_neg_list = extract_enclosing_subgraphs(
data, data.test_neg_edge_index, data.train_pos_edge_index, 0
)
print('Finished extracting subgraphs.')
if args.embedding == 'DRNL':
# convert labels to one-hot features
for data in chain(train_pos_list, train_neg_list,
val_pos_list, val_neg_list,
test_pos_list, test_neg_list):
data.x = F.one_hot(data.z, max_z + 1).to(torch.float)
elif args.embedding == 'DRNL_SelfFeat':
for data in chain(train_pos_list, train_neg_list,
val_pos_list, val_neg_list,
test_pos_list, test_neg_list):
data.x = torch.cat((F.one_hot(data.z, max_z + 1).to(torch.float), data.x), dim=1)
elif args.embedding == 'SelfFeat':
pass
else:
raise ValueError("Unsupported embedding type.")
if args.pretrain:
for data in chain(train_pos_list, train_neg_list,
val_pos_list, val_neg_list,
test_pos_list, test_neg_list):
data.x = torch.cat((data.x, data.pretrained_features), dim=1)
data.pretrained_features = None
if args.use_alarm:
for data in chain(train_pos_list, train_neg_list,
val_pos_list, val_neg_list,
test_pos_list, test_neg_list):
data.x = torch.cat((data.x, data.alarm_features), dim=1)
data.alarm_features = None
return train_pos_list + train_neg_list, val_pos_list + val_neg_list, test_pos_list + test_neg_list
def train(model, train_loader, device, optimizer, train_dataset):
model.train()
total_loss = 0
for data in train_loader:
data = data.to(device)
optimizer.zero_grad()
logits = model(data.x, data.edge_index, data.batch)
loss = BCEWithLogitsLoss()(logits.view(-1), data.y.to(torch.float))
loss.backward()
optimizer.step()
total_loss += loss.item() * data.num_graphs
return total_loss / len(train_dataset)
@torch.no_grad()
def test(loader, model, device):
model.eval()
y_pred, y_true = [], []
for data in loader:
data = data.to(device)
logits = model(data.x, data.edge_index, data.batch)
y_pred.append(logits.view(-1).cpu())
y_true.append(data.y.view(-1).cpu().to(torch.float))
return roc_auc_score(torch.cat(y_true), torch.cat(y_pred)), \
average_precision_score(torch.cat(y_true), torch.cat(y_pred))
def run():
parser = argparse.ArgumentParser("Configurations for seal")
parser.add_argument('--dataset', default='huawei', type=str, help='dataset')
parser.add_argument('--embedding', default='DRNL', type=str,
help='node encoding(["DRNL", "DRNL_SelfFeat", "SelfFeat"])')
parser.add_argument('--epochs', default=101, type=int, help='training epochs')
parser.add_argument('--cuda', default=torch.cuda.is_available(), type=bool, help='cuda')
parser.add_argument('--lr', default=0.0001, type=float, help='learning rate')
parser.add_argument('--weight_decay', default=5e-4, type=float, help='weight decay')
parser.add_argument('--val_ratio', default=0.05, type=float)
parser.add_argument('--test_ratio', default=0.10, type=float)
parser.add_argument('--batch_size', default=32, type=int, help='batch size')
parser.add_argument('--pretrain', action='store_true')
parser.add_argument('--pretrained_epochs', default=401, type=int)
parser.add_argument('--pre_encoder', default='GCN', type=str, choices=['GCN'])
parser.add_argument('--patience', default=50, type=int, help='early stop steps')
parser.add_argument('--use_alarm', action='store_true')
parser.add_argument('--hierarchical', action='store_true')
args = parser.parse_args()
print(args)
    args.split_ratio = str(round((1 - args.val_ratio - args.test_ratio) * 100)) \
                       + str(round(args.val_ratio * 100)) + str(round(args.test_ratio * 100))
train_dataset, val_dataset, test_dataset = process(args)
train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False)
test_loader = DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False)
device = torch.device('cuda:1' if args.cuda else 'cpu')
model = DGCNN(train_dataset, hidden_channels=32, num_layers=3).to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
trainWriter = SummaryWriter('../{}/{}/{}/{}/{}'.format(
'runs', 'SEAL', args.dataset + '_' + args.split_ratio + '_pretrained_' + str(args.pretrain),
args.embedding, 'Train'
))
valWriter = SummaryWriter('../{}/{}/{}/{}/{}'.format(
'runs', 'SEAL', args.dataset + '_' + args.split_ratio + '_pretrained_' + str(args.pretrain),
args.embedding, 'Val'
))
best_val_auc = test_auc = test_ap = 0
for epoch in range(1, args.epochs):
loss = train(model, train_loader, device, optimizer, train_dataset)
trainWriter.add_scalar(tag='Train Loss', scalar_value=loss, global_step=epoch)
val_auc, val_ap = test(val_loader, model, device)
valWriter.add_scalar(tag='Val AUC', scalar_value=val_auc, global_step=epoch)
valWriter.add_scalar(tag='Val AP', scalar_value=val_ap, global_step=epoch)
if val_auc > best_val_auc:
best_val_auc = val_auc
test_auc, test_ap = test(test_loader, model, device)
# saving model parameters
state = {'model': model.state_dict(), 'auc': test_auc, 'ap': test_ap, 'epoch': epoch}
save_path = '../checkpoint/SEAL/'
            os.makedirs(save_path, exist_ok=True)
torch.save(state, osp.join(save_path, args.dataset+'-'+args.split_ratio+'-'+args.embedding+'-'+'ckpt.pth'))
        print(f'Epoch: {epoch:02d}, Loss: {loss:.4f}, Val_AUC: {val_auc:.4f}, Val_AP: {val_ap:.4f}, '
f'Test_AUC: {test_auc:.4f}, Test_AP: {test_ap:.4f}')
if __name__ == "__main__":
run()
|
StarcoderdataPython
|
3295883
|
<reponame>Cli212/Lightweight-Language-Models-to-Generate<filename>prado/model.py
import torch
from typing import Any, Dict, List
import math
import warnings
from functools import partial
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn as nn
# from pytorch_lightning.metrics.functional import accuracy, auroc
# from pytorch_lightning.metrics.functional import f1 as f1_score
from torch.nn import TransformerEncoder, TransformerEncoderLayer
from torch.optim.lr_scheduler import ReduceLROnPlateau
try:
from torchqrnn import QRNN
except ImportError:
print("Import QRNN from torchqrnn fail")
from torch.nn import LSTM as QRNN
class PositionalEncoding(nn.Module):
def __init__(self, d_model, dropout=0.1, max_len=5000):
super().__init__()
self.dropout = nn.Dropout(p=dropout)
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(
torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model)
)
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0).transpose(0, 1)
self.register_buffer("pe", pe)
def forward(self, x):
x = x + self.pe[: x.size(0), :]
return self.dropout(x)
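# A minimal usage sketch for the encoding above (comment-only; the expected input shape of
# (seq_len, batch, d_model) is an assumption based on how the `pe` buffer is built):
#
#     pos_enc = PositionalEncoding(d_model=64)
#     x = torch.zeros(10, 2, 64)   # (seq_len=10, batch=2, d_model=64)
#     out = pos_enc(x)             # same shape, positions added, dropout applied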
class PQRNN(pl.LightningModule):
def __init__(
self,
b: int = 512,
d: int = 64,
num_layers: int = 4,
fc_sizes: List[int] = None,
output_size: int = 2,
lr: float = 0.025,
dropout: float = 0.5,
rnn_type: str = "LSTM",
multilabel: bool = False,
nhead: int = 8,
):
super().__init__()
if fc_sizes is None:
fc_sizes = [128, 64]
self.hyparams: Dict[str, Any] = {
"b": b,
"d": d,
"fc_size": fc_sizes,
"lr": lr,
"output_size": output_size,
"dropout": dropout,
"rnn_type": rnn_type.upper(),
"multilabel": multilabel,
"nhead": nhead,
"n_layers": num_layers
}
layers: List[nn.Module] = []
for x, y in zip([d] + fc_sizes, fc_sizes + [output_size]):
layers.append(nn.ReLU())
layers.append(nn.Linear(x, y))
self.tanh = nn.Hardtanh()
if self.hyparams["rnn_type"] in {"LSTM", "GRU", "QRNN"}:
self.hidden = {
"LSTM": partial(nn.LSTM, bidirectional=True, batch_first=True, num_layers=self.hyparams['n_layers']),
"GRU": partial(nn.GRU, bidirectional=True, batch_first=True, num_layers=self.hyparams['n_layers']),
"QRNN": QRNN,
}[self.hyparams["rnn_type"]](
b*2, d, num_layers=num_layers, dropout=dropout
)
else:
self.pos_encoder = PositionalEncoding(d_model=b, dropout=dropout)
encoder_layers = TransformerEncoderLayer(
d_model=b, nhead=nhead, dropout=dropout
)
self.hidden = TransformerEncoder(
encoder_layers, num_layers=num_layers
)
self.linear = nn.Linear(b, d)
self.output = nn.ModuleList(layers)
self.loss = (
nn.CrossEntropyLoss()
if not self.hyparams["multilabel"]
else nn.BCEWithLogitsLoss()
)
def generate_square_subsequent_mask(self, sz):
mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
mask = (
mask.float()
.masked_fill(mask == 0, float("-inf"))
.masked_fill(mask == 1, float(0.0))
)
return mask
def forward(self, projection, hidden=None):
features = self.tanh(projection)
# features = features.transpose(0, 1)
if self.hyparams["rnn_type"] in {"LSTM", "GRU", "QRNN"}:
output, hidden = self.hidden(features, hidden)
# if self.hyparams["rnn_type"] != "QRNN":
# output = (
# output.T[..., : output.shape[-1] // 2]
# + output[..., output.shape[-1] // 2 :]
# )
else:
features = features * math.sqrt(self.hyparams["b"])
features = self.pos_encoder(features)
output = self.hidden(
features,
self.generate_square_subsequent_mask(features.size(0)).to(
features.device
),
)
output = self.linear(output)
# Sum bidirectional GRU outputs
output = output[:, :, :self.hyparams['d']] + output[:, :, self.hyparams['d']:]
# Return output and final hidden state
return output, hidden
def training_step(self, batch, batch_idx):
projection, _, labels = batch
        logits, _ = self.forward(projection)
self.log(
"loss",
self.loss(
logits,
labels.type(
logits.dtype if self.hyparams["multilabel"] else labels.dtype
),
)
.detach()
.cpu()
.item(),
)
return {
"loss": self.loss(
logits,
labels.type(
logits.dtype if self.hyparams["multilabel"] else labels.dtype
),
)
}
def validation_step(self, batch, batch_idx):
projection, _, labels = batch
        logits, _ = self.forward(projection)
return {"logits": logits, "labels": labels}
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=self.hyparams["lr"])
scheduler = ReduceLROnPlateau(optimizer, "min")
return {
"optimizer": optimizer,
"lr_scheduler": scheduler,
"monitor": "val_loss",
}
|
StarcoderdataPython
|
3222127
|
<filename>vue/product_frames/new_product_frame.py
from tkinter import *
from tkinter import messagebox
from vue.product_frames.product_formular_frame import ProductFormularFrame
from exceptions import Error
class NewProductFrame(ProductFormularFrame):
def __init__(self, product_controller, master=None):
super().__init__(master)
self._product_controller = product_controller
def create_widgets(self):
super().create_widgets()
        self.valid_button = Button(self, text="valid", fg="red",
                                   command=self.valid)
        self.cancel_button = Button(self, text="cancel", fg="red",
                                    command=self.back)
        self.valid_button.grid(row=20, column=1, sticky=E)
        self.cancel_button.grid(row=20, column=2, sticky=W)
def valid(self):
data = self.get_data()
try:
product_data = self._product_controller.create_product(data)
messagebox.showinfo("Success", "Product %s created !" % product_data['name'])
except Error as e:
messagebox.showerror("Error", str(e))
return
self.back()
|
StarcoderdataPython
|
111296
|
<filename>ydlg/main.py
from flask import Blueprint, render_template, request, Response,redirect,url_for
import os
from flask_login import login_user, login_required, current_user
from .utils import *
import youtube_dl
main = Blueprint('main', __name__)
default_download_directory = ""
try:
default_download_directory = os.environ["YDLG_DOWNLOAD_FOLDER"]
except Exception as e:
pass
if default_download_directory == "":
default_download_directory = "./Downloads"
@main.route('/')
@login_required
def index():
return render_template( 'download.html')
@main.route('/download', methods=['POST'])
@login_required
def downloadYoutubeLink():
url = request.form.get("URL")
if not validateYoutubeURL(url):
return render_template('download.html', message="Link it's not valid")
ydl_opts = {
'outtmpl': default_download_directory + '/%(title)s.%(ext)s',
}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
ydl.download([url])
return render_template('download.html', message="Done downloading")
|
StarcoderdataPython
|
182057
|
<reponame>respeaker/mycroft_runner_simple
from .runner import PreciseRunner, PreciseEngine, ReadWriteStream
__version__ = '0.3.3'
|
StarcoderdataPython
|
1719064
|
from django.shortcuts import render
# Create your views here.
def landing(request):
return render(request,'login/landing.html')
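# Hypothetical URL hookup for this view (not part of the original app; names are assumptions):
#
#     # urls.py
#     from django.urls import path
#     from . import views
#     urlpatterns = [path('', views.landing, name='landing')]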
|
StarcoderdataPython
|
1777399
|
import pandas as pd
from DataHandler.mongoObjects import CollectionManager
from datetime import date
from datetime import datetime as dt
import datedelta
import numpy as np
features = ['Asset Growth', 'Book Value per Share Growth', 'Debt Growth', 'Dividends per Basic Common Share Growth',
'EBIT Growth', 'EPS Diluted Growth', 'EPS Growth', 'Gross Profit Growth', 'Inventory Growth',
'Net Income Growth',
'Operating Cash Flow Growth', 'Trade and Non-Trade Receivables Growth']
def add_fundamentals_to_db():
"""
Adds the fundamental data to the database from a json file
:return:None
"""
fundFile = 'sectorAnalysis/fundamentals/combinedFundamentals.json'
funds = pd.read_json(fundFile)
manager = CollectionManager('10y_Fundamentals', 'AlgoTradingDB')
for index, row in funds.iterrows():
document = row.to_dict()
manager.insert(document, is_dictionary=True)
manager.close()
def json_serial(obj):
"""JSON serializer for objects not serializable by default json code"""
if isinstance(obj, dt):
return obj.isoformat()
raise TypeError("Type %s not serializable" % type(obj))
def get_next_trading_day(dates, days: list):
"""
Ensures that the day was a trading day and gets the data
for that day.
    :param dates: list of valid trading dates
    :param days: list of dates to adjust forward
    :return: the next trading day for each requested day
"""
newDates = []
for day in days:
while day not in dates:
day += datedelta.DAY
newDates.append(day)
return newDates
def calculate_performance(ticker, dates1: list, dates2: list):
"""
Gets how much the stock has changed since the last
quarter reportings.
:param ticker: stock ticker
    :param dates1: beginnings of the quarters
    :param dates2: ends of the quarters
    :return: percent change in stock price for each quarter
"""
ticker = ticker.lower()
manager = CollectionManager('5Y_technicals', 'AlgoTradingDB')
prices = manager.find({'ticker': ticker})
dates = [dt.strptime(priceDate, '%Y-%m-%d').date() for priceDate in prices['date']]
pricesStart = [prices[prices['date'] == str(d1)]['vwap'].values[0] for d1 in get_next_trading_day(dates, dates1)]
pricesEnd = [prices[prices['date'] == str(d2)]['vwap'].values[0] for d2 in get_next_trading_day(dates, dates2)]
manager.close()
performances = [((p[0] - p[1]) / p[0]) for p in zip(pricesStart, pricesEnd)]
return performances
def get_historical_fundamentals(ticker: str, d: date, manager: CollectionManager):
"""
Gets all of the fundamental data for a ticker before some date and after
:param ticker: stock ticker
:param d: date
:param manager: collection manager for the fundamentals database
:return: past fundamentals, announcement dates, and future (test) fundamentals
"""
current_day = dt(d.year, d.month, d.day)
allTickersFundamentals = manager.find({'ticker': ticker, 'date': {'$lte': current_day}}).sort_values('date')
test = manager.find({'ticker': ticker, 'date': {'$gte': current_day}}).sort_values('date')
return allTickersFundamentals[features], [announce.date() for announce in
allTickersFundamentals['date'].tolist()], test
def find_best_stock(performances: pd.DataFrame):
"""
Finds the best stock given the performances
:param performances: list of performances
:return: the best stock
"""
best = []
stocks = performances.columns.values
for index, values in performances.iterrows():
maximum = np.argmax([abs(v) for v in values])
stock = stocks[maximum]
best.append(stock)
return best
def get_all_fundamentals(stocks: list, tradeDate: date):
"""
Gets all of the fundamentals for a list of tickers and list of quarters
    :param stocks: list of stock tickers
    :param tradeDate: date used to split historical and test fundamentals
    :return: training Xs and ys, test data, test dates, and tickers
"""
manager = CollectionManager('10y_Fundamentals', 'AlgoTradingDB')
tickers_set = set(stocks)
all_fundamental_tickers = set(manager.find({})["ticker"])
tickers = list(tickers_set.intersection(all_fundamental_tickers))
allFundamentals = pd.DataFrame()
performances = pd.DataFrame()
quarters = 17
allTest = pd.DataFrame()
testDates = []
for ticker in tickers:
data, announcementDates, test = get_historical_fundamentals(ticker, tradeDate, manager)
nextAnnouncementDates = announcementDates[1:] + [dt.strptime('2018-02-05', '%Y-%m-%d').date()]
performance = calculate_performance(ticker, announcementDates, nextAnnouncementDates)
if len(testDates) == 0:
testDates = test['date'].tolist()
if len(performance) != 17:
performance = performance[len(performance) - 17:]
performances[ticker] = performance
else:
performances[ticker] = performance
for index, funds in data.iterrows():
tempDF = pd.DataFrame()
tempDF['fundamentals'] = list(funds)[:-1]
tempDF['ticker'] = [ticker for i in range(len(funds) - 1)]
tempDF['quarter'] = [index for j in range(len(funds) - 1)]
allFundamentals = pd.concat([allFundamentals, tempDF])
for index, testFunds in test.iterrows():
temp = pd.DataFrame()
temp['fundamentals'] = list(testFunds)[:-1]
temp['ticker'] = [ticker for k in range(len(testFunds) - 1)]
temp['quarter'] = [index for l in range(len(testFunds) - 1)]
allTest = pd.concat([allTest, temp])
manager.close()
trainingData = []
for quarter in range(quarters):
q = []
for ticker in tickers:
tickerdata = allFundamentals[allFundamentals['ticker'] == ticker]
quarterdata = tickerdata[tickerdata['quarter'] == quarter]['fundamentals']
q.append(quarterdata.tolist())
trainingData.append(np.array(q))
trainingDataX = np.array(trainingData)
trainingDataY = find_best_stock(performances)
allTestY = []
quarterLen = len(allTest['quarter'].unique())
if quarterLen == 1:
fix = allTest.copy()
fix['quarter'] = [2 for i in range(len(fix))]
allTest = pd.concat([allTest, fix])
for testQuarter in range(2):
testQ = []
for tick in tickers:
tickData = allTest[allTest['ticker'] == tick]
testQuarterData = tickData[tickData['quarter'] == testQuarter]['fundamentals']
if testQuarterData.shape[0] != 15:
print('ERROR ' + tick)
testQ.append(testQuarterData.tolist()[:-4])
allTestY.append(np.array(testQ))
return trainingDataX, trainingDataY, np.array(allTestY), testDates, tickers
|
StarcoderdataPython
|
1695747
|
<reponame>yuhaitao1994/LIC2019_Information_Extraction<gh_stars>10-100
# -*- coding:utf-8 -*-
"""
Subject and Object labeling with Bert + Pointer Net
@author:yuhaitao
"""
import collections
import os
import numpy as np
import tensorflow as tf
import codecs
import pickle
import sys
from sklearn import metrics
sys.path.append("../")
from bert.bert_code import modeling, optimization, tokenization
from models_SO import create_model_ptr, InputFeatures_ptr, InputExample
import argparse
def get_args_parser():
parser = argparse.ArgumentParser()
bert_path = '../bert/bert_model'
root_path = '../data/'
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Unsupported value encountered.')
parser.add_argument('-experiment_name', type=str, default='1',
help='name')
parser.add_argument('-data_dir', type=str, default=os.path.join(root_path, 'SO_data'),
help='train, dev and test data dir')
parser.add_argument('-bert_config_file', type=str,
default=os.path.join(bert_path, 'bert_config.json'))
parser.add_argument('-output_dir', type=str, default=root_path,
help='directory of a pretrained BERT model')
parser.add_argument('-init_checkpoint', type=str, default=os.path.join(bert_path, 'bert_model.ckpt'),
help='Initial checkpoint (usually from a pre-trained BERT model).')
parser.add_argument('-vocab_file', type=str, default=os.path.join(bert_path, 'vocab.txt'),
help='')
parser.add_argument('-max_seq_length', type=int, default=150,
help='The maximum total input sequence length after WordPiece tokenization.')
parser.add_argument('-do_train', type=str2bool, default=False,
help='Whether to run training.')
parser.add_argument('-do_eval', type=str2bool, default=False,
help='Whether to run eval on the dev set.')
parser.add_argument('-do_predict', type=str2bool, default=True,
help='Whether to run the predict in inference mode on the test set.')
parser.add_argument('-batch_size', type=int, default=32,
help='Total batch size for training, eval and predict.')
parser.add_argument('-learning_rate', type=float, default=2e-5,
help='The initial learning rate for Adam.')
parser.add_argument('-num_train_epochs', type=float, default=5,
help='Total number of training epochs to perform.')
parser.add_argument('-dropout_rate', type=float, default=0.5,
help='Dropout rate')
parser.add_argument('-clip', type=float, default=0.5,
help='Gradient clip')
parser.add_argument('-warmup_proportion', type=float, default=0.1,
help='Proportion of training to perform linear learning rate warmup for '
                             'E.g., 0.1 = 10%% of training.')
parser.add_argument('-lstm_size', type=int, default=128,
help='size of lstm units.')
parser.add_argument('-num_layers', type=int, default=1,
help='number of rnn layers, default is 1.')
parser.add_argument('-cell', type=str, default='lstm',
                        help='which rnn cell to use.')
parser.add_argument('-save_checkpoints_steps', type=int, default=1000,
help='save_checkpoints_steps')
parser.add_argument('-save_summary_steps', type=int, default=1000,
help='save_summary_steps.')
parser.add_argument('-filter_adam_var', type=str2bool, default=False,
help='after training do filter Adam params from model and save no Adam params model in file.')
parser.add_argument('-do_lower_case', type=str2bool, default=True,
help='Whether to lower case the input text.')
parser.add_argument('-clean', type=str2bool, default=True)
parser.add_argument('-device_map', type=str, default='1',
help='witch device using to train')
# add labels
parser.add_argument('-label_list', type=str, default='../dict/p_eng',
                        help='User-defined labels; can be a file with one label per line or a comma-separated string')
parser.add_argument('-verbose', action='store_true', default=False,
help='turn on tensorflow logging for debug')
parser.add_argument('-ptr', type=str, default='Ptr',
                        help='which model to train')
return parser.parse_args()
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_data(cls, input_file):
"""Reads a BIO data."""
with codecs.open(input_file, 'r', encoding='utf-8') as f:
lines = []
for line in f:
data = []
context = line.strip().split('\t')
if len(context) != 4:
continue
data.append(context[0])
data.append(context[1])
data.append(context[2])
data.append(context[3])
lines.append(data)
return lines
class PtrProcessor(DataProcessor):
def __init__(self, output_dir):
self.labels = list()
self.output_dir = output_dir
def get_train_examples(self, data_dir):
return self._create_example(
self._read_data(os.path.join(data_dir, "train.txt")), "train"
)
def get_dev_examples(self, data_dir):
return self._create_example(
self._read_data(os.path.join(data_dir, "dev.txt")), "dev"
)
def get_test_examples(self, data_dir):
return self._create_example(
self._read_data(os.path.join(data_dir, "test.txt")), "test")
def get_labels(self, labels=None):
if labels is not None:
try:
                # Supports reading label types from a file; here it reads the p_eng labels
if os.path.exists(labels) and os.path.isfile(labels):
with codecs.open(labels, 'r', encoding='utf-8') as fd:
for line in fd:
self.labels.append(line.strip().split()[-1])
else:
                    # Otherwise split the passed-in argument by commas
self.labels = labels.split(',')
except Exception as e:
print(e)
return self.labels
def _create_example(self, lines, set_type):
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
text_a = tokenization.convert_to_unicode(line[0])
text_b = tokenization.convert_to_unicode(line[2] + '&&' + line[3])
label = tokenization.convert_to_unicode(line[1])
examples.append(InputExample(
guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer):
"""
    Subject and object labeling with a pointer net
"""
label_map = {}
for (i, label) in enumerate(label_list):
label_map[label] = i
tokens_a = tokenizer.tokenize(example.text_a)
over = 0
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[0:(max_seq_length - 2)]
over = 1
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
    # Generate head/tail pointers for the subject and object, and the relation class id
sub, obj = example.text_b.split('&&')
sub = tokenizer.tokenize(sub)
obj = tokenizer.tokenize(obj)
sub_head = -1
sub_tail = -1
obj_head = -1
obj_tail = -1
for i in range(len(tokens) - 1):
cut = tokens[i:min(i + len(sub), len(tokens) - 1)]
if ''.join(''.join(cut).split('##')) == ''.join(''.join(sub).split('##')):
sub_head = i
sub_tail = i + len(cut) - 1
break
cut = tokens[i:min(i + len(sub) - 1, len(tokens) - 1)]
if ''.join(''.join(cut).split('##')) == ''.join(''.join(sub).split('##')):
sub_head = i
sub_tail = i + len(cut) - 1
break
cut = tokens[i:min(i + len(sub) + 1, len(tokens) - 1)]
if ''.join(''.join(cut).split('##')) == ''.join(''.join(sub).split('##')):
sub_head = i
sub_tail = i + len(cut) - 1
break
if sub_head == -1:
sub_head = sub_tail = len(tokens) - 1
# print(tokens)
# print(sub)
# raise ValueError
for i in range(len(tokens) - 1):
cut = tokens[i:min(i + len(obj), len(tokens) - 1)]
if ''.join(''.join(cut).split('##')) == ''.join(''.join(obj).split('##')):
obj_head = i
obj_tail = i + len(cut) - 1
break
cut = tokens[i:min(i + len(obj) - 1, len(tokens) - 1)]
if ''.join(''.join(cut).split('##')) == ''.join(''.join(obj).split('##')):
obj_head = i
obj_tail = i + len(cut) - 1
break
cut = tokens[i:min(i + len(obj) + 1, len(tokens) - 1)]
if ''.join(''.join(cut).split('##')) == ''.join(''.join(obj).split('##')):
obj_head = i
obj_tail = i + len(cut) - 1
break
if obj_head == -1:
obj_head = obj_tail = len(tokens) - 1
label_id = label_map[example.label]
if ex_index < 5:
tf.logging.info("*** Example ***")
tf.logging.info("guid: %s" % (example.guid))
tf.logging.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in tokens]))
tf.logging.info("input_ids: %s" %
" ".join([str(x) for x in input_ids]))
tf.logging.info("input_mask: %s" %
" ".join([str(x) for x in input_mask]))
tf.logging.info("segment_ids: %s" %
" ".join([str(x) for x in segment_ids]))
tf.logging.info("label: %s (id = %d)" % (example.label, label_id))
tf.logging.info("pointer: %d %d %d %d" %
(sub_head, sub_tail, obj_head, obj_tail))
feature = InputFeatures_ptr(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id,
sub_ptr=[sub_head, sub_tail],
obj_ptr=[obj_head, obj_tail])
return feature, over
def filed_based_convert_examples_to_features(
examples, label_list, max_seq_length, tokenizer, output_file):
"""
    Convert the data into TF_Record structures to be used as model input
    :param examples: examples
    :param label_list: list of labels
    :param max_seq_length: preset maximum sequence length
    :param tokenizer: tokenizer object
    :param output_file: tf.record output path
:param mode:
:return:
"""
writer = tf.python_io.TFRecordWriter(output_file)
Over = 0
    # Iterate over the training data
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
tf.logging.info("Writing example %d of %d" %
(ex_index, len(examples)))
        # For each training example,
feature, over = convert_single_example(
ex_index, example, label_list, max_seq_length, tokenizer)
Over += over
def create_int_feature(values):
f = tf.train.Feature(
int64_list=tf.train.Int64List(value=list(values)))
return f
features = collections.OrderedDict()
features["input_ids"] = create_int_feature(feature.input_ids)
features["input_mask"] = create_int_feature(feature.input_mask)
features["segment_ids"] = create_int_feature(feature.segment_ids)
features["label_ids"] = create_int_feature([feature.label_id])
features["sub_ptr"] = create_int_feature(feature.sub_ptr)
features["obj_ptr"] = create_int_feature(feature.obj_ptr)
        # tf.train.Example/Feature is a protocol format that makes serialization convenient
tf_example = tf.train.Example(
features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
writer.close()
print("over length:{:.5f}".format(Over / ex_index))
def file_based_dataset(input_file, batch_size, seq_length, is_training, drop_remainder):
name_to_features = {
"input_ids": tf.FixedLenFeature([seq_length], tf.int64),
"input_mask": tf.FixedLenFeature([seq_length], tf.int64),
"segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
"label_ids": tf.FixedLenFeature([], tf.int64),
"sub_ptr": tf.FixedLenFeature([2], tf.int64),
"obj_ptr": tf.FixedLenFeature([2], tf.int64)
}
def _decode_record(record, name_to_features):
example = tf.parse_single_example(record, name_to_features)
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
d = tf.data.TFRecordDataset(input_file)
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=10000)
d = d.apply(tf.data.experimental.map_and_batch(lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
                                                   num_parallel_calls=8,  # number of CPU cores used to process data in parallel; do not exceed your machine's core count
drop_remainder=drop_remainder))
d = d.prefetch(buffer_size=4)
return d
def get_last_checkpoint(model_path):
if not os.path.exists(os.path.join(model_path, 'checkpoint')):
        tf.logging.info('checkpoint file does not exist: {}'.format(
os.path.join(model_path, 'checkpoint')))
return None
last = None
with codecs.open(os.path.join(model_path, 'checkpoint'), 'r', encoding='utf-8') as fd:
for line in fd:
line = line.strip().split(':')
if len(line) != 2:
continue
if line[0] == 'model_checkpoint_path':
last = line[1][2:-1]
break
return last
def adam_filter(model_path):
"""
    Remove the Adam-related parameters from the model; they are not needed at test time
:param model_path:
:return:
"""
last_name = get_last_checkpoint(model_path)
if last_name is None:
return
with tf.Session(graph=tf.Graph()) as sess:
imported_meta = tf.train.import_meta_graph(
os.path.join(model_path, last_name + '.meta'))
imported_meta.restore(sess, os.path.join(model_path, last_name))
need_vars = []
for var in tf.global_variables():
if 'adam_v' not in var.name and 'adam_m' not in var.name:
need_vars.append(var)
saver = tf.train.Saver(need_vars)
saver.save(sess, os.path.join(model_path, 'model.ckpt'))
def result_to_pair(writer, examples, data_file, result, tokenizer):
f = open(data_file, 'r')
for line, prediction, example in zip(f, result, examples):
line = line.strip()
tokens = tokenizer.tokenize(example.text_a)
sub = ''.join(
''.join(tokens[max(0, prediction[0] - 1):prediction[1]]).split('#'))
obj = ''.join(
''.join(tokens[max(0, prediction[2] - 1):prediction[3]]).split('#'))
writer.write(line + '\t' + '\t'.join([str(s) for s in list(prediction)]) +
'\t' + sub + '\t' + obj + '\n')
def train_and_eval(args, processor, tokenizer, bert_config, sess_config, label_list):
"""
    Training and evaluation function
"""
    # Generate the tf_record files
train_examples = processor.get_train_examples(args.data_dir)
eval_examples = processor.get_dev_examples(args.data_dir)
num_train_steps = int(
len(train_examples) * 1.0 / args.batch_size * args.num_train_epochs)
if num_train_steps < 1:
raise AttributeError('training data is so small...')
num_warmup_steps = int(num_train_steps * args.warmup_proportion)
tf.logging.info("***** Running training *****")
tf.logging.info(" Num examples = %d", len(train_examples))
tf.logging.info(" Batch size = %d", args.batch_size)
tf.logging.info(" Num steps = %d", num_train_steps)
tf.logging.info("***** Running evaluation *****")
tf.logging.info(" Num examples = %d", len(eval_examples))
tf.logging.info(" Batch size = %d", args.batch_size)
    # Write the tfrecord files
train_file = os.path.join(args.output_dir, "train.tf_record")
if not os.path.exists(train_file):
filed_based_convert_examples_to_features(
train_examples, label_list, args.max_seq_length, tokenizer, train_file)
eval_file = os.path.join(args.output_dir, "eval.tf_record")
if not os.path.exists(eval_file):
filed_based_convert_examples_to_features(
eval_examples, label_list, args.max_seq_length, tokenizer, eval_file)
"""
    ------------- section break -------------
"""
    # Storage paths
log_dir = os.path.join(args.output_dir, 'log')
save_dir = os.path.join(args.output_dir, 'model')
if not os.path.exists(log_dir):
os.makedirs(log_dir)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
    # # Load data
# train_file = os.path.join(args.output_dir, "train.tf_record")
# eval_file = os.path.join(args.output_dir, "eval.tf_record")
# if not os.path.exists(train_file) or not os.path.exists(eval_file):
# raise ValueError
    # Build the datasets
train_data = file_based_dataset(input_file=train_file, batch_size=args.batch_size,
seq_length=args.max_seq_length, is_training=True, drop_remainder=False)
eval_data = file_based_dataset(input_file=eval_file, batch_size=args.batch_size,
seq_length=args.max_seq_length, is_training=False, drop_remainder=False)
train_iter = train_data.make_one_shot_iterator().get_next()
    # Start the computation graph
with tf.Session(config=sess_config) as sess:
        # Build the model
input_ids = tf.placeholder(
shape=[None, args.max_seq_length], dtype=tf.int32, name='input_ids')
input_mask = tf.placeholder(
shape=[None, args.max_seq_length], dtype=tf.int32, name='input_mask')
segment_ids = tf.placeholder(
shape=[None, args.max_seq_length], dtype=tf.int32, name='segment_ids')
label_ids = tf.placeholder(
shape=[None], dtype=tf.int32, name='label_ids')
sub_ptr = tf.placeholder(
shape=[None, 2], dtype=tf.int32, name='sub_ptr')
obj_ptr = tf.placeholder(
shape=[None, 2], dtype=tf.int32, name='obj_ptr')
is_training = tf.get_variable(
"is_training", shape=[], dtype=tf.bool, trainable=False)
total_loss, pred_ids = create_model_ptr(
bert_config, is_training, input_ids, input_mask, segment_ids, label_ids, sub_ptr, obj_ptr, len(label_list))
        # Optimizer
train_op = optimization.create_optimizer(
total_loss, args.learning_rate, num_train_steps, num_warmup_steps, False)
sess.run(tf.global_variables_initializer())
        # Load the original BERT checkpoint
tvars = tf.trainable_variables()
if args.init_checkpoint:
(assignment_map, initialized_variable_names) = \
modeling.get_assignment_map_from_checkpoint(
tvars, args.init_checkpoint)
tf.train.init_from_checkpoint(args.init_checkpoint, assignment_map)
            # Print the parameters loaded from the checkpoint
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
        # Initialize checkpointing and logging
writer = tf.summary.FileWriter(log_dir, sess.graph)
saver = tf.train.Saver()
        # Define some global variables
best_eval_acc = 0.0
patience = 0
        # Start training
sess.run(tf.assign(is_training, tf.constant(True, dtype=tf.bool)))
for go in range(1, num_train_steps + 1):
# feed
train_batch = sess.run(train_iter)
loss, op = sess.run([total_loss, train_op], feed_dict={
input_ids: train_batch['input_ids'], input_mask: train_batch['input_mask'],
segment_ids: train_batch['segment_ids'], label_ids: train_batch['label_ids'],
sub_ptr: train_batch['sub_ptr'], obj_ptr: train_batch['obj_ptr']})
print(loss)
if go % args.save_summary_steps == 0:
                # Training log
writer.add_summary(tf.Summary(value=[tf.Summary.Value(
tag="loss/train_loss", simple_value=loss / args.batch_size), ]), sess.run(tf.train.get_global_step()))
writer.flush()
if go % args.save_checkpoints_steps == 0:
                # Evaluate on the validation set
sess.run(tf.assign(is_training, tf.constant(False, dtype=tf.bool)))
eval_loss_total = 0.0
eval_preds_total = np.array([[0] * 4], dtype=np.int32)
eval_truth_total = np.array([[0] * 4], dtype=np.int32)
                # Regenerate the validation dataset
eval_data = eval_data.repeat()
eval_iter = eval_data.make_one_shot_iterator().get_next()
# for _ in range(0, int(len(eval_examples) / args.batch_size) + 1):
                # the eval set is large; using all of it every time would be too time-consuming
for _ in range(int(len(eval_examples) / args.batch_size) + 1):
# eval feed
eval_batch = sess.run(eval_iter)
eval_loss, eval_preds, eval_sub, eval_obj = sess.run([total_loss, pred_ids, sub_ptr, obj_ptr], feed_dict={
input_ids: eval_batch['input_ids'], input_mask: eval_batch['input_mask'],
segment_ids: eval_batch['segment_ids'], label_ids: eval_batch['label_ids'],
sub_ptr: eval_batch['sub_ptr'], obj_ptr: eval_batch['obj_ptr']})
                    # Accumulate the results
eval_loss_total += eval_loss
eval_preds_total = np.concatenate(
(eval_preds_total, eval_preds))
eval_truth_total = np.concatenate(
(eval_truth_total, np.concatenate((eval_sub, eval_obj), -1)))
                # Process the evaluation results and compute recall and F1
eval_preds_total = eval_preds_total[1:]
eval_truth_total = eval_truth_total[1:]
eval_f1 = metrics.f1_score(
eval_truth_total.reshape(-1), eval_preds_total.reshape(-1), average='macro')
eval_acc = metrics.accuracy_score(
eval_truth_total.reshape(-1), eval_preds_total.reshape(-1))
eval_loss_aver = eval_loss_total / len(eval_examples)
                # Metrics for the entity-relation classification
                # Evaluation log
writer.add_summary(tf.Summary(value=[tf.Summary.Value(
tag="loss/eval_loss", simple_value=eval_loss_aver), ]), sess.run(tf.train.get_global_step()))
writer.add_summary(tf.Summary(value=[tf.Summary.Value(
tag="eval/f1", simple_value=eval_f1), ]), sess.run(tf.train.get_global_step()))
writer.add_summary(tf.Summary(value=[tf.Summary.Value(
tag="eval/acc", simple_value=eval_acc), ]), sess.run(tf.train.get_global_step()))
writer.flush()
                # Early stopping and model saving
if eval_acc <= best_eval_acc:
patience += 1
if patience >= 100:
print("early stoping!")
return
if eval_acc > best_eval_acc:
patience = 0
best_eval_acc = eval_acc
saver.save(sess, os.path.join(save_dir, "model_{}_acc_{:.4f}.ckpt".format(
sess.run(tf.train.get_global_step()), best_eval_acc)))
sess.run(tf.assign(is_training, tf.constant(False, dtype=tf.bool)))
def predict(args, processor, tokenizer, bert_config, sess_config, label_list):
"""
    Prediction function
"""
    # Generate the test examples
predict_examples = processor.get_test_examples(args.data_dir)
predict_file = os.path.join(args.output_dir, "predict.tf_record")
filed_based_convert_examples_to_features(
predict_examples, label_list, args.max_seq_length, tokenizer, predict_file)
tf.logging.info("***** Running prediction*****")
tf.logging.info(" Num examples = %d", len(predict_examples))
tf.logging.info(" Batch size = %d", args.batch_size)
eval_examples = processor.get_dev_examples(args.data_dir)
eval_file = os.path.join(args.output_dir, "eval.tf_record")
    # Build the datasets
eval_data = file_based_dataset(input_file=eval_file, batch_size=args.batch_size,
seq_length=args.max_seq_length, is_training=False, drop_remainder=False)
predict_data = file_based_dataset(input_file=predict_file, batch_size=args.batch_size,
seq_length=args.max_seq_length, is_training=False, drop_remainder=False)
eval_iter = eval_data.make_one_shot_iterator().get_next()
predict_iter = predict_data.make_one_shot_iterator().get_next()
    # Start the computation graph
with tf.Session(config=sess_config) as sess:
        # Read the computation graph from file
save_dir = os.path.join(args.output_dir, 'model')
# saver = tf.train.import_meta_graph(
# tf.train.latest_checkpoint(save_dir) + ".meta")
# sess.run(tf.global_variables_initializer())
        # Print tensor names
# tensor_list = [
# n.name for n in tf.get_default_graph().as_graph_def().node if 'keep_prob' in n.name]
# print(tensor_list)
        # Build the model
input_ids = tf.placeholder(
shape=[None, args.max_seq_length], dtype=tf.int32, name='input_ids')
input_mask = tf.placeholder(
shape=[None, args.max_seq_length], dtype=tf.int32, name='input_mask')
segment_ids = tf.placeholder(
shape=[None, args.max_seq_length], dtype=tf.int32, name='segment_ids')
label_ids = tf.placeholder(
shape=[None], dtype=tf.int32, name='label_ids')
sub_ptr = tf.placeholder(
shape=[None, 2], dtype=tf.int32, name='sub_ptr')
obj_ptr = tf.placeholder(
shape=[None, 2], dtype=tf.int32, name='obj_ptr')
is_training = tf.get_variable(
"is_training", shape=[], dtype=tf.bool, trainable=False)
total_loss, pred_ids = create_model_ptr(
bert_config, is_training, input_ids, input_mask, segment_ids, label_ids, sub_ptr, obj_ptr, len(label_list))
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
saver.restore(sess, tf.train.latest_checkpoint(save_dir))
        # # Fetch the model's placeholders and parameters by tensor name
# input_ids = tf.get_default_graph().get_tensor_by_name('input_ids:0')
# input_mask = tf.get_default_graph().get_tensor_by_name('input_mask:0')
# segment_ids = tf.get_default_graph().get_tensor_by_name('segment_ids:0')
# label_ids = tf.get_default_graph().get_tensor_by_name('label_ids:0')
# sub_ptr = tf.get_default_graph().get_tensor_by_name('sub_ptr:0')
# obj_ptr = tf.get_default_graph().get_tensor_by_name('obj_ptr:0')
# is_t = tf.get_default_graph().get_tensor_by_name('is_training:0')
        # Find the output
# pred_ids = tf.get_default_graph().get_tensor_by_name('loss/pred_ids:0')
sess.run(tf.assign(is_training, tf.constant(False, dtype=tf.bool)))
        # Predict on the eval set
eval_total = np.array([[0] * 4], dtype=np.int32)
eval_truth_total = np.array([[0] * 4], dtype=np.int32)
for _ in range(0, int(len(eval_examples) / args.batch_size) + 1):
# predict feed
eval_batch = sess.run(eval_iter)
eval_res, eval_sub, eval_obj = sess.run([pred_ids, sub_ptr, obj_ptr], feed_dict={
input_ids: eval_batch['input_ids'], input_mask: eval_batch['input_mask'],
segment_ids: eval_batch['segment_ids'], label_ids: eval_batch['label_ids'],
sub_ptr: eval_batch['sub_ptr'], obj_ptr: eval_batch['obj_ptr']})
eval_total = np.concatenate((eval_total, eval_res))
eval_truth_total = np.concatenate(
(eval_truth_total, np.concatenate((eval_sub, eval_obj), -1)))
        # Process the evaluation results and compute recall and F1
eval_total = eval_total[1:]
eval_truth_total = eval_truth_total[1:]
eval_acc = metrics.accuracy_score(
eval_truth_total.reshape(-1), eval_total.reshape(-1))
print("Eval_Acc:{:.5f}".format(eval_acc))
output_eval_file = os.path.join(args.output_dir, "prediction_dev.txt")
with codecs.open(output_eval_file, 'w', encoding='utf-8') as writer:
result_to_pair(writer, eval_examples, os.path.join(
args.data_dir, 'dev.txt'), eval_total, tokenizer)
        # Predict on the test set
predict_total = np.array([[0] * 4], dtype=np.int32)
for _ in range(0, int(len(predict_examples) / args.batch_size) + 1):
# predict feed
predict_batch = sess.run(predict_iter)
predict_res = sess.run(pred_ids, feed_dict={
input_ids: predict_batch['input_ids'], input_mask: predict_batch['input_mask'],
segment_ids: predict_batch['segment_ids'], label_ids: predict_batch['label_ids'],
sub_ptr: predict_batch['sub_ptr'], obj_ptr: predict_batch['obj_ptr']})
predict_total = np.concatenate((predict_total, predict_res))
        # Process the evaluation results and compute recall and F1
predict_total = predict_total[1:]
output_predict_file = os.path.join(
args.output_dir, "prediction_test.txt")
with codecs.open(output_predict_file, 'w', encoding='utf-8') as writer:
result_to_pair(writer, predict_examples, os.path.join(
args.data_dir, 'test.txt'), predict_total, tokenizer)
if __name__ == '__main__':
"""
    Start execution
"""
args = get_args_parser()
args.output_dir = os.path.join(
args.output_dir, 'SO_model_' + args.experiment_name)
os.environ['CUDA_VISIBLE_DEVICES'] = args.device_map
tf.logging.set_verbosity(tf.logging.INFO)
if True:
param_str = '\n'.join(['%20s = %s' % (k, v)
for k, v in sorted(vars(args).items())])
print('usage: %s\n%20s %s\n%s\n%s\n' %
(' '.join(sys.argv), 'ARG', 'VALUE', '_' * 50, param_str))
processors = {
"Ptr": PtrProcessor
}
bert_config = modeling.BertConfig.from_json_file(args.bert_config_file)
if args.max_seq_length > bert_config.max_position_embeddings:
raise ValueError(
"Cannot use sequence length %d because the BERT model "
"was only trained up to sequence length %d" %
(args.max_seq_length, bert_config.max_position_embeddings))
    # Only delete the files produced by the previous run when retraining; do not clean when predicting
if args.clean and args.do_train:
print('hahaha')
if os.path.exists(args.output_dir):
def del_file(path):
ls = os.listdir(path)
for i in ls:
c_path = os.path.join(path, i)
if os.path.isdir(c_path):
del_file(c_path)
else:
os.remove(c_path)
try:
del_file(args.output_dir)
except Exception as e:
print(e)
                print('please remove the files in the output dir and data.conf')
exit(-1)
# check output dir exists
if not os.path.exists(args.output_dir):
os.mkdir(args.output_dir)
    # Create the ptr data processor object
processor = processors[args.ptr](args.output_dir)
label_list = processor.get_labels(labels=args.label_list)
print(len(label_list))
tokenizer = tokenization.FullTokenizer(
vocab_file=args.vocab_file, do_lower_case=args.do_lower_case)
session_config = tf.ConfigProto(
log_device_placement=False,
inter_op_parallelism_threads=0,
intra_op_parallelism_threads=0,
allow_soft_placement=True)
if args.do_train and args.do_eval:
train_and_eval(args=args, processor=processor, tokenizer=tokenizer,
bert_config=bert_config, sess_config=session_config, label_list=label_list)
# if args.filter_adam_var:
# adam_filter(os.path.join(args.output_dir, 'model'))
if args.do_predict:
predict(args=args, processor=processor, tokenizer=tokenizer,
bert_config=bert_config, sess_config=session_config, label_list=label_list)
|
StarcoderdataPython
|
1785672
|
<reponame>samarmohan/tutoring-api
from django.urls import include, path
from rest_framework import routers
from .api import TutorAPI, TuteeAPI
router = routers.DefaultRouter()
router.register(r'tutors', TutorAPI)
router.register(r'tutees', TuteeAPI)
urlpatterns = [
path('', include(router.urls)),
]
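# For reference, the DefaultRouter above roughly exposes:
#   /tutors/ and /tutors/<pk>/ -> TutorAPI
#   /tutees/ and /tutees/<pk>/ -> TuteeAPI
# plus a browsable API root at the prefix itself.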
|
StarcoderdataPython
|
3288475
|
<gh_stars>0
# Python
import os
import sys
import traceback
# Pycompss
from pycompss.api.task import task
from pycompss.api.parameter import FILE_IN, FILE_OUT
# Adapters commons pycompss
from biobb_adapters.pycompss.biobb_commons import task_config
# Wrapped Biobb
from biobb_vs.fpocket.fpocket import FPocket # Importing class instead of module to avoid name collision
task_time_out = int(os.environ.get('TASK_TIME_OUT', 0))
@task(input_pdb_path=FILE_IN, output_pockets_zip=FILE_OUT, output_summary=FILE_OUT,
on_failure="IGNORE", time_out=task_time_out)
def _fpocket(input_pdb_path, output_pockets_zip, output_summary, properties, **kwargs):
task_config.pop_pmi(os.environ)
try:
FPocket(input_pdb_path=input_pdb_path, output_pockets_zip=output_pockets_zip, output_summary=output_summary, properties=properties, **kwargs).launch()
except Exception as e:
traceback.print_exc()
raise e
finally:
sys.stdout.flush()
sys.stderr.flush()
def fpocket(input_pdb_path, output_pockets_zip, output_summary, properties=None, **kwargs):
if (output_pockets_zip is None or os.path.exists(output_pockets_zip)) and \
(output_summary is None or os.path.exists(output_summary)) and \
True:
print("WARN: Task FPocket already executed.")
else:
_fpocket( input_pdb_path, output_pockets_zip, output_summary, properties, **kwargs)
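# A minimal call sketch with hypothetical file names (properties left as None so the wrapped
# biobb_vs FPocket defaults apply; normally this runs under the PyCOMPSs runtime):
if __name__ == "__main__":
    fpocket(
        input_pdb_path="protein.pdb",           # hypothetical input structure
        output_pockets_zip="pockets.zip",       # hypothetical output archive
        output_summary="fpocket_summary.json",  # hypothetical summary file
        properties=None,
    )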
|
StarcoderdataPython
|
1653129
|
from finnhub_api.client import FinnHubClient
|
StarcoderdataPython
|
1606887
|
from typing import TYPE_CHECKING, Tuple
if TYPE_CHECKING:
from destiny_timelost.side import Side
class Link:
def __init__(self, *sides: Tuple["Side", ...]) -> None:
self.sides = sorted(sides, key=lambda side: side.idx)
@property
def first_side(self) -> "Side":
return self.sides[0]
@property
def second_side(self) -> "Side":
return self.sides[1]
def __eq__(self, other) -> bool:
return (self.first_side, self.second_side) == (
other.first_side,
other.second_side,
)
def __hash__(self) -> int:
return hash((self.first_side, self.second_side))
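# A small guarded demo; SimpleSide is a hypothetical stand-in for destiny_timelost.side.Side,
# of which only the `idx` attribute used for sorting is assumed here.
if __name__ == "__main__":
    class SimpleSide:
        def __init__(self, idx: int) -> None:
            self.idx = idx
    a, b = SimpleSide(2), SimpleSide(1)
    link = Link(a, b)
    # Sides are stored sorted by idx, so the side with the lower index comes first.
    assert link.first_side is b and link.second_side is a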
|
StarcoderdataPython
|
3235797
|
<filename>app.py<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 22 11:40:09 2020
@author: Mansi
"""
import pygame
import game_config as gc
from pygame import display, event, image
from time import sleep
from animal import Animal
def find_index_from_xy(x, y):
row = y // gc.IMAGE_SIZE
col = x // gc.IMAGE_SIZE
index = row * gc.NUM_TILES_SIDE + col
return row, col, index
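# Worked example with hypothetical config values: if gc.IMAGE_SIZE were 128 and gc.NUM_TILES_SIDE
# were 4, a click at (x=300, y=150) would give row = 150 // 128 = 1, col = 300 // 128 = 2,
# and index = 1 * 4 + 2 = 6.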
pygame.init()
display.set_caption('My Game')
screen = display.set_mode((gc.SCREEN_SIZE, gc.SCREEN_SIZE))
matched = image.load('other_assets/matched.png')
running = True
tiles = [Animal(i) for i in range(0, gc.NUM_TILES_TOTAL)]
current_images_displayed = []
while running:
current_events = event.get()
for e in current_events:
if e.type == pygame.QUIT:
running = False
if e.type == pygame.KEYDOWN:
if e.key == pygame.K_ESCAPE:
running = False
if e.type == pygame.MOUSEBUTTONDOWN:
mouse_x, mouse_y = pygame.mouse.get_pos()
row, col, index = find_index_from_xy(mouse_x, mouse_y)
if index not in current_images_displayed:
if len(current_images_displayed) > 1:
current_images_displayed = current_images_displayed[1:] + [index]
else:
current_images_displayed.append(index)
# Display animals
screen.fill((255, 255, 255))
total_skipped = 0
for i, tile in enumerate(tiles):
current_image = tile.image if i in current_images_displayed else tile.box
if not tile.skip:
screen.blit(current_image, (tile.col * gc.IMAGE_SIZE + gc.MARGIN, tile.row * gc.IMAGE_SIZE + gc.MARGIN))
else:
total_skipped += 1
display.flip()
# Check for matches
if len(current_images_displayed) == 2:
idx1, idx2 = current_images_displayed
if tiles[idx1].name == tiles[idx2].name:
tiles[idx1].skip = True
tiles[idx2].skip = True
# display matched message
sleep(0.2)
screen.blit(matched, (0, 0))
display.flip()
sleep(0.5)
current_images_displayed = []
if total_skipped == len(tiles):
running = False
print('Goodbye!')
|
StarcoderdataPython
|
1644194
|
<reponame>GEOS-ESM/mepo
import argparse
import textwrap
class MepoConfigArgParser(object):
def __init__(self, config):
self.config = config.add_subparsers()
self.config.title = 'mepo config sub-commands'
self.config.dest = 'mepo_config_cmd'
self.config.required = True
self.__get()
self.__set()
self.__delete()
self.__print()
def __get(self):
get = self.config.add_parser(
'get',
description = ('Get config `entry` in `.mepoconfig`. '
'Note this uses gitconfig style where `entry` is of the form `section.option`. '
                           'So to get an `alias` `st`, you would run `mepo config get alias.st`'))
get.add_argument(
'entry',
metavar = 'entry',
help = 'Entry to display.')
def __set(self):
set = self.config.add_parser(
'set',
description = ('Set config `entry` to `value` in `.mepoconfig`. '
'Note this uses gitconfig style where `entry` is of the form `section.option`. '
                           'So to set an `alias` for `status` of `st`, you would run `mepo config set alias.st status`'))
set.add_argument(
'entry',
metavar = 'entry',
help = 'Entry to set.')
set.add_argument(
'value',
metavar = 'value',
help = 'Value to set entry to.')
def __delete(self):
delete = self.config.add_parser(
'delete',
description = ('Delete config `entry` in `.mepoconfig`. '
'Note this uses gitconfig style where `entry` is of the form `section.option`. '
                           'So to delete an `alias` `st`, you would run `mepo config delete alias.st`'))
delete.add_argument(
'entry',
metavar = 'entry',
help = 'Entry to delete.')
def __print(self):
print = self.config.add_parser(
'print',
description = 'Print contents of `.mepoconfig`')
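# A minimal wiring sketch (hypothetical; the real mepo CLI builds its parsers elsewhere):
if __name__ == "__main__":
    top = argparse.ArgumentParser(prog="mepo")
    sub = top.add_subparsers(dest="mepo_cmd")
    config_parser = sub.add_parser("config", description="mepo config sub-commands")
    MepoConfigArgParser(config_parser)
    args = top.parse_args(["config", "get", "alias.st"])
    print(args.mepo_config_cmd, args.entry)  # prints: get alias.st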
|
StarcoderdataPython
|
186963
|
import task_list_storage as storage
import copy
class TaskList:
def __init__(self, listData):
self.name = listData[0]
self.__listData = listData
self.tasks = listData[1:len(listData)-1:2]
self.taskStatus = listData[2:len(listData):2]
# print("PING", listData)
# print("LenListData", len(listData))
# print("self.tasks:", self.tasks)
# print("Lenself.tasks:", len(self.tasks))
# print("self.taskStatus:", self.taskStatus)
# print("Lenself.taskStatus:", len(self.taskStatus))
# def addTaskList(self, name):
# prevList = storage.readData()
# print(prevList)
# def resetTaskList(self, listData):
# self.tasks = listData[1:len(listData[0])-1:2]
# self.taskStatus = listData[2:len(listData[0])-1:2]
# def __str__(self):
# return self.__listData[0]
def getListData(self):
return self.__listData
def taskComplete(self, taskIndex):
# print("Entered Task Complete Method")
# print("PING:",self.__listData)
self.taskStatus[taskIndex] = "True"
self.__listData[taskIndex * 2 + 2] = "True"
storage.updateList(self.__listData)
# print(self.__listData)
def main():
print("\n\r**********************************")
print("Welcome to the task_List Object Tester")
listIndex = 0
# taskIndex = 2
print()
allLists = storage.readLists()
print("All task lists:", allLists)
print("Lookup for index ", listIndex, ": ", allLists[listIndex], sep="")
listdata = []
listdata = storage.readData()
# print("Raw Data:",listdata[0])#Prints out what is in the selected task list
curTask = TaskList(listdata[listIndex])
print("Current Task List:", curTask.name)
print("Tasks in list:", curTask.tasks)
print("Status of tasks in list:", curTask.taskStatus)
print("Ping", storage.readData())
# storage.delTask("p1","p2")#curTask,curTask.tasks[taskIndex])
# taskList, taskName, file = "task_list_OfTasks.csv")
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
3360880
|
"""
The :mod:`kavica.parser` module includes data file parsers.
"""
from .prvparse import (ControlCZInterruptHandler,
ExtensionPathType,
ParsedArgs,
Parser)
__all__ = ['ControlCZInterruptHandler',
'ExtensionPathType',
'ParsedArgs',
'Parser']
|
StarcoderdataPython
|
4812173
|
<filename>pycave/__init__.py
import logging
import warnings
from .core import NotFittedError
# This is taken from PyTorch Lightning and ensures that logging for this package is enabled
_root_logger = logging.getLogger()
_logger = logging.getLogger(__name__)
_logger.setLevel(logging.INFO)
if not _root_logger.hasHandlers():
_logger.addHandler(logging.StreamHandler())
_logger.propagate = False
# This disables most logs generated by PyTorch Lightning
logging.getLogger("pytorch_lightning").setLevel(logging.WARNING)
warnings.filterwarnings(
action="ignore", message=".*Consider increasing the value of the `num_workers` argument.*"
)
warnings.filterwarnings(
action="ignore", message=".*`LightningModule.configure_optimizers` returned `None`.*"
)
warnings.filterwarnings(
action="ignore", message=".*`LoggerConnector.gpus_metrics` was deprecated in v1.5.*"
)
warnings.filterwarnings(
action="ignore", message=".*Lightning couldn't infer the indices fetched for your dataloader.*"
)
# We also want to define a function which silences info logs
def set_logging_enabled(enabled: bool) -> None:
"""
Enables or disables logging for the entire module. By default, logging is enabled.
Args:
enabled: Whether to enable logging.
"""
_logger.setLevel(logging.INFO if enabled else logging.WARNING)
# Export
__all__ = [
"NotFittedError",
"set_logging_enabled",
]
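# Usage sketch (as a comment, since this is the package __init__): downstream code can silence
# PyCave's info-level logs before fitting models.
#
#     import pycave
#     pycave.set_logging_enabled(False)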
|
StarcoderdataPython
|
1717029
|
import argparse
import torch
import torchmetrics
from loguru import logger
import head_segmentation.segmentation_pipeline as seg_pipeline
import scripts.training.data_loading as dl
def parse_args() -> argparse.Namespace:
# fmt: off
parser = argparse.ArgumentParser("Evaluates segmentation maps predictions on full resolution")
parser.add_argument("-dp", "--dataset_path", required=True, type=str, help="Path to a test dataset.")
parser.add_argument("-mp", "--model_path", required=True, type=str, help="Model's checkpoint to evaluate.")
parser.add_argument("-nn_in", "--nn_image_input_resolution", required=True, type=int, help="Neural Network input image resolution.")
# fmt: on
return parser.parse_args()
def evaluate() -> None:
logger.info("🚀 Evaluation process started.")
args = parse_args()
logger.info("📚 Creating dataset module.")
eval_dataset = dl.CelebAHeadSegmentationDataset(dataset_root=args.dataset_path)
logger.info("🕸 Loading neural network module.")
segmentation_pipeline = seg_pipeline.HumanHeadSegmentationPipeline(
model_path=args.model_path,
image_input_resolution=args.nn_image_input_resolution,
)
cm_metric = torchmetrics.ConfusionMatrix(num_classes=2)
logger.info("🔁 Evaluation loop.")
for image, true_segmap in eval_dataset:
predicted_segmap = segmentation_pipeline.predict(image)
cm_metric(torch.tensor(predicted_segmap), torch.tensor(true_segmap))
cm = cm_metric.compute()
cm = cm.detach()
ious = cm.diag() / (cm.sum(dim=1) + cm.sum(dim=0) - cm.diag() + 1e-15)
background_iou, head_iou = ious[0], ious[1]
mIoU = ious.mean()
logger.info(f"📈 Evaluation summary for a model {args.model_path}:")
logger.info(f"\t 🖼 Background class IoU: {background_iou:.6f}")
logger.info(f"\t 👦 Head class IoU: {head_iou:.6f}")
logger.info(f"\t 🤷 mIoU: {mIoU:.6f}")
logger.info("🏁 Evaluation process finished.")
if __name__ == "__main__":
evaluate()
|
StarcoderdataPython
|
41686
|
a = 5
b = 10
my_variable = 56
any_variable_name = 100
string_variable = "hello"
single_quotes = 'strings can have single quotes'
print(string_variable)
print(my_variable)
# print is a function with one parameter: what we want to print
def my_print_method(my_parameter):
print(my_parameter)
my_print_method(string_variable)
def my_multiplication_method(number_one, number_two):
return number_one * number_two
result = my_multiplication_method(a, b)
print(result)
print(my_multiplication_method(56, 75))
my_print_method(my_multiplication_method('b', 5)) # What would this do?
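# Answer to the question above: 'b' * 5 repeats the string, so this prints 'bbbbb'.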
|
StarcoderdataPython
|
1753459
|
<reponame>Sangarshanan/geopandas-view<filename>setup.py<gh_stars>10-100
import setuptools
setuptools.setup(
name="geopandas_view",
version="0.0.1",
author="<NAME>",
author_email="<EMAIL>",
python_requires=">=3.6",
install_requires=["geopandas", "folium", "mapclassify", "matplotlib"],
packages=setuptools.find_packages(),
)
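# Typical editable install of this package from the repository root:
#   pip install -e .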
|
StarcoderdataPython
|
139714
|
<reponame>g4brielvs/usaspending-api<filename>usaspending_api/broker/management/commands/update_transactions.py
import logging
from datetime import datetime
from usaspending_api.common.helpers.date_helper import fy
from django.core.management.base import BaseCommand
from django.db import connections, transaction as db_transaction, IntegrityError
from usaspending_api.etl.broker_etl_helpers import dictfetchall
from usaspending_api.awards.models import TransactionNormalized, TransactionFABS, TransactionFPDS
from usaspending_api.awards.models import Award
from usaspending_api.common.helpers.timing_helpers import timer
from usaspending_api.references.models import Agency, SubtierAgency, ToptierAgency
from usaspending_api.etl.management.load_base import format_date, load_data_into_model
from usaspending_api.etl.award_helpers import update_awards, update_procurement_awards, update_assistance_awards
logger = logging.getLogger("console")
exception_logger = logging.getLogger("exceptions")
# Lists to store for update_awards and update_procurement_awards
award_update_id_list = []
award_contract_update_id_list = []
award_assistance_update_id_list = []
subtier_agency_map = {
subtier_agency["subtier_code"]: subtier_agency["subtier_agency_id"]
for subtier_agency in SubtierAgency.objects.values("subtier_code", "subtier_agency_id")
}
subtier_to_agency_map = {
agency["subtier_agency_id"]: {"agency_id": agency["id"], "toptier_agency_id": agency["toptier_agency_id"]}
for agency in Agency.objects.values("id", "toptier_agency_id", "subtier_agency_id")
}
toptier_agency_map = {
toptier_agency["toptier_agency_id"]: toptier_agency["toptier_code"]
for toptier_agency in ToptierAgency.objects.values("toptier_agency_id", "toptier_code")
}
class Command(BaseCommand):
help = "Update historical transaction data for a fiscal year from the Broker."
@staticmethod
def update_transaction_assistance(db_cursor, fiscal_year=None, page=1, limit=500000):
query = "SELECT * FROM published_award_financial_assistance"
arguments = []
fy_begin = "10/01/" + str(fiscal_year - 1)
fy_end = "09/30/" + str(fiscal_year)
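        # Note (added): US federal fiscal year N runs from 10/01 of year N-1 through
        # 09/30 of year N, which is why the date bounds above are built this way.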
if fiscal_year:
if arguments:
query += " AND"
else:
query += " WHERE"
query += " action_date::Date BETWEEN %s AND %s"
arguments += [fy_begin]
arguments += [fy_end]
query += " ORDER BY published_award_financial_assistance_id LIMIT %s OFFSET %s"
arguments += [limit, (page - 1) * limit]
logger.info(
"Executing query on Broker DB => " + query % (arguments[0], arguments[1], arguments[2], arguments[3])
)
db_cursor.execute(query, arguments)
logger.info("Running dictfetchall on db_cursor")
award_financial_assistance_data = dictfetchall(db_cursor)
fabs_normalized_field_map = {
"type": "assistance_type",
"description": "award_description",
"funding_amount": "total_funding_amount",
}
fabs_field_map = {
"officer_1_name": "high_comp_officer1_full_na",
"officer_1_amount": "high_comp_officer1_amount",
"officer_2_name": "high_comp_officer2_full_na",
"officer_2_amount": "high_comp_officer2_amount",
"officer_3_name": "high_comp_officer3_full_na",
"officer_3_amount": "high_comp_officer3_amount",
"officer_4_name": "high_comp_officer4_full_na",
"officer_4_amount": "high_comp_officer4_amount",
"officer_5_name": "high_comp_officer5_full_na",
"officer_5_amount": "high_comp_officer5_amount",
}
logger.info("Getting total rows")
total_rows = len(award_financial_assistance_data) # - rows_loaded
logger.info("Processing " + str(total_rows) + " rows of assistance data")
# ROW ITERATION STARTS HERE
award_bulk = []
transaction_assistance_bulk = []
transaction_normalized_bulk = []
awarding_agency_list = []
funding_agency_list = []
logger.info("Getting award objects for {} rows...".format(len(award_financial_assistance_data)))
for index, row in enumerate(award_financial_assistance_data, 1):
# If awarding toptier agency code (aka CGAC) is not supplied on the D2 record,
# use the sub tier code to look it up. This code assumes that all incoming
# records will supply an awarding subtier agency code
if row["awarding_agency_code"] is None or len(row["awarding_agency_code"].strip()) < 1:
awarding_subtier_agency_id = subtier_agency_map[row["awarding_sub_tier_agency_c"]]
awarding_toptier_agency_id = subtier_to_agency_map[awarding_subtier_agency_id]["toptier_agency_id"]
awarding_toptier_code = toptier_agency_map[awarding_toptier_agency_id]
row["awarding_agency_code"] = awarding_toptier_code
# If funding toptier agency code (aka CGAC) is empty, try using the sub
# tier funding code to look it up. Unlike the awarding agency, we can't
# assume that the funding agency subtier code will always be present.
if row["funding_agency_code"] is None or len(row["funding_agency_code"].strip()) < 1:
funding_subtier_agency_id = subtier_agency_map.get(row["funding_sub_tier_agency_co"])
if funding_subtier_agency_id is not None:
funding_toptier_agency_id = subtier_to_agency_map[funding_subtier_agency_id]["toptier_agency_id"]
funding_toptier_code = toptier_agency_map[funding_toptier_agency_id]
else:
funding_toptier_code = None
row["funding_agency_code"] = funding_toptier_code
# Find the award that this award transaction belongs to. If it doesn't exist, create it.
awarding_agency = Agency.get_by_toptier_subtier(
row["awarding_agency_code"], row["awarding_sub_tier_agency_c"]
)
funding_agency = Agency.get_by_toptier_subtier(
row["funding_agency_code"], row["funding_sub_tier_agency_co"]
)
awarding_agency_list.append(awarding_agency)
funding_agency_list.append(funding_agency)
# award.save() is called in Award.get_or_create_summary_award by default
created, award = Award.get_or_create_summary_award(
awarding_agency=awarding_agency,
fain=row.get("fain"),
uri=row.get("uri"),
generated_unique_award_id=row.get("unique_award_key"),
save=False,
)
award_bulk.append(award)
award_update_id_list.append(award.id)
award_assistance_update_id_list.append(award.id)
logger.info("Bulk creating {} award rows...".format(len(award_bulk)))
try:
Award.objects.bulk_create(award_bulk)
except IntegrityError:
logger.info("!!! DUPLICATES FOUND. Continuing... ")
logger.info("Getting transaction_normalized for {} rows...".format(len(award_financial_assistance_data)))
for index, row in enumerate(award_financial_assistance_data, 1):
parent_txn_value_map = {
"award": award_bulk[index - 1],
"awarding_agency": awarding_agency_list[index - 1],
"funding_agency": funding_agency_list[index - 1],
"period_of_performance_start_date": format_date(row["period_of_performance_star"]),
"period_of_performance_current_end_date": format_date(row["period_of_performance_curr"]),
"action_date": format_date(row["action_date"]),
}
transaction_dict = load_data_into_model(
TransactionNormalized(), # thrown away
row,
field_map=fabs_normalized_field_map,
value_map=parent_txn_value_map,
as_dict=True,
)
transaction_normalized = TransactionNormalized.get_or_create_transaction(**transaction_dict)
transaction_normalized.fiscal_year = fy(transaction_normalized.action_date)
transaction_normalized_bulk.append(transaction_normalized)
logger.info("Bulk creating {} TransactionNormalized rows...".format(len(transaction_normalized_bulk)))
try:
TransactionNormalized.objects.bulk_create(transaction_normalized_bulk)
except IntegrityError:
logger.info("Tried and failed to insert duplicate transaction_normalized row. Continuing... ")
for index, row in enumerate(award_financial_assistance_data, 1):
financial_assistance_data = load_data_into_model(
TransactionFABS(), row, field_map=fabs_field_map, as_dict=True # thrown away
)
transaction_assistance = TransactionFABS(
transaction=transaction_normalized_bulk[index - 1], **financial_assistance_data
)
transaction_assistance_bulk.append(transaction_assistance)
logger.info("Bulk creating TransactionFABS rows...")
try:
TransactionFABS.objects.bulk_create(transaction_assistance_bulk)
except IntegrityError:
logger.info("!!! DUPLICATES FOUND. Continuing... ")
######################################################
@staticmethod
def update_transaction_contract(db_cursor, fiscal_year=None, page=1, limit=500000):
# logger.info("Getting IDs for what's currently in the DB...")
# current_ids = TransactionFPDS.objects
#
# if fiscal_year:
# current_ids = current_ids.filter(action_date__fy=fiscal_year)
#
# current_ids = current_ids.values_list('detached_award_procurement_id', flat=True)
query = "SELECT * FROM detached_award_procurement"
arguments = []
fy_begin = "10/01/" + str(fiscal_year - 1)
fy_end = "09/30/" + str(fiscal_year)
if fiscal_year:
if arguments:
query += " AND"
else:
query += " WHERE"
query += " action_date::Date BETWEEN %s AND %s"
arguments += [fy_begin]
arguments += [fy_end]
query += " ORDER BY detached_award_procurement_id LIMIT %s OFFSET %s"
arguments += [limit, (page - 1) * limit]
logger.info(
"Executing query on Broker DB => " + query % (arguments[0], arguments[1], arguments[2], arguments[3])
)
db_cursor.execute(query, arguments)
logger.info("Running dictfetchall on db_cursor")
procurement_data = dictfetchall(db_cursor)
fpds_normalized_field_map = {"type": "contract_award_type", "description": "award_description"}
fpds_field_map = {
"officer_1_name": "high_comp_officer1_full_na",
"officer_1_amount": "high_comp_officer1_amount",
"officer_2_name": "high_comp_officer2_full_na",
"officer_2_amount": "high_comp_officer2_amount",
"officer_3_name": "high_comp_officer3_full_na",
"officer_3_amount": "high_comp_officer3_amount",
"officer_4_name": "high_comp_officer4_full_na",
"officer_4_amount": "high_comp_officer4_amount",
"officer_5_name": "high_comp_officer5_full_na",
"officer_5_amount": "high_comp_officer5_amount",
}
logger.info("Getting total rows")
total_rows = len(procurement_data) # - rows_loaded
logger.info("Processing " + str(total_rows) + " rows of procurement data")
start_time = datetime.now()
for index, row in enumerate(procurement_data, 1):
with db_transaction.atomic():
if not (index % 100):
logger.info(
"D1 File Load: Loading row {} of {} ({})".format(
str(index), str(total_rows), datetime.now() - start_time
)
)
                # If awarding toptier agency code (aka CGAC) is not supplied on the D1 record,
# use the sub tier code to look it up. This code assumes that all incoming
# records will supply an awarding subtier agency code
if row["awarding_agency_code"] is None or len(row["awarding_agency_code"].strip()) < 1:
awarding_subtier_agency_id = subtier_agency_map[row["awarding_sub_tier_agency_c"]]
awarding_toptier_agency_id = subtier_to_agency_map[awarding_subtier_agency_id]["toptier_agency_id"]
awarding_toptier_code = toptier_agency_map[awarding_toptier_agency_id]
row["awarding_agency_code"] = awarding_toptier_code
# If funding toptier agency code (aka CGAC) is empty, try using the sub
# tier funding code to look it up. Unlike the awarding agency, we can't
# assume that the funding agency subtier code will always be present.
if row["funding_agency_code"] is None or len(row["funding_agency_code"].strip()) < 1:
funding_subtier_agency_id = subtier_agency_map.get(row["funding_sub_tier_agency_co"])
if funding_subtier_agency_id is not None:
funding_toptier_agency_id = subtier_to_agency_map[funding_subtier_agency_id][
"toptier_agency_id"
]
funding_toptier_code = toptier_agency_map[funding_toptier_agency_id]
else:
funding_toptier_code = None
row["funding_agency_code"] = funding_toptier_code
# Find the award that this award transaction belongs to. If it doesn't exist, create it.
awarding_agency = Agency.get_by_toptier_subtier(
row["awarding_agency_code"], row["awarding_sub_tier_agency_c"]
)
created, award = Award.get_or_create_summary_award(
awarding_agency=awarding_agency,
piid=row.get("piid"),
fain=row.get("fain"),
uri=row.get("uri"),
parent_award_piid=row.get("parent_award_id"),
generated_unique_award_id=row.get("unique_award_key"),
)
award.save()
award_update_id_list.append(award.id)
award_contract_update_id_list.append(award.id)
parent_txn_value_map = {
"award": award,
"awarding_agency": awarding_agency,
"funding_agency": Agency.get_by_toptier_subtier(
row["funding_agency_code"], row["funding_sub_tier_agency_co"]
),
"period_of_performance_start_date": format_date(row["period_of_performance_star"]),
"period_of_performance_current_end_date": format_date(row["period_of_performance_curr"]),
"action_date": format_date(row["action_date"]),
}
transaction_dict = load_data_into_model(
TransactionNormalized(), # thrown away
row,
field_map=fpds_normalized_field_map,
value_map=parent_txn_value_map,
as_dict=True,
)
transaction = TransactionNormalized.get_or_create_transaction(**transaction_dict)
transaction.save()
contract_instance = load_data_into_model(
TransactionFPDS(), row, field_map=fpds_field_map, as_dict=True # thrown away
)
transaction_contract = TransactionFPDS(transaction=transaction, **contract_instance)
# catch exception and do nothing if we see
# "django.db.utils.IntegrityError: duplicate key value violates unique constraint"
try:
transaction_contract.save()
except IntegrityError:
pass
def add_arguments(self, parser):
parser.add_argument(
"--fiscal_year", dest="fiscal_year", nargs="+", type=int, help="Year for which to run the historical load"
)
parser.add_argument(
"--assistance",
action="store_true",
dest="assistance",
default=False,
help="Runs the historical loader only for Award Financial Assistance (Assistance) data",
)
parser.add_argument(
"--contracts",
action="store_true",
dest="contracts",
default=False,
help="Runs the historical loader only for Award Procurement (Contract) data",
)
parser.add_argument("--page", dest="page", nargs="+", type=int, help="Page for batching and parallelization")
parser.add_argument("--limit", dest="limit", nargs="+", type=int, help="Limit for batching and parallelization")
# @transaction.atomic
def handle(self, *args, **options):
logger.info("Starting historical data load...")
db_cursor = connections["data_broker"].cursor()
fiscal_year = options.get("fiscal_year")
page = options.get("page")
limit = options.get("limit")
if fiscal_year:
fiscal_year = fiscal_year[0]
logger.info("Processing data for Fiscal Year " + str(fiscal_year))
else:
fiscal_year = 2017
page = page[0] if page else 1
limit = limit[0] if limit else 500000
if not options["assistance"]:
with timer("D1 historical data load", logger.info):
self.update_transaction_contract(db_cursor=db_cursor, fiscal_year=fiscal_year, page=page, limit=limit)
if not options["contracts"]:
with timer("D2 historical data load", logger.info):
self.update_transaction_assistance(db_cursor=db_cursor, fiscal_year=fiscal_year, page=page, limit=limit)
with timer("updating awards to reflect their latest associated transaction info", logger.info):
update_awards(tuple(award_update_id_list))
with timer("updating assistance-specific awards to reflect their latest transaction info", logger.info):
update_assistance_awards(tuple(award_assistance_update_id_list))
with timer("updating contract-specific awards to reflect their latest transaction info", logger.info):
update_procurement_awards(tuple(award_contract_update_id_list))
# Done!
logger.info("FINISHED")
|
StarcoderdataPython
|
3303124
|
# coding: utf8
"""Utility functions for managing the sdk."""
import logging
import os
import platform
import subprocess
import sys
import urlparse
import requests
import semantic_version
from grow.common import config
from grow.common import utils
from xtermcolor import colorize
VERSION = config.VERSION
RELEASES_API = 'https://api.github.com/repos/grow/grow/releases'
INSTALLER_COMMAND = ('/usr/bin/python -c "$(curl -fsSL '
'https://raw.github.com/grow/grow/master/install.py)"')
PLATFORM = None
if 'Linux' in platform.system():
PLATFORM = 'linux'
elif 'Darwin' in platform.system():
PLATFORM = 'mac'
class Error(Exception):
pass
class LatestVersionCheckError(Error):
pass
def get_this_version():
return VERSION
def get_latest_version():
try:
releases = requests.get(RELEASES_API).json()
if 'message' in releases:
text = 'Error while downloading release information: {}'.format(
releases['message'])
logging.error(colorize(text, ansi=198))
raise LatestVersionCheckError(str(text))
for release in releases:
if release['prerelease']:
continue
for each_asset in release['assets']:
if PLATFORM in each_asset.get('name', '').lower():
return release['tag_name']
except LatestVersionCheckError:
raise
except Exception as e:
logging.error(colorize(str(e), ansi=198))
text = 'Unable to check for the latest version: {}'.format(str(e))
logging.error(colorize(text, ansi=198))
raise LatestVersionCheckError(str(e))
def check_sdk_version(pod):
sdk_version = get_this_version()
requires_version = pod.grow_version
if requires_version is None:
return
if (semantic_version.Version(sdk_version)
not in semantic_version.Spec(requires_version)):
text = 'ERROR! Pod requires Grow SDK version: {}'.format(
requires_version)
logging.error(colorize(text, ansi=197))
raise LatestVersionCheckError(str(text))
def check_for_sdk_updates(auto_update_prompt=False):
try:
theirs = get_latest_version()
yours = config.VERSION
except LatestVersionCheckError:
return
if theirs <= yours:
return
url = 'https://github.com/grow/grow/releases/tag/{}'.format(theirs)
logging.info('')
logging.info(' Please update to the newest version of the Grow SDK.')
logging.info(' See release notes: {}'.format(url))
logging.info(' Your version: {}, latest version: {}'.format(
colorize(yours, ansi=226), colorize(theirs, ansi=82)))
if utils.is_packaged_app() and auto_update_prompt:
if raw_input('Auto update now? [y/N]: ').lower() != 'y':
return
if subprocess.call(INSTALLER_COMMAND, shell=True) == 0:
logging.info('Restarting...')
os.execl(sys.argv[0], *sys.argv) # Restart on successful install.
else:
text = (
'In-place update failed. Update manually or use:\n'
' curl https://install.grow.io | bash')
logging.error(text)
sys.exit(-1)
else:
logging.info(' Update using: ' +
colorize('pip install --upgrade grow', ansi=200))
print ''
def get_popen_args(pod):
node_modules_path = os.path.join(pod.root, 'node_modules', '.bin')
env = os.environ.copy()
env['PATH'] = str(os.environ['PATH'] + os.path.pathsep + node_modules_path)
if pod.env.name:
env['GROW_ENVIRONMENT_NAME'] = pod.env.name
args = {
'cwd': pod.root,
'env': env,
}
if os.name == 'nt':
args['shell'] = True
return args
def install(pod, gerrit=None):
if gerrit or has_gerrit_remote(pod) and gerrit is not False:
install_gerrit_commit_hook(pod)
if pod.file_exists('/package.json'):
if pod.file_exists('/yarn.lock'):
success = install_yarn(pod)
else:
success = install_npm(pod)
if not success:
return
if pod.file_exists('/bower.json'):
success = install_bower(pod)
if not success:
return
if pod.file_exists('/gulpfile.js'):
success = install_gulp(pod)
if pod.file_exists('/extensions.txt'):
success = install_extensions(pod)
def has_gerrit_remote(pod):
KNOWN_GERRIT_HOSTS = (
'googlesource.com',
)
repo = utils.get_git_repo(pod.root)
if repo is None:
return False
for remote in repo.remotes:
url = remote.config_reader.get('url')
result = urlparse.urlparse(url)
if result.netloc.endswith(KNOWN_GERRIT_HOSTS):
return True
def install_gerrit_commit_hook(pod):
error_message = '[✘] There was an error installing the Gerrit commit hook.'
args = get_popen_args(pod)
curl_command = (
'curl -sLo '
'`git rev-parse --git-dir`/hooks/commit-msg '
'https://gerrit-review.googlesource.com/tools/hooks/commit-msg')
chmod_command = 'chmod +x `git rev-parse --git-dir`/hooks/commit-msg'
process = subprocess.Popen(curl_command, shell=True, **args)
code = process.wait()
if code:
pod.logger.error(error_message)
return False
process = subprocess.Popen(chmod_command, shell=True, **args)
code = process.wait()
if code:
pod.logger.error(error_message)
return False
pod.logger.info('[✓] Finished: Installed Gerrit Code Review commit hook.')
return True
def install_npm(pod):
args = get_popen_args(pod)
npm_status_command = 'npm --version > /dev/null 2>&1'
npm_not_found = subprocess.call(
npm_status_command, shell=True, **args) == 127
if npm_not_found:
if PLATFORM == 'linux':
pod.logger.error('[✘] The "npm" command was not found.')
pod.logger.error(
' On Linux, you can install npm using: apt-get install nodejs')
elif PLATFORM == 'mac':
pod.logger.error('[✘] The "npm" command was not found.')
pod.logger.error(
' Using brew (https://brew.sh), you can install using: brew install node')
pod.logger.error(
' If you do not have brew, you can download Node.js from https://nodejs.org')
else:
pod.logger.error('[✘] The "npm" command was not found.')
pod.logger.error(' Download Node.js from https://nodejs.org')
return
pod.logger.info('[✓] "npm" is installed.')
npm_command = 'npm install'
process = subprocess.Popen(npm_command, shell=True, **args)
code = process.wait()
if not code:
pod.logger.info('[✓] Finished: npm install.')
return True
pod.logger.error('[✘] There was an error running "npm install".')
def install_bower(pod):
args = get_popen_args(pod)
bower_status_command = 'bower --version > /dev/null 2>&1'
bower_not_found = subprocess.call(
bower_status_command, shell=True, **args) == 127
if bower_not_found:
pod.logger.error('[✘] The "bower" command was not found.')
pod.logger.error(
' Either add bower to package.json or install globally using:'
' sudo npm install -g bower')
return
pod.logger.info('[✓] "bower" is installed.')
bower_command = 'bower install'
process = subprocess.Popen(bower_command, shell=True, **args)
code = process.wait()
if not code:
pod.logger.info('[✓] Finished: bower install.')
return True
pod.logger.error('[✘] There was an error running "bower install".')
def install_gulp(pod):
args = get_popen_args(pod)
gulp_status_command = 'gulp --version > /dev/null 2>&1'
gulp_not_found = subprocess.call(
gulp_status_command, shell=True, **args) == 127
if gulp_not_found:
pod.logger.error('[✘] The "gulp" command was not found.')
pod.logger.error(
' Either add gulp to package.json or install globally using:'
' sudo npm install -g gulp')
return
pod.logger.info('[✓] "gulp" is installed.')
return True
def install_extensions(pod):
args = get_popen_args(pod)
pip_status_command = 'pip --version > /dev/null 2>&1'
pip_not_found = subprocess.call(
pip_status_command, shell=True, **args) == 127
if pip_not_found:
pod.logger.error('[✘] The "pip" command was not found.')
return
extensions_dir = pod.extensions_dir
pod.logger.info('[✓] "pip" is installed.')
command = 'pip install -U -t {} -r extensions.txt'
pip_command = command.format(extensions_dir)
process = subprocess.Popen(pip_command, shell=True, **args)
code = process.wait()
if not code:
init_file_name = '/{}/__init__.py'.format(extensions_dir)
if not pod.file_exists(init_file_name):
pod.write_file(init_file_name, '')
text = '[✓] Installed: extensions.txt -> {}'
pod.logger.info(text.format(extensions_dir))
return True
pod.logger.error('[✘] There was an error running "{}".'.format(pip_command))
def install_yarn(pod):
args = get_popen_args(pod)
yarn_status_command = 'yarn --version > /dev/null 2>&1'
yarn_not_found = subprocess.call(
yarn_status_command, shell=True, **args) == 127
if yarn_not_found:
pod.logger.error('[✘] The "yarn" command was not found.')
pod.logger.error(' Please install using: yarn install -g yarn')
return
pod.logger.info('[✓] "yarn" is installed.')
yarn_command = 'yarn install'
process = subprocess.Popen(yarn_command, shell=True, **args)
code = process.wait()
if not code:
pod.logger.info('[✓] Finished: yarn install.')
return True
pod.logger.error('[✘] There was an error running "yarn install".')
|
StarcoderdataPython
|
2881
|
import json
import multiprocessing as mp
import re
from argparse import ArgumentParser
from enum import Enum, auto
import javalang
from functools import partial
PRED_TOKEN = 'PRED'
modifiers = ['public', 'private', 'protected', 'static']
class TargetType(Enum):
seq = auto()
tree = auto()
@staticmethod
def from_string(s):
try:
return TargetType[s]
except KeyError:
raise ValueError()
target_type = TargetType.seq
RE_WORDS = re.compile(r'''
# Find words in a string. Order matters!
[A-Z]+(?=[A-Z][a-z]) | # All upper case before a capitalized word
[A-Z]?[a-z]+ | # Capitalized words / all lower case
[A-Z]+ | # All upper case
\d+ | # Numbers
_ |
\" |
.+
''', re.VERBOSE)
TREE_SPLIT = re.compile(r'([(),])')
def split_subtokens(str):
return [subtok for subtok in RE_WORDS.findall(str) if not subtok == '_']
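# Worked example (added for illustration): split_subtokens("getFileName") returns
# ['get', 'File', 'Name'], and split_subtokens("HTMLParser") returns ['HTML', 'Parser']
# because an all-caps run is kept together when it precedes a capitalized word;
# underscores are dropped by the filter above.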
def subtokenize(s):
failed = False
try:
tokens = list(javalang.tokenizer.tokenize(s))
except:
try:
tokens = list(javalang.tokenizer.tokenize(s + '()'))[:-2]
except:
try:
tokens = list(javalang.tokenizer.tokenize('(' + s + ')'))[1:-1]
except:
tokens = s.split()
failed = True
if failed:
return [' _ '.join(split_subtokens(i)) for i in tokens if not i in modifiers]
else:
return [' _ '.join(split_subtokens(i.value)) for i in tokens if not i.value in modifiers]
def subtokenize_tree(s):
return ' '.join([sub for sub in re.split(TREE_SPLIT, s) if len(sub) > 0])
def process_line(target_type, max_targets, max_nodes, line):
obj = json.loads(line)
left_context = obj['left_context']
right_context = obj['right_context']
target_seq = obj['target_seq']
num_targets = obj['num_targets']
num_nodes = obj['num_nodes']
if max_targets is not None and num_targets > max_targets:
return None, None
if max_nodes is not None and num_nodes > max_nodes:
return None, None
if target_type is TargetType.seq:
target_pred = ' '.join(subtokenize(target_seq)).lower()
elif target_type is TargetType.tree:
target_pred = subtokenize_tree(obj['linearized_tree'])
source = '{} {} {}'.format(' '.join(subtokenize(left_context)[-200:]).lower(), PRED_TOKEN, ' '.join(subtokenize(right_context)[:200]).lower())
return source, target_pred
def process_file(file_path, data_file_role, dataset_name, target_type, max_targets, max_nodes):
total_examples = 0
source_output_path = '{}.{}.{}.source.txt'.format(dataset_name, target_type, data_file_role)
target_output_path = '{}.{}.{}.target.txt'.format(dataset_name, target_type, data_file_role)
with open(source_output_path, 'w') as source_output_file:
with open(target_output_path, 'w') as target_output_file:
with open(file_path, 'r') as file:
subtokenize_line = partial(process_line, target_type, max_targets, max_nodes)
with mp.Pool(64) as pool:
if data_file_role in ['test', 'val']:
examples = [process_line(target_type, max_targets, max_nodes, line) for line in file]
else:
examples = pool.imap_unordered(subtokenize_line, file, chunksize=100)
#examples = [process_line(target_type, max_targets, max_nodes, line) for line in file]
for source_seq, target_seq in examples:
if source_seq is None or target_seq is None:
continue
source_output_file.write(source_seq + '\n')
target_output_file.write(target_seq + '\n')
total_examples += 1
#print(source_seq, target_seq)
print('File: ' + file_path)
print('Total examples: ' + str(total_examples))
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument("-trd", "--train_data", dest="train_data_path",
help="path to training data file", required=True)
parser.add_argument("-ted", "--test_data", dest="test_data_path",
help="path to test data file", required=True)
parser.add_argument("-vd", "--val_data", dest="val_data_path",
help="path to validation data file", required=True)
parser.add_argument("-o", "--output_name", dest="output_name",
help="output name - the base name for the created dataset", metavar="FILE", required=True,
default='data')
parser.add_argument("--target_type", dest="target_type", type=TargetType.from_string, choices=list(TargetType), required=True)
parser.add_argument("--max_targets", dest="max_targets", type=int, required=False, default=40)
parser.add_argument("--max_nodes", dest="max_nodes", type=int, required=False, default=None)
parser.add_argument('--local', action='store_true')
args = parser.parse_args()
train_data_path = args.train_data_path
test_data_path = args.test_data_path
val_data_path = args.val_data_path
for data_file_path, data_role in zip([train_data_path, test_data_path, val_data_path], ['train', 'test', 'val']):
process_file(file_path=data_file_path, data_file_role=data_role, dataset_name=args.output_name,
target_type=args.target_type, max_targets=args.max_targets, max_nodes=args.max_nodes)
|
StarcoderdataPython
|
4811179
|
# -*- coding:utf-8 -*-
from apriori import Apriori
import pickle
import sys
def main():
if len(sys.argv) != 2:
print("USAGE python main.py [load_data|load_target]")
sys.exit()
mode = sys.argv[1]
if mode == "load_data":
c = Control()
c.proc_load()
elif mode == "load_target":
c = Control()
c.proc_macth()
c.view_support_list()
class Control:
def __init__(self):
self.csv_data_file = "data.csv"
self.csv_target_file = "target.csv"
self.pickle_file = "pickle.dump"
self.tran_dict = {}
self.key_list = []
self.value_list = []
self.tran_list = []
self.target_list = []
self.support_list = []
def proc_load(self):
self.load_data_csv()
self.go_apriori()
self.save_pickel()
def proc_macth(self):
self.load_pickel()
self.load_target_csv()
self.go_macth()
return self.result_list
def load_data_csv(self):
key_set = set()
value_set = set()
f = open(self.csv_data_file, "r")
for line in f:
            # split the CSV line
line_sp = line.split(",")
            # first column is the key
key = line_sp[0]
            # second column is the value (strip the newline)
value = line_sp[1].strip("\n")
            # collect the unique keys and values
key_set = key_set.union(set([key]))
value_set = value_set.union(set([value]))
            # convert into transaction data
if self.tran_dict.has_key(key):
self.tran_dict[key].append(value)
else:
self.tran_dict[key]=[value]
f.close()
        # post-processing
        # key list
self.key_list = list(key_set)
        # value list
self.value_list = list(value_set)
        # transaction list
self.tran_list = self.tran_dict.values()
def go_apriori(self):
a = Apriori(self.value_list, self.tran_list)
self.support_list = a.go_analyze()
def load_target_csv(self):
f = open(self.csv_target_file, "r")
for line in f:
#改行削除
value = line.strip("\n")
self.target_list.append(value)
f.close()
def go_macth(self):
key = self.target_list
key_set = set(key)
user_support_list = []
        # find the support entries that contain x
for support in self.support_list:
support_key = support[0]
support_key_set = set(support_key)
support_value = support[1]
macth_key_set = support_key_set.intersection(key_set)
            # has every key, and the number of keys equals the number of keys in the support entry
if (len(macth_key_set) == len(key_set))and (len(support_key_set) == len(key_set)):
base_value = support_value
        # find the support entries that contain both x and y
for support in self.support_list:
support_key = support[0]
support_key_set = set(support_key)
support_value = support[1]
macth_key_set = support_key_set.intersection(key_set)
            # has every key, and the support entry has more keys than the target (recommendation candidates)
if (len(macth_key_set) == len(key_set))and (len(support_key_set) > len(key_set)):
recommend_key = list(support_key_set.difference(key_set))
recommend_key_set = set(recommend_key)
confidence = support_value / base_value
                # find the support entries that contain y
for sub_support in self.support_list:
sub_support_key = sub_support[0]
sub_support_key_set = set(sub_support_key)
sub_support_value = sub_support[1]
sub_macth_key_set = sub_support_key_set.intersection(recommend_key_set)
                    # has every key, and the number of keys equals the number of keys in the support entry
if (len(sub_macth_key_set) == len(recommend_key_set))and (len(sub_support_key_set) == len(recommend_key_set)):
sub_value = sub_support_value
lift = confidence / sub_value
user_support_list.append([recommend_key, lift, confidence, support_value])
break
self.result_list = sorted(user_support_list, key=lambda x:x[1], reverse=True)
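    # Added note: the loops above implement standard association-rule scoring. For a
    # candidate rule x -> y, confidence = support(x U y) / support(x) and
    # lift = confidence / support(y); the results are sorted by lift, descending.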
def view_support_list(self):
for line in self.result_list:
print(line[0],line[1])
def save_pickel(self):
f = open(self.pickle_file, "w")
pickle.dump(self.support_list, f)
f.close()
def load_pickel(self):
f = open(self.pickle_file, "r")
self.support_list = pickle.load(f)
f.close()
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
3239827
|
<reponame>oischinger/server
"""Custom API implementation using websockets."""
import asyncio
import logging
import os
from base64 import b64encode
from typing import Any, Dict, Optional, Union
import aiofiles
import jwt
import ujson
from aiohttp import WSMsgType, web
from aiohttp.http_websocket import WSMessage
from music_assistant.helpers.errors import AuthenticationError
from music_assistant.helpers.images import get_image_url, get_thumb_file
from music_assistant.helpers.logger import HistoryLogHandler
from music_assistant.helpers.typing import MusicAssistant
from music_assistant.helpers.web import (
api_route,
async_json_response,
async_json_serializer,
parse_arguments,
)
from music_assistant.models.media_types import MediaType
LOGGER = logging.getLogger("api")
@api_route("log")
async def get_log(tail: int = 200) -> str:
"""Return current application log."""
for handler in logging.getLogger().handlers:
if isinstance(handler, HistoryLogHandler):
return handler.get_history()[-tail:]
@api_route("images/{media_type}/{provider}/{item_id}")
async def get_media_item_image_url(
mass: MusicAssistant, media_type: MediaType, provider: str, item_id: str
) -> str:
"""Return image URL for given media item."""
if provider == "url":
return None
return await get_image_url(mass, item_id, provider, media_type)
@api_route("images/thumb")
async def get_image_thumb(mass: MusicAssistant, url: str, size: int = 150) -> str:
"""Get (resized) thumb image for given URL as base64 string."""
img_file = await get_thumb_file(mass, url, size)
if img_file:
async with aiofiles.open(img_file, "rb") as _file:
img_data = await _file.read()
return "data:image/png;base64," + b64encode(img_data).decode()
raise KeyError("Invalid url!")
@api_route("images/provider-icons/{provider_id}")
async def get_provider_icon(provider_id: str) -> str:
"""Get Provider icon as base64 string."""
base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
icon_path = os.path.join(base_dir, "providers", provider_id, "icon.png")
if os.path.isfile(icon_path):
async with aiofiles.open(icon_path, "rb") as _file:
img_data = await _file.read()
return "data:image/png;base64," + b64encode(img_data).decode()
raise KeyError("Invalid provider: %s" % provider_id)
@api_route("images/provider-icons")
async def get_provider_icons(mass: MusicAssistant) -> Dict[str, str]:
"""Get Provider icons as base64 strings."""
return {
prov.id: await get_provider_icon(prov.id)
for prov in mass.get_providers(include_unavailable=True)
}
async def handle_api_request(request: web.Request):
"""Handle API requests."""
mass: MusicAssistant = request.app["mass"]
LOGGER.debug("Handling %s", request.path)
# check auth token
auth_token = request.headers.get("Authorization", "").split("Bearer ")[-1]
if not auth_token:
raise web.HTTPUnauthorized(
reason="Missing authorization token",
)
try:
token_info = jwt.decode(auth_token, mass.web.jwt_key, algorithms=["HS256"])
except jwt.InvalidTokenError as exc:
LOGGER.exception(exc, exc_info=exc)
msg = "Invalid authorization token, " + str(exc)
raise web.HTTPUnauthorized(reason=msg)
if mass.config.security.is_token_revoked(token_info):
raise web.HTTPUnauthorized(reason="Token is revoked")
mass.config.security.set_last_login(token_info["client_id"])
# handle request
handler, path_params = mass.web.get_api_handler(request.path, request.method)
data = await request.json() if request.can_read_body else {}
# execute handler and return results
try:
all_params = {**path_params, **request.query, **data}
params = parse_arguments(mass, handler.signature, all_params)
res = handler.target(**params)
if asyncio.iscoroutine(res):
res = await res
except Exception as exc: # pylint: disable=broad-except
LOGGER.debug("Error while handling %s", request.path, exc_info=exc)
raise web.HTTPInternalServerError(reason=str(exc))
return await async_json_response(res)
class WebSocketApi(web.View):
"""RPC-like API implementation using websockets."""
def __init__(self, request: web.Request):
"""Initialize."""
super().__init__(request)
self.authenticated = False
self.ws_client: Optional[web.WebSocketResponse] = None
@property
def mass(self) -> MusicAssistant:
"""Return MusicAssistant instance."""
return self.request.app["mass"]
async def get(self):
"""Handle GET."""
ws_client = web.WebSocketResponse()
self.ws_client = ws_client
await ws_client.prepare(self.request)
self.request.app["ws_clients"].append(ws_client)
await self._send_json(msg_type="info", data=self.mass.web.discovery_info)
# add listener for mass events
remove_listener = self.mass.eventbus.add_listener(self._handle_mass_event)
# handle incoming messages
try:
async for msg in ws_client:
await self.__handle_msg(msg)
finally:
# websocket disconnected
remove_listener()
self.request.app["ws_clients"].remove(ws_client)
LOGGER.debug("websocket connection closed: %s", self.request.remote)
return ws_client
async def __handle_msg(self, msg: WSMessage):
"""Handle incoming message."""
try:
if msg.type == WSMsgType.error:
LOGGER.warning(
"ws connection closed with exception %s", self.ws_client.exception()
)
return
if msg.type != WSMsgType.text:
return
if msg.data == "close":
await self.ws_client.close()
return
# process message
json_msg = msg.json(loads=ujson.loads)
# handle auth command
if json_msg["type"] == "auth":
token_info = jwt.decode(
json_msg["data"], self.mass.web.jwt_key, algorithms=["HS256"]
)
if self.mass.config.security.is_token_revoked(token_info):
raise AuthenticationError("Token is revoked")
self.authenticated = True
self.mass.config.security.set_last_login(token_info["client_id"])
# TODO: store token/app_id on ws_client obj and periodically check if token is expired or revoked
await self._send_json(
msg_type="result",
msg_id=json_msg.get("id"),
data=token_info,
)
elif not self.authenticated:
raise AuthenticationError("Not authenticated")
# handle regular command
elif json_msg["type"] == "command":
await self._handle_command(
json_msg["data"],
msg_id=json_msg.get("id"),
)
except AuthenticationError as exc: # pylint:disable=broad-except
# disconnect client on auth errors
await self._send_json(
msg_type="error", msg_id=json_msg.get("id"), data=str(exc)
)
await self.ws_client.close(message=str(exc).encode())
except Exception as exc: # pylint:disable=broad-except
# log the error only
await self._send_json(
msg_type="error", msg_id=json_msg.get("id"), data=str(exc)
)
LOGGER.error("Error with WS client", exc_info=exc)
async def _handle_command(
self,
cmd_data: Union[str, dict],
msg_id: Any = None,
):
"""Handle websocket command."""
# Command may be provided as string or a dict
if isinstance(cmd_data, str):
path = cmd_data
method = "GET"
params = {}
else:
path = cmd_data["path"]
method = cmd_data.get("method", "GET")
params = {x: cmd_data[x] for x in cmd_data if x not in ["path", "method"]}
LOGGER.debug("Handling command %s/%s", method, path)
# work out handler for the given path/command
route, path_params = self.mass.web.get_api_handler(path, method)
args = parse_arguments(self.mass, route.signature, {**params, **path_params})
res = route.target(**args)
if asyncio.iscoroutine(res):
res = await res
# return result of command to client
return await self._send_json(msg_type="result", msg_id=msg_id, data=res)
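    # Illustrative only (added): after a successful {"type": "auth", "data": "<jwt>"}
    # frame, a client command frame handled above might look like
    # {"type": "command", "id": 1, "data": {"path": "log", "method": "GET", "tail": 50}},
    # where extra keys in "data" are passed as parameters to the matched route.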
async def _send_json(
self,
msg_type: str,
msg_id: Optional[int] = None,
data: Optional[Any] = None,
):
"""Send message (back) to websocket client."""
await self.ws_client.send_str(
await async_json_serializer({"type": msg_type, "id": msg_id, "data": data})
)
async def _handle_mass_event(self, event: str, event_data: Any):
"""Broadcast events to connected client."""
if not self.authenticated:
return
try:
await self._send_json(
msg_type="event",
data={"event": event, "event_data": event_data},
)
except ConnectionResetError as exc:
LOGGER.debug("Error while sending message to api client", exc_info=exc)
await self.ws_client.close()
|
StarcoderdataPython
|
1714508
|
from torch.utils.data import Dataset, DataLoader
import pandas as pd
import random
from transformers import AutoTokenizer
from tqdm import tqdm
from utils.generic_utils import read
class MMDialDataset(Dataset):
def __init__(self, data, tokenizer):
super().__init__()
self.data = data
self.tokenizer = tokenizer
def __len__(self):
return self.data.__len__()
def __getitem__(self, i):
context = MMDialDataset.extract(self.data[i], '<|context|>', keep_tokens=True)
labels = self.data[i][len(context):]
# belief = MMDialDataset.extract(labels, '<|belief|>')
# action = MMDialDataset.extract(labels, '<|action|>')
# response = MMDialDataset.extract(labels, '<|response|>')
ret = self.tokenizer(self.data[i], truncation=True, return_tensors='pt')
context_tokenized = self.tokenizer(context, truncation=True, return_tensors='pt')
ret['context_input_ids'] = context_tokenized['input_ids']
ret['context_attention_mask'] = context_tokenized['attention_mask']
labels_tokenized = self.tokenizer(labels, truncation=True, return_tensors='pt')
ret['labels'] = labels_tokenized['input_ids']
ret['labels_len'] = ret['labels'].shape[-1]
ret['id'] = i
return ret
@classmethod
def get_token_text(cls, token):
return token.replace('<', '').replace('>', '').replace('|', '').strip()
@classmethod
def extract(cls, text, begin_token, end_token=None, keep_tokens=False):
end_token = end_token or f'<|endof{MMDialDataset.get_token_text(begin_token)}|>'
begin_idx = text.find(begin_token)
end_idx = text.find(end_token)
if begin_idx == -1:
return ''
elif end_idx == -1:
return text[begin_idx + len(begin_token):].strip() if not keep_tokens else text[begin_idx:]
return text[begin_idx + len(begin_token): end_idx].strip() if not keep_tokens else text[begin_idx: end_idx + len(end_token)]
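    # Worked example (added): extract('<|context|> hi <|endofcontext|> rest', '<|context|>')
    # returns 'hi', while keep_tokens=True returns '<|context|> hi <|endofcontext|>',
    # because the end token defaults to '<|endof' + 'context' + '|>'.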
@classmethod
def create_data(cls, paths, tokenizer_or_transformer_model, split=(1,), shuffle=True):
assert sum(split) == 1
data = []
for path in paths:
data.extend(read(path))
if isinstance(tokenizer_or_transformer_model, str):
tokenizer = AutoTokenizer.from_pretrained(tokenizer_or_transformer_model)
else:
tokenizer = tokenizer_or_transformer_model
if shuffle:
random.shuffle(data)
splits = []
begin_idx = 0
for i, s in enumerate(split):
if i == len(split) - 1:
end_idx = len(data)
else:
end_idx = int(begin_idx + len(data) * s)
splits.append(MMDialDataset(data[begin_idx: end_idx], tokenizer=tokenizer))
begin_idx = end_idx
return splits[0] if len(split) == 1 else splits
if __name__ == '__main__':
dataset = MMDialDataset.create_data(['resources/gpt2/resources/test.inp'], 'gpt2')
for sample in dataset:
print(sample)
input()
|
StarcoderdataPython
|
3314677
|
import torch
from torch.utils import data
from torch import nn
import numpy as np
from data.datasets import swiss_roll, double_circles, double_moons, Dataset
from regularization.regularization import regularization
from integrators.integrators import MS1, MS2, MS3, H1, H2, H2_sparse, Classification, get_intermediate_states
def train_2d_example(dataset='swiss_roll', net_type='H1', nf=4, n_layers=8, t_end=1, gradient_info=False, sparse=None,
seed=None):
if dataset == 'swiss_roll':
data_gen = swiss_roll
elif dataset == 'double_circles':
data_gen = double_circles
elif dataset == 'double_moons':
data_gen = double_moons
else:
raise ValueError("%s data set is not yet implemented" % dataset)
if sparse is not None:
if net_type != 'H2':
raise ValueError("Sparse networks only implemented for H2-DNNs")
out = 1
# Set seed
if seed is None:
seed = np.random.randint(10000)
torch.manual_seed(seed)
np.random.seed(seed)
# define data
data_size = 8000
train_data_size = 4000
test_data_size = data_size - train_data_size
if sparse is None:
data2d, labels, domain = data_gen(data_size, nf=nf)
else:
data2d, labels, domain = data_gen(data_size, nf=nf, input_ch=[0, 12])
if sparse != 'sparse' and sparse != 'full':
raise ValueError("sparse variable can be either 'sparse' or 'full'")
else:
mask_k, mask_j = set_masks_sparse(sparse, nf)
partition = {'train': range(0, data_size, 2),
'test': range(1, data_size, 2)}
# # Select training parameters
alpha = 5e-4
alphac = 1e-4
learning_rate = 0.5e-1
max_iteration = 50
max_in_iteration = 10
# define network structure and optimizer
batch_size = 125
training_set = Dataset(partition['train'], data2d, labels)
training_generator = data.DataLoader(training_set, batch_size=batch_size, shuffle=True)
h = t_end / n_layers
if sparse is not None:
model = H2_sparse(n_layers, t_end, nf=16, mask_k=mask_k, mask_j=mask_j)
elif net_type == 'MS1':
model = MS1(n_layers, t_end, nf=nf)
elif net_type == 'MS2':
model = MS2(n_layers, t_end, nf=nf)
elif net_type == 'MS3':
model = MS3(n_layers, t_end, nf=nf)
elif net_type == 'H1_J1':
model = H1(n_layers, t_end, nf=nf, select_j='J1')
elif net_type == 'H1_J2':
model = H1(n_layers, t_end, nf=nf, select_j='J2')
elif net_type == 'H2':
model = H2(n_layers, t_end, nf=nf)
else:
raise ValueError("%s model is not yet implemented" % net_type)
loss_func = nn.BCEWithLogitsLoss()
optimizer_k = torch.optim.Adam(model.parameters(), lr=learning_rate) # , weight_decay=alpha/100)
if gradient_info:
loss_func2 = nn.Identity()
gradients_matrix = np.zeros([int(train_data_size/batch_size) * max_iteration, model.nf, model.nf, n_layers + 1])
else:
gradients_matrix = None
    # log the training configuration before training
print('%s example using a %d-layer %s-DNN with %d features. Alpha=%.1e. Final_time=%.2f'
% (dataset, n_layers, net_type, nf, alpha, t_end))
# Training network
for epoch in range(max_iteration):
training_iterator = iter(training_generator)
for i_k in range(int(data2d[partition['train']].size(0) / training_generator.batch_size)):
local_samples, local_labels = next(training_iterator)
model_c = Classification(nf=nf)
optimizer_w = torch.optim.Adam(model_c.parameters(), lr=learning_rate)
with torch.no_grad():
YN = model(local_samples)
for i_w in range(max_in_iteration): # Inner iteration
optimizer_w.zero_grad()
loss = loss_func(model_c(YN), local_labels)
loss = loss + alphac * 0.5 * (torch.norm(model_c.W) ** 2 + torch.norm(model_c.mu) ** 2)
loss.backward()
optimizer_w.step()
if gradient_info:
local_samples.requires_grad = True
matrix_aux = np.zeros([model.nf, model.nf, n_layers + 1])
for k in range(model.nf):
optimizer_k.zero_grad()
Y_out = get_intermediate_states(model, local_samples)
YN = Y_out[-1]
loss = loss_func2(YN[:, k, 0].sum())
loss.backward()
for j in range(n_layers + 1):
matrix_aux[:, k, j] = Y_out[j].grad[:, :, 0].numpy().sum(axis=0) / training_generator.batch_size
gradients_matrix[epoch * int(train_data_size / batch_size) + i_k, :, :, :] = matrix_aux
local_samples.requires_grad = False
optimizer_k.zero_grad()
K = model.getK()
b = model.getb()
loss = loss_func(model_c(model(local_samples)), local_labels)
loss += regularization(alpha, h, K, b)
loss.backward()
li = list(optimizer_k.state)
if not (len(li) == 0):
for ii in range(2):
optimizer_k.state[li[ii]]['step'] = epoch
optimizer_k.step()
if epoch % 10 == 0 and out > 0:
model_c = Classification(nf=nf)
optimizer_w = torch.optim.Adam(model_c.parameters(), lr=learning_rate)
with torch.no_grad():
YN = model(local_samples)
for i_w in range(max_in_iteration): # Inner iteration
optimizer_w.zero_grad()
loss = loss_func(model_c(YN), local_labels)
loss = loss + alphac * 0.5 * (torch.norm(model_c.W) ** 2 + torch.norm(model_c.mu) ** 2)
loss.backward()
optimizer_w.step()
acc = (torch.ge(model_c(model(local_samples)), 0) == local_labels).sum().numpy() / batch_size
print('\tTrain Epoch: {:2d} - Loss: {:.6f} - Accuracy: {:.0f}%'.format(epoch, loss, acc*100))
# Train classification layer with all the data
model_c = Classification(nf=nf)
optimizer_w = torch.optim.Adam(model_c.parameters(), lr=learning_rate)
for epoch in range(max_iteration):
training_iterator = iter(training_generator)
for i_w in range(int(data2d[partition['train']].size(0) / training_generator.batch_size)):
local_samples, local_labels = next(training_iterator)
with torch.no_grad():
YN = model(local_samples)
optimizer_w.zero_grad()
loss = loss_func(model_c(YN), local_labels)
loss = loss + alphac * 0.5 * (torch.norm(model_c.W) ** 2 + torch.norm(model_c.mu) ** 2)
loss.backward()
optimizer_w.step()
# Accuracy results
with torch.no_grad():
train_acc = (torch.ge(model_c(model(data2d[partition['train'], :, :])), 0) == labels[partition['train'], :]
).sum().numpy() / train_data_size
test_acc = (torch.ge(model_c(model(data2d[partition['test'], :, :])), 0) == labels[partition['test'], :]
).sum().numpy() / test_data_size
return model, model_c, train_acc, test_acc, data2d, labels, partition, domain, gradients_matrix
def set_masks_sparse(sparse, nf):
if nf != 16:
print("Proceeding with nf=16...")
if sparse == 'sparse':
mask_aux = torch.tensor([[1, 1, 0, 0, 0, 0, 0, 1],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 1, 1, 1],
[1, 0, 0, 0, 0, 0, 1, 1]], dtype=torch.float)
mask_k = torch.cat((torch.cat((mask_aux, torch.zeros(8, 8)), dim=1),
torch.cat((torch.zeros(8, 8), mask_aux), dim=1)), dim=0)
mask_aux = torch.eye(8)
mask_j = torch.cat((torch.cat((torch.zeros(8, 8), mask_aux), dim=1),
torch.cat((mask_aux, torch.zeros(8, 8)), dim=1)), dim=0)
mask_k = mask_k.type(torch.bool)
mask_j = mask_j.type(torch.bool)
elif sparse == 'full':
mask_aux = torch.ones(8, 8)
mask_k = torch.cat((torch.cat((mask_aux, torch.zeros(8, 8)), dim=1),
torch.cat((torch.zeros(8, 8), mask_aux), dim=1)), dim=0)
mask_aux = torch.eye(8)
mask_j = torch.cat((torch.cat((torch.zeros(8, 8), mask_aux), dim=1),
torch.cat((mask_aux, torch.zeros(8, 8)), dim=1)), dim=0)
mask_k = mask_k.type(torch.bool)
mask_j = mask_j.type(torch.bool)
else:
raise ValueError("%s is not a valid parameter" % sparse)
return mask_k, mask_j
|
StarcoderdataPython
|
16726
|
<reponame>Feng-XiaoYue/Reinforcement-learning-with-tensorflow-master
import numpy as np
import pandas as pd
import random
import time
import sys
if sys.version_info.major == 2:
import Tkinter as tk
else:
import tkinter as tk
class Cluster(tk.Tk, object):
def __init__(self, state_init, server_attribute):
super(Cluster, self).__init__()
self.action_space = np.array([[0,0],[0,1],[0,2],[0,3],
[1,0],[1,1],[1,2],[1,3],
[2,0],[2,1],[2,2],[2,3],
[3,0],[3,1],[3,2],[3,3],
[4,0],[4,1],[4,2],[4,3],
[5,0],[5,1],[5,2],[5,3],
[6,0],[6,1],[6,2],[6,3],
[7,0],[7,1],[7,2],[7,3],
[8,0],[8,1],[8,2],[8,3],
[9,0],[9,1],[9,2],[9,3],
[10,0],[10,1],[10,2],[10,3],
[11,0],[11,1],[11,2],[11,3]])
self.n_actions = len(self.action_space)
self.cost_matrix = pd.DataFrame(np.array([[0,1,5,12],
[1,0,4,2],
[5,4,0,3],
[12,2,3,0]]),
columns=[0, 1, 2, 3])
self.server_attribute = server_attribute
self.QSs = self.read_file()
self.state_init = state_init
self.cost_init = self.cost_init()
def step(self, action, state, costs):
s = state.copy()
        # action_real = [query index, index of the server the query is moved to]
action_real = self.action_space[action]
q = action_real[0]
index_server = action_real[1]
s.iloc[q, :] = 0
s.iloc[q, index_server] = 1
cost_new = self.cost_caculate(q, index_server)
if cost_new > costs[q]:
is_better = True
else:
is_better = False
# costs[action_real[0]] = cost_new
costs[q] = cost_new
cost_all = self.cost_all(costs)
reward = self.reward(cost_all, s)
s_ = s
return s_, costs, reward, cost_all, is_better
    # Termination condition: the chosen action no longer changes the state after execution, or the state is in some particular situation, e.g. an unbalanced load
def is_finish(self):
# TODO
return True
# read the file and store in an array[query,[server1,server2,......]]
def read_file(self):
server_attribute = self.server_attribute
with open("D:\SynologyDrive\Reinforcement-learning-with-tensorflow-master\contents\MyExperiment\Exp3_test\QueryAttribute_test",'r') as f:
content = f.readlines()
QSs = []
for item in content:
QS = []
item = item.strip("\n")
q = item.split(",")[0]
targetAttribute = item.split(",")[1:]
targetAttribute = list(map(int, targetAttribute))
servers = []
for attribute in targetAttribute:
server = server_attribute[server_attribute.loc[:, attribute] == 1].index[0]
servers.append(server)
QS.append(int(q))
QS.append(servers)
QSs.append(QS)
return QSs
    # compute the initial costs array based on the initial state matrix; every element represents the total cost of one query
def cost_init(self):
state_init = self.state_init
# print(len(state_init))
states = self.state_array(state_init)
# print(len(states))
costs = []
# print(len(state_init))
for i in range(len(state_init)):
index_server = states[i][1]
cost = self.cost_caculate(i, index_server)
costs.append(cost)
return costs
def cost_caculate(self,q,index_server):
cost = 0
for j in range(len(self.QSs[q][1])):
target_server = self.QSs[q][1][j]
cost += self.cost_matrix.iloc[index_server, target_server]
return cost
# create the initial state matrix(random)
# compute the total reward based on the costs array
def cost_all(self, costs):
cost_all = 0
for i in range(len(costs)):
cost_all += costs[i]
return cost_all
def reward(self, cost_all, state):
list = []
for i in state.columns:
list.append(state[i].sum())
load_weight_var = np.var(list)
reward = (len(state)/cost_all) * self.function(1.1, load_weight_var)
return reward
def function(self, a, x):
y = 100/(a**x)
return y
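    # Worked example (added, assuming Python 3 true division): with 327 queries, a total
    # cost of 654 and perfectly even server loads (variance 0),
    # reward = (327 / 654) * 100 / 1.1**0 = 50; a larger load variance shrinks the reward
    # exponentially via function(1.1, x) = 100 / 1.1**x.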
# transform the state matrix into array
def state_array(self, state):
states = []
for i in range(len(state)):
for j in range(len(state.columns)):
state_arr = []
if state.iloc[i, j] == 1:
state_arr.append(i)
state_arr.append(j)
states.append(state_arr)
return states
def state_init():
init_state = pd.DataFrame(np.zeros(327*8).reshape(327, 8), columns=[0, 1, 2, 3, 4, 5, 6, 7])
for i in range(len(init_state)):
j = random.randint(0, 7)
init_state.iloc[i][j] = 1
return init_state
# if __name__ == '__main__':
# server_attribute = pd.DataFrame(np.array([0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
# 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0,
# 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0,
# 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0,
# 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
# 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0,
# 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0,
# 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0]).
# reshape(8, 24),
# columns=np.arange(24))
# env = Cluster(state_init(), server_attribute)
# Qss = env.QSs
# print(Qss)
# for i in range(len(Qss)):
# q = i
# for j in range(len(server_attribute)):
# index_server = j
# print(env.cost_init)
# print("The reward of initial state is:")
# print(env.reward(env.cost_all(env.cost_init), env.state_init))
# print(env.state_init)
# actions=list(range(env.n_actions))
# print(actions)
# env.after(100, update)
# env.mainloop()
|
StarcoderdataPython
|
1725928
|
import datetime, json, os
from cantools import config
from cantools.util import log, write
from cantools.web import fetch, send_mail
from .actor import Actor
try:
import psutil
except ImportError as e:
pass # google crap engine (get it if you need it!)
from six import with_metaclass
class BotMeta(type):
def __new__(cls, name, bases, attrs):
bc = type.__new__(cls, name, bases, attrs)
if name != "Bot":
name != "Monitor" and log("Initializing Bot Class: %s"%(name,), important=True)
config.pubsub.bots.update(name.lower(), bc)
return bc
class Bot(with_metaclass(BotMeta, Actor)):
num = 0
def __init__(self, server, channel, name=None):
Bot.num += 1
self.name = name or (self.__class__.__name__ + str(Bot.num))
self.server = server
self.channel = channel # often we only care about one channel
self.channels = set()
self._set_defaults()
log("Bot Spawned: '%s'"%(self.name,), 2)
channel.join(self)
self.server.bots[self.name] = self
def pub(self, message):
self.server.publish({
"message": message,
"channel": self.channel.name
}, self)
def write(self, obj): # receive message from channel
getattr(self, "on_%s"%(obj["action"],))(obj["data"])
def _default_handler(self, action):
def _h(*args):
log('Bot %s handling %s: "%s"'%(self.name, action, json.dumps(args)), 3)
return _h
def _set_defaults(self):
for action in ["channel", "publish", "subscribe", "unsubscribe", "pm", "error", "meta"]:
hname = "on_%s"%(action,)
if not hasattr(self, hname):
setattr(self, hname, self._default_handler(action))
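# Illustrative sketch (added, not part of the original): because BotMeta registers every
# subclass, defining a bot class is enough to make it available in config.pubsub.bots, e.g.
#
#   class EchoBot(Bot):
#       def on_publish(self, data):
#           self.pub(data)  # echo whatever was published (payload shape depends on the server)
#
# The Monitor class below is the concrete example shipped with this module.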
class Monitor(Bot):
def __init__(self, server, channel, name="monitor"): # only one monitor
import event # here for google crap engine
self.current = {}
self.alert = {}
Bot.__init__(self, server, channel, name)
event.timeout(config.admin.monitor.interval, self._tick)
def _datedir(self):
n = datetime.datetime.now()
lp = os.path.join("logs", "monitor")
yp = os.path.join(lp, str(n.year))
mp = os.path.join(yp, str(n.month))
dp = os.path.join(mp, str(n.day))
if not os.path.isdir(lp):
os.mkdir(lp)
if not os.path.isdir(yp):
os.mkdir(yp)
if not os.path.isdir(mp):
os.mkdir(mp)
if not os.path.isdir(dp):
os.mkdir(dp)
return os.path.join(dp, str(n.hour))
def log(self, data):
self.pub(data)
if config.admin.monitor.log:
write(data, self._datedir(), True, append=True, newline=True)
def _cpu(self):
c = self.current["cpu"] = psutil.cpu_percent()
if self.alert.get("cpu"):
if c < config.admin.monitor.thresholds.cpu:
del self.alert["cpu"]
log("CPU calmed down")
send_mail(config.admin.contacts, subject="High CPU", body="just ended")
else:
if c >= config.admin.monitor.thresholds.cpu:
self.alert["cpu"] = True
log("CPU just started going crazy")
send_mail(config.admin.contacts, subject="High CPU", body="just started")
def _tick(self):
self._cpu()
dioc = psutil.disk_io_counters()
nioc = psutil.net_io_counters()
dmon = fetch(config.admin.host, "/_report",
config.admin.port, True, protocol=config.admin.protocol)
data = {
"gc": dmon["gc"],
"cpu": self.current["cpu"],
"read": dioc.read_time,
"write": dioc.write_time,
"sent": nioc.bytes_sent,
"recv": nioc.bytes_recv,
"process_memory": dmon["mem"],
"virtual_memory": psutil.virtual_memory().percent,
"swap_memory": psutil.swap_memory().percent,
"connections": len(psutil.net_connections()),
"web_connections": dmon["web"]["connections"],
"admin_connections": dmon["admin"]["connections"],
"web_requests": dmon["web"]["requests"],
"admin_requests": dmon["admin"]["requests"],
"totals": {
"web": {
"connections": dmon["web"]["total_connections"],
"requests": dmon["web"]["total_requests"],
"rolls": dmon["web"]["rolls"]
},
"admin": {
"connections": dmon["admin"]["total_connections"],
"requests": dmon["admin"]["total_requests"],
"rolls": dmon["admin"]["rolls"]
}
},
"devices": {
"web": dmon["web"]["devices"],
"admin": dmon["admin"]["devices"]
},
"ips": {
"web": dmon["web"]["ips"],
"admin": dmon["admin"]["ips"]
}
}
if config.admin.monitor.proxy:
data["ips"]["proxy"] = fetch(config.admin.host, "/_report",
config.admin.monitor.proxy, True)["ips"]
self.log(data)
return True
|
StarcoderdataPython
|
1624302
|
# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
from nose.plugins.attrib import attr
from manager_rest.test import base_test
from manager_rest.resources_v2 import Events
from manager_rest.manager_elasticsearch import ManagerElasticsearch
@attr(client_min_version=2, client_max_version=base_test.LATEST_API_VERSION)
class EventsTest(base_test.BaseServerTestCase):
def test_obsolete_post_request(self):
response = self.post('/events', {})
self.assertEqual(405, response.status_code)
def test_build_query_no_args(self):
# make sure nothing crashes...
Events._build_query()
def test_list_events(self):
ManagerElasticsearch.search_events = self._mock_es_search
response = self.client.events.list()
total = self._mock_es_search()['hits']['total']
hits = self._mock_es_search()['hits']['hits']
self.assertEquals(total, response.metadata.pagination.total)
self.assertEquals(len(hits), len(response.items))
def test_build_query(self):
self.maxDiff = None
filters, pagination, sort, range_filters = self._get_build_query_args()
query = Events._build_query(filters=filters,
sort=sort,
pagination=pagination,
range_filters=range_filters)
expected_query = self._get_expected_query()
# match the order of conditions list in both queries
# to overcome order differences in comparison
self._sort_query_conditions_list(expected_query)
self._sort_query_conditions_list(query)
self.assertDictEqual(expected_query, query)
def _get_build_query_args(self):
filters = {
'blueprint_id': ['some_blueprint'],
'deployment_id': ['some_deployment'],
'type': ['cloudify_event', 'cloudify_logs']
}
pagination = {
'size': 5,
'offset': 3
}
sort = {
'@timestamp': 'desc'
}
range_filters = {
'@timestamp': {
'from': '2015-01-01T15:00:0',
'to': '2016-12-31T01:00:0'
}
}
return filters, pagination, sort, range_filters
def _mock_es_search(self, *args, **kwargs):
result = {
'hits': {
'total': 10,
'hits': [{'_source': {k: k}} for k in range(1, 6)]
}
}
return result
def _sort_query_conditions_list(self, query):
conditions = query['query']['filtered']['filter']['bool']['must']
conditions.sort()
def _get_expected_query(self):
conditions = [
{
'query': {
'match': {
'context.blueprint_id': {
'query': 'some_blueprint',
'operator': 'and'
}
}
}
},
{
'query': {
'match': {
'context.deployment_id': {
'query': 'some_deployment',
'operator': 'and'
}
}
}
},
{
'terms': {
'type': ['cloudify_event', 'cloudify_logs']
}
},
{
'range': {
'@timestamp': {
'from': '2015-01-01T15:00:0',
'to': '2016-12-31T01:00:0'
}
}
}
]
expected_query = {
'query': {
'filtered': {
'filter': {
'bool': {
'must': conditions
}
}
}
},
'sort': [
{
'@timestamp': {
'order': 'desc',
'ignore_unmapped': True
}
}
],
'size': 5,
'from': 3
}
return expected_query
|
StarcoderdataPython
|
136896
|
<reponame>FuriousJulius/lg_ros_nodes
#!/usr/bin/env python3
import rospy
import unittest
from lg_msg_defs.msg import AdhocBrowser
from lg_common import AdhocBrowserPool
PKG = 'lg_common'
NAME = 'test_adhoc_browser_pool'
class TestAdhocBrowserPool(unittest.TestCase):
def setUp(self):
self.pool = AdhocBrowserPool('center', "/opt/google/chrome/extensions/", 0, 0)
rospy.sleep(1)
def test_rosbridge_params_passed(self):
test_browser_msg = AdhocBrowser()
self.pool._create_browser('test_test', test_browser_msg)
assert self.pool.browsers['test_test'] is not None
url = self.pool.browsers['test_test'].url
assert 'ros_instance_name=test_test' in url
assert 'rosbridge_secure=1' in url
assert 'rosbridge_port=1234' in url
if __name__ == '__main__':
rospy.init_node(NAME)
rospy.set_param('~rosbridge_secure', True)
rospy.set_param('~rosbridge_port', 1234)
import rostest
rostest.rosrun(PKG, NAME, TestAdhocBrowserPool)
|
StarcoderdataPython
|
1764665
|
import os
import pygame
from labyrinth_generator import generate
from random import randint
from time import time, sleep
import mazeFinder
BLOCKSIZE = 16
WIDTH = 590
HEIGHT = 480
DELAY = 20
WHITE = (255, 255, 255)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLACK = (0, 0, 0)
# Class for the orange dude
class Player(object):
'''
Creates a player based on x and y values
'''
def __init__(self, x, y):
'''
initialises a player rectangle in the pygame
'''
self.x = x
self.y = y
self.rect = pygame.Rect(x, y, BLOCKSIZE, BLOCKSIZE)
@property
def position(self):
'''
returns the position of the rectangle
'''
return self.rect.x, self.rect.y
# Nice class to hold a wall rect
class Wall(object):
'''
Used to create walls that determine the maze layout
'''
def __init__(self, pos):
'''
initialises walls based on pos
'''
walls.append(self)
self.rect = pygame.Rect(pos[0], pos[1], BLOCKSIZE, BLOCKSIZE)
def setup(width,height):
'''
    Used to call the labyrinth generator functions to make a maze
for the search algorithm to search in
'''
global walls, player, clock, end_rect, screen
# Initialise pygame
os.environ["SDL_VIDEO_CENTERED"] = "1"
pygame.init()
# Set up the display
pygame.display.set_caption("Wait for the program to calculate the route")
screen = pygame.display.set_mode((WIDTH, HEIGHT))
clock = pygame.time.Clock()
walls = []
level = generate(width,height)
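    # generate() yields rows of characters: "W" marks a wall, "E" the exit, "P" the player start.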
x = y = 0
for row in level:
for col in row:
if col == "W":
Wall((x, y))
elif col == "E":
end_rect = pygame.Rect(x, y, BLOCKSIZE, BLOCKSIZE)
elif col == "P":
player = Player(x, y)
x += BLOCKSIZE
y += BLOCKSIZE
x = 0
return screen, clock, walls, player, end_rect
def playOut(actions):
'''
Moves the player based on the action
'''
for action in actions:
if(action == "Move Up"):
player.rect.y -= BLOCKSIZE
elif(action == "Move Down"):
player.rect.y += BLOCKSIZE
elif(action == "Move Right"):
player.rect.x += BLOCKSIZE
elif(action == "Move Left"):
player.rect.x -= BLOCKSIZE
updateScreen(player, end_rect, walls)
sleep(0.1)
def updateScreen(player, end_rect, walls):
'''
    Updates the screen
'''
screen.fill(BLACK)
for wall in walls:
pygame.draw.rect(screen, WHITE, wall.rect)
pygame.draw.rect(screen, RED, end_rect)
pygame.draw.rect(screen, GREEN, player.rect)
pygame.display.flip()
def recordResutls(time, width, height, method,filterActionsValue):
'''
Used to record results in the results text file
'''
string = "Solver took "+str(time)+", for " +str(width)+" by "+str(height)+" maze using method ID "+str(method)+" with filter actions set to "+str(filterActionsValue) +"\n\r"
f= open("results.txt","a")
f.write(string)
f.close()
def game(method, filterActionsValue, width,height):
'''
Used to initialise a maze and find the path
Also records the time required to find the path
'''
setup(width,height)
running = True
updateScreen(player, end_rect, walls)
t0 = time()
path = mazeFinder.findSolution(method,filterActionsValue,player, end_rect, walls)
t1 = time()
totalTime = t1-t0
print ("Solver took ",totalTime, ' seconds')
recordResutls(totalTime, width, height, method, filterActionsValue)
pygame.display.set_caption("Press space to start the route")
while running:
clock.tick(5)
for e in pygame.event.get():
if e.type == pygame.QUIT:
running = False
pygame.quit()
if e.type == pygame.KEYDOWN and e.key == pygame.K_ESCAPE:
running = False
pygame.quit()
key = pygame.key.get_pressed()
if key[pygame.K_SPACE]:
pygame.display.set_caption("Following route")
playOut(path)
running = False
pygame.quit()
updateScreen(player, end_rect, walls)
if __name__ == '__main__':
    # game() needs explicit arguments; the values below are examples only -- valid method IDs
    # and maze sizes depend on mazeFinder.findSolution and labyrinth_generator.generate.
    game(method=0, filterActionsValue=True, width=36, height=29)
|
StarcoderdataPython
|
1742982
|
# coding=utf-8
from __future__ import absolute_import
import logging
import datetime
from urllib.parse import urlparse
from kubernetes import watch
from talos.common import cache
from talos.core import config
from talos.core.i18n import _
from wecubek8s.common import jsonfilter
from wecubek8s.common import k8s
from wecubek8s.common import const
from wecubek8s.db import resource as db_resource
CONF = config.CONF
LOG = logging.getLogger(__name__)
class BaseEntity:
def list(self, filters=None):
clusters = db_resource.Cluster().list()
# all cached as default(3s)
results = self.cached_all(clusters)
if filters:
            # The following operator options are required by wecube-platform: eq/neq/is/isnot/gt/lt/like/in,
            # but the kubernetes plugin supports more: gte/lte/notin/regex/set/notset.
            # The set/notset tests treat False/0/''/[]/{}/None as unset;
            # you can also use regex to match the value.
results = [ret for ret in results if jsonfilter.match_all(filters, ret)]
return results
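    # Example filter (sketch only; the exact rule schema is defined by wecubek8s.common.jsonfilter):
    # a list such as [{'name': 'correlation_id', 'operator': 'eq', 'value': 'node-001'}] would keep
    # only the result dicts whose 'correlation_id' equals 'node-001'.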
def clear_cache(self, clusters):
cached_key = 'k8s.' + ','.join([cluster['id'] for cluster in sorted(clusters, key=lambda x: x['id'])
]) + '.' + self.__class__.__name__
cache.delete(cached_key)
def cached_all(self, clusters, expires=3):
cached_key = 'k8s.' + ','.join([cluster['id'] for cluster in sorted(clusters, key=lambda x: x['id'])
]) + '.' + self.__class__.__name__
cached_data = cache.get(cached_key, expires)
if not cache.validate(cached_data):
cached_data = self.all(clusters)
cache.set(cached_key, cached_data)
return cached_data
def all(self, clusters):
return []
def cluster_client(self, cluster):
k8s_auth = k8s.AuthToken(cluster['api_server'], cluster['token'])
k8s_client = k8s.Client(k8s_auth)
return k8s_client
class Cluster(BaseEntity):
@classmethod
def to_dict(cls, cluster, item):
parse_info = urlparse(item['api_server'])
api_info = parse_info.netloc.rsplit(':', 1)
api_host = ''
api_port = 0
if len(api_info) >= 2:
api_host = api_info[0]
            api_port = int(api_info[1]) or (443 if parse_info.scheme == 'https' else 80)
result = {
'id': item['id'],
'name': item['name'],
'displayName': item['name'],
'correlation_id': item['correlation_id'],
'api_server': item['api_server'],
'api_host': api_host,
'api_port': str(api_port),
'token': item['token'],
'metric_host': item['metric_host'],
'metric_port': item['metric_port'],
}
return result
def all(self, clusters):
results = []
for cluster in clusters:
results.append(self.to_dict(cluster, cluster))
return results
class Node(BaseEntity):
@classmethod
def to_dict(cls, cluster, item):
ip_address = None
for address in item.status.addresses:
if address.type == 'InternalIP':
ip_address = address.address
break
correlation_id = None
if item.metadata.labels:
for tag_key, tag_value in item.metadata.labels.items():
if tag_key == const.Tag.NODE_ID_TAG:
correlation_id = tag_value
break
result = {
'id': item.metadata.uid,
'name': item.metadata.name,
'displayName': f'{cluster["name"]}-{item.metadata.name}',
'ip_address': ip_address,
'cluster_id': cluster["id"],
'correlation_id': correlation_id,
}
return result
def all(self, clusters):
results = []
for cluster in clusters:
k8s_client = self.cluster_client(cluster)
for item in k8s_client.list_node().items:
results.append(self.to_dict(cluster, item))
return results
class Deployment(BaseEntity):
@classmethod
def to_dict(cls, cluster, item):
correlation_id = None
if item.metadata.labels:
for tag_key, tag_value in item.metadata.labels.items():
if tag_key == const.Tag.DEPLOYMENT_ID_TAG:
correlation_id = tag_value
break
result = {
'id': item.metadata.uid,
'name': item.metadata.name,
'displayName': f'{cluster["name"]}-{item.metadata.namespace}-{item.metadata.name}',
'namespace': item.metadata.namespace,
'cluster_id': cluster["id"],
'correlation_id': correlation_id,
}
return result
def all(self, clusters):
results = []
for cluster in clusters:
k8s_client = self.cluster_client(cluster)
for item in k8s_client.list_all_deployment().items:
results.append(self.to_dict(cluster, item))
return results
class ReplicaSet(BaseEntity):
@classmethod
def to_dict(cls, cluster, item):
controll_by = None
if item.metadata.owner_references:
for owner in item.metadata.owner_references:
if owner.controller and owner.kind == 'Deployment':
controll_by = owner.uid
break
result = {
'id': item.metadata.uid,
'name': item.metadata.name,
'displayName': f'{cluster["name"]}-{item.metadata.namespace}-{item.metadata.name}',
'namespace': item.metadata.namespace,
'deployment_id': controll_by,
'cluster_id': cluster["id"]
}
return result
def all(self, clusters):
results = []
for cluster in clusters:
k8s_client = self.cluster_client(cluster)
for item in k8s_client.list_all_replica_set().items:
results.append(self.to_dict(cluster, item))
return results
class Service(BaseEntity):
@classmethod
def to_dict(cls, cluster, item):
correlation_id = None
if item.metadata.labels:
for tag_key, tag_value in item.metadata.labels.items():
if tag_key == const.Tag.SERVICE_ID_TAG:
correlation_id = tag_value
break
result = {
'id': item.metadata.uid,
'name': item.metadata.name,
'displayName': f'{cluster["name"]}-{item.metadata.namespace}-{item.metadata.name}',
'namespace': item.metadata.namespace,
'cluster_id': cluster["id"],
'ip_address': item.spec.cluster_ip,
'correlation_id': correlation_id,
}
return result
def all(self, clusters):
results = []
for cluster in clusters:
k8s_client = self.cluster_client(cluster)
for item in k8s_client.list_all_service().items:
results.append(self.to_dict(cluster, item))
return results
class Pod(BaseEntity):
@classmethod
def to_dict(cls, cluster, item):
correlation_id = None
if item.metadata.labels:
for tag_key, tag_value in item.metadata.labels.items():
if tag_key == const.Tag.POD_ID_TAG:
correlation_id = tag_value
break
controll_by = None
if item.metadata.owner_references:
for owner in item.metadata.owner_references:
if owner.controller and owner.kind == 'ReplicaSet':
controll_by = owner.uid
break
result = {
'id': item.metadata.uid,
'name': item.metadata.name,
'displayName': f'{cluster["name"]}-{item.metadata.namespace}-{item.metadata.name}',
'namespace': item.metadata.namespace,
'ip_address': item.status.pod_ip,
'replicaset_id': controll_by,
'deployment_id': None,
'correlation_id': correlation_id,
'node_id': item.spec.node_name,
'cluster_id': cluster["id"],
}
# patch node_id
node_mapping = {}
nodes = Node().cached_all([cluster])
for node in nodes:
node_mapping.setdefault(node['cluster_id'], {}).setdefault(node['name'], node['id'])
# patch deployment_id
rs_mapping = {}
rss = ReplicaSet().cached_all([cluster])
for rs in rss:
rs_mapping.setdefault(rs['cluster_id'], {}).setdefault(rs['id'], rs['deployment_id'])
result['node_id'] = node_mapping.get(result['cluster_id'], {}).get(result['node_id'], None)
result['deployment_id'] = rs_mapping.get(result['cluster_id'], {}).get(result['replicaset_id'], None)
return result
def all(self, clusters):
results = []
for cluster in clusters:
k8s_client = self.cluster_client(cluster)
for item in k8s_client.list_all_pod().items:
results.append(self.to_dict(cluster, item))
return results
def watch(self, cluster, event_stop, notify):
k8s_client = self.cluster_client(cluster)
current_time = datetime.datetime.now(datetime.timezone.utc)
w = watch.Watch()
for event in w.stream(k8s_client.core_client.list_pod_for_all_namespaces):
if event['type'] == 'ADDED':
# new -> alert
if event['object'].metadata.creation_timestamp >= current_time:
notify('POD.ADDED', cluster['id'], self.to_dict(cluster, event['object']))
elif event['type'] == 'DELETED':
# delete -> alert
notify('POD.DELETED', cluster['id'], self.to_dict(cluster, event['object']))
if event_stop.is_set():
w.stop()
|
StarcoderdataPython
|
52343
|
<filename>AMAO/apps/Avaliacao/Questao/models/filtro_questao.py
# -*- coding: utf-8 -*-
from django.db import models
from tipo_questao import TipoQuestao
from questao import Questao
#from libs.uniqifiers_benchmark import f11 as uniqifier
class FiltroQuestao(models.Model):
"""
Classe que ira gerar uma questao(QuestaoDeAvaliacao) com base em alguns criterios/filtros(TipoQuestao).
"""
    # A question filter can only be used in one evaluation template at a time,
    # which keeps the idea more logical.
templateAvaliacao = models.ForeignKey('Avaliacao.TemplateAvaliacao', related_name='filtrosQuestoes')
    # The grade the student receives for getting 100% of the question right.
notaBase = models.DecimalField(u"Nota Base",max_digits=10, decimal_places=2,default="0.00")
    # Lower bound that this question's grade can reach.
notaLimMinimo = models.DecimalField(u"Limite Mínimo da Nota",max_digits=10, decimal_places=2,default="0.00")
    # Upper bound that this question's grade can reach.
notaLimMaximo = models.DecimalField(u"Limite Máximo da Nota",max_digits=10, decimal_places=2,default="0.00")
    # Type of the question, used for filtering.
tipo = models.ManyToManyField(TipoQuestao, related_name="filtrosQuestoes")
    # Used when the filter targets one specific question.
questaoExata = models.ForeignKey(Questao, related_name='filtrosQuestoes', blank=True, null=True,limit_choices_to = {'verificada':True})
class Meta:
verbose_name = u'Filtro de Questão'
app_label = 'Questao'
def verifica_autor(self,autor):
"verifica se um dado autor(usuario) corresponde ao autor do templateAvaliacao desse filtro"
return self.templateAvaliacao.autor.pk == autor.pk
def _prepara_tipos_requeridos(self):
"""
Metodo usado por filtrarQuestao.
prepara os tipos requeridos, juntando n elementos de num_descendentes cada um dos tipos
retorna um vetor com um vetor de listas de todos os tipos e seus descententes
Ex:
Tipos:
* C -> Ponteiro -> Malloc
* Facil
* Estruturas de Dados -> Pilha
Isso resultaria no seguinte vetor:
[[C,Ponteiro,Malloc], [Facil,], [Estruturas de Dados, Pilha]]
"""
tiposRequeridos = []
for tipoFiltro in self.tipo.all():
listaTiposFilho_e_proprio = tipoFiltro.get_descendants(include_self=True)
tiposRequeridos.append(listaTiposFilho_e_proprio)
return tiposRequeridos
def _questoes_selecionadas(self,tiposRequeridos):
"""
Recupera todas as questoes selecionadas usando os filtros(sem serem exatas)
dos tiposRequeridos.
"""
        # Retrieve all verified questions.
tdsQuestoes = Questao.objects.filter(verificada=True)
questoesSelecionadas = []
for questaoATestar in tdsQuestoes:
questao_valida = True
for grupoDeTiposRequeridos in tiposRequeridos:
tipo_valido = False
for tipoQuestao_da_questaoATestar in questaoATestar.tipo.all():
if tipoQuestao_da_questaoATestar in grupoDeTiposRequeridos:
tipo_valido=True
break
if not tipo_valido:
questao_valida = False
break
if questao_valida:
questoesSelecionadas.append(questaoATestar)
return questoesSelecionadas
def filtrarQuestao(self):
"""
Retorna uma questao utilizando criterios de busca baseado no campo 'tipo', ou retorna questaoExata se esta for != None
Passando como parametro uma lista de questoes previamente selecionadas, para evitar a selecao de uma destas.
se for uma questão exata e simulado=True então nao pega a propria questão mas uma qualquer que seja do mesmo tipo que esta.
"""
######################
        #: If there is an exact question, return it (a list containing only that question);
        # otherwise, try to retrieve a random question matching the filter's types.
if self.questaoExata:
# print "(EXATA) " + self.questaoExata.slug
return [self.questaoExata,]
        # Prepare the required types, grouping each type together with its descendants.
tiposRequeridos = self._prepara_tipos_requeridos()
# print "===================================="
# print ">>>tiposFiltro:"
# print self.tipo.all()
# print ">>>tiposRequeridos:"
# print tiposRequeridos
# print ">>>>>>>>>>>>>>>>>>>"
questoesSelecionadas = self._questoes_selecionadas(tiposRequeridos)
if questoesSelecionadas == []:
raise Exception("Nenhuma questao encontrada para os seguintes filtro:%s"%str(self.pk))
        # # Randomly pick one of these questions to be the answer.
# import random
# rand = random.randint(0, questoesSelecionadas.__len__()-1)
# questao = questoesSelecionadas[rand]
# print str(rand) + " " + questao.slug
return questoesSelecionadas
    # TODO: decide which __unicode__ representation to use for this model.
# def __unicode__(self):
# return self.arquivo.name
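    # Usage sketch (hypothetical, not from the original source): given some saved filter
    # `filtro = FiltroQuestao.objects.get(pk=1)`, calling `filtro.filtrarQuestao()` returns either
    # `[filtro.questaoExata]` or the list of verified Questao objects matching all of its tipos.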
|
StarcoderdataPython
|
3353424
|
bills = [7, 12, 22, 52, 102, 15, 25, 55, 105, 30, 60, 110, 70, 120, 150]
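# For each input pair N M (terminated by "0 0"), checks whether the difference M - N
# equals one of the precomputed amounts above and prints 'possible' or 'impossible'.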
count = 0
while True:
N, M = map(int, input().split())
if N == 0 and M == 0:
break
for i in bills:
if M - N == i:
count = 1
break
else:
count = 0
if count == 1:
print('possible')
else:
print('impossible')
|
StarcoderdataPython
|
192464
|
<reponame>opennode/waldur-ansible<filename>conftest.py
from waldur_ansible.common.tests.integration import integration_tests_config
def pytest_addoption(parser):
parser.addoption(integration_tests_config.TEST_TAG_FLAG, action="append", help="specify what type of tests to run")
|
StarcoderdataPython
|
195036
|
# -*- coding: utf-8 -*-
# This file is part of the Ingram Micro Cloud Blue Connect SDK.
# Copyright (c) 2019 Ingram Micro. All Rights Reserved.
import os
import pytest
from connect.config import Config
conf_dict = {
'apiEndpoint': 'http://localhost:8080/api/public/v1/',
'apiKey': '<KEY>',
'products': 'CN-631-322-000'
}
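# These values mirror the local config.json that the file-based tests below load.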
def setup_module(module):
module.prev_dir = os.getcwd()
os.chdir(os.path.dirname(os.path.abspath(__file__)))
def teardown_module(module):
os.chdir(module.prev_dir)
def test_global_implicit_global_config():
""" Global config is instantiated from config.json if not explicitly set """
assert Config.get_instance().api_key == conf_dict.get('apiKey')
assert Config.get_instance().api_url == conf_dict.get('apiEndpoint')
assert isinstance(Config.get_instance().products, list)
assert len(Config.get_instance().products) == 1
assert Config.get_instance().products[0] == conf_dict.get('products')
# noinspection PyPropertyAccess
def test_global_config_immutable_properties():
with pytest.raises(AttributeError):
Config.get_instance().api_key = conf_dict.get('apiKey')
Config.get_instance().api_url = conf_dict.get('apiEndpoint')
Config.get_instance().products = [conf_dict.get('products')]
def test_init_config_with_non_existing_file():
with pytest.raises(IOError):
Config(file='non_existing_config.json')
def test_init_config_with_file():
_assert_config(Config(file='config.json'))
def test_init_config_with_arguments():
_assert_config(Config(
api_key=conf_dict.get('apiKey'),
api_url=conf_dict.get('apiEndpoint'),
products=conf_dict.get('products'),
))
def test_init_config_with_invalid_arguments():
with pytest.raises(ValueError):
Config(
api_key='',
api_url='',
products='',
)
# noinspection PyPropertyAccess
def test_config_immutable_properties():
config = Config(file='config.json')
with pytest.raises(AttributeError):
config.api_key = conf_dict.get('apiKey')
config.api_url = conf_dict.get('apiEndpoint')
config.products = [conf_dict.get('products')]
def _assert_config(config):
assert config.api_key == conf_dict.get('apiKey')
assert config.api_url == conf_dict.get('apiEndpoint')
assert isinstance(config.products, list)
assert len(config.products) == 1
assert config.products[0] == conf_dict.get('products')
|
StarcoderdataPython
|
1681387
|
<gh_stars>10-100
"""Genereate pseudo labels by softmax classifier.
"""
from __future__ import print_function, division
import os
import math
import PIL.Image as Image
import numpy as np
import cv2
import torch
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
from tqdm import tqdm
import spml.data.transforms as transforms
import spml.utils.general.vis as vis_utils
import spml.utils.general.others as other_utils
from spml.data.datasets.base_dataset import ListDataset
from spml.config.default import config
from spml.config.parse_args import parse_args
from spml.models.embeddings.resnet_pspnet import resnet_101_pspnet
from spml.models.embeddings.resnet_deeplab import resnet_101_deeplab
from spml.models.predictions.softmax_classifier import softmax_classifier
from spml.models.crf import DenseCRF
cudnn.enabled = True
cudnn.benchmark = True
WALK_STEPS=0
TH=None
def main():
"""Generate pseudo labels by softmax classifier.
"""
    # Retrieve experiment configurations.
args = parse_args('Generate pseudo labels by softmax classifier.')
# Create directories to save results.
semantic_dir = os.path.join(args.save_dir, 'semantic_gray')
semantic_rgb_dir = os.path.join(args.save_dir, 'semantic_color')
# Create color map.
color_map = vis_utils.load_color_map(config.dataset.color_map_path)
color_map = color_map.numpy()
# Create data loaders.
test_dataset = ListDataset(
data_dir=args.data_dir,
data_list=args.data_list,
img_mean=config.network.pixel_means,
img_std=config.network.pixel_stds,
size=None,
random_crop=False,
random_scale=False,
random_mirror=False,
training=False)
test_image_paths = test_dataset.image_paths
# Define CRF.
postprocessor = DenseCRF(
iter_max=args.crf_iter_max,
pos_xy_std=args.crf_pos_xy_std,
pos_w=args.crf_pos_w,
bi_xy_std=args.crf_bi_xy_std,
bi_rgb_std=args.crf_bi_rgb_std,
bi_w=args.crf_bi_w,)
# Create models.
if config.network.backbone_types == 'panoptic_pspnet_101':
embedding_model = resnet_101_pspnet(config).cuda()
elif config.network.backbone_types == 'panoptic_deeplab_101':
embedding_model = resnet_101_deeplab(config).cuda()
else:
raise ValueError('Not support ' + config.network.backbone_types)
prediction_model = softmax_classifier(config).cuda()
embedding_model.eval()
prediction_model.eval()
# Load trained weights.
model_path_template = os.path.join(args.snapshot_dir, 'model-{:d}.pth')
save_iter = config.train.max_iteration - 1
embedding_model.load_state_dict(
torch.load(model_path_template.format(save_iter))['embedding_model'],
resume=True)
prediction_model.load_state_dict(
torch.load(model_path_template.format(save_iter))['prediction_model'])
# Start inferencing.
with torch.no_grad():
for data_index in tqdm(range(len(test_dataset))):
# Image path.
image_path = test_image_paths[data_index]
base_name = os.path.basename(image_path).replace('.jpg', '.png')
# Image resolution.
original_image_batch, original_label_batch, _ = test_dataset[data_index]
image_h, image_w = original_image_batch['image'].shape[-2:]
lab_tags = np.unique(original_label_batch['semantic_label'])
lab_tags = lab_tags[lab_tags < config.dataset.num_classes]
label_tags = np.zeros((config.dataset.num_classes,), dtype=np.bool)
label_tags[lab_tags] = True
label_tags = torch.from_numpy(label_tags).cuda()
# Image resolution.
batches = other_utils.create_image_pyramid(
original_image_batch, original_label_batch,
scales=[0.75, 1],
is_flip=True)
affs = []
semantic_probs = []
for image_batch, label_batch, data_info in batches:
resize_image_h, resize_image_w = image_batch['image'].shape[-2:]
# Crop and Pad the input image.
image_batch['image'] = transforms.resize_with_pad(
image_batch['image'].transpose(1, 2, 0),
config.test.crop_size,
image_pad_value=0).transpose(2, 0, 1)
image_batch['image'] = torch.FloatTensor(
image_batch['image'][np.newaxis, ...]).cuda()
pad_image_h, pad_image_w = image_batch['image'].shape[-2:]
embeddings = embedding_model(image_batch, resize_as_input=True)
outputs = prediction_model(embeddings)
embs = embeddings['embedding'][:, :, :resize_image_h, :resize_image_w]
semantic_logit = outputs['semantic_logit'][..., :resize_image_h, :resize_image_w]
if data_info['is_flip']:
embs = torch.flip(embs, dims=[3])
semantic_logit = torch.flip(semantic_logit, dims=[3])
embs = F.interpolate(embs, size=(image_h//8, image_w//8), mode='bilinear')
embs = embs / torch.norm(embs, dim=1)
embs_flat = embs.view(embs.shape[1], -1)
aff = torch.matmul(embs_flat.t(), embs_flat).mul_(5).add_(-5).exp_()
affs.append(aff)
semantic_logit = F.interpolate(
semantic_logit, size=(image_h//8, image_w//8), mode='bilinear')
#semantic_prob = F.softmax(semantic_logit, dim=1)
#semantic_probs.append(semantic_prob)
semantic_probs.append(semantic_logit)
cat_semantic_probs = torch.cat(semantic_probs, dim=0)
#semantic_probs, _ = torch.max(cat_semantic_probs, dim=0)
#semantic_probs[0] = torch.min(cat_semantic_probs[:, 0, :, :], dim=0)[0]
semantic_probs = torch.mean(cat_semantic_probs, dim=0)
semantic_probs = F.softmax(semantic_probs, dim=0)
# normalize cam.
max_prob = torch.max(semantic_probs.view(21, -1), dim=1)[0]
cam_full_arr = semantic_probs / max_prob.view(21, 1, 1)
cam_shape = cam_full_arr.shape[-2:]
label_tags = (~label_tags).view(-1, 1, 1).expand(-1, cam_shape[0], cam_shape[1])
cam_full_arr = cam_full_arr.masked_fill(label_tags, 0)
if TH is not None:
cam_full_arr[0] = TH
aff = torch.mean(torch.stack(affs, dim=0), dim=0)
# Start random walk.
aff_mat = aff ** 20
trans_mat = aff_mat / torch.sum(aff_mat, dim=0, keepdim=True)
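            # Each iteration squares the transition matrix, so k iterations correspond to a
            # 2**k-step random walk over the affinity graph; with WALK_STEPS = 0 the CAMs
            # are left unpropagated.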
for _ in range(WALK_STEPS):
trans_mat = torch.matmul(trans_mat, trans_mat)
cam_vec = cam_full_arr.view(21, -1)
cam_rw = torch.matmul(cam_vec, trans_mat)
cam_rw = cam_rw.view(21, cam_shape[0], cam_shape[1])
cam_rw = cam_rw.data.cpu().numpy()
cam_rw = cv2.resize(cam_rw.transpose(1, 2, 0),
dsize=(image_w, image_h),
interpolation=cv2.INTER_LINEAR)
cam_rw_pred = np.argmax(cam_rw, axis=-1).astype(np.uint8)
# CRF
#image = image_batch['image'].data.cpu().numpy().astype(np.float32)
#image = image[0, :, :image_h, :image_w].transpose(1, 2, 0)
#image *= np.reshape(config.network.pixel_stds, (1, 1, 3))
#image += np.reshape(config.network.pixel_means, (1, 1, 3))
#image = image * 255
#image = image.astype(np.uint8)
#cam_rw = postprocessor(image, cam_rw.transpose(2,0,1))
#cam_rw_pred = np.argmax(cam_rw, axis=0).astype(np.uint8)
# Save semantic predictions.
semantic_pred = cam_rw_pred
semantic_pred_name = os.path.join(
semantic_dir, base_name)
if not os.path.isdir(os.path.dirname(semantic_pred_name)):
os.makedirs(os.path.dirname(semantic_pred_name))
Image.fromarray(semantic_pred, mode='L').save(semantic_pred_name)
semantic_pred_rgb = color_map[semantic_pred]
semantic_pred_rgb_name = os.path.join(
semantic_rgb_dir, base_name)
if not os.path.isdir(os.path.dirname(semantic_pred_rgb_name)):
os.makedirs(os.path.dirname(semantic_pred_rgb_name))
Image.fromarray(semantic_pred_rgb, mode='RGB').save(
semantic_pred_rgb_name)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
3344059
|
<filename>utils/lib/periphery.py
"""
Small class to handle the periphery.
"""
import numpy as np
import lib.kernels as kernels
class Periphery(object):
"""
    Small class to handle the periphery.
"""
def __init__(self, location, orientation, reference_configuration, reference_normals, quadrature_weights):
"""
        Constructor. Takes the location, orientation, reference configuration and normals, and quadrature weights.
"""
# Location as np.array.shape = 3
self.location = location
# Orientation as Quaternion
self.orientation = orientation
# Number of blobs
self.Nblobs = reference_configuration.size // 3
# Reference configuration. Coordinates of blobs for quaternion [1, 0, 0, 0]
# and location = np.array[0, 0, 0]) as a np.array.shape = (Nblobs, 3)
# or np.array.shape = (Nblobs * 3)
self.reference_configuration = np.reshape(reference_configuration, (self.Nblobs, 3))
self.reference_normals = np.reshape(reference_normals, (self.Nblobs, 3))
self.quadrature_weights = quadrature_weights.flatten()
self.Nblobs = self.quadrature_weights.size
# Name of body and type of body. A string or number
self.name = None
self.type = None
self.rotation_matrix = None
# Some default functions
self.function_slip = np.zeros((self.Nblobs, 3))
self.ID = None
# Vectors for singularity subtractions
self.ex = None
self.ey = None
self.ez = None
self.density = np.zeros(3 * self.Nblobs)
self.density_new = np.zeros(3 * self.Nblobs)
def get_r_vectors(self, location=None, orientation=None):
"""
Return the coordinates of the blobs.
"""
# Get location and orientation
if location is None:
location = self.location
if orientation is None:
orientation = self.orientation
# Compute blobs coordinates
rotation_matrix = orientation.rotation_matrix()
r_vectors = np.array([np.dot(rotation_matrix, vec) for vec in self.reference_configuration])
r_vectors += location
return r_vectors
def get_normals(self, orientation=None):
"""
Return the normals of the periphery.
"""
# Get orientation
if orientation is None:
orientation = self.orientation
# Compute blobs coordinates
rotation_matrix = orientation.rotation_matrix()
normals = np.array([np.dot(rotation_matrix, vec) for vec in self.reference_normals])
return normals
def get_singularity_subtraction_vectors(self, eta=1):
# Compute correction for singularity subtractions
r_vectors_blobs = self.get_r_vectors()
normals = self.get_normals()
quadrature_weights = self.quadrature_weights
Nperiphery = quadrature_weights.size
e = np.zeros((Nperiphery, 3))
e[:, 0] = 1.0
e *= quadrature_weights[:, None]
self.ex = kernels.stresslet_kernel_times_normal_times_density_numba(r_vectors_blobs, normals, e, eta)
e[:, :] = 0.0
e[:, 1] = 1.0
e *= quadrature_weights[:, None]
self.ey = kernels.stresslet_kernel_times_normal_times_density_numba(r_vectors_blobs, normals, e, eta)
e[:, :] = 0.0
e[:, 2] = 1.0
e *= quadrature_weights[:, None]
self.ez = kernels.stresslet_kernel_times_normal_times_density_numba(r_vectors_blobs, normals, e, eta)
return
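# Usage sketch (hypothetical names): with an `orientation` object whose rotation_matrix() returns a
# 3x3 numpy array (e.g. a quaternion class from this codebase), a periphery built as
# Periphery(location, orientation, reference_configuration, reference_normals, quadrature_weights)
# exposes get_r_vectors() / get_normals() in the rotated and translated frame, and
# get_singularity_subtraction_vectors() fills in ex/ey/ez via the stresslet kernels.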
|
StarcoderdataPython
|
188558
|
import pandas as pd
from pipedown.nodes.base.metric import Metric
from pipedown.utils.urls import get_node_url
class MeanSquaredError(Metric):
CODE_URL = get_node_url("metrics/mean_squared_error.py")
def run(self, y_pred: pd.Series, y_true: pd.Series):
return ((y_pred - y_true).pow(2)).mean()
def get_metric_name(self):
return "mean_squared_error"
|
StarcoderdataPython
|
195732
|
# (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import json
import requests
import datadog_checks.dev.tooling.manifest_validator.common.validator as common
from datadog_checks.dev.tooling.manifest_validator.common.validator import BaseManifestValidator
from ..constants import V2
METRIC_TO_CHECK_EXCLUDE_LIST = {
'openstack.controller', # "Artificial" metric, shouldn't be listed in metadata file.
'riakcs.bucket_list_pool.workers', # RiakCS 2.1 metric, but metadata.csv lists RiakCS 2.0 metrics only.
}
class DisplayOnPublicValidator(BaseManifestValidator):
def validate(self, check_name, decoded, fix):
correct_is_public = True
path = '/display_on_public_website'
is_public = decoded.get_path(path)
if not isinstance(is_public, bool):
output = ' required boolean: display_on_public_website'
if fix:
decoded.set_path(path, correct_is_public)
self.fix(output, f' new `display_on_public_website`: {correct_is_public}')
else:
self.fail(output)
class SchemaValidator(BaseManifestValidator):
def validate(self, check_name, decoded, fix):
if not self.should_validate():
return
# Get API and APP keys which are needed to call Datadog API
org_name = self.ctx.obj.get('org')
if not org_name:
self.fail('No `org` has been set')
return
if org_name not in self.ctx.obj.get('orgs'):
self.fail(f'Selected org {org_name} is not in `orgs`')
return
org = self.ctx.obj['orgs'][org_name]
dd_url = org.get('dd_url')
if not dd_url:
self.fail(f'No `dd_url` has been set for org `{org_name}`')
return
url = f"{dd_url}/api/beta/apps/manifest/validate"
# prep for upload
payload = {"data": {"type": "app_manifest", "attributes": decoded}}
try:
payload_json = json.dumps(payload)
r = requests.post(url, data=payload_json)
if r.status_code == 400:
# parse the errors
errors = "\n".join(r.json()["errors"])
message = f"Error validating manifest schema:\n{errors}"
self.fail(message)
else:
r.raise_for_status()
except Exception as e:
self.fail(str(e))
def get_v2_validators(ctx, is_extras, is_marketplace):
return [
common.MaintainerValidator(
is_extras, is_marketplace, check_in_extras=False, check_in_marketplace=False, version=V2
),
common.MetricsMetadataValidator(version=V2),
common.MetricToCheckValidator(version=V2),
common.ImmutableAttributesValidator(version=V2),
common.LogsCategoryValidator(version=V2),
DisplayOnPublicValidator(version=V2),
# keep SchemaValidator last, and avoid running this validation if errors already found
SchemaValidator(ctx=ctx, version=V2, skip_if_errors=True),
]
|
StarcoderdataPython
|
118380
|
<reponame>hairong-wang/XLNet_learn2learn<gh_stars>0
#!/usr/bin/env python
# coding: utf-8
# Author: <NAME>
import json
import pandas as pd
from nltk.translate.bleu_score import sentence_bleu
INFILE = "path/to/input/file"
class BleuScore:
def __init__(self, infile):
self._infile = infile
def get_df(self):
i = 0
df = {}
with open(self._infile, 'r') as fp:
for line in fp:
df[i] = json.loads(line)
i += 1
return pd.DataFrame.from_dict(df, orient='index')
def calculate_BLEU_4_gram(self,record):
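        # With weights=(0, 0, 0, 1) sentence_bleu scores only the 4-gram precision component
        # (times the brevity penalty), not the cumulative 4-gram BLEU average.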
reference = []
for answer in record[0]['human_answers']:
answer_array = answer.split(' ')
reference.append(answer_array)
candidate = record[0]['answers'][0]['text'].split(' ')
score = sentence_bleu(reference, candidate,weights=(0, 0, 0, 1))
return score
#Add a column called BLEU_score to the qas column
def add_BLEU_score(self, train_df):
train_df_new = train_df.copy()
bleu = []
for i in range(train_df_new.shape[0]):
record = train_df_new.loc[i]['qas']
bleu.append(self.calculate_BLEU_4_gram(record))
train_df_new['BLEU_4gram_score'] = bleu
return train_df_new
def main():
    blue = BleuScore(INFILE)
    train_df = blue.get_df()
train_df_small = train_df.head(100)
train_df_new = blue.add_BLEU_score(train_df_small)
train_df_new_sort = train_df_new.sort_values('BLEU_4gram_score',ascending=False)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
4807411
|
<reponame>berylgithub/ppbap<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 5 15:08:41 2019
@author: Saint8312
"""
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import KFold
from sklearn.metrics import mean_squared_error
from scipy.stats import pearsonr
import pickle
import os
import data_checker
if __name__ == '__main__':
'''
load and split the dataset
'''
filename = os.getcwd()+'/Data/dataset_ha_alpha_122319.pkl'
dataset = data_checker.data_load(filename)
features = np.array([data['x_vector'] for data in dataset])
labels = np.array([data['y'] for data in dataset])
x_train, x_test, y_train, y_test = train_test_split(features, labels, test_size=0.25, random_state=13)
print('Training Features Shape:', x_train.shape)
print('Training Labels Shape:', y_train.shape)
print('Testing Features Shape:', x_test.shape)
print('Testing Labels Shape:', y_test.shape)
'''
data regression
'''
rf = RandomForestRegressor(n_estimators= 1000, random_state=42, verbose=1, n_jobs=-1, min_samples_split=3, max_features="sqrt", bootstrap=True, oob_score=True)
rf.fit(x_train, y_train)
'''
model saver
'''
with open(os.getcwd()+"/Model/rf_pp_ha_alpha_split.pkl", "wb") as f:
pickle.dump(rf, f)
'''
train set analysis
'''
#Mean Absolute Error
preds = rf.predict(x_train)
errors = abs(preds - y_train)
print('Mean Absolute Error:', round(np.mean(errors), 2))
#Mean Absolute Percentage Error & Accuracy
mape = 100 * (errors / y_train)
accuracy = 100 - np.mean(mape)
print('Accuracy:', round(accuracy, 2), '%.')
#Root Mean Squared Error
rmse = np.sqrt(mean_squared_error(y_train, preds))
print('Root Mean Squared Error :', round(rmse, 2))
#Pearson Correlation Coefficient (PCC) score
pcc = pearsonr(y_train, preds)
print('Pearson Correlation Coefficient :', round(pcc[0],2))
print(preds, y_train)
'''
test set analysis
'''
#Mean Absolute Error
preds = rf.predict(x_test)
errors = abs(preds - y_test)
print('Mean Absolute Error:', round(np.mean(errors), 2))
#Mean Absolute Percentage Error & Accuracy
mape = 100 * (errors / y_test)
accuracy = 100 - np.mean(mape)
print('Accuracy:', round(accuracy, 2), '%.')
#Root Mean Squared Error
rmse = np.sqrt(mean_squared_error(y_test, preds))
print('Root Mean Squared Error :', round(rmse, 2))
#Pearson Correlation Coefficient (PCC) score
pcc = pearsonr(y_test, preds)
print('Pearson Correlation Coefficient :', round(pcc[0],2))
# '''
# k-fold cross validation
# '''
# folds = [3,4,5,7,10]
# for fold in folds:
# kfolds=[]
# n=fold
# idx = 0
# kf = KFold(n_splits=n)
# for train_index, test_index in kf.split(features):
# kfold = {}
# print("index training :",idx)
# print("TRAIN:", len(train_index), "TEST:", len(test_index))
# x_train, x_test = features[train_index], features[test_index]
# y_train, y_test = labels[train_index], labels[test_index]
# rf = RandomForestRegressor(n_estimators = 1000, random_state=13, verbose=0)
# rf.fit(x_train, y_train)
#
# idx+=1
#
# #Pearson Correlation Coefficient (PCC) score
# preds = rf.predict(x_train)
# pcc = pearsonr(y_train, preds)
# kfold["pcc_train"] = pcc[0]
# print('PCC train :', round(pcc[0],2))
#
# preds = rf.predict(x_test)
# pcc = pearsonr(y_test, preds)
# kfold["pcc_test"] = pcc[0]
# print('PCC test :', round(pcc[0],2))
# print('===================')
#
# kfold["train_idx"] = train_index
# kfold["test_idx"] = test_index
# kfold["k"] = n
# kfold["idx"] = idx
# kfold["model"] = rf
# kfolds.append(kfold)
# kfolds = sorted(kfolds, key=lambda k: k['pcc_test'], reverse=True)
# print(kfolds[0]['k'], kfolds[0]['pcc_test'])
# #save best model
# with open(os.getcwd()+"/Model/rf_pp_ha_a_"+str(n)+"fold_best.pkl", "wb") as f:
# pickle.dump(kfolds[0], f)
# '''
# model loader
# '''
# with open(os.getcwd()+"/Model/rf_pp_alpha.pkl", "rb") as f:
# rf = pickle.load(f)
|
StarcoderdataPython
|
1731617
|
import cv2, os
def flip_images():
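    # Mirrors the first 900 images in each gesture folder with cv2.flip(img, 1) (horizontal flip)
    # and saves them as images 900-1799, doubling the training data.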
gest_folder = "data/train"
for g_id in os.listdir(gest_folder):
for i in range(900):
path = gest_folder + "/" + g_id + "/" + str(i) + ".jpg"
new_path = gest_folder + "/" + g_id + "/" + str(i + 900) + ".jpg"
print(path)
img = cv2.imread(path, 0)
img = cv2.flip(img, 1)
cv2.imwrite(new_path, img)
flip_images()
|
StarcoderdataPython
|
1769170
|
<filename>data/parse_recs.py<gh_stars>0
# Given a rec list from Tumblr like mine, trying to extract a list of links (fanfic recs).
# Also trying to extract a header image given a blog name.
import pytumblr
from ao3 import AO3
from ao3.works import RestrictedWork
import ffnet
from notion.client import NotionClient
ao3 = AO3()
notion_client = NotionClient(token_v2="token here")
# From tumblr API console https://api.tumblr.com/console
# Authenticate via OAuth
# NEED TO FILL OUT
client = pytumblr.TumblrRestClient()
# post url options
# https://starrybouquet.tumblr.com/post/620329944196710401/heya-any-suggestions-for-good-affinity-fix-it
# https://samcaarter.tumblr.com/private/621914267347795968/tumblr_qchqnjlukx1r9gqxq
# https://professortennant.tumblr.com/post/175193322905/samjack-rec-list-pt-1
|
StarcoderdataPython
|
3335925
|
<reponame>go2starr/lshhdc<gh_stars>10-100
from lsh import Cluster, jaccard_sim
from .utils import *
def test_same_set():
"""A set should be clustered with itself"""
s = randset()
cluster = Cluster()
cluster.add_set(s)
cluster.add_set(s)
assert len(cluster.get_sets()) == 1
def test_similar_sets():
"""Two similar sets should be clustered"""
cluster = Cluster()
cluster.add_set("abcdefg")
cluster.add_set("abcdefghi")
assert len(cluster.get_sets()) == 1
def test_dissimilar_sets():
"""Two non-similar sets should not be clustered"""
cluster = Cluster()
cluster.add_set("12345abcdef")
cluster.add_set("1234567890z")
    print(cluster.get_sets())
assert len(cluster.get_sets()) == 2
def test_cluster_threshold():
"""Expected error for threshold to similarity should be reasonable"""
n_tests = 50
dim = 15
expected_error = 0.20
tot_err = 0
for test in range(n_tests):
# Get some sets and their similarities
sets = (randset(), randset())
jsim = jaccard_sim(*sets)
# Find the threshold at which they cluster together
for threshold in range(1, 100, 5):
threshold = float(threshold) / 100
cluster = Cluster(dim, threshold)
cluster.add_set(sets[0])
cluster.add_set(sets[1])
if len(cluster.get_sets()) == 2:
tot_err += abs(jsim - threshold)
break
avg_err = float(tot_err) / n_tests
assert avg_err <= expected_error
|
StarcoderdataPython
|
3315879
|
def LimbLength(S, skel):
    # Empty stub; no implementation provided.
    raise NotImplementedError
|
StarcoderdataPython
|
1671132
|
"""Feature extraction pipeline for sutter."""
import logging
from feature_extractors.admission import AdmissionExtractor
from feature_extractors.comorbidities import ComorbiditiesExtractor
from feature_extractors.demographics import BasicDemographicsExtractor
from feature_extractors.discharge import DischargeExtractor
from feature_extractors.encounter_reason import EncounterReasonExtractor
from feature_extractors.health_history import HealthHistoryExtractor
from feature_extractors.hospital_problems import HospitalProblemsExtractor
from feature_extractors.lab_results import LabResultsExtractor
from feature_extractors.labels import ReadmissionExtractor
from feature_extractors.medications import MedicationsExtractor
from feature_extractors.payer import PayerExtractor
from feature_extractors.procedures import ProceduresExtractor
from feature_extractors.provider import ProviderExtractor
from feature_extractors.socioeconomic import SocioeconomicExtractor
from feature_extractors.utilization import UtilizationExtractor
from feature_extractors.vitals import VitalsExtractor
from fex import runner
logging.basicConfig(format='%(levelname)s:%(name)s:%(asctime)s=> %(message)s',
datefmt='%m/%d %H:%M:%S',
level=logging.INFO)
feature_extractors = [
ReadmissionExtractor(),
BasicDemographicsExtractor(),
EncounterReasonExtractor(),
UtilizationExtractor(),
DischargeExtractor(),
ProviderExtractor(),
PayerExtractor(),
AdmissionExtractor(),
HealthHistoryExtractor(),
ComorbiditiesExtractor(), # temporarily disabling (too slow)
ProceduresExtractor(),
HospitalProblemsExtractor(),
VitalsExtractor(),
MedicationsExtractor(),
LabResultsExtractor(),
SocioeconomicExtractor()
]
if __name__ == '__main__':
runner.run(*feature_extractors, args=["--no-cache"])
|
StarcoderdataPython
|
3357758
|
<reponame>pcaston/core<gh_stars>1-10
"""The awair component."""
from __future__ import annotations
from asyncio import gather
from typing import Any
from async_timeout import timeout
from python_awair import Awair
from python_awair.exceptions import AuthError
from openpeerpower.const import CONF_ACCESS_TOKEN
from openpeerpower.exceptions import ConfigEntryAuthFailed
from openpeerpower.helpers.aiohttp_client import async_get_clientsession
from openpeerpower.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from .const import API_TIMEOUT, DOMAIN, LOGGER, UPDATE_INTERVAL, AwairResult
PLATFORMS = ["sensor"]
async def async_setup_entry(opp, config_entry) -> bool:
"""Set up Awair integration from a config entry."""
session = async_get_clientsession(opp)
coordinator = AwairDataUpdateCoordinator(opp, config_entry, session)
await coordinator.async_config_entry_first_refresh()
opp.data.setdefault(DOMAIN, {})
opp.data[DOMAIN][config_entry.entry_id] = coordinator
opp.config_entries.async_setup_platforms(config_entry, PLATFORMS)
return True
async def async_unload_entry(opp, config_entry) -> bool:
"""Unload Awair configuration."""
unload_ok = await opp.config_entries.async_unload_platforms(config_entry, PLATFORMS)
if unload_ok:
opp.data[DOMAIN].pop(config_entry.entry_id)
return unload_ok
class AwairDataUpdateCoordinator(DataUpdateCoordinator):
"""Define a wrapper class to update Awair data."""
def __init__(self, opp, config_entry, session) -> None:
"""Set up the AwairDataUpdateCoordinator class."""
access_token = config_entry.data[CONF_ACCESS_TOKEN]
self._awair = Awair(access_token=access_token, session=session)
self._config_entry = config_entry
super().__init__(opp, LOGGER, name=DOMAIN, update_interval=UPDATE_INTERVAL)
async def _async_update_data(self) -> Any | None:
"""Update data via Awair client library."""
with timeout(API_TIMEOUT):
try:
LOGGER.debug("Fetching users and devices")
user = await self._awair.user()
devices = await user.devices()
results = await gather(
*[self._fetch_air_data(device) for device in devices]
)
return {result.device.uuid: result for result in results}
except AuthError as err:
raise ConfigEntryAuthFailed from err
except Exception as err:
raise UpdateFailed(err) from err
async def _fetch_air_data(self, device):
"""Fetch latest air quality data."""
LOGGER.debug("Fetching data for %s", device.uuid)
air_data = await device.air_data_latest()
LOGGER.debug(air_data)
return AwairResult(device=device, air_data=air_data)
|
StarcoderdataPython
|
1761517
|
import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDHarvester import DQMEDHarvester
segmentTest = DQMEDHarvester("DTSegmentAnalysisTest",
detailedAnalysis = cms.untracked.bool(False),
#Perform basic diagnostic in endLumi/EndRun
runOnline = cms.untracked.bool(True),
#Names of the quality tests: they must match those specified in "qtList"
chi2TestName = cms.untracked.string('chi2InRange'),
segmRecHitTestName = cms.untracked.string('segmRecHitInRange'),
                             #Permitted value of chi2 segment quality
chi2Threshold = cms.untracked.double(5.0),
normalizeHistoPlots = cms.untracked.bool(False),
# top folder for the histograms in DQMStore
topHistoFolder = cms.untracked.string('DT/02-Segments'),
# hlt DQM mode
hltDQMMode = cms.untracked.bool(False),
nEventsCert = cms.untracked.int32(1000),
maxPhiHit = cms.untracked.int32(7),
maxPhiZHit = cms.untracked.int32(11),
)
|
StarcoderdataPython
|